Commit 2d9b5dcd authored by Gao Xiang, committed by Greg Kroah-Hartman

staging: erofs: decompress asynchronously if PG_readahead page at first

In the case where nr_to_read == lookahead_size (i.e. a pure readahead request), it is better to
decompress asynchronously as well, since no page will be needed immediately.
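
As a rough illustration of the rule the hunk below introduces, here is a
minimal user-space C sketch. Note that struct fake_page, choose_sync() and
readahead_marked are hypothetical stand-ins for the kernel's struct page,
the readpages loop and the PG_readahead flag; this models the decision,
it is not kernel API.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for one page of the readahead batch. */
struct fake_page {
	bool readahead_marked;	/* models PG_readahead */
};

/*
 * Models the patched loop: start from the heuristic choice and demote
 * to asynchronous decompression when the very first page picked up
 * (head still NULL) carries the readahead mark, i.e. the whole batch
 * is pure readahead and nothing is needed immediately.
 */
static bool choose_sync(bool heuristic_sync,
			const struct fake_page *pages, int nr)
{
	bool sync = heuristic_sync;
	const struct fake_page *head = NULL;
	int i;

	for (i = 0; i < nr; i++) {
		const struct fake_page *page = &pages[i];

		/* mirrors: sync &= !(PageReadahead(page) && !head); */
		sync &= !(page->readahead_marked && !head);
		head = page;	/* only the first iteration can demote */
	}
	return sync;
}

int main(void)
{
	struct fake_page batch[2] = {
		{ .readahead_marked = true },	/* PG_readahead hit first */
		{ .readahead_marked = false },
	};

	/* heuristic says sync, but the readahead mark demotes it: prints 0 */
	printf("sync = %d\n", choose_sync(true, batch, 2));
	return 0;
}

With the heuristic asking for synchronous decompression but the first page
carrying PG_readahead, sync drops to false, so the whole batch would be
decompressed asynchronously.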

Reviewed-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 23edf3ab
+8 −1
@@ -1345,8 +1345,8 @@ static int z_erofs_vle_normalaccess_readpages(struct file *filp,
 {
 	struct inode *const inode = mapping->host;
 	struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
-	const bool sync = __should_decompress_synchronously(sbi, nr_pages);
 
+	bool sync = __should_decompress_synchronously(sbi, nr_pages);
 	struct z_erofs_vle_frontend f = VLE_FRONTEND_INIT(inode);
 	gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL);
 	struct page *head = NULL;
@@ -1364,6 +1364,13 @@ static int z_erofs_vle_normalaccess_readpages(struct file *filp,
 		prefetchw(&page->flags);
 		list_del(&page->lru);
 
+		/*
+		 * A pure asynchronous readahead is indicated if
+		 * a PG_readahead marked page is hit first.
+		 * Let's also do asynchronous decompression for this case.
+		 */
+		sync &= !(PageReadahead(page) && !head);
+
 		if (add_to_page_cache_lru(page, mapping, page->index, gfp)) {
 			list_add(&page->lru, &pagepool);
 			continue;