Commit 2c684234 authored by Matthew Wilcox (Oracle), committed by Linus Torvalds
Browse files

mm: add page_cache_readahead_unbounded



ext4 and f2fs have duplicated the guts of the readahead code so they can
read past i_size.  Instead, separate out the guts of the readahead code
so they can call it directly.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Tested-by: Eric Biggers <ebiggers@google.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
Reviewed-by: Eric Biggers <ebiggers@google.com>
Cc: Chao Yu <yuchao0@huawei.com>
Cc: Cong Wang <xiyou.wangcong@gmail.com>
Cc: Darrick J. Wong <darrick.wong@oracle.com>
Cc: Dave Chinner <dchinner@redhat.com>
Cc: Gao Xiang <gaoxiang25@huawei.com>
Cc: Jaegeuk Kim <jaegeuk@kernel.org>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Joseph Qi <joseph.qi@linux.alibaba.com>
Cc: Junxiao Bi <junxiao.bi@oracle.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Zi Yan <ziy@nvidia.com>
Cc: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Cc: Miklos Szeredi <mszeredi@redhat.com>
Link: http://lkml.kernel.org/r/20200414150233.24495-14-willy@infradead.org


Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent b0f31d78
Loading
Loading
Loading
Loading
+2 −33
Original line number Diff line number Diff line
@@ -342,37 +342,6 @@ static int ext4_get_verity_descriptor(struct inode *inode, void *buf,
	return desc_size;
}

/*
 * Prefetch some pages from the file's Merkle tree.
 *
 * This is basically a stripped-down version of __do_page_cache_readahead()
 * which works on pages past i_size.
 */
static void ext4_merkle_tree_readahead(struct address_space *mapping,
				       pgoff_t start_index, unsigned long count)
{
	LIST_HEAD(page_list);
	unsigned int nr_allocated = 0;
	struct blk_plug plug;
	pgoff_t idx;
	pgoff_t end = start_index + count;

	/* Allocate fresh pages for any tree indices not already cached. */
	for (idx = start_index; idx < end; idx++) {
		struct page *page = xa_load(&mapping->i_pages, idx);

		if (page && !xa_is_value(page))
			continue;	/* already present in the page cache */
		page = __page_cache_alloc(readahead_gfp_mask(mapping));
		if (!page)
			break;		/* best effort; stop on allocation failure */
		page->index = idx;
		list_add(&page->lru, &page_list);
		nr_allocated++;
	}

	/* Plug so the reads go out as one batched submission. */
	blk_start_plug(&plug);
	ext4_mpage_readpages(mapping, &page_list, NULL, nr_allocated, true);
	blk_finish_plug(&plug);
}

static struct page *ext4_read_merkle_tree_page(struct inode *inode,
					       pgoff_t index,
					       unsigned long num_ra_pages)
@@ -386,8 +355,8 @@ static struct page *ext4_read_merkle_tree_page(struct inode *inode,
		if (page)
			put_page(page);
		else if (num_ra_pages > 1)
			ext4_merkle_tree_readahead(inode->i_mapping, index,
						   num_ra_pages);
			page_cache_readahead_unbounded(inode->i_mapping, NULL,
					index, num_ra_pages, 0);
		page = read_mapping_page(inode->i_mapping, index, NULL);
	}
	return page;
+1 −1
Original line number Diff line number Diff line
@@ -2177,7 +2177,7 @@ out:
 * use ->readpage() or do the necessary surgery to decouple ->readpages()
 * from read-ahead.
 */
int f2fs_mpage_readpages(struct address_space *mapping,
static int f2fs_mpage_readpages(struct address_space *mapping,
			struct list_head *pages, struct page *page,
			unsigned nr_pages, bool is_readahead)
{
+0 −3
Original line number Diff line number Diff line
@@ -3373,9 +3373,6 @@ int f2fs_reserve_new_block(struct dnode_of_data *dn);
int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index);
int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from);
int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index);
int f2fs_mpage_readpages(struct address_space *mapping,
			struct list_head *pages, struct page *page,
			unsigned nr_pages, bool is_readahead);
struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
			int op_flags, bool for_write);
struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index);
+2 −33
Original line number Diff line number Diff line
@@ -222,37 +222,6 @@ static int f2fs_get_verity_descriptor(struct inode *inode, void *buf,
	return size;
}

/*
 * Prefetch some pages from the file's Merkle tree.
 *
 * This is basically a stripped-down version of __do_page_cache_readahead()
 * which works on pages past i_size.
 */
static void f2fs_merkle_tree_readahead(struct address_space *mapping,
				       pgoff_t start_index, unsigned long count)
{
	LIST_HEAD(page_list);
	unsigned int nr_allocated = 0;
	struct blk_plug plug;
	pgoff_t idx;
	pgoff_t end = start_index + count;

	/* Allocate fresh pages for any tree indices not already cached. */
	for (idx = start_index; idx < end; idx++) {
		struct page *page = xa_load(&mapping->i_pages, idx);

		if (page && !xa_is_value(page))
			continue;	/* already present in the page cache */
		page = __page_cache_alloc(readahead_gfp_mask(mapping));
		if (!page)
			break;		/* best effort; stop on allocation failure */
		page->index = idx;
		list_add(&page->lru, &page_list);
		nr_allocated++;
	}

	/* Plug so the reads go out as one batched submission. */
	blk_start_plug(&plug);
	f2fs_mpage_readpages(mapping, &page_list, NULL, nr_allocated, true);
	blk_finish_plug(&plug);
}

static struct page *f2fs_read_merkle_tree_page(struct inode *inode,
					       pgoff_t index,
					       unsigned long num_ra_pages)
@@ -266,8 +235,8 @@ static struct page *f2fs_read_merkle_tree_page(struct inode *inode,
		if (page)
			put_page(page);
		else if (num_ra_pages > 1)
			f2fs_merkle_tree_readahead(inode->i_mapping, index,
						   num_ra_pages);
			page_cache_readahead_unbounded(inode->i_mapping, NULL,
					index, num_ra_pages, 0);
		page = read_mapping_page(inode->i_mapping, index, NULL);
	}
	return page;
+3 −0
Original line number Diff line number Diff line
@@ -625,6 +625,9 @@ void page_cache_sync_readahead(struct address_space *, struct file_ra_state *,
void page_cache_async_readahead(struct address_space *, struct file_ra_state *,
		struct file *, struct page *, pgoff_t index,
		unsigned long req_count);
void page_cache_readahead_unbounded(struct address_space *, struct file *,
		pgoff_t index, unsigned long nr_to_read,
		unsigned long lookahead_count);

/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
Loading