Commit d72dc8a2 authored by Jan Kara, committed by Linus Torvalds
Browse files

mm: make pagevec_lookup() update index

Make pagevec_lookup() (and underlying find_get_pages()) update index to
the next page where iteration should continue.  Most callers want this
and also pagevec_lookup_tag() already does this.

Link: http://lkml.kernel.org/r/20170726114704.7626-3-jack@suse.cz


Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 26b433d0
Loading
Loading
Loading
Loading
+4 −6
Original line number Diff line number Diff line
@@ -1633,13 +1633,12 @@ void clean_bdev_aliases(struct block_device *bdev, sector_t block, sector_t len)

	end = (block + len - 1) >> (PAGE_SHIFT - bd_inode->i_blkbits);
	pagevec_init(&pvec, 0);
	while (index <= end && pagevec_lookup(&pvec, bd_mapping, index,
	while (index <= end && pagevec_lookup(&pvec, bd_mapping, &index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			index = page->index;
			if (index > end)
			if (page->index > end)
				break;
			if (!page_has_buffers(page))
				continue;
@@ -1670,7 +1669,6 @@ unlock_page:
		}
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}
}
EXPORT_SYMBOL(clean_bdev_aliases);
@@ -3552,7 +3550,8 @@ page_cache_seek_hole_data(struct inode *inode, loff_t offset, loff_t length,
		unsigned want, nr_pages, i;

		want = min_t(unsigned, end - index, PAGEVEC_SIZE);
		nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index, want);
		nr_pages = pagevec_lookup(&pvec, inode->i_mapping, &index,
					  want);
		if (nr_pages == 0)
			break;

@@ -3594,7 +3593,6 @@ page_cache_seek_hole_data(struct inode *inode, loff_t offset, loff_t length,
		if (nr_pages < want)
			break;

		index = pvec.pages[i - 1]->index + 1;
		pagevec_release(&pvec);
	} while (index < end);

+1 −3
Original line number Diff line number Diff line
@@ -468,7 +468,7 @@ static int ext4_find_unwritten_pgoff(struct inode *inode,
		unsigned long nr_pages;

		num = min_t(pgoff_t, end - index, PAGEVEC_SIZE - 1) + 1;
		nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
		nr_pages = pagevec_lookup(&pvec, inode->i_mapping, &index,
					  (pgoff_t)num);
		if (nr_pages == 0)
			break;
@@ -536,8 +536,6 @@ next:
		/* The no. of pages is less than our desired, we are done. */
		if (nr_pages < num)
			break;

		index = pvec.pages[i - 1]->index + 1;
		pagevec_release(&pvec);
	} while (index <= end);

+2 −6
Original line number Diff line number Diff line
@@ -1720,7 +1720,7 @@ static void mpage_release_unused_pages(struct mpage_da_data *mpd,

	pagevec_init(&pvec, 0);
	while (index <= end) {
		nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
		nr_pages = pagevec_lookup(&pvec, mapping, &index, PAGEVEC_SIZE);
		if (nr_pages == 0)
			break;
		for (i = 0; i < nr_pages; i++) {
@@ -1737,7 +1737,6 @@ static void mpage_release_unused_pages(struct mpage_da_data *mpd,
			}
			unlock_page(page);
		}
		index = pvec.pages[nr_pages - 1]->index + 1;
		pagevec_release(&pvec);
	}
}
@@ -2348,7 +2347,7 @@ static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)

	pagevec_init(&pvec, 0);
	while (start <= end) {
		nr_pages = pagevec_lookup(&pvec, inode->i_mapping, start,
		nr_pages = pagevec_lookup(&pvec, inode->i_mapping, &start,
					  PAGEVEC_SIZE);
		if (nr_pages == 0)
			break;
@@ -2357,8 +2356,6 @@ static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)

			if (page->index > end)
				break;
			/* Up to 'end' pages must be contiguous */
			BUG_ON(page->index != start);
			bh = head = page_buffers(page);
			do {
				if (lblk < mpd->map.m_lblk)
@@ -2403,7 +2400,6 @@ static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
				pagevec_release(&pvec);
				return err;
			}
			start++;
		}
		pagevec_release(&pvec);
	}
+2 −3
Original line number Diff line number Diff line
@@ -1178,11 +1178,10 @@ void __fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
	pagevec_init(&pvec, 0);
	next = 0;
	do {
		if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE))
		if (!pagevec_lookup(&pvec, mapping, &next, PAGEVEC_SIZE))
			break;
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			next = page->index;
			if (PageFsCache(page)) {
				__fscache_wait_on_page_write(cookie, page);
				__fscache_uncache_page(cookie, page);
@@ -1190,7 +1189,7 @@ void __fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
		}
		pagevec_release(&pvec);
		cond_resched();
	} while (++next);
	} while (next);

	_leave("");
}
+8 −9
Original line number Diff line number Diff line
@@ -401,7 +401,7 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
	const pgoff_t end = lend >> huge_page_shift(h);
	struct vm_area_struct pseudo_vma;
	struct pagevec pvec;
	pgoff_t next;
	pgoff_t next, index;
	int i, freed = 0;
	long lookup_nr = PAGEVEC_SIZE;
	bool truncate_op = (lend == LLONG_MAX);
@@ -420,7 +420,7 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
		/*
		 * When no more pages are found, we are done.
		 */
		if (!pagevec_lookup(&pvec, mapping, next, lookup_nr))
		if (!pagevec_lookup(&pvec, mapping, &next, lookup_nr))
			break;

		for (i = 0; i < pagevec_count(&pvec); ++i) {
@@ -432,13 +432,13 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
			 * only possible in the punch hole case as end is
			 * max page offset in the truncate case.
			 */
			next = page->index;
			if (next >= end)
			index = page->index;
			if (index >= end)
				break;

			hash = hugetlb_fault_mutex_hash(h, current->mm,
							&pseudo_vma,
							mapping, next, 0);
							mapping, index, 0);
			mutex_lock(&hugetlb_fault_mutex_table[hash]);

			/*
@@ -455,8 +455,8 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,

				i_mmap_lock_write(mapping);
				hugetlb_vmdelete_list(&mapping->i_mmap,
					next * pages_per_huge_page(h),
					(next + 1) * pages_per_huge_page(h));
					index * pages_per_huge_page(h),
					(index + 1) * pages_per_huge_page(h));
				i_mmap_unlock_write(mapping);
			}

@@ -475,14 +475,13 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
			freed++;
			if (!truncate_op) {
				if (unlikely(hugetlb_unreserve_pages(inode,
							next, next + 1, 1)))
							index, index + 1, 1)))
					hugetlb_fix_reserve_counts(inode);
			}

			unlock_page(page);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
		}
		++next;
		huge_pagevec_release(&pvec);
		cond_resched();
	}
Loading