Commit ef8e5717 authored by Matthew Wilcox

page cache: Convert delete_batch to XArray

Rename the function from page_cache_tree_delete_batch to just
page_cache_delete_batch.
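
For readers who haven't met the XArray yet: the conversion replaces the
radix tree's slot-and-iterator walk with a cursor kept in an XA_STATE.
A minimal sketch of that pattern, assuming a bare struct xarray rather
than a page cache (the helper name erase_range_from() is illustrative
and not part of this commit):

#include <linux/xarray.h>

/* Illustrative only: erase every present entry at index 'first' or
 * above, using the XA_STATE/xas_for_each()/xas_store() shape that
 * this commit gives page_cache_delete_batch().
 */
static void erase_range_from(struct xarray *xa, unsigned long first)
{
	XA_STATE(xas, xa, first);	/* cursor: array plus current index */
	void *entry;

	xas_lock(&xas);			/* xas_store() requires the xa_lock */
	xas_for_each(&xas, entry, ULONG_MAX) {
		if (xa_is_value(entry))	/* skip value (shadow) entries */
			continue;
		xas_store(&xas, NULL);	/* store via the cursor; no re-walk
					 * from the root for each entry */
	}
	xas_unlock(&xas);
}

In page_cache_delete_batch() itself the caller already holds the
i_pages lock, so the function iterates and stores without taking it.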

Signed-off-by: Matthew Wilcox <willy@infradead.org>
parent a332125f
+13 −15
@@ -272,7 +272,7 @@ void delete_from_page_cache(struct page *page)
 EXPORT_SYMBOL(delete_from_page_cache);
 
 /*
- * page_cache_tree_delete_batch - delete several pages from page cache
+ * page_cache_delete_batch - delete several pages from page cache
  * @mapping: the mapping to which pages belong
  * @pvec: pagevec with pages to delete
  *
@@ -285,23 +285,18 @@ EXPORT_SYMBOL(delete_from_page_cache);
  *
  * The function expects the i_pages lock to be held.
  */
-static void
-page_cache_tree_delete_batch(struct address_space *mapping,
+static void page_cache_delete_batch(struct address_space *mapping,
 			     struct pagevec *pvec)
 {
-	struct radix_tree_iter iter;
-	void **slot;
+	XA_STATE(xas, &mapping->i_pages, pvec->pages[0]->index);
 	int total_pages = 0;
 	int i = 0, tail_pages = 0;
 	struct page *page;
-	pgoff_t start;
 
-	start = pvec->pages[0]->index;
-	radix_tree_for_each_slot(slot, &mapping->i_pages, &iter, start) {
+	mapping_set_update(&xas, mapping);
+	xas_for_each(&xas, page, ULONG_MAX) {
 		if (i >= pagevec_count(pvec) && !tail_pages)
 			break;
-		page = radix_tree_deref_slot_protected(slot,
-						       &mapping->i_pages.xa_lock);
 		if (xa_is_value(page))
 			continue;
 		if (!tail_pages) {
@@ -310,8 +305,11 @@ page_cache_tree_delete_batch(struct address_space *mapping,
 			 * have our pages locked so they are protected from
 			 * being removed.
 			 */
-			if (page != pvec->pages[i])
+			if (page != pvec->pages[i]) {
+				VM_BUG_ON_PAGE(page->index >
+						pvec->pages[i]->index, page);
 				continue;
+			}
 			WARN_ON_ONCE(!PageLocked(page));
 			if (PageTransHuge(page) && !PageHuge(page))
 				tail_pages = HPAGE_PMD_NR - 1;
@@ -322,11 +320,11 @@ page_cache_tree_delete_batch(struct address_space *mapping,
 			 */
 			i++;
 		} else {
+			VM_BUG_ON_PAGE(page->index + HPAGE_PMD_NR - tail_pages
+					!= pvec->pages[i]->index, page);
 			tail_pages--;
 		}
-		radix_tree_clear_tags(&mapping->i_pages, iter.node, slot);
-		__radix_tree_replace(&mapping->i_pages, iter.node, slot, NULL,
-				workingset_lookup_update(mapping));
+		xas_store(&xas, NULL);
 		total_pages++;
 	}
 	mapping->nrpages -= total_pages;
@@ -347,7 +345,7 @@ void delete_from_page_cache_batch(struct address_space *mapping,
 
 		unaccount_page_cache_page(mapping, pvec->pages[i]);
 	}
-	page_cache_tree_delete_batch(mapping, pvec);
+	page_cache_delete_batch(mapping, pvec);
 	xa_unlock_irqrestore(&mapping->i_pages, flags);
 
 	for (i = 0; i < pagevec_count(pvec); i++)
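
Note how the per-page triple of radix_tree_clear_tags() plus
__radix_tree_replace() with workingset_lookup_update() collapses into a
single xas_store(&xas, NULL): the XA_STATE cursor already points at the
right node and slot, and mapping_set_update() registers the workingset
node-update hook on the cursor once, up front. A hedged sketch of that
registration pattern, with an illustrative callback name
(shadow_stats_cb is not from this commit):

#include <linux/xarray.h>

/* Illustrative callback: the XArray invokes it whenever a store
 * changes a node, which is how workingset accounting can observe
 * page-cache deletions without a hook argument on every replace. */
static void shadow_stats_cb(struct xa_node *node)
{
	/* e.g. update per-node shadow-entry statistics here */
}

static void erase_one(struct xarray *xa, unsigned long index)
{
	XA_STATE(xas, xa, index);

	/* Register once on the cursor, as mapping_set_update() does,
	 * instead of threading a hook through every replace call. */
	xas_set_update(&xas, shadow_stats_cb);
	xas_lock(&xas);
	xas_store(&xas, NULL);	/* callback fires if the node changes */
	xas_unlock(&xas);
}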