Commit 8d93b41c authored by Matthew Wilcox

mm: Convert add_to_swap_cache to XArray



Combine __add_to_swap_cache and add_to_swap_cache into one function,
since there is no longer any need to preload.

Signed-off-by: Matthew Wilcox <willy@infradead.org>
parent 69b6c131
+29 −64
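
The XArray removes the preload/commit dance: a store may fail with -ENOMEM
under the lock, and xas_nomem() then allocates the missing node with the
caller's gfp flags and signals a retry. A minimal sketch of that idiom (the
xarray and function names here are illustrative, not part of this commit):

#include <linux/xarray.h>

static DEFINE_XARRAY(demo_array);	/* illustrative array, not from this commit */

/*
 * Store one item at @index.  If xas_store() hits a missing node it
 * records -ENOMEM in the xa_state; xas_nomem() then allocates outside
 * the lock and tells us to retry.
 */
static int demo_store(unsigned long index, void *item, gfp_t gfp)
{
	XA_STATE(xas, &demo_array, index);

	do {
		xas_lock_irq(&xas);
		xas_store(&xas, item);
		xas_unlock_irq(&xas);
	} while (xas_nomem(&xas, gfp));

	return xas_error(&xas);
}

This is the same shape as the new add_to_swap_cache() below, which is why the
old preload wrapper and __add_to_swap_cache() can collapse into a single
function taking a gfp_t.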
@@ -107,14 +107,15 @@ void show_swap_cache_info(void)
 }
 
 /*
- * __add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
+ * add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
  * but sets SwapCache flag and private instead of mapping and index.
  */
-int __add_to_swap_cache(struct page *page, swp_entry_t entry)
+int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp)
 {
-	int error, i, nr = hpage_nr_pages(page);
-	struct address_space *address_space;
+	struct address_space *address_space = swap_address_space(entry);
 	pgoff_t idx = swp_offset(entry);
+	XA_STATE_ORDER(xas, &address_space->i_pages, idx, compound_order(page));
+	unsigned long i, nr = 1UL << compound_order(page);
 
 	VM_BUG_ON_PAGE(!PageLocked(page), page);
 	VM_BUG_ON_PAGE(PageSwapCache(page), page);
@@ -123,50 +124,30 @@ int __add_to_swap_cache(struct page *page, swp_entry_t entry)
 	page_ref_add(page, nr);
 	SetPageSwapCache(page);
 
-	address_space = swap_address_space(entry);
-	xa_lock_irq(&address_space->i_pages);
-	for (i = 0; i < nr; i++) {
-		set_page_private(page + i, entry.val + i);
-		error = radix_tree_insert(&address_space->i_pages,
-					  idx + i, page + i);
-		if (unlikely(error))
-			break;
-	}
-	if (likely(!error)) {
+	do {
+		xas_lock_irq(&xas);
+		xas_create_range(&xas);
+		if (xas_error(&xas))
+			goto unlock;
+		for (i = 0; i < nr; i++) {
+			VM_BUG_ON_PAGE(xas.xa_index != idx + i, page);
+			set_page_private(page + i, entry.val + i);
+			xas_store(&xas, page + i);
+			xas_next(&xas);
+		}
 		address_space->nrpages += nr;
 		__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
 		ADD_CACHE_INFO(add_total, nr);
-	} else {
-		/*
-		 * Only the context which have set SWAP_HAS_CACHE flag
-		 * would call add_to_swap_cache().
-		 * So add_to_swap_cache() doesn't returns -EEXIST.
-		 */
-		VM_BUG_ON(error == -EEXIST);
-		set_page_private(page + i, 0UL);
-		while (i--) {
-			radix_tree_delete(&address_space->i_pages, idx + i);
-			set_page_private(page + i, 0UL);
-		}
-		ClearPageSwapCache(page);
-		page_ref_sub(page, nr);
-	}
-	xa_unlock_irq(&address_space->i_pages);
-
-	return error;
-}
-
-int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
-{
-	int error;
-
-	error = radix_tree_maybe_preload_order(gfp_mask, compound_order(page));
-	if (!error) {
-		error = __add_to_swap_cache(page, entry);
-		radix_tree_preload_end();
-	}
-	return error;
+unlock:
+		xas_unlock_irq(&xas);
+	} while (xas_nomem(&xas, gfp));
+
+	if (!xas_error(&xas))
+		return 0;
+
+	ClearPageSwapCache(page);
+	page_ref_sub(page, nr);
+	return xas_error(&xas);
 }
 
 /*
@@ -217,7 +198,7 @@ int add_to_swap(struct page *page)
 		return 0;
 
 	/*
-	 * Radix-tree node allocations from PF_MEMALLOC contexts could
+	 * XArray node allocations from PF_MEMALLOC contexts could
 	 * completely exhaust the page allocator. __GFP_NOMEMALLOC
 	 * stops emergency reserves from being allocated.
 	 *
@@ -229,7 +210,6 @@ int add_to_swap(struct page *page)
 	 */
 	err = add_to_swap_cache(page, entry,
 			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);
-	/* -ENOMEM radix-tree allocation failure */
 	if (err)
 		/*
 		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
@@ -413,19 +393,11 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 				break;		/* Out of memory */
 		}
 
-		/*
-		 * call radix_tree_preload() while we can wait.
-		 */
-		err = radix_tree_maybe_preload(gfp_mask & GFP_KERNEL);
-		if (err)
-			break;
-
 		/*
 		 * Swap entry may have been freed since our caller observed it.
 		 */
 		err = swapcache_prepare(entry);
 		if (err == -EEXIST) {
-			radix_tree_preload_end();
 			/*
 			 * We might race against get_swap_page() and stumble
 			 * across a SWAP_HAS_CACHE swap_map entry whose page
@@ -433,26 +405,19 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 			 */
 			cond_resched();
 			continue;
-		}
-		if (err) {		/* swp entry is obsolete ? */
-			radix_tree_preload_end();
+		} else if (err)		/* swp entry is obsolete ? */
 			break;
-		}
 
-		/* May fail (-ENOMEM) if radix-tree node allocation failed. */
+		/* May fail (-ENOMEM) if XArray node allocation failed. */
 		__SetPageLocked(new_page);
 		__SetPageSwapBacked(new_page);
-		err = __add_to_swap_cache(new_page, entry);
+		err = add_to_swap_cache(new_page, entry, gfp_mask & GFP_KERNEL);
 		if (likely(!err)) {
-			radix_tree_preload_end();
-			/*
-			 * Initiate read into locked page and return.
-			 */
+			/* Initiate read into locked page */
 			lru_cache_add_anon(new_page);
 			*new_page_allocated = true;
 			return new_page;
 		}
-		radix_tree_preload_end();
 		__ClearPageLocked(new_page);
 		/*
 		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely