Commit 552446a4 authored by Matthew Wilcox

shmem: Convert shmem_add_to_page_cache to XArray

We can use xas_find_conflict() instead of radix_tree_gang_lookup_slot()
to find any conflicting entry and combine the three paths through this
function into one.
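
For readers new to the XArray API, the shape of that single path is sketched below. This is illustrative only: the helper name and parameters are invented, and the THP handling and statistics updates of the real function are omitted.

#include <linux/xarray.h>

/*
 * Illustrative sketch, not from this patch: store @item at @index
 * unless an entry other than @expected is already present.
 * xas_find_conflict() returns NULL when nothing is there, so
 * @expected == NULL means "the slot must be empty".
 */
static int store_unless_conflict(struct xarray *xa, unsigned long index,
				 void *item, void *expected, gfp_t gfp)
{
	XA_STATE(xas, xa, index);

	do {
		xas_lock_irq(&xas);
		if (xas_find_conflict(&xas) != expected)
			xas_set_err(&xas, -EEXIST);
		else
			xas_store(&xas, item);
		xas_unlock_irq(&xas);
		/* On -ENOMEM, allocate outside the lock and go around again. */
	} while (xas_nomem(&xas, gfp));

	return xas_error(&xas);
}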

Signed-off-by: Matthew Wilcox <willy@infradead.org>
parent e21a2955
mm/shmem.c +34 −47
@@ -577,9 +577,11 @@ static inline bool is_huge_enabled(struct shmem_sb_info *sbinfo)
  */
 static int shmem_add_to_page_cache(struct page *page,
 				   struct address_space *mapping,
-				   pgoff_t index, void *expected)
+				   pgoff_t index, void *expected, gfp_t gfp)
 {
-	int error, nr = hpage_nr_pages(page);
+	XA_STATE_ORDER(xas, &mapping->i_pages, index, compound_order(page));
+	unsigned long i = 0;
+	unsigned long nr = 1UL << compound_order(page);
 
 	VM_BUG_ON_PAGE(PageTail(page), page);
 	VM_BUG_ON_PAGE(index != round_down(index, nr), page);
@@ -591,46 +593,39 @@ static int shmem_add_to_page_cache(struct page *page,
 	page->mapping = mapping;
 	page->index = index;
 
-	xa_lock_irq(&mapping->i_pages);
-	if (PageTransHuge(page)) {
-		void __rcu **results;
-		pgoff_t idx;
-		int i;
-
-		error = 0;
-		if (radix_tree_gang_lookup_slot(&mapping->i_pages,
-					&results, &idx, index, 1) &&
-				idx < index + HPAGE_PMD_NR) {
-			error = -EEXIST;
-		}
-
-		if (!error) {
-			for (i = 0; i < HPAGE_PMD_NR; i++) {
-				error = radix_tree_insert(&mapping->i_pages,
-						index + i, page + i);
-				VM_BUG_ON(error);
+	do {
+		void *entry;
+		xas_lock_irq(&xas);
+		entry = xas_find_conflict(&xas);
+		if (entry != expected)
+			xas_set_err(&xas, -EEXIST);
+		xas_create_range(&xas);
+		if (xas_error(&xas))
+			goto unlock;
+next:
+		xas_store(&xas, page + i);
+		if (++i < nr) {
+			xas_next(&xas);
+			goto next;
 		}
+		if (PageTransHuge(page)) {
 			count_vm_event(THP_FILE_ALLOC);
+			__inc_node_page_state(page, NR_SHMEM_THPS);
 		}
-	} else if (!expected) {
-		error = radix_tree_insert(&mapping->i_pages, index, page);
-	} else {
-		error = shmem_replace_entry(mapping, index, expected, page);
-	}
-
-	if (!error) {
 		mapping->nrpages += nr;
-		if (PageTransHuge(page))
-			__inc_node_page_state(page, NR_SHMEM_THPS);
 		__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
 		__mod_node_page_state(page_pgdat(page), NR_SHMEM, nr);
-		xa_unlock_irq(&mapping->i_pages);
-	} else {
+unlock:
+		xas_unlock_irq(&xas);
+	} while (xas_nomem(&xas, gfp));
+
+	if (xas_error(&xas)) {
 		page->mapping = NULL;
-		xa_unlock_irq(&mapping->i_pages);
 		page_ref_sub(page, nr);
+		return xas_error(&xas);
 	}
-	return error;
+
+	return 0;
 }
 
 /*
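
An illustrative aside on the multi-index store in the hunk above: XA_STATE_ORDER() declares a cursor covering 1 << order consecutive indices, xas_create_range() builds the nodes up front so the subsequent stores cannot fail, and xas_next() steps to the next index without rewalking from the root. A reduced, hypothetical sketch of just that part (names invented, conflict checking and statistics omitted):

#include <linux/xarray.h>

/*
 * Hypothetical sketch, not from this patch: store the subpages of an
 * order-N compound page at 1 << N consecutive indices.
 */
static int store_compound(struct xarray *xa, struct page *page,
			  unsigned long index, unsigned int order, gfp_t gfp)
{
	XA_STATE_ORDER(xas, xa, index, order);
	unsigned long i, nr = 1UL << order;

	do {
		i = 0;
		xas_lock_irq(&xas);
		xas_create_range(&xas);	/* prebuild nodes for the range */
		if (xas_error(&xas))
			goto unlock;
		for (;;) {
			xas_store(&xas, page + i);	/* subpage i at index + i */
			if (++i == nr)
				break;
			xas_next(&xas);
		}
unlock:
		xas_unlock_irq(&xas);
	} while (xas_nomem(&xas, gfp));	/* allocate outside the lock, retry */

	return xas_error(&xas);
}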
@@ -1183,7 +1178,7 @@ static int shmem_unuse_inode(struct shmem_inode_info *info,
 	 */
 	if (!error)
 		error = shmem_add_to_page_cache(*pagep, mapping, index,
-						radswap);
+						radswap, gfp);
 	if (error != -ENOMEM) {
 		/*
 		 * Truncation and eviction use free_swap_and_cache(), which
@@ -1700,7 +1695,7 @@ repeat:
 				false);
 		if (!error) {
 			error = shmem_add_to_page_cache(page, mapping, index,
-						swp_to_radix_entry(swap));
+						swp_to_radix_entry(swap), gfp);
 			/*
 			 * We already confirmed swap under page lock, and make
 			 * no memory allocation here, so usually no possibility
@@ -1806,13 +1801,8 @@ alloc_nohuge: page = shmem_alloc_and_acct_page(gfp, inode,
 				PageTransHuge(page));
 		if (error)
 			goto unacct;
-		error = radix_tree_maybe_preload_order(gfp & GFP_RECLAIM_MASK,
-				compound_order(page));
-		if (!error) {
-			error = shmem_add_to_page_cache(page, mapping, hindex,
-							NULL);
-			radix_tree_preload_end();
-		}
+		error = shmem_add_to_page_cache(page, mapping, hindex,
+						NULL, gfp & GFP_RECLAIM_MASK);
 		if (error) {
 			mem_cgroup_cancel_charge(page, memcg,
 					PageTransHuge(page));
@@ -2281,11 +2271,8 @@ static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
 	if (ret)
 		goto out_release;
 
-	ret = radix_tree_maybe_preload(gfp & GFP_RECLAIM_MASK);
-	if (!ret) {
-		ret = shmem_add_to_page_cache(page, mapping, pgoff, NULL);
-		radix_tree_preload_end();
-	}
+	ret = shmem_add_to_page_cache(page, mapping, pgoff, NULL,
+						gfp & GFP_RECLAIM_MASK);
 	if (ret)
 		goto out_release_uncharge;
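
A closing note on the gfp plumbing in the last two hunks: the radix tree could not allocate nodes under the spinlock, so callers had to bracket each insert with radix_tree_maybe_preload() and radix_tree_preload_end(). The XArray allocates on demand via the xas_nomem() retry loop inside shmem_add_to_page_cache() itself, so callers now simply pass the gfp mask down. Side by side, as in the shmem_mfill_atomic_pte() hunk above:

/* Before: preallocate, insert, release the preallocation. */
ret = radix_tree_maybe_preload(gfp & GFP_RECLAIM_MASK);
if (!ret) {
	ret = shmem_add_to_page_cache(page, mapping, pgoff, NULL);
	radix_tree_preload_end();
}

/* After: the mask travels with the call; allocation failures are
 * retried by the xas_nomem() loop in the callee. */
ret = shmem_add_to_page_cache(page, mapping, pgoff, NULL,
					gfp & GFP_RECLAIM_MASK);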