Commit 89eb946a authored by Matthew Wilcox

mm: Convert page migration to XArray

parent 560d454b
mm/migrate.c: +18 −30
@@ -323,7 +323,7 @@ void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
 	page = migration_entry_to_page(entry);
 
 	/*
-	 * Once radix-tree replacement of page migration started, page_count
+	 * Once page cache replacement of page migration started, page_count
 	 * *must* be zero. And, we don't want to call wait_on_page_locked()
 	 * against a page without get_page().
 	 * So, we use get_page_unless_zero(), here. Even failed, page fault
@@ -438,10 +438,10 @@ int migrate_page_move_mapping(struct address_space *mapping,
 		struct buffer_head *head, enum migrate_mode mode,
 		int extra_count)
 {
+	XA_STATE(xas, &mapping->i_pages, page_index(page));
 	struct zone *oldzone, *newzone;
 	int dirty;
 	int expected_count = 1 + extra_count;
-	void **pslot;
 
 	/*
 	 * Device public or private pages have an extra refcount as they are
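
A note on the conversion, for context: XA_STATE(xas, &mapping->i_pages, page_index(page)) declares an on-stack cursor (a struct xa_state named xas) positioned at that index of the mapping's XArray, and the later xas_*() calls all operate through it. It takes over the job of the raw void **pslot slot pointer, which had to be looked up in a separate step. A minimal sketch of declaring and reading through such a cursor, illustrative only and not part of this patch:

	#include <linux/rcupdate.h>
	#include <linux/xarray.h>

	/* Illustrative only: look up the entry at @index.  Readers need
	 * rcu_read_lock() (or the xa_lock) around the walk; a real caller
	 * would also take a reference before using the entry.
	 */
	static void *peek(struct xarray *xa, unsigned long index)
	{
		XA_STATE(xas, xa, index);	/* cursor over @xa at @index */
		void *entry;

		rcu_read_lock();
		entry = xas_load(&xas);
		rcu_read_unlock();
		return entry;
	}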
@@ -467,21 +467,16 @@ int migrate_page_move_mapping(struct address_space *mapping,
 	oldzone = page_zone(page);
 	newzone = page_zone(newpage);
 
-	xa_lock_irq(&mapping->i_pages);
-
-	pslot = radix_tree_lookup_slot(&mapping->i_pages,
- 					page_index(page));
+	xas_lock_irq(&xas);
 
 	expected_count += hpage_nr_pages(page) + page_has_private(page);
-	if (page_count(page) != expected_count ||
-		radix_tree_deref_slot_protected(pslot,
-					&mapping->i_pages.xa_lock) != page) {
-		xa_unlock_irq(&mapping->i_pages);
+	if (page_count(page) != expected_count || xas_load(&xas) != page) {
+		xas_unlock_irq(&xas);
 		return -EAGAIN;
 	}
 
 	if (!page_ref_freeze(page, expected_count)) {
-		xa_unlock_irq(&mapping->i_pages);
+		xas_unlock_irq(&xas);
 		return -EAGAIN;
 	}
 
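This hunk also collapses the old three-step check (look up the slot, dereference it under the lock, compare against the page) into a single xas_load() under the xas lock. Reduced to its bones, the verify-then-store shape looks like the sketch below; replace_if_unchanged is a hypothetical helper, not from the patch, and it omits the refcount freeze the real code performs between the check and the store:

	#include <linux/errno.h>
	#include <linux/xarray.h>

	/* Hypothetical: swap @old for @new at @index, or fail with -EAGAIN
	 * if the slot no longer holds @old.
	 */
	static int replace_if_unchanged(struct xarray *xa, unsigned long index,
					void *old, void *new)
	{
		XA_STATE(xas, xa, index);
		int err = 0;

		xas_lock_irq(&xas);		/* lock the array, disable IRQs */
		if (xas_load(&xas) != old)
			err = -EAGAIN;		/* entry changed under us */
		else
			xas_store(&xas, new);	/* replace in place */
		xas_unlock_irq(&xas);
		return err;
	}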
@@ -495,7 +490,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
 	if (mode == MIGRATE_ASYNC && head &&
 			!buffer_migrate_lock_buffers(head, mode)) {
 		page_ref_unfreeze(page, expected_count);
-		xa_unlock_irq(&mapping->i_pages);
+		xas_unlock_irq(&xas);
 		return -EAGAIN;
 	}
 
@@ -523,16 +518,13 @@ int migrate_page_move_mapping(struct address_space *mapping,
 		SetPageDirty(newpage);
 	}
 
-	radix_tree_replace_slot(&mapping->i_pages, pslot, newpage);
+	xas_store(&xas, newpage);
 	if (PageTransHuge(page)) {
 		int i;
-		int index = page_index(page);
 
 		for (i = 1; i < HPAGE_PMD_NR; i++) {
-			pslot = radix_tree_lookup_slot(&mapping->i_pages,
-						       index + i);
-			radix_tree_replace_slot(&mapping->i_pages, pslot,
-						newpage + i);
+			xas_next(&xas);
+			xas_store(&xas, newpage + i);
 		}
 	}
 
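This hunk is where the cursor pays off for transparent huge pages: the old loop re-walked the tree with radix_tree_lookup_slot() for every one of the HPAGE_PMD_NR subpages, while xas_next() merely advances the cursor one index, reusing the node it already holds. A sketch of the idea with placeholder names (store_run is not from the patch; it assumes the slots are already populated, as they are during migration, so no xas_nomem() allocation loop is needed):

	#include <linux/xarray.h>

	/* Illustrative: overwrite @nr consecutive, already-present slots
	 * starting at @start through a single cursor.
	 */
	static void store_run(struct xarray *xa, unsigned long start,
			      void **entries, unsigned long nr)
	{
		XA_STATE(xas, xa, start);
		unsigned long i;

		xas_lock_irq(&xas);
		xas_store(&xas, entries[0]);		/* store at @start */
		for (i = 1; i < nr; i++) {
			xas_next(&xas);			/* step to start + i */
			xas_store(&xas, entries[i]);
		}
		xas_unlock_irq(&xas);
	}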
@@ -543,7 +535,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
 	 */
 	page_ref_unfreeze(page, expected_count - hpage_nr_pages(page));
 
-	xa_unlock(&mapping->i_pages);
+	xas_unlock(&xas);
 	/* Leave irq disabled to prevent preemption while updating stats */
 
 	/*
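
Note the asymmetry above: the lock was taken with xas_lock_irq(), but xas_unlock() releases only the spinlock, leaving interrupts disabled while the statistics are updated; the function later re-enables them with local_irq_enable(). A sketch of that pattern (store_then_count is a hypothetical name; the stats calls are elided):

	#include <linux/irqflags.h>
	#include <linux/xarray.h>

	/* Illustrative: drop the array lock but keep IRQs off for the
	 * per-CPU counter updates, then re-enable them explicitly.
	 */
	static void store_then_count(struct xarray *xa, unsigned long index,
				     void *entry)
	{
		XA_STATE(xas, xa, index);

		xas_lock_irq(&xas);	/* spinlock + IRQs off */
		xas_store(&xas, entry);
		xas_unlock(&xas);	/* drop the lock, IRQs stay off */
		/* ... preemption-safe statistics updates go here ... */
		local_irq_enable();	/* pairs with the _irq above */
	}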
@@ -583,22 +575,18 @@ EXPORT_SYMBOL(migrate_page_move_mapping);
 int migrate_huge_page_move_mapping(struct address_space *mapping,
 				   struct page *newpage, struct page *page)
 {
+	XA_STATE(xas, &mapping->i_pages, page_index(page));
 	int expected_count;
-	void **pslot;
-
-	xa_lock_irq(&mapping->i_pages);
-
-	pslot = radix_tree_lookup_slot(&mapping->i_pages, page_index(page));
 
+	xas_lock_irq(&xas);
 	expected_count = 2 + page_has_private(page);
-	if (page_count(page) != expected_count ||
-		radix_tree_deref_slot_protected(pslot, &mapping->i_pages.xa_lock) != page) {
-		xa_unlock_irq(&mapping->i_pages);
+	if (page_count(page) != expected_count || xas_load(&xas) != page) {
+		xas_unlock_irq(&xas);
 		return -EAGAIN;
 	}
 
 	if (!page_ref_freeze(page, expected_count)) {
-		xa_unlock_irq(&mapping->i_pages);
+		xas_unlock_irq(&xas);
 		return -EAGAIN;
 	}
 
@@ -607,11 +595,11 @@ int migrate_huge_page_move_mapping(struct address_space *mapping,
 
 	get_page(newpage);
 
-	radix_tree_replace_slot(&mapping->i_pages, pslot, newpage);
+	xas_store(&xas, newpage);
 
 	page_ref_unfreeze(page, expected_count - 1);
 
-	xa_unlock_irq(&mapping->i_pages);
+	xas_unlock_irq(&xas);
 
 	return MIGRATEPAGE_SUCCESS;
 }