Commit 2a52bcbc authored by Kirill A. Shutemov and committed by Linus Torvalds

rmap: extend try_to_unmap() to be usable by split_huge_page()



Add support for two ttu_flags:

  - TTU_SPLIT_HUGE_PMD splits the PMD, if one is present, before trying to
    unmap the page;

  - TTU_RMAP_LOCKED indicates that the caller already holds the relevant
    rmap lock.

Also, change rwc->done to check !page_mapcount() instead of !page_mapped():
try_to_unmap() works at the PTE level, so we are really interested in
whether this small page is still mapped, not whether the compound page it
belongs to is.
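
For illustration only (not part of this patch), a caller such as
split_huge_page() could use the new flags roughly as below; the helper
name unmap_thp_for_split() and the exact flag combination are made up:

	/*
	 * Hypothetical sketch: the caller already holds the anon_vma lock,
	 * so it passes TTU_RMAP_LOCKED, and asks for any huge PMD that
	 * still maps the page to be split before the pte-level unmap via
	 * TTU_SPLIT_HUGE_PMD.
	 */
	static void unmap_thp_for_split(struct page *page)
	{
		if (try_to_unmap(page, TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD)
				!= SWAP_SUCCESS)
			pr_debug("page still mapped, mapcount %d\n",
				 page_mapcount(page));
	}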

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent b9773199
include/linux/huge_mm.h  +7 −0
@@ -106,6 +106,9 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 			__split_huge_pmd(__vma, __pmd, __address);	\
 	}  while (0)
 
+
+void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address);
+
 #if HPAGE_PMD_ORDER >= MAX_ORDER
 #error "hugepages can't be allocated by the buddy allocator"
 #endif
@@ -173,6 +176,10 @@ static inline int split_huge_page(struct page *page)
 static inline void deferred_split_huge_page(struct page *page) {}
 #define split_huge_pmd(__vma, __pmd, __address)	\
 	do { } while (0)
+
+static inline void split_huge_pmd_address(struct vm_area_struct *vma,
+		unsigned long address) {}
+
 static inline int hugepage_madvise(struct vm_area_struct *vma,
 				   unsigned long *vm_flags, int advice)
 {
include/linux/rmap.h  +3 −0
@@ -86,6 +86,7 @@ enum ttu_flags {
 	TTU_MIGRATION = 2,		/* migration mode */
 	TTU_MUNLOCK = 4,		/* munlock mode */
 	TTU_LZFREE = 8,			/* lazy free mode */
+	TTU_SPLIT_HUGE_PMD = 16,	/* split huge PMD if any */
 
 	TTU_IGNORE_MLOCK = (1 << 8),	/* ignore mlock */
 	TTU_IGNORE_ACCESS = (1 << 9),	/* don't age */
@@ -93,6 +94,8 @@ enum ttu_flags {
 	TTU_BATCH_FLUSH = (1 << 11),	/* Batch TLB flushes where possible
 					 * and caller guarantees they will
 					 * do a final flush if necessary */
+	TTU_RMAP_LOCKED = (1 << 12)	/* do not grab rmap lock:
+					 * caller holds it */
 };
 
 #ifdef CONFIG_MMU
mm/huge_memory.c  +1 −4
@@ -3006,15 +3006,12 @@ out:
 	}
 }
 
-static void split_huge_pmd_address(struct vm_area_struct *vma,
-				    unsigned long address)
+void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address)
 {
 	pgd_t *pgd;
 	pud_t *pud;
 	pmd_t *pmd;
 
-	VM_BUG_ON(!(address & ~HPAGE_PMD_MASK));
-
 	pgd = pgd_offset(vma->vm_mm, address);
 	if (!pgd_present(*pgd))
 		return;
mm/rmap.c  +16 −8
@@ -1431,6 +1431,8 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 	if ((flags & TTU_MUNLOCK) && !(vma->vm_flags & VM_LOCKED))
 		goto out;
 
+	if (flags & TTU_SPLIT_HUGE_PMD)
+		split_huge_pmd_address(vma, address);
 	pte = page_check_address(page, mm, address, &ptl, 0);
 	if (!pte)
 		goto out;
@@ -1576,10 +1578,10 @@ static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg)
 	return is_vma_temporary_stack(vma);
 }
 
-static int page_not_mapped(struct page *page)
+static int page_mapcount_is_zero(struct page *page)
 {
-	return !page_mapped(page);
-};
+	return !page_mapcount(page);
+}
 
 /**
  * try_to_unmap - try to remove all page table mappings to a page
@@ -1606,12 +1608,10 @@ int try_to_unmap(struct page *page, enum ttu_flags flags)
 	struct rmap_walk_control rwc = {
 		.rmap_one = try_to_unmap_one,
 		.arg = &rp,
-		.done = page_not_mapped,
+		.done = page_mapcount_is_zero,
 		.anon_lock = page_lock_anon_vma_read,
 	};
 
-	VM_BUG_ON_PAGE(!PageHuge(page) && PageTransHuge(page), page);
-
 	/*
 	 * During exec, a temporary VMA is setup and later moved.
 	 * The VMA is moved under the anon_vma lock but not the
@@ -1623,9 +1623,12 @@ int try_to_unmap(struct page *page, enum ttu_flags flags)
 	if ((flags & TTU_MIGRATION) && !PageKsm(page) && PageAnon(page))
 		rwc.invalid_vma = invalid_migration_vma;
 
-	ret = rmap_walk(page, &rwc);
+	if (flags & TTU_RMAP_LOCKED)
+		ret = rmap_walk_locked(page, &rwc);
+	else
+		ret = rmap_walk(page, &rwc);
 
-	if (ret != SWAP_MLOCK && !page_mapped(page)) {
+	if (ret != SWAP_MLOCK && !page_mapcount(page)) {
 		ret = SWAP_SUCCESS;
 		if (rp.lazyfreed && !PageDirty(page))
 			ret = SWAP_LZFREE;
@@ -1633,6 +1636,11 @@ int try_to_unmap(struct page *page, enum ttu_flags flags)
 	return ret;
 }
 
+static int page_not_mapped(struct page *page)
+{
+	return !page_mapped(page);
+};
+
 /**
  * try_to_munlock - try to munlock a page
  * @page: the page to be munlocked