Commit bfe7b00d authored by Song Liu's avatar Song Liu Committed by Linus Torvalds
Browse files

mm, thp: introduce FOLL_SPLIT_PMD

Introduce a new foll_flag: FOLL_SPLIT_PMD.  As the name says,
FOLL_SPLIT_PMD splits the huge pmd for the given mm_struct; the underlying
huge page stays as-is.

FOLL_SPLIT_PMD is useful for cases where we need to use regular pages, but
would like to switch back to the huge page and huge pmd later on.  One such
example is uprobe.  The following patches use FOLL_SPLIT_PMD in uprobe.

Link: http://lkml.kernel.org/r/20190815164525.1848545-4-songliubraving@fb.com


Signed-off-by: Song Liu <songliubraving@fb.com>
Reviewed-by: Oleg Nesterov <oleg@redhat.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent fb4fb04f
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -2591,6 +2591,7 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
#define FOLL_COW	0x4000	/* internal GUP flag */
#define FOLL_ANON	0x8000	/* don't do file mappings */
#define FOLL_LONGTERM	0x10000	/* mapping lifetime is indefinite: see below */
#define FOLL_SPLIT_PMD	0x20000	/* split huge pmd before returning */

/*
 * NOTE on FOLL_LONGTERM:
+6 −2
Original line number Diff line number Diff line
@@ -384,7 +384,7 @@ retry_locked:
		spin_unlock(ptl);
		return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
	}
	if (flags & FOLL_SPLIT) {
	if (flags & (FOLL_SPLIT | FOLL_SPLIT_PMD)) {
		int ret;
		page = pmd_page(*pmd);
		if (is_huge_zero_page(page)) {
@@ -393,7 +393,7 @@ retry_locked:
			split_huge_pmd(vma, pmd, address);
			if (pmd_trans_unstable(pmd))
				ret = -EBUSY;
		} else {
		} else if (flags & FOLL_SPLIT) {
			if (unlikely(!try_get_page(page))) {
				spin_unlock(ptl);
				return ERR_PTR(-ENOMEM);
@@ -405,6 +405,10 @@ retry_locked:
			put_page(page);
			if (pmd_none(*pmd))
				return no_page_table(vma, flags);
		} else {  /* flags & FOLL_SPLIT_PMD */
			spin_unlock(ptl);
			split_huge_pmd(vma, pmd, address);
			ret = pte_alloc(mm, pmd) ? -ENOMEM : 0;
		}

		return ret ? ERR_PTR(ret) :