Commit 337d9abf authored by Naoya Horiguchi, committed by Linus Torvalds
Browse files

mm: thp: check pmd_trans_unstable() after split_huge_pmd()

split_huge_pmd() doesn't guarantee that the pmd is normal pmd pointing
to pte entries, which can be checked with pmd_trans_unstable().  Some
callers make this assertion and some do it differently and some not, so
let's do it in a unified manner.

Link: http://lkml.kernel.org/r/1464741400-12143-1-git-send-email-n-horiguchi@ah.jp.nec.com


Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Cc: Hugh Dickins <hughd@google.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent e3a2713c
Loading
Loading
Loading
Loading
+2 −0
Original line number Diff line number Diff line
@@ -279,6 +279,8 @@ struct page *follow_page_mask(struct vm_area_struct *vma,
			spin_unlock(ptl);
			ret = 0;
			split_huge_pmd(vma, pmd, address);
			if (pmd_trans_unstable(pmd))
				ret = -EBUSY;
		} else {
			get_page(page);
			spin_unlock(ptl);
+2 −0
Original line number Diff line number Diff line
@@ -512,6 +512,8 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
		}
	}

	if (pmd_trans_unstable(pmd))
		return 0;
retry:
	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
+1 −1
Original line number Diff line number Diff line
@@ -163,7 +163,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
		if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
			if (next - addr != HPAGE_PMD_SIZE) {
				split_huge_pmd(vma, pmd, addr);
				if (pmd_none(*pmd))
				if (pmd_trans_unstable(pmd))
					continue;
			} else {
				int nr_ptes = change_huge_pmd(vma, pmd, addr,
+1 −2
Original line number Diff line number Diff line
@@ -210,9 +210,8 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
				}
			}
			split_huge_pmd(vma, old_pmd, old_addr);
			if (pmd_none(*old_pmd))
			if (pmd_trans_unstable(old_pmd))
				continue;
			VM_BUG_ON(pmd_trans_huge(*old_pmd));
		}
		if (pte_alloc(new_vma->vm_mm, new_pmd, new_addr))
			break;