Commit 31d49da5 authored by Aneesh Kumar K.V, committed by Linus Torvalds

mm/hugetlb: simplify hugetlb unmap

For hugetlb, as with THP (and unlike regular pages), the TLB flush is done after
dropping the ptl.  Because of that, there is no need to track force_flush the way
the code does now; we can simply call tlb_remove_page(), which does the flush
itself when needed.

No functionality change in this patch.
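
The idea is easy to model outside the kernel.  Below is a minimal userspace
sketch, not kernel code: the names mirror the mmu_gather API (struct mmu_gather,
__tlb_remove_page(), tlb_remove_page(), tlb_flush_mmu()), but every body is a
stand-in, and BATCH_MAX and the unmap loop are invented for illustration.  It
shows why, once the flush happens after the lock is dropped anyway, the caller
can hand each page to tlb_remove_page() and never track a force_flush flag.

/*
 * Minimal userspace model of the batching idea -- NOT kernel code.  The
 * names mirror the kernel's mmu_gather API, but every body here is a
 * stand-in: "flushing" is just a printf, and BATCH_MAX is made up.
 */
#include <stdbool.h>
#include <stdio.h>

#define BATCH_MAX 4			/* tiny batch so the flush is visible */

struct page { int id; };

struct mmu_gather {
	struct page *pages[BATCH_MAX];
	int nr;
};

/* Stand-in for the TLB flush + freeing of the batched pages. */
static void tlb_flush_mmu(struct mmu_gather *tlb)
{
	printf("flush TLB, free %d batched pages\n", tlb->nr);
	tlb->nr = 0;
}

/* Returns false once the batch is full, like the old __tlb_remove_page(). */
static bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	tlb->pages[tlb->nr++] = page;
	return tlb->nr < BATCH_MAX;
}

/*
 * The helper the patch switches to: batch the page and flush internally
 * when the batch is full, so the caller never tracks a force_flush flag.
 */
static void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	if (!__tlb_remove_page(tlb, page))
		tlb_flush_mmu(tlb);
}

int main(void)
{
	struct mmu_gather tlb = { .nr = 0 };
	struct page pages[10];

	/*
	 * The unmap loop in miniature: since the flush happens after the
	 * page-table lock would have been dropped anyway, each iteration
	 * just hands the page to tlb_remove_page() and moves on.
	 */
	for (int i = 0; i < 10; i++) {
		pages[i].id = i;
		/* ... clear the PTE, drop the ptl ... */
		tlb_remove_page(&tlb, &pages[i]);
	}
	if (tlb.nr)
		tlb_flush_mmu(&tlb);	/* final flush, as tlb_finish_mmu() would do */
	return 0;
}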

Link: http://lkml.kernel.org/r/1465049193-22197-1-git-send-email-aneesh.kumar@linux.vnet.ibm.com


Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 337d9abf
mm/hugetlb.c +21 −33

--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3177,7 +3177,6 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 			    unsigned long start, unsigned long end,
 			    struct page *ref_page)
 {
-	int force_flush = 0;
 	struct mm_struct *mm = vma->vm_mm;
 	unsigned long address;
 	pte_t *ptep;
@@ -3196,19 +3195,22 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 	tlb_start_vma(tlb, vma);
 	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
 	address = start;
-again:
 	for (; address < end; address += sz) {
 		ptep = huge_pte_offset(mm, address);
 		if (!ptep)
 			continue;
 
 		ptl = huge_pte_lock(h, mm, ptep);
-		if (huge_pmd_unshare(mm, &address, ptep))
-			goto unlock;
+		if (huge_pmd_unshare(mm, &address, ptep)) {
+			spin_unlock(ptl);
+			continue;
+		}
 
 		pte = huge_ptep_get(ptep);
-		if (huge_pte_none(pte))
-			goto unlock;
+		if (huge_pte_none(pte)) {
+			spin_unlock(ptl);
+			continue;
+		}
 
 		/*
 		 * Migrating hugepage or HWPoisoned hugepage is already
@@ -3216,7 +3218,8 @@ again:
 		 */
 		if (unlikely(!pte_present(pte))) {
 			huge_pte_clear(mm, address, ptep);
-			goto unlock;
+			spin_unlock(ptl);
+			continue;
 		}
 
 		page = pte_page(pte);
@@ -3226,9 +3229,10 @@ again:
 		 * are about to unmap is the actual page of interest.
 		 */
 		if (ref_page) {
-			if (page != ref_page)
-				goto unlock;
-
+			if (page != ref_page) {
+				spin_unlock(ptl);
+				continue;
+			}
 			/*
 			 * Mark the VMA as having unmapped its page so that
 			 * future faults in this VMA will fail rather than
@@ -3244,30 +3248,14 @@ again:
 
 		hugetlb_count_sub(pages_per_huge_page(h), mm);
 		page_remove_rmap(page, true);
-		force_flush = !__tlb_remove_page(tlb, page);
-		if (force_flush) {
-			address += sz;
-			spin_unlock(ptl);
-			break;
-		}
-		/* Bail out after unmapping reference page if supplied */
-		if (ref_page) {
-			spin_unlock(ptl);
-			break;
-		}
-unlock:
+
 		spin_unlock(ptl);
-	}
-	/*
-	 * mmu_gather ran out of room to batch pages, we break out of
-	 * the PTE lock to avoid doing the potential expensive TLB invalidate
-	 * and page-free while holding it.
-	 */
-	if (force_flush) {
-		force_flush = 0;
-		tlb_flush_mmu(tlb);
-		if (address < end && !ref_page)
-			goto again;
-	}
+		tlb_remove_page(tlb, page);
+		/*
+		 * Bail out after unmapping reference page if supplied
+		 */
+		if (ref_page)
+			break;
+	}
 	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
 	tlb_end_vma(tlb, vma);
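
For contrast with the lines removed above, here is the old caller-side pattern
in the same toy model as the earlier sketch (it reuses those stand-in helpers;
unmap_range_old_style() is a hypothetical name).  It only illustrates the
force_flush/goto-again bookkeeping this patch deletes: the caller must notice
that the batch filled up, leave the "locked" region, flush, and restart.

/*
 * Old-style caller in the same toy model as the earlier sketch (illustrative
 * only): the caller tracks force_flush, breaks out before flushing, then
 * jumps back -- exactly the bookkeeping the patch removes.
 */
static void unmap_range_old_style(struct mmu_gather *tlb,
				  struct page *pages, int npages)
{
	int force_flush = 0;
	int i = 0;
again:
	for (; i < npages; i++) {
		/* ... take the ptl, clear the PTE ... */
		force_flush = !__tlb_remove_page(tlb, &pages[i]);
		if (force_flush) {
			i++;			/* plays the role of 'address += sz' */
			/* ... drop the ptl ... */
			break;
		}
		/* ... drop the ptl ... */
	}
	if (force_flush) {
		force_flush = 0;
		tlb_flush_mmu(tlb);
		if (i < npages)
			goto again;
	}
}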