Commit 2288a9a6 authored by Jason Gunthorpe

mm/hmm: return -EFAULT when setting HMM_PFN_ERROR on requested valid pages



hmm_range_fault() should never return 0 when the pfns output for a page
the caller requested as valid is set to HMM_PFN_ERROR.
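
For context, a hedged caller-side sketch of the invariant this enforces.
hmm_range_fault(), the flags/values arrays, and HMM_PFN_VALID/HMM_PFN_ERROR
are the hmm_range API of this era; the snapshot_range() wrapper itself is
hypothetical, not part of the patch:

	/* Hypothetical wrapper, not from this patch. */
	static long snapshot_range(struct hmm_range *range)
	{
		unsigned long i, npages = (range->end - range->start) >> PAGE_SHIFT;

		/* Request that every page in the range be made valid. */
		for (i = 0; i < npages; i++)
			range->pfns[i] = range->flags[HMM_PFN_VALID];

		/*
		 * After this fix, a successful return guarantees no requested
		 * slot reads back as range->values[HMM_PFN_ERROR]; an
		 * unsatisfiable page surfaces as -EFAULT instead.
		 */
		return hmm_range_fault(range, 0); /* 0 = fault, not snapshot */
	}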

hmm_pte_need_fault() must always be called before setting HMM_PFN_ERROR, to
detect whether the page is in faulting mode.
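
Distilled from the diff below, the required ordering looks like this (a
sketch using the declarations hoisted in hmm_vma_walk_pmd(), not the full
walker):

	hmm_range_need_fault(hmm_vma_walk, pfns, npages, 0, &fault,
			     &write_fault);
	if (fault || write_fault)
		return -EFAULT;	/* the caller asked for a valid page */
	/* Snapshot-only callers still see the error in the output array. */
	return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);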

Fix two cases in hmm_vma_walk_pmd() and reorganize some of the duplicated
code.

Fixes: d08faca0 ("mm/hmm: properly handle migration pmd")
Fixes: da4c3c73 ("mm/hmm/mirror: helper to snapshot CPU page table")
Reviewed-by: Ralph Campbell <rcampbell@nvidia.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 76612d6c
mm/hmm.c +21 −17
@@ -371,8 +371,10 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp,
 {
 	struct hmm_vma_walk *hmm_vma_walk = walk->private;
 	struct hmm_range *range = hmm_vma_walk->range;
-	uint64_t *pfns = range->pfns;
-	unsigned long addr = start, i;
+	uint64_t *pfns = &range->pfns[(start - range->start) >> PAGE_SHIFT];
+	unsigned long npages = (end - start) >> PAGE_SHIFT;
+	unsigned long addr = start;
+	bool fault, write_fault;
 	pte_t *ptep;
 	pmd_t pmd;
 
@@ -382,14 +384,6 @@ again:
 		return hmm_vma_walk_hole(start, end, -1, walk);
 
 	if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
-		bool fault, write_fault;
-		unsigned long npages;
-		uint64_t *pfns;
-
-		i = (addr - range->start) >> PAGE_SHIFT;
-		npages = (end - addr) >> PAGE_SHIFT;
-		pfns = &range->pfns[i];
-
 		hmm_range_need_fault(hmm_vma_walk, pfns, npages,
 				     0, &fault, &write_fault);
 		if (fault || write_fault) {
@@ -398,8 +392,15 @@ again:
 			return -EBUSY;
 		}
 		return hmm_pfns_fill(start, end, range, HMM_PFN_NONE);
-	} else if (!pmd_present(pmd))
+	}
+
+	if (!pmd_present(pmd)) {
+		hmm_range_need_fault(hmm_vma_walk, pfns, npages, 0, &fault,
+				     &write_fault);
+		if (fault || write_fault)
+			return -EFAULT;
 		return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
+	}
 
 	if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
 		/*
@@ -416,8 +417,7 @@ again:
 		if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
 			goto again;
 
-		i = (addr - range->start) >> PAGE_SHIFT;
-		return hmm_vma_handle_pmd(walk, addr, end, &pfns[i], pmd);
+		return hmm_vma_handle_pmd(walk, addr, end, pfns, pmd);
 	}
 
 	/*
@@ -426,15 +426,19 @@ again:
 	 * entry pointing to pte directory or it is a bad pmd that will not
 	 * recover.
 	 */
-	if (pmd_bad(pmd))
+	if (pmd_bad(pmd)) {
+		hmm_range_need_fault(hmm_vma_walk, pfns, npages, 0, &fault,
+				     &write_fault);
+		if (fault || write_fault)
+			return -EFAULT;
 		return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
+	}
 
 	ptep = pte_offset_map(pmdp, addr);
-	i = (addr - range->start) >> PAGE_SHIFT;
-	for (; addr < end; addr += PAGE_SIZE, ptep++, i++) {
+	for (; addr < end; addr += PAGE_SIZE, ptep++, pfns++) {
 		int r;
 
-		r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, &pfns[i]);
+		r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, pfns);
 		if (r) {
 			/* hmm_vma_handle_pte() did pte_unmap() */
 			hmm_vma_walk->last = addr;