Commit 2733ea14 authored by Jason Gunthorpe

mm/hmm: remove the customizable pfn format from hmm_range_fault

Presumably the intent here was that hmm_range_fault() could put the data
into some HW specific format and thus avoid some work. However, nothing
actually does that, and it isn't clear how anything actually could do that
as hmm_range_fault() provides CPU addresses which must be DMA mapped.

Perhaps there is some special HW that does not need DMA mapping, but we
don't have any examples of this, and the theoretical performance win of
avoiding an extra scan over the pfns array doesn't seem worth the
complexity. Plus pfns needs to be scanned anyhow to sort out any
DEVICE_PRIVATE pages.

This version replaces the uint64_t with an unsigned long containing a pfn
and fixed flags. On input, flags is filled with the HMM_PFN_REQ_* values;
on successful output it is filled with HMM_PFN_* values, describing the
state of the pages.
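
As a rough sketch of the new calling convention (not part of this commit's
diff; the helper my_fault_range() and its arguments are illustrative), a
driver sets HMM_PFN_REQ_* bits on input and reads the pfn plus HMM_PFN_*
state bits back out of the same array:

    #include <linux/hmm.h>
    #include <linux/mm.h>

    /* Illustrative helper, not part of this commit. */
    static int my_fault_range(struct hmm_range *range, struct page **pages,
                              unsigned long npages)
    {
            unsigned long i;
            int ret;

            /* Input: ask for every page to be faulted in writable. */
            range->default_flags = HMM_PFN_REQ_FAULT | HMM_PFN_REQ_WRITE;
            range->pfn_flags_mask = 0;      /* ignore per-entry input bits */

            /* Caller is assumed to hold mmap_sem and a notifier sequence. */
            ret = hmm_range_fault(range);
            if (ret)
                    return ret;

            /* Output: each entry is now a pfn plus HMM_PFN_* state bits. */
            for (i = 0; i < npages; i++) {
                    if (!(range->hmm_pfns[i] & HMM_PFN_VALID))
                            return -EFAULT;
                    /* HMM_PFN_WRITE set here means the CPU pte is writable. */
                    pages[i] = hmm_pfn_to_page(range->hmm_pfns[i]);
            }
            return 0;
    }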

amdgpu is simple to convert; it doesn't use snapshot and doesn't use
per-page flags.

nouveau uses only 16 hmm_pte entries at most (ie fits in a few cache
lines), and it sweeps over its pfns array a couple of times anyhow. It
also has a nasty call chain before it reaches the dma map and hardware,
suggesting performance isn't important:

   nouveau_svm_fault():
     args.i.m.method = NVIF_VMM_V0_PFNMAP
     nouveau_range_fault()
      nvif_object_ioctl()
       client->driver->ioctl()
	  struct nvif_driver nvif_driver_nvkm:
	    .ioctl = nvkm_client_ioctl
	   nvkm_ioctl()
	    nvkm_ioctl_path()
	      nvkm_ioctl_v0[type].func(..)
	      nvkm_ioctl_mthd()
	       nvkm_object_mthd()
		  struct nvkm_object_func nvkm_uvmm:
		    .mthd = nvkm_uvmm_mthd
		   nvkm_uvmm_mthd()
		    nvkm_uvmm_mthd_pfnmap()
		     nvkm_vmm_pfn_map()
		      nvkm_vmm_ptes_get_map()
		       func == gp100_vmm_pgt_pfn
			struct nvkm_vmm_desc_func gp100_vmm_desc_spt:
			  .pfn = gp100_vmm_pgt_pfn
			 nvkm_vmm_iter()
			  REF_PTES == func == gp100_vmm_pgt_pfn()
			    dma_map_page()

Link: https://lore.kernel.org/r/5-v2-b4e84f444c7d+24f57-hmm_no_flags_jgg@mellanox.com


Acked-by: Felix Kuehling <Felix.Kuehling@amd.com>
Tested-by: Ralph Campbell <rcampbell@nvidia.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 5c8f3c4c
Documentation/vm/hmm.rst  +10 −18
@@ -184,10 +184,7 @@ The usage pattern is::
      range.notifier = &interval_sub;
      range.start = ...;
      range.end = ...;
      range.pfns = ...;
      range.flags = ...;
      range.values = ...;
      range.pfn_shift = ...;
      range.hmm_pfns = ...;

      if (!mmget_not_zero(interval_sub->notifier.mm))
          return -EFAULT;
@@ -229,15 +226,10 @@ The hmm_range struct has 2 fields, default_flags and pfn_flags_mask, that specif
fault or snapshot policy for the whole range instead of having to set them
for each entry in the pfns array.

For instance, if the device flags for range.flags are::
For instance if the device driver wants pages for a range with at least read
permission, it sets::

    range.flags[HMM_PFN_VALID] = (1 << 63);
    range.flags[HMM_PFN_WRITE] = (1 << 62);

and the device driver wants pages for a range with at least read permission,
it sets::

    range->default_flags = (1 << 63);
    range->default_flags = HMM_PFN_REQ_FAULT;
    range->pfn_flags_mask = 0;

and calls hmm_range_fault() as described above. This will fill fault all pages
@@ -246,18 +238,18 @@ in the range with at least read permission.
Now let's say the driver wants to do the same except for one page in the range for
which it wants to have write permission. Now driver set::

    range->default_flags = (1 << 63);
    range->pfn_flags_mask = (1 << 62);
    range->pfns[index_of_write] = (1 << 62);
    range->default_flags = HMM_PFN_REQ_FAULT;
    range->pfn_flags_mask = HMM_PFN_REQ_WRITE;
    range->pfns[index_of_write] = HMM_PFN_REQ_WRITE;

With this, HMM will fault in all pages with at least read (i.e., valid) and for the
address == range->start + (index_of_write << PAGE_SHIFT) it will fault with
write permission i.e., if the CPU pte does not have write permission set then HMM
will call handle_mm_fault().

Note that HMM will populate the pfns array with write permission for any page
that is mapped with CPU write permission no matter what values are set
in default_flags or pfn_flags_mask.
After hmm_range_fault completes the flag bits are set to the current state of
the page tables, ie HMM_PFN_VALID | HMM_PFN_WRITE will be set if the page is
writable.


Represent and manage device memory from core kernel point of view
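
To tie the documentation hunk above back to the renamed field, here is a
hedged sketch of the per-page write example rewritten against hmm_pfns (the
helper name, npages and index_of_write are illustrative, not from this
commit's diff):

    #include <linux/hmm.h>
    #include <linux/string.h>

    /* Illustrative helper, not part of this commit. */
    static int fault_one_page_writable(struct hmm_range *range,
                                       unsigned long npages,
                                       unsigned long index_of_write)
    {
            int ret;

            /* Fault everything readable, but require write for one page. */
            range->default_flags = HMM_PFN_REQ_FAULT;
            range->pfn_flags_mask = HMM_PFN_REQ_WRITE;
            memset(range->hmm_pfns, 0, npages * sizeof(*range->hmm_pfns));
            range->hmm_pfns[index_of_write] = HMM_PFN_REQ_WRITE;

            ret = hmm_range_fault(range);   /* locking/retry loop omitted */
            if (ret)
                    return ret;

            /* After success the flags mirror the CPU page tables. */
            if (!(range->hmm_pfns[index_of_write] & HMM_PFN_WRITE))
                    return -EFAULT;
            return 0;
    }
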
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c  +10 −25
@@ -766,17 +766,6 @@ struct amdgpu_ttm_tt {
};

#ifdef CONFIG_DRM_AMDGPU_USERPTR
/* flags used by HMM internal, not related to CPU/GPU PTE flags */
static const uint64_t hmm_range_flags[HMM_PFN_FLAG_MAX] = {
	(1 << 0), /* HMM_PFN_VALID */
	(1 << 1), /* HMM_PFN_WRITE */
};

static const uint64_t hmm_range_values[HMM_PFN_VALUE_MAX] = {
	0xfffffffffffffffeUL, /* HMM_PFN_ERROR */
	0, /* HMM_PFN_NONE */
};

/**
 * amdgpu_ttm_tt_get_user_pages - get device accessible pages that back user
 * memory and start HMM tracking CPU page table update
@@ -815,18 +804,15 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
		goto out;
	}
	range->notifier = &bo->notifier;
	range->flags = hmm_range_flags;
	range->values = hmm_range_values;
	range->pfn_shift = PAGE_SHIFT;
	range->start = bo->notifier.interval_tree.start;
	range->end = bo->notifier.interval_tree.last + 1;
	range->default_flags = hmm_range_flags[HMM_PFN_VALID];
	range->default_flags = HMM_PFN_REQ_FAULT;
	if (!amdgpu_ttm_tt_is_readonly(ttm))
		range->default_flags |= range->flags[HMM_PFN_WRITE];
		range->default_flags |= HMM_PFN_REQ_WRITE;

	range->pfns = kvmalloc_array(ttm->num_pages, sizeof(*range->pfns),
				     GFP_KERNEL);
	if (unlikely(!range->pfns)) {
	range->hmm_pfns = kvmalloc_array(ttm->num_pages,
					 sizeof(*range->hmm_pfns), GFP_KERNEL);
	if (unlikely(!range->hmm_pfns)) {
		r = -ENOMEM;
		goto out_free_ranges;
	}
@@ -867,7 +853,7 @@ retry:
	 * the notifier_lock, and mmu_interval_read_retry() must be done first.
	 */
	for (i = 0; i < ttm->num_pages; i++)
		pages[i] = hmm_device_entry_to_page(range, range->pfns[i]);
		pages[i] = hmm_pfn_to_page(range->hmm_pfns[i]);

	gtt->range = range;
	mmput(mm);
@@ -877,7 +863,7 @@ retry:
out_unlock:
	up_read(&mm->mmap_sem);
out_free_pfns:
	kvfree(range->pfns);
	kvfree(range->hmm_pfns);
out_free_ranges:
	kfree(range);
out:
@@ -902,7 +888,7 @@ bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
	DRM_DEBUG_DRIVER("user_pages_done 0x%llx pages 0x%lx\n",
		gtt->userptr, ttm->num_pages);

	WARN_ONCE(!gtt->range || !gtt->range->pfns,
	WARN_ONCE(!gtt->range || !gtt->range->hmm_pfns,
		"No user pages to check\n");

	if (gtt->range) {
@@ -912,7 +898,7 @@ bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
		 */
		r = mmu_interval_read_retry(gtt->range->notifier,
					 gtt->range->notifier_seq);
		kvfree(gtt->range->pfns);
		kvfree(gtt->range->hmm_pfns);
		kfree(gtt->range);
		gtt->range = NULL;
	}
@@ -1003,8 +989,7 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)

		for (i = 0; i < ttm->num_pages; i++) {
			if (ttm->pages[i] !=
				hmm_device_entry_to_page(gtt->range,
					      gtt->range->pfns[i]))
			    hmm_pfn_to_page(gtt->range->hmm_pfns[i]))
				break;
		}

drivers/gpu/drm/nouveau/nouveau_dmem.c  +1 −26
@@ -85,7 +85,7 @@ static inline struct nouveau_dmem *page_to_dmem(struct page *page)
	return container_of(page->pgmap, struct nouveau_dmem, pagemap);
}

static unsigned long nouveau_dmem_page_addr(struct page *page)
unsigned long nouveau_dmem_page_addr(struct page *page)
{
	struct nouveau_dmem_chunk *chunk = page->zone_device_data;
	unsigned long idx = page_to_pfn(page) - chunk->pfn_first;
@@ -671,28 +671,3 @@ out_free_src:
out:
	return ret;
}

void
nouveau_dmem_convert_pfn(struct nouveau_drm *drm,
			 struct hmm_range *range)
{
	unsigned long i, npages;

	npages = (range->end - range->start) >> PAGE_SHIFT;
	for (i = 0; i < npages; ++i) {
		struct page *page;
		uint64_t addr;

		page = hmm_device_entry_to_page(range, range->pfns[i]);
		if (page == NULL)
			continue;

		if (!is_device_private_page(page))
			continue;

		addr = nouveau_dmem_page_addr(page);
		range->pfns[i] &= ((1UL << range->pfn_shift) - 1);
		range->pfns[i] |= (addr >> PAGE_SHIFT) << range->pfn_shift;
		range->pfns[i] |= NVIF_VMM_PFNMAP_V0_VRAM;
	}
}
drivers/gpu/drm/nouveau/nouveau_dmem.h  +1 −2
@@ -37,9 +37,8 @@ int nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
			     struct vm_area_struct *vma,
			     unsigned long start,
			     unsigned long end);
unsigned long nouveau_dmem_page_addr(struct page *page);

void nouveau_dmem_convert_pfn(struct nouveau_drm *drm,
			      struct hmm_range *range);
#else /* IS_ENABLED(CONFIG_DRM_NOUVEAU_SVM) */
static inline void nouveau_dmem_init(struct nouveau_drm *drm) {}
static inline void nouveau_dmem_fini(struct nouveau_drm *drm) {}
drivers/gpu/drm/nouveau/nouveau_svm.c  +59 −28
@@ -369,18 +369,6 @@ out_free:
	return ret;
}

static const u64
nouveau_svm_pfn_flags[HMM_PFN_FLAG_MAX] = {
	[HMM_PFN_VALID         ] = NVIF_VMM_PFNMAP_V0_V,
	[HMM_PFN_WRITE         ] = NVIF_VMM_PFNMAP_V0_W,
};

static const u64
nouveau_svm_pfn_values[HMM_PFN_VALUE_MAX] = {
	[HMM_PFN_ERROR  ] = ~NVIF_VMM_PFNMAP_V0_V,
	[HMM_PFN_NONE   ] =  NVIF_VMM_PFNMAP_V0_NONE,
};

/* Issue fault replay for GPU to retry accesses that faulted previously. */
static void
nouveau_svm_fault_replay(struct nouveau_svm *svm)
@@ -518,9 +506,45 @@ static const struct mmu_interval_notifier_ops nouveau_svm_mni_ops = {
	.invalidate = nouveau_svm_range_invalidate,
};

static void nouveau_hmm_convert_pfn(struct nouveau_drm *drm,
				    struct hmm_range *range, u64 *ioctl_addr)
{
	unsigned long i, npages;

	/*
	 * The ioctl_addr prepared here is passed through nvif_object_ioctl()
	 * to an eventual DMA map in something like gp100_vmm_pgt_pfn()
	 *
	 * This is all just encoding the internal hmm representation into a
	 * different nouveau internal representation.
	 */
	npages = (range->end - range->start) >> PAGE_SHIFT;
	for (i = 0; i < npages; ++i) {
		struct page *page;

		if (!(range->hmm_pfns[i] & HMM_PFN_VALID)) {
			ioctl_addr[i] = 0;
			continue;
		}

		page = hmm_pfn_to_page(range->hmm_pfns[i]);
		if (is_device_private_page(page))
			ioctl_addr[i] = nouveau_dmem_page_addr(page) |
					NVIF_VMM_PFNMAP_V0_V |
					NVIF_VMM_PFNMAP_V0_VRAM;
		else
			ioctl_addr[i] = page_to_phys(page) |
					NVIF_VMM_PFNMAP_V0_V |
					NVIF_VMM_PFNMAP_V0_HOST;
		if (range->hmm_pfns[i] & HMM_PFN_WRITE)
			ioctl_addr[i] |= NVIF_VMM_PFNMAP_V0_W;
	}
}

static int nouveau_range_fault(struct nouveau_svmm *svmm,
			       struct nouveau_drm *drm, void *data, u32 size,
			       u64 *pfns, struct svm_notifier *notifier)
			       unsigned long hmm_pfns[], u64 *ioctl_addr,
			       struct svm_notifier *notifier)
{
	unsigned long timeout =
		jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
@@ -529,10 +553,8 @@ static int nouveau_range_fault(struct nouveau_svmm *svmm,
		.notifier = &notifier->notifier,
		.start = notifier->notifier.interval_tree.start,
		.end = notifier->notifier.interval_tree.last + 1,
		.pfns = pfns,
		.flags = nouveau_svm_pfn_flags,
		.values = nouveau_svm_pfn_values,
		.pfn_shift = NVIF_VMM_PFNMAP_V0_ADDR_SHIFT,
		.pfn_flags_mask = HMM_PFN_REQ_FAULT | HMM_PFN_REQ_WRITE,
		.hmm_pfns = hmm_pfns,
	};
	struct mm_struct *mm = notifier->notifier.mm;
	int ret;
@@ -542,12 +564,15 @@ static int nouveau_range_fault(struct nouveau_svmm *svmm,
			return -EBUSY;

		range.notifier_seq = mmu_interval_read_begin(range.notifier);
		range.default_flags = 0;
		range.pfn_flags_mask = -1UL;
		down_read(&mm->mmap_sem);
		ret = hmm_range_fault(&range);
		up_read(&mm->mmap_sem);
		if (ret) {
			/*
			 * FIXME: the input PFN_REQ flags are destroyed on
			 * -EBUSY, we need to regenerate them, also for the
			 * other continue below
			 */
			if (ret == -EBUSY)
				continue;
			return ret;
@@ -562,7 +587,7 @@ static int nouveau_range_fault(struct nouveau_svmm *svmm,
		break;
	}

	nouveau_dmem_convert_pfn(drm, &range);
	nouveau_hmm_convert_pfn(drm, &range, ioctl_addr);

	svmm->vmm->vmm.object.client->super = true;
	ret = nvif_object_ioctl(&svmm->vmm->vmm.object, data, size, NULL);
@@ -589,6 +614,7 @@ nouveau_svm_fault(struct nvif_notify *notify)
		} i;
		u64 phys[16];
	} args;
	unsigned long hmm_pfns[ARRAY_SIZE(args.phys)];
	struct vm_area_struct *vma;
	u64 inst, start, limit;
	int fi, fn, pi, fill;
@@ -704,12 +730,17 @@ nouveau_svm_fault(struct nvif_notify *notify)
			 * access flags.
			 *XXX: atomic?
			 */
			if (buffer->fault[fn]->access != 0 /* READ. */ &&
			    buffer->fault[fn]->access != 3 /* PREFETCH. */) {
				args.phys[pi++] = NVIF_VMM_PFNMAP_V0_V |
						  NVIF_VMM_PFNMAP_V0_W;
			} else {
				args.phys[pi++] = NVIF_VMM_PFNMAP_V0_V;
			switch (buffer->fault[fn]->access) {
			case 0: /* READ. */
				hmm_pfns[pi++] = HMM_PFN_REQ_FAULT;
				break;
			case 3: /* PREFETCH. */
				hmm_pfns[pi++] = 0;
				break;
			default:
				hmm_pfns[pi++] = HMM_PFN_REQ_FAULT |
						 HMM_PFN_REQ_WRITE;
				break;
			}
			args.i.p.size = pi << PAGE_SHIFT;

@@ -737,7 +768,7 @@ nouveau_svm_fault(struct nvif_notify *notify)
			fill = (buffer->fault[fn    ]->addr -
				buffer->fault[fn - 1]->addr) >> PAGE_SHIFT;
			while (--fill)
				args.phys[pi++] = NVIF_VMM_PFNMAP_V0_NONE;
				hmm_pfns[pi++] = 0;
		}

		SVMM_DBG(svmm, "wndw %016llx-%016llx covering %d fault(s)",
@@ -753,7 +784,7 @@ nouveau_svm_fault(struct nvif_notify *notify)
			ret = nouveau_range_fault(
				svmm, svm->drm, &args,
				sizeof(args.i) + pi * sizeof(args.phys[0]),
				args.phys, &notifier);
				hmm_pfns, args.phys, &notifier);
			mmu_interval_notifier_remove(&notifier.notifier);
		}
		mmput(mm);