Commit 4e598fad authored by Abdiel Janulgue, committed by Chris Wilson

drm/i915/gem: Extend mmap support for lmem



Local memory objects are similar to our usual scatterlist, but instead
of using the struct page stored therein, we need to use the
sg->dma_address.

Signed-off-by: Abdiel Janulgue <abdiel.janulgue@linux.intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20200103204137.2131004-1-chris@chris-wilson.co.uk
parent b2fcaac9
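
An illustrative sketch (not part of this patch): the distinction the message describes is that a shmem-backed object can resolve each PFN from the struct page stored in its scatterlist, while an lmem object has no struct page and must derive the PFN from sg->dma_address rebased onto the CPU-visible aperture. The helper below is hypothetical; only sg_page(), sg_dma_address() and page_to_pfn() are real kernel APIs, and the -1 sentinel follows the patch's use_dma() convention:

#include <linux/mm.h>
#include <linux/scatterlist.h>

/* Sketch: pfn of the first page of an sg entry, by struct page or by dma address */
static unsigned long sketch_sg_pfn(struct scatterlist *sg, resource_size_t iobase)
{
	if (iobase != -1)	/* lmem: no struct page; use the dma address */
		return (sg_dma_address(sg) + iobase) >> PAGE_SHIFT;

	return page_to_pfn(sg_page(sg));	/* shmem: backed by struct page */
}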
drivers/gpu/drm/i915/gem/i915_gem_mman.c  +14 −7
@@ -213,6 +213,7 @@ static vm_fault_t i915_error_to_vmf_fault(int err)
 	case -EIO: /* shmemfs failure from swap device */
 	case -EFAULT: /* purged object */
 	case -ENODEV: /* bad object, how did you get here! */
+	case -ENXIO: /* unable to access backing store (on device) */
 		return VM_FAULT_SIGBUS;
 
 	case -ENOSPC: /* shmemfs allocation failure */
@@ -237,11 +238,9 @@ static vm_fault_t vm_fault_cpu(struct vm_fault *vmf)
 	struct vm_area_struct *area = vmf->vma;
 	struct i915_mmap_offset *mmo = area->vm_private_data;
 	struct drm_i915_gem_object *obj = mmo->obj;
+	resource_size_t iomap;
 	int err;
 
-	if (unlikely(!i915_gem_object_has_struct_page(obj)))
-		return VM_FAULT_SIGBUS;
-
 	/* Sanity check that we allow writing into this object */
 	if (unlikely(i915_gem_object_is_readonly(obj) &&
 		     area->vm_flags & VM_WRITE))
@@ -251,10 +250,16 @@ static vm_fault_t vm_fault_cpu(struct vm_fault *vmf)
 	if (err)
 		goto out;
 
+	iomap = -1;
+	if (!i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_STRUCT_PAGE)) {
+		iomap = obj->mm.region->iomap.base;
+		iomap -= obj->mm.region->region.start;
+	}
+
 	/* PTEs are revoked in obj->ops->put_pages() */
-	err = remap_io_sg_page(area,
-			       area->vm_start, area->vm_end - area->vm_start,
-			       obj->mm.pages->sgl);
+	err = remap_io_sg(area,
+			  area->vm_start, area->vm_end - area->vm_start,
+			  obj->mm.pages->sgl, iomap);
 
 	if (area->vm_flags & VM_WRITE) {
 		GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
@@ -553,7 +558,9 @@ __assign_mmap_offset(struct drm_file *file,
 	}
 
 	if (mmap_type != I915_MMAP_TYPE_GTT &&
-	    !i915_gem_object_has_struct_page(obj)) {
+	    !i915_gem_object_type_has(obj,
+				      I915_GEM_OBJECT_HAS_STRUCT_PAGE |
+				      I915_GEM_OBJECT_HAS_IOMEM)) {
 		err = -ENODEV;
 		goto out;
 	}
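
The iomap bias computed above is what makes the stored dma addresses usable as PFNs: obj->mm.region->iomap.base is the CPU physical base of the region's aperture, and, assuming the dma addresses lie in the device's local-memory range beginning at region.start, subtracting region.start leaves an offset that remap_io_sg() can add back to every sg->dma_address. A worked example with invented addresses:

	/* Illustrative numbers only, not from the patch */
	resource_size_t bar_base     = 0x400000000; /* region->iomap.base */
	resource_size_t region_start = 0x100000;    /* region->region.start */
	resource_size_t dma          = 0x180000;    /* sg->dma_address */

	resource_size_t iobase = bar_base - region_start;  /* 0x3fff00000 */
	unsigned long pfn = (dma + iobase) >> PAGE_SHIFT;
	/* == (bar_base + (dma - region_start)) >> PAGE_SHIFT */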
drivers/gpu/drm/i915/i915_drv.h  +3 −3
@@ -2027,9 +2027,9 @@ int i915_reg_read_ioctl(struct drm_device *dev, void *data,
 int remap_io_mapping(struct vm_area_struct *vma,
 		     unsigned long addr, unsigned long pfn, unsigned long size,
 		     struct io_mapping *iomap);
-int remap_io_sg_page(struct vm_area_struct *vma,
-		     unsigned long addr, unsigned long size,
-		     struct scatterlist *sgl);
+int remap_io_sg(struct vm_area_struct *vma,
+		unsigned long addr, unsigned long size,
+		struct scatterlist *sgl, resource_size_t iobase);
 
 static inline int intel_hws_csb_write_index(struct drm_i915_private *i915)
 {
drivers/gpu/drm/i915/i915_mm.c  +22 −12
@@ -35,6 +35,7 @@ struct remap_pfn {
 	pgprot_t prot;
 
 	struct sgt_iter sgt;
+	resource_size_t iobase;
 };
 
 static int remap_pfn(pte_t *pte, unsigned long addr, void *data)
@@ -48,12 +49,17 @@ static int remap_pfn(pte_t *pte, unsigned long addr, void *data)
 	return 0;
 }
 
-static inline unsigned long sgt_pfn(const struct sgt_iter *sgt)
+#define use_dma(io) ((io) != -1)
+
+static inline unsigned long sgt_pfn(const struct remap_pfn *r)
 {
-	return sgt->pfn + (sgt->curr >> PAGE_SHIFT);
+	if (use_dma(r->iobase))
+		return (r->sgt.dma + r->sgt.curr + r->iobase) >> PAGE_SHIFT;
+	else
+		return r->sgt.pfn + (r->sgt.curr >> PAGE_SHIFT);
 }
 
-static int remap_sg_page(pte_t *pte, unsigned long addr, void *data)
+static int remap_sg(pte_t *pte, unsigned long addr, void *data)
 {
 	struct remap_pfn *r = data;
 
@@ -62,12 +68,12 @@ static int remap_sg_page(pte_t *pte, unsigned long addr, void *data)
 
 	/* Special PTE are not associated with any struct page */
 	set_pte_at(r->mm, addr, pte,
-		   pte_mkspecial(pfn_pte(sgt_pfn(&r->sgt), r->prot)));
+		   pte_mkspecial(pfn_pte(sgt_pfn(r), r->prot)));
 	r->pfn++; /* track insertions in case we need to unwind later */
 
 	r->sgt.curr += PAGE_SIZE;
 	if (r->sgt.curr >= r->sgt.max)
-		r->sgt = __sgt_iter(__sg_next(r->sgt.sgp), false);
+		r->sgt = __sgt_iter(__sg_next(r->sgt.sgp), use_dma(r->iobase));
 
 	return 0;
 }
@@ -108,30 +114,34 @@ int remap_io_mapping(struct vm_area_struct *vma,
 }
 
 /**
- * remap_io_sg_page - remap an IO mapping to userspace
+ * remap_io_sg - remap an IO mapping to userspace
  * @vma: user vma to map to
  * @addr: target user address to start at
  * @size: size of map area
  * @sgl: Start sg entry
+ * @iobase: Use stored dma address offset by this address or pfn if -1
  *
  *  Note: this is only safe if the mm semaphore is held when called.
  */
-int remap_io_sg_page(struct vm_area_struct *vma,
-		     unsigned long addr, unsigned long size,
-		     struct scatterlist *sgl)
+int remap_io_sg(struct vm_area_struct *vma,
+		unsigned long addr, unsigned long size,
+		struct scatterlist *sgl, resource_size_t iobase)
 {
 	struct remap_pfn r = {
 		.mm = vma->vm_mm,
 		.prot = vma->vm_page_prot,
-		.sgt = __sgt_iter(sgl, false),
+		.sgt = __sgt_iter(sgl, use_dma(iobase)),
+		.iobase = iobase,
 	};
 	int err;
 
 	/* We rely on prevalidation of the io-mapping to skip track_pfn(). */
 	GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS);
 
-	flush_cache_range(vma, addr, size);
-	err = apply_to_page_range(r.mm, addr, size, remap_sg_page, &r);
+	if (!use_dma(iobase))
+		flush_cache_range(vma, addr, size);
+
+	err = apply_to_page_range(r.mm, addr, size, remap_sg, &r);
 	if (unlikely(err)) {
 		zap_vma_ptes(vma, addr, r.pfn << PAGE_SHIFT);
 		return err;