Commit bdefca2d authored by Boris Brezillon, committed by Rob Herring

drm/panfrost: Add the panfrost_gem_mapping concept



With the introduction of per-FD address spaces, the same BO can be mapped
in different address spaces if the BO is globally visible (GEM_FLINK)
and opened in different contexts, or if the dmabuf is self-imported. The
current implementation does not take this case into account and attaches
the mapping directly to the panfrost_gem_object.

Let's create a panfrost_gem_mapping struct and allow multiple mappings
per BO.
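
Concretely, the new per-mapping object (taken from the panfrost_gem.h
changes below; comments added here for annotation only):

	struct panfrost_gem_mapping {
		struct list_head node;	/* entry in bo->mappings.list */
		struct kref refcount;	/* in-flight jobs hold refs too */
		struct panfrost_gem_object *obj;
		struct drm_mm_node mmnode;	/* GPU VA range in this AS */
		struct panfrost_mmu *mmu;	/* the owning FD's MMU context */
		bool active		:1;	/* currently mapped on the GPU */
	};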

The mappings are refcounted, which helps solve another problem where
mappings were torn down (GEM handle closed by userspace) while GPU
jobs accessing those BOs were still in flight. Jobs now keep a
reference on the mappings they use.
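
The resulting call pattern, condensed from the diff below (the helper
name job_take_mapping_ref is made up for this sketch; not compilable
standalone):

	static int job_take_mapping_ref(struct panfrost_job *job,
					struct panfrost_file_priv *priv,
					unsigned int i)
	{
		struct panfrost_gem_object *bo = to_panfrost_bo(job->bos[i]);
		struct panfrost_gem_mapping *mapping;

		/* kref_get() under bo->mappings.lock; NULL if this FD
		 * never opened the BO. */
		mapping = panfrost_gem_mapping_get(bo, priv);
		if (!mapping)
			return -EINVAL;

		/* Dropped by panfrost_gem_mapping_put() in
		 * panfrost_job_cleanup(); the kref release callback unmaps
		 * the BO and frees its GPU VA range. */
		job->mappings[i] = mapping;
		return 0;
	}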

v2 (robh):
- Minor review comment clean-ups from Steven
- Use list_is_singular helper
- Just WARN if we add a mapping when madvise state is not WILLNEED.
  With that, drop the use of object_name_lock.

v3 (robh):
- Revert returning list iterator in panfrost_gem_mapping_get()

Fixes: a5efb4c9 ("drm/panfrost: Restructure the GEM object creation")
Fixes: 7282f764 ("drm/panfrost: Implement per FD address spaces")
Cc: <stable@vger.kernel.org>
Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
Signed-off-by: Rob Herring <robh@kernel.org>
Acked-by: Boris Brezillon <boris.brezillon@collabora.com>
Reviewed-by: Steven Price <steven.price@arm.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200116021554.15090-1-robh@kernel.org
parent db1a0795
drivers/gpu/drm/panfrost/panfrost_drv.c +82 −9
@@ -78,8 +78,10 @@ static int panfrost_ioctl_get_param(struct drm_device *ddev, void *data, struct
static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct panfrost_file_priv *priv = file->driver_priv;
	struct panfrost_gem_object *bo;
	struct drm_panfrost_create_bo *args = data;
	struct panfrost_gem_mapping *mapping;

	if (!args->size || args->pad ||
	    (args->flags & ~(PANFROST_BO_NOEXEC | PANFROST_BO_HEAP)))
@@ -95,7 +97,14 @@ static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data,
	if (IS_ERR(bo))
		return PTR_ERR(bo);

-	args->offset = bo->node.start << PAGE_SHIFT;
+	mapping = panfrost_gem_mapping_get(bo, priv);
+	if (!mapping) {
+		drm_gem_object_put_unlocked(&bo->base.base);
+		return -EINVAL;
+	}

+	args->offset = mapping->mmnode.start << PAGE_SHIFT;
+	panfrost_gem_mapping_put(mapping);

	return 0;
}
@@ -119,6 +128,11 @@ panfrost_lookup_bos(struct drm_device *dev,
		  struct drm_panfrost_submit *args,
		  struct panfrost_job *job)
{
	struct panfrost_file_priv *priv = file_priv->driver_priv;
	struct panfrost_gem_object *bo;
	unsigned int i;
	int ret;

	job->bo_count = args->bo_handle_count;

	if (!job->bo_count)
@@ -130,9 +144,32 @@ panfrost_lookup_bos(struct drm_device *dev,
	if (!job->implicit_fences)
		return -ENOMEM;

-	return drm_gem_objects_lookup(file_priv,
+	ret = drm_gem_objects_lookup(file_priv,
				     (void __user *)(uintptr_t)args->bo_handles,
				     job->bo_count, &job->bos);
	if (ret)
		return ret;

	job->mappings = kvmalloc_array(job->bo_count,
				       sizeof(struct panfrost_gem_mapping *),
				       GFP_KERNEL | __GFP_ZERO);
	if (!job->mappings)
		return -ENOMEM;

	for (i = 0; i < job->bo_count; i++) {
		struct panfrost_gem_mapping *mapping;

		bo = to_panfrost_bo(job->bos[i]);
		mapping = panfrost_gem_mapping_get(bo, priv);
		if (!mapping) {
			ret = -EINVAL;
			break;
		}

		job->mappings[i] = mapping;
	}

	return ret;
}

/**
@@ -320,7 +357,9 @@ out:
static int panfrost_ioctl_get_bo_offset(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct panfrost_file_priv *priv = file_priv->driver_priv;
	struct drm_panfrost_get_bo_offset *args = data;
	struct panfrost_gem_mapping *mapping;
	struct drm_gem_object *gem_obj;
	struct panfrost_gem_object *bo;

@@ -331,18 +370,26 @@ static int panfrost_ioctl_get_bo_offset(struct drm_device *dev, void *data,
	}
	bo = to_panfrost_bo(gem_obj);

-	args->offset = bo->node.start << PAGE_SHIFT;

+	mapping = panfrost_gem_mapping_get(bo, priv);
	drm_gem_object_put_unlocked(gem_obj);

+	if (!mapping)
+		return -EINVAL;

+	args->offset = mapping->mmnode.start << PAGE_SHIFT;
+	panfrost_gem_mapping_put(mapping);
	return 0;
}

static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
				  struct drm_file *file_priv)
{
	struct panfrost_file_priv *priv = file_priv->driver_priv;
	struct drm_panfrost_madvise *args = data;
	struct panfrost_device *pfdev = dev->dev_private;
	struct drm_gem_object *gem_obj;
	struct panfrost_gem_object *bo;
	int ret = 0;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
@@ -350,22 +397,48 @@ static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
		return -ENOENT;
	}

	bo = to_panfrost_bo(gem_obj);

	mutex_lock(&pfdev->shrinker_lock);
	mutex_lock(&bo->mappings.lock);
	if (args->madv == PANFROST_MADV_DONTNEED) {
		struct panfrost_gem_mapping *first;

		first = list_first_entry(&bo->mappings.list,
					 struct panfrost_gem_mapping,
					 node);

		/*
		 * If we want to mark the BO purgeable, there must be only one
		 * user: the caller FD.
		 * We could do something smarter and mark the BO purgeable only
		 * when all its users have marked it purgeable, but globally
		 * visible/shared BOs are likely to never be marked purgeable
		 * anyway, so let's not bother.
		 */
		if (!list_is_singular(&bo->mappings.list) ||
		    WARN_ON_ONCE(first->mmu != &priv->mmu)) {
			ret = -EINVAL;
			goto out_unlock_mappings;
		}
	}

	args->retained = drm_gem_shmem_madvise(gem_obj, args->madv);

	if (args->retained) {
-		struct panfrost_gem_object *bo = to_panfrost_bo(gem_obj);

		if (args->madv == PANFROST_MADV_DONTNEED)
			list_add_tail(&bo->base.madv_list,
				      &pfdev->shrinker_list);
		else if (args->madv == PANFROST_MADV_WILLNEED)
			list_del_init(&bo->base.madv_list);
	}

out_unlock_mappings:
	mutex_unlock(&bo->mappings.lock);
	mutex_unlock(&pfdev->shrinker_lock);

	drm_gem_object_put_unlocked(gem_obj);
-	return 0;
+	return ret;
}

int panfrost_unstable_ioctl_check(void)
drivers/gpu/drm/panfrost/panfrost_gem.c +108 −16
@@ -29,6 +29,12 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj)
	list_del_init(&bo->base.madv_list);
	mutex_unlock(&pfdev->shrinker_lock);

	/*
	 * If we still have mappings attached to the BO, there's a problem in
	 * our refcounting.
	 */
	WARN_ON_ONCE(!list_empty(&bo->mappings.list));

	if (bo->sgts) {
		int i;
		int n_sgt = bo->base.base.size / SZ_2M;
@@ -46,6 +52,69 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj)
	drm_gem_shmem_free_object(obj);
}

struct panfrost_gem_mapping *
panfrost_gem_mapping_get(struct panfrost_gem_object *bo,
			 struct panfrost_file_priv *priv)
{
	struct panfrost_gem_mapping *iter, *mapping = NULL;

	mutex_lock(&bo->mappings.lock);
	list_for_each_entry(iter, &bo->mappings.list, node) {
		if (iter->mmu == &priv->mmu) {
			kref_get(&iter->refcount);
			mapping = iter;
			break;
		}
	}
	mutex_unlock(&bo->mappings.lock);

	return mapping;
}

static void
panfrost_gem_teardown_mapping(struct panfrost_gem_mapping *mapping)
{
	struct panfrost_file_priv *priv;

	if (mapping->active)
		panfrost_mmu_unmap(mapping);

	priv = container_of(mapping->mmu, struct panfrost_file_priv, mmu);
	spin_lock(&priv->mm_lock);
	if (drm_mm_node_allocated(&mapping->mmnode))
		drm_mm_remove_node(&mapping->mmnode);
	spin_unlock(&priv->mm_lock);
}

static void panfrost_gem_mapping_release(struct kref *kref)
{
	struct panfrost_gem_mapping *mapping;

	mapping = container_of(kref, struct panfrost_gem_mapping, refcount);

	panfrost_gem_teardown_mapping(mapping);
	drm_gem_object_put_unlocked(&mapping->obj->base.base);
	kfree(mapping);
}

void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping)
{
	if (!mapping)
		return;

	kref_put(&mapping->refcount, panfrost_gem_mapping_release);
}

void panfrost_gem_teardown_mappings(struct panfrost_gem_object *bo)
{
	struct panfrost_gem_mapping *mapping;

	mutex_lock(&bo->mappings.lock);
	list_for_each_entry(mapping, &bo->mappings.list, node)
		panfrost_gem_teardown_mapping(mapping);
	mutex_unlock(&bo->mappings.lock);
}

int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	int ret;
@@ -54,6 +123,16 @@ int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
	unsigned long color = bo->noexec ? PANFROST_BO_NOEXEC : 0;
	struct panfrost_file_priv *priv = file_priv->driver_priv;
	struct panfrost_gem_mapping *mapping;

	mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping)
		return -ENOMEM;

	INIT_LIST_HEAD(&mapping->node);
	kref_init(&mapping->refcount);
	drm_gem_object_get(obj);
	mapping->obj = bo;

	/*
	 * Executable buffers cannot cross a 16MB boundary as the program
@@ -66,37 +145,48 @@ int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
	else
		align = size >= SZ_2M ? SZ_2M >> PAGE_SHIFT : 0;

-	bo->mmu = &priv->mmu;
+	mapping->mmu = &priv->mmu;
	spin_lock(&priv->mm_lock);
-	ret = drm_mm_insert_node_generic(&priv->mm, &bo->node,
+	ret = drm_mm_insert_node_generic(&priv->mm, &mapping->mmnode,
					 size >> PAGE_SHIFT, align, color, 0);
	spin_unlock(&priv->mm_lock);
	if (ret)
-		return ret;
+		goto err;

	if (!bo->is_heap) {
-		ret = panfrost_mmu_map(bo);
-		if (ret) {
-			spin_lock(&priv->mm_lock);
-			drm_mm_remove_node(&bo->node);
-			spin_unlock(&priv->mm_lock);
-		}
+		ret = panfrost_mmu_map(mapping);
+		if (ret)
+			goto err;
	}

	mutex_lock(&bo->mappings.lock);
	WARN_ON(bo->base.madv != PANFROST_MADV_WILLNEED);
	list_add_tail(&mapping->node, &bo->mappings.list);
	mutex_unlock(&bo->mappings.lock);

err:
	if (ret)
		panfrost_gem_mapping_put(mapping);
	return ret;
}

void panfrost_gem_close(struct drm_gem_object *obj, struct drm_file *file_priv)
{
-	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
	struct panfrost_file_priv *priv = file_priv->driver_priv;
+	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
+	struct panfrost_gem_mapping *mapping = NULL, *iter;

-	if (bo->is_mapped)
-		panfrost_mmu_unmap(bo);
+	mutex_lock(&bo->mappings.lock);
+	list_for_each_entry(iter, &bo->mappings.list, node) {
+		if (iter->mmu == &priv->mmu) {
+			mapping = iter;
+			list_del(&iter->node);
+			break;
+		}
+	}
+	mutex_unlock(&bo->mappings.lock);

-	spin_lock(&priv->mm_lock);
-	if (drm_mm_node_allocated(&bo->node))
-		drm_mm_remove_node(&bo->node);
-	spin_unlock(&priv->mm_lock);
+	panfrost_gem_mapping_put(mapping);
}

static int panfrost_gem_pin(struct drm_gem_object *obj)
@@ -136,6 +226,8 @@ struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t
	if (!obj)
		return NULL;

	INIT_LIST_HEAD(&obj->mappings.list);
	mutex_init(&obj->mappings.lock);
	obj->base.base.funcs = &panfrost_gem_funcs;

	return &obj->base.base;
drivers/gpu/drm/panfrost/panfrost_gem.h +35 −6
@@ -13,23 +13,46 @@ struct panfrost_gem_object {
	struct drm_gem_shmem_object base;
	struct sg_table *sgts;

-	struct panfrost_mmu *mmu;
-	struct drm_mm_node node;
-	bool is_mapped		:1;
+	/*
+	 * Use a list for now. If searching a mapping ever becomes the
+	 * bottleneck, we should consider using an RB-tree, or even better,
+	 * let the core store drm_gem_object_mapping entries (where we
+	 * could place driver specific data) instead of drm_gem_object ones
+	 * in its drm_file->object_idr table.
+	 *
+	 * struct drm_gem_object_mapping {
+	 *	struct drm_gem_object *obj;
+	 *	void *driver_priv;
+	 * };
+	 */
+	struct {
+		struct list_head list;
+		struct mutex lock;
+	} mappings;

	bool noexec		:1;
	bool is_heap		:1;
};

struct panfrost_gem_mapping {
	struct list_head node;
	struct kref refcount;
	struct panfrost_gem_object *obj;
	struct drm_mm_node mmnode;
	struct panfrost_mmu *mmu;
	bool active		:1;
};

static inline
struct  panfrost_gem_object *to_panfrost_bo(struct drm_gem_object *obj)
{
	return container_of(to_drm_gem_shmem_obj(obj), struct panfrost_gem_object, base);
}

-static inline
-struct  panfrost_gem_object *drm_mm_node_to_panfrost_bo(struct drm_mm_node *node)
+static inline struct panfrost_gem_mapping *
+drm_mm_node_to_panfrost_mapping(struct drm_mm_node *node)
{
-	return container_of(node, struct panfrost_gem_object, node);
+	return container_of(node, struct panfrost_gem_mapping, mmnode);
}

struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size);
@@ -49,6 +72,12 @@ int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv);
void panfrost_gem_close(struct drm_gem_object *obj,
			struct drm_file *file_priv);

struct panfrost_gem_mapping *
panfrost_gem_mapping_get(struct panfrost_gem_object *bo,
			 struct panfrost_file_priv *priv);
void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping);
void panfrost_gem_teardown_mappings(struct panfrost_gem_object *bo);

void panfrost_gem_shrinker_init(struct drm_device *dev);
void panfrost_gem_shrinker_cleanup(struct drm_device *dev);

drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c +2 −1
@@ -39,11 +39,12 @@ panfrost_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc
static bool panfrost_gem_purge(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+	struct panfrost_gem_object *bo = to_panfrost_bo(obj);

	if (!mutex_trylock(&shmem->pages_lock))
		return false;

-	panfrost_mmu_unmap(to_panfrost_bo(obj));
+	panfrost_gem_teardown_mappings(bo);
	drm_gem_shmem_purge_locked(obj);

	mutex_unlock(&shmem->pages_lock);
drivers/gpu/drm/panfrost/panfrost_job.c +12 −1
@@ -268,9 +268,20 @@ static void panfrost_job_cleanup(struct kref *ref)
	dma_fence_put(job->done_fence);
	dma_fence_put(job->render_done_fence);

-	if (job->bos) {
+	if (job->mappings) {
		for (i = 0; i < job->bo_count; i++)
			panfrost_gem_mapping_put(job->mappings[i]);
		kvfree(job->mappings);
	}

	if (job->bos) {
		struct panfrost_gem_object *bo;

		for (i = 0; i < job->bo_count; i++) {
			bo = to_panfrost_bo(job->bos[i]);
			drm_gem_object_put_unlocked(job->bos[i]);
		}

		kvfree(job->bos);
	}
