Commit 07ccd6bd authored by Chris Wilson's avatar Chris Wilson Committed by Jani Nikula
Browse files

drm/i915/gem: Store mmap_offsets in an rbtree rather than a plain list



Currently we create a new mmap_offset for every call to
mmap_offset_ioctl. This exposes ourselves to an abusive client that may
simply create new mmap_offsets ad infinitum, which will exhaust physical
memory and the virtual address space. In addition to the exhaustion, a
very long linear list of mmap_offsets causes other clients using the
object to incur long list walks -- these long lists can also be
generated by simply having many clients generate their own mmap_offset.

However, we can simply use the drm_vma_node itself to manage the file
association (allow/revoke) dropping our need to keep an mmo per-file.
Then if we keep a small rbtree of per-type mmap_offsets, we can lookup
duplicate requests quickly.

Fixes: cc662126 ("drm/i915: Introduce DRM_I915_GEM_MMAP_OFFSET")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Abdiel Janulgue <abdiel.janulgue@linux.intel.com>
Reviewed-by: Abdiel Janulgue <abdiel.janulgue@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200120104924.4000706-3-chris@chris-wilson.co.uk


(cherry picked from commit 78655598)
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
parent a754012b
Loading
Loading
Loading
Loading
+76 −14
Original line number Diff line number Diff line
@@ -455,10 +455,11 @@ out:

void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
{
	struct i915_mmap_offset *mmo;
	struct i915_mmap_offset *mmo, *mn;

	spin_lock(&obj->mmo.lock);
	list_for_each_entry(mmo, &obj->mmo.offsets, offset) {
	rbtree_postorder_for_each_entry_safe(mmo, mn,
					     &obj->mmo.offsets, offset) {
		/*
		 * vma_node_unmap for GTT mmaps handled already in
		 * __i915_gem_object_release_mmap_gtt
@@ -487,6 +488,67 @@ void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
	i915_gem_object_release_mmap_offset(obj);
}

/*
 * Find the existing mmap_offset for @mmap_type on @obj, if any.
 *
 * The per-object rbtree (obj->mmo.offsets) is keyed solely by mmap_type,
 * so there is at most one node per type. Walked under obj->mmo.lock;
 * returns the matching i915_mmap_offset or NULL.
 */
static struct i915_mmap_offset *
lookup_mmo(struct drm_i915_gem_object *obj,
	   enum i915_mmap_type mmap_type)
{
	struct i915_mmap_offset *found = NULL;
	struct rb_node *rb;

	spin_lock(&obj->mmo.lock);
	for (rb = obj->mmo.offsets.rb_node; rb;) {
		struct i915_mmap_offset *mmo =
			rb_entry(rb, typeof(*mmo), offset);

		if (mmap_type == mmo->mmap_type) {
			found = mmo;
			break;
		}

		/* Tree is ordered by mmap_type: descend accordingly. */
		rb = mmap_type > mmo->mmap_type ? rb->rb_right : rb->rb_left;
	}
	spin_unlock(&obj->mmo.lock);

	return found;
}

/*
 * Insert @mmo into the object's rbtree of mmap_offsets, keyed by mmap_type.
 *
 * If another thread raced us and already inserted a node of the same type,
 * we discard our freshly allocated @mmo (removing its vma offset and
 * freeing it) and return the winner instead. Returns the node that is in
 * the tree; callers must use the returned pointer, not @mmo.
 */
static struct i915_mmap_offset *
insert_mmo(struct drm_i915_gem_object *obj, struct i915_mmap_offset *mmo)
{
	struct rb_node **link, *parent = NULL;

	spin_lock(&obj->mmo.lock);
	link = &obj->mmo.offsets.rb_node;
	while (*link) {
		struct i915_mmap_offset *pos;

		parent = *link;
		pos = rb_entry(parent, typeof(*pos), offset);

		if (mmo->mmap_type == pos->mmap_type) {
			/* Lost the race: drop ours, reuse the existing node. */
			spin_unlock(&obj->mmo.lock);
			drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
					      &mmo->vma_node);
			kfree(mmo);
			return pos;
		}

		if (mmo->mmap_type > pos->mmap_type)
			link = &parent->rb_right;
		else
			link = &parent->rb_left;
	}
	rb_link_node(&mmo->offset, parent, link);
	rb_insert_color(&mmo->offset, &obj->mmo.offsets);
	spin_unlock(&obj->mmo.lock);

	return mmo;
}

static struct i915_mmap_offset *
mmap_offset_attach(struct drm_i915_gem_object *obj,
		   enum i915_mmap_type mmap_type,
@@ -496,20 +558,22 @@ mmap_offset_attach(struct drm_i915_gem_object *obj,
	struct i915_mmap_offset *mmo;
	int err;

	mmo = lookup_mmo(obj, mmap_type);
	if (mmo)
		goto out;

	mmo = kmalloc(sizeof(*mmo), GFP_KERNEL);
	if (!mmo)
		return ERR_PTR(-ENOMEM);

	mmo->obj = obj;
	mmo->dev = obj->base.dev;
	mmo->file = file;
	mmo->mmap_type = mmap_type;
	drm_vma_node_reset(&mmo->vma_node);

	err = drm_vma_offset_add(mmo->dev->vma_offset_manager, &mmo->vma_node,
				 obj->base.size / PAGE_SIZE);
	err = drm_vma_offset_add(obj->base.dev->vma_offset_manager,
				 &mmo->vma_node, obj->base.size / PAGE_SIZE);
	if (likely(!err))
		goto out;
		goto insert;

	/* Attempt to reap some mmap space from dead objects */
	err = intel_gt_retire_requests_timeout(&i915->gt, MAX_SCHEDULE_TIMEOUT);
@@ -517,19 +581,17 @@ mmap_offset_attach(struct drm_i915_gem_object *obj,
		goto err;

	i915_gem_drain_freed_objects(i915);
	err = drm_vma_offset_add(mmo->dev->vma_offset_manager, &mmo->vma_node,
				 obj->base.size / PAGE_SIZE);
	err = drm_vma_offset_add(obj->base.dev->vma_offset_manager,
				 &mmo->vma_node, obj->base.size / PAGE_SIZE);
	if (err)
		goto err;

insert:
	mmo = insert_mmo(obj, mmo);
	GEM_BUG_ON(lookup_mmo(obj, mmap_type) != mmo);
out:
	if (file)
		drm_vma_node_allow(&mmo->vma_node, file);

	spin_lock(&obj->mmo.lock);
	list_add(&mmo->offset, &obj->mmo.offsets);
	spin_unlock(&obj->mmo.lock);

	return mmo;

err:
+7 −11
Original line number Diff line number Diff line
@@ -63,7 +63,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
	INIT_LIST_HEAD(&obj->lut_list);

	spin_lock_init(&obj->mmo.lock);
	INIT_LIST_HEAD(&obj->mmo.offsets);
	obj->mmo.offsets = RB_ROOT;

	init_rcu_head(&obj->rcu);

@@ -100,8 +100,8 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem);
	struct drm_i915_file_private *fpriv = file->driver_priv;
	struct i915_mmap_offset *mmo, *mn;
	struct i915_lut_handle *lut, *ln;
	struct i915_mmap_offset *mmo;
	LIST_HEAD(close);

	i915_gem_object_lock(obj);
@@ -117,14 +117,8 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
	i915_gem_object_unlock(obj);

	spin_lock(&obj->mmo.lock);
	list_for_each_entry(mmo, &obj->mmo.offsets, offset) {
		if (mmo->file != file)
			continue;

		spin_unlock(&obj->mmo.lock);
	rbtree_postorder_for_each_entry_safe(mmo, mn, &obj->mmo.offsets, offset)
		drm_vma_node_revoke(&mmo->vma_node, file);
		spin_lock(&obj->mmo.lock);
	}
	spin_unlock(&obj->mmo.lock);

	list_for_each_entry_safe(lut, ln, &close, obj_link) {
@@ -203,12 +197,14 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,

		i915_gem_object_release_mmap(obj);

		list_for_each_entry_safe(mmo, mn, &obj->mmo.offsets, offset) {
		rbtree_postorder_for_each_entry_safe(mmo, mn,
						     &obj->mmo.offsets,
						     offset) {
			drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
					      &mmo->vma_node);
			kfree(mmo);
		}
		INIT_LIST_HEAD(&obj->mmo.offsets);
		obj->mmo.offsets = RB_ROOT;

		GEM_BUG_ON(atomic_read(&obj->bind_count));
		GEM_BUG_ON(obj->userfault_count);
+2 −4
Original line number Diff line number Diff line
@@ -71,13 +71,11 @@ enum i915_mmap_type {
};

struct i915_mmap_offset {
	struct drm_device *dev;
	struct drm_vma_offset_node vma_node;
	struct drm_i915_gem_object *obj;
	struct drm_file *file;
	enum i915_mmap_type mmap_type;

	struct list_head offset;
	struct rb_node offset;
};

struct drm_i915_gem_object {
@@ -137,7 +135,7 @@ struct drm_i915_gem_object {

	struct {
		spinlock_t lock; /* Protects access to mmo offsets */
		struct list_head offsets;
		struct rb_root offsets;
	} mmo;

	I915_SELFTEST_DECLARE(struct list_head st_link);