Commit c017cf6b authored by Chris Wilson
Browse files

drm/i915: Drop the deferred active reference



An old optimisation to reduce the number of atomics per batch sadly
relies on struct_mutex for coordination. In order to remove struct_mutex
from serialising object/context closing, always taking and releasing an
active reference on first use / last use greatly simplifies the locking.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190528092956.14910-15-chris@chris-wilson.co.uk
parent 754f7a0b
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -112,7 +112,7 @@ static void lut_close(struct i915_gem_context *ctx)
		radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);

		vma->open_count--;
		__i915_gem_object_release_unless_active(vma->obj);
		i915_vma_put(vma);
	}
	rcu_read_unlock();
}
+1 −12
Original line number Diff line number Diff line
@@ -155,7 +155,7 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
		list_del(&lut->ctx_link);

		i915_lut_handle_free(lut);
		__i915_gem_object_release_unless_active(obj);
		i915_gem_object_put(obj);
	}

	mutex_unlock(&i915->drm.struct_mutex);
@@ -347,17 +347,6 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
	call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
}

/*
 * __i915_gem_object_release_unless_active - drop a reference, or defer it
 * @obj: the GEM object whose reference is being released
 *
 * Removed by this patch: the old "deferred active reference" optimisation.
 * Caller must hold dev->struct_mutex (asserted below), which is exactly the
 * dependency the commit message cites as the reason for removal.
 */
void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj)
{
	lockdep_assert_held(&obj->base.dev->struct_mutex);

	/*
	 * If the object is still active on the GPU and no deferred reference
	 * has been taken yet, mark I915_BO_ACTIVE_REF instead of dropping the
	 * reference now; otherwise release it immediately.
	 * NOTE(review): presumably the flagged reference is dropped later when
	 * the object retires — the retirement path is outside this view.
	 */
	if (!i915_gem_object_has_active_reference(obj) &&
	    i915_gem_object_is_active(obj))
		i915_gem_object_set_active_reference(obj);
	else
		i915_gem_object_put(obj);
}

static inline enum fb_op_origin
fb_write_origin(struct drm_i915_gem_object *obj, unsigned int domain)
{
+1 −23
Original line number Diff line number Diff line
@@ -161,31 +161,9 @@ i915_gem_object_needs_async_cancel(const struct drm_i915_gem_object *obj)
static inline bool
i915_gem_object_is_active(const struct drm_i915_gem_object *obj)
{
	/*
	 * NOTE(review): diff-rendering artifact — the +/- markers were lost,
	 * so the removed line and its replacement appear back-to-back. The
	 * patch changes the plain read of active_count to READ_ONCE(), i.e. a
	 * single untorn load, consistent with dropping struct_mutex
	 * serialisation per the commit message. Only one of these two return
	 * statements exists in the real file.
	 */
	return obj->active_count;
	return READ_ONCE(obj->active_count);
}

/*
 * Removed by this patch: query whether a deferred active reference has been
 * taken for @obj, i.e. whether the I915_BO_ACTIVE_REF bit (bit 0, per the
 * struct definition) is set in obj->flags.
 */
static inline bool
i915_gem_object_has_active_reference(const struct drm_i915_gem_object *obj)
{
	return test_bit(I915_BO_ACTIVE_REF, &obj->flags);
}

/*
 * Removed by this patch: mark @obj as carrying a deferred active reference.
 * Uses the non-atomic __set_bit(); serialisation is expected from
 * struct_mutex, which the lockdep assertion below requires the caller to
 * hold — the very dependency this commit eliminates.
 */
static inline void
i915_gem_object_set_active_reference(struct drm_i915_gem_object *obj)
{
	lockdep_assert_held(&obj->base.dev->struct_mutex);
	__set_bit(I915_BO_ACTIVE_REF, &obj->flags);
}

/*
 * Removed by this patch: clear the deferred-active-reference mark on @obj.
 * Non-atomic __clear_bit() paired with the lockdep assertion — the caller
 * must hold struct_mutex, mirroring the set side.
 */
static inline void
i915_gem_object_clear_active_reference(struct drm_i915_gem_object *obj)
{
	lockdep_assert_held(&obj->base.dev->struct_mutex);
	__clear_bit(I915_BO_ACTIVE_REF, &obj->flags);
}

void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj);

static inline bool
i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
{
+0 −8
Original line number Diff line number Diff line
@@ -120,14 +120,6 @@ struct drm_i915_gem_object {
	struct list_head batch_pool_link;
	I915_SELFTEST_DECLARE(struct list_head st_link);

	unsigned long flags;

	/**
	 * Have we taken a reference for the object for incomplete GPU
	 * activity?
	 */
#define I915_BO_ACTIVE_REF 0

	/*
	 * Is the object to be mapped as read-only to the GPU
	 * Only honoured if hardware has relevant pte bit
+1 −2
Original line number Diff line number Diff line
@@ -976,8 +976,6 @@ static int gpu_write(struct i915_vma *vma,
	if (err)
		goto err_request;

	i915_gem_object_set_active_reference(batch->obj);

	i915_vma_lock(vma);
	err = i915_gem_object_set_to_gtt_domain(vma->obj, false);
	if (err == 0)
@@ -996,6 +994,7 @@ err_request:
err_batch:
	i915_vma_unpin(batch);
	i915_vma_close(batch);
	i915_vma_put(batch);

	return err;
}
Loading