Commit b40d7378 authored by Chris Wilson
Browse files

drm/i915: Replace struct_mutex for batch pool serialisation



Switch to tracking activity via i915_active on individual nodes, only
keeping a list of retired objects in the cache, and reaping the cache
when the engine itself idles.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190804124826.30272-2-chris@chris-wilson.co.uk
parent a4e57f90
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -72,6 +72,7 @@ obj-y += gt/
gt-y += \
	gt/intel_breadcrumbs.o \
	gt/intel_context.o \
	gt/intel_engine_pool.o \
	gt/intel_engine_cs.o \
	gt/intel_engine_pm.o \
	gt/intel_gt.o \
@@ -125,7 +126,6 @@ i915-y += \
	  $(gem-y) \
	  i915_active.o \
	  i915_cmd_parser.o \
	  i915_gem_batch_pool.o \
	  i915_gem_evict.o \
	  i915_gem_fence_reg.o \
	  i915_gem_gtt.o \
+36 −22
Original line number Diff line number Diff line
@@ -16,6 +16,7 @@

#include "gem/i915_gem_ioctls.h"
#include "gt/intel_context.h"
#include "gt/intel_engine_pool.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"

@@ -1198,25 +1199,26 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
			     unsigned int len)
{
	struct reloc_cache *cache = &eb->reloc_cache;
	struct drm_i915_gem_object *obj;
	struct intel_engine_pool_node *pool;
	struct i915_request *rq;
	struct i915_vma *batch;
	u32 *cmd;
	int err;

	obj = i915_gem_batch_pool_get(&eb->engine->batch_pool, PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);
	pool = intel_engine_pool_get(&eb->engine->pool, PAGE_SIZE);
	if (IS_ERR(pool))
		return PTR_ERR(pool);

	cmd = i915_gem_object_pin_map(obj,
	cmd = i915_gem_object_pin_map(pool->obj,
				      cache->has_llc ?
				      I915_MAP_FORCE_WB :
				      I915_MAP_FORCE_WC);
	i915_gem_object_unpin_pages(obj);
	if (IS_ERR(cmd))
		return PTR_ERR(cmd);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto out_pool;
	}

	batch = i915_vma_instance(obj, vma->vm, NULL);
	batch = i915_vma_instance(pool->obj, vma->vm, NULL);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto err_unmap;
@@ -1232,6 +1234,10 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
		goto err_unpin;
	}

	err = intel_engine_pool_mark_active(pool, rq);
	if (err)
		goto err_request;

	err = reloc_move_to_gpu(rq, vma);
	if (err)
		goto err_request;
@@ -1257,7 +1263,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
	cache->rq_size = 0;

	/* Return with batch mapping (cmd) still pinned */
	return 0;
	goto out_pool;

skip_request:
	i915_request_skip(rq, err);
@@ -1266,7 +1272,9 @@ err_request:
err_unpin:
	i915_vma_unpin(batch);
err_unmap:
	i915_gem_object_unpin_map(obj);
	i915_gem_object_unpin_map(pool->obj);
out_pool:
	intel_engine_pool_put(pool);
	return err;
}

@@ -2010,18 +2018,17 @@ static int i915_reset_gen7_sol_offsets(struct i915_request *rq)

static struct i915_vma *eb_parse(struct i915_execbuffer *eb, bool is_master)
{
	struct drm_i915_gem_object *shadow_batch_obj;
	struct intel_engine_pool_node *pool;
	struct i915_vma *vma;
	int err;

	shadow_batch_obj = i915_gem_batch_pool_get(&eb->engine->batch_pool,
						   PAGE_ALIGN(eb->batch_len));
	if (IS_ERR(shadow_batch_obj))
		return ERR_CAST(shadow_batch_obj);
	pool = intel_engine_pool_get(&eb->engine->pool, eb->batch_len);
	if (IS_ERR(pool))
		return ERR_CAST(pool);

	err = intel_engine_cmd_parser(eb->engine,
				      eb->batch->obj,
				      shadow_batch_obj,
				      pool->obj,
				      eb->batch_start_offset,
				      eb->batch_len,
				      is_master);
@@ -2030,12 +2037,12 @@ static struct i915_vma *eb_parse(struct i915_execbuffer *eb, bool is_master)
			vma = NULL;
		else
			vma = ERR_PTR(err);
		goto out;
		goto err;
	}

	vma = i915_gem_object_ggtt_pin(shadow_batch_obj, NULL, 0, 0, 0);
	vma = i915_gem_object_ggtt_pin(pool->obj, NULL, 0, 0, 0);
	if (IS_ERR(vma))
		goto out;
		goto err;

	eb->vma[eb->buffer_count] = i915_vma_get(vma);
	eb->flags[eb->buffer_count] =
@@ -2043,8 +2050,11 @@ static struct i915_vma *eb_parse(struct i915_execbuffer *eb, bool is_master)
	vma->exec_flags = &eb->flags[eb->buffer_count];
	eb->buffer_count++;

out:
	i915_gem_object_unpin_pages(shadow_batch_obj);
	vma->private = pool;
	return vma;

err:
	intel_engine_pool_put(pool);
	return vma;
}

@@ -2588,6 +2598,8 @@ i915_gem_do_execbuffer(struct drm_device *dev,
	 * to explicitly hold another reference here.
	 */
	eb.request->batch = eb.batch;
	if (eb.batch->private)
		intel_engine_pool_mark_active(eb.batch->private, eb.request);

	trace_i915_request_queue(eb.request, eb.batch_flags);
	err = eb_submit(&eb);
@@ -2612,6 +2624,8 @@ err_request:
err_batch_unpin:
	if (eb.batch_flags & I915_DISPATCH_SECURE)
		i915_vma_unpin(eb.batch);
	if (eb.batch->private)
		intel_engine_pool_put(eb.batch->private);
err_vma:
	if (eb.exec)
		eb_release_vmas(&eb);
+0 −1
Original line number Diff line number Diff line
@@ -66,7 +66,6 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
	INIT_LIST_HEAD(&obj->mm.link);

	INIT_LIST_HEAD(&obj->lut_list);
	INIT_LIST_HEAD(&obj->batch_pool_link);

	init_rcu_head(&obj->rcu);

+0 −1
Original line number Diff line number Diff line
@@ -114,7 +114,6 @@ struct drm_i915_gem_object {
	unsigned int userfault_count;
	struct list_head userfault_link;

	struct list_head batch_pool_link;
	I915_SELFTEST_DECLARE(struct list_head st_link);

	/*
+1 −3
Original line number Diff line number Diff line
@@ -34,10 +34,8 @@ static void i915_gem_park(struct drm_i915_private *i915)

	lockdep_assert_held(&i915->drm.struct_mutex);

	for_each_engine(engine, i915, id) {
	for_each_engine(engine, i915, id)
		call_idle_barriers(engine); /* cleanup after wedging */
		i915_gem_batch_pool_fini(&engine->batch_pool);
	}

	intel_timelines_park(i915);
	i915_vma_parked(i915);
Loading