Commit 7e805762 authored by Chris Wilson
Browse files

drm/i915: Drop struct_mutex from around i915_retire_requests()



We don't need to hold struct_mutex now for retiring requests, so drop it
from i915_retire_requests() and i915_gem_wait_for_idle(), finally
removing I915_WAIT_LOCKED for good.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191004134015.13204-8-chris@chris-wilson.co.uk
parent b7234840
Loading
Loading
Loading
Loading
+1 −6
Original line number Diff line number Diff line
@@ -155,7 +155,6 @@ static void clear_pages_dma_fence_cb(struct dma_fence *fence,
static void clear_pages_worker(struct work_struct *work)
{
	struct clear_pages_work *w = container_of(work, typeof(*w), work);
	struct drm_i915_private *i915 = w->ce->engine->i915;
	struct drm_i915_gem_object *obj = w->sleeve->vma->obj;
	struct i915_vma *vma = w->sleeve->vma;
	struct i915_request *rq;
@@ -173,11 +172,9 @@ static void clear_pages_worker(struct work_struct *work)
	obj->read_domains = I915_GEM_GPU_DOMAINS;
	obj->write_domain = 0;

	/* XXX: we need to kill this */
	mutex_lock(&i915->drm.struct_mutex);
	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (unlikely(err))
		goto out_unlock;
		goto out_signal;

	batch = intel_emit_vma_fill_blt(w->ce, vma, w->value);
	if (IS_ERR(batch)) {
@@ -229,8 +226,6 @@ out_batch:
	intel_emit_vma_release(w->ce, batch);
out_unpin:
	i915_vma_unpin(vma);
out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
out_signal:
	if (unlikely(err)) {
		dma_fence_set_error(&w->dma, err);
+1 −19
Original line number Diff line number Diff line
@@ -1159,8 +1159,7 @@ gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
}

static int
__intel_context_reconfigure_sseu(struct intel_context *ce,
				 struct intel_sseu sseu)
intel_context_reconfigure_sseu(struct intel_context *ce, struct intel_sseu sseu)
{
	int ret;

@@ -1183,23 +1182,6 @@ unlock:
	return ret;
}

static int
intel_context_reconfigure_sseu(struct intel_context *ce, struct intel_sseu sseu)
{
	struct drm_i915_private *i915 = ce->engine->i915;
	int ret;

	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
	if (ret)
		return ret;

	ret = __intel_context_reconfigure_sseu(ce, sseu);

	mutex_unlock(&i915->drm.struct_mutex);

	return ret;
}

static int
user_to_context_sseu(struct drm_i915_private *i915,
		     const struct drm_i915_gem_context_param_sseu *user,
+17 −28
Original line number Diff line number Diff line
@@ -48,11 +48,7 @@ static void retire_work_handler(struct work_struct *work)
	struct drm_i915_private *i915 =
		container_of(work, typeof(*i915), gem.retire_work.work);

	/* Come back later if the device is busy... */
	if (mutex_trylock(&i915->drm.struct_mutex)) {
	i915_retire_requests(i915);
		mutex_unlock(&i915->drm.struct_mutex);
	}

	queue_delayed_work(i915->wq,
			   &i915->gem.retire_work,
@@ -86,9 +82,7 @@ static bool switch_to_kernel_context_sync(struct intel_gt *gt)
{
	bool result = !intel_gt_is_wedged(gt);

	do {
	if (i915_gem_wait_for_idle(gt->i915,
					   I915_WAIT_LOCKED |
				   I915_WAIT_FOR_IDLE_BOOST,
				   I915_GEM_IDLE_TIMEOUT) == -ETIME) {
		/* XXX hide warning from gem_eio */
@@ -105,7 +99,6 @@ static bool switch_to_kernel_context_sync(struct intel_gt *gt)
		intel_gt_set_wedged(gt);
		result = false;
	}
	} while (i915_retire_requests(gt->i915) && result);

	if (intel_gt_pm_wait_for_idle(gt))
		result = false;
@@ -145,8 +138,6 @@ void i915_gem_suspend(struct drm_i915_private *i915)

	user_forcewake(&i915->gt, true);

	mutex_lock(&i915->drm.struct_mutex);

	/*
	 * We have to flush all the executing contexts to main memory so
	 * that they can saved in the hibernation image. To ensure the last
@@ -158,8 +149,6 @@ void i915_gem_suspend(struct drm_i915_private *i915)
	 */
	switch_to_kernel_context_sync(&i915->gt);

	mutex_unlock(&i915->drm.struct_mutex);

	cancel_delayed_work_sync(&i915->gt.hangcheck.work);

	i915_gem_drain_freed_objects(i915);
+24 −16
Original line number Diff line number Diff line
@@ -7,6 +7,7 @@
#include <linux/prime_numbers.h>

#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"

#include "i915_selftest.h"
#include "selftests/i915_random.h"
@@ -78,7 +79,7 @@ static int gtt_set(struct drm_i915_gem_object *obj,
{
	struct i915_vma *vma;
	u32 __iomem *map;
	int err;
	int err = 0;

	i915_gem_object_lock(obj);
	err = i915_gem_object_set_to_gtt_domain(obj, true);
@@ -90,15 +91,21 @@ static int gtt_set(struct drm_i915_gem_object *obj,
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	intel_gt_pm_get(vma->vm->gt);

	map = i915_vma_pin_iomap(vma);
	i915_vma_unpin(vma);
	if (IS_ERR(map))
		return PTR_ERR(map);
	if (IS_ERR(map)) {
		err = PTR_ERR(map);
		goto out_rpm;
	}

	iowrite32(v, &map[offset / sizeof(*map)]);
	i915_vma_unpin_iomap(vma);

	return 0;
out_rpm:
	intel_gt_pm_put(vma->vm->gt);
	return err;
}

static int gtt_get(struct drm_i915_gem_object *obj,
@@ -107,7 +114,7 @@ static int gtt_get(struct drm_i915_gem_object *obj,
{
	struct i915_vma *vma;
	u32 __iomem *map;
	int err;
	int err = 0;

	i915_gem_object_lock(obj);
	err = i915_gem_object_set_to_gtt_domain(obj, false);
@@ -119,15 +126,21 @@ static int gtt_get(struct drm_i915_gem_object *obj,
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	intel_gt_pm_get(vma->vm->gt);

	map = i915_vma_pin_iomap(vma);
	i915_vma_unpin(vma);
	if (IS_ERR(map))
		return PTR_ERR(map);
	if (IS_ERR(map)) {
		err = PTR_ERR(map);
		goto out_rpm;
	}

	*v = ioread32(&map[offset / sizeof(*map)]);
	i915_vma_unpin_iomap(vma);

	return 0;
out_rpm:
	intel_gt_pm_put(vma->vm->gt);
	return err;
}

static int wc_set(struct drm_i915_gem_object *obj,
@@ -280,7 +293,6 @@ static int igt_gem_coherency(void *arg)
	struct drm_i915_private *i915 = arg;
	const struct igt_coherency_mode *read, *write, *over;
	struct drm_i915_gem_object *obj;
	intel_wakeref_t wakeref;
	unsigned long count, n;
	u32 *offsets, *values;
	int err = 0;
@@ -299,8 +311,6 @@ static int igt_gem_coherency(void *arg)

	values = offsets + ncachelines;

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
	for (over = igt_coherency_mode; over->name; over++) {
		if (!over->set)
			continue;
@@ -326,7 +336,7 @@ static int igt_gem_coherency(void *arg)
					obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
					if (IS_ERR(obj)) {
						err = PTR_ERR(obj);
						goto unlock;
						goto free;
					}

					i915_random_reorder(offsets, ncachelines, &prng);
@@ -377,15 +387,13 @@ static int igt_gem_coherency(void *arg)
			}
		}
	}
unlock:
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
free:
	kfree(offsets);
	return err;

put_object:
	i915_gem_object_put(obj);
	goto unlock;
	goto free;
}

int i915_gem_coherency_live_selftests(struct drm_i915_private *i915)
+4 −14
Original line number Diff line number Diff line
@@ -164,7 +164,6 @@ struct parallel_switch {
static int __live_parallel_switch1(void *data)
{
	struct parallel_switch *arg = data;
	struct drm_i915_private *i915 = arg->ce[0]->engine->i915;
	IGT_TIMEOUT(end_time);
	unsigned long count;

@@ -176,16 +175,12 @@ static int __live_parallel_switch1(void *data)
		for (n = 0; n < ARRAY_SIZE(arg->ce); n++) {
			i915_request_put(rq);

			mutex_lock(&i915->drm.struct_mutex);
			rq = i915_request_create(arg->ce[n]);
			if (IS_ERR(rq)) {
				mutex_unlock(&i915->drm.struct_mutex);
			if (IS_ERR(rq))
				return PTR_ERR(rq);
			}

			i915_request_get(rq);
			i915_request_add(rq);
			mutex_unlock(&i915->drm.struct_mutex);
		}

		err = 0;
@@ -205,7 +200,6 @@ static int __live_parallel_switch1(void *data)
static int __live_parallel_switchN(void *data)
{
	struct parallel_switch *arg = data;
	struct drm_i915_private *i915 = arg->ce[0]->engine->i915;
	IGT_TIMEOUT(end_time);
	unsigned long count;
	int n;
@@ -215,15 +209,11 @@ static int __live_parallel_switchN(void *data)
		for (n = 0; n < ARRAY_SIZE(arg->ce); n++) {
			struct i915_request *rq;

			mutex_lock(&i915->drm.struct_mutex);
			rq = i915_request_create(arg->ce[n]);
			if (IS_ERR(rq)) {
				mutex_unlock(&i915->drm.struct_mutex);
			if (IS_ERR(rq))
				return PTR_ERR(rq);
			}

			i915_request_add(rq);
			mutex_unlock(&i915->drm.struct_mutex);
		}

		count++;
@@ -1173,7 +1163,7 @@ __sseu_test(const char *name,
	if (ret)
		return ret;

	ret = __intel_context_reconfigure_sseu(ce, sseu);
	ret = intel_context_reconfigure_sseu(ce, sseu);
	if (ret)
		goto out_spin;

@@ -1277,7 +1267,7 @@ __igt_ctx_sseu(struct drm_i915_private *i915,
		goto out_fail;

out_fail:
	if (igt_flush_test(i915, I915_WAIT_LOCKED))
	if (igt_flush_test(i915))
		ret = -EIO;

	intel_context_unpin(ce);
Loading