Commit 2f20055d authored by John Harrison, committed by Daniel Vetter
Browse files

drm/i915: Update a bunch of execbuffer helpers to take request structures



Updated *_ring_invalidate_all_caches(), i915_reset_gen7_sol_offsets() and
i915_emit_box() to take request structures instead of ring or ringbuf/context
pairs.

For: VIZ-5115
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Reviewed-by: Tomas Elf <tomas.elf@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent 1d719cda
Loading
Loading
Loading
Loading
+7 −5
Original line number Diff line number Diff line
@@ -924,7 +924,7 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
	/* Unconditionally invalidate gpu caches and ensure that we do flush
	 * any residual writes from the previous batch.
	 */
	return intel_ring_invalidate_all_caches(req->ring);
	return intel_ring_invalidate_all_caches(req);
}

static bool
@@ -1071,8 +1071,9 @@ i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params)

static int
i915_reset_gen7_sol_offsets(struct drm_device *dev,
			    struct intel_engine_cs *ring)
			    struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *ring = req->ring;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret, i;

@@ -1097,10 +1098,11 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
}

static int
i915_emit_box(struct intel_engine_cs *ring,
i915_emit_box(struct drm_i915_gem_request *req,
	      struct drm_clip_rect *box,
	      int DR1, int DR4)
{
	struct intel_engine_cs *ring = req->ring;
	int ret;

	if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
@@ -1310,7 +1312,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
	}

	if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
		ret = i915_reset_gen7_sol_offsets(dev, ring);
		ret = i915_reset_gen7_sol_offsets(dev, params->request);
		if (ret)
			goto error;
	}
@@ -1321,7 +1323,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,

	if (cliprects) {
		for (i = 0; i < args->num_cliprects; i++) {
			ret = i915_emit_box(ring, &cliprects[i],
			ret = i915_emit_box(params->request, &cliprects[i],
					    args->DR1, args->DR4);
			if (ret)
				goto error;
+4 −5
Original line number Diff line number Diff line
@@ -604,10 +604,9 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
	return 0;
}

static int logical_ring_invalidate_all_caches(struct intel_ringbuffer *ringbuf,
					      struct intel_context *ctx)
static int logical_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *ring = ringbuf->ring;
	struct intel_engine_cs *ring = req->ring;
	uint32_t flush_domains;
	int ret;

@@ -615,7 +614,7 @@ static int logical_ring_invalidate_all_caches(struct intel_ringbuffer *ringbuf,
	if (ring->gpu_caches_dirty)
		flush_domains = I915_GEM_GPU_DOMAINS;

	ret = ring->emit_flush(ringbuf, ctx,
	ret = ring->emit_flush(req->ringbuf, req->ctx,
			       I915_GEM_GPU_DOMAINS, flush_domains);
	if (ret)
		return ret;
@@ -654,7 +653,7 @@ static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
	/* Unconditionally invalidate gpu caches and ensure that we do flush
	 * any residual writes from the previous batch.
	 */
	return logical_ring_invalidate_all_caches(req->ringbuf, req->ctx);
	return logical_ring_invalidate_all_caches(req);
}

int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
+2 −1
Original line number Diff line number Diff line
@@ -2910,8 +2910,9 @@ intel_ring_flush_all_caches(struct intel_engine_cs *ring)
}

int
intel_ring_invalidate_all_caches(struct intel_engine_cs *ring)
intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *ring = req->ring;
	uint32_t flush_domains;
	int ret;

+1 −1
Original line number Diff line number Diff line
@@ -446,7 +446,7 @@ bool intel_ring_stopped(struct intel_engine_cs *ring);
int __must_check intel_ring_idle(struct intel_engine_cs *ring);
void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno);
int intel_ring_flush_all_caches(struct intel_engine_cs *ring);
int intel_ring_invalidate_all_caches(struct intel_engine_cs *ring);
int intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req);

void intel_fini_pipe_control(struct intel_engine_cs *ring);
int intel_init_pipe_control(struct intel_engine_cs *ring);