Commit 6259cead authored by John Harrison, committed by Daniel Vetter
Browse files

drm/i915: Remove 'outstanding_lazy_seqno'



The OLS value is now obsolete. Exactly the same value is guaranteed to be always
available as PLR->seqno. Thus it is safe to remove the OLS completely, and also
to rename the PLR to OLR to keep the 'outstanding lazy ...' naming convention
valid.

For: VIZ-4377
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Reviewed-by: Thomas Daniel <Thomas.Daniel@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent ff865885
Loading
Loading
Loading
Loading
+5 −8
Original line number Diff line number Diff line
@@ -1164,7 +1164,7 @@ i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno)
	BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));

	ret = 0;
	if (seqno == ring->outstanding_lazy_seqno)
	if (seqno == i915_gem_request_get_seqno(ring->outstanding_lazy_request))
		ret = i915_add_request(ring, NULL);

	return ret;
@@ -2421,7 +2421,7 @@ int __i915_add_request(struct intel_engine_cs *ring,
	u32 request_ring_position, request_start;
	int ret;

	request = ring->preallocated_lazy_request;
	request = ring->outstanding_lazy_request;
	if (WARN_ON(request == NULL))
		return -ENOMEM;

@@ -2466,7 +2466,6 @@ int __i915_add_request(struct intel_engine_cs *ring,
			return ret;
	}

	request->seqno = intel_ring_get_seqno(ring);
	request->ring = ring;
	request->head = request_start;
	request->tail = request_ring_position;
@@ -2503,8 +2502,7 @@ int __i915_add_request(struct intel_engine_cs *ring,
	}

	trace_i915_gem_request_add(ring, request->seqno);
	ring->outstanding_lazy_seqno = 0;
	ring->preallocated_lazy_request = NULL;
	ring->outstanding_lazy_request = NULL;

	i915_queue_hangcheck(ring->dev);

@@ -2689,9 +2687,8 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
		i915_gem_free_request(request);
	}

	/* These may not have been flush before the reset, do so now */
	i915_gem_request_assign(&ring->preallocated_lazy_request, NULL);
	ring->outstanding_lazy_seqno = 0;
	/* This may not have been flushed before the reset, so clean it now */
	i915_gem_request_assign(&ring->outstanding_lazy_request, NULL);
}

void i915_gem_restore_fences(struct drm_device *dev)
+3 −1
Original line number Diff line number Diff line
@@ -1211,7 +1211,9 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
			return ret;
	}

	trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
	trace_i915_gem_ring_dispatch(ring,
		    i915_gem_request_get_seqno(intel_ring_get_request(ring)),
		    flags);

	i915_gem_execbuffer_move_to_active(vmas, ring);
	i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
+2 −1
Original line number Diff line number Diff line
@@ -9910,7 +9910,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
		if (ret)
			goto cleanup_unpin;

		work->flip_queued_seqno = intel_ring_get_seqno(ring);
		work->flip_queued_seqno =
		    i915_gem_request_get_seqno(intel_ring_get_request(ring));
		work->flip_queued_ring = ring;
	}

+9 −17
Original line number Diff line number Diff line
@@ -876,22 +876,14 @@ void intel_lr_context_unpin(struct intel_engine_cs *ring,
	}
}

static int logical_ring_alloc_seqno(struct intel_engine_cs *ring,
static int logical_ring_alloc_request(struct intel_engine_cs *ring,
				      struct intel_context *ctx)
{
	struct drm_i915_gem_request *request;
	int ret;

	/* XXX: The aim is to replace seqno values with request structures.
	 * A step along the way is to switch to using the PLR in preference
	 * to the OLS. That requires the PLR to only be valid when the OLS is
	 * also valid. I.e., the two must be kept in step. */

	if (ring->outstanding_lazy_seqno) {
		WARN_ON(ring->preallocated_lazy_request == NULL);
	if (ring->outstanding_lazy_request)
		return 0;
	}
	WARN_ON(ring->preallocated_lazy_request != NULL);

	request = kmalloc(sizeof(*request), GFP_KERNEL);
	if (request == NULL)
@@ -907,7 +899,7 @@ static int logical_ring_alloc_seqno(struct intel_engine_cs *ring,

	kref_init(&request->ref);

	ret = i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
	ret = i915_gem_get_seqno(ring->dev, &request->seqno);
	if (ret) {
		intel_lr_context_unpin(ring, ctx);
		kfree(request);
@@ -921,7 +913,7 @@ static int logical_ring_alloc_seqno(struct intel_engine_cs *ring,
	request->ctx = ctx;
	i915_gem_context_reference(request->ctx);

	ring->preallocated_lazy_request = request;
	ring->outstanding_lazy_request = request;
	return 0;
}

@@ -1098,7 +1090,7 @@ int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf, int num_dwords)
		return ret;

	/* Preallocate the olr before touching the ring */
	ret = logical_ring_alloc_seqno(ring, ringbuf->FIXME_lrc_ctx);
	ret = logical_ring_alloc_request(ring, ringbuf->FIXME_lrc_ctx);
	if (ret)
		return ret;

@@ -1351,7 +1343,8 @@ static int gen8_emit_request(struct intel_ringbuffer *ringbuf)
				(ring->status_page.gfx_addr +
				(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)));
	intel_logical_ring_emit(ringbuf, 0);
	intel_logical_ring_emit(ringbuf, ring->outstanding_lazy_seqno);
	intel_logical_ring_emit(ringbuf,
		i915_gem_request_get_seqno(ring->outstanding_lazy_request));
	intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
	intel_logical_ring_emit(ringbuf, MI_NOOP);
	intel_logical_ring_advance_and_submit(ringbuf);
@@ -1376,8 +1369,7 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *ring)

	intel_logical_ring_stop(ring);
	WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
	i915_gem_request_assign(&ring->preallocated_lazy_request, NULL);
	ring->outstanding_lazy_seqno = 0;
	i915_gem_request_assign(&ring->outstanding_lazy_request, NULL);

	if (ring->cleanup)
		ring->cleanup(ring);
+27 −25
Original line number Diff line number Diff line
@@ -911,17 +911,20 @@ static int gen8_rcs_signal(struct intel_engine_cs *signaller,
		return ret;

	for_each_ring(waiter, dev_priv, i) {
		u32 seqno;
		u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
			continue;

		seqno = i915_gem_request_get_seqno(
					   signaller->outstanding_lazy_request);
		intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6));
		intel_ring_emit(signaller, PIPE_CONTROL_GLOBAL_GTT_IVB |
					   PIPE_CONTROL_QW_WRITE |
					   PIPE_CONTROL_FLUSH_ENABLE);
		intel_ring_emit(signaller, lower_32_bits(gtt_offset));
		intel_ring_emit(signaller, upper_32_bits(gtt_offset));
		intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
		intel_ring_emit(signaller, seqno);
		intel_ring_emit(signaller, 0);
		intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
					   MI_SEMAPHORE_TARGET(waiter->id));
@@ -949,16 +952,19 @@ static int gen8_xcs_signal(struct intel_engine_cs *signaller,
		return ret;

	for_each_ring(waiter, dev_priv, i) {
		u32 seqno;
		u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
			continue;

		seqno = i915_gem_request_get_seqno(
					   signaller->outstanding_lazy_request);
		intel_ring_emit(signaller, (MI_FLUSH_DW + 1) |
					   MI_FLUSH_DW_OP_STOREDW);
		intel_ring_emit(signaller, lower_32_bits(gtt_offset) |
					   MI_FLUSH_DW_USE_GTT);
		intel_ring_emit(signaller, upper_32_bits(gtt_offset));
		intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
		intel_ring_emit(signaller, seqno);
		intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
					   MI_SEMAPHORE_TARGET(waiter->id));
		intel_ring_emit(signaller, 0);
@@ -987,9 +993,11 @@ static int gen6_signal(struct intel_engine_cs *signaller,
	for_each_ring(useless, dev_priv, i) {
		u32 mbox_reg = signaller->semaphore.mbox.signal[i];
		if (mbox_reg != GEN6_NOSYNC) {
			u32 seqno = i915_gem_request_get_seqno(
					   signaller->outstanding_lazy_request);
			intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
			intel_ring_emit(signaller, mbox_reg);
			intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
			intel_ring_emit(signaller, seqno);
		}
	}

@@ -1024,7 +1032,8 @@ gen6_add_request(struct intel_engine_cs *ring)

	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, ring->outstanding_lazy_seqno);
	intel_ring_emit(ring,
		    i915_gem_request_get_seqno(ring->outstanding_lazy_request));
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	__intel_ring_advance(ring);

@@ -1142,7 +1151,8 @@ pc_render_add_request(struct intel_engine_cs *ring)
			PIPE_CONTROL_WRITE_FLUSH |
			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
	intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, ring->outstanding_lazy_seqno);
	intel_ring_emit(ring,
		    i915_gem_request_get_seqno(ring->outstanding_lazy_request));
	intel_ring_emit(ring, 0);
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 2 * CACHELINE_BYTES; /* write to separate cachelines */
@@ -1161,7 +1171,8 @@ pc_render_add_request(struct intel_engine_cs *ring)
			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
			PIPE_CONTROL_NOTIFY);
	intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, ring->outstanding_lazy_seqno);
	intel_ring_emit(ring,
		    i915_gem_request_get_seqno(ring->outstanding_lazy_request));
	intel_ring_emit(ring, 0);
	__intel_ring_advance(ring);

@@ -1401,7 +1412,8 @@ i9xx_add_request(struct intel_engine_cs *ring)

	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, ring->outstanding_lazy_seqno);
	intel_ring_emit(ring,
		    i915_gem_request_get_seqno(ring->outstanding_lazy_request));
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	__intel_ring_advance(ring);

@@ -1870,8 +1882,7 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)

	intel_unpin_ringbuffer_obj(ringbuf);
	intel_destroy_ringbuffer_obj(ringbuf);
	i915_gem_request_assign(&ring->preallocated_lazy_request, NULL);
	ring->outstanding_lazy_seqno = 0;
	i915_gem_request_assign(&ring->outstanding_lazy_request, NULL);

	if (ring->cleanup)
		ring->cleanup(ring);
@@ -2004,7 +2015,7 @@ int intel_ring_idle(struct intel_engine_cs *ring)
	int ret;

	/* We need to add any requests required to flush the objects and ring */
	if (ring->outstanding_lazy_seqno) {
	if (ring->outstanding_lazy_request) {
		ret = i915_add_request(ring, NULL);
		if (ret)
			return ret;
@@ -2022,22 +2033,13 @@ int intel_ring_idle(struct intel_engine_cs *ring)
}

static int
intel_ring_alloc_seqno(struct intel_engine_cs *ring)
intel_ring_alloc_request(struct intel_engine_cs *ring)
{
	int ret;
	struct drm_i915_gem_request *request;

	/* XXX: The aim is to replace seqno values with request structures.
	 * A step along the way is to switch to using the PLR in preference
	 * to the OLS. That requires the PLR to only be valid when the OLS
	 * is also valid. I.e., the two must be kept in step. */

	if (ring->outstanding_lazy_seqno) {
		WARN_ON(ring->preallocated_lazy_request == NULL);
	if (ring->outstanding_lazy_request)
		return 0;
	}

	WARN_ON(ring->preallocated_lazy_request != NULL);

	request = kmalloc(sizeof(*request), GFP_KERNEL);
	if (request == NULL)
@@ -2045,13 +2047,13 @@ intel_ring_alloc_seqno(struct intel_engine_cs *ring)

	kref_init(&request->ref);

	ret = i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
	ret = i915_gem_get_seqno(ring->dev, &request->seqno);
	if (ret) {
		kfree(request);
		return ret;
	}

	ring->preallocated_lazy_request = request;
	ring->outstanding_lazy_request = request;
	return 0;
}

@@ -2092,7 +2094,7 @@ int intel_ring_begin(struct intel_engine_cs *ring,
		return ret;

	/* Preallocate the olr before touching the ring */
	ret = intel_ring_alloc_seqno(ring);
	ret = intel_ring_alloc_request(ring);
	if (ret)
		return ret;

@@ -2127,7 +2129,7 @@ void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno)
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	BUG_ON(ring->outstanding_lazy_seqno);
	BUG_ON(ring->outstanding_lazy_request);

	if (INTEL_INFO(dev)->gen == 6 || INTEL_INFO(dev)->gen == 7) {
		I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
Loading