Commit a4b3a571 authored by Daniel Vetter
Browse files

drm/i915: Convert i915_wait_seqno to i915_wait_request



Updated i915_wait_seqno() to take a request structure instead of a seqno value
and renamed it accordingly. Internally, it just pulls the seqno out of the
request and calls on to __wait_seqno() as before. However, all the code further
up the stack is now simplified as it can just pass the request object straight
through without having to peek inside.

For: VIZ-4377
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Reviewed-by: Thomas Daniel <Thomas.Daniel@intel.com>
[danvet: Squash in hunk from an earlier patch which was rebased
wrongly.]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent 9bfc01a2
Loading
Loading
Loading
Loading
+1 −18
Original line number Diff line number Diff line
@@ -2629,8 +2629,7 @@ int __i915_wait_seqno(struct intel_engine_cs *ring, u32 seqno,
			bool interruptible,
			s64 *timeout,
			struct drm_i915_file_private *file_priv);
int __must_check i915_wait_seqno(struct intel_engine_cs *ring,
				 uint32_t seqno);
int __must_check i915_wait_request(struct drm_i915_gem_request *req);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
@@ -3117,20 +3116,4 @@ wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
	}
}

/* XXX: Temporary solution to be removed later in patch series. */
static inline int __must_check i915_gem_check_ols(
				     struct intel_engine_cs *ring, u32 seqno)
{
	int ret;

	WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));

	ret = 0;
	if (seqno == i915_gem_request_get_seqno(ring->outstanding_lazy_request))
		ret = i915_add_request(ring, NULL);

	return ret;
}
/* XXX: Temporary solution to be removed later in patch series. */

#endif
+19 −17
Original line number Diff line number Diff line
@@ -1308,32 +1308,40 @@ int __i915_wait_seqno(struct intel_engine_cs *ring, u32 seqno,
}

/**
 * Waits for a sequence number to be signaled, and cleans up the
 * Waits for a request to be signaled, and cleans up the
 * request and object lists appropriately for that event.
 */
int
i915_wait_seqno(struct intel_engine_cs *ring, uint32_t seqno)
i915_wait_request(struct drm_i915_gem_request *req)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool interruptible = dev_priv->mm.interruptible;
	struct drm_device *dev;
	struct drm_i915_private *dev_priv;
	bool interruptible;
	unsigned reset_counter;
	int ret;

	BUG_ON(req == NULL);

	dev = req->ring->dev;
	dev_priv = dev->dev_private;
	interruptible = dev_priv->mm.interruptible;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
	BUG_ON(seqno == 0);

	ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
	if (ret)
		return ret;

	ret = i915_gem_check_ols(ring, seqno);
	ret = i915_gem_check_olr(req);
	if (ret)
		return ret;

	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
	return __i915_wait_seqno(ring, seqno, reset_counter, interruptible,
				 NULL, NULL);
	i915_gem_request_reference(req);
	ret = __i915_wait_seqno(req->ring, i915_gem_request_get_seqno(req),
				reset_counter, interruptible, NULL, NULL);
	i915_gem_request_unreference(req);
	return ret;
}

static int
@@ -1363,18 +1371,13 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
			       bool readonly)
{
	struct drm_i915_gem_request *req;
	struct intel_engine_cs *ring = obj->ring;
	u32 seqno;
	int ret;

	req = readonly ? obj->last_write_req : obj->last_read_req;
	if (!req)
		return 0;

	seqno = i915_gem_request_get_seqno(req);
	WARN_ON(seqno == 0);

	ret = i915_wait_seqno(ring, seqno);
	ret = i915_wait_request(req);
	if (ret)
		return ret;

@@ -3332,8 +3335,7 @@ static int
i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
{
	if (obj->last_fenced_req) {
		int ret = i915_wait_seqno(obj->ring,
			   i915_gem_request_get_seqno(obj->last_fenced_req));
		int ret = i915_wait_request(obj->last_fenced_req);
		if (ret)
			return ret;

+2 −4
Original line number Diff line number Diff line
@@ -922,7 +922,6 @@ static int logical_ring_wait_request(struct intel_ringbuffer *ringbuf,
{
	struct intel_engine_cs *ring = ringbuf->ring;
	struct drm_i915_gem_request *request;
	u32 seqno = 0;
	int ret;

	if (ringbuf->last_retired_head != -1) {
@@ -947,15 +946,14 @@ static int logical_ring_wait_request(struct intel_ringbuffer *ringbuf,
		/* Would completion of this request free enough space? */
		if (__intel_ring_space(request->tail, ringbuf->tail,
				       ringbuf->size) >= bytes) {
			seqno = request->seqno;
			break;
		}
	}

	if (seqno == 0)
	if (&request->list == &ring->request_list)
		return -ENOSPC;

	ret = i915_wait_seqno(ring, seqno);
	ret = i915_wait_request(request);
	if (ret)
		return ret;

+3 −8
Original line number Diff line number Diff line
@@ -224,8 +224,7 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
		return ret;

	overlay->flip_tail = tail;
	ret = i915_wait_seqno(ring,
			 i915_gem_request_get_seqno(overlay->last_flip_req));
	ret = i915_wait_request(overlay->last_flip_req);
	if (ret)
		return ret;
	i915_gem_retire_requests(dev);
@@ -367,19 +366,15 @@ static int intel_overlay_off(struct intel_overlay *overlay)
 * We have to be careful not to repeat work forever an make forward progess. */
static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
{
	struct drm_device *dev = overlay->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[RCS];
	int ret;

	if (overlay->last_flip_req == NULL)
		return 0;

	ret = i915_wait_seqno(ring,
			 i915_gem_request_get_seqno(overlay->last_flip_req));
	ret = i915_wait_request(overlay->last_flip_req);
	if (ret)
		return ret;
	i915_gem_retire_requests(dev);
	i915_gem_retire_requests(overlay->dev);

	if (overlay->flip_tail)
		overlay->flip_tail(overlay);
+6 −8
Original line number Diff line number Diff line
@@ -1899,7 +1899,6 @@ static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
{
	struct intel_ringbuffer *ringbuf = ring->buffer;
	struct drm_i915_gem_request *request;
	u32 seqno = 0;
	int ret;

	if (ringbuf->last_retired_head != -1) {
@@ -1914,15 +1913,14 @@ static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
	list_for_each_entry(request, &ring->request_list, list) {
		if (__intel_ring_space(request->tail, ringbuf->tail,
				       ringbuf->size) >= n) {
			seqno = request->seqno;
			break;
		}
	}

	if (seqno == 0)
	if (&request->list == &ring->request_list)
		return -ENOSPC;

	ret = i915_wait_seqno(ring, seqno);
	ret = i915_wait_request(request);
	if (ret)
		return ret;

@@ -2011,7 +2009,7 @@ static int intel_wrap_ring_buffer(struct intel_engine_cs *ring)

int intel_ring_idle(struct intel_engine_cs *ring)
{
	u32 seqno;
	struct drm_i915_gem_request *req;
	int ret;

	/* We need to add any requests required to flush the objects and ring */
@@ -2025,11 +2023,11 @@ int intel_ring_idle(struct intel_engine_cs *ring)
	if (list_empty(&ring->request_list))
		return 0;

	seqno = list_entry(ring->request_list.prev,
	req = list_entry(ring->request_list.prev,
			   struct drm_i915_gem_request,
			   list)->seqno;
			   list);

	return i915_wait_seqno(ring, seqno);
	return i915_wait_request(req);
}

static int