Commit e61e0f51 authored by Chris Wilson
Browse files

drm/i915: Rename drm_i915_gem_request to i915_request



We want to de-emphasize the link between the request (dependency,
execution and fence tracking) from GEM and so rename the struct from
drm_i915_gem_request to i915_request. That is we may implement the GEM
user interface on top of requests, but they are an abstraction for
tracking execution rather than an implementation detail of GEM. (Since
they are not tied to HW, we keep the i915 prefix as opposed to intel.)

In short, the spatch:
@@

@@
- struct drm_i915_gem_request
+ struct i915_request

A corollary to contracting the type name, we also harmonise on using
'rq' shorthand for local variables where space is of the essence and
repetition makes 'request' unwieldy. For globals and struct members,
'request' is still much preferred for its clarity.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Michał Winiarski <michal.winiarski@intel.com>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180221095636.6649-1-chris@chris-wilson.co.uk


Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Michał Winiarski <michal.winiarski@intel.com>
Acked-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
parent ea3f0ef3
Loading
Loading
Loading
Loading
+1 −1
Original line number Original line Diff line number Diff line
@@ -63,13 +63,13 @@ i915-y += i915_cmd_parser.o \
	  i915_gem.o \
	  i915_gem.o \
	  i915_gem_object.o \
	  i915_gem_object.o \
	  i915_gem_render_state.o \
	  i915_gem_render_state.o \
	  i915_gem_request.o \
	  i915_gem_shrinker.o \
	  i915_gem_shrinker.o \
	  i915_gem_stolen.o \
	  i915_gem_stolen.o \
	  i915_gem_tiling.o \
	  i915_gem_tiling.o \
	  i915_gem_timeline.o \
	  i915_gem_timeline.o \
	  i915_gem_userptr.o \
	  i915_gem_userptr.o \
	  i915_gemfs.o \
	  i915_gemfs.o \
	  i915_request.o \
	  i915_trace_points.o \
	  i915_trace_points.o \
	  i915_vma.o \
	  i915_vma.o \
	  intel_breadcrumbs.o \
	  intel_breadcrumbs.o \
+8 −8
Original line number Original line Diff line number Diff line
@@ -126,7 +126,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
	return 0;
	return 0;
}
}


static inline bool is_gvt_request(struct drm_i915_gem_request *req)
static inline bool is_gvt_request(struct i915_request *req)
{
{
	return i915_gem_context_force_single_submission(req->ctx);
	return i915_gem_context_force_single_submission(req->ctx);
}
}
@@ -148,7 +148,7 @@ static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id)
static int shadow_context_status_change(struct notifier_block *nb,
static int shadow_context_status_change(struct notifier_block *nb,
		unsigned long action, void *data)
		unsigned long action, void *data)
{
{
	struct drm_i915_gem_request *req = (struct drm_i915_gem_request *)data;
	struct i915_request *req = data;
	struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
	struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
				shadow_ctx_notifier_block[req->engine->id]);
				shadow_ctx_notifier_block[req->engine->id]);
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
@@ -333,13 +333,13 @@ static int intel_gvt_generate_request(struct intel_vgpu_workload *workload)
	int ring_id = workload->ring_id;
	int ring_id = workload->ring_id;
	struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
	struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
	struct intel_engine_cs *engine = dev_priv->engine[ring_id];
	struct intel_engine_cs *engine = dev_priv->engine[ring_id];
	struct drm_i915_gem_request *rq;
	struct i915_request *rq;
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct i915_gem_context *shadow_ctx = s->shadow_ctx;
	struct i915_gem_context *shadow_ctx = s->shadow_ctx;
	int ret;
	int ret;


	rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
	rq = i915_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
	if (IS_ERR(rq)) {
	if (IS_ERR(rq)) {
		gvt_vgpu_err("fail to allocate gem request\n");
		gvt_vgpu_err("fail to allocate gem request\n");
		ret = PTR_ERR(rq);
		ret = PTR_ERR(rq);
@@ -348,7 +348,7 @@ static int intel_gvt_generate_request(struct intel_vgpu_workload *workload)


	gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq);
	gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq);


	workload->req = i915_gem_request_get(rq);
	workload->req = i915_request_get(rq);
	ret = copy_workload_to_ring_buffer(workload);
	ret = copy_workload_to_ring_buffer(workload);
	if (ret)
	if (ret)
		goto err_unpin;
		goto err_unpin;
@@ -582,7 +582,7 @@ out:
	if (!IS_ERR_OR_NULL(workload->req)) {
	if (!IS_ERR_OR_NULL(workload->req)) {
		gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
		gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
				ring_id, workload->req);
				ring_id, workload->req);
		i915_add_request(workload->req);
		i915_request_add(workload->req);
		workload->dispatched = true;
		workload->dispatched = true;
	}
	}


@@ -769,7 +769,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
				workload->status = 0;
				workload->status = 0;
		}
		}


		i915_gem_request_put(fetch_and_zero(&workload->req));
		i915_request_put(fetch_and_zero(&workload->req));


		if (!workload->status && !(vgpu->resetting_eng &
		if (!workload->status && !(vgpu->resetting_eng &
					   ENGINE_MASK(ring_id))) {
					   ENGINE_MASK(ring_id))) {
@@ -886,7 +886,7 @@ static int workload_thread(void *priv)


		gvt_dbg_sched("ring id %d wait workload %p\n",
		gvt_dbg_sched("ring id %d wait workload %p\n",
				workload->ring_id, workload);
				workload->ring_id, workload);
		i915_wait_request(workload->req, 0, MAX_SCHEDULE_TIMEOUT);
		i915_request_wait(workload->req, 0, MAX_SCHEDULE_TIMEOUT);


complete:
complete:
		gvt_dbg_sched("will complete workload %p, status: %d\n",
		gvt_dbg_sched("will complete workload %p, status: %d\n",
+1 −1
Original line number Original line Diff line number Diff line
@@ -80,7 +80,7 @@ struct intel_shadow_wa_ctx {
struct intel_vgpu_workload {
struct intel_vgpu_workload {
	struct intel_vgpu *vgpu;
	struct intel_vgpu *vgpu;
	int ring_id;
	int ring_id;
	struct drm_i915_gem_request *req;
	struct i915_request *req;
	/* if this workload has been dispatched to i915? */
	/* if this workload has been dispatched to i915? */
	bool dispatched;
	bool dispatched;
	bool shadowed;
	bool shadowed;
+3 −3
Original line number Original line Diff line number Diff line
@@ -519,7 +519,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct file_stats stats;
		struct file_stats stats;
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct drm_i915_gem_request *request;
		struct i915_request *request;
		struct task_struct *task;
		struct task_struct *task;


		mutex_lock(&dev->struct_mutex);
		mutex_lock(&dev->struct_mutex);
@@ -536,7 +536,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
		 * Therefore, we need to protect this ->comm access using RCU.
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		 */
		request = list_first_entry_or_null(&file_priv->mm.request_list,
		request = list_first_entry_or_null(&file_priv->mm.request_list,
						   struct drm_i915_gem_request,
						   struct i915_request,
						   client_link);
						   client_link);
		rcu_read_lock();
		rcu_read_lock();
		task = pid_task(request && request->ctx->pid ?
		task = pid_task(request && request->ctx->pid ?
@@ -4060,7 +4060,7 @@ i915_drop_caches_set(void *data, u64 val)
						     I915_WAIT_LOCKED);
						     I915_WAIT_LOCKED);


		if (val & DROP_RETIRE)
		if (val & DROP_RETIRE)
			i915_gem_retire_requests(dev_priv);
			i915_retire_requests(dev_priv);


		mutex_unlock(&dev->struct_mutex);
		mutex_unlock(&dev->struct_mutex);
	}
	}
+3 −3
Original line number Original line Diff line number Diff line
@@ -808,7 +808,7 @@ static int i915_workqueues_init(struct drm_i915_private *dev_priv)
	/*
	/*
	 * The i915 workqueue is primarily used for batched retirement of
	 * The i915 workqueue is primarily used for batched retirement of
	 * requests (and thus managing bo) once the task has been completed
	 * requests (and thus managing bo) once the task has been completed
	 * by the GPU. i915_gem_retire_requests() is called directly when we
	 * by the GPU. i915_retire_requests() is called directly when we
	 * need high-priority retirement, such as waiting for an explicit
	 * need high-priority retirement, such as waiting for an explicit
	 * bo.
	 * bo.
	 *
	 *
@@ -1992,7 +1992,7 @@ taint:
	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
error:
error:
	i915_gem_set_wedged(i915);
	i915_gem_set_wedged(i915);
	i915_gem_retire_requests(i915);
	i915_retire_requests(i915);
	intel_gpu_reset(i915, ALL_ENGINES);
	intel_gpu_reset(i915, ALL_ENGINES);
	goto finish;
	goto finish;
}
}
@@ -2019,7 +2019,7 @@ static inline int intel_gt_reset_engine(struct drm_i915_private *dev_priv,
int i915_reset_engine(struct intel_engine_cs *engine, unsigned int flags)
int i915_reset_engine(struct intel_engine_cs *engine, unsigned int flags)
{
{
	struct i915_gpu_error *error = &engine->i915->gpu_error;
	struct i915_gpu_error *error = &engine->i915->gpu_error;
	struct drm_i915_gem_request *active_request;
	struct i915_request *active_request;
	int ret;
	int ret;


	GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags));
	GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags));
Loading