Commit 033ef711 authored by Chris Wilson
Browse files

drm/i915/gvt: Drop redundant prepare_write/pin_pages



Since gvt calls pin_map for the shadow batch buffer, this makes the
action of prepare_write [+pin_pages] redundant. We can write into the
obj->mm.mapping directly and the flush_map routine knows when it has to
flush the cpu cache afterwards.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200619234543.17499-1-chris@chris-wilson.co.uk
parent 4fb33953
Loading
Loading
Loading
Loading
+1 −13
Original line number Diff line number Diff line
@@ -1904,19 +1904,10 @@ static int perform_bb_shadow(struct parser_exec_state *s)
		goto err_free_bb;
	}

	ret = i915_gem_object_prepare_write(bb->obj, &bb->clflush);
	if (ret)
		goto err_free_obj;

	bb->va = i915_gem_object_pin_map(bb->obj, I915_MAP_WB);
	if (IS_ERR(bb->va)) {
		ret = PTR_ERR(bb->va);
		goto err_finish_shmem_access;
	}

	if (bb->clflush & CLFLUSH_BEFORE) {
		drm_clflush_virt_range(bb->va, bb->obj->base.size);
		bb->clflush &= ~CLFLUSH_BEFORE;
		goto err_free_obj;
	}

	ret = copy_gma_to_hva(s->vgpu, mm,
@@ -1935,7 +1926,6 @@ static int perform_bb_shadow(struct parser_exec_state *s)
	INIT_LIST_HEAD(&bb->list);
	list_add(&bb->list, &s->workload->shadow_bb);

	bb->accessing = true;
	bb->bb_start_cmd_va = s->ip_va;

	if ((s->buf_type == BATCH_BUFFER_INSTRUCTION) && (!s->is_ctx_wa))
@@ -1956,8 +1946,6 @@ static int perform_bb_shadow(struct parser_exec_state *s)
	return 0;
err_unmap:
	i915_gem_object_unpin_map(bb->obj);
err_finish_shmem_access:
	i915_gem_object_finish_access(bb->obj);
err_free_obj:
	i915_gem_object_put(bb->obj);
err_free_bb:
+14 −37
Original line number Diff line number Diff line
@@ -505,8 +505,8 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
			bb->bb_start_cmd_va = workload->shadow_ring_buffer_va
				+ bb->bb_offset;

		if (bb->ppgtt) {
			/* for non-priv bb, scan&shadow is only for
		/*
		 * For non-priv bb, scan&shadow is only for
		 * debugging purpose, so the content of shadow bb
		 * is the same as original bb. Therefore,
		 * here, rather than switch to shadow bb's gma
@@ -514,15 +514,7 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
		 * gma address, and send original bb to hardware
		 * directly
		 */
			if (bb->clflush & CLFLUSH_AFTER) {
				drm_clflush_virt_range(bb->va,
						bb->obj->base.size);
				bb->clflush &= ~CLFLUSH_AFTER;
			}
			i915_gem_object_finish_access(bb->obj);
			bb->accessing = false;

		} else {
		if (!bb->ppgtt) {
			bb->vma = i915_gem_object_ggtt_pin(bb->obj,
							   NULL, 0, 0, 0);
			if (IS_ERR(bb->vma)) {
@@ -535,27 +527,15 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
			if (gmadr_bytes == 8)
				bb->bb_start_cmd_va[2] = 0;

			/* No one is going to touch shadow bb from now on. */
			if (bb->clflush & CLFLUSH_AFTER) {
				drm_clflush_virt_range(bb->va,
						bb->obj->base.size);
				bb->clflush &= ~CLFLUSH_AFTER;
			}

			ret = i915_gem_object_set_to_gtt_domain(bb->obj,
								false);
			if (ret)
				goto err;

			ret = i915_vma_move_to_active(bb->vma,
						      workload->req,
						      0);
			if (ret)
				goto err;

			i915_gem_object_finish_access(bb->obj);
			bb->accessing = false;
		}

		/* No one is going to touch shadow bb from now on. */
		i915_gem_object_flush_map(bb->obj);
	}
	return 0;
err:
@@ -626,9 +606,6 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)

	list_for_each_entry_safe(bb, pos, &workload->shadow_bb, list) {
		if (bb->obj) {
			if (bb->accessing)
				i915_gem_object_finish_access(bb->obj);

			if (bb->va && !IS_ERR(bb->va))
				i915_gem_object_unpin_map(bb->obj);

+0 −2
Original line number Diff line number Diff line
@@ -124,8 +124,6 @@ struct intel_vgpu_shadow_bb {
	struct i915_vma *vma;
	void *va;
	u32 *bb_start_cmd_va;
	unsigned int clflush;
	bool accessing;
	unsigned long bb_offset;
	bool ppgtt;
};