Commit 1be8f347 authored by Joonas Lahtinen
Browse files

Merge tag 'gvt-next-2020-05-12' of https://github.com/intel/gvt-linux into drm-intel-next-queued



gvt-next-2020-05-12

- Support PPGTT update via LRI cmd (Zhenyu)
- Remove extra kmap for shadow ctx update (Zhenyu)
- Move workload cleanup out of execlist handling code (Zhenyu)

Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
From: Zhenyu Wang <zhenyuw@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200512094017.GX18545@zhen-hp.sh.intel.com
parents 7a00e68b 47e51832
Loading
Loading
Loading
Loading
+45 −0
Original line number Diff line number Diff line
@@ -882,6 +882,47 @@ static int mocs_cmd_reg_handler(struct parser_exec_state *s,
	return 0;
}

/*
 * Return non-zero when @offset is the PDP0 upper-dword register
 * (RING_PDP_UDW(base, 0)) of the engine this workload runs on, i.e.
 * the LRI write that begins a guest PPGTT root-table update.
 */
static int is_cmd_update_pdps(unsigned int offset,
			      struct parser_exec_state *s)
{
	return i915_mmio_reg_equal(_MMIO(offset),
				   GEN8_RING_PDP_UDW(s->workload->engine->mmio_base, 0));
}

/*
 * Handle an LRI write to the ring PDP registers: look up the shadow
 * PPGTT matching the guest-written pdps, pin it onto the workload's
 * lri_shadow_mm list, and patch the command stream so the hardware
 * sees the shadow pdps instead of the guest ones.
 *
 * Returns 0 on success, -EINVAL when no matching shadow mm exists or
 * the shadow mm is not a 4-level (PML4) table.
 * NOTE: @offset and @index are unused here; they match the generic
 * cmd-handler signature.
 */
static int cmd_pdp_mmio_update_handler(struct parser_exec_state *s,
				       unsigned int offset, unsigned int index)
{
	struct intel_vgpu *vgpu = s->vgpu;
	struct intel_vgpu_mm *shadow_mm = s->workload->shadow_mm;
	struct intel_vgpu_mm *lri_mm;
	u64 pdps[GEN8_3LVL_PDPES];

	if (shadow_mm->ppgtt_mm.root_entry_type !=
	    GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
		/*
		 * Currently all guests use PML4 table and now can't
		 * have a guest with 3-level table but uses LRI for
		 * PPGTT update. So this is simply un-testable.
		 */
		GEM_BUG_ON(1);
		gvt_vgpu_err("invalid shared shadow vm type\n");
		return -EINVAL;
	}

	/* Reassemble the 64-bit guest PDP0 from the two LRI dwords. */
	pdps[0] = (u64)cmd_val(s, 2) << 32;
	pdps[0] |= cmd_val(s, 4);

	lri_mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps);
	if (!lri_mm) {
		gvt_vgpu_err("failed to get the 4-level shadow vm\n");
		return -EINVAL;
	}

	/* Hold a reference until the workload completes. */
	intel_vgpu_mm_get(lri_mm);
	list_add_tail(&lri_mm->ppgtt_mm.link,
		      &s->workload->lri_shadow_mm);

	/* Patch the command with the shadow pdps. */
	*cmd_ptr(s, 2) = upper_32_bits(lri_mm->ppgtt_mm.shadow_pdps[0]);
	*cmd_ptr(s, 4) = lower_32_bits(lri_mm->ppgtt_mm.shadow_pdps[0]);

	return 0;
}

static int cmd_reg_handler(struct parser_exec_state *s,
	unsigned int offset, unsigned int index, char *cmd)
{
@@ -920,6 +961,10 @@ static int cmd_reg_handler(struct parser_exec_state *s,
		patch_value(s, cmd_ptr(s, index), VGT_PVINFO_PAGE);
	}

	if (is_cmd_update_pdps(offset, s) &&
	    cmd_pdp_mmio_update_handler(s, offset, index))
		return -EINVAL;

	/* TODO
	 * In order to let workload with inhibit context to generate
	 * correct image data into memory, vregs values will be loaded to
+0 −2
Original line number Diff line number Diff line
@@ -424,8 +424,6 @@ static int complete_execlist_workload(struct intel_vgpu_workload *workload)

	ret = emulate_execlist_ctx_schedule_out(execlist, &workload->ctx_desc);
out:
	intel_vgpu_unpin_mm(workload->shadow_mm);
	intel_vgpu_destroy_workload(workload);
	return ret;
}

+1 −0
Original line number Diff line number Diff line
@@ -1900,6 +1900,7 @@ struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu,

	INIT_LIST_HEAD(&mm->ppgtt_mm.list);
	INIT_LIST_HEAD(&mm->ppgtt_mm.lru_list);
	INIT_LIST_HEAD(&mm->ppgtt_mm.link);

	if (root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY)
		mm->ppgtt_mm.guest_pdps[0] = pdps[0];
+1 −0
Original line number Diff line number Diff line
@@ -160,6 +160,7 @@ struct intel_vgpu_mm {

			struct list_head list;
			struct list_head lru_list;
			struct list_head link; /* possible LRI shadow mm list */
		} ppgtt_mm;
		struct {
			void *virtual_ggtt;
+1 −1
Original line number Diff line number Diff line
@@ -2812,7 +2812,7 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt)
	MMIO_D(GAMTARBMODE, D_BDW_PLUS);

#define RING_REG(base) _MMIO((base) + 0x270)
	MMIO_RING_F(RING_REG, 32, 0, 0, 0, D_BDW_PLUS, NULL, NULL);
	MMIO_RING_F(RING_REG, 32, F_CMD_ACCESS, 0, 0, D_BDW_PLUS, NULL, NULL);
#undef RING_REG

	MMIO_RING_GM_RDR(RING_HWS_PGA, D_BDW_PLUS, NULL, hws_pga_write);
Loading