Commit 0bb5d5b0 authored by Luben Tuikov, committed by Alex Deucher
Browse files

drm/amdgpu: Move to a per-IB secure flag (TMZ)



Move from a per-CS secure flag (TMZ) to a per-IB
secure flag.

Signed-off-by: Luben Tuikov <luben.tuikov@amd.com>
Reviewed-by: Huang Rui <ray.huang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 5888f07a
Loading
Loading
Loading
Loading
+0 −2
Original line number Diff line number Diff line
@@ -232,8 +232,6 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs
	if (ret)
		goto free_all_kdata;

	p->job->secure = cs->in.flags & AMDGPU_CS_FLAGS_SECURE;

	if (p->ctx->vram_lost_counter != p->job->vram_lost_counter) {
		ret = -ECANCELED;
		goto free_all_kdata;
+20 −3
Original line number Diff line number Diff line
@@ -133,6 +133,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
	uint64_t fence_ctx;
	uint32_t status = 0, alloc_size;
	unsigned fence_flags = 0;
	bool secure;

	unsigned i;
	int r = 0;
@@ -214,9 +215,10 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
	if (job && ring->funcs->emit_cntxcntl) {
		status |= job->preamble_status;
		status |= job->preemption_status;
		amdgpu_ring_emit_cntxcntl(ring, status, job->secure);
		amdgpu_ring_emit_cntxcntl(ring, status);
	}

	secure = false;
	for (i = 0; i < num_ibs; ++i) {
		ib = &ibs[i];

@@ -228,12 +230,27 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
		    !amdgpu_sriov_vf(adev)) /* for SRIOV preemption, Preamble CE ib must be inserted anyway */
			continue;

		/* If this IB is TMZ, add frame TMZ start packet,
		 * else, turn off TMZ.
		 */
		if (ib->flags & AMDGPU_IB_FLAGS_SECURE && ring->funcs->emit_tmz) {
			if (!secure) {
				secure = true;
				amdgpu_ring_emit_tmz(ring, true);
			}
		} else if (secure) {
			secure = false;
			amdgpu_ring_emit_tmz(ring, false);
		}

		amdgpu_ring_emit_ib(ring, job, ib, status);
		status &= ~AMDGPU_HAVE_CTX_SWITCH;
	}

	if (ring->funcs->emit_tmz)
		amdgpu_ring_emit_tmz(ring, false, job ? job->secure : false);
	if (secure) {
		secure = false;
		amdgpu_ring_emit_tmz(ring, false);
	}

#ifdef CONFIG_X86_64
	if (!(adev->flags & AMD_IS_APU))
+0 −3
Original line number Diff line number Diff line
@@ -62,9 +62,6 @@ struct amdgpu_job {
	/* user fence handling */
	uint64_t		uf_addr;
	uint64_t		uf_sequence;

	/* the job is due to a secure command submission */
	bool			secure;
};

int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
+4 −5
Original line number Diff line number Diff line
@@ -168,8 +168,7 @@ struct amdgpu_ring_funcs {
	void (*begin_use)(struct amdgpu_ring *ring);
	void (*end_use)(struct amdgpu_ring *ring);
	void (*emit_switch_buffer) (struct amdgpu_ring *ring);
	void (*emit_cntxcntl) (struct amdgpu_ring *ring, uint32_t flags,
			       bool trusted);
	void (*emit_cntxcntl) (struct amdgpu_ring *ring, uint32_t flags);
	void (*emit_rreg)(struct amdgpu_ring *ring, uint32_t reg,
			  uint32_t reg_val_offs);
	void (*emit_wreg)(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);
@@ -178,7 +177,7 @@ struct amdgpu_ring_funcs {
	void (*emit_reg_write_reg_wait)(struct amdgpu_ring *ring,
					uint32_t reg0, uint32_t reg1,
					uint32_t ref, uint32_t mask);
	void (*emit_tmz)(struct amdgpu_ring *ring, bool start, bool trusted);
	void (*emit_tmz)(struct amdgpu_ring *ring, bool start);
	/* Try to soft recover the ring to make the fence signal */
	void (*soft_recovery)(struct amdgpu_ring *ring, unsigned vmid);
	int (*preempt_ib)(struct amdgpu_ring *ring);
@@ -252,12 +251,12 @@ struct amdgpu_ring {
#define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as))
#define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r))
#define amdgpu_ring_emit_switch_buffer(r) (r)->funcs->emit_switch_buffer((r))
#define amdgpu_ring_emit_cntxcntl(r, d, s) (r)->funcs->emit_cntxcntl((r), (d), (s))
#define amdgpu_ring_emit_cntxcntl(r, d) (r)->funcs->emit_cntxcntl((r), (d))
#define amdgpu_ring_emit_rreg(r, d, o) (r)->funcs->emit_rreg((r), (d), (o))
#define amdgpu_ring_emit_wreg(r, d, v) (r)->funcs->emit_wreg((r), (d), (v))
#define amdgpu_ring_emit_reg_wait(r, d, v, m) (r)->funcs->emit_reg_wait((r), (d), (v), (m))
#define amdgpu_ring_emit_reg_write_reg_wait(r, d0, d1, v, m) (r)->funcs->emit_reg_write_reg_wait((r), (d0), (d1), (v), (m))
#define amdgpu_ring_emit_tmz(r, b, s) (r)->funcs->emit_tmz((r), (b), (s))
#define amdgpu_ring_emit_tmz(r, b) (r)->funcs->emit_tmz((r), (b))
#define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
#define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
#define amdgpu_ring_patch_cond_exec(r,o) (r)->funcs->patch_cond_exec((r),(o))
+7 −16
Original line number Diff line number Diff line
@@ -3037,8 +3037,7 @@ static int gfx_v10_0_rlc_backdoor_autoload_enable(struct amdgpu_device *adev);
static int gfx_v10_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev);
static void gfx_v10_0_ring_emit_ce_meta(struct amdgpu_ring *ring, bool resume);
static void gfx_v10_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume);
static void gfx_v10_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start,
				    bool trusted);
static void gfx_v10_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start);

static void gfx10_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask)
{
@@ -7436,8 +7435,7 @@ static void gfx_v10_0_ring_emit_sb(struct amdgpu_ring *ring)
}

static void gfx_v10_0_ring_emit_cntxcntl(struct amdgpu_ring *ring,
					 uint32_t flags,
					 bool trusted)
					 uint32_t flags)
{
	uint32_t dw2 = 0;

@@ -7445,8 +7443,6 @@ static void gfx_v10_0_ring_emit_cntxcntl(struct amdgpu_ring *ring,
		gfx_v10_0_ring_emit_ce_meta(ring,
				    (!amdgpu_sriov_vf(ring->adev) && flags & AMDGPU_IB_PREEMPTED) ? true : false);

	gfx_v10_0_ring_emit_tmz(ring, true, trusted);

	dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */
	if (flags & AMDGPU_HAVE_CTX_SWITCH) {
		/* set load_global_config & load_global_uconfig */
@@ -7603,17 +7599,12 @@ static void gfx_v10_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume)
					   sizeof(de_payload) >> 2);
}

static void gfx_v10_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start,
				    bool trusted)
static void gfx_v10_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start)
{
	if (amdgpu_is_tmz(ring->adev)) {
		amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
	/*
	 * cmd = 0: frame begin
	 * cmd = 1: frame end
	 */
	amdgpu_ring_write(ring,
			  ((amdgpu_is_tmz(ring->adev) && trusted) ? FRAME_TMZ : 0)
			  | FRAME_CMD(start ? 0 : 1));
		amdgpu_ring_write(ring, FRAME_TMZ | FRAME_CMD(start ? 0 : 1));
	}
}

static void gfx_v10_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
Loading