Commit cbfae36c authored by Christian König's avatar Christian König Committed by Alex Deucher
Browse files

drm/amdgpu: cleanup PTE flag generation v3



Move the ASIC specific code into a new callback function.

v2: mask the flags for SI and CIK instead of a BUG_ON().
v3: remove last missed BUG_ON().

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Huang Rui <ray.huang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 71776b6d
Loading
Loading
Loading
Loading
+5 −0
Original line number Diff line number Diff line
@@ -104,6 +104,10 @@ struct amdgpu_gmc_funcs {
	/* get the pde for a given mc addr */
	void (*get_vm_pde)(struct amdgpu_device *adev, int level,
			   u64 *dst, u64 *flags);
	/* get the pte flags to use for a BO VA mapping */
	void (*get_vm_pte)(struct amdgpu_device *adev,
			   struct amdgpu_bo_va_mapping *mapping,
			   uint64_t *flags);
};

struct amdgpu_xgmi {
@@ -185,6 +189,7 @@ struct amdgpu_gmc {
#define amdgpu_gmc_emit_pasid_mapping(r, vmid, pasid) (r)->adev->gmc.gmc_funcs->emit_pasid_mapping((r), (vmid), (pasid))
#define amdgpu_gmc_map_mtype(adev, flags) (adev)->gmc.gmc_funcs->map_mtype((adev),(flags))
#define amdgpu_gmc_get_vm_pde(adev, level, dst, flags) (adev)->gmc.gmc_funcs->get_vm_pde((adev), (level), (dst), (flags))
#define amdgpu_gmc_get_vm_pte(adev, mapping, flags) (adev)->gmc.gmc_funcs->get_vm_pte((adev), (mapping), (flags))

/**
 * amdgpu_gmc_vram_full_visible - Check if full VRAM is visible through the BAR
+2 −27
Original line number Diff line number Diff line
@@ -1571,33 +1571,8 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
	if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
		flags &= ~AMDGPU_PTE_WRITEABLE;

	if (adev->asic_type >= CHIP_TONGA) {
		flags &= ~AMDGPU_PTE_EXECUTABLE;
		flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
	}

	if (adev->asic_type >= CHIP_NAVI10) {
		flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
		flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK);
	} else {
		flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
		flags |= (mapping->flags & AMDGPU_PTE_MTYPE_VG10_MASK);
	}

	if ((mapping->flags & AMDGPU_PTE_PRT) &&
	    (adev->asic_type >= CHIP_VEGA10)) {
		flags |= AMDGPU_PTE_PRT;
		if (adev->asic_type >= CHIP_NAVI10) {
			flags |= AMDGPU_PTE_SNOOPED;
			flags |= AMDGPU_PTE_LOG;
			flags |= AMDGPU_PTE_SYSTEM;
		}
		flags &= ~AMDGPU_PTE_VALID;
	}
	if (adev->asic_type == CHIP_ARCTURUS &&
	    !(flags & AMDGPU_PTE_SYSTEM) &&
	    mapping->bo_va->is_xgmi)
		flags |= AMDGPU_PTE_SNOOPED;
	/* Apply ASIC specific mapping flags */
	amdgpu_gmc_get_vm_pte(adev, mapping, &flags);

	trace_amdgpu_vm_bo_update(mapping);

+21 −1
Original line number Diff line number Diff line
@@ -440,12 +440,32 @@ static void gmc_v10_0_get_vm_pde(struct amdgpu_device *adev, int level,
	}
}

/*
 * Compute the PTE flags for a BO VA mapping on GMC v10 (Navi).
 * The executable bit and the NV10 MTYPE field are always taken from the
 * mapping; PRT mappings additionally get SNOOPED/LOG/SYSTEM set and the
 * VALID bit cleared.
 */
static void gmc_v10_0_get_vm_pte(struct amdgpu_device *adev,
				 struct amdgpu_bo_va_mapping *mapping,
				 uint64_t *flags)
{
	uint64_t pte = *flags;

	/* Replace the executable bit with the one from the mapping */
	pte = (pte & ~AMDGPU_PTE_EXECUTABLE) |
	      (mapping->flags & AMDGPU_PTE_EXECUTABLE);

	/* Replace the MTYPE field with the one from the mapping */
	pte = (pte & ~AMDGPU_PTE_MTYPE_NV10_MASK) |
	      (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK);

	if (mapping->flags & AMDGPU_PTE_PRT) {
		/* PRT pages are snooped system pages without a valid bit */
		pte |= AMDGPU_PTE_PRT | AMDGPU_PTE_SNOOPED |
		       AMDGPU_PTE_LOG | AMDGPU_PTE_SYSTEM;
		pte &= ~AMDGPU_PTE_VALID;
	}

	*flags = pte;
}

/* GMC v10 callback table, including the new get_vm_pte hook */
static const struct amdgpu_gmc_funcs gmc_v10_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v10_0_flush_gpu_tlb,
	.emit_flush_gpu_tlb = gmc_v10_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v10_0_emit_pasid_mapping,
	.map_mtype = gmc_v10_0_map_mtype,
	.get_vm_pde = gmc_v10_0_get_vm_pde,
	.get_vm_pte = gmc_v10_0_get_vm_pte
};

static void gmc_v10_0_set_gmc_funcs(struct amdgpu_device *adev)
+9 −0
Original line number Diff line number Diff line
@@ -392,6 +392,14 @@ static void gmc_v6_0_get_vm_pde(struct amdgpu_device *adev, int level,
	BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
}

/*
 * Compute the PTE flags for a BO VA mapping on GMC v6 (SI).
 * The hardware has no per-page executable or PRT support, so both bits
 * are masked out here instead of triggering a BUG_ON() (see patch v2/v3
 * notes: flags are masked for SI and CIK).
 */
static void gmc_v6_0_get_vm_pte(struct amdgpu_device *adev,
				struct amdgpu_bo_va_mapping *mapping,
				uint64_t *flags)
{
	*flags &= ~(AMDGPU_PTE_EXECUTABLE | AMDGPU_PTE_PRT);
}

static void gmc_v6_0_set_fault_enable_default(struct amdgpu_device *adev,
					      bool value)
{
@@ -1138,6 +1146,7 @@ static const struct amdgpu_gmc_funcs gmc_v6_0_gmc_funcs = {
	.emit_flush_gpu_tlb = gmc_v6_0_emit_flush_gpu_tlb,
	.set_prt = gmc_v6_0_set_prt,
	.get_vm_pde = gmc_v6_0_get_vm_pde,
	.get_vm_pte = gmc_v6_0_get_vm_pte,
};

static const struct amdgpu_irq_src_funcs gmc_v6_0_irq_funcs = {
+10 −1
Original line number Diff line number Diff line
@@ -469,6 +469,14 @@ static void gmc_v7_0_get_vm_pde(struct amdgpu_device *adev, int level,
	BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
}

/*
 * Compute the PTE flags for a BO VA mapping on GMC v7 (CIK).
 * Executable and PRT bits are unsupported on this generation and are
 * masked out instead of triggering a BUG_ON() (see patch v2/v3 notes:
 * flags are masked for SI and CIK).
 */
static void gmc_v7_0_get_vm_pte(struct amdgpu_device *adev,
				struct amdgpu_bo_va_mapping *mapping,
				uint64_t *flags)
{
	*flags &= ~(AMDGPU_PTE_EXECUTABLE | AMDGPU_PTE_PRT);
}

/**
 * gmc_v8_0_set_fault_enable_default - update VM fault handling
 *
@@ -1328,7 +1336,8 @@ static const struct amdgpu_gmc_funcs gmc_v7_0_gmc_funcs = {
	.emit_flush_gpu_tlb = gmc_v7_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v7_0_emit_pasid_mapping,
	.set_prt = gmc_v7_0_set_prt,
	.get_vm_pde = gmc_v7_0_get_vm_pde
	.get_vm_pde = gmc_v7_0_get_vm_pde,
	.get_vm_pte = gmc_v7_0_get_vm_pte
};

static const struct amdgpu_irq_src_funcs gmc_v7_0_irq_funcs = {
Loading