Commit de7b45ba authored by Christian König, committed by Alex Deucher

drm/amdgpu: cleanup creating BOs at fixed location (v2)



The placement is something TTM/BO internal and the RAS code should
avoid touching it directly.

Add a helper to create a BO at a fixed location and use that instead.

v2: squash in fixes (Alex)

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Guchun Chen <guchun.chen@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent db338e16
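
For orientation, a minimal sketch of how a caller uses the new helper, modeled
on the RAS call site converted below; bad_page_offset is a hypothetical,
page-aligned byte offset standing in for the caller's own value:

	/* Reserve one page of VRAM at a fixed byte offset. */
	struct amdgpu_bo *bo = NULL;
	int r;

	r = amdgpu_bo_create_kernel_at(adev, bad_page_offset, PAGE_SIZE,
				       AMDGPU_GEM_DOMAIN_VRAM, &bo,
				       NULL /* no CPU mapping needed */);
	if (r)
		DRM_ERROR("reserving VRAM at %llx failed (%d)\n",
			  bad_page_offset, r);

	/* The reservation is dropped by simply freeing the BO again. */
	amdgpu_bo_free_kernel(&bo, NULL, NULL);
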
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +61 −0
@@ -342,6 +342,67 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
 	return 0;
 }
 
+/**
+ * amdgpu_bo_create_kernel_at - create BO for kernel use at specific location
+ *
+ * @adev: amdgpu device object
+ * @offset: offset of the BO
+ * @size: size of the BO
+ * @domain: where to place it
+ * @bo_ptr:  used to initialize BOs in structures
+ * @cpu_addr: optional CPU address mapping
+ *
+ * Creates a kernel BO at a specific offset in the address space of the domain.
+ *
+ * Returns:
+ * 0 on success, negative error code otherwise.
+ */
+int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
+			       uint64_t offset, uint64_t size, uint32_t domain,
+			       struct amdgpu_bo **bo_ptr, void **cpu_addr)
+{
+	struct ttm_operation_ctx ctx = { false, false };
+	unsigned int i;
+	int r;
+
+	offset &= PAGE_MASK;
+	size = ALIGN(size, PAGE_SIZE);
+
+	r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE, domain, bo_ptr,
+				      NULL, NULL);
+	if (r)
+		return r;
+
+	/*
+	 * Remove the original mem node and create a new one at the request
+	 * position.
+	 */
+	for (i = 0; i < (*bo_ptr)->placement.num_placement; ++i) {
+		(*bo_ptr)->placements[i].fpfn = offset >> PAGE_SHIFT;
+		(*bo_ptr)->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
+	}
+
+	ttm_bo_mem_put(&(*bo_ptr)->tbo, &(*bo_ptr)->tbo.mem);
+	r = ttm_bo_mem_space(&(*bo_ptr)->tbo, &(*bo_ptr)->placement,
+			     &(*bo_ptr)->tbo.mem, &ctx);
+	if (r)
+		goto error;
+
+	if (cpu_addr) {
+		r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
+		if (r)
+			goto error;
+	}
+
+	amdgpu_bo_unreserve(*bo_ptr);
+	return 0;
+
+error:
+	amdgpu_bo_unreserve(*bo_ptr);
+	amdgpu_bo_unref(bo_ptr);
+	return r;
+}
+
 /**
  * amdgpu_bo_free_kernel - free BO for kernel use
  *
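
The core of the helper above is the placement window: each placement is
clamped to the page-frame range [fpfn, lpfn) covering the requested bytes, and
ttm_bo_mem_space() can then only find room inside that window. A small
illustration of the arithmetic, assuming 4 KiB pages and purely illustrative
values:

	uint64_t offset = 0x100000;	/* requested byte offset, page aligned */
	uint64_t size = 0x2000;		/* two pages */

	/* fpfn is the first allowed page frame, lpfn the first frame past
	 * the end, so TTM may only place the BO in frames [0x100, 0x102).
	 */
	unsigned int fpfn = offset >> PAGE_SHIFT;		/* 0x100 */
	unsigned int lpfn = (offset + size) >> PAGE_SHIFT;	/* 0x102 */
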
drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +3 −0
@@ -238,6 +238,9 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
 			    unsigned long size, int align,
 			    u32 domain, struct amdgpu_bo **bo_ptr,
 			    u64 *gpu_addr, void **cpu_addr);
+int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
+			       uint64_t offset, uint64_t size, uint32_t domain,
+			       struct amdgpu_bo **bo_ptr, void **cpu_addr);
 void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
 			   void **cpu_addr);
 int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr);
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +6 −79
@@ -71,12 +71,6 @@ const char *ras_block_string[] = {
 
 atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0);
 
-static int amdgpu_ras_reserve_vram(struct amdgpu_device *adev,
-		uint64_t offset, uint64_t size,
-		struct amdgpu_bo **bo_ptr);
-static int amdgpu_ras_release_vram(struct amdgpu_device *adev,
-		struct amdgpu_bo **bo_ptr);
-
 static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
 					size_t size, loff_t *pos)
 {
@@ -1260,75 +1254,6 @@ static void amdgpu_ras_do_recovery(struct work_struct *work)
 	atomic_set(&ras->in_recovery, 0);
 }
 
-static int amdgpu_ras_release_vram(struct amdgpu_device *adev,
-		struct amdgpu_bo **bo_ptr)
-{
-	/* no need to free it actually. */
-	amdgpu_bo_free_kernel(bo_ptr, NULL, NULL);
-	return 0;
-}
-
-/* reserve vram with size@offset */
-static int amdgpu_ras_reserve_vram(struct amdgpu_device *adev,
-		uint64_t offset, uint64_t size,
-		struct amdgpu_bo **bo_ptr)
-{
-	struct ttm_operation_ctx ctx = { false, false };
-	struct amdgpu_bo_param bp;
-	int r = 0;
-	int i;
-	struct amdgpu_bo *bo;
-
-	if (bo_ptr)
-		*bo_ptr = NULL;
-	memset(&bp, 0, sizeof(bp));
-	bp.size = size;
-	bp.byte_align = PAGE_SIZE;
-	bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
-	bp.flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
-		AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
-	bp.type = ttm_bo_type_kernel;
-	bp.resv = NULL;
-
-	r = amdgpu_bo_create(adev, &bp, &bo);
-	if (r)
-		return -EINVAL;
-
-	r = amdgpu_bo_reserve(bo, false);
-	if (r)
-		goto error_reserve;
-
-	offset = ALIGN(offset, PAGE_SIZE);
-	for (i = 0; i < bo->placement.num_placement; ++i) {
-		bo->placements[i].fpfn = offset >> PAGE_SHIFT;
-		bo->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
-	}
-
-	ttm_bo_mem_put(&bo->tbo, &bo->tbo.mem);
-	r = ttm_bo_mem_space(&bo->tbo, &bo->placement, &bo->tbo.mem, &ctx);
-	if (r)
-		goto error_pin;
-
-	r = amdgpu_bo_pin_restricted(bo,
-			AMDGPU_GEM_DOMAIN_VRAM,
-			offset,
-			offset + size);
-	if (r)
-		goto error_pin;
-
-	if (bo_ptr)
-		*bo_ptr = bo;
-
-	amdgpu_bo_unreserve(bo);
-	return r;
-
-error_pin:
-	amdgpu_bo_unreserve(bo);
-error_reserve:
-	amdgpu_bo_unref(&bo);
-	return r;
-}
-
 /* alloc/realloc bps array */
 static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev,
 		struct ras_err_handler_data *data, int pages)
@@ -1464,7 +1389,7 @@ int amdgpu_ras_reserve_bad_pages(struct amdgpu_device *adev)
 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
 	struct ras_err_handler_data *data;
 	uint64_t bp;
-	struct amdgpu_bo *bo;
+	struct amdgpu_bo *bo = NULL;
 	int i, ret = 0;
 
 	if (!con || !con->eh_data)
@@ -1478,12 +1403,14 @@ int amdgpu_ras_reserve_bad_pages(struct amdgpu_device *adev)
 	for (i = data->last_reserved; i < data->count; i++) {
 		bp = data->bps[i].retired_page;
 
-		if (amdgpu_ras_reserve_vram(adev, bp << PAGE_SHIFT,
-					PAGE_SIZE, &bo))
+		if (amdgpu_bo_create_kernel_at(adev, bp << PAGE_SHIFT, PAGE_SIZE,
+					       AMDGPU_GEM_DOMAIN_VRAM,
+					       &bo, NULL))
 			DRM_ERROR("RAS ERROR: reserve vram %llx fail\n", bp);
 
 		data->bps_bo[i] = bo;
 		data->last_reserved = i + 1;
+		bo = NULL;
 	}
 
 	/* continue to save bad pages to eeprom even reesrve_vram fails */
@@ -1512,7 +1439,7 @@ static int amdgpu_ras_release_bad_pages(struct amdgpu_device *adev)
 	for (i = data->last_reserved - 1; i >= 0; i--) {
 		bo = data->bps_bo[i];
 
-		amdgpu_ras_release_vram(adev, &bo);
+		amdgpu_bo_free_kernel(&bo, NULL, NULL);
 
 		data->bps_bo[i] = bo;
 		data->last_reserved = i;
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +13 −69
@@ -1650,81 +1650,25 @@ static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev)
  */
 static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
 {
-	struct ttm_operation_ctx ctx = { false, false };
-	struct amdgpu_bo_param bp;
-	int r = 0;
-	int i;
-	u64 vram_size = adev->gmc.visible_vram_size;
-	u64 offset = adev->fw_vram_usage.start_offset;
-	u64 size = adev->fw_vram_usage.size;
-	struct amdgpu_bo *bo;
-
-	memset(&bp, 0, sizeof(bp));
-	bp.size = adev->fw_vram_usage.size;
-	bp.byte_align = PAGE_SIZE;
-	bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
-	bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
-		AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
-	bp.type = ttm_bo_type_kernel;
-	bp.resv = NULL;
+	uint64_t vram_size = adev->gmc.visible_vram_size;
+	int r;
 
 	adev->fw_vram_usage.va = NULL;
 	adev->fw_vram_usage.reserved_bo = NULL;
 
-	if (adev->fw_vram_usage.size > 0 &&
-		adev->fw_vram_usage.size <= vram_size) {
-
-		r = amdgpu_bo_create(adev, &bp,
-				     &adev->fw_vram_usage.reserved_bo);
-		if (r)
-			goto error_create;
-
-		r = amdgpu_bo_reserve(adev->fw_vram_usage.reserved_bo, false);
-		if (r)
-			goto error_reserve;
-
-		/* remove the original mem node and create a new one at the
-		 * request position
-		 */
-		bo = adev->fw_vram_usage.reserved_bo;
-		offset = ALIGN(offset, PAGE_SIZE);
-		for (i = 0; i < bo->placement.num_placement; ++i) {
-			bo->placements[i].fpfn = offset >> PAGE_SHIFT;
-			bo->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
-		}
-
-		ttm_bo_mem_put(&bo->tbo, &bo->tbo.mem);
-		r = ttm_bo_mem_space(&bo->tbo, &bo->placement,
-				     &bo->tbo.mem, &ctx);
-		if (r)
-			goto error_pin;
+	if (adev->fw_vram_usage.size == 0 ||
+	    adev->fw_vram_usage.size > vram_size)
+		return 0;
 
-		r = amdgpu_bo_pin_restricted(adev->fw_vram_usage.reserved_bo,
-			AMDGPU_GEM_DOMAIN_VRAM,
-			adev->fw_vram_usage.start_offset,
-			(adev->fw_vram_usage.start_offset +
-			adev->fw_vram_usage.size));
-		if (r)
-			goto error_pin;
-		r = amdgpu_bo_kmap(adev->fw_vram_usage.reserved_bo,
-				   &adev->fw_vram_usage.va);
-		if (r)
-			goto error_kmap;
-
-		amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
-	}
-	return r;
-
-error_kmap:
-	amdgpu_bo_unpin(adev->fw_vram_usage.reserved_bo);
-error_pin:
-	amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
-error_reserve:
-	amdgpu_bo_unref(&adev->fw_vram_usage.reserved_bo);
-error_create:
-	adev->fw_vram_usage.va = NULL;
-	adev->fw_vram_usage.reserved_bo = NULL;
-	return r;
+	return amdgpu_bo_create_kernel_at(adev,
+					  adev->fw_vram_usage.start_offset,
+					  adev->fw_vram_usage.size,
+					  AMDGPU_GEM_DOMAIN_VRAM,
+					  &adev->fw_vram_usage.reserved_bo,
+					  &adev->fw_vram_usage.va);
 }
 
 /**
  * amdgpu_ttm_init - Init the memory management (ttm) as well as various
  * gtt/vram related fields.