Commit fa3ab3c7 authored by Christian König, committed by Alex Deucher

drm/amdgpu: change parameter passing in the VM code



Make it more flexible by passing src and page addresses
directly instead of the structures that contain them.
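
To see the shape of the change in isolation, here is a minimal, compilable
sketch (hypothetical stand-in names and types such as update_pages and a
userspace dma_addr_t, not the actual driver code): before, the update helper
received the whole GART structure and derived the copy source or DMA address
array itself; after this change the caller resolves "src" and "pages_addr"
once and the helper only dispatches on which of the two is set.

/*
 * Illustrative sketch only -- simplified stand-ins, not the real
 * amdgpu helpers.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t dma_addr_t;	/* stand-in for the kernel type */

static void update_pages(uint64_t src, const dma_addr_t *pages_addr,
			 uint64_t pe, uint64_t addr, unsigned int count)
{
	if (src) {
		/* GART case: copy PTEs from another page table */
		printf("copy %u PTEs from 0x%llx\n", count,
		       (unsigned long long)(src + (addr >> 12) * 8));
	} else if (pages_addr) {
		/* system pages: write PTEs from a DMA address array */
		printf("write %u PTEs via pages_addr[]\n", count);
	} else {
		/* VRAM: set a linear run of PTEs */
		printf("set %u PTEs starting at pe 0x%llx\n", count,
		       (unsigned long long)pe);
	}
}

int main(void)
{
	dma_addr_t pages[4] = { 0x1000, 0x2000, 0x3000, 0x4000 };

	update_pages(0, NULL, 0x8000, 0, 4);		/* linear set */
	update_pages(0, pages, 0x8000, 0, 4);		/* DMA writes */
	update_pages(0xc0de0000, NULL, 0x8000, 0, 4);	/* GART copy  */
	return 0;
}

The dispatch order mirrors the kernel helper below: a set copy source wins,
then per-page DMA addresses, with a linear PTE fill as the fallback.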

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent c4e1a13a
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +54 −46
@@ -349,8 +349,8 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
 * amdgpu_vm_update_pages - helper to call the right asic function
 *
 * @adev: amdgpu_device pointer
- * @gtt: GART instance to use for mapping
- * @gtt_flags: GTT hw access flags
+ * @src: address where to copy page table entries from
+ * @pages_addr: DMA addresses to use for mapping
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
@@ -362,8 +362,8 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
 * to setup the page table using the DMA.
 */
static void amdgpu_vm_update_pages(struct amdgpu_device *adev,
-				   struct amdgpu_gart *gtt,
-				   uint32_t gtt_flags,
+				   uint64_t src,
+				   dma_addr_t *pages_addr,
				   struct amdgpu_ib *ib,
				   uint64_t pe, uint64_t addr,
				   unsigned count, uint32_t incr,
@@ -371,12 +371,11 @@ static void amdgpu_vm_update_pages(struct amdgpu_device *adev,
{
	trace_amdgpu_vm_set_page(pe, addr, count, incr, flags);

-	if ((gtt == &adev->gart) && (flags == gtt_flags)) {
-		uint64_t src = gtt->table_addr + (addr >> 12) * 8;
+	if (src) {
+		src += (addr >> 12) * 8;
		amdgpu_vm_copy_pte(adev, ib, pe, src, count);

-	} else if (gtt) {
-		dma_addr_t *pages_addr = gtt->pages_addr;
+	} else if (pages_addr) {
		amdgpu_vm_write_pte(adev, ib, pages_addr, pe, addr,
				    count, incr, flags);

@@ -426,7 +425,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
	if (r)
		goto error;

-	amdgpu_vm_update_pages(adev, NULL, 0, &job->ibs[0], addr, 0, entries,
+	amdgpu_vm_update_pages(adev, 0, NULL, &job->ibs[0], addr, 0, entries,
			       0, 0);
	amdgpu_ring_pad_ib(ring, &job->ibs[0]);

@@ -536,7 +535,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
		    ((last_pt + incr * count) != pt)) {

			if (count) {
-				amdgpu_vm_update_pages(adev, NULL, 0, ib,
+				amdgpu_vm_update_pages(adev, 0, NULL, ib,
						       last_pde, last_pt,
						       count, incr,
						       AMDGPU_PTE_VALID);
@@ -551,7 +550,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
	}

	if (count)
-		amdgpu_vm_update_pages(adev, NULL, 0, ib, last_pde, last_pt,
+		amdgpu_vm_update_pages(adev, 0, NULL, ib, last_pde, last_pt,
				       count, incr, AMDGPU_PTE_VALID);

	if (ib->length_dw != 0) {
@@ -584,8 +583,8 @@ error_free:
 * amdgpu_vm_frag_ptes - add fragment information to PTEs
 *
 * @adev: amdgpu_device pointer
- * @gtt: GART instance to use for mapping
- * @gtt_flags: GTT hw mapping flags
+ * @src: address where to copy page table entries from
+ * @pages_addr: DMA addresses to use for mapping
 * @ib: IB for the update
 * @pe_start: first PTE to handle
 * @pe_end: last PTE to handle
@@ -593,8 +592,8 @@ error_free:
 * @flags: hw mapping flags
 */
static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
-				struct amdgpu_gart *gtt,
-				uint32_t gtt_flags,
+				uint64_t src,
+				dma_addr_t *pages_addr,
				struct amdgpu_ib *ib,
				uint64_t pe_start, uint64_t pe_end,
				uint64_t addr, uint32_t flags)
@@ -632,10 +631,11 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
		return;

	/* system pages are not contiguous */
-	if (gtt || !(flags & AMDGPU_PTE_VALID) || (frag_start >= frag_end)) {
+	if (src || pages_addr || !(flags & AMDGPU_PTE_VALID) ||
+	    (frag_start >= frag_end)) {

		count = (pe_end - pe_start) / 8;
-		amdgpu_vm_update_pages(adev, gtt, gtt_flags, ib, pe_start,
+		amdgpu_vm_update_pages(adev, src, pages_addr, ib, pe_start,
				       addr, count, AMDGPU_GPU_PAGE_SIZE,
				       flags);
		return;
@@ -644,21 +644,21 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
	/* handle the 4K area at the beginning */
	if (pe_start != frag_start) {
		count = (frag_start - pe_start) / 8;
-		amdgpu_vm_update_pages(adev, NULL, 0, ib, pe_start, addr,
+		amdgpu_vm_update_pages(adev, 0, NULL, ib, pe_start, addr,
				       count, AMDGPU_GPU_PAGE_SIZE, flags);
		addr += AMDGPU_GPU_PAGE_SIZE * count;
	}

	/* handle the area in the middle */
	count = (frag_end - frag_start) / 8;
-	amdgpu_vm_update_pages(adev, NULL, 0, ib, frag_start, addr, count,
+	amdgpu_vm_update_pages(adev, 0, NULL, ib, frag_start, addr, count,
			       AMDGPU_GPU_PAGE_SIZE, flags | frag_flags);

	/* handle the 4K area at the end */
	if (frag_end != pe_end) {
		addr += AMDGPU_GPU_PAGE_SIZE * count;
		count = (pe_end - frag_end) / 8;
-		amdgpu_vm_update_pages(adev, NULL, 0, ib, frag_end, addr,
+		amdgpu_vm_update_pages(adev, 0, NULL, ib, frag_end, addr,
				       count, AMDGPU_GPU_PAGE_SIZE, flags);
	}
}
@@ -667,8 +667,8 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
 * amdgpu_vm_update_ptes - make sure that page tables are valid
 *
 * @adev: amdgpu_device pointer
- * @gtt: GART instance to use for mapping
- * @gtt_flags: GTT hw mapping flags
+ * @src: address where to copy page table entries from
+ * @pages_addr: DMA addresses to use for mapping
 * @vm: requested vm
 * @start: start of GPU address range
 * @end: end of GPU address range
@@ -678,8 +678,8 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
 * Update the page tables in the range @start - @end.
 */
static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
-				  struct amdgpu_gart *gtt,
-				  uint32_t gtt_flags,
+				  uint64_t src,
+				  dma_addr_t *pages_addr,
				  struct amdgpu_vm *vm,
				  struct amdgpu_ib *ib,
				  uint64_t start, uint64_t end,
@@ -707,7 +707,7 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,

		if (last_pe_end != pe_start) {

-			amdgpu_vm_frag_ptes(adev, gtt, gtt_flags, ib,
+			amdgpu_vm_frag_ptes(adev, src, pages_addr, ib,
					    last_pe_start, last_pe_end,
					    last_dst, flags);

@@ -722,17 +722,16 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
		dst += nptes * AMDGPU_GPU_PAGE_SIZE;
	}

-	amdgpu_vm_frag_ptes(adev, gtt, gtt_flags, ib,
-			    last_pe_start, last_pe_end,
-			    last_dst, flags);
+	amdgpu_vm_frag_ptes(adev, src, pages_addr, ib, last_pe_start,
+			    last_pe_end, last_dst, flags);
}

/**
 * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
 *
 * @adev: amdgpu_device pointer
- * @gtt: GART instance to use for mapping
- * @gtt_flags: flags as they are used for GTT
+ * @src: address where to copy page table entries from
+ * @pages_addr: DMA addresses to use for mapping
 * @vm: requested vm
 * @start: start of mapped range
 * @last: last mapped entry
@@ -744,8 +743,8 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
 * Returns 0 for success, -EINVAL for failure.
 */
static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
-				       struct amdgpu_gart *gtt,
-				       uint32_t gtt_flags,
+				       uint64_t src,
+				       dma_addr_t *pages_addr,
				       struct amdgpu_vm *vm,
				       uint64_t start, uint64_t last,
				       uint32_t flags, uint64_t addr,
@@ -776,11 +775,11 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
	/* padding, etc. */
	ndw = 64;

-	if ((gtt == &adev->gart) && (flags == gtt_flags)) {
+	if (src) {
		/* only copy commands needed */
		ndw += ncmds * 7;

-	} else if (gtt) {
+	} else if (pages_addr) {
		/* header for write data commands */
		ndw += ncmds * 4;

@@ -810,8 +809,8 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
	if (r)
		goto error_free;

-	amdgpu_vm_update_ptes(adev, gtt, gtt_flags, vm, ib, start, last + 1,
-			      addr, flags);
+	amdgpu_vm_update_ptes(adev, src, pages_addr, vm, ib, start,
+			      last + 1, addr, flags);

	amdgpu_ring_pad_ib(ring, ib);
	WARN_ON(ib->length_dw > ndw);
@@ -853,12 +852,13 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
				      uint32_t gtt_flags,
				      struct amdgpu_vm *vm,
				      struct amdgpu_bo_va_mapping *mapping,
-				      uint64_t addr, struct fence **fence)
+				      uint32_t flags, uint64_t addr,
+				      struct fence **fence)
{
	const uint64_t max_size = 64ULL * 1024ULL * 1024ULL / AMDGPU_GPU_PAGE_SIZE;

-	uint64_t start = mapping->it.start;
-	uint32_t flags = gtt_flags;
+	uint64_t src = 0, start = mapping->it.start;
+	dma_addr_t *pages_addr = NULL;
	int r;

	/* normally, bo_va->flags only contains READABLE and WRITEABLE bits go here
@@ -871,10 +871,17 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,

	trace_amdgpu_vm_bo_update(mapping);

+	if (gtt) {
+		if (flags == gtt_flags)
+			src = adev->gart.table_addr + (addr >> 12) * 8;
+		else
+			pages_addr = &gtt->pages_addr[addr >> 12];
+		addr = 0;
+	}
	addr += mapping->offset;

-	if (!gtt || ((gtt == &adev->gart) && (flags == gtt_flags)))
-		return amdgpu_vm_bo_update_mapping(adev, gtt, gtt_flags, vm,
+	if (!gtt || src)
+		return amdgpu_vm_bo_update_mapping(adev, src, pages_addr, vm,
						   start, mapping->it.last,
						   flags, addr, fence);

@@ -882,7 +889,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
		uint64_t last;

		last = min((uint64_t)mapping->it.last, start + max_size - 1);
-		r = amdgpu_vm_bo_update_mapping(adev, gtt, gtt_flags, vm,
+		r = amdgpu_vm_bo_update_mapping(adev, src, pages_addr, vm,
						start, last, flags, addr,
						fence);
		if (r)
@@ -914,7 +921,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
	struct amdgpu_vm *vm = bo_va->vm;
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_gart *gtt = NULL;
-	uint32_t flags;
+	uint32_t gtt_flags, flags;
	uint64_t addr;
	int r;

@@ -937,6 +944,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
	}

	flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
+	gtt_flags = (adev == bo_va->bo->adev) ? flags : 0;

	spin_lock(&vm->status_lock);
	if (!list_empty(&bo_va->vm_status))
@@ -944,8 +952,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
	spin_unlock(&vm->status_lock);

	list_for_each_entry(mapping, &bo_va->invalids, list) {
-		r = amdgpu_vm_bo_split_mapping(adev, gtt, flags, vm, mapping, addr,
-					       &bo_va->last_pt_update);
+		r = amdgpu_vm_bo_split_mapping(adev, gtt, gtt_flags, vm, mapping,
+					       flags, addr, &bo_va->last_pt_update);
		if (r)
			return r;
	}
@@ -991,7 +999,7 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
		list_del(&mapping->list);

		r = amdgpu_vm_bo_split_mapping(adev, NULL, 0, vm, mapping,
-					       0, NULL);
+					       0, 0, NULL);
		kfree(mapping);
		if (r)
			return r;