Commit 12b07256 authored by Chris Wilson
Browse files

drm/i915: Export ppgtt_bind_vma



Reuse the ppgtt_bind_vma() for aliasing_ppgtt_bind_vma() so we can
reduce some code near-duplication. The catch is that we need to then
pass along the i915_address_space and not rely on vma->vm, as they
differ with the aliasing-ppgtt.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Andi Shyti <andi.shyti@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200703102519.26539-1-chris@chris-wilson.co.uk
parent 5cecf507
Loading
Loading
Loading
Loading
+5 −4
Original line number Diff line number Diff line
@@ -32,16 +32,17 @@ static void vma_clear_pages(struct i915_vma *vma)
	vma->pages = NULL;
}

static int vma_bind(struct i915_vma *vma,
static int vma_bind(struct i915_address_space *vm,
		    struct i915_vma *vma,
		    enum i915_cache_level cache_level,
		    u32 flags)
{
	return vma->vm->vma_ops.bind_vma(vma, cache_level, flags);
	return vm->vma_ops.bind_vma(vm, vma, cache_level, flags);
}

static void vma_unbind(struct i915_vma *vma)
static void vma_unbind(struct i915_address_space *vm, struct i915_vma *vma)
{
	vma->vm->vma_ops.unbind_vma(vma);
	vm->vma_ops.unbind_vma(vm, vma);
}

static const struct i915_vma_ops proxy_vma_ops = {
+4 −3
Original line number Diff line number Diff line
@@ -299,11 +299,12 @@ static void pd_vma_clear_pages(struct i915_vma *vma)
	vma->pages = NULL;
}

static int pd_vma_bind(struct i915_vma *vma,
static int pd_vma_bind(struct i915_address_space *vm,
		       struct i915_vma *vma,
		       enum i915_cache_level cache_level,
		       u32 unused)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm);
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	struct gen6_ppgtt *ppgtt = vma->private;
	u32 ggtt_offset = i915_ggtt_offset(vma) / I915_GTT_PAGE_SIZE;

@@ -314,7 +315,7 @@ static int pd_vma_bind(struct i915_vma *vma,
	return 0;
}

static void pd_vma_unbind(struct i915_vma *vma)
static void pd_vma_unbind(struct i915_address_space *vm, struct i915_vma *vma)
{
	struct gen6_ppgtt *ppgtt = vma->private;
	struct i915_page_directory * const pd = ppgtt->base.pd;
+17 −32
Original line number Diff line number Diff line
@@ -436,7 +436,8 @@ static void i915_ggtt_clear_range(struct i915_address_space *vm,
	intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
}

static int ggtt_bind_vma(struct i915_vma *vma,
static int ggtt_bind_vma(struct i915_address_space *vm,
			 struct i915_vma *vma,
			 enum i915_cache_level cache_level,
			 u32 flags)
{
@@ -451,15 +452,15 @@ static int ggtt_bind_vma(struct i915_vma *vma,
	if (i915_gem_object_is_readonly(obj))
		pte_flags |= PTE_READ_ONLY;

	vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
	vm->insert_entries(vm, vma, cache_level, pte_flags);
	vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;

	return 0;
}

static void ggtt_unbind_vma(struct i915_vma *vma)
static void ggtt_unbind_vma(struct i915_address_space *vm, struct i915_vma *vma)
{
	vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
	vm->clear_range(vm, vma->node.start, vma->size);
}

static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt)
@@ -567,7 +568,8 @@ err:
	return ret;
}

static int aliasing_gtt_bind_vma(struct i915_vma *vma,
static int aliasing_gtt_bind_vma(struct i915_address_space *vm,
				 struct i915_vma *vma,
				 enum i915_cache_level cache_level,
				 u32 flags)
{
@@ -580,44 +582,27 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
		pte_flags |= PTE_READ_ONLY;

	if (flags & I915_VMA_LOCAL_BIND) {
		struct i915_ppgtt *alias = i915_vm_to_ggtt(vma->vm)->alias;
		struct i915_ppgtt *alias = i915_vm_to_ggtt(vm)->alias;

		if (flags & I915_VMA_ALLOC) {
			ret = alias->vm.allocate_va_range(&alias->vm,
							  vma->node.start,
							  vma->size);
		ret = ppgtt_bind_vma(&alias->vm, vma, cache_level, flags);
		if (ret)
			return ret;

			set_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma));
		}

		GEM_BUG_ON(!test_bit(I915_VMA_ALLOC_BIT,
				     __i915_vma_flags(vma)));
		alias->vm.insert_entries(&alias->vm, vma,
					 cache_level, pte_flags);
	}

	if (flags & I915_VMA_GLOBAL_BIND)
		vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
		vm->insert_entries(vm, vma, cache_level, pte_flags);

	return 0;
}

static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
static void aliasing_gtt_unbind_vma(struct i915_address_space *vm,
				    struct i915_vma *vma)
{
	if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) {
		struct i915_address_space *vm = vma->vm;

	if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
		vm->clear_range(vm, vma->node.start, vma->size);
	}

	if (test_and_clear_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma))) {
		struct i915_address_space *vm =
			&i915_vm_to_ggtt(vma->vm)->alias->vm;

		vm->clear_range(vm, vma->node.start, vma->size);
	}
	if (i915_vma_is_bound(vma, I915_VMA_LOCAL_BIND))
		ppgtt_unbind_vma(&i915_vm_to_ggtt(vm)->alias->vm, vma);
}

static int init_aliasing_ppgtt(struct i915_ggtt *ggtt)
+11 −2
Original line number Diff line number Diff line
@@ -198,14 +198,16 @@ struct intel_gt;

struct i915_vma_ops {
	/* Map an object into an address space with the given cache flags. */
	int (*bind_vma)(struct i915_vma *vma,
	int (*bind_vma)(struct i915_address_space *vm,
			struct i915_vma *vma,
			enum i915_cache_level cache_level,
			u32 flags);
	/*
	 * Unmap an object from an address space. This usually consists of
	 * setting the valid PTE entries to a reserved scratch page.
	 */
	void (*unbind_vma)(struct i915_vma *vma);
	void (*unbind_vma)(struct i915_address_space *vm,
			   struct i915_vma *vma);

	int (*set_pages)(struct i915_vma *vma);
	void (*clear_pages)(struct i915_vma *vma);
@@ -566,6 +568,13 @@ int ggtt_set_pages(struct i915_vma *vma);
int ppgtt_set_pages(struct i915_vma *vma);
void clear_pages(struct i915_vma *vma);

int ppgtt_bind_vma(struct i915_address_space *vm,
		   struct i915_vma *vma,
		   enum i915_cache_level cache_level,
		   u32 flags);
void ppgtt_unbind_vma(struct i915_address_space *vm,
		      struct i915_vma *vma);

void gtt_write_workarounds(struct intel_gt *gt);

void setup_private_pat(struct intel_uncore *uncore);
+9 −10
Original line number Diff line number Diff line
@@ -155,16 +155,16 @@ struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt)
	return ppgtt;
}

static int ppgtt_bind_vma(struct i915_vma *vma,
int ppgtt_bind_vma(struct i915_address_space *vm,
		   struct i915_vma *vma,
		   enum i915_cache_level cache_level,
		   u32 flags)
{
	u32 pte_flags;
	int err;

	if (flags & I915_VMA_ALLOC) {
		err = vma->vm->allocate_va_range(vma->vm,
						 vma->node.start, vma->size);
	if (!test_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma))) {
		err = vm->allocate_va_range(vm, vma->node.start, vma->size);
		if (err)
			return err;

@@ -176,17 +176,16 @@ static int ppgtt_bind_vma(struct i915_vma *vma,
	if (i915_gem_object_is_readonly(vma->obj))
		pte_flags |= PTE_READ_ONLY;

	GEM_BUG_ON(!test_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma)));
	vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
	vm->insert_entries(vm, vma, cache_level, pte_flags);
	wmb();

	return 0;
}

static void ppgtt_unbind_vma(struct i915_vma *vma)
void ppgtt_unbind_vma(struct i915_address_space *vm, struct i915_vma *vma)
{
	if (test_and_clear_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma)))
		vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
		vm->clear_range(vm, vma->node.start, vma->size);
}

int ppgtt_set_pages(struct i915_vma *vma)
Loading