Commit b291ce0a authored by Chris Wilson

drm/i915/gem: Purge the sudden reappearance of i915_gem_object_pin()



This died many years ago as we now use i915_vma first and foremost.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191115170835.1367869-1-chris@chris-wilson.co.uk
parent 3c1fe1eb
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c  +23 −13
@@ -1995,28 +1995,38 @@ static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
 static struct i915_vma *
 shadow_batch_pin(struct i915_execbuffer *eb, struct drm_i915_gem_object *obj)
 {
-	struct drm_i915_private *dev_priv = eb->i915;
-	struct i915_vma * const vma = *eb->vma;
 	struct i915_address_space *vm;
+	struct i915_vma *vma;
 	u64 flags;
+	int err;
 
 	/*
 	 * PPGTT backed shadow buffers must be mapped RO, to prevent
 	 * post-scan tampering
 	 */
-	if (CMDPARSER_USES_GGTT(dev_priv)) {
+	if (CMDPARSER_USES_GGTT(eb->i915)) {
+		vm = &eb->engine->gt->ggtt->vm;
 		flags = PIN_GLOBAL;
-		vm = &dev_priv->ggtt.vm;
-	} else if (vma->vm->has_read_only) {
-		flags = PIN_USER;
-		vm = vma->vm;
-		i915_gem_object_set_readonly(obj);
 	} else {
-		DRM_DEBUG("Cannot prevent post-scan tampering without RO capable vm\n");
-		return ERR_PTR(-EINVAL);
+		vm = eb->context->vm;
+		if (!vm->has_read_only) {
+			DRM_DEBUG("Cannot prevent post-scan tampering without RO capable vm\n");
+			return ERR_PTR(-EINVAL);
+		}
+
+		i915_gem_object_set_readonly(obj);
+		flags = PIN_USER;
 	}
 
-	return i915_gem_object_pin(obj, vm, NULL, 0, 0, flags);
+	vma = i915_vma_instance(obj, vm, NULL);
+	if (IS_ERR(vma))
+		return vma;
+
+	err = i915_vma_pin(vma, 0, 0, flags);
+	if (err)
+		return ERR_PTR(err);
+
+	return vma;
 }
 
 static struct i915_vma *eb_parse(struct i915_execbuffer *eb)
@@ -2058,7 +2068,7 @@ static struct i915_vma *eb_parse(struct i915_execbuffer *eb)
 		 * For PPGTT backing however, we have no choice but to forcibly
 		 * reject unsafe buffers
 		 */
-		if (CMDPARSER_USES_GGTT(eb->i915) && (err == -EACCES))
+		if (i915_vma_is_ggtt(vma) && err == -EACCES)
 			/* Execute original buffer non-secure */
 			vma = NULL;
 		else
@@ -2075,7 +2085,7 @@ static struct i915_vma *eb_parse(struct i915_execbuffer *eb)
 	eb->batch_start_offset = 0;
 	eb->batch = vma;
 
-	if (CMDPARSER_USES_GGTT(eb->i915))
+	if (i915_vma_is_ggtt(vma))
 		eb->batch_flags |= I915_DISPATCH_SECURE;
 
 	/* eb->batch_len unchanged */
drivers/gpu/drm/i915/i915_drv.h  +0 −8
@@ -1842,14 +1842,6 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
 			   unsigned long flags);
 #define I915_GEM_OBJECT_UNBIND_ACTIVE BIT(0)
 
-struct i915_vma * __must_check
-i915_gem_object_pin(struct drm_i915_gem_object *obj,
-		    struct i915_address_space *vm,
-		    const struct i915_ggtt_view *view,
-		    u64 size,
-		    u64 alignment,
-		    u64 flags);
-
 void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);
 
 static inline int __must_check
drivers/gpu/drm/i915/i915_gem.c  +13 −25
@@ -891,22 +891,8 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
 			 u64 alignment,
 			 u64 flags)
 {
-	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-	struct i915_address_space *vm = &dev_priv->ggtt.vm;
-
-	return i915_gem_object_pin(obj, vm, view, size, alignment,
-				   flags | PIN_GLOBAL);
-}
-
-struct i915_vma *
-i915_gem_object_pin(struct drm_i915_gem_object *obj,
-		    struct i915_address_space *vm,
-		    const struct i915_ggtt_view *view,
-		    u64 size,
-		    u64 alignment,
-		    u64 flags)
-{
-	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+	struct drm_i915_private *i915 = to_i915(obj->base.dev);
+	struct i915_ggtt *ggtt = &i915->ggtt;
 	struct i915_vma *vma;
 	int ret;
 
@@ -915,17 +901,19 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
 
 	if (flags & PIN_MAPPABLE &&
 	    (!view || view->type == I915_GGTT_VIEW_NORMAL)) {
-		/* If the required space is larger than the available
+		/*
+		 * If the required space is larger than the available
 		 * aperture, we will not able to find a slot for the
 		 * object and unbinding the object now will be in
 		 * vain. Worse, doing so may cause us to ping-pong
 		 * the object in and out of the Global GTT and
 		 * waste a lot of cycles under the mutex.
 		 */
-		if (obj->base.size > dev_priv->ggtt.mappable_end)
+		if (obj->base.size > ggtt->mappable_end)
 			return ERR_PTR(-E2BIG);
 
-		/* If NONBLOCK is set the caller is optimistically
+		/*
+		 * If NONBLOCK is set the caller is optimistically
 		 * trying to cache the full object within the mappable
 		 * aperture, and *must* have a fallback in place for
 		 * situations where we cannot bind the object. We
@@ -941,11 +929,11 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
 		 * we could try to minimise harm to others.
 		 */
 		if (flags & PIN_NONBLOCK &&
-		    obj->base.size > dev_priv->ggtt.mappable_end / 2)
+		    obj->base.size > ggtt->mappable_end / 2)
 			return ERR_PTR(-ENOSPC);
 	}
 
-	vma = i915_vma_instance(obj, vm, view);
+	vma = i915_vma_instance(obj, &ggtt->vm, view);
 	if (IS_ERR(vma))
 		return vma;
 
@@ -955,7 +943,7 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
 				return ERR_PTR(-ENOSPC);
 
 			if (flags & PIN_MAPPABLE &&
-			    vma->fence_size > dev_priv->ggtt.mappable_end / 2)
+			    vma->fence_size > ggtt->mappable_end / 2)
 				return ERR_PTR(-ENOSPC);
 		}
 
@@ -965,14 +953,14 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
 	}
 
 	if (vma->fence && !i915_gem_object_is_tiled(obj)) {
-		mutex_lock(&vma->vm->mutex);
+		mutex_lock(&ggtt->vm.mutex);
 		ret = i915_vma_revoke_fence(vma);
-		mutex_unlock(&vma->vm->mutex);
+		mutex_unlock(&ggtt->vm.mutex);
 		if (ret)
 			return ERR_PTR(ret);
 	}
 
-	ret = i915_vma_pin(vma, size, alignment, flags);
+	ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
 	if (ret)
 		return ERR_PTR(ret);