Commit dfce9025 authored by Dave Airlie
Browse files

Backmerge i915 security patches from commit 'ea0b163b' into drm-next



This backmerges the branch that ended up in Linus' tree. It removes
all the changes for the rc6 patches from Linus' tree in favour of
a patch that is based on a large refactor that occurred.

Otherwise it all looks good.

Signed-off-by: Dave Airlie <airlied@redhat.com>
parents 2248a283 ea0b163b
Loading
Loading
Loading
Loading
+5 −0
Original line number Original line Diff line number Diff line
@@ -236,6 +236,8 @@ static void i915_gem_context_free(struct i915_gem_context *ctx)
	free_engines(rcu_access_pointer(ctx->engines));
	free_engines(rcu_access_pointer(ctx->engines));
	mutex_destroy(&ctx->engines_mutex);
	mutex_destroy(&ctx->engines_mutex);


	kfree(ctx->jump_whitelist);

	if (ctx->timeline)
	if (ctx->timeline)
		intel_timeline_put(ctx->timeline);
		intel_timeline_put(ctx->timeline);


@@ -527,6 +529,9 @@ __create_context(struct drm_i915_private *i915)
	for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
	for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
		ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
		ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;


	ctx->jump_whitelist = NULL;
	ctx->jump_whitelist_cmds = 0;

	spin_lock(&i915->gem.contexts.lock);
	spin_lock(&i915->gem.contexts.lock);
	list_add_tail(&ctx->link, &i915->gem.contexts.list);
	list_add_tail(&ctx->link, &i915->gem.contexts.list);
	spin_unlock(&i915->gem.contexts.lock);
	spin_unlock(&i915->gem.contexts.lock);
+7 −0
Original line number Original line Diff line number Diff line
@@ -176,6 +176,13 @@ struct i915_gem_context {
	 * per vm, which may be one per context or shared with the global GTT)
	 * per vm, which may be one per context or shared with the global GTT)
	 */
	 */
	struct radix_tree_root handles_vma;
	struct radix_tree_root handles_vma;

	/** jump_whitelist: Bit array for tracking cmds during cmdparsing
	 *  Guarded by struct_mutex
	 */
	unsigned long *jump_whitelist;
	/** jump_whitelist_cmds: No of cmd slots available */
	u32 jump_whitelist_cmds;
};
};


#endif /* __I915_GEM_CONTEXT_TYPES_H__ */
#endif /* __I915_GEM_CONTEXT_TYPES_H__ */
+80 −31
Original line number Original line Diff line number Diff line
@@ -298,7 +298,9 @@ static inline u64 gen8_noncanonical_addr(u64 address)


static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb)
static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb)
{
{
	return intel_engine_needs_cmd_parser(eb->engine) && eb->batch_len;
	return intel_engine_requires_cmd_parser(eb->engine) ||
		(intel_engine_using_cmd_parser(eb->engine) &&
		 eb->args->batch_len);
}
}


static int eb_create(struct i915_execbuffer *eb)
static int eb_create(struct i915_execbuffer *eb)
@@ -1990,40 +1992,94 @@ static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
	return 0;
	return 0;
}
}


static struct i915_vma *eb_parse(struct i915_execbuffer *eb, bool is_master)
static struct i915_vma *
shadow_batch_pin(struct i915_execbuffer *eb, struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = eb->i915;
	struct i915_vma * const vma = *eb->vma;
	struct i915_address_space *vm;
	u64 flags;

	/*
	 * PPGTT backed shadow buffers must be mapped RO, to prevent
	 * post-scan tampering
	 */
	if (CMDPARSER_USES_GGTT(dev_priv)) {
		flags = PIN_GLOBAL;
		vm = &dev_priv->ggtt.vm;
	} else if (vma->vm->has_read_only) {
		flags = PIN_USER;
		vm = vma->vm;
		i915_gem_object_set_readonly(obj);
	} else {
		DRM_DEBUG("Cannot prevent post-scan tampering without RO capable vm\n");
		return ERR_PTR(-EINVAL);
	}

	return i915_gem_object_pin(obj, vm, NULL, 0, 0, flags);
}

static struct i915_vma *eb_parse(struct i915_execbuffer *eb)
{
{
	struct intel_engine_pool_node *pool;
	struct intel_engine_pool_node *pool;
	struct i915_vma *vma;
	struct i915_vma *vma;
	u64 batch_start;
	u64 shadow_batch_start;
	int err;
	int err;


	pool = intel_engine_get_pool(eb->engine, eb->batch_len);
	pool = intel_engine_get_pool(eb->engine, eb->batch_len);
	if (IS_ERR(pool))
	if (IS_ERR(pool))
		return ERR_CAST(pool);
		return ERR_CAST(pool);


	err = intel_engine_cmd_parser(eb->engine,
	vma = shadow_batch_pin(eb, pool->obj);
	if (IS_ERR(vma))
		goto err;

	batch_start = gen8_canonical_addr(eb->batch->node.start) +
		      eb->batch_start_offset;

	shadow_batch_start = gen8_canonical_addr(vma->node.start);

	err = intel_engine_cmd_parser(eb->gem_context,
				      eb->engine,
				      eb->batch->obj,
				      eb->batch->obj,
				      pool->obj,
				      batch_start,
				      eb->batch_start_offset,
				      eb->batch_start_offset,
				      eb->batch_len,
				      eb->batch_len,
				      is_master);
				      pool->obj,
				      shadow_batch_start);

	if (err) {
	if (err) {
		if (err == -EACCES) /* unhandled chained batch */
		i915_vma_unpin(vma);

		/*
		 * Unsafe GGTT-backed buffers can still be submitted safely
		 * as non-secure.
		 * For PPGTT backing however, we have no choice but to forcibly
		 * reject unsafe buffers
		 */
		if (CMDPARSER_USES_GGTT(eb->i915) && (err == -EACCES))
			/* Execute original buffer non-secure */
			vma = NULL;
			vma = NULL;
		else
		else
			vma = ERR_PTR(err);
			vma = ERR_PTR(err);
		goto err;
		goto err;
	}
	}


	vma = i915_gem_object_ggtt_pin(pool->obj, NULL, 0, 0, 0);
	if (IS_ERR(vma))
		goto err;

	eb->vma[eb->buffer_count] = i915_vma_get(vma);
	eb->vma[eb->buffer_count] = i915_vma_get(vma);
	eb->flags[eb->buffer_count] =
	eb->flags[eb->buffer_count] =
		__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_REF;
		__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_REF;
	vma->exec_flags = &eb->flags[eb->buffer_count];
	vma->exec_flags = &eb->flags[eb->buffer_count];
	eb->buffer_count++;
	eb->buffer_count++;


	eb->batch_start_offset = 0;
	eb->batch = vma;

	if (CMDPARSER_USES_GGTT(eb->i915))
		eb->batch_flags |= I915_DISPATCH_SECURE;

	/* eb->batch_len unchanged */

	vma->private = pool;
	vma->private = pool;
	return vma;
	return vma;


@@ -2430,6 +2486,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
		       struct drm_i915_gem_exec_object2 *exec,
		       struct drm_i915_gem_exec_object2 *exec,
		       struct drm_syncobj **fences)
		       struct drm_syncobj **fences)
{
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct i915_execbuffer eb;
	struct i915_execbuffer eb;
	struct dma_fence *in_fence = NULL;
	struct dma_fence *in_fence = NULL;
	struct dma_fence *exec_fence = NULL;
	struct dma_fence *exec_fence = NULL;
@@ -2441,7 +2498,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
	BUILD_BUG_ON(__EXEC_OBJECT_INTERNAL_FLAGS &
	BUILD_BUG_ON(__EXEC_OBJECT_INTERNAL_FLAGS &
		     ~__EXEC_OBJECT_UNKNOWN_FLAGS);
		     ~__EXEC_OBJECT_UNKNOWN_FLAGS);


	eb.i915 = to_i915(dev);
	eb.i915 = i915;
	eb.file = file;
	eb.file = file;
	eb.args = args;
	eb.args = args;
	if (DBG_FORCE_RELOC || !(args->flags & I915_EXEC_NO_RELOC))
	if (DBG_FORCE_RELOC || !(args->flags & I915_EXEC_NO_RELOC))
@@ -2461,6 +2518,13 @@ i915_gem_do_execbuffer(struct drm_device *dev,


	eb.batch_flags = 0;
	eb.batch_flags = 0;
	if (args->flags & I915_EXEC_SECURE) {
	if (args->flags & I915_EXEC_SECURE) {
		if (INTEL_GEN(i915) >= 11)
			return -ENODEV;

		/* Return -EPERM to trigger fallback code on old binaries. */
		if (!HAS_SECURE_BATCHES(i915))
			return -EPERM;

		if (!drm_is_current_master(file) || !capable(CAP_SYS_ADMIN))
		if (!drm_is_current_master(file) || !capable(CAP_SYS_ADMIN))
			return -EPERM;
			return -EPERM;


@@ -2539,34 +2603,19 @@ i915_gem_do_execbuffer(struct drm_device *dev,
		goto err_vma;
		goto err_vma;
	}
	}


	if (eb.batch_len == 0)
		eb.batch_len = eb.batch->size - eb.batch_start_offset;

	if (eb_use_cmdparser(&eb)) {
	if (eb_use_cmdparser(&eb)) {
		struct i915_vma *vma;
		struct i915_vma *vma;


		vma = eb_parse(&eb, drm_is_current_master(file));
		vma = eb_parse(&eb);
		if (IS_ERR(vma)) {
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			err = PTR_ERR(vma);
			goto err_vma;
			goto err_vma;
		}
		}

		if (vma) {
			/*
			 * Batch parsed and accepted:
			 *
			 * Set the DISPATCH_SECURE bit to remove the NON_SECURE
			 * bit from MI_BATCH_BUFFER_START commands issued in
			 * the dispatch_execbuffer implementations. We
			 * specifically don't want that set on batches the
			 * command parser has accepted.
			 */
			eb.batch_flags |= I915_DISPATCH_SECURE;
			eb.batch_start_offset = 0;
			eb.batch = vma;
		}
	}
	}


	if (eb.batch_len == 0)
		eb.batch_len = eb.batch->size - eb.batch_start_offset;

	/*
	/*
	 * snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
	 * snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
	 * batch" bit. Hence we need to pin secure batches into the global gtt.
	 * batch" bit. Hence we need to pin secure batches into the global gtt.
+10 −3
Original line number Original line Diff line number Diff line
@@ -454,13 +454,14 @@ struct intel_engine_cs {
	/* status_notifier: list of callbacks for context-switch changes */
	/* status_notifier: list of callbacks for context-switch changes */
	struct atomic_notifier_head context_status_notifier;
	struct atomic_notifier_head context_status_notifier;


#define I915_ENGINE_NEEDS_CMD_PARSER BIT(0)
#define I915_ENGINE_USING_CMD_PARSER BIT(0)
#define I915_ENGINE_SUPPORTS_STATS   BIT(1)
#define I915_ENGINE_SUPPORTS_STATS   BIT(1)
#define I915_ENGINE_HAS_PREEMPTION   BIT(2)
#define I915_ENGINE_HAS_PREEMPTION   BIT(2)
#define I915_ENGINE_HAS_SEMAPHORES   BIT(3)
#define I915_ENGINE_HAS_SEMAPHORES   BIT(3)
#define I915_ENGINE_NEEDS_BREADCRUMB_TASKLET BIT(4)
#define I915_ENGINE_NEEDS_BREADCRUMB_TASKLET BIT(4)
#define I915_ENGINE_IS_VIRTUAL       BIT(5)
#define I915_ENGINE_IS_VIRTUAL       BIT(5)
#define I915_ENGINE_HAS_RELATIVE_MMIO BIT(6)
#define I915_ENGINE_HAS_RELATIVE_MMIO BIT(6)
#define I915_ENGINE_REQUIRES_CMD_PARSER BIT(7)
	unsigned int flags;
	unsigned int flags;


	/*
	/*
@@ -528,9 +529,15 @@ struct intel_engine_cs {
};
};


static inline bool
static inline bool
intel_engine_needs_cmd_parser(const struct intel_engine_cs *engine)
intel_engine_using_cmd_parser(const struct intel_engine_cs *engine)
{
{
	return engine->flags & I915_ENGINE_NEEDS_CMD_PARSER;
	return engine->flags & I915_ENGINE_USING_CMD_PARSER;
}

static inline bool
intel_engine_requires_cmd_parser(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_REQUIRES_CMD_PARSER;
}
}


static inline bool
static inline bool
Loading