Commit 6f4134b3 authored by Dave Airlie's avatar Dave Airlie
Browse files

Merge tag 'drm-intel-next-fixes-2020-02-13' of...

Merge tag 'drm-intel-next-fixes-2020-02-13' of git://anongit.freedesktop.org/drm/drm-intel

 into drm-fixes

drm/i915 fixes for v5.6-rc2

Most of these were aimed at a "next fixes" pull already during the merge
window, but there were issues with the baseline I used, which resulted
in a lot of issues in CI. I've regenerated this stuff piecemeal now,
adding gradually to it, and it seems healthy now.

Due to the issues this is much bigger than I'd like. But it was
obviously necessary to take the time to ensure it's not garbage...

Signed-off-by: default avatarDave Airlie <airlied@redhat.com>

From: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/878sl6yfrn.fsf@intel.com
parents e44c1e3a 2aaaa5ee
Loading
Loading
Loading
Loading
+4 −2
Original line number Diff line number Diff line
@@ -357,14 +357,16 @@ parse_generic_dtd(struct drm_i915_private *dev_priv,
		panel_fixed_mode->hdisplay + dtd->hfront_porch;
	panel_fixed_mode->hsync_end =
		panel_fixed_mode->hsync_start + dtd->hsync;
	panel_fixed_mode->htotal = panel_fixed_mode->hsync_end;
	panel_fixed_mode->htotal =
		panel_fixed_mode->hdisplay + dtd->hblank;

	panel_fixed_mode->vdisplay = dtd->vactive;
	panel_fixed_mode->vsync_start =
		panel_fixed_mode->vdisplay + dtd->vfront_porch;
	panel_fixed_mode->vsync_end =
		panel_fixed_mode->vsync_start + dtd->vsync;
	panel_fixed_mode->vtotal = panel_fixed_mode->vsync_end;
	panel_fixed_mode->vtotal =
		panel_fixed_mode->vdisplay + dtd->vblank;

	panel_fixed_mode->clock = dtd->pixel_clock;
	panel_fixed_mode->width_mm = dtd->width_mm;
+18 −26
Original line number Diff line number Diff line
@@ -12366,6 +12366,7 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
		/* Copy parameters to slave plane */
		linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE;
		linked_state->color_ctl = plane_state->color_ctl;
		linked_state->view = plane_state->view;
		memcpy(linked_state->color_plane, plane_state->color_plane,
		       sizeof(linked_state->color_plane));
@@ -14476,35 +14477,21 @@ static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
	return 0;
}
/*
 * Report whether the CRTC driving @transcoder in this atomic state needs a
 * full modeset.  Scans the new CRTC states in @state for the one whose
 * cpu_transcoder matches; returns false if no CRTC in the state uses it.
 * (Pre-patch single-transcoder variant; replaced by a mask-based helper.)
 */
static bool intel_cpu_transcoder_needs_modeset(struct intel_atomic_state *state,
					       enum transcoder transcoder)
{
	struct intel_crtc_state *new_crtc_state;
	struct intel_crtc *crtc;
	int i;
	/* At most one CRTC in the state can own a given CPU transcoder. */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
		if (new_crtc_state->cpu_transcoder == transcoder)
			return needs_modeset(new_crtc_state);
	return false;
}
static void
intel_modeset_synced_crtcs(struct intel_atomic_state *state,
static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state,
					       u8 transcoders)
{
	struct intel_crtc_state *new_crtc_state;
	const struct intel_crtc_state *new_crtc_state;
	struct intel_crtc *crtc;
	int i;
	for_each_new_intel_crtc_in_state(state, crtc,
					 new_crtc_state, i) {
		if (transcoders & BIT(new_crtc_state->cpu_transcoder)) {
			new_crtc_state->uapi.mode_changed = true;
			new_crtc_state->update_pipe = false;
		}
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->hw.enable &&
		    transcoders & BIT(new_crtc_state->cpu_transcoder) &&
		    needs_modeset(new_crtc_state))
			return true;
	}
	return false;
}
static int
@@ -14662,15 +14649,20 @@ static int intel_atomic_check(struct drm_device *dev,
		if (intel_dp_mst_is_slave_trans(new_crtc_state)) {
			enum transcoder master = new_crtc_state->mst_master_transcoder;
			if (intel_cpu_transcoder_needs_modeset(state, master)) {
			if (intel_cpu_transcoders_need_modeset(state, BIT(master))) {
				new_crtc_state->uapi.mode_changed = true;
				new_crtc_state->update_pipe = false;
			}
		} else if (is_trans_port_sync_mode(new_crtc_state)) {
		}
		if (is_trans_port_sync_mode(new_crtc_state)) {
			u8 trans = new_crtc_state->sync_mode_slaves_mask |
				   BIT(new_crtc_state->master_transcoder);
			intel_modeset_synced_crtcs(state, trans);
			if (intel_cpu_transcoders_need_modeset(state, trans)) {
				new_crtc_state->uapi.mode_changed = true;
				new_crtc_state->update_pipe = false;
			}
		}
	}
+32 −18
Original line number Diff line number Diff line
@@ -384,6 +384,7 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
	return data;
}

#ifdef CONFIG_ACPI
static int i2c_adapter_lookup(struct acpi_resource *ares, void *data)
{
	struct i2c_adapter_lookup *lookup = data;
@@ -393,8 +394,7 @@ static int i2c_adapter_lookup(struct acpi_resource *ares, void *data)
	acpi_handle adapter_handle;
	acpi_status status;

	if (intel_dsi->i2c_bus_num >= 0 ||
	    !i2c_acpi_get_i2c_resource(ares, &sb))
	if (!i2c_acpi_get_i2c_resource(ares, &sb))
		return 1;

	if (lookup->slave_addr != sb->slave_address)
@@ -413,24 +413,14 @@ static int i2c_adapter_lookup(struct acpi_resource *ares, void *data)
	return 1;
}

static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data)
static void i2c_acpi_find_adapter(struct intel_dsi *intel_dsi,
				  const u16 slave_addr)
{
	struct drm_device *drm_dev = intel_dsi->base.base.dev;
	struct device *dev = &drm_dev->pdev->dev;
	struct i2c_adapter *adapter;
	struct acpi_device *acpi_dev;
	struct list_head resource_list;
	struct i2c_adapter_lookup lookup;
	struct i2c_msg msg;
	int ret;
	u8 vbt_i2c_bus_num = *(data + 2);
	u16 slave_addr = *(u16 *)(data + 3);
	u8 reg_offset = *(data + 5);
	u8 payload_size = *(data + 6);
	u8 *payload_data;

	if (intel_dsi->i2c_bus_num < 0) {
		intel_dsi->i2c_bus_num = vbt_i2c_bus_num;

	acpi_dev = ACPI_COMPANION(dev);
	if (acpi_dev) {
@@ -446,6 +436,30 @@ static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data)
		acpi_dev_free_resource_list(&resource_list);
	}
}
#else
/* No-op stub when CONFIG_ACPI is disabled: leaves intel_dsi->i2c_bus_num as
 * set by the caller (the VBT-provided bus number). */
static inline void i2c_acpi_find_adapter(struct intel_dsi *intel_dsi,
					 const u16 slave_addr)
{
}
#endif

static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data)
{
	struct drm_device *drm_dev = intel_dsi->base.base.dev;
	struct device *dev = &drm_dev->pdev->dev;
	struct i2c_adapter *adapter;
	struct i2c_msg msg;
	int ret;
	u8 vbt_i2c_bus_num = *(data + 2);
	u16 slave_addr = *(u16 *)(data + 3);
	u8 reg_offset = *(data + 5);
	u8 payload_size = *(data + 6);
	u8 *payload_data;

	if (intel_dsi->i2c_bus_num < 0) {
		intel_dsi->i2c_bus_num = vbt_i2c_bus_num;
		i2c_acpi_find_adapter(intel_dsi, slave_addr);
	}

	adapter = i2c_get_adapter(intel_dsi->i2c_bus_num);
	if (!adapter) {
+36 −1
Original line number Diff line number Diff line
@@ -1981,9 +1981,20 @@ static int __eb_parse(struct dma_fence_work *work)
				       pw->trampoline);
}

/*
 * dma_fence_work release callback for the batch-parse work: drops the
 * i915_active references (batch, shadow, and optional trampoline) that
 * were acquired before the work was queued, in reverse acquisition order.
 */
static void __eb_parse_release(struct dma_fence_work *work)
{
	struct eb_parse_work *pw = container_of(work, typeof(*pw), base);

	/* trampoline is optional; only release it if it was acquired */
	if (pw->trampoline)
		i915_active_release(&pw->trampoline->active);
	i915_active_release(&pw->shadow->active);
	i915_active_release(&pw->batch->active);
}

/* Work ops for the cmdparser pipeline: __eb_parse does the parse,
 * __eb_parse_release drops the active references on completion. */
static const struct dma_fence_work_ops eb_parse_ops = {
	.name = "eb_parse",
	.work = __eb_parse,
	.release = __eb_parse_release,
};

static int eb_parse_pipeline(struct i915_execbuffer *eb,
@@ -1997,6 +2008,20 @@ static int eb_parse_pipeline(struct i915_execbuffer *eb,
	if (!pw)
		return -ENOMEM;

	err = i915_active_acquire(&eb->batch->active);
	if (err)
		goto err_free;

	err = i915_active_acquire(&shadow->active);
	if (err)
		goto err_batch;

	if (trampoline) {
		err = i915_active_acquire(&trampoline->active);
		if (err)
			goto err_shadow;
	}

	dma_fence_work_init(&pw->base, &eb_parse_ops);

	pw->engine = eb->engine;
@@ -2006,7 +2031,9 @@ static int eb_parse_pipeline(struct i915_execbuffer *eb,
	pw->shadow = shadow;
	pw->trampoline = trampoline;

	dma_resv_lock(pw->batch->resv, NULL);
	err = dma_resv_lock_interruptible(pw->batch->resv, NULL);
	if (err)
		goto err_trampoline;

	err = dma_resv_reserve_shared(pw->batch->resv, 1);
	if (err)
@@ -2034,6 +2061,14 @@ static int eb_parse_pipeline(struct i915_execbuffer *eb,

err_batch_unlock:
	dma_resv_unlock(pw->batch->resv);
err_trampoline:
	if (trampoline)
		i915_active_release(&trampoline->active);
err_shadow:
	i915_active_release(&shadow->active);
err_batch:
	i915_active_release(&eb->batch->active);
err_free:
	kfree(pw);
	return err;
}
+87 −42
Original line number Diff line number Diff line
@@ -455,10 +455,11 @@ out:

void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
{
	struct i915_mmap_offset *mmo;
	struct i915_mmap_offset *mmo, *mn;

	spin_lock(&obj->mmo.lock);
	list_for_each_entry(mmo, &obj->mmo.offsets, offset) {
	rbtree_postorder_for_each_entry_safe(mmo, mn,
					     &obj->mmo.offsets, offset) {
		/*
		 * vma_node_unmap for GTT mmaps handled already in
		 * __i915_gem_object_release_mmap_gtt
@@ -487,6 +488,67 @@ void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
	i915_gem_object_release_mmap_offset(obj);
}

/*
 * Find the object's mmap offset node for @mmap_type, or NULL if none has
 * been attached yet.  Walks the rbtree rooted at obj->mmo.offsets, which is
 * keyed (ordered) by mmap_type, under obj->mmo.lock.  The returned pointer
 * is not reference-counted here; lifetime is tied to the object.
 */
static struct i915_mmap_offset *
lookup_mmo(struct drm_i915_gem_object *obj,
	   enum i915_mmap_type mmap_type)
{
	struct rb_node *rb;

	spin_lock(&obj->mmo.lock);
	rb = obj->mmo.offsets.rb_node;
	while (rb) {
		struct i915_mmap_offset *mmo =
			rb_entry(rb, typeof(*mmo), offset);

		if (mmo->mmap_type == mmap_type) {
			spin_unlock(&obj->mmo.lock);
			return mmo;
		}

		/* tree is ordered by mmap_type */
		if (mmo->mmap_type < mmap_type)
			rb = rb->rb_right;
		else
			rb = rb->rb_left;
	}
	spin_unlock(&obj->mmo.lock);

	return NULL;
}

/*
 * Insert @mmo into the object's mmap-offset rbtree (keyed by mmap_type),
 * under obj->mmo.lock.  If another thread raced and already inserted a node
 * of the same mmap_type, the new @mmo is discarded (its vma offset removed
 * and the allocation freed) and the existing node is returned instead.
 * Callers must therefore use the return value, not the @mmo they passed in.
 */
static struct i915_mmap_offset *
insert_mmo(struct drm_i915_gem_object *obj, struct i915_mmap_offset *mmo)
{
	struct rb_node *rb, **p;

	spin_lock(&obj->mmo.lock);
	rb = NULL;
	p = &obj->mmo.offsets.rb_node;
	while (*p) {
		struct i915_mmap_offset *pos;

		rb = *p;
		pos = rb_entry(rb, typeof(*pos), offset);

		if (pos->mmap_type == mmo->mmap_type) {
			/* Lost the race: drop ours, hand back the winner. */
			spin_unlock(&obj->mmo.lock);
			drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
					      &mmo->vma_node);
			kfree(mmo);
			return pos;
		}

		if (pos->mmap_type < mmo->mmap_type)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&mmo->offset, rb, p);
	rb_insert_color(&mmo->offset, &obj->mmo.offsets);
	spin_unlock(&obj->mmo.lock);

	return mmo;
}

static struct i915_mmap_offset *
mmap_offset_attach(struct drm_i915_gem_object *obj,
		   enum i915_mmap_type mmap_type,
@@ -496,20 +558,22 @@ mmap_offset_attach(struct drm_i915_gem_object *obj,
	struct i915_mmap_offset *mmo;
	int err;

	mmo = lookup_mmo(obj, mmap_type);
	if (mmo)
		goto out;

	mmo = kmalloc(sizeof(*mmo), GFP_KERNEL);
	if (!mmo)
		return ERR_PTR(-ENOMEM);

	mmo->obj = obj;
	mmo->dev = obj->base.dev;
	mmo->file = file;
	mmo->mmap_type = mmap_type;
	drm_vma_node_reset(&mmo->vma_node);

	err = drm_vma_offset_add(mmo->dev->vma_offset_manager, &mmo->vma_node,
				 obj->base.size / PAGE_SIZE);
	err = drm_vma_offset_add(obj->base.dev->vma_offset_manager,
				 &mmo->vma_node, obj->base.size / PAGE_SIZE);
	if (likely(!err))
		goto out;
		goto insert;

	/* Attempt to reap some mmap space from dead objects */
	err = intel_gt_retire_requests_timeout(&i915->gt, MAX_SCHEDULE_TIMEOUT);
@@ -517,19 +581,17 @@ mmap_offset_attach(struct drm_i915_gem_object *obj,
		goto err;

	i915_gem_drain_freed_objects(i915);
	err = drm_vma_offset_add(mmo->dev->vma_offset_manager, &mmo->vma_node,
				 obj->base.size / PAGE_SIZE);
	err = drm_vma_offset_add(obj->base.dev->vma_offset_manager,
				 &mmo->vma_node, obj->base.size / PAGE_SIZE);
	if (err)
		goto err;

insert:
	mmo = insert_mmo(obj, mmo);
	GEM_BUG_ON(lookup_mmo(obj, mmap_type) != mmo);
out:
	if (file)
		drm_vma_node_allow(&mmo->vma_node, file);

	spin_lock(&obj->mmo.lock);
	list_add(&mmo->offset, &obj->mmo.offsets);
	spin_unlock(&obj->mmo.lock);

	return mmo;

err:
@@ -745,60 +807,43 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
	struct drm_vma_offset_node *node;
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_i915_gem_object *obj = NULL;
	struct i915_mmap_offset *mmo = NULL;
	struct drm_gem_object *obj = NULL;
	struct file *anon;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	rcu_read_lock();
	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  vma->vm_pgoff,
						  vma_pages(vma));
	if (likely(node)) {
		mmo = container_of(node, struct i915_mmap_offset,
				   vma_node);
		/*
		 * In our dependency chain, the drm_vma_offset_node
		 * depends on the validity of the mmo, which depends on
		 * the gem object. However the only reference we have
		 * at this point is the mmo (as the parent of the node).
		 * Try to check if the gem object was at least cleared.
		 */
		if (!mmo || !mmo->obj) {
			drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
			return -EINVAL;
		}
	if (node && drm_vma_node_is_allowed(node, priv)) {
		/*
		 * Skip 0-refcnted objects as it is in the process of being
		 * destroyed and will be invalid when the vma manager lock
		 * is released.
		 */
		obj = &mmo->obj->base;
		if (!kref_get_unless_zero(&obj->refcount))
			obj = NULL;
		mmo = container_of(node, struct i915_mmap_offset, vma_node);
		obj = i915_gem_object_get_rcu(mmo->obj);
	}
	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
	rcu_read_unlock();
	if (!obj)
		return -EINVAL;

	if (!drm_vma_node_is_allowed(node, priv)) {
		drm_gem_object_put_unlocked(obj);
		return -EACCES;
	}
		return node ? -EACCES : -EINVAL;

	if (i915_gem_object_is_readonly(to_intel_bo(obj))) {
	if (i915_gem_object_is_readonly(obj)) {
		if (vma->vm_flags & VM_WRITE) {
			drm_gem_object_put_unlocked(obj);
			i915_gem_object_put(obj);
			return -EINVAL;
		}
		vma->vm_flags &= ~VM_MAYWRITE;
	}

	anon = mmap_singleton(to_i915(obj->dev));
	anon = mmap_singleton(to_i915(dev));
	if (IS_ERR(anon)) {
		drm_gem_object_put_unlocked(obj);
		i915_gem_object_put(obj);
		return PTR_ERR(anon);
	}

Loading