Commit 99013b10 authored by Chris Wilson

drm/i915: Make shrink/unshrink be atomic



Add an atomic counter and always take the spinlock around the pin/unpin
events, so that we can perform the list manipulation concurrently.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190910212204.17190-1-chris@chris-wilson.co.uk
parent 85dd14c2
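
The pattern in this patch is a pin count with a lock-protected 0↔1 transition: pin/unpin stay lockless while the counter is away from zero, and the spinlock is only taken when list membership can actually change. Below is a minimal userspace sketch of that idea, assuming C11 atomics and pthreads; the names (shrink_obj, add_unless, make_unshrinkable, make_shrinkable) are illustrative stand-ins, not the i915 code itself.

/* Sketch of the pin/unpin pattern used by this patch; not kernel code. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

struct shrink_obj {
	atomic_int pin;         /* analogue of obj->mm.shrink_pin */
	bool on_shrink_list;    /* analogue of obj->mm.link membership */
	pthread_mutex_t *lock;  /* analogue of i915->mm.obj_lock */
};

/* Analogue of the kernel's atomic_add_unless(v, a, u):
 * add a to *v unless *v == u; return true if the add happened. */
static bool add_unless(atomic_int *v, int a, int u)
{
	int c = atomic_load(v);

	while (c != u)
		if (atomic_compare_exchange_weak(v, &c, c + a))
			return true;
	return false;
}

static void make_unshrinkable(struct shrink_obj *obj)
{
	/* Fast path: already pinned (count > 0); bump without the lock. */
	if (add_unless(&obj->pin, 1, 0))
		return;

	/* Slow path: we may be racing over the 0 -> 1 transition, so the
	 * list manipulation is done under the lock. */
	pthread_mutex_lock(obj->lock);
	if (atomic_fetch_add(&obj->pin, 1) == 0 && obj->on_shrink_list)
		obj->on_shrink_list = false;    /* list_del_init() */
	pthread_mutex_unlock(obj->lock);
}

static void make_shrinkable(struct shrink_obj *obj)
{
	/* Fast path: the count stays above zero; no list change possible. */
	if (add_unless(&obj->pin, -1, 1))
		return;

	/* Slow path: we may be performing the final 1 -> 0 transition. */
	pthread_mutex_lock(obj->lock);
	if (atomic_fetch_sub(&obj->pin, 1) == 1)
		obj->on_shrink_list = true;     /* list_add_tail() */
	pthread_mutex_unlock(obj->lock);
}

int main(void)
{
	pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	struct shrink_obj obj = { .pin = 0, .on_shrink_list = true, .lock = &lock };

	make_unshrinkable(&obj);  /* 0 -> 1: taken off the shrink list */
	make_unshrinkable(&obj);  /* 1 -> 2: lockless fast path */
	make_shrinkable(&obj);    /* 2 -> 1: lockless fast path */
	make_shrinkable(&obj);    /* 1 -> 0: back on the shrink list */
	return obj.on_shrink_list ? 0 : 1;
}

Compile with e.g. cc -std=c11 -pthread sketch.c; it exits 0 if the object ends back on the shrink list.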
drivers/gpu/drm/i915/gem/i915_gem_domain.c (+2 −1)
@@ -494,7 +494,8 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
 
 		spin_lock_irqsave(&i915->mm.obj_lock, flags);
 
-		if (obj->mm.madv == I915_MADV_WILLNEED)
+		if (obj->mm.madv == I915_MADV_WILLNEED &&
+		    !atomic_read(&obj->mm.shrink_pin))
 			list_move_tail(&obj->mm.link, &i915->mm.shrink_list);
 
 		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
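
(The added atomic_read() check leaves an object with an elevated shrink_pin off the shrink list here; i915_gem_object_make_shrinkable() puts it back once the last pin is dropped.)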
drivers/gpu/drm/i915/gem/i915_gem_object_types.h (+1 −0)
@@ -156,6 +156,7 @@ struct drm_i915_gem_object {
 	struct {
 		struct mutex lock; /* protects the pages and their use */
 		atomic_t pages_pin_count;
+		atomic_t shrink_pin;
 
 		struct sg_table *pages;
 		void *mapping;
drivers/gpu/drm/i915/gem/i915_gem_pages.c (+1 −0)
@@ -71,6 +71,7 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
 			list = &i915->mm.shrink_list;
 		list_add_tail(&obj->mm.link, list);
 
+		atomic_set(&obj->mm.shrink_pin, 0);
 		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
 	}
 }
drivers/gpu/drm/i915/gem/i915_gem_shrinker.c (+21 −15)
@@ -516,46 +516,52 @@ void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
 
 void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj)
 {
+	struct drm_i915_private *i915 = obj_to_i915(obj);
+	unsigned long flags;
+
 	/*
 	 * We can only be called while the pages are pinned or when
 	 * the pages are released. If pinned, we should only be called
 	 * from a single caller under controlled conditions; and on release
 	 * only one caller may release us. Neither the two may cross.
 	 */
-	if (!list_empty(&obj->mm.link)) { /* pinned by caller */
-		struct drm_i915_private *i915 = obj_to_i915(obj);
-		unsigned long flags;
-
-		spin_lock_irqsave(&i915->mm.obj_lock, flags);
-		GEM_BUG_ON(list_empty(&obj->mm.link));
+	if (atomic_add_unless(&obj->mm.shrink_pin, 1, 0))
+		return;
 
+	spin_lock_irqsave(&i915->mm.obj_lock, flags);
+	if (!atomic_fetch_inc(&obj->mm.shrink_pin) &&
+	    !list_empty(&obj->mm.link)) {
 		list_del_init(&obj->mm.link);
 		i915->mm.shrink_count--;
 		i915->mm.shrink_memory -= obj->base.size;
-
-		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
 	}
+	spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
 }
 
 static void __i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj,
 					      struct list_head *head)
 {
+	struct drm_i915_private *i915 = obj_to_i915(obj);
+	unsigned long flags;
+
 	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
-	GEM_BUG_ON(!list_empty(&obj->mm.link));
+	if (!i915_gem_object_is_shrinkable(obj))
+		return;
 
-	if (i915_gem_object_is_shrinkable(obj)) {
-		struct drm_i915_private *i915 = obj_to_i915(obj);
-		unsigned long flags;
-
-		spin_lock_irqsave(&i915->mm.obj_lock, flags);
-		GEM_BUG_ON(!kref_read(&obj->base.refcount));
+	if (atomic_add_unless(&obj->mm.shrink_pin, -1, 1))
+		return;
+
+	spin_lock_irqsave(&i915->mm.obj_lock, flags);
+	GEM_BUG_ON(!kref_read(&obj->base.refcount));
+	if (atomic_dec_and_test(&obj->mm.shrink_pin)) {
+		GEM_BUG_ON(!list_empty(&obj->mm.link));
 
 		list_add_tail(&obj->mm.link, head);
 		i915->mm.shrink_count++;
 		i915->mm.shrink_memory += obj->base.size;
-
-		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
 	}
+	spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
 }
 
 void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj)
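
Note the shape of both functions above: a lockless atomic_add_unless() fast path that only succeeds while the counter is away from the 0/1 boundary, and a slow path that re-resolves any racing transition under mm.obj_lock via atomic_fetch_inc()/atomic_dec_and_test(), so the shrink-list manipulation itself is always serialized by the spinlock.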
drivers/gpu/drm/i915/gt/intel_context.c (+1 −1)
@@ -134,8 +134,8 @@ static int __context_pin_state(struct i915_vma *vma)
 
 static void __context_unpin_state(struct i915_vma *vma)
 {
-	__i915_vma_unpin(vma);
 	i915_vma_make_shrinkable(vma);
+	__i915_vma_unpin(vma);
 }
 
 static void __intel_context_retire(struct i915_active *active)
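
The swap in __context_unpin_state() makes the vma shrinkable before dropping the pin, presumably so the shrink-list bookkeeping runs while the vma is still pinned and its backing pages cannot be released underneath it.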

static void __intel_context_retire(struct i915_active *active)