Commit a8cff4c8 authored by Chris Wilson

drm/i915: Promote i915->mm.obj_lock to be irqsafe

The intent is to be able to update the mm.lists from inside an irqsoff
section (e.g. from a softirq rcu workqueue), ergo we need to make the
i915->mm.obj_lock irqsafe.
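
The hazard, as an illustrative sketch (not taken from the patch; the
lock and helpers here are stand-ins): a softirq that interrupts a
process-context holder of the same lock on the same CPU will spin
forever, so every process-context acquisition must disable local
interrupts via the irqsave variant.

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(obj_lock);

	/* Before: a softirq taking obj_lock on this CPU while we
	 * hold it would deadlock. */
	static void update_lists_unsafe(void)
	{
		spin_lock(&obj_lock);
		/* ... update mm.lists ... */
		spin_unlock(&obj_lock);
	}

	/* After: local interrupts stay disabled while the lock is
	 * held, so no softirq can preempt us and retake obj_lock. */
	static void update_lists_irqsafe(void)
	{
		unsigned long flags;

		spin_lock_irqsave(&obj_lock, flags);
		/* ... update mm.lists ... */
		spin_unlock_irqrestore(&obj_lock, flags);
	}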

v2: can_discard_pages() ensures we are shrinkable
v3: Beware shadowing of 'flags'
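
The v3 note refers to a real trap in the shrinker conversion below: the
local 'unsigned long flags' required by spin_lock_irqsave() would
silently shadow i915_gem_shrink()'s existing 'unsigned flags' parameter,
which is why that parameter is renamed to 'shrink'. A minimal sketch of
the bug being avoided (hypothetical function, stand-in lock):

	static unsigned long shrink_objects(unsigned flags) /* mode bits */
	{
		unsigned long count = 0;

		for (;;) {
			unsigned long flags; /* shadows the parameter! */

			spin_lock_irqsave(&obj_lock, flags);
			/* a "flags & I915_SHRINK_*" test here would read
			 * the saved irq state, not the mode bits */
			spin_unlock_irqrestore(&obj_lock, flags);
			break;
		}

		return count;
	}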

Fixes: 3b4fa964 ("drm/i915: Track the purgeable objects on a separate eviction list")
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=110869


Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Matthew Auld <matthew.william.auld@gmail.com>
Reviewed-by: Matthew Auld <matthew.william.auld@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190610145430.17717-1-chris@chris-wilson.co.uk
parent f4d57d83
+14 −9
@@ -475,15 +475,20 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
 	}
 	mutex_unlock(&i915->ggtt.vm.mutex);
 
-	if (i915_gem_object_is_shrinkable(obj) &&
-	    obj->mm.madv == I915_MADV_WILLNEED) {
-		struct list_head *list;
+	if (i915_gem_object_is_shrinkable(obj)) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&i915->mm.obj_lock, flags);
+
+		if (obj->mm.madv == I915_MADV_WILLNEED) {
+			struct list_head *list;
 
-		spin_lock(&i915->mm.obj_lock);
-		list = obj->bind_count ?
-			&i915->mm.bound_list : &i915->mm.unbound_list;
-		list_move_tail(&obj->mm.link, list);
-		spin_unlock(&i915->mm.obj_lock);
+			list = obj->bind_count ?
+				&i915->mm.bound_list : &i915->mm.unbound_list;
+			list_move_tail(&obj->mm.link, list);
+		}
+
+		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
 	}
 }

+8 −4
@@ -207,9 +207,11 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
 		 */
 		if (i915_gem_object_has_pages(obj) &&
 		    i915_gem_object_is_shrinkable(obj)) {
-			spin_lock(&i915->mm.obj_lock);
+			unsigned long flags;
+
+			spin_lock_irqsave(&i915->mm.obj_lock, flags);
 			list_del_init(&obj->mm.link);
-			spin_unlock(&i915->mm.obj_lock);
+			spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
 		}
 
 		mutex_unlock(&i915->drm.struct_mutex);
@@ -330,9 +332,11 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 		obj->mm.madv = I915_MADV_DONTNEED;
 
 		if (i915_gem_object_has_pages(obj)) {
-			spin_lock(&i915->mm.obj_lock);
+			unsigned long flags;
+
+			spin_lock_irqsave(&i915->mm.obj_lock, flags);
 			list_move_tail(&obj->mm.link, &i915->mm.purge_list);
-			spin_unlock(&i915->mm.obj_lock);
+			spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
 		}
 	}

+12 −4
@@ -57,11 +57,15 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
 	GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));
 
 	if (i915_gem_object_is_shrinkable(obj)) {
-		spin_lock(&i915->mm.obj_lock);
+		unsigned long flags;
+
+		spin_lock_irqsave(&i915->mm.obj_lock, flags);
+
 		i915->mm.shrink_count++;
 		i915->mm.shrink_memory += obj->base.size;
 		list_add(&obj->mm.link, &i915->mm.unbound_list);
-		spin_unlock(&i915->mm.obj_lock);
+
+		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
 	}
 }

@@ -151,11 +155,15 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
 		return pages;
 
 	if (i915_gem_object_is_shrinkable(obj)) {
-		spin_lock(&i915->mm.obj_lock);
+		unsigned long flags;
+
+		spin_lock_irqsave(&i915->mm.obj_lock, flags);
+
 		list_del(&obj->mm.link);
 		i915->mm.shrink_count--;
 		i915->mm.shrink_memory -= obj->base.size;
-		spin_unlock(&i915->mm.obj_lock);
+
+		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
 	}
 
 	if (obj->mm.mapping) {
+20 −18
@@ -138,7 +138,7 @@ unsigned long
 i915_gem_shrink(struct drm_i915_private *i915,
 		unsigned long target,
 		unsigned long *nr_scanned,
-		unsigned flags)
+		unsigned int shrink)
 {
 	const struct {
 		struct list_head *list;
@@ -154,7 +154,7 @@ i915_gem_shrink(struct drm_i915_private *i915,
 	unsigned long scanned = 0;
 	bool unlock;
 
-	if (!shrinker_lock(i915, flags, &unlock))
+	if (!shrinker_lock(i915, shrink, &unlock))
 		return 0;
 
 	/*
@@ -166,12 +166,12 @@ i915_gem_shrink(struct drm_i915_private *i915,
 	 * We don't care about errors here; if we cannot wait upon the GPU,
 	 * we will free as much as we can and hope to get a second chance.
 	 */
-	if (flags & I915_SHRINK_ACTIVE)
+	if (shrink & I915_SHRINK_ACTIVE)
 		i915_gem_wait_for_idle(i915,
 				       I915_WAIT_LOCKED,
 				       MAX_SCHEDULE_TIMEOUT);
 
-	trace_i915_gem_shrink(i915, target, flags);
+	trace_i915_gem_shrink(i915, target, shrink);
 	i915_retire_requests(i915);
 
 	/*
@@ -179,10 +179,10 @@ i915_gem_shrink(struct drm_i915_private *i915,
 	 * device just to recover a little memory. If absolutely necessary,
 	 * we will force the wake during oom-notifier.
 	 */
-	if (flags & I915_SHRINK_BOUND) {
+	if (shrink & I915_SHRINK_BOUND) {
 		wakeref = intel_runtime_pm_get_if_in_use(i915);
 		if (!wakeref)
-			flags &= ~I915_SHRINK_BOUND;
+			shrink &= ~I915_SHRINK_BOUND;
 	}
 
 	/*
@@ -207,8 +207,9 @@ i915_gem_shrink(struct drm_i915_private *i915,
 	for (phase = phases; phase->list; phase++) {
 		struct list_head still_in_list;
 		struct drm_i915_gem_object *obj;
+		unsigned long flags;
 
-		if ((flags & phase->bit) == 0)
+		if ((shrink & phase->bit) == 0)
 			continue;
 
 		INIT_LIST_HEAD(&still_in_list);
@@ -220,50 +221,50 @@ i915_gem_shrink(struct drm_i915_private *i915,
 		 * to be able to shrink their pages, so they remain on
 		 * the unbound/bound list until actually freed.
 		 */
-		spin_lock(&i915->mm.obj_lock);
+		spin_lock_irqsave(&i915->mm.obj_lock, flags);
 		while (count < target &&
 		       (obj = list_first_entry_or_null(phase->list,
 						       typeof(*obj),
 						       mm.link))) {
 			list_move_tail(&obj->mm.link, &still_in_list);
 
-			if (flags & I915_SHRINK_VMAPS &&
+			if (shrink & I915_SHRINK_VMAPS &&
 			    !is_vmalloc_addr(obj->mm.mapping))
 				continue;
 
-			if (!(flags & I915_SHRINK_ACTIVE) &&
+			if (!(shrink & I915_SHRINK_ACTIVE) &&
 			    (i915_gem_object_is_active(obj) ||
 			     i915_gem_object_is_framebuffer(obj)))
 				continue;
 
-			if (!(flags & I915_SHRINK_BOUND) &&
+			if (!(shrink & I915_SHRINK_BOUND) &&
 			    READ_ONCE(obj->bind_count))
 				continue;
 
 			if (!can_release_pages(obj))
 				continue;
 
-			spin_unlock(&i915->mm.obj_lock);
+			spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
 
 			if (unsafe_drop_pages(obj)) {
 				/* May arrive from get_pages on another bo */
 				mutex_lock_nested(&obj->mm.lock,
 						  I915_MM_SHRINKER);
 				if (!i915_gem_object_has_pages(obj)) {
-					try_to_writeback(obj, flags);
+					try_to_writeback(obj, shrink);
 					count += obj->base.size >> PAGE_SHIFT;
 				}
 				mutex_unlock(&obj->mm.lock);
 			}
 			scanned += obj->base.size >> PAGE_SHIFT;
 
-			spin_lock(&i915->mm.obj_lock);
+			spin_lock_irqsave(&i915->mm.obj_lock, flags);
 		}
 		list_splice_tail(&still_in_list, phase->list);
-		spin_unlock(&i915->mm.obj_lock);
+		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
 	}
 
-	if (flags & I915_SHRINK_BOUND)
+	if (shrink & I915_SHRINK_BOUND)
 		intel_runtime_pm_put(i915, wakeref);
 
 	i915_retire_requests(i915);
@@ -379,6 +380,7 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
 	struct drm_i915_gem_object *obj;
 	unsigned long unevictable, bound, unbound, freed_pages;
 	intel_wakeref_t wakeref;
+	unsigned long flags;
 
 	freed_pages = 0;
 	with_intel_runtime_pm(i915, wakeref)
@@ -392,7 +394,7 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
 	 * being pointed to by hardware.
 	 */
 	unbound = bound = unevictable = 0;
-	spin_lock(&i915->mm.obj_lock);
+	spin_lock_irqsave(&i915->mm.obj_lock, flags);
 	list_for_each_entry(obj, &i915->mm.unbound_list, mm.link) {
 		if (!can_release_pages(obj))
 			unevictable += obj->base.size >> PAGE_SHIFT;
@@ -405,7 +407,7 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
 		else
 			bound += obj->base.size >> PAGE_SHIFT;
 	}
-	spin_unlock(&i915->mm.obj_lock);
+	spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
 
 	if (freed_pages || unbound || bound)
 		pr_info("Purging GPU memory, %lu pages freed, "
+3 −2
@@ -613,6 +613,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
 	struct drm_i915_gem_object *obj;
 	struct drm_mm_node *stolen;
 	struct i915_vma *vma;
+	unsigned long flags;
 	int ret;
 
 	if (!drm_mm_initialized(&dev_priv->mm.stolen))
@@ -689,10 +690,10 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
 	list_move_tail(&vma->vm_link, &ggtt->vm.bound_list);
 	mutex_unlock(&ggtt->vm.mutex);
 
-	spin_lock(&dev_priv->mm.obj_lock);
+	spin_lock_irqsave(&dev_priv->mm.obj_lock, flags);
 	GEM_BUG_ON(i915_gem_object_is_shrinkable(obj));
 	obj->bind_count++;
-	spin_unlock(&dev_priv->mm.obj_lock);
+	spin_unlock_irqrestore(&dev_priv->mm.obj_lock, flags);
 
 	return obj;
