Commit ecab9be1 authored by Chris Wilson's avatar Chris Wilson
Browse files

drm/i915: Combine unbound/bound list tracking for objects



With async binding, we don't want to manage a bound/unbound list as we
may end up running before we even acquire the pages. All that is
required is keeping track of shrinkable objects, so reduce it to the
minimum list.

Fixes: 6951e589 ("drm/i915: Move GEM object domain management from struct_mutex to local")
Signed-off-by: default avatarChris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: default avatarMatthew Auld <matthew.william.auld@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190612105720.30310-1-chris@chris-wilson.co.uk
parent 6ce1c33d
Loading
Loading
Loading
Loading
+3 −8
Original line number Diff line number Diff line
@@ -219,7 +219,7 @@ restart:
	 * rewrite the PTE in the belief that doing so tramples upon less
	 * state and so involves less work.
	 */
	if (obj->bind_count) {
	if (atomic_read(&obj->bind_count)) {
		/* Before we change the PTE, the GPU must not be accessing it.
		 * If we wait upon the object, we know that all the bound
		 * VMA are no longer active.
@@ -480,13 +480,8 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)

		spin_lock_irqsave(&i915->mm.obj_lock, flags);

		if (obj->mm.madv == I915_MADV_WILLNEED) {
			struct list_head *list;

			list = obj->bind_count ?
				&i915->mm.bound_list : &i915->mm.unbound_list;
			list_move_tail(&obj->mm.link, list);
		}
		if (obj->mm.madv == I915_MADV_WILLNEED)
			list_move_tail(&obj->mm.link, &i915->mm.shrink_list);

		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
	}
+1 −1
Original line number Diff line number Diff line
@@ -216,7 +216,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,

		mutex_unlock(&i915->drm.struct_mutex);

		GEM_BUG_ON(obj->bind_count);
		GEM_BUG_ON(atomic_read(&obj->bind_count));
		GEM_BUG_ON(obj->userfault_count);
		GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits));
		GEM_BUG_ON(!list_empty(&obj->lut_list));
+1 −1
Original line number Diff line number Diff line
@@ -155,7 +155,7 @@ struct drm_i915_gem_object {
#define STRIDE_MASK (~TILING_MASK)

	/** Count of VMA actually bound by this object */
	unsigned int bind_count;
	atomic_t bind_count;
	unsigned int active_count;
	/** Count of how many global VMA are currently pinned for use by HW */
	unsigned int pin_global;
+8 −2
Original line number Diff line number Diff line
@@ -57,13 +57,19 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
	GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));

	if (i915_gem_object_is_shrinkable(obj)) {
		struct list_head *list;
		unsigned long flags;

		spin_lock_irqsave(&i915->mm.obj_lock, flags);

		i915->mm.shrink_count++;
		i915->mm.shrink_memory += obj->base.size;
		list_add(&obj->mm.link, &i915->mm.unbound_list);

		if (obj->mm.madv != I915_MADV_WILLNEED)
			list = &i915->mm.purge_list;
		else
			list = &i915->mm.shrink_list;
		list_add_tail(&obj->mm.link, list);

		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
	}
@@ -193,7 +199,7 @@ int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
	if (i915_gem_object_has_pinned_pages(obj))
		return -EBUSY;

	GEM_BUG_ON(obj->bind_count);
	GEM_BUG_ON(atomic_read(&obj->bind_count));

	/* May be called by shrinker from within get_pages() (on another bo) */
	mutex_lock_nested(&obj->mm.lock, subclass);
+27 −3
Original line number Diff line number Diff line
@@ -158,15 +158,22 @@ void i915_gem_suspend(struct drm_i915_private *i915)
	intel_uc_suspend(i915);
}

static struct drm_i915_gem_object *first_mm_object(struct list_head *list)
{
	return list_first_entry_or_null(list,
					struct drm_i915_gem_object,
					mm.link);
}

void i915_gem_suspend_late(struct drm_i915_private *i915)
{
	struct drm_i915_gem_object *obj;
	struct list_head *phases[] = {
		&i915->mm.unbound_list,
		&i915->mm.bound_list,
		&i915->mm.shrink_list,
		&i915->mm.purge_list,
		NULL
	}, **phase;
	unsigned long flags;

	/*
	 * Neither the BIOS, ourselves or any other kernel
@@ -188,13 +195,30 @@ void i915_gem_suspend_late(struct drm_i915_private *i915)
	 * machine in an unusable condition.
	 */

	spin_lock_irqsave(&i915->mm.obj_lock, flags);
	for (phase = phases; *phase; phase++) {
		list_for_each_entry(obj, *phase, mm.link) {
		LIST_HEAD(keep);

		while ((obj = first_mm_object(*phase))) {
			list_move_tail(&obj->mm.link, &keep);

			/* Beware the background _i915_gem_free_objects */
			if (!kref_get_unless_zero(&obj->base.refcount))
				continue;

			spin_unlock_irqrestore(&i915->mm.obj_lock, flags);

			i915_gem_object_lock(obj);
			WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
			i915_gem_object_unlock(obj);
			i915_gem_object_put(obj);

			spin_lock_irqsave(&i915->mm.obj_lock, flags);
		}

		list_splice_tail(&keep, *phase);
	}
	spin_unlock_irqrestore(&i915->mm.obj_lock, flags);

	intel_uc_sanitize(i915);
	i915_gem_sanitize(i915);
Loading