Commit 3b4fa964 authored by Chris Wilson's avatar Chris Wilson
Browse files

drm/i915: Track the purgeable objects on a separate eviction list



Currently the purgeable objects, I915_MADV_DONTNEED, are mixed in the
normal bound/unbound lists. Every shrinker pass starts with an attempt
to purge from this set of unneeded objects, which entails us doing a
walk over both lists looking for any candidates. If there are none — and
since we are shrinking, we can reasonably assume that the lists are
full — this becomes a very slow, futile walk.

If we separate out the purgeable objects into their own list, this search
then becomes its own phase that is preferentially handled during shrinking.
Instead the cost becomes that we then need to filter the purgeable list
if we want to distinguish between bound and unbound objects.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Matthew Auld <matthew.william.auld@gmail.com>
Reviewed-by: Matthew Auld <matthew.william.auld@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190530203500.26272-1-chris@chris-wilson.co.uk
parent 7ef5ef5c
Loading
Loading
Loading
Loading
+9 −5
Original line number Diff line number Diff line
@@ -462,7 +462,6 @@ err_unpin_global:
static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct list_head *list;
	struct i915_vma *vma;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
@@ -476,11 +475,16 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
	}
	mutex_unlock(&i915->ggtt.vm.mutex);

	if (obj->mm.madv == I915_MADV_WILLNEED) {
		struct list_head *list;

		spin_lock(&i915->mm.obj_lock);
	list = obj->bind_count ? &i915->mm.bound_list : &i915->mm.unbound_list;
		list = obj->bind_count ?
			&i915->mm.bound_list : &i915->mm.unbound_list;
		list_move_tail(&obj->mm.link, list);
		spin_unlock(&i915->mm.obj_lock);
	}
}

void
i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
+10 −1
Original line number Diff line number Diff line
@@ -333,9 +333,18 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
	if (obj->mm.quirked)
		__i915_gem_object_unpin_pages(obj);

	if (discard_backing_storage(obj))
	if (discard_backing_storage(obj)) {
		struct drm_i915_private *i915 = to_i915(obj->base.dev);

		obj->mm.madv = I915_MADV_DONTNEED;

		if (i915_gem_object_has_pages(obj)) {
			spin_lock(&i915->mm.obj_lock);
			list_move_tail(&obj->mm.link, &i915->mm.purge_list);
			spin_unlock(&i915->mm.obj_lock);
		}
	}

	/*
	 * Before we free the object, make sure any pure RCU-only
	 * read-side critical sections are complete, e.g.
+1 −0
Original line number Diff line number Diff line
@@ -164,6 +164,7 @@ void i915_gem_suspend_late(struct drm_i915_private *i915)
	struct list_head *phases[] = {
		&i915->mm.unbound_list,
		&i915->mm.bound_list,
		&i915->mm.purge_list,
		NULL
	}, **phase;

+1 −3
Original line number Diff line number Diff line
@@ -80,9 +80,7 @@ rebuild_st:
	sg_page_sizes = 0;
	for (i = 0; i < page_count; i++) {
		const unsigned int shrink[] = {
			(I915_SHRINK_BOUND |
			 I915_SHRINK_UNBOUND |
			 I915_SHRINK_PURGEABLE),
			I915_SHRINK_BOUND | I915_SHRINK_UNBOUND,
			0,
		}, *s = shrink;
		gfp_t gfp = noreclaim;
+10 −12
Original line number Diff line number Diff line
@@ -144,6 +144,7 @@ i915_gem_shrink(struct drm_i915_private *i915,
		struct list_head *list;
		unsigned int bit;
	} phases[] = {
		{ &i915->mm.purge_list, ~0u },
		{ &i915->mm.unbound_list, I915_SHRINK_UNBOUND },
		{ &i915->mm.bound_list, I915_SHRINK_BOUND },
		{ NULL, 0 },
@@ -226,10 +227,6 @@ i915_gem_shrink(struct drm_i915_private *i915,
						       mm.link))) {
			list_move_tail(&obj->mm.link, &still_in_list);

			if (flags & I915_SHRINK_PURGEABLE &&
			    obj->mm.madv != I915_MADV_DONTNEED)
				continue;

			if (flags & I915_SHRINK_VMAPS &&
			    !is_vmalloc_addr(obj->mm.mapping))
				continue;
@@ -239,6 +236,10 @@ i915_gem_shrink(struct drm_i915_private *i915,
			     i915_gem_object_is_framebuffer(obj)))
				continue;

			if (!(flags & I915_SHRINK_BOUND) &&
			    READ_ONCE(obj->bind_count))
				continue;

			if (!can_release_pages(obj))
				continue;

@@ -324,6 +325,11 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
			count += obj->base.size >> PAGE_SHIFT;
			num_objects++;
		}
	list_for_each_entry(obj, &i915->mm.purge_list, mm.link)
		if (!i915_gem_object_is_active(obj) && can_release_pages(obj)) {
			count += obj->base.size >> PAGE_SHIFT;
			num_objects++;
		}
	spin_unlock(&i915->mm.obj_lock);

	/* Update our preferred vmscan batch size for the next pass.
@@ -361,14 +367,6 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
				&sc->nr_scanned,
				I915_SHRINK_BOUND |
				I915_SHRINK_UNBOUND |
				I915_SHRINK_PURGEABLE |
				I915_SHRINK_WRITEBACK);
	if (sc->nr_scanned < sc->nr_to_scan)
		freed += i915_gem_shrink(i915,
					 sc->nr_to_scan - sc->nr_scanned,
					 &sc->nr_scanned,
					 I915_SHRINK_BOUND |
					 I915_SHRINK_UNBOUND |
				I915_SHRINK_WRITEBACK);
	if (sc->nr_scanned < sc->nr_to_scan && current_is_kswapd()) {
		intel_wakeref_t wakeref;
Loading