Commit 9da0ea09 authored by Chris Wilson
Browse files

drm/i915/gem: Drop cached obj->bind_count



We cached the number of vma bound to the object in order to speed up
shrinker decisions. This has been superseded by being more proactive in
removing objects we cannot shrink from the shrinker lists, and so we can
drop the clumsy attempt at atomically counting the bind count and
comparing it to the number of pinned mappings of the object. This will
only get clumsier with asynchronous binding and unbinding.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200401223924.16667-1-chris@chris-wilson.co.uk
parent 0d86ee35
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -369,7 +369,7 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
	struct i915_vma *vma;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
	if (!atomic_read(&obj->bind_count))
	if (list_empty(&obj->vma.list))
		return;

	mutex_lock(&i915->ggtt.vm.mutex);
+0 −1
Original line number Diff line number Diff line
@@ -206,7 +206,6 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
		}
		obj->mmo.offsets = RB_ROOT;

		GEM_BUG_ON(atomic_read(&obj->bind_count));
		GEM_BUG_ON(obj->userfault_count);
		GEM_BUG_ON(!list_empty(&obj->lut_list));

+0 −3
Original line number Diff line number Diff line
@@ -179,9 +179,6 @@ struct drm_i915_gem_object {
#define TILING_MASK (FENCE_MINIMUM_STRIDE - 1)
#define STRIDE_MASK (~TILING_MASK)

	/** Count of VMA actually bound by this object */
	atomic_t bind_count;

	struct {
		/*
		 * Protects the pages and their use. Do not use directly, but
+0 −2
Original line number Diff line number Diff line
@@ -199,8 +199,6 @@ int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
	if (i915_gem_object_has_pinned_pages(obj))
		return -EBUSY;

	GEM_BUG_ON(atomic_read(&obj->bind_count));

	/* May be called by shrinker from within get_pages() (on another bo) */
	mutex_lock(&obj->mm.lock);
	if (unlikely(atomic_read(&obj->mm.pages_pin_count))) {
+2 −16
Original line number Diff line number Diff line
@@ -26,18 +26,6 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
	if (!i915_gem_object_is_shrinkable(obj))
		return false;

	/*
	 * Only report true if by unbinding the object and putting its pages
	 * we can actually make forward progress towards freeing physical
	 * pages.
	 *
	 * If the pages are pinned for any other reason than being bound
	 * to the GPU, simply unbinding from the GPU is not going to succeed
	 * in releasing our pin count on the pages themselves.
	 */
	if (atomic_read(&obj->mm.pages_pin_count) > atomic_read(&obj->bind_count))
		return false;

	/*
	 * We can only return physical pages to the system if we can either
	 * discard the contents (because the user has marked them as being
@@ -54,6 +42,8 @@ static bool unsafe_drop_pages(struct drm_i915_gem_object *obj,
	flags = 0;
	if (shrink & I915_SHRINK_ACTIVE)
		flags = I915_GEM_OBJECT_UNBIND_ACTIVE;
	if (!(shrink & I915_SHRINK_BOUND))
		flags = I915_GEM_OBJECT_UNBIND_TEST;

	if (i915_gem_object_unbind(obj, flags) == 0)
		__i915_gem_object_put_pages(obj);
@@ -194,10 +184,6 @@ i915_gem_shrink(struct drm_i915_private *i915,
			    i915_gem_object_is_framebuffer(obj))
				continue;

			if (!(shrink & I915_SHRINK_BOUND) &&
			    atomic_read(&obj->bind_count))
				continue;

			if (!can_release_pages(obj))
				continue;

Loading