Commit 538ef96b authored by Chris Wilson
Browse files

drm/i915/gem: Track the rpm wakerefs



Keep track of the temporary rpm wakerefs used for user access to the
device, so that we can cancel them upon release and clearly identify any
leaks.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Jani Nikula <jani.nikula@intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190114142129.24398-10-chris@chris-wilson.co.uk
parent 3055f0cd
Loading
Loading
Loading
Loading
+29 −18
Original line number Original line Diff line number Diff line
@@ -785,6 +785,8 @@ fb_write_origin(struct drm_i915_gem_object *obj, unsigned int domain)


void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv)
void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv)
{
{
	intel_wakeref_t wakeref;

	/*
	/*
	 * No actual flushing is required for the GTT write domain for reads
	 * No actual flushing is required for the GTT write domain for reads
	 * from the GTT domain. Writes to it "immediately" go to main memory
	 * from the GTT domain. Writes to it "immediately" go to main memory
@@ -811,13 +813,13 @@ void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv)


	i915_gem_chipset_flush(dev_priv);
	i915_gem_chipset_flush(dev_priv);


	intel_runtime_pm_get(dev_priv);
	wakeref = intel_runtime_pm_get(dev_priv);
	spin_lock_irq(&dev_priv->uncore.lock);
	spin_lock_irq(&dev_priv->uncore.lock);


	POSTING_READ_FW(RING_HEAD(RENDER_RING_BASE));
	POSTING_READ_FW(RING_HEAD(RENDER_RING_BASE));


	spin_unlock_irq(&dev_priv->uncore.lock);
	spin_unlock_irq(&dev_priv->uncore.lock);
	intel_runtime_pm_put_unchecked(dev_priv);
	intel_runtime_pm_put(dev_priv, wakeref);
}
}


static void
static void
@@ -1069,6 +1071,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
{
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct i915_ggtt *ggtt = &i915->ggtt;
	intel_wakeref_t wakeref;
	struct drm_mm_node node;
	struct drm_mm_node node;
	struct i915_vma *vma;
	struct i915_vma *vma;
	void __user *user_data;
	void __user *user_data;
@@ -1079,7 +1082,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
	if (ret)
	if (ret)
		return ret;
		return ret;


	intel_runtime_pm_get(i915);
	wakeref = intel_runtime_pm_get(i915);
	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
				       PIN_MAPPABLE |
				       PIN_MAPPABLE |
				       PIN_NONFAULT |
				       PIN_NONFAULT |
@@ -1152,7 +1155,7 @@ out_unpin:
		i915_vma_unpin(vma);
		i915_vma_unpin(vma);
	}
	}
out_unlock:
out_unlock:
	intel_runtime_pm_put_unchecked(i915);
	intel_runtime_pm_put(i915, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	mutex_unlock(&i915->drm.struct_mutex);


	return ret;
	return ret;
@@ -1253,6 +1256,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
{
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct i915_ggtt *ggtt = &i915->ggtt;
	intel_wakeref_t wakeref;
	struct drm_mm_node node;
	struct drm_mm_node node;
	struct i915_vma *vma;
	struct i915_vma *vma;
	u64 remain, offset;
	u64 remain, offset;
@@ -1271,13 +1275,14 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
		 * This easily dwarfs any performance advantage from
		 * This easily dwarfs any performance advantage from
		 * using the cache bypass of indirect GGTT access.
		 * using the cache bypass of indirect GGTT access.
		 */
		 */
		if (!intel_runtime_pm_get_if_in_use(i915)) {
		wakeref = intel_runtime_pm_get_if_in_use(i915);
		if (!wakeref) {
			ret = -EFAULT;
			ret = -EFAULT;
			goto out_unlock;
			goto out_unlock;
		}
		}
	} else {
	} else {
		/* No backing pages, no fallback, we must force GGTT access */
		/* No backing pages, no fallback, we must force GGTT access */
		intel_runtime_pm_get(i915);
		wakeref = intel_runtime_pm_get(i915);
	}
	}


	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
@@ -1359,7 +1364,7 @@ out_unpin:
		i915_vma_unpin(vma);
		i915_vma_unpin(vma);
	}
	}
out_rpm:
out_rpm:
	intel_runtime_pm_put_unchecked(i915);
	intel_runtime_pm_put(i915, wakeref);
out_unlock:
out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	mutex_unlock(&i915->drm.struct_mutex);
	return ret;
	return ret;
@@ -1864,6 +1869,7 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	bool write = area->vm_flags & VM_WRITE;
	bool write = area->vm_flags & VM_WRITE;
	intel_wakeref_t wakeref;
	struct i915_vma *vma;
	struct i915_vma *vma;
	pgoff_t page_offset;
	pgoff_t page_offset;
	int ret;
	int ret;
@@ -1893,7 +1899,7 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
	if (ret)
	if (ret)
		goto err;
		goto err;


	intel_runtime_pm_get(dev_priv);
	wakeref = intel_runtime_pm_get(dev_priv);


	ret = i915_mutex_lock_interruptible(dev);
	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
	if (ret)
@@ -1971,7 +1977,7 @@ err_unpin:
err_unlock:
err_unlock:
	mutex_unlock(&dev->struct_mutex);
	mutex_unlock(&dev->struct_mutex);
err_rpm:
err_rpm:
	intel_runtime_pm_put_unchecked(dev_priv);
	intel_runtime_pm_put(dev_priv, wakeref);
	i915_gem_object_unpin_pages(obj);
	i915_gem_object_unpin_pages(obj);
err:
err:
	switch (ret) {
	switch (ret) {
@@ -2044,6 +2050,7 @@ void
i915_gem_release_mmap(struct drm_i915_gem_object *obj)
i915_gem_release_mmap(struct drm_i915_gem_object *obj)
{
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	intel_wakeref_t wakeref;


	/* Serialisation between user GTT access and our code depends upon
	/* Serialisation between user GTT access and our code depends upon
	 * revoking the CPU's PTE whilst the mutex is held. The next user
	 * revoking the CPU's PTE whilst the mutex is held. The next user
@@ -2054,7 +2061,7 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
	 * wakeref.
	 * wakeref.
	 */
	 */
	lockdep_assert_held(&i915->drm.struct_mutex);
	lockdep_assert_held(&i915->drm.struct_mutex);
	intel_runtime_pm_get(i915);
	wakeref = intel_runtime_pm_get(i915);


	if (!obj->userfault_count)
	if (!obj->userfault_count)
		goto out;
		goto out;
@@ -2071,7 +2078,7 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
	wmb();
	wmb();


out:
out:
	intel_runtime_pm_put_unchecked(i915);
	intel_runtime_pm_put(i915, wakeref);
}
}


void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
@@ -4706,8 +4713,9 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
				    struct llist_node *freed)
				    struct llist_node *freed)
{
{
	struct drm_i915_gem_object *obj, *on;
	struct drm_i915_gem_object *obj, *on;
	intel_wakeref_t wakeref;


	intel_runtime_pm_get(i915);
	wakeref = intel_runtime_pm_get(i915);
	llist_for_each_entry_safe(obj, on, freed, freed) {
	llist_for_each_entry_safe(obj, on, freed, freed) {
		struct i915_vma *vma, *vn;
		struct i915_vma *vma, *vn;


@@ -4768,7 +4776,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
		if (on)
		if (on)
			cond_resched();
			cond_resched();
	}
	}
	intel_runtime_pm_put_unchecked(i915);
	intel_runtime_pm_put(i915, wakeref);
}
}


static void i915_gem_flush_free_objects(struct drm_i915_private *i915)
static void i915_gem_flush_free_objects(struct drm_i915_private *i915)
@@ -4877,11 +4885,13 @@ void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj)


void i915_gem_sanitize(struct drm_i915_private *i915)
void i915_gem_sanitize(struct drm_i915_private *i915)
{
{
	intel_wakeref_t wakeref;

	GEM_TRACE("\n");
	GEM_TRACE("\n");


	mutex_lock(&i915->drm.struct_mutex);
	mutex_lock(&i915->drm.struct_mutex);


	intel_runtime_pm_get(i915);
	wakeref = intel_runtime_pm_get(i915);
	intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);
	intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);


	/*
	/*
@@ -4904,7 +4914,7 @@ void i915_gem_sanitize(struct drm_i915_private *i915)
	intel_engines_sanitize(i915, false);
	intel_engines_sanitize(i915, false);


	intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);
	intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);
	intel_runtime_pm_put_unchecked(i915);
	intel_runtime_pm_put(i915, wakeref);


	i915_gem_contexts_lost(i915);
	i915_gem_contexts_lost(i915);
	mutex_unlock(&i915->drm.struct_mutex);
	mutex_unlock(&i915->drm.struct_mutex);
@@ -4912,11 +4922,12 @@ void i915_gem_sanitize(struct drm_i915_private *i915)


int i915_gem_suspend(struct drm_i915_private *i915)
int i915_gem_suspend(struct drm_i915_private *i915)
{
{
	intel_wakeref_t wakeref;
	int ret;
	int ret;


	GEM_TRACE("\n");
	GEM_TRACE("\n");


	intel_runtime_pm_get(i915);
	wakeref = intel_runtime_pm_get(i915);
	intel_suspend_gt_powersave(i915);
	intel_suspend_gt_powersave(i915);


	mutex_lock(&i915->drm.struct_mutex);
	mutex_lock(&i915->drm.struct_mutex);
@@ -4968,12 +4979,12 @@ int i915_gem_suspend(struct drm_i915_private *i915)
	if (WARN_ON(!intel_engines_are_idle(i915)))
	if (WARN_ON(!intel_engines_are_idle(i915)))
		i915_gem_set_wedged(i915); /* no hope, discard everything */
		i915_gem_set_wedged(i915); /* no hope, discard everything */


	intel_runtime_pm_put_unchecked(i915);
	intel_runtime_pm_put(i915, wakeref);
	return 0;
	return 0;


err_unlock:
err_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	mutex_unlock(&i915->drm.struct_mutex);
	intel_runtime_pm_put_unchecked(i915);
	intel_runtime_pm_put(i915, wakeref);
	return ret;
	return ret;
}
}


+3 −2
Original line number Original line Diff line number Diff line
@@ -2202,6 +2202,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
	struct i915_execbuffer eb;
	struct i915_execbuffer eb;
	struct dma_fence *in_fence = NULL;
	struct dma_fence *in_fence = NULL;
	struct sync_file *out_fence = NULL;
	struct sync_file *out_fence = NULL;
	intel_wakeref_t wakeref;
	int out_fence_fd = -1;
	int out_fence_fd = -1;
	int err;
	int err;


@@ -2272,7 +2273,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
	 * wakeref that we hold until the GPU has been idle for at least
	 * wakeref that we hold until the GPU has been idle for at least
	 * 100ms.
	 * 100ms.
	 */
	 */
	intel_runtime_pm_get(eb.i915);
	wakeref = intel_runtime_pm_get(eb.i915);


	err = i915_mutex_lock_interruptible(dev);
	err = i915_mutex_lock_interruptible(dev);
	if (err)
	if (err)
@@ -2424,7 +2425,7 @@ err_vma:
		eb_release_vmas(&eb);
		eb_release_vmas(&eb);
	mutex_unlock(&dev->struct_mutex);
	mutex_unlock(&dev->struct_mutex);
err_rpm:
err_rpm:
	intel_runtime_pm_put_unchecked(eb.i915);
	intel_runtime_pm_put(eb.i915, wakeref);
	i915_gem_context_put(eb.ctx);
	i915_gem_context_put(eb.ctx);
err_destroy:
err_destroy:
	eb_destroy(&eb);
	eb_destroy(&eb);
+4 −2
Original line number Original line Diff line number Diff line
@@ -209,6 +209,7 @@ static void fence_write(struct drm_i915_fence_reg *fence,
static int fence_update(struct drm_i915_fence_reg *fence,
static int fence_update(struct drm_i915_fence_reg *fence,
			struct i915_vma *vma)
			struct i915_vma *vma)
{
{
	intel_wakeref_t wakeref;
	int ret;
	int ret;


	if (vma) {
	if (vma) {
@@ -256,9 +257,10 @@ static int fence_update(struct drm_i915_fence_reg *fence,
	 * If the device is currently powered down, we will defer the write
	 * If the device is currently powered down, we will defer the write
	 * to the runtime resume, see i915_gem_restore_fences().
	 * to the runtime resume, see i915_gem_restore_fences().
	 */
	 */
	if (intel_runtime_pm_get_if_in_use(fence->i915)) {
	wakeref = intel_runtime_pm_get_if_in_use(fence->i915);
	if (wakeref) {
		fence_write(fence, vma);
		fence_write(fence, vma);
		intel_runtime_pm_put_unchecked(fence->i915);
		intel_runtime_pm_put(fence->i915, wakeref);
	}
	}


	if (vma) {
	if (vma) {
+14 −8
Original line number Original line Diff line number Diff line
@@ -2527,6 +2527,7 @@ static int ggtt_bind_vma(struct i915_vma *vma,
{
{
	struct drm_i915_private *i915 = vma->vm->i915;
	struct drm_i915_private *i915 = vma->vm->i915;
	struct drm_i915_gem_object *obj = vma->obj;
	struct drm_i915_gem_object *obj = vma->obj;
	intel_wakeref_t wakeref;
	u32 pte_flags;
	u32 pte_flags;


	/* Applicable to VLV (gen8+ do not support RO in the GGTT) */
	/* Applicable to VLV (gen8+ do not support RO in the GGTT) */
@@ -2534,9 +2535,9 @@ static int ggtt_bind_vma(struct i915_vma *vma,
	if (i915_gem_object_is_readonly(obj))
	if (i915_gem_object_is_readonly(obj))
		pte_flags |= PTE_READ_ONLY;
		pte_flags |= PTE_READ_ONLY;


	intel_runtime_pm_get(i915);
	wakeref = intel_runtime_pm_get(i915);
	vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
	vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
	intel_runtime_pm_put_unchecked(i915);
	intel_runtime_pm_put(i915, wakeref);


	vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
	vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;


@@ -2553,10 +2554,11 @@ static int ggtt_bind_vma(struct i915_vma *vma,
static void ggtt_unbind_vma(struct i915_vma *vma)
static void ggtt_unbind_vma(struct i915_vma *vma)
{
{
	struct drm_i915_private *i915 = vma->vm->i915;
	struct drm_i915_private *i915 = vma->vm->i915;
	intel_wakeref_t wakeref;


	intel_runtime_pm_get(i915);
	wakeref = intel_runtime_pm_get(i915);
	vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
	vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
	intel_runtime_pm_put_unchecked(i915);
	intel_runtime_pm_put(i915, wakeref);
}
}


static int aliasing_gtt_bind_vma(struct i915_vma *vma,
static int aliasing_gtt_bind_vma(struct i915_vma *vma,
@@ -2588,9 +2590,11 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
	}
	}


	if (flags & I915_VMA_GLOBAL_BIND) {
	if (flags & I915_VMA_GLOBAL_BIND) {
		intel_runtime_pm_get(i915);
		intel_wakeref_t wakeref;

		wakeref = intel_runtime_pm_get(i915);
		vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
		vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
		intel_runtime_pm_put_unchecked(i915);
		intel_runtime_pm_put(i915, wakeref);
	}
	}


	return 0;
	return 0;
@@ -2601,9 +2605,11 @@ static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
	struct drm_i915_private *i915 = vma->vm->i915;
	struct drm_i915_private *i915 = vma->vm->i915;


	if (vma->flags & I915_VMA_GLOBAL_BIND) {
	if (vma->flags & I915_VMA_GLOBAL_BIND) {
		intel_runtime_pm_get(i915);
		intel_wakeref_t wakeref;

		wakeref = intel_runtime_pm_get(i915);
		vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
		vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
		intel_runtime_pm_put_unchecked(i915);
		intel_runtime_pm_put(i915, wakeref);
	}
	}


	if (vma->flags & I915_VMA_LOCAL_BIND) {
	if (vma->flags & I915_VMA_LOCAL_BIND) {
+20 −12
Original line number Original line Diff line number Diff line
@@ -153,6 +153,7 @@ i915_gem_shrink(struct drm_i915_private *i915,
		{ &i915->mm.bound_list, I915_SHRINK_BOUND },
		{ &i915->mm.bound_list, I915_SHRINK_BOUND },
		{ NULL, 0 },
		{ NULL, 0 },
	}, *phase;
	}, *phase;
	intel_wakeref_t wakeref = 0;
	unsigned long count = 0;
	unsigned long count = 0;
	unsigned long scanned = 0;
	unsigned long scanned = 0;
	bool unlock;
	bool unlock;
@@ -182,9 +183,11 @@ i915_gem_shrink(struct drm_i915_private *i915,
	 * device just to recover a little memory. If absolutely necessary,
	 * device just to recover a little memory. If absolutely necessary,
	 * we will force the wake during oom-notifier.
	 * we will force the wake during oom-notifier.
	 */
	 */
	if ((flags & I915_SHRINK_BOUND) &&
	if (flags & I915_SHRINK_BOUND) {
	    !intel_runtime_pm_get_if_in_use(i915))
		wakeref = intel_runtime_pm_get_if_in_use(i915);
		if (!wakeref)
			flags &= ~I915_SHRINK_BOUND;
			flags &= ~I915_SHRINK_BOUND;
	}


	/*
	/*
	 * As we may completely rewrite the (un)bound list whilst unbinding
	 * As we may completely rewrite the (un)bound list whilst unbinding
@@ -265,7 +268,7 @@ i915_gem_shrink(struct drm_i915_private *i915,
	}
	}


	if (flags & I915_SHRINK_BOUND)
	if (flags & I915_SHRINK_BOUND)
		intel_runtime_pm_put_unchecked(i915);
		intel_runtime_pm_put(i915, wakeref);


	i915_retire_requests(i915);
	i915_retire_requests(i915);


@@ -292,14 +295,15 @@ i915_gem_shrink(struct drm_i915_private *i915,
 */
 */
unsigned long i915_gem_shrink_all(struct drm_i915_private *i915)
unsigned long i915_gem_shrink_all(struct drm_i915_private *i915)
{
{
	intel_wakeref_t wakeref;
	unsigned long freed;
	unsigned long freed;


	intel_runtime_pm_get(i915);
	wakeref = intel_runtime_pm_get(i915);
	freed = i915_gem_shrink(i915, -1UL, NULL,
	freed = i915_gem_shrink(i915, -1UL, NULL,
				I915_SHRINK_BOUND |
				I915_SHRINK_BOUND |
				I915_SHRINK_UNBOUND |
				I915_SHRINK_UNBOUND |
				I915_SHRINK_ACTIVE);
				I915_SHRINK_ACTIVE);
	intel_runtime_pm_put_unchecked(i915);
	intel_runtime_pm_put(i915, wakeref);


	return freed;
	return freed;
}
}
@@ -370,14 +374,16 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
					 I915_SHRINK_BOUND |
					 I915_SHRINK_BOUND |
					 I915_SHRINK_UNBOUND);
					 I915_SHRINK_UNBOUND);
	if (sc->nr_scanned < sc->nr_to_scan && current_is_kswapd()) {
	if (sc->nr_scanned < sc->nr_to_scan && current_is_kswapd()) {
		intel_runtime_pm_get(i915);
		intel_wakeref_t wakeref;

		wakeref = intel_runtime_pm_get(i915);
		freed += i915_gem_shrink(i915,
		freed += i915_gem_shrink(i915,
					 sc->nr_to_scan - sc->nr_scanned,
					 sc->nr_to_scan - sc->nr_scanned,
					 &sc->nr_scanned,
					 &sc->nr_scanned,
					 I915_SHRINK_ACTIVE |
					 I915_SHRINK_ACTIVE |
					 I915_SHRINK_BOUND |
					 I915_SHRINK_BOUND |
					 I915_SHRINK_UNBOUND);
					 I915_SHRINK_UNBOUND);
		intel_runtime_pm_put_unchecked(i915);
		intel_runtime_pm_put(i915, wakeref);
	}
	}


	shrinker_unlock(i915, unlock);
	shrinker_unlock(i915, unlock);
@@ -392,12 +398,13 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
		container_of(nb, struct drm_i915_private, mm.oom_notifier);
		container_of(nb, struct drm_i915_private, mm.oom_notifier);
	struct drm_i915_gem_object *obj;
	struct drm_i915_gem_object *obj;
	unsigned long unevictable, bound, unbound, freed_pages;
	unsigned long unevictable, bound, unbound, freed_pages;
	intel_wakeref_t wakeref;


	intel_runtime_pm_get(i915);
	wakeref = intel_runtime_pm_get(i915);
	freed_pages = i915_gem_shrink(i915, -1UL, NULL,
	freed_pages = i915_gem_shrink(i915, -1UL, NULL,
				      I915_SHRINK_BOUND |
				      I915_SHRINK_BOUND |
				      I915_SHRINK_UNBOUND);
				      I915_SHRINK_UNBOUND);
	intel_runtime_pm_put_unchecked(i915);
	intel_runtime_pm_put(i915, wakeref);


	/* Because we may be allocating inside our own driver, we cannot
	/* Because we may be allocating inside our own driver, we cannot
	 * assert that there are no objects with pinned pages that are not
	 * assert that there are no objects with pinned pages that are not
@@ -435,6 +442,7 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
		container_of(nb, struct drm_i915_private, mm.vmap_notifier);
		container_of(nb, struct drm_i915_private, mm.vmap_notifier);
	struct i915_vma *vma, *next;
	struct i915_vma *vma, *next;
	unsigned long freed_pages = 0;
	unsigned long freed_pages = 0;
	intel_wakeref_t wakeref;
	bool unlock;
	bool unlock;


	if (!shrinker_lock(i915, 0, &unlock))
	if (!shrinker_lock(i915, 0, &unlock))
@@ -446,12 +454,12 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
				   MAX_SCHEDULE_TIMEOUT))
				   MAX_SCHEDULE_TIMEOUT))
		goto out;
		goto out;


	intel_runtime_pm_get(i915);
	wakeref = intel_runtime_pm_get(i915);
	freed_pages += i915_gem_shrink(i915, -1UL, NULL,
	freed_pages += i915_gem_shrink(i915, -1UL, NULL,
				       I915_SHRINK_BOUND |
				       I915_SHRINK_BOUND |
				       I915_SHRINK_UNBOUND |
				       I915_SHRINK_UNBOUND |
				       I915_SHRINK_VMAPS);
				       I915_SHRINK_VMAPS);
	intel_runtime_pm_put_unchecked(i915);
	intel_runtime_pm_put(i915, wakeref);


	/* We also want to clear any cached iomaps as they wrap vmap */
	/* We also want to clear any cached iomaps as they wrap vmap */
	list_for_each_entry_safe(vma, next,
	list_for_each_entry_safe(vma, next,
Loading