Commit 09d7e46b authored by Chris Wilson

drm/i915: Pull VM lists under the VM mutex.



A starting point to counter the pervasive struct_mutex. With the goal of
avoiding (or at least not blocking under!) global locks during user
request submission, a simple but important step is being able to manage
each client's GTT separately. To that end, we want to stop using the
struct_mutex as the guard for all things GTT/VM and switch instead to a
specific mutex inside i915_address_space.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190128102356.15037-2-chris@chris-wilson.co.uk
parent 499197dc
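For orientation before the diffs: the direction of travel is that each i915_address_space carries its own vm->mutex, and the bound/unbound VMA lists are guarded by that per-VM lock instead of the device-wide struct_mutex. The fragment below is only a rough sketch of that shape, using made-up stand-in types (sketch_vm and sketch_vma are not the driver's real structures; the vm back-pointer, pin_count and evict_link fields exist only to serve the later sketches). It mirrors the pattern of the aperture-ioctl hunk that follows.

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/types.h>

/* Hypothetical, cut-down stand-ins for i915_address_space / i915_vma. */
struct sketch_vm {
	struct mutex mutex;		/* guards the VMA lists below */
	struct list_head bound_list;	/* VMAs holding GTT space */
	struct list_head unbound_list;	/* VMAs without GTT space */
	u64 reserved;
};

struct sketch_vma {
	struct sketch_vm *vm;		/* owning address space */
	struct list_head vm_link;	/* link into one of the lists above */
	struct list_head evict_link;	/* scratch link for caller-local lists */
	unsigned int pin_count;
	u64 size;
};

/* Same shape as the reworked i915_gem_get_aperture_ioctl(): walk the
 * bound list under the per-VM mutex rather than under struct_mutex. */
static u64 sketch_count_pinned(struct sketch_vm *vm)
{
	struct sketch_vma *vma;
	u64 pinned;

	mutex_lock(&vm->mutex);
	pinned = vm->reserved;
	list_for_each_entry(vma, &vm->bound_list, vm_link)
		if (vma->pin_count)
			pinned += vma->size;
	mutex_unlock(&vm->mutex);

	return pinned;
}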
+8 −6
@@ -245,18 +245,19 @@ int
 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 			    struct drm_file *file)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct i915_ggtt *ggtt = &dev_priv->ggtt;
+	struct i915_ggtt *ggtt = &to_i915(dev)->ggtt;
 	struct drm_i915_gem_get_aperture *args = data;
 	struct i915_vma *vma;
 	u64 pinned;
 
+	mutex_lock(&ggtt->vm.mutex);
+
 	pinned = ggtt->vm.reserved;
-	mutex_lock(&dev->struct_mutex);
 	list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
 		if (i915_vma_is_pinned(vma))
 			pinned += vma->node.size;
-	mutex_unlock(&dev->struct_mutex);
+
+	mutex_unlock(&ggtt->vm.mutex);
 
 	args->aper_size = ggtt->vm.total;
 	args->aper_available_size = args->aper_size - pinned;
@@ -1529,20 +1530,21 @@ err:
 
 static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
 {
-	struct drm_i915_private *i915;
+	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 	struct list_head *list;
 	struct i915_vma *vma;
 
 	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
 
+	mutex_lock(&i915->ggtt.vm.mutex);
 	for_each_ggtt_vma(vma, obj) {
 		if (!drm_mm_node_allocated(&vma->node))
 			continue;
 
 		list_move_tail(&vma->vm_link, &vma->vm->bound_list);
 	}
+	mutex_unlock(&i915->ggtt.vm.mutex);
 
-	i915 = to_i915(obj->base.dev);
 	spin_lock(&i915->mm.obj_lock);
 	list = obj->bind_count ? &i915->mm.bound_list : &i915->mm.unbound_list;
 	list_move_tail(&obj->mm.link, list);
+2 −0
@@ -430,6 +430,7 @@ int i915_gem_evict_vm(struct i915_address_space *vm)
 	}
 
 	INIT_LIST_HEAD(&eviction_list);
+	mutex_lock(&vm->mutex);
 	list_for_each_entry(vma, &vm->bound_list, vm_link) {
 		if (i915_vma_is_pinned(vma))
 			continue;
@@ -437,6 +438,7 @@ int i915_gem_evict_vm(struct i915_address_space *vm)
 		__i915_vma_pin(vma);
 		list_add(&vma->evict_link, &eviction_list);
 	}
+	mutex_unlock(&vm->mutex);
 
 	ret = 0;
 	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
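The two hunks above change i915_gem_evict_vm() so that candidates are pinned and collected onto the private eviction_list while vm->mutex is held, with the actual unbinding done only after the lock is dropped. Roughly, reusing the stand-in types from the sketch near the top of the page (sketch_unbind() is a made-up placeholder for i915_vma_unbind(), assumed to take vm->mutex itself when it pulls the VMA off the bound list):

/* Placeholder for i915_vma_unbind(): assumed to take vm->mutex internally
 * while it moves the VMA off the bound list, so callers stay unlocked. */
static int sketch_unbind(struct sketch_vma *vma)
{
	mutex_lock(&vma->vm->mutex);
	list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
	mutex_unlock(&vma->vm->mutex);
	return 0;
}

/* Shape of the reworked eviction path: pin and collect candidates onto a
 * caller-local list under vm->mutex, then unbind them after releasing it. */
static int sketch_evict_vm(struct sketch_vm *vm)
{
	struct sketch_vma *vma, *next;
	LIST_HEAD(eviction_list);
	int ret = 0;

	mutex_lock(&vm->mutex);
	list_for_each_entry(vma, &vm->bound_list, vm_link) {
		if (vma->pin_count)
			continue;

		vma->pin_count++;	/* keep it bound while we are unlocked */
		list_add(&vma->evict_link, &eviction_list);
	}
	mutex_unlock(&vm->mutex);

	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
		vma->pin_count--;
		if (!ret)
			ret = sketch_unbind(vma);
	}

	return ret;
}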
+13 −2
@@ -1931,7 +1931,10 @@ static struct i915_vma *pd_vma_create(struct gen6_hw_ppgtt *ppgtt, int size)
 	vma->ggtt_view.type = I915_GGTT_VIEW_ROTATED; /* prevent fencing */
 
 	INIT_LIST_HEAD(&vma->obj_link);
+
+	mutex_lock(&vma->vm->mutex);
 	list_add(&vma->vm_link, &vma->vm->unbound_list);
+	mutex_unlock(&vma->vm->mutex);
 
 	return vma;
 }
@@ -3504,9 +3507,10 @@ void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
 
 	i915_check_and_clear_faults(dev_priv);
 
+	mutex_lock(&ggtt->vm.mutex);
+
 	/* First fill our portion of the GTT with scratch pages */
 	ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
-
 	ggtt->vm.closed = true; /* skip rewriting PTE on VMA unbind */
 
 	/* clflush objects bound into the GGTT and rebind them. */
@@ -3516,19 +3520,26 @@ void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
 		if (!(vma->flags & I915_VMA_GLOBAL_BIND))
 			continue;
 
+		mutex_unlock(&ggtt->vm.mutex);
+
 		if (!i915_vma_unbind(vma))
-			continue;
+			goto lock;
 
 		WARN_ON(i915_vma_bind(vma,
 				      obj ? obj->cache_level : 0,
 				      PIN_UPDATE));
 		if (obj)
 			WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
+
+lock:
+		mutex_lock(&ggtt->vm.mutex);
 	}
 
 	ggtt->vm.closed = false;
 	i915_ggtt_invalidate(dev_priv);
 
+	mutex_unlock(&ggtt->vm.mutex);
+
 	if (INTEL_GEN(dev_priv) >= 8) {
 		struct intel_ppat *ppat = &dev_priv->ppat;
 
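The restore loop above, like the shrinker hunk that follows, cannot keep vm->mutex across its whole walk: it drops the lock around the unbind/rebind step and re-takes it before continuing, presumably because i915_vma_unbind() needs the same mutex to update the VM lists. The goto label lets the rebind be skipped while the lock is still re-acquired on every iteration. A rough sketch of the drop/re-take walk, reusing the stand-in types and sketch_unbind() from the earlier sketches:

/* Drop/re-take pattern from the restore and shrinker hunks: hold
 * vm->mutex for the list walk, release it only around the unbind. */
static void sketch_unbind_idle(struct sketch_vm *vm)
{
	struct sketch_vma *vma, *next;

	mutex_lock(&vm->mutex);
	list_for_each_entry_safe(vma, next, &vm->bound_list, vm_link) {
		if (vma->pin_count)
			continue;

		mutex_unlock(&vm->mutex);
		sketch_unbind(vma);	/* takes vm->mutex itself */
		mutex_lock(&vm->mutex);
		/* 'next' is assumed to stay valid across the unlocked window */
	}
	mutex_unlock(&vm->mutex);
}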
+4 −0
@@ -461,6 +461,7 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
 					       I915_SHRINK_VMAPS);
 
 	/* We also want to clear any cached iomaps as they wrap vmap */
+	mutex_lock(&i915->ggtt.vm.mutex);
 	list_for_each_entry_safe(vma, next,
 				 &i915->ggtt.vm.bound_list, vm_link) {
 		unsigned long count = vma->node.size >> PAGE_SHIFT;
@@ -468,9 +469,12 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
 		if (!vma->iomap || i915_vma_is_active(vma))
 			continue;
 
+		mutex_unlock(&i915->ggtt.vm.mutex);
 		if (i915_vma_unbind(vma) == 0)
 			freed_pages += count;
+		mutex_lock(&i915->ggtt.vm.mutex);
 	}
+	mutex_unlock(&i915->ggtt.vm.mutex);
 
 out:
 	shrinker_unlock(i915, unlock);
+2 −0
@@ -702,7 +702,9 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
 	vma->flags |= I915_VMA_GLOBAL_BIND;
 	__i915_vma_set_map_and_fenceable(vma);
 
+	mutex_lock(&ggtt->vm.mutex);
 	list_move_tail(&vma->vm_link, &ggtt->vm.bound_list);
+	mutex_unlock(&ggtt->vm.mutex);
 
 	spin_lock(&dev_priv->mm.obj_lock);
 	list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);