Commit d984457b authored by Rob Clark
Browse files

drm/msm: Add priv->mm_lock to protect active/inactive lists



Rather than relying on the big dev->struct_mutex hammer, introduce a
more specific lock for protecting the bo lists.

Signed-off-by: Rob Clark <robdclark@chromium.org>
Reviewed-by: Jordan Crouse <jcrouse@codeaurora.org>
Reviewed-by: Kristian H. Kristensen <hoegsberg@google.com>
Signed-off-by: Rob Clark <robdclark@chromium.org>
parent 2a86efb1
Loading
Loading
Loading
Loading
+7 −0
Original line number Diff line number Diff line
@@ -112,6 +112,11 @@ static int msm_gem_show(struct drm_device *dev, struct seq_file *m)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gpu *gpu = priv->gpu;
	int ret;

	ret = mutex_lock_interruptible(&priv->mm_lock);
	if (ret)
		return ret;

	if (gpu) {
		seq_printf(m, "Active Objects (%s):\n", gpu->name);
@@ -121,6 +126,8 @@ static int msm_gem_show(struct drm_device *dev, struct seq_file *m)
	seq_printf(m, "Inactive Objects:\n");
	msm_gem_describe_objects(&priv->inactive_list, m);

	mutex_unlock(&priv->mm_lock);

	return 0;
}

+7 −0
Original line number Diff line number Diff line
@@ -7,6 +7,7 @@

#include <linux/dma-mapping.h>
#include <linux/kthread.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <uapi/linux/sched/types.h>

@@ -441,6 +442,12 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
	init_llist_head(&priv->free_list);

	INIT_LIST_HEAD(&priv->inactive_list);
	mutex_init(&priv->mm_lock);

	/* Teach lockdep about lock ordering wrt. shrinker: */
	fs_reclaim_acquire(GFP_KERNEL);
	might_lock(&priv->mm_lock);
	fs_reclaim_release(GFP_KERNEL);

	drm_mode_config_init(ddev);

+12 −1
Original line number Diff line number Diff line
@@ -174,8 +174,19 @@ struct msm_drm_private {
	struct msm_rd_state *hangrd;   /* debugfs to dump hanging submits */
	struct msm_perf_state *perf;

	/* list of GEM objects: */
	/*
	 * List of inactive GEM objects.  Every bo is either in the inactive_list
	 * or gpu->active_list (for the gpu it is active on[1])
	 *
	 * These lists are protected by mm_lock.  If struct_mutex is involved, it
	 * should be acquired prior to mm_lock.  One should *not* hold mm_lock in
	 * get_pages()/vmap()/etc paths, as they can trigger the shrinker.
	 *
	 * [1] if someone ever added support for the old 2d cores, there could be
	 *     more than one gpu object
	 */
	struct list_head inactive_list;
	struct mutex mm_lock;

	/* worker for delayed free of objects: */
	struct work_struct free_work;
+16 −12
Original line number Diff line number Diff line
@@ -767,13 +767,17 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
/*
 * Mark @obj active on @gpu, moving it from the inactive list to the
 * gpu's active list on the 0 -> 1 transition of active_count.
 *
 * The mm lists are protected by priv->mm_lock (not dev->struct_mutex),
 * so no struct_mutex assertion belongs here; the stale
 * WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex)) residue is dropped.
 */
void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	/* mutex_lock() below can sleep; callers must be in sleepable context */
	might_sleep();
	/* A purged/purgeable bo must not be made active */
	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);

	/* Only the first retain moves the bo onto the gpu's active list */
	if (!atomic_fetch_inc(&msm_obj->active_count)) {
		mutex_lock(&priv->mm_lock);
		msm_obj->gpu = gpu;
		list_del_init(&msm_obj->mm_list);
		list_add_tail(&msm_obj->mm_list, &gpu->active_list);
		mutex_unlock(&priv->mm_lock);
	}
}

@@ -782,12 +786,14 @@ void msm_gem_active_put(struct drm_gem_object *obj)
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	might_sleep();

	if (!atomic_dec_return(&msm_obj->active_count)) {
		mutex_lock(&priv->mm_lock);
		msm_obj->gpu = NULL;
		list_del_init(&msm_obj->mm_list);
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
		mutex_unlock(&priv->mm_lock);
	}
}

@@ -942,13 +948,16 @@ static void free_object(struct msm_gem_object *msm_obj)
{
	struct drm_gem_object *obj = &msm_obj->base;
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	mutex_lock(&priv->mm_lock);
	list_del(&msm_obj->mm_list);
	mutex_unlock(&priv->mm_lock);

	msm_gem_lock(obj);

@@ -1127,14 +1136,9 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
	}

	if (struct_mutex_locked) {
		WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	mutex_lock(&priv->mm_lock);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
	} else {
		mutex_lock(&dev->struct_mutex);
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
		mutex_unlock(&dev->struct_mutex);
	}
	mutex_unlock(&priv->mm_lock);

	return obj;

@@ -1202,9 +1206,9 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,

	msm_gem_unlock(obj);

	mutex_lock(&dev->struct_mutex);
	mutex_lock(&priv->mm_lock);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
	mutex_unlock(&dev->struct_mutex);
	mutex_unlock(&priv->mm_lock);

	return obj;

+12 −0
Original line number Diff line number Diff line
@@ -51,6 +51,8 @@ msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
	if (!msm_gem_shrinker_lock(dev, &unlock))
		return 0;

	mutex_lock(&priv->mm_lock);

	list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
		if (!msm_gem_trylock(&msm_obj->base))
			continue;
@@ -59,6 +61,8 @@ msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
		msm_gem_unlock(&msm_obj->base);
	}

	mutex_unlock(&priv->mm_lock);

	if (unlock)
		mutex_unlock(&dev->struct_mutex);

@@ -78,6 +82,8 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
	if (!msm_gem_shrinker_lock(dev, &unlock))
		return SHRINK_STOP;

	mutex_lock(&priv->mm_lock);

	list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
		if (freed >= sc->nr_to_scan)
			break;
@@ -90,6 +96,8 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
		msm_gem_unlock(&msm_obj->base);
	}

	mutex_unlock(&priv->mm_lock);

	if (unlock)
		mutex_unlock(&dev->struct_mutex);

@@ -112,6 +120,8 @@ msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
	if (!msm_gem_shrinker_lock(dev, &unlock))
		return NOTIFY_DONE;

	mutex_lock(&priv->mm_lock);

	list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
		if (!msm_gem_trylock(&msm_obj->base))
			continue;
@@ -129,6 +139,8 @@ msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
			break;
	}

	mutex_unlock(&priv->mm_lock);

	if (unlock)
		mutex_unlock(&dev->struct_mutex);

Loading