Commit dfcd6660 authored by Jérôme Glisse, committed by Linus Torvalds
Browse files

mm/mmu_notifier: convert user range->blockable to helper function

Use the mmu_notifier_range_blockable() helper function instead of directly
dereferencing the range->blockable field.  This is done to make it easier
to change the mmu_notifier range field.

This patch is the outcome of the following coccinelle patch:

%<-------------------------------------------------------------------
@@
identifier I1, FN;
@@
FN(..., struct mmu_notifier_range *I1, ...) {
<...
-I1->blockable
+mmu_notifier_range_blockable(I1)
...>
}
------------------------------------------------------------------->%

spatch --in-place --sp-file blockable.spatch --dir .

Link: http://lkml.kernel.org/r/20190326164747.24405-3-jglisse@redhat.com


Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
Reviewed-by: Ralph Campbell <rcampbell@nvidia.com>
Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Cc: Christian König <christian.koenig@amd.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Jani Nikula <jani.nikula@linux.intel.com>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Felix Kuehling <Felix.Kuehling@amd.com>
Cc: Jason Gunthorpe <jgg@mellanox.com>
Cc: Ross Zwisler <zwisler@kernel.org>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Radim Krcmar <rkrcmar@redhat.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Christian Koenig <christian.koenig@amd.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 4a83bfe9
Loading
Loading
Loading
Loading
+4 −4
Original line number Diff line number Diff line
@@ -256,14 +256,14 @@ static int amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn,
	/* TODO we should be able to split locking for interval tree and
	 * amdgpu_mn_invalidate_node
	 */
-	if (amdgpu_mn_read_lock(amn, range->blockable))
+	if (amdgpu_mn_read_lock(amn, mmu_notifier_range_blockable(range)))
		return -EAGAIN;

	it = interval_tree_iter_first(&amn->objects, range->start, end);
	while (it) {
		struct amdgpu_mn_node *node;

-		if (!range->blockable) {
+		if (!mmu_notifier_range_blockable(range)) {
			amdgpu_mn_read_unlock(amn);
			return -EAGAIN;
		}
@@ -299,7 +299,7 @@ static int amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
	/* notification is exclusive, but interval is inclusive */
	end = range->end - 1;

-	if (amdgpu_mn_read_lock(amn, range->blockable))
+	if (amdgpu_mn_read_lock(amn, mmu_notifier_range_blockable(range)))
		return -EAGAIN;

	it = interval_tree_iter_first(&amn->objects, range->start, end);
@@ -307,7 +307,7 @@ static int amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
		struct amdgpu_mn_node *node;
		struct amdgpu_bo *bo;

-		if (!range->blockable) {
+		if (!mmu_notifier_range_blockable(range)) {
			amdgpu_mn_read_unlock(amn);
			return -EAGAIN;
		}
+1 −1
Original line number Diff line number Diff line
@@ -122,7 +122,7 @@ userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
	while (it) {
		struct drm_i915_gem_object *obj;

-		if (!range->blockable) {
+		if (!mmu_notifier_range_blockable(range)) {
			ret = -EAGAIN;
			break;
		}
+2 −2
Original line number Diff line number Diff line
@@ -133,7 +133,7 @@ static int radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
	/* TODO we should be able to split locking for interval tree and
	 * the tear down.
	 */
-	if (range->blockable)
+	if (mmu_notifier_range_blockable(range))
		mutex_lock(&rmn->lock);
	else if (!mutex_trylock(&rmn->lock))
		return -EAGAIN;
@@ -144,7 +144,7 @@ static int radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
		struct radeon_bo *bo;
		long r;

-		if (!range->blockable) {
+		if (!mmu_notifier_range_blockable(range)) {
			ret = -EAGAIN;
			goto out_unlock;
		}
+3 −2
Original line number Diff line number Diff line
@@ -152,7 +152,7 @@ static int ib_umem_notifier_invalidate_range_start(struct mmu_notifier *mn,
	struct ib_ucontext_per_mm *per_mm =
		container_of(mn, struct ib_ucontext_per_mm, mn);

-	if (range->blockable)
+	if (mmu_notifier_range_blockable(range))
		down_read(&per_mm->umem_rwsem);
	else if (!down_read_trylock(&per_mm->umem_rwsem))
		return -EAGAIN;
@@ -170,7 +170,8 @@ static int ib_umem_notifier_invalidate_range_start(struct mmu_notifier *mn,
	return rbt_ib_umem_for_each_in_range(&per_mm->umem_tree, range->start,
					     range->end,
					     invalidate_range_start_trampoline,
-					     range->blockable, NULL);
+					     mmu_notifier_range_blockable(range),
+					     NULL);
}

static int invalidate_range_end_trampoline(struct ib_umem_odp *item, u64 start,
+3 −3
Original line number Diff line number Diff line
@@ -526,20 +526,20 @@ static int mn_invl_range_start(struct mmu_notifier *mn,
	struct gntdev_grant_map *map;
	int ret = 0;

-	if (range->blockable)
+	if (mmu_notifier_range_blockable(range))
		mutex_lock(&priv->lock);
	else if (!mutex_trylock(&priv->lock))
		return -EAGAIN;

	list_for_each_entry(map, &priv->maps, next) {
		ret = unmap_if_in_range(map, range->start, range->end,
-					range->blockable);
+					mmu_notifier_range_blockable(range));
		if (ret)
			goto out_unlock;
	}
	list_for_each_entry(map, &priv->freeable_maps, next) {
		ret = unmap_if_in_range(map, range->start, range->end,
-					range->blockable);
+					mmu_notifier_range_blockable(range));
		if (ret)
			goto out_unlock;
	}
Loading