Commit 42fc5414 authored by Michel Lespinasse's avatar Michel Lespinasse Committed by Linus Torvalds
Browse files

mmap locking API: add mmap_assert_locked() and mmap_assert_write_locked()



Add new APIs to assert that mmap_sem is held.

Using this instead of rwsem_is_locked and lockdep_assert_held[_write]
makes the assertions more tolerant of future changes to the lock type.

Signed-off-by: Michel Lespinasse <walken@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Daniel Jordan <daniel.m.jordan@oracle.com>
Cc: Davidlohr Bueso <dbueso@suse.de>
Cc: David Rientjes <rientjes@google.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Laurent Dufour <ldufour@linux.ibm.com>
Cc: Liam Howlett <Liam.Howlett@oracle.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ying Han <yinghan@google.com>
Link: http://lkml.kernel.org/r/20200520052908.204642-10-walken@google.com


Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 14c3656b
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -2181,7 +2181,7 @@ static void x86_pmu_event_mapped(struct perf_event *event, struct mm_struct *mm)
	 * For now, this can't happen because all callers hold mmap_sem
	 * for write.  If this changes, we'll need a different solution.
	 */
	lockdep_assert_held_write(&mm->mmap_sem);
	mmap_assert_write_locked(mm);

	if (atomic_inc_return(&mm->context.perf_rdpmc_allowed) == 1)
		on_each_cpu_mask(mm_cpumask(mm), cr4_update_pce, NULL, 1);
+3 −3
Original line number Diff line number Diff line
@@ -234,7 +234,7 @@ static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
	pte_t *ptep, pte;
	bool ret = true;

	VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
	mmap_assert_locked(mm);

	ptep = huge_pte_offset(mm, address, vma_mmu_pagesize(vma));

@@ -286,7 +286,7 @@ static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx,
	pte_t *pte;
	bool ret = true;

	VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
	mmap_assert_locked(mm);

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
@@ -405,7 +405,7 @@ vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
	 * Coredumping runs without mmap_sem so we can only check that
	 * the mmap_sem is held, if PF_DUMPCORE was not set.
	 */
	WARN_ON_ONCE(!rwsem_is_locked(&mm->mmap_sem));
	mmap_assert_locked(mm);

	ctx = vmf->vma->vm_userfaultfd_ctx.ctx;
	if (!ctx)
+14 −0
Original line number Diff line number Diff line
#ifndef _LINUX_MMAP_LOCK_H
#define _LINUX_MMAP_LOCK_H

#include <linux/mmdebug.h>

#define MMAP_LOCK_INITIALIZER(name) \
	.mmap_sem = __RWSEM_INITIALIZER((name).mmap_sem),

@@ -73,4 +75,16 @@ static inline void mmap_read_unlock_non_owner(struct mm_struct *mm)
	up_read_non_owner(&mm->mmap_sem);
}

/*
 * Assert that mmap_sem is held by the current task, in either read or
 * write mode.  Centralizing the check here keeps callers tolerant of
 * future changes to the underlying lock type.
 */
static inline void mmap_assert_locked(struct mm_struct *mm)
{
	/* With lockdep enabled: verify this task actually holds the lock. */
	lockdep_assert_held(&mm->mmap_sem);
	/* Lockdep-independent fallback: the rwsem must at least be held by
	 * someone; dumps mm state on failure. */
	VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);
}

/*
 * Assert that mmap_sem is held by the current task in write (exclusive)
 * mode.  Like mmap_assert_locked(), this hides the lock type from callers.
 */
static inline void mmap_assert_write_locked(struct mm_struct *mm)
{
	/* With lockdep enabled: verify this task holds the lock for write. */
	lockdep_assert_held_write(&mm->mmap_sem);
	/* Lockdep-independent fallback: rwsem_is_locked() cannot distinguish
	 * read from write holders, so this only checks the lock is held at
	 * all; dumps mm state on failure. */
	VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);
}

#endif /* _LINUX_MMAP_LOCK_H */
+1 −1
Original line number Diff line number Diff line
@@ -1425,7 +1425,7 @@ long populate_vma_page_range(struct vm_area_struct *vma,
	VM_BUG_ON(end   & ~PAGE_MASK);
	VM_BUG_ON_VMA(start < vma->vm_start, vma);
	VM_BUG_ON_VMA(end   > vma->vm_end, vma);
	VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);
	mmap_assert_locked(mm);

	gup_flags = FOLL_TOUCH | FOLL_POPULATE | FOLL_MLOCK;
	if (vma->vm_flags & VM_LOCKONFAULT)
+1 −1
Original line number Diff line number Diff line
@@ -563,7 +563,7 @@ int hmm_range_fault(struct hmm_range *range)
	struct mm_struct *mm = range->notifier->mm;
	int ret;

	lockdep_assert_held(&mm->mmap_sem);
	mmap_assert_locked(mm);

	do {
		/* If range is no longer valid force retry. */
Loading