Commit 70f8a3ca authored by Davidlohr Bueso, committed by Jason Gunthorpe

mm: make mm->pinned_vm an atomic64 counter



Taking a sleeping lock to _only_ increment a variable is overkill, yet
pretty much all users of pinned_vm do exactly that. Furthermore, some
drivers (e.g. infiniband and scif) that need pinned semantics go to quite
some trouble to defer the (un)accounting of pinned pages to a workqueue
when the lock cannot be acquired, as sketched below.
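
For illustration, that deferral pattern looks roughly like the following
sketch (pre-patch semantics; the struct and helper names here are
hypothetical, though the diff below contains the real
ib_umem_release_defer() and usnic_uiom_release_defer()):

    #include <linux/kernel.h>
    #include <linux/mm_types.h>
    #include <linux/workqueue.h>

    /* Hypothetical deferred-unpin context; illustration only. */
    struct unpin_ctx {
            struct work_struct work;
            struct mm_struct *mm;
            unsigned long npages;
    };

    static void unpin_deferred(struct work_struct *work)
    {
            struct unpin_ctx *ctx =
                    container_of(work, struct unpin_ctx, work);

            /* Workqueue context may sleep, so the rwsem can be taken. */
            down_write(&ctx->mm->mmap_sem);
            ctx->mm->pinned_vm -= ctx->npages; /* pre-patch plain counter */
            up_write(&ctx->mm->mmap_sem);
    }

    /* Called where mmap_sem cannot be taken: punt to a workqueue. */
    static void unpin_account_deferred(struct unpin_ctx *ctx)
    {
            INIT_WORK(&ctx->work, unpin_deferred);
            schedule_work(&ctx->work);
    }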

By making the counter atomic we no longer need to hold the mmap_sem and
can simplify some of the code around it for pinned_vm users. The counter
is 64-bit so that we need not worry about overflow, e.g. in rdma, where
the amount being added is controlled by userspace input.
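
The conversion itself, as a minimal sketch (a hypothetical helper, not
code taken from this patch; the real call sites are in the diff below):

    #include <linux/atomic.h>
    #include <linux/mm_types.h>

    /* Hypothetical helper; illustration only, not part of this patch. */
    static void unpin_account(struct mm_struct *mm, unsigned long npages)
    {
            /*
             * Before: pinned_vm was a plain unsigned long, so every
             * writer serialized on the sleeping mmap_sem:
             *
             *      down_write(&mm->mmap_sem);
             *      mm->pinned_vm -= npages;
             *      up_write(&mm->mmap_sem);
             */

            /* After: one lockless op on the atomic64_t counter. */
            atomic64_sub(npages, &mm->pinned_vm);
    }

Note that several hunks below still take mmap_sem around the new atomic
ops; this patch leaves that locking in place for later cleanup.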

Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Reviewed-by: Christoph Lameter <cl@linux.com>
Reviewed-by: Daniel Jordan <daniel.m.jordan@oracle.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent a2bfd708
drivers/infiniband/core/umem.c +6 −6
@@ -166,13 +166,13 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
 	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
 
 	down_write(&mm->mmap_sem);
-	if (check_add_overflow(mm->pinned_vm, npages, &new_pinned) ||
-	    (new_pinned > lock_limit && !capable(CAP_IPC_LOCK))) {
+	new_pinned = atomic64_read(&mm->pinned_vm) + npages;
+	if (new_pinned > lock_limit && !capable(CAP_IPC_LOCK)) {
 		up_write(&mm->mmap_sem);
 		ret = -ENOMEM;
 		goto out;
 	}
-	mm->pinned_vm = new_pinned;
+	atomic64_set(&mm->pinned_vm, new_pinned);
 	up_write(&mm->mmap_sem);
 
 	cur_base = addr & PAGE_MASK;
@@ -234,7 +234,7 @@ umem_release:
 	__ib_umem_release(context->device, umem, 0);
 vma:
 	down_write(&mm->mmap_sem);
-	mm->pinned_vm -= ib_umem_num_pages(umem);
+	atomic64_sub(ib_umem_num_pages(umem), &mm->pinned_vm);
 	up_write(&mm->mmap_sem);
 out:
 	if (vma_list)
@@ -263,7 +263,7 @@ static void ib_umem_release_defer(struct work_struct *work)
 	struct ib_umem *umem = container_of(work, struct ib_umem, work);
 
 	down_write(&umem->owning_mm->mmap_sem);
-	umem->owning_mm->pinned_vm -= ib_umem_num_pages(umem);
+	atomic64_sub(ib_umem_num_pages(umem), &umem->owning_mm->pinned_vm);
 	up_write(&umem->owning_mm->mmap_sem);
 
 	__ib_umem_release_tail(umem);
@@ -302,7 +302,7 @@ void ib_umem_release(struct ib_umem *umem)
 	} else {
 		down_write(&umem->owning_mm->mmap_sem);
 	}
-	umem->owning_mm->pinned_vm -= ib_umem_num_pages(umem);
+	atomic64_sub(ib_umem_num_pages(umem), &umem->owning_mm->pinned_vm);
 	up_write(&umem->owning_mm->mmap_sem);
 
 	__ib_umem_release_tail(umem);
drivers/infiniband/hw/hfi1/user_pages.c +3 −3
@@ -92,7 +92,7 @@ bool hfi1_can_pin_pages(struct hfi1_devdata *dd, struct mm_struct *mm,
 	size = DIV_ROUND_UP(size, PAGE_SIZE);
 
 	down_read(&mm->mmap_sem);
-	pinned = mm->pinned_vm;
+	pinned = atomic64_read(&mm->pinned_vm);
 	up_read(&mm->mmap_sem);
 
 	/* First, check the absolute limit against all pinned pages. */
@@ -112,7 +112,7 @@ int hfi1_acquire_user_pages(struct mm_struct *mm, unsigned long vaddr, size_t np
 		return ret;
 
 	down_write(&mm->mmap_sem);
-	mm->pinned_vm += ret;
+	atomic64_add(ret, &mm->pinned_vm);
 	up_write(&mm->mmap_sem);
 
 	return ret;
@@ -131,7 +131,7 @@ void hfi1_release_user_pages(struct mm_struct *mm, struct page **p,
 
 	if (mm) { /* during close after signal, mm can be NULL */
 		down_write(&mm->mmap_sem);
-		mm->pinned_vm -= npages;
+		atomic64_sub(npages, &mm->pinned_vm);
 		up_write(&mm->mmap_sem);
 	}
 }
drivers/infiniband/hw/qib/qib_user_pages.c +2 −2
@@ -75,7 +75,7 @@ static int __qib_get_user_pages(unsigned long start_page, size_t num_pages,
 			goto bail_release;
 	}
 
-	current->mm->pinned_vm += num_pages;
+	atomic64_add(num_pages, &current->mm->pinned_vm);
 
 	ret = 0;
 	goto bail;
@@ -156,7 +156,7 @@ void qib_release_user_pages(struct page **p, size_t num_pages)
 	__qib_release_user_pages(p, num_pages, 1);
 
 	if (current->mm) {
-		current->mm->pinned_vm -= num_pages;
+		atomic64_sub(num_pages, &current->mm->pinned_vm);
 		up_write(&current->mm->mmap_sem);
 	}
 }
drivers/infiniband/hw/usnic/usnic_uiom.c +4 −4
@@ -129,7 +129,7 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
 	uiomr->owning_mm = mm = current->mm;
 	down_write(&mm->mmap_sem);
 
-	locked = npages + current->mm->pinned_vm;
+	locked = npages + atomic64_read(&current->mm->pinned_vm);
 	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
 
 	if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
@@ -187,7 +187,7 @@ out:
 	if (ret < 0)
 		usnic_uiom_put_pages(chunk_list, 0);
 	else {
-		mm->pinned_vm = locked;
+		atomic64_set(&mm->pinned_vm, locked);
 		mmgrab(uiomr->owning_mm);
 	}
 
@@ -441,7 +441,7 @@ static void usnic_uiom_release_defer(struct work_struct *work)
 		container_of(work, struct usnic_uiom_reg, work);
 
 	down_write(&uiomr->owning_mm->mmap_sem);
-	uiomr->owning_mm->pinned_vm -= usnic_uiom_num_pages(uiomr);
+	atomic64_sub(usnic_uiom_num_pages(uiomr), &uiomr->owning_mm->pinned_vm);
 	up_write(&uiomr->owning_mm->mmap_sem);
 
 	__usnic_uiom_release_tail(uiomr);
@@ -469,7 +469,7 @@ void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr,
 	} else {
 		down_write(&uiomr->owning_mm->mmap_sem);
 	}
-	uiomr->owning_mm->pinned_vm -= usnic_uiom_num_pages(uiomr);
+	atomic64_sub(usnic_uiom_num_pages(uiomr), &uiomr->owning_mm->pinned_vm);
 	up_write(&uiomr->owning_mm->mmap_sem);
 
 	__usnic_uiom_release_tail(uiomr);
drivers/misc/mic/scif/scif_rma.c +3 −3
@@ -285,7 +285,7 @@ __scif_dec_pinned_vm_lock(struct mm_struct *mm,
 	} else {
 		down_write(&mm->mmap_sem);
 	}
-	mm->pinned_vm -= nr_pages;
+	atomic64_sub(nr_pages, &mm->pinned_vm);
 	up_write(&mm->mmap_sem);
 	return 0;
 }
@@ -299,7 +299,7 @@ static inline int __scif_check_inc_pinned_vm(struct mm_struct *mm,
 		return 0;
 
 	locked = nr_pages;
-	locked += mm->pinned_vm;
+	locked += atomic64_read(&mm->pinned_vm);
 	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
 	if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
 		dev_err(scif_info.mdev.this_device,
@@ -307,7 +307,7 @@ static inline int __scif_check_inc_pinned_vm(struct mm_struct *mm,
 			locked, lock_limit);
 		return -ENOMEM;
 	}
-	mm->pinned_vm = locked;
+	atomic64_set(&mm->pinned_vm, locked);
 	return 0;
 }
