Commit 5e1a3149 authored by Michael S. Tsirkin

Revert "vhost-vdpa: fix page pinning leakage in error path"



This reverts commit 7ed9e3d9.

The patch creates a DoS risk since it can result in a high order memory
allocation: the page-pointer and vm_area_struct-pointer arrays it
allocates with kvmalloc_array() are sized by the number of pages in a
single userspace-supplied IOTLB update.
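
To see the scale, a back-of-the-envelope sketch in userspace C (the
16 GiB mapping size is an illustrative assumption, not from the commit;
the reverted patch allocated two arrays of this size per IOTLB update,
one for page pointers and one for vm_area_struct pointers):

	#include <stdio.h>

	int main(void)
	{
		unsigned long long page_size = 4096;        /* typical 4 KiB page */
		unsigned long long map_size = 16ULL << 30;  /* hypothetical 16 GiB update */
		unsigned long long npages = map_size / page_size;
		/* one pointer per pinned page, as in kvmalloc_array(npages, ...) */
		unsigned long long array_bytes = npages * sizeof(void *);

		printf("npages = %llu\n", npages);                 /* 4194304 */
		printf("per-array size = %llu MiB\n", array_bytes >> 20); /* 32 MiB */
		return 0;
	}

Since msg->size comes from userspace, the allocation size is
attacker-controlled.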

Fixes: 7ed9e3d9 ("vhost-vdpa: fix page pinning leakage in error path")
Cc: stable@vger.kernel.org
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
parent 7ba08e81
+48 −71
@@ -602,13 +602,11 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
 	struct vhost_dev *dev = &v->vdev;
 	struct vhost_iotlb *iotlb = dev->iotlb;
 	struct page **page_list;
-	struct vm_area_struct **vmas;
+	unsigned long list_size = PAGE_SIZE / sizeof(struct page *);
 	unsigned int gup_flags = FOLL_LONGTERM;
-	unsigned long map_pfn, last_pfn = 0;
-	unsigned long npages, lock_limit;
-	unsigned long i, nmap = 0;
+	unsigned long npages, cur_base, map_pfn, last_pfn = 0;
+	unsigned long locked, lock_limit, pinned, i;
 	u64 iova = msg->iova;
-	long pinned;
 	int ret = 0;
 
 	if (msg->iova < v->range.first ||
@@ -619,6 +617,10 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
 				    msg->iova + msg->size - 1))
 		return -EEXIST;
 
+	page_list = (struct page **) __get_free_page(GFP_KERNEL);
+	if (!page_list)
+		return -ENOMEM;
+
 	if (msg->perm & VHOST_ACCESS_WO)
 		gup_flags |= FOLL_WRITE;
 
@@ -626,86 +628,61 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
 	if (!npages)
 		return -EINVAL;
 
-	page_list = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
-	vmas = kvmalloc_array(npages, sizeof(struct vm_area_struct *),
-			      GFP_KERNEL);
-	if (!page_list || !vmas) {
-		ret = -ENOMEM;
-		goto free;
-	}
-
 	mmap_read_lock(dev->mm);
 
+	locked = atomic64_add_return(npages, &dev->mm->pinned_vm);
 	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
-	if (npages + atomic64_read(&dev->mm->pinned_vm) > lock_limit) {
-		ret = -ENOMEM;
-		goto unlock;
-	}
 
-	pinned = pin_user_pages(msg->uaddr & PAGE_MASK, npages, gup_flags,
-				page_list, vmas);
-	if (npages != pinned) {
-		if (pinned < 0) {
-			ret = pinned;
-		} else {
-			unpin_user_pages(page_list, pinned);
-			ret = -ENOMEM;
-		}
-		goto unlock;
+	if (locked > lock_limit) {
+		ret = -ENOMEM;
+		goto out;
 	}
 
+	cur_base = msg->uaddr & PAGE_MASK;
 	iova &= PAGE_MASK;
-	map_pfn = page_to_pfn(page_list[0]);
 
-	/* One more iteration to avoid extra vdpa_map() call out of loop. */
-	for (i = 0; i <= npages; i++) {
-		unsigned long this_pfn;
-		u64 csize;
-
-		/* The last chunk may have no valid PFN next to it */
-		this_pfn = i < npages ? page_to_pfn(page_list[i]) : -1UL;
-
-		if (last_pfn && (this_pfn == -1UL ||
-				 this_pfn != last_pfn + 1)) {
-			/* Pin a contiguous chunk of memory */
-			csize = last_pfn - map_pfn + 1;
-			ret = vhost_vdpa_map(v, iova, csize << PAGE_SHIFT,
-					     map_pfn << PAGE_SHIFT,
-					     msg->perm);
-			if (ret) {
-				/*
-				 * Unpin the rest chunks of memory on the
-				 * flight with no corresponding vdpa_map()
-				 * calls having been made yet. On the other
-				 * hand, vdpa_unmap() in the failure path
-				 * is in charge of accounting the number of
-				 * pinned pages for its own.
-				 * This asymmetrical pattern of accounting
-				 * is for efficiency to pin all pages at
-				 * once, while there is no other callsite
-				 * of vdpa_map() than here above.
-				 */
-				unpin_user_pages(&page_list[nmap],
-						 npages - nmap);
-				goto unlock;
-			}
-			atomic64_add(csize, &dev->mm->pinned_vm);
-			nmap += csize;
-			iova += csize << PAGE_SHIFT;
-			map_pfn = this_pfn;
-		}
-		last_pfn = this_pfn;
-	}
+	while (npages) {
+		pinned = min_t(unsigned long, npages, list_size);
+		ret = pin_user_pages(cur_base, pinned,
+				     gup_flags, page_list, NULL);
+		if (ret != pinned)
+			goto out;
+
+		if (!last_pfn)
+			map_pfn = page_to_pfn(page_list[0]);
+
+		for (i = 0; i < ret; i++) {
+			unsigned long this_pfn = page_to_pfn(page_list[i]);
+			u64 csize;
+
+			if (last_pfn && (this_pfn != last_pfn + 1)) {
+				/* Pin a contiguous chunk of memory */
+				csize = (last_pfn - map_pfn + 1) << PAGE_SHIFT;
+				if (vhost_vdpa_map(v, iova, csize,
+						   map_pfn << PAGE_SHIFT,
+						   msg->perm))
+					goto out;
+				map_pfn = this_pfn;
+				iova += csize;
+			}
+
+			last_pfn = this_pfn;
+		}
+
+		cur_base += ret << PAGE_SHIFT;
+		npages -= ret;
+	}
 
-	WARN_ON(nmap != npages);
+	/* Pin the rest chunk */
+	ret = vhost_vdpa_map(v, iova, (last_pfn - map_pfn + 1) << PAGE_SHIFT,
+			     map_pfn << PAGE_SHIFT, msg->perm);
 out:
-	if (ret)
+	if (ret) {
 		vhost_vdpa_unmap(v, msg->iova, msg->size);
-unlock:
+		atomic64_sub(npages, &dev->mm->pinned_vm);
+	}
 	mmap_read_unlock(dev->mm);
-free:
-	kvfree(vmas);
-	kvfree(page_list);
+	free_page((unsigned long)page_list);
 	return ret;
 }
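
For contrast, a minimal userspace analogue of the strategy the revert
restores: a single fixed page of scratch pointers walked in bounded
chunks, so memory use no longer scales with the request. process_chunk
is a hypothetical stand-in for the pin_user_pages()/vhost_vdpa_map()
work, and PAGE_SIZE is hardcoded for illustration; this is a sketch of
the pattern, not the kernel code itself.

	#include <stdio.h>
	#include <stdlib.h>

	#define PAGE_SIZE 4096UL

	/* Hypothetical stand-in for the per-chunk pin/map work. */
	static void process_chunk(void **list, unsigned long n)
	{
		(void)list;
		(void)n;
	}

	int main(void)
	{
		/* 512 entries on 64-bit: one page worth of pointers */
		unsigned long list_size = PAGE_SIZE / sizeof(void *);
		unsigned long npages = 4194304;	/* e.g. a 16 GiB request */
		void **page_list = malloc(PAGE_SIZE);	/* fixed-size scratch */
		unsigned long chunks = 0;

		if (!page_list)
			return 1;

		/* Walk the request in bounded chunks, as the restored loop does */
		while (npages) {
			unsigned long n = npages < list_size ? npages : list_size;

			process_chunk(page_list, n);
			npages -= n;
			chunks++;
		}

		free(page_list);
		printf("processed in %lu chunks of at most %lu pages\n",
		       chunks, list_size);
		return 0;
	}

The trade-off is the extra bookkeeping visible in the restored kernel
loop: pinned-page accounting happens up front via pinned_vm, and a
failure anywhere unwinds with a single vhost_vdpa_unmap() over the
whole range.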