Commit 2ed38143 authored by Shiraz Saleem, committed by Jason Gunthorpe

RDMA/i40iw: Address an mmap handler exploit in i40iw

i40iw_mmap manipulates vma->vm_pgoff to differentiate a push page mmap
from a doorbell mmap, and uses it to compute the pfn passed to
remap_pfn_range without any validation. This makes the handler vulnerable
to the mmap exploit described in:
https://lore.kernel.org/r/20201119093523.7588-1-zhudi21@huawei.com
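
To make the attack surface concrete, here is a minimal userspace sketch
(not a working exploit; uverbs_fd and crafted_pgoff are hypothetical
names, and a 4 KiB page size is assumed): any nonzero offset reached the
pfn computation unchecked.

/* Hypothetical illustration: before the fix, the handler derived the
 * remapped pfn from this attacker-controlled offset with no bounds
 * check against the device BAR. */
#include <sys/mman.h>

static void *map_arbitrary(int uverbs_fd, unsigned long crafted_pgoff)
{
	/* assumes PAGE_SHIFT == 12 (4 KiB pages) */
	return mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
		    uverbs_fd, (off_t)crafted_pgoff << 12);
}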

The push feature is currently disabled in the driver, so no push mmaps
are issued from user-space; the feature does not work as expected on the
x722 product.

Remove the push_mode module parameter and all VMA attribute manipulation
for this feature in i40iw_mmap. Update i40iw_mmap to allow only doorbell
(DB) user mmappings at offset = 0, checking that vm_pgoff is zero and
that the mapping is bound to a single page.
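
For contrast, a minimal sketch of the only mapping shape the fixed
handler accepts (assumes 4 KiB pages and an already-established user
context on a hypothetical uverbs_fd; any other offset or length now
returns -EINVAL):

#include <sys/mman.h>
#include <unistd.h>

static void *map_doorbell(int uverbs_fd)
{
	long page_size = sysconf(_SC_PAGESIZE);

	/* offset must be 0 and the length exactly one page */
	return mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, uverbs_fd, 0);
}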

Cc: <stable@kernel.org>
Fixes: d3749841 ("i40iw: add files for iwarp interface")
Link: https://lore.kernel.org/r/20201125005616.1800-2-shiraz.saleem@intel.com
Reported-by: Di Zhu <zhudi21@huawei.com>
Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent 6830ff85
drivers/infiniband/hw/i40iw/i40iw_main.c +0 −5
@@ -54,10 +54,6 @@
 #define DRV_VERSION	__stringify(DRV_VERSION_MAJOR) "."		\
 	__stringify(DRV_VERSION_MINOR) "." __stringify(DRV_VERSION_BUILD)
 
-static int push_mode;
-module_param(push_mode, int, 0644);
-MODULE_PARM_DESC(push_mode, "Low latency mode: 0=disabled (default), 1=enabled)");
-
 static int debug;
 module_param(debug, int, 0644);
 MODULE_PARM_DESC(debug, "debug flags: 0=disabled (default), 0x7fffffff=all");
@@ -1580,7 +1576,6 @@ static enum i40iw_status_code i40iw_setup_init_state(struct i40iw_handler *hdl,
 	if (status)
 		goto exit;
 	iwdev->obj_next = iwdev->obj_mem;
-	iwdev->push_mode = push_mode;
 
 	init_waitqueue_head(&iwdev->vchnl_waitq);
 	init_waitqueue_head(&dev->vf_reqs);
drivers/infiniband/hw/i40iw/i40iw_verbs.c +7 −30
@@ -167,39 +167,16 @@ static void i40iw_dealloc_ucontext(struct ib_ucontext *context)
  */
 static int i40iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 {
-	struct i40iw_ucontext *ucontext;
-	u64 db_addr_offset, push_offset, pfn;
-
-	ucontext = to_ucontext(context);
-	if (ucontext->iwdev->sc_dev.is_pf) {
-		db_addr_offset = I40IW_DB_ADDR_OFFSET;
-		push_offset = I40IW_PUSH_OFFSET;
-		if (vma->vm_pgoff)
-			vma->vm_pgoff += I40IW_PF_FIRST_PUSH_PAGE_INDEX - 1;
-	} else {
-		db_addr_offset = I40IW_VF_DB_ADDR_OFFSET;
-		push_offset = I40IW_VF_PUSH_OFFSET;
-		if (vma->vm_pgoff)
-			vma->vm_pgoff += I40IW_VF_FIRST_PUSH_PAGE_INDEX - 1;
-	}
+	struct i40iw_ucontext *ucontext = to_ucontext(context);
+	u64 dbaddr;
 
-	vma->vm_pgoff += db_addr_offset >> PAGE_SHIFT;
-
-	if (vma->vm_pgoff == (db_addr_offset >> PAGE_SHIFT)) {
-		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-	} else {
-		if ((vma->vm_pgoff - (push_offset >> PAGE_SHIFT)) % 2)
-			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-		else
-			vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-	}
+	if (vma->vm_pgoff || vma->vm_end - vma->vm_start != PAGE_SIZE)
+		return -EINVAL;
 
-	pfn = vma->vm_pgoff +
-	      (pci_resource_start(ucontext->iwdev->ldev->pcidev, 0) >>
-	       PAGE_SHIFT);
+	dbaddr = I40IW_DB_ADDR_OFFSET + pci_resource_start(ucontext->iwdev->ldev->pcidev, 0);
 
-	return rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
-				 vma->vm_page_prot, NULL);
+	return rdma_user_mmap_io(context, vma, dbaddr >> PAGE_SHIFT, PAGE_SIZE,
+				 pgprot_noncached(vma->vm_page_prot), NULL);
 }
 
 /**