Commit d41e9b22 authored by Linus Torvalds
Pull rdma fixes from Jason Gunthorpe:
 "Two security issues and several small bug fixes. Things seem to have
  stabilized for this release here.

  Summary:

   - Significant out of bounds access security issue in i40iw

   - Fix misuse of mmu notifiers in hfi1

   - Several errors in the register map/usage in hns

   - Missing error returns in mthca"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  RDMA/hns: Bugfix for memory window mtpt configuration
  RDMA/hns: Fix retry_cnt and rnr_cnt when querying QP
  RDMA/hns: Fix wrong field of SRQ number the device supports
  IB/hfi1: Ensure correct mm is used at all times
  RDMA/i40iw: Address an mmap handler exploit in i40iw
  IB/mthca: fix return value of error branch in mthca_init_cq()
parents 76dc2bfc 17475e10
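
The hfi1 diff below ("IB/hfi1: Ensure correct mm is used at all times") drops the mm_struct pointers cached in hfi1_filedata and mmu_rb_handler; the driver instead uses the mm recorded by mmu_notifier_register() (handler->mn.mm) or current->mm at the call site, and refuses RB-tree operations invoked from a different process context. Below is a minimal standalone C model of that ownership check; the handler/owner names are illustrative only, not kernel APIs (in the kernel the test is "if (current->mm != handler->mn.mm)").

/*
 * Toy model of the pattern the patch applies: latch an owner at
 * registration time, then refuse later operations from anyone else.
 */
#include <errno.h>
#include <stdio.h>

struct handler {
	int owner;			/* stands in for handler->mn.mm */
};

static void handler_register(struct handler *h, int caller)
{
	h->owner = caller;		/* like mmu_notifier_register(&h->mn, current->mm) */
}

static int handler_insert(struct handler *h, int caller)
{
	if (caller != h->owner)		/* reject a caller with a foreign "mm" */
		return -EPERM;
	return 0;			/* the real code would insert into the RB tree here */
}

int main(void)
{
	struct handler h;

	handler_register(&h, 1);
	printf("owner insert:   %d\n", handler_insert(&h, 1));	/* 0 */
	printf("foreign insert: %d\n", handler_insert(&h, 2));	/* -EPERM (-1) */
	return 0;
}
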
drivers/infiniband/hw/hfi1/file_ops.c (+1 −3)
/*
+ * Copyright(c) 2020 Cornelis Networks, Inc.
 * Copyright(c) 2015-2020 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
@@ -206,8 +207,6 @@ static int hfi1_file_open(struct inode *inode, struct file *fp)
	spin_lock_init(&fd->tid_lock);
	spin_lock_init(&fd->invalid_lock);
	fd->rec_cpu_num = -1; /* no cpu affinity by default */
-	fd->mm = current->mm;
-	mmgrab(fd->mm);
	fd->dd = dd;
	fp->private_data = fd;
	return 0;
@@ -711,7 +710,6 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)

	deallocate_ctxt(uctxt);
done:
-	mmdrop(fdata->mm);

	if (atomic_dec_and_test(&dd->user_refcount))
		complete(&dd->user_comp);
drivers/infiniband/hw/hfi1/hfi.h (+1 −1)
#ifndef _HFI1_KERNEL_H
#define _HFI1_KERNEL_H
/*
+ * Copyright(c) 2020 Cornelis Networks, Inc.
 * Copyright(c) 2015-2020 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
@@ -1451,7 +1452,6 @@ struct hfi1_filedata {
	u32 invalid_tid_idx;
	/* protect invalid_tids array and invalid_tid_idx */
	spinlock_t invalid_lock;
-	struct mm_struct *mm;
};

extern struct xarray hfi1_dev_table;
drivers/infiniband/hw/hfi1/mmu_rb.c (+35 −33)
/*
+ * Copyright(c) 2020 Cornelis Networks, Inc.
 * Copyright(c) 2016 - 2017 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
@@ -48,23 +49,11 @@
#include <linux/rculist.h>
#include <linux/mmu_notifier.h>
#include <linux/interval_tree_generic.h>
+#include <linux/sched/mm.h>

#include "mmu_rb.h"
#include "trace.h"

-struct mmu_rb_handler {
-	struct mmu_notifier mn;
-	struct rb_root_cached root;
-	void *ops_arg;
-	spinlock_t lock;        /* protect the RB tree */
-	struct mmu_rb_ops *ops;
-	struct mm_struct *mm;
-	struct list_head lru_list;
-	struct work_struct del_work;
-	struct list_head del_list;
-	struct workqueue_struct *wq;
-};
-
static unsigned long mmu_node_start(struct mmu_rb_node *);
static unsigned long mmu_node_last(struct mmu_rb_node *);
static int mmu_notifier_range_start(struct mmu_notifier *,
@@ -92,37 +81,36 @@ static unsigned long mmu_node_last(struct mmu_rb_node *node)
	return PAGE_ALIGN(node->addr + node->len) - 1;
}

-int hfi1_mmu_rb_register(void *ops_arg, struct mm_struct *mm,
+int hfi1_mmu_rb_register(void *ops_arg,
			 struct mmu_rb_ops *ops,
			 struct workqueue_struct *wq,
			 struct mmu_rb_handler **handler)
{
-	struct mmu_rb_handler *handlr;
+	struct mmu_rb_handler *h;
	int ret;

-	handlr = kmalloc(sizeof(*handlr), GFP_KERNEL);
-	if (!handlr)
+	h = kmalloc(sizeof(*h), GFP_KERNEL);
+	if (!h)
		return -ENOMEM;

-	handlr->root = RB_ROOT_CACHED;
-	handlr->ops = ops;
-	handlr->ops_arg = ops_arg;
-	INIT_HLIST_NODE(&handlr->mn.hlist);
-	spin_lock_init(&handlr->lock);
-	handlr->mn.ops = &mn_opts;
-	handlr->mm = mm;
-	INIT_WORK(&handlr->del_work, handle_remove);
-	INIT_LIST_HEAD(&handlr->del_list);
-	INIT_LIST_HEAD(&handlr->lru_list);
-	handlr->wq = wq;
-
-	ret = mmu_notifier_register(&handlr->mn, handlr->mm);
+	h->root = RB_ROOT_CACHED;
+	h->ops = ops;
+	h->ops_arg = ops_arg;
+	INIT_HLIST_NODE(&h->mn.hlist);
+	spin_lock_init(&h->lock);
+	h->mn.ops = &mn_opts;
+	INIT_WORK(&h->del_work, handle_remove);
+	INIT_LIST_HEAD(&h->del_list);
+	INIT_LIST_HEAD(&h->lru_list);
+	h->wq = wq;
+
+	ret = mmu_notifier_register(&h->mn, current->mm);
	if (ret) {
-		kfree(handlr);
+		kfree(h);
		return ret;
	}

-	*handler = handlr;
+	*handler = h;
	return 0;
}

@@ -134,7 +122,7 @@ void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler)
	struct list_head del_list;

	/* Unregister first so we don't get any more notifications. */
-	mmu_notifier_unregister(&handler->mn, handler->mm);
+	mmu_notifier_unregister(&handler->mn, handler->mn.mm);

	/*
	 * Make sure the wq delete handler is finished running.  It will not
@@ -166,6 +154,10 @@ int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler,
	int ret = 0;

	trace_hfi1_mmu_rb_insert(mnode->addr, mnode->len);
+
+	if (current->mm != handler->mn.mm)
+		return -EPERM;
+
	spin_lock_irqsave(&handler->lock, flags);
	node = __mmu_rb_search(handler, mnode->addr, mnode->len);
	if (node) {
@@ -180,6 +172,7 @@ int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler,
		__mmu_int_rb_remove(mnode, &handler->root);
		list_del(&mnode->list); /* remove from LRU list */
	}
+	mnode->handler = handler;
unlock:
	spin_unlock_irqrestore(&handler->lock, flags);
	return ret;
@@ -217,6 +210,9 @@ bool hfi1_mmu_rb_remove_unless_exact(struct mmu_rb_handler *handler,
	unsigned long flags;
	bool ret = false;

+	if (current->mm != handler->mn.mm)
+		return ret;
+
	spin_lock_irqsave(&handler->lock, flags);
	node = __mmu_rb_search(handler, addr, len);
	if (node) {
@@ -239,6 +235,9 @@ void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg)
	unsigned long flags;
	bool stop = false;

+	if (current->mm != handler->mn.mm)
+		return;
+
	INIT_LIST_HEAD(&del_list);

	spin_lock_irqsave(&handler->lock, flags);
@@ -272,6 +271,9 @@ void hfi1_mmu_rb_remove(struct mmu_rb_handler *handler,
{
	unsigned long flags;

+	if (current->mm != handler->mn.mm)
+		return;
+
	/* Validity of handler and node pointers has been checked by caller. */
	trace_hfi1_mmu_rb_remove(node->addr, node->len);
	spin_lock_irqsave(&handler->lock, flags);
drivers/infiniband/hw/hfi1/mmu_rb.h (+15 −1)
/*
+ * Copyright(c) 2020 Cornelis Networks, Inc.
 * Copyright(c) 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
@@ -54,6 +55,7 @@ struct mmu_rb_node {
	unsigned long len;
	unsigned long __last;
	struct rb_node node;
+	struct mmu_rb_handler *handler;
	struct list_head list;
};

@@ -71,7 +73,19 @@ struct mmu_rb_ops {
		     void *evict_arg, bool *stop);
};

-int hfi1_mmu_rb_register(void *ops_arg, struct mm_struct *mm,
+struct mmu_rb_handler {
+	struct mmu_notifier mn;
+	struct rb_root_cached root;
+	void *ops_arg;
+	spinlock_t lock;        /* protect the RB tree */
+	struct mmu_rb_ops *ops;
+	struct list_head lru_list;
+	struct work_struct del_work;
+	struct list_head del_list;
+	struct workqueue_struct *wq;
+};
+
+int hfi1_mmu_rb_register(void *ops_arg,
			 struct mmu_rb_ops *ops,
			 struct workqueue_struct *wq,
			 struct mmu_rb_handler **handler);
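
With the mm parameter gone from hfi1_mmu_rb_register(), a caller registers from the process that owns the buffers and the notifier core records current->mm itself; later inserts, removes and evictions are then checked against handler->mn.mm. A hedged sketch of the call-site change follows; it is kernel-style C, not a standalone program, and pq, sdma_rb_ops and hfi1_wq are illustrative names for the user_sdma registration path rather than lines copied from this diff.

/* Before: the caller passed the owning mm, which was cached in the handler. */
ret = hfi1_mmu_rb_register(pq, fd->mm, &sdma_rb_ops, dd->pport->hfi1_wq,
			   &pq->handler);

/* After: no mm argument; hfi1_mmu_rb_register() calls
 * mmu_notifier_register(&h->mn, current->mm), and hfi1_mmu_rb_insert()
 * and friends bail out unless current->mm matches handler->mn.mm.
 */
ret = hfi1_mmu_rb_register(pq, &sdma_rb_ops, dd->pport->hfi1_wq,
			   &pq->handler);
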
drivers/infiniband/hw/hfi1/user_exp_rcv.c (+8 −4)
/*
+ * Copyright(c) 2020 Cornelis Networks, Inc.
 * Copyright(c) 2015-2018 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
@@ -173,15 +174,18 @@ static void unpin_rcv_pages(struct hfi1_filedata *fd,
{
	struct page **pages;
	struct hfi1_devdata *dd = fd->uctxt->dd;
+	struct mm_struct *mm;

	if (mapped) {
		pci_unmap_single(dd->pcidev, node->dma_addr,
				 node->npages * PAGE_SIZE, PCI_DMA_FROMDEVICE);
		pages = &node->pages[idx];
+		mm = mm_from_tid_node(node);
	} else {
		pages = &tidbuf->pages[idx];
+		mm = current->mm;
	}
-	hfi1_release_user_pages(fd->mm, pages, npages, mapped);
+	hfi1_release_user_pages(mm, pages, npages, mapped);
	fd->tid_n_pinned -= npages;
}

@@ -216,12 +220,12 @@ static int pin_rcv_pages(struct hfi1_filedata *fd, struct tid_user_buf *tidbuf)
	 * pages, accept the amount pinned so far and program only that.
	 * User space knows how to deal with partially programmed buffers.
	 */
-	if (!hfi1_can_pin_pages(dd, fd->mm, fd->tid_n_pinned, npages)) {
+	if (!hfi1_can_pin_pages(dd, current->mm, fd->tid_n_pinned, npages)) {
		kfree(pages);
		return -ENOMEM;
	}

-	pinned = hfi1_acquire_user_pages(fd->mm, vaddr, npages, true, pages);
+	pinned = hfi1_acquire_user_pages(current->mm, vaddr, npages, true, pages);
	if (pinned <= 0) {
		kfree(pages);
		return pinned;
@@ -756,7 +760,7 @@ static int set_rcvarray_entry(struct hfi1_filedata *fd,

	if (fd->use_mn) {
		ret = mmu_interval_notifier_insert(
-			&node->notifier, fd->mm,
+			&node->notifier, current->mm,
			tbuf->vaddr + (pageidx * PAGE_SIZE), npages * PAGE_SIZE,
			&tid_mn_ops);
		if (ret)