Commit a8dba053 authored by Linus Torvalds's avatar Linus Torvalds
Browse files
Pull rdma fixes from Doug Ledford:
 "Fairly small pull request for -rc3. I'm out of town the rest of this
  week, so I made sure to clean out as much as possible from patchworks
  in enough time for 0-day to chew through it (Yay! for 0-day being back
  online! :-)). Jason might send through any emergency stuff that could
  pop up, otherwise I'm back next week.

  The only real thing of note is the siw ABI change. Since we just
  merged siw *this* release, there are no prior kernel releases to
  maintain kernel ABI with. I told Bernard that if there is anything
  else about the siw ABI he thinks he might want to change before it
  goes set in stone, he should get it in ASAP. The siw module was around
  for several years outside the kernel tree, and it had to be revamped
  considerably for inclusion upstream, so we are making no attempts to
  be backward compatible with the out of tree version. Once 5.3 is
  actually released, we will have our baseline ABI to maintain.

  Summary:

   - Fix a memory registration release flow issue that was causing a
     WARN_ON (mlx5)

   - If the counters for a port aren't allocated, then we can't do
     operations on the non-existent counters (core)

   - Check the right variable for error code result (mlx5)

   - Fix a use-after-free issue (mlx5)

   - Fix an off-by-one memory leak (siw)

   - Actually return an error code on error (core)

   - Allow siw to be built on 32-bit arches (siw, ABI change, but OK
     since siw was just merged this merge window and there is no prior
     released kernel to maintain compatibility with; we also updated
     the rdma-core user-space package to match)"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  RDMA/siw: Change CQ flags from 64->32 bits
  RDMA/core: Fix error code in stat_get_doit_qp()
  RDMA/siw: Fix a memory leak in siw_init_cpulist()
  IB/mlx5: Fix use-after-free error while accessing ev_file pointer
  IB/mlx5: Check the correct variable in error handling code
  RDMA/counter: Prevent QP counter binding if counters unsupported
  IB/mlx5: Fix implicit MR release flow
parents e83b009c 2c8ccb37
Loading
Loading
Loading
Loading
+6 −0
Original line number Diff line number Diff line
@@ -38,6 +38,9 @@ int rdma_counter_set_auto_mode(struct ib_device *dev, u8 port,
	int ret;

	port_counter = &dev->port_data[port].port_counter;
	if (!port_counter->hstats)
		return -EOPNOTSUPP;

	mutex_lock(&port_counter->lock);
	if (on) {
		ret = __counter_set_mode(&port_counter->mode,
@@ -509,6 +512,9 @@ int rdma_counter_bind_qpn_alloc(struct ib_device *dev, u8 port,
	if (!rdma_is_port_valid(dev, port))
		return -EINVAL;

	if (!dev->port_data[port].port_counter.hstats)
		return -EOPNOTSUPP;

	qp = rdma_counter_get_qp(dev, qp_num);
	if (!qp)
		return -ENOENT;
+6 −2
Original line number Diff line number Diff line
@@ -1952,12 +1952,16 @@ static int stat_get_doit_qp(struct sk_buff *skb, struct nlmsghdr *nlh,

	if (fill_nldev_handle(msg, device) ||
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port) ||
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_MODE, mode))
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_MODE, mode)) {
		ret = -EMSGSIZE;
		goto err_msg;
	}

	if ((mode == RDMA_COUNTER_MODE_AUTO) &&
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK, mask))
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK, mask)) {
		ret = -EMSGSIZE;
		goto err_msg;
	}

	nlmsg_end(msg, nlh);
	ib_device_put(device);
+0 −4
Original line number Diff line number Diff line
@@ -112,10 +112,6 @@ static int ib_umem_notifier_release_trampoline(struct ib_umem_odp *umem_odp,
	 * prevent any further fault handling on this MR.
	 */
	ib_umem_notifier_start_account(umem_odp);
	umem_odp->dying = 1;
	/* Make sure that the fact the umem is dying is out before we release
	 * all pending page faults. */
	smp_wmb();
	complete_all(&umem_odp->notifier_completion);
	umem_odp->umem.context->invalidate_range(
		umem_odp, ib_umem_start(umem_odp), ib_umem_end(umem_odp));
+6 −5
Original line number Diff line number Diff line
@@ -2026,7 +2026,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT)(
			event_sub->eventfd =
				eventfd_ctx_fdget(redirect_fd);

			if (IS_ERR(event_sub)) {
			if (IS_ERR(event_sub->eventfd)) {
				err = PTR_ERR(event_sub->eventfd);
				event_sub->eventfd = NULL;
				goto err;
@@ -2644,12 +2644,13 @@ static int devx_async_event_close(struct inode *inode, struct file *filp)
	struct devx_async_event_file *ev_file = filp->private_data;
	struct devx_event_subscription *event_sub, *event_sub_tmp;
	struct devx_async_event_data *entry, *tmp;
	struct mlx5_ib_dev *dev = ev_file->dev;

	mutex_lock(&ev_file->dev->devx_event_table.event_xa_lock);
	mutex_lock(&dev->devx_event_table.event_xa_lock);
	/* delete the subscriptions which are related to this FD */
	list_for_each_entry_safe(event_sub, event_sub_tmp,
				 &ev_file->subscribed_events_list, file_list) {
		devx_cleanup_subscription(ev_file->dev, event_sub);
		devx_cleanup_subscription(dev, event_sub);
		if (event_sub->eventfd)
			eventfd_ctx_put(event_sub->eventfd);

@@ -2658,7 +2659,7 @@ static int devx_async_event_close(struct inode *inode, struct file *filp)
		kfree_rcu(event_sub, rcu);
	}

	mutex_unlock(&ev_file->dev->devx_event_table.event_xa_lock);
	mutex_unlock(&dev->devx_event_table.event_xa_lock);

	/* free the pending events allocation */
	if (!ev_file->omit_data) {
@@ -2670,7 +2671,7 @@ static int devx_async_event_close(struct inode *inode, struct file *filp)
	}

	uverbs_close_fd(filp);
	put_device(&ev_file->dev->ib_dev.dev);
	put_device(&dev->ib_dev.dev);
	return 0;
}

+9 −15
Original line number Diff line number Diff line
@@ -579,7 +579,6 @@ static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
			u32 flags)
{
	int npages = 0, current_seq, page_shift, ret, np;
	bool implicit = false;
	struct ib_umem_odp *odp_mr = to_ib_umem_odp(mr->umem);
	bool downgrade = flags & MLX5_PF_FLAGS_DOWNGRADE;
	bool prefetch = flags & MLX5_PF_FLAGS_PREFETCH;
@@ -594,7 +593,6 @@ static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
		if (IS_ERR(odp))
			return PTR_ERR(odp);
		mr = odp->private;
		implicit = true;
	} else {
		odp = odp_mr;
	}
@@ -682,19 +680,15 @@ next_mr:

out:
	if (ret == -EAGAIN) {
		if (implicit || !odp->dying) {
			unsigned long timeout =
				msecs_to_jiffies(MMU_NOTIFIER_TIMEOUT);
		unsigned long timeout = msecs_to_jiffies(MMU_NOTIFIER_TIMEOUT);

			if (!wait_for_completion_timeout(
					&odp->notifier_completion,
		if (!wait_for_completion_timeout(&odp->notifier_completion,
						 timeout)) {
				mlx5_ib_warn(dev, "timeout waiting for mmu notifier. seq %d against %d. notifiers_count=%d\n",
					     current_seq, odp->notifiers_seq, odp->notifiers_count);
			}
		} else {
			/* The MR is being killed, kill the QP as well. */
			ret = -EFAULT;
			mlx5_ib_warn(
				dev,
				"timeout waiting for mmu notifier. seq %d against %d. notifiers_count=%d\n",
				current_seq, odp->notifiers_seq,
				odp->notifiers_count);
		}
	}

Loading