Commit aa0c9086 authored by Linus Torvalds
Pull rdma fixes from Jason Gunthorpe:
 "Small update, a few more merge window bugs and normal driver bug
  fixes:

   - Two merge window regressions in mlx5: an error path bug found by
     syzkaller, and code lost during a rework that prevented ipoib from
     working in some configurations

   - Silence clang compilation warning in OPA related code

   - Fix a long-standing race condition in ib_nl for ACM

   - Fix crashes in HFI1 when the device is shut down while its
     workqueues are still in use"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  RDMA/mlx5: Set PD pointers for the error flow unwind
  IB/mlx5: Fix 50G per lane indication
  RDMA/siw: Fix reporting vendor_part_id
  IB/sa: Resolv use-after-free in ib_nl_make_request()
  IB/hfi1: Do not destroy link_wq when the device is shut down
  IB/hfi1: Do not destroy hfi1_wq when the device is shut down
  RDMA/mlx5: Fix legacy IPoIB QP initialization
  IB/hfi1: Add explicit cast OPA_MTU_8192 to 'enum ib_mtu'
parents 0f318cba 0a037150
drivers/infiniband/core/sa_query.c +17 −21
@@ -829,13 +829,20 @@ static int ib_nl_get_path_rec_attrs_len(ib_sa_comp_mask comp_mask)
 	return len;
 }
 
-static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask)
+static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask)
 {
 	struct sk_buff *skb = NULL;
 	struct nlmsghdr *nlh;
 	void *data;
 	struct ib_sa_mad *mad;
 	int len;
+	unsigned long flags;
+	unsigned long delay;
+	gfp_t gfp_flag;
+	int ret;
+
+	INIT_LIST_HEAD(&query->list);
+	query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq);
 
 	mad = query->mad_buf->mad;
 	len = ib_nl_get_path_rec_attrs_len(mad->sa_hdr.comp_mask);
@@ -860,36 +867,25 @@ static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask)
 	/* Repair the nlmsg header length */
 	nlmsg_end(skb, nlh);
 
-	return rdma_nl_multicast(&init_net, skb, RDMA_NL_GROUP_LS, gfp_mask);
-}
+	gfp_flag = ((gfp_mask & GFP_ATOMIC) == GFP_ATOMIC) ? GFP_ATOMIC :
+		GFP_NOWAIT;
 
-static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask)
-{
-	unsigned long flags;
-	unsigned long delay;
-	int ret;
+	spin_lock_irqsave(&ib_nl_request_lock, flags);
+	ret = rdma_nl_multicast(&init_net, skb, RDMA_NL_GROUP_LS, gfp_flag);
 
-	INIT_LIST_HEAD(&query->list);
-	query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq);
+	if (ret)
+		goto out;
 
-	/* Put the request on the list first.*/
-	spin_lock_irqsave(&ib_nl_request_lock, flags);
+	/* Put the request on the list.*/
 	delay = msecs_to_jiffies(sa_local_svc_timeout_ms);
 	query->timeout = delay + jiffies;
 	list_add_tail(&query->list, &ib_nl_request_list);
 	/* Start the timeout if this is the only request */
 	if (ib_nl_request_list.next == &query->list)
 		queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);
-	spin_unlock_irqrestore(&ib_nl_request_lock, flags);
 
-	ret = ib_nl_send_msg(query, gfp_mask);
-	if (ret) {
-		ret = -EIO;
-		/* Remove the request */
-		spin_lock_irqsave(&ib_nl_request_lock, flags);
-		list_del(&query->list);
-		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
-	}
+out:
+	spin_unlock_irqrestore(&ib_nl_request_lock, flags);
 
 	return ret;
 }
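
The use-after-free fixed above came from the original ordering: the query
was added to ib_nl_request_list and the lock dropped before the netlink
send ran, so a fast response or the timeout worker could free the request
while ib_nl_send_msg(), or its list_del() error path, still touched it.
The rework sends first and publishes the request only on success, both
under ib_nl_request_lock; because the send now runs inside a spinlock,
the allocation flags are clamped to the non-sleeping GFP_ATOMIC or
GFP_NOWAIT. A minimal userspace sketch of the same pattern, using
pthreads and invented names (request_lock, pending, fake_send) rather
than the kernel API; build with cc -pthread:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct request {
	struct request *next;
	unsigned int seq;
};

static pthread_mutex_t request_lock = PTHREAD_MUTEX_INITIALIZER;
static struct request *pending;	/* requests visible to the completer */

/* Stand-in for rdma_nl_multicast(); like the kernel path it must not
 * sleep while the lock is held, which is why the patch clamps gfp_mask
 * to GFP_ATOMIC/GFP_NOWAIT. */
static int fake_send(struct request *rq)
{
	printf("sent request %u\n", rq->seq);
	return 0;
}

static int make_request(struct request *rq)
{
	int ret;

	pthread_mutex_lock(&request_lock);
	ret = fake_send(rq);
	if (ret == 0) {
		/* Publish only after a successful send, atomically with
		 * it, so the completer never sees a half-sent request. */
		rq->next = pending;
		pending = rq;
	}
	pthread_mutex_unlock(&request_lock);
	return ret;
}

/* Completer side: detach and free under the same lock. */
static void complete_one(unsigned int seq)
{
	struct request **pp, *rq;

	pthread_mutex_lock(&request_lock);
	for (pp = &pending; (rq = *pp) != NULL; pp = &rq->next) {
		if (rq->seq == seq) {
			*pp = rq->next;
			free(rq);
			break;
		}
	}
	pthread_mutex_unlock(&request_lock);
}

int main(void)
{
	struct request *rq = calloc(1, sizeof(*rq));

	if (!rq)
		return 1;
	rq->seq = 1;
	if (make_request(rq) == 0)
		complete_one(1);
	return 0;
}
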
drivers/infiniband/hw/hfi1/init.c +28 −9
@@ -830,6 +830,29 @@ wq_error:
 	return -ENOMEM;
 }
 
+/**
+ * destroy_workqueues - destroy per port workqueues
+ * @dd: the hfi1_ib device
+ */
+static void destroy_workqueues(struct hfi1_devdata *dd)
+{
+	int pidx;
+	struct hfi1_pportdata *ppd;
+
+	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+		ppd = dd->pport + pidx;
+
+		if (ppd->hfi1_wq) {
+			destroy_workqueue(ppd->hfi1_wq);
+			ppd->hfi1_wq = NULL;
+		}
+		if (ppd->link_wq) {
+			destroy_workqueue(ppd->link_wq);
+			ppd->link_wq = NULL;
+		}
+	}
+}
+
 /**
  * enable_general_intr() - Enable the IRQs that will be handled by the
  * general interrupt handler.
@@ -1103,15 +1126,10 @@ static void shutdown_device(struct hfi1_devdata *dd)
 		 * We can't count on interrupts since we are stopping.
 		 */
 		hfi1_quiet_serdes(ppd);
-
-		if (ppd->hfi1_wq) {
-			destroy_workqueue(ppd->hfi1_wq);
-			ppd->hfi1_wq = NULL;
-		}
-		if (ppd->link_wq) {
-			destroy_workqueue(ppd->link_wq);
-			ppd->link_wq = NULL;
-		}
+		if (ppd->hfi1_wq)
+			flush_workqueue(ppd->hfi1_wq);
+		if (ppd->link_wq)
+			flush_workqueue(ppd->link_wq);
 	}
 	sdma_exit(dd);
 }
@@ -1756,6 +1774,7 @@ static void remove_one(struct pci_dev *pdev)
 	 * clear dma engines, etc.
 	 */
 	shutdown_device(dd);
+	destroy_workqueues(dd);
 
 	stop_timers(dd);
 
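Taken together, the hunks above split the workqueue lifecycle in two:
shutdown_device() now only flushes the per-port workqueues, and the new
destroy_workqueues() frees them later from remove_one(). Destroying them
at shutdown was the bug, because the scheduling paths changed below can
still test ppd->hfi1_wq and queue work after shutdown has run. A skeleton
sketch of that ordering for a hypothetical PCI driver (the toy_* names
are invented; probe and driver registration are omitted):

#include <linux/pci.h>
#include <linux/workqueue.h>

struct toy_devdata {
	struct workqueue_struct *wq;	/* allocated in probe (not shown) */
};

/* Shutdown: quiesce only. Queued work drains, but the workqueue stays
 * allocated because other paths may still check dd->wq and queue on it
 * until the device is actually removed. */
static void toy_shutdown(struct pci_dev *pdev)
{
	struct toy_devdata *dd = pci_get_drvdata(pdev);

	if (dd->wq)
		flush_workqueue(dd->wq);
}

/* Remove: nothing can queue any more, so destroying is now safe. */
static void toy_remove(struct pci_dev *pdev)
{
	struct toy_devdata *dd = pci_get_drvdata(pdev);

	toy_shutdown(pdev);
	if (dd->wq) {
		destroy_workqueue(dd->wq);
		dd->wq = NULL;
	}
}
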
drivers/infiniband/hw/hfi1/qp.c +5 −2
@@ -195,7 +195,7 @@ static inline int verbs_mtu_enum_to_int(struct ib_device *dev, enum ib_mtu mtu)
 {
 	/* Constraining 10KB packets to 8KB packets */
 	if (mtu == (enum ib_mtu)OPA_MTU_10240)
-		mtu = OPA_MTU_8192;
+		mtu = (enum ib_mtu)OPA_MTU_8192;
 	return opa_mtu_enum_to_int((enum opa_mtu)mtu);
 }
 
@@ -367,7 +367,10 @@ bool _hfi1_schedule_send(struct rvt_qp *qp)
 	struct hfi1_ibport *ibp =
 		to_iport(qp->ibqp.device, qp->port_num);
 	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
-	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
+	struct hfi1_devdata *dd = ppd->dd;
+
+	if (dd->flags & HFI1_SHUTDOWN)
+		return true;
 
 	return iowait_schedule(&priv->s_iowait, ppd->hfi1_wq,
 			       priv->s_sde ?
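
Two independent fixes share this file. The cast addresses clang's
-Wenum-conversion: OPA_MTU_8192 is an enum opa_mtu value, so assigning
it to an enum ib_mtu variable warns unless the conversion is explicit,
mirroring the cast the comparison above it already uses. The new
HFI1_SHUTDOWN check makes _hfi1_schedule_send() report the send as
handled during shutdown instead of queueing onto a workqueue that is
being quiesced; the next hunk adds the same guard to
_hfi1_schedule_tid_send(). A standalone illustration of the warning
(trimmed enums whose values mirror the kernel's definitions; compile
with clang -Wenum-conversion -c):

/* Trimmed copies of the kernel enums, just enough to show the warning. */
enum ib_mtu  { IB_MTU_4096 = 5 };
enum opa_mtu { OPA_MTU_8192 = 6, OPA_MTU_10240 = 7 };

int constrain_mtu(enum ib_mtu mtu)
{
	if (mtu == (enum ib_mtu)OPA_MTU_10240)
		/* Without the cast, clang warns about an implicit
		 * conversion between different enumeration types. */
		mtu = (enum ib_mtu)OPA_MTU_8192;
	return (int)mtu;
}
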
drivers/infiniband/hw/hfi1/tid_rdma.c +4 −1
@@ -5406,7 +5406,10 @@ static bool _hfi1_schedule_tid_send(struct rvt_qp *qp)
 	struct hfi1_ibport *ibp =
 		to_iport(qp->ibqp.device, qp->port_num);
 	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
-	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
+	struct hfi1_devdata *dd = ppd->dd;
+
+	if ((dd->flags & HFI1_SHUTDOWN))
+		return true;
 
 	return iowait_tid_schedule(&priv->s_iowait, ppd->hfi1_wq,
 				   priv->s_sde ?
drivers/infiniband/hw/mlx5/main.c +1 −1
@@ -511,7 +511,7 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
 					   mdev_port_num);
 	if (err)
 		goto out;
-	ext = MLX5_CAP_PCAM_FEATURE(dev->mdev, ptys_extended_ethernet);
+	ext = !!MLX5_GET_ETH_PROTO(ptys_reg, out, true, eth_proto_capability);
 	eth_prot_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, eth_proto_oper);
 
 	props->active_width     = IB_WIDTH_4X;
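
The mlx5 one-liner changes where the "extended protocols" flag comes
from: rather than a PCAM capability bit, ext is now derived from the
PTYS query result itself, with a non-zero extended eth_proto_capability
field meaning the extended layout (which carries the 50G-per-lane
speeds) is valid; !! normalizes the mask to 0/1. A generic sketch of the
idiom with invented names, not the mlx5 macros:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical decoded query result, not the real PTYS register layout. */
struct port_protos {
	uint32_t ext_proto_cap;		/* extended-layout capability mask */
	uint32_t ext_proto_oper;	/* operational speeds, extended layout */
	uint32_t legacy_proto_oper;	/* operational speeds, legacy layout */
};

static uint32_t proto_oper(const struct port_protos *p)
{
	/* !! collapses any non-zero mask to 1, so the device's own answer,
	 * not a separate capability bit, selects the layout to decode. */
	bool ext = !!p->ext_proto_cap;

	return ext ? p->ext_proto_oper : p->legacy_proto_oper;
}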