Commit 5e7c1b75 authored by Linus Torvalds
Pull rdma fixes from Jason Gunthorpe:
 "First RDMA subsystem updates for 5.5-rc. A very small set of fixes,
  most people seem to still be recovering from December!

  Five small driver fixes:

   - Fix error flow with MR allocation in bnxt_re

   - A workaround for a hardware erratum in bnxt_re

   - Misuse of the workqueue API in hfi1

   - Protocol error in hfi1

   - Regression in 5.5 related to the mmap rework with i40iw"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  i40iw: Remove setting of VMA private data and use rdma_user_mmap_io
  IB/hfi1: Adjust flow PSN with the correct resync_psn
  IB/hfi1: Don't cancel unused work item
  RDMA/bnxt_re: Fix Send Work Entry state check while polling completions
  RDMA/bnxt_re: Avoid freeing MR resources if dereg fails
parents 4a3033ef 9554de39
+3 −1
@@ -3305,8 +3305,10 @@ int bnxt_re_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
 	int rc;
 
 	rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
-	if (rc)
+	if (rc) {
 		dev_err(rdev_to_dev(rdev), "Dereg MR failed: %#x\n", rc);
+		return rc;
+	}
 
 	if (mr->pages) {
 		rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
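
Note: the fix turns a log-and-continue into an early return, so the driver no longer tears down mr->pages and the rest of the MR state after the firmware declined to release the MR. A minimal user-space sketch of the same error-flow rule (all names here are illustrative, not the driver's):

	#include <stdio.h>
	#include <stdlib.h>

	struct mr {
		void *pages;	/* extra state owned by the MR */
	};

	/* stand-in for the firmware call that can fail */
	static int fw_free_mr(struct mr *mr)
	{
		(void)mr;
		return -1;	/* simulate a firmware failure */
	}

	static int dereg_mr(struct mr *mr)
	{
		int rc = fw_free_mr(mr);

		if (rc) {
			fprintf(stderr, "Dereg MR failed: %#x\n", rc);
			return rc;	/* bail out: mr->pages must stay allocated */
		}
		free(mr->pages);	/* only reached once the firmware released the MR */
		mr->pages = NULL;
		return 0;
	}

	int main(void)
	{
		struct mr mr = { .pages = malloc(16) };

		if (dereg_mr(&mr))	/* fails here; mr.pages is intentionally kept */
			free(mr.pages);	/* caller can retry or clean up later */
		return 0;
	}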
+6 −6
@@ -2283,13 +2283,13 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
 			/* Add qp to flush list of the CQ */
 			bnxt_qplib_add_flush_qp(qp);
 		} else {
+			/* Before we complete, do WA 9060 */
+			if (do_wa9060(qp, cq, cq_cons, sw_sq_cons,
+				      cqe_sq_cons)) {
+				*lib_qp = qp;
+				goto out;
+			}
 			if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
-				/* Before we complete, do WA 9060 */
-				if (do_wa9060(qp, cq, cq_cons, sw_sq_cons,
-					      cqe_sq_cons)) {
-					*lib_qp = qp;
-					goto out;
-				}
 				cqe->status = CQ_REQ_STATUS_OK;
 				cqe++;
 				(*budget)--;
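
Note: the hunk hoists the WA 9060 check out of the SQ_SEND_FLAGS_SIGNAL_COMP branch, so the workaround is now evaluated for every polled send work entry instead of only the signaled ones. A minimal user-space sketch of that control-flow change (names illustrative):

	#include <stdbool.h>

	struct entry {
		bool signaled;
	};

	/* stand-in for the do_wa9060() hardware check */
	static bool needs_workaround(const struct entry *e)
	{
		(void)e;
		return false;
	}

	/* polling-loop body: the workaround check must see every entry,
	 * not only the signaled ones */
	static void process_entry(struct entry *e, int *budget)
	{
		if (needs_workaround(e))
			return;		/* stop polling here, as the driver does via goto out */

		if (e->signaled) {
			/* emit a completion entry */
			(*budget)--;
		}
	}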
+3 −1
@@ -81,6 +81,8 @@ void iowait_init(struct iowait *wait, u32 tx_limit,
 void iowait_cancel_work(struct iowait *w)
 {
 	cancel_work_sync(&iowait_get_ib_work(w)->iowork);
-	cancel_work_sync(&iowait_get_tid_work(w)->iowork);
+	/* Make sure that the iowork for TID RDMA is used */
+	if (iowait_get_tid_work(w)->iowork.func)
+		cancel_work_sync(&iowait_get_tid_work(w)->iowork);
 }
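
Note: the added check skips cancel_work_sync() on a work item whose handler was never set, i.e. one that was never initialized because TID RDMA is not in use on that device. A user-space sketch of the same guard idiom (names illustrative):

	#include <stddef.h>

	struct work {
		void (*func)(struct work *w);	/* NULL until the item is armed */
	};

	/* stand-in for cancel_work_sync(); only valid on an initialized item */
	void sync_cancel(struct work *w)
	{
		w->func(w);
	}

	/* the guard added above: never touch an item that was never set up */
	void cancel_if_armed(struct work *w)
	{
		if (w->func)
			sync_cancel(w);
	}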

+9 −0
@@ -4633,6 +4633,15 @@ void hfi1_rc_rcv_tid_rdma_ack(struct hfi1_packet *packet)
 			 */
 			fpsn = full_flow_psn(flow, flow->flow_state.spsn);
 			req->r_ack_psn = psn;
+			/*
+			 * If resync_psn points to the last flow PSN for a
+			 * segment and the new segment (likely from a new
+			 * request) starts with a new generation number, we
+			 * need to adjust resync_psn accordingly.
+			 */
+			if (flow->flow_state.generation !=
+			    (resync_psn >> HFI1_KDETH_BTH_SEQ_SHIFT))
+				resync_psn = mask_psn(fpsn - 1);
 			flow->resync_npkts +=
 				delta_psn(mask_psn(resync_psn + 1), fpsn);
 			/*
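
Note: a KDETH flow PSN keeps a generation number in the bits above HFI1_KDETH_BTH_SEQ_SHIFT, so resync_psn is only trusted when its generation matches the flow's; otherwise it is recomputed from the segment's first flow PSN. A standalone sketch of that arithmetic (the shift matches the driver; the 31-bit PSN mask is an assumption for illustration):

	#include <stdint.h>

	#define HFI1_KDETH_BTH_SEQ_SHIFT 11
	#define PSN_MASK 0x7fffffffu	/* assumed 31-bit PSN space */

	uint32_t mask_psn(uint32_t psn)
	{
		return psn & PSN_MASK;
	}

	/* the generation lives in the bits above the per-generation sequence */
	int same_generation(uint32_t generation, uint32_t resync_psn)
	{
		return generation == (resync_psn >> HFI1_KDETH_BTH_SEQ_SHIFT);
	}

	/* the adjustment from the hunk above: when generations differ,
	 * pull resync_psn back to just before the segment's first flow PSN */
	uint32_t adjust_resync_psn(uint32_t resync_psn, uint32_t generation,
				   uint32_t fpsn)
	{
		if (!same_generation(generation, resync_psn))
			resync_psn = mask_psn(fpsn - 1);
		return resync_psn;
	}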
+6 −8
@@ -169,8 +169,7 @@ static void i40iw_dealloc_ucontext(struct ib_ucontext *context)
 static int i40iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 {
 	struct i40iw_ucontext *ucontext;
-	u64 db_addr_offset;
-	u64 push_offset;
+	u64 db_addr_offset, push_offset, pfn;
 
 	ucontext = to_ucontext(context);
 	if (ucontext->iwdev->sc_dev.is_pf) {
@@ -189,7 +188,6 @@ static int i40iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 
 	if (vma->vm_pgoff == (db_addr_offset >> PAGE_SHIFT)) {
 		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-		vma->vm_private_data = ucontext;
 	} else {
 		if ((vma->vm_pgoff - (push_offset >> PAGE_SHIFT)) % 2)
 			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
@@ -197,12 +195,12 @@ static int i40iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 			vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 	}
 
-	if (io_remap_pfn_range(vma, vma->vm_start,
-			       vma->vm_pgoff + (pci_resource_start(ucontext->iwdev->ldev->pcidev, 0) >> PAGE_SHIFT),
-			       PAGE_SIZE, vma->vm_page_prot))
-		return -EAGAIN;
+	pfn = vma->vm_pgoff +
+	      (pci_resource_start(ucontext->iwdev->ldev->pcidev, 0) >>
+	       PAGE_SHIFT);
 
-	return 0;
+	return rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
+				 vma->vm_page_prot, NULL);
 }
 
 /**
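
Note: unlike a bare io_remap_pfn_range(), rdma_user_mmap_io() records the mapping against the ucontext so the RDMA core can zap user mappings if the device is disassociated, and it removes the need to stash the ucontext in vm_private_data, which is why that assignment is dropped above. A kernel-side sketch of the resulting handler shape (the mydev_* names and the one-page check are hypothetical, not i40iw's code):

	#include <linux/mm.h>
	#include <rdma/ib_verbs.h>

	/* hypothetical helper: translate a vm_pgoff into a BAR pfn */
	unsigned long mydev_bar_pfn(struct ib_ucontext *context, unsigned long pgoff);

	static int mydev_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
	{
		unsigned long pfn;

		/* map exactly one page, mirroring the PAGE_SIZE length above */
		if (vma->vm_end - vma->vm_start != PAGE_SIZE)
			return -EINVAL;

		pfn = mydev_bar_pfn(context, vma->vm_pgoff);

		/* the core tracks this mapping and can zap it on device removal */
		return rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
					 pgprot_noncached(vma->vm_page_prot), NULL);
	}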