Commit d6cff021 authored by Roland Dreier
Browse files

[PATCH] IB/mthca: fix posting of first work request



Fix posting first WQE for mem-free HCAs: we need to link to previous
WQE even in that case.  While we're at it, simplify code for
Tavor-mode HCAs.  We don't really need the conditional test there
either; we can similarly always link to the previous WQE.

Based on Michael S. Tsirkin's analogous fix for userspace libmthca.

Signed-off-by: Roland Dreier <rolandd@cisco.com>
parent bb4a7f0d
Loading
Loading
Loading
Loading
+22 −26
Original line number Diff line number Diff line
@@ -227,7 +227,6 @@ static void mthca_wq_init(struct mthca_wq *wq)
	wq->last_comp = wq->max - 1;
	wq->head      = 0;
	wq->tail      = 0;
	wq->last      = NULL;
}

void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
@@ -1103,6 +1102,9 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev,
		}
	}

	qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);
	qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1);

	return 0;
}

@@ -1583,7 +1585,6 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
			goto out;
		}

		if (prev_wqe) {
		((struct mthca_next_seg *) prev_wqe)->nda_op =
			cpu_to_be32(((ind << qp->sq.wqe_shift) +
				     qp->send_wqe_offset) |
@@ -1591,7 +1592,6 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		wmb();
		((struct mthca_next_seg *) prev_wqe)->ee_nds =
			cpu_to_be32((size0 ? 0 : MTHCA_NEXT_DBD) | size);
		}

		if (!size0) {
			size0 = size;
@@ -1688,13 +1688,11 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,

		qp->wrid[ind] = wr->wr_id;

		if (likely(prev_wqe)) {
		((struct mthca_next_seg *) prev_wqe)->nda_op =
			cpu_to_be32((ind << qp->rq.wqe_shift) | 1);
		wmb();
		((struct mthca_next_seg *) prev_wqe)->ee_nds =
			cpu_to_be32(MTHCA_NEXT_DBD | size);
		}

		if (!size0)
			size0 = size;
@@ -1905,7 +1903,6 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
			goto out;
		}

		if (likely(prev_wqe)) {
		((struct mthca_next_seg *) prev_wqe)->nda_op =
			cpu_to_be32(((ind << qp->sq.wqe_shift) +
				     qp->send_wqe_offset) |
@@ -1913,7 +1910,6 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		wmb();
		((struct mthca_next_seg *) prev_wqe)->ee_nds =
			cpu_to_be32(MTHCA_NEXT_DBD | size);
		}

		if (!size0) {
			size0 = size;
+6 −8
Original line number Diff line number Diff line
@@ -189,7 +189,6 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,

	srq->max      = attr->max_wr;
	srq->max_gs   = attr->max_sge;
	srq->last     = NULL;
	srq->counter  = 0;

	if (mthca_is_memfree(dev))
@@ -264,6 +263,7 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,

	srq->first_free = 0;
	srq->last_free  = srq->max - 1;
	srq->last	= get_wqe(srq, srq->max - 1);

	return 0;

@@ -446,13 +446,11 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			((struct mthca_data_seg *) wqe)->addr = 0;
		}

		if (likely(prev_wqe)) {
		((struct mthca_next_seg *) prev_wqe)->nda_op =
			cpu_to_be32((ind << srq->wqe_shift) | 1);
		wmb();
		((struct mthca_next_seg *) prev_wqe)->ee_nds =
			cpu_to_be32(MTHCA_NEXT_DBD);
		}

		srq->wrid[ind]  = wr->wr_id;
		srq->first_free = next_ind;