Commit ca0f1a80 authored by Yamin Friedman, committed by Christoph Hellwig

nvmet-rdma: use new shared CQ mechanism



Have the driver use shared CQs, which provides a ~10%-20% performance
improvement when multiple disks are used. Instead of opening a CQ for
each QP per controller, the RDMA core driver provides a CQ per core
that is shared between the QPs on that core, reducing interrupt
overhead.
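
As a rough illustration of the pattern this commit adopts, the sketch
below shows a driver taking its CQ from the shared pool instead of
allocating a private one. The struct and function names are
hypothetical; ib_cq_pool_get()/ib_cq_pool_put() and IB_POLL_WORKQUEUE
are the RDMA core interfaces actually used in the diff.

#include <linux/err.h>
#include <rdma/ib_verbs.h>

/* Hypothetical queue context; nvmet-rdma's real struct is larger. */
struct example_queue {
	struct ib_device *dev;
	struct ib_cq *cq;
	int comp_vector;
	u32 recv_queue_size;
	u32 send_queue_size;
	u32 nr_cqe;
};

static int example_get_cq(struct example_queue *q)
{
	/* Same CQE budget as nvmet_rdma_create_queue_ib(): one slot per
	 * RECV plus two per send, plus one extra (nr_cqe + 1). */
	q->nr_cqe = q->recv_queue_size + 2 * q->send_queue_size;

	/* Instead of allocating a private CQ with ib_alloc_cq(), take a
	 * per-vector CQ from the core's shared pool; it may already be
	 * shared with other QPs on the same core. */
	q->cq = ib_cq_pool_get(q->dev, q->nr_cqe + 1, q->comp_vector,
			       IB_POLL_WORKQUEUE);
	if (IS_ERR(q->cq))
		return PTR_ERR(q->cq);
	return 0;
}

static void example_put_cq(struct example_queue *q)
{
	/* Return the reserved CQEs to the pool; the count must match
	 * what was requested from ib_cq_pool_get(). */
	ib_cq_pool_put(q->cq, q->nr_cqe + 1);
}

A side effect visible in the first two hunks: once the CQ is shared,
cq->cq_context no longer identifies a single nvmet_rdma_queue, so the
completion handlers recover the queue from wc->qp->qp_context instead.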

Signed-off-by: Yamin Friedman <yaminf@mellanox.com>
Signed-off-by: Max Gurtovoy <maxg@mellanox.com>
Reviewed-by: Or Gerlitz <ogerlitz@mellanox.com>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Christoph Hellwig <hch@lst.de>
parent 287f329e
drivers/nvme/target/rdma.c +7 −7
@@ -752,7 +752,7 @@ static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct nvmet_rdma_rsp *rsp =
		container_of(wc->wr_cqe, struct nvmet_rdma_rsp, read_cqe);
-	struct nvmet_rdma_queue *queue = cq->cq_context;
+	struct nvmet_rdma_queue *queue = wc->qp->qp_context;
	u16 status = 0;

	WARN_ON(rsp->n_rdma <= 0);
@@ -1008,7 +1008,7 @@ static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct nvmet_rdma_cmd *cmd =
		container_of(wc->wr_cqe, struct nvmet_rdma_cmd, cqe);
-	struct nvmet_rdma_queue *queue = cq->cq_context;
+	struct nvmet_rdma_queue *queue = wc->qp->qp_context;
	struct nvmet_rdma_rsp *rsp;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
@@ -1258,9 +1258,8 @@ static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue)
	 */
	nr_cqe = queue->recv_queue_size + 2 * queue->send_queue_size;

-	queue->cq = ib_alloc_cq(ndev->device, queue,
-			nr_cqe + 1, queue->comp_vector,
-			IB_POLL_WORKQUEUE);
+	queue->cq = ib_cq_pool_get(ndev->device, nr_cqe + 1,
+				   queue->comp_vector, IB_POLL_WORKQUEUE);
	if (IS_ERR(queue->cq)) {
		ret = PTR_ERR(queue->cq);
		pr_err("failed to create CQ cqe= %d ret= %d\n",
@@ -1322,7 +1321,7 @@ out:
err_destroy_qp:
	rdma_destroy_qp(queue->cm_id);
err_destroy_cq:
-	ib_free_cq(queue->cq);
+	ib_cq_pool_put(queue->cq, nr_cqe + 1);
	goto out;
}

@@ -1332,7 +1331,8 @@ static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue)
	if (queue->cm_id)
		rdma_destroy_id(queue->cm_id);
	ib_destroy_qp(queue->qp);
-	ib_free_cq(queue->cq);
+	ib_cq_pool_put(queue->cq, queue->recv_queue_size + 2 *
+		       queue->send_queue_size + 1);
}

static void nvmet_rdma_free_queue(struct nvmet_rdma_queue *queue)