Commit 5b2322e4 authored by Logan Gunthorpe, committed by Bjorn Helgaas

nvmet: Introduce helper functions to allocate and free request SGLs

Add helpers to allocate and free the SGL in a struct nvmet_req:

  int nvmet_req_alloc_sgl(struct nvmet_req *req)
  void nvmet_req_free_sgl(struct nvmet_req *req)

This will be expanded in a future patch to implement peer-to-peer memory
DMAs and should be common with all target drivers.

The new helpers are used in nvmet-rdma.  Since req.transfer_len is now
used as the length of the SGL, it is set earlier and cleared on any
error.  It also seems unnecessary to accumulate the length, as the
map_sgl functions should only ever be called once per request.
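
For illustration only, a minimal sketch of the call pattern the
helpers expect (hypothetical driver code, not part of this patch):
set req->transfer_len before allocating, since nvmet_req_alloc_sgl()
sizes the SGL from it, and clear it again if a later mapping step
fails:

  static u16 example_map_data(struct nvmet_req *req, u32 len)
  {
  	req->transfer_len = len;	/* alloc helper reads this */
  	if (!req->transfer_len)		/* no-data command */
  		return 0;

  	if (nvmet_req_alloc_sgl(req) < 0) {
  		req->transfer_len = 0;	/* clear length on error */
  		return NVME_SC_INTERNAL;
  	}

  	/* ... map req->sg / req->sg_cnt for DMA ... */
  	return 0;
  }

  static void example_release(struct nvmet_req *req)
  {
  	/* frees req->sg and resets req->sg / req->sg_cnt */
  	nvmet_req_free_sgl(req);
  }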

Signed-off-by: Logan Gunthorpe <logang@deltatee.com>
Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Acked-by: Sagi Grimberg <sagi@grimberg.me>
parent e0596ab2
drivers/nvme/target/core.c  +18 −0
@@ -725,6 +725,24 @@ void nvmet_req_execute(struct nvmet_req *req)
 }
 EXPORT_SYMBOL_GPL(nvmet_req_execute);
 
+int nvmet_req_alloc_sgl(struct nvmet_req *req)
+{
+	req->sg = sgl_alloc(req->transfer_len, GFP_KERNEL, &req->sg_cnt);
+	if (!req->sg)
+		return -ENOMEM;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(nvmet_req_alloc_sgl);
+
+void nvmet_req_free_sgl(struct nvmet_req *req)
+{
+	sgl_free(req->sg);
+	req->sg = NULL;
+	req->sg_cnt = 0;
+}
+EXPORT_SYMBOL_GPL(nvmet_req_free_sgl);
+
 static inline bool nvmet_cc_en(u32 cc)
 {
 	return (cc >> NVME_CC_EN_SHIFT) & 0x1;
drivers/nvme/target/nvmet.h  +2 −0
@@ -336,6 +336,8 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
 void nvmet_req_uninit(struct nvmet_req *req);
 void nvmet_req_execute(struct nvmet_req *req);
 void nvmet_req_complete(struct nvmet_req *req, u16 status);
+int nvmet_req_alloc_sgl(struct nvmet_req *req);
+void nvmet_req_free_sgl(struct nvmet_req *req);
 
 void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid,
 		u16 size);
drivers/nvme/target/rdma.c  +12 −8
@@ -503,7 +503,7 @@ static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp)
 	}
 
 	if (rsp->req.sg != rsp->cmd->inline_sg)
-		sgl_free(rsp->req.sg);
+		nvmet_req_free_sgl(&rsp->req);
 
 	if (unlikely(!list_empty_careful(&queue->rsp_wr_wait_list)))
 		nvmet_rdma_process_wr_wait_list(queue);
@@ -652,24 +652,24 @@ static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
 {
 	struct rdma_cm_id *cm_id = rsp->queue->cm_id;
 	u64 addr = le64_to_cpu(sgl->addr);
-	u32 len = get_unaligned_le24(sgl->length);
 	u32 key = get_unaligned_le32(sgl->key);
 	int ret;
 
+	rsp->req.transfer_len = get_unaligned_le24(sgl->length);
+
 	/* no data command? */
-	if (!len)
+	if (!rsp->req.transfer_len)
 		return 0;
 
-	rsp->req.sg = sgl_alloc(len, GFP_KERNEL, &rsp->req.sg_cnt);
-	if (!rsp->req.sg)
-		return NVME_SC_INTERNAL;
+	ret = nvmet_req_alloc_sgl(&rsp->req);
+	if (ret < 0)
+		goto error_out;
 
 	ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num,
 			rsp->req.sg, rsp->req.sg_cnt, 0, addr, key,
 			nvmet_data_dir(&rsp->req));
 	if (ret < 0)
-		return NVME_SC_INTERNAL;
-	rsp->req.transfer_len += len;
+		goto error_out;
 	rsp->n_rdma += ret;
 
 	if (invalidate) {
@@ -678,6 +678,10 @@ static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
 	}
 
 	return 0;
+
+error_out:
+	rsp->req.transfer_len = 0;
+	return NVME_SC_INTERNAL;
 }
 
 static u16 nvmet_rdma_map_sgl(struct nvmet_rdma_rsp *rsp)