Commit 1fbf9742 authored by James Smart, committed by Martin K. Petersen
Browse files

scsi: lpfc: Convert ring number to hardware queue for nvme wqe posting.



SLI4 nvme functions are passing the SLI3 ring number when posting wqe to
hardware. This should be indicating the hardware queue to use, not the ring
number.

Replace ring number with the hardware queue that should be used.

Note: SCSI avoided this issue as it utilized an older lpfc_issue_iocb
routine that properly adapts.

Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <jsmart2021@gmail.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent 4c47efc1
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -315,8 +315,8 @@ void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *, LPFC_MBOXQ_t *);
int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t,
			struct lpfc_iocbq *, uint32_t);
int lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t rnum,
			struct lpfc_iocbq *iocbq);
int lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
			struct lpfc_iocbq *pwqe);
struct lpfc_sglq *__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xri);
struct lpfc_sglq *__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba,
					    struct lpfc_iocbq *piocbq);
+2 −1
Original line number Diff line number Diff line
@@ -3734,7 +3734,8 @@ lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf)
				return cnt;
			cnt++;
			qp = &phba->sli4_hba.hdwq[idx];
			lpfc_cmd->hdwq = idx;
			lpfc_cmd->hdwq_no = idx;
			lpfc_cmd->hdwq = qp;
			lpfc_cmd->cur_iocbq.wqe_cmpl = NULL;
			lpfc_cmd->cur_iocbq.iocb_cmpl = NULL;
			spin_lock(&qp->io_buf_list_put_lock);
+6 −5
Original line number Diff line number Diff line
@@ -528,7 +528,7 @@ lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
	lpfc_nvmeio_data(phba, "NVME LS  XMIT: xri x%x iotag x%x to x%06x\n",
			 genwqe->sli4_xritag, genwqe->iotag, ndlp->nlp_DID);

	rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, genwqe);
	rc = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[0], genwqe);
	if (rc) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "6045 Issue GEN REQ WQE to NPORT x%x "
@@ -1605,7 +1605,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
			 lpfc_ncmd->cur_iocbq.sli4_xritag,
			 lpfc_queue_info->index, ndlp->nlp_DID);

	ret = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, &lpfc_ncmd->cur_iocbq);
	ret = lpfc_sli4_issue_wqe(phba, lpfc_ncmd->hdwq, &lpfc_ncmd->cur_iocbq);
	if (ret) {
		atomic_inc(&lport->xmt_fcp_wqerr);
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
@@ -1867,7 +1867,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
	abts_buf->hba_wqidx = nvmereq_wqe->hba_wqidx;
	abts_buf->vport = vport;
	abts_buf->wqe_cmpl = lpfc_nvme_abort_fcreq_cmpl;
	ret_val = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_buf);
	ret_val = lpfc_sli4_issue_wqe(phba, lpfc_nbuf->hdwq, abts_buf);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	if (ret_val) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
@@ -1978,7 +1978,8 @@ lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		pwqeq->wqe_cmpl = lpfc_nvme_io_cmd_wqe_cmpl;
		lpfc_ncmd->start_time = jiffies;
		lpfc_ncmd->flags = 0;
		lpfc_ncmd->hdwq = idx;
		lpfc_ncmd->hdwq = qp;
		lpfc_ncmd->hdwq_no = idx;

		/* Rsp SGE will be filled in when we rcv an IO
		 * from the NVME Layer to be sent.
@@ -2026,7 +2027,7 @@ lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_nvme_buf *lpfc_ncmd)
	lpfc_ncmd->ndlp = NULL;
	lpfc_ncmd->flags &= ~LPFC_BUMP_QDEPTH;

	qp = &phba->sli4_hba.hdwq[lpfc_ncmd->hdwq];
	qp = lpfc_ncmd->hdwq;
	if (lpfc_ncmd->flags & LPFC_SBUF_XBUSY) {
		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
				"6310 XB release deferred for "
+2 −1
Original line number Diff line number Diff line
@@ -79,7 +79,8 @@ struct lpfc_nvme_buf {
	dma_addr_t dma_phys_sgl;
	struct sli4_sge *dma_sgl;
	struct lpfc_iocbq cur_iocbq;
	uint16_t hdwq;
	struct lpfc_sli4_hdw_queue *hdwq;
	uint16_t hdwq_no;
	uint16_t cpu;

	/* NVME specific fields */
+26 −8
Original line number Diff line number Diff line
@@ -845,7 +845,7 @@ lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
	lpfc_nvmeio_data(phba, "NVMET LS  RESP: xri x%x wqidx x%x len x%x\n",
			 ctxp->oxid, nvmewqeq->hba_wqidx, rsp->rsplen);

	rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, nvmewqeq);
	rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
	if (rc == WQE_SUCCESS) {
		/*
		 * Okay to repost buffer here, but wait till cmpl
@@ -901,6 +901,11 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
		else
			ctxp->ts_nvme_data = ktime_get_ns();
	}

	/* Setup the hdw queue if not already set */
	if (!ctxp->hdwq)
		ctxp->hdwq = &phba->sli4_hba.hdwq[rsp->hwqid];

	if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
		int id = smp_processor_id();
		if (id < LPFC_CHECK_CPU_CNT) {
@@ -946,7 +951,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
			 ctxp->oxid, rsp->op, rsp->rsplen);

	ctxp->flag |= LPFC_NVMET_IO_INP;
	rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, nvmewqeq);
	rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
	if (rc == WQE_SUCCESS) {
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		if (!ctxp->ts_cmd_nvme)
@@ -965,7 +970,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
		 * WQE release CQE
		 */
		ctxp->flag |= LPFC_NVMET_DEFER_WQFULL;
		wq = phba->sli4_hba.hdwq[rsp->hwqid].nvme_wq;
		wq = ctxp->hdwq->nvme_wq;
		pring = wq->pring;
		spin_lock_irqsave(&pring->ring_lock, iflags);
		list_add_tail(&nvmewqeq->list, &wq->wqfull_list);
@@ -1015,6 +1020,9 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
	if (phba->pport->load_flag & FC_UNLOADING)
		return;

	if (!ctxp->hdwq)
		ctxp->hdwq = &phba->sli4_hba.hdwq[0];

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6103 NVMET Abort op: oxri x%x flg x%x ste %d\n",
			ctxp->oxid, ctxp->flag, ctxp->state);
@@ -1039,7 +1047,7 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
	if (ctxp->flag & LPFC_NVMET_DEFER_WQFULL) {
		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
						 ctxp->oxid);
		wq = phba->sli4_hba.hdwq[ctxp->wqeq->hba_wqidx].nvme_wq;
		wq = ctxp->hdwq->nvme_wq;
		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
		lpfc_nvmet_wqfull_flush(phba, wq, ctxp);
		return;
@@ -1649,6 +1657,7 @@ lpfc_nvmet_wqfull_process(struct lpfc_hba *phba,
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *nvmewqeq;
	struct lpfc_nvmet_rcv_ctx *ctxp;
	unsigned long iflags;
	int rc;

@@ -1662,7 +1671,8 @@ lpfc_nvmet_wqfull_process(struct lpfc_hba *phba,
		list_remove_head(&wq->wqfull_list, nvmewqeq, struct lpfc_iocbq,
				 list);
		spin_unlock_irqrestore(&pring->ring_lock, iflags);
		rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, nvmewqeq);
		ctxp = (struct lpfc_nvmet_rcv_ctx *)nvmewqeq->context2;
		rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
		spin_lock_irqsave(&pring->ring_lock, iflags);
		if (rc == -EBUSY) {
			/* WQ was full again, so put it back on the list */
@@ -1765,6 +1775,7 @@ dropit:
	ctxp->state = LPFC_NVMET_STE_LS_RCV;
	ctxp->entry_cnt = 1;
	ctxp->rqb_buffer = (void *)nvmebuf;
	ctxp->hdwq = &phba->sli4_hba.hdwq[0];

	lpfc_nvmeio_data(phba, "NVMET LS   RCV: xri x%x sz %d from %06x\n",
			 oxid, size, sid);
@@ -1987,6 +1998,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
	ctxp->flag = 0;
	ctxp->ctxbuf = ctx_buf;
	ctxp->rqb_buffer = (void *)nvmebuf;
	ctxp->hdwq = NULL;
	spin_lock_init(&ctxp->ctxlock);

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
@@ -3044,7 +3056,10 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
	abts_wqeq->iocb_flag |= LPFC_IO_NVME;
	abts_wqeq->context2 = ctxp;
	abts_wqeq->vport = phba->pport;
	rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq);
	if (!ctxp->hdwq)
		ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx];

	rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	if (rc == WQE_SUCCESS) {
		atomic_inc(&tgtp->xmt_abort_sol);
@@ -3096,7 +3111,10 @@ lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
	abts_wqeq->wqe_cmpl = lpfc_nvmet_unsol_fcp_abort_cmp;
	abts_wqeq->iocb_cmpl = NULL;
	abts_wqeq->iocb_flag |= LPFC_IO_NVMET;
	rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq);
	if (!ctxp->hdwq)
		ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx];

	rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	if (rc == WQE_SUCCESS) {
		return 0;
@@ -3165,7 +3183,7 @@ lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba,
	abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_abort_cmp;
	abts_wqeq->iocb_cmpl = 0;
	abts_wqeq->iocb_flag |=  LPFC_IO_NVME_LS;
	rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, abts_wqeq);
	rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	if (rc == WQE_SUCCESS) {
		atomic_inc(&tgtp->xmt_abort_unsol);
Loading