Commit 24bb45fd authored by Jens Axboe

Merge tag 'nvme-5.10-2020-10-29' of git://git.infradead.org/nvme into block-5.10

Pull NVMe fixes from Christoph:

"nvme updates for 5.10:

 - improve zone revalidation (Keith Busch)
 - gracefully handle zero length messages in nvme-rdma (zhenwei pi)
 - nvme-fc error handling fixes (James Smart)
 - nvmet tracing NULL pointer dereference fix (Chaitanya Kulkarni)"

* tag 'nvme-5.10-2020-10-29' of git://git.infradead.org/nvme:
  nvmet: fix a NULL pointer dereference when tracing the flush command
  nvme-fc: remove nvme_fc_terminate_io()
  nvme-fc: eliminate terminate_io use by nvme_fc_error_recovery
  nvme-fc: remove err_work work item
  nvme-fc: track error_recovery while connecting
  nvme-rdma: handle unexpected nvme completion data length
  nvme: ignore zone validate errors on subsequent scans
parents 7cb6e22b 3c3751f2
drivers/nvme/host/core.c (+1 −1)
@@ -2125,7 +2125,7 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id)
 
 	if (blk_queue_is_zoned(ns->queue)) {
 		ret = nvme_revalidate_zones(ns);
-		if (ret)
+		if (ret && !nvme_first_scan(ns->disk))
 			return ret;
 	}
 
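Note: the new condition gates the zone-revalidation error on nvme_first_scan(), so whether the failure is treated as fatal now depends on whether this is the namespace's initial scan or a later rescan. A sketch of that helper, as assumed from drivers/nvme/host/nvme.h in this era of the driver (recalled, not quoted; verify against the tree):

	/* assumed form of the helper (drivers/nvme/host/nvme.h, ~v5.10) */
	static inline bool nvme_first_scan(struct gendisk *disk)
	{
		/* nvme_alloc_ns() scans the disk prior to adding it */
		return !(disk->flags & GENHD_FL_UP);
	}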
drivers/nvme/host/fc.c (+109 −161)
@@ -146,7 +146,8 @@ struct nvme_fc_rport {
 
 /* fc_ctrl flags values - specified as bit positions */
 #define ASSOC_ACTIVE		0
-#define FCCTRL_TERMIO		1
+#define ASSOC_FAILED		1
+#define FCCTRL_TERMIO		2
 
 struct nvme_fc_ctrl {
 	spinlock_t		lock;
@@ -157,7 +158,6 @@ struct nvme_fc_ctrl {
 	u32			cnum;
 
 	bool			ioq_live;
-	atomic_t		err_work_active;
 	u64			association_id;
 	struct nvmefc_ls_rcv_op	*rcv_disconn;
 
@@ -167,7 +167,6 @@ struct nvme_fc_ctrl {
 	struct blk_mq_tag_set	tag_set;
 
 	struct delayed_work	connect_work;
-	struct work_struct	err_work;
 
 	struct kref		ref;
 	unsigned long		flags;
@@ -2414,24 +2413,97 @@ nvme_fc_nvme_ctrl_freed(struct nvme_ctrl *nctrl)
 	nvme_fc_ctrl_put(ctrl);
 }
 
+/*
+ * This routine is used by the transport when it needs to find active
+ * io on a queue that is to be terminated. The transport uses
+ * blk_mq_tagset_busy_itr() to find the busy requests, which then invoke
+ * this routine to kill them on a 1 by 1 basis.
+ *
+ * As FC allocates FC exchange for each io, the transport must contact
+ * the LLDD to terminate the exchange, thus releasing the FC exchange.
+ * After terminating the exchange the LLDD will call the transport's
+ * normal io done path for the request, but it will have an aborted
+ * status. The done path will return the io request back to the block
+ * layer with an error status.
+ */
+static bool
+nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
+{
+	struct nvme_ctrl *nctrl = data;
+	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
+	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req);
+
+	__nvme_fc_abort_op(ctrl, op);
+	return true;
+}
+
+/*
+ * This routine runs through all outstanding commands on the association
+ * and aborts them.  This routine is typically be called by the
+ * delete_association routine. It is also called due to an error during
+ * reconnect. In that scenario, it is most likely a command that initializes
+ * the controller, including fabric Connect commands on io queues, that
+ * may have timed out or failed thus the io must be killed for the connect
+ * thread to see the error.
+ */
 static void
-nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
+__nvme_fc_abort_outstanding_ios(struct nvme_fc_ctrl *ctrl, bool start_queues)
 {
-	int active;
+	/*
+	 * If io queues are present, stop them and terminate all outstanding
+	 * ios on them. As FC allocates FC exchange for each io, the
+	 * transport must contact the LLDD to terminate the exchange,
+	 * thus releasing the FC exchange. We use blk_mq_tagset_busy_itr()
+	 * to tell us what io's are busy and invoke a transport routine
+	 * to kill them with the LLDD.  After terminating the exchange
+	 * the LLDD will call the transport's normal io done path, but it
+	 * will have an aborted status. The done path will return the
+	 * io requests back to the block layer as part of normal completions
+	 * (but with error status).
+	 */
+	if (ctrl->ctrl.queue_count > 1) {
+		nvme_stop_queues(&ctrl->ctrl);
+		blk_mq_tagset_busy_iter(&ctrl->tag_set,
+				nvme_fc_terminate_exchange, &ctrl->ctrl);
+		blk_mq_tagset_wait_completed_request(&ctrl->tag_set);
+		if (start_queues)
+			nvme_start_queues(&ctrl->ctrl);
+	}
 
 	/*
-	 * if an error (io timeout, etc) while (re)connecting,
-	 * it's an error on creating the new association.
-	 * Start the error recovery thread if it hasn't already
-	 * been started. It is expected there could be multiple
-	 * ios hitting this path before things are cleaned up.
+	 * Other transports, which don't have link-level contexts bound
+	 * to sqe's, would try to gracefully shutdown the controller by
+	 * writing the registers for shutdown and polling (call
+	 * nvme_shutdown_ctrl()). Given a bunch of i/o was potentially
+	 * just aborted and we will wait on those contexts, and given
+	 * there was no indication of how live the controlelr is on the
+	 * link, don't send more io to create more contexts for the
+	 * shutdown. Let the controller fail via keepalive failure if
+	 * its still present.
 	 */
-	if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) {
-		active = atomic_xchg(&ctrl->err_work_active, 1);
-		if (!active && !queue_work(nvme_fc_wq, &ctrl->err_work)) {
-			atomic_set(&ctrl->err_work_active, 0);
-			WARN_ON(1);
-		}
+
+	/*
+	 * clean up the admin queue. Same thing as above.
+	 */
+	blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
+	blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
+				nvme_fc_terminate_exchange, &ctrl->ctrl);
+	blk_mq_tagset_wait_completed_request(&ctrl->admin_tag_set);
+}
+
+static void
+nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
+{
+	/*
+	 * if an error (io timeout, etc) while (re)connecting, the remote
+	 * port requested terminating of the association (disconnect_ls)
+	 * or an error (timeout or abort) occurred on an io while creating
+	 * the controller.  Abort any ios on the association and let the
+	 * create_association error path resolve things.
+	 */
+	if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) {
+		__nvme_fc_abort_outstanding_ios(ctrl, true);
+		set_bit(ASSOC_FAILED, &ctrl->flags);
 		return;
 	}
 
@@ -2745,30 +2817,6 @@ nvme_fc_complete_rq(struct request *rq)
 	nvme_fc_ctrl_put(ctrl);
 }
 
-/*
- * This routine is used by the transport when it needs to find active
- * io on a queue that is to be terminated. The transport uses
- * blk_mq_tagset_busy_itr() to find the busy requests, which then invoke
- * this routine to kill them on a 1 by 1 basis.
- *
- * As FC allocates FC exchange for each io, the transport must contact
- * the LLDD to terminate the exchange, thus releasing the FC exchange.
- * After terminating the exchange the LLDD will call the transport's
- * normal io done path for the request, but it will have an aborted
- * status. The done path will return the io request back to the block
- * layer with an error status.
- */
-static bool
-nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
-{
-	struct nvme_ctrl *nctrl = data;
-	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
-	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req);
-
-	__nvme_fc_abort_op(ctrl, op);
-	return true;
-}
-
 
 static const struct blk_mq_ops nvme_fc_mq_ops = {
 	.queue_rq	= nvme_fc_queue_rq,
@@ -2988,6 +3036,8 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
 		ctrl->cnum, ctrl->lport->localport.port_name,
 		ctrl->rport->remoteport.port_name, ctrl->ctrl.opts->subsysnqn);
 
+	clear_bit(ASSOC_FAILED, &ctrl->flags);
+
 	/*
 	 * Create the admin queue
 	 */
@@ -3016,7 +3066,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
 	 */
 
 	ret = nvme_enable_ctrl(&ctrl->ctrl);
-	if (ret)
+	if (ret || test_bit(ASSOC_FAILED, &ctrl->flags))
 		goto out_disconnect_admin_queue;
 
 	ctrl->ctrl.max_segments = ctrl->lport->ops->max_sgl_segments;
@@ -3026,7 +3076,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
 	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
 
 	ret = nvme_init_identify(&ctrl->ctrl);
-	if (ret)
+	if (ret || test_bit(ASSOC_FAILED, &ctrl->flags))
 		goto out_disconnect_admin_queue;
 
 	/* sanity checks */
@@ -3071,9 +3121,9 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
 			ret = nvme_fc_create_io_queues(ctrl);
 		else
 			ret = nvme_fc_recreate_io_queues(ctrl);
-		if (ret)
-			goto out_term_aen_ops;
 	}
+	if (ret || test_bit(ASSOC_FAILED, &ctrl->flags))
+		goto out_term_aen_ops;
 
 	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
 
@@ -3107,60 +3157,6 @@ out_free_queue:
 }
 
 
-/*
- * This routine runs through all outstanding commands on the association
- * and aborts them.  This routine is typically be called by the
- * delete_association routine. It is also called due to an error during
- * reconnect. In that scenario, it is most likely a command that initializes
- * the controller, including fabric Connect commands on io queues, that
- * may have timed out or failed thus the io must be killed for the connect
- * thread to see the error.
- */
-static void
-__nvme_fc_abort_outstanding_ios(struct nvme_fc_ctrl *ctrl, bool start_queues)
-{
-	/*
-	 * If io queues are present, stop them and terminate all outstanding
-	 * ios on them. As FC allocates FC exchange for each io, the
-	 * transport must contact the LLDD to terminate the exchange,
-	 * thus releasing the FC exchange. We use blk_mq_tagset_busy_itr()
-	 * to tell us what io's are busy and invoke a transport routine
-	 * to kill them with the LLDD.  After terminating the exchange
-	 * the LLDD will call the transport's normal io done path, but it
-	 * will have an aborted status. The done path will return the
-	 * io requests back to the block layer as part of normal completions
-	 * (but with error status).
-	 */
-	if (ctrl->ctrl.queue_count > 1) {
-		nvme_stop_queues(&ctrl->ctrl);
-		blk_mq_tagset_busy_iter(&ctrl->tag_set,
-				nvme_fc_terminate_exchange, &ctrl->ctrl);
-		blk_mq_tagset_wait_completed_request(&ctrl->tag_set);
-		if (start_queues)
-			nvme_start_queues(&ctrl->ctrl);
-	}
-
-	/*
-	 * Other transports, which don't have link-level contexts bound
-	 * to sqe's, would try to gracefully shutdown the controller by
-	 * writing the registers for shutdown and polling (call
-	 * nvme_shutdown_ctrl()). Given a bunch of i/o was potentially
-	 * just aborted and we will wait on those contexts, and given
-	 * there was no indication of how live the controlelr is on the
-	 * link, don't send more io to create more contexts for the
-	 * shutdown. Let the controller fail via keepalive failure if
-	 * its still present.
-	 */
-
-	/*
-	 * clean up the admin queue. Same thing as above.
-	 */
-	blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
-	blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
-				nvme_fc_terminate_exchange, &ctrl->ctrl);
-	blk_mq_tagset_wait_completed_request(&ctrl->admin_tag_set);
-}
-
 /*
  * This routine stops operation of the controller on the host side.
  * On the host os stack side: Admin and IO queues are stopped,
@@ -3237,7 +3233,6 @@ nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl)
 {
 	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
 
-	cancel_work_sync(&ctrl->err_work);
 	cancel_delayed_work_sync(&ctrl->connect_work);
 	/*
 	 * kill the association on the link side.  this will block
@@ -3292,79 +3287,35 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
 }
 
 static void
-__nvme_fc_terminate_io(struct nvme_fc_ctrl *ctrl)
+nvme_fc_reset_ctrl_work(struct work_struct *work)
 {
-	/*
-	 * if state is CONNECTING - the error occurred as part of a
-	 * reconnect attempt. Abort any ios on the association and
-	 * let the create_association error paths resolve things.
-	 */
-	if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) {
-		__nvme_fc_abort_outstanding_ios(ctrl, true);
-		return;
-	}
-
-	/*
-	 * For any other state, kill the association. As this routine
-	 * is a common io abort routine for resetting and such, after
-	 * the association is terminated, ensure that the state is set
-	 * to CONNECTING.
-	 */
+	struct nvme_fc_ctrl *ctrl =
+		container_of(work, struct nvme_fc_ctrl, ctrl.reset_work);
 
-	nvme_stop_keep_alive(&ctrl->ctrl);
+	nvme_stop_ctrl(&ctrl->ctrl);
 
 	/* will block will waiting for io to terminate */
 	nvme_fc_delete_association(ctrl);
 
-	if (ctrl->ctrl.state != NVME_CTRL_CONNECTING &&
-	    !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING))
+	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING))
 		dev_err(ctrl->ctrl.device,
 			"NVME-FC{%d}: error_recovery: Couldn't change state "
 			"to CONNECTING\n", ctrl->cnum);
-}
 
-static void
-nvme_fc_reset_ctrl_work(struct work_struct *work)
-{
-	struct nvme_fc_ctrl *ctrl =
-		container_of(work, struct nvme_fc_ctrl, ctrl.reset_work);
-	int ret;
-
-	__nvme_fc_terminate_io(ctrl);
-
-	nvme_stop_ctrl(&ctrl->ctrl);
-
-	if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE)
-		ret = nvme_fc_create_association(ctrl);
-	else
-		ret = -ENOTCONN;
-
-	if (ret)
-		nvme_fc_reconnect_or_delete(ctrl, ret);
-	else
-		dev_info(ctrl->ctrl.device,
-			"NVME-FC{%d}: controller reset complete\n",
-			ctrl->cnum);
-}
-
-static void
-nvme_fc_connect_err_work(struct work_struct *work)
-{
-	struct nvme_fc_ctrl *ctrl =
-			container_of(work, struct nvme_fc_ctrl, err_work);
-
-	__nvme_fc_terminate_io(ctrl);
-
-	atomic_set(&ctrl->err_work_active, 0);
-
-	/*
-	 * Rescheduling the connection after recovering
-	 * from the io error is left to the reconnect work
-	 * item, which is what should have stalled waiting on
-	 * the io that had the error that scheduled this work.
-	 */
+	if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE) {
+		if (!queue_delayed_work(nvme_wq, &ctrl->connect_work, 0)) {
+			dev_err(ctrl->ctrl.device,
+				"NVME-FC{%d}: failed to schedule connect "
+				"after reset\n", ctrl->cnum);
+		} else {
+			flush_delayed_work(&ctrl->connect_work);
+		}
+	} else {
+		nvme_fc_reconnect_or_delete(ctrl, -ENOTCONN);
+	}
 }
 
+
 static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
 	.name			= "fc",
 	.module			= THIS_MODULE,
@@ -3491,7 +3442,6 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 	ctrl->dev = lport->dev;
 	ctrl->cnum = idx;
 	ctrl->ioq_live = false;
-	atomic_set(&ctrl->err_work_active, 0);
 	init_waitqueue_head(&ctrl->ioabort_wait);
 
 	get_device(ctrl->dev);
@@ -3499,7 +3449,6 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 
 	INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work);
 	INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
-	INIT_WORK(&ctrl->err_work, nvme_fc_connect_err_work);
 	spin_lock_init(&ctrl->lock);
 
 	/* io queue count */
@@ -3592,7 +3541,6 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 fail_ctrl:
 	nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING);
 	cancel_work_sync(&ctrl->ctrl.reset_work);
-	cancel_work_sync(&ctrl->err_work);
 	cancel_delayed_work_sync(&ctrl->connect_work);
 
 	ctrl->ctrl.opts = NULL;
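Note: taken together, the fc.c hunks replace the old err_work item with the ASSOC_FAILED flag: an error hit while CONNECTING now aborts outstanding ios inline and leaves the flag set, and the connect thread re-checks it after each step that can block on io. A condensed sketch of that handshake, assembled from the hunks above (fragments, not the literal source):

	/* error path, while the association is being created */
	if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) {
		__nvme_fc_abort_outstanding_ios(ctrl, true);
		set_bit(ASSOC_FAILED, &ctrl->flags);	/* seen by connect thread */
		return;
	}

	/* connect thread, in nvme_fc_create_association() */
	clear_bit(ASSOC_FAILED, &ctrl->flags);
	ret = nvme_enable_ctrl(&ctrl->ctrl);		/* may block on admin io */
	if (ret || test_bit(ASSOC_FAILED, &ctrl->flags))
		goto out_disconnect_admin_queue;	/* async error fired meanwhile */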
drivers/nvme/host/rdma.c (+8 −0)
@@ -1768,6 +1768,14 @@ static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
 		return;
 	}
 
+	/* sanity checking for received data length */
+	if (unlikely(wc->byte_len < len)) {
+		dev_err(queue->ctrl->ctrl.device,
+			"Unexpected nvme completion length(%d)\n", wc->byte_len);
+		nvme_rdma_error_recovery(queue->ctrl);
+		return;
+	}
+
 	ib_dma_sync_single_for_cpu(ibdev, qe->dma, len, DMA_FROM_DEVICE);
 	/*
 	 * AEN requests are special as they don't time out and can
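Note: earlier in nvme_rdma_recv_done() the expected length is the size of a full NVMe CQE, so the new check rejects any inbound SEND shorter than a complete completion entry, including the zero-length messages a misbehaving or malicious target can generate. Assumed shape of the earlier declarations (paraphrased from the 5.10 function; verify against the tree):

	struct nvme_completion *cqe = qe->data;
	/* a CQE is 16 bytes on the wire; anything shorter is garbage */
	const size_t len = sizeof(struct nvme_completion);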
drivers/nvme/target/core.c (+2 −2)
@@ -907,8 +907,6 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
 	req->error_loc = NVMET_NO_ERROR_LOC;
 	req->error_slba = 0;
 
-	trace_nvmet_req_init(req, req->cmd);
-
 	/* no support for fused commands yet */
 	if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) {
 		req->error_loc = offsetof(struct nvme_common_command, flags);
@@ -938,6 +936,8 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
 	if (status)
 		goto fail;
 
+	trace_nvmet_req_init(req, req->cmd);
+
 	if (unlikely(!percpu_ref_tryget_live(&sq->ref))) {
 		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
 		goto fail;
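Note: moving trace_nvmet_req_init() below the command-parsing setup means req->ns is already resolved (or deliberately left NULL) when the tracepoint fires. The failure mode being fixed shows in the lines removed from trace.h below: with tracing ahead of parsing, the helper looked the namespace up itself, and tracing a flush command could leave it holding a NULL namespace (sketch from those removed lines):

	ns = nvmet_find_namespace(ctrl, req->cmd->rw.nsid);	/* may return NULL */
	strncpy(name, ns->device_path, DISK_NAME_LEN);		/* NULL dereference */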
drivers/nvme/target/trace.h (+7 −14)
@@ -46,18 +46,11 @@ static inline struct nvmet_ctrl *nvmet_req_to_ctrl(struct nvmet_req *req)
 	return req->sq->ctrl;
 }
 
-static inline void __assign_disk_name(char *name, struct nvmet_req *req,
-		bool init)
+static inline void __assign_req_name(char *name, struct nvmet_req *req)
 {
-	struct nvmet_ctrl *ctrl = nvmet_req_to_ctrl(req);
-	struct nvmet_ns *ns;
-
-	if ((init && req->sq->qid) || (!init && req->cq->qid)) {
-		ns = nvmet_find_namespace(ctrl, req->cmd->rw.nsid);
-		strncpy(name, ns->device_path, DISK_NAME_LEN);
-		return;
-	}
-
+	if (req->ns)
+		strncpy(name, req->ns->device_path, DISK_NAME_LEN);
+	else
 		memset(name, 0, DISK_NAME_LEN);
 }
 #endif
@@ -81,7 +74,7 @@ TRACE_EVENT(nvmet_req_init,
 	TP_fast_assign(
 		__entry->cmd = cmd;
 		__entry->ctrl = nvmet_req_to_ctrl(req);
-		__assign_disk_name(__entry->disk, req, true);
+		__assign_req_name(__entry->disk, req);
 		__entry->qid = req->sq->qid;
 		__entry->cid = cmd->common.command_id;
 		__entry->opcode = cmd->common.opcode;
@@ -121,7 +114,7 @@ TRACE_EVENT(nvmet_req_complete,
 		__entry->cid = req->cqe->command_id;
 		__entry->result = le64_to_cpu(req->cqe->result.u64);
 		__entry->status = le16_to_cpu(req->cqe->status) >> 1;
-		__assign_disk_name(__entry->disk, req, false);
+		__assign_req_name(__entry->disk, req);
 	),
 	TP_printk("nvmet%s: %sqid=%d, cmdid=%u, res=%#llx, status=%#x",
 		__print_ctrl_name(__entry->ctrl),
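Note: with req->ns valid (or deliberately NULL) at both trace points after the core.c reordering, one helper now serves nvmet_req_init and nvmet_req_complete alike, and the old init/complete flag disappears. The renamed helper's whole body reduces to the null-safe form shown in the hunk:

	if (req->ns)
		strncpy(name, req->ns->device_path, DISK_NAME_LEN);
	else
		memset(name, 0, DISK_NAME_LEN);	/* renders as an empty name */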