Commit 41d7f2ed authored by Jens Axboe's avatar Jens Axboe
Browse files

Merge branch 'nvme-5.2' of git://git.infradead.org/nvme into for-5.2/block

Pull NVMe changes from Christoph.

* 'nvme-5.2' of git://git.infradead.org/nvme:
  nvme: set 0 capacity if namespace block size exceeds PAGE_SIZE
  nvme-rdma: fix typo in struct comment
  nvme-loop: kill timeout handler
  nvme-tcp: rename function to have nvme_tcp prefix
  nvme-rdma: fix a NULL deref when an admin connect times out
  nvme-tcp: fix a NULL deref when an admin connect times out
  nvmet-tcp: don't fail maxr2t greater than 1
  nvmet-file: clamp-down file namespace lba_shift
  nvmet: include <linux/scatterlist.h>
  nvmet: return a specified error if subsys_alloc fails
  nvmet: rename nvme_completion instances from rsp to cqe
  nvmet-rdma: remove p2p_client initialization from fast-path
parents cc6be131 01fa0174
Loading
Loading
Loading
Loading
+6 −1
Original line number Diff line number Diff line
@@ -1591,6 +1591,10 @@ static void nvme_update_disk_info(struct gendisk *disk,
	sector_t capacity = le64_to_cpu(id->nsze) << (ns->lba_shift - 9);
	unsigned short bs = 1 << ns->lba_shift;

	if (ns->lba_shift > PAGE_SHIFT) {
		/* unsupported block size, set capacity to 0 later */
		bs = (1 << 9);
	}
	blk_mq_freeze_queue(disk->queue);
	blk_integrity_unregister(disk);

@@ -1601,7 +1605,8 @@ static void nvme_update_disk_info(struct gendisk *disk,
	if (ns->ms && !ns->ext &&
	    (ns->ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
		nvme_init_integrity(disk, ns->ms, ns->pi_type);
	if (ns->ms && !nvme_ns_has_pi(ns) && !blk_get_integrity(disk))
	if ((ns->ms && !nvme_ns_has_pi(ns) && !blk_get_integrity(disk)) ||
	    ns->lba_shift > PAGE_SHIFT)
		capacity = 0;

	set_capacity(disk, capacity);
+6 −4
Original line number Diff line number Diff line
@@ -914,8 +914,9 @@ static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl,
{
	blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
	nvme_rdma_stop_queue(&ctrl->queues[0]);
	blk_mq_tagset_busy_iter(&ctrl->admin_tag_set, nvme_cancel_request,
			&ctrl->ctrl);
	if (ctrl->ctrl.admin_tagset)
		blk_mq_tagset_busy_iter(ctrl->ctrl.admin_tagset,
			nvme_cancel_request, &ctrl->ctrl);
	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
	nvme_rdma_destroy_admin_queue(ctrl, remove);
}
@@ -926,8 +927,9 @@ static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
	if (ctrl->ctrl.queue_count > 1) {
		nvme_stop_queues(&ctrl->ctrl);
		nvme_rdma_stop_io_queues(ctrl);
		blk_mq_tagset_busy_iter(&ctrl->tag_set, nvme_cancel_request,
				&ctrl->ctrl);
		if (ctrl->ctrl.tagset)
			blk_mq_tagset_busy_iter(ctrl->ctrl.tagset,
				nvme_cancel_request, &ctrl->ctrl);
		if (remove)
			nvme_start_queues(&ctrl->ctrl);
		nvme_rdma_destroy_io_queues(ctrl, remove);
+10 −8
Original line number Diff line number Diff line
@@ -473,7 +473,6 @@ static int nvme_tcp_handle_c2h_data(struct nvme_tcp_queue *queue,
	}

	return 0;

}

static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue,
@@ -634,7 +633,6 @@ static inline void nvme_tcp_end_request(struct request *rq, u16 status)
	nvme_end_request(rq, cpu_to_le16(status << 1), res);
}


static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
			      unsigned int *offset, size_t *len)
{
@@ -1535,7 +1533,7 @@ out_free_queue:
	return ret;
}

static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
{
	int i, ret;

@@ -1565,7 +1563,7 @@ static unsigned int nvme_tcp_nr_io_queues(struct nvme_ctrl *ctrl)
	return nr_io_queues;
}

static int nvme_alloc_io_queues(struct nvme_ctrl *ctrl)
static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
{
	unsigned int nr_io_queues;
	int ret;
@@ -1582,7 +1580,7 @@ static int nvme_alloc_io_queues(struct nvme_ctrl *ctrl)
	dev_info(ctrl->device,
		"creating %d I/O queues.\n", nr_io_queues);

	return nvme_tcp_alloc_io_queues(ctrl);
	return __nvme_tcp_alloc_io_queues(ctrl);
}

static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove)
@@ -1599,7 +1597,7 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
{
	int ret;

	ret = nvme_alloc_io_queues(ctrl);
	ret = nvme_tcp_alloc_io_queues(ctrl);
	if (ret)
		return ret;

@@ -1710,7 +1708,9 @@ static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
{
	blk_mq_quiesce_queue(ctrl->admin_q);
	nvme_tcp_stop_queue(ctrl, 0);
	blk_mq_tagset_busy_iter(ctrl->admin_tagset, nvme_cancel_request, ctrl);
	if (ctrl->admin_tagset)
		blk_mq_tagset_busy_iter(ctrl->admin_tagset,
			nvme_cancel_request, ctrl);
	blk_mq_unquiesce_queue(ctrl->admin_q);
	nvme_tcp_destroy_admin_queue(ctrl, remove);
}
@@ -1722,7 +1722,9 @@ static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
		return;
	nvme_stop_queues(ctrl);
	nvme_tcp_stop_io_queues(ctrl);
	blk_mq_tagset_busy_iter(ctrl->tagset, nvme_cancel_request, ctrl);
	if (ctrl->tagset)
		blk_mq_tagset_busy_iter(ctrl->tagset,
			nvme_cancel_request, ctrl);
	if (remove)
		nvme_start_queues(ctrl);
	nvme_tcp_destroy_io_queues(ctrl, remove);
+1 −0
Original line number Diff line number Diff line
@@ -3,6 +3,7 @@ config NVME_TARGET
	tristate "NVMe Target support"
	depends on BLOCK
	depends on CONFIGFS_FS
	select SGL_ALLOC
	help
	  This enables target side support for the NVMe protocol, that is
	  it allows the Linux kernel to implement NVMe subsystems and
+2 −2
Original line number Diff line number Diff line
@@ -898,8 +898,8 @@ static struct config_group *nvmet_subsys_make(struct config_group *group,
	}

	subsys = nvmet_subsys_alloc(name, NVME_NQN_NVME);
	if (!subsys)
		return ERR_PTR(-ENOMEM);
	if (IS_ERR(subsys))
		return ERR_CAST(subsys);

	config_group_init_type_name(&subsys->group, name, &nvmet_subsys_type);

Loading