Commit b6bf0830 authored by Jens Axboe

Merge tag 'nvme-5.10-2020-10-08' of git://git.infradead.org/nvme into for-5.10/drivers

Pull NVMe updates from Christoph:

"nvme update for 5.10:

 - fix a controller refcount leak on init failure (Chaitanya Kulkarni)
 - misc cleanups (Chaitanya Kulkarni)
 - major refactoring of the scanning code (me)"

* tag 'nvme-5.10-2020-10-08' of git://git.infradead.org/nvme: (23 commits)
  nvme-core: remove extra condition for vwc
  nvme-core: remove extra variable
  nvme: remove nvme_identify_ns_list
  nvme: refactor nvme_validate_ns
  nvme: move nvme_validate_ns
  nvme: query namespace identifiers before adding the namespace
  nvme: revalidate zone bitmaps in nvme_update_ns_info
  nvme: remove nvme_update_formats
  nvme: update the known admin effects
  nvme: set the queue limits in nvme_update_ns_info
  nvme: remove the 0 lba_shift check in nvme_update_ns_info
  nvme: clean up the check for too large logic block sizes
  nvme: freeze the queue over ->lba_shift updates
  nvme: factor out a nvme_configure_metadata helper
  nvme: call nvme_identify_ns as the first thing in nvme_alloc_ns_block
  nvme: lift the check for an unallocated namespace into nvme_identify_ns
  nvme: rename __nvme_revalidate_disk
  nvme: rename _nvme_revalidate_disk
  nvme: rename nvme_validate_ns to nvme_validate_or_alloc_ns
  nvme: remove the disk argument to nvme_update_zone_info
  ...
parents 103fbf8e c4485252
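
The centerpiece of the pull is the scanning rework: namespace identifiers are now queried up front, and only then is an existing namespace revalidated or a new one allocated. A minimal userspace sketch of that flow (illustration only — every name below is an invented stand-in for the kernel's nvme_identify_ns_descs/nvme_find_get_ns/nvme_alloc_ns path):

#include <stdio.h>
#include <string.h>

struct ns_ids { unsigned char eui64[8]; };
struct ns { unsigned nsid; struct ns_ids ids; };

static struct ns table[8];	/* toy controller namespace table */
static int nr_ns;

static struct ns *find_ns(unsigned nsid)
{
	for (int i = 0; i < nr_ns; i++)
		if (table[i].nsid == nsid)
			return &table[i];
	return NULL;
}

/* Pretend Identify (CNS 03h descriptor list): derive an EUI-64 from the nsid. */
static int identify_descs(unsigned nsid, struct ns_ids *ids)
{
	memset(ids, 0, sizeof(*ids));
	ids->eui64[7] = (unsigned char)nsid;
	return 0;
}

static void validate_or_alloc_ns(unsigned nsid)
{
	struct ns_ids ids;
	struct ns *ns;

	if (identify_descs(nsid, &ids))	/* step 1: fetch identifiers first */
		return;

	ns = find_ns(nsid);
	if (ns) {			/* step 2a: revalidate in place */
		if (memcmp(&ns->ids, &ids, sizeof(ids)))
			printf("nsid %u: identifiers changed, drop it\n", nsid);
		return;
	}

	ns = &table[nr_ns++];		/* step 2b: allocate a new namespace */
	ns->nsid = nsid;
	ns->ids = ids;
	printf("nsid %u: allocated\n", nsid);
}

int main(void)
{
	validate_or_alloc_ns(1);	/* allocates */
	validate_or_alloc_ns(1);	/* revalidates; IDs still match */
	return 0;
}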
drivers/nvme/host/core.c  +199 −256
@@ -89,7 +89,6 @@ static dev_t nvme_chr_devt;
static struct class *nvme_class;
static struct class *nvme_subsys_class;

-static int _nvme_revalidate_disk(struct gendisk *disk);
static void nvme_put_subsystem(struct nvme_subsystem *subsys);
static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
					   unsigned nsid);
@@ -968,10 +967,10 @@ static u32 nvme_known_admin_effects(u8 opcode)
{
	switch (opcode) {
	case nvme_admin_format_nvm:
-		return NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC |
+		return NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_NCC |
			NVME_CMD_EFFECTS_CSE_MASK;
	case nvme_admin_sanitize_nvm:
-		return NVME_CMD_EFFECTS_CSE_MASK;
+		return NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK;
	default:
		break;
	}
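
The effect masks above come from the Commands Supported and Effects log. For reference, a standalone sketch of the updated table, with bit values mirroring include/linux/nvme.h (CSE occupies bits 18:16) and the admin opcodes as defined by the spec:

#include <assert.h>
#include <stdio.h>

/* Effect bits as in include/linux/nvme.h; CSE_MASK is GENMASK(18, 16). */
#define NVME_CMD_EFFECTS_CSUPP		(1u << 0)
#define NVME_CMD_EFFECTS_LBCC		(1u << 1)
#define NVME_CMD_EFFECTS_NCC		(1u << 2)
#define NVME_CMD_EFFECTS_CSE_MASK	(7u << 16)

#define nvme_admin_format_nvm	0x80
#define nvme_admin_sanitize_nvm	0x84

/* Mirrors the updated nvme_known_admin_effects(). */
static unsigned known_admin_effects(unsigned char opcode)
{
	switch (opcode) {
	case nvme_admin_format_nvm:
		return NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_NCC |
			NVME_CMD_EFFECTS_CSE_MASK;
	case nvme_admin_sanitize_nvm:
		return NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK;
	default:
		return 0;
	}
}

int main(void)
{
	/* Both destructive opcodes now advertise LBCC and exclusive CSE. */
	assert(known_admin_effects(nvme_admin_sanitize_nvm) &
	       NVME_CMD_EFFECTS_LBCC);
	assert(known_admin_effects(nvme_admin_format_nvm) &
	       NVME_CMD_EFFECTS_CSE_MASK);
	puts("effects masks look consistent");
	return 0;
}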
@@ -1009,7 +1008,7 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
	 * For simplicity, IO to all namespaces is quiesced even if the command
	 * effects say only one namespace is affected.
	 */
-	if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
+	if (effects & NVME_CMD_EFFECTS_CSE_MASK) {
		mutex_lock(&ctrl->scan_lock);
		mutex_lock(&ctrl->subsys->lock);
		nvme_mpath_start_freeze(ctrl->subsys);
@@ -1020,36 +1019,9 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
	return effects;
}

-static void nvme_update_formats(struct nvme_ctrl *ctrl, u32 *effects)
-{
-	struct nvme_ns *ns;
-
-	down_read(&ctrl->namespaces_rwsem);
-	list_for_each_entry(ns, &ctrl->namespaces, list)
-		if (_nvme_revalidate_disk(ns->disk))
-			nvme_set_queue_dying(ns);
-		else if (blk_queue_is_zoned(ns->disk->queue)) {
-			/*
-			 * IO commands are required to fully revalidate a zoned
-			 * device. Force the command effects to trigger rescan
-			 * work so report zones can run in a context with
-			 * unfrozen IO queues.
-			 */
-			*effects |= NVME_CMD_EFFECTS_NCC;
-		}
-	up_read(&ctrl->namespaces_rwsem);
-}

static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
{
-	/*
-	 * Revalidate LBA changes prior to unfreezing. This is necessary to
-	 * prevent memory corruption if a logical block size was changed by
-	 * this command.
-	 */
-	if (effects & NVME_CMD_EFFECTS_LBCC)
-		nvme_update_formats(ctrl, &effects);
-	if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
+	if (effects & NVME_CMD_EFFECTS_CSE_MASK) {
		nvme_unfreeze(ctrl);
		nvme_mpath_unfreeze(ctrl->subsys);
		mutex_unlock(&ctrl->subsys->lock);
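
The simplification relies on nvme_passthru_start() and nvme_passthru_end() gating on the exact same predicate: LBCC alone no longer freezes I/O, since LBA-format changes are now picked up by the rescan rather than an inline revalidate. A toy sketch of why the two halves must stay symmetric (the counter is a stand-in for the scan_lock/subsys-lock/freeze trio, not the kernel mechanism):

#include <assert.h>

#define CSE_MASK	(7u << 16)
#define LBCC		(1u << 1)

static int frozen;	/* stand-in for "locks held and queues frozen" */

static unsigned passthru_start(unsigned effects)
{
	if (effects & CSE_MASK)
		frozen++;	/* take locks, freeze queues */
	return effects;
}

static void passthru_end(unsigned effects)
{
	if (effects & CSE_MASK)
		frozen--;	/* must mirror start() exactly */
}

int main(void)
{
	unsigned effects = passthru_start(CSE_MASK | LBCC);
	/* ... execute the passthrough command here ... */
	passthru_end(effects);
	assert(frozen == 0);	/* unbalanced gating would trip this */
	return 0;
}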
@@ -1309,6 +1281,8 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
	int status, pos, len;
	void *data;

+	if (ctrl->vs < NVME_VS(1, 3, 0) && !nvme_multi_css(ctrl))
+		return 0;
	if (ctrl->quirks & NVME_QUIRK_NO_NS_DESC_LIST)
		return 0;

@@ -1352,19 +1326,8 @@ free_data:
	return status;
}

-static int nvme_identify_ns_list(struct nvme_ctrl *dev, unsigned nsid, __le32 *ns_list)
-{
-	struct nvme_command c = { };
-
-	c.identify.opcode = nvme_admin_identify;
-	c.identify.cns = NVME_ID_CNS_NS_ACTIVE_LIST;
-	c.identify.nsid = cpu_to_le32(nsid);
-	return nvme_submit_sync_cmd(dev->admin_q, &c, ns_list,
-				    NVME_IDENTIFY_DATA_SIZE);
-}

-static int nvme_identify_ns(struct nvme_ctrl *ctrl,
-		unsigned nsid, struct nvme_id_ns **id)
+static int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid,
+			struct nvme_ns_ids *ids, struct nvme_id_ns **id)
{
	struct nvme_command c = { };
	int error;
@@ -1381,9 +1344,24 @@ static int nvme_identify_ns(struct nvme_ctrl *ctrl,
	error = nvme_submit_sync_cmd(ctrl->admin_q, &c, *id, sizeof(**id));
	if (error) {
		dev_warn(ctrl->device, "Identify namespace failed (%d)\n", error);
-		kfree(*id);
+		goto out_free_id;
	}

+	error = -ENODEV;
+	if ((*id)->ncap == 0) /* namespace not allocated or attached */
+		goto out_free_id;
+
+	if (ctrl->vs >= NVME_VS(1, 1, 0) &&
+	    !memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
+		memcpy(ids->eui64, (*id)->eui64, sizeof(ids->eui64));
+	if (ctrl->vs >= NVME_VS(1, 2, 0) &&
+	    !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
+		memcpy(ids->nguid, (*id)->nguid, sizeof(ids->nguid));
+
+	return 0;
+
+out_free_id:
+	kfree(*id);
	return error;
}
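
The memchr_inv() idiom above keeps whatever the descriptor list already reported and only falls back to the Identify Namespace copy when the field is still all-zero. A userspace equivalent, with all_zero() standing in for !memchr_inv(..., 0, ...):

#include <assert.h>
#include <stddef.h>
#include <string.h>

/* Userspace equivalent of !memchr_inv(buf, 0, len): true if all bytes are 0. */
static int all_zero(const unsigned char *buf, size_t len)
{
	for (size_t i = 0; i < len; i++)
		if (buf[i])
			return 0;
	return 1;
}

int main(void)
{
	unsigned char ids_eui64[8] = { 0 };			/* from descriptor list */
	const unsigned char id_eui64[8] = { 0xde, 0xad };	/* from Identify NS */

	/* Only fall back to the Identify data if the descriptor list left
	 * the field untouched, mirroring the new nvme_identify_ns(). */
	if (all_zero(ids_eui64, sizeof(ids_eui64)))
		memcpy(ids_eui64, id_eui64, sizeof(ids_eui64));
	assert(ids_eui64[0] == 0xde);
	return 0;
}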

@@ -1905,20 +1883,6 @@ static void nvme_config_write_zeroes(struct gendisk *disk, struct nvme_ns *ns)
					   nvme_lba_to_sect(ns, max_blocks));
}

-static int nvme_report_ns_ids(struct nvme_ctrl *ctrl, unsigned int nsid,
-		struct nvme_id_ns *id, struct nvme_ns_ids *ids)
-{
-	memset(ids, 0, sizeof(*ids));
-
-	if (ctrl->vs >= NVME_VS(1, 1, 0))
-		memcpy(ids->eui64, id->eui64, sizeof(id->eui64));
-	if (ctrl->vs >= NVME_VS(1, 2, 0))
-		memcpy(ids->nguid, id->nguid, sizeof(id->nguid));
-	if (ctrl->vs >= NVME_VS(1, 3, 0) || nvme_multi_css(ctrl))
-		return nvme_identify_ns_descs(ctrl, nsid, ids);
-	return 0;
-}

static bool nvme_ns_ids_valid(struct nvme_ns_ids *ids)
{
	return !uuid_is_null(&ids->uuid) ||
@@ -1959,6 +1923,68 @@ static int nvme_setup_streams_ns(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
	return 0;
}

+static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
+{
+	struct nvme_ctrl *ctrl = ns->ctrl;
+
+	/*
+	 * The PI implementation requires the metadata size to be equal to the
+	 * t10 pi tuple size.
+	 */
+	ns->ms = le16_to_cpu(id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ms);
+	if (ns->ms == sizeof(struct t10_pi_tuple))
+		ns->pi_type = id->dps & NVME_NS_DPS_PI_MASK;
+	else
+		ns->pi_type = 0;
+
+	ns->features &= ~(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS);
+	if (!ns->ms || !(ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
+		return 0;
+	if (ctrl->ops->flags & NVME_F_FABRICS) {
+		/*
+		 * The NVMe over Fabrics specification only supports metadata as
+		 * part of the extended data LBA.  We rely on HCA/HBA support to
+		 * remap the separate metadata buffer from the block layer.
+		 */
+		if (WARN_ON_ONCE(!(id->flbas & NVME_NS_FLBAS_META_EXT)))
+			return -EINVAL;
+		if (ctrl->max_integrity_segments)
+			ns->features |=
+				(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS);
+	} else {
+		/*
+		 * For PCIe controllers, we can't easily remap the separate
+		 * metadata buffer from the block layer and thus require a
+		 * separate metadata buffer for block layer metadata/PI support.
+		 * We allow extended LBAs for the passthrough interface, though.
+		 */
+		if (id->flbas & NVME_NS_FLBAS_META_EXT)
+			ns->features |= NVME_NS_EXT_LBAS;
+		else
+			ns->features |= NVME_NS_METADATA_SUPPORTED;
+	}
+
+	return 0;
+}
+
+static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
+		struct request_queue *q)
+{
+	bool vwc = ctrl->vwc & NVME_CTRL_VWC_PRESENT;
+
+	if (ctrl->max_hw_sectors) {
+		u32 max_segments =
+			(ctrl->max_hw_sectors / (NVME_CTRL_PAGE_SIZE >> 9)) + 1;
+
+		max_segments = min_not_zero(max_segments, ctrl->max_segments);
+		blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
+		blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
+	}
+	blk_queue_virt_boundary(q, NVME_CTRL_PAGE_SIZE - 1);
+	blk_queue_dma_alignment(q, 7);
+	blk_queue_write_cache(q, vwc, vwc);
+}
+
static void nvme_update_disk_info(struct gendisk *disk,
		struct nvme_ns *ns, struct nvme_id_ns *id)
{
@@ -1966,11 +1992,15 @@ static void nvme_update_disk_info(struct gendisk *disk,
	unsigned short bs = 1 << ns->lba_shift;
	u32 atomic_bs, phys_bs, io_opt = 0;

+	/*
+	 * The block layer can't support LBA sizes larger than the page size
+	 * yet, so catch this early and don't allow block I/O.
+	 */
	if (ns->lba_shift > PAGE_SHIFT) {
-		/* unsupported block size, set capacity to 0 later */
+		capacity = 0;
		bs = (1 << 9);
	}
-	blk_mq_freeze_queue(disk->queue);

	blk_integrity_unregister(disk);

	atomic_bs = phys_bs = bs;
@@ -2004,13 +2034,6 @@ static void nvme_update_disk_info(struct gendisk *disk,
	blk_queue_io_min(disk->queue, phys_bs);
	blk_queue_io_opt(disk->queue, io_opt);

-	/*
-	 * The block layer can't support LBA sizes larger than the page size
-	 * yet, so catch this early and don't allow block I/O.
-	 */
-	if (ns->lba_shift > PAGE_SHIFT)
-		capacity = 0;
-
	/*
	 * Register a metadata profile for PI, or the plain non-integrity NVMe
	 * metadata masquerading as Type 0 if supported, otherwise reject block
@@ -2035,8 +2058,6 @@ static void nvme_update_disk_info(struct gendisk *disk,
		set_disk_ro(disk, true);
	else
		set_disk_ro(disk, false);

-	blk_mq_unfreeze_queue(disk->queue);
}

static inline bool nvme_first_scan(struct gendisk *disk)
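
nvme_configure_metadata() centralizes the per-transport metadata decision that used to live inline in __nvme_revalidate_disk(): fabrics must carry metadata in the extended data LBA (the HCA/HBA remaps it), while PCIe prefers a separate buffer and keeps extended LBAs for passthrough. A hedged decision-table sketch (the flag values and function name are invented for illustration):

#include <stdio.h>

#define NS_EXT_LBAS		(1u << 0)
#define NS_METADATA_SUPPORTED	(1u << 1)

/*
 * Decision sketch: fabrics requires extended LBAs on the wire; PCIe can
 * use a separate metadata buffer and only takes extended LBAs otherwise.
 */
static unsigned metadata_features(int is_fabrics, int meta_ext,
				  int max_integrity_segments)
{
	unsigned features = 0;

	if (is_fabrics) {
		if (!meta_ext)
			return 0;	/* the kernel returns -EINVAL here */
		if (max_integrity_segments)
			features |= NS_METADATA_SUPPORTED | NS_EXT_LBAS;
	} else {
		if (meta_ext)
			features |= NS_EXT_LBAS;
		else
			features |= NS_METADATA_SUPPORTED;
	}
	return features;
}

int main(void)
{
	printf("fabrics, ext LBA, PI-capable HBA: %#x\n",
	       metadata_features(1, 1, 1));
	printf("pcie, separate metadata buffer:   %#x\n",
	       metadata_features(0, 0, 0));
	return 0;
}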
@@ -2076,151 +2097,49 @@ static void nvme_set_chunk_sectors(struct nvme_ns *ns, struct nvme_id_ns *id)
	blk_queue_chunk_sectors(ns->queue, iob);
}

-static int __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
+static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id)
{
	unsigned lbaf = id->flbas & NVME_NS_FLBAS_LBA_MASK;
-	struct nvme_ns *ns = disk->private_data;
-	struct nvme_ctrl *ctrl = ns->ctrl;
	int ret;

-	/*
-	 * If identify namespace failed, use default 512 byte block size so
-	 * block layer can use before failing read/write for 0 capacity.
-	 */
+	blk_mq_freeze_queue(ns->disk->queue);
	ns->lba_shift = id->lbaf[lbaf].ds;
-	if (ns->lba_shift == 0)
-		ns->lba_shift = 9;
+	nvme_set_queue_limits(ns->ctrl, ns->queue);

-	switch (ns->head->ids.csi) {
-	case NVME_CSI_NVM:
-		break;
-	case NVME_CSI_ZNS:
-		ret = nvme_update_zone_info(disk, ns, lbaf);
-		if (ret) {
-			dev_warn(ctrl->device,
-				"failed to add zoned namespace:%u ret:%d\n",
-				ns->head->ns_id, ret);
-			return ret;
-		}
-		break;
-	default:
-		dev_warn(ctrl->device, "unknown csi:%u ns:%u\n",
-			ns->head->ids.csi, ns->head->ns_id);
-		return -ENODEV;
+	if (ns->head->ids.csi == NVME_CSI_ZNS) {
+		ret = nvme_update_zone_info(ns, lbaf);
+		if (ret)
+			goto out_unfreeze;
	}

-	ns->features = 0;
-	ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
-	/* the PI implementation requires metadata equal t10 pi tuple size */
-	if (ns->ms == sizeof(struct t10_pi_tuple))
-		ns->pi_type = id->dps & NVME_NS_DPS_PI_MASK;
-	else
-		ns->pi_type = 0;
+	ret = nvme_configure_metadata(ns, id);
+	if (ret)
+		goto out_unfreeze;
+	nvme_set_chunk_sectors(ns, id);
+	nvme_update_disk_info(ns->disk, ns, id);
+	blk_mq_unfreeze_queue(ns->disk->queue);

-	if (ns->ms) {
-		/*
-		 * For PCIe only the separate metadata pointer is supported,
-		 * as the block layer supplies metadata in a separate bio_vec
-		 * chain. For Fabrics, only metadata as part of extended data
-		 * LBA is supported on the wire per the Fabrics specification,
-		 * but the HBA/HCA will do the remapping from the separate
-		 * metadata buffers for us.
-		 */
-		if (id->flbas & NVME_NS_FLBAS_META_EXT) {
-			ns->features |= NVME_NS_EXT_LBAS;
-			if ((ctrl->ops->flags & NVME_F_FABRICS) &&
-			    (ctrl->ops->flags & NVME_F_METADATA_SUPPORTED) &&
-			    ctrl->max_integrity_segments)
-				ns->features |= NVME_NS_METADATA_SUPPORTED;
-		} else {
-			if (WARN_ON_ONCE(ctrl->ops->flags & NVME_F_FABRICS))
-				return -EINVAL;
-			if (ctrl->ops->flags & NVME_F_METADATA_SUPPORTED)
-				ns->features |= NVME_NS_METADATA_SUPPORTED;
-		}
+	if (blk_queue_is_zoned(ns->queue)) {
+		ret = nvme_revalidate_zones(ns);
+		if (ret)
+			return ret;
	}

-	nvme_set_chunk_sectors(ns, id);
-	nvme_update_disk_info(disk, ns, id);
#ifdef CONFIG_NVME_MULTIPATH
	if (ns->head->disk) {
+		blk_mq_freeze_queue(ns->head->disk->queue);
		nvme_update_disk_info(ns->head->disk, ns, id);
		blk_stack_limits(&ns->head->disk->queue->limits,
				 &ns->queue->limits, 0);
+		blk_queue_update_readahead(ns->head->disk->queue);
		nvme_update_bdev_size(ns->head->disk);
+		blk_mq_unfreeze_queue(ns->head->disk->queue);
	}
#endif
	return 0;
}

-static int _nvme_revalidate_disk(struct gendisk *disk)
-{
-	struct nvme_ns *ns = disk->private_data;
-	struct nvme_ctrl *ctrl = ns->ctrl;
-	struct nvme_id_ns *id;
-	struct nvme_ns_ids ids;
-	int ret = 0;
-
-	if (test_bit(NVME_NS_DEAD, &ns->flags)) {
-		set_capacity(disk, 0);
-		return -ENODEV;
-	}
-
-	ret = nvme_identify_ns(ctrl, ns->head->ns_id, &id);
-	if (ret)
-		goto out;
-
-	if (id->ncap == 0) {
-		ret = -ENODEV;
-		goto free_id;
-	}
-
-	ret = nvme_report_ns_ids(ctrl, ns->head->ns_id, id, &ids);
-	if (ret)
-		goto free_id;
-
-	if (!nvme_ns_ids_equal(&ns->head->ids, &ids)) {
-		dev_err(ctrl->device,
-			"identifiers changed for nsid %d\n", ns->head->ns_id);
-		ret = -ENODEV;
-		goto free_id;
-	}
-
-	ret = __nvme_revalidate_disk(disk, id);
-free_id:
-	kfree(id);
-out:
-	/*
-	 * Only fail the function if we got a fatal error back from the
-	 * device, otherwise ignore the error and just move on.
-	 */
-	if (ret == -ENOMEM || (ret > 0 && !(ret & NVME_SC_DNR)))
-		ret = 0;
-	else if (ret > 0)
-		ret = blk_status_to_errno(nvme_error_status(ret));
-	return ret;
-}

-static int nvme_revalidate_disk(struct gendisk *disk)
-{
-	int ret;
-
-	ret = _nvme_revalidate_disk(disk);
-	if (ret)
-		return ret;
-
-#ifdef CONFIG_BLK_DEV_ZONED
-	if (blk_queue_is_zoned(disk->queue)) {
-		struct nvme_ns *ns = disk->private_data;
-		struct nvme_ctrl *ctrl = ns->ctrl;
-
-		ret = blk_revalidate_disk_zones(disk, NULL);
-		if (!ret)
-			blk_queue_max_zone_append_sectors(disk->queue,
-							  ctrl->max_zone_append);
-	}
-#endif
+out_unfreeze:
+	blk_mq_unfreeze_queue(ns->disk->queue);
	return ret;
}
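
The new error handling in nvme_update_ns_info() is the classic kernel single-exit pattern: once the queue is frozen, every failure funnels through the one label that thaws it. A compact sketch of that shape (stand-in functions, not the kernel API):

#include <stdio.h>

static int frozen;

static void freeze(void)   { frozen = 1; }
static void unfreeze(void) { frozen = 0; }

static int step_ok(void)   { return 0; }
static int step_fail(void) { return -1; }

/* Single-exit error handling: every failure after freeze() funnels
 * through the one label that unfreezes the queue. */
static int update_info(void)
{
	int ret;

	freeze();
	ret = step_ok();
	if (ret)
		goto out_unfreeze;
	ret = step_fail();
	if (ret)
		goto out_unfreeze;
	unfreeze();
	return 0;

out_unfreeze:
	unfreeze();
	return ret;
}

int main(void)
{
	update_info();
	printf("frozen after error path: %d\n", frozen);	/* prints 0 */
	return 0;
}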

@@ -2502,26 +2421,6 @@ int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl)
}
EXPORT_SYMBOL_GPL(nvme_shutdown_ctrl);

-static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
-		struct request_queue *q)
-{
-	bool vwc = false;
-
-	if (ctrl->max_hw_sectors) {
-		u32 max_segments =
-			(ctrl->max_hw_sectors / (NVME_CTRL_PAGE_SIZE >> 9)) + 1;
-
-		max_segments = min_not_zero(max_segments, ctrl->max_segments);
-		blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
-		blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
-	}
-	blk_queue_virt_boundary(q, NVME_CTRL_PAGE_SIZE - 1);
-	blk_queue_dma_alignment(q, 7);
-	if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
-		vwc = true;
-	blk_queue_write_cache(q, vwc, vwc);
-}

static int nvme_configure_timestamp(struct nvme_ctrl *ctrl)
{
	__le64 ts;
@@ -3822,25 +3721,16 @@ out:
}

static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
-		struct nvme_id_ns *id)
+		struct nvme_ns_ids *ids, bool is_shared)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
-	bool is_shared = id->nmic & NVME_NS_NMIC_SHARED;
	struct nvme_ns_head *head = NULL;
-	struct nvme_ns_ids ids;
	int ret = 0;

-	ret = nvme_report_ns_ids(ctrl, nsid, id, &ids);
-	if (ret) {
-		if (ret < 0)
-			return ret;
-		return blk_status_to_errno(nvme_error_status(ret));
-	}

	mutex_lock(&ctrl->subsys->lock);
	head = nvme_find_ns_head(ctrl->subsys, nsid);
	if (!head) {
-		head = nvme_alloc_ns_head(ctrl, nsid, &ids);
+		head = nvme_alloc_ns_head(ctrl, nsid, ids);
		if (IS_ERR(head)) {
			ret = PTR_ERR(head);
			goto out_unlock;
@@ -3853,7 +3743,7 @@ static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
				"Duplicate unshared namespace %d\n", nsid);
			goto out_put_ns_head;
		}
-		if (!nvme_ns_ids_equal(&head->ids, &ids)) {
+		if (!nvme_ns_ids_equal(&head->ids, ids)) {
			dev_err(ctrl->device,
				"IDs don't match for shared namespace %d\n",
					nsid);
@@ -3901,7 +3791,8 @@ struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
}
EXPORT_SYMBOL_NS_GPL(nvme_find_get_ns, NVME_TARGET_PASSTHRU);

-static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
+static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
+		struct nvme_ns_ids *ids)
{
	struct nvme_ns *ns;
	struct gendisk *disk;
@@ -3909,9 +3800,12 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
	char disk_name[DISK_NAME_LEN];
	int node = ctrl->numa_node, flags = GENHD_FL_EXT_DEVT, ret;

+	if (nvme_identify_ns(ctrl, nsid, ids, &id))
+		return;

	ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
	if (!ns)
-		return;
+		goto out_free_id;

	ns->queue = blk_mq_init_queue(ctrl->tagset);
	if (IS_ERR(ns->queue))
@@ -3926,23 +3820,11 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)

	ns->queue->queuedata = ns;
	ns->ctrl = ctrl;

	kref_init(&ns->kref);
-	ns->lba_shift = 9; /* set to a default value for 512 until disk is validated */

-	blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
-	nvme_set_queue_limits(ctrl, ns->queue);

-	ret = nvme_identify_ns(ctrl, nsid, &id);
+	ret = nvme_init_ns_head(ns, nsid, ids, id->nmic & NVME_NS_NMIC_SHARED);
	if (ret)
		goto out_free_queue;

-	if (id->ncap == 0)	/* no namespace (legacy quirk) */
-		goto out_free_id;
-
-	ret = nvme_init_ns_head(ns, nsid, id);
-	if (ret)
-		goto out_free_id;
	nvme_set_disk_name(disk_name, ns, ctrl, &flags);

	disk = alloc_disk_node(0, node);
@@ -3956,7 +3838,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
	memcpy(disk->disk_name, disk_name, DISK_NAME_LEN);
	ns->disk = disk;

-	if (__nvme_revalidate_disk(disk, id))
+	if (nvme_update_ns_info(ns, id))
		goto out_put_disk;

	if ((ctrl->quirks & NVME_QUIRK_LIGHTNVM) && id->vs[0] == 0x1) {
@@ -3991,12 +3873,12 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
		list_del_init(&ns->head->entry);
	mutex_unlock(&ctrl->subsys->lock);
	nvme_put_ns_head(ns->head);
+ out_free_id:
+	kfree(id);
 out_free_queue:
	blk_cleanup_queue(ns->queue);
 out_free_ns:
	kfree(ns);
- out_free_id:
-	kfree(id);
}

static void nvme_ns_remove(struct nvme_ns *ns)
@@ -4004,6 +3886,7 @@ static void nvme_ns_remove(struct nvme_ns *ns)
	if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
		return;

+	set_capacity(ns->disk, 0);
	nvme_fault_inject_fini(&ns->fault_inject);

	mutex_lock(&ns->ctrl->subsys->lock);
@@ -4041,22 +3924,75 @@ static void nvme_ns_remove_by_nsid(struct nvme_ctrl *ctrl, u32 nsid)
	}
}

-static void nvme_validate_ns(struct nvme_ctrl *ctrl, unsigned nsid)
+static void nvme_validate_ns(struct nvme_ns *ns, struct nvme_ns_ids *ids)
{
+	struct nvme_id_ns *id;
+	int ret = -ENODEV;
+
+	if (test_bit(NVME_NS_DEAD, &ns->flags))
+		goto out;
+
+	ret = nvme_identify_ns(ns->ctrl, ns->head->ns_id, ids, &id);
+	if (ret)
+		goto out;
+
+	ret = -ENODEV;
+	if (!nvme_ns_ids_equal(&ns->head->ids, ids)) {
+		dev_err(ns->ctrl->device,
+			"identifiers changed for nsid %d\n", ns->head->ns_id);
+		goto out_free_id;
+	}
+
+	ret = nvme_update_ns_info(ns, id);
+
+out_free_id:
+	kfree(id);
+out:
+	/*
+	 * Only remove the namespace if we got a fatal error back from the
+	 * device, otherwise ignore the error and just move on.
+	 *
+	 * TODO: we should probably schedule a delayed retry here.
+	 */
+	if (ret && ret != -ENOMEM && !(ret > 0 && !(ret & NVME_SC_DNR)))
+		nvme_ns_remove(ns);
+	else
+		revalidate_disk_size(ns->disk, true);
}

+static void nvme_validate_or_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
+	struct nvme_ns_ids ids = { };
	struct nvme_ns *ns;
-	int ret;

+	if (nvme_identify_ns_descs(ctrl, nsid, &ids))
+		return;
+
	ns = nvme_find_get_ns(ctrl, nsid);
-	if (!ns) {
-		nvme_alloc_ns(ctrl, nsid);
+	if (ns) {
+		nvme_validate_ns(ns, &ids);
+		nvme_put_ns(ns);
		return;
	}

-	ret = nvme_revalidate_disk(ns->disk);
-	revalidate_disk_size(ns->disk, ret == 0);
-	if (ret)
-		nvme_ns_remove(ns);
-	nvme_put_ns(ns);
+	switch (ids.csi) {
+	case NVME_CSI_NVM:
+		nvme_alloc_ns(ctrl, nsid, &ids);
+		break;
+	case NVME_CSI_ZNS:
+		if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
+			dev_warn(ctrl->device,
+				"nsid %u not supported without CONFIG_BLK_DEV_ZONED\n",
+				nsid);
+			break;
+		}
+		nvme_alloc_ns(ctrl, nsid, &ids);
+		break;
+	default:
+		dev_warn(ctrl->device, "unknown csi %u for nsid %u\n",
+			ids.csi, nsid);
+		break;
+	}
}

static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
@@ -4092,7 +4028,14 @@ static int nvme_scan_ns_list(struct nvme_ctrl *ctrl)
		return -ENOMEM;

	for (;;) {
-		ret = nvme_identify_ns_list(ctrl, prev, ns_list);
+		struct nvme_command cmd = {
+			.identify.opcode	= nvme_admin_identify,
+			.identify.cns		= NVME_ID_CNS_NS_ACTIVE_LIST,
+			.identify.nsid		= cpu_to_le32(prev),
+		};
+
+		ret = nvme_submit_sync_cmd(ctrl->admin_q, &cmd, ns_list,
+					    NVME_IDENTIFY_DATA_SIZE);
		if (ret)
			goto free;

@@ -4101,7 +4044,7 @@ static int nvme_scan_ns_list(struct nvme_ctrl *ctrl)

			if (!nsid)	/* end of the list? */
				goto out;
-			nvme_validate_ns(ctrl, nsid);
+			nvme_validate_or_alloc_ns(ctrl, nsid);
			while (++prev < nsid)
				nvme_ns_remove_by_nsid(ctrl, prev);
		}
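
The scan loop walks the ascending, zero-terminated active-namespace list and prunes every nsid that falls into a gap between consecutive entries. A toy run of that walk, with printf standing in for nvme_validate_or_alloc_ns/nvme_ns_remove_by_nsid:

#include <stdio.h>

/* Pretend active-namespace list returned by the controller (ascending,
 * zero-terminated), with a gap where nsid 3 went away. */
static const unsigned ns_list[] = { 1, 2, 4, 0 };

int main(void)
{
	unsigned prev = 0;

	for (int i = 0; ns_list[i]; i++) {
		unsigned nsid = ns_list[i];

		printf("validate or alloc nsid %u\n", nsid);
		/* Anything between the previous and current entry is no
		 * longer active and gets removed, as in nvme_scan_ns_list. */
		while (++prev < nsid)
			printf("remove stale nsid %u\n", prev);
	}
	return 0;	/* prints: validate 1, 2; validate 4; remove 3 */
}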
@@ -4124,7 +4067,7 @@ static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl)
	kfree(id);

	for (i = 1; i <= nn; i++)
-		nvme_validate_ns(ctrl, i);
+		nvme_validate_or_alloc_ns(ctrl, i);

	nvme_remove_invalid_namespaces(ctrl, nn);
}
drivers/nvme/host/nvme.h  +3 −6
@@ -758,10 +758,9 @@ static inline void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
}
#endif /* CONFIG_NVME_MULTIPATH */

+int nvme_revalidate_zones(struct nvme_ns *ns);
#ifdef CONFIG_BLK_DEV_ZONED
-int nvme_update_zone_info(struct gendisk *disk, struct nvme_ns *ns,
-			  unsigned lbaf);
-
+int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf);
int nvme_report_zones(struct gendisk *disk, sector_t sector,
		      unsigned int nr_zones, report_zones_cb cb, void *data);

@@ -778,9 +777,7 @@ static inline blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns,
	return BLK_STS_NOTSUPP;
}

-static inline int nvme_update_zone_info(struct gendisk *disk,
-					struct nvme_ns *ns,
-					unsigned lbaf)
+static inline int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf)
{
	dev_warn(ns->ctrl->device,
		 "Please enable CONFIG_BLK_DEV_ZONED to support ZNS devices\n");
drivers/nvme/host/zns.c  +13 −3
@@ -7,6 +7,17 @@
#include <linux/vmalloc.h>
#include "nvme.h"

+int nvme_revalidate_zones(struct nvme_ns *ns)
+{
+	struct request_queue *q = ns->queue;
+	int ret;
+
+	ret = blk_revalidate_disk_zones(ns->disk, NULL);
+	if (!ret)
+		blk_queue_max_zone_append_sectors(q, ns->ctrl->max_zone_append);
+	return ret;
+}
+
static int nvme_set_max_append(struct nvme_ctrl *ctrl)
{
	struct nvme_command c = { };
@@ -35,11 +46,10 @@ static int nvme_set_max_append(struct nvme_ctrl *ctrl)
	return 0;
}

-int nvme_update_zone_info(struct gendisk *disk, struct nvme_ns *ns,
-			  unsigned lbaf)
+int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf)
{
	struct nvme_effects_log *log = ns->head->effects;
-	struct request_queue *q = disk->queue;
+	struct request_queue *q = ns->queue;
	struct nvme_command c = { };
	struct nvme_id_ns_zns *id;
	int status;
drivers/nvme/target/loop.c  +2 −2
@@ -579,7 +579,7 @@ static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_loop_ctrl_ops,
				0 /* no quirks, we're perfect! */);
	if (ret)
-		goto out_put_ctrl;
+		goto out;

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING))
		WARN_ON_ONCE(1);
@@ -635,8 +635,8 @@ out_free_queues:
	kfree(ctrl->queues);
out_uninit_ctrl:
	nvme_uninit_ctrl(&ctrl->ctrl);
-out_put_ctrl:
	nvme_put_ctrl(&ctrl->ctrl);
+out:
	if (ret > 0)
		ret = -EIO;
	return ERR_PTR(ret);
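
This loop.c hunk is the refcount-leak fix from the summary: if nvme_init_ctrl() itself fails, no controller reference was taken, so the unwind path must skip the put. A generic sketch of that rule — release only what was actually acquired, in reverse order (all names invented):

#include <stdio.h>
#include <stdlib.h>

struct ctrl { int refs; };

static int init_ctrl(struct ctrl *c, int fail)
{
	if (fail)
		return -1;	/* failed: caller must NOT drop a reference */
	c->refs = 1;		/* success: caller now owns one reference */
	return 0;
}

static struct ctrl *create_ctrl(int fail)
{
	struct ctrl *c = calloc(1, sizeof(*c));

	if (!c)
		return NULL;
	if (init_ctrl(c, fail))
		goto out;	/* skip the put: no reference was taken */
	/* ... more setup; later failures would unwind via a put label ... */
	return c;
out:
	free(c);
	return NULL;
}

int main(void)
{
	struct ctrl *c = create_ctrl(1);

	printf("create failed cleanly: %s\n", c ? "no" : "yes");
	free(c);
	return 0;
}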
include/linux/blkdev.h  +3 −1
@@ -692,7 +692,9 @@ static inline bool queue_is_mq(struct request_queue *q)
static inline enum blk_zoned_model
blk_queue_zoned_model(struct request_queue *q)
{
-	return q->limits.zoned;
+	if (IS_ENABLED(CONFIG_BLK_DEV_ZONED))
+		return q->limits.zoned;
+	return BLK_ZONED_NONE;
}

static inline bool blk_queue_is_zoned(struct request_queue *q)
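
IS_ENABLED() keeps the disabled branch visible to the compiler — it is still type-checked and then discarded as dead code — which is why the blkdev.h change prefers it over an #ifdef. A userspace approximation of the pattern (the macro here is a plain 0/1 stand-in for the kernel's kconfig-aware version):

#include <stdio.h>

/* Userspace stand-in for the kernel's IS_ENABLED(): expands to 0 or 1
 * at compile time. */
#define IS_ENABLED(x) (x)
#define CONFIG_BLK_DEV_ZONED 0	/* flip to 1 to "enable" zoned support */

enum zoned_model { ZONED_NONE, ZONED_HOST_MANAGED };

static enum zoned_model queue_zoned_model(enum zoned_model limits_zoned)
{
	/* The dead branch is eliminated by the compiler but still has to
	 * compile, unlike code hidden behind an #ifdef. */
	if (IS_ENABLED(CONFIG_BLK_DEV_ZONED))
		return limits_zoned;
	return ZONED_NONE;
}

int main(void)
{
	printf("model: %d\n", queue_zoned_model(ZONED_HOST_MANAGED));
	return 0;
}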