Commit 4429f14a authored by Linus Torvalds

Merge tag 'block-5.10-2020-11-07' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:

 - NVMe pull request from Christoph:
     - revert a nvme_queue size optimization (Keith Busch)
     - fabrics timeout races fixes (Chao Leng and Sagi Grimberg)

 - null_blk zone locking fix (Damien)

* tag 'block-5.10-2020-11-07' of git://git.kernel.dk/linux-block:
  null_blk: Fix scheduling in atomic with zoned mode
  nvme-tcp: avoid repeated request completion
  nvme-rdma: avoid repeated request completion
  nvme-tcp: avoid race between time out and tear down
  nvme-rdma: avoid race between time out and tear down
  nvme: introduce nvme_sync_io_queues
  Revert "nvme-pci: remove last_sq_tail"
parents e9c02d68 e1777d09
+1 −1
@@ -47,7 +47,7 @@ struct nullb_device {
	unsigned int nr_zones_closed;
	struct blk_zone *zones;
	sector_t zone_size_sects;
	spinlock_t zone_dev_lock;
	spinlock_t zone_lock;
	unsigned long *zone_locks;

	unsigned long size; /* device size in MB */
+31 −16
@@ -46,12 +46,21 @@ int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q)
	if (!dev->zones)
		return -ENOMEM;

	spin_lock_init(&dev->zone_dev_lock);
	/*
	 * With memory backing, the zone_lock spinlock needs to be temporarily
	 * released to avoid scheduling in atomic context. To guarantee zone
	 * information protection, use a bitmap to lock zones with
	 * wait_on_bit_lock_io(). Sleeping on the lock is OK as memory backing
	 * implies that the queue is marked with BLK_MQ_F_BLOCKING.
	 */
	spin_lock_init(&dev->zone_lock);
	if (dev->memory_backed) {
		dev->zone_locks = bitmap_zalloc(dev->nr_zones, GFP_KERNEL);
		if (!dev->zone_locks) {
			kvfree(dev->zones);
			return -ENOMEM;
		}
	}

	if (dev->zone_nr_conv >= dev->nr_zones) {
		dev->zone_nr_conv = dev->nr_zones - 1;
@@ -137,11 +146,16 @@ void null_free_zoned_dev(struct nullb_device *dev)

static inline void null_lock_zone(struct nullb_device *dev, unsigned int zno)
{
	if (dev->memory_backed)
		wait_on_bit_lock_io(dev->zone_locks, zno, TASK_UNINTERRUPTIBLE);
	spin_lock_irq(&dev->zone_lock);
}

static inline void null_unlock_zone(struct nullb_device *dev, unsigned int zno)
{
	spin_unlock_irq(&dev->zone_lock);

	if (dev->memory_backed)
		clear_and_wake_up_bit(zno, dev->zone_locks);
}

@@ -322,7 +336,6 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
		return null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);

	null_lock_zone(dev, zno);
	spin_lock(&dev->zone_dev_lock);

	switch (zone->cond) {
	case BLK_ZONE_COND_FULL:
@@ -375,9 +388,17 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
	if (zone->cond != BLK_ZONE_COND_EXP_OPEN)
		zone->cond = BLK_ZONE_COND_IMP_OPEN;

	spin_unlock(&dev->zone_dev_lock);
	/*
	 * Memory backing allocation may sleep: release the zone_lock spinlock
	 * to avoid scheduling in atomic context. Zone operation atomicity is
	 * still guaranteed through the zone_locks bitmap.
	 */
	if (dev->memory_backed)
		spin_unlock_irq(&dev->zone_lock);
	ret = null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
	spin_lock(&dev->zone_dev_lock);
	if (dev->memory_backed)
		spin_lock_irq(&dev->zone_lock);

	if (ret != BLK_STS_OK)
		goto unlock;

@@ -392,7 +413,6 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
	ret = BLK_STS_OK;

unlock:
	spin_unlock(&dev->zone_dev_lock);
	null_unlock_zone(dev, zno);

	return ret;
@@ -516,9 +536,7 @@ static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
			null_lock_zone(dev, i);
			zone = &dev->zones[i];
			if (zone->cond != BLK_ZONE_COND_EMPTY) {
				spin_lock(&dev->zone_dev_lock);
				null_reset_zone(dev, zone);
				spin_unlock(&dev->zone_dev_lock);
				trace_nullb_zone_op(cmd, i, zone->cond);
			}
			null_unlock_zone(dev, i);
@@ -530,7 +548,6 @@ static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
	zone = &dev->zones[zone_no];

	null_lock_zone(dev, zone_no);
	spin_lock(&dev->zone_dev_lock);

	switch (op) {
	case REQ_OP_ZONE_RESET:
@@ -550,8 +567,6 @@ static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
		break;
	}

	spin_unlock(&dev->zone_dev_lock);

	if (ret == BLK_STS_OK)
		trace_nullb_zone_op(cmd, zone_no, zone->cond);
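
The zoned null_blk changes above replace the zone_dev_lock spinlock with a two-level scheme: a zone_lock spinlock protecting zone metadata, plus a per-zone bit lock (the zone_locks bitmap) taken only when memory backing is enabled, so the spinlock can be dropped across the sleeping buffer allocation in null_process_cmd() while the zone stays exclusively owned. Below is a minimal sketch of that pattern, not driver code; the res_dev structure, the may_sleep flag and do_sleeping_work() are illustrative names, not symbols from null_blk.

#include <linux/bitmap.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/wait_bit.h>

struct res_dev {
	bool may_sleep;			/* e.g. memory backing enabled */
	spinlock_t lock;		/* protects per-resource metadata */
	unsigned long *bit_locks;	/* one lock bit per resource */
};

/* Placeholder for a sleeping operation such as a buffer allocation. */
static int do_sleeping_work(struct res_dev *dev, unsigned int idx)
{
	return 0;
}

static void res_lock(struct res_dev *dev, unsigned int idx)
{
	/* The bit lock may sleep, so take it before the spinlock. */
	if (dev->may_sleep)
		wait_on_bit_lock_io(dev->bit_locks, idx, TASK_UNINTERRUPTIBLE);
	spin_lock_irq(&dev->lock);
}

static void res_unlock(struct res_dev *dev, unsigned int idx)
{
	spin_unlock_irq(&dev->lock);
	if (dev->may_sleep)
		clear_and_wake_up_bit(idx, dev->bit_locks);
}

static int res_write(struct res_dev *dev, unsigned int idx)
{
	int ret;

	res_lock(dev, idx);
	/* ... update metadata under the spinlock ... */

	/*
	 * The slow path may allocate memory and sleep: drop the spinlock.
	 * The bit lock still keeps resource 'idx' exclusively owned.
	 */
	if (dev->may_sleep)
		spin_unlock_irq(&dev->lock);
	ret = do_sleeping_work(dev, idx);
	if (dev->may_sleep)
		spin_lock_irq(&dev->lock);

	/* ... finish the metadata update ... */
	res_unlock(dev, idx);
	return ret;
}

Because the bit lock itself can sleep, this only works on queues that allow blocking in the submission path, which is why the comment in the diff points out that memory backing implies BLK_MQ_F_BLOCKING.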

+6 −2
@@ -4582,8 +4582,7 @@ void nvme_start_queues(struct nvme_ctrl *ctrl)
}
EXPORT_SYMBOL_GPL(nvme_start_queues);


void nvme_sync_queues(struct nvme_ctrl *ctrl)
void nvme_sync_io_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

@@ -4591,7 +4590,12 @@ void nvme_sync_queues(struct nvme_ctrl *ctrl)
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_sync_queue(ns->queue);
	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_sync_io_queues);

void nvme_sync_queues(struct nvme_ctrl *ctrl)
{
	nvme_sync_io_queues(ctrl);
	if (ctrl->admin_q)
		blk_sync_queue(ctrl->admin_q);
}
+1 −0
@@ -602,6 +602,7 @@ void nvme_stop_queues(struct nvme_ctrl *ctrl);
void nvme_start_queues(struct nvme_ctrl *ctrl);
void nvme_kill_queues(struct nvme_ctrl *ctrl);
void nvme_sync_queues(struct nvme_ctrl *ctrl);
void nvme_sync_io_queues(struct nvme_ctrl *ctrl);
void nvme_unfreeze(struct nvme_ctrl *ctrl);
void nvme_wait_freeze(struct nvme_ctrl *ctrl);
int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout);
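
nvme_sync_io_queues() gives the fabrics transports a way to flush pending timeout work for the namespace queues only, without touching the admin queue, before they cancel outstanding requests during teardown. The nvme-tcp and nvme-rdma patches in this pull (not shown in this diff) use it for exactly that; the following is a hedged sketch of the general shape of such a teardown path, where transport_teardown_io_queues() and transport_stop_io_queues() are placeholders, not real symbols.

#include <linux/blk-mq.h>
#include "nvme.h"

/* Placeholder: whatever the transport does to stop its I/O queues. */
static void transport_stop_io_queues(struct nvme_ctrl *ctrl)
{
}

/*
 * Sketch only: close the race between a running timeout handler and
 * teardown by syncing the I/O queues before cancelling requests.
 */
static void transport_teardown_io_queues(struct nvme_ctrl *ctrl)
{
	if (ctrl->queue_count <= 1)
		return;

	nvme_stop_queues(ctrl);		/* quiesce namespace request queues */
	nvme_sync_io_queues(ctrl);	/* wait for any running timeout work */
	transport_stop_io_queues(ctrl);	/* placeholder: transport-specific step */

	/* complete what is left exactly once, now that timeouts cannot race */
	blk_mq_tagset_busy_iter(ctrl->tagset, nvme_cancel_request, ctrl);
	blk_mq_tagset_wait_completed_request(ctrl->tagset);
}

Roughly, the idea is that once the queues are quiesced and synced, a timeout handler has either finished or will find the request already cancelled, so a request is never completed twice; the "avoid repeated request completion" patches in this pull address the remaining double-completion window on the transport side.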
+19 −4
@@ -198,6 +198,7 @@ struct nvme_queue {
	u32 q_depth;
	u16 cq_vector;
	u16 sq_tail;
	u16 last_sq_tail;
	u16 cq_head;
	u16 qid;
	u8 cq_phase;
@@ -455,11 +456,24 @@ static int nvme_pci_map_queues(struct blk_mq_tag_set *set)
	return 0;
}

static inline void nvme_write_sq_db(struct nvme_queue *nvmeq)
/*
 * Write sq tail if we are asked to, or if the next command would wrap.
 */
static inline void nvme_write_sq_db(struct nvme_queue *nvmeq, bool write_sq)
{
	if (!write_sq) {
		u16 next_tail = nvmeq->sq_tail + 1;

		if (next_tail == nvmeq->q_depth)
			next_tail = 0;
		if (next_tail != nvmeq->last_sq_tail)
			return;
	}

	if (nvme_dbbuf_update_and_check_event(nvmeq->sq_tail,
			nvmeq->dbbuf_sq_db, nvmeq->dbbuf_sq_ei))
		writel(nvmeq->sq_tail, nvmeq->q_db);
	nvmeq->last_sq_tail = nvmeq->sq_tail;
}

/**
@@ -476,8 +490,7 @@ static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd,
	       cmd, sizeof(*cmd));
	if (++nvmeq->sq_tail == nvmeq->q_depth)
		nvmeq->sq_tail = 0;
	if (write_sq)
		nvme_write_sq_db(nvmeq);
	nvme_write_sq_db(nvmeq, write_sq);
	spin_unlock(&nvmeq->sq_lock);
}

@@ -486,7 +499,8 @@ static void nvme_commit_rqs(struct blk_mq_hw_ctx *hctx)
	struct nvme_queue *nvmeq = hctx->driver_data;

	spin_lock(&nvmeq->sq_lock);
	nvme_write_sq_db(nvmeq);
	if (nvmeq->sq_tail != nvmeq->last_sq_tail)
		nvme_write_sq_db(nvmeq, true);
	spin_unlock(&nvmeq->sq_lock);
}

@@ -1496,6 +1510,7 @@ static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
	struct nvme_dev *dev = nvmeq->dev;

	nvmeq->sq_tail = 0;
	nvmeq->last_sq_tail = 0;
	nvmeq->cq_head = 0;
	nvmeq->cq_phase = 1;
	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
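
The revert above brings back last_sq_tail so submission-side doorbell writes can be coalesced again: nvme_submit_cmd() only rings the doorbell when asked (write_sq) or when the next entry would wrap onto the last value the device has seen, and nvme_commit_rqs() flushes whatever is still pending. The small standalone program below is an illustration only, not kernel code; it walks the same tail/last_tail bookkeeping on a toy 4-entry ring.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define Q_DEPTH 4	/* toy submission queue depth */

struct sq {
	uint16_t tail;		/* next free SQ slot */
	uint16_t last_tail;	/* tail value last written to the doorbell */
};

static void ring_doorbell(struct sq *q)
{
	printf("doorbell <- %u\n", (unsigned int)q->tail);
	q->last_tail = q->tail;
}

/* Mirrors the shape of nvme_write_sq_db(nvmeq, write_sq) after the revert. */
static void write_sq_db(struct sq *q, bool write_sq)
{
	if (!write_sq) {
		uint16_t next_tail = (uint16_t)((q->tail + 1) % Q_DEPTH);

		/* Defer unless one more entry would land on last_tail. */
		if (next_tail != q->last_tail)
			return;
	}
	ring_doorbell(q);
}

static void submit(struct sq *q, bool write_sq)
{
	/* copying the command into the ring would happen here */
	q->tail = (uint16_t)((q->tail + 1) % Q_DEPTH);
	write_sq_db(q, write_sq);
}

int main(void)
{
	struct sq q = { 0, 0 };

	submit(&q, false);	/* batched: no doorbell yet */
	submit(&q, false);	/* batched: no doorbell yet */
	submit(&q, false);	/* next entry would wrap onto last_tail: ring */
	submit(&q, true);	/* explicit flush, as nvme_commit_rqs() does */
	return 0;
}

In the real driver the flush in nvme_commit_rqs() is additionally skipped when sq_tail == last_sq_tail, i.e. when nothing new was queued since the last doorbell write, which is exactly the check visible in the diff above.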