Commit e1777d09 authored by Damien Le Moal, committed by Jens Axboe

null_blk: Fix scheduling in atomic with zoned mode



Commit aa1c09cb ("null_blk: Fix locking in zoned mode") changed
zone locking to use the potentially sleeping wait_on_bit_io()
function. This is acceptable when memory backing is enabled, as the
device queue is in that case marked as blocking, but it triggers
scheduling while in atomic context when memory backing is disabled.
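
For context on the bug class: a blk-mq driver's ->queue_rq() handler
may only sleep if its tag set was created with BLK_MQ_F_BLOCKING. A
minimal sketch of the offending pattern (demo_queue_rq and the
driver_data setup are hypothetical illustrations, not the null_blk
code):

    #include <linux/blk-mq.h>
    #include <linux/wait_bit.h>

    /*
     * Hypothetical sketch: without BLK_MQ_F_BLOCKING in the tag set
     * flags, ->queue_rq() can be called in atomic context, so the
     * sleeping wait_on_bit_lock_io() below triggers
     * "BUG: scheduling while atomic".
     */
    static blk_status_t demo_queue_rq(struct blk_mq_hw_ctx *hctx,
    				      const struct blk_mq_queue_data *bd)
    {
    	unsigned long *zone_locks = hctx->driver_data;	/* assumed setup */

    	/* May sleep: only safe if the queue is marked as blocking. */
    	wait_on_bit_lock_io(zone_locks, 0, TASK_UNINTERRUPTIBLE);
    	/* ... process bd->rq ... */
    	clear_and_wake_up_bit(0, zone_locks);
    	return BLK_STS_OK;
    }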

Fix this by relying solely on the device zone spinlock for zone
information protection, without temporarily releasing the lock around
null_process_cmd() execution in null_zone_write(). This is OK since,
when memory backing is disabled, command processing does not block and
the memory backing lock nullb->lock is unused. This approach avoids
the overhead of marking a zoned null_blk device queue as blocking when
memory backing is unused.
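
Condensed, the resulting write path looks like this (a sketch of the
scheme only, compiling in the driver context; sketch_zone_write is a
hypothetical stand-in for the driver's null_zone_write(), with the
elided steps summarized in comments):

    /*
     * Zone information is always protected by dev->zone_lock. With
     * memory backing, the per-zone bit lock taken in null_lock_zone()
     * keeps the zone operation atomic while the spinlock is dropped
     * around the sleeping null_process_cmd() call.
     */
    static blk_status_t sketch_zone_write(struct nullb_device *dev,
    				          struct nullb_cmd *cmd,
    				          unsigned int zno, sector_t sector,
    				          unsigned int nr_sectors)
    {
    	blk_status_t ret;

    	null_lock_zone(dev, zno);  /* bit lock (if backed) + spin_lock_irq() */

    	/* ... check the zone condition and open the zone under the lock ... */

    	if (dev->memory_backed)
    		spin_unlock_irq(&dev->zone_lock);
    	ret = null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
    	if (dev->memory_backed)
    		spin_lock_irq(&dev->zone_lock);

    	/* ... advance the write pointer under the lock ... */

    	null_unlock_zone(dev, zno); /* spin_unlock_irq() + wake bit waiters */
    	return ret;
    }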

This patch also adds comments to the zone locking code to explain the
unusual locking scheme.

Fixes: aa1c09cb ("null_blk: Fix locking in zoned mode")
Reported-by: kernel test robot <lkp@intel.com>
Signed-off-by: Damien Le Moal <damien.lemoal@wdc.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Cc: stable@vger.kernel.org
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 7ae7a8de
drivers/block/null_blk.h (+1 −1)
@@ -47,7 +47,7 @@ struct nullb_device {
 	unsigned int nr_zones_closed;
 	struct blk_zone *zones;
 	sector_t zone_size_sects;
-	spinlock_t zone_dev_lock;
+	spinlock_t zone_lock;
 	unsigned long *zone_locks;
 
 	unsigned long size; /* device size in MB */
drivers/block/null_blk_zoned.c (+31 −16)
@@ -46,12 +46,21 @@ int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q)
 	if (!dev->zones)
 		return -ENOMEM;
 
-	spin_lock_init(&dev->zone_dev_lock);
-	dev->zone_locks = bitmap_zalloc(dev->nr_zones, GFP_KERNEL);
-	if (!dev->zone_locks) {
-		kvfree(dev->zones);
-		return -ENOMEM;
+	/*
+	 * With memory backing, the zone_lock spinlock needs to be temporarily
+	 * released to avoid scheduling in atomic context. To guarantee zone
+	 * information protection, use a bitmap to lock zones with
+	 * wait_on_bit_lock_io(). Sleeping on the lock is OK as memory backing
+	 * implies that the queue is marked with BLK_MQ_F_BLOCKING.
+	 */
+	spin_lock_init(&dev->zone_lock);
+	if (dev->memory_backed) {
+		dev->zone_locks = bitmap_zalloc(dev->nr_zones, GFP_KERNEL);
+		if (!dev->zone_locks) {
+			kvfree(dev->zones);
+			return -ENOMEM;
+		}
 	}
 
 	if (dev->zone_nr_conv >= dev->nr_zones) {
 		dev->zone_nr_conv = dev->nr_zones - 1;
@@ -137,11 +146,16 @@ void null_free_zoned_dev(struct nullb_device *dev)
 
 static inline void null_lock_zone(struct nullb_device *dev, unsigned int zno)
 {
-	wait_on_bit_lock_io(dev->zone_locks, zno, TASK_UNINTERRUPTIBLE);
+	if (dev->memory_backed)
+		wait_on_bit_lock_io(dev->zone_locks, zno, TASK_UNINTERRUPTIBLE);
+	spin_lock_irq(&dev->zone_lock);
 }
 
 static inline void null_unlock_zone(struct nullb_device *dev, unsigned int zno)
 {
-	clear_and_wake_up_bit(zno, dev->zone_locks);
+	spin_unlock_irq(&dev->zone_lock);
+
+	if (dev->memory_backed)
+		clear_and_wake_up_bit(zno, dev->zone_locks);
 }
 
@@ -322,7 +336,6 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
 		return null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
 
 	null_lock_zone(dev, zno);
-	spin_lock(&dev->zone_dev_lock);
 
 	switch (zone->cond) {
 	case BLK_ZONE_COND_FULL:
@@ -375,9 +388,17 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
 	if (zone->cond != BLK_ZONE_COND_EXP_OPEN)
 		zone->cond = BLK_ZONE_COND_IMP_OPEN;
 
-	spin_unlock(&dev->zone_dev_lock);
+	/*
+	 * Memory backing allocation may sleep: release the zone_lock spinlock
+	 * to avoid scheduling in atomic context. Zone operation atomicity is
+	 * still guaranteed through the zone_locks bitmap.
+	 */
+	if (dev->memory_backed)
+		spin_unlock_irq(&dev->zone_lock);
 	ret = null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
-	spin_lock(&dev->zone_dev_lock);
+	if (dev->memory_backed)
+		spin_lock_irq(&dev->zone_lock);
 
 	if (ret != BLK_STS_OK)
 		goto unlock;
 
@@ -392,7 +413,6 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
 	ret = BLK_STS_OK;
 
 unlock:
-	spin_unlock(&dev->zone_dev_lock);
 	null_unlock_zone(dev, zno);
 
 	return ret;
@@ -516,9 +536,7 @@ static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
 			null_lock_zone(dev, i);
 			zone = &dev->zones[i];
 			if (zone->cond != BLK_ZONE_COND_EMPTY) {
-				spin_lock(&dev->zone_dev_lock);
 				null_reset_zone(dev, zone);
-				spin_unlock(&dev->zone_dev_lock);
 				trace_nullb_zone_op(cmd, i, zone->cond);
 			}
 			null_unlock_zone(dev, i);
@@ -530,7 +548,6 @@ static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
 	zone = &dev->zones[zone_no];
 
 	null_lock_zone(dev, zone_no);
-	spin_lock(&dev->zone_dev_lock);
 
 	switch (op) {
 	case REQ_OP_ZONE_RESET:
@@ -550,8 +567,6 @@ static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
 		break;
 	}
 
-	spin_unlock(&dev->zone_dev_lock);
-
 	if (ret == BLK_STS_OK)
 		trace_nullb_zone_op(cmd, zone_no, zone->cond);
 