Commit a271a89c authored by Mike Snitzer
Browse files

dm mpath: take m->lock spinlock when testing QUEUE_IF_NO_PATH



Fix multipath_end_io, multipath_end_io_bio and multipath_busy to take
m->lock while testing if MPATHF_QUEUE_IF_NO_PATH bit is set.  These are
all slow-path cases when no paths are available so extra locking isn't a
performance hit.  Correctness matters most.

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
parent 69cea0d4
Loading
Loading
Loading
Loading
+34 −18
Original line number | Diff line number | Diff line
@@ -1621,13 +1621,17 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone,
		if (pgpath)
			fail_path(pgpath);

		if (atomic_read(&m->nr_valid_paths) == 0 &&
		    !must_push_back_rq(m)) {
		if (!atomic_read(&m->nr_valid_paths)) {
			unsigned long flags;
			spin_lock_irqsave(&m->lock, flags);
			if (!must_push_back_rq(m)) {
				if (error == BLK_STS_IOERR)
					dm_report_EIO(m);
				/* complete with the original error */
				r = DM_ENDIO_DONE;
			}
			spin_unlock_irqrestore(&m->lock, flags);
		}
	}

	if (pgpath) {
@@ -1656,16 +1660,20 @@ static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone,
	if (pgpath)
		fail_path(pgpath);

	if (atomic_read(&m->nr_valid_paths) == 0 &&
	    !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
	if (!atomic_read(&m->nr_valid_paths)) {
		spin_lock_irqsave(&m->lock, flags);
		if (!test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
			if (__must_push_back(m)) {
				r = DM_ENDIO_REQUEUE;
			} else {
				dm_report_EIO(m);
				*error = BLK_STS_IOERR;
			}
			spin_unlock_irqrestore(&m->lock, flags);
			goto done;
		}
		spin_unlock_irqrestore(&m->lock, flags);
	}

	spin_lock_irqsave(&m->lock, flags);
	bio_list_add(&m->queued_bios, clone);
@@ -1962,10 +1970,11 @@ static int multipath_prepare_ioctl(struct dm_target *ti,
		}
	} else {
		/* No path is available */
		r = -EIO;
		spin_lock_irqsave(&m->lock, flags);
		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
			r = -ENOTCONN;
		else
			r = -EIO;
		spin_unlock_irqrestore(&m->lock, flags);
	}

	if (r == -ENOTCONN) {
@@ -2036,8 +2045,15 @@ static int multipath_busy(struct dm_target *ti)
		return true;

	/* no paths available, for blk-mq: rely on IO mapping to delay requeue */
	if (!atomic_read(&m->nr_valid_paths) && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
	if (!atomic_read(&m->nr_valid_paths)) {
		unsigned long flags;
		spin_lock_irqsave(&m->lock, flags);
		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
			spin_unlock_irqrestore(&m->lock, flags);
			return (m->queue_mode != DM_TYPE_REQUEST_BASED);
		}
		spin_unlock_irqrestore(&m->lock, flags);
	}

	/* Guess which priority_group will be used at next mapping time */
	pg = READ_ONCE(m->current_pg);