Commit b2abdb1b authored by Mike Snitzer
Browse files

dm: fold dm_process_bio() into dm_submit_bio()



dm_process_bio() is only called by dm_submit_bio(), there is no benefit
to keeping dm_process_bio() factored out, so fold it.

While at it, cleanup dm_submit_bio()'s DMF_BLOCK_IO_FOR_SUSPEND related
branching and expand scope of dm_get_live_table() rcu reference on map
via common 'out' label to dm_put_live_table().

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
parent 0c2915b8
Loading
Loading
Loading
Loading
+22 −30
Original line number Diff line number Diff line
@@ -1665,28 +1665,6 @@ out:
	return ret;
}

/*
 * Route a bio through the device-mapper core for @md using live table @map.
 * Returns a block-layer cookie (BLK_QC_T_NONE when the bio is not queued).
 * NOTE(review): per the commit message, this is only called by
 * dm_submit_bio() and is folded into it by this patch.
 */
static blk_qc_t dm_process_bio(struct mapped_device *md,
			       struct dm_table *map, struct bio *bio)
{
	blk_qc_t ret = BLK_QC_T_NONE;

	/* No live table to map against: fail the bio with an I/O error. */
	if (unlikely(!map)) {
		bio_io_error(bio);
		return ret;
	}

	/*
	 * Use blk_queue_split() for abnormal IO (e.g. discard, writesame, etc)
	 * otherwise associated queue_limits won't be imposed.
	 */
	if (is_abnormal_io(bio))
		blk_queue_split(&bio);

	/*
	 * NVMe bio-based devices take a dedicated submission path;
	 * everything else goes through the generic split-and-clone path.
	 */
	if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED)
		return __process_bio(md, map, bio);
	return __split_and_process_bio(md, map, bio);
}

static blk_qc_t dm_submit_bio(struct bio *bio)
{
	struct mapped_device *md = bio->bi_disk->private_data;
@@ -1707,22 +1685,36 @@ static blk_qc_t dm_submit_bio(struct bio *bio)
	}

	map = dm_get_live_table(md, &srcu_idx);
	if (unlikely(!map)) {
		DMERR_LIMIT("%s: mapping table unavailable, erroring io",
			    dm_device_name(md));
		bio_io_error(bio);
		goto out;
	}

	/* if we're suspended, we have to queue this io for later */
	/* If suspended, queue this IO for later */
	if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
		dm_put_live_table(md, srcu_idx);

		if (bio->bi_opf & REQ_NOWAIT)
			bio_wouldblock_error(bio);
		else if (!(bio->bi_opf & REQ_RAHEAD))
			queue_io(md, bio);
		else
		else if (bio->bi_opf & REQ_RAHEAD)
			bio_io_error(bio);
		return ret;
		else
			queue_io(md, bio);
		goto out;
	}

	ret = dm_process_bio(md, map, bio);
	/*
	 * Use blk_queue_split() for abnormal IO (e.g. discard, writesame, etc)
	 * otherwise associated queue_limits won't be imposed.
	 */
	if (is_abnormal_io(bio))
		blk_queue_split(&bio);

	if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED)
		ret = __process_bio(md, map, bio);
	else
		ret = __split_and_process_bio(md, map, bio);
out:
	dm_put_live_table(md, srcu_idx);
	return ret;
}