Commit 2705c937 authored by Ming Lei, committed by Jens Axboe

block: kill QUEUE_FLAG_NO_SG_MERGE



Since bdced438 ("block: setup bi_phys_segments after splitting"), the
physical segment count is mainly computed in blk_queue_split() for the
fast path, and the BIO_SEG_VALID flag is set there too.
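For reference, a condensed sketch of how blk_queue_split() fills in the
segment count since that commit; the op-specific split dispatching and the
resubmit logic are elided, so this is not the verbatim source:

void blk_queue_split(struct request_queue *q, struct bio **bio)
{
	struct bio *split, *res;
	unsigned nsegs;

	/* splitting walks the bvecs anyway, so the physical segment
	 * count falls out as a by-product */
	split = blk_bio_segment_split(q, *bio, &q->bio_split, &nsegs);

	/* cache the count and mark it valid for the fast path */
	res = split ? split : *bio;
	res->bi_phys_segments = nsegs;
	bio_set_flag(res, BIO_SEG_VALID);

	/* ... resubmit the remainder if the bio was split ... */
}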

Now only blk_recount_segments() and blk_recalc_rq_segments() look at
QUEUE_FLAG_NO_SG_MERGE.

Basically blk_recount_segments() is bypassed in the fast path, given that
BIO_SEG_VALID is already set in blk_queue_split().
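In other words, the merge paths only pay for a recount when the cached flag
is absent. A minimal sketch of that guard (blk_segments_ensure_valid() is a
hypothetical helper name; the real checks sit inline in the
ll_*_merge_fn() paths):

/* hypothetical helper illustrating the BIO_SEG_VALID fast path */
static void blk_segments_ensure_valid(struct request_queue *q,
				      struct bio *bio)
{
	/* fast path: blk_queue_split() already counted the segments */
	if (bio_flagged(bio, BIO_SEG_VALID))
		return;

	/* slow path: recount from the bvecs */
	blk_recount_segments(q, bio);
}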

As for the other user, blk_recalc_rq_segments(), it only runs in two places:

- the partial-completion branch of blk_update_request(), which is an unusual case

- blk_cloned_rq_check_limits() (condensed below), where killing the flag is
still not a big problem since dm-rq is the only user.
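A condensed version of that second caller, as the function looked around
this series (error messages trimmed; not the verbatim source):

static int blk_cloned_rq_check_limits(struct request_queue *q,
				      struct request *rq)
{
	if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, req_op(rq)))
		return -EIO;

	/*
	 * The destination queue's limits may differ from the source
	 * queue's, so the segment count must be recomputed here.
	 */
	blk_recalc_rq_segments(rq);
	if (rq->nr_phys_segments > queue_max_segments(q))
		return -EIO;

	return 0;
}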

Multi-page bvec is enabled now, so not doing S/G merging is rather pointless
with the current setup of the I/O path: it is not going to save a significant
number of cycles.
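The segment count ultimately matters when a driver maps the request to a
scatterlist, where blk_rq_map_sg() does the actual S/G merging under the
queue limits. A minimal sketch; my_drv_map_rq() and its surrounding driver
are hypothetical:

#include <linux/blkdev.h>
#include <linux/scatterlist.h>

/* hypothetical driver helper: map a request for DMA */
static int my_drv_map_rq(struct request *rq, struct scatterlist *sgl)
{
	struct request_queue *q = rq->q;
	int nents;

	sg_init_table(sgl, queue_max_segments(q));

	/* adjacent bvecs are merged here, subject to the queue limits */
	nents = blk_rq_map_sg(q, rq, sgl);

	/* the count computed at split time is an upper bound */
	WARN_ON_ONCE(nents > blk_rq_nr_phys_segments(rq));
	return nents;
}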

Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent ac4fa1d1
block/blk-merge.c  +6 −25
@@ -358,8 +358,7 @@ void blk_queue_split(struct request_queue *q, struct bio **bio)
 EXPORT_SYMBOL(blk_queue_split);
 
 static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
-					     struct bio *bio,
-					     bool no_sg_merge)
+					     struct bio *bio)
 {
 	struct bio_vec bv, bvprv = { NULL };
 	int prev = 0;
@@ -385,13 +384,6 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 	nr_phys_segs = 0;
 	for_each_bio(bio) {
 		bio_for_each_bvec(bv, bio, iter) {
-			/*
-			 * If SG merging is disabled, each bio vector is
-			 * a segment
-			 */
-			if (no_sg_merge)
-				goto new_segment;
-
 			if (prev) {
 				if (seg_size + bv.bv_len
 				    > queue_max_segment_size(q))
@@ -421,27 +413,16 @@ new_segment:
 
 void blk_recalc_rq_segments(struct request *rq)
 {
-	bool no_sg_merge = !!test_bit(QUEUE_FLAG_NO_SG_MERGE,
-			&rq->q->queue_flags);
-
-	rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio,
-			no_sg_merge);
+	rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio);
 }
 
 void blk_recount_segments(struct request_queue *q, struct bio *bio)
 {
-	unsigned short seg_cnt = bio_segments(bio);
-
-	if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags) &&
-			(seg_cnt < queue_max_segments(q)))
-		bio->bi_phys_segments = seg_cnt;
-	else {
-		struct bio *nxt = bio->bi_next;
+	struct bio *nxt = bio->bi_next;
 
-		bio->bi_next = NULL;
-		bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, false);
-		bio->bi_next = nxt;
-	}
+	bio->bi_next = NULL;
+	bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio);
+	bio->bi_next = nxt;
 
 	bio_set_flag(bio, BIO_SEG_VALID);
 }
block/blk-mq-debugfs.c  +0 −1
@@ -128,7 +128,6 @@ static const char *const blk_queue_flag_name[] = {
 	QUEUE_FLAG_NAME(SAME_FORCE),
 	QUEUE_FLAG_NAME(DEAD),
 	QUEUE_FLAG_NAME(INIT_DONE),
-	QUEUE_FLAG_NAME(NO_SG_MERGE),
 	QUEUE_FLAG_NAME(POLL),
 	QUEUE_FLAG_NAME(WC),
 	QUEUE_FLAG_NAME(FUA),
block/blk-mq.c  +0 −3
@@ -2837,9 +2837,6 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 	    set->map[HCTX_TYPE_POLL].nr_queues)
 		blk_queue_flag_set(QUEUE_FLAG_POLL, q);
 
-	if (!(set->flags & BLK_MQ_F_SG_MERGE))
-		blk_queue_flag_set(QUEUE_FLAG_NO_SG_MERGE, q);
-
 	q->sg_reserved_size = INT_MAX;
 
 	INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
drivers/md/dm-table.c  +0 −13
@@ -1698,14 +1698,6 @@ static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
 	return q && !blk_queue_add_random(q);
 }
 
-static int queue_supports_sg_merge(struct dm_target *ti, struct dm_dev *dev,
-				   sector_t start, sector_t len, void *data)
-{
-	struct request_queue *q = bdev_get_queue(dev->bdev);
-
-	return q && !test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags);
-}
-
 static bool dm_table_all_devices_attribute(struct dm_table *t,
 					   iterate_devices_callout_fn func)
 {
@@ -1902,11 +1894,6 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	if (!dm_table_supports_write_zeroes(t))
 		q->limits.max_write_zeroes_sectors = 0;
 
-	if (dm_table_all_devices_attribute(t, queue_supports_sg_merge))
-		blk_queue_flag_clear(QUEUE_FLAG_NO_SG_MERGE, q);
-	else
-		blk_queue_flag_set(QUEUE_FLAG_NO_SG_MERGE, q);
-
 	dm_table_verify_integrity(t);
 
 	/*
include/linux/blkdev.h  +0 −1
@@ -588,7 +588,6 @@ struct request_queue {
 #define QUEUE_FLAG_SAME_FORCE	12	/* force complete on same CPU */
 #define QUEUE_FLAG_DEAD		13	/* queue tear-down finished */
 #define QUEUE_FLAG_INIT_DONE	14	/* queue is initialized */
-#define QUEUE_FLAG_NO_SG_MERGE	15	/* don't attempt to merge SG segments*/
 #define QUEUE_FLAG_POLL		16	/* IO polling enabled if set */
 #define QUEUE_FLAG_WC		17	/* Write back caching */
 #define QUEUE_FLAG_FUA		18	/* device supports FUA writes */