Commit 16e3e418 authored by Ming Lei, committed by Jens Axboe
Browse files

block: reuse __blk_bvec_map_sg() for mapping page sized bvec



Inside __blk_segment_map_sg(), page sized bvec mapping is optimized
a bit with one standalone branch.

So reuse __blk_bvec_map_sg() to do that.

Cc: Omar Sandoval <osandov@fb.com>
Cc: Christoph Hellwig <hch@lst.de>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent cae6c2e5
Loading
Loading
Loading
Loading
+9 −11
Original line number Diff line number Diff line
@@ -493,6 +493,14 @@ static unsigned blk_bvec_map_sg(struct request_queue *q,
	return nsegs;
}

/*
 * Map a single bio_vec into the next scatterlist entry.
 *
 * Obtains the next entry via blk_next_sg() (advancing *sg), stores the
 * bvec's page/length/offset in it, and returns the number of segments
 * mapped — always 1.
 */
static inline int __blk_bvec_map_sg(struct bio_vec bv,
		struct scatterlist *sglist, struct scatterlist **sg)
{
	struct scatterlist *entry = blk_next_sg(sg, sglist);

	sg_set_page(entry, bv.bv_page, bv.bv_len, bv.bv_offset);
	*sg = entry;

	return 1;
}

static inline void
__blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
		     struct scatterlist *sglist, struct bio_vec *bvprv,
@@ -511,23 +519,13 @@ __blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
	} else {
new_segment:
		if (bvec->bv_offset + bvec->bv_len <= PAGE_SIZE) {
			*sg = blk_next_sg(sg, sglist);
			sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
			(*nsegs) += 1;
			(*nsegs) += __blk_bvec_map_sg(*bvec, sglist, sg);
		} else
			(*nsegs) += blk_bvec_map_sg(q, bvec, sglist, sg);
	}
	*bvprv = *bvec;
}

/*
 * Map a single bio_vec onto the head of @sglist.
 *
 * Writes the first scatterlist entry directly (*sg = sglist) rather
 * than advancing through the list, so this is only correct when the
 * whole request maps to exactly one segment — presumably the
 * single-bvec fast path; TODO confirm against callers.
 *
 * Returns the number of segments mapped, which is always 1.
 */
static inline int __blk_bvec_map_sg(struct bio_vec bv,
		struct scatterlist *sglist, struct scatterlist **sg)
{
	*sg = sglist;
	sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
	return 1;
}

static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
			     struct scatterlist *sglist,
			     struct scatterlist **sg)