Commit d3ee2d84 authored by Xiao Ni, committed by Song Liu

md/raid10: improve discard request for far layout



For the far layout, the discard region is not continuous on the disks,
so it needs far_copies r10bios to cover all the regions, and it needs a
way to know whether all of those r10bios have finished. Similar to
raid10_sync_request, only the first r10bio's master_bio records the
discard bio; the other r10bios' master_bio records the first r10bio.
The first r10bio can only finish after all the other r10bios finish,
and then it returns the discard bio.
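
Below is a rough user-space model of this completion scheme, not the
kernel code itself: struct r10bio_model, end_discard(), and the
remaining/is_first/first fields are simplified stand-ins for the r10bio
machinery in drivers/md/raid10.c.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct r10bio_model {
	atomic_int remaining;        /* outstanding requests; +1 on the first for each extra copy */
	bool is_first;               /* stands in for the R10BIO_Discard state bit */
	struct r10bio_model *first;  /* non-first copies point back at the first r10bio */
};

/* Mirrors the shape of raid_end_discard_bio(): when a non-first r10bio
 * drops to zero it forwards its completion to the first r10bio, so the
 * first one can only reach zero, and end the discard, after every other
 * copy has finished. */
static void end_discard(struct r10bio_model *r10bio)
{
	/* atomic_fetch_sub() returning 1 means the counter just hit 0,
	 * like the kernel's atomic_dec_and_test(). */
	while (atomic_fetch_sub(&r10bio->remaining, 1) == 1) {
		if (!r10bio->is_first) {
			struct r10bio_model *first = r10bio->first;
			free(r10bio);
			r10bio = first;  /* propagate completion up the chain */
		} else {
			printf("discard bio completed\n");
			free(r10bio);
			break;
		}
	}
}

int main(void)
{
	struct r10bio_model *first = calloc(1, sizeof(*first));
	struct r10bio_model *second = calloc(1, sizeof(*second));

	first->is_first = true;
	atomic_init(&first->remaining, 2);   /* own request + one chained far copy */
	atomic_init(&second->remaining, 1);  /* own request only */
	second->first = first;

	end_discard(first);   /* first copy's request finishes: 2 -> 1, nothing ends */
	end_discard(second);  /* second copy finishes, forwards to first: discard completes */
	return 0;
}

Running it prints "discard bio completed" exactly once, after both
chained copies have finished.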

Signed-off-by: Xiao Ni <xni@redhat.com>
Signed-off-by: Song Liu <songliubraving@fb.com>
parent bcc90d28
drivers/md/raid10.c: +63 −23
@@ -1534,6 +1534,28 @@ static struct bio *raid10_split_bio(struct r10conf *conf,
 	return bio;
 }
 
+static void raid_end_discard_bio(struct r10bio *r10bio)
+{
+	struct r10conf *conf = r10bio->mddev->private;
+	struct r10bio *first_r10bio;
+
+	while (atomic_dec_and_test(&r10bio->remaining)) {
+
+		allow_barrier(conf);
+
+		if (!test_bit(R10BIO_Discard, &r10bio->state)) {
+			first_r10bio = (struct r10bio *)r10bio->master_bio;
+			free_r10bio(r10bio);
+			r10bio = first_r10bio;
+		} else {
+			md_write_end(r10bio->mddev);
+			bio_endio(r10bio->master_bio);
+			free_r10bio(r10bio);
+			break;
+		}
+	}
+}
+
 static void raid10_end_discard_request(struct bio *bio)
 {
 	struct r10bio *r10_bio = bio->bi_private;
@@ -1560,11 +1582,7 @@ static void raid10_end_discard_request(struct bio *bio)
 		rdev = conf->mirrors[dev].rdev;
 	}
 
-	if (atomic_dec_and_test(&r10_bio->remaining)) {
-		md_write_end(r10_bio->mddev);
-		raid_end_bio_io(r10_bio);
-	}
-
+	raid_end_discard_bio(r10_bio);
 	rdev_dec_pending(rdev, conf->mddev);
 }
 
@@ -1577,7 +1595,9 @@ static int raid10_handle_discard(struct mddev *mddev, struct bio *bio)
 {
 	struct r10conf *conf = mddev->private;
 	struct geom *geo = &conf->geo;
-	struct r10bio *r10_bio;
+	struct r10bio *r10_bio, *first_r10bio;
+	int far_copies = geo->far_copies;
+	bool first_copy = true;
 
 	int disk;
 	sector_t chunk;
@@ -1616,30 +1636,20 @@ static int raid10_handle_discard(struct mddev *mddev, struct bio *bio)
 	if (bio_sectors(bio) < stripe_size*2)
 		goto out;
 
-	/* For far offset layout, if bio is not aligned with stripe size, it splits
-	 * the part that is not aligned with strip size.
+	/* For far and far offset layout, if bio is not aligned with stripe size,
+	 * it splits the part that is not aligned with stripe size.
 	 */
 	div_u64_rem(bio_start, stripe_size, &remainder);
-	if (geo->far_offset && remainder) {
+	if ((far_copies > 1) && remainder) {
 		split_size = stripe_size - remainder;
 		bio = raid10_split_bio(conf, bio, split_size, false);
 	}
 	div_u64_rem(bio_end, stripe_size, &remainder);
-	if (geo->far_offset && remainder) {
+	if ((far_copies > 1) && remainder) {
 		split_size = bio_sectors(bio) - remainder;
 		bio = raid10_split_bio(conf, bio, split_size, true);
 	}
 
-	r10_bio = mempool_alloc(&conf->r10bio_pool, GFP_NOIO);
-	r10_bio->mddev = mddev;
-	r10_bio->state = 0;
-	r10_bio->sectors = 0;
-	memset(r10_bio->devs, 0, sizeof(r10_bio->devs[0]) * geo->raid_disks);
-
-	wait_blocked_dev(mddev, r10_bio);
-
-	r10_bio->master_bio = bio;
-
 	bio_start = bio->bi_iter.bi_sector;
 	bio_end = bio_end_sector(bio);
 
@@ -1665,6 +1675,28 @@ static int raid10_handle_discard(struct mddev *mddev, struct bio *bio)
 	end_disk_offset = (bio_end & geo->chunk_mask) +
 				(last_stripe_index << geo->chunk_shift);
 
+retry_discard:
+	r10_bio = mempool_alloc(&conf->r10bio_pool, GFP_NOIO);
+	r10_bio->mddev = mddev;
+	r10_bio->state = 0;
+	r10_bio->sectors = 0;
+	memset(r10_bio->devs, 0, sizeof(r10_bio->devs[0]) * geo->raid_disks);
+	wait_blocked_dev(mddev, r10_bio);
+
+	/* For far layout it needs more than one r10bio to cover all regions.
+	 * Inspired by raid10_sync_request, we can use the first r10bio->master_bio
+	 * to record the discard bio. The other r10bios' master_bio records the
+	 * first r10bio. The first r10bio is released only after all the other
+	 * r10bios finish, and the discard bio returns only when it finishes.
+	 */
+	if (first_copy) {
+		r10_bio->master_bio = bio;
+		set_bit(R10BIO_Discard, &r10_bio->state);
+		first_copy = false;
+		first_r10bio = r10_bio;
+	} else
+		r10_bio->master_bio = (struct bio *)first_r10bio;
+
 	rcu_read_lock();
 	for (disk = 0; disk < geo->raid_disks; disk++) {
 		struct md_rdev *rdev = rcu_dereference(conf->mirrors[disk].rdev);
@@ -1755,11 +1787,19 @@ static int raid10_handle_discard(struct mddev *mddev, struct bio *bio)
 		}
 	}
 
-	if (atomic_dec_and_test(&r10_bio->remaining)) {
-		md_write_end(r10_bio->mddev);
-		raid_end_bio_io(r10_bio);
+	if (!geo->far_offset && --far_copies) {
+		first_stripe_index += geo->stride >> geo->chunk_shift;
+		start_disk_offset += geo->stride;
+		last_stripe_index += geo->stride >> geo->chunk_shift;
+		end_disk_offset += geo->stride;
+		atomic_inc(&first_r10bio->remaining);
+		raid_end_discard_bio(r10_bio);
+		wait_barrier(conf);
+		goto retry_discard;
 	}
 
+	raid_end_discard_bio(r10_bio);
+
 	return 0;
 out:
 	allow_barrier(conf);
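
For reference, the retry_discard stepping above works because in a
plain far layout (far_offset == 0) consecutive far copies sit
geo->stride sectors apart on each member disk, so each index and
offset advances by the stride (or by stride >> chunk_shift in chunk
units). A small user-space sketch of that walk, with field names
borrowed from struct geom but otherwise illustrative only:

#include <stdio.h>

typedef unsigned long long sector_t;

/* Cut-down stand-in for the geometry fields used by retry_discard. */
struct geom_model {
	int far_copies;   /* number of far copies in the layout */
	int chunk_shift;  /* log2 of the chunk size in sectors */
	sector_t stride;  /* sectors between consecutive far copies on a disk */
};

/* Visits the per-disk region of each far copy, advancing exactly the
 * way the retry_discard loop advances first_stripe_index and
 * start_disk_offset between iterations. */
static void walk_far_copies(const struct geom_model *geo,
			    sector_t first_stripe_index,
			    sector_t start_disk_offset)
{
	for (int copy = 0; copy < geo->far_copies; copy++) {
		printf("copy %d: stripe index %llu, disk offset %llu\n",
		       copy, first_stripe_index, start_disk_offset);
		first_stripe_index += geo->stride >> geo->chunk_shift;
		start_disk_offset += geo->stride;
	}
}

int main(void)
{
	/* Example: 2 far copies, 64KiB (128-sector) chunks, 1GiB stride. */
	struct geom_model geo = { .far_copies = 2, .chunk_shift = 7,
				  .stride = 2097152 };
	walk_far_copies(&geo, 0, 0);
	return 0;
}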
drivers/md/raid10.h: +1 −0
@@ -179,5 +179,6 @@ enum r10bio_state {
 	R10BIO_Previous,
 /* failfast devices did receive failfast requests. */
 	R10BIO_FailFast,
+	R10BIO_Discard,
 };
 #endif