Commit 9d6cb1b0 authored by Johannes Thumshirn's avatar Johannes Thumshirn Committed by David Sterba
Browse files

btrfs: raid56: reduce indentation in lock_stripe_add



In lock_stripe_add() we're traversing the stripe hash list and checking if
the current list element's raid_map is equal to the raid bio's
raid_map. If both are equal we continue processing.

If we'd check for inequality instead of equality we can reduce one level
of indentation.

Reviewed-by: Nikolay Borisov <nborisov@suse.com>
Signed-off-by: Johannes Thumshirn <jthumshirn@suse.de>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 1d2e7c7c
Loading
Loading
Loading
Loading
+44 −47
Original line number Diff line number Diff line
@@ -682,10 +682,12 @@ static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)

	spin_lock_irqsave(&h->lock, flags);
	list_for_each_entry(cur, &h->hash_list, hash_list) {
		if (cur->bbio->raid_map[0] == rbio->bbio->raid_map[0]) {
		if (cur->bbio->raid_map[0] != rbio->bbio->raid_map[0])
			continue;

		spin_lock(&cur->bio_list_lock);

			/* can we steal this cached rbio's pages? */
		/* Can we steal this cached rbio's pages? */
		if (bio_list_empty(&cur->bio_list) &&
		    list_empty(&cur->plug_list) &&
		    test_bit(RBIO_CACHE_BIT, &cur->flags) &&
@@ -700,7 +702,7 @@ static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
			goto lockit;
		}

			/* can we merge into the lock owner? */
		/* Can we merge into the lock owner? */
		if (rbio_can_merge(cur, rbio)) {
			merge_rbio(cur, rbio);
			spin_unlock(&cur->bio_list_lock);
@@ -711,15 +713,11 @@ static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)


		/*
			 * we couldn't merge with the running
			 * rbio, see if we can merge with the
			 * pending ones.  We don't have to
			 * check for rmw_locked because there
			 * is no way they are inside finish_rmw
			 * right now
		 * We couldn't merge with the running rbio, see if we can merge
		 * with the pending ones.  We don't have to check for rmw_locked
		 * because there is no way they are inside finish_rmw right now
		 */
			list_for_each_entry(pending, &cur->plug_list,
					    plug_list) {
		list_for_each_entry(pending, &cur->plug_list, plug_list) {
			if (rbio_can_merge(pending, rbio)) {
				merge_rbio(pending, rbio);
				spin_unlock(&cur->bio_list_lock);
@@ -729,16 +727,15 @@ static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
			}
		}

			/* no merging, put us on the tail of the plug list,
			 * our rbio will be started with the currently
			 * running rbio unlocks
		/*
		 * No merging, put us on the tail of the plug list, our rbio
		 * will be started with the currently running rbio unlocks
		 */
		list_add_tail(&rbio->plug_list, &cur->plug_list);
		spin_unlock(&cur->bio_list_lock);
		ret = 1;
		goto out;
	}
	}
lockit:
	refcount_inc(&rbio->refs);
	list_add(&rbio->hash_list, &h->hash_list);