Commit e1a41848 authored by Qu Wenruo, committed by David Sterba

btrfs: Refactor unclustered extent allocation into find_free_extent_unclustered()



This patch extracts the unclustered extent allocation code into
find_free_extent_unclustered().

The helper communicates via its return value what the caller should do
next: 0 when a free extent is found (and ffe_ctl->found_offset is set),
>0 when nothing fits and the caller should move on to the next block
group, and -EAGAIN when the same block group needs to be re-searched.

This should make find_free_extent() a little easier to read.
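
Schematically, the call site then reduces to the following dispatch on
the return value (this mirrors the second hunk below; the surrounding
loop control and error handling are elided):

	ret = find_free_extent_unclustered(block_group, last_ptr, &ffe_ctl);
	if (ret == -EAGAIN)
		goto have_block_group;	/* caching progressed, re-search this group */
	else if (ret > 0)
		goto loop;		/* nothing usable here, try the next group */
	/* ret == 0: ffe_ctl.found_offset is set, fall through */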

Signed-off-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: Su Yue <suy.fnst@cn.fujitsu.com>
Reviewed-by: Josef Bacik <josef@toxicpanda.com>
[Update merge conflict with fb5c39d7 ("btrfs: don't use ctl->free_space for max_extent_size")]
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent d06e3bb6
+69 −46
@@ -7415,6 +7415,70 @@ refill_cluster:
 	return 1;
 }
 
+/*
+ * Return >0 to inform the caller that we found nothing
+ * Return 0 when we found a free extent and set ffe_ctl->found_offset
+ * Return -EAGAIN to inform the caller that we need to re-search this block group
+ */
+static int find_free_extent_unclustered(struct btrfs_block_group_cache *bg,
+		struct btrfs_free_cluster *last_ptr,
+		struct find_free_extent_ctl *ffe_ctl)
+{
+	u64 offset;
+
+	/*
+	 * We are doing an unclustered allocation, set the fragmented flag so
+	 * we don't bother trying to setup a cluster again until we get more
+	 * space.
+	 */
+	if (unlikely(last_ptr)) {
+		spin_lock(&last_ptr->lock);
+		last_ptr->fragmented = 1;
+		spin_unlock(&last_ptr->lock);
+	}
+	if (ffe_ctl->cached) {
+		struct btrfs_free_space_ctl *free_space_ctl;
+
+		free_space_ctl = bg->free_space_ctl;
+		spin_lock(&free_space_ctl->tree_lock);
+		if (free_space_ctl->free_space <
+		    ffe_ctl->num_bytes + ffe_ctl->empty_cluster +
+		    ffe_ctl->empty_size) {
+			ffe_ctl->total_free_space = max_t(u64,
+					ffe_ctl->total_free_space,
+					free_space_ctl->free_space);
+			spin_unlock(&free_space_ctl->tree_lock);
+			return 1;
+		}
+		spin_unlock(&free_space_ctl->tree_lock);
+	}
+
+	offset = btrfs_find_space_for_alloc(bg, ffe_ctl->search_start,
+			ffe_ctl->num_bytes, ffe_ctl->empty_size,
+			&ffe_ctl->max_extent_size);
+
+	/*
+	 * If we didn't find a chunk, and we haven't failed on this block group
+	 * before, and this block group is in the middle of caching and we are
+	 * ok with waiting, then go ahead and wait for progress to be made, and
+	 * set @retry_unclustered to true.
+	 *
+	 * If @retry_unclustered is true then we've already waited on this
+	 * block group once and should move on to the next block group.
+	 */
+	if (!offset && !ffe_ctl->retry_unclustered && !ffe_ctl->cached &&
+	    ffe_ctl->loop > LOOP_CACHING_NOWAIT) {
+		wait_block_group_cache_progress(bg, ffe_ctl->num_bytes +
+						ffe_ctl->empty_size);
+		ffe_ctl->retry_unclustered = true;
+		return -EAGAIN;
+	} else if (!offset) {
+		return 1;
+	}
+	ffe_ctl->found_offset = offset;
+	return 0;
+}
+
 /*
  * walks the btree of allocated extents and find a hole of a given size.
  * The key ins is changed to record the hole:
@@ -7617,54 +7681,13 @@ have_block_group:
 			/* ret == -ENOENT case falls through */
 		}
 
-		/*
-		 * We are doing an unclustered alloc, set the fragmented flag so
-		 * we don't bother trying to setup a cluster again until we get
-		 * more space.
-		 */
-		if (unlikely(last_ptr)) {
-			spin_lock(&last_ptr->lock);
-			last_ptr->fragmented = 1;
-			spin_unlock(&last_ptr->lock);
-		}
-		if (ffe_ctl.cached) {
-			struct btrfs_free_space_ctl *ctl =
-				block_group->free_space_ctl;
-
-			spin_lock(&ctl->tree_lock);
-			if (ctl->free_space <
-			    num_bytes + ffe_ctl.empty_cluster + empty_size) {
-				ffe_ctl.total_free_space = max(ctl->free_space,
-						ffe_ctl.total_free_space);
-				spin_unlock(&ctl->tree_lock);
-				goto loop;
-			}
-			spin_unlock(&ctl->tree_lock);
-		}
-
-		ffe_ctl.found_offset = btrfs_find_space_for_alloc(block_group,
-				ffe_ctl.search_start, num_bytes, empty_size,
-				&ffe_ctl.max_extent_size);
-		/*
-		 * If we didn't find a chunk, and we haven't failed on this
-		 * block group before, and this block group is in the middle of
-		 * caching and we are ok with waiting, then go ahead and wait
-		 * for progress to be made, and set ffe_ctl.retry_unclustered to
-		 * true.
-		 *
-		 * If ffe_ctl.retry_unclustered is true then we've already
-		 * waited on this block group once and should move on to the
-		 * next block group.
-		 */
-		if (!ffe_ctl.found_offset && !ffe_ctl.retry_unclustered &&
-		    !ffe_ctl.cached && ffe_ctl.loop > LOOP_CACHING_NOWAIT) {
-			wait_block_group_cache_progress(block_group,
-						num_bytes + empty_size);
-			ffe_ctl.retry_unclustered = true;
+		ret = find_free_extent_unclustered(block_group, last_ptr,
+						   &ffe_ctl);
+		if (ret == -EAGAIN)
 			goto have_block_group;
-		} else if (!ffe_ctl.found_offset) {
+		else if (ret > 0)
 			goto loop;
-		}
+		/* ret == 0 case falls through */
 checks:
 		ffe_ctl.search_start = round_up(ffe_ctl.found_offset,
 					     fs_info->stripesize);
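
For readers following along outside the kernel tree, a minimal,
self-contained sketch of the same tri-state return convention follows.
Everything in it (try_candidate and friends) is an illustrative
stand-in, not a kernel API:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/*
 * Stand-in for find_free_extent_unclustered()'s contract:
 *   -EAGAIN -> transient state, re-search the same candidate
 *    >0     -> nothing usable here, move on to the next candidate
 *     0     -> success, *offset has been set
 */
static int try_candidate(int id, bool retried, unsigned long *offset)
{
	if (id == 0 && !retried)
		return -EAGAIN;		/* first pass: ask the caller to retry */
	if (id == 0)
		return 1;		/* the retry found nothing either */
	*offset = 4096UL * id;		/* pretend candidate 1 has space */
	return 0;
}

int main(void)
{
	unsigned long offset = 0;

	for (int id = 0; id < 2; id++) {
		bool retried = false;
		int ret;
again:
		ret = try_candidate(id, retried, &offset);
		if (ret == -EAGAIN) {	/* like "goto have_block_group" */
			retried = true;
			goto again;
		}
		if (ret > 0)		/* like "goto loop" */
			continue;
		printf("found offset %lu in candidate %d\n", offset, id);
		break;
	}
	return 0;
}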