Commit ffb9e0f0 authored by Qu Wenruo, committed by David Sterba
Browse files

btrfs: block-group: Refactor btrfs_read_block_groups()



Refactor the work inside the loop of btrfs_read_block_groups() into one
separate function, read_one_block_group().

This allows read_one_block_group() to be reused for the later BG_TREE feature.

The refactoring also makes the following fix:
- Use btrfs_fs_incompat() to replace the open-coded incompat feature check

Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Reviewed-by: Anand Jain <anand.jain@oracle.com>
Signed-off-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent d4e253bb
Loading
Loading
Loading
Loading
+108 −111
Original line number Diff line number Diff line
@@ -1686,56 +1686,25 @@ static int check_chunk_block_group_mappings(struct btrfs_fs_info *fs_info)
	return ret;
}

int btrfs_read_block_groups(struct btrfs_fs_info *info)
static int read_one_block_group(struct btrfs_fs_info *info,
				struct btrfs_path *path,
				int need_clear)
{
	struct btrfs_path *path;
	int ret;
	struct extent_buffer *leaf = path->nodes[0];
	struct btrfs_block_group_cache *cache;
	struct btrfs_space_info *space_info;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	int need_clear = 0;
	u64 cache_gen;
	u64 feature;
	int mixed;

	feature = btrfs_super_incompat_flags(info->super_copy);
	mixed = !!(feature & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS);

	key.objectid = 0;
	key.offset = 0;
	key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = READA_FORWARD;

	cache_gen = btrfs_super_cache_generation(info->super_copy);
	if (btrfs_test_opt(info, SPACE_CACHE) &&
	    btrfs_super_generation(info->super_copy) != cache_gen)
		need_clear = 1;
	if (btrfs_test_opt(info, CLEAR_CACHE))
		need_clear = 1;

	while (1) {
	struct btrfs_block_group_item bgi;
	const bool mixed = btrfs_fs_incompat(info, MIXED_GROUPS);
	int slot = path->slots[0];
	int ret;

		ret = find_first_block_group(info, path, &key);
		if (ret > 0)
			break;
		if (ret != 0)
			goto error;

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
	btrfs_item_key_to_cpu(leaf, &key, slot);
	ASSERT(key.type == BTRFS_BLOCK_GROUP_ITEM_KEY);

		cache = btrfs_create_block_group_cache(info, found_key.objectid,
						       found_key.offset);
		if (!cache) {
			ret = -ENOMEM;
			goto error;
		}
	cache = btrfs_create_block_group_cache(info, key.objectid, key.offset);
	if (!cache)
		return -ENOMEM;

	if (need_clear) {
		/*
@@ -1751,74 +1720,57 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info)
		if (btrfs_test_opt(info, SPACE_CACHE))
			cache->disk_cache_state = BTRFS_DC_CLEAR;
	}

		read_extent_buffer(leaf, &bgi,
				   btrfs_item_ptr_offset(leaf, path->slots[0]),
	read_extent_buffer(leaf, &bgi, btrfs_item_ptr_offset(leaf, slot),
			   sizeof(bgi));
		/* cache::chunk_objectid is unused */
	cache->used = btrfs_stack_block_group_used(&bgi);
	cache->flags = btrfs_stack_block_group_flags(&bgi);
		if (!mixed &&
		    ((cache->flags & BTRFS_BLOCK_GROUP_METADATA) &&
	if (!mixed && ((cache->flags & BTRFS_BLOCK_GROUP_METADATA) &&
	    (cache->flags & BTRFS_BLOCK_GROUP_DATA))) {
			btrfs_err(info,
"bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups",
				  cache->start);
			btrfs_put_block_group(cache);
			ret = -EINVAL;
			goto error;
	}

		key.objectid = found_key.objectid + found_key.offset;
		btrfs_release_path(path);

	/*
		 * We need to exclude the super stripes now so that the space
		 * info has super bytes accounted for, otherwise we'll think
		 * we have more space than we actually do.
	 * We need to exclude the super stripes now so that the space info has
	 * super bytes accounted for, otherwise we'll think we have more space
	 * than we actually do.
	 */
	ret = exclude_super_stripes(cache);
	if (ret) {
			/*
			 * We may have excluded something, so call this just in
			 * case.
			 */
		/* We may have excluded something, so call this just in case. */
		btrfs_free_excluded_extents(cache);
			btrfs_put_block_group(cache);
		goto error;
	}

	/*
		 * Check for two cases, either we are full, and therefore
		 * don't need to bother with the caching work since we won't
		 * find any space, or we are empty, and we can just add all
		 * the space in and be done with it.  This saves us _a_lot_ of
		 * time, particularly in the full case.
	 * Check for two cases, either we are full, and therefore don't need
	 * to bother with the caching work since we won't find any space, or we
	 * are empty, and we can just add all the space in and be done with it.
	 * This saves us _a_lot_ of time, particularly in the full case.
	 */
		if (found_key.offset == cache->used) {
	if (key.offset == cache->used) {
		cache->last_byte_to_unpin = (u64)-1;
		cache->cached = BTRFS_CACHE_FINISHED;
		btrfs_free_excluded_extents(cache);
	} else if (cache->used == 0) {
		cache->last_byte_to_unpin = (u64)-1;
		cache->cached = BTRFS_CACHE_FINISHED;
			add_new_free_space(cache, found_key.objectid,
					   found_key.objectid +
					   found_key.offset);
		add_new_free_space(cache, key.objectid,
				   key.objectid + key.offset);
		btrfs_free_excluded_extents(cache);
	}

	ret = btrfs_add_block_group_cache(info, cache);
	if (ret) {
		btrfs_remove_free_space_cache(cache);
			btrfs_put_block_group(cache);
		goto error;
	}

	trace_btrfs_add_block_group(info, cache, 0);
		btrfs_update_space_info(info, cache->flags, found_key.offset,
					cache->used,
					cache->bytes_super, &space_info);
	btrfs_update_space_info(info, cache->flags, key.offset,
				cache->used, cache->bytes_super, &space_info);

	cache->space_info = space_info;

@@ -1831,6 +1783,51 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info)
		ASSERT(list_empty(&cache->bg_list));
		btrfs_mark_bg_unused(cache);
	}
	return 0;
error:
	btrfs_put_block_group(cache);
	return ret;
}

int btrfs_read_block_groups(struct btrfs_fs_info *info)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_block_group_cache *cache;
	struct btrfs_space_info *space_info;
	struct btrfs_key key;
	int need_clear = 0;
	u64 cache_gen;

	key.objectid = 0;
	key.offset = 0;
	key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = READA_FORWARD;

	cache_gen = btrfs_super_cache_generation(info->super_copy);
	if (btrfs_test_opt(info, SPACE_CACHE) &&
	    btrfs_super_generation(info->super_copy) != cache_gen)
		need_clear = 1;
	if (btrfs_test_opt(info, CLEAR_CACHE))
		need_clear = 1;

	while (1) {
		ret = find_first_block_group(info, path, &key);
		if (ret > 0)
			break;
		if (ret != 0)
			goto error;

		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		ret = read_one_block_group(info, path, need_clear);
		if (ret < 0)
			goto error;
		key.objectid += key.offset;
		key.offset = 0;
		btrfs_release_path(path);
	}

	list_for_each_entry_rcu(space_info, &info->space_info, list) {