Commit 7fe6d45e authored by Dennis Zhou, committed by David Sterba

btrfs: have multiple discard lists



Non-block group destruction discarding currently has only a single list
with no minimum discard length. This can lead to caravanning more
meaningful discards behind a heavily fragmented block group.

This adds support for multiple lists with minimum discard lengths to
prevent the caravan effect. We promote a block group back up when a
freed region exceeds the BTRFS_ASYNC_DISCARD_MAX_FILTER size. Currently
we support only two filtered lists, with filters of 1MB and 32KB
respectively.
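
For illustration only, here is a minimal standalone sketch (not part of
the patch; pick_discard_index() and the sample sizes are made up for the
example) of how the minimum-length filters map a freed region onto a
discard list, mirroring the promotion loop in
btrfs_discard_check_filter():

#include <stdio.h>

#define SZ_1M				(1024 * 1024)
#define SZ_32K				(32 * 1024)

#define BTRFS_NR_DISCARD_LISTS		3
#define BTRFS_DISCARD_INDEX_START	1
#define BTRFS_ASYNC_DISCARD_MAX_FILTER	SZ_1M
#define BTRFS_ASYNC_DISCARD_MIN_FILTER	SZ_32K

/* Index 0 is reserved for unused block groups; later indexes filter by size. */
static const unsigned int discard_minlen[BTRFS_NR_DISCARD_LISTS] = {
	0,
	BTRFS_ASYNC_DISCARD_MAX_FILTER,
	BTRFS_ASYNC_DISCARD_MIN_FILTER,
};

/*
 * Hypothetical helper: return the discard list a freed region of @bytes
 * would be queued on.  Regions of at least 1MB land on list 1, anything
 * smaller falls through to the 32KB-filtered list 2.
 */
static int pick_discard_index(unsigned int bytes)
{
	int i;

	for (i = BTRFS_DISCARD_INDEX_START; i < BTRFS_NR_DISCARD_LISTS - 1; i++) {
		if (bytes >= discard_minlen[i])
			break;
	}
	return i;
}

int main(void)
{
	printf("2M  -> list %d\n", pick_discard_index(2 * SZ_1M));  /* list 1 */
	printf("64K -> list %d\n", pick_discard_index(2 * SZ_32K)); /* list 2 */
	return 0;
}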

Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: Dennis Zhou <dennis@kernel.org>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 19b2a2c7
+1 −1
@@ -456,7 +456,7 @@ struct btrfs_full_stripe_locks_tree {
 * afterwards represent monotonically decreasing discard filter sizes to
 * prioritize what should be discarded next.
 */
#define BTRFS_NR_DISCARD_LISTS		2
#define BTRFS_NR_DISCARD_LISTS		3
#define BTRFS_DISCARD_INDEX_UNUSED	0
#define BTRFS_DISCARD_INDEX_START	1

+96 −9
@@ -22,6 +22,13 @@
#define BTRFS_DISCARD_MAX_DELAY_MSEC	(1000UL)
#define BTRFS_DISCARD_MAX_IOPS		(10U)

/* Monotonically decreasing minimum length filters after index 0 */
static int discard_minlen[BTRFS_NR_DISCARD_LISTS] = {
	0,
	BTRFS_ASYNC_DISCARD_MAX_FILTER,
	BTRFS_ASYNC_DISCARD_MIN_FILTER
};

static struct list_head *get_discard_list(struct btrfs_discard_ctl *discard_ctl,
					  struct btrfs_block_group *block_group)
{
@@ -139,16 +146,18 @@ static struct btrfs_block_group *find_next_block_group(
 * peek_discard_list - wrap find_next_block_group()
 * @discard_ctl: discard control
 * @discard_state: the discard_state of the block_group after state management
 * @discard_index: the discard_index of the block_group after state management
 *
 * This wraps find_next_block_group() and sets the block_group to be in use.
 * discard_state's control flow is managed here.  Variables related to
 * discard_state are reset here as needed (eg. discard_cursor).  @discard_state
 * is remembered as it may change while we're discarding, but we want the
 * discard to execute in the context determined here.
 * discard_state are reset here as needed (eg discard_cursor).  @discard_state
 * and @discard_index are remembered as they may change while we're discarding,
 * but we want the discard to execute in the context determined here.
 */
static struct btrfs_block_group *peek_discard_list(
					struct btrfs_discard_ctl *discard_ctl,
					enum btrfs_discard_state *discard_state)
					enum btrfs_discard_state *discard_state,
					int *discard_index)
{
	struct btrfs_block_group *block_group;
	const u64 now = ktime_get_ns();
@@ -169,6 +178,7 @@ again:
		}
		discard_ctl->block_group = block_group;
		*discard_state = block_group->discard_state;
		*discard_index = block_group->discard_index;
	} else {
		block_group = NULL;
	}
@@ -178,6 +188,64 @@ again:
	return block_group;
}

/**
 * btrfs_discard_check_filter - updates a block group's filters
 * @block_group: block group of interest
 * @bytes: recently freed region size after coalescing
 *
 * Async discard maintains multiple lists with progressively smaller filters
 * to prioritize discarding based on size.  Should a free space that matches
 * a larger filter be returned to the free_space_cache, prioritize that discard
 * by moving @block_group to the proper filter.
 */
void btrfs_discard_check_filter(struct btrfs_block_group *block_group,
				u64 bytes)
{
	struct btrfs_discard_ctl *discard_ctl;

	if (!block_group ||
	    !btrfs_test_opt(block_group->fs_info, DISCARD_ASYNC))
		return;

	discard_ctl = &block_group->fs_info->discard_ctl;

	if (block_group->discard_index > BTRFS_DISCARD_INDEX_START &&
	    bytes >= discard_minlen[block_group->discard_index - 1]) {
		int i;

		remove_from_discard_list(discard_ctl, block_group);

		for (i = BTRFS_DISCARD_INDEX_START; i < BTRFS_NR_DISCARD_LISTS;
		     i++) {
			if (bytes >= discard_minlen[i]) {
				block_group->discard_index = i;
				add_to_discard_list(discard_ctl, block_group);
				break;
			}
		}
	}
}

/**
 * btrfs_update_discard_index - moves a block group along the discard lists
 * @discard_ctl: discard control
 * @block_group: block_group of interest
 *
 * Increment @block_group's discard_index.  If it falls off the list, let it be.
 * Otherwise add it back to the appropriate list.
 */
static void btrfs_update_discard_index(struct btrfs_discard_ctl *discard_ctl,
				       struct btrfs_block_group *block_group)
{
	block_group->discard_index++;
	if (block_group->discard_index == BTRFS_NR_DISCARD_LISTS) {
		block_group->discard_index = 1;
		return;
	}

	add_to_discard_list(discard_ctl, block_group);
}

/**
 * btrfs_discard_cancel_work - remove a block_group from the discard lists
 * @discard_ctl: discard control
@@ -295,6 +363,8 @@ static void btrfs_finish_discard_pass(struct btrfs_discard_ctl *discard_ctl,
			btrfs_mark_bg_unused(block_group);
		else
			add_to_discard_unused_list(discard_ctl, block_group);
	} else {
		btrfs_update_discard_index(discard_ctl, block_group);
	}
}

@@ -311,25 +381,42 @@ static void btrfs_discard_workfn(struct work_struct *work)
	struct btrfs_discard_ctl *discard_ctl;
	struct btrfs_block_group *block_group;
	enum btrfs_discard_state discard_state;
	int discard_index = 0;
	u64 trimmed = 0;
	u64 minlen = 0;

	discard_ctl = container_of(work, struct btrfs_discard_ctl, work.work);

	block_group = peek_discard_list(discard_ctl, &discard_state);
	block_group = peek_discard_list(discard_ctl, &discard_state,
					&discard_index);
	if (!block_group || !btrfs_run_discard_work(discard_ctl))
		return;

	/* Perform discarding */
	if (discard_state == BTRFS_DISCARD_BITMAPS)
	minlen = discard_minlen[discard_index];

	if (discard_state == BTRFS_DISCARD_BITMAPS) {
		u64 maxlen = 0;

		/*
		 * Use the previous level's minimum discard length as the max
		 * length filter.  In the case something is added to make a
		 * region go beyond the max filter, the entire bitmap is set
		 * back to BTRFS_TRIM_STATE_UNTRIMMED.
		 */
		if (discard_index != BTRFS_DISCARD_INDEX_UNUSED)
			maxlen = discard_minlen[discard_index - 1];

		btrfs_trim_block_group_bitmaps(block_group, &trimmed,
				       block_group->discard_cursor,
				       btrfs_block_group_end(block_group),
				       0, true);
	else
				       minlen, maxlen, true);
	} else {
		btrfs_trim_block_group_extents(block_group, &trimmed,
				       block_group->discard_cursor,
				       btrfs_block_group_end(block_group),
				       0, true);
				       minlen, true);
	}

	discard_ctl->prev_discard = trimmed;

+5 −0
@@ -11,6 +11,11 @@ struct btrfs_block_group;

/* Discard size limits */
#define BTRFS_ASYNC_DISCARD_DEFAULT_MAX_SIZE		(SZ_64M)
#define BTRFS_ASYNC_DISCARD_MAX_FILTER			(SZ_1M)
#define BTRFS_ASYNC_DISCARD_MIN_FILTER			(SZ_32K)

/* List operations */
void btrfs_discard_check_filter(struct btrfs_block_group *block_group, u64 bytes);

/* Work operations */
void btrfs_discard_cancel_work(struct btrfs_discard_ctl *discard_ctl,
+42 −13
@@ -2465,6 +2465,7 @@ int __btrfs_add_free_space(struct btrfs_fs_info *fs_info,
	struct btrfs_block_group *block_group = ctl->private;
	struct btrfs_free_space *info;
	int ret = 0;
	u64 filter_bytes = bytes;

	info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS);
	if (!info)
@@ -2501,6 +2502,8 @@ link:
	 */
	steal_from_bitmap(ctl, info, true);

	filter_bytes = max(filter_bytes, info->bytes);

	ret = link_free_space(ctl, info);
	if (ret)
		kmem_cache_free(btrfs_free_space_cachep, info);
@@ -2513,8 +2516,10 @@ out:
		ASSERT(ret != -EEXIST);
	}

	if (trim_state != BTRFS_TRIM_STATE_TRIMMED)
	if (trim_state != BTRFS_TRIM_STATE_TRIMMED) {
		btrfs_discard_check_filter(block_group, filter_bytes);
		btrfs_discard_queue_work(&fs_info->discard_ctl, block_group);
	}

	return ret;
}
@@ -3478,7 +3483,14 @@ static int trim_no_bitmap(struct btrfs_block_group *block_group,
				goto next;
			}
			unlink_free_space(ctl, entry);
			if (max_discard_size && bytes > max_discard_size) {
			/*
			 * Let bytes = BTRFS_MAX_DISCARD_SIZE + X.
			 * If X < BTRFS_ASYNC_DISCARD_MIN_FILTER, we won't trim
			 * X when we come back around.  So trim it now.
			 */
			if (max_discard_size &&
			    bytes >= (max_discard_size +
				      BTRFS_ASYNC_DISCARD_MIN_FILTER)) {
				bytes = max_discard_size;
				extent_bytes = max_discard_size;
				entry->offset += max_discard_size;
@@ -3585,7 +3597,7 @@ static void end_trimming_bitmap(struct btrfs_free_space_ctl *ctl,
 */
static int trim_bitmaps(struct btrfs_block_group *block_group,
			u64 *total_trimmed, u64 start, u64 end, u64 minlen,
			bool async)
			u64 maxlen, bool async)
{
	struct btrfs_discard_ctl *discard_ctl =
					&block_group->fs_info->discard_ctl;
@@ -3613,7 +3625,15 @@ static int trim_bitmaps(struct btrfs_block_group *block_group,
		}

		entry = tree_search_offset(ctl, offset, 1, 0);
		if (!entry || (async && start == offset &&
		/*
		 * Bitmaps are marked trimmed lossily now to prevent constant
		 * discarding of the same bitmap (the reason why we are bound
		 * by the filters).  So, retrim the block group bitmaps when we
		 * are preparing to punt to the unused_bgs list.  This uses
		 * @minlen to determine if we are in BTRFS_DISCARD_INDEX_UNUSED
		 * which is the only discard index which sets minlen to 0.
		 */
		if (!entry || (async && minlen && start == offset &&
			       btrfs_free_space_trimmed(entry))) {
			spin_unlock(&ctl->tree_lock);
			mutex_unlock(&ctl->cache_writeout_mutex);
@@ -3634,10 +3654,10 @@ static int trim_bitmaps(struct btrfs_block_group *block_group,
		ret2 = search_bitmap(ctl, entry, &start, &bytes, false);
		if (ret2 || start >= end) {
			/*
			 * This keeps the invariant that all bytes are trimmed
			 * if BTRFS_TRIM_STATE_TRIMMED is set on a bitmap.
			 * We lossily consider a bitmap trimmed if we only skip
			 * over regions <= BTRFS_ASYNC_DISCARD_MIN_FILTER.
			 */
			if (ret2 && !minlen)
			if (ret2 && minlen <= BTRFS_ASYNC_DISCARD_MIN_FILTER)
				end_trimming_bitmap(ctl, entry);
			else
				entry->trim_state = BTRFS_TRIM_STATE_UNTRIMMED;
@@ -3658,14 +3678,21 @@ static int trim_bitmaps(struct btrfs_block_group *block_group,
		}

		bytes = min(bytes, end - start);
		if (bytes < minlen) {
			entry->trim_state = BTRFS_TRIM_STATE_UNTRIMMED;
		if (bytes < minlen || (async && maxlen && bytes > maxlen)) {
			spin_unlock(&ctl->tree_lock);
			mutex_unlock(&ctl->cache_writeout_mutex);
			goto next;
		}

		if (async && max_discard_size && bytes > max_discard_size)
		/*
		 * Let bytes = BTRFS_MAX_DISCARD_SIZE + X.
		 * If X < @minlen, we won't trim X when we come back around.
		 * So trim it now.  We differ here from trimming extents as we
		 * don't keep individual state per bit.
		 */
		if (async &&
		    max_discard_size &&
		    bytes > (max_discard_size + minlen))
			bytes = max_discard_size;

		bitmap_clear_bits(ctl, entry, start, bytes);
@@ -3773,7 +3800,7 @@ int btrfs_trim_block_group(struct btrfs_block_group *block_group,
	if (ret)
		goto out;

	ret = trim_bitmaps(block_group, trimmed, start, end, minlen, false);
	ret = trim_bitmaps(block_group, trimmed, start, end, minlen, 0, false);
	div64_u64_rem(end, BITS_PER_BITMAP * ctl->unit, &rem);
	/* If we ended in the middle of a bitmap, reset the trimming flag */
	if (rem)
@@ -3807,7 +3834,7 @@ int btrfs_trim_block_group_extents(struct btrfs_block_group *block_group,

int btrfs_trim_block_group_bitmaps(struct btrfs_block_group *block_group,
				   u64 *trimmed, u64 start, u64 end, u64 minlen,
				   bool async)
				   u64 maxlen, bool async)
{
	int ret;

@@ -3821,7 +3848,9 @@ int btrfs_trim_block_group_bitmaps(struct btrfs_block_group *block_group,
	btrfs_get_block_group_trimming(block_group);
	spin_unlock(&block_group->lock);

	ret = trim_bitmaps(block_group, trimmed, start, end, minlen, async);
	ret = trim_bitmaps(block_group, trimmed, start, end, minlen, maxlen,
			   async);

	btrfs_put_block_group_trimming(block_group);

	return ret;
+1 −1
@@ -146,7 +146,7 @@ int btrfs_trim_block_group_extents(struct btrfs_block_group *block_group,
				   bool async);
int btrfs_trim_block_group_bitmaps(struct btrfs_block_group *block_group,
				   u64 *trimmed, u64 start, u64 end, u64 minlen,
				   bool async);
				   u64 maxlen, bool async);

/* Support functions for running our sanity tests */
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS