Commit 08fdb2cd authored by Coly Li, committed by Jens Axboe

bcache: remove for_each_cache()



Since now each cache_set explicitly has a single cache, for_each_cache()
is unnecessary. This patch removes the macro, updates all locations
where it was used, and makes sure the code logic remains consistent.
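
The call-site conversion is mechanical. A minimal sketch of the
pattern, taken from the wake_up_allocators() hunk below:

	/* before: the loop body ran exactly once */
	for_each_cache(ca, c, i)
		wake_up_process(ca->alloc_thread);

	/* after: fetch the only cache directly */
	ca = c->cache;
	wake_up_process(ca->alloc_thread);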

Signed-off-by: Coly Li <colyli@suse.de>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 697e2349
drivers/md/bcache/alloc.c +8 −9
@@ -88,7 +88,6 @@ void bch_rescale_priorities(struct cache_set *c, int sectors)
 	struct cache *ca;
 	struct bucket *b;
 	unsigned long next = c->nbuckets * c->sb.bucket_size / 1024;
-	unsigned int i;
 	int r;
 
 	atomic_sub(sectors, &c->rescale);
@@ -104,7 +103,7 @@ void bch_rescale_priorities(struct cache_set *c, int sectors)
 
 	c->min_prio = USHRT_MAX;
 
-	for_each_cache(ca, c, i)
+	ca = c->cache;
 	for_each_bucket(b, ca)
 		if (b->prio &&
 		    b->prio != BTREE_PRIO &&
drivers/md/bcache/bcache.h +2 −7
@@ -887,9 +887,6 @@ do {									\
 
 /* Looping macros */
 
-#define for_each_cache(ca, cs, iter)					\
-	for (iter = 0; ca = cs->cache, iter < 1; iter++)
-
 #define for_each_bucket(b, ca)						\
 	for (b = (ca)->buckets + (ca)->sb.first_bucket;			\
 	     b < (ca)->buckets + (ca)->sb.nbuckets; b++)
@@ -931,10 +928,8 @@ static inline uint8_t bucket_gc_gen(struct bucket *b)
 
 static inline void wake_up_allocators(struct cache_set *c)
 {
-	struct cache *ca;
-	unsigned int i;
+	struct cache *ca = c->cache;
 
-	for_each_cache(ca, c, i)
 	wake_up_process(ca->alloc_thread);
 }
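
A note on the removed looping macro: its condition relies on the C comma
operator, so `ca = cs->cache` is re-assigned on every evaluation while
`iter < 1` limits the body to a single pass. A standalone demonstration
with hypothetical stand-in types, not the real bcache structures:

	#include <stdio.h>

	struct cache { int id; };
	struct cache_set { struct cache *cache; };

	/* the macro removed by this patch, verbatim */
	#define for_each_cache(ca, cs, iter)				\
		for (iter = 0; ca = cs->cache, iter < 1; iter++)

	int main(void)
	{
		struct cache c0 = { .id = 42 };
		struct cache_set set = { .cache = &c0 };
		struct cache_set *cs = &set;
		struct cache *ca;
		unsigned int i;

		for_each_cache(ca, cs, i)	/* body runs exactly once */
			printf("cache %d\n", ca->id);

		return 0;
	}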

drivers/md/bcache/btree.c +47 −56
@@ -1167,12 +1167,11 @@ static void make_btree_freeing_key(struct btree *b, struct bkey *k)
 static int btree_check_reserve(struct btree *b, struct btree_op *op)
 {
 	struct cache_set *c = b->c;
-	struct cache *ca;
-	unsigned int i, reserve = (c->root->level - b->level) * 2 + 1;
+	struct cache *ca = c->cache;
+	unsigned int reserve = (c->root->level - b->level) * 2 + 1;
 
 	mutex_lock(&c->bucket_lock);
 
-	for_each_cache(ca, c, i)
 	if (fifo_used(&ca->free[RESERVE_BTREE]) < reserve) {
 		if (op)
 			prepare_to_wait(&c->btree_cache_wait, &op->wait,
@@ -1695,7 +1694,6 @@ static void btree_gc_start(struct cache_set *c)
 {
 	struct cache *ca;
 	struct bucket *b;
-	unsigned int i;
 
 	if (!c->gc_mark_valid)
 		return;
@@ -1705,7 +1703,7 @@ static void btree_gc_start(struct cache_set *c)
 	c->gc_mark_valid = 0;
 	c->gc_done = ZERO_KEY;
 
-	for_each_cache(ca, c, i)
+	ca = c->cache;
 	for_each_bucket(b, ca) {
 		b->last_gc = b->gen;
 		if (!atomic_read(&b->pin)) {
@@ -1721,7 +1719,8 @@ static void bch_btree_gc_finish(struct cache_set *c)
 {
 	struct bucket *b;
 	struct cache *ca;
-	unsigned int i;
+	unsigned int i, j;
+	uint64_t *k;
 
 	mutex_lock(&c->bucket_lock);
 
@@ -1739,7 +1738,6 @@ static void bch_btree_gc_finish(struct cache_set *c)
 		struct bcache_device *d = c->devices[i];
 		struct cached_dev *dc;
 		struct keybuf_key *w, *n;
-		unsigned int j;
 
 		if (!d || UUID_FLASH_ONLY(&c->uuids[i]))
 			continue;
@@ -1756,17 +1754,16 @@ static void bch_btree_gc_finish(struct cache_set *c)
 	rcu_read_unlock();
 
 	c->avail_nbuckets = 0;
-	for_each_cache(ca, c, i) {
-		uint64_t *i;
 
+	ca = c->cache;
 	ca->invalidate_needs_gc = 0;
 
-		for (i = ca->sb.d; i < ca->sb.d + ca->sb.keys; i++)
-			SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);
+	for (k = ca->sb.d; k < ca->sb.d + ca->sb.keys; k++)
+		SET_GC_MARK(ca->buckets + *k, GC_MARK_METADATA);
 
-		for (i = ca->prio_buckets;
-		     i < ca->prio_buckets + prio_buckets(ca) * 2; i++)
-			SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);
+	for (k = ca->prio_buckets;
+	     k < ca->prio_buckets + prio_buckets(ca) * 2; k++)
+		SET_GC_MARK(ca->buckets + *k, GC_MARK_METADATA);
 
 	for_each_bucket(b, ca) {
 		c->need_gc	= max(c->need_gc, bucket_gc_gen(b));
@@ -1779,7 +1776,6 @@ static void bch_btree_gc_finish(struct cache_set *c)
 		if (!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE)
 			c->avail_nbuckets++;
 	}
-	}
 
 	mutex_unlock(&c->bucket_lock);
 }
@@ -1830,10 +1826,8 @@ static void bch_btree_gc(struct cache_set *c)
 
 static bool gc_should_run(struct cache_set *c)
 {
-	struct cache *ca;
-	unsigned int i;
+	struct cache *ca = c->cache;
 
-	for_each_cache(ca, c, i)
 	if (ca->invalidate_needs_gc)
 		return true;
 
@@ -2081,9 +2075,8 @@ out:
 
 void bch_initial_gc_finish(struct cache_set *c)
 {
-	struct cache *ca;
+	struct cache *ca = c->cache;
 	struct bucket *b;
-	unsigned int i;
 
 	bch_btree_gc_finish(c);
 
@@ -2098,7 +2091,6 @@ void bch_initial_gc_finish(struct cache_set *c)
 	 * This is only safe for buckets that have no live data in them, which
 	 * there should always be some of.
 	 */
-	for_each_cache(ca, c, i) {
 	for_each_bucket(b, ca) {
 		if (fifo_full(&ca->free[RESERVE_PRIO]) &&
 		    fifo_full(&ca->free[RESERVE_BTREE]))
@@ -2113,7 +2105,6 @@ void bch_initial_gc_finish(struct cache_set *c)
 					  b - ca->buckets);
 		}
 	}
-	}
 
 	mutex_unlock(&c->bucket_lock);
 }
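
A detail worth noting in the bch_btree_gc_finish() hunks above: the old
inner scope declared `uint64_t *i`, shadowing the function-scope
`unsigned int i` that drives the devices loop. With the per-cache block
un-nested, the pointer is hoisted and renamed, so the declarations become:

	unsigned int i, j;	/* j hoisted from the inner device loop */
	uint64_t *k;		/* was the shadowing 'uint64_t *i' */
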
drivers/md/bcache/journal.c +104 −125
@@ -179,11 +179,8 @@ int bch_journal_read(struct cache_set *c, struct list_head *list)
 		ret;							\
 	})
 
-	struct cache *ca;
-	unsigned int iter;
+	struct cache *ca = c->cache;
 	int ret = 0;
-
-	for_each_cache(ca, c, iter) {
 	struct journal_device *ja = &ca->journal;
 	DECLARE_BITMAP(bitmap, SB_JOURNAL_BUCKETS);
 	unsigned int i, l, r, m;
@@ -223,7 +220,7 @@ int bch_journal_read(struct cache_set *c, struct list_head *list)
 
 	/* no journal entries on this device? */
 	if (l == ca->sb.njournal_buckets)
-			continue;
+		goto out;
 bsearch:
 	BUG_ON(list_empty(list));
 
@@ -283,8 +280,8 @@ bsearch:
 				ca->sb.njournal_buckets;
 
 		}
-	}
 
+out:
 	if (!list_empty(list))
 		c->journal.seq = list_entry(list->prev,
 					    struct journal_replay,
@@ -342,10 +339,8 @@ void bch_journal_mark(struct cache_set *c, struct list_head *list)
 
 static bool is_discard_enabled(struct cache_set *s)
 {
-	struct cache *ca;
-	unsigned int i;
+	struct cache *ca = s->cache;
 
-	for_each_cache(ca, s, i)
 	if (ca->discard)
 		return true;
 
@@ -633,9 +628,10 @@ static void do_journal_discard(struct cache *ca)
 static void journal_reclaim(struct cache_set *c)
 {
 	struct bkey *k = &c->journal.key;
-	struct cache *ca;
+	struct cache *ca = c->cache;
 	uint64_t last_seq;
-	unsigned int iter, n = 0;
+	unsigned int next;
+	struct journal_device *ja = &ca->journal;
 	atomic_t p __maybe_unused;
 
 	atomic_long_inc(&c->reclaim);
@@ -647,46 +643,31 @@ static void journal_reclaim(struct cache_set *c)
 
 	/* Update last_idx */
 
-	for_each_cache(ca, c, iter) {
-		struct journal_device *ja = &ca->journal;
-
 	while (ja->last_idx != ja->cur_idx &&
 	       ja->seq[ja->last_idx] < last_seq)
 		ja->last_idx = (ja->last_idx + 1) %
 			ca->sb.njournal_buckets;
-	}
 
-	for_each_cache(ca, c, iter)
 	do_journal_discard(ca);
 
 	if (c->journal.blocks_free)
 		goto out;
 
 	/*
 	 * Allocate:
 	 * XXX: Sort by free journal space
 	 */
 
-	for_each_cache(ca, c, iter) {
-		struct journal_device *ja = &ca->journal;
-		unsigned int next = (ja->cur_idx + 1) % ca->sb.njournal_buckets;
-
+	next = (ja->cur_idx + 1) % ca->sb.njournal_buckets;
 	/* No space available on this device */
 	if (next == ja->discard_idx)
-			continue;
+		goto out;
 
 	ja->cur_idx = next;
-		k->ptr[n++] = MAKE_PTR(0,
+	k->ptr[0] = MAKE_PTR(0,
 			     bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
 			     ca->sb.nr_this_dev);
 	atomic_long_inc(&c->reclaimed_journal_buckets);
-	}
 
-	if (n) {
 	bkey_init(k);
-		SET_KEY_PTRS(k, n);
+	SET_KEY_PTRS(k, 1);
 	c->journal.blocks_free = c->sb.bucket_size >> c->block_bits;
-	}
 
 out:
 	if (!journal_full(&c->journal))
 		__closure_wake_up(&c->journal.wait);
@@ -750,7 +731,7 @@ static void journal_write_unlocked(struct closure *cl)
 	__releases(c->journal.lock)
 {
 	struct cache_set *c = container_of(cl, struct cache_set, journal.io);
-	struct cache *ca;
+	struct cache *ca = c->cache;
 	struct journal_write *w = c->journal.cur;
 	struct bkey *k = &c->journal.key;
 	unsigned int i, sectors = set_blocks(w->data, block_bytes(c)) *
@@ -780,9 +761,7 @@ static void journal_write_unlocked(struct closure *cl)
 	bkey_copy(&w->data->btree_root, &c->root->key);
 	bkey_copy(&w->data->uuid_bucket, &c->uuid_bucket);
 
-	for_each_cache(ca, c, i)
 	w->data->prio_bucket[ca->sb.nr_this_dev] = ca->prio_buckets[0];
-
 	w->data->magic		= jset_magic(&c->sb);
 	w->data->version	= BCACHE_JSET_VERSION;
 	w->data->last_seq	= last_seq(&c->journal);
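
One behavioral nuance in journal_reclaim() above: the old code could in
principle gather one journal pointer per cache device, counting them in
`n` and skipping the key setup when `n` stayed 0. With a single cache the
count is always exactly one on success, so the accumulator and its guard
disappear. A condensed before/after of the allocation tail, abridged from
the hunk (`...` elides the MAKE_PTR arguments):

	/* before: accumulate per device, guard against n == 0 */
	k->ptr[n++] = MAKE_PTR(0, ...);
	if (n) {
		bkey_init(k);
		SET_KEY_PTRS(k, n);
		c->journal.blocks_free = c->sb.bucket_size >> c->block_bits;
	}

	/* after: one device, one pointer; failure jumps to out: instead */
	k->ptr[0] = MAKE_PTR(0, ...);
	bkey_init(k);
	SET_KEY_PTRS(k, 1);
	c->journal.blocks_free = c->sb.bucket_size >> c->block_bits;
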
drivers/md/bcache/movinggc.c +28 −30
@@ -196,18 +196,17 @@ static unsigned int bucket_heap_top(struct cache *ca)
 
 void bch_moving_gc(struct cache_set *c)
 {
-	struct cache *ca;
+	struct cache *ca = c->cache;
 	struct bucket *b;
-	unsigned int i;
+	unsigned long sectors_to_move, reserve_sectors;
 
 	if (!c->copy_gc_enabled)
 		return;
 
 	mutex_lock(&c->bucket_lock);
 
-	for_each_cache(ca, c, i) {
-		unsigned long sectors_to_move = 0;
-		unsigned long reserve_sectors = ca->sb.bucket_size *
+	sectors_to_move = 0;
+	reserve_sectors = ca->sb.bucket_size *
 			     fifo_used(&ca->free[RESERVE_MOVINGGC]);
 
 	ca->heap.used = 0;
@@ -238,7 +237,6 @@ void bch_moving_gc(struct cache_set *c)
 
 	while (heap_pop(&ca->heap, b, bucket_cmp))
 		SET_GC_MOVE(b, 1);
-	}
 
 	mutex_unlock(&c->bucket_lock);
 