Commit 4a784266 authored by Coly Li, committed by Jens Axboe

bcache: remove embedded struct cache_sb from struct cache_set

Since the bcache code was merged into the mainline kernel, each cache set
has only ever had one single cache in it. The framework for multiple
caches exists, but the code is far from complete. Considering that
multiple copies of cached data can also be stored on e.g. md raid1
devices, there is no real need to support multiple caches in one cache
set.

The previous preparation patches fixed the dependencies so that a cache
set explicitly has only a single cache. Now we no longer have to maintain
an embedded partial super block in struct cache_set; the in-memory super
block can be referenced directly from struct cache.

This patch removes the embedded struct cache_sb from struct cache_set,
and fixes all locations that referenced the removed super block to
reference the in-memory super block of struct cache instead.
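
For illustration only, a minimal sketch of the resulting access pattern
(the stand-in types and the set_block_size() helper below are simplified
hypotheticals, not the real definitions from the bcache headers):

	#include <stdint.h>

	/* Simplified stand-ins for the bcache structures. */
	struct cache_sb {
		uint16_t	block_size;	/* in sectors */
		uint16_t	bucket_size;	/* in sectors */
	};

	struct cache {
		struct cache_sb	sb;	/* the one in-memory super block */
	};

	struct cache_set {
		/* struct cache_sb sb;	embedded copy removed by this patch */
		struct cache	*cache;	/* the single cache in this set */
	};

	/* Every former c->sb.<field> access now goes through the cache. */
	static inline unsigned int set_block_size(struct cache_set *c)
	{
		return c->cache->sb.block_size;	/* was: c->sb.block_size */
	}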

Signed-off-by: Coly Li <colyli@suse.de>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 6f9414e0
drivers/md/bcache/alloc.c (+3 −3)
@@ -87,7 +87,7 @@ void bch_rescale_priorities(struct cache_set *c, int sectors)
{
	struct cache *ca;
	struct bucket *b;
-	unsigned long next = c->nbuckets * c->sb.bucket_size / 1024;
+	unsigned long next = c->nbuckets * c->cache->sb.bucket_size / 1024;
	int r;

	atomic_sub(sectors, &c->rescale);
@@ -583,7 +583,7 @@ static struct open_bucket *pick_data_bucket(struct cache_set *c,
					   struct open_bucket, list);
found:
	if (!ret->sectors_free && KEY_PTRS(alloc)) {
-		ret->sectors_free = c->sb.bucket_size;
+		ret->sectors_free = c->cache->sb.bucket_size;
		bkey_copy(&ret->key, alloc);
		bkey_init(alloc);
	}
@@ -677,7 +677,7 @@ bool bch_alloc_sectors(struct cache_set *c,
				&PTR_CACHE(c, &b->key, i)->sectors_written);
	}

-	if (b->sectors_free < c->sb.block_size)
+	if (b->sectors_free < c->cache->sb.block_size)
		b->sectors_free = 0;

	/*
drivers/md/bcache/bcache.h (+1 −3)
@@ -517,8 +517,6 @@ struct cache_set {
	atomic_t		idle_counter;
	atomic_t		at_max_writeback_rate;

-	struct cache_sb		sb;
-
	struct cache		*cache;

	struct bcache_device	**devices;
@@ -799,7 +797,7 @@ static inline sector_t bucket_to_sector(struct cache_set *c, size_t b)

static inline sector_t bucket_remainder(struct cache_set *c, sector_t s)
{
-	return s & (c->sb.bucket_size - 1);
+	return s & (c->cache->sb.bucket_size - 1);
}

static inline struct cache *PTR_CACHE(struct cache_set *c,
drivers/md/bcache/btree.c (+9 −8)
@@ -117,7 +117,7 @@ static void bch_btree_init_next(struct btree *b)

	if (b->written < btree_blocks(b))
		bch_bset_init_next(&b->keys, write_block(b),
-				   bset_magic(&b->c->sb));
+				   bset_magic(&b->c->cache->sb));

}

@@ -155,7 +155,7 @@ void bch_btree_node_read_done(struct btree *b)
	 * See the comment around cache_set->fill_iter.
	 */
	iter = mempool_alloc(&b->c->fill_iter, GFP_NOIO);
-	iter->size = b->c->sb.bucket_size / b->c->sb.block_size;
+	iter->size = b->c->cache->sb.bucket_size / b->c->cache->sb.block_size;
	iter->used = 0;

#ifdef CONFIG_BCACHE_DEBUG
@@ -178,7 +178,7 @@ void bch_btree_node_read_done(struct btree *b)
			goto err;

		err = "bad magic";
-		if (i->magic != bset_magic(&b->c->sb))
+		if (i->magic != bset_magic(&b->c->cache->sb))
			goto err;

		err = "bad checksum";
@@ -219,7 +219,7 @@ void bch_btree_node_read_done(struct btree *b)

	if (b->written < btree_blocks(b))
		bch_bset_init_next(&b->keys, write_block(b),
-				   bset_magic(&b->c->sb));
+				   bset_magic(&b->c->cache->sb));
out:
	mempool_free(iter, &b->c->fill_iter);
	return;
@@ -423,7 +423,7 @@ void __bch_btree_node_write(struct btree *b, struct closure *parent)

	do_btree_node_write(b);

-	atomic_long_add(set_blocks(i, block_bytes(b->c->cache)) * b->c->sb.block_size,
+	atomic_long_add(set_blocks(i, block_bytes(b->c->cache)) * b->c->cache->sb.block_size,
			&PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written);

	b->written += set_blocks(i, block_bytes(b->c->cache));
@@ -738,7 +738,7 @@ void bch_btree_cache_free(struct cache_set *c)
	if (c->verify_data)
		list_move(&c->verify_data->list, &c->btree_cache);

-	free_pages((unsigned long) c->verify_ondisk, ilog2(meta_bucket_pages(&c->sb)));
+	free_pages((unsigned long) c->verify_ondisk, ilog2(meta_bucket_pages(&c->cache->sb)));
#endif

	list_splice(&c->btree_cache_freeable,
@@ -785,7 +785,8 @@ int bch_btree_cache_alloc(struct cache_set *c)
	mutex_init(&c->verify_lock);

	c->verify_ondisk = (void *)
-		__get_free_pages(GFP_KERNEL|__GFP_COMP, ilog2(meta_bucket_pages(&c->sb)));
+		__get_free_pages(GFP_KERNEL|__GFP_COMP,
+				 ilog2(meta_bucket_pages(&c->cache->sb)));
	if (!c->verify_ondisk) {
		/*
		 * Don't worry about the mca_rereserve buckets
@@ -1108,7 +1109,7 @@ retry:
	}

	b->parent = parent;
-	bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->sb));
+	bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->cache->sb));

	mutex_unlock(&c->bucket_lock);

drivers/md/bcache/btree.h (+1 −1)
@@ -194,7 +194,7 @@ static inline unsigned int bset_block_offset(struct btree *b, struct bset *i)

static inline void set_gc_sectors(struct cache_set *c)
{
-	atomic_set(&c->sectors_to_gc, c->sb.bucket_size * c->nbuckets / 16);
+	atomic_set(&c->sectors_to_gc, c->cache->sb.bucket_size * c->nbuckets / 16);
}

void bkey_put(struct cache_set *c, struct bkey *k);
drivers/md/bcache/extents.c (+3 −3)
@@ -54,7 +54,7 @@ static bool __ptr_invalid(struct cache_set *c, const struct bkey *k)
			size_t bucket = PTR_BUCKET_NR(c, k, i);
			size_t r = bucket_remainder(c, PTR_OFFSET(k, i));

-			if (KEY_SIZE(k) + r > c->sb.bucket_size ||
+			if (KEY_SIZE(k) + r > c->cache->sb.bucket_size ||
			    bucket <  ca->sb.first_bucket ||
			    bucket >= ca->sb.nbuckets)
				return true;
@@ -75,7 +75,7 @@ static const char *bch_ptr_status(struct cache_set *c, const struct bkey *k)
			size_t bucket = PTR_BUCKET_NR(c, k, i);
			size_t r = bucket_remainder(c, PTR_OFFSET(k, i));

-			if (KEY_SIZE(k) + r > c->sb.bucket_size)
+			if (KEY_SIZE(k) + r > c->cache->sb.bucket_size)
				return "bad, length too big";
			if (bucket <  ca->sb.first_bucket)
				return "bad, short offset";
@@ -136,7 +136,7 @@ static void bch_bkey_dump(struct btree_keys *keys, const struct bkey *k)
		size_t n = PTR_BUCKET_NR(b->c, k, j);

		pr_cont(" bucket %zu", n);
-		if (n >= b->c->sb.first_bucket && n < b->c->sb.nbuckets)
+		if (n >= b->c->cache->sb.first_bucket && n < b->c->cache->sb.nbuckets)
			pr_cont(" prio %i",
				PTR_BUCKET(b->c, k, j)->prio);
	}