Commit 4e1ebae3 authored by Coly Li's avatar Coly Li Committed by Jens Axboe
Browse files

bcache: only use block_bytes() on struct cache



Because struct cache_set and struct cache both contain a struct cache_sb,
the macro block_bytes() can currently be used on either of them. Once the
embedded struct cache_sb is removed from struct cache_set, this macro can
no longer be used on struct cache_set.

This patch unifies all block_bytes() usage to operate only on struct cache;
this is one of the preparations for removing the embedded struct cache_sb
from struct cache_set.

Signed-off-by: Coly Li <colyli@suse.de>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 1132e56e
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -759,7 +759,7 @@ struct bbio {

#define bucket_pages(c)		((c)->sb.bucket_size / PAGE_SECTORS)
#define bucket_bytes(c)		((c)->sb.bucket_size << 9)
#define block_bytes(c)		((c)->sb.block_size << 9)
#define block_bytes(ca)		((ca)->sb.block_size << 9)

static inline unsigned int meta_bucket_pages(struct cache_sb *sb)
{
+12 −12
Original line number Diff line number Diff line
@@ -104,7 +104,7 @@

static inline struct bset *write_block(struct btree *b)
{
	return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c);
	return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c->cache);
}

static void bch_btree_init_next(struct btree *b)
@@ -173,7 +173,7 @@ void bch_btree_node_read_done(struct btree *b)
			goto err;

		err = "bad btree header";
		if (b->written + set_blocks(i, block_bytes(b->c)) >
		if (b->written + set_blocks(i, block_bytes(b->c->cache)) >
		    btree_blocks(b))
			goto err;

@@ -199,13 +199,13 @@ void bch_btree_node_read_done(struct btree *b)

		bch_btree_iter_push(iter, i->start, bset_bkey_last(i));

		b->written += set_blocks(i, block_bytes(b->c));
		b->written += set_blocks(i, block_bytes(b->c->cache));
	}

	err = "corrupted btree";
	for (i = write_block(b);
	     bset_sector_offset(&b->keys, i) < KEY_SIZE(&b->key);
	     i = ((void *) i) + block_bytes(b->c))
	     i = ((void *) i) + block_bytes(b->c->cache))
		if (i->seq == b->keys.set[0].data->seq)
			goto err;

@@ -347,7 +347,7 @@ static void do_btree_node_write(struct btree *b)

	b->bio->bi_end_io	= btree_node_write_endio;
	b->bio->bi_private	= cl;
	b->bio->bi_iter.bi_size	= roundup(set_bytes(i), block_bytes(b->c));
	b->bio->bi_iter.bi_size	= roundup(set_bytes(i), block_bytes(b->c->cache));
	b->bio->bi_opf		= REQ_OP_WRITE | REQ_META | REQ_FUA;
	bch_bio_map(b->bio, i);

@@ -423,10 +423,10 @@ void __bch_btree_node_write(struct btree *b, struct closure *parent)

	do_btree_node_write(b);

	atomic_long_add(set_blocks(i, block_bytes(b->c)) * b->c->sb.block_size,
	atomic_long_add(set_blocks(i, block_bytes(b->c->cache)) * b->c->sb.block_size,
			&PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written);

	b->written += set_blocks(i, block_bytes(b->c));
	b->written += set_blocks(i, block_bytes(b->c->cache));
}

void bch_btree_node_write(struct btree *b, struct closure *parent)
@@ -1344,7 +1344,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,

	if (nodes < 2 ||
	    __set_blocks(b->keys.set[0].data, keys,
			 block_bytes(b->c)) > blocks * (nodes - 1))
			 block_bytes(b->c->cache)) > blocks * (nodes - 1))
		return 0;

	for (i = 0; i < nodes; i++) {
@@ -1378,7 +1378,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
			     k = bkey_next(k)) {
				if (__set_blocks(n1, n1->keys + keys +
						 bkey_u64s(k),
						 block_bytes(b->c)) > blocks)
						 block_bytes(b->c->cache)) > blocks)
					break;

				last = k;
@@ -1394,7 +1394,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
			 * though)
			 */
			if (__set_blocks(n1, n1->keys + n2->keys,
					 block_bytes(b->c)) >
					 block_bytes(b->c->cache)) >
			    btree_blocks(new_nodes[i]))
				goto out_unlock_nocoalesce;

@@ -1403,7 +1403,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
			last = &r->b->key;
		}

		BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c)) >
		BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c->cache)) >
		       btree_blocks(new_nodes[i]));

		if (last)
@@ -2210,7 +2210,7 @@ static int btree_split(struct btree *b, struct btree_op *op,
		goto err;

	split = set_blocks(btree_bset_first(n1),
			   block_bytes(n1->c)) > (btree_blocks(b) * 4) / 5;
			   block_bytes(n1->c->cache)) > (btree_blocks(b) * 4) / 5;

	if (split) {
		unsigned int keys = 0;
+4 −4
Original line number Diff line number Diff line
@@ -25,8 +25,8 @@ struct dentry *bcache_debug;
	for (i = (start);						\
	     (void *) i < (void *) (start) + (KEY_SIZE(&b->key) << 9) &&\
	     i->seq == (start)->seq;					\
	     i = (void *) i + set_blocks(i, block_bytes(b->c)) *	\
		 block_bytes(b->c))
	     i = (void *) i + set_blocks(i, block_bytes(b->c->cache)) *	\
		 block_bytes(b->c->cache))

void bch_btree_verify(struct btree *b)
{
@@ -82,14 +82,14 @@ void bch_btree_verify(struct btree *b)

		for_each_written_bset(b, ondisk, i) {
			unsigned int block = ((void *) i - (void *) ondisk) /
				block_bytes(b->c);
				block_bytes(b->c->cache);

			pr_err("*** on disk block %u:\n", block);
			bch_dump_bset(&b->keys, i, block);
		}

		pr_err("*** block %zu not written\n",
		       ((void *) i - (void *) ondisk) / block_bytes(b->c));
		       ((void *) i - (void *) ondisk) / block_bytes(b->c->cache));

		for (j = 0; j < inmemory->keys; j++)
			if (inmemory->d[j] != sorted->d[j])
+4 −4
Original line number Diff line number Diff line
@@ -98,7 +98,7 @@ reread: left = ca->sb.bucket_size - offset;
				return ret;
			}

			blocks = set_blocks(j, block_bytes(ca->set));
			blocks = set_blocks(j, block_bytes(ca));

			/*
			 * Nodes in 'list' are in linear increasing order of
@@ -734,7 +734,7 @@ static void journal_write_unlocked(struct closure *cl)
	struct cache *ca = c->cache;
	struct journal_write *w = c->journal.cur;
	struct bkey *k = &c->journal.key;
	unsigned int i, sectors = set_blocks(w->data, block_bytes(c)) *
	unsigned int i, sectors = set_blocks(w->data, block_bytes(ca)) *
		c->sb.block_size;

	struct bio *bio;
@@ -754,7 +754,7 @@ static void journal_write_unlocked(struct closure *cl)
		return;
	}

	c->journal.blocks_free -= set_blocks(w->data, block_bytes(c));
	c->journal.blocks_free -= set_blocks(w->data, block_bytes(ca));

	w->data->btree_level = c->root->level;

@@ -847,7 +847,7 @@ static struct journal_write *journal_wait_for_write(struct cache_set *c,
		struct journal_write *w = c->journal.cur;

		sectors = __set_blocks(w->data, w->data->keys + nkeys,
				       block_bytes(c)) * c->sb.block_size;
				       block_bytes(c->cache)) * c->sb.block_size;

		if (sectors <= min_t(size_t,
				     c->journal.blocks_free * c->sb.block_size,
+1 −1
Original line number Diff line number Diff line
@@ -99,7 +99,7 @@ static int bch_keylist_realloc(struct keylist *l, unsigned int u64s,
	 * bch_data_insert_keys() will insert the keys created so far
	 * and finish the rest when the keylist is empty.
	 */
	if (newsize * sizeof(uint64_t) > block_bytes(c) - sizeof(struct jset))
	if (newsize * sizeof(uint64_t) > block_bytes(c->cache) - sizeof(struct jset))
		return -ENOMEM;

	return __bch_keylist_realloc(l, u64s);
Loading