Commit d19936a2 authored by Kent Overstreet, committed by Jens Axboe
Browse files

bcache: convert to bioset_init()/mempool_init()



Convert bcache to embedded bio sets.

Reviewed-by: Coly Li <colyli@suse.de>
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent b906bbb6
Loading
Loading
Loading
Loading
+5 −5
Original line number Diff line number Diff line
@@ -269,7 +269,7 @@ struct bcache_device {
	atomic_t		*stripe_sectors_dirty;
	unsigned long		*full_dirty_stripes;

-	struct bio_set		*bio_split;
+	struct bio_set		bio_split;

	unsigned		data_csum:1;

@@ -530,9 +530,9 @@ struct cache_set {
	struct closure		sb_write;
	struct semaphore	sb_write_mutex;

-	mempool_t		*search;
-	mempool_t		*bio_meta;
-	struct bio_set		*bio_split;
+	mempool_t		search;
+	mempool_t		bio_meta;
+	struct bio_set		bio_split;

	/* For the btree cache */
	struct shrinker		shrink;
@@ -657,7 +657,7 @@ struct cache_set {
	 * A btree node on disk could have too many bsets for an iterator to fit
	 * on the stack - have to dynamically allocate them
	 */
-	mempool_t		*fill_iter;
+	mempool_t		fill_iter;

	struct bset_sort_state	sort;

+4 −9
Original line number Diff line number Diff line
@@ -1118,8 +1118,7 @@ struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter,

void bch_bset_sort_state_free(struct bset_sort_state *state)
{
-	if (state->pool)
-		mempool_destroy(state->pool);
+	mempool_exit(&state->pool);
}

int bch_bset_sort_state_init(struct bset_sort_state *state, unsigned page_order)
@@ -1129,11 +1128,7 @@ int bch_bset_sort_state_init(struct bset_sort_state *state, unsigned page_order)
	state->page_order = page_order;
	state->crit_factor = int_sqrt(1 << page_order);

-	state->pool = mempool_create_page_pool(1, page_order);
-	if (!state->pool)
-		return -ENOMEM;
-
-	return 0;
+	return mempool_init_page_pool(&state->pool, 1, page_order);
}
EXPORT_SYMBOL(bch_bset_sort_state_init);

@@ -1191,7 +1186,7 @@ static void __btree_sort(struct btree_keys *b, struct btree_iter *iter,

		BUG_ON(order > state->page_order);

-		outp = mempool_alloc(state->pool, GFP_NOIO);
+		outp = mempool_alloc(&state->pool, GFP_NOIO);
		out = page_address(outp);
		used_mempool = true;
		order = state->page_order;
@@ -1220,7 +1215,7 @@ static void __btree_sort(struct btree_keys *b, struct btree_iter *iter,
	}

	if (used_mempool)
-		mempool_free(virt_to_page(out), state->pool);
+		mempool_free(virt_to_page(out), &state->pool);
	else
		free_pages((unsigned long) out, order);

+1 −1
Original line number Diff line number Diff line
@@ -347,7 +347,7 @@ static inline struct bkey *bch_bset_search(struct btree_keys *b,
/* Sorting */

struct bset_sort_state {
-	mempool_t		*pool;
+	mempool_t		pool;

	unsigned		page_order;
	unsigned		crit_factor;
+2 −2
Original line number Diff line number Diff line
@@ -204,7 +204,7 @@ void bch_btree_node_read_done(struct btree *b)
	struct bset *i = btree_bset_first(b);
	struct btree_iter *iter;

-	iter = mempool_alloc(b->c->fill_iter, GFP_NOIO);
+	iter = mempool_alloc(&b->c->fill_iter, GFP_NOIO);
	iter->size = b->c->sb.bucket_size / b->c->sb.block_size;
	iter->used = 0;

@@ -271,7 +271,7 @@ void bch_btree_node_read_done(struct btree *b)
		bch_bset_init_next(&b->keys, write_block(b),
				   bset_magic(&b->c->sb));
out:
-	mempool_free(iter, b->c->fill_iter);
+	mempool_free(iter, &b->c->fill_iter);
	return;
err:
	set_btree_node_io_error(b);
+2 −2
Original line number Diff line number Diff line
@@ -17,12 +17,12 @@
void bch_bbio_free(struct bio *bio, struct cache_set *c)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
-	mempool_free(b, c->bio_meta);
+	mempool_free(b, &c->bio_meta);
}

struct bio *bch_bbio_alloc(struct cache_set *c)
{
-	struct bbio *b = mempool_alloc(c->bio_meta, GFP_NOIO);
+	struct bbio *b = mempool_alloc(&c->bio_meta, GFP_NOIO);
	struct bio *bio = &b->bio;

	bio_init(bio, bio->bi_inline_vecs, bucket_pages(c));
Loading