Commit 72a44517 authored by Kent Overstreet
Browse files

bcache: Convert gc to a kthread



We needed a dedicated rescuer workqueue for gc anyways... and gc was
conceptually a dedicated thread, just one that wasn't running all the
time. Switch it to a dedicated thread to make the code a bit more
straightforward.

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
parent 35fcd848
Loading
Loading
Loading
Loading
+3 −3
Original line number Diff line number Diff line
@@ -210,7 +210,7 @@ static void invalidate_buckets_lru(struct cache *ca)
			 * multiple times when it can't do anything
			 */
			ca->invalidate_needs_gc = 1;
			bch_queue_gc(ca->set);
			wake_up_gc(ca->set);
			return;
		}

@@ -235,7 +235,7 @@ static void invalidate_buckets_fifo(struct cache *ca)

		if (++checked >= ca->sb.nbuckets) {
			ca->invalidate_needs_gc = 1;
			bch_queue_gc(ca->set);
			wake_up_gc(ca->set);
			return;
		}
	}
@@ -260,7 +260,7 @@ static void invalidate_buckets_random(struct cache *ca)

		if (++checked >= ca->sb.nbuckets / 2) {
			ca->invalidate_needs_gc = 1;
			bch_queue_gc(ca->set);
			wake_up_gc(ca->set);
			return;
		}
	}
+4 −5
Original line number Diff line number Diff line
@@ -773,7 +773,7 @@ struct cache_set {
	struct gc_stat		gc_stats;
	size_t			nbuckets;

	struct closure_with_waitlist gc;
	struct task_struct	*gc_thread;
	/* Where in the btree gc currently is */
	struct bkey		gc_done;

@@ -786,11 +786,10 @@ struct cache_set {
	/* Counts how many sectors bio_insert has added to the cache */
	atomic_t		sectors_to_gc;

	struct closure		moving_gc;
	struct closure_waitlist	moving_gc_wait;
	wait_queue_head_t	moving_gc_wait;
	struct keybuf		moving_gc_keys;
	/* Number of moving GC bios in flight */
	atomic_t		in_flight;
	struct semaphore	moving_in_flight;

	struct btree		*root;

@@ -1176,7 +1175,7 @@ bool bch_cache_set_error(struct cache_set *, const char *, ...);
void bch_prio_write(struct cache *);
void bch_write_bdev_super(struct cached_dev *, struct closure *);

extern struct workqueue_struct *bcache_wq, *bch_gc_wq;
extern struct workqueue_struct *bcache_wq;
extern const char * const bch_cache_modes[];
extern struct mutex bch_register_lock;
extern struct list_head bch_cache_sets;
+34 −16
Original line number Diff line number Diff line
@@ -28,7 +28,9 @@

#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/freezer.h>
#include <linux/hash.h>
#include <linux/kthread.h>
#include <linux/prefetch.h>
#include <linux/random.h>
#include <linux/rcupdate.h>
@@ -105,7 +107,6 @@ static const char *op_type(struct btree_op *op)
#define PTR_HASH(c, k)							\
	(((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0))

struct workqueue_struct *bch_gc_wq;
static struct workqueue_struct *btree_io_wq;

void bch_btree_op_init_stack(struct btree_op *op)
@@ -732,12 +733,9 @@ int bch_btree_cache_alloc(struct cache_set *c)
{
	unsigned i;

	/* XXX: doesn't check for errors */

	closure_init_unlocked(&c->gc);

	for (i = 0; i < mca_reserve(c); i++)
		mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL);
		if (!mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL))
			return -ENOMEM;

	list_splice_init(&c->btree_cache,
			 &c->btree_cache_freeable);
@@ -1456,9 +1454,8 @@ size_t bch_btree_gc_finish(struct cache_set *c)
	return available;
}

static void bch_btree_gc(struct closure *cl)
static void bch_btree_gc(struct cache_set *c)
{
	struct cache_set *c = container_of(cl, struct cache_set, gc.cl);
	int ret;
	unsigned long available;
	struct gc_stat stats;
@@ -1483,7 +1480,7 @@ static void bch_btree_gc(struct closure *cl)

	if (ret) {
		pr_warn("gc failed!");
		continue_at(cl, bch_btree_gc, bch_gc_wq);
		return;
	}

	/* Possibly wait for new UUIDs or whatever to hit disk */
@@ -1505,12 +1502,35 @@ static void bch_btree_gc(struct closure *cl)

	trace_bcache_gc_end(c);

	continue_at(cl, bch_moving_gc, bch_gc_wq);
	bch_moving_gc(c);
}

void bch_queue_gc(struct cache_set *c)
/*
 * Main loop of the dedicated garbage-collection kthread.
 *
 * Runs one gc pass via bch_btree_gc(), then sleeps until either
 * wake_up_gc() wakes it (wake_up_process) or kthread_stop() asks it
 * to exit.  arg is the cache_set this thread services.
 */
static int bch_gc_thread(void *arg)
{
	/* NOTE(review): the next line is the removed body of the old
	 * bch_queue_gc() left over from this diff rendering; it is not
	 * part of the new function. */
	closure_trylock_call(&c->gc.cl, bch_btree_gc, bch_gc_wq, &c->cl);
	struct cache_set *c = arg;

	while (1) {
		bch_btree_gc(c);

		/* Set state before testing the stop flag so a concurrent
		 * kthread_stop()/wake_up_process() between the check and
		 * schedule() cannot be missed. */
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop())
			break;

		try_to_freeze();	/* cooperate with system suspend */
		schedule();		/* sleep until the next wake_up_gc() */
	}

	return 0;
}

/*
 * Create the gc kthread for cache set @c without running it.
 *
 * Returns 0 on success, or the PTR_ERR() from kthread_create() on
 * failure.  The thread is left sleeping; it only does work once woken
 * via wake_up_gc().
 *
 * NOTE(review): set_task_state() on a task other than current is racy
 * in general; presumably safe here because the freshly created thread
 * has never been woken -- TODO confirm against kthread_create()
 * semantics.
 */
int bch_gc_thread_start(struct cache_set *c)
{
	c->gc_thread = kthread_create(bch_gc_thread, c, "bcache_gc");
	if (IS_ERR(c->gc_thread))
		return PTR_ERR(c->gc_thread);

	set_task_state(c->gc_thread, TASK_INTERRUPTIBLE);
	return 0;
}

/* Initial partial gc */
@@ -2480,14 +2500,12 @@ void bch_btree_exit(void)
{
	if (btree_io_wq)
		destroy_workqueue(btree_io_wq);
	if (bch_gc_wq)
		destroy_workqueue(bch_gc_wq);
}

int __init bch_btree_init(void)
{
	if (!(bch_gc_wq = create_singlethread_workqueue("bch_btree_gc")) ||
	    !(btree_io_wq = create_singlethread_workqueue("bch_btree_io")))
	btree_io_wq = create_singlethread_workqueue("bch_btree_io");
	if (!btree_io_wq)
		return -ENOMEM;

	return 0;
+8 −2
Original line number Diff line number Diff line
@@ -388,12 +388,18 @@ int bch_btree_insert(struct btree_op *, struct cache_set *, struct keylist *);

int bch_btree_search_recurse(struct btree *, struct btree_op *);

void bch_queue_gc(struct cache_set *);
int bch_gc_thread_start(struct cache_set *);
size_t bch_btree_gc_finish(struct cache_set *);
void bch_moving_gc(struct closure *);
void bch_moving_gc(struct cache_set *);
int bch_btree_check(struct cache_set *, struct btree_op *);
uint8_t __bch_btree_mark_key(struct cache_set *, int, struct bkey *);

/*
 * Kick the gc kthread, if one has been started.  The NULL check makes
 * this safe to call before bch_gc_thread_start() -- assumes gc_thread
 * is zero-initialized at cache_set allocation; TODO confirm against
 * the allocation path.
 */
static inline void wake_up_gc(struct cache_set *c)
{
	if (c->gc_thread)
		wake_up_process(c->gc_thread);
}

void bch_keybuf_init(struct keybuf *);
void bch_refill_keybuf(struct cache_set *, struct keybuf *, struct bkey *,
		       keybuf_pred_fn *);
+14 −21
Original line number Diff line number Diff line
@@ -57,8 +57,7 @@ static void write_moving_finish(struct closure *cl)

	bch_keybuf_del(&io->s.op.c->moving_gc_keys, io->w);

	atomic_dec_bug(&io->s.op.c->in_flight);
	closure_wake_up(&io->s.op.c->moving_gc_wait);
	up(&io->s.op.c->moving_in_flight);

	closure_return_with_destructor(cl, moving_io_destructor);
}
@@ -113,7 +112,7 @@ static void write_moving(struct closure *cl)
		bch_data_insert(&s->op.cl);
	}

	continue_at(cl, write_moving_finish, bch_gc_wq);
	continue_at(cl, write_moving_finish, system_wq);
}

static void read_moving_submit(struct closure *cl)
@@ -124,15 +123,17 @@ static void read_moving_submit(struct closure *cl)

	bch_submit_bbio(bio, s->op.c, &io->w->key, 0);

	continue_at(cl, write_moving, bch_gc_wq);
	continue_at(cl, write_moving, system_wq);
}

static void read_moving(struct closure *cl)
static void read_moving(struct cache_set *c)
{
	struct cache_set *c = container_of(cl, struct cache_set, moving_gc);
	struct keybuf_key *w;
	struct moving_io *io;
	struct bio *bio;
	struct closure cl;

	closure_init_stack(&cl);

	/* XXX: if we error, background writeback could stall indefinitely */

@@ -164,13 +165,8 @@ static void read_moving(struct closure *cl)

		trace_bcache_gc_copy(&w->key);

		closure_call(&io->s.cl, read_moving_submit, NULL, &c->gc.cl);

		if (atomic_inc_return(&c->in_flight) >= 64) {
			closure_wait_event(&c->moving_gc_wait, cl,
					   atomic_read(&c->in_flight) < 64);
			continue_at(cl, read_moving, bch_gc_wq);
		}
		down(&c->moving_in_flight);
		closure_call(&io->s.cl, read_moving_submit, NULL, &cl);
	}

	if (0) {
@@ -180,7 +176,7 @@ err: if (!IS_ERR_OR_NULL(w->private))
		bch_keybuf_del(&c->moving_gc_keys, w);
	}

	closure_return(cl);
	closure_sync(&cl);
}

static bool bucket_cmp(struct bucket *l, struct bucket *r)
@@ -193,15 +189,14 @@ static unsigned bucket_heap_top(struct cache *ca)
	return GC_SECTORS_USED(heap_peek(&ca->heap));
}

void bch_moving_gc(struct closure *cl)
void bch_moving_gc(struct cache_set *c)
{
	struct cache_set *c = container_of(cl, struct cache_set, gc.cl);
	struct cache *ca;
	struct bucket *b;
	unsigned i;

	if (!c->copy_gc_enabled)
		closure_return(cl);
		return;

	mutex_lock(&c->bucket_lock);

@@ -242,13 +237,11 @@ void bch_moving_gc(struct closure *cl)

	c->moving_gc_keys.last_scanned = ZERO_KEY;

	closure_init(&c->moving_gc, cl);
	read_moving(&c->moving_gc);

	closure_return(cl);
	read_moving(c);
}

/*
 * One-time initialization of moving-gc state for a cache set: the
 * keybuf that feeds moving gc, and the semaphore capping in-flight
 * moving-gc bios at 64 (replaces the old in_flight atomic + closure
 * waitlist throttling).
 */
void bch_moving_init_cache_set(struct cache_set *c)
{
	bch_keybuf_init(&c->moving_gc_keys);
	sema_init(&c->moving_in_flight, 64);
}
Loading