Commit a6088845 authored by Jianchao Wang, committed by Jens Axboe

block: kyber: make kyber more friendly with merging

Currently, kyber is very unfriendly to merging: it depends on the ctx
rq_list to do merging, but most of the time it leaves no requests
there. This is because even when the tokens of one domain are used up,
kyber still tries to dispatch requests from the other domains, flushing
the ctx rq_list in the process.

To improve this, set up a kyber_ctx_queue (kcq), which is similar to a
ctx but keeps a separate rq_list per scheduling domain, and build the
same mapping between kcq and khd as between ctx and hctx. We can then
merge, insert, and dispatch per domain separately, and flush a kcq's
rq_list only once a domain token has been obtained. If one domain's
tokens are used up, its requests stay on that domain's rq_list, where
they may be merged with following I/O.
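
In code terms, the kcq lookup mirrors the ctx-to-hctx mapping. A sketch
distilled from the patch below (khd is the per-hctx scheduler data, rq
an inserted request):

	/* The kcq for a request is indexed by its software queue's
	 * index_hw, the same mapping blk-mq uses between ctx and hctx.
	 */
	struct kyber_ctx_queue *kcq = &khd->kcqs[rq->mq_ctx->index_hw];
	struct list_head *list = &kcq->rq_list[kyber_sched_domain(rq->cmd_flags)];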

Below are my test results on a machine with 8 cores and an INTEL
SSDPEKKR128G7 NVMe card:

fio size=256m ioengine=libaio iodepth=64 direct=1 numjobs=8
seq/random
+------+----------+-----------+------------+-----------------+---------+
|patch?| bw(MB/s) |   iops    | slat(usec) |    clat(usec)   |  merge  |
+------+----------+-----------+------------+-----------------+---------+
| w/o  |  606/612 | 151k/153k |  6.89/7.03 | 3349.21/3305.40 |   0/0   |
+------+----------+-----------+------------+-----------------+---------+
| w/   | 1083/616 | 277k/154k |  4.93/6.95 | 1830.62/3279.95 | 223k/3k |
+------+----------+-----------+------------+-----------------+---------+
With numjobs set to 16, bandwidth and IOPS reach 1662MB/s and 425k on
my platform.
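
For reference, the run above corresponds to an fio invocation along
these lines (job name, target device, and rw mode are assumptions; only
the listed parameters come from the setup above):

	fio --name=kyber-merge-test --filename=/dev/nvme0n1 --size=256m \
	    --ioengine=libaio --iodepth=64 --direct=1 --numjobs=8 \
	    --rw=read    # --rw=randread for the random column (assumed)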

Signed-off-by: Jianchao Wang <jianchao.w.wang@oracle.com>
Tested-by: Holger Hoffstätte <holger@applied-asynchrony.com>
Reviewed-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 9c558734
block/kyber-iosched.c +158 −32
@@ -72,6 +72,19 @@ static const unsigned int kyber_batch_size[] = {
 	[KYBER_OTHER] = 8,
 };
 
+/*
+ * There is the same mapping between ctx & hctx and kcq & khd;
+ * we use request->mq_ctx->index_hw to index the kcq in khd.
+ */
+struct kyber_ctx_queue {
+	/*
+	 * Used to ensure operations on rq_list and kcq_map to be an atomic one.
+	 * Also protect the rqs on rq_list when merge.
+	 */
+	spinlock_t lock;
+	struct list_head rq_list[KYBER_NUM_DOMAINS];
+} ____cacheline_aligned_in_smp;
+
 struct kyber_queue_data {
 	struct request_queue *q;
 
@@ -99,6 +112,8 @@ struct kyber_hctx_data {
 	struct list_head rqs[KYBER_NUM_DOMAINS];
 	unsigned int cur_domain;
 	unsigned int batching;
+	struct kyber_ctx_queue *kcqs;
+	struct sbitmap kcq_map[KYBER_NUM_DOMAINS];
 	wait_queue_entry_t domain_wait[KYBER_NUM_DOMAINS];
 	struct sbq_wait_state *domain_ws[KYBER_NUM_DOMAINS];
 	atomic_t wait_index[KYBER_NUM_DOMAINS];
@@ -107,10 +122,8 @@ struct kyber_hctx_data {
 static int kyber_domain_wake(wait_queue_entry_t *wait, unsigned mode, int flags,
 			     void *key);
 
-static int rq_sched_domain(const struct request *rq)
+static unsigned int kyber_sched_domain(unsigned int op)
 {
-	unsigned int op = rq->cmd_flags;
-
 	if ((op & REQ_OP_MASK) == REQ_OP_READ)
 		return KYBER_READ;
 	else if ((op & REQ_OP_MASK) == REQ_OP_WRITE && op_is_sync(op))
@@ -284,6 +297,11 @@ static unsigned int kyber_sched_tags_shift(struct kyber_queue_data *kqd)
 	return kqd->q->queue_hw_ctx[0]->sched_tags->bitmap_tags.sb.shift;
 }
 
+static int kyber_bucket_fn(const struct request *rq)
+{
+	return kyber_sched_domain(rq->cmd_flags);
+}
+
 static struct kyber_queue_data *kyber_queue_data_alloc(struct request_queue *q)
 {
 	struct kyber_queue_data *kqd;
@@ -297,7 +315,7 @@ static struct kyber_queue_data *kyber_queue_data_alloc(struct request_queue *q)
 		goto err;
 	kqd->q = q;
 
-	kqd->cb = blk_stat_alloc_callback(kyber_stat_timer_fn, rq_sched_domain,
+	kqd->cb = blk_stat_alloc_callback(kyber_stat_timer_fn, kyber_bucket_fn,
 					  KYBER_NUM_DOMAINS, kqd);
 	if (!kqd->cb)
 		goto err_kqd;
@@ -376,6 +394,15 @@ static void kyber_exit_sched(struct elevator_queue *e)
 	kfree(kqd);
 }
 
+static void kyber_ctx_queue_init(struct kyber_ctx_queue *kcq)
+{
+	unsigned int i;
+
+	spin_lock_init(&kcq->lock);
+	for (i = 0; i < KYBER_NUM_DOMAINS; i++)
+		INIT_LIST_HEAD(&kcq->rq_list[i]);
+}
+
 static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
 {
 	struct kyber_queue_data *kqd = hctx->queue->elevator->elevator_data;
@@ -386,6 +413,24 @@ static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
 	if (!khd)
 		return -ENOMEM;
 
+	khd->kcqs = kmalloc_array_node(hctx->nr_ctx,
+				       sizeof(struct kyber_ctx_queue),
+				       GFP_KERNEL, hctx->numa_node);
+	if (!khd->kcqs)
+		goto err_khd;
+
+	for (i = 0; i < hctx->nr_ctx; i++)
+		kyber_ctx_queue_init(&khd->kcqs[i]);
+
+	for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
+		if (sbitmap_init_node(&khd->kcq_map[i], hctx->nr_ctx,
+				      ilog2(8), GFP_KERNEL, hctx->numa_node)) {
+			while (--i >= 0)
+				sbitmap_free(&khd->kcq_map[i]);
+			goto err_kcqs;
+		}
+	}
+
 	spin_lock_init(&khd->lock);
 
 	for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
@@ -405,10 +450,22 @@ static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
 					kqd->async_depth);
 
 	return 0;
+
+err_kcqs:
+	kfree(khd->kcqs);
+err_khd:
+	kfree(khd);
+	return -ENOMEM;
 }
 
 static void kyber_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
 {
+	struct kyber_hctx_data *khd = hctx->sched_data;
+	int i;
+
+	for (i = 0; i < KYBER_NUM_DOMAINS; i++)
+		sbitmap_free(&khd->kcq_map[i]);
+	kfree(khd->kcqs);
 	kfree(hctx->sched_data);
 }
 
@@ -430,7 +487,7 @@ static void rq_clear_domain_token(struct kyber_queue_data *kqd,
 
 	nr = rq_get_domain_token(rq);
 	if (nr != -1) {
-		sched_domain = rq_sched_domain(rq);
+		sched_domain = kyber_sched_domain(rq->cmd_flags);
 		sbitmap_queue_clear(&kqd->domain_tokens[sched_domain], nr,
 				    rq->mq_ctx->cpu);
 	}
@@ -449,11 +506,51 @@ static void kyber_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
 	}
 }
 
+static bool kyber_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio)
+{
+	struct kyber_hctx_data *khd = hctx->sched_data;
+	struct blk_mq_ctx *ctx = blk_mq_get_ctx(hctx->queue);
+	struct kyber_ctx_queue *kcq = &khd->kcqs[ctx->index_hw];
+	unsigned int sched_domain = kyber_sched_domain(bio->bi_opf);
+	struct list_head *rq_list = &kcq->rq_list[sched_domain];
+	bool merged;
+
+	spin_lock(&kcq->lock);
+	merged = blk_mq_bio_list_merge(hctx->queue, rq_list, bio);
+	spin_unlock(&kcq->lock);
+	blk_mq_put_ctx(ctx);
+
+	return merged;
+}
+
 static void kyber_prepare_request(struct request *rq, struct bio *bio)
 {
 	rq_set_domain_token(rq, -1);
 }
 
+static void kyber_insert_requests(struct blk_mq_hw_ctx *hctx,
+				  struct list_head *rq_list, bool at_head)
+{
+	struct kyber_hctx_data *khd = hctx->sched_data;
+	struct request *rq, *next;
+
+	list_for_each_entry_safe(rq, next, rq_list, queuelist) {
+		unsigned int sched_domain = kyber_sched_domain(rq->cmd_flags);
+		struct kyber_ctx_queue *kcq = &khd->kcqs[rq->mq_ctx->index_hw];
+		struct list_head *head = &kcq->rq_list[sched_domain];
+
+		spin_lock(&kcq->lock);
+		if (at_head)
+			list_move(&rq->queuelist, head);
+		else
+			list_move_tail(&rq->queuelist, head);
+		sbitmap_set_bit(&khd->kcq_map[sched_domain],
+				rq->mq_ctx->index_hw);
+		blk_mq_sched_request_inserted(rq);
+		spin_unlock(&kcq->lock);
+	}
+}
+
 static void kyber_finish_request(struct request *rq)
 {
 	struct kyber_queue_data *kqd = rq->q->elevator->elevator_data;
@@ -472,7 +569,7 @@ static void kyber_completed_request(struct request *rq)
 	 * Check if this request met our latency goal. If not, quickly gather
 	 * some statistics and start throttling.
 	 */
-	sched_domain = rq_sched_domain(rq);
+	sched_domain = kyber_sched_domain(rq->cmd_flags);
 	switch (sched_domain) {
 	case KYBER_READ:
 		target = kqd->read_lat_nsec;
@@ -498,19 +595,38 @@ static void kyber_completed_request(struct request *rq)
 		blk_stat_activate_msecs(kqd->cb, 10);
 }
 
-static void kyber_flush_busy_ctxs(struct kyber_hctx_data *khd,
-				  struct blk_mq_hw_ctx *hctx)
+struct flush_kcq_data {
+	struct kyber_hctx_data *khd;
+	unsigned int sched_domain;
+	struct list_head *list;
+};
+
+static bool flush_busy_kcq(struct sbitmap *sb, unsigned int bitnr, void *data)
 {
-	LIST_HEAD(rq_list);
-	struct request *rq, *next;
+	struct flush_kcq_data *flush_data = data;
+	struct kyber_ctx_queue *kcq = &flush_data->khd->kcqs[bitnr];
 
-	blk_mq_flush_busy_ctxs(hctx, &rq_list);
-	list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
-		unsigned int sched_domain;
+	spin_lock(&kcq->lock);
+	list_splice_tail_init(&kcq->rq_list[flush_data->sched_domain],
+			      flush_data->list);
+	sbitmap_clear_bit(sb, bitnr);
+	spin_unlock(&kcq->lock);
 
-		sched_domain = rq_sched_domain(rq);
-		list_move_tail(&rq->queuelist, &khd->rqs[sched_domain]);
-	}
+	return true;
+}
+
+static void kyber_flush_busy_kcqs(struct kyber_hctx_data *khd,
+				  unsigned int sched_domain,
+				  struct list_head *list)
+{
+	struct flush_kcq_data data = {
+		.khd = khd,
+		.sched_domain = sched_domain,
+		.list = list,
+	};
+
+	sbitmap_for_each_set(&khd->kcq_map[sched_domain],
+			     flush_busy_kcq, &data);
 }
 
 static int kyber_domain_wake(wait_queue_entry_t *wait, unsigned mode, int flags,
@@ -573,26 +689,23 @@ static int kyber_get_domain_token(struct kyber_queue_data *kqd,
 static struct request *
 kyber_dispatch_cur_domain(struct kyber_queue_data *kqd,
 			  struct kyber_hctx_data *khd,
-			  struct blk_mq_hw_ctx *hctx,
-			  bool *flushed)
+			  struct blk_mq_hw_ctx *hctx)
 {
 	struct list_head *rqs;
 	struct request *rq;
 	int nr;
 
 	rqs = &khd->rqs[khd->cur_domain];
-	rq = list_first_entry_or_null(rqs, struct request, queuelist);
 
 	/*
-	 * If there wasn't already a pending request and we haven't flushed the
-	 * software queues yet, flush the software queues and check again.
+	 * If we already have a flushed request, then we just need to get a
+	 * token for it. Otherwise, if there are pending requests in the kcqs,
+	 * flush the kcqs, but only if we can get a token. If not, we should
+	 * leave the requests in the kcqs so that they can be merged. Note that
+	 * khd->lock serializes the flushes, so if we observed any bit set in
+	 * the kcq_map, we will always get a request.
 	 */
-	if (!rq && !*flushed) {
-		kyber_flush_busy_ctxs(khd, hctx);
-		*flushed = true;
-		rq = list_first_entry_or_null(rqs, struct request, queuelist);
-	}
-
+	rq = list_first_entry_or_null(rqs, struct request, queuelist);
 	if (rq) {
 		nr = kyber_get_domain_token(kqd, khd, hctx);
 		if (nr >= 0) {
@@ -601,6 +714,16 @@ kyber_dispatch_cur_domain(struct kyber_queue_data *kqd,
 			list_del_init(&rq->queuelist);
 			return rq;
 		}
+	} else if (sbitmap_any_bit_set(&khd->kcq_map[khd->cur_domain])) {
+		nr = kyber_get_domain_token(kqd, khd, hctx);
+		if (nr >= 0) {
+			kyber_flush_busy_kcqs(khd, khd->cur_domain, rqs);
+			rq = list_first_entry(rqs, struct request, queuelist);
+			khd->batching++;
+			rq_set_domain_token(rq, nr);
+			list_del_init(&rq->queuelist);
+			return rq;
+		}
 	}
 
 	/* There were either no pending requests or no tokens. */
@@ -611,7 +734,6 @@ static struct request *kyber_dispatch_request(struct blk_mq_hw_ctx *hctx)
 {
 	struct kyber_queue_data *kqd = hctx->queue->elevator->elevator_data;
 	struct kyber_hctx_data *khd = hctx->sched_data;
-	bool flushed = false;
 	struct request *rq;
 	int i;
 
@@ -622,7 +744,7 @@ static struct request *kyber_dispatch_request(struct blk_mq_hw_ctx *hctx)
 	 * from the batch.
 	 */
 	if (khd->batching < kyber_batch_size[khd->cur_domain]) {
-		rq = kyber_dispatch_cur_domain(kqd, khd, hctx, &flushed);
+		rq = kyber_dispatch_cur_domain(kqd, khd, hctx);
 		if (rq)
 			goto out;
 	}
@@ -643,7 +765,7 @@ static struct request *kyber_dispatch_request(struct blk_mq_hw_ctx *hctx)
 		else
 			khd->cur_domain++;
 
-		rq = kyber_dispatch_cur_domain(kqd, khd, hctx, &flushed);
+		rq = kyber_dispatch_cur_domain(kqd, khd, hctx);
 		if (rq)
 			goto out;
 	}
@@ -660,10 +782,12 @@ static bool kyber_has_work(struct blk_mq_hw_ctx *hctx)
 	int i;
 
 	for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
-		if (!list_empty_careful(&khd->rqs[i]))
+		if (!list_empty_careful(&khd->rqs[i]) ||
+		    sbitmap_any_bit_set(&khd->kcq_map[i]))
 			return true;
 	}
-	return sbitmap_any_bit_set(&hctx->ctx_map);
+
+	return false;
 }
 
 #define KYBER_LAT_SHOW_STORE(op)					\
@@ -834,7 +958,9 @@ static struct elevator_type kyber_sched = {
 		.init_hctx = kyber_init_hctx,
 		.exit_hctx = kyber_exit_hctx,
 		.limit_depth = kyber_limit_depth,
+		.bio_merge = kyber_bio_merge,
 		.prepare_request = kyber_prepare_request,
+		.insert_requests = kyber_insert_requests,
 		.finish_request = kyber_finish_request,
 		.requeue_request = kyber_finish_request,
 		.completed_request = kyber_completed_request,
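
With the patch applied, kyber picks up the new bio_merge and
insert_requests hooks automatically; to exercise it, select the
scheduler via the usual sysfs knob (device name assumed):

	echo kyber > /sys/block/nvme0n1/queue/scheduler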