Commit 7baa8572 authored by Jens Axboe
Browse files

blk-mq-tag: change busy_iter_fn to return whether to continue or not



We have this functionality in sbitmap, but we don't export it in
blk-mq for users of the tags busy iteration. This can be useful
for stopping the iteration, if the caller doesn't need to find
more requests.

Reviewed-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent c28445fa
Loading
Loading
Loading
Loading
+5 −2
Original line number Diff line number Diff line
@@ -422,15 +422,18 @@ struct show_busy_params {

/*
 * Note: the state of a request may change while this function is in progress,
 * e.g. due to a concurrent blk_mq_finish_request() call.
 * e.g. due to a concurrent blk_mq_finish_request() call. Returns true to
 * keep iterating requests.
 */
static void hctx_show_busy_rq(struct request *rq, void *data, bool reserved)
static bool hctx_show_busy_rq(struct request *rq, void *data, bool reserved)
{
	const struct show_busy_params *params = data;

	if (rq->mq_hctx == params->hctx)
		__blk_mq_debugfs_rq_show(params->m,
					 list_entry_rq(&rq->queuelist));

	return true;
}

static int hctx_busy_show(void *data, struct seq_file *m)
+2 −2
Original line number Diff line number Diff line
@@ -236,7 +236,7 @@ static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
	 * test and set the bit before assigning ->rqs[].
	 */
	if (rq && rq->q == hctx->queue)
		iter_data->fn(hctx, rq, iter_data->data, reserved);
		return iter_data->fn(hctx, rq, iter_data->data, reserved);
	return true;
}

@@ -289,7 +289,7 @@ static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
	 */
	rq = tags->rqs[bitnr];
	if (rq && blk_mq_request_started(rq))
		iter_data->fn(rq, iter_data->data, reserved);
		return iter_data->fn(rq, iter_data->data, reserved);

	return true;
}
+11 −5
Original line number Diff line number Diff line
@@ -94,7 +94,7 @@ struct mq_inflight {
	unsigned int *inflight;
};

static void blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx,
static bool blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx,
				  struct request *rq, void *priv,
				  bool reserved)
{
@@ -109,6 +109,8 @@ static void blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx,
		mi->inflight[0]++;
	if (mi->part->partno)
		mi->inflight[1]++;

	return true;
}

void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
@@ -120,7 +122,7 @@ void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
	blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
}

static void blk_mq_check_inflight_rw(struct blk_mq_hw_ctx *hctx,
static bool blk_mq_check_inflight_rw(struct blk_mq_hw_ctx *hctx,
				     struct request *rq, void *priv,
				     bool reserved)
{
@@ -128,6 +130,8 @@ static void blk_mq_check_inflight_rw(struct blk_mq_hw_ctx *hctx,

	if (rq->part == mi->part)
		mi->inflight[rq_data_dir(rq)]++;

	return true;
}

void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
@@ -821,7 +825,7 @@ static bool blk_mq_req_expired(struct request *rq, unsigned long *next)
	return false;
}

static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
static bool blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
		struct request *rq, void *priv, bool reserved)
{
	unsigned long *next = priv;
@@ -831,7 +835,7 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
	 * so we're not unnecessarilly synchronizing across CPUs.
	 */
	if (!blk_mq_req_expired(rq, next))
		return;
		return true;

	/*
	 * We have reason to believe the request may be expired. Take a
@@ -843,7 +847,7 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
	 * timeout handler to posting a natural completion.
	 */
	if (!refcount_inc_not_zero(&rq->ref))
		return;
		return true;

	/*
	 * The request is now locked and cannot be reallocated underneath the
@@ -855,6 +859,8 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
		blk_mq_rq_timed_out(rq, reserved);
	if (refcount_dec_and_test(&rq->ref))
		__blk_mq_free_request(rq);

	return true;
}

static void blk_mq_timeout_work(struct work_struct *work)
+6 −3
Original line number Diff line number Diff line
@@ -2720,7 +2720,7 @@ static void mtip_softirq_done_fn(struct request *rq)
	blk_mq_end_request(rq, cmd->status);
}

static void mtip_abort_cmd(struct request *req, void *data, bool reserved)
static bool mtip_abort_cmd(struct request *req, void *data, bool reserved)
{
	struct mtip_cmd *cmd = blk_mq_rq_to_pdu(req);
	struct driver_data *dd = data;
@@ -2730,14 +2730,16 @@ static void mtip_abort_cmd(struct request *req, void *data, bool reserved)
	clear_bit(req->tag, dd->port->cmds_to_issue);
	cmd->status = BLK_STS_IOERR;
	mtip_softirq_done_fn(req);
	return true;
}

static void mtip_queue_cmd(struct request *req, void *data, bool reserved)
static bool mtip_queue_cmd(struct request *req, void *data, bool reserved)
{
	struct driver_data *dd = data;

	set_bit(req->tag, dd->port->cmds_to_issue);
	blk_abort_request(req);
	return true;
}

/*
@@ -3920,12 +3922,13 @@ protocol_init_error:
	return rv;
}

static void mtip_no_dev_cleanup(struct request *rq, void *data, bool reserv)
static bool mtip_no_dev_cleanup(struct request *rq, void *data, bool reserv)
{
	struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);

	cmd->status = BLK_STS_IOERR;
	blk_mq_complete_request(rq);
	return true;
}

/*
+2 −1
Original line number Diff line number Diff line
@@ -734,12 +734,13 @@ static void recv_work(struct work_struct *work)
	kfree(args);
}

static void nbd_clear_req(struct request *req, void *data, bool reserved)
static bool nbd_clear_req(struct request *req, void *data, bool reserved)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);

	cmd->status = BLK_STS_IOERR;
	blk_mq_complete_request(req);
	return true;
}

static void nbd_clear_que(struct nbd_device *nbd)
Loading