Commit 634f9e46 authored by Tejun Heo, committed by Jens Axboe

blk-mq: remove REQ_ATOM_COMPLETE usages from blk-mq

After the recent updates to use generation-number and state-based
synchronization, blk-mq no longer depends on REQ_ATOM_COMPLETE except
to avoid firing the same timeout multiple times.
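
The synchronization this refers to works roughly as sketched below. This
is a minimal illustration, not the kernel code: the real rq->gstate is a
seqcount-protected u64 that packs the request state together with a
generation counter, while the sketch collapses it to a single variable
(initialization details elided); all sketch_* names are made up for this
example.

	#include <stdbool.h>

	struct sketch_rq {
		unsigned long gstate;		/* bumped on each (re)issue */
		unsigned long aborted_gstate;	/* timeout path's snapshot */
	};

	/* timeout path: claim the request by recording its generation */
	static void sketch_claim_for_timeout(struct sketch_rq *rq)
	{
		rq->aborted_gstate = rq->gstate;
	}

	/*
	 * completion path: back off if the timeout path has claimed us;
	 * a completed-and-reissued request has bumped its generation and
	 * no longer matches.  This mirrors the
	 * blk_mq_rq_aborted_gstate(rq) != rq->gstate test in the first
	 * hunk below.
	 */
	static bool sketch_may_complete(struct sketch_rq *rq)
	{
		return rq->aborted_gstate != rq->gstate;
	}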

Remove all REQ_ATOM_COMPLETE usages and use a new rq_flags flag
RQF_MQ_TIMEOUT_EXPIRED to avoid firing the same timeout multiple
times.  This removes atomic bitops from hot paths too.
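
Concretely: the old exclusion relied on blk_mark_rq_complete(), an atomic
test_and_set_bit() on REQ_ATOM_COMPLETE, in the completion and timeout
paths; the new flag is an ordinary rq_flags bit touched only from the
timeout machinery.  A self-contained contrast, with stand-in sketch_*
names and C11 atomics in place of the kernel's bitops:

	#include <stdatomic.h>
	#include <stdbool.h>

	#define SKETCH_ATOM_COMPLETE	(1u << 0)	/* stands in for REQ_ATOM_COMPLETE */
	#define SKETCH_TIMEOUT_EXPIRED	(1u << 20)	/* stands in for RQF_MQ_TIMEOUT_EXPIRED */

	struct sketch_rq {
		_Atomic unsigned int atomic_flags;	/* old: atomic bitops */
		unsigned int rq_flags;			/* new: plain accesses */
	};

	/* before: each caller had to win an atomic read-modify-write */
	static bool sketch_mark_complete(struct sketch_rq *rq)
	{
		return !(atomic_fetch_or(&rq->atomic_flags,
					 SKETCH_ATOM_COMPLETE) & SKETCH_ATOM_COMPLETE);
	}

	/*
	 * after: only the timeout handler writes the flag, so a plain
	 * (non-atomic) read-modify-write is enough
	 */
	static void sketch_mark_timeout_expired(struct sketch_rq *rq)
	{
		rq->rq_flags |= SKETCH_TIMEOUT_EXPIRED;
	}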

v2: Removed blk_clear_rq_complete() from blk_mq_rq_timed_out().

v3: Added RQF_MQ_TIMEOUT_EXPIRED flag.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: "jianchao.wang" <jianchao.w.wang@oracle.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 358f70da
block/blk-mq.c +7 −8
@@ -634,8 +634,7 @@ void blk_mq_complete_request(struct request *rq)
 	 * hctx_lock() covers both issue and completion paths.
 	 */
 	hctx_lock(hctx, &srcu_idx);
-	if (blk_mq_rq_aborted_gstate(rq) != rq->gstate &&
-	    !blk_mark_rq_complete(rq))
+	if (blk_mq_rq_aborted_gstate(rq) != rq->gstate)
 		__blk_mq_complete_request(rq);
 	hctx_unlock(hctx, srcu_idx);
 }
@@ -685,8 +684,6 @@ void blk_mq_start_request(struct request *rq)
 	preempt_enable();
 
 	set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
-	if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
-		clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
 
 	if (q->dma_drain_size && blk_rq_bytes(rq)) {
 		/*
@@ -837,6 +834,8 @@ static void blk_mq_rq_timed_out(struct request *req, bool reserved)
 	if (!test_bit(REQ_ATOM_STARTED, &req->atomic_flags))
 		return;
 
+	req->rq_flags |= RQF_MQ_TIMEOUT_EXPIRED;
+
 	if (ops->timeout)
 		ret = ops->timeout(req, reserved);
 
@@ -852,7 +851,6 @@ static void blk_mq_rq_timed_out(struct request *req, bool reserved)
 		 */
 		blk_mq_rq_update_aborted_gstate(req, 0);
 		blk_add_timer(req);
-		blk_clear_rq_complete(req);
 		break;
 	case BLK_EH_NOT_HANDLED:
 		break;
@@ -871,7 +869,8 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,

 	might_sleep();
 
-	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
+	if ((rq->rq_flags & RQF_MQ_TIMEOUT_EXPIRED) ||
+	    !test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
 		return;
 
 	/* read coherent snapshots of @rq->gstate and @rq->deadline */
@@ -906,8 +905,8 @@ static void blk_mq_terminate_expired(struct blk_mq_hw_ctx *hctx,
 	 * now guaranteed to see @rq->aborted_gstate and yield.  If
 	 * @rq->aborted_gstate still matches @rq->gstate, @rq is ours.
 	 */
-	if (READ_ONCE(rq->gstate) == rq->aborted_gstate &&
-	    !blk_mark_rq_complete(rq))
+	if (!(rq->rq_flags & RQF_MQ_TIMEOUT_EXPIRED) &&
+	    READ_ONCE(rq->gstate) == rq->aborted_gstate)
 		blk_mq_rq_timed_out(rq, reserved);
 }

block/blk-timeout.c +1 −0
@@ -214,6 +214,7 @@ void blk_add_timer(struct request *req)
 		req->timeout = q->rq_timeout;
 
 	req->deadline = jiffies + req->timeout;
+	req->rq_flags &= ~RQF_MQ_TIMEOUT_EXPIRED;
 
 	/*
 	 * Only the non-mq case needs to add the request to a protected list.
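
The clearing above is what makes the flag per-expiry rather than
per-request: blk_mq_rq_timed_out() sets it before calling ->timeout(),
and a BLK_EH_RESET_TIMER return re-arms the timer through
blk_add_timer(), which clears it again so the request can expire anew.
A self-contained sketch of that lifecycle (sketch_* names are stand-ins,
not kernel API):

	#include <stdbool.h>

	#define SKETCH_TIMEOUT_EXPIRED	(1u << 20)

	struct sketch_rq { unsigned int rq_flags; };

	static void sketch_add_timer(struct sketch_rq *rq)
	{
		/* (re)arming re-enables one future expiry */
		rq->rq_flags &= ~SKETCH_TIMEOUT_EXPIRED;
	}

	/* returns true if this call actually fired the timeout */
	static bool sketch_timed_out(struct sketch_rq *rq, bool reset_timer)
	{
		if (rq->rq_flags & SKETCH_TIMEOUT_EXPIRED)
			return false;		/* already fired for this expiry */
		rq->rq_flags |= SKETCH_TIMEOUT_EXPIRED;
		if (reset_timer)
			sketch_add_timer(rq);	/* BLK_EH_RESET_TIMER path */
		return true;
	}
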
include/linux/blkdev.h +2 −0
@@ -125,6 +125,8 @@ typedef __u32 __bitwise req_flags_t;
 #define RQF_SPECIAL_PAYLOAD	((__force req_flags_t)(1 << 18))
 /* The per-zone write lock is held for this request */
 #define RQF_ZONE_WRITE_LOCKED	((__force req_flags_t)(1 << 19))
+/* timeout is expired */
+#define RQF_MQ_TIMEOUT_EXPIRED	((__force req_flags_t)(1 << 20))
 
 /* flags that prevent us from merging requests: */
 #define RQF_NOMERGE_FLAGS \