Commit 079076b3 authored by Christoph Hellwig, committed by Jens Axboe

block: remove deadline __deadline manipulation helpers

No users are left since the removal of the legacy request interface, so
we can remove all the magic bit stealing now and make it a normal field.

But use WRITE_ONCE/READ_ONCE on the new deadline field, given that we
don't seem to have any mechanism to guarantee a new value actually
gets seen by other threads.

Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 8f4236d9
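
The WRITE_ONCE()/READ_ONCE() pairing the message refers to is the standard
kernel idiom for a field that one thread stores and another loads without a
lock: it forces a single, untorn access and keeps the compiler from caching
or refetching the value. A minimal sketch of the idiom on a deadline-style
field, with hypothetical struct and function names:

#include <linux/compiler.h>
#include <linux/jiffies.h>
#include <linux/types.h>

struct watched_req {
	unsigned long deadline;		/* stored and loaded without a lock */
};

static void arm_watchdog(struct watched_req *r, unsigned long timeout)
{
	/* single, untorn store that the timeout scan will eventually observe */
	WRITE_ONCE(r->deadline, jiffies + timeout);
}

static bool watchdog_expired(struct watched_req *r)
{
	/* single, untorn load of the most recently published deadline */
	unsigned long deadline = READ_ONCE(r->deadline);

	return time_after_eq(jiffies, deadline);
}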
block/blk-mq.c (+2 −2)
@@ -325,7 +325,7 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
 	rq->special = NULL;
 	/* tag was already set */
 	rq->extra_len = 0;
-	rq->__deadline = 0;
+	WRITE_ONCE(rq->deadline, 0);
 
 	rq->timeout = 0;

@@ -839,7 +839,7 @@ static bool blk_mq_req_expired(struct request *rq, unsigned long *next)
 	if (rq->rq_flags & RQF_TIMED_OUT)
 		return false;
 
-	deadline = blk_rq_deadline(rq);
+	deadline = READ_ONCE(rq->deadline);
 	if (time_after_eq(jiffies, deadline))
 		return true;
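
The time_after_eq() test above is wraparound-safe: jiffies is an unsigned
counter that is expected to overflow, so deadlines are compared by signed
difference rather than by magnitude. A minimal sketch of the logic behind
the real macro (which lives in <linux/jiffies.h> and also type-checks its
arguments):

/* sketch_time_after_eq(a, b) is true when a is at or past b, even if the
 * counter wrapped in between; the real kernel macro is time_after_eq() */
#define sketch_time_after_eq(a, b)	((long)((a) - (b)) >= 0)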

block/blk-timeout.c (+5 −3)
@@ -84,7 +84,7 @@ void blk_abort_request(struct request *req)
 	 * immediately and that scan sees the new timeout value.
 	 * No need for fancy synchronizations.
 	 */
-	blk_rq_set_deadline(req, jiffies);
+	WRITE_ONCE(req->deadline, jiffies);
 	kblockd_schedule_work(&req->q->timeout_work);
 }
 EXPORT_SYMBOL_GPL(blk_abort_request);
@@ -121,14 +121,16 @@ void blk_add_timer(struct request *req)
 		req->timeout = q->rq_timeout;
 
 	req->rq_flags &= ~RQF_TIMED_OUT;
-	blk_rq_set_deadline(req, jiffies + req->timeout);
+
+	expiry = jiffies + req->timeout;
+	WRITE_ONCE(req->deadline, expiry);
 
 	/*
 	 * If the timer isn't already pending or this timeout is earlier
 	 * than an existing one, modify the timer. Round up to next nearest
 	 * second.
 	 */
-	expiry = blk_rq_timeout(round_jiffies_up(blk_rq_deadline(req)));
+	expiry = blk_rq_timeout(round_jiffies_up(expiry));
 
 	if (!timer_pending(&q->timeout) ||
 	    time_before(expiry, q->timeout.expires)) {
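
blk_add_timer() deliberately avoids reprogramming the queue-wide timer on
every request: expiries are rounded up to a whole second so that nearby
deadlines share one wakeup, and the timer is only modified when none is
pending or the new expiry is earlier than the armed one. A minimal sketch of
that pattern with a hypothetical helper name (the timer API calls are the
real kernel ones):

#include <linux/timer.h>
#include <linux/jiffies.h>

/* hypothetical helper illustrating the batched-timer pattern above */
static void arm_shared_timeout(struct timer_list *timer, unsigned long deadline)
{
	/* round up to a whole second so nearby deadlines share one wakeup */
	unsigned long expiry = round_jiffies_up(deadline);

	/* only reprogram the timer if it would otherwise fire too late */
	if (!timer_pending(timer) || time_before(expiry, timer->expires))
		mod_timer(timer, expiry);
}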
block/blk.h (+0 −35)
@@ -238,26 +238,6 @@ void blk_account_io_start(struct request *req, bool new_io);
 void blk_account_io_completion(struct request *req, unsigned int bytes);
 void blk_account_io_done(struct request *req, u64 now);
 
-/*
- * EH timer and IO completion will both attempt to 'grab' the request, make
- * sure that only one of them succeeds. Steal the bottom bit of the
- * __deadline field for this.
- */
-static inline int blk_mark_rq_complete(struct request *rq)
-{
-	return test_and_set_bit(0, &rq->__deadline);
-}
-
-static inline void blk_clear_rq_complete(struct request *rq)
-{
-	clear_bit(0, &rq->__deadline);
-}
-
-static inline bool blk_rq_is_complete(struct request *rq)
-{
-	return test_bit(0, &rq->__deadline);
-}
-
 /*
  * Internal elevator interface
  */
@@ -322,21 +302,6 @@ static inline void req_set_nomerge(struct request_queue *q, struct request *req)
 		q->last_merge = NULL;
 }
 
-/*
- * Steal a bit from this field for legacy IO path atomic IO marking. Note that
- * setting the deadline clears the bottom bit, potentially clearing the
- * completed bit. The user has to be OK with this (current ones are fine).
- */
-static inline void blk_rq_set_deadline(struct request *rq, unsigned long time)
-{
-	rq->__deadline = time & ~0x1UL;
-}
-
-static inline unsigned long blk_rq_deadline(struct request *rq)
-{
-	return rq->__deadline & ~0x1UL;
-}
-
 /*
  * Internal io_context interface
  */
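
The two helper groups deleted above implement the "magic bit stealing" the
commit message refers to: bit 0 of the single __deadline word doubled as an
atomic completed flag (so the EH timer and I/O completion could race on
test_and_set_bit() with exactly one winner), and every deadline store masked
that bit off, costing one jiffy of deadline resolution. A reduced sketch of
the trick with hypothetical names (test_and_set_bit() is the real kernel
primitive):

#include <linux/bitops.h>

struct stolen_bit_req {
	unsigned long __deadline;	/* deadline in the high bits, flag in bit 0 */
};

static inline void sketch_set_deadline(struct stolen_bit_req *rq, unsigned long time)
{
	/* storing a deadline clears the flag; callers must tolerate that */
	rq->__deadline = time & ~0x1UL;
}

static inline int sketch_mark_complete(struct stolen_bit_req *rq)
{
	/* atomic: only one of EH timer and I/O completion wins the race */
	return test_and_set_bit(0, &rq->__deadline);
}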
include/linux/blkdev.h (+1 −3)
@@ -224,9 +224,7 @@ struct request {
 	refcount_t ref;
 
 	unsigned int timeout;
-
-	/* access through blk_rq_set_deadline, blk_rq_deadline */
-	unsigned long __deadline;
+	unsigned long deadline;
 
 	union {
 		struct __call_single_data csd;