Commit 84f97dc2 authored by Jens Axboe
Browse files

io_uring: make io_cqring_events() take 'ctx' as argument



The rings can be derived from the ctx, and we need the ctx there for
a future change.

No functional changes in this patch.

Reviewed-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 2665abfd
Loading
Loading
Loading
Loading
+6 −4
Original line number Diff line number Diff line
@@ -866,8 +866,10 @@ static void io_put_req(struct io_kiocb *req, struct io_kiocb **nxtptr)
	}
}

static unsigned io_cqring_events(struct io_rings *rings)
static unsigned io_cqring_events(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;

	/* See comment at the top of this file */
	smp_rmb();
	return READ_ONCE(rings->cq.tail) - READ_ONCE(rings->cq.head);
@@ -1023,7 +1025,7 @@ static int __io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
		 * If we do, we can potentially be spinning for commands that
		 * already triggered a CQE (eg in error).
		 */
		if (io_cqring_events(ctx->rings))
		if (io_cqring_events(ctx))
			break;

		/*
@@ -3076,7 +3078,7 @@ static inline bool io_should_wake(struct io_wait_queue *iowq)
	 * started waiting. For timeouts, we always want to return to userspace,
	 * regardless of event count.
	 */
	return io_cqring_events(ctx->rings) >= iowq->to_wait ||
	return io_cqring_events(ctx) >= iowq->to_wait ||
			atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
}

@@ -3111,7 +3113,7 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
	struct io_rings *rings = ctx->rings;
	int ret = 0;

	if (io_cqring_events(rings) >= min_events)
	if (io_cqring_events(ctx) >= min_events)
		return 0;

	if (sig) {