Commit 863e0560 authored by Pavel Begunkov, committed by Jens Axboe
Browse files

io_uring: track link's head and tail during submit



Explicitly save not only a link's head in io_submit_sqe[s]() but the
tail as well. That's in preparation for keeping linked requests in a
singly linked list.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 018043be
Loading
Loading
Loading
Loading
+16 −8
Original line number Diff line number Diff line
@@ -6536,8 +6536,13 @@ static inline void io_queue_link_head(struct io_kiocb *req,
		io_queue_sqe(req, NULL, cs);
}

/*
 * Tracks the request link chain being built during a submission batch:
 * 'head' is the first request of the current chain (NULL when no chain is
 * open), 'last' is the most recently appended request — kept so new
 * requests can be attached to the tail without walking the list.
 */
struct io_submit_link {
	struct io_kiocb *head;
	struct io_kiocb *last;
};

static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
			 struct io_kiocb **link, struct io_comp_state *cs)
			 struct io_submit_link *link, struct io_comp_state *cs)
{
	struct io_ring_ctx *ctx = req->ctx;
	int ret;
@@ -6549,8 +6554,8 @@ static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
	 * submitted sync once the chain is complete. If none of those
	 * conditions are true (normal request), then just queue it.
	 */
	if (*link) {
		struct io_kiocb *head = *link;
	if (link->head) {
		struct io_kiocb *head = link->head;

		/*
		 * Taking sequential execution of a link, draining both sides
@@ -6571,11 +6576,12 @@ static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
		}
		trace_io_uring_link(ctx, req, head);
		list_add_tail(&req->link_list, &head->link_list);
		link->last = req;

		/* last request of a link, enqueue the link */
		if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) {
			io_queue_link_head(head, cs);
			*link = NULL;
			link->head = NULL;
		}
	} else {
		if (unlikely(ctx->drain_next)) {
@@ -6589,7 +6595,8 @@ static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
			ret = io_req_defer_prep(req, sqe);
			if (unlikely(ret))
				req->flags |= REQ_F_FAIL_LINK;
			*link = req;
			link->head = req;
			link->last = req;
		} else {
			io_queue_sqe(req, sqe, cs);
		}
@@ -6769,7 +6776,7 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
{
	struct io_submit_state state;
	struct io_kiocb *link = NULL;
	struct io_submit_link link;
	int i, submitted = 0;

	/* if we have a backlog and couldn't flush it all, return BUSY */
@@ -6789,6 +6796,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
	refcount_add(nr, &current->usage);

	io_submit_state_start(&state, ctx, nr);
	link.head = NULL;

	for (i = 0; i < nr; i++) {
		const struct io_uring_sqe *sqe;
@@ -6834,8 +6842,8 @@ fail_req:
		percpu_counter_sub(&tctx->inflight, unused);
		put_task_struct_many(current, unused);
	}
	if (link)
		io_queue_link_head(link, &state.comp);
	if (link.head)
		io_queue_link_head(link.head, &state.comp);
	io_submit_state_end(&state);

	 /* Commit SQ ring head once we've consumed and submitted all SQEs */