Commit 267bc904 authored by Pavel Begunkov, committed by Jens Axboe

io_uring: use inlined struct sqe_submit



req->submit is always kept up-to-date, so use it directly instead of threading a separate struct sqe_submit pointer through the submission path. In the paths that free the request on error, post the completion event before freeing, since the event data now lives inside the request.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
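
The whole patch is one mechanical transformation: helpers that took both the request and a pointer to its submission state lose the extra parameter, because callers always passed &req->submit anyway. A minimal standalone sketch of the before/after shape, using simplified stand-in types and hypothetical prep_old()/prep_new() names rather than the kernel's real definitions:

#include <stdio.h>

/* Simplified stand-ins for the kernel structures; not the real definitions. */
struct sqe_submit {
	int in_async;
};

struct io_kiocb {
	struct sqe_submit submit;	/* embedded, so always current */
};

/* Before: callers had to pass &req->submit alongside req itself. */
static int prep_old(struct io_kiocb *req, const struct sqe_submit *s)
{
	(void)req;		/* req unused in this toy version */
	return s->in_async;
}

/* After: one parameter fewer; the helper derives the state from req. */
static int prep_new(struct io_kiocb *req)
{
	const struct sqe_submit *s = &req->submit;

	return s->in_async;
}

int main(void)
{
	struct io_kiocb req = { .submit = { .in_async = 1 } };

	/* Both forms read the same embedded state. */
	printf("%d %d\n", prep_old(&req, &req.submit), prep_new(&req));
	return 0;
}

Dropping the parameter shortens every call site and removes any question of the pointer and the embedded state disagreeing. The one subtlety is ordering, visible in io_queue_sqe(), io_queue_link_head(), and io_submit_sqe() below: the completion event must be posted before io_free_req() now that the data it reads is embedded in the request.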
parent 50585b9a
fs/io_uring.c +43 −44
@@ -1155,10 +1155,9 @@ static bool io_file_supports_async(struct file *file)
	return false;
}

-static int io_prep_rw(struct io_kiocb *req, const struct sqe_submit *s,
-		      bool force_nonblock)
+static int io_prep_rw(struct io_kiocb *req, bool force_nonblock)
{
-	const struct io_uring_sqe *sqe = s->sqe;
+	const struct io_uring_sqe *sqe = req->submit.sqe;
	struct io_ring_ctx *ctx = req->ctx;
	struct kiocb *kiocb = &req->rw;
	unsigned ioprio;
@@ -1406,8 +1405,8 @@ static ssize_t loop_rw_iter(int rw, struct file *file, struct kiocb *kiocb,
	return ret;
}

-static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
-		   struct io_kiocb **nxt, bool force_nonblock)
+static int io_read(struct io_kiocb *req, struct io_kiocb **nxt,
+		   bool force_nonblock)
{
	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
	struct kiocb *kiocb = &req->rw;
@@ -1416,7 +1415,7 @@ static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
	size_t iov_count;
	ssize_t read_size, ret;

-	ret = io_prep_rw(req, s, force_nonblock);
+	ret = io_prep_rw(req, force_nonblock);
	if (ret)
		return ret;
	file = kiocb->ki_filp;
@@ -1424,7 +1423,7 @@ static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
	if (unlikely(!(file->f_mode & FMODE_READ)))
		return -EBADF;

-	ret = io_import_iovec(req->ctx, READ, s, &iovec, &iter);
+	ret = io_import_iovec(req->ctx, READ, &req->submit, &iovec, &iter);
	if (ret < 0)
		return ret;

@@ -1456,7 +1455,7 @@ static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
			ret2 = -EAGAIN;
		/* Catch -EAGAIN return for forced non-blocking submission */
		if (!force_nonblock || ret2 != -EAGAIN)
-			kiocb_done(kiocb, ret2, nxt, s->in_async);
+			kiocb_done(kiocb, ret2, nxt, req->submit.in_async);
		else
			ret = -EAGAIN;
	}
@@ -1464,8 +1463,8 @@ static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
	return ret;
}

-static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
-		    struct io_kiocb **nxt, bool force_nonblock)
+static int io_write(struct io_kiocb *req, struct io_kiocb **nxt,
+		    bool force_nonblock)
{
	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
	struct kiocb *kiocb = &req->rw;
@@ -1474,7 +1473,7 @@ static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
	size_t iov_count;
	ssize_t ret;

-	ret = io_prep_rw(req, s, force_nonblock);
+	ret = io_prep_rw(req, force_nonblock);
	if (ret)
		return ret;

@@ -1482,7 +1481,7 @@ static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
	if (unlikely(!(file->f_mode & FMODE_WRITE)))
		return -EBADF;

-	ret = io_import_iovec(req->ctx, WRITE, s, &iovec, &iter);
+	ret = io_import_iovec(req->ctx, WRITE, &req->submit, &iovec, &iter);
	if (ret < 0)
		return ret;

@@ -1519,7 +1518,7 @@ static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
		else
			ret2 = loop_rw_iter(WRITE, file, kiocb, &iter);
		if (!force_nonblock || ret2 != -EAGAIN)
-			kiocb_done(kiocb, ret2, nxt, s->in_async);
+			kiocb_done(kiocb, ret2, nxt, req->submit.in_async);
		else
			ret = -EAGAIN;
	}
@@ -2188,9 +2187,9 @@ static int io_async_cancel(struct io_kiocb *req, const struct io_uring_sqe *sqe,
	return 0;
}

-static int io_req_defer(struct io_ring_ctx *ctx, struct io_kiocb *req,
-			const struct io_uring_sqe *sqe)
+static int io_req_defer(struct io_ring_ctx *ctx, struct io_kiocb *req)
{
+	const struct io_uring_sqe *sqe = req->submit.sqe;
	struct io_uring_sqe *sqe_copy;

	if (!io_sequence_defer(ctx, req) && list_empty(&ctx->defer_list))
@@ -2217,10 +2216,10 @@ static int io_req_defer(struct io_ring_ctx *ctx, struct io_kiocb *req,
}

static int __io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
-			   const struct sqe_submit *s, struct io_kiocb **nxt,
-			   bool force_nonblock)
+			   struct io_kiocb **nxt, bool force_nonblock)
{
	int ret, opcode;
+	struct sqe_submit *s = &req->submit;

	req->user_data = READ_ONCE(s->sqe->user_data);

@@ -2232,18 +2231,18 @@ static int __io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
	case IORING_OP_READV:
		if (unlikely(s->sqe->buf_index))
			return -EINVAL;
-		ret = io_read(req, s, nxt, force_nonblock);
+		ret = io_read(req, nxt, force_nonblock);
		break;
	case IORING_OP_WRITEV:
		if (unlikely(s->sqe->buf_index))
			return -EINVAL;
-		ret = io_write(req, s, nxt, force_nonblock);
+		ret = io_write(req, nxt, force_nonblock);
		break;
	case IORING_OP_READ_FIXED:
-		ret = io_read(req, s, nxt, force_nonblock);
+		ret = io_read(req, nxt, force_nonblock);
		break;
	case IORING_OP_WRITE_FIXED:
-		ret = io_write(req, s, nxt, force_nonblock);
+		ret = io_write(req, nxt, force_nonblock);
		break;
	case IORING_OP_FSYNC:
		ret = io_fsync(req, s->sqe, nxt, force_nonblock);
@@ -2318,7 +2317,7 @@ static void io_wq_submit_work(struct io_wq_work **workptr)
		s->has_user = (work->flags & IO_WQ_WORK_HAS_MM) != 0;
		s->in_async = true;
		do {
-			ret = __io_submit_sqe(ctx, req, s, &nxt, false);
+			ret = __io_submit_sqe(ctx, req, &nxt, false);
			/*
			 * We can get EAGAIN for polled IO even though we're
			 * forcing a sync submission from here, since we can't
@@ -2372,9 +2371,10 @@ static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
	return table->files[index & IORING_FILE_TABLE_MASK];
}

-static int io_req_set_file(struct io_ring_ctx *ctx, const struct sqe_submit *s,
+static int io_req_set_file(struct io_ring_ctx *ctx,
			   struct io_submit_state *state, struct io_kiocb *req)
{
+	struct sqe_submit *s = &req->submit;
	unsigned flags;
	int fd;

@@ -2438,12 +2438,11 @@ static int io_grab_files(struct io_ring_ctx *ctx, struct io_kiocb *req)
	return ret;
}

-static int __io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
-			struct sqe_submit *s)
+static int __io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req)
{
	int ret;

-	ret = __io_submit_sqe(ctx, req, s, NULL, true);
+	ret = __io_submit_sqe(ctx, req, NULL, true);

	/*
	 * We async punt it if the file wasn't marked NOWAIT, or if the file
@@ -2451,6 +2450,7 @@ static int __io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
	 */
	if (ret == -EAGAIN && (!(req->flags & REQ_F_NOWAIT) ||
	    (req->flags & REQ_F_MUST_PUNT))) {
+		struct sqe_submit *s = &req->submit;
		struct io_uring_sqe *sqe_copy;

		sqe_copy = kmemdup(s->sqe, sizeof(*sqe_copy), GFP_KERNEL);
@@ -2488,31 +2488,30 @@ err:
	return ret;
}

-static int io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
-			struct sqe_submit *s)
+static int io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req)
{
	int ret;

-	ret = io_req_defer(ctx, req, s->sqe);
+	ret = io_req_defer(ctx, req);
	if (ret) {
		if (ret != -EIOCBQUEUED) {
+			io_cqring_add_event(ctx, req->submit.sqe->user_data, ret);
			io_free_req(req, NULL);
-			io_cqring_add_event(ctx, s->sqe->user_data, ret);
		}
		return 0;
	}

-	return __io_queue_sqe(ctx, req, s);
+	return __io_queue_sqe(ctx, req);
}

static int io_queue_link_head(struct io_ring_ctx *ctx, struct io_kiocb *req,
-			      struct sqe_submit *s, struct io_kiocb *shadow)
+			      struct io_kiocb *shadow)
{
	int ret;
	int need_submit = false;

	if (!shadow)
-		return io_queue_sqe(ctx, req, s);
+		return io_queue_sqe(ctx, req);

	/*
	 * Mark the first IO in link list as DRAIN, let all the following
@@ -2520,12 +2519,12 @@ static int io_queue_link_head(struct io_ring_ctx *ctx, struct io_kiocb *req,
	 * list.
	 */
	req->flags |= REQ_F_IO_DRAIN;
-	ret = io_req_defer(ctx, req, s->sqe);
+	ret = io_req_defer(ctx, req);
	if (ret) {
		if (ret != -EIOCBQUEUED) {
+			io_cqring_add_event(ctx, req->submit.sqe->user_data, ret);
			io_free_req(req, NULL);
			__io_free_req(shadow);
-			io_cqring_add_event(ctx, s->sqe->user_data, ret);
			return 0;
		}
	} else {
@@ -2543,7 +2542,7 @@ static int io_queue_link_head(struct io_ring_ctx *ctx, struct io_kiocb *req,
	spin_unlock_irq(&ctx->completion_lock);

	if (need_submit)
-		return __io_queue_sqe(ctx, req, s);
+		return __io_queue_sqe(ctx, req);

	return 0;
}
@@ -2551,10 +2550,10 @@ static int io_queue_link_head(struct io_ring_ctx *ctx, struct io_kiocb *req,
#define SQE_VALID_FLAGS	(IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK)

static void io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
-			  struct sqe_submit *s, struct io_submit_state *state,
-			  struct io_kiocb **link)
+			  struct io_submit_state *state, struct io_kiocb **link)
{
	struct io_uring_sqe *sqe_copy;
+	struct sqe_submit *s = &req->submit;
	int ret;

	/* enforce forwards compatibility on users */
@@ -2563,11 +2562,11 @@ static void io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
		goto err_req;
	}

-	ret = io_req_set_file(ctx, s, state, req);
+	ret = io_req_set_file(ctx, state, req);
	if (unlikely(ret)) {
err_req:
-		io_free_req(req, NULL);
		io_cqring_add_event(ctx, s->sqe->user_data, ret);
+		io_free_req(req, NULL);
		return;
	}

@@ -2598,7 +2597,7 @@ err_req:
		INIT_LIST_HEAD(&req->link_list);
		*link = req;
	} else {
-		io_queue_sqe(ctx, req, s);
+		io_queue_sqe(ctx, req);
	}
}

@@ -2742,7 +2741,7 @@ out:
		req->submit.needs_fixed_file = async;
		trace_io_uring_submit_sqe(ctx, req->submit.sqe->user_data,
					  true, async);
-		io_submit_sqe(ctx, req, &req->submit, statep, &link);
+		io_submit_sqe(ctx, req, statep, &link);
		submitted++;

		/*
@@ -2750,14 +2749,14 @@ out:
		 * that's the end of the chain. Submit the previous link.
		 */
		if (!(sqe_flags & IOSQE_IO_LINK) && link) {
-			io_queue_link_head(ctx, link, &link->submit, shadow_req);
+			io_queue_link_head(ctx, link, shadow_req);
			link = NULL;
			shadow_req = NULL;
		}
	}

	if (link)
-		io_queue_link_head(ctx, link, &link->submit, shadow_req);
+		io_queue_link_head(ctx, link, shadow_req);
	if (statep)
		io_submit_state_end(&state);