Commit ac45abc0 authored by Pavel Begunkov, committed by Jens Axboe

io_uring: remove custom ->func handlers

In preparation for getting rid of work.func, remove almost all custom
instances of it, leaving only io_wq_submit_work() and
io_link_work_cb(). The latter will be dealt with later.

Nothing fancy: just routinely remove each *_finish() function and
inline what's left. E.g., remove io_fsync_finish() and inline
__io_fsync() into io_fsync().
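
For illustration, the fsync pair collapses roughly as follows (a
condensed sketch of the pattern, not the verbatim hunks below):

	/* before: a thin nonblock check plus a custom io-wq callback */
	static void io_fsync_finish(struct io_wq_work **workptr)
	{
		struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);

		if (io_req_cancelled(req))
			return;
		__io_fsync(req);		/* does the actual vfs_fsync_range() */
		io_steal_work(req, workptr);
	}

	static int io_fsync(struct io_kiocb *req, bool force_nonblock)
	{
		if (force_nonblock) {
			req->work.func = io_fsync_finish;
			return -EAGAIN;
		}
		__io_fsync(req);
		return 0;
	}

	/* after: one handler; -EAGAIN asks the core to retry from io-wq */
	static int io_fsync(struct io_kiocb *req, bool force_nonblock)
	{
		/* fsync always requires a blocking context */
		if (force_nonblock)
			return -EAGAIN;
		/* ... vfs_fsync_range() and completion posting, as before ... */
		return 0;
	}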

As no users of io_req_cancelled() are left, delete it as well. The
patch adds an extra switch lookup on a cold-ish path, but that's
outweighed by the nice diffstat and other benefits of the following
patches.
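
(The "extra switch lookup" refers to how a punted request gets resumed:
previously io-wq jumped straight through the per-request pointer, while
now io_wq_submit_work() re-enters the generic per-opcode dispatcher. A
simplified sketch, with bodies elided and signatures as in this era of
fs/io_uring.c:

	/* before: io-wq invokes the per-request callback directly */
	work->func(&work);			/* e.g. io_fsync_finish() */

	/* after: the generic handler redoes the opcode dispatch */
	ret = io_issue_sqe(req, NULL, false);	/* switch (req->opcode)... */

The lookup only happens when a request is actually punted to io-wq,
hence "cold-ish".)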

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 3af73b28
+27 −112
@@ -2898,23 +2898,15 @@ static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	return 0;
 }
 
-static bool io_req_cancelled(struct io_kiocb *req)
-{
-	if (req->work.flags & IO_WQ_WORK_CANCEL) {
-		req_set_fail_links(req);
-		io_cqring_add_event(req, -ECANCELED);
-		io_put_req(req);
-		return true;
-	}
-
-	return false;
-}
-
-static void __io_fsync(struct io_kiocb *req)
+static int io_fsync(struct io_kiocb *req, bool force_nonblock)
 {
 	loff_t end = req->sync.off + req->sync.len;
 	int ret;
 
+	/* fsync always requires a blocking context */
+	if (force_nonblock)
+		return -EAGAIN;
+
 	ret = vfs_fsync_range(req->file, req->sync.off,
 				end > 0 ? end : LLONG_MAX,
 				req->sync.flags & IORING_FSYNC_DATASYNC);
@@ -2922,53 +2914,9 @@ static void __io_fsync(struct io_kiocb *req)
 		req_set_fail_links(req);
 	io_cqring_add_event(req, ret);
 	io_put_req(req);
-}
-
-static void io_fsync_finish(struct io_wq_work **workptr)
-{
-	struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
-
-	if (io_req_cancelled(req))
-		return;
-	__io_fsync(req);
-	io_steal_work(req, workptr);
-}
-
-static int io_fsync(struct io_kiocb *req, bool force_nonblock)
-{
-	/* fsync always requires a blocking context */
-	if (force_nonblock) {
-		req->work.func = io_fsync_finish;
-		return -EAGAIN;
-	}
-	__io_fsync(req);
 	return 0;
 }
 
-static void __io_fallocate(struct io_kiocb *req)
-{
-	int ret;
-
-	current->signal->rlim[RLIMIT_FSIZE].rlim_cur = req->fsize;
-	ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,
-				req->sync.len);
-	current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
-	if (ret < 0)
-		req_set_fail_links(req);
-	io_cqring_add_event(req, ret);
-	io_put_req(req);
-}
-
-static void io_fallocate_finish(struct io_wq_work **workptr)
-{
-	struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
-
-	if (io_req_cancelled(req))
-		return;
-	__io_fallocate(req);
-	io_steal_work(req, workptr);
-}
-
 static int io_fallocate_prep(struct io_kiocb *req,
 			     const struct io_uring_sqe *sqe)
 {
@@ -2986,13 +2934,20 @@ static int io_fallocate_prep(struct io_kiocb *req,

 static int io_fallocate(struct io_kiocb *req, bool force_nonblock)
 {
+	int ret;
+
 	/* fallocate always requiring blocking context */
-	if (force_nonblock) {
-		req->work.func = io_fallocate_finish;
+	if (force_nonblock)
 		return -EAGAIN;
-	}
 
-	__io_fallocate(req);
+	current->signal->rlim[RLIMIT_FSIZE].rlim_cur = req->fsize;
+	ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,
+				req->sync.len);
+	current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
+	if (ret < 0)
+		req_set_fail_links(req);
+	io_cqring_add_event(req, ret);
+	io_put_req(req);
 	return 0;
 }

@@ -3489,38 +3444,20 @@ static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	return 0;
 }
 
-static void __io_sync_file_range(struct io_kiocb *req)
+static int io_sync_file_range(struct io_kiocb *req, bool force_nonblock)
 {
 	int ret;
 
+	/* sync_file_range always requires a blocking context */
+	if (force_nonblock)
+		return -EAGAIN;
+
 	ret = sync_file_range(req->file, req->sync.off, req->sync.len,
 				req->sync.flags);
 	if (ret < 0)
 		req_set_fail_links(req);
 	io_cqring_add_event(req, ret);
 	io_put_req(req);
-}
-
-
-static void io_sync_file_range_finish(struct io_wq_work **workptr)
-{
-	struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
-
-	if (io_req_cancelled(req))
-		return;
-	__io_sync_file_range(req);
-	io_steal_work(req, workptr);
-}
-
-static int io_sync_file_range(struct io_kiocb *req, bool force_nonblock)
-{
-	/* sync_file_range always requires a blocking context */
-	if (force_nonblock) {
-		req->work.func = io_sync_file_range_finish;
-		return -EAGAIN;
-	}
-
-	__io_sync_file_range(req);
 	return 0;
 }

@@ -3942,49 +3879,27 @@ static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	return 0;
 }
 
-static int __io_accept(struct io_kiocb *req, bool force_nonblock)
+static int io_accept(struct io_kiocb *req, bool force_nonblock)
 {
 	struct io_accept *accept = &req->accept;
-	unsigned file_flags;
+	unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
 	int ret;
 
-	file_flags = force_nonblock ? O_NONBLOCK : 0;
 	ret = __sys_accept4_file(req->file, file_flags, accept->addr,
 					accept->addr_len, accept->flags,
 					accept->nofile);
 	if (ret == -EAGAIN && force_nonblock)
 		return -EAGAIN;
-	if (ret == -ERESTARTSYS)
-		ret = -EINTR;
-	if (ret < 0)
+	if (ret < 0) {
+		if (ret == -ERESTARTSYS)
+			ret = -EINTR;
 		req_set_fail_links(req);
+	}
 	io_cqring_add_event(req, ret);
 	io_put_req(req);
 	return 0;
 }
-
-static void io_accept_finish(struct io_wq_work **workptr)
-{
-	struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
-
-	if (io_req_cancelled(req))
-		return;
-	__io_accept(req, false);
-	io_steal_work(req, workptr);
-}
-
-static int io_accept(struct io_kiocb *req, bool force_nonblock)
-{
-	int ret;
-
-	ret = __io_accept(req, force_nonblock);
-	if (ret == -EAGAIN && force_nonblock) {
-		req->work.func = io_accept_finish;
-		return -EAGAIN;
-	}
-	return 0;
-}
 
 static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
 	struct io_connect *conn = &req->connect;