Commit 1e95081c authored by Pavel Begunkov, committed by Jens Axboe
Browse files

io_uring: fix deferred req iovec leak



After defer, a request will be prepared, which includes allocating an iovec
if needed, and then submitted through io_wq_submit_work() rather than a custom
handler (e.g. io_rw_async()/io_sendrecv_async()). However, this leaks the
iovec, as it's in io-wq and the code goes as follows:

io_read() {
	if (!io_wq_current_is_worker())
		kfree(iovec);
}

Put all deallocation logic in io_{read,write,send,recv}(), which will
leave the memory, if going async with -EAGAIN.

It also fixes a leak after failed io_alloc_async_ctx() in
io_{recv,send}_msg().

Cc: stable@vger.kernel.org # 5.5
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent e1d85334
Loading
Loading
Loading
Loading
+12 −35
Original line number Diff line number Diff line
@@ -2144,17 +2144,6 @@ static int io_alloc_async_ctx(struct io_kiocb *req)
	return req->io == NULL;
}

/*
 * io-wq work handler for async read/write requests: submit the request
 * through io-wq, then release the request's separately allocated iovec,
 * if one was used instead of the inline fast_iov.
 */
static void io_rw_async(struct io_wq_work **workptr)
{
	struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
	struct iovec *to_free = NULL;

	/*
	 * Snapshot the pointer before submission — io_wq_submit_work() may
	 * complete the request, after which req->io must not be touched.
	 */
	if (req->io->rw.iov != req->io->rw.fast_iov)
		to_free = req->io->rw.iov;
	io_wq_submit_work(workptr);
	kfree(to_free);
}

static int io_setup_async_rw(struct io_kiocb *req, ssize_t io_size,
			     struct iovec *iovec, struct iovec *fast_iov,
			     struct iov_iter *iter)
@@ -2167,7 +2156,6 @@ static int io_setup_async_rw(struct io_kiocb *req, ssize_t io_size,

		io_req_map_rw(req, io_size, iovec, fast_iov, iter);
	}
	req->work.func = io_rw_async;
	return 0;
}

@@ -2254,7 +2242,6 @@ copy_iov:
		}
	}
out_free:
	if (!io_wq_current_is_worker())
	kfree(iovec);
	return ret;
}
@@ -2360,7 +2347,6 @@ copy_iov:
		}
	}
out_free:
	if (!io_wq_current_is_worker())
	kfree(iovec);
	return ret;
}
@@ -2956,19 +2942,6 @@ static int io_sync_file_range(struct io_kiocb *req, struct io_kiocb **nxt,
	return 0;
}

#if defined(CONFIG_NET)
/*
 * io-wq work handler for async send/recv requests: submit the request
 * through io-wq, then free the request's separately allocated iovec, if any.
 *
 * NOTE(review): the comparison reads ->rw.iov/->rw.fast_iov while the
 * pointer actually freed is ->msg.iov — presumably this relies on the rw
 * and msg members of the req->io union sharing layout (fast_iov followed
 * by iov); confirm against the io_async_ctx definition.
 */
static void io_sendrecv_async(struct io_wq_work **workptr)
{
	struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
	struct iovec *iov = NULL;

	/* Capture before submission, which may complete and free the request. */
	if (req->io->rw.iov != req->io->rw.fast_iov)
		iov = req->io->msg.iov;
	io_wq_submit_work(workptr);
	kfree(iov);
}
#endif

static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
#if defined(CONFIG_NET)
@@ -3037,17 +3010,19 @@ static int io_sendmsg(struct io_kiocb *req, struct io_kiocb **nxt,
		if (force_nonblock && ret == -EAGAIN) {
			if (req->io)
				return -EAGAIN;
			if (io_alloc_async_ctx(req))
			if (io_alloc_async_ctx(req)) {
				if (kmsg && kmsg->iov != kmsg->fast_iov)
					kfree(kmsg->iov);
				return -ENOMEM;
			}
			memcpy(&req->io->msg, &io.msg, sizeof(io.msg));
			req->work.func = io_sendrecv_async;
			return -EAGAIN;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
	}

	if (!io_wq_current_is_worker() && kmsg && kmsg->iov != kmsg->fast_iov)
	if (kmsg && kmsg->iov != kmsg->fast_iov)
		kfree(kmsg->iov);
	io_cqring_add_event(req, ret);
	if (ret < 0)
@@ -3181,17 +3156,19 @@ static int io_recvmsg(struct io_kiocb *req, struct io_kiocb **nxt,
		if (force_nonblock && ret == -EAGAIN) {
			if (req->io)
				return -EAGAIN;
			if (io_alloc_async_ctx(req))
			if (io_alloc_async_ctx(req)) {
				if (kmsg && kmsg->iov != kmsg->fast_iov)
					kfree(kmsg->iov);
				return -ENOMEM;
			}
			memcpy(&req->io->msg, &io.msg, sizeof(io.msg));
			req->work.func = io_sendrecv_async;
			return -EAGAIN;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
	}

	if (!io_wq_current_is_worker() && kmsg && kmsg->iov != kmsg->fast_iov)
	if (kmsg && kmsg->iov != kmsg->fast_iov)
		kfree(kmsg->iov);
	io_cqring_add_event(req, ret);
	if (ret < 0)