Commit b0dd8a41 authored by Jens Axboe

io_uring: correct poll cancel and linked timeout expiration completion

Currently a poll request fills a completion entry of 0, even if it got
cancelled. This is odd, and it makes it harder to support with chains.
Ensure that it returns -ECANCELED in the completion event if it got
cancelled, and furthermore ensure that the linked timeout that triggered
it completes with -ETIME if we did indeed trigger the completion
through a timeout.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent e0e328c4
1 changed file with 22 additions and 11 deletions
@@ -2066,11 +2066,14 @@ static int io_poll_remove(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	return 0;
 }
 
-static void io_poll_complete(struct io_kiocb *req, __poll_t mask)
+static void io_poll_complete(struct io_kiocb *req, __poll_t mask, int error)
 {
 	struct io_ring_ctx *ctx = req->ctx;
 
 	req->poll.done = true;
-	io_cqring_fill_event(req, mangle_poll(mask));
+	if (error)
+		io_cqring_fill_event(req, error);
+	else
+		io_cqring_fill_event(req, mangle_poll(mask));
 	io_commit_cqring(ctx);
 }
@@ -2084,11 +2087,16 @@ static void io_poll_complete_work(struct io_wq_work **workptr)
 	struct io_ring_ctx *ctx = req->ctx;
 	struct io_kiocb *nxt = NULL;
 	__poll_t mask = 0;
+	int ret = 0;
 
-	if (work->flags & IO_WQ_WORK_CANCEL)
+	if (work->flags & IO_WQ_WORK_CANCEL) {
 		WRITE_ONCE(poll->canceled, true);
+		ret = -ECANCELED;
+	} else if (READ_ONCE(poll->canceled)) {
+		ret = -ECANCELED;
+	}
 
-	if (!READ_ONCE(poll->canceled))
+	if (ret != -ECANCELED)
 		mask = vfs_poll(poll->file, &pt) & poll->events;
 
 	/*
@@ -2099,13 +2107,13 @@ static void io_poll_complete_work(struct io_wq_work **workptr)
 	 * avoid further branches in the fast path.
 	 */
 	spin_lock_irq(&ctx->completion_lock);
-	if (!mask && !READ_ONCE(poll->canceled)) {
+	if (!mask && ret != -ECANCELED) {
 		add_wait_queue(poll->head, &poll->wait);
 		spin_unlock_irq(&ctx->completion_lock);
 		return;
 	}
 	io_poll_remove_req(req);
-	io_poll_complete(req, mask);
+	io_poll_complete(req, mask, ret);
 	spin_unlock_irq(&ctx->completion_lock);
 
 	io_cqring_ev_posted(ctx);
@@ -2139,7 +2147,7 @@ static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
 	 */
 	if (mask && spin_trylock_irqsave(&ctx->completion_lock, flags)) {
 		io_poll_remove_req(req);
-		io_poll_complete(req, mask);
+		io_poll_complete(req, mask, 0);
 		req->flags |= REQ_F_COMP_LOCKED;
 		io_put_req(req);
 		spin_unlock_irqrestore(&ctx->completion_lock, flags);
@@ -2251,7 +2259,7 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 	}
 	if (mask) { /* no async, we'd stolen it */
 		ipt.error = 0;
-		io_poll_complete(req, mask);
+		io_poll_complete(req, mask, 0);
 	}
 	spin_unlock_irq(&ctx->completion_lock);
 
@@ -2503,7 +2511,7 @@ static int io_async_cancel_one(struct io_ring_ctx *ctx, void *sqe_addr)
 
 static void io_async_find_and_cancel(struct io_ring_ctx *ctx,
 				     struct io_kiocb *req, __u64 sqe_addr,
-				     struct io_kiocb **nxt)
+				     struct io_kiocb **nxt, int success_ret)
 {
 	unsigned long flags;
 	int ret;
@@ -2520,6 +2528,8 @@ static void io_async_find_and_cancel(struct io_ring_ctx *ctx,
 		goto done;
 	ret = io_poll_cancel(ctx, sqe_addr);
 done:
+	if (!ret)
+		ret = success_ret;
 	io_cqring_fill_event(req, ret);
 	io_commit_cqring(ctx);
 	spin_unlock_irqrestore(&ctx->completion_lock, flags);
@@ -2541,7 +2551,7 @@ static int io_async_cancel(struct io_kiocb *req, const struct io_uring_sqe *sqe,
	    sqe->cancel_flags)
		return -EINVAL;

	io_async_find_and_cancel(ctx, req, READ_ONCE(sqe->addr), nxt);
	io_async_find_and_cancel(ctx, req, READ_ONCE(sqe->addr), nxt, 0);
	return 0;
}

@@ -2831,7 +2841,8 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
 	spin_unlock_irqrestore(&ctx->completion_lock, flags);
 
 	if (prev) {
-		io_async_find_and_cancel(ctx, req, prev->user_data, NULL);
+		io_async_find_and_cancel(ctx, req, prev->user_data, NULL,
+						-ETIME);
 		io_put_req(prev);
 	} else {
 		io_cqring_add_event(req, -ETIME);
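
For illustration (not part of the commit), a minimal userspace sketch of the behavior this fixes, assuming a liburing build that provides io_uring_prep_link_timeout() and a kernel carrying this patch; the pipe, the 100ms duration, and the user_data values are arbitrary choices for the demo. A poll on a quiet pipe is chained to a linked timeout, so the timeout fires first: the poll CQE should now carry -ECANCELED instead of 0, and the link-timeout CQE should carry -ETIME.

#include <liburing.h>
#include <poll.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct __kernel_timespec ts = { .tv_sec = 0, .tv_nsec = 100000000 };
	int fds[2];

	if (pipe(fds) < 0 || io_uring_queue_init(4, &ring, 0) < 0)
		return 1;

	/* poll for readability; IOSQE_IO_LINK chains the next SQE to it */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_poll_add(sqe, fds[0], POLLIN);
	sqe->flags |= IOSQE_IO_LINK;
	sqe->user_data = 1;

	/* linked timeout: cancels the poll if it hasn't completed in time */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_link_timeout(sqe, &ts, 0);
	sqe->user_data = 2;

	io_uring_submit(&ring);

	/*
	 * Nothing is ever written to the pipe, so the timeout wins. With
	 * this patch: res == -ECANCELED for user_data 1 (the poll) and
	 * res == -ETIME for user_data 2 (the linked timeout).
	 */
	for (int i = 0; i < 2; i++) {
		if (io_uring_wait_cqe(&ring, &cqe) < 0)
			break;
		printf("user_data=%llu res=%d (%s)\n",
		       (unsigned long long) cqe->user_data, cqe->res,
		       cqe->res < 0 ? strerror(-cqe->res) : "ok");
		io_uring_cqe_seen(&ring, cqe);
	}

	io_uring_queue_exit(&ring);
	return 0;
}

Before the patch, the poll CQE would have reported res == 0 even though the request was cancelled, which is indistinguishable from a genuine empty poll result.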