Commit 2bae047e authored by Jens Axboe
Browse files

io_uring: io_async_task_func() should check and honor cancelation



If the request has been marked as canceled, don't try and issue it.
Instead just fill a canceled event and finish the request.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 74ce6ce4
Loading
Loading
Loading
Loading
+15 −0
Original line number Original line Diff line number Diff line
@@ -4181,6 +4181,7 @@ static void io_async_task_func(struct callback_head *cb)
	struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
	struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
	struct async_poll *apoll = req->apoll;
	struct async_poll *apoll = req->apoll;
	struct io_ring_ctx *ctx = req->ctx;
	struct io_ring_ctx *ctx = req->ctx;
	bool canceled;


	trace_io_uring_task_run(req->ctx, req->opcode, req->user_data);
	trace_io_uring_task_run(req->ctx, req->opcode, req->user_data);


@@ -4192,8 +4193,22 @@ static void io_async_task_func(struct callback_head *cb)
	if (hash_hashed(&req->hash_node))
	if (hash_hashed(&req->hash_node))
		hash_del(&req->hash_node);
		hash_del(&req->hash_node);


	canceled = READ_ONCE(apoll->poll.canceled);
	if (canceled) {
		io_cqring_fill_event(req, -ECANCELED);
		io_commit_cqring(ctx);
	}

	spin_unlock_irq(&ctx->completion_lock);
	spin_unlock_irq(&ctx->completion_lock);


	if (canceled) {
		kfree(apoll);
		io_cqring_ev_posted(ctx);
		req_set_fail_links(req);
		io_put_req(req);
		return;
	}

	/* restore ->work in case we need to retry again */
	/* restore ->work in case we need to retry again */
	memcpy(&req->work, &apoll->work, sizeof(req->work));
	memcpy(&req->work, &apoll->work, sizeof(req->work));