Commit 31067255 authored by Jens Axboe

io_uring: async task poll trigger cleanup



If the request is still hashed in io_async_task_func(), then it cannot
have been canceled and it's pointless to check. So save that check.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent f2a8d5c7
fs/io_uring.c  +16 −17
@@ -4354,7 +4354,7 @@ static void io_async_task_func(struct callback_head *cb)
 	struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
 	struct async_poll *apoll = req->apoll;
 	struct io_ring_ctx *ctx = req->ctx;
-	bool canceled;
+	bool canceled = false;
 
 	trace_io_uring_task_run(req->ctx, req->opcode, req->user_data);
 
@@ -4363,34 +4363,33 @@ static void io_async_task_func(struct callback_head *cb)
 		return;
 	}
 
-	if (hash_hashed(&req->hash_node))
+	/* If req is still hashed, it cannot have been canceled. Don't check. */
+	if (hash_hashed(&req->hash_node)) {
 		hash_del(&req->hash_node);
-
-	canceled = READ_ONCE(apoll->poll.canceled);
-	if (canceled) {
-		io_cqring_fill_event(req, -ECANCELED);
-		io_commit_cqring(ctx);
+	} else {
+		canceled = READ_ONCE(apoll->poll.canceled);
+		if (canceled) {
+			io_cqring_fill_event(req, -ECANCELED);
+			io_commit_cqring(ctx);
+		}
 	}
 
 	spin_unlock_irq(&ctx->completion_lock);
 
 	/* restore ->work in case we need to retry again */
 	memcpy(&req->work, &apoll->work, sizeof(req->work));
+	kfree(apoll);
 
-	if (canceled) {
-		kfree(apoll);
+	if (!canceled) {
+		__set_current_state(TASK_RUNNING);
+		mutex_lock(&ctx->uring_lock);
+		__io_queue_sqe(req, NULL);
+		mutex_unlock(&ctx->uring_lock);
+	} else {
 		io_cqring_ev_posted(ctx);
 		req_set_fail_links(req);
 		io_double_put_req(req);
-		return;
 	}
-
-	__set_current_state(TASK_RUNNING);
-	mutex_lock(&ctx->uring_lock);
-	__io_queue_sqe(req, NULL);
-	mutex_unlock(&ctx->uring_lock);
-
-	kfree(apoll);
 }
 
 static int io_async_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
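
For readers who want the shape of the change without the kernel context, below is a small, self-contained user-space sketch of the invariant this commit relies on: a request that is still hashed cannot have been canceled, so canceled can default to false and the cancel flag only needs to be read on the un-hashed path. This is a toy model, not kernel code; struct fake_req, handle() and the printed strings are made up for illustration.

/* Toy user-space model of the branch structure introduced by this commit. */
#include <stdbool.h>
#include <stdio.h>

struct fake_req {
	bool hashed;    /* still on the cancel hash? */
	bool canceled;  /* models READ_ONCE(apoll->poll.canceled) */
};

static void handle(struct fake_req *req)
{
	bool canceled = false;

	if (req->hashed) {
		/* Still hashed: cannot have been canceled, skip the check. */
		req->hashed = false;             /* stands in for hash_del() */
	} else {
		canceled = req->canceled;        /* only read the flag here */
	}

	if (!canceled)
		printf("retry/issue request\n");       /* __io_queue_sqe() path */
	else
		printf("complete with -ECANCELED\n");  /* fail/complete path */
}

int main(void)
{
	struct fake_req hashed = { .hashed = true,  .canceled = false };
	struct fake_req gone   = { .hashed = false, .canceled = true  };

	handle(&hashed);  /* prints: retry/issue request */
	handle(&gone);    /* prints: complete with -ECANCELED */
	return 0;
}

The real function additionally posts a CQ event with -ECANCELED, restores req->work and frees the async_poll entry under the appropriate locks, as shown in the diff above.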