Commit b52fda00 authored by Pavel Begunkov, committed by Jens Axboe
Browse files

io_uring: don't iterate io_uring_cancel_files()



io_uring_cancel_files() guarantees that it cancels all matching requests,
so it is not necessary to call it in a loop. Move the call up in the
callchain into io_uring_cancel_task_requests().

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent df9923f9
Loading
Loading
Loading
Loading
+12 −22
Original line number Diff line number Diff line
@@ -8793,16 +8793,10 @@ static void io_cancel_defer_files(struct io_ring_ctx *ctx,
	}
}

/*
 * Returns true if we found and killed one or more files pinning requests
 */
static bool io_uring_cancel_files(struct io_ring_ctx *ctx,
static void io_uring_cancel_files(struct io_ring_ctx *ctx,
				  struct task_struct *task,
				  struct files_struct *files)
{
	if (list_empty_careful(&ctx->inflight_list))
		return false;

	while (!list_empty_careful(&ctx->inflight_list)) {
		struct io_kiocb *cancel_req = NULL, *req;
		DEFINE_WAIT(wait);
@@ -8835,8 +8829,6 @@ static bool io_uring_cancel_files(struct io_ring_ctx *ctx,
		schedule();
		finish_wait(&ctx->inflight_wait, &wait);
	}

	return true;
}

static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
@@ -8847,15 +8839,12 @@ static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
	return io_task_match(req, task);
}

static bool __io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
					    struct task_struct *task,
					    struct files_struct *files)
static void __io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
					    struct task_struct *task)
{
	bool ret;

	ret = io_uring_cancel_files(ctx, task, files);
	if (!files) {
	while (1) {
		enum io_wq_cancel cret;
		bool ret = false;

		cret = io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb, task, true);
		if (cret != IO_WQ_CANCEL_NOTFOUND)
@@ -8871,9 +8860,11 @@ static bool __io_uring_cancel_task_requests(struct io_ring_ctx *ctx,

		ret |= io_poll_remove_all(ctx, task);
		ret |= io_kill_timeouts(ctx, task);
		if (!ret)
			break;
		io_run_task_work();
		cond_resched();
	}

	return ret;
}

/*
@@ -8894,11 +8885,10 @@ static void io_uring_cancel_task_requests(struct io_ring_ctx *ctx,

	io_cancel_defer_files(ctx, task, files);
	io_cqring_overflow_flush(ctx, true, task, files);
	io_uring_cancel_files(ctx, task, files);

	while (__io_uring_cancel_task_requests(ctx, task, files)) {
		io_run_task_work();
		cond_resched();
	}
	if (!files)
		__io_uring_cancel_task_requests(ctx, task);

	if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data) {
		atomic_dec(&task->io_uring->in_idle);