Commit 85394299 authored by Jens Axboe

Merge branch 'io_uring-5.5' into for-linus

* io_uring-5.5:
  io_uring: fix a typo in a comment
  io_uring: hook all linked requests via link_list
  io_uring: fix error handling in io_queue_link_head
  io_uring: use hash table for poll command lookups
parents ece841ab 0b4295b5
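
The headline change is the poll rework: poll requests awaiting completion or cancellation used to sit in an rbtree keyed by user_data, and now sit in a hash table of hlist buckets sized from the CQ ring. As a rough illustration of the pattern (a minimal userspace C sketch, not the kernel code: the multiplier is the golden-ratio constant behind the kernel's hash_long()/hash_64(), everything else is a simplified stand-in):

#include <stdint.h>
#include <stdio.h>

#define CANCEL_HASH_BITS 7			/* 128 buckets */

struct poll_req {
	uint64_t user_data;			/* the key cancellation matches on */
	struct poll_req *hash_next;		/* bucket chain, stand-in for hlist_node */
};

static struct poll_req *cancel_hash[1U << CANCEL_HASH_BITS];

/* multiplicative hash truncated to CANCEL_HASH_BITS, as hash_64() does */
static unsigned int bucket_of(uint64_t key)
{
	return (unsigned int)((key * 0x61C8864680B583EBull) >> (64 - CANCEL_HASH_BITS));
}

/* O(1) head insert, the moral equivalent of hlist_add_head() */
static void poll_req_insert(struct poll_req *req)
{
	struct poll_req **head = &cancel_hash[bucket_of(req->user_data)];

	req->hash_next = *head;
	*head = req;
}

/* cancellation walks one bucket instead of the whole structure */
static struct poll_req *poll_req_find(uint64_t user_data)
{
	struct poll_req *req = cancel_hash[bucket_of(user_data)];

	for (; req; req = req->hash_next)
		if (req->user_data == user_data)
			return req;
	return NULL;
}

int main(void)
{
	struct poll_req a = { .user_data = 0xcafef00d, .hash_next = NULL };

	poll_req_insert(&a);
	printf("found: %s\n", poll_req_find(0xcafef00d) == &a ? "yes" : "no");
	return 0;
}
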
fs/io_uring.c +69 −71
@@ -145,7 +145,7 @@ struct io_rings {
	/*
	 * Number of completion events lost because the queue was full;
	 * this should be avoided by the application by making sure
-	 * there are not more requests pending thatn there is space in
+	 * there are not more requests pending than there is space in
	 * the completion queue.
	 *
	 * Written by the kernel, shouldn't be modified by the
@@ -275,7 +275,8 @@ struct io_ring_ctx {
		 * manipulate the list, hence no extra locking is needed there.
		 */
		struct list_head	poll_list;
-		struct rb_root		cancel_tree;
+		struct hlist_head	*cancel_hash;
+		unsigned		cancel_hash_bits;

		spinlock_t		inflight_lock;
		struct list_head	inflight_list;
@@ -355,7 +356,7 @@ struct io_kiocb {
	struct io_ring_ctx	*ctx;
	union {
		struct list_head	list;
-		struct rb_node		rb_node;
+		struct hlist_node	hash_node;
	};
	struct list_head	link_list;
	unsigned int		flags;
@@ -444,6 +445,7 @@ static void io_ring_ctx_ref_free(struct percpu_ref *ref)
static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
{
	struct io_ring_ctx *ctx;
+	int hash_bits;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
@@ -457,6 +459,21 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
	if (!ctx->completions)
		goto err;

+	/*
+	 * Use 5 bits less than the max cq entries, that should give us around
+	 * 32 entries per hash list if totally full and uniformly spread.
+	 */
+	hash_bits = ilog2(p->cq_entries);
+	hash_bits -= 5;
+	if (hash_bits <= 0)
+		hash_bits = 1;
+	ctx->cancel_hash_bits = hash_bits;
+	ctx->cancel_hash = kmalloc((1U << hash_bits) * sizeof(struct hlist_head),
+					GFP_KERNEL);
+	if (!ctx->cancel_hash)
+		goto err;
+	__hash_init(ctx->cancel_hash, 1U << hash_bits);
+
	if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
			    PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
		goto err;
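
To put numbers on the sizing comment in that hunk: cq_entries is rounded to a power of two at setup time, so the table gets 2^(ilog2(cq_entries) - 5) = cq_entries / 32 buckets, which is exactly the quoted 32 entries per bucket for a completely full, uniformly spread CQ ring. A 4096-entry CQ ring, for example, yields hash_bits = 12 - 5 = 7 and 128 buckets; very small rings clamp to hash_bits = 1, i.e. two buckets.
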
@@ -470,7 +487,6 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
	init_waitqueue_head(&ctx->wait);
	spin_lock_init(&ctx->completion_lock);
	INIT_LIST_HEAD(&ctx->poll_list);
-	ctx->cancel_tree = RB_ROOT;
	INIT_LIST_HEAD(&ctx->defer_list);
	INIT_LIST_HEAD(&ctx->timeout_list);
	init_waitqueue_head(&ctx->inflight_wait);
@@ -481,6 +497,7 @@ err:
	if (ctx->fallback_req)
		kmem_cache_free(req_cachep, ctx->fallback_req);
	kfree(ctx->completions);
+	kfree(ctx->cancel_hash);
	kfree(ctx);
	return NULL;
}
@@ -899,7 +916,6 @@ static bool io_link_cancel_timeout(struct io_kiocb *req)
static void io_req_link_next(struct io_kiocb *req, struct io_kiocb **nxtptr)
{
	struct io_ring_ctx *ctx = req->ctx;
-	struct io_kiocb *nxt;
	bool wake_ev = false;

	/* Already got next link */
@@ -911,24 +927,21 @@ static void io_req_link_next(struct io_kiocb *req, struct io_kiocb **nxtptr)
	 * potentially happen if the chain is messed up, check to be on the
	 * safe side.
	 */
-	nxt = list_first_entry_or_null(&req->link_list, struct io_kiocb, list);
-	while (nxt) {
-		list_del_init(&nxt->list);
+	while (!list_empty(&req->link_list)) {
+		struct io_kiocb *nxt = list_first_entry(&req->link_list,
+						struct io_kiocb, link_list);

-		if ((req->flags & REQ_F_LINK_TIMEOUT) &&
-		    (nxt->flags & REQ_F_TIMEOUT)) {
+		if (unlikely((req->flags & REQ_F_LINK_TIMEOUT) &&
+			     (nxt->flags & REQ_F_TIMEOUT))) {
+			list_del_init(&nxt->link_list);
			wake_ev |= io_link_cancel_timeout(nxt);
-			nxt = list_first_entry_or_null(&req->link_list,
-							struct io_kiocb, list);
			req->flags &= ~REQ_F_LINK_TIMEOUT;
			continue;
		}
-		if (!list_empty(&req->link_list)) {
-			INIT_LIST_HEAD(&nxt->link_list);
-			list_splice(&req->link_list, &nxt->link_list);
-			nxt->flags |= REQ_F_LINK;
-		}

+		list_del_init(&req->link_list);
+		if (!list_empty(&nxt->link_list))
+			nxt->flags |= REQ_F_LINK;
		*nxtptr = nxt;
		break;
	}
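
The net effect of the loop above: linked requests no longer chain through their ->list members and get spliced onto the next request as the head completes. Every follower now hangs directly off the head's link_list, so advancing the chain is a single list_del_init() of the old head, after which the first remaining entry is the new head and already carries the rest of the chain with it.
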
@@ -944,15 +957,15 @@ static void io_req_link_next(struct io_kiocb *req, struct io_kiocb **nxtptr)
static void io_fail_links(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;
-	struct io_kiocb *link;
	unsigned long flags;

	spin_lock_irqsave(&ctx->completion_lock, flags);

	while (!list_empty(&req->link_list)) {
-		link = list_first_entry(&req->link_list, struct io_kiocb, list);
-		list_del_init(&link->list);
+		struct io_kiocb *link = list_first_entry(&req->link_list,
+						struct io_kiocb, link_list);

+		list_del_init(&link->link_list);
		trace_io_uring_fail_link(req, link);

		if ((req->flags & REQ_F_LINK_TIMEOUT) &&
@@ -2260,14 +2273,6 @@ out:
#endif
}

-static inline void io_poll_remove_req(struct io_kiocb *req)
-{
-	if (!RB_EMPTY_NODE(&req->rb_node)) {
-		rb_erase(&req->rb_node, &req->ctx->cancel_tree);
-		RB_CLEAR_NODE(&req->rb_node);
-	}
-}
-
static void io_poll_remove_one(struct io_kiocb *req)
{
	struct io_poll_iocb *poll = &req->poll;
@@ -2279,17 +2284,21 @@ static void io_poll_remove_one(struct io_kiocb *req)
		io_queue_async_work(req);
	}
	spin_unlock(&poll->head->lock);
-	io_poll_remove_req(req);
+	hash_del(&req->hash_node);
}

static void io_poll_remove_all(struct io_ring_ctx *ctx)
{
-	struct rb_node *node;
+	struct hlist_node *tmp;
	struct io_kiocb *req;
+	int i;

	spin_lock_irq(&ctx->completion_lock);
-	while ((node = rb_first(&ctx->cancel_tree)) != NULL) {
-		req = rb_entry(node, struct io_kiocb, rb_node);
+	for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
+		struct hlist_head *list;
+
+		list = &ctx->cancel_hash[i];
+		hlist_for_each_entry_safe(req, tmp, list, hash_node)
			io_poll_remove_one(req);
	}
	spin_unlock_irq(&ctx->completion_lock);
@@ -2297,18 +2306,12 @@ static void io_poll_remove_all(struct io_ring_ctx *ctx)

static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr)
{
-	struct rb_node *p, *parent = NULL;
+	struct hlist_head *list;
	struct io_kiocb *req;

-	p = ctx->cancel_tree.rb_node;
-	while (p) {
-		parent = p;
-		req = rb_entry(parent, struct io_kiocb, rb_node);
-		if (sqe_addr < req->user_data) {
-			p = p->rb_left;
-		} else if (sqe_addr > req->user_data) {
-			p = p->rb_right;
-		} else {
+	list = &ctx->cancel_hash[hash_long(sqe_addr, ctx->cancel_hash_bits)];
+	hlist_for_each_entry(req, list, hash_node) {
+		if (sqe_addr == req->user_data) {
			io_poll_remove_one(req);
			return 0;
		}
@@ -2390,7 +2393,7 @@ static void io_poll_complete_work(struct io_wq_work **workptr)
		spin_unlock_irq(&ctx->completion_lock);
		return;
	}
-	io_poll_remove_req(req);
+	hash_del(&req->hash_node);
	io_poll_complete(req, mask, ret);
	spin_unlock_irq(&ctx->completion_lock);

@@ -2425,7 +2428,7 @@ static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
	 * for finalizing the request, mark us as having grabbed that already.
	 */
	if (mask && spin_trylock_irqsave(&ctx->completion_lock, flags)) {
-		io_poll_remove_req(req);
+		hash_del(&req->hash_node);
		io_poll_complete(req, mask, 0);
		req->flags |= REQ_F_COMP_LOCKED;
		io_put_req(req);
@@ -2463,20 +2466,10 @@ static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
static void io_poll_req_insert(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;
-	struct rb_node **p = &ctx->cancel_tree.rb_node;
-	struct rb_node *parent = NULL;
-	struct io_kiocb *tmp;

-	while (*p) {
-		parent = *p;
-		tmp = rb_entry(parent, struct io_kiocb, rb_node);
-		if (req->user_data < tmp->user_data)
-			p = &(*p)->rb_left;
-		else
-			p = &(*p)->rb_right;
-	}
-	rb_link_node(&req->rb_node, parent, p);
-	rb_insert_color(&req->rb_node, &ctx->cancel_tree);
+	struct hlist_head *list;

+	list = &ctx->cancel_hash[hash_long(req->user_data, ctx->cancel_hash_bits)];
+	hlist_add_head(&req->hash_node, list);
}

static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe,
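
The rbtree that io_poll_req_insert() used to maintain kept requests ordered by user_data, but cancellation only ever needs an exact match, so the ordering was pure overhead. The hash table turns the O(log n) ordered insert into the O(1) hlist_add_head() above, and io_poll_cancel() into an expected-O(1) walk of the single bucket that hash_long(sqe_addr, cancel_hash_bits) selects.
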
@@ -2504,7 +2497,7 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe,
	INIT_IO_WORK(&req->work, io_poll_complete_work);
	events = READ_ONCE(sqe->poll_events);
	poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP;
-	RB_CLEAR_NODE(&req->rb_node);
+	INIT_HLIST_NODE(&req->hash_node);

	poll->head = NULL;
	poll->done = false;
@@ -3173,10 +3166,11 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
	 * We don't expect the list to be empty, that will only happen if we
	 * race with the completion of the linked work.
	 */
-	if (!list_empty(&req->list)) {
-		prev = list_entry(req->list.prev, struct io_kiocb, link_list);
+	if (!list_empty(&req->link_list)) {
+		prev = list_entry(req->link_list.prev, struct io_kiocb,
+				  link_list);
		if (refcount_inc_not_zero(&prev->refs)) {
-			list_del_init(&req->list);
+			list_del_init(&req->link_list);
			prev->flags &= ~REQ_F_LINK_TIMEOUT;
		} else
			prev = NULL;
@@ -3206,7 +3200,7 @@ static void io_queue_linked_timeout(struct io_kiocb *req)
	 * we got a chance to setup the timer
	 */
	spin_lock_irq(&ctx->completion_lock);
-	if (!list_empty(&req->list)) {
+	if (!list_empty(&req->link_list)) {
		struct io_timeout_data *data = &req->io->timeout;

		data->timer.function = io_link_timeout_fn;
@@ -3226,7 +3220,8 @@ static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
	if (!(req->flags & REQ_F_LINK))
		return NULL;

-	nxt = list_first_entry_or_null(&req->link_list, struct io_kiocb, list);
+	nxt = list_first_entry_or_null(&req->link_list, struct io_kiocb,
+					link_list);
	if (!nxt || nxt->sqe->opcode != IORING_OP_LINK_TIMEOUT)
		return NULL;

@@ -3318,7 +3313,7 @@ static inline void io_queue_link_head(struct io_kiocb *req)

#define SQE_VALID_FLAGS	(IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK)

-static void io_submit_sqe(struct io_kiocb *req, struct io_submit_state *state,
+static bool io_submit_sqe(struct io_kiocb *req, struct io_submit_state *state,
			  struct io_kiocb **link)
{
	struct io_ring_ctx *ctx = req->ctx;
@@ -3337,7 +3332,7 @@ static void io_submit_sqe(struct io_kiocb *req, struct io_submit_state *state,
err_req:
		io_cqring_add_event(req, ret);
		io_double_put_req(req);
-		return;
+		return false;
	}

	/*
@@ -3367,7 +3362,7 @@ err_req:
			goto err_req;
		}
		trace_io_uring_link(ctx, req, prev);
-		list_add_tail(&req->list, &prev->link_list);
+		list_add_tail(&req->link_list, &prev->link_list);
	} else if (req->sqe->flags & IOSQE_IO_LINK) {
		req->flags |= REQ_F_LINK;

@@ -3376,6 +3371,8 @@ err_req:
	} else {
		io_queue_sqe(req);
	}

+	return true;
}

/*
@@ -3505,6 +3502,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
			}
		}

+		submitted++;
		sqe_flags = req->sqe->flags;

		req->ring_file = ring_file;
@@ -3514,9 +3512,8 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
		req->needs_fixed_file = async;
		trace_io_uring_submit_sqe(ctx, req->sqe->user_data,
					  true, async);
-		io_submit_sqe(req, statep, &link);
-		submitted++;
-
+		if (!io_submit_sqe(req, statep, &link))
+			break;
		/*
		 * If previous wasn't linked and we have a linked command,
		 * that's the end of the chain. Submit the previous link.
@@ -4644,6 +4641,7 @@ static void io_ring_ctx_free(struct io_ring_ctx *ctx)
	free_uid(ctx->user);
	put_cred(ctx->creds);
	kfree(ctx->completions);
+	kfree(ctx->cancel_hash);
	kmem_cache_free(req_cachep, ctx->fallback_req);
	kfree(ctx);
}
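
Finally, the error-handling fix: io_submit_sqe() now reports whether the request was actually queued, and io_submit_sqes() bumps submitted before calling it. A request that fails inside io_submit_sqe() has already been completed with an error CQE, so it still has to count against the batch, and the loop breaks rather than queueing further requests behind a broken link.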