Commit 6ad4bf6e authored by Linus Torvalds

Merge tag 'io_uring-5.10-2020-10-12' of git://git.kernel.dk/linux-block

Pull io_uring updates from Jens Axboe:

 - Add blkcg accounting for io-wq offload (Dennis)

 - A use-after-free fix for io-wq (Hillf)

 - Cancelation fixes and improvements

 - Use proper files_struct references for offload

 - Cleanup of io_uring_get_socket() since that can now go into our own
   header

 - SQPOLL fixes and cleanups, and support for sharing the thread (a hedged
   attach sketch follows the shortlog below)
 - Improvement to how page accounting is done for registered buffers and
   huge pages, accounting the real pinned state

 - Series cleaning up the xarray code (Willy)

 - Various cleanups, refactoring, and improvements (Pavel)

 - Use raw spinlock for io-wq (Sebastian)

 - Add support for ring restrictions (Stefano); a hedged usage sketch
   follows just below this list

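The ring restrictions feature lets a ring be created in a disabled state
(IORING_SETUP_R_DISABLED), constrained to an allowlist of register and sqe
opcodes, and then enabled, after which the restrictions are sealed. A hedged
userspace sketch of that flow, assuming the 5.10 uapi names in
<linux/io_uring.h>; error handling trimmed, not verbatim kernel or liburing
code:

	/* Allow only readv/writev SQEs on a ring created with
	 * IORING_SETUP_R_DISABLED, then enable it. */
	#include <linux/io_uring.h>
	#include <string.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	static int ring_register(int fd, unsigned op, void *arg, unsigned nr)
	{
		return syscall(__NR_io_uring_register, fd, op, arg, nr);
	}

	static int restrict_ring(int ring_fd)
	{
		struct io_uring_restriction res[2];

		memset(res, 0, sizeof(res));
		res[0].opcode = IORING_RESTRICTION_SQE_OP;
		res[0].sqe_op = IORING_OP_READV;
		res[1].opcode = IORING_RESTRICTION_SQE_OP;
		res[1].sqe_op = IORING_OP_WRITEV;

		/* only allowed while the ring is still disabled */
		if (ring_register(ring_fd, IORING_REGISTER_RESTRICTIONS, res, 2))
			return -1;
		/* enabling the ring seals the restrictions */
		return ring_register(ring_fd, IORING_REGISTER_ENABLE_RINGS, NULL, 0);
	}
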
* tag 'io_uring-5.10-2020-10-12' of git://git.kernel.dk/linux-block: (62 commits)
  io_uring: keep a pointer ref_node in file_data
  io_uring: refactor *files_register()'s error paths
  io_uring: clean file_data access in files_register
  io_uring: don't delay io_init_req() error check
  io_uring: clean leftovers after splitting issue
  io_uring: remove timeout.list after hrtimer cancel
  io_uring: use a separate struct for timeout_remove
  io_uring: improve submit_state.ios_left accounting
  io_uring: simplify io_file_get()
  io_uring: kill extra check in fixed io_file_get()
  io_uring: clean up ->files grabbing
  io_uring: don't io_prep_async_work() linked reqs
  io_uring: Convert advanced XArray uses to the normal API
  io_uring: Fix XArray usage in io_uring_add_task_file
  io_uring: Fix use of XArray in __io_uring_files_cancel
  io_uring: fix break condition for __io_uring_register() waiting
  io_uring: no need to call xa_destroy() on empty xarray
  io_uring: batch account ->req_issue and task struct references
  io_uring: kill callback_head argument for io_req_task_work_add()
  io_uring: move req preps out of io_issue_sqe()
  ...
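On SQPOLL thread sharing: a second ring can attach to an existing SQPOLL
ring's poll thread instead of spawning its own. A hedged userspace sketch,
assuming IORING_SETUP_ATTACH_WQ now applies to SQPOLL rings as this series
allows; not verbatim:

	#include <linux/io_uring.h>
	#include <string.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	/* returns a ring fd whose SQPOLL thread is shared with ring_fd */
	static int attach_sqpoll_ring(int ring_fd, unsigned entries)
	{
		struct io_uring_params p;

		memset(&p, 0, sizeof(p));
		p.flags = IORING_SETUP_SQPOLL | IORING_SETUP_ATTACH_WQ;
		p.wq_fd = ring_fd;	/* donor ring */
		return syscall(__NR_io_uring_setup, entries, &p);
	}
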
parents 3ad11d7a b2e96852
fs/exec.c (+6 −0)
@@ -62,6 +62,7 @@
 #include <linux/oom.h>
 #include <linux/compat.h>
 #include <linux/vmalloc.h>
+#include <linux/io_uring.h>
 
 #include <linux/uaccess.h>
 #include <asm/mmu_context.h>
@@ -1895,6 +1896,11 @@ static int bprm_execve(struct linux_binprm *bprm,
 	struct files_struct *displaced;
 	int retval;
 
+	/*
+	 * Cancel any io_uring activity across execve
+	 */
+	io_uring_task_cancel();
+
 	retval = unshare_files(&displaced);
 	if (retval)
 		return retval;
fs/file.c (+2 −0)
@@ -21,6 +21,7 @@
 #include <linux/rcupdate.h>
 #include <linux/close_range.h>
 #include <net/sock.h>
+#include <linux/io_uring.h>
 
 unsigned int sysctl_nr_open __read_mostly = 1024*1024;
 unsigned int sysctl_nr_open_min = BITS_PER_LONG;
@@ -452,6 +453,7 @@ void exit_files(struct task_struct *tsk)
 	struct files_struct * files = tsk->files;
 
 	if (files) {
+		io_uring_files_cancel(files);
 		task_lock(tsk);
 		tsk->files = NULL;
 		task_unlock(tsk);
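Both new call sites lean on static inlines from the new <linux/io_uring.h>
(the "own header" mentioned in the pull message), so tasks that never touched
io_uring pay only a NULL check on exit and execve. A hedged, header-style
approximation of the gating pattern (the real header differs in detail):

	#if defined(CONFIG_IO_URING)
	void __io_uring_task_cancel(void);
	void __io_uring_files_cancel(struct files_struct *files);

	/* current->io_uring is the per-task context added by this series */
	static inline void io_uring_task_cancel(void)
	{
		if (current->io_uring)
			__io_uring_task_cancel();
	}

	static inline void io_uring_files_cancel(struct files_struct *files)
	{
		if (current->io_uring)
			__io_uring_files_cancel(files);
	}
	#else
	static inline void io_uring_task_cancel(void) { }
	static inline void io_uring_files_cancel(struct files_struct *files) { }
	#endif
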
fs/io-wq.c (+113 −87)
@@ -17,6 +17,7 @@
 #include <linux/rculist_nulls.h>
 #include <linux/fs_struct.h>
 #include <linux/task_work.h>
+#include <linux/blk-cgroup.h>
 
 #include "io-wq.h"
 
@@ -26,9 +27,8 @@ enum {
 	IO_WORKER_F_UP		= 1,	/* up and active */
 	IO_WORKER_F_RUNNING	= 2,	/* account as running */
 	IO_WORKER_F_FREE	= 4,	/* worker on free list */
-	IO_WORKER_F_EXITING	= 8,	/* worker exiting */
-	IO_WORKER_F_FIXED	= 16,	/* static idle worker */
-	IO_WORKER_F_BOUND	= 32,	/* is doing bounded work */
+	IO_WORKER_F_FIXED	= 8,	/* static idle worker */
+	IO_WORKER_F_BOUND	= 16,	/* is doing bounded work */
 };
 
 enum {
@@ -57,9 +57,13 @@ struct io_worker {
 
 	struct rcu_head rcu;
 	struct mm_struct *mm;
+#ifdef CONFIG_BLK_CGROUP
+	struct cgroup_subsys_state *blkcg_css;
+#endif
 	const struct cred *cur_creds;
 	const struct cred *saved_creds;
 	struct files_struct *restore_files;
+	struct nsproxy *restore_nsproxy;
 	struct fs_struct *restore_fs;
 };
 
@@ -87,7 +91,7 @@ enum {
  */
 struct io_wqe {
 	struct {
-		spinlock_t lock;
+		raw_spinlock_t lock;
 		struct io_wq_work_list work_list;
 		unsigned long hash_map;
 		unsigned flags;
@@ -148,11 +152,12 @@ static bool __io_worker_unuse(struct io_wqe *wqe, struct io_worker *worker)
 
 	if (current->files != worker->restore_files) {
 		__acquire(&wqe->lock);
-		spin_unlock_irq(&wqe->lock);
+		raw_spin_unlock_irq(&wqe->lock);
 		dropped_lock = true;
 
 		task_lock(current);
 		current->files = worker->restore_files;
+		current->nsproxy = worker->restore_nsproxy;
 		task_unlock(current);
 	}
 
@@ -166,7 +171,7 @@ static bool __io_worker_unuse(struct io_wqe *wqe, struct io_worker *worker)
 	if (worker->mm) {
 		if (!dropped_lock) {
 			__acquire(&wqe->lock);
-			spin_unlock_irq(&wqe->lock);
+			raw_spin_unlock_irq(&wqe->lock);
 			dropped_lock = true;
 		}
 		__set_current_state(TASK_RUNNING);
@@ -175,6 +180,13 @@ static bool __io_worker_unuse(struct io_wqe *wqe, struct io_worker *worker)
 		worker->mm = NULL;
 	}
 
+#ifdef CONFIG_BLK_CGROUP
+	if (worker->blkcg_css) {
+		kthread_associate_blkcg(NULL);
+		worker->blkcg_css = NULL;
+	}
+#endif
+
 	return dropped_lock;
 }
 
@@ -200,7 +212,6 @@ static void io_worker_exit(struct io_worker *worker)
 {
 	struct io_wqe *wqe = worker->wqe;
 	struct io_wqe_acct *acct = io_wqe_get_acct(wqe, worker);
-	unsigned nr_workers;
 
 	/*
 	 * If we're not at zero, someone else is holding a brief reference
@@ -220,23 +231,19 @@ static void io_worker_exit(struct io_worker *worker)
 	worker->flags = 0;
 	preempt_enable();
 
-	spin_lock_irq(&wqe->lock);
+	raw_spin_lock_irq(&wqe->lock);
 	hlist_nulls_del_rcu(&worker->nulls_node);
 	list_del_rcu(&worker->all_list);
 	if (__io_worker_unuse(wqe, worker)) {
 		__release(&wqe->lock);
-		spin_lock_irq(&wqe->lock);
+		raw_spin_lock_irq(&wqe->lock);
 	}
 	acct->nr_workers--;
-	nr_workers = wqe->acct[IO_WQ_ACCT_BOUND].nr_workers +
-			wqe->acct[IO_WQ_ACCT_UNBOUND].nr_workers;
-	spin_unlock_irq(&wqe->lock);
-
-	/* all workers gone, wq exit can proceed */
-	if (!nr_workers && refcount_dec_and_test(&wqe->wq->refs))
-		complete(&wqe->wq->done);
+	raw_spin_unlock_irq(&wqe->lock);
 
 	kfree_rcu(worker, rcu);
+	if (refcount_dec_and_test(&wqe->wq->refs))
+		complete(&wqe->wq->done);
 }
 
 static inline bool io_wqe_run_queue(struct io_wqe *wqe)
@@ -318,6 +325,7 @@ static void io_worker_start(struct io_wqe *wqe, struct io_worker *worker)
 
 	worker->flags |= (IO_WORKER_F_UP | IO_WORKER_F_RUNNING);
 	worker->restore_files = current->files;
+	worker->restore_nsproxy = current->nsproxy;
 	worker->restore_fs = current->fs;
 	io_wqe_inc_running(wqe, worker);
 }
@@ -436,6 +444,17 @@ static void io_wq_switch_mm(struct io_worker *worker, struct io_wq_work *work)
 	work->flags |= IO_WQ_WORK_CANCEL;
 }
 
+static inline void io_wq_switch_blkcg(struct io_worker *worker,
+				      struct io_wq_work *work)
+{
+#ifdef CONFIG_BLK_CGROUP
+	if (work->blkcg_css != worker->blkcg_css) {
+		kthread_associate_blkcg(work->blkcg_css);
+		worker->blkcg_css = work->blkcg_css;
+	}
+#endif
+}
+
 static void io_wq_switch_creds(struct io_worker *worker,
 			       struct io_wq_work *work)
 {
@@ -454,6 +473,7 @@ static void io_impersonate_work(struct io_worker *worker,
 	if (work->files && current->files != work->files) {
 		task_lock(current);
 		current->files = work->files;
+		current->nsproxy = work->nsproxy;
 		task_unlock(current);
 	}
 	if (work->fs && current->fs != work->fs)
@@ -463,6 +483,7 @@ static void io_impersonate_work(struct io_worker *worker,
 	if (worker->cur_creds != work->creds)
 		io_wq_switch_creds(worker, work);
 	current->signal->rlim[RLIMIT_FSIZE].rlim_cur = work->fsize;
+	io_wq_switch_blkcg(worker, work);
 }
 
 static void io_assign_current_work(struct io_worker *worker,
@@ -504,7 +525,7 @@ get_next:
 		else if (!wq_list_empty(&wqe->work_list))
 			wqe->flags |= IO_WQE_FLAG_STALLED;
 
-		spin_unlock_irq(&wqe->lock);
+		raw_spin_unlock_irq(&wqe->lock);
 		if (!work)
 			break;
 		io_assign_current_work(worker, work);
@@ -538,17 +559,17 @@ get_next:
 				io_wqe_enqueue(wqe, linked);
 
 			if (hash != -1U && !next_hashed) {
-				spin_lock_irq(&wqe->lock);
+				raw_spin_lock_irq(&wqe->lock);
 				wqe->hash_map &= ~BIT_ULL(hash);
 				wqe->flags &= ~IO_WQE_FLAG_STALLED;
 				/* skip unnecessary unlock-lock wqe->lock */
 				if (!work)
 					goto get_next;
-				spin_unlock_irq(&wqe->lock);
+				raw_spin_unlock_irq(&wqe->lock);
 			}
 		} while (work);
 
-		spin_lock_irq(&wqe->lock);
+		raw_spin_lock_irq(&wqe->lock);
 	} while (1);
 }
 
@@ -563,7 +584,7 @@ static int io_wqe_worker(void *data)
 	while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
 		set_current_state(TASK_INTERRUPTIBLE);
 loop:
-		spin_lock_irq(&wqe->lock);
+		raw_spin_lock_irq(&wqe->lock);
 		if (io_wqe_run_queue(wqe)) {
 			__set_current_state(TASK_RUNNING);
 			io_worker_handle_work(worker);
@@ -574,7 +595,7 @@ loop:
 			__release(&wqe->lock);
 			goto loop;
 		}
-		spin_unlock_irq(&wqe->lock);
+		raw_spin_unlock_irq(&wqe->lock);
 		if (signal_pending(current))
 			flush_signals(current);
 		if (schedule_timeout(WORKER_IDLE_TIMEOUT))
@@ -586,11 +607,11 @@ loop:
 	}
 
 	if (test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
-		spin_lock_irq(&wqe->lock);
+		raw_spin_lock_irq(&wqe->lock);
 		if (!wq_list_empty(&wqe->work_list))
 			io_worker_handle_work(worker);
 		else
-			spin_unlock_irq(&wqe->lock);
+			raw_spin_unlock_irq(&wqe->lock);
 	}
 
 	io_worker_exit(worker);
@@ -630,9 +651,9 @@ void io_wq_worker_sleeping(struct task_struct *tsk)
 
 	worker->flags &= ~IO_WORKER_F_RUNNING;
 
-	spin_lock_irq(&wqe->lock);
+	raw_spin_lock_irq(&wqe->lock);
 	io_wqe_dec_running(wqe, worker);
-	spin_unlock_irq(&wqe->lock);
+	raw_spin_unlock_irq(&wqe->lock);
 }
 
 static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index)
@@ -656,7 +677,7 @@ static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index)
 		return false;
 	}
 
-	spin_lock_irq(&wqe->lock);
+	raw_spin_lock_irq(&wqe->lock);
 	hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
 	list_add_tail_rcu(&worker->all_list, &wqe->all_list);
 	worker->flags |= IO_WORKER_F_FREE;
@@ -665,11 +686,12 @@ static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index)
 	if (!acct->nr_workers && (worker->flags & IO_WORKER_F_BOUND))
 		worker->flags |= IO_WORKER_F_FIXED;
 	acct->nr_workers++;
-	spin_unlock_irq(&wqe->lock);
+	raw_spin_unlock_irq(&wqe->lock);
 
 	if (index == IO_WQ_ACCT_UNBOUND)
 		atomic_inc(&wq->user->processes);
 
+	refcount_inc(&wq->refs);
 	wake_up_process(worker->task);
 	return true;
 }
@@ -685,28 +707,63 @@ static inline bool io_wqe_need_worker(struct io_wqe *wqe, int index)
 	return acct->nr_workers < acct->max_workers;
 }
 
+static bool io_wqe_worker_send_sig(struct io_worker *worker, void *data)
+{
+	send_sig(SIGINT, worker->task, 1);
+	return false;
+}
+
+/*
+ * Iterate the passed in list and call the specific function for each
+ * worker that isn't exiting
+ */
+static bool io_wq_for_each_worker(struct io_wqe *wqe,
+				  bool (*func)(struct io_worker *, void *),
+				  void *data)
+{
+	struct io_worker *worker;
+	bool ret = false;
+
+	list_for_each_entry_rcu(worker, &wqe->all_list, all_list) {
+		if (io_worker_get(worker)) {
+			/* no task if node is/was offline */
+			if (worker->task)
+				ret = func(worker, data);
+			io_worker_release(worker);
+			if (ret)
+				break;
+		}
+	}
+
+	return ret;
+}
+
+static bool io_wq_worker_wake(struct io_worker *worker, void *data)
+{
+	wake_up_process(worker->task);
+	return false;
+}
+
 /*
  * Manager thread. Tasked with creating new workers, if we need them.
  */
 static int io_wq_manager(void *data)
 {
 	struct io_wq *wq = data;
-	int workers_to_create = num_possible_nodes();
 	int node;
 
 	/* create fixed workers */
-	refcount_set(&wq->refs, workers_to_create);
+	refcount_set(&wq->refs, 1);
 	for_each_node(node) {
 		if (!node_online(node))
 			continue;
-		if (!create_io_worker(wq, wq->wqes[node], IO_WQ_ACCT_BOUND))
-			goto err;
-		workers_to_create--;
+		if (create_io_worker(wq, wq->wqes[node], IO_WQ_ACCT_BOUND))
+			continue;
+		set_bit(IO_WQ_BIT_ERROR, &wq->state);
+		set_bit(IO_WQ_BIT_EXIT, &wq->state);
+		goto out;
 	}
 
-	while (workers_to_create--)
-		refcount_dec(&wq->refs);
-
 	complete(&wq->done);
 
 	while (!kthread_should_stop()) {
@@ -720,12 +777,12 @@ static int io_wq_manager(void *data)
 			if (!node_online(node))
 				continue;
 
-			spin_lock_irq(&wqe->lock);
+			raw_spin_lock_irq(&wqe->lock);
 			if (io_wqe_need_worker(wqe, IO_WQ_ACCT_BOUND))
 				fork_worker[IO_WQ_ACCT_BOUND] = true;
 			if (io_wqe_need_worker(wqe, IO_WQ_ACCT_UNBOUND))
 				fork_worker[IO_WQ_ACCT_UNBOUND] = true;
-			spin_unlock_irq(&wqe->lock);
+			raw_spin_unlock_irq(&wqe->lock);
 			if (fork_worker[IO_WQ_ACCT_BOUND])
 				create_io_worker(wq, wqe, IO_WQ_ACCT_BOUND);
 			if (fork_worker[IO_WQ_ACCT_UNBOUND])
@@ -738,14 +795,20 @@ static int io_wq_manager(void *data)
 	if (current->task_works)
 		task_work_run();
 
-	return 0;
-err:
-	set_bit(IO_WQ_BIT_ERROR, &wq->state);
-	set_bit(IO_WQ_BIT_EXIT, &wq->state);
-	if (refcount_sub_and_test(workers_to_create, &wq->refs))
+out:
+	if (refcount_dec_and_test(&wq->refs)) {
 		complete(&wq->done);
+		return 0;
+	}
+	/* if ERROR is set and we get here, we have workers to wake */
+	if (test_bit(IO_WQ_BIT_ERROR, &wq->state)) {
+		rcu_read_lock();
+		for_each_node(node)
+			io_wq_for_each_worker(wq->wqes[node], io_wq_worker_wake, NULL);
+		rcu_read_unlock();
+	}
 	return 0;
 }
 
 static bool io_wq_can_queue(struct io_wqe *wqe, struct io_wqe_acct *acct,
 			    struct io_wq_work *work)
@@ -821,10 +884,10 @@ static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
 	}
 
 	work_flags = work->flags;
-	spin_lock_irqsave(&wqe->lock, flags);
+	raw_spin_lock_irqsave(&wqe->lock, flags);
 	io_wqe_insert_work(wqe, work);
 	wqe->flags &= ~IO_WQE_FLAG_STALLED;
-	spin_unlock_irqrestore(&wqe->lock, flags);
+	raw_spin_unlock_irqrestore(&wqe->lock, flags);
 
 	if ((work_flags & IO_WQ_WORK_CONCURRENT) ||
 	    !atomic_read(&acct->nr_running))
@@ -850,37 +913,6 @@ void io_wq_hash_work(struct io_wq_work *work, void *val)
 	work->flags |= (IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT));
 }
 
-static bool io_wqe_worker_send_sig(struct io_worker *worker, void *data)
-{
-	send_sig(SIGINT, worker->task, 1);
-	return false;
-}
-
-/*
- * Iterate the passed in list and call the specific function for each
- * worker that isn't exiting
- */
-static bool io_wq_for_each_worker(struct io_wqe *wqe,
-				  bool (*func)(struct io_worker *, void *),
-				  void *data)
-{
-	struct io_worker *worker;
-	bool ret = false;
-
-	list_for_each_entry_rcu(worker, &wqe->all_list, all_list) {
-		if (io_worker_get(worker)) {
-			/* no task if node is/was offline */
-			if (worker->task)
-				ret = func(worker, data);
-			io_worker_release(worker);
-			if (ret)
-				break;
-		}
-	}
-
-	return ret;
-}
-
 void io_wq_cancel_all(struct io_wq *wq)
 {
 	int node;
@@ -951,13 +983,13 @@ static void io_wqe_cancel_pending_work(struct io_wqe *wqe,
 	unsigned long flags;
 
 retry:
-	spin_lock_irqsave(&wqe->lock, flags);
+	raw_spin_lock_irqsave(&wqe->lock, flags);
 	wq_list_for_each(node, prev, &wqe->work_list) {
 		work = container_of(node, struct io_wq_work, list);
 		if (!match->fn(work, match->data))
 			continue;
 		io_wqe_remove_pending(wqe, work, prev);
-		spin_unlock_irqrestore(&wqe->lock, flags);
+		raw_spin_unlock_irqrestore(&wqe->lock, flags);
 		io_run_cancel(work, wqe);
 		match->nr_pending++;
 		if (!match->cancel_all)
@@ -966,7 +998,7 @@ retry:
 		/* not safe to continue after unlock */
 		goto retry;
 	}
-	spin_unlock_irqrestore(&wqe->lock, flags);
+	raw_spin_unlock_irqrestore(&wqe->lock, flags);
 }
 
 static void io_wqe_cancel_running_work(struct io_wqe *wqe,
@@ -1074,7 +1106,7 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
 		}
 		atomic_set(&wqe->acct[IO_WQ_ACCT_UNBOUND].nr_running, 0);
 		wqe->wq = wq;
-		spin_lock_init(&wqe->lock);
+		raw_spin_lock_init(&wqe->lock);
 		INIT_WQ_LIST(&wqe->work_list);
 		INIT_HLIST_NULLS_HEAD(&wqe->free_list, 0);
 		INIT_LIST_HEAD(&wqe->all_list);
@@ -1113,12 +1145,6 @@ bool io_wq_get(struct io_wq *wq, struct io_wq_data *data)
 	return refcount_inc_not_zero(&wq->use_refs);
 }
 
-static bool io_wq_worker_wake(struct io_worker *worker, void *data)
-{
-	wake_up_process(worker->task);
-	return false;
-}
-
 static void __io_wq_destroy(struct io_wq *wq)
 {
 	int node;
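A note on the raw_spinlock_t conversion above (Sebastian): wqe->lock is taken
from io_wq_worker_sleeping() and io_wq_worker_running(), which the scheduler
invokes with preemption disabled, and on PREEMPT_RT a plain spinlock_t is a
sleeping lock that cannot be acquired there. A minimal illustration of the
constraint, with hypothetical names, assuming nothing beyond the hunks above:

	#include <linux/spinlock.h>

	/* On PREEMPT_RT, spinlock_t is backed by an rtmutex and may sleep;
	 * raw_spinlock_t always busy-waits, so it stays usable from
	 * non-preemptible context provided critical sections remain short
	 * and bounded, as wqe->lock's are. */
	static DEFINE_RAW_SPINLOCK(demo_lock);	/* hypothetical */

	static void demo_sched_hook(void)	/* mimics io_wq_worker_sleeping() */
	{
		raw_spin_lock_irq(&demo_lock);
		/* brief, non-blocking accounting only */
		raw_spin_unlock_irq(&demo_lock);
	}
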
fs/io-wq.h (+4 −0)
@@ -87,7 +87,11 @@ struct io_wq_work {
 	struct io_wq_work_node list;
 	struct files_struct *files;
 	struct mm_struct *mm;
+#ifdef CONFIG_BLK_CGROUP
+	struct cgroup_subsys_state *blkcg_css;
+#endif
 	const struct cred *creds;
+	struct nsproxy *nsproxy;
 	struct fs_struct *fs;
 	unsigned long fsize;
 	unsigned flags;
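The charging side of the blkcg change lives in the big fs/io_uring.c diff
(collapsed below): when a request is prepared for io-wq offload, the
submitting task's blkcg is captured into work->blkcg_css, and the worker
later attaches to it via io_wq_switch_blkcg() above. A hedged sketch of the
capture, approximate rather than verbatim:

	#ifdef CONFIG_BLK_CGROUP
		/* in io_uring's async-work prep: blkcg_css() returns the
		 * submitter's blkcg; the tryget guards against a dying or
		 * migrating cgroup, in which case the io is punted to the
		 * root blkcg by leaving the pointer NULL. */
		rcu_read_lock();
		req->work.blkcg_css = blkcg_css();
		if (!css_tryget_online(req->work.blkcg_css))
			req->work.blkcg_css = NULL;
		rcu_read_unlock();
	#endif
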
fs/io_uring.c (+1395 −786)
[diff collapsed: preview size limit exceeded]