Commit c172e0a3 authored by Mathieu Desnoyers, committed by Ingo Molnar

sched/membarrier: Return -ENOMEM to userspace on memory allocation failure



Remove the IPI fallback code from membarrier that dealt with the very
infrequent case of cpumask memory allocation failure. Use GFP_KERNEL
rather than GFP_NOWAIT, and relax the blocking guarantees of the
expedited membarrier system call commands, allowing them to block
while waiting for memory to be made available.

In addition, -ENOMEM is now returned to user-space if the cpumask
memory allocation fails.
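
For illustration, a minimal user-space caller that handles the new error
case might look like the sketch below. The membarrier(2) command constants
come from <linux/membarrier.h>; the wrapper function and program structure
are our own, not part of this patch:

#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/membarrier.h>

/* Thin wrapper; glibc has no membarrier() wrapper in this era. */
static int membarrier(int cmd, int flags)
{
	return syscall(__NR_membarrier, cmd, flags);
}

int main(void)
{
	/* Private expedited commands require prior registration. */
	if (membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0)) {
		perror("membarrier register");
		return 1;
	}
	if (membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0)) {
		if (errno == ENOMEM) {
			/* New with this patch: cpumask allocation failed. */
			fprintf(stderr, "membarrier: out of memory\n");
			return 1;
		}
		perror("membarrier");
		return 1;
	}
	return 0;
}

A command that fails with -ENOMEM can simply be retried once memory
pressure subsides.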

Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Chris Metcalf <cmetcalf@ezchip.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Eric W. Biederman <ebiederm@xmission.com>
Cc: Kirill Tkhai <tkhai@yandex.ru>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Paul E. McKenney <paulmck@linux.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Russell King - ARM Linux admin <linux@armlinux.org.uk>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: https://lkml.kernel.org/r/20190919173705.2181-8-mathieu.desnoyers@efficios.com


Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent c6d68c1c
kernel/sched/membarrier.c: +20 −43
@@ -66,7 +66,6 @@ void membarrier_exec_mmap(struct mm_struct *mm)
 static int membarrier_global_expedited(void)
 {
 	int cpu;
-	bool fallback = false;
 	cpumask_var_t tmpmask;
 
 	if (num_online_cpus() == 1)
@@ -78,15 +77,8 @@ static int membarrier_global_expedited(void)
 	 */
 	smp_mb();	/* system call entry is not a mb. */
 
-	/*
-	 * Expedited membarrier commands guarantee that they won't
-	 * block, hence the GFP_NOWAIT allocation flag and fallback
-	 * implementation.
-	 */
-	if (!zalloc_cpumask_var(&tmpmask, GFP_NOWAIT)) {
-		/* Fallback for OOM. */
-		fallback = true;
-	}
+	if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
+		return -ENOMEM;
 
 	cpus_read_lock();
 	rcu_read_lock();
@@ -117,18 +109,15 @@ static int membarrier_global_expedited(void)
 		if (p->flags & PF_KTHREAD)
 			continue;
 
-		if (!fallback)
-			__cpumask_set_cpu(cpu, tmpmask);
-		else
-			smp_call_function_single(cpu, ipi_mb, NULL, 1);
+		__cpumask_set_cpu(cpu, tmpmask);
 	}
 	rcu_read_unlock();
-	if (!fallback) {
-		preempt_disable();
-		smp_call_function_many(tmpmask, ipi_mb, NULL, 1);
-		preempt_enable();
-		free_cpumask_var(tmpmask);
-	}
+
+	preempt_disable();
+	smp_call_function_many(tmpmask, ipi_mb, NULL, 1);
+	preempt_enable();
+
+	free_cpumask_var(tmpmask);
 	cpus_read_unlock();
 
 	/*
@@ -143,7 +132,6 @@ static int membarrier_global_expedited(void)
 static int membarrier_private_expedited(int flags)
 {
 	int cpu;
-	bool fallback = false;
 	cpumask_var_t tmpmask;
 	struct mm_struct *mm = current->mm;
 
@@ -168,15 +156,8 @@ static int membarrier_private_expedited(int flags)
 	 */
 	smp_mb();	/* system call entry is not a mb. */
 
-	/*
-	 * Expedited membarrier commands guarantee that they won't
-	 * block, hence the GFP_NOWAIT allocation flag and fallback
-	 * implementation.
-	 */
-	if (!zalloc_cpumask_var(&tmpmask, GFP_NOWAIT)) {
-		/* Fallback for OOM. */
-		fallback = true;
-	}
+	if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
+		return -ENOMEM;
 
 	cpus_read_lock();
 	rcu_read_lock();
@@ -195,20 +176,16 @@ static int membarrier_private_expedited(int flags)
 			continue;
 		rcu_read_lock();
 		p = rcu_dereference(cpu_rq(cpu)->curr);
-		if (p && p->mm == mm) {
-			if (!fallback)
-				__cpumask_set_cpu(cpu, tmpmask);
-			else
-				smp_call_function_single(cpu, ipi_mb, NULL, 1);
-		}
+		if (p && p->mm == mm)
+			__cpumask_set_cpu(cpu, tmpmask);
 	}
 	rcu_read_unlock();
-	if (!fallback) {
-		preempt_disable();
-		smp_call_function_many(tmpmask, ipi_mb, NULL, 1);
-		preempt_enable();
-		free_cpumask_var(tmpmask);
-	}
+
+	preempt_disable();
+	smp_call_function_many(tmpmask, ipi_mb, NULL, 1);
+	preempt_enable();
+
+	free_cpumask_var(tmpmask);
 	cpus_read_unlock();
 
 	/*
@@ -264,7 +241,7 @@ static int sync_runqueues_membarrier_state(struct mm_struct *mm)
 		struct rq *rq = cpu_rq(cpu);
 		struct task_struct *p;
 
-		p = rcu_dereference(&rq->curr);
+		p = rcu_dereference(rq->curr);
 		if (p && p->mm == mm)
 			__cpumask_set_cpu(cpu, tmpmask);
 	}
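
Taken together, both expedited paths now share the same simplified shape:
allocate the cpumask with GFP_KERNEL (which may block), bail out with
-ENOMEM on failure, collect the target CPUs, and issue a single batched
IPI pass. A condensed sketch of that flow for the private-expedited case
(not the verbatim kernel source; the function name is ours):

static int membarrier_private_expedited_sketch(struct mm_struct *mm)
{
	cpumask_var_t tmpmask;
	int cpu;

	/* GFP_KERNEL may block; allocation failure is now reported
	 * to user-space instead of triggering an IPI fallback. */
	if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
		return -ENOMEM;

	cpus_read_lock();
	rcu_read_lock();
	for_each_online_cpu(cpu) {
		struct task_struct *p;

		p = rcu_dereference(cpu_rq(cpu)->curr);
		if (p && p->mm == mm)
			__cpumask_set_cpu(cpu, tmpmask);
	}
	rcu_read_unlock();

	/* One batched IPI pass over the collected CPUs. */
	preempt_disable();
	smp_call_function_many(tmpmask, ipi_mb, NULL, 1);
	preempt_enable();

	free_cpumask_var(tmpmask);
	cpus_read_unlock();
	return 0;
}

The rcu_dereference(rq->curr) change in sync_runqueues_membarrier_state()
is a separate one-character fix: rcu_dereference() must be applied to the
RCU-protected pointer itself, not to the address of the field holding it.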