Commit 5caa1c08 authored by Oleg Nesterov's avatar Oleg Nesterov Committed by Ingo Molnar
Browse files

stop_machine: Introduce __cpu_stop_queue_work() and cpu_stop_queue_two_works()



Preparation to simplify the review of the next change. Add two simple
helpers, __cpu_stop_queue_work() and cpu_stop_queue_two_works() which
simply take a bit of code from their callers.

Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: heiko.carstens@de.ibm.com
Link: http://lkml.kernel.org/r/20151008145134.GA18146@redhat.com


Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 233e7f26
Loading
Loading
Loading
Loading
+26 −11
Original line number Original line Diff line number Diff line
@@ -73,21 +73,24 @@ static void cpu_stop_signal_done(struct cpu_stop_done *done, bool executed)
	}
	}
}
}


/*
 * Append @work to @stopper's work list and wake the stopper thread to
 * process it.  The work must be queued before the wakeup so the thread
 * sees it when it runs.
 *
 * Caller must hold stopper->lock (see cpu_stop_queue_work(), which takes
 * it with spin_lock_irqsave() around this call).
 */
static void __cpu_stop_queue_work(struct cpu_stopper *stopper,
					struct cpu_stop_work *work)
{
	list_add_tail(&work->list, &stopper->works);
	wake_up_process(stopper->thread);
}

/* queue @work to @stopper.  if offline, @work is completed immediately */
/* queue @work to @stopper.  if offline, @work is completed immediately */
static void cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
static void cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
{
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

	unsigned long flags;
	unsigned long flags;


	spin_lock_irqsave(&stopper->lock, flags);
	spin_lock_irqsave(&stopper->lock, flags);

	if (stopper->enabled)
	if (stopper->enabled) {
		__cpu_stop_queue_work(stopper, work);
		list_add_tail(&work->list, &stopper->works);
	else
		wake_up_process(stopper->thread);
	} else
		cpu_stop_signal_done(work->done, false);
		cpu_stop_signal_done(work->done, false);

	spin_unlock_irqrestore(&stopper->lock, flags);
	spin_unlock_irqrestore(&stopper->lock, flags);
}
}


@@ -213,6 +216,16 @@ static int multi_cpu_stop(void *data)
	return err;
	return err;
}
}


/*
 * Queue @work1 on @cpu1 and @work2 on @cpu2 while holding both per-cpu
 * stop_cpus_lock locks via lg_double_lock(), so the pair of works is
 * queued as a unit.  NOTE(review): presumably this serializes against a
 * concurrent stop_cpus() queueing pass — confirm against lglock semantics.
 *
 * Always returns 0 today; the int return leaves room for callers to
 * handle a failure path added later.
 */
static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
				    int cpu2, struct cpu_stop_work *work2)
{
	lg_double_lock(&stop_cpus_lock, cpu1, cpu2);
	cpu_stop_queue_work(cpu1, work1);
	cpu_stop_queue_work(cpu2, work2);
	lg_double_unlock(&stop_cpus_lock, cpu1, cpu2);

	return 0;
}
/**
/**
 * stop_two_cpus - stops two cpus
 * stop_two_cpus - stops two cpus
 * @cpu1: the cpu to stop
 * @cpu1: the cpu to stop
@@ -260,10 +273,12 @@ int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *
		return -ENOENT;
		return -ENOENT;
	}
	}


	lg_double_lock(&stop_cpus_lock, cpu1, cpu2);
	if (cpu1 > cpu2)
	cpu_stop_queue_work(cpu1, &work1);
		swap(cpu1, cpu2);
	cpu_stop_queue_work(cpu2, &work2);
	if (cpu_stop_queue_two_works(cpu1, &work1, cpu2, &work2)) {
	lg_double_unlock(&stop_cpus_lock, cpu1, cpu2);
		preempt_enable();
		return -ENOENT;
	}


	preempt_enable();
	preempt_enable();