Commit 8678969e authored by Glauber Costa, committed by Ingo Molnar
Browse files

x86: merge smp_send_reschedule



The function definition is moved to the common header; the x86_64 version is now called
native_smp_send_reschedule.

Signed-off-by: Glauber Costa <gcosta@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent c76cb368
Loading
Loading
Loading
Loading
+5 −2
Original line number Diff line number Diff line
@@ -290,8 +290,9 @@ void flush_tlb_all(void)
 * anything. Worst case is that we lose a reschedule ...
 */

void smp_send_reschedule(int cpu)
/*
 * Send the RESCHEDULE_VECTOR IPI to @cpu.  Per the comment above, this
 * is best-effort: a lost IPI costs at most a missed reschedule, so no
 * delivery confirmation is attempted.
 */
static void native_smp_send_reschedule(int cpu)
{
	/* Diagnose callers targeting an offline CPU; the IPI is still sent. */
	WARN_ON(cpu_is_offline(cpu));
	send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
}

@@ -528,5 +529,7 @@ asmlinkage void smp_call_function_interrupt(void)
	}
}

struct smp_ops smp_ops;
/*
 * Global SMP operations table; the native reschedule hook is installed
 * as the default.  Exported (GPL-only) so other code can dispatch
 * through the table.
 */
struct smp_ops smp_ops = {
	.smp_send_reschedule = native_smp_send_reschedule,
};
EXPORT_SYMBOL_GPL(smp_ops);
+5 −0
Original line number Diff line number Diff line
@@ -23,6 +23,11 @@ struct smp_ops {

#ifdef CONFIG_SMP
extern struct smp_ops smp_ops;

/*
 * Common entry point: forward to the smp_ops hook so the
 * architecture-specific implementation is reached indirectly.
 */
static inline void smp_send_reschedule(int cpu)
{
	smp_ops.smp_send_reschedule(cpu);
}
#endif

#ifdef CONFIG_X86_32
+0 −4
Original line number Diff line number Diff line
@@ -60,10 +60,6 @@ static inline void smp_send_stop(void)
{
	smp_ops.smp_send_stop();
}
/* Thin wrapper: dispatch the reschedule request through the smp_ops table. */
static inline void smp_send_reschedule(int cpu)
{
	smp_ops.smp_send_reschedule(cpu);
}
static inline int smp_call_function_mask(cpumask_t mask,
					 void (*func) (void *info), void *info,
					 int wait)
+0 −2
Original line number Diff line number Diff line
@@ -65,8 +65,6 @@ static inline int num_booting_cpus(void)
	return cpus_weight(cpu_callout_map);
}

extern void smp_send_reschedule(int cpu);

#else /* CONFIG_SMP */

extern unsigned int boot_cpu_id;