Commit 11bd67db authored by Andy Ross, committed by Anas Nashif
Browse files

kernel/idle: Use normal idle in SMP when IPI is available



Now that we have a working IPI framework, there's no reason for the
default spin loop for the SMP idle thread.  Just use the default
platform idle and send an IPI when a new thread is readied.

Long term, this can be optimized if necessary (e.g. only send the IPI
to idling CPUs, or check priorities, etc...), but for a 2-cpu system
this is a very reasonable default.

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
parent 6c283ca3
Loading
Loading
Loading
Loading
+7 −9
Original line number Diff line number Diff line
@@ -19,6 +19,10 @@
#define IDLE_THRESH 1
#endif

/* Fallback idle spin loop for SMP platforms without a working IPI.
 *
 * Note: the original form expanded to an expression containing
 * defined(...).  If macro expansion produces the token "defined"
 * inside an #if directive, the behavior is undefined (C11 6.10.1p4),
 * and some compilers evaluate it differently or warn (-Wexpansion-to-defined).
 * Evaluate the configs directly and define SMP_FALLBACK as a plain
 * 0/1 constant so "#if SMP_FALLBACK" remains well-defined.
 */
#if defined(CONFIG_SMP) && !defined(CONFIG_SCHED_IPI_SUPPORTED)
#define SMP_FALLBACK 1
#else
#define SMP_FALLBACK 0
#endif

#ifdef CONFIG_SYS_POWER_MANAGEMENT
/*
 * Used to allow _sys_suspend() implementation to control notification
@@ -56,7 +60,7 @@ void __attribute__((weak)) _sys_resume_from_deep_sleep(void)
 *
 * @return N/A
 */
#ifndef CONFIG_SMP
#if !SMP_FALLBACK
static void set_kernel_idle_time_in_ticks(s32_t ticks)
{
#ifdef CONFIG_SYS_POWER_MANAGEMENT
@@ -145,18 +149,12 @@ void idle(void *unused1, void *unused2, void *unused3)
	__idle_time_stamp = k_cycle_get_32();
#endif

#ifdef CONFIG_SMP
	/* Simplified idle for SMP CPUs pending driver support.  The
	 * busy waiting is needed to prevent lock contention.  Long
	 * term we need to wake up idle CPUs with an IPI.
	 */
	while (true) {
#if SMP_FALLBACK
		k_busy_wait(100);
		k_yield();
	}
#else
	for (;;) {
		(void)irq_lock();
		(void)z_arch_irq_lock();
		sys_power_save_idle();

		IDLE_YIELD_IF_COOP();
+3 −0
Original line number Diff line number Diff line
@@ -336,6 +336,9 @@ void z_add_thread_to_ready_q(struct k_thread *thread)
		_priq_run_add(&_kernel.ready_q.runq, thread);
		z_mark_thread_as_queued(thread);
		update_cache(0);
#ifdef CONFIG_SCHED_IPI_SUPPORTED
		z_arch_sched_ipi();
#endif
	}
}