Commit 9864f5b5 authored by Peter Zijlstra

cpuidle: Move trace_cpu_idle() into generic code



Remove trace_cpu_idle() from the arch_cpu_idle() implementations and
put it in the generic code, right before disabling RCU. Gets rid of
more trace_*_rcuidle() users.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Tested-by: Marco Elver <elver@google.com>
Link: https://lkml.kernel.org/r/20200821085348.428433395@infradead.org
parent bf9282dc
arch/arm/mach-omap2/pm34xx.c  +0 −4
@@ -298,11 +298,7 @@ static void omap3_pm_idle(void)
	if (omap_irq_pending())
		return;

-	trace_cpu_idle_rcuidle(1, smp_processor_id());
-
	omap_sram_idle();
-
-	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
}

#ifdef CONFIG_SUSPEND
arch/arm64/kernel/process.c  +0 −2
@@ -123,10 +123,8 @@ void arch_cpu_idle(void)
	 * This should do all the clock switching and wait for interrupt
	 * tricks
	 */
-	trace_cpu_idle_rcuidle(1, smp_processor_id());
	cpu_do_idle();
	local_irq_enable();
-	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
}

#ifdef CONFIG_HOTPLUG_CPU
arch/s390/kernel/idle.c  +1 −2
@@ -33,14 +33,13 @@ void enabled_wait(void)
		PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
	clear_cpu_flag(CIF_NOHZ_DELAY);

-	trace_cpu_idle_rcuidle(1, smp_processor_id());
	local_irq_save(flags);
	/* Call the assembler magic in entry.S */
	psw_idle(idle, psw_mask);
	local_irq_restore(flags);
-	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());

	/* Account time spent with enabled wait psw loaded as idle time. */
+	/* XXX seqcount has tracepoints that require RCU */
	write_seqcount_begin(&idle->seqcount);
	idle_time = idle->clock_idle_exit - idle->clock_idle_enter;
	idle->clock_idle_enter = idle->clock_idle_exit = 0ULL;
arch/x86/kernel/process.c  +0 −4
@@ -684,9 +684,7 @@ void arch_cpu_idle(void)
 */
void __cpuidle default_idle(void)
{
-	trace_cpu_idle_rcuidle(1, smp_processor_id());
	safe_halt();
-	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
}
#if defined(CONFIG_APM_MODULE) || defined(CONFIG_HALTPOLL_CPUIDLE_MODULE)
EXPORT_SYMBOL(default_idle);
@@ -792,7 +790,6 @@ static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
static __cpuidle void mwait_idle(void)
{
	if (!current_set_polling_and_test()) {
-		trace_cpu_idle_rcuidle(1, smp_processor_id());
		if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) {
			mb(); /* quirk */
			clflush((void *)&current_thread_info()->flags);
@@ -804,7 +801,6 @@ static __cpuidle void mwait_idle(void)
			__sti_mwait(0, 0);
		else
			local_irq_enable();
-		trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
	} else {
		local_irq_enable();
	}
kernel/sched/idle.c  +3 −0
@@ -91,11 +91,14 @@ void __cpuidle default_idle_call(void)
	if (current_clr_polling_and_test()) {
		local_irq_enable();
	} else {
+
+		trace_cpu_idle(1, smp_processor_id());
		stop_critical_timings();
		rcu_idle_enter();
		arch_cpu_idle();
		rcu_idle_exit();
		start_critical_timings();
+		trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
	}
}
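
Note on the ordering (not part of the patch): the last hunk places the tracepoint while RCU is still watching, which is what lets the architecture code drop the trace_*_rcuidle() variants. A rough sketch of the resulting generic path, with details and polling/error handling omitted:

void __cpuidle default_idle_call(void)
{
	if (current_clr_polling_and_test()) {
		local_irq_enable();
	} else {
		trace_cpu_idle(1, smp_processor_id());	/* RCU still watching */
		stop_critical_timings();
		rcu_idle_enter();			/* RCU stops watching ... */
		arch_cpu_idle();			/* ... arch code must not trace here */
		rcu_idle_exit();			/* ... RCU watching again */
		start_critical_timings();
		trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
	}
}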