Commit 4760aad3 authored by Wilfried Chauveau, committed by Carles Cufi
Browse files

arch: arm: cortex_m: Convert cpu_idle from ASM to C



Assembly is notoriously harder to maintain than C and requires core-specific
adaptations, which further impairs the readability of the code.

This change reduces the need for core-specific conditional compilation and
unifies the IRQ locking code.

Signed-off-by: Wilfried Chauveau <wilfried.chauveau@arm.com>

# Conflicts:
#	soc/arm/nordic_nrf/nrf53/soc_cpu_idle.h
parent f11027df
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -16,7 +16,7 @@ zephyr_library_sources(
  irq_manage.c
  prep_c.c
  thread.c
  cpu_idle.S
  cpu_idle.c
  )

zephyr_library_sources_ifndef(CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER irq_init.c)
+128 −0
Original line number Diff line number Diff line
/*
 * Copyright (c) 2013-2014 Wind River Systems, Inc.
 * Copyright (c) 2023 Arm Limited
 *
 * SPDX-License-Identifier: Apache-2.0
 */
@@ -7,102 +8,60 @@
/**
 * @file
 * @brief ARM Cortex-M power management
 *
 */

#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <zephyr/kernel.h>
#include <cmsis_core.h>

#if defined(CONFIG_ARM_ON_EXIT_CPU_IDLE)
#include <soc_cpu_idle.h>
#endif

_ASM_FILE_PROLOGUE

GTEXT(z_arm_cpu_idle_init)
GTEXT(arch_cpu_idle)
GTEXT(arch_cpu_atomic_idle)

#define _SCB_SCR		0xE000ED10

#define _SCB_SCR_SEVONPEND	(1 << 4)
#define _SCB_SCR_SLEEPDEEP	(1 << 2)
#define _SCB_SCR_SLEEPONEXIT	(1 << 1)
#define _SCR_INIT_BITS		_SCB_SCR_SEVONPEND

.macro _sleep_if_allowed wait_instruction
#if defined(CONFIG_ARM_ON_ENTER_CPU_IDLE_HOOK)
	push	{r0, lr}
	bl	z_arm_on_enter_cpu_idle
	/* Skip the wait instruction if on_enter_cpu_idle() returns false. */
	cmp	r0, #0
	beq	_skip_\@
#endif /* CONFIG_ARM_ON_ENTER_CPU_IDLE_HOOK */

	/*
	 * Wait for all memory transactions to complete before entering low
	 * power state.
	 */
	dsb
	\wait_instruction

#if defined(CONFIG_ARM_ON_EXIT_CPU_IDLE)
	/* Inline the macro provided by SoC-specific code */
	SOC_ON_EXIT_CPU_IDLE
#endif /* CONFIG_ARM_ON_EXIT_CPU_IDLE */

#if defined(CONFIG_ARM_ON_ENTER_CPU_IDLE_HOOK)
_skip_\@:
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
	pop	{r0, r1}
	mov	lr, r1
#else
	pop	{r0, lr}
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
#endif /* CONFIG_ARM_ON_ENTER_CPU_IDLE_HOOK */
.endm

/**
 *
 * @brief Initialization of CPU idle
 *
 * Only called by arch_kernel_init(). Sets SEVONPEND bit once for the system's
 * duration.
 *
 * C function prototype:
 *
 * void z_arm_cpu_idle_init(void);
 */
/*
 * One-time CPU idle initialization, called from arch_kernel_init().
 *
 * Sets SEVONPEND so that a pending interrupt generates an event and can
 * wake the core from WFE while PRIMASK is set (relied upon by
 * arch_cpu_atomic_idle(), which deliberately never sets it again).
 *
 * NOTE(review): this is a plain store, not a read-modify-write, so any
 * other SCR bits (SLEEPDEEP, SLEEPONEXIT) are cleared here — confirm
 * that no earlier boot code expects them to survive.
 */
void z_arm_cpu_idle_init(void)
{
	SCB->SCR = SCB_SCR_SEVONPEND_Msk;
}

SECTION_FUNC(TEXT, z_arm_cpu_idle_init)
	ldr	r1, =_SCB_SCR
	movs.n	r2, #_SCR_INIT_BITS
	str	r2, [r1]
	bx	lr

SECTION_FUNC(TEXT, arch_cpu_idle)
#if defined(CONFIG_TRACING) || \
    defined(CONFIG_ARM_ON_ENTER_CPU_IDLE_PREPARE_HOOK)
	push	{r0, lr}
#if defined(CONFIG_ARM_ON_EXIT_CPU_IDLE)
#define ON_EXIT_IDLE_HOOK SOC_ON_EXIT_CPU_IDLE
#else
#define ON_EXIT_IDLE_HOOK do {} while (false)
#endif

#ifdef CONFIG_TRACING
	bl	sys_trace_idle
#if defined(CONFIG_ARM_ON_ENTER_CPU_IDLE_HOOK)
#define SLEEP_IF_ALLOWED(wait_instr) do { \
	if (!z_arm_on_enter_cpu_idle()) { \
		__DSB(); \
		wait_instr(); \
		ON_EXIT_IDLE_HOOK; \
	} \
} while (false)
#else
#define SLEEP_IF_ALLOWED(wait_instr) do { \
	__DSB(); \
	wait_instr(); \
	ON_EXIT_IDLE_HOOK; \
} while (false)
#endif
#ifdef CONFIG_ARM_ON_ENTER_CPU_IDLE_PREPARE_HOOK
	bl	z_arm_on_enter_cpu_idle_prepare

void arch_cpu_idle(void)
{
#if defined(CONFIG_TRACING)
	sys_trace_idle();
#endif

#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
	pop	{r0, r1}
	mov	lr, r1
#else
	pop	{r0, lr}
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
#if CONFIG_ARM_ON_ENTER_CPU_IDLE_PREPARE_HOOK
	z_arm_on_enter_cpu_idle_prepare();
#endif

#if defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
	/*
	 * PRIMASK is always cleared on ARMv7-M and ARMv8-M Mainline (not used
	 * PRIMASK is always cleared on ARMv7-M and ARMv8-M (not used
	 * for interrupt locking), and configuring BASEPRI to the lowest
	 * priority to ensure wake-up will cause interrupts to be serviced
	 * before entering low power state.
@@ -110,15 +69,14 @@ SECTION_FUNC(TEXT, arch_cpu_idle)
	 * Set PRIMASK before configuring BASEPRI to prevent interruption
	 * before wake-up.
	 */
	cpsid	i
	__disable_irq();

	/*
	 * Set wake-up interrupt priority to the lowest and synchronise to
	 * ensure that this is visible to the WFI instruction.
	 */
	eors.n	r0, r0
	msr	BASEPRI, r0
	isb
	__set_BASEPRI(0);
	__ISB();
#else
	/*
	 * For all the other ARM architectures that do not implement BASEPRI,
@@ -127,75 +85,44 @@ SECTION_FUNC(TEXT, arch_cpu_idle)
	 * set by the caller as part of interrupt locking if necessary
	 * (i.e. if the caller sets _kernel.idle).
	 */
#endif /* CONFIG_ARMV7_M_ARMV8_M_MAINLINE */

	/* Enter low power state */
	_sleep_if_allowed wfi
#endif

	/*
	 * Clear PRIMASK and flush instruction buffer to immediately service
	 * the wake-up interrupt.
	 * Wait for all memory transactions to complete before entering low
	 * power state.
	 */
	cpsie	i
	isb

	bx	lr
	SLEEP_IF_ALLOWED(__WFI);

SECTION_FUNC(TEXT, arch_cpu_atomic_idle)
#if defined(CONFIG_TRACING) || \
    defined(CONFIG_ARM_ON_ENTER_CPU_IDLE_PREPARE_HOOK)
	push	{r0, lr}
	__enable_irq();
	__ISB();
}

#ifdef CONFIG_TRACING
	bl	sys_trace_idle
#endif
#ifdef CONFIG_ARM_ON_ENTER_CPU_IDLE_PREPARE_HOOK
	bl	z_arm_on_enter_cpu_idle_prepare
void arch_cpu_atomic_idle(unsigned int key)
{
#if defined(CONFIG_TRACING)
	sys_trace_idle();
#endif

#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
	pop	{r0, r1}
	mov	lr, r1
#else
	pop	{r0, lr}
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
#if CONFIG_ARM_ON_ENTER_CPU_IDLE_PREPARE_HOOK
	z_arm_on_enter_cpu_idle_prepare();
#endif

	/*
	 * Lock PRIMASK while sleeping: wfe will still get interrupted by
	 * incoming interrupts but the CPU will not service them right away.
	 */
	cpsid	i
	__disable_irq();

	/*
	 * No need to set SEVONPEND, it's set once in z_arm_cpu_idle_init()
	 * and never touched again.
	 */

	/* r0: interrupt mask from caller */

#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
	/* No BASEPRI, call wfe directly
	 * (SEVONPEND is set in z_arm_cpu_idle_init())
	/*
	 * Wait for all memory transactions to complete before entering low
	 * power state.
	 */
	_sleep_if_allowed wfe
	SLEEP_IF_ALLOWED(__WFE);

	cmp	r0, #0
	bne	_irq_disabled
	cpsie	i
_irq_disabled:

#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
	/* r1: zero, for setting BASEPRI (needs a register) */
	eors.n	r1, r1

	/* unlock BASEPRI so wfe gets interrupted by incoming interrupts */
	msr	BASEPRI, r1

	_sleep_if_allowed wfe

	msr	BASEPRI, r0
	cpsie	i
#else
#error Unknown ARM architecture
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
	bx	lr
	arch_irq_unlock(key);
}
+14 −11
Original line number Diff line number Diff line
@@ -8,19 +8,22 @@
 * @file SoC extensions of cpu_idle.S for the Nordic Semiconductor nRF53 processors family.
 */


#if defined(_ASMLANGUAGE)
#define SOC_ON_EXIT_CPU_IDLE_4 \
	__NOP(); \
	__NOP(); \
	__NOP(); \
	__NOP();
#define SOC_ON_EXIT_CPU_IDLE_8 \
	SOC_ON_EXIT_CPU_IDLE_4 \
	SOC_ON_EXIT_CPU_IDLE_4

#if defined(CONFIG_SOC_NRF53_ANOMALY_168_WORKAROUND_FOR_EXECUTION_FROM_RAM)
#define SOC_ON_EXIT_CPU_IDLE \
	.rept 26; \
	nop; \
	.endr
	SOC_ON_EXIT_CPU_IDLE_8; \
	SOC_ON_EXIT_CPU_IDLE_8; \
	SOC_ON_EXIT_CPU_IDLE_8; \
	__NOP(); \
	__NOP();
#elif defined(CONFIG_SOC_NRF53_ANOMALY_168_WORKAROUND)
#define SOC_ON_EXIT_CPU_IDLE \
	.rept 8; \
	nop; \
	.endr
#define SOC_ON_EXIT_CPU_IDLE SOC_ON_EXIT_CPU_IDLE_8
#endif

#endif /* _ASMLANGUAGE */