Commit 6544d1dd authored by Stephanos Ioannidis, committed by Carles Cufí
Browse files

Core-A: Import CMSIS-Core(A) 1.2.0 (CMSIS 5.7.0)

This commit imports the CMSIS-Core(A) 1.2.0, which is part of the
CMSIS 5.7.0 release.

Origin: CMSIS_5
License: Apache-2.0
URL: https://github.com/ARM-software/CMSIS_5/tree/5.7.0


commit: a65b7c9a3e6502127fdb80eb288d8cbdf251a6f4
Purpose: Provide a hardware interface to the Arm Cortex-A architecture
Maintained-by: External

Signed-off-by: Stephanos Ioannidis <root@stephanos.io>
parent 0e3e44cb
Loading
Loading
Loading
Loading
+5 −17
Original line number Diff line number Diff line
/**************************************************************************//**
 * @file     cmsis_armcc.h
 * @brief    CMSIS compiler specific macros, functions, instructions
 * @version  V1.0.3
 * @date     15. May 2019
 * @version  V1.0.4
 * @date     30. July 2019
 ******************************************************************************/
/*
 * Copyright (c) 2009-2019 Arm Limited. All rights reserved.
@@ -114,29 +114,17 @@
/**
  \brief   Instruction Synchronization Barrier
 */
#define __ISB() do {\
                   __schedule_barrier();\
                   __isb(0xF);\
                   __schedule_barrier();\
                } while (0U)
#define __ISB()                           __isb(0xF)

/**
  \brief   Data Synchronization Barrier
 */
#define __DSB() do {\
                   __schedule_barrier();\
                   __dsb(0xF);\
                   __schedule_barrier();\
                } while (0U)
#define __DSB()                           __dsb(0xF)

/**
  \brief   Data Memory Barrier
 */
#define __DMB() do {\
                   __schedule_barrier();\
                   __dmb(0xF);\
                   __schedule_barrier();\
                } while (0U)
#define __DMB()                           __dmb(0xF)

/**
  \brief   Reverse byte order (32 bit)
+12 −19
Original line number Diff line number Diff line
/**************************************************************************//**
 * @file     cmsis_armclang.h
 * @brief    CMSIS compiler specific macros, functions, instructions
 * @version  V1.1.1
 * @date     15. May 2019
 * @version  V1.2.0
 * @date     05. August 2019
 ******************************************************************************/
/*
 * Copyright (c) 2009-2019 Arm Limited. All rights reserved.
@@ -130,29 +130,17 @@
/**
  \brief   Instruction Synchronization Barrier
 */
#define __ISB() do {\
                   __schedule_barrier();\
                   __builtin_arm_isb(0xF);\
                   __schedule_barrier();\
                } while (0U)
#define __ISB()                           __builtin_arm_isb(0xF)

/**
  \brief   Data Synchronization Barrier
 */
#define __DSB() do {\
                   __schedule_barrier();\
                   __builtin_arm_dsb(0xF);\
                   __schedule_barrier();\
                } while (0U)
#define __DSB()                           __builtin_arm_dsb(0xF)

/**
  \brief   Data Memory Barrier
 */
#define __DMB() do {\
                   __schedule_barrier();\
                   __builtin_arm_dmb(0xF);\
                   __schedule_barrier();\
                } while (0U)
#define __DMB()                           __builtin_arm_dmb(0xF)

/**
  \brief   Reverse byte order (32 bit)
@@ -322,6 +310,8 @@ __STATIC_FORCEINLINE uint8_t __CLZ(uint32_t value)

#if (defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1))

#define     __SADD8                 __builtin_arm_sadd8
#define     __SADD16                __builtin_arm_sadd16
#define     __QADD8                 __builtin_arm_qadd8
#define     __QSUB8                 __builtin_arm_qsub8
#define     __QADD16                __builtin_arm_qadd16
@@ -342,7 +332,10 @@ __STATIC_FORCEINLINE uint8_t __CLZ(uint32_t value)
#define     __SMUSD                 __builtin_arm_smusd
#define     __SMUSDX                __builtin_arm_smusdx
#define     __SMLSDX                __builtin_arm_smlsdx

#define     __USAT16                __builtin_arm_usat16
#define     __SSUB8                 __builtin_arm_ssub8
#define     __SXTB16                __builtin_arm_sxtb16
#define     __SXTAB16               __builtin_arm_sxtab16


__STATIC_FORCEINLINE  int32_t __QADD( int32_t op1,  int32_t op2)
@@ -408,7 +401,7 @@ __STATIC_FORCEINLINE uint32_t __get_CPSR(void)
 */
__STATIC_FORCEINLINE void __set_CPSR(uint32_t cpsr)
{
__ASM volatile ("MSR cpsr, %0" : : "r" (cpsr) : "memory");
__ASM volatile ("MSR cpsr, %0" : : "r" (cpsr) : "cc", "memory");
}

/** \brief  Get Mode
+115 −16
Original line number Diff line number Diff line
/**************************************************************************//**
 * @file     cmsis_gcc.h
 * @brief    CMSIS compiler specific macros, functions, instructions
 * @version  V1.2.0
 * @date     17. May 2019
 * @version  V1.3.0
 * @date     17. December 2019
 ******************************************************************************/
/*
 * Copyright (c) 2009-2019 Arm Limited. All rights reserved.
@@ -119,6 +119,15 @@ __STATIC_FORCEINLINE uint32_t __QSUB16(uint32_t op1, uint32_t op2)
}


/**
  \brief   Emits the Arm QSUB8 instruction on the two operands.
           NOTE(review): architecturally QSUB8 performs a per-lane saturating
           subtract of four packed signed bytes — confirm against the Arm ARM.
  \param [in]    op1  first packed operand
  \param [in]    op2  second packed operand
  \return             value produced by qsub8
 */
__STATIC_FORCEINLINE uint32_t __QSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  /* volatile asm: the instruction is always emitted and never CSE'd away */
  __ASM volatile ("qsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}


__STATIC_FORCEINLINE uint32_t __QADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;
@@ -127,6 +136,14 @@ __STATIC_FORCEINLINE uint32_t __QADD16(uint32_t op1, uint32_t op2)
  return(result);
}

/**
  \brief   Emits the Arm QADD8 instruction on the two operands.
           NOTE(review): architecturally QADD8 performs a per-lane saturating
           add of four packed signed bytes — confirm against the Arm ARM.
  \param [in]    op1  first packed operand
  \param [in]    op2  second packed operand
  \return             value produced by qadd8
 */
__STATIC_FORCEINLINE uint32_t __QADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  /* volatile asm: the instruction is always emitted and never CSE'd away */
  __ASM volatile ("qadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__STATIC_FORCEINLINE  int32_t __QADD( int32_t op1,  int32_t op2)
{
  int32_t result;
@@ -135,6 +152,22 @@ __STATIC_FORCEINLINE int32_t __QADD( int32_t op1, int32_t op2)
  return(result);
}

/**
  \brief   Emits the Arm QSAX instruction on the two operands.
  \param [in]    op1  first packed operand
  \param [in]    op2  second packed operand
  \return             value produced by qsax
 */
__STATIC_FORCEINLINE uint32_t __QSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  /* non-volatile asm: result depends only on the inputs, so the compiler
     may reuse a prior result for identical inputs or drop an unused call */
  __ASM ("qsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/**
  \brief   Emits the Arm SHSAX instruction on the two operands.
  \param [in]    op1  first packed operand
  \param [in]    op2  second packed operand
  \return             value produced by shsax
 */
__STATIC_FORCEINLINE uint32_t __SHSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  /* non-volatile asm: result depends only on the inputs, so the compiler
     may reuse a prior result for identical inputs or drop an unused call */
  __ASM ("shsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__STATIC_FORCEINLINE uint64_t __SMLALD (uint32_t op1, uint32_t op2, uint64_t acc)
{
  union llreg_u{
@@ -160,6 +193,15 @@ __STATIC_FORCEINLINE int32_t __QSUB( int32_t op1, int32_t op2)
  return(result);
}

/**
  \brief   Emits the Arm SXTB16 instruction on the operand.
           NOTE(review): architecturally SXTB16 sign-extends bytes 0 and 2
           into the two halfwords of the result — confirm against the Arm ARM.
  \param [in]    op1  operand to extend
  \return             value produced by sxtb16
 */
__STATIC_FORCEINLINE uint32_t __SXTB16(uint32_t op1)
{
  uint32_t result;

  /* non-volatile asm: pure function of op1; compiler may CSE or elide it */
  __ASM ("sxtb16 %0, %1" : "=r" (result) : "r" (op1));
  return(result);
}


__STATIC_FORCEINLINE uint32_t __SMUAD  (uint32_t op1, uint32_t op2)
{
  uint32_t result;
@@ -168,9 +210,14 @@ __STATIC_FORCEINLINE uint32_t __SMUAD (uint32_t op1, uint32_t op2)
  return(result);
}



/* Pack Halfword Bottom/Top, emulated in plain C (no inline asm needed).
   __PKHBT: low halfword taken from ARG1, high halfword from ARG2 shifted
   left by ARG3 bits. */
#define __PKHBT(ARG1,ARG2,ARG3)          ( ((((uint32_t)(ARG1))          ) & 0x0000FFFFUL) |  \
                                           ((((uint32_t)(ARG2)) << (ARG3)) & 0xFFFF0000UL)  )

/* __PKHTB: high halfword taken from ARG1, low halfword from ARG2 shifted
   right by ARG3 bits.
   NOTE(review): the hardware instruction permits an ASR of up to 32, but a
   C shift of a uint32_t by 32 is undefined behavior — verify callers never
   pass ARG3 >= 32. Arguments are multiply-evaluated (macro), so avoid
   side-effecting expressions. */
#define __PKHTB(ARG1,ARG2,ARG3)          ( ((((uint32_t)(ARG1))          ) & 0xFFFF0000UL) |  \
                                           ((((uint32_t)(ARG2)) >> (ARG3)) & 0x0000FFFFUL)  )

__STATIC_FORCEINLINE uint32_t __SMLAD (uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;
@@ -220,7 +267,61 @@ __STATIC_FORCEINLINE int32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3)
 return(result);
}

/**
  \brief   Emits the Arm SMUSD instruction on the two operands.
  \param [in]    op1  first packed operand
  \param [in]    op2  second packed operand
  \return             value produced by smusd
 */
__STATIC_FORCEINLINE uint32_t __SMUSD  (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  /* volatile asm: the instruction is always emitted and never CSE'd away */
  __ASM volatile ("smusd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/**
  \brief   Emits the Arm SMUSDX instruction on the two operands.
  \param [in]    op1  first packed operand
  \param [in]    op2  second packed operand
  \return             value produced by smusdx
 */
__STATIC_FORCEINLINE uint32_t __SMUSDX (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  /* volatile asm: the instruction is always emitted and never CSE'd away */
  __ASM volatile ("smusdx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/**
  \brief   Emits the Arm QASX instruction on the two operands.
  \param [in]    op1  first packed operand
  \param [in]    op2  second packed operand
  \return             value produced by qasx
 */
__STATIC_FORCEINLINE uint32_t __QASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  /* non-volatile asm: result depends only on the inputs, so the compiler
     may reuse a prior result for identical inputs or drop an unused call */
  __ASM ("qasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/**
  \brief   Emits the Arm SHADD16 instruction on the two operands.
  \param [in]    op1  first packed operand
  \param [in]    op2  second packed operand
  \return             value produced by shadd16
 */
__STATIC_FORCEINLINE uint32_t __SHADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  /* non-volatile asm: result depends only on the inputs, so the compiler
     may reuse a prior result for identical inputs or drop an unused call */
  __ASM ("shadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/**
  \brief   Emits the Arm SHSUB16 instruction on the two operands.
  \param [in]    op1  first packed operand
  \param [in]    op2  second packed operand
  \return             value produced by shsub16
 */
__STATIC_FORCEINLINE uint32_t __SHSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  /* non-volatile asm: result depends only on the inputs, so the compiler
     may reuse a prior result for identical inputs or drop an unused call */
  __ASM ("shsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/**
  \brief   Emits the Arm SHASX instruction on the two operands.
  \param [in]    op1  first packed operand
  \param [in]    op2  second packed operand
  \return             value produced by shasx
 */
__STATIC_FORCEINLINE uint32_t __SHASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  /* non-volatile asm: result depends only on the inputs, so the compiler
     may reuse a prior result for identical inputs or drop an unused call */
  __ASM ("shasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

/**
  \brief   Emits the Arm SMLSDX instruction on the three operands.
  \param [in]    op1  first packed operand
  \param [in]    op2  second packed operand
  \param [in]    op3  accumulate operand
  \return             value produced by smlsdx
 */
__STATIC_FORCEINLINE uint32_t __SMLSDX (uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  /* volatile asm: the instruction is always emitted and never CSE'd away */
  __ASM volatile ("smlsdx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}


/* ##########################  Core Instruction Access  ######################### */
@@ -232,12 +333,12 @@ __STATIC_FORCEINLINE int32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3)
/**
  \brief   Wait For Interrupt
 */
#define __WFI()                             __ASM volatile ("wfi")
#define __WFI()                             __ASM volatile ("wfi":::"memory")

/**
  \brief   Wait For Event
 */
#define __WFE()                             __ASM volatile ("wfe")
#define __WFE()                             __ASM volatile ("wfe":::"memory")

/**
  \brief   Send Event
@@ -289,7 +390,7 @@ __STATIC_FORCEINLINE uint32_t __REV(uint32_t value)
#else
  uint32_t result;

  __ASM volatile ("rev %0, %1" : "=r" (result) : "r" (value) );
  __ASM ("rev %0, %1" : "=r" (result) : "r" (value) );
  return result;
#endif
}
@@ -300,14 +401,12 @@ __STATIC_FORCEINLINE uint32_t __REV(uint32_t value)
  \param [in]    value  Value to reverse
  \return               Reversed value
 */
#ifndef __NO_EMBEDDED_ASM
__attribute__((section(".rev16_text"))) __STATIC_INLINE uint32_t __REV16(uint32_t value)
__STATIC_FORCEINLINE uint32_t __REV16(uint32_t value)
{
  uint32_t result;
  __ASM volatile("rev16 %0, %1" : "=r" (result) : "r" (value));
  __ASM ("rev16 %0, %1" : "=r" (result) : "r" (value));
  return result;
}
#endif

/**
  \brief   Reverse byte order (16 bit)
@@ -322,7 +421,7 @@ __STATIC_FORCEINLINE int16_t __REVSH(int16_t value)
#else
  int16_t result;

  __ASM volatile ("revsh %0, %1" : "=r" (result) : "r" (value) );
  __ASM ("revsh %0, %1" : "=r" (result) : "r" (value) );
  return result;
#endif
}
@@ -364,7 +463,7 @@ __STATIC_FORCEINLINE uint32_t __RBIT(uint32_t value)
#if ((defined (__ARM_ARCH_7M__      ) && (__ARM_ARCH_7M__      == 1)) || \
     (defined (__ARM_ARCH_7EM__     ) && (__ARM_ARCH_7EM__     == 1)) || \
     (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))    )
   __ASM volatile ("rbit %0, %1" : "=r" (result) : "r" (value) );
   __ASM ("rbit %0, %1" : "=r" (result) : "r" (value) );
#else
  int32_t s = (4U /*sizeof(v)*/ * 8U) - 1U; /* extra shift needed at end */

@@ -533,7 +632,7 @@ __STATIC_FORCEINLINE void __CLREX(void)
__extension__ \
({                          \
  int32_t __RES, __ARG1 = (ARG1); \
  __ASM ("ssat %0, %1, %2" : "=r" (__RES) :  "I" (ARG2), "r" (__ARG1) ); \
  __ASM volatile ("ssat %0, %1, %2" : "=r" (__RES) :  "I" (ARG2), "r" (__ARG1) : "cc" ); \
  __RES; \
 })

@@ -549,7 +648,7 @@ __extension__ \
__extension__ \
({                          \
  uint32_t __RES, __ARG1 = (ARG1); \
  __ASM ("usat %0, %1, %2" : "=r" (__RES) :  "I" (ARG2), "r" (__ARG1) ); \
  __ASM volatile ("usat %0, %1, %2" : "=r" (__RES) :  "I" (ARG2), "r" (__ARG1) : "cc" ); \
  __RES; \
 })

@@ -637,7 +736,7 @@ __STATIC_FORCEINLINE uint32_t __get_CPSR(void)
 */
__STATIC_FORCEINLINE void __set_CPSR(uint32_t cpsr)
{
__ASM volatile ("MSR cpsr, %0" : : "r" (cpsr) : "memory");
__ASM volatile ("MSR cpsr, %0" : : "r" (cpsr) : "cc", "memory");
}

/** \brief  Get Mode
+2 −0
Original line number Diff line number Diff line
@@ -10,6 +10,8 @@
// Copyright (c) 2017-2018 IAR Systems
// Copyright (c) 2018-2019 Arm Limited 
//
// SPDX-License-Identifier: Apache-2.0
//
// Licensed under the Apache License, Version 2.0 (the "License")
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
+5 −4
Original line number Diff line number Diff line
/**************************************************************************//**
 * @file     core_ca.h
 * @brief    CMSIS Cortex-A Core Peripheral Access Layer Header File
 * @version  V1.0.2
 * @date     12. November 2018
 * @version  V1.0.3
 * @date     28. January 2020
 ******************************************************************************/
/*
 * Copyright (c) 2009-2018 ARM Limited. All rights reserved.
 * Copyright (c) 2009-2020 ARM Limited. All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 *
@@ -1282,7 +1282,8 @@ __STATIC_INLINE void GIC_SetPendingIRQ(IRQn_Type IRQn)
    GICDistributor->ISPENDR[IRQn / 32U] = 1U << (IRQn % 32U);
  } else {
    // INTID 0-15 Software Generated Interrupt
    GICDistributor->SPENDSGIR[IRQn / 4U] = 1U << ((IRQn % 4U) * 8U);
    // Forward the interrupt to the CPU interface that requested it
    GICDistributor->SGIR = (IRQn | 0x02000000U);
  }
}

Loading