/*
 * Copyright (c) 2013-2014 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Exception/interrupt context helpers for Cortex-M CPUs
 *
 * Exception/interrupt context helpers.
 */

#ifndef ZEPHYR_ARCH_ARM_INCLUDE_AARCH32_CORTEX_M_EXC_H_
#define ZEPHYR_ARCH_ARM_INCLUDE_AARCH32_CORTEX_M_EXC_H_

#include <arch/cpu.h>

#ifdef _ASMLANGUAGE

/* nothing */

#else

#include <arch/arm/aarch32/cortex_m/cmsis.h>
#include <arch/arm/aarch32/exc.h>
#include <irq_offload.h>

#ifdef __cplusplus
extern "C" {
#endif

#ifdef CONFIG_IRQ_OFFLOAD
extern volatile irq_offload_routine_t offload_routine;
#endif

/* Writes to the AIRCR must be accompanied by a write of the value 0x05FA
 * to the Vector Key field, otherwise the writes are ignored.
 */
#define AIRCR_VECT_KEY_PERMIT_WRITE 0x05FAUL

/*
 * The current executing vector is found in the IPSR register. All
 * IRQs and system exceptions are considered as interrupt context.
 */
static ALWAYS_INLINE bool arch_is_in_isr(void)
{
	return (__get_IPSR()) ? (true) : (false);
}

/**
 * @brief Find out if we were in ISR context
 *        before the current exception occurred.
 *
 * A function that determines, based on inspecting the current
 * ESF, whether the processor was in handler mode before entering
 * the current exception state (i.e. nested exception) or not.
 *
 * Notes:
 * - The function shall only be called from ISR context.
 * - We do not use ARM processor state flags to determine
 *   whether we are in a nested exception; we rely on the
 *   RETPSR value stacked on the ESF. Hence, the function
 *   assumes that the ESF stack frame has a valid RETPSR
 *   value.
 *
 * @param esf the exception stack frame (cannot be NULL)
 * @return true if execution state was in handler mode, before
 *         the current exception occurred, otherwise false.
 */
static ALWAYS_INLINE bool arch_is_in_nested_exception(const z_arch_esf_t *esf)
{
	return (esf->basic.xpsr & IPSR_ISR_Msk) ? (true) : (false);
}

/**
 * @brief Setup system exceptions
 *
 * Set exception priorities to conform with the BASEPRI locking mechanism.
 * Set PendSV priority to lowest possible.
 *
 * Enable fault exceptions.
 *
 * @return N/A
 */
static ALWAYS_INLINE void z_arm_exc_setup(void)
{
	NVIC_SetPriority(PendSV_IRQn, _EXC_PENDSV_PRIO);

#ifdef CONFIG_CPU_CORTEX_M_HAS_BASEPRI
	/* Note: SVCall IRQ priority level is left to default (0)
	 * for Cortex-M variants without BASEPRI (e.g. ARMv6-M).
	 */
	NVIC_SetPriority(SVCall_IRQn, _EXC_SVC_PRIO);
#endif

#ifdef CONFIG_CPU_CORTEX_M_HAS_PROGRAMMABLE_FAULT_PRIOS
	NVIC_SetPriority(MemoryManagement_IRQn, _EXC_FAULT_PRIO);
	NVIC_SetPriority(BusFault_IRQn, _EXC_FAULT_PRIO);
	NVIC_SetPriority(UsageFault_IRQn, _EXC_FAULT_PRIO);
#if defined(CONFIG_ARM_SECURE_FIRMWARE)
	NVIC_SetPriority(SecureFault_IRQn, _EXC_FAULT_PRIO);
#endif /* CONFIG_ARM_SECURE_FIRMWARE */

	/* Enable Usage, Mem, & Bus Faults */
	SCB->SHCSR |= SCB_SHCSR_USGFAULTENA_Msk | SCB_SHCSR_MEMFAULTENA_Msk |
		      SCB_SHCSR_BUSFAULTENA_Msk;
#if defined(CONFIG_ARM_SECURE_FIRMWARE)
	/* Enable Secure Fault */
	SCB->SHCSR |= SCB_SHCSR_SECUREFAULTENA_Msk;
	/* Clear BFAR before setting BusFaults to target Non-Secure state. */
	SCB->BFAR = 0;
#endif /* CONFIG_ARM_SECURE_FIRMWARE */
#endif /* CONFIG_CPU_CORTEX_M_HAS_PROGRAMMABLE_FAULT_PRIOS */

#if defined(CONFIG_ARM_SECURE_FIRMWARE) && \
	!defined(CONFIG_ARM_SECURE_BUSFAULT_HARDFAULT_NMI)
	/* Set NMI, Hard, and Bus Faults as Non-Secure.
	 * NMI and Bus Faults targeting the Secure state will
	 * escalate to a SecureFault or SecureHardFault.
	 */
	SCB->AIRCR =
		(SCB->AIRCR & (~(SCB_AIRCR_VECTKEY_Msk))) |
		SCB_AIRCR_BFHFNMINS_Msk |
		((AIRCR_VECT_KEY_PERMIT_WRITE << SCB_AIRCR_VECTKEY_Pos) &
		 SCB_AIRCR_VECTKEY_Msk);
	/* Note: Fault conditions that would generate a SecureFault
	 * in a PE with the Main Extension instead generate a
	 * SecureHardFault in a PE without the Main Extension.
	 */
#endif /* ARM_SECURE_FIRMWARE && !ARM_SECURE_BUSFAULT_HARDFAULT_NMI */
}

/**
 * @brief Clear Fault exceptions
 *
 * Clear out exceptions for Mem, Bus, Usage and Hard Faults
 *
 * @return N/A
 */
static ALWAYS_INLINE void z_arm_clear_faults(void)
{
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
	/* Reset all faults */
	SCB->CFSR = SCB_CFSR_USGFAULTSR_Msk | SCB_CFSR_MEMFAULTSR_Msk |
		    SCB_CFSR_BUSFAULTSR_Msk;

	/* Clear all Hard Faults - HFSR is write-one-to-clear */
	SCB->HFSR = 0xffffffff;
#else
#error Unknown ARM architecture
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
}

#ifdef __cplusplus
}
#endif

#endif /* _ASMLANGUAGE */

#endif /* ZEPHYR_ARCH_ARM_INCLUDE_AARCH32_CORTEX_M_EXC_H_ */
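
/*
 * Usage sketch (illustrative only, not part of the upstream header): a
 * fault handler that receives a pointer to the stacked exception frame
 * could call arch_is_in_nested_exception() to tell whether the fault
 * preempted another handler or was taken directly from thread mode. The
 * helper name example_fault_report() is hypothetical, and the block is
 * disabled with #if 0 so it does not affect builds of this header.
 */
#if 0
static void example_fault_report(const z_arch_esf_t *esf)
{
	if (arch_is_in_nested_exception(esf)) {
		/* The fault preempted another exception handler. */
	} else {
		/* The fault was taken directly from thread mode. */
	}

	/* arch_is_in_isr() is always true at this point, since fault
	 * handlers execute in handler mode (IPSR != 0).
	 */
}
#endif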