/*
 * Copyright (c) 2013-2014 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief ARM Cortex-M and Cortex-R exception/interrupt exit API
 *
 * Provides functions for performing kernel handling when exiting exception or
 * interrupt handlers that are installed directly in the vector table (i.e.
 * that are not wrapped by _isr_wrapper()).
 */

#include <kernel_structs.h>
#include <offsets_short.h>
#include <toolchain.h>
#include <arch/cpu.h>

_ASM_FILE_PROLOGUE

GTEXT(z_ExcExit)
GTEXT(_IntExit)
GDATA(_kernel)
#if defined(CONFIG_CPU_CORTEX_R)
GTEXT(__pendsv)
#endif

/**
 *
 * @brief Kernel housekeeping when exiting an interrupt handler installed
 * directly in the vector table
 *
 * The kernel allows installing interrupt handlers (ISRs) directly into the
 * vector table to get the lowest interrupt latency possible. This allows the
 * ISR to be invoked directly without going through a software interrupt table.
 * However, upon exiting the ISR, some kernel work must still be performed,
 * namely possible context switching. While ISRs connected in the software
 * interrupt table have this done automatically by a wrapper, ISRs connected
 * directly in the vector table must invoke _IntExit() as the *very last*
 * action before returning.
 *
 * e.g.
 *
 * void myISR(void)
 * {
 *	printk("in %s\n", __FUNCTION__);
 *	doStuff();
 *	_IntExit();
 * }
 *
 * @return N/A
 */

SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, _IntExit)

/* _IntExit falls through to z_ExcExit (they are aliases of each other) */

/**
 *
 * @brief Kernel housekeeping when exiting an exception handler installed
 * directly in the vector table
 *
 * See _IntExit().
 *
 * @return N/A
 */

SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, z_ExcExit)

#if defined(CONFIG_CPU_CORTEX_R)
	/* r0 and the exception-mode lr are restored just before returning */
	push {r0, lr}
#endif

#ifdef CONFIG_PREEMPT_ENABLED
	/*
	 * No context switch is needed if the current thread is still the
	 * highest-priority ready thread (cached in ready_q.cache).
	 */
	ldr r0, =_kernel
	ldr r1, [r0, #_kernel_offset_to_current]
	ldr r0, [r0, #_kernel_offset_to_ready_q_cache]
	cmp r0, r1
	beq _EXIT_EXC

#if defined(CONFIG_CPU_CORTEX_M)
	/* context switch required, pend the PendSV exception */
	ldr r1, =_SCS_ICSR
	ldr r2, =_SCS_ICSR_PENDSV
	str r2, [r1]
#elif defined(CONFIG_CPU_CORTEX_R)
	/* Cortex-R has no PendSV; call the context-switch routine directly */
	push {r0, lr}
	bl __pendsv
	pop {r0, lr}
#endif

_ExcExitWithGdbStub:

_EXIT_EXC:
#endif /* CONFIG_PREEMPT_ENABLED */

#ifdef CONFIG_STACK_SENTINEL
	push {r0, lr}
	bl z_check_stack_sentinel
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
	/* Baseline Thumb cannot pop directly into lr: go through r1 */
	pop {r0, r1}
	mov lr, r1
#else
	pop {r0, lr}
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
#endif /* CONFIG_STACK_SENTINEL */

#if defined(CONFIG_CPU_CORTEX_M)
	bx lr
#elif defined(CONFIG_CPU_CORTEX_R)
	/*
	 * r0-r3 are either the values from the thread before it was switched
	 * out or they are the args to _new_thread for a new thread.
	 */
	pop {r0, lr}
	push {r4, r5}

	cmp r0, #RET_FROM_SVC

	/* Restore the outgoing thread's registers from its SYS-mode stack */
	cps #MODE_SYS
	ldmia sp!, {r0-r5}

	beq _svc_exit

	cps #MODE_IRQ
	b _exc_exit

_svc_exit:
	cps #MODE_SVC

_exc_exit:
	mov r12, r4
	mov lr, r5
	pop {r4, r5}

	/* Exception return: also restores CPSR from SPSR */
	movs pc, lr
#endif
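
/*
 * For reference only (a sketch, not part of this file's logic): in the
 * Cortex-M path above, pending PendSV through _SCS_ICSR/_SCS_ICSR_PENDSV
 * is equivalent to the following C, assuming the standard CMSIS-Core
 * definitions of SCB and SCB_ICSR_PENDSVSET_Msk:
 *
 *	SCB->ICSR = SCB_ICSR_PENDSVSET_Msk;
 *
 * PendSV is configured as the lowest-priority exception, so the context
 * switch it performs runs only once all other outstanding exceptions have
 * completed.
 */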