1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 | /* * Copyright (c) 2016 Intel Corporation * * SPDX-License-Identifier: Apache-2.0 */ #include <zephyr/toolchain.h> #include <zephyr/linker/sections.h> #include <offsets_short.h> /* exports */ GTEXT(_exception) /* import */ GTEXT(_Fault) GTEXT(arch_swap) #ifdef CONFIG_IRQ_OFFLOAD GTEXT(z_irq_do_offload) GTEXT(_offload_routine) #endif /* Allows use of r1/at register, otherwise reserved for assembler use */ .set noat /* Placed into special 'exception' section so that the linker can put this code * at ALT_CPU_EXCEPTION_ADDR defined in system.h * * This is the common entry point for processor exceptions and interrupts from * the Internal Interrupt Controller (IIC). * * If the External (EIC) controller is in use, then we will never get here on * behalf of an interrupt, instead the EIC driver will have set up a vector * table and the processor will jump directly into the appropriate table * entry. 
*/ SECTION_FUNC(exception.entry, _exception) /* Reserve thread stack space for saving context */ subi sp, sp, __z_arch_esf_t_SIZEOF /* Preserve all caller-saved registers onto the thread's stack */ stw ra, __z_arch_esf_t_ra_OFFSET(sp) stw r1, __z_arch_esf_t_r1_OFFSET(sp) stw r2, __z_arch_esf_t_r2_OFFSET(sp) stw r3, __z_arch_esf_t_r3_OFFSET(sp) stw r4, __z_arch_esf_t_r4_OFFSET(sp) stw r5, __z_arch_esf_t_r5_OFFSET(sp) stw r6, __z_arch_esf_t_r6_OFFSET(sp) stw r7, __z_arch_esf_t_r7_OFFSET(sp) stw r8, __z_arch_esf_t_r8_OFFSET(sp) stw r9, __z_arch_esf_t_r9_OFFSET(sp) stw r10, __z_arch_esf_t_r10_OFFSET(sp) stw r11, __z_arch_esf_t_r11_OFFSET(sp) stw r12, __z_arch_esf_t_r12_OFFSET(sp) stw r13, __z_arch_esf_t_r13_OFFSET(sp) stw r14, __z_arch_esf_t_r14_OFFSET(sp) stw r15, __z_arch_esf_t_r15_OFFSET(sp) /* Store value of estatus control register */ rdctl et, estatus stw et, __z_arch_esf_t_estatus_OFFSET(sp) /* ea-4 is the address of the instruction when the exception happened, * put this in the stack frame as well */ addi r15, ea, -4 stw r15, __z_arch_esf_t_instr_OFFSET(sp) /* Figure out whether we are here because of an interrupt or an * exception. If an interrupt, switch stacks and enter IRQ handling * code. If an exception, remain on current stack and enter exception * handing code. From the CPU manual, ipending must be nonzero and * estatis.PIE must be enabled for this to be considered an interrupt. 
* * Stick ipending in r4 since it will be an arg for _enter_irq */ rdctl r4, ipending beq r4, zero, not_interrupt /* We stashed estatus in et earlier */ andi r15, et, 1 beq r15, zero, not_interrupt is_interrupt: /* If we get here, this is an interrupt */ /* Grab a reference to _kernel in r10 so we can determine the * current irq stack pointer */ movhi r10, %hi(_kernel) ori r10, r10, %lo(_kernel) /* Stash a copy of thread's sp in r12 so that we can put it on the IRQ * stack */ mov r12, sp /* Switch to interrupt stack */ ldw sp, _kernel_offset_to_irq_stack(r10) /* Store thread stack pointer onto IRQ stack */ addi sp, sp, -4 stw r12, 0(sp) on_irq_stack: /* Enter C interrupt handling code. Value of ipending will be the * function parameter since we put it in r4 */ call _enter_irq /* Interrupt handler finished and the interrupt should be serviced * now, the appropriate bits in ipending should be cleared */ /* Get a reference to _kernel again in r10 */ movhi r10, %hi(_kernel) ori r10, r10, %lo(_kernel) #ifdef CONFIG_PREEMPT_ENABLED ldw r11, _kernel_offset_to_current(r10) /* Determine whether the exception of the ISR requires context * switch */ /* Call into the kernel to see if a scheduling decision is necessary */ ldw r2, _kernel_offset_to_ready_q_cache(r10) beq r2, r11, no_reschedule /* * A context reschedule is required: keep the volatile registers of * the interrupted thread on the context's stack. Utilize * the existing arch_swap() primitive to save the remaining * thread's registers (including floating point) and perform * a switch to the new thread. */ /* We put the thread stack pointer on top of the IRQ stack before * we switched stacks. Restore it to go back to thread stack */ ldw sp, 0(sp) /* Argument to Swap() is estatus since that's the state of the * status register before the exception happened. When coming * out of the context switch we need this info to restore * IRQ lock state. We put this value in et earlier. 
*/ mov r4, et call arch_swap jmpi _exception_exit #else jmpi no_reschedule #endif /* CONFIG_PREEMPT_ENABLED */ not_interrupt: /* Since this wasn't an interrupt we're not going to restart the * faulting instruction. * * We earlier put ea - 4 in the stack frame, replace it with just ea */ stw ea, __z_arch_esf_t_instr_OFFSET(sp) #ifdef CONFIG_IRQ_OFFLOAD /* Check the contents of _offload_routine. If non-NULL, jump into * the interrupt code anyway. */ movhi r10, %hi(_offload_routine) ori r10, r10, %lo(_offload_routine) ldw r11, (r10) bne r11, zero, is_interrupt #endif _exception_enter_fault: /* If we get here, the exception wasn't in interrupt or an * invocation of irq_oflload(). Let _Fault() handle it in * C domain */ mov r4, sp call _Fault jmpi _exception_exit no_reschedule: /* We put the thread stack pointer on top of the IRQ stack before * we switched stacks. Restore it to go back to thread stack */ ldw sp, 0(sp) /* Fall through */ _exception_exit: /* We are on the thread stack. Restore all saved registers * and return to the interrupted context */ /* Return address from the exception */ ldw ea, __z_arch_esf_t_instr_OFFSET(sp) /* Restore estatus * XXX is this right??? 
*/ ldw r5, __z_arch_esf_t_estatus_OFFSET(sp) wrctl estatus, r5 /* Restore caller-saved registers */ ldw ra, __z_arch_esf_t_ra_OFFSET(sp) ldw r1, __z_arch_esf_t_r1_OFFSET(sp) ldw r2, __z_arch_esf_t_r2_OFFSET(sp) ldw r3, __z_arch_esf_t_r3_OFFSET(sp) ldw r4, __z_arch_esf_t_r4_OFFSET(sp) ldw r5, __z_arch_esf_t_r5_OFFSET(sp) ldw r6, __z_arch_esf_t_r6_OFFSET(sp) ldw r7, __z_arch_esf_t_r7_OFFSET(sp) ldw r8, __z_arch_esf_t_r8_OFFSET(sp) ldw r9, __z_arch_esf_t_r9_OFFSET(sp) ldw r10, __z_arch_esf_t_r10_OFFSET(sp) ldw r11, __z_arch_esf_t_r11_OFFSET(sp) ldw r12, __z_arch_esf_t_r12_OFFSET(sp) ldw r13, __z_arch_esf_t_r13_OFFSET(sp) ldw r14, __z_arch_esf_t_r14_OFFSET(sp) ldw r15, __z_arch_esf_t_r15_OFFSET(sp) /* Put the stack pointer back where it was when we entered * exception state */ addi sp, sp, __z_arch_esf_t_SIZEOF /* All done, copy estatus into status and transfer to ea */ eret |