/*
 * Copyright (c) 2019 Carlo Caione <ccaione@baylibre.com>
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/*
 * ARM64 Cortex-A ISRs wrapper
 */

#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <offsets_short.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/sw_isr_table.h>
#include <zephyr/drivers/interrupt_controller/gic.h>

#include "macro_priv.inc"

_ASM_FILE_PROLOGUE

GDATA(_sw_isr_table)

/*
 * Wrapper around ISRs when inserted in software ISR table
 *
 * When inserted in the vector table, _isr_wrapper() demuxes the ISR table
 * using the running interrupt number as the index, and invokes the registered
 * ISR with its corresponding argument. When returning from the ISR, it
 * determines if a context switch needs to happen.
 */
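/*
 * For reference, each _sw_isr_table entry pairs an ISR argument with the
 * ISR itself, as declared in zephyr/sw_isr_table.h. This layout is why the
 * demux below indexes with a 16-byte stride and fetches both fields with a
 * single ldp:
 *
 *     struct _isr_table_entry {
 *         const void *arg;
 *         void (*isr)(const void *);
 *     };
 */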
GTEXT(_isr_wrapper)
SECTION_FUNC(TEXT, _isr_wrapper)

	/* ++_current_cpu->nested to be checked by arch_is_in_isr() */
	get_cpu	x0
	ldr	w1, [x0, #___cpu_t_nested_OFFSET]
	add	w2, w1, #1
	str	w2, [x0, #___cpu_t_nested_OFFSET]

	/* If not nested: switch to IRQ stack and save current sp on it. */
	cbnz	w1, 1f
	ldr	x1, [x0, #___cpu_t_irq_stack_OFFSET]
	mov	x2, sp
	mov	sp, x1
	str	x2, [sp, #-16]!

#if defined(CONFIG_ARM64_SAFE_EXCEPTION_STACK)
	/* Track the bottom of the IRQ stack as the current stack limit */
	sub	x1, x1, #CONFIG_ISR_STACK_SIZE
	str	x1, [x0, #_cpu_offset_to_current_stack_limit]
#endif
1:
#ifdef CONFIG_SCHED_THREAD_USAGE
	bl	z_sched_usage_stop
#endif

#ifdef CONFIG_TRACING
	bl	sys_trace_isr_enter
#endif

	/* Get active IRQ number from the interrupt controller */
#if !defined(CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER)
	bl	arm_gic_get_active
#else
	bl	z_soc_irq_get_active
#endif /* !CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER */

#if CONFIG_GIC_VER >= 3
	/*
	 * Ignore the special INTIDs 1020..1023; see section 2.2.1 of the Arm
	 * Generic Interrupt Controller Architecture Specification (GIC
	 * architecture versions 3 and 4).
	 */
	cmp	x0, 1019
	b.le	oob
	cmp	x0, 1023
	b.gt	oob
	b	spurious_continue

oob:
#endif
	/* IRQ out of bounds */
	mov	x1, #(CONFIG_NUM_IRQS - 1)
	cmp	x0, x1
	b.hi	spurious_continue

	/* Preserve the active IRQ number for the end-of-interrupt signal below */
	stp	x0, xzr, [sp, #-16]!

	/* Retrieve the interrupt service routine */
	ldr	x1, =_sw_isr_table
	add	x1, x1, x0, lsl #4	/* each table entry is 16 bytes wide */
	ldp	x0, x3, [x1]	/* arg in x0, ISR in x3 */

	/*
	 * Call the ISR. Unmask and mask again the IRQs to support nested
	 * exception handlers
	 */
	msr	daifclr, #(DAIFCLR_IRQ_BIT)
	blr	x3
	msr	daifset, #(DAIFSET_IRQ_BIT)

	/* Signal end-of-interrupt */
	ldp	x0, xzr, [sp], #16

spurious_continue:
#if !defined(CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER)
	bl	arm_gic_eoi
#else
	bl	z_soc_irq_eoi
#endif /* !CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER */

#ifdef CONFIG_TRACING
	bl	sys_trace_isr_exit
#endif

GTEXT(z_arm64_irq_done)
z_arm64_irq_done:
	/* if (--_current_cpu->nested != 0) exit */
	get_cpu	x0
	ldr	w1, [x0, #___cpu_t_nested_OFFSET]
	subs	w1, w1, #1
	str	w1, [x0, #___cpu_t_nested_OFFSET]
	bne	exit

	/* No more nested: retrieve the task's stack. */
	ldr	x1, [sp]
	mov	sp, x1

	/* retrieve pointer to the current thread */
	ldr	x1, [x0, #___cpu_t_current_OFFSET]

#if defined(CONFIG_ARM64_SAFE_EXCEPTION_STACK)
	/* arch_curr_cpu()->arch.current_stack_limit = thread->arch.stack_limit */
	ldr	x2, [x1, #_thread_offset_to_stack_limit]
	str	x2, [x0, #_cpu_offset_to_current_stack_limit]
#endif

	/*
	 * Get next thread to schedule with z_get_next_switch_handle().
	 * We pass it NULL as we didn't save the whole thread context yet.
	 * If no scheduling is necessary then NULL will be returned.
	 */
	str	x1, [sp, #-16]!
	mov	x0, xzr
	bl	z_get_next_switch_handle
	ldr	x1, [sp], #16
	cbz	x0, exit

	/*
	 * Switch thread
	 * x0: new thread
	 * x1: old thread
	 */
	bl	z_arm64_context_switch

exit:
#ifdef CONFIG_STACK_SENTINEL
	bl	z_check_stack_sentinel
#endif
	b	z_arm64_exit_exc