/*
 * Copyright (c) 2019 Carlo Caione <ccaione@baylibre.com>
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/*
 * ARM64 Cortex-A ISR wrapper
 */

#include <toolchain.h>
#include <linker/sections.h>
#include <offsets_short.h>
#include <arch/cpu.h>
#include <sw_isr_table.h>
#include <drivers/interrupt_controller/gic.h>

#include "macro_priv.inc"

_ASM_FILE_PROLOGUE

GDATA(_sw_isr_table)

/*
 * Wrapper around the ISRs registered in the software ISR table
 *
 * When installed in the vector table, _isr_wrapper() demuxes the ISR table
 * using the active interrupt number as the index, and invokes the registered
 * ISR with its corresponding argument. When returning from the ISR, it
 * determines whether a context switch needs to happen.
 */
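/*
 * For reference, a rough C-level sketch of the demux step described above,
 * relying on the 16-byte {arg, isr} entry layout of struct _isr_table_entry
 * from sw_isr_table.h (this is also what the `lsl #4` index scaling below
 * depends on). demux() is purely illustrative and exists nowhere in the
 * tree:
 *
 *	#include <sw_isr_table.h>
 *
 *	static void demux(uint64_t irq)
 *	{
 *		if (irq < CONFIG_NUM_IRQS) {
 *			struct _isr_table_entry *ite = &_sw_isr_table[irq];
 *
 *			ite->isr(ite->arg);
 *		}
 *	}
 */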
GTEXT(_isr_wrapper)
SECTION_FUNC(TEXT, _isr_wrapper)

	/* ++(_kernel->nested) to be checked by arch_is_in_isr() */
	inc_nest_counter x0, x1

#ifdef CONFIG_SCHED_THREAD_USAGE
	bl	z_sched_usage_stop
#endif

#ifdef CONFIG_TRACING
	bl	sys_trace_isr_enter
#endif

	/* Get the active IRQ number from the interrupt controller */
#if !defined(CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER)
	bl	arm_gic_get_active
#else
	bl	z_soc_irq_get_active
#endif /* !CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER */

#if CONFIG_GIC_VER >= 3
	/*
	 * Ignore the special INTIDs 1020..1023; see section 2.2.1 of the Arm
	 * Generic Interrupt Controller Architecture Specification (GIC
	 * architecture versions 3 and 4)
	 */
	cmp	x0, 1019
	b.le	oob
	cmp	x0, 1023
	b.gt	oob
	b	spurious_continue

oob:
#endif

	/* IRQ out of bounds */
	mov	x1, #(CONFIG_NUM_IRQS - 1)
	cmp	x0, x1
	b.hi	spurious_continue

	stp	x0, xzr, [sp, #-16]!

	/* Retrieve the interrupt service routine */
	ldr	x1, =_sw_isr_table
	add	x1, x1, x0, lsl #4	/* each table entry is 16 bytes wide */
	ldp	x0, x3, [x1]		/* arg in x0, ISR in x3 */

	/*
	 * Call the ISR. Unmask and mask the IRQs again to support nested
	 * exception handlers
	 */
	msr	daifclr, #(DAIFCLR_IRQ_BIT)
	blr	x3
	msr	daifset, #(DAIFSET_IRQ_BIT)

	/* Restore the IRQ number to signal end-of-interrupt */
	ldp	x0, xzr, [sp], #16

spurious_continue:
#if !defined(CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER)
	bl	arm_gic_eoi
#else
	bl	z_soc_irq_eoi
#endif /* !CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER */

#ifdef CONFIG_TRACING
	bl	sys_trace_isr_exit
#endif

	/* if (--(_kernel->nested) != 0) exit */
	dec_nest_counter x0, x1
	bne	exit

	/*
	 * z_arch_get_next_switch_handle() returns:
	 *
	 * - The next thread to schedule in x0
	 * - The current thread in x1. This value is returned using the
	 *   **old_thread parameter, so we need to make space on the stack
	 *   for that.
	 */
	sub	sp, sp, #16
	mov	x0, sp
	bl	z_arch_get_next_switch_handle
	ldp	x1, xzr, [sp], #16

	/*
	 * x0: 1st thread in the ready queue
	 * x1: _current thread
	 */

#ifdef CONFIG_SMP
	/*
	 * Two possibilities here:
	 * - x0 != NULL (implies x0 != x1): we need to context switch and set
	 *   the switch_handle in the context switch code
	 * - x0 == NULL: no context switch
	 */
	cmp	x0, #0x0
	bne	switch

	/*
	 * No context switch. Restore x0 from x1 (they are the same thread).
	 * See also the comments on z_arch_get_next_switch_handle()
	 */
	mov	x0, x1
	b	exit

switch:
#else
	cmp	x0, x1
	beq	exit

#endif
	/* Switch thread */
	bl	z_arm64_context_switch

exit:
#ifdef CONFIG_STACK_SENTINEL
	bl	z_check_stack_sentinel
#endif

	b	z_arm64_exit_exc
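/*
 * For reference, a rough C-level sketch of the end-of-interrupt scheduling
 * decision above, in its SMP variant. check_context_switch() is purely
 * illustrative, and the C signatures are inferred from the register usage
 * commented above rather than taken from real declarations:
 *
 *	void *z_arch_get_next_switch_handle(void **old_thread);
 *	void z_arm64_context_switch(void *new_thread, void *old_thread);
 *
 *	static void check_context_switch(void)
 *	{
 *		void *old_thread;
 *		void *new_thread = z_arch_get_next_switch_handle(&old_thread);
 *
 *		if (new_thread != NULL) {
 *			z_arm64_context_switch(new_thread, old_thread);
 *		}
 *	}
 */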