/*
 * Copyright (c) 2019 Carlo Caione <ccaione@baylibre.com>
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/*
 * ARM64 Cortex-A ISRs wrapper
 */

#include <toolchain.h>
#include <linker/sections.h>
#include <offsets_short.h>
#include <arch/cpu.h>
#include <sw_isr_table.h>
#include <drivers/interrupt_controller/gic.h>
#include "macro_priv.inc"

_ASM_FILE_PROLOGUE

GDATA(_sw_isr_table)

/*
 * Wrapper around ISRs when inserted in software ISR table
 *
 * When inserted in the vector table, _isr_wrapper() demuxes the ISR table
 * using the running interrupt number as the index, and invokes the registered
 * ISR with its corresponding argument. When returning from the ISR, it
 * determines if a context switch needs to happen.
 */

GTEXT(_isr_wrapper)
SECTION_FUNC(TEXT, _isr_wrapper)

	/* ++(_kernel->nested) to be checked by arch_is_in_isr() */
	inc_nest_counter x0, x1

#ifdef CONFIG_TRACING
	bl	sys_trace_isr_enter
#endif

	/* Get active IRQ number from the interrupt controller */
#if !defined(CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER)
	bl	arm_gic_get_active
#else
	bl	z_soc_irq_get_active
#endif /* !CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER */

	/* IRQ out of bounds */
	cmp	x0, #(CONFIG_NUM_IRQS - 1)
	b.hi	spurious_continue

	stp	x0, xzr, [sp, #-16]!

	/* Retrieve the interrupt service routine */
	ldr	x1, =_sw_isr_table
	add	x1, x1, x0, lsl #4	/* table is 16-byte wide */
	ldp	x0, x3, [x1]		/* arg in x0, ISR in x3 */

	/*
	 * Call the ISR. Unmask and mask again the IRQs to support nested
	 * exception handlers
	 */
	msr	daifclr, #(DAIFCLR_IRQ_BIT)
	blr	x3
	msr	daifset, #(DAIFSET_IRQ_BIT)

	/* Signal end-of-interrupt */
	ldp	x0, xzr, [sp], #16

spurious_continue:
#if !defined(CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER)
	bl	arm_gic_eoi
#else
	bl	z_soc_irq_eoi
#endif /* !CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER */

#ifdef CONFIG_TRACING
	bl	sys_trace_isr_exit
#endif

	/* if (--(_kernel->nested) != 0) exit */
	dec_nest_counter x0, x1
	bne	exit

	/*
	 * z_arch_get_next_switch_handle() is returning:
	 *
	 * - The next thread to schedule in x0
	 * - The current thread in x1. This value is returned using the
	 *   **old_thread parameter, so we need to make space on the stack for
	 *   that.
	 */
	sub	sp, sp, #16
	mov	x0, sp
	bl	z_arch_get_next_switch_handle
	ldp	x1, xzr, [sp], #16

	/*
	 * x0: 1st thread in the ready queue
	 * x1: _current thread
	 */
	cmp	x0, x1
	beq	exit

	/* Switch thread */
	bl	z_arm64_context_switch

exit:
#ifdef CONFIG_STACK_SENTINEL
	bl	z_check_stack_sentinel
#endif
	b	z_arm64_exit_exc
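
For reference (not part of the upstream file), the table demux done above with "ldr x1, =_sw_isr_table", "add x1, x1, x0, lsl #4" and "ldp x0, x3, [x1]" can be read as the C sketch below. It assumes the two-pointer _isr_table_entry layout from Zephyr's sw_isr_table.h (16 bytes per entry on AArch64, hence the shift by 4); the helper name demux_and_call is made up purely for illustration.

	/* Sketch only: C equivalent of the _sw_isr_table lookup performed by
	 * _isr_wrapper. Entry layout assumed from Zephyr's <sw_isr_table.h>:
	 * argument pointer first, ISR pointer second.
	 */
	struct _isr_table_entry {
		const void *arg;               /* ends up in x0 */
		void (*isr)(const void *arg);  /* ends up in x3, called via "blr x3" */
	};

	extern struct _isr_table_entry _sw_isr_table[];

	static inline void demux_and_call(unsigned int irq)
	{
		const struct _isr_table_entry *e = &_sw_isr_table[irq];

		/* Invoke the registered ISR with its corresponding argument */
		e->isr(e->arg);
	}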