/*
 * Copyright (c) 2019-2020 Cobham Gaisler AB
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief SPARC specific kernel interface header
 *
 * This header contains the SPARC specific kernel interface. It is
 * included by the generic kernel interface header (arch/cpu.h)
 */

#ifndef ZEPHYR_INCLUDE_ARCH_SPARC_ARCH_H_
#define ZEPHYR_INCLUDE_ARCH_SPARC_ARCH_H_

#include <arch/sparc/thread.h>
#include <arch/sparc/sparc.h>
#include <arch/common/sys_bitops.h>
#include <arch/common/sys_io.h>
#include <arch/common/ffs.h>

#include <irq.h>
#include <sw_isr_table.h>
#include <soc.h>
#include <devicetree.h>

/* Stacks: for the SPARC architecture the stack shall be 8-byte aligned. */
#define ARCH_STACK_PTR_ALIGN 8

/*
 * Software trap numbers.
 * Assembly usage: "ta SPARC_SW_TRAP_<TYPE>"
 */
#define SPARC_SW_TRAP_FLUSH_WINDOWS 0x03
#define SPARC_SW_TRAP_SET_PIL 0x09
#define SPARC_SW_TRAP_EXCEPT 0x0F

#ifndef _ASMLANGUAGE

#include <sys/util.h>

#ifdef __cplusplus
extern "C" {
#endif

/* Round a stack size up to the architecture stack pointer alignment. */
#define STACK_ROUND_UP(x) ROUND_UP(x, ARCH_STACK_PTR_ALIGN)

/*
 * SOC specific function to translate from processor interrupt request level
 * (1..15) to logical interrupt source number. For example by probing the
 * interrupt controller.
 */
int z_sparc_int_get_source(int irl);

/* Default handler installed for interrupt lines with no registered ISR. */
void z_irq_spurious(const void *unused);

/*
 * Register isr_p/isr_param_p in the software ISR table for irq_p.
 * priority_p and flags_p are accepted for API compatibility but unused
 * here (interrupt priority is the fixed processor interrupt level).
 */
#define ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
{ \
	Z_ISR_DECLARE(irq_p, 0, isr_p, isr_param_p); \
} \

/*
 * Set the processor interrupt level via the SET_PIL software trap.
 *
 * newpil is passed to the trap in register %o0; the previous level is
 * returned in the same register by the trap handler (handler not visible
 * in this file). The "memory" clobber orders the trap against surrounding
 * memory accesses so it acts as a compiler barrier.
 */
static ALWAYS_INLINE unsigned int z_sparc_set_pil_inline(unsigned int newpil)
{
	register uint32_t oldpil __asm__ ("o0") = newpil;

	__asm__ volatile (
		"ta %1\nnop\n" :
		"=r" (oldpil) : "i" (SPARC_SW_TRAP_SET_PIL), "r" (oldpil) :
		"memory"
	);
	return oldpil;
}

/* Lock interrupts by raising PIL to 15, the highest request level;
 * returns the previous PIL as the lock-out key.
 */
static ALWAYS_INLINE unsigned int arch_irq_lock(void)
{
	return z_sparc_set_pil_inline(15);
}

/* Restore the interrupt level saved by a previous arch_irq_lock(). */
static ALWAYS_INLINE void arch_irq_unlock(unsigned int key)
{
	z_sparc_set_pil_inline(key);
}

/* A key of 0 (PIL 0) means interrupts were fully enabled when locked. */
static ALWAYS_INLINE bool arch_irq_unlocked(unsigned int key)
{
	return key == 0;
}

static ALWAYS_INLINE void arch_nop(void)
{
	__asm__ volatile ("nop");
}

extern uint32_t sys_clock_cycle_get_32(void);

/* Return the current 32-bit hardware cycle count. */
static inline uint32_t arch_k_cycle_get_32(void)
{
	return sys_clock_cycle_get_32();
}

/*
 * Exception stack frame layout: %o0-%o7, %g0-%g7 and the control/state
 * registers (PSR, PC, nPC, WIM, TBR, Y). Presumably filled in by the
 * trap entry code — field ordering must match that assembly.
 */
struct __esf {
	uint32_t out[8];
	uint32_t global[8];
	uint32_t psr;
	uint32_t pc;
	uint32_t npc;
	uint32_t wim;
	uint32_t tbr;
	uint32_t y;
};

typedef struct __esf z_arch_esf_t;

/*
 * Raise a fatal exception with the given reason code: the reason is
 * placed in %g1 and the EXCEPT software trap is taken. Does not return.
 */
#define ARCH_EXCEPT(reason_p) \
do { \
	register uint32_t _g1 __asm__("g1") = reason_p; \
 \
	__asm__ volatile ( \
		"ta %[vector]\n\t" \
		: \
		: [vector] "i" (SPARC_SW_TRAP_EXCEPT), "r" (_g1) \
		: "memory" \
	); \
	CODE_UNREACHABLE; \
} while (false)

#ifdef __cplusplus
}
#endif

#endif /*_ASMLANGUAGE */

#endif /* ZEPHYR_INCLUDE_ARCH_SPARC_ARCH_H_ */