/*
 * Copyright (c) 2016 Jean-Paul Etienne <fractalclone@gmail.com>
 * Contributors: 2018 Antmicro <www.antmicro.com>
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief RISCV specific kernel interface header
 * This header contains the RISCV specific kernel interface. It is
 * included by the generic kernel interface header (arch/cpu.h)
 */

#ifndef ZEPHYR_INCLUDE_ARCH_RISCV_ARCH_H_
#define ZEPHYR_INCLUDE_ARCH_RISCV_ARCH_H_

#include <arch/riscv/thread.h>
#include <arch/riscv/exp.h>
#include <arch/common/sys_io.h>
#include <arch/common/ffs.h>

#include <irq.h>
#include <sw_isr_table.h>
#include <soc.h>
#include <devicetree.h>

/* stacks, for RISCV architecture stack should be 16byte-aligned */
#define STACK_ALIGN 16

/*
 * Register-width-dependent helpers: the load/store mnemonics and the
 * register size (in bytes) and shift (log2 of size) differ between
 * RV32 and RV64. These are used from both C and assembly (via TOSTR).
 */
#ifdef CONFIG_64BIT
#define RV_OP_LOADREG ld
#define RV_OP_STOREREG sd
#define RV_REGSIZE 8
#define RV_REGSHIFT 3
#else
#define RV_OP_LOADREG lw
#define RV_OP_STOREREG sw
#define RV_REGSIZE 4
#define RV_REGSHIFT 2
#endif

/* Common mstatus bits. All supported cores today have the same
 * layouts.
 */
#define MSTATUS_IEN     (1UL << 3)	/* machine interrupt-enable bit */
#define MSTATUS_MPP_M   (3UL << 11)	/* previous privilege = machine */
#define MSTATUS_MPIE_EN (1UL << 7)	/* previous interrupt-enable bit */

/* This comes from openisa_rv32m1, but doesn't seem to hurt on other
 * platforms:
 * - Preserve machine privileges in MPP. If you see any documentation
 *   telling you that MPP is read-only on this SoC, don't believe its
 *   lies.
 * - Enable interrupts when exiting from exception into a new thread
 *   by setting MPIE now, so it will be copied into IE on mret.
 */
#define MSTATUS_DEF_RESTORE (MSTATUS_MPP_M | MSTATUS_MPIE_EN)

#ifndef _ASMLANGUAGE
#include <sys/util.h>

#ifdef __cplusplus
extern "C" {
#endif

/* Round a stack size/pointer up or down to the required 16-byte alignment */
#define STACK_ROUND_UP(x) ROUND_UP(x, STACK_ALIGN)
#define STACK_ROUND_DOWN(x) ROUND_DOWN(x, STACK_ALIGN)

/* macros convert value of its argument to a string */
#define DO_TOSTR(s) #s
#define TOSTR(s) DO_TOSTR(s)

/* concatenate the values of the arguments into one */
#define DO_CONCAT(x, y) x ## y
#define CONCAT(x, y) DO_CONCAT(x, y)

/*
 * SOC-specific function to get the IRQ number generating the interrupt.
 * __soc_get_irq returns a bitfield of pending IRQs.
 */
extern u32_t __soc_get_irq(void);

/* Architecture IRQ control API; implementations are provided elsewhere
 * (SoC or PLIC driver).
 */
void arch_irq_enable(unsigned int irq);
void arch_irq_disable(unsigned int irq);
int arch_irq_is_enabled(unsigned int irq);
void arch_irq_priority_set(unsigned int irq, unsigned int prio);
void z_irq_spurious(void *unused);

/*
 * Static ISR registration. With a PLIC present the per-IRQ priority is
 * programmed at connect time; without one, the priority argument is
 * accepted but ignored. Both forms evaluate to the IRQ number.
 */
#if defined(CONFIG_RISCV_HAS_PLIC)
#define ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
({ \
	Z_ISR_DECLARE(irq_p, 0, isr_p, isr_param_p); \
	arch_irq_priority_set(irq_p, priority_p); \
	irq_p; \
})
#else
#define ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
({ \
	Z_ISR_DECLARE(irq_p, 0, isr_p, isr_param_p); \
	irq_p; \
})
#endif

/*
 * use atomic instruction csrrc to lock global irq
 * csrrc: atomic read and clear bits in CSR register
 *
 * Returns the PREVIOUS state of the mstatus interrupt-enable bit
 * (masked to MSTATUS_IEN), to be passed back to arch_irq_unlock().
 * csrrc writes the old CSR value to %0 while clearing the bits in %1.
 */
static ALWAYS_INLINE unsigned int arch_irq_lock(void)
{
	unsigned int key;
	ulong_t mstatus;

	__asm__ volatile ("csrrc %0, mstatus, %1"
			  : "=r" (mstatus)
			  : "r" (MSTATUS_IEN)
			  : "memory");

	key = (mstatus & MSTATUS_IEN);
	return key;
}

/*
 * use atomic instruction csrrs to unlock global irq
 * csrrs: atomic read and set bits in CSR register
 *
 * Only re-enables interrupts if the key (from arch_irq_lock()) had the
 * interrupt-enable bit set; setting zero bits is a no-op, so a lock
 * taken while already locked stays locked.
 */
static ALWAYS_INLINE void arch_irq_unlock(unsigned int key)
{
	ulong_t mstatus;

	__asm__ volatile ("csrrs %0, mstatus, %1"
			  : "=r" (mstatus)
			  : "r" (key & MSTATUS_IEN)
			  : "memory");
}

/* True if the given lock key represents an "interrupts enabled" state */
static ALWAYS_INLINE bool arch_irq_unlocked(unsigned int key)
{
	/* FIXME: looking at arch_irq_lock, this should be reducable
	 * to just testing that key is nonzero (because it should only
	 * have the single bit set).  But there is a mask applied to
	 * the argument in arch_irq_unlock() that has me worried
	 * that something elseswhere might try to set a bit?  Do it
	 * the safe way for now.
	 */
	return (key & MSTATUS_IEN) == MSTATUS_IEN;
}

/* Architecture no-op, used for busy-wait style delays */
static ALWAYS_INLINE void arch_nop(void)
{
	__asm__ volatile("nop");
}

/* Provided by the system timer driver */
extern u32_t z_timer_cycle_get_32(void);

/* Returns the current 32-bit hardware cycle count from the timer driver */
static inline u32_t arch_k_cycle_get_32(void)
{
	return z_timer_cycle_get_32();
}

#ifdef __cplusplus
}
#endif

#endif /*_ASMLANGUAGE */

#if defined(CONFIG_SOC_FAMILY_RISCV_PRIVILEGE)
#include <arch/riscv/riscv-privilege/asm_inline.h>
#endif

#endif