/*
 * Copyright (c) 2019 Intel Corporation
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef ZEPHYR_ARCH_X86_INCLUDE_KERNEL_ARCH_FUNC_H_
#define ZEPHYR_ARCH_X86_INCLUDE_KERNEL_ARCH_FUNC_H_

#include <kernel_arch_data.h>
#include <zephyr/arch/x86/mmustructs.h>

#ifdef CONFIG_X86_64
#include <intel64/kernel_arch_func.h>
#else
#include <ia32/kernel_arch_func.h>
#endif

#ifndef _ASMLANGUAGE

static inline bool arch_is_in_isr(void)
{
#ifdef CONFIG_SMP
	/* On SMP, there is a race vs. the current CPU changing if we
	 * are preempted. Need to mask interrupts while inspecting
	 * (note the deliberate lack of a gcc size suffix on the
	 * instructions; we need to work with both architectures here).
	 */
	bool ret;

	__asm__ volatile ("pushf; cli");
	ret = arch_curr_cpu()->nested != 0;
	__asm__ volatile ("popf");
	return ret;
#else
	return _kernel.cpus[0].nested != 0U;
#endif
}

struct multiboot_info;

extern FUNC_NORETURN void z_x86_prep_c(void *arg);

#ifdef CONFIG_X86_VERY_EARLY_CONSOLE
/* Set up ultra-minimal serial driver for printk() */
void z_x86_early_serial_init(void);
#endif /* CONFIG_X86_VERY_EARLY_CONSOLE */

/* Called upon CPU exception that is unhandled and hence fatal; dump
 * interesting info and call z_x86_fatal_error()
 */
FUNC_NORETURN void z_x86_unhandled_cpu_exception(uintptr_t vector,
						 const z_arch_esf_t *esf);

/* Called upon unrecoverable error; dump registers and transfer control to
 * kernel via z_fatal_error()
 */
FUNC_NORETURN void z_x86_fatal_error(unsigned int reason,
				     const z_arch_esf_t *esf);

/* Common handling for page fault exceptions */
void z_x86_page_fault_handler(z_arch_esf_t *esf);

#ifdef CONFIG_THREAD_STACK_INFO
/**
 * @brief Check if a memory address range falls within the stack
 *
 * Given a memory address range, ensure that it falls within the bounds
 * of the faulting context's stack.
 *
 * @param addr Starting address
 * @param size Size of the region, or 0 if we just want to see if addr is
 *             in bounds
 * @param cs Code segment of faulting context
 * @return true if the addr/size region is not within the thread stack
 */
bool z_x86_check_stack_bounds(uintptr_t addr, size_t size, uint16_t cs);
#endif /* CONFIG_THREAD_STACK_INFO */

#ifdef CONFIG_USERSPACE
extern FUNC_NORETURN void z_x86_userspace_enter(k_thread_entry_t user_entry,
						void *p1, void *p2, void *p3,
						uintptr_t stack_end,
						uintptr_t stack_start);

/* Preparation steps needed for all threads if user mode is turned on.
 *
 * Returns the initial entry point to swap into.
 */
void *z_x86_userspace_prepare_thread(struct k_thread *thread);
#endif /* CONFIG_USERSPACE */

void z_x86_do_kernel_oops(const z_arch_esf_t *esf);

/*
 * Find a free IRQ vector at the specified priority, or return -1 if none is
 * left. For multiple vectors allocated one after another, prev_vector can be
 * used to speed up the allocation: it only needs to be filled with the
 * previously allocated vector, or -1 to start over.
 */
int z_x86_allocate_vector(unsigned int priority, int prev_vector);

/*
 * Connect an IRQ handler to the given vector.
 */
void z_x86_irq_connect_on_vector(unsigned int irq,
				 uint8_t vector,
				 void (*func)(const void *arg),
				 const void *arg);

#endif /* !_ASMLANGUAGE */

#endif /* ZEPHYR_ARCH_X86_INCLUDE_KERNEL_ARCH_FUNC_H_ */
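/*
 * Usage sketch (illustrative only, not part of this header): allocating a
 * run of vectors at one priority and connecting handlers to them, following
 * the prev_vector convention described above for z_x86_allocate_vector() and
 * z_x86_irq_connect_on_vector(). The names n_irqs, irq_base, priority and
 * my_handler are hypothetical placeholders.
 *
 *   int vector = -1;
 *
 *   for (unsigned int i = 0; i < n_irqs; i++) {
 *           vector = z_x86_allocate_vector(priority, vector);
 *           if (vector < 0) {
 *                   break;  // no free vector left at this priority
 *           }
 *           z_x86_irq_connect_on_vector(irq_base + i, (uint8_t)vector,
 *                                       my_handler, NULL);
 *   }
 */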