/*
 * Copyright (c) 2010-2012, 2014-2015 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Architecture-independent private kernel APIs
 *
 * This file contains private kernel APIs that are not architecture-specific.
 */

#ifndef ZEPHYR_KERNEL_INCLUDE_KERNEL_INTERNAL_H_
#define ZEPHYR_KERNEL_INCLUDE_KERNEL_INTERNAL_H_

#include <kernel.h>
#include <kernel_arch_interface.h>
#include <string.h>

#ifndef _ASMLANGUAGE

#ifdef __cplusplus
extern "C" {
#endif

/* Early boot functions */

void z_bss_zero(void);
#ifdef CONFIG_XIP
void z_data_copy(void);
#else
static inline void z_data_copy(void)
{
	/* Do nothing */
}
#endif
FUNC_NORETURN void z_cstart(void);

extern FUNC_NORETURN void z_thread_entry(k_thread_entry_t entry,
					 void *p1, void *p2, void *p3);

extern char *z_setup_new_thread(struct k_thread *new_thread,
				k_thread_stack_t *stack, size_t stack_size,
				k_thread_entry_t entry,
				void *p1, void *p2, void *p3,
				int prio, uint32_t options, const char *name);

/**
 * @brief Allocate aligned memory from the current thread's resource pool
 *
 * Threads may be assigned a resource pool, which will be used to allocate
 * memory on behalf of certain kernel and driver APIs. Memory reserved
 * in this way should be freed with k_free().
 *
 * If called from an ISR, the k_malloc() system heap will be used if it exists.
 *
 * @param align Required memory alignment
 * @param size Memory allocation size
 * @return A pointer to the allocated memory, or NULL if there is insufficient
 * RAM in the pool or there is no pool to draw memory from
 */
void *z_thread_aligned_alloc(size_t align, size_t size);

/**
 * @brief Allocate some memory from the current thread's resource pool
 *
 * Threads may be assigned a resource pool, which will be used to allocate
 * memory on behalf of certain kernel and driver APIs. Memory reserved
 * in this way should be freed with k_free().
 *
 * If called from an ISR, the k_malloc() system heap will be used if it exists.
 *
 * @param size Memory allocation size
 * @return A pointer to the allocated memory, or NULL if there is insufficient
 * RAM in the pool or there is no pool to draw memory from
 */
static inline void *z_thread_malloc(size_t size)
{
	return z_thread_aligned_alloc(0, size);
}
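/* Usage sketch (illustrative assumption, not code from this header): a
 * kernel or driver API might draw a per-request object from the calling
 * thread's resource pool via z_thread_malloc() and release it with
 * k_free(). The struct and function names below are hypothetical; NULL
 * indicates that no pool is assigned or the pool is exhausted.
 *
 *	struct fake_req {
 *		uint32_t id;
 *	};
 *
 *	static int fake_req_submit(uint32_t id)
 *	{
 *		struct fake_req *req = z_thread_malloc(sizeof(*req));
 *
 *		if (req == NULL) {
 *			return -ENOMEM;
 *		}
 *		req->id = id;
 *		k_free(req);
 *		return 0;
 *	}
 */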
/* set and clear essential thread flag */

extern void z_thread_essential_set(void);
extern void z_thread_essential_clear(void);

/* clean up when a thread is aborted */

#if defined(CONFIG_THREAD_MONITOR)
extern void z_thread_monitor_exit(struct k_thread *thread);
#else
#define z_thread_monitor_exit(thread) \
	do {/* nothing */ \
	} while (false)
#endif /* CONFIG_THREAD_MONITOR */

#ifdef CONFIG_USE_SWITCH
/* Traditionally this is an arch function, but when the switch-based
 * z_swap() is in use it's a simple inline provided by the kernel.
 */
static ALWAYS_INLINE void
arch_thread_return_value_set(struct k_thread *thread, unsigned int value)
{
	thread->swap_retval = value;
}
#endif

static ALWAYS_INLINE void
z_thread_return_value_set_with_data(struct k_thread *thread,
				    unsigned int value,
				    void *data)
{
	arch_thread_return_value_set(thread, value);
	thread->base.swap_data = data;
}
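/* Usage sketch (illustrative assumption, not code from this header): a
 * blocking primitive's wake-up path typically hands the pended thread a
 * swap return value and a data pointer before readying it; z_ready_thread()
 * here stands in for the scheduler call that makes the thread runnable.
 *
 *	z_thread_return_value_set_with_data(thread, 0, buf);
 *	z_ready_thread(thread);
 *
 * The woken thread then sees 0 as the result of the z_swap() that blocked
 * it and picks up buf from thread->base.swap_data.
 */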
extern void z_smp_init(void);

extern void smp_timer_init(void);

extern void z_early_boot_rand_get(uint8_t *buf, size_t length);

#if CONFIG_STACK_POINTER_RANDOM
extern int z_stack_adjust_initialized;
#endif

#ifdef CONFIG_BOOT_TIME_MEASUREMENT
extern uint32_t z_timestamp_main;  /* timestamp when main task starts */
extern uint32_t z_timestamp_idle;  /* timestamp when CPU goes idle */
#endif

extern struct k_thread z_main_thread;

#ifdef CONFIG_MULTITHREADING
extern struct k_thread z_idle_threads[CONFIG_MP_NUM_CPUS];
#endif
extern K_KERNEL_STACK_ARRAY_DEFINE(z_interrupt_stacks, CONFIG_MP_NUM_CPUS,
				   CONFIG_ISR_STACK_SIZE);

#ifdef CONFIG_GEN_PRIV_STACKS
extern uint8_t *z_priv_stack_find(k_thread_stack_t *stack);
#endif

#ifdef CONFIG_USERSPACE
bool z_stack_is_user_capable(k_thread_stack_t *stack);

/* Memory domain setup hook, called from z_setup_new_thread() */
void z_mem_domain_init_thread(struct k_thread *thread);

/* Memory domain teardown hook, called from z_thread_single_abort() */
void z_mem_domain_exit_thread(struct k_thread *thread);

/* This spinlock:
 *
 * - Protects the full set of active k_mem_domain objects and their contents
 * - Serializes calls to arch_mem_domain_* APIs
 *
 * If architecture code needs to access k_mem_domain structures or the
 * partitions they contain at any other point, this spinlock should be held.
 * Uniprocessor systems can get away with just locking interrupts, but this
 * is not recommended.
 */
extern struct k_spinlock z_mem_domain_lock;
#endif /* CONFIG_USERSPACE */

#ifdef CONFIG_GDBSTUB
struct gdb_ctx;

/* Should be called by the arch layer. This is the gdbstub main loop,
 * which communicates synchronously with gdb on the host.
 */
extern int z_gdb_main_loop(struct gdb_ctx *ctx, bool start);
#endif

#ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING
void z_thread_mark_switched_in(void);
void z_thread_mark_switched_out(void);
#else

/**
 * @brief Called after a thread has been selected to run
 */
#define z_thread_mark_switched_in()

/**
 * @brief Called before a thread has been selected to run
 */
#define z_thread_mark_switched_out()

#endif /* CONFIG_INSTRUMENT_THREAD_SWITCHING */

/* Init hook for page frame management, invoked immediately upon entry of
 * main thread, before POST_KERNEL tasks
 */
void z_mem_manage_init(void);

/* Workaround for build-time page table mapping of the kernel */
void z_kernel_map_fixup(void);

#ifdef __cplusplus
}
#endif

#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_KERNEL_INCLUDE_KERNEL_INTERNAL_H_ */
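/* Usage sketch (illustrative assumption, not code from this header): with
 * CONFIG_INSTRUMENT_THREAD_SWITCHING enabled, a context switch path is
 * expected to bracket the actual switch with the hooks declared above:
 *
 *	z_thread_mark_switched_out();
 *	... swap to the incoming thread's context ...
 *	z_thread_mark_switched_in();
 */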