/*
 * Copyright (c) 2018 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#ifndef ZEPHYR_KERNEL_INCLUDE_KSWAP_H_
#define ZEPHYR_KERNEL_INCLUDE_KSWAP_H_

#include <ksched.h>
#include <spinlock.h>
#include <kernel_arch_func.h>

#ifdef CONFIG_STACK_SENTINEL
extern void z_check_stack_sentinel(void);
#else
#define z_check_stack_sentinel() /**/
#endif

/* In SMP, the irq_lock() is a spinlock which is implicitly released
 * and reacquired on context switch to preserve the existing
 * semantics. This means that whenever we are about to return to a
 * thread (via either z_swap() or interrupt/exception return!) we need
 * to restore the lock state to whatever the thread's counter
 * expects.
 */
void z_smp_reacquire_global_lock(struct k_thread *thread);
void z_smp_release_global_lock(struct k_thread *thread);

/* context switching and scheduling-related routines */
#ifdef CONFIG_USE_SWITCH

/* There is an unavoidable SMP race when threads swap -- their thread
 * record is in the queue (and visible to other CPUs) before
 * arch_switch() finishes saving state. We must spin for the switch
 * handle before entering a new thread. See docs on arch_switch().
 *
 * Note: future SMP architectures may need a fence/barrier or cache
 * invalidation here. Current ones don't, and sadly Zephyr doesn't
 * have a framework for that yet.
 */
static inline void wait_for_switch(struct k_thread *thread)
{
#ifdef CONFIG_SMP
        volatile void **shp = (void *)&thread->switch_handle;

        while (*shp == NULL) {
                k_busy_wait(1);
        }
#endif
}

/* New style context switching. arch_switch() is a lower level
 * primitive that doesn't know about the scheduler or return value.
 * Needed for SMP, where the scheduler requires spinlocking that we
 * don't want to have to do in per-architecture assembly.
 *
 * Note that is_spinlock is a compile-time construct which will be
 * optimized out when this function is expanded.
 */
static ALWAYS_INLINE unsigned int do_swap(unsigned int key,
                                          struct k_spinlock *lock,
                                          int is_spinlock)
{
        ARG_UNUSED(lock);
        struct k_thread *new_thread, *old_thread;

#ifdef CONFIG_EXECUTION_BENCHMARKING
        extern void read_timer_start_of_swap(void);
        read_timer_start_of_swap();
#endif

        old_thread = _current;

        z_check_stack_sentinel();

        if (is_spinlock) {
                k_spin_release(lock);
        }

#ifdef CONFIG_SMP
        /* Null out the switch handle, see wait_for_switch() above.
         * Note that we set it back to a non-null value if we are not
         * switching! The value itself doesn't matter, because by
         * definition _current is running and has no saved state.
         */
        volatile void **shp = (void *)&old_thread->switch_handle;

        *shp = NULL;
#endif

        new_thread = z_get_next_ready_thread();

#ifdef CONFIG_SMP
        if (new_thread == old_thread) {
                *shp = old_thread;
        }
#endif

        if (new_thread != old_thread) {
                sys_trace_thread_switched_out();
#ifdef CONFIG_TIMESLICING
                z_reset_time_slice();
#endif

                old_thread->swap_retval = -EAGAIN;

#ifdef CONFIG_SMP
                _current_cpu->swap_ok = 0;

                new_thread->base.cpu = arch_curr_cpu()->id;

                if (!is_spinlock) {
                        z_smp_release_global_lock(new_thread);
                }
#endif
                _current_cpu->current = new_thread;
                wait_for_switch(new_thread);
                arch_switch(new_thread->switch_handle,
                            &old_thread->switch_handle);

                sys_trace_thread_switched_in();
        }

        if (is_spinlock) {
                arch_irq_unlock(key);
        } else {
                irq_unlock(key);
        }

        return _current->swap_retval;
}

static inline int z_swap_irqlock(unsigned int key)
{
        return do_swap(key, NULL, 0);
}

static inline int z_swap(struct k_spinlock *lock, k_spinlock_key_t key)
{
        return do_swap(key.key, lock, 1);
}
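/* Illustrative usage sketch, not part of the original header: a
 * typical caller takes a spinlock, pends or un-readies _current as
 * appropriate, then hands both the lock and the key to z_swap().
 * z_swap() releases the lock, switches away, and once this thread
 * runs again returns its swap_retval (-EAGAIN by default, unless the
 * code that woke the thread stored a different value). The function
 * and lock names below are hypothetical.
 *
 *	static struct k_spinlock my_lock;
 *
 *	int block_current_thread(void)
 *	{
 *		k_spinlock_key_t key = k_spin_lock(&my_lock);
 *
 *		// ... pend or un-ready _current here ...
 *		return z_swap(&my_lock, key);
 *	}
 */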
static inline void z_swap_unlocked(void)
{
        struct k_spinlock lock = {};
        k_spinlock_key_t key = k_spin_lock(&lock);

        (void) z_swap(&lock, key);
}

#else /* !CONFIG_USE_SWITCH */

extern int arch_swap(unsigned int key);

static inline int z_swap_irqlock(unsigned int key)
{
        int ret;

        z_check_stack_sentinel();

#ifndef CONFIG_ARM
        sys_trace_thread_switched_out();
#endif
        ret = arch_swap(key);
#ifndef CONFIG_ARM
        sys_trace_thread_switched_in();
#endif

        return ret;
}

/* If !USE_SWITCH, then spinlocks are guaranteed degenerate as we
 * can't be in SMP. The k_spin_release() call is just for validation
 * handling.
 */
static ALWAYS_INLINE int z_swap(struct k_spinlock *lock, k_spinlock_key_t key)
{
        k_spin_release(lock);
        return z_swap_irqlock(key.key);
}

static inline void z_swap_unlocked(void)
{
        (void) z_swap_irqlock(arch_irq_lock());
}

#endif

#endif /* ZEPHYR_KERNEL_INCLUDE_KSWAP_H_ */