/*
 * Copyright (c) 2016 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <zephyr/drivers/timer/system_timer.h>
#include <zephyr/wait_q.h>
#include <zephyr/pm/pm.h>
#include <stdbool.h>
#include <zephyr/logging/log.h>
#include <ksched.h>
#include <kswap.h>

LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);

void z_pm_save_idle_exit(void)
{
#ifdef CONFIG_PM
	/* Some CPU low power states require notification at the ISR
	 * to allow any operations that need to be done before the
	 * kernel switches tasks or processes nested interrupts.
	 * This can simply be ignored if not required.
	 */
	pm_system_resume();
#endif /* CONFIG_PM */
	sys_clock_idle_exit();
}

void idle(void *unused1, void *unused2, void *unused3)
{
	ARG_UNUSED(unused1);
	ARG_UNUSED(unused2);
	ARG_UNUSED(unused3);

	__ASSERT_NO_MSG(_current->base.prio >= 0);

	while (true) {
		/* SMP systems without a working IPI can't actually
		 * enter an idle state, because they can't be notified
		 * of scheduler changes (i.e. threads they should
		 * run).  They just spin instead, with a minimal
		 * relaxation loop to prevent hammering the scheduler
		 * lock and/or timer driver.  This is intended as a
		 * fallback configuration for new platform bringup.
		 */
		if (IS_ENABLED(CONFIG_SMP) &&
		    !IS_ENABLED(CONFIG_SCHED_IPI_SUPPORTED)) {
			for (volatile int i = 0; i < 100000; i++) {
				/* Empty loop */
			}
			z_swap_unlocked();
		}

		/* Note weird API: k_cpu_idle() is called with local
		 * CPU interrupts masked, and returns with them
		 * unmasked.  It does not take a spinlock or other
		 * higher level construct.
		 */
		(void) arch_irq_lock();

#ifdef CONFIG_PM
		_kernel.idle = z_get_next_timeout_expiry();

		/*
		 * Call the suspend hook function of the soc interface
		 * to allow entry into a low power state. The function
		 * returns false if a low power state was not entered,
		 * in which case the kernel does normal idle
		 * processing.
		 *
		 * This function is entered with interrupts disabled.
		 * If a low power state was entered, then the hook
		 * function should enable interrupts before exiting.
		 * This is because the kernel does not do its own idle
		 * processing in those cases, i.e. it skips
		 * k_cpu_idle().  The kernel's idle processing
		 * re-enables interrupts, which is essential for the
		 * kernel's scheduling logic.
		 */
		if (k_is_pre_kernel() || !pm_system_suspend(_kernel.idle)) {
			k_cpu_idle();
		}
#else
		k_cpu_idle();
#endif

#if !defined(CONFIG_PREEMPT_ENABLED)
# if !defined(CONFIG_USE_SWITCH) || defined(CONFIG_SPARC)
		/* A legacy mess: the idle thread is by definition
		 * preemptible as far as the modern scheduler is
		 * concerned, but older platforms use
		 * CONFIG_PREEMPT_ENABLED=n as an optimization hint
		 * that interrupt exit always returns to the
		 * interrupted context.  So in that setup we need to
		 * explicitly yield in the idle thread otherwise
		 * nothing else will run once it starts.
		 */
		if (_kernel.ready_q.cache != _current) {
			z_swap_unlocked();
		}
# endif
#endif
	}
}
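
/*
 * Illustrative sketch (assumed names, not part of this file or of
 * Zephyr's PM API): the comment above the pm_system_suspend() call
 * describes the contract a SoC-level suspend path must honor.  A
 * hypothetical hook obeying that contract might look like the code
 * below.  soc_deep_sleep() and SOC_MIN_RESIDENCY_TICKS are stand-ins
 * for platform specifics, and the interrupt-unmask step is shown with
 * arch_irq_unlock(0) even though the exact primitive is architecture
 * dependent.
 */
#if 0 /* sketch only, never compiled */
static bool soc_suspend_hook(int32_t ticks)
{
	/* Entered with interrupts masked by the idle loop above. */
	if (ticks != K_TICKS_FOREVER && ticks < SOC_MIN_RESIDENCY_TICKS) {
		/* Residency too short to be worth it: return false so
		 * the kernel falls back to k_cpu_idle(), which
		 * re-enables interrupts as part of normal idle
		 * processing.
		 */
		return false;
	}

	soc_deep_sleep(ticks); /* hypothetical low power entry */

	/* A low power state was entered, so the kernel skips
	 * k_cpu_idle(); the hook must unmask interrupts itself
	 * before returning.
	 */
	arch_irq_unlock(0);
	return true;
}
#endif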