// SPDX-License-Identifier: GPL-2.0-only
/*
* kernel/sched/core.c
*
* Core kernel scheduler code and related syscalls
*
* Copyright (C) 1991-2002 Linus Torvalds
*/
#include <linux/highmem.h>
#include <linux/hrtimer_api.h>
#include <linux/ktime_api.h>
#include <linux/sched/signal.h>
#include <linux/syscalls_api.h>
#include <linux/debug_locks.h>
#include <linux/prefetch.h>
#include <linux/capability.h>
#include <linux/pgtable_api.h>
#include <linux/wait_bit.h>
#include <linux/jiffies.h>
#include <linux/spinlock_api.h>
#include <linux/cpumask_api.h>
#include <linux/lockdep_api.h>
#include <linux/hardirq.h>
#include <linux/softirq.h>
#include <linux/refcount_api.h>
#include <linux/topology.h>
#include <linux/sched/clock.h>
#include <linux/sched/cond_resched.h>
#include <linux/sched/cputime.h>
#include <linux/sched/debug.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/init.h>
#include <linux/sched/isolation.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/mm.h>
#include <linux/sched/nohz.h>
#include <linux/sched/rseq_api.h>
#include <linux/sched/rt.h>
#include <linux/blkdev.h>
#include <linux/context_tracking.h>
#include <linux/cpuset.h>
#include <linux/delayacct.h>
#include <linux/init_task.h>
#include <linux/interrupt.h>
#include <linux/ioprio.h>
#include <linux/kallsyms.h>
#include <linux/kcov.h>
#include <linux/kprobes.h>
#include <linux/llist_api.h>
#include <linux/mmu_context.h>
#include <linux/mmzone.h>
#include <linux/mutex_api.h>
#include <linux/nmi.h>
#include <linux/nospec.h>
#include <linux/perf_event_api.h>
#include <linux/profile.h>
#include <linux/psi.h>
#include <linux/rcuwait_api.h>
#include <linux/sched/wake_q.h>
#include <linux/scs.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/vtime.h>
#include <linux/wait_api.h>
#include <linux/workqueue_api.h>
#ifdef CONFIG_PREEMPT_DYNAMIC
# ifdef CONFIG_GENERIC_ENTRY
# include <linux/entry-common.h>
# endif
#endif
#include <uapi/linux/sched/types.h>
#include <asm/irq_regs.h>
#include <asm/switch_to.h>
#include <asm/tlb.h>
#define CREATE_TRACE_POINTS
#include <linux/sched/rseq_api.h>
#include <trace/events/sched.h>
#include <trace/events/ipi.h>
#undef CREATE_TRACE_POINTS
#include "sched.h"
#include "stats.h"
#include "autogroup.h"
#include "autogroup.h"
#include "pelt.h"
#include "smp.h"
#include "stats.h"
#include "../workqueue_internal.h"
#include "../../io_uring/io-wq.h"
#include "../smpboot.h"
EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpu);
EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpumask);
/*
 * Export tracepoints that act as a bare tracehook (i.e. have no trace event
 * associated with them) to allow external modules to probe them.
*/
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_cfs_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_rt_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_dl_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_irq_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_se_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_thermal_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_cpu_capacity_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_overutilized_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_cfs_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_se_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_update_nr_running_tp);
DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
#ifdef CONFIG_SCHED_DEBUG
/*
* Debugging: various feature bits
*
* If SCHED_DEBUG is disabled, each compilation unit has its own copy of
 * sysctl_sched_features, defined in sched.h, to allow constant propagation
 * at compile time and compiler optimizations based on the features' defaults.
*/
#define SCHED_FEAT(name, enabled) \
(1UL << __SCHED_FEAT_##name) * enabled |
const_debug unsigned int sysctl_sched_features =
#include "features.h"
0;
#undef SCHED_FEAT
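/*
 * Illustrative expansion (the authoritative feature list lives in features.h;
 * the names below are only examples): given entries such as
 *
 *	SCHED_FEAT(GENTLE_FAIR_SLEEPERS, true)
 *	SCHED_FEAT(WARN_DOUBLE_CLOCK, false)
 *
 * the construct above expands to
 *
 *	const_debug unsigned int sysctl_sched_features =
 *		(1UL << __SCHED_FEAT_GENTLE_FAIR_SLEEPERS) * true |
 *		(1UL << __SCHED_FEAT_WARN_DOUBLE_CLOCK) * false |
 *		0;
 *
 * i.e. a bitmask with one bit per feature, set when that feature defaults
 * to enabled.
 */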
/*
* Print a warning if need_resched is set for the given duration (if
* LATENCY_WARN is enabled).
*
* If sysctl_resched_latency_warn_once is set, only one warning will be shown
* per boot.
*/
__read_mostly int sysctl_resched_latency_warn_ms = 100;
__read_mostly int sysctl_resched_latency_warn_once = 1;
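/*
 * Usage sketch (illustrative): the warning is gated by the LATENCY_WARN
 * scheduler feature, which defaults to off and can be toggled at runtime
 * through the debugfs features file, e.g.:
 *
 *	echo LATENCY_WARN > /sys/kernel/debug/sched/features
 *
 * With the defaults above, a need_resched left pending for more than 100ms
 * then triggers a single warning per boot.
 */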
#endif /* CONFIG_SCHED_DEBUG */
/*
* Number of tasks to iterate in a single balance run.
* Limited because this is done with IRQs disabled.
*/
const_debug unsigned int sysctl_sched_nr_migrate = SCHED_NR_MIGRATE_BREAK;
__read_mostly int scheduler_running;
#ifdef CONFIG_SCHED_CORE
DEFINE_STATIC_KEY_FALSE(__sched_core_enabled);
/* kernel prio, less is more */
static inline int __task_prio(const struct task_struct *p)
{
if (p->sched_class == &stop_sched_class) /* trumps deadline */
return -2;
if (rt_prio(p->prio)) /* includes deadline */
return p->prio; /* [-1, 99] */
if (p->sched_class == &idle_sched_class)
return MAX_RT_PRIO + NICE_WIDTH; /* 140 */
	return MAX_RT_PRIO + MAX_NICE; /* 119, squash fair */
}
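/*
 * Worked example (illustrative): a stop-class task maps to -2, any deadline
 * task to -1, an RT task of prio 10 to 10, every fair task to
 * MAX_RT_PRIO + MAX_NICE = 119 and the idle task to
 * MAX_RT_PRIO + NICE_WIDTH = 140; a smaller value always means higher
 * scheduling precedence.
 */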
/*
* l(a,b)
* le(a,b) := !l(b,a)
* g(a,b) := l(b,a)
* ge(a,b) := !l(a,b)
*/
/* real prio, less is less */
static inline bool prio_less(const struct task_struct *a,
const struct task_struct *b, bool in_fi)
{
int pa = __task_prio(a), pb = __task_prio(b);
if (-pa < -pb)
return true;
if (-pb < -pa)
return false;
if (pa == -1) /* dl_prio() doesn't work because of stop_class above */
return !dl_time_before(a->dl.deadline, b->dl.deadline);
if (pa == MAX_RT_PRIO + MAX_NICE) /* fair */
return cfs_prio_less(a, b, in_fi);
return false;
}
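/*
 * For instance (illustrative), comparing an RT task of prio 10 against a
 * fair task: pa = 10, pb = 119, so -pa > -pb and the second test fires,
 * i.e. prio_less(rt, fair) == false while prio_less(fair, rt) == true;
 * only ties fall through to the deadline or cfs_prio_less() tie-breaks.
 */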
static inline bool __sched_core_less(const struct task_struct *a,
const struct task_struct *b)
{
if (a->core_cookie < b->core_cookie)
return true;
if (a->core_cookie > b->core_cookie)
return false;
/* flip prio, so high prio is leftmost */
if (prio_less(b, a, !!task_rq(a)->core->core_forceidle_count))
return true;
return false;
}
#define __node_2_sc(node) rb_entry((node), struct task_struct, core_node)
static inline bool rb_sched_core_less(struct rb_node *a, const struct rb_node *b)
{
return __sched_core_less(__node_2_sc(a), __node_2_sc(b));
}
static inline int rb_sched_core_cmp(const void *key, const struct rb_node *node)
{
const struct task_struct *p = __node_2_sc(node);
unsigned long cookie = (unsigned long)key;
if (cookie < p->core_cookie)
return -1;
if (cookie > p->core_cookie)
return 1;
return 0;
}
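/*
 * A minimal usage sketch: rb_add() consumes the less() helper on insertion,
 * while rb_find_first() consumes the cmp() helper with the cookie itself
 * cast to a key pointer:
 *
 *	rb_add(&p->core_node, &rq->core_tree, rb_sched_core_less);
 *	node = rb_find_first((void *)cookie, &rq->core_tree, rb_sched_core_cmp);
 *
 * exactly as done by sched_core_enqueue() and sched_core_find() below.
 */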
void sched_core_enqueue(struct rq *rq, struct task_struct *p)
{
rq->core->core_task_seq++;
if (!p->core_cookie)
return;
rb_add(&p->core_node, &rq->core_tree, rb_sched_core_less);
}
void sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags)
{
rq->core->core_task_seq++;
if (sched_core_enqueued(p)) {
rb_erase(&p->core_node, &rq->core_tree);
RB_CLEAR_NODE(&p->core_node);
}
/*
	 * When migrating the last task off a CPU that is in the forced-idle
	 * state, reschedule to create an accounting edge for forced idle and
	 * re-examine whether the core is still in forced idle.
*/
if (!(flags & DEQUEUE_SAVE) && rq->nr_running == 1 &&
rq->core->core_forceidle_count && rq->curr == rq->idle)
resched_curr(rq);
}
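/*
 * Caller sketch (illustrative): the generic enqueue_task()/dequeue_task()
 * paths keep the core tree in sync whenever core scheduling is enabled,
 * roughly:
 *
 *	if (sched_core_enabled(rq))
 *		sched_core_enqueue(rq, p);
 *
 * and only cookie'd tasks ever land in rq->core_tree, thanks to the
 * !p->core_cookie early return above.
 */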
static int sched_task_is_throttled(struct task_struct *p, int cpu)
{
if (p->sched_class->task_is_throttled)
return p->sched_class->task_is_throttled(p, cpu);
return 0;
}
static struct task_struct *sched_core_next(struct task_struct *p, unsigned long cookie)
{
struct rb_node *node = &p->core_node;
int cpu = task_cpu(p);
do {
node = rb_next(node);
if (!node)
return NULL;
p = __node_2_sc(node);
if (p->core_cookie != cookie)
return NULL;
} while (sched_task_is_throttled(p, cpu));
return p;
}
/*
 * Find the leftmost (i.e. highest-priority) unthrottled task matching @cookie.
 * If no suitable task is found, NULL is returned.
*/
static struct task_struct *sched_core_find(struct rq *rq, unsigned long cookie)
{
struct task_struct *p;
struct rb_node *node;
node = rb_find_first((void *)cookie, &rq->core_tree, rb_sched_core_cmp);
if (!node)
return NULL;
p = __node_2_sc(node);
if (!sched_task_is_throttled(p, rq->cpu))
return p;
return sched_core_next(p, cookie);
}
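/*
 * Usage sketch (illustrative): both the core-wide pick and the cookie
 * stealing path use this to fetch the best cookie-matched task on a given
 * runqueue, roughly:
 *
 *	p = sched_core_find(cpu_rq(cpu), cookie);
 *
 * and force the SMT sibling into (forced) idle when NULL comes back.
 */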
/*
* Magic required such that:
*
* raw_spin_rq_lock(rq);
* ...
* raw_spin_rq_unlock(rq);
*
 * ends up locking and unlocking the _same_ lock, and that all CPUs
 * always agree on which lock covers which rq.
*
* XXX entirely possible to selectively enable cores, don't bother for now.
*/
static DEFINE_MUTEX(sched_core_mutex);
static atomic_t sched_core_count;
static struct cpumask sched_core_mask;
static void sched_core_lock(int cpu, unsigned long *flags)
{
const struct cpumask *smt_mask = cpu_smt_mask(cpu);
int t, i = 0;
local_irq_save(*flags);
for_each_cpu(t, smt_mask)
raw_spin_lock_nested(&cpu_rq(t)->__lock, i++);
}
static void sched_core_unlock(int cpu, unsigned long *flags)
{
const struct cpumask *smt_mask = cpu_smt_mask(cpu);
int t;
for_each_cpu(t, smt_mask)
raw_spin_unlock(&cpu_rq(t)->__lock);
local_irq_restore(*flags);
}
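/*
 * A minimal usage sketch, mirroring __sched_core_flip() below: grab every
 * SMT sibling's rq lock with interrupts disabled, mutate core-wide state,
 * then drop them all again:
 *
 *	unsigned long flags;
 *
 *	sched_core_lock(cpu, &flags);
 *	...			// every sibling's rq->__lock held, IRQs off
 *	sched_core_unlock(cpu, &flags);
 */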
static void __sched_core_flip(bool enabled)
{
unsigned long flags;
int cpu, t;
cpus_read_lock();
/*
* Toggle the online cores, one by one.
*/
cpumask_copy(&sched_core_mask, cpu_online_mask);
for_each_cpu(cpu, &sched_core_mask) {
const struct cpumask *smt_mask = cpu_smt_mask(cpu);
sched_core_lock(cpu, &flags);
for_each_cpu(t, smt_mask)
cpu_rq(t)->core_enabled = enabled;
cpu_rq(cpu)->core->core_forceidle_start = 0;
sched_core_unlock(cpu, &flags);
cpumask_andnot(&sched_core_mask, &sched_core_mask, smt_mask);
}
/*
* Toggle the offline CPUs.
*/
for_each_cpu_andnot(cpu, cpu_possible_mask, cpu_online_mask)
cpu_rq(cpu)->core_enabled = enabled;
cpus_read_unlock();
}
static void sched_core_assert_empty(void)
{
int cpu;
for_each_possible_cpu(cpu)
WARN_ON_ONCE(!RB_EMPTY_ROOT(&cpu_rq(cpu)->core_tree));
}
static void __sched_core_enable(void)
{
static_branch_enable(&__sched_core_enabled);
/*
* Ensure all previous instances of raw_spin_rq_*lock() have finished
* and future ones will observe !sched_core_disabled().
*/
synchronize_rcu();
__sched_core_flip(true);
sched_core_assert_empty();
}
static void __sched_core_disable(void)
{
sched_core_assert_empty();
__sched_core_flip(false);
static_branch_disable(&__sched_core_enabled);
}
void sched_core_get(void)
{
if (atomic_inc_not_zero(&sched_core_count))
return;
mutex_lock(&sched_core_mutex);
if (!atomic_read(&sched_core_count))
__sched_core_enable();
smp_mb__before_atomic();
atomic_inc(&sched_core_count);
mutex_unlock(&sched_core_mutex);
}
static void __sched_core_put(struct work_struct *work)
{
if (atomic_dec_and_mutex_lock(&sched_core_count, &sched_core_mutex)) {
__sched_core_disable();
mutex_unlock(&sched_core_mutex);
}
}
void sched_core_put(void)
{
static DECLARE_WORK(_work, __sched_core_put);
/*
* "There can be only one"
*
* Either this is the last one, or we don't actually need to do any
* 'work'. If it is the last *again*, we rely on
* WORK_STRUCT_PENDING_BIT.
*/
if (!atomic_add_unless(&sched_core_count, -1, 1))
schedule_work(&_work);
}
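/*
 * Usage sketch (illustrative): users of core scheduling bracket the lifetime
 * of the first/last core cookie with this refcount:
 *
 *	sched_core_get();	// first cookie: switches the core machinery on
 *	...
 *	sched_core_put();	// last cookie: switches it back off
 *
 * sched_core_get() pairs atomic_inc_not_zero() with a mutex-protected slow
 * path so that only the 0 -> 1 transition pays for __sched_core_enable(),
 * while sched_core_put() defers the teardown to a workqueue because the
 * final drop may happen in a context that cannot block on sched_core_mutex.
 */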
#else /* !CONFIG_SCHED_CORE */
static inline void sched_core_enqueue(struct rq *rq, struct task_struct *p) { }
static inline void
sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags) { }
#endif /* CONFIG_SCHED_CORE */
/*
* Serialization rules:
*
* Lock order:
*
* p->pi_lock
* rq->lock
* hrtimer_cpu_base->lock (hrtimer_start() for bandwidth controls)
*
* rq1->lock
* rq2->lock where: rq1 < rq2
*
* Regular state:
*
 * Normal scheduling state is serialized by rq->lock. __schedule() takes the
 * local CPU's rq->lock; it optionally removes the task from the runqueue and
* always looks at the local rq data structures to find the most eligible task
* to run next.
*
* Task enqueue is also under rq->lock, possibly taken from another CPU.
* Wakeups from another LLC domain might use an IPI to transfer the enqueue to
* the local CPU to avoid bouncing the runqueue state around [ see
* ttwu_queue_wakelist() ]
*
 * Task wakeups, specifically wakeups that involve migration, are horribly
* complicated to avoid having to take two rq->locks.
*
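 * As a sketch of the resulting nesting (illustrative), the pattern used by
 * task_rq_lock() and friends looks like:
 *
 *	raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
 *	rq = __task_rq_lock(p, &rf);	// rq->lock nests inside p->pi_lock
 *	...
 *	task_rq_unlock(rq, p, &rf);	// drops both locks again
 *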
* Special state:
*
* System-calls and anything external will use task_rq_lock() which acquires
* both p->pi_lock and rq->lock. As a consequence the state they change is
* stable while holding either lock:
*
* - sched_setaffinity()/
* set_cpus_allowed_ptr(): p->cpus_ptr, p->nr_cpus_allowed
* - set_user_nice(): p->se.load