Boot Linux faster!

Check our new training course

Boot Linux faster!

Check our new training course
and Creative Commons CC-BY-SA
lecture and lab materials

Bootlin logo

Elixir Cross Referencer

/*
 * Copyright (c) 2016 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/*
 * The purpose of this file is to provide essential/minimal kernel structure
 * definitions, so that they can be used without including kernel.h.
 *
 * The following rules must be observed:
 *  1. kernel_structs.h shall not depend on kernel.h both directly and
 *    indirectly (i.e. it shall not include any header files that include
 *    kernel.h in their dependency chain).
 *  2. kernel.h shall imply kernel_structs.h, such that it shall not be
 *    necessary to include kernel_structs.h explicitly when kernel.h is
 *    included.
 */


/* These headers are only usable from C; a second #if !defined(_ASMLANGUAGE)
 * section below guards the structure definitions themselves.
 */
#if !defined(_ASMLANGUAGE)
#include <sys/atomic.h>
#include <zephyr/types.h>
#include <sched_priq.h>
#include <sys/dlist.h>
#include <sys/util.h>
#include <sys/sys_heap.h>
#endif



 * Bitmask definitions for the struct k_thread.thread_state field.
 * Must be before kerneL_arch_data.h because it might need them to be already
 * defined.

/* states: common uses low bits, arch-specific use high bits */

/* NOTE(review): the scrape dropped the #define lines for bits 2, 4, 5 and 6;
 * they are reconstructed from upstream Zephyr — verify against the exact
 * revision this file came from.
 */

/* Not a real thread */
#define _THREAD_DUMMY (BIT(0))

/* Thread is waiting on an object */
#define _THREAD_PENDING (BIT(1))

/* Thread has not yet started */
#define _THREAD_PRESTART (BIT(2))

/* Thread has terminated */
#define _THREAD_DEAD (BIT(3))

/* Thread is suspended */
#define _THREAD_SUSPENDED (BIT(4))

/* Thread is being aborted (SMP only) */
#define _THREAD_ABORTING (BIT(5))

/* Thread was aborted in interrupt context (SMP only) */
#define _THREAD_ABORTED_IN_ISR (BIT(6))

/* Thread is present in the ready queue */
#define _THREAD_QUEUED (BIT(7))

/* end - states */

/* NOTE(review): the three #define lines below were dropped by the scrape and
 * are reconstructed from upstream Zephyr — confirm names/values against the
 * original revision.
 */

/* Magic value in lowest bytes of the stack */
#define STACK_SENTINEL 0xF0F0F0F0

/* lowest value of _thread_base.preempt at which a thread is non-preemptible */
#define _NON_PREEMPT_THRESHOLD 0x0080U

/* highest value of _thread_base.preempt at which a thread is preemptible */
#define _PREEMPT_THRESHOLD (_NON_PREEMPT_THRESHOLD - 1U)

#if !defined(_ASMLANGUAGE)

/* Per-scheduler ready queue: exactly one runq representation is selected by
 * the CONFIG_SCHED_* choice (the two consecutive `runq` members in the
 * scraped text were alternatives of a dropped #if/#elif chain).
 */
struct _ready_q {
#ifndef CONFIG_SMP
	/* always contains next thread to run: cannot be NULL */
	struct k_thread *cache;
#endif

#if defined(CONFIG_SCHED_DUMB)
	sys_dlist_t runq;
#elif defined(CONFIG_SCHED_SCALABLE)
	struct _priq_rb runq;
#elif defined(CONFIG_SCHED_MULTIQ)
	struct _priq_mq runq;
#endif
};
typedef struct _ready_q _ready_q_t;

/* Per-CPU kernel state. NOTE(review): the conditional guards below were
 * dropped by the scrape and are reconstructed from upstream Zephyr — verify
 * the exact CONFIG_* conditions against the original revision.
 */
struct _cpu {
	/* nested interrupt count */
	u32_t nested;

	/* interrupt stack pointer base */
	char *irq_stack;

	/* currently scheduled thread */
	struct k_thread *current;

	/* one assigned idle thread per CPU */
	struct k_thread *idle_thread;

#if (CONFIG_NUM_METAIRQ_PRIORITIES > 0) && (CONFIG_NUM_COOP_PRIORITIES > 0)
	/* Coop thread preempted by current metairq, or NULL */
	struct k_thread *metairq_preempted;
#endif

#ifdef CONFIG_TIMESLICING
	/* number of ticks remaining in current time slice */
	int slice_ticks;
#endif

	u8_t id;

#ifdef CONFIG_SMP
	/* True when _current is allowed to context switch */
	u8_t swap_ok;
#endif
};
typedef struct _cpu _cpu_t;

struct z_kernel {
	struct _cpu cpus[CONFIG_MP_NUM_CPUS];

	/* queue of timeouts */
	sys_dlist_t timeout_q;

	s32_t idle; /* Number of ticks for kernel idling */

	 * ready queue: can be big, keep after small fields, since some
	 * assembly (e.g. ARC) are limited in the encoding of the offset
	struct _ready_q ready_q;

	 * A 'current_sse' field does not exist in addition to the 'current_fp'
	 * field since it's not possible to divide the IA-32 non-integer
	 * registers into 2 distinct blocks owned by differing threads.  In
	 * other words, given that the 'fxnsave/fxrstor' instructions
	 * save/restore both the X87 FPU and XMM registers, it's not possible
	 * for a thread to only "own" the XMM registers.

	/* thread that owns the FP regs */
	struct k_thread *current_fp;

	struct k_thread *threads; /* singly linked list of ALL threads */

typedef struct z_kernel _kernel_t;

extern struct z_kernel _kernel;


/* As scraped, _current_cpu and _current were each defined twice; the two
 * pairs are the SMP and non-SMP alternatives of a dropped #ifdef CONFIG_SMP
 * block, reconstructed here.
 */
#ifdef CONFIG_SMP
/* True if the current context can be preempted and migrated to
 * another SMP CPU.
 */
bool z_smp_cpu_mobile(void);

#define _current_cpu ({ __ASSERT_NO_MSG(!z_smp_cpu_mobile()); \
			arch_curr_cpu(); })
#define _current k_current_get()
#else
#define _current_cpu (&_kernel.cpus[0])
#define _current _kernel.cpus[0].current
#endif

#define _timeout_q _kernel.timeout_q

/* kernel wait queue record */

/* As scraped, _wait_q_t was typedef'd twice with no conditional (a
 * redefinition error); the two forms are the scalable (red/black tree) and
 * simple (dlist) alternatives of a dropped #ifdef CONFIG_WAITQ_SCALABLE
 * block, reconstructed here.
 */
#ifdef CONFIG_WAITQ_SCALABLE

typedef struct {
	struct _priq_rb waitq;
} _wait_q_t;

extern bool z_priq_rb_lessthan(struct rbnode *a, struct rbnode *b);

#define Z_WAIT_Q_INIT(wait_q) { { { .lessthan_fn = z_priq_rb_lessthan } } }

#else

typedef struct {
	sys_dlist_t waitq;
} _wait_q_t;

#define Z_WAIT_Q_INIT(wait_q) { SYS_DLIST_STATIC_INIT(&(wait_q)->waitq) }

#endif

/* kernel timeout record */

struct _timeout;

/* Callback invoked when a timeout expires; receives the expired record. */
typedef void (*_timeout_func_t)(struct _timeout *t);

struct _timeout {
	sys_dnode_t node;  /* linkage in the kernel timeout queue */
	s32_t dticks;      /* delta ticks relative to the previous entry */
	_timeout_func_t fn;
};

/* kernel spinlock type */

/* Kernel spinlock. NOTE(review): the #ifdef guards and the continuation of
 * the CONFIG_CPLUSPLUS condition were dropped by the scrape and are
 * reconstructed from upstream Zephyr — verify against the original revision.
 */
struct k_spinlock {
#ifdef CONFIG_SMP
	atomic_t locked;
#endif

#ifdef CONFIG_SPIN_VALIDATE
	/* Stores the thread that holds the lock with the locking CPU
	 * ID in the bottom two bits.
	 */
	uintptr_t thread_cpu;
#endif

#if defined(CONFIG_CPLUSPLUS) && !defined(CONFIG_SMP) && \
	!defined(CONFIG_SPIN_VALIDATE)
	/* If CONFIG_SMP and CONFIG_SPIN_VALIDATE are both not defined
	 * the k_spinlock struct will have no members. The result
	 * is that in C sizeof(k_spinlock) is 0 and in C++ it is 1.
	 * This size difference causes problems when the k_spinlock
	 * is embedded into another struct like k_msgq, because C and
	 * C++ will have different ideas on the offsets of the members
	 * that come after the k_spinlock member.
	 * To prevent this we add a 1 byte dummy member to k_spinlock
	 * when the user selects C++ support and k_spinlock would
	 * otherwise be empty.
	 */
	char dummy;
#endif
};

/* kernel synchronized heap struct */

/* Synchronized heap: a sys_heap plus a wait queue for blocking allocators
 * and a spinlock protecting the heap state.
 */
struct k_heap {
	struct sys_heap heap; /* underlying allocator state */
	_wait_q_t wait_q;     /* threads blocked waiting for free memory */
	struct k_spinlock lock;
};

#endif /* _ASMLANGUAGE */