/*
* Copyright (c) 2014 Wind River Systems, Inc.
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief New thread creation for ARCv2
*
* Core thread related primitives for the ARCv2 processor architecture.
*/
#include <kernel.h>
#include <ksched.h>
#include <offsets_short.h>
#include <wait_q.h>
#ifdef CONFIG_USERSPACE
#include <arch/arc/v2/mpu/arc_core_mpu.h>
#endif
/* initial stack frame */
struct init_stack_frame {
uint32_t pc;
#ifdef CONFIG_ARC_HAS_SECURE
uint32_t sec_stat;
#endif
uint32_t status32;
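	/* r3-r0 carry the z_thread_entry() arguments, filled in by
	 * arch_new_thread(): entry point in r0, p1-p3 in r1-r3
	 */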
uint32_t r3;
uint32_t r2;
uint32_t r1;
uint32_t r0;
};
#ifdef CONFIG_USERSPACE
struct user_init_stack_frame {
struct init_stack_frame iframe;
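	/* initial user stack pointer, popped by
	 * z_user_thread_entry_wrapper before dropping to user mode
	 */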
uint32_t user_sp;
};
static bool is_user(struct k_thread *thread)
{
return (thread->base.user_options & K_USER) != 0;
}
#endif
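/*
 * Sketch of the initial stack contents set up by arch_new_thread(),
 * derived from the frame structs above (highest address at the top):
 *
 *	+-----------+ <- initial stack pointer
 *	| user_sp   |    (user threads only)
 *	| r0        |    entry point
 *	| r1        |    p1
 *	| r2        |    p2
 *	| r3        |    p3
 *	| status32  |
 *	| sec_stat  |    (CONFIG_ARC_HAS_SECURE only)
 *	| pc        |    thread entry wrapper
 *	+-----------+
 */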
/* Set all stack-related architecture variables for the provided thread */
static void setup_stack_vars(struct k_thread *thread)
{
#ifdef CONFIG_USERSPACE
if (is_user(thread)) {
#ifdef CONFIG_GEN_PRIV_STACKS
thread->arch.priv_stack_start =
(uint32_t)z_priv_stack_find(thread->stack_obj);
#else
thread->arch.priv_stack_start = (uint32_t)(thread->stack_obj);
#endif /* CONFIG_GEN_PRIV_STACKS */
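		/* the stack guard occupies the lowest addresses of the
		 * stack object; the usable privileged stack starts above it
		 */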
thread->arch.priv_stack_start += Z_ARC_STACK_GUARD_SIZE;
} else {
thread->arch.priv_stack_start = 0;
}
#endif /* CONFIG_USERSPACE */
#ifdef CONFIG_ARC_STACK_CHECKING
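	/* record the stack bounds used by hardware stack checking, for
	 * both the kernel stack and (for user threads) the user stack
	 */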
#ifdef CONFIG_USERSPACE
if (is_user(thread)) {
thread->arch.k_stack_top = thread->arch.priv_stack_start;
thread->arch.k_stack_base = (thread->arch.priv_stack_start +
CONFIG_PRIVILEGED_STACK_SIZE);
thread->arch.u_stack_top = thread->stack_info.start;
thread->arch.u_stack_base = (thread->stack_info.start +
thread->stack_info.size);
} else
#endif /* CONFIG_USERSPACE */
{
thread->arch.k_stack_top = (uint32_t)thread->stack_info.start;
thread->arch.k_stack_base = (uint32_t)(thread->stack_info.start +
thread->stack_info.size);
#ifdef CONFIG_USERSPACE
thread->arch.u_stack_top = 0;
thread->arch.u_stack_base = 0;
#endif /* CONFIG_USERSPACE */
}
#endif /* CONFIG_ARC_STACK_CHECKING */
}
/* Get the initial stack frame pointer from the thread's stack buffer. */
static struct init_stack_frame *get_iframe(struct k_thread *thread,
char *stack_ptr)
{
#ifdef CONFIG_USERSPACE
if (is_user(thread)) {
		/* The initial stack frame for a user thread is slightly
		 * larger: we land in z_user_thread_entry_wrapper on the
		 * privileged stack, then pop off an additional value for
		 * the user stack pointer.
		 */
struct user_init_stack_frame *uframe;
uframe = Z_STACK_PTR_TO_FRAME(struct user_init_stack_frame,
thread->arch.priv_stack_start +
CONFIG_PRIVILEGED_STACK_SIZE);
uframe->user_sp = (uint32_t)stack_ptr;
return &uframe->iframe;
}
#endif
return Z_STACK_PTR_TO_FRAME(struct init_stack_frame, stack_ptr);
}
/*
 * The initial context is a basic stack frame that contains the arguments
 * for z_thread_entry(), a return address pointing at the thread entry
 * wrapper, and the initial status register value.
 */
void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
char *stack_ptr, k_thread_entry_t entry,
void *p1, void *p2, void *p3)
{
struct init_stack_frame *iframe;
setup_stack_vars(thread);
/* Set up initial stack frame */
iframe = get_iframe(thread, stack_ptr);
#ifdef CONFIG_USERSPACE
	/* Enable the US bit; US reads as zero in user mode. This allows
	 * user mode to use sleep instructions, which enables a form of
	 * denial-of-service attack (putting the processor to sleep), but
	 * since the interrupt level/mask can't be set from user space this
	 * is no worse than executing a busy loop without yielding.
	 */
iframe->status32 = _ARC_V2_STATUS32_US;
if (is_user(thread)) {
iframe->pc = (uint32_t)z_user_thread_entry_wrapper;
} else {
iframe->pc = (uint32_t)z_thread_entry_wrapper;
}
#else
iframe->status32 = 0;
iframe->pc = ((uint32_t)z_thread_entry_wrapper);
#endif /* CONFIG_USERSPACE */
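	/* the new thread inherits the current secure state */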
#ifdef CONFIG_ARC_SECURE_FIRMWARE
iframe->sec_stat = z_arc_v2_aux_reg_read(_ARC_V2_SEC_STAT);
#endif
iframe->r0 = (uint32_t)entry;
iframe->r1 = (uint32_t)p1;
iframe->r2 = (uint32_t)p2;
iframe->r3 = (uint32_t)p3;
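	/* arm hardware stack checking via the SC bit (or SSC, for secure
	 * firmware) in the thread's saved status
	 */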
#ifdef CONFIG_ARC_STACK_CHECKING
#ifdef CONFIG_ARC_SECURE_FIRMWARE
iframe->sec_stat |= _ARC_V2_SEC_STAT_SSC;
#else
iframe->status32 |= _ARC_V2_STATUS32_SC;
#endif /* CONFIG_ARC_SECURE_FIRMWARE */
#endif /* CONFIG_ARC_STACK_CHECKING */
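	/* allow this thread to perform unaligned memory accesses */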
#ifdef CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS
iframe->status32 |= _ARC_V2_STATUS32_AD;
#endif
/* Set required thread members */
thread->switch_handle = thread;
thread->arch.relinquish_cause = _CAUSE_COOP;
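	/* point sp below the initial frame, reserving room for the
	 * callee-saved context that the context switch code will pop
	 */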
thread->callee_saved.sp =
(uint32_t)iframe - ___callee_saved_stack_t_SIZEOF;
	/* initial values in all other registers/k_thread fields are irrelevant */
}
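/*
 * Record the outgoing thread in @old_thread and return the switch handle
 * of the next thread to run.
 */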
void *z_arch_get_next_switch_handle(struct k_thread **old_thread)
{
*old_thread = _current;
return z_get_next_switch_handle(*old_thread);
}
#ifdef CONFIG_USERSPACE
FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
void *p1, void *p2, void *p3)
{
setup_stack_vars(_current);
	/* possible optimization: no need to load the mem domain anymore */
	/* TODO: determine whether the CPU needs to be locked here */
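	/* set up the MPU for this thread, then drop to user mode;
	 * z_arc_userspace_enter() never returns
	 */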
configure_mpu_thread(_current);
z_arc_userspace_enter(user_entry, p1, p2, p3,
(uint32_t)_current->stack_info.start,
(_current->stack_info.size -
_current->stack_info.delta), _current);
CODE_UNREACHABLE;
}
#endif
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
int arch_float_disable(struct k_thread *thread)
{
unsigned int key;
/* Ensure a preemptive context switch does not occur */
key = irq_lock();
/* Disable all floating point capabilities for the thread */
thread->base.user_options &= ~K_FP_REGS;
irq_unlock(key);
return 0;
}
int arch_float_enable(struct k_thread *thread)
{
unsigned int key;
/* Ensure a preemptive context switch does not occur */
key = irq_lock();
/* Enable all floating point capabilities for the thread */
thread->base.user_options |= K_FP_REGS;
irq_unlock(key);
return 0;
}
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */