/* swap_macros.h - helper macros for context switch */

/*
 * Copyright (c) 2014 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef ZEPHYR_ARCH_ARC_INCLUDE_SWAP_MACROS_H_
#define ZEPHYR_ARCH_ARC_INCLUDE_SWAP_MACROS_H_

#include <kernel_structs.h>
#include <offsets_short.h>
#include <toolchain.h>
#include <arch/cpu.h>
#include <arch/arc/tool-compat.h>

#ifdef _ASMLANGUAGE

/* save the callee-saved regs of the current thread, whose struct pointer is in r2 */
.macro _save_callee_saved_regs

	sub_s sp, sp, ___callee_saved_stack_t_SIZEOF

	/* save regs on stack */
	st_s r13, [sp, ___callee_saved_stack_t_r13_OFFSET]
	st_s r14, [sp, ___callee_saved_stack_t_r14_OFFSET]
	st_s r15, [sp, ___callee_saved_stack_t_r15_OFFSET]
	st r16, [sp, ___callee_saved_stack_t_r16_OFFSET]
	st r17, [sp, ___callee_saved_stack_t_r17_OFFSET]
	st r18, [sp, ___callee_saved_stack_t_r18_OFFSET]
	st r19, [sp, ___callee_saved_stack_t_r19_OFFSET]
	st r20, [sp, ___callee_saved_stack_t_r20_OFFSET]
	st r21, [sp, ___callee_saved_stack_t_r21_OFFSET]
	st r22, [sp, ___callee_saved_stack_t_r22_OFFSET]
	st r23, [sp, ___callee_saved_stack_t_r23_OFFSET]
	st r24, [sp, ___callee_saved_stack_t_r24_OFFSET]
	st r25, [sp, ___callee_saved_stack_t_r25_OFFSET]
	st r26, [sp, ___callee_saved_stack_t_r26_OFFSET]
	st fp,  [sp, ___callee_saved_stack_t_fp_OFFSET]

#ifdef CONFIG_USERSPACE
#ifdef CONFIG_ARC_HAS_SECURE
#ifdef CONFIG_ARC_SECURE_FIRMWARE
	lr r13, [_ARC_V2_SEC_U_SP]
	st_s r13, [sp, ___callee_saved_stack_t_user_sp_OFFSET]
	lr r13, [_ARC_V2_SEC_K_SP]
	st_s r13, [sp, ___callee_saved_stack_t_kernel_sp_OFFSET]
#else
	lr r13, [_ARC_V2_USER_SP]
	st_s r13, [sp, ___callee_saved_stack_t_user_sp_OFFSET]
	lr r13, [_ARC_V2_KERNEL_SP]
	st_s r13, [sp, ___callee_saved_stack_t_kernel_sp_OFFSET]
#endif /* CONFIG_ARC_SECURE_FIRMWARE */
#else
	lr r13, [_ARC_V2_USER_SP]
	st_s r13, [sp, ___callee_saved_stack_t_user_sp_OFFSET]
#endif
#endif
	st r30, [sp, ___callee_saved_stack_t_r30_OFFSET]

#ifdef CONFIG_ARC_HAS_ACCL_REGS
	st r58, [sp, ___callee_saved_stack_t_r58_OFFSET]
	st r59, [sp, ___callee_saved_stack_t_r59_OFFSET]
#endif

#ifdef CONFIG_FPU_SHARING
	ld_s r13, [r2, ___thread_base_t_user_options_OFFSET]
	/* K_FP_REGS is bit 1 */
	bbit0 r13, 1, 1f
	lr r13, [_ARC_V2_FPU_STATUS]
	st_s r13, [sp, ___callee_saved_stack_t_fpu_status_OFFSET]
	lr r13, [_ARC_V2_FPU_CTRL]
	st_s r13, [sp, ___callee_saved_stack_t_fpu_ctrl_OFFSET]

#ifdef CONFIG_FP_FPU_DA
	lr r13, [_ARC_V2_FPU_DPFP1L]
	st_s r13, [sp, ___callee_saved_stack_t_dpfp1l_OFFSET]
	lr r13, [_ARC_V2_FPU_DPFP1H]
	st_s r13, [sp, ___callee_saved_stack_t_dpfp1h_OFFSET]
	lr r13, [_ARC_V2_FPU_DPFP2L]
	st_s r13, [sp, ___callee_saved_stack_t_dpfp2l_OFFSET]
	lr r13, [_ARC_V2_FPU_DPFP2H]
	st_s r13, [sp, ___callee_saved_stack_t_dpfp2h_OFFSET]
#endif
1:
#endif

	/* save stack pointer in struct k_thread */
	st sp, [r2, _thread_offset_to_sp]
.endm

/* load the callee-saved regs of the thread whose struct pointer is in r2 */
.macro _load_callee_saved_regs
	/* restore stack pointer from struct k_thread */
	ld sp, [r2, _thread_offset_to_sp]

#ifdef CONFIG_ARC_HAS_ACCL_REGS
	ld r58, [sp, ___callee_saved_stack_t_r58_OFFSET]
	ld r59, [sp, ___callee_saved_stack_t_r59_OFFSET]
#endif

#ifdef CONFIG_FPU_SHARING
	ld_s r13, [r2, ___thread_base_t_user_options_OFFSET]
	/* K_FP_REGS is bit 1 */
	bbit0 r13, 1, 2f

	ld_s r13, [sp, ___callee_saved_stack_t_fpu_status_OFFSET]
	sr r13, [_ARC_V2_FPU_STATUS]
	ld_s r13, [sp, ___callee_saved_stack_t_fpu_ctrl_OFFSET]
	sr r13, [_ARC_V2_FPU_CTRL]

#ifdef CONFIG_FP_FPU_DA
	ld_s r13, [sp, ___callee_saved_stack_t_dpfp1l_OFFSET]
	sr r13, [_ARC_V2_FPU_DPFP1L]
	ld_s r13, [sp, ___callee_saved_stack_t_dpfp1h_OFFSET]
	sr r13, [_ARC_V2_FPU_DPFP1H]
	ld_s r13, [sp, ___callee_saved_stack_t_dpfp2l_OFFSET]
	sr r13, [_ARC_V2_FPU_DPFP2L]
	ld_s r13, [sp, ___callee_saved_stack_t_dpfp2h_OFFSET]
	sr r13, [_ARC_V2_FPU_DPFP2H]
#endif
2:
#endif

#ifdef CONFIG_USERSPACE
#ifdef CONFIG_ARC_HAS_SECURE
#ifdef CONFIG_ARC_SECURE_FIRMWARE
	ld_s r13, [sp, ___callee_saved_stack_t_user_sp_OFFSET]
	sr r13, [_ARC_V2_SEC_U_SP]
	ld_s r13, [sp, ___callee_saved_stack_t_kernel_sp_OFFSET]
	sr r13, [_ARC_V2_SEC_K_SP]
#else
	ld_s r13, [sp, ___callee_saved_stack_t_user_sp_OFFSET]
	sr r13, [_ARC_V2_USER_SP]
	ld_s r13, [sp, ___callee_saved_stack_t_kernel_sp_OFFSET]
	sr r13, [_ARC_V2_KERNEL_SP]
#endif /* CONFIG_ARC_SECURE_FIRMWARE */
#else
	ld_s r13, [sp, ___callee_saved_stack_t_user_sp_OFFSET]
	sr r13, [_ARC_V2_USER_SP]
#endif
#endif

	ld_s r13, [sp, ___callee_saved_stack_t_r13_OFFSET]
	ld_s r14, [sp, ___callee_saved_stack_t_r14_OFFSET]
	ld_s r15, [sp, ___callee_saved_stack_t_r15_OFFSET]
	ld r16, [sp, ___callee_saved_stack_t_r16_OFFSET]
	ld r17, [sp, ___callee_saved_stack_t_r17_OFFSET]
	ld r18, [sp, ___callee_saved_stack_t_r18_OFFSET]
	ld r19, [sp, ___callee_saved_stack_t_r19_OFFSET]
	ld r20, [sp, ___callee_saved_stack_t_r20_OFFSET]
	ld r21, [sp, ___callee_saved_stack_t_r21_OFFSET]
	ld r22, [sp, ___callee_saved_stack_t_r22_OFFSET]
	ld r23, [sp, ___callee_saved_stack_t_r23_OFFSET]
	ld r24, [sp, ___callee_saved_stack_t_r24_OFFSET]
	ld r25, [sp, ___callee_saved_stack_t_r25_OFFSET]
	ld r26, [sp, ___callee_saved_stack_t_r26_OFFSET]
	ld fp,  [sp, ___callee_saved_stack_t_fp_OFFSET]
	ld r30, [sp, ___callee_saved_stack_t_r30_OFFSET]

	add_s sp, sp, ___callee_saved_stack_t_SIZEOF

.endm
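
/*
 * Illustrative pairing of the two macros above (a sketch, not code from
 * this file): a minimal cooperative switch saves the outgoing thread's
 * callee-saved regs and loads the incoming thread's, with r2 holding
 * the respective struct k_thread pointer each time:
 *
 *	mov r2, <outgoing thread>	(hypothetical operands)
 *	_save_callee_saved_regs
 *	mov r2, <incoming thread>
 *	_load_callee_saved_regs
 */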

/* discard the callee-saved regs previously saved on the stack */
.macro _discard_callee_saved_regs
	add_s sp, sp, ___callee_saved_stack_t_SIZEOF
.endm

/*
 * Must be called with interrupts locked or in P0.
 * Upon exit, sp will be pointing to the stack frame.
 */
.macro _create_irq_stack_frame

	sub_s sp, sp, ___isf_t_SIZEOF

	st blink, [sp, ___isf_t_blink_OFFSET]

	/* store these right away so we can use them if needed */

	st_s r13, [sp, ___isf_t_r13_OFFSET]
	st_s r12, [sp, ___isf_t_r12_OFFSET]
	st   r11, [sp, ___isf_t_r11_OFFSET]
	st   r10, [sp, ___isf_t_r10_OFFSET]
	st   r9,  [sp, ___isf_t_r9_OFFSET]
	st   r8,  [sp, ___isf_t_r8_OFFSET]
	st   r7,  [sp, ___isf_t_r7_OFFSET]
	st   r6,  [sp, ___isf_t_r6_OFFSET]
	st   r5,  [sp, ___isf_t_r5_OFFSET]
	st   r4,  [sp, ___isf_t_r4_OFFSET]
	st_s r3,  [sp, ___isf_t_r3_OFFSET]
	st_s r2,  [sp, ___isf_t_r2_OFFSET]
	st_s r1,  [sp, ___isf_t_r1_OFFSET]
	st_s r0,  [sp, ___isf_t_r0_OFFSET]

	mov r0, lp_count
	st_s r0, [sp, ___isf_t_lp_count_OFFSET]
	lr r1, [_ARC_V2_LP_START]
	lr r0, [_ARC_V2_LP_END]
	st_s r1, [sp, ___isf_t_lp_start_OFFSET]
	st_s r0, [sp, ___isf_t_lp_end_OFFSET]

#ifdef CONFIG_CODE_DENSITY
	lr r1, [_ARC_V2_JLI_BASE]
	lr r0, [_ARC_V2_LDI_BASE]
	lr r2, [_ARC_V2_EI_BASE]
	st_s r1, [sp, ___isf_t_jli_base_OFFSET]
	st_s r0, [sp, ___isf_t_ldi_base_OFFSET]
	st_s r2, [sp, ___isf_t_ei_base_OFFSET]
#endif

.endm

/*
 * Must be called with interrupts locked or in P0.
 * sp must be pointing to the stack frame.
 */
.macro _pop_irq_stack_frame

	ld blink, [sp, ___isf_t_blink_OFFSET]

#ifdef CONFIG_CODE_DENSITY
	ld_s r1, [sp, ___isf_t_jli_base_OFFSET]
	ld_s r0, [sp, ___isf_t_ldi_base_OFFSET]
	ld_s r2, [sp, ___isf_t_ei_base_OFFSET]
	sr r1, [_ARC_V2_JLI_BASE]
	sr r0, [_ARC_V2_LDI_BASE]
	sr r2, [_ARC_V2_EI_BASE]
#endif

	ld_s r0, [sp, ___isf_t_lp_count_OFFSET]
	mov lp_count, r0
	ld_s r1, [sp, ___isf_t_lp_start_OFFSET]
	ld_s r0, [sp, ___isf_t_lp_end_OFFSET]
	sr r1, [_ARC_V2_LP_START]
	sr r0, [_ARC_V2_LP_END]

	ld_s r13, [sp, ___isf_t_r13_OFFSET]
	ld_s r12, [sp, ___isf_t_r12_OFFSET]
	ld   r11, [sp, ___isf_t_r11_OFFSET]
	ld   r10, [sp, ___isf_t_r10_OFFSET]
	ld   r9,  [sp, ___isf_t_r9_OFFSET]
	ld   r8,  [sp, ___isf_t_r8_OFFSET]
	ld   r7,  [sp, ___isf_t_r7_OFFSET]
	ld   r6,  [sp, ___isf_t_r6_OFFSET]
	ld   r5,  [sp, ___isf_t_r5_OFFSET]
	ld   r4,  [sp, ___isf_t_r4_OFFSET]
	ld_s r3,  [sp, ___isf_t_r3_OFFSET]
	ld_s r2,  [sp, ___isf_t_r2_OFFSET]
	ld_s r1,  [sp, ___isf_t_r1_OFFSET]
	ld_s r0,  [sp, ___isf_t_r0_OFFSET]


	/*
	 * All GPRs have been reloaded; the only register still usable is
	 * ilink.
	 *
	 * The pc and status32 values will still be on the stack. We cannot
	 * pop them yet because the callers of _pop_irq_stack_frame must reload
	 * status32 differently depending on the execution context they are
	 * running in (arch_switch(), firq or exception).
	 */
	add_s sp, sp, ___isf_t_SIZEOF

.endm

/*
 * To use this macro, r2 must hold the thread struct pointer of
 * _kernel.current. r3 is used as a scratch reg.
 */
.macro _load_stack_check_regs
#if defined(CONFIG_ARC_SECURE_FIRMWARE)
	ld r3, [r2, _thread_offset_to_k_stack_base]
	sr r3, [_ARC_V2_S_KSTACK_BASE]
	ld r3, [r2, _thread_offset_to_k_stack_top]
	sr r3, [_ARC_V2_S_KSTACK_TOP]
#ifdef CONFIG_USERSPACE
	ld r3, [r2, _thread_offset_to_u_stack_base]
	sr r3, [_ARC_V2_S_USTACK_BASE]
	ld r3, [r2, _thread_offset_to_u_stack_top]
	sr r3, [_ARC_V2_S_USTACK_TOP]
#endif
#else /* CONFIG_ARC_HAS_SECURE */
	ld r3, [r2, _thread_offset_to_k_stack_base]
	sr r3, [_ARC_V2_KSTACK_BASE]
	ld r3, [r2, _thread_offset_to_k_stack_top]
	sr r3, [_ARC_V2_KSTACK_TOP]
#ifdef CONFIG_USERSPACE
	ld r3, [r2, _thread_offset_to_u_stack_base]
	sr r3, [_ARC_V2_USTACK_BASE]
	ld r3, [r2, _thread_offset_to_u_stack_top]
	sr r3, [_ARC_V2_USTACK_TOP]
#endif
#endif /* CONFIG_ARC_SECURE_FIRMWARE */
.endm

/* increment the interrupt nest counter, then check whether the new
 * value is 1, i.e. whether this is the outermost interrupt.
 * The result is left in the Z (EQ) flag of status32.
 * Two temp regs are needed.
 */
.macro _check_and_inc_int_nest_counter, reg1, reg2
#ifdef CONFIG_SMP
	_get_cpu_id MACRO_ARG(reg1)
	ld.as MACRO_ARG(reg1), [@_curr_cpu, MACRO_ARG(reg1)]
	ld MACRO_ARG(reg2), [MACRO_ARG(reg1), ___cpu_t_nested_OFFSET]
#else
	mov MACRO_ARG(reg1), _kernel
	ld MACRO_ARG(reg2), [MACRO_ARG(reg1), _kernel_offset_to_nested]
#endif
	add MACRO_ARG(reg2), MACRO_ARG(reg2), 1
#ifdef CONFIG_SMP
	st MACRO_ARG(reg2), [MACRO_ARG(reg1), ___cpu_t_nested_OFFSET]
#else
	st MACRO_ARG(reg2), [MACRO_ARG(reg1), _kernel_offset_to_nested]
#endif
	cmp MACRO_ARG(reg2), 1
.endm
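
/*
 * Illustrative use on interrupt entry (a sketch, not from this file,
 * assuming r0/r1 are free scratch regs): only the outermost interrupt
 * (Z flag set: the counter just became 1) should save sp and switch to
 * the per-CPU interrupt stack, e.g.:
 *
 *	_check_and_inc_int_nest_counter r0, r1
 *	bne nested			(Z clear: already nested)
 *	_get_curr_cpu_irq_stack ...	(switch sp to the irq stack)
 * nested :
 */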

/* decrement the interrupt nest counter.
 * While the counter is > 0 the interrupt stack is in use; once it
 * reaches 0 it no longer is.
 */
.macro _dec_int_nest_counter, reg1, reg2
#ifdef CONFIG_SMP
	_get_cpu_id MACRO_ARG(reg1)
	ld.as MACRO_ARG(reg1), [@_curr_cpu, MACRO_ARG(reg1)]
	ld MACRO_ARG(reg2), [MACRO_ARG(reg1), ___cpu_t_nested_OFFSET]
#else
	mov MACRO_ARG(reg1), _kernel
	ld MACRO_ARG(reg2), [MACRO_ARG(reg1), _kernel_offset_to_nested]
#endif
	sub MACRO_ARG(reg2), MACRO_ARG(reg2), 1
#ifdef CONFIG_SMP
	st MACRO_ARG(reg2), [MACRO_ARG(reg1), ___cpu_t_nested_OFFSET]
#else
	st MACRO_ARG(reg2), [MACRO_ARG(reg1), _kernel_offset_to_nested]
#endif
.endm

/* If multiple bits in IRQ_ACT are set, i.e. the highest set bit differs
 * from the lowest set bit, a nested interrupt is being handled. The
 * result is left in the Z (EQ) flag of status32.
 * Two temp regs are needed.
 */
.macro _check_nest_int_by_irq_act, reg1, reg2
	lr MACRO_ARG(reg1), [_ARC_V2_AUX_IRQ_ACT]
#ifdef CONFIG_ARC_SECURE_FIRMWARE
	and MACRO_ARG(reg1), MACRO_ARG(reg1), ((1 << ARC_N_IRQ_START_LEVEL) - 1)
#else
	and MACRO_ARG(reg1), MACRO_ARG(reg1), 0xffff
#endif
	ffs MACRO_ARG(reg2), MACRO_ARG(reg1)
	fls MACRO_ARG(reg1), MACRO_ARG(reg1)
	cmp MACRO_ARG(reg1), MACRO_ARG(reg2)
.endm
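
/*
 * Worked example (illustrative, not from this file): with the masked
 * IRQ_ACT = 0b0110, ffs yields 1 and fls yields 2, so Z is cleared:
 * a nested interrupt. With a single active level, 0b0100, both yield
 * 2 and Z is set: not nested.
 */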


/* macro to get the id of the current cpu
 * the result is left in reg (a GPR)
 */
.macro _get_cpu_id, reg
	lr MACRO_ARG(reg), [_ARC_V2_IDENTITY]
	xbfu MACRO_ARG(reg), MACRO_ARG(reg), 0xe8
.endm
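
/*
 * Note (an assumption based on the ARCv2 IDENTITY register layout, not
 * stated in this file): the 0xe8 operand to xbfu appears to select the
 * bit field at position 8, width 8, i.e. the ARCNUM field in bits
 * [15:8] of IDENTITY, which holds the core number.
 */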

/* macro to get the interrupt stack of the current cpu
 * the result is left in irq_sp (a GPR)
 */
.macro _get_curr_cpu_irq_stack, irq_sp
#ifdef CONFIG_SMP
	_get_cpu_id MACRO_ARG(irq_sp)
	ld.as MACRO_ARG(irq_sp), [@_curr_cpu, MACRO_ARG(irq_sp)]
	ld MACRO_ARG(irq_sp), [MACRO_ARG(irq_sp), ___cpu_t_irq_stack_OFFSET]
#else
	mov MACRO_ARG(irq_sp), _kernel
	ld MACRO_ARG(irq_sp), [MACRO_ARG(irq_sp), _kernel_offset_to_irq_stack]
#endif
.endm

/* macro to push an aux reg onto the stack, using reg as the transfer register */
.macro PUSHAX, reg, aux
	lr MACRO_ARG(reg), [MACRO_ARG(aux)]
	st.a MACRO_ARG(reg), [sp, -4]
.endm

/* macro to pop an aux reg from the stack, using reg as the transfer register */
.macro POPAX, reg, aux
	ld.ab MACRO_ARG(reg), [sp, 4]
	sr MACRO_ARG(reg), [MACRO_ARG(aux)]
.endm
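
/*
 * Illustrative usage (a sketch, not from this file): preserve an aux
 * reg across a region of code, clobbering one GPR as the transfer
 * register:
 *
 *	PUSHAX r13, _ARC_V2_LP_START
 *	...
 *	POPAX r13, _ARC_V2_LP_START
 */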


/* macro to store the old thread's callee-saved regs */
.macro _store_old_thread_callee_regs

	_save_callee_saved_regs
#ifdef CONFIG_SMP
	/* save the old thread into its switch handle, which is required
	 * by wait_for_switch
	 */
	st r2, [r2, ___thread_t_switch_handle_OFFSET]
#endif
.endm

/* macro to store the old thread's callee-saved regs from interrupt context */
.macro _irq_store_old_thread_callee_regs
#if defined(CONFIG_USERSPACE)
/*
 * When USERSPACE is enabled, the ARCv2 ISA switches SP on interrupt
 * entry if the interrupt is taken in user mode, and records this in
 * bit 31 (the U bit) of IRQ_ACT. On interrupt exit, SP is switched
 * back according to the U bit.
 *
 * The user/kernel status of the interrupted thread must be remembered
 * here; it is restored when the thread is switched back in.
 */
	lr r1, [_ARC_V2_AUX_IRQ_ACT]
	and r3, r1, 0x80000000
	push_s r3

	bclr r1, r1, 31
	sr r1, [_ARC_V2_AUX_IRQ_ACT]
#endif
	_store_old_thread_callee_regs
.endm
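
/*
 * Note: the word pushed above under CONFIG_USERSPACE (the saved U bit)
 * stays on the old thread's stack and is popped again by
 * _set_misc_regs_irq_switch_from_irq below when that thread is
 * switched back in.
 */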

/* macro to load new thread callee regs */
.macro _load_new_thread_callee_regs
#ifdef CONFIG_ARC_STACK_CHECKING
	_load_stack_check_regs
#endif
	/*
	 * _load_callee_saved_regs expects the incoming thread in r2 and
	 * restores the stack pointer.
	 */
	_load_callee_saved_regs

#if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_USERSPACE)
	push_s r2
	bl configure_mpu_thread
	pop_s r2
#endif

	ld r3, [r2, _thread_offset_to_relinquish_cause]
.endm


/* when the switch to a thread is caused by cooperative scheduling,
 * some status regs need to be set
 */
.macro _set_misc_regs_irq_switch_from_coop
#ifdef CONFIG_ARC_SECURE_FIRMWARE
	/* must return to secure mode, so set IRM bit to 1 */
	lr r0, [_ARC_V2_SEC_STAT]
	bset r0, r0, _ARC_V2_SEC_STAT_IRM_BIT
	sflag r0
#endif
.endm

/* when the switch to a thread is caused by an irq, some status regs
 * need to be set
 */
.macro _set_misc_regs_irq_switch_from_irq
#if defined(CONFIG_USERSPACE)
/*
 * recover the user/kernel status of the interrupted thread
 */
	pop_s r3
	lr r2, [_ARC_V2_AUX_IRQ_ACT]
	or r2, r2, r3
	sr r2, [_ARC_V2_AUX_IRQ_ACT]
#endif

#ifdef CONFIG_ARC_SECURE_FIRMWARE
	/* the SEC_STAT.IRM bit needs to be recovered here */
	pop_s r3
	sflag r3
#endif
.endm

/* macro to get the next switch handle in assembly */
.macro _get_next_switch_handle
	push_s r2
	mov r0, sp
	bl z_arch_get_next_switch_handle
	pop_s  r2
.endm

/* macro to disable stack checking in assembly; a scratch GPR is
 * needed
 */
.macro _disable_stack_checking, reg
#ifdef CONFIG_ARC_STACK_CHECKING
#ifdef CONFIG_ARC_SECURE_FIRMWARE
	lr MACRO_ARG(reg), [_ARC_V2_SEC_STAT]
	bclr MACRO_ARG(reg), MACRO_ARG(reg), _ARC_V2_SEC_STAT_SSC_BIT
	sflag MACRO_ARG(reg)

#else
	lr MACRO_ARG(reg), [_ARC_V2_STATUS32]
	bclr MACRO_ARG(reg), MACRO_ARG(reg), _ARC_V2_STATUS32_SC_BIT
	kflag MACRO_ARG(reg)
#endif
#endif
.endm

/* macro to enable stack checking in assembly; a scratch GPR is
 * needed
 */
.macro _enable_stack_checking, reg
#ifdef CONFIG_ARC_STACK_CHECKING
#ifdef CONFIG_ARC_SECURE_FIRMWARE
	lr MACRO_ARG(reg), [_ARC_V2_SEC_STAT]
	bset MACRO_ARG(reg), MACRO_ARG(reg), _ARC_V2_SEC_STAT_SSC_BIT
	sflag MACRO_ARG(reg)
#else
	lr MACRO_ARG(reg), [_ARC_V2_STATUS32]
	bset MACRO_ARG(reg), MACRO_ARG(reg), _ARC_V2_STATUS32_SC_BIT
	kflag MACRO_ARG(reg)
#endif
#endif
.endm
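
/*
 * Illustrative pairing (a sketch, assuming r0 is a free scratch reg):
 * bracket code that legitimately moves sp outside the current thread's
 * registered stack bounds:
 *
 *	_disable_stack_checking r0
 *	...
 *	_enable_stack_checking r0
 */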

#endif /* _ASMLANGUAGE */

#endif /* ZEPHYR_ARCH_ARC_INCLUDE_SWAP_MACROS_H_ */