/* swap_macros.h - helper macros for context switch */

/*
 * Copyright (c) 2014 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef ZEPHYR_ARCH_ARC_INCLUDE_SWAP_MACROS_H_
#define ZEPHYR_ARCH_ARC_INCLUDE_SWAP_MACROS_H_

#include <zephyr/kernel_structs.h>
#include <offsets_short.h>
#include <zephyr/toolchain.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/arch/arc/tool-compat.h>
#include <zephyr/arch/arc/asm-compat/assembler.h>
#include <zephyr/kernel.h>
#include "../core/dsp/swap_dsp_macros.h"

#ifdef _ASMLANGUAGE

/* save the callee-saved regs of the current thread (thread struct pointer in r2) */
.macro _save_callee_saved_regs

	SUBR sp, sp, ___callee_saved_stack_t_SIZEOF

	/* save regs on stack */
	STR r13, sp, ___callee_saved_stack_t_r13_OFFSET
	STR r14, sp, ___callee_saved_stack_t_r14_OFFSET
	STR r15, sp, ___callee_saved_stack_t_r15_OFFSET
	STR r16, sp, ___callee_saved_stack_t_r16_OFFSET
	STR r17, sp, ___callee_saved_stack_t_r17_OFFSET
	STR r18, sp, ___callee_saved_stack_t_r18_OFFSET
	STR r19, sp, ___callee_saved_stack_t_r19_OFFSET
	STR r20, sp, ___callee_saved_stack_t_r20_OFFSET
	STR r21, sp, ___callee_saved_stack_t_r21_OFFSET
	STR r22, sp, ___callee_saved_stack_t_r22_OFFSET
	STR r23, sp, ___callee_saved_stack_t_r23_OFFSET
	STR r24, sp, ___callee_saved_stack_t_r24_OFFSET
	STR r25, sp, ___callee_saved_stack_t_r25_OFFSET
	STR r26, sp, ___callee_saved_stack_t_r26_OFFSET
	STR fp,  sp, ___callee_saved_stack_t_fp_OFFSET

#ifdef CONFIG_USERSPACE
#ifdef CONFIG_ARC_HAS_SECURE
#ifdef CONFIG_ARC_SECURE_FIRMWARE
	lr r13, [_ARC_V2_SEC_U_SP]
	st_s r13, [sp, ___callee_saved_stack_t_user_sp_OFFSET]
	lr r13, [_ARC_V2_SEC_K_SP]
	st_s r13, [sp, ___callee_saved_stack_t_kernel_sp_OFFSET]
#else
	lr r13, [_ARC_V2_USER_SP]
	st_s r13, [sp, ___callee_saved_stack_t_user_sp_OFFSET]
	lr r13, [_ARC_V2_KERNEL_SP]
	st_s r13, [sp, ___callee_saved_stack_t_kernel_sp_OFFSET]
#endif /* CONFIG_ARC_SECURE_FIRMWARE */
#else
	lr r13, [_ARC_V2_USER_SP]
	st_s r13, [sp, ___callee_saved_stack_t_user_sp_OFFSET]
#endif
#endif
	STR r30, sp, ___callee_saved_stack_t_r30_OFFSET

#ifdef CONFIG_ARC_HAS_ACCL_REGS
	STR r58, sp, ___callee_saved_stack_t_r58_OFFSET
#ifndef CONFIG_64BIT
	STR r59, sp, ___callee_saved_stack_t_r59_OFFSET
#endif /* !CONFIG_64BIT */
#endif

#ifdef CONFIG_FPU_SHARING
	ld_s r13, [r2, ___thread_base_t_user_options_OFFSET]
	bbit0 r13, K_FP_IDX, fpu_skip_save
	lr r13, [_ARC_V2_FPU_STATUS]
	st_s r13, [sp, ___callee_saved_stack_t_fpu_status_OFFSET]
	lr r13, [_ARC_V2_FPU_CTRL]
	st_s r13, [sp, ___callee_saved_stack_t_fpu_ctrl_OFFSET]

#ifdef CONFIG_FP_FPU_DA
	lr r13, [_ARC_V2_FPU_DPFP1L]
	st_s r13, [sp, ___callee_saved_stack_t_dpfp1l_OFFSET]
	lr r13, [_ARC_V2_FPU_DPFP1H]
	st_s r13, [sp, ___callee_saved_stack_t_dpfp1h_OFFSET]
	lr r13, [_ARC_V2_FPU_DPFP2L]
	st_s r13, [sp, ___callee_saved_stack_t_dpfp2l_OFFSET]
	lr r13, [_ARC_V2_FPU_DPFP2H]
	st_s r13, [sp, ___callee_saved_stack_t_dpfp2h_OFFSET]
#endif
#endif
fpu_skip_save:
	_save_dsp_regs
	/* save stack pointer in struct k_thread */
	STR sp, r2, _thread_offset_to_sp
.endm

/* load the callee-saved regs of the thread whose struct pointer is in r2 */
.macro _load_callee_saved_regs
	/* restore stack pointer from struct k_thread */
	LDR sp, r2, _thread_offset_to_sp

#ifdef CONFIG_ARC_HAS_ACCL_REGS
	LDR r58, sp, ___callee_saved_stack_t_r58_OFFSET
#ifndef CONFIG_64BIT
	LDR r59, sp, ___callee_saved_stack_t_r59_OFFSET
#endif /* !CONFIG_64BIT */
#endif

#ifdef CONFIG_FPU_SHARING
	ld_s r13, [r2, ___thread_base_t_user_options_OFFSET]
	bbit0 r13, K_FP_IDX, fpu_skip_load

	ld_s r13, [sp, ___callee_saved_stack_t_fpu_status_OFFSET]
	sr r13, [_ARC_V2_FPU_STATUS]
	ld_s r13, [sp, ___callee_saved_stack_t_fpu_ctrl_OFFSET]
	sr r13, [_ARC_V2_FPU_CTRL]

#ifdef CONFIG_FP_FPU_DA
	ld_s r13, [sp, ___callee_saved_stack_t_dpfp1l_OFFSET]
	sr r13, [_ARC_V2_FPU_DPFP1L]
	ld_s r13, [sp, ___callee_saved_stack_t_dpfp1h_OFFSET]
	sr r13, [_ARC_V2_FPU_DPFP1H]
	ld_s r13, [sp, ___callee_saved_stack_t_dpfp2l_OFFSET]
	sr r13, [_ARC_V2_FPU_DPFP2L]
	ld_s r13, [sp, ___callee_saved_stack_t_dpfp2h_OFFSET]
	sr r13, [_ARC_V2_FPU_DPFP2H]
#endif
#endif
fpu_skip_load:
	_load_dsp_regs
#ifdef CONFIG_USERSPACE
#ifdef CONFIG_ARC_HAS_SECURE
#ifdef CONFIG_ARC_SECURE_FIRMWARE
	ld_s r13, [sp, ___callee_saved_stack_t_user_sp_OFFSET]
	sr r13, [_ARC_V2_SEC_U_SP]
	ld_s r13, [sp, ___callee_saved_stack_t_kernel_sp_OFFSET]
	sr r13, [_ARC_V2_SEC_K_SP]
#else
	ld_s r13, [sp, ___callee_saved_stack_t_user_sp_OFFSET]
	sr r13, [_ARC_V2_USER_SP]
	ld_s r13, [sp, ___callee_saved_stack_t_kernel_sp_OFFSET]
	sr r13, [_ARC_V2_KERNEL_SP]
#endif /* CONFIG_ARC_SECURE_FIRMWARE */
#else
	ld_s r13, [sp, ___callee_saved_stack_t_user_sp_OFFSET]
	sr r13, [_ARC_V2_USER_SP]
#endif
#endif

	LDR r13, sp, ___callee_saved_stack_t_r13_OFFSET
	LDR r14, sp, ___callee_saved_stack_t_r14_OFFSET
	LDR r15, sp, ___callee_saved_stack_t_r15_OFFSET
	LDR r16, sp, ___callee_saved_stack_t_r16_OFFSET
	LDR r17, sp, ___callee_saved_stack_t_r17_OFFSET
	LDR r18, sp, ___callee_saved_stack_t_r18_OFFSET
	LDR r19, sp, ___callee_saved_stack_t_r19_OFFSET
	LDR r20, sp, ___callee_saved_stack_t_r20_OFFSET
	LDR r21, sp, ___callee_saved_stack_t_r21_OFFSET
	LDR r22, sp, ___callee_saved_stack_t_r22_OFFSET
	LDR r23, sp, ___callee_saved_stack_t_r23_OFFSET
	LDR r24, sp, ___callee_saved_stack_t_r24_OFFSET
	LDR r25, sp, ___callee_saved_stack_t_r25_OFFSET
	LDR r26, sp, ___callee_saved_stack_t_r26_OFFSET
	LDR fp,  sp, ___callee_saved_stack_t_fp_OFFSET
	LDR r30, sp, ___callee_saved_stack_t_r30_OFFSET

	ADDR sp, sp, ___callee_saved_stack_t_SIZEOF

.endm

/* discard callee-saved regs */
.macro _discard_callee_saved_regs
	ADDR sp, sp, ___callee_saved_stack_t_SIZEOF
.endm

/*
 * Must be called with interrupts locked or in P0.
 * Upon exit, sp will be pointing to the stack frame.
 */
.macro _create_irq_stack_frame

	SUBR sp, sp, ___isf_t_SIZEOF

	STR blink, sp, ___isf_t_blink_OFFSET

	/* store these right away so we can use them if needed */

	STR r13, sp, ___isf_t_r13_OFFSET
	STR r12, sp, ___isf_t_r12_OFFSET
	STR r11, sp, ___isf_t_r11_OFFSET
	STR r10, sp, ___isf_t_r10_OFFSET
	STR r9,  sp, ___isf_t_r9_OFFSET
	STR r8,  sp, ___isf_t_r8_OFFSET
	STR r7,  sp, ___isf_t_r7_OFFSET
	STR r6,  sp, ___isf_t_r6_OFFSET
	STR r5,  sp, ___isf_t_r5_OFFSET
	STR r4,  sp, ___isf_t_r4_OFFSET
	STR r3,  sp, ___isf_t_r3_OFFSET
	STR r2,  sp, ___isf_t_r2_OFFSET
	STR r1,  sp, ___isf_t_r1_OFFSET
	STR r0,  sp, ___isf_t_r0_OFFSET

#ifdef CONFIG_ARC_HAS_ZOL
	MOVR r0, lp_count
	STR r0, sp, ___isf_t_lp_count_OFFSET
	LRR r1, [_ARC_V2_LP_START]
	LRR r0, [_ARC_V2_LP_END]
	STR r1, sp, ___isf_t_lp_start_OFFSET
	STR r0, sp, ___isf_t_lp_end_OFFSET
#endif /* CONFIG_ARC_HAS_ZOL */

#ifdef CONFIG_CODE_DENSITY
	lr r1, [_ARC_V2_JLI_BASE]
	lr r0, [_ARC_V2_LDI_BASE]
	lr r2, [_ARC_V2_EI_BASE]
	st_s r1, [sp, ___isf_t_jli_base_OFFSET]
	st_s r0, [sp, ___isf_t_ldi_base_OFFSET]
	st_s r2, [sp, ___isf_t_ei_base_OFFSET]
#endif

.endm

/*
 * Must be called with interrupts locked or in P0.
 * sp must be pointing to the stack frame.
 */
.macro _pop_irq_stack_frame

	LDR blink, sp, ___isf_t_blink_OFFSET

#ifdef CONFIG_CODE_DENSITY
	ld_s r1, [sp, ___isf_t_jli_base_OFFSET]
	ld_s r0, [sp, ___isf_t_ldi_base_OFFSET]
	ld_s r2, [sp, ___isf_t_ei_base_OFFSET]
	sr r1, [_ARC_V2_JLI_BASE]
	sr r0, [_ARC_V2_LDI_BASE]
	sr r2, [_ARC_V2_EI_BASE]
#endif

#ifdef CONFIG_ARC_HAS_ZOL
	LDR r0, sp, ___isf_t_lp_count_OFFSET
	MOVR lp_count, r0
	LDR r1, sp, ___isf_t_lp_start_OFFSET
	LDR r0, sp, ___isf_t_lp_end_OFFSET
	SRR r1, [_ARC_V2_LP_START]
	SRR r0, [_ARC_V2_LP_END]
#endif /* CONFIG_ARC_HAS_ZOL */

	LDR r13, sp, ___isf_t_r13_OFFSET
	LDR r12, sp, ___isf_t_r12_OFFSET
	LDR r11, sp, ___isf_t_r11_OFFSET
	LDR r10, sp, ___isf_t_r10_OFFSET
	LDR r9,  sp, ___isf_t_r9_OFFSET
	LDR r8,  sp, ___isf_t_r8_OFFSET
	LDR r7,  sp, ___isf_t_r7_OFFSET
	LDR r6,  sp, ___isf_t_r6_OFFSET
	LDR r5,  sp, ___isf_t_r5_OFFSET
	LDR r4,  sp, ___isf_t_r4_OFFSET
	LDR r3,  sp, ___isf_t_r3_OFFSET
	LDR r2,  sp, ___isf_t_r2_OFFSET
	LDR r1,  sp, ___isf_t_r1_OFFSET
	LDR r0,  sp, ___isf_t_r0_OFFSET


	/*
	 * All GPRs have been reloaded; the only register that is still usable
	 * is ilink.
	 *
	 * The pc and status32 values will still be on the stack. We cannot
	 * pop them yet because the callers of _pop_irq_stack_frame must reload
	 * status32 differently depending on the execution context they are
	 * running in (arch_switch(), firq or exception).
	 */
	ADDR sp, sp, ___isf_t_SIZEOF

.endm

/*
 * To use this macro, r2 must hold the thread struct pointer of
 * _kernel.current. r3 is used as a scratch register.
 */
.macro _load_stack_check_regs
#if defined(CONFIG_ARC_SECURE_FIRMWARE)
	ld r3, [r2, _thread_offset_to_k_stack_base]
	sr r3, [_ARC_V2_S_KSTACK_BASE]
	ld r3, [r2, _thread_offset_to_k_stack_top]
	sr r3, [_ARC_V2_S_KSTACK_TOP]
#ifdef CONFIG_USERSPACE
	ld r3, [r2, _thread_offset_to_u_stack_base]
	sr r3, [_ARC_V2_S_USTACK_BASE]
	ld r3, [r2, _thread_offset_to_u_stack_top]
	sr r3, [_ARC_V2_S_USTACK_TOP]
#endif
#else /* CONFIG_ARC_HAS_SECURE */
	ld r3, [r2, _thread_offset_to_k_stack_base]
	sr r3, [_ARC_V2_KSTACK_BASE]
	ld r3, [r2, _thread_offset_to_k_stack_top]
	sr r3, [_ARC_V2_KSTACK_TOP]
#ifdef CONFIG_USERSPACE
	ld r3, [r2, _thread_offset_to_u_stack_base]
	sr r3, [_ARC_V2_USTACK_BASE]
	ld r3, [r2, _thread_offset_to_u_stack_top]
	sr r3, [_ARC_V2_USTACK_TOP]
#endif
#endif /* CONFIG_ARC_SECURE_FIRMWARE */
.endm

/* Increment the interrupt nest counter and check whether it is now 1
 * (i.e. this is the outermost interrupt). The result is reflected in the
 * Z (EQ) flag of status32. Two temporary registers are needed.
 */
.macro _check_and_inc_int_nest_counter, reg1, reg2
#ifdef CONFIG_SMP
	/* get pointer to _cpu_t of this CPU */
	_get_cpu_id MACRO_ARG(reg1)
	ASLR MACRO_ARG(reg1), MACRO_ARG(reg1), ARC_REGSHIFT
	LDR MACRO_ARG(reg1), MACRO_ARG(reg1), _curr_cpu
	/* _cpu_t.nested is 32 bit regardless of platform bitness */
	ld MACRO_ARG(reg2), [MACRO_ARG(reg1), ___cpu_t_nested_OFFSET]
#else
	MOVR MACRO_ARG(reg1), _kernel
	/* z_kernel.nested is 32 bit regardless of platform bitness */
	ld MACRO_ARG(reg2), [MACRO_ARG(reg1), _kernel_offset_to_nested]
#endif
	add MACRO_ARG(reg2), MACRO_ARG(reg2), 1
#ifdef CONFIG_SMP
	st MACRO_ARG(reg2), [MACRO_ARG(reg1), ___cpu_t_nested_OFFSET]
#else
	st MACRO_ARG(reg2), [MACRO_ARG(reg1), _kernel_offset_to_nested]
#endif
	cmp MACRO_ARG(reg2), 1
.endm
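
/* Sketch of typical usage of the macro above (illustrative only; the real
 * call sites live in the arch/arc interrupt entry code, and the label below
 * is hypothetical): branch on the Z (EQ) flag so that the "outermost
 * interrupt" path, where the counter just became 1, can move onto the
 * per-CPU interrupt stack.
 *
 *	_check_and_inc_int_nest_counter r0, r1
 *	bne irq_is_nested
 *	_get_curr_cpu_irq_stack r0
 *	MOVR sp, r0
 * irq_is_nested:
 *	...
 */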

/* Decrement the interrupt nest counter. While the counter is > 0 the
 * interrupt stack is in use; otherwise it is not.
 */
.macro _dec_int_nest_counter, reg1, reg2
#ifdef CONFIG_SMP
	/* get pointer to _cpu_t of this CPU */
	_get_cpu_id MACRO_ARG(reg1)
	ASLR MACRO_ARG(reg1), MACRO_ARG(reg1), ARC_REGSHIFT
	LDR MACRO_ARG(reg1), MACRO_ARG(reg1), _curr_cpu
	/* _cpu_t.nested is 32 bit regardless of platform bitness */
	ld MACRO_ARG(reg2), [MACRO_ARG(reg1), ___cpu_t_nested_OFFSET]
#else
	MOVR MACRO_ARG(reg1), _kernel
	/* z_kernel.nested is 32 bit regardless of platform bitness */
	ld MACRO_ARG(reg2), [MACRO_ARG(reg1), _kernel_offset_to_nested]
#endif
	sub MACRO_ARG(reg2), MACRO_ARG(reg2), 1
#ifdef CONFIG_SMP
	st MACRO_ARG(reg2), [MACRO_ARG(reg1), ___cpu_t_nested_OFFSET]
#else
	st MACRO_ARG(reg2), [MACRO_ARG(reg1), _kernel_offset_to_nested]
#endif
.endm

/* If multiple bits in IRQ_ACT are set, i.e. the highest set bit differs
 * from the lowest, we are in a nested interrupt. The result is reflected
 * in the Z (EQ) flag of status32. Two temporary registers are needed.
 */
.macro _check_nest_int_by_irq_act, reg1, reg2
	lr MACRO_ARG(reg1), [_ARC_V2_AUX_IRQ_ACT]
#ifdef CONFIG_ARC_SECURE_FIRMWARE
	and MACRO_ARG(reg1), MACRO_ARG(reg1), ((1 << ARC_N_IRQ_START_LEVEL) - 1)
#else
	and MACRO_ARG(reg1), MACRO_ARG(reg1), 0xffff
#endif
	ffs MACRO_ARG(reg2), MACRO_ARG(reg1)
	fls MACRO_ARG(reg1), MACRO_ARG(reg1)
	cmp MACRO_ARG(reg1), MACRO_ARG(reg2)
.endm
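
/* Worked example for the macro above (illustrative): with IRQ_ACT.active =
 * 0b0110, ffs yields 1 and fls yields 2, so the cmp clears the Z flag
 * (nested interrupt); with a single bit set, e.g. 0b0100, ffs == fls and
 * the Z flag is set (not nested).
 */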


/* macro to get the ID of the current CPU
 * the result is returned in the register passed as 'reg'
 */
.macro _get_cpu_id, reg
	LRR MACRO_ARG(reg), [_ARC_V2_IDENTITY]
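	/*
	 * Interpretation (comment added for clarity, not from the original
	 * source): the 0xe8 operand of the xbfu below encodes a start bit of
	 * 8 and a field width of 8, so it extracts bits [15:8] of the
	 * IDENTITY aux register, i.e. the per-core ARCNUM value.
	 */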
	xbfu MACRO_ARG(reg), MACRO_ARG(reg), 0xe8
.endm

/* macro to get the interrupt stack of the current CPU
 * the result is returned in the register passed as 'irq_sp'
 */
.macro _get_curr_cpu_irq_stack, irq_sp
#ifdef CONFIG_SMP
	/* get pointer to _cpu_t of this CPU */
	_get_cpu_id MACRO_ARG(irq_sp)
	ASLR MACRO_ARG(irq_sp), MACRO_ARG(irq_sp), ARC_REGSHIFT
	LDR MACRO_ARG(irq_sp), MACRO_ARG(irq_sp), _curr_cpu
	/* get pointer to irq_stack itself */
	LDR MACRO_ARG(irq_sp), MACRO_ARG(irq_sp), ___cpu_t_irq_stack_OFFSET
#else
	MOVR MACRO_ARG(irq_sp), _kernel
	LDR MACRO_ARG(irq_sp), MACRO_ARG(irq_sp), _kernel_offset_to_irq_stack
#endif
.endm

/* macro to push an aux register onto the stack, using 'reg' as scratch */
.macro PUSHAX, reg, aux
	LRR MACRO_ARG(reg), [MACRO_ARG(aux)]
	PUSHR MACRO_ARG(reg)
.endm

/* macro to pop an aux register from the stack, using 'reg' as scratch */
.macro POPAX, reg, aux
	POPR MACRO_ARG(reg)
	SRR MACRO_ARG(reg), [MACRO_ARG(aux)]
.endm
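
/* Illustrative (not from the original source) use of the PUSHAX/POPAX pair:
 * preserve an aux register around a region that may clobber it, using a GPR
 * as the intermediary.
 *
 *	PUSHAX r0, _ARC_V2_LP_START
 *	...
 *	POPAX  r0, _ARC_V2_LP_START
 */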


/* macro to store the old thread's callee-saved regs */
.macro _store_old_thread_callee_regs

	_save_callee_saved_regs
	/* Save the old thread into the switch handle, which is required by z_sched_switch_spin.
	 * NOTE: we shouldn't save anything related to the old thread context after this point!
	 * TODO: we should add an SMP write-after-write data memory barrier here, as we want all
	 * previous writes completed before setting switch_handle, which is polled by other cores
	 * in z_sched_switch_spin in the SMP case. Though this issue is unlikely to show up in
	 * the real world, as there is some gap between another core reading switch_handle and
	 * reading the rest of the data we stored before it.
	 */
	STR r2, r2, ___thread_t_switch_handle_OFFSET
.endm

/* macro to store the old thread's callee-saved regs in interrupt context */
.macro _irq_store_old_thread_callee_regs
#if defined(CONFIG_USERSPACE)
/*
 * When USERSPACE is enabled, per the ARCv2 ISA, SP is switched if the
 * interrupt is taken in user mode, and this is recorded in bit 31
 * (the U bit) of IRQ_ACT. When the interrupt exits, SP is switched back
 * according to the U bit.
 *
 * We need to remember the user/kernel status of the interrupted thread;
 * it will be restored when the thread is switched back in.
 */
	lr r1, [_ARC_V2_AUX_IRQ_ACT]
	and r3, r1, 0x80000000
	push_s r3

	bclr r1, r1, 31
	sr r1, [_ARC_V2_AUX_IRQ_ACT]
#endif
	_store_old_thread_callee_regs
.endm

/* macro to load the new thread's callee-saved regs */
.macro _load_new_thread_callee_regs
#ifdef CONFIG_ARC_STACK_CHECKING
	_load_stack_check_regs
#endif
	/*
	 * _load_callee_saved_regs expects incoming thread in r2.
	 * _load_callee_saved_regs restores the stack pointer.
	 */
	_load_callee_saved_regs

#if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_USERSPACE)
	push_s r2
	bl configure_mpu_thread
	pop_s r2
#endif
	/* _thread_arch.relinquish_cause is 32 bit regardless of platform bitness */
	ld r3, [r2, _thread_offset_to_relinquish_cause]
.endm


/* when the switch to a thread is caused by a cooperative switch, some status regs need to be set */
.macro _set_misc_regs_irq_switch_from_coop
#ifdef CONFIG_ARC_SECURE_FIRMWARE
	/* must return to secure mode, so set IRM bit to 1 */
	lr r0, [_ARC_V2_SEC_STAT]
	bset r0, r0, _ARC_V2_SEC_STAT_IRM_BIT
	sflag r0
#endif
.endm

/* when the switch to a thread is caused by an irq, some status regs need to be set */
.macro _set_misc_regs_irq_switch_from_irq
#if defined(CONFIG_USERSPACE)
/*
 * need to recover the user/kernel status of the interrupted thread
 */
	pop_s r3
	lr r2, [_ARC_V2_AUX_IRQ_ACT]
	or r2, r2, r3
	sr r2, [_ARC_V2_AUX_IRQ_ACT]
#endif

#ifdef CONFIG_ARC_SECURE_FIRMWARE
	/* here we need to recover the SEC_STAT.IRM bit */
	pop_s r3
	sflag r3
#endif
.endm

/* macro to get next switch handle in assembly */
.macro _get_next_switch_handle
	PUSHR r2
	MOVR r0, sp
	bl z_arch_get_next_switch_handle
	POPR  r2
.endm

/* macro to disable stack checking in assembly; a scratch GPR
 * is needed to do this
 */
.macro _disable_stack_checking, reg
#ifdef CONFIG_ARC_STACK_CHECKING
#ifdef CONFIG_ARC_SECURE_FIRMWARE
	lr MACRO_ARG(reg), [_ARC_V2_SEC_STAT]
	bclr MACRO_ARG(reg), MACRO_ARG(reg), _ARC_V2_SEC_STAT_SSC_BIT
	sflag MACRO_ARG(reg)

#else
	lr MACRO_ARG(reg), [_ARC_V2_STATUS32]
	bclr MACRO_ARG(reg), MACRO_ARG(reg), _ARC_V2_STATUS32_SC_BIT
	kflag MACRO_ARG(reg)
#endif
#endif
.endm

/* macro to enable stack checking in assembly; a scratch GPR
 * is needed to do this
 */
.macro _enable_stack_checking, reg
#ifdef CONFIG_ARC_STACK_CHECKING
#ifdef CONFIG_ARC_SECURE_FIRMWARE
	lr MACRO_ARG(reg), [_ARC_V2_SEC_STAT]
	bset MACRO_ARG(reg), MACRO_ARG(reg), _ARC_V2_SEC_STAT_SSC_BIT
	sflag MACRO_ARG(reg)
#else
	lr MACRO_ARG(reg), [_ARC_V2_STATUS32]
	bset MACRO_ARG(reg), MACRO_ARG(reg), _ARC_V2_STATUS32_SC_BIT
	kflag MACRO_ARG(reg)
#endif
#endif
.endm


#define __arc_u9_max		(255)
#define __arc_u9_min		(-256)
#define __arc_ldst32_as_shift	2

/*
 * When accessing a bloated struct member we can exceed the u9 offset operand
 * of the store instruction. In that case the _st32_huge_offset macro can be
 * used instead.
 */
.macro _st32_huge_offset, d, s, offset, temp
	.if MACRO_ARG(offset) <= __arc_u9_max && MACRO_ARG(offset) >= __arc_u9_min
		st MACRO_ARG(d), [MACRO_ARG(s), MACRO_ARG(offset)]
	/* Technically we could use .as to optimize both large positive and negative offsets
	 * here, but as our hand-written assembly code only uses positive offsets, we keep
	 * only the positive-offset case for simplicity.
	 */
	.elseif !(MACRO_ARG(offset) % (1 << __arc_ldst32_as_shift)) &&                             \
		MACRO_ARG(offset) <= (__arc_u9_max << __arc_ldst32_as_shift) &&                    \
		MACRO_ARG(offset) >= 0
		st.as MACRO_ARG(d), [MACRO_ARG(s), MACRO_ARG(offset) >> __arc_ldst32_as_shift]
	.else
		ADDR MACRO_ARG(temp), MACRO_ARG(s), MACRO_ARG(offset)
		st MACRO_ARG(d), [MACRO_ARG(temp)]
	.endif
.endm
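
/* Illustrative usage of _st32_huge_offset (hypothetical operands): store r0
 * at an offset that may exceed the signed 9-bit range of a plain st, using
 * r13 as scratch. Depending on the offset value, the macro expands to a
 * plain st, an st.as with a scaled offset, or an ADDR/st pair.
 *
 *	_st32_huge_offset r0, r2, ___thread_t_switch_handle_OFFSET, r13
 */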

#endif /* _ASMLANGUAGE */

#endif /*  ZEPHYR_ARCH_ARC_INCLUDE_SWAP_MACROS_H_ */