/*
 * Copyright (c) 2016 Jean-Paul Etienne <fractalclone@gmail.com>
 * Copyright (c) 2018 Foundries.io Ltd
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <toolchain.h>
#include <linker/sections.h>
#include <offsets_short.h>
#include <arch/cpu.h>
#include <sys/util.h>
#include <kernel.h>

/* Convenience macros for loading/storing register states. */

#define DO_FP_CALLER_SAVED(op, reg) \
	op ft0, __z_arch_esf_t_ft0_OFFSET(reg)	 ;\
	op ft1, __z_arch_esf_t_ft1_OFFSET(reg)	 ;\
	op ft2, __z_arch_esf_t_ft2_OFFSET(reg)	 ;\
	op ft3, __z_arch_esf_t_ft3_OFFSET(reg)	 ;\
	op ft4, __z_arch_esf_t_ft4_OFFSET(reg)	 ;\
	op ft5, __z_arch_esf_t_ft5_OFFSET(reg)	 ;\
	op ft6, __z_arch_esf_t_ft6_OFFSET(reg)	 ;\
	op ft7, __z_arch_esf_t_ft7_OFFSET(reg)	 ;\
	op ft8, __z_arch_esf_t_ft8_OFFSET(reg)	 ;\
	op ft9, __z_arch_esf_t_ft9_OFFSET(reg)	 ;\
	op ft10, __z_arch_esf_t_ft10_OFFSET(reg) ;\
	op ft11, __z_arch_esf_t_ft11_OFFSET(reg) ;\
	op fa0, __z_arch_esf_t_fa0_OFFSET(reg)	 ;\
	op fa1, __z_arch_esf_t_fa1_OFFSET(reg)	 ;\
	op fa2, __z_arch_esf_t_fa2_OFFSET(reg)	 ;\
	op fa3, __z_arch_esf_t_fa3_OFFSET(reg)	 ;\
	op fa4, __z_arch_esf_t_fa4_OFFSET(reg)	 ;\
	op fa5, __z_arch_esf_t_fa5_OFFSET(reg)	 ;\
	op fa6, __z_arch_esf_t_fa6_OFFSET(reg)	 ;\
	op fa7, __z_arch_esf_t_fa7_OFFSET(reg)	 ;

#define STORE_FP_CALLER_SAVED(reg) \
	DO_FP_CALLER_SAVED(RV_OP_STOREFPREG, reg)

#define LOAD_FP_CALLER_SAVED(reg) \
	DO_FP_CALLER_SAVED(RV_OP_LOADFPREG, reg)

#define DO_FP_CALLEE_SAVED(op, reg) \
	op fs0, _thread_offset_to_fs0(reg)   ;\
	op fs1, _thread_offset_to_fs1(reg)   ;\
	op fs2, _thread_offset_to_fs2(reg)   ;\
	op fs3, _thread_offset_to_fs3(reg)   ;\
	op fs4, _thread_offset_to_fs4(reg)   ;\
	op fs5, _thread_offset_to_fs5(reg)   ;\
	op fs6, _thread_offset_to_fs6(reg)   ;\
	op fs7, _thread_offset_to_fs7(reg)   ;\
	op fs8, _thread_offset_to_fs8(reg)   ;\
	op fs9, _thread_offset_to_fs9(reg)   ;\
	op fs10, _thread_offset_to_fs10(reg) ;\
	op fs11, _thread_offset_to_fs11(reg) ;

#define STORE_FP_CALLEE_SAVED(reg) \
	frcsr t2				       ;\
	RV_OP_STOREREG t2, _thread_offset_to_fcsr(reg) ;\
	DO_FP_CALLEE_SAVED(RV_OP_STOREFPREG, reg)

#define LOAD_FP_CALLEE_SAVED(reg) \
	RV_OP_LOADREG t2, _thread_offset_to_fcsr(reg) ;\
	fscsr x0, t2				      ;\
	DO_FP_CALLEE_SAVED(RV_OP_LOADFPREG, reg)
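
/*
 * Example (informative sketch): on an RV32 build where RV_OP_STOREREG
 * and RV_OP_STOREFPREG map to sw and fsw, STORE_FP_CALLEE_SAVED(t1)
 * expands to roughly:
 *
 *     frcsr t2
 *     sw  t2,   _thread_offset_to_fcsr(t1)
 *     fsw fs0,  _thread_offset_to_fs0(t1)
 *     ...
 *     fsw fs11, _thread_offset_to_fs11(t1)
 *
 * RV64 or double-precision builds would use sd/fsd instead.
 */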

/* imports */
GDATA(_sw_isr_table)
GTEXT(__soc_is_irq)
GTEXT(__soc_handle_irq)
GTEXT(_Fault)
#ifdef CONFIG_RISCV_SOC_CONTEXT_SAVE
GTEXT(__soc_save_context)
GTEXT(__soc_restore_context)
#endif /* CONFIG_RISCV_SOC_CONTEXT_SAVE */

GDATA(_k_neg_eagain)
GTEXT(_is_next_thread_current)
GTEXT(z_get_next_ready_thread)

#ifdef CONFIG_TRACING
GTEXT(sys_trace_thread_switched_in)
GTEXT(sys_trace_isr_enter)
#endif

#ifdef CONFIG_IRQ_OFFLOAD
GTEXT(_offload_routine)
#endif

/* exports */
GTEXT(__irq_wrapper)

/* Use ABI names of registers for the sake of simplicity */

/*
 * Generic architecture-level IRQ handling, along with callouts to
 * SoC-specific routines.
 *
 * Architecture level IRQ handling includes basic context save/restore
 * of standard registers and calling ISRs registered at Zephyr's driver
 * level.
 *
 * Since RISC-V does not completely prescribe IRQ handling behavior,
 * implementations vary (and some also deviate from the behavior the
 * spec does define). Hence, the arch-level code expects the following
 * functions to be provided at the SoC level:
 *
 *     - __soc_is_irq: decide if we're handling an interrupt or an exception
 *     - __soc_handle_irq: handle SoC-specific details for a pending IRQ
 *       (e.g. clear a pending bit in a SoC-specific register)
 *
 * If CONFIG_RISCV_SOC_CONTEXT_SAVE=y, calls to SoC-level context save/restore
 * routines are also made here. For details, see the Kconfig help text.
 */
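
/*
 * Illustrative only: on SoCs that follow the privileged spec, a minimal
 * __soc_is_irq could simply return the interrupt bit (MSB) of mcause,
 * e.g. on RV32 (hypothetical sketch, not part of this file):
 *
 *     __soc_is_irq:
 *         csrr a0, mcause
 *         srli a0, a0, 31
 *         ret
 *
 * SoCs that do not set the MSB for interrupts need their own logic.
 */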

/*
 * Handler called upon each exception/interrupt/fault.
 * In this architecture, a system call (ECALL) is used to perform context
 * switching or IRQ offloading (when enabled).
 */
SECTION_FUNC(exception.entry, __irq_wrapper)
	/* Allocate space on thread stack to save registers */
	addi sp, sp, -__z_arch_esf_t_SIZEOF

	/* Save caller-saved registers on current thread stack. */
	RV_OP_STOREREG ra, __z_arch_esf_t_ra_OFFSET(sp)
	RV_OP_STOREREG gp, __z_arch_esf_t_gp_OFFSET(sp)
	RV_OP_STOREREG tp, __z_arch_esf_t_tp_OFFSET(sp)
	RV_OP_STOREREG t0, __z_arch_esf_t_t0_OFFSET(sp)
	RV_OP_STOREREG t1, __z_arch_esf_t_t1_OFFSET(sp)
	RV_OP_STOREREG t2, __z_arch_esf_t_t2_OFFSET(sp)
	RV_OP_STOREREG t3, __z_arch_esf_t_t3_OFFSET(sp)
	RV_OP_STOREREG t4, __z_arch_esf_t_t4_OFFSET(sp)
	RV_OP_STOREREG t5, __z_arch_esf_t_t5_OFFSET(sp)
	RV_OP_STOREREG t6, __z_arch_esf_t_t6_OFFSET(sp)
	RV_OP_STOREREG a0, __z_arch_esf_t_a0_OFFSET(sp)
	RV_OP_STOREREG a1, __z_arch_esf_t_a1_OFFSET(sp)
	RV_OP_STOREREG a2, __z_arch_esf_t_a2_OFFSET(sp)
	RV_OP_STOREREG a3, __z_arch_esf_t_a3_OFFSET(sp)
	RV_OP_STOREREG a4, __z_arch_esf_t_a4_OFFSET(sp)
	RV_OP_STOREREG a5, __z_arch_esf_t_a5_OFFSET(sp)
	RV_OP_STOREREG a6, __z_arch_esf_t_a6_OFFSET(sp)
	RV_OP_STOREREG a7, __z_arch_esf_t_a7_OFFSET(sp)

#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
	/* Assess whether floating-point registers need to be saved. */
	la t0, _kernel
	RV_OP_LOADREG t0, _kernel_offset_to_current(t0)
	RV_OP_LOADREG t0, _thread_offset_to_user_options(t0)
	andi t0, t0, K_FP_REGS
	RV_OP_STOREREG t0, __z_arch_esf_t_fp_state_OFFSET(sp)
	beqz t0, skip_store_fp_caller_saved
	STORE_FP_CALLER_SAVED(sp)

skip_store_fp_caller_saved:
#endif

	/* Save MEPC register */
	csrr t0, mepc
	RV_OP_STOREREG t0, __z_arch_esf_t_mepc_OFFSET(sp)

	/* Save MSTATUS register */
	csrr t0, mstatus
	RV_OP_STOREREG t0, __z_arch_esf_t_mstatus_OFFSET(sp)

#ifdef CONFIG_RISCV_SOC_CONTEXT_SAVE
	/* Handle context saving at SOC level. */
	addi a0, sp, __z_arch_esf_t_soc_context_OFFSET
	jal ra, __soc_save_context
#endif /* CONFIG_RISCV_SOC_CONTEXT_SAVE */

#ifdef CONFIG_EXECUTION_BENCHMARKING
	call read_timer_start_of_isr
#endif

	/*
	 * Check whether the trap is the result of an interrupt or not
	 * (SoC dependent). Per the RISC-V privileged spec, the MSB of the
	 * mcause register indicates whether the trap was caused by an
	 * interrupt or by an exception/fault. However, some SoCs (like
	 * pulpino or riscv-qemu) never set the MSB for interrupts. Hence,
	 * check for interrupt vs. exception via the __soc_is_irq function
	 * (which each SoC needs to implement). The result is returned via
	 * register a0 (1: interrupt, 0: exception).
	 */
	jal ra, __soc_is_irq

	/* Clear t1: a non-zero t1 later identifies an IRQ-offload ecall */
	addi t1, x0, 0

	/* If a0 != 0, this is an interrupt: jump to is_interrupt */
	bnez a0, is_interrupt

	/*
	 * If the exception is the result of an ECALL, check whether to
	 * perform a context-switch or an IRQ offload. Otherwise call _Fault
	 * to report the exception.
	 */
	csrr t0, mcause
	li t2, SOC_MCAUSE_EXP_MASK
	and t0, t0, t2
	li t1, SOC_MCAUSE_ECALL_EXP

	/*
	 * If mcause == SOC_MCAUSE_ECALL_EXP, handle system call,
	 * otherwise handle fault
	 */
	beq t0, t1, is_syscall

	/*
	 * Call _Fault to handle exception.
	 * Stack pointer is pointing to a z_arch_esf_t structure, pass it
	 * to _Fault (via register a0).
	 * If _Fault shall return, set return address to no_reschedule
	 * to restore stack.
	 */
	addi a0, sp, 0
	la ra, no_reschedule
	tail _Fault

is_syscall:
	/*
	 * A syscall is the result of an ecall instruction, so MEPC holds
	 * the address of that ecall. Increment the saved MEPC by 4 to avoid
	 * re-triggering the same ecall upon exiting the ISR.
	 *
	 * It is safe to always increment by 4, even with compressed
	 * instructions enabled, because the ecall instruction is always
	 * 4 bytes long.
	 */
	RV_OP_LOADREG t0, __z_arch_esf_t_mepc_OFFSET(sp)
	addi t0, t0, 4
	RV_OP_STOREREG t0, __z_arch_esf_t_mepc_OFFSET(sp)
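
	/*
	 * (Informative) ecall is encoded as the full 32-bit word 0x00000073
	 * and the C extension defines no compressed form of it, which is
	 * why the fixed +4 above is always correct.
	 */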

#ifdef CONFIG_IRQ_OFFLOAD
	/*
	 * Determine whether the system call was issued for IRQ offloading
	 * by checking whether _offload_routine is non-NULL. If it is NULL,
	 * jump to reschedule to perform a context switch; otherwise jump
	 * to is_interrupt to handle the IRQ offload.
	 */
	la t0, _offload_routine
	RV_OP_LOADREG t1, 0x00(t0)
	bnez t1, is_interrupt
#endif

	/* Jump to reschedule to handle the context switch */
	j reschedule

is_interrupt:
	/*
	 * Save current thread stack pointer and switch
	 * stack pointer to interrupt stack.
	 */

	/* Save thread stack pointer to temp register t0 */
	addi t0, sp, 0

	/* Switch to interrupt stack */
	la t2, _kernel
	RV_OP_LOADREG sp, _kernel_offset_to_irq_stack(t2)

	/*
	 * Save the thread stack pointer on the interrupt stack.
	 * In RISC-V, the stack pointer must stay 16-byte aligned.
	 */
	addi sp, sp, -16
	RV_OP_STOREREG t0, 0x00(sp)
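
	/*
	 * (Informative) The RISC-V psABI requires sp to stay 16-byte
	 * aligned at procedure calls, so the frame is padded to 16 bytes
	 * even though only one register is stored in it.
	 */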

on_irq_stack:
	/* Increment _kernel.cpus[0].nested variable */
	lw t3, _kernel_offset_to_nested(t2)
	addi t3, t3, 1
	sw t3, _kernel_offset_to_nested(t2)

#ifdef CONFIG_IRQ_OFFLOAD
	/*
	 * If we got here through a system call, t1 is non-zero (it holds
	 * the _offload_routine pointer). In that case perform the IRQ
	 * offload; otherwise jump to call_irq.
	 */
	beqz t1, call_irq

	/*
	 * Call z_irq_do_offload to handle the IRQ offload. Set the return
	 * address to on_thread_stack so that we jump there on return from
	 * z_irq_do_offload.
	 */
	la ra, on_thread_stack
	tail z_irq_do_offload

call_irq:
#endif /* CONFIG_IRQ_OFFLOAD */
#ifdef CONFIG_TRACING_ISR
	call sys_trace_isr_enter
#endif

	/* Get IRQ causing interrupt */
	csrr a0, mcause
	li t0, SOC_MCAUSE_EXP_MASK
	and a0, a0, t0
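
	/* a0 now holds the IRQ number: mcause with the interrupt bit masked off */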

	/*
	 * Clear the pending IRQ at SoC level.
	 * The IRQ number is passed to __soc_handle_irq via register a0.
	 */
	jal ra, __soc_handle_irq

	/*
	 * Call the corresponding registered function in _sw_isr_table.
	 * (Each table entry is two words wide, so scale the index
	 * accordingly.)
	 */
	la t0, _sw_isr_table
	slli a0, a0, (RV_REGSHIFT + 1)
	add t0, t0, a0

	/* Load argument in a0 register */
	RV_OP_LOADREG a0, 0x00(t0)

	/* Load ISR function address in register t1 */
	RV_OP_LOADREG t1, RV_REGSIZE(t0)
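
	/*
	 * (Informative sketch) Each _sw_isr_table entry is laid out roughly
	 * like the C struct
	 *
	 *     struct _isr_table_entry {
	 *         void *arg;
	 *         void (*isr)(void *arg);
	 *     };
	 *
	 * i.e. 2 * RV_REGSIZE bytes per entry, hence the index shift of
	 * (RV_REGSHIFT + 1) above.
	 */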

#ifdef CONFIG_EXECUTION_BENCHMARKING
	addi sp, sp, -16
	RV_OP_STOREREG a0, 0x00(sp)
	RV_OP_STOREREG t1, RV_REGSIZE(sp)
	call read_timer_end_of_isr
	RV_OP_LOADREG t1, RV_REGSIZE(sp)
	RV_OP_LOADREG a0, 0x00(sp)
	addi sp, sp, 16
#endif
	/* Call ISR function */
	jalr ra, t1

on_thread_stack:
	/* Get reference to _kernel */
	la t1, _kernel

	/* Decrement _kernel.cpus[0].nested variable */
	lw t2, _kernel_offset_to_nested(t1)
	addi t2, t2, -1
	sw t2, _kernel_offset_to_nested(t1)

	/* Restore thread stack pointer */
	RV_OP_LOADREG t0, 0x00(sp)
	addi sp, t0, 0

#ifdef CONFIG_STACK_SENTINEL
	call z_check_stack_sentinel
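	/* Reload the _kernel pointer: the call above may have clobbered t1 */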
	la t1, _kernel
#endif

#ifdef CONFIG_PREEMPT_ENABLED
	/*
	 * Check if we need to perform a reschedule
	 */

	/* Get pointer to _kernel.current */
	RV_OP_LOADREG t2, _kernel_offset_to_current(t1)

	/*
	 * If the next thread to schedule is the current thread,
	 * do not perform a reschedule.
	 */
	RV_OP_LOADREG t3, _kernel_offset_to_ready_q_cache(t1)
	beq t3, t2, no_reschedule
#else
	j no_reschedule
#endif /* CONFIG_PREEMPT_ENABLED */

reschedule:
#ifdef CONFIG_TRACING
	call sys_trace_thread_switched_in
#endif
	/* Get reference to _kernel */
	la t0, _kernel

	/* Get pointer to _kernel.current */
	RV_OP_LOADREG t1, _kernel_offset_to_current(t0)

	/*
	 * Save the callee-saved registers of the current thread
	 * before performing the context switch.
	 */
	RV_OP_STOREREG s0, _thread_offset_to_s0(t1)
	RV_OP_STOREREG s1, _thread_offset_to_s1(t1)
	RV_OP_STOREREG s2, _thread_offset_to_s2(t1)
	RV_OP_STOREREG s3, _thread_offset_to_s3(t1)
	RV_OP_STOREREG s4, _thread_offset_to_s4(t1)
	RV_OP_STOREREG s5, _thread_offset_to_s5(t1)
	RV_OP_STOREREG s6, _thread_offset_to_s6(t1)
	RV_OP_STOREREG s7, _thread_offset_to_s7(t1)
	RV_OP_STOREREG s8, _thread_offset_to_s8(t1)
	RV_OP_STOREREG s9, _thread_offset_to_s9(t1)
	RV_OP_STOREREG s10, _thread_offset_to_s10(t1)
	RV_OP_STOREREG s11, _thread_offset_to_s11(t1)

#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
	/* Assess whether floating-point registers need to be saved. */
	RV_OP_LOADREG t2, _thread_offset_to_user_options(t1)
	andi t2, t2, K_FP_REGS
	beqz t2, skip_store_fp_callee_saved
	STORE_FP_CALLEE_SAVED(t1)

skip_store_fp_callee_saved:
#endif

	/*
	 * Save the stack pointer of the current thread and set the thread's
	 * default z_swap() return value to -EAGAIN (_k_neg_eagain).
	 */
	RV_OP_STOREREG sp, _thread_offset_to_sp(t1)
	la t2, _k_neg_eagain
	lw t3, 0x00(t2)
	sw t3, _thread_offset_to_swap_return_value(t1)
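
	/*
	 * (Informative) _k_neg_eagain holds -EAGAIN; it stays the thread's
	 * z_swap() return value unless the kernel overwrites it before the
	 * thread is switched back in (e.g. to report a successful wakeup).
	 */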

	/* Get next thread to schedule. */
	RV_OP_LOADREG t1, _kernel_offset_to_ready_q_cache(t0)

	/* Set _kernel.current to the new thread loaded in t1 */
	RV_OP_STOREREG t1, _kernel_offset_to_current(t0)

	/* Switch to new thread stack */
	RV_OP_LOADREG sp, _thread_offset_to_sp(t1)

	/* Restore callee-saved registers of new thread */
	RV_OP_LOADREG s0, _thread_offset_to_s0(t1)
	RV_OP_LOADREG s1, _thread_offset_to_s1(t1)
	RV_OP_LOADREG s2, _thread_offset_to_s2(t1)
	RV_OP_LOADREG s3, _thread_offset_to_s3(t1)
	RV_OP_LOADREG s4, _thread_offset_to_s4(t1)
	RV_OP_LOADREG s5, _thread_offset_to_s5(t1)
	RV_OP_LOADREG s6, _thread_offset_to_s6(t1)
	RV_OP_LOADREG s7, _thread_offset_to_s7(t1)
	RV_OP_LOADREG s8, _thread_offset_to_s8(t1)
	RV_OP_LOADREG s9, _thread_offset_to_s9(t1)
	RV_OP_LOADREG s10, _thread_offset_to_s10(t1)
	RV_OP_LOADREG s11, _thread_offset_to_s11(t1)

#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
	/* Determine if we need to restore floating-point registers. */
	RV_OP_LOADREG t2, _thread_offset_to_user_options(t1)
	andi t2, t2, K_FP_REGS
	beqz t2, skip_load_fp_callee_saved

	/*
	 * If we are switching from a thread with floating-point disabled,
	 * the mstatus FS field will still be cleared, which would cause an
	 * illegal instruction fault on the FP loads below. Set the FS state
	 * before restoring the registers; mstatus itself is restored later.
	 */
	li t2, MSTATUS_FS_INIT
	csrrs x0, mstatus, t2
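
	/*
	 * (Informative) mstatus.FS is a 2-bit field: 0 = Off, 1 = Initial,
	 * 2 = Clean, 3 = Dirty. MSTATUS_FS_INIT sets it to Initial so the
	 * FP loads below do not trap.
	 */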

	LOAD_FP_CALLEE_SAVED(t1)

skip_load_fp_callee_saved:
#endif

#ifdef CONFIG_EXECUTION_BENCHMARKING
	addi sp, sp, -__z_arch_esf_t_SIZEOF

	RV_OP_STOREREG ra, __z_arch_esf_t_ra_OFFSET(sp)
	RV_OP_STOREREG gp, __z_arch_esf_t_gp_OFFSET(sp)
	RV_OP_STOREREG tp, __z_arch_esf_t_tp_OFFSET(sp)
	RV_OP_STOREREG t0, __z_arch_esf_t_t0_OFFSET(sp)
	RV_OP_STOREREG t1, __z_arch_esf_t_t1_OFFSET(sp)
	RV_OP_STOREREG t2, __z_arch_esf_t_t2_OFFSET(sp)
	RV_OP_STOREREG t3, __z_arch_esf_t_t3_OFFSET(sp)
	RV_OP_STOREREG t4, __z_arch_esf_t_t4_OFFSET(sp)
	RV_OP_STOREREG t5, __z_arch_esf_t_t5_OFFSET(sp)
	RV_OP_STOREREG t6, __z_arch_esf_t_t6_OFFSET(sp)
	RV_OP_STOREREG a0, __z_arch_esf_t_a0_OFFSET(sp)
	RV_OP_STOREREG a1, __z_arch_esf_t_a1_OFFSET(sp)
	RV_OP_STOREREG a2, __z_arch_esf_t_a2_OFFSET(sp)
	RV_OP_STOREREG a3, __z_arch_esf_t_a3_OFFSET(sp)
	RV_OP_STOREREG a4, __z_arch_esf_t_a4_OFFSET(sp)
	RV_OP_STOREREG a5, __z_arch_esf_t_a5_OFFSET(sp)
	RV_OP_STOREREG a6, __z_arch_esf_t_a6_OFFSET(sp)
	RV_OP_STOREREG a7, __z_arch_esf_t_a7_OFFSET(sp)

#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
	/* Assess whether floating-point registers need to be saved. */
	RV_OP_LOADREG t2, _thread_offset_to_user_options(t1)
	andi t2, t2, K_FP_REGS
	RV_OP_STOREREG t2, __z_arch_esf_t_fp_state_OFFSET(sp)
	beqz t2, skip_store_fp_caller_saved_benchmark
	STORE_FP_CALLER_SAVED(sp)

skip_store_fp_caller_saved_benchmark:
#endif

	call read_timer_end_of_swap

#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
	/* Determine if we need to restore floating-point registers. */
	RV_OP_LOADREG t2, __z_arch_esf_t_fp_state_OFFSET(sp)
	beqz t2, skip_load_fp_caller_saved_benchmark
	LOAD_FP_CALLER_SAVED(sp)

skip_load_fp_caller_saved_benchmark:
#endif

	RV_OP_LOADREG ra, __z_arch_esf_t_ra_OFFSET(sp)
	RV_OP_LOADREG gp, __z_arch_esf_t_gp_OFFSET(sp)
	RV_OP_LOADREG tp, __z_arch_esf_t_tp_OFFSET(sp)
	RV_OP_LOADREG t0, __z_arch_esf_t_t0_OFFSET(sp)
	RV_OP_LOADREG t1, __z_arch_esf_t_t1_OFFSET(sp)
	RV_OP_LOADREG t2, __z_arch_esf_t_t2_OFFSET(sp)
	RV_OP_LOADREG t3, __z_arch_esf_t_t3_OFFSET(sp)
	RV_OP_LOADREG t4, __z_arch_esf_t_t4_OFFSET(sp)
	RV_OP_LOADREG t5, __z_arch_esf_t_t5_OFFSET(sp)
	RV_OP_LOADREG t6, __z_arch_esf_t_t6_OFFSET(sp)
	RV_OP_LOADREG a0, __z_arch_esf_t_a0_OFFSET(sp)
	RV_OP_LOADREG a1, __z_arch_esf_t_a1_OFFSET(sp)
	RV_OP_LOADREG a2, __z_arch_esf_t_a2_OFFSET(sp)
	RV_OP_LOADREG a3, __z_arch_esf_t_a3_OFFSET(sp)
	RV_OP_LOADREG a4, __z_arch_esf_t_a4_OFFSET(sp)
	RV_OP_LOADREG a5, __z_arch_esf_t_a5_OFFSET(sp)
	RV_OP_LOADREG a6, __z_arch_esf_t_a6_OFFSET(sp)
	RV_OP_LOADREG a7, __z_arch_esf_t_a7_OFFSET(sp)

	/* Release stack space */
	addi sp, sp, __z_arch_esf_t_SIZEOF
#endif

no_reschedule:
#ifdef CONFIG_RISCV_SOC_CONTEXT_SAVE
	/* Restore context at SOC level */
	addi a0, sp, __z_arch_esf_t_soc_context_OFFSET
	jal ra, __soc_restore_context
#endif /* CONFIG_RISCV_SOC_CONTEXT_SAVE */

	/* Restore MEPC register */
	RV_OP_LOADREG t0, __z_arch_esf_t_mepc_OFFSET(sp)
	csrw mepc, t0

	/* Restore MSTATUS register */
	RV_OP_LOADREG t0, __z_arch_esf_t_mstatus_OFFSET(sp)
	csrw mstatus, t0

#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
	/*
	 * Determine if we need to restore floating-point registers. This needs
	 * to happen before restoring integer registers to avoid stomping on
	 * t0.
	 */
	RV_OP_LOADREG t0, __z_arch_esf_t_fp_state_OFFSET(sp)
	beqz t0, skip_load_fp_caller_saved
	LOAD_FP_CALLER_SAVED(sp)

skip_load_fp_caller_saved:
#endif

	/* Restore caller-saved registers from thread stack */
	RV_OP_LOADREG ra, __z_arch_esf_t_ra_OFFSET(sp)
	RV_OP_LOADREG gp, __z_arch_esf_t_gp_OFFSET(sp)
	RV_OP_LOADREG tp, __z_arch_esf_t_tp_OFFSET(sp)
	RV_OP_LOADREG t0, __z_arch_esf_t_t0_OFFSET(sp)
	RV_OP_LOADREG t1, __z_arch_esf_t_t1_OFFSET(sp)
	RV_OP_LOADREG t2, __z_arch_esf_t_t2_OFFSET(sp)
	RV_OP_LOADREG t3, __z_arch_esf_t_t3_OFFSET(sp)
	RV_OP_LOADREG t4, __z_arch_esf_t_t4_OFFSET(sp)
	RV_OP_LOADREG t5, __z_arch_esf_t_t5_OFFSET(sp)
	RV_OP_LOADREG t6, __z_arch_esf_t_t6_OFFSET(sp)
	RV_OP_LOADREG a0, __z_arch_esf_t_a0_OFFSET(sp)
	RV_OP_LOADREG a1, __z_arch_esf_t_a1_OFFSET(sp)
	RV_OP_LOADREG a2, __z_arch_esf_t_a2_OFFSET(sp)
	RV_OP_LOADREG a3, __z_arch_esf_t_a3_OFFSET(sp)
	RV_OP_LOADREG a4, __z_arch_esf_t_a4_OFFSET(sp)
	RV_OP_LOADREG a5, __z_arch_esf_t_a5_OFFSET(sp)
	RV_OP_LOADREG a6, __z_arch_esf_t_a6_OFFSET(sp)
	RV_OP_LOADREG a7, __z_arch_esf_t_a7_OFFSET(sp)

	/* Release stack space */
	addi sp, sp, __z_arch_esf_t_SIZEOF

	/* Use SOC_ERET (typically mret) to exit the ISR */
	SOC_ERET