/*
 * Copyright (c) 2010-2014 Wind River Systems, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * @file
 * @brief Interrupt management support for IA-32 architecture
 *
 * This module implements assembly routines to manage interrupts on
 * the Intel IA-32 architecture.  More specifically, the interrupt (asynchronous
 * exception) stubs are implemented in this module.  The stubs are invoked when
 * entering and exiting a C interrupt handler.
 */

#define _ASMLANGUAGE

#include <nano_private.h>
#include <arch/x86/asm.h>
#include <offsets.h>	/* nanokernel structure offset definitions */
#include <arch/cpu.h>	/* _NANO_ERR_SPURIOUS_INT */
#include <drivers/loapic.h> /* LOAPIC_EOI */


	/* exports (internal APIs) */

	GTEXT(_IntEnt)
	GTEXT(_IntExitWithEoi)
	GTEXT(_IntExit)
	GTEXT(_SpuriousIntNoErrCodeHandler)
	GTEXT(_SpuriousIntHandler)
	GTEXT(_DynIntStubsBegin)
	GTEXT(_irq_sw_handler)

	/* externs */

	GTEXT(_Swap)

#ifdef CONFIG_SYS_POWER_MANAGEMENT
#if defined(CONFIG_NANOKERNEL) && defined(CONFIG_TICKLESS_IDLE)
	GTEXT(_power_save_idle_exit)
#else
	GTEXT(_sys_power_save_idle_exit)
#endif
#endif


#ifdef CONFIG_INT_LATENCY_BENCHMARK
	GTEXT(_int_latency_start)
	GTEXT(_int_latency_stop)
#endif
/**
 *
 * @brief Inform the kernel of an interrupt
 *
 * This function is called from the interrupt stub created by IRQ_CONNECT()
 * to inform the kernel of an interrupt.  This routine saves the volatile
 * integer registers onto the interrupted context's stack, increments
 * _nanokernel.nested (to support interrupt nesting), and then switches to
 * the base of the interrupt stack if not already on the interrupt stack.
 * Finally, control is returned back to the interrupt stub code (which then
 * invokes the "application" interrupt service routine).
 *
 * Only the volatile integer registers are saved since ISRs are assumed not to
 * utilize floating point (or SSE) instructions.  If an ISR requires the use
 * of floating point (or SSE) instructions, it must first invoke nanoCpuFpSave()
 * (or nanoCpuSseSave()) at the beginning of the ISR.  A subsequent
 * nanoCpuFpRestore() (or nanoCpuSseRestore()) is needed just prior to returning
 * from the ISR.  Note that the nanoCpuFpSave(), nanoCpuSseSave(),
 * nanoCpuFpRestore(), and nanoCpuSseRestore() APIs have not been
 * implemented yet.
 *
 * WARNINGS
 *
 * Host-based tools and the target-based GDB agent depend on the stack frame
 * created by this routine to determine the locations of volatile registers.
 * These tools must be updated to reflect any changes to the stack frame.
 *
 * @return N/A
 *
 * C function prototype:
 *
 * void _IntEnt (void);
 */
SECTION_FUNC(TEXT, _IntEnt)

	/*
	 * The _IntVecSet() routine creates an interrupt-gate descriptor for
	 * all connections.  The processor will automatically clear the IF
	 * bit in the EFLAGS register upon execution of the handler, thus
	 * _IntEnt() (and _ExcEnt) need not issue a 'cli' as the first
	 * instruction.
	 *
	 * Clear the direction flag.  It is automatically restored when the
	 * interrupt exits via the IRET instruction.
	 */

	cld



	/*
	 * Note that the processor has pushed both the EFLAGS register
	 * and the logical return address (cs:eip) onto the stack prior
	 * to invoking the handler specified in the IDT
	 */


	/*
	 * swap eax and return address on the current stack;
	 * this saves eax on the stack without losing knowledge
	 * of how to get back to the interrupt stub
	 */
	xchgl	%eax, (%esp)

	/*
	 * The remaining volatile registers are pushed onto the current
	 * stack.
	 */

	pushl	%ecx
	pushl	%edx
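
	/*
	 * At this point the interrupted context's stack holds, from higher
	 * to lower addresses: EFLAGS, CS, EIP, the saved EAX (occupying the
	 * slot where the stub's return address was pushed), the saved ECX
	 * and the saved EDX.  The EAX register itself now holds the stub's
	 * return address.
	 */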

#ifdef CONFIG_INT_LATENCY_BENCHMARK
	/*
	 * The volatile registers are now saved, so it is safe to start
	 * measuring how long interrupts have been disabled.  (The interrupt
	 * gate created by IRQ_CONNECT disables interrupts.)
	 *
	 * Preserve EAX as it contains the stub return address.
	 */

	pushl	%eax
	call	_int_latency_start
	popl	%eax
#endif

#ifdef CONFIG_KERNEL_EVENT_LOGGER_INTERRUPT
	/*
	 * Preserve EAX as it contains the stub return address.
	 */
	pushl	%eax
	call	_sys_k_event_logger_interrupt
	popl	%eax
#endif

#ifdef CONFIG_KERNEL_EVENT_LOGGER_SLEEP
	/*
	 * Preserve EAX as it contains the stub return address.
	 */
	pushl	%eax
	call	_sys_k_event_logger_exit_sleep
	popl	%eax
#endif

	/* load %ecx with &_nanokernel */

	movl	$_nanokernel, %ecx

	/* switch to the interrupt stack for the non-nested case */

	incl	__tNANO_nested_OFFSET(%ecx)	/* inc interrupt nest count */
	cmpl	$1, __tNANO_nested_OFFSET(%ecx)	/* use int stack if !nested */
	jne	alreadyOnIntStack

	/* switch to base of the interrupt stack */

	movl	%esp, %edx		/* save current thread's stack pointer */
	movl	__tNANO_common_isp_OFFSET(%ecx), %esp	/* load new sp value */


	/* save thread's stack pointer onto base of interrupt stack */

	pushl	%edx			/* Save stack pointer */
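
	/*
	 * The interrupted thread's stack pointer now sits at the base of
	 * the interrupt stack; _IntExit later recovers it with a
	 * 'popl %esp' once the interrupt nesting count drops back to zero.
	 */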

#ifdef CONFIG_SYS_POWER_MANAGEMENT
	cmpl	$0, __tNANO_idle_OFFSET(%ecx)
	jne	_HandleIdle
	/* fast path is !idle, in the pipeline */
#endif


	/* fall through to nested case */

BRANCH_LABEL(alreadyOnIntStack)
#ifdef CONFIG_INT_LATENCY_BENCHMARK
	/* preserve eax, which contains the stub return address */
	pushl	%eax
	call	_int_latency_stop
	popl	%eax
#endif
#ifdef CONFIG_NESTED_INTERRUPTS
	sti			/* re-enable interrupts */
#endif
	jmp	*%eax		/* "return" back to stub */

#ifdef CONFIG_SYS_POWER_MANAGEMENT
BRANCH_LABEL(_HandleIdle)
#if defined(CONFIG_NANOKERNEL) && defined(CONFIG_TICKLESS_IDLE)
	pushl	%eax
	call _power_save_idle_exit
#else
	pushl	%eax
	push	__tNANO_idle_OFFSET(%ecx)
	movl	$0, __tNANO_idle_OFFSET(%ecx)

	/*
	 * Beware that a timer driver's _sys_power_save_idle_exit()
	 * implementation might expect that interrupts are disabled when
	 * invoked.  This ensures that the calculation and programming of the
	 * device for the next timer deadline is not interrupted.
	 */

	call	_sys_power_save_idle_exit
	add	$0x4, %esp
#endif /* CONFIG_NANOKERNEL && CONFIG_TICKLESS_IDLE */
#ifdef CONFIG_INT_LATENCY_BENCHMARK
	call	_int_latency_stop
#endif
	sti			/* re-enable interrupts */
	popl	%eax
	jmp	*%eax		/* "return" back to stub */
#endif /* CONFIG_SYS_POWER_MANAGEMENT */

/**
 * @brief Perform EOI, clean up stack, and do interrupt exit
 *
 * This is used by the interrupt stubs, which all leave the stack in
 * a particular state and need to poke the interrupt controller.
 * Prior to running the logic in _IntExit, the ISR parameter is popped off
 * the stack and an EOI is issued to the LOAPIC.
 */
SECTION_FUNC(TEXT, _IntExitWithEoi)
	popl %eax			/* Pushed onto stack by stub */
#if CONFIG_EOI_FORWARDING_BUG
	call	_lakemont_eoi
#endif
	xorl %eax, %eax			/* zeroes eax */
	/* TODO not great to have hard-coded LOAPIC stuff here. When
	 * we get around to introducing the interrupt controller abstraction
	 * layer, the in-use IRQ controller code will define an ASM macro
	 * with a specific name which does the correct thing for the particular
	 * controller.
	 */
	loapic_eoi_reg = (CONFIG_LOAPIC_BASE_ADDRESS + LOAPIC_EOI)
	movl %eax, loapic_eoi_reg	/* tell LOAPIC the IRQ is handled */
	/* fall through to _IntExit */

/**
 *
 * @brief Inform the kernel of an interrupt exit
 *
 * This function is called from the interrupt stub created by IRQ_CONNECT()
 * to inform the kernel that the processing of an interrupt has
 * completed.  This routine decrements _nanokernel.nested (to support
 * interrupt nesting), switches back to the interrupted execution context's
 * stack if this isn't a nested interrupt, and then restores the volatile
 * integer registers.
 *
 * Finally, control is returned back to the interrupted fiber or ISR.
 * A context switch _may_ occur if the interrupted context was a task context,
 * in which case one or more other fibers and tasks will execute before
 * this routine resumes and control gets returned to the interrupted task.
 *
 * @return N/A
 *
 * C function prototype:
 *
 * void _IntExit (void);
 */
BRANCH_LABEL(_IntExit)

	cli			/* disable interrupts */
#ifdef CONFIG_INT_LATENCY_BENCHMARK
	call	_int_latency_start
#endif

	/* determine whether exiting from a nested interrupt */

	movl	$_nanokernel, %ecx
	decl	__tNANO_nested_OFFSET(%ecx)	/* dec interrupt nest count */
	jne	nestedInterrupt                 /* 'iret' if nested case */


	/*
	 * Determine whether the execution of the ISR requires a context
	 * switch.  If the interrupted thread is PREEMPTIBLE (a task) and
	 * _nanokernel.fiber is non-NULL, a _Swap() needs to occur.
	 */

	movl	__tNANO_current_OFFSET (%ecx), %eax
	testl	$PREEMPTIBLE, __tTCS_flags_OFFSET(%eax)
	je	noReschedule
	cmpl	$0, __tNANO_fiber_OFFSET (%ecx)
	je	noReschedule

	/*
	 * Set the INT_ACTIVE bit in the tTCS to allow the upcoming call to
	 * _Swap() to determine whether non-floating registers need to be
	 * preserved using the lazy save/restore algorithm, or to indicate to
	 * debug tools that a preemptive context switch has occurred.
	 *
	 * Setting the NO_METRICS bit tells _Swap() that the per-execution context
	 * [totalRunTime] calculation has already been performed and that
	 * there is no need to do it again.
	 */

#if defined(CONFIG_FP_SHARING) ||  defined(CONFIG_GDB_INFO)
	orl	$INT_ACTIVE, __tTCS_flags_OFFSET(%eax)
#endif

	/*
	 * A context reschedule is required: keep the volatile registers of
	 * the interrupted thread on the context's stack.  Utilize
	 * the existing _Swap() primitive to save the remaining
	 * thread's registers (including floating point) and perform
	 * a switch to the new thread.
	 */

	popl	%esp		/* switch back to kernel stack */

	pushfl			/* push KERNEL_LOCK_KEY argument */
	call	_Swap

	/*
	 * The interrupted thread has now been scheduled,
	 * as the result of a _later_ invocation of _Swap().
	 *
	 * We now need to restore the interrupted thread's environment before
	 * returning control to it at the point where it was interrupted ...
	 */


#if ( defined(CONFIG_FP_SHARING) ||  \
      defined(CONFIG_GDB_INFO) )
	/*
	 * _Swap() has restored the floating point registers, if needed.
	 * Clear the INT_ACTIVE bit of the interrupted thread's TCS
	 * since it has served its purpose.
	 */

	movl	_nanokernel + __tNANO_current_OFFSET, %eax
	andl	$~INT_ACTIVE, __tTCS_flags_OFFSET (%eax)
#endif /* CONFIG_FP_SHARING || CONFIG_GDB_INFO */


	addl 	$4, %esp	/* pop KERNEL_LOCK_KEY argument */




	/* Restore volatile registers and return to the interrupted thread */
#ifdef CONFIG_INT_LATENCY_BENCHMARK
	call	_int_latency_stop
#endif

	popl	%edx
	popl	%ecx
	popl	%eax

	/* Pop of EFLAGS will re-enable interrupts and restore direction flag */
	iret


BRANCH_LABEL(noReschedule)

	/*
	 * A thread reschedule is not required; switch back to the
	 * interrupted thread's stack and restore volatile registers
	 */

	popl	%esp		/* pop thread stack pointer */


	/* fall through to 'nestedInterrupt' */


	/*
	 * For the nested interrupt case, the interrupt stack must still be
	 * utilized, and more importantly, a rescheduling decision must
	 * not be performed.
	 */

BRANCH_LABEL(nestedInterrupt)
#ifdef CONFIG_INT_LATENCY_BENCHMARK
	call	_int_latency_stop
#endif
	popl	%edx		/* pop volatile registers in reverse order */
	popl	%ecx
	popl	%eax
	/* Pop of EFLAGS will re-enable interrupts and restore direction flag */
	iret


/**
 *
 * @brief Spurious interrupt handler stubs
 *
 * Interrupt-gate descriptors are statically created for all slots in the IDT
 * that point to _SpuriousIntHandler() or _SpuriousIntNoErrCodeHandler().  The
 * former stub is connected to exception vectors where the processor pushes an
 * error code onto the stack (or kernel stack) in addition to the EFLAGS/CS/EIP
 * records.
 *
 * A spurious interrupt is considered a fatal condition, so this routine
 * merely sets up the 'reason' and 'pEsf' parameters to the routine
 * _NanoFatalErrorHandler().  In other words, there is no provision to return
 * to the interrupted execution context, and thus the volatile registers are
 * not saved.
 *
 * @return Never returns
 *
 * C function prototype:
 *
 * void _SpuriousIntHandler (void);
 *
 * INTERNAL
 * The _IntVecSet() routine creates an interrupt-gate descriptor for all
 * connections.  The processor will automatically clear the IF bit
 * in the EFLAGS register upon execution of the handler,
 * thus _SpuriousIntNoErrCodeHandler()/_SpuriousIntHandler() shall be
 * invoked with interrupts disabled.
 */
SECTION_FUNC(TEXT, _SpuriousIntNoErrCodeHandler)

	pushl	$0			/* push dummy err code onto stk */

	/* fall through to _SpuriousIntHandler */


SECTION_FUNC(TEXT, _SpuriousIntHandler)

	cld				/* Clear direction flag */

	/* Create the ESF */

	pushl %eax
	pushl %ecx
	pushl %edx
	pushl %edi
	pushl %esi
	pushl %ebx
	pushl %ebp

	leal	44(%esp), %ecx   /* Calculate ESP before exception occurred */
	pushl	%ecx             /* Save calculated ESP */

	/*
	 * The task's regular stack is being used, but push the value of ESP
	 * anyway so that _ExcExit can "recover the stack pointer"
	 * without determining whether the exception occurred while CPL=3
	 */

	pushl	%esp			/* push cur stack pointer: pEsf arg */
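
	/*
	 * The pEsf argument just pushed points to a frame containing, from
	 * lower to higher addresses: the calculated ESP, EBP, EBX, ESI, EDI,
	 * EDX, ECX, EAX, the error code (real or dummy), and the EIP, CS and
	 * EFLAGS records pushed by the processor.
	 */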

BRANCH_LABEL(finishSpuriousInt)

	/* re-enable interrupts */

	sti

	/* push the 'unsigned int reason' parameter */

	pushl	$_NANO_ERR_SPURIOUS_INT

BRANCH_LABEL(callFatalHandler)

	/* call the fatal error handler */

	call	_NanoFatalErrorHandler

	/* handler shouldn't return, but call it again if it does */

	jmp	callFatalHandler

#if ALL_DYN_IRQ_STUBS > 0
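/*
 * Common tail shared by all dynamic IRQ stubs: the stub number pushed by the
 * individual stub is consumed by the common dynamic IRQ handling code (see
 * intconnect.c), after which the EOI/exit path is taken.
 */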
BRANCH_LABEL(_DynIntStubCommon)
	call _common_dynamic_irq_handler
	/* Clean up and call IRET */
	jmp _IntExitWithEoi

/* Create all the dynamic IRQ stubs
 *
 * NOTE: Please update DYN_STUB_SIZE in include/arch/x86/arch.h if you change
 * how large the generated stubs are, otherwise _get_dynamic_stub() will
 * be unable to correctly determine the offset
 */
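
/*
 * For reference, each generated stub below is expected to expand to roughly
 * the following (the instruction sizes are taken from the comments in this
 * file):
 *
 *     _DynIntStubN:
 *         call  _IntEnt        # 5-byte call rel32
 *         push  $N             # 2-byte push imm8
 *         jmp   1f             # 2-byte jmp rel8, omitted for the last
 *                              # stub of a block, which falls through
 *
 * with each block of DYN_STUB_PER_BLOCK stubs sharing a single 5-byte
 * 'jmp _DynIntStubCommon' tail.
 */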

/*
 * Create nice labels for all the stubs so we can see where we
 * are in a debugger
 */
.altmacro
.macro __INT_STUB_NUM id
BRANCH_LABEL(_DynIntStub\id)
.endm
.macro INT_STUB_NUM id
__INT_STUB_NUM %id
.endm

SECTION_FUNC(TEXT, _DynIntStubsBegin)
stub_num = 0
.rept ((ALL_DYN_IRQ_STUBS + DYN_STUB_PER_BLOCK - 1) / DYN_STUB_PER_BLOCK)
	block_counter = 0
	.rept DYN_STUB_PER_BLOCK
		.if stub_num < ALL_DYN_IRQ_STUBS
			INT_STUB_NUM stub_num
			/*
			 * TODO: make this call in _DynIntStubCommon, saving
			 * 5 bytes per stub. Some voodoo will be necessary
			 * in _IntEnt/_IntExit to transplant the pushed
			 * stub_num to the irq stack
			 */
			call _IntEnt

			/*
			 * 2-byte push imm8. Consumed by
			 * common_dynamic_handler(), see intconnect.c
			 */
			push $stub_num

			/*
			 * Unless this is the last stub in a block (or the
			 * last stub overall), emit a short jump to the
			 * block's shared tail; the last stub falls through.
			 */
			.if (block_counter <> (DYN_STUB_PER_BLOCK - 1) && \
			     (stub_num <> ALL_DYN_IRQ_STUBS - 1))
				/* This should always be a 2-byte jmp rel8 */
				jmp 1f
			.endif
			stub_num = stub_num + 1
			block_counter = block_counter + 1
		.endif
	.endr
	/*
	 * This must be a 5-byte jmp rel32, which is why _DynIntStubCommon
	 * is placed before the actual stubs
	 */
1:	jmp _DynIntStubCommon
.endr
#endif /* ALL_DYN_IRQ_STUBS */

#if CONFIG_IRQ_OFFLOAD
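/*
 * Software interrupt handler used by the IRQ offload mechanism: enter
 * interrupt context via _IntEnt, run the offloaded routine, and return
 * through the normal _IntExit path (this path does not perform the LOAPIC
 * EOI sequence used by the hardware IRQ stubs).
 */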
SECTION_FUNC(TEXT, _irq_sw_handler)
	call _IntEnt
	call _irq_do_offload
	jmp _IntExit

#endif