/*
 * Copyright (c) 2016 Cadence Design Systems, Inc.
 * SPDX-License-Identifier: Apache-2.0
 */

#include <xtensa/coreasm.h>
#include <xtensa/corebits.h>
#include <xtensa/cacheasm.h>
#include <xtensa/cacheattrasm.h>
#include <xtensa/xtensa-xer.h>
#include <xtensa/xdm-regs.h>
#include <xtensa/config/specreg.h>
#include <xtensa/config/system.h>  /* for XSHAL_USE_ABSOLUTE_LITERALS only */
#include <xtensa/xtruntime-core-state.h>

/*
 * The following reset vector avoids initializing certain registers already
 * initialized by processor reset.  But it does initialize some of them
 * anyway, for minimal support of warm restart (restarting in software by
 * jumping to the reset vector rather than asserting hardware reset).
 */

	.begin	literal_prefix	.ResetVector
	.section		.ResetVector.text, "ax"

	.align	4
	.global	_ResetVector
_ResetVector:

#if (!XCHAL_HAVE_HALT || defined(XTOS_UNPACK)) && XCHAL_HAVE_IMEM_LOADSTORE
	/*
	 *  NOTE:
	 *
	 *  IMPORTANT:  If you move the _ResetHandler portion to a section
	 *  other than .ResetVector.text that is outside the range of
	 *  the reset vector's 'j' instruction, the _ResetHandler symbol
	 *  and a more elaborate j/movi/jx sequence are needed in
	 *  .ResetVector.text to dispatch to the new location.
	 */
	j	_ResetHandler

	.size	_ResetVector, . - _ResetVector

#if XCHAL_HAVE_HALT
	/*
	 *  Xtensa TX: reset vector segment is only 4 bytes, so must place the
	 *  unpacker code elsewhere in the memory that contains the reset
	 *  vector.
	 */
#if XCHAL_RESET_VECTOR_VADDR == XCHAL_INSTRAM0_VADDR
	.section .iram0.text, "ax"
#elif XCHAL_RESET_VECTOR_VADDR == XCHAL_INSTROM0_VADDR
	.section .irom0.text, "ax"
#elif XCHAL_RESET_VECTOR_VADDR == XCHAL_URAM0_VADDR
	.section .uram0.text, "ax"
#else
#warning "Xtensa TX reset vector not at start of iram0, irom0, or uram0 -- ROMing LSPs may not work"
	.text
#endif
#endif /* XCHAL_HAVE_HALT */

	.extern	__memctl_default

	.align	4

	/* tells the assembler/linker to place literals here */
	.literal_position
	.align	4
	.global	_ResetHandler
_ResetHandler:
#endif

#if !XCHAL_HAVE_HALT

	/*
	 *  Even if the processor supports the non-PC-relative L32R option,
	 *  it will always start up in PC-relative mode.  We take advantage of
	 *  this, and use PC-relative mode at least until we're sure the .lit4
	 *  section is in place (which is sometimes only after unpacking).
	 */
	.begin	no-absolute-literals

	/*
	 * If we have dynamic cache way support, init the caches as soon
	 * as we can, which is now.  The exception is waking up from a
	 * PSO event, in which case this needs to happen slightly later.
	 */
#if XCHAL_HAVE_ICACHE_DYN_WAYS || XCHAL_HAVE_DCACHE_DYN_WAYS
# if XCHAL_HAVE_PSO_CDM && !XCHAL_HAVE_PSO_FULL_RETENTION
	 /* Do this later on in the code -- see below */
# else
	movi	a0, __memctl_default
	wsr	a0, MEMCTL
# endif
#endif

	/*
	 * If we have PSO support, we must check for a warm start with
	 * caches left powered on.  If so, restore MEMCTL from the saved
	 * state, if any.  Note that MEMCTL may not be present, depending
	 * on the config.
	 */
#if XCHAL_HAVE_PSO_CDM && !XCHAL_HAVE_PSO_FULL_RETENTION
	/* Read PWRSTAT */
	movi	a2, XDM_MISC_PWRSTAT
	/* Save area address - retained for later */
	movi	a3, _xtos_pso_savearea
	/* Signature for compare - retained for later */
	movi	a5, CORE_STATE_SIGNATURE
	/* PWRSTAT value - retained for later */
	rer	a7, a2
	/* Now bottom 2 bits are core wakeup and cache power lost */
	extui	a4, a7, 1, 2
	/* a4==1 means PSO wakeup, caches did not lose power */
	bnei	a4, 1, .Lcold_start
	/* Load save area signature field */
	l32i	a4, a3, CS_SA_signature
	sub	a4, a4, a5
	/* If signature mismatch then do cold start */
	bnez	a4, .Lcold_start
#if XCHAL_USE_MEMCTL
	/* Load saved MEMCTL value */
	l32i	a4, a3, CS_SA_memctl
	movi	a0, ~MEMCTL_INV_EN
	/* Clear invalidate bit */
	and	a0, a4, a0
	wsr	a0, MEMCTL
#endif
	j	.Lwarm_start

.Lcold_start:

#if XCHAL_HAVE_ICACHE_DYN_WAYS || XCHAL_HAVE_DCACHE_DYN_WAYS
	/*
	 * Enable and invalidate all ways of both caches. If there is no
	 * dynamic way support then this write will have no effect.
	 */
	movi	a0, __memctl_default
	wsr	a0, MEMCTL
#endif

.Lwarm_start:

#endif
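
	/*
	 * In outline, a C-like sketch of the PSO check above (illustrative
	 * only; it ignores the config conditionals, and RER()/WSR() stand in
	 * for the corresponding instructions):
	 *
	 *	pwrstat = RER(XDM_MISC_PWRSTAT);
	 *	if (((pwrstat >> 1) & 3) == 1	// PSO wakeup, caches kept power
	 *	    && save_area->signature == CORE_STATE_SIGNATURE)
	 *		WSR(MEMCTL, save_area->memctl & ~MEMCTL_INV_EN); // warm
	 *	else
	 *		WSR(MEMCTL, __memctl_default);	// cold: enable+invalidate
	 */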
	/* a0 is always 0 in this code, used to initialize lots of things */
	movi	a0, 0

/* technically this should be under !FULL_RESET, assuming hard reset */
#if XCHAL_HAVE_INTERRUPTS
	/* make sure that interrupts are shut off (*before* we lower
	 * PS.INTLEVEL and PS.EXCM!)
	 */
	wsr	a0, INTENABLE
#endif

#if !XCHAL_HAVE_FULL_RESET

/* pre-LX2 cores only */
#if XCHAL_HAVE_CCOUNT && (XCHAL_HW_MIN_VERSION < XTENSA_HWVERSION_RB_2006_0)
	/* not really necessary, but nice; best done very early */
	wsr	a0, CCOUNT
#endif

	/*
	 * For full MMU configs, put the page table at an unmapped virtual
	 * address.  This ensures that accesses outside the static maps
	 * result in miss exceptions rather than random behaviour.
	 * Assumes XCHAL_SEG_MAPPABLE_VADDR == 0 (true in the released MMU).
	 */
#if XCHAL_ITLB_ARF_WAYS > 0 || XCHAL_DTLB_ARF_WAYS > 0
	wsr	a0, PTEVADDR
#endif

	/*
	 * Debug initialization
	 *
	 * NOTE: DBREAKCn must be initialized before the combination of these
	 * two things: any load/store, and a lowering of PS.INTLEVEL below
	 * DEBUG_LEVEL.  The processor already resets IBREAKENABLE
	 * appropriately.
	 */
#if XCHAL_HAVE_DEBUG
#if XCHAL_NUM_DBREAK
#if XCHAL_NUM_DBREAK >= 2
	wsr	a0, DBREAKC1
#endif
	wsr	a0, DBREAKC0
	dsync			 /* wait for WSRs to DBREAKCn to complete */
#endif /* XCHAL_NUM_DBREAK */

/* pre-LX cores only */
# if XCHAL_HW_MIN_VERSION < XTENSA_HWVERSION_RA_2004_1
	/*
	 *  Starting in Xtensa LX, ICOUNTLEVEL resets to zero (not 15), so no
	 *  need to initialize it.  Prior to that we do, otherwise we get an
	 *  ICOUNT exception, 2^32 instructions after reset.
	 */

	/* Are we being debugged?  (Detected by ICOUNTLEVEL not being 15,
	 * or having dropped below 12.)
	 */
	rsr	a2, ICOUNTLEVEL
	/* If so, avoid initializing ICOUNTLEVEL, which would drop
	 * single-steps through here.
	 */
	bltui	a2, 12, 1f
	/* avoid ICOUNT exceptions */
	wsr	a0, ICOUNTLEVEL
	/* wait for WSR to ICOUNTLEVEL to complete */
	isync
1:
#endif
#endif /* XCHAL_HAVE_DEBUG */

#endif /* !XCHAL_HAVE_FULL_RESET */

#if XCHAL_HAVE_ABSOLUTE_LITERALS
	/* Technically, this only needs to be done under !FULL_RESET,
	 * assuming hard reset:
	 */
	wsr	a0, LITBASE
	rsync
#endif

#if XCHAL_HAVE_PSO_CDM && ! XCHAL_HAVE_PSO_FULL_RETENTION
	/*
	 * If we're powering up from a temporary power shut-off (PSO),
	 * restore state saved just prior to shut-off. Note that the
	 * MEMCTL register was already restored earlier, and as a side
	 * effect, registers a3, a5, a7 are now preloaded with values
	 * that we will use here.
	 * a3 - pointer to save area base address (_xtos_pso_savearea)
	 * a5 - saved state signature (CORE_STATE_SIGNATURE)
	 * a7 - contents of PWRSTAT register
	 */

	/* load save area signature */
	l32i	a4, a3, CS_SA_signature
	/* compare signature with expected one */
	sub	a4, a4, a5
# if XTOS_PSO_TEST
	/* pretend PSO warm start with warm caches */
	movi	a7, PWRSTAT_WAKEUP_RESET
# endif
	/* wakeup from PSO? (branch if not) */
	bbci.l	a7, PWRSTAT_WAKEUP_RESET_SHIFT, 1f
	/* Yes, wakeup from PSO.  Check whether state was properly saved;
	 * speculatively clear the PSO-wakeup bit.  */
	addi	a5, a7, - PWRSTAT_WAKEUP_RESET
	/* if state not saved (corrupted?), mark as cold start */
	movnez	a7, a5, a4
	/* if state not saved, just continue with reset */
	bnez	a4, 1f
	/* Wakeup from PSO with good signature.  Now check cache status:
	 * if caches warm, restore now  */
	bbci.l	a7, PWRSTAT_CACHES_LOST_POWER_SHIFT, .Lpso_restore
	/* Caches got shutoff.  Continue reset, we'll end up initializing
	 * caches, and check again later for PSO.
	 */
# if XCHAL_HAVE_PRID && XCHAL_HAVE_S32C1I
	j	.Ldonesync	 /* skip reset sync, only done for cold start */
# endif
1:	/*  Cold start.  (Not PSO wakeup.)  Proceed with normal full reset. */
#endif

#if XCHAL_HAVE_PRID && XCHAL_HAVE_S32C1I
	/* Core 0 initializes the XMP synchronization variable, if present.
	 * This operation needs to happen as early as possible in the startup
	 * sequence so that the other cores can be released from reset.
	 */
	.weak _ResetSync
	movi 	a2, _ResetSync	 /* address of sync variable */
	rsr.prid a3		 /* core and multiprocessor ID */
	extui 	a3, a3, 0, 8	 /* extract core ID (FIXME: need proper
				  * constants for PRID bits to extract) */
	beqz	a2, .Ldonesync	 /* skip if no sync variable */
	bnez	a3, .Ldonesync	 /* only do this on core 0 */
	s32i	a0, a2, 0	 /* clear sync variable */
.Ldonesync:
#endif
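
	/*
	 * Roughly equivalent C for the sync-variable init above (a sketch
	 * only, ignoring the exact PRID field layout noted in the FIXME):
	 *
	 *	if (&_ResetSync != 0 && (RSR(PRID) & 0xFF) == 0)
	 *		_ResetSync = 0;	// core 0 releases the other cores
	 */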
#if XCHAL_HAVE_EXTERN_REGS && XCHAL_HAVE_MP_RUNSTALL
	/* On core 0, this releases other cores.  On other cores this has no
	 * effect, because runstall control is unconnected
	 */
	movi	a2, XER_MPSCORE
	wer	a0, a2
#endif

	/*
	 * For processors with relocatable vectors, apply any alternate
	 * vector base given to xt-genldscripts, which sets the
	 * _memmap_vecbase_reset symbol accordingly.
	 */
#if XCHAL_HAVE_VECBASE
	/* note: absolute symbol, not a ptr */
	movi	a2, _memmap_vecbase_reset
	wsr	a2, vecbase
#endif

/* have ATOMCTL ? */
#if XCHAL_HAVE_S32C1I && (XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RC_2009_0)
#if XCHAL_DCACHE_IS_COHERENT
	/* MX -- internal for writeback, RCW otherwise */
	movi	a3, 0x25
#else
	/* non-MX -- always RCW */
	movi	a3, 0x15
#endif /* XCHAL_DCACHE_IS_COHERENT */
	wsr	a3, ATOMCTL
#endif
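
	/*
	 * For reference (a sketch, per the usual ATOMCTL documentation):
	 * ATOMCTL holds 2-bit fields choosing how S32C1I is performed for
	 * each access type (bypass in bits 1..0, writethrough in 3..2,
	 * writeback in 5..4), with 0 = exception, 1 = RCW transaction,
	 * 2 = internal operation.  Thus 0x15 = 0b01_01_01 selects RCW for
	 * all three, and 0x25 = 0b10_01_01 selects the internal operation
	 * for writeback and RCW otherwise, matching the comments above.
	 */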

#if XCHAL_HAVE_INTERRUPTS && XCHAL_HAVE_DEBUG
	/* lower PS.INTLEVEL here to make reset vector easier to debug */
	rsil	a2, 1
#endif

	/* If either of the caches does not have dynamic way support, then
	 * use the old (slow) method to init them. If the cache is absent
	 * the macros will expand to empty.
	 */
#if ! XCHAL_HAVE_ICACHE_DYN_WAYS
	icache_reset	a2, a3
#endif
#if ! XCHAL_HAVE_DCACHE_DYN_WAYS
	dcache_reset	a2, a3
#endif

#if XCHAL_HAVE_PSO_CDM && ! XCHAL_HAVE_PSO_FULL_RETENTION
	/* Here, a7 still contains status from the power status register,
	 * or zero if signature check failed.
	 */

	/* wakeup from PSO with good signature? */
	bbci.l	a7, PWRSTAT_WAKEUP_RESET_SHIFT, .Lcoldstart
	/* Yes, wakeup from PSO.  Caches had been powered down and are now
	 * initialized.
	 */
.Lpso_restore:
	/* Assume memory is still initialized and all code still unpacked,
	 * so we can just jump/call to the relevant state-restore code
	 * (wherever it is located).
	 */

	/* make shutoff routine return zero */
	movi	a2, 0
	movi	a3, _xtos_pso_savearea
	/* Here, as below for _start, call0 is used as an unlimited-range
	 * jump.
	 */
	call0	_xtos_core_restore_nw
	/*  (does not return) */
.Lcoldstart:
#endif

#if XCHAL_HAVE_PREFETCH
	/* Enable cache prefetch if present.  */
	movi	a2, XCHAL_CACHE_PREFCTL_DEFAULT
	wsr	a2, PREFCTL
#endif

	/*
	 *  Now set up the memory attributes.  On some cores this "enables"
	 *  caches.  We do this ahead of unpacking, so it can proceed more
	 *  efficiently.
	 *
	 *  The _memmap_cacheattr_reset symbol's value (address) is defined by
	 *  the LSP's linker script, as generated by xt-genldscripts.  It
	 *  defines 4-bit attributes for eight 512MB regions.
	 *
	 *  (NOTE:  for cores with the older MMU v1 or v2, or without any
	 *  memory protection mechanism, the following code has no effect.)
	 */
#if XCHAL_HAVE_MPU
	/*  If there's an empty background map, set up foreground maps to
	 *  mimic region protection:
	 */
# if XCHAL_MPU_ENTRIES >= 8 && XCHAL_MPU_BACKGROUND_ENTRIES <= 2
	.pushsection .rodata, "a"
	.global _xtos_mpu_attribs
	.align 4
_xtos_mpu_attribs:
	/*  Illegal	(---) */
	.word   0x00006000+XCHAL_MPU_ENTRIES-8
	/* Writeback	(rwx Cacheable Non-shareable wb rd-alloc wr-alloc) */
	.word   0x000F7700+XCHAL_MPU_ENTRIES-8
	/* WBNA		(rwx Cacheable Non-shareable wb rd-alloc) */
	.word   0x000D5700+XCHAL_MPU_ENTRIES-8
	/* Writethru	(rwx Cacheable Non-shareable wt rd-alloc) */
	.word   0x000C4700+XCHAL_MPU_ENTRIES-8
	/* Bypass	(rwx Device non-interruptible system-shareable) */
	.word   0x00006700+XCHAL_MPU_ENTRIES-8
	.popsection

	/*
	 * We assume reset state:  all MPU entries zeroed and disabled.
	 * Otherwise we'd need a loop to zero everything.
	 */
	/* note: absolute symbol, not a ptr */
	movi	a2, _memmap_cacheattr_reset
	movi	a3, _xtos_mpu_attribs
	movi	a4, 0x20000000	/* 512 MB delta */
	movi	a6, 8
	movi	a7, 1		/* MPU entry vaddr 0, with valid bit set */
	movi	a9, 0		/* cacheadrdis value */
	/* enable everything temporarily while MPU updates */
	wsr.cacheadrdis a9

	/* Write eight MPU entries, from the last one going backwards
	 * (entries n-1 thru n-8)
	 */
2:	extui	a8, a2, 28, 4	/* get next attribute nibble (msb first) */
	extui	a5, a8, 0, 2	/* lower two bits indicate whether cached */
	slli	a9, a9, 1	/* add a bit to cacheadrdis... */
	addi	a10, a9, 1	/* set that new bit if... */
	moveqz	a9, a10, a5	/* ... that region is non-cacheable */
	addx4	a5, a8, a3	/* index into _xtos_mpu_attribs table */
	addi	a8, a8, -5	/* make valid attrib indices negative */
	movgez	a5, a3, a8	/* if not valid attrib, use Illegal */
	l32i	a5, a5, 0	/* load access rights, memtype from table
				 * entry
				 */
	slli	a2, a2, 4
	sub	a7, a7, a4	/* next 512MB region (last to first) */
	addi	a6, a6, -1
	add	a5, a5, a6	/* add the index */
	wptlb	a5, a7		/* write the MPU entry */
	bnez	a6, 2b		/* loop until done */
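
	/*
	 * In outline (a sketch of the loop above): each 4-bit nibble of
	 * _memmap_cacheattr_reset, msb first, picks one of the five
	 * _xtos_mpu_attribs entries for one 512MB region, starting with
	 * the region at 0xE0000000 and working down to vaddr 0.  Nibbles
	 * 1..4 select Writeback/WBNA/Writethru/Bypass; any other value
	 * selects Illegal.  A region whose nibble has both low bits clear
	 * (e.g. 0, or 4/Bypass) also gets its CACHEADRDIS bit set, so
	 * caching stays disabled for that address range.
	 */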
# else
	/* default value of CACHEADRDIS for bgnd map */
	movi	a9, XCHAL_MPU_BG_CACHEADRDIS
# endif
	wsr.cacheadrdis a9			 /* update cacheadrdis */
#elif XCHAL_HAVE_CACHEATTR || XCHAL_HAVE_MIMIC_CACHEATTR \
		|| XCHAL_HAVE_XLT_CACHEATTR \
		|| (XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY)
	/* note: absolute symbol, not a ptr */
	movi	a2, _memmap_cacheattr_reset
	/* set CACHEATTR from a2 (clobbers a3-a8) */
	cacheattr_set
#endif

	/* Now that caches are initialized, cache coherency can be enabled. */
#if XCHAL_DCACHE_IS_COHERENT
# if XCHAL_HAVE_EXTERN_REGS && XCHAL_HAVE_MX && \
		(XCHAL_HW_MIN_VERSION < XTENSA_HWVERSION_RE_2012_0)
	/* Opt into coherence for MX (for backward compatibility / testing). */
	movi	a3, 1
	movi	a2, XER_CCON
	wer	a3, a2
# endif
#endif

	/* Enable zero-overhead loop instr buffer, and snoop responses, if
	 * configured.  If HW erratum 453 fix is to be applied, then don't
	 * enable loop instr buffer.
	 */
#if XCHAL_USE_MEMCTL && XCHAL_SNOOP_LB_MEMCTL_DEFAULT
	movi	a3, XCHAL_SNOOP_LB_MEMCTL_DEFAULT
	rsr	a2, MEMCTL
	or	a2, a2, a3
	wsr	a2, MEMCTL
#endif

	/* Caches are all up and running, clear PWRCTL.ShutProcOffOnPWait. */
#if XCHAL_HAVE_PSO_CDM
	movi	a2, XDM_MISC_PWRCTL
	movi	a4, ~PWRCTL_CORE_SHUTOFF
	rer	a3, a2
	and	a3, a3, a4
	wer	a3, a2
#endif

#endif /* !XCHAL_HAVE_HALT */

	/*
	 *  Unpack code and data (e.g. copy ROMed segments to RAM, vectors
	 *  into their proper location, etc.).
	 */

#if defined(XTOS_UNPACK)
	movi	a2, _rom_store_table
	beqz	a2, unpackdone
unpack:	l32i	a3, a2, 0	 /* start vaddr */
	l32i	a4, a2, 4	 /* end vaddr */
	l32i	a5, a2, 8	 /* store vaddr */
	addi	a2, a2, 12
	bgeu	a3, a4, upnext	 /* skip unless start < end */
uploop:	l32i 	a6, a5, 0
	addi	a5, a5, 4
	s32i	a6, a3, 0
	addi	a3, a3, 4
	bltu	a3, a4, uploop
	j	unpack
upnext:	bnez	a3, unpack
	bnez	a5, unpack
#endif /* XTOS_UNPACK */
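
	/*
	 * C-level sketch of the unpack loop above (when XTOS_UNPACK is
	 * defined), assuming the _rom_store_table layout of {start, end,
	 * store} vaddr triplets, with an all-zero entry terminating the
	 * table:
	 *
	 *	typedef struct { uint32_t *start, *end, *store; } rom_seg;
	 *	for (rom_seg *s = _rom_store_table; s->start || s->store; s++)
	 *		for (uint32_t *d = s->start, *p = s->store; d < s->end; )
	 *			*d++ = *p++;
	 */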

unpackdone:

#if defined(XTOS_UNPACK) || defined(XTOS_MP)
	/*
	 * If writeback caches are configured and enabled, unpacked data must
	 * be written out to memory before trying to execute it:
	 */
	dcache_writeback_all	a2, a3, a4, 0
	/* ensure data written back is visible to i-fetch */
	icache_sync		a2
	/*
	 * Note:  no need to invalidate the i-cache after the above, because
	 * we already invalidated it further above and did not execute
	 * anything within unpacked regions afterwards.  [Strictly speaking,
	 * if an unpacked region follows this code very closely, it's possible
	 * for cache-ahead to have cached a bit of that unpacked region, so in
	 * the future we may need to invalidate the entire i-cache here again
	 * anyway.]
	 */
#endif


#if !XCHAL_HAVE_HALT	/* skip for TX */

	/*
	 *  Now that we know the .lit4 section is present (if it got unpacked),
	 *  and if absolute literals are used, initialize LITBASE to use it.
	 */
#if XCHAL_HAVE_ABSOLUTE_LITERALS && XSHAL_USE_ABSOLUTE_LITERALS
	/*
	 *  Switch from PC-relative to absolute (litbase-relative) L32R mode.
	 *  Set LITBASE to 256 kB beyond the start of the literals in .lit4
	 *  (aligns to the nearest 4 kB boundary, LITBASE does not have bits
	 *  1..11) and set the enable bit (_lit4_start is assumed 4-byte
	 *  aligned).
	 */
	movi	a2, _lit4_start + 0x40001
	wsr	a2, LITBASE
	rsync
#endif /* have and use absolute literals */
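
	/*
	 * Worked example (a sketch): if _lit4_start were 0x60000000, LITBASE
	 * would be set to 0x60040001, i.e. a literal base of 0x60040000
	 * (bits 12..31) with the enable bit (bit 0) set; L32R can then reach
	 * the 256 kB immediately below that base, which is exactly
	 * [0x60000000, 0x60040000).
	 */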
	/* we can now start using absolute literals */
	.end	no-absolute-literals

	/* Technically, this only needs to be done pre-LX2, assuming hard
	 * reset:
	 */
# if XCHAL_HAVE_WINDOWED && defined(__XTENSA_WINDOWED_ABI__)
	/* Windowed register init, so we can call windowed code (e.g. C code). */
	movi	a1, 1
	wsr	a1, WINDOWSTART
	/*
	 *  The processor always clears WINDOWBASE at reset, so no need to
	 *  clear it here.  It resets WINDOWSTART to 1 starting with LX2.0/X7.0
	 *  (RB-2006.0).  However, assuming hard reset is not yet always
	 *  practical, so do this anyway:
	 */
	wsr	a0, WINDOWBASE
	rsync
	movi	a0, 0			 /* possibly a different a0, clear it */
# endif

/* only pre-LX2 needs this */
#if XCHAL_HW_MIN_VERSION < XTENSA_HWVERSION_RB_2006_0
	/* Coprocessor option initialization */
# if XCHAL_HAVE_CP
	/*
	 * To allow creating new coprocessors using TC that are not known
	 * at GUI build time, without having to enable them explicitly,
	 * all CPENABLE bits must be set, even though they may not always
	 * correspond to a coprocessor.
	 */
	movi	a2, 0xFF	 /* enable *all* bits, to allow dynamic TIE */
	wsr	a2, CPENABLE
# endif

	/*
	 * Floating point coprocessor option initialization (at least
	 * rounding mode, so that floating point ops give predictable results)
	 */
# if XCHAL_HAVE_FP && !XCHAL_HAVE_VECTORFPU2005
/* floating-point control register (user register number) */
#  define FCR	232
/* floating-point status register (user register number) */
#  define FSR	233
	/* wait for WSR to CPENABLE to complete before accessing FP coproc
	 * state
	 */
	rsync
	wur	a0, FCR	/* clear FCR (default rounding mode, round-nearest) */
	wur	a0, FSR	/* clear FSR */
# endif
#endif /* pre-LX2 */


	/*
	 *  Initialize the memory error handler address.
	 *  Putting this address in a register allows multiple instances of
	 *  the same configured core (with separate program images but shared
	 *  code memory, which forces the memory error vector to be shared
	 *  since it is not VECBASE-relative) to share that vector, yet each
	 *  have their own handler and associated data save area.
	 */
#if XCHAL_HAVE_MEM_ECC_PARITY
	movi	a4, _MemErrorHandler
	wsr	a4, MESAVE
#endif


	/*
	 *  Initialize medium and high priority interrupt dispatchers:
	 */
#if HAVE_XSR

/*  For asm macros; works for positive a,b smaller than 1000:  */
# define GREATERTHAN(a,b)	(((b)-(a)) & ~0xFFF)
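
/*  (Why this works: for 0 <= a,b < 1000, b-a fits within the low 12 bits
 *  when b >= a, so the mask yields zero; when a > b, b-a is negative and
 *  its upper bits survive the mask, making the result non-zero.  This
 *  keeps the comparison usable in assemble-time .if directives below.)
 */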

# ifndef XCHAL_DEBUGLEVEL		/* debug option not selected? */
#  define XCHAL_DEBUGLEVEL	99	/* bogus value outside 2..6 */
# endif

	.macro	init_vector	level
	  .if GREATERTHAN(XCHAL_NUM_INTLEVELS+1,\level)
	    .if XCHAL_DEBUGLEVEL-\level
	      .weak   _Level&level&FromVector
	      movi    a4, _Level&level&FromVector
	      wsr     a4, EXCSAVE+\level
	      .if GREATERTHAN(\level,XCHAL_EXCM_LEVEL)
		movi    a5, _Pri_&level&_HandlerAddress
		s32i    a4, a5, 0
		/*  If user provides their own handler, that handler might
		 *  not provide its own _Pri_<n>_HandlerAddress variable for
		 *  linking handlers.  In that case, the reference below
		 *  would pull in the XTOS handler anyway, causing a conflict.
		 *  To avoid that, provide a weak version of it here:
		 */
		.pushsection .data, "aw"
		.global  _Pri_&level&_HandlerAddress
		.weak   _Pri_&level&_HandlerAddress
		.align	4
		_Pri_&level&_HandlerAddress: .space 4
		.popsection
	      .endif
	    .endif
	  .endif
	.endm

	init_vector	2
	init_vector	3
	init_vector	4
	init_vector	5
	init_vector	6
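
	/*
	 * For illustration (a sketch): with XCHAL_DEBUGLEVEL != 3 and at
	 * least three interrupt levels, "init_vector 3" expands to roughly:
	 *
	 *	.weak	_Level3FromVector
	 *	movi	a4, _Level3FromVector
	 *	wsr	a4, EXCSAVE+3
	 *
	 * plus, for levels above XCHAL_EXCM_LEVEL, storing that address
	 * into the (weakly defined) _Pri_3_HandlerAddress variable.
	 */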

#endif /*HAVE_XSR*/


	/*
	 *  Complete reset initialization outside the vector, to avoid
	 *  requiring a vector that is larger than necessary.  This 2nd-stage
	 *  startup code sets up the C Run-Time (CRT) and calls main().
	 *
	 *  Here we use call0 not because we expect any return, but because the
	 *  assembler/linker dynamically sizes call0 as needed (with
	 *  -mlongcalls) which it doesn't with j or jx.  Note:  This needs to
	 *  be call0 regardless of the selected ABI.
	 */
	call0	_start		 /* jump to _start (in crt1-*.S) */
	/* does not return */

#else /* XCHAL_HAVE_HALT */

	j	_start	/* jump to _start (in crt1-*.S) */
			/* (TX has max 64kB IRAM, so J always in range) */

	 /* Paranoia -- double-check requirements / assumptions of this Xtensa
	  * TX code:
	  */
# if !defined(__XTENSA_CALL0_ABI__) || !XCHAL_HAVE_FULL_RESET \
		|| XCHAL_HAVE_INTERRUPTS || XCHAL_HAVE_CCOUNT \
		|| XCHAL_DTLB_ARF_WAYS || XCHAL_HAVE_DEBUG \
		|| XCHAL_HAVE_S32C1I || XCHAL_HAVE_ABSOLUTE_LITERALS \
		|| XCHAL_DCACHE_SIZE || XCHAL_ICACHE_SIZE || XCHAL_HAVE_PIF \
		|| XCHAL_HAVE_WINDOWED
#  error "Halt architecture (Xtensa TX) requires: call0 ABI, all flops reset, no exceptions or interrupts, no TLBs, no debug, no S32C1I, no LITBASE, no cache, no PIF, no windowed regs"
# endif

#endif /* XCHAL_HAVE_HALT */


#if (!XCHAL_HAVE_HALT || defined(XTOS_UNPACK)) && XCHAL_HAVE_IMEM_LOADSTORE
	.size	_ResetHandler, . - _ResetHandler
#else
	.size	_ResetVector, . - _ResetVector
#endif

	.text
	.global xthals_hw_configid0, xthals_hw_configid1
	.global xthals_release_major, xthals_release_minor
	.end	literal_prefix