1/* SPDX-License-Identifier: GPL-2.0+ */
2/*
3 * Copyright 2004, 2007-2012 Freescale Semiconductor, Inc.
4 * Copyright (C) 2003  Motorola,Inc.
5 */
6
/* U-Boot Startup Code for Motorola 85xx PowerPC-based Embedded Boards
 *
 * The processor starts at 0xfffffffc and the code is first executed in the
 * last 4K page (0xfffff000 - 0xffffffff) in flash/ROM.
11 *
12 */
13
14#include <asm-offsets.h>
15#include <config.h>
16#include <mpc85xx.h>
17
18#include <ppc_asm.tmpl>
19#include <ppc_defs.h>
20
21#include <asm/cache.h>
22#include <asm/mmu.h>
23
24#undef	MSR_KERNEL
25#define MSR_KERNEL ( MSR_ME )	/* Machine Check */
26
27#define LAW_EN		0x80000000
28
29#if defined(CONFIG_NAND_SPL) || \
30	(defined(CONFIG_SPL_BUILD) && defined(CONFIG_SPL_INIT_MINIMAL))
31#define MINIMAL_SPL
32#endif
33
34#if !defined(CONFIG_SPL) && !defined(CONFIG_SYS_RAMBOOT) && \
35	!defined(CONFIG_NXP_ESBC) && !defined(CONFIG_SRIO_PCIE_BOOT_SLAVE)
36#define NOR_BOOT
37#endif
38
39/*
40 * Set up GOT: Global Offset Table
41 *
42 * Use r12 to access the GOT
43 */
44	START_GOT
45	GOT_ENTRY(_GOT2_TABLE_)
46	GOT_ENTRY(_FIXUP_TABLE_)
47
48#ifndef MINIMAL_SPL
49	GOT_ENTRY(_start)
50	GOT_ENTRY(_start_of_vectors)
51	GOT_ENTRY(_end_of_vectors)
52	GOT_ENTRY(transfer_to_handler)
53#endif
54
55	GOT_ENTRY(__init_end)
56	GOT_ENTRY(__bss_end)
57	GOT_ENTRY(__bss_start)
58	END_GOT
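
/*
 * A brief note on how these slots are used (the underlying macros live in
 * ppc_asm.tmpl): GOT_ENTRY(sym) reserves a word in the table above, and
 * once GET_GOT has loaded r12, a slot is read back position-independently
 * with e.g. "lwz r5,GOT(__init_end)", as relocate_code does further down
 * in this file.
 */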
59
/*
 * e500 Startup -- after reset only the last 4KB of the effective
 * address space is mapped in the MMU L2 TLB1 Entry 0. The .bootpg
 * section is located at THIS LAST page and basically does three
 * things: clear some registers, set up exception tables and
 * add more TLB entries for 'larger spaces' (e.g. the boot ROM) to
 * continue the boot procedure.
 *
 * Once the boot ROM is mapped by TLB entries we can proceed
 * with normal startup.
 */
72
73	.section .bootpg,"ax"
74	.globl _start_e500
75
76_start_e500:
77/* Enable debug exception */
78	li	r1,MSR_DE
79	mtmsr	r1
80
81	/*
82	 * If we got an ePAPR device tree pointer passed in as r3, we need that
83	 * later in cpu_init_early_f(). Save it to a safe register before we
84	 * clobber it so that we can fetch it from there later.
85	 */
86	mr	r24, r3
87
88#ifdef CONFIG_SYS_FSL_ERRATUM_A004510
89	mfspr	r3,SPRN_SVR
90	rlwinm	r3,r3,0,0xff
91	li	r4,CONFIG_SYS_FSL_ERRATUM_A004510_SVR_REV
92	cmpw	r3,r4
93	beq	1f
94
95#ifdef CONFIG_SYS_FSL_ERRATUM_A004510_SVR_REV2
96	li	r4,CONFIG_SYS_FSL_ERRATUM_A004510_SVR_REV2
97	cmpw	r3,r4
98	beq	1f
99#endif
100
101	/* Not a supported revision affected by erratum */
102	li	r27,0
103	b	2f
104
1051:	li	r27,1	/* Remember for later that we have the erratum */
106	/* Erratum says set bits 55:60 to 001001 */
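	/*
	 * Bits 55:60 are in 64-bit big-endian numbering; in the 32-bit
	 * SPR image they correspond to mask 0x1f8 (bits 3-8 from the
	 * LSB).  0x48 places 0b001001 in that field, and the rlwimi
	 * below merges it into HDBCR0 without disturbing other bits.
	 */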
107	msync
108	isync
109	mfspr	r3,SPRN_HDBCR0
110	li	r4,0x48
111	rlwimi	r3,r4,0,0x1f8
112	mtspr	SPRN_HDBCR0,r3
113	isync
1142:
115#endif
116#ifdef CONFIG_SYS_FSL_ERRATUM_A005125
117	msync
118	isync
119	mfspr	r3, SPRN_HDBCR0
120	oris	r3, r3, 0x0080
121	mtspr	SPRN_HDBCR0, r3
122#endif
123
124
125#if defined(CONFIG_NXP_ESBC) && defined(CONFIG_E500MC) && \
126	!defined(CONFIG_E6500)
	/* ISBC uses L2 as stack.
	 * Disable the L2 cache here so that U-Boot can enable it later
	 * as part of its normal flow
	 */
131
132	/* Check if L2 is enabled */
133	mfspr	r3, SPRN_L2CSR0
134	lis	r2, L2CSR0_L2E@h
135	ori	r2, r2, L2CSR0_L2E@l
136	and.	r4, r3, r2
137	beq	l2_disabled
138
139	mfspr r3, SPRN_L2CSR0
140	/* Flush L2 cache */
141	lis     r2,(L2CSR0_L2FL)@h
142	ori     r2, r2, (L2CSR0_L2FL)@l
143	or      r3, r2, r3
144	sync
145	isync
146	mtspr   SPRN_L2CSR0,r3
147	isync
1481:
149	mfspr r3, SPRN_L2CSR0
150	and. r1, r3, r2
151	bne 1b
152
153	mfspr r3, SPRN_L2CSR0
154	lis r2, L2CSR0_L2E@h
155	ori r2, r2, L2CSR0_L2E@l
156	andc r4, r3, r2
157	sync
158	isync
159	mtspr SPRN_L2CSR0,r4
160	isync
161
162l2_disabled:
163#endif
164
165/* clear registers/arrays not reset by hardware */
166
167	/* L1 */
168	li	r0,2
169	mtspr	L1CSR0,r0	/* invalidate d-cache */
170	mtspr	L1CSR1,r0	/* invalidate i-cache */
171
172	mfspr	r1,DBSR
173	mtspr	DBSR,r1		/* Clear all valid bits */
174
175
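	/*
	 * create_tlb1_entry - write one (IPROT-protected) TLB1 entry via
	 * the MAS registers:
	 *   esel     - entry select within TLB1
	 *   ts       - translation space (AS) the entry matches
	 *   tsize    - page size encoding (BOOKE_PAGESZ_*)
	 *   epn      - effective (virtual) base address
	 *   wimg     - MAS2 attribute bits, e.g. MAS2_I|MAS2_G
	 *   rpn      - real (physical) base address, low 32 bits
	 *   perm     - MAS3 permission bits (MAS3_SX|MAS3_SW|MAS3_SR)
	 *   phy_high - upper physical address bits, written to MAS7
	 *   scratch  - temporary GPR clobbered by the macro
	 */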
176	.macro	create_tlb1_entry esel ts tsize epn wimg rpn perm phy_high scratch
177	lis	\scratch, FSL_BOOKE_MAS0(1, \esel, 0)@h
178	ori	\scratch, \scratch, FSL_BOOKE_MAS0(1, \esel, 0)@l
179	mtspr	MAS0, \scratch
180	lis	\scratch, FSL_BOOKE_MAS1(1, 1, 0, \ts, \tsize)@h
181	ori	\scratch, \scratch, FSL_BOOKE_MAS1(1, 1, 0, \ts, \tsize)@l
182	mtspr	MAS1, \scratch
183	lis	\scratch, FSL_BOOKE_MAS2(\epn, \wimg)@h
184	ori	\scratch, \scratch, FSL_BOOKE_MAS2(\epn, \wimg)@l
185	mtspr	MAS2, \scratch
186	lis	\scratch, FSL_BOOKE_MAS3(\rpn, 0, \perm)@h
187	ori	\scratch, \scratch, FSL_BOOKE_MAS3(\rpn, 0, \perm)@l
188	mtspr	MAS3, \scratch
189	lis	\scratch, \phy_high@h
190	ori	\scratch, \scratch, \phy_high@l
191	mtspr	MAS7, \scratch
192	isync
193	msync
194	tlbwe
195	isync
196	.endm
197
198	.macro	create_tlb0_entry esel ts tsize epn wimg rpn perm phy_high scratch
199	lis	\scratch, FSL_BOOKE_MAS0(0, \esel, 0)@h
200	ori	\scratch, \scratch, FSL_BOOKE_MAS0(0, \esel, 0)@l
201	mtspr	MAS0, \scratch
202	lis	\scratch, FSL_BOOKE_MAS1(1, 0, 0, \ts, \tsize)@h
203	ori	\scratch, \scratch, FSL_BOOKE_MAS1(1, 0, 0, \ts, \tsize)@l
204	mtspr	MAS1, \scratch
205	lis	\scratch, FSL_BOOKE_MAS2(\epn, \wimg)@h
206	ori	\scratch, \scratch, FSL_BOOKE_MAS2(\epn, \wimg)@l
207	mtspr	MAS2, \scratch
208	lis	\scratch, FSL_BOOKE_MAS3(\rpn, 0, \perm)@h
209	ori	\scratch, \scratch, FSL_BOOKE_MAS3(\rpn, 0, \perm)@l
210	mtspr	MAS3, \scratch
211	lis	\scratch, \phy_high@h
212	ori	\scratch, \scratch, \phy_high@l
213	mtspr	MAS7, \scratch
214	isync
215	msync
216	tlbwe
217	isync
218	.endm
219
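	/*
	 * delete_tlb1_entry / delete_tlb0_entry - invalidate a single TLB
	 * entry by selecting it through MAS0 (and, for TLB0, MAS2) and
	 * writing MAS1 = 0 so the Valid bit is cleared.
	 */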
220	.macro	delete_tlb1_entry esel scratch
221	lis	\scratch, FSL_BOOKE_MAS0(1, \esel, 0)@h
222	ori	\scratch, \scratch, FSL_BOOKE_MAS0(1, \esel, 0)@l
223	mtspr	MAS0, \scratch
224	li	\scratch, 0
225	mtspr	MAS1, \scratch
226	isync
227	msync
228	tlbwe
229	isync
230	.endm
231
232	.macro	delete_tlb0_entry esel epn wimg scratch
233	lis	\scratch, FSL_BOOKE_MAS0(0, \esel, 0)@h
234	ori	\scratch, \scratch, FSL_BOOKE_MAS0(0, \esel, 0)@l
235	mtspr	MAS0, \scratch
236	li	\scratch, 0
237	mtspr	MAS1, \scratch
238	lis	\scratch, FSL_BOOKE_MAS2(\epn, \wimg)@h
239	ori	\scratch, \scratch, FSL_BOOKE_MAS2(\epn, \wimg)@l
240	mtspr	MAS2, \scratch
241	isync
242	msync
243	tlbwe
244	isync
245	.endm
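
	/*
	 * Usage sketch (mirroring create_init_ram_area further below):
	 * map a guarded, cache-inhibited 4M AS1 window for the boot flash
	 * in TLB1 entry 15:
	 *
	 *	create_tlb1_entry 15, \
	 *		1, BOOKE_PAGESZ_4M, \
	 *		CONFIG_SYS_MONITOR_BASE & 0xffc00000, MAS2_I|MAS2_G, \
	 *		0xffc00000, MAS3_SX|MAS3_SW|MAS3_SR, \
	 *		0, r6
	 */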
246
247/* Interrupt vectors do not fit in minimal SPL. */
248#if !defined(MINIMAL_SPL)
249	/* Setup interrupt vectors */
250	lis	r1,CONFIG_SYS_MONITOR_BASE@h
251	mtspr	IVPR,r1
252
253	li	r4,CriticalInput@l
254	mtspr	IVOR0,r4	/* 0: Critical input */
255	li	r4,MachineCheck@l
256	mtspr	IVOR1,r4	/* 1: Machine check */
257	li	r4,DataStorage@l
258	mtspr	IVOR2,r4	/* 2: Data storage */
259	li	r4,InstStorage@l
260	mtspr	IVOR3,r4	/* 3: Instruction storage */
261	li	r4,ExtInterrupt@l
262	mtspr	IVOR4,r4	/* 4: External interrupt */
263	li	r4,Alignment@l
264	mtspr	IVOR5,r4	/* 5: Alignment */
265	li	r4,ProgramCheck@l
266	mtspr	IVOR6,r4	/* 6: Program check */
267	li	r4,FPUnavailable@l
268	mtspr	IVOR7,r4	/* 7: floating point unavailable */
269	li	r4,SystemCall@l
270	mtspr	IVOR8,r4	/* 8: System call */
	/* 9: Auxiliary processor unavailable (unsupported) */
272	li	r4,Decrementer@l
273	mtspr	IVOR10,r4	/* 10: Decrementer */
274	li	r4,IntervalTimer@l
275	mtspr	IVOR11,r4	/* 11: Interval timer */
276	li	r4,WatchdogTimer@l
277	mtspr	IVOR12,r4	/* 12: Watchdog timer */
278	li	r4,DataTLBError@l
279	mtspr	IVOR13,r4	/* 13: Data TLB error */
280	li	r4,InstructionTLBError@l
281	mtspr	IVOR14,r4	/* 14: Instruction TLB error */
282	li	r4,DebugBreakpoint@l
283	mtspr	IVOR15,r4	/* 15: Debug */
284#endif
285
286	/* Clear and set up some registers. */
287	li      r0,0x0000
288	lis	r1,0xffff
289	mtspr	DEC,r0			/* prevent dec exceptions */
290	mttbl	r0			/* prevent fit & wdt exceptions */
291	mttbu	r0
292	mtspr	TSR,r1			/* clear all timer exception status */
293	mtspr	TCR,r0			/* disable all */
294	mtspr	ESR,r0			/* clear exception syndrome register */
295	mtspr	MCSR,r0			/* machine check syndrome register */
296	mtxer	r0			/* clear integer exception register */
297
298#ifdef CONFIG_SYS_BOOK3E_HV
299	mtspr	MAS8,r0			/* make sure MAS8 is clear */
300#endif
301
302	/* Enable Time Base and Select Time Base Clock */
303	lis	r0,HID0_EMCP@h		/* Enable machine check */
304#if defined(CONFIG_ENABLE_36BIT_PHYS)
305	ori	r0,r0,HID0_ENMAS7@l	/* Enable MAS7 */
306#endif
307#ifndef CONFIG_E500MC
308	ori	r0,r0,HID0_TBEN@l	/* Enable Timebase */
309#endif
310	mtspr	HID0,r0
311
312#if !defined(CONFIG_E500MC) && !defined(CONFIG_ARCH_QEMU_E500)
313	li	r0,(HID1_ASTME|HID1_ABE)@l	/* Addr streaming & broadcast */
314	mfspr	r3,PVR
315	andi.	r3,r3, 0xff
316	cmpwi	r3,0x50@l	/* if we are rev 5.0 or greater set MBDD */
317	blt 1f
318	/* Set MBDD bit also */
319	ori r0, r0, HID1_MBDD@l
3201:
321	mtspr	HID1,r0
322#endif
323
324#ifdef CONFIG_SYS_FSL_ERRATUM_CPU_A003999
325	mfspr	r3,SPRN_HDBCR1
326	oris	r3,r3,0x0100
327	mtspr	SPRN_HDBCR1,r3
328#endif
329
330	/* Enable Branch Prediction */
331#if defined(CONFIG_BTB)
332	lis	r0,BUCSR_ENABLE@h
333	ori	r0,r0,BUCSR_ENABLE@l
334	mtspr	SPRN_BUCSR,r0
335#endif
336
337#if defined(CONFIG_SYS_INIT_DBCR)
338	lis	r1,0xffff
339	ori	r1,r1,0xffff
340	mtspr	DBSR,r1			/* Clear all status bits */
341	lis	r0,CONFIG_SYS_INIT_DBCR@h	/* DBCR0[IDM] must be set */
342	ori	r0,r0,CONFIG_SYS_INIT_DBCR@l
343	mtspr	DBCR0,r0
344#endif
345
346/*
347 * Search for the TLB that covers the code we're executing, and shrink it
348 * so that it covers only this 4K page.  That will ensure that any other
349 * TLB we create won't interfere with it.  We assume that the TLB exists,
350 * which is why we don't check the Valid bit of MAS1.  We also assume
351 * it is in TLB1.
352 *
353 * This is necessary, for example, when booting from the on-chip ROM,
354 * which (oddly) creates a single 4GB TLB that covers CCSR and DDR.
355 */
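/*
 * For example (hypothetical case): if the boot ROM left a single large
 * entry covering the whole 32-bit space, the code below rewrites that same
 * ESEL with TSIZE = BOOKE_PAGESZ_4K and EPN/RPN equal to the 4K page we are
 * currently executing from, leaving everything else unmapped and free for
 * our own entries.
 */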
356	bl	nexti		/* Find our address */
357nexti:	mflr	r1		/* R1 = our PC */
358	li	r2, 0
359	mtspr	MAS6, r2	/* Assume the current PID and AS are 0 */
360	isync
361	msync
362	tlbsx	0, r1		/* This must succeed */
363
364	mfspr	r14, MAS0	/* Save ESEL for later */
365	rlwinm	r14, r14, 16, 0xfff
366
367	/* Set the size of the TLB to 4KB */
368	mfspr	r3, MAS1
369	li	r2, 0xF80
370	andc	r3, r3, r2	/* Clear the TSIZE bits */
371	ori	r3, r3, MAS1_TSIZE(BOOKE_PAGESZ_4K)@l
372	oris	r3, r3, MAS1_IPROT@h
373	mtspr	MAS1, r3
374
375	/*
376	 * Set the base address of the TLB to our PC.  We assume that
377	 * virtual == physical.  We also assume that MAS2_EPN == MAS3_RPN.
378	 */
379	lis	r3, MAS2_EPN@h
380	ori	r3, r3, MAS2_EPN@l	/* R3 = MAS2_EPN */
381
382	and	r1, r1, r3	/* Our PC, rounded down to the nearest page */
383
384	mfspr	r2, MAS2
385	andc	r2, r2, r3
386	or	r2, r2, r1
387#ifdef CONFIG_SYS_FSL_ERRATUM_A004510
388	cmpwi	r27,0
389	beq	1f
390	andi.	r15, r2, MAS2_I|MAS2_G /* save the old I/G for later */
391	rlwinm	r2, r2, 0, ~MAS2_I
392	ori	r2, r2, MAS2_G
3931:
394#endif
395	mtspr	MAS2, r2	/* Set the EPN to our PC base address */
396
397	mfspr	r2, MAS3
398	andc	r2, r2, r3
399	or	r2, r2, r1
400	mtspr	MAS3, r2	/* Set the RPN to our PC base address */
401
402	isync
403	msync
404	tlbwe
405
406/*
407 * Clear out any other TLB entries that may exist, to avoid conflicts.
408 * Our TLB entry is in r14.
409 */
410	li	r0, TLBIVAX_ALL | TLBIVAX_TLB0
411	tlbivax 0, r0
412	tlbsync
413
414	mfspr	r4, SPRN_TLB1CFG
415	rlwinm	r4, r4, 0, TLBnCFG_NENTRY_MASK
416
417	li	r3, 0
418	mtspr	MAS1, r3
4191:	cmpw	r3, r14
420	rlwinm	r5, r3, 16, MAS0_ESEL_MSK
421	addi	r3, r3, 1
422	beq	2f		/* skip the entry we're executing from */
423
424	oris	r5, r5, MAS0_TLBSEL(1)@h
425	mtspr	MAS0, r5
426
427	isync
428	tlbwe
429	isync
430	msync
431
4322:	cmpw	r3, r4
433	blt	1b
434
435#if defined(CONFIG_SYS_PPC_E500_DEBUG_TLB) && !defined(MINIMAL_SPL) && \
436	!defined(CONFIG_NXP_ESBC)
437/*
 * TLB entry for debugging in AS1
 * Create a temporary TLB entry in AS0 to handle the debug exception.
 * On a debug exception the MSR is cleared, i.e. the address space is
 * changed to 0, so a TLB entry (in AS0) is required to handle a debug
 * exception generated in AS1.
443 */
444
445#ifdef NOR_BOOT
446/*
 * A TLB entry is created for IVPR + IVOR15 to map to a valid opcode address,
 * because the flash's virtual address maps to 0xff800000 - 0xffffffff
 * and this window is outside of the 4K boot window.
450 */
451	create_tlb1_entry CONFIG_SYS_PPC_E500_DEBUG_TLB, \
452		0, BOOKE_PAGESZ_4M, \
453		CONFIG_SYS_MONITOR_BASE & 0xffc00000,  MAS2_I|MAS2_G, \
454		0xffc00000, MAS3_SX|MAS3_SW|MAS3_SR, \
455		0, r6
456
457#else
458/*
 * A TLB entry is created for IVPR + IVOR15 to map to a valid opcode address,
 * because "nexti" will resize the boot TLB entry to 4K
461 */
462	create_tlb1_entry CONFIG_SYS_PPC_E500_DEBUG_TLB, \
463		0, BOOKE_PAGESZ_256K, \
464		CONFIG_SYS_MONITOR_BASE & 0xfffc0000, MAS2_I, \
465		CONFIG_SYS_MONITOR_BASE & 0xfffc0000, MAS3_SX|MAS3_SW|MAS3_SR, \
466		0, r6
467#endif
468#endif
469
470/*
471 * Relocate CCSR, if necessary.  We relocate CCSR if (obviously) the default
472 * location is not where we want it.  This typically happens on a 36-bit
473 * system, where we want to move CCSR to near the top of 36-bit address space.
474 *
475 * To move CCSR, we create two temporary TLBs, one for the old location, and
476 * another for the new location.  On CoreNet systems, we also need to create
477 * a special, temporary LAW.
478 *
479 * As a general rule, TLB0 is used for short-term TLBs, and TLB1 is used for
480 * long-term TLBs, so we use TLB0 here.
481 */
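/*
 * As a purely illustrative example (the values are board-specific): with
 * CONFIG_SYS_CCSRBAR_DEFAULT = 0xfe000000 and a 36-bit target address of
 * 0xf_fe000000, CONFIG_SYS_CCSRBAR_PHYS_HIGH would be 0xf and
 * CONFIG_SYS_CCSRBAR_PHYS_LOW would be 0xfe000000; the code below maps both
 * the old and the new location with 4K TLB0 entries and then rewrites
 * CCSRBAR through the old mapping.
 */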
482#if (CONFIG_SYS_CCSRBAR_DEFAULT != CONFIG_SYS_CCSRBAR_PHYS)
483
484#if !defined(CONFIG_SYS_CCSRBAR_PHYS_HIGH) || !defined(CONFIG_SYS_CCSRBAR_PHYS_LOW)
#error "CONFIG_SYS_CCSRBAR_PHYS_HIGH and CONFIG_SYS_CCSRBAR_PHYS_LOW must be defined."
486#endif
487
488create_ccsr_new_tlb:
489	/*
490	 * Create a TLB for the new location of CCSR.  Register R8 is reserved
491	 * for the virtual address of this TLB (CONFIG_SYS_CCSRBAR).
492	 */
493	lis	r8, CONFIG_SYS_CCSRBAR@h
494	ori	r8, r8, CONFIG_SYS_CCSRBAR@l
495	lis	r9, (CONFIG_SYS_CCSRBAR + 0x1000)@h
496	ori	r9, r9, (CONFIG_SYS_CCSRBAR + 0x1000)@l
497	create_tlb0_entry 0, \
498		0, BOOKE_PAGESZ_4K, \
499		CONFIG_SYS_CCSRBAR, MAS2_I|MAS2_G, \
500		CONFIG_SYS_CCSRBAR_PHYS_LOW, MAS3_SW|MAS3_SR, \
501		CONFIG_SYS_CCSRBAR_PHYS_HIGH, r3
502	/*
503	 * Create a TLB for the current location of CCSR.  Register R9 is reserved
504	 * for the virtual address of this TLB (CONFIG_SYS_CCSRBAR + 0x1000).
505	 */
506create_ccsr_old_tlb:
507	create_tlb0_entry 1, \
508		0, BOOKE_PAGESZ_4K, \
509		CONFIG_SYS_CCSRBAR + 0x1000, MAS2_I|MAS2_G, \
510		CONFIG_SYS_CCSRBAR_DEFAULT, MAS3_SW|MAS3_SR, \
511		0, r3 /* The default CCSR address is always a 32-bit number */
512
513
514	/*
515	 * We have a TLB for what we think is the current (old) CCSR.  Let's
516	 * verify that, otherwise we won't be able to move it.
517	 * CONFIG_SYS_CCSRBAR_DEFAULT is always a 32-bit number, so we only
518	 * need to compare the lower 32 bits of CCSRBAR on CoreNet systems.
519	 */
520verify_old_ccsr:
521	lis     r0, CONFIG_SYS_CCSRBAR_DEFAULT@h
522	ori     r0, r0, CONFIG_SYS_CCSRBAR_DEFAULT@l
523#ifdef CONFIG_FSL_CORENET
524	lwz	r1, 4(r9)		/* CCSRBARL */
525#else
526	lwz	r1, 0(r9)		/* CCSRBAR, shifted right by 12 */
527	slwi	r1, r1, 12
528#endif
529
530	cmpl	0, r0, r1
531
532	/*
533	 * If the value we read from CCSRBARL is not what we expect, then
534	 * enter an infinite loop.  This will at least allow a debugger to
535	 * halt execution and examine TLBs, etc.  There's no point in going
536	 * on.
537	 */
538infinite_debug_loop:
539	bne	infinite_debug_loop
540
541#ifdef CONFIG_FSL_CORENET
542
543#define CCSR_LAWBARH0	(CONFIG_SYS_CCSRBAR + 0x1000)
544#define LAW_SIZE_4K	0xb
545#define CCSRBAR_LAWAR	(LAW_EN | (0x1e << 20) | LAW_SIZE_4K)
546#define CCSRAR_C	0x80000000	/* Commit */
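
/*
 * CCSRBAR_LAWAR above works out to 0x81e0000b: LAW enabled, the special
 * target ID 0x1e mentioned below, and size encoding 0xb, i.e.
 * 2^(0xb + 1) = 4KB.
 */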
547
548create_temp_law:
549	/*
550	 * On CoreNet systems, we create the temporary LAW using a special LAW
551	 * target ID of 0x1e.  LAWBARH is at offset 0xc00 in CCSR.
552	 */
553	lis     r0, CONFIG_SYS_CCSRBAR_PHYS_HIGH@h
554	ori     r0, r0, CONFIG_SYS_CCSRBAR_PHYS_HIGH@l
555	lis     r1, CONFIG_SYS_CCSRBAR_PHYS_LOW@h
556	ori     r1, r1, CONFIG_SYS_CCSRBAR_PHYS_LOW@l
557	lis     r2, CCSRBAR_LAWAR@h
558	ori     r2, r2, CCSRBAR_LAWAR@l
559
560	stw     r0, 0xc00(r9)	/* LAWBARH0 */
561	stw     r1, 0xc04(r9)	/* LAWBARL0 */
562	sync
563	stw     r2, 0xc08(r9)	/* LAWAR0 */
564
565	/*
566	 * Read back from LAWAR to ensure the update is complete.  e500mc
567	 * cores also require an isync.
568	 */
569	lwz	r0, 0xc08(r9)	/* LAWAR0 */
570	isync
571
572	/*
573	 * Read the current CCSRBARH and CCSRBARL using load word instructions.
574	 * Follow this with an isync instruction. This forces any outstanding
575	 * accesses to configuration space to completion.
576	 */
577read_old_ccsrbar:
578	lwz	r0, 0(r9)	/* CCSRBARH */
579	lwz	r0, 4(r9)	/* CCSRBARL */
580	isync
581
582	/*
	 * Write the new values for CCSRBARH and CCSRBARL to their old
	 * locations.  CCSRBARH has a shadow register: when CCSRBARH is
	 * written, the new value is loaded into the shadow register, and
	 * when CCSRBARL is subsequently written, the shadow contents and
	 * the CCSRBARL value are loaded into the CCSRBARH and CCSRBARL
	 * registers, respectively.  Follow this with a sync instruction.
590	 */
591write_new_ccsrbar:
592	lis	r0, CONFIG_SYS_CCSRBAR_PHYS_HIGH@h
593	ori	r0, r0, CONFIG_SYS_CCSRBAR_PHYS_HIGH@l
594	lis	r1, CONFIG_SYS_CCSRBAR_PHYS_LOW@h
595	ori	r1, r1, CONFIG_SYS_CCSRBAR_PHYS_LOW@l
596	lis	r2, CCSRAR_C@h
597	ori	r2, r2, CCSRAR_C@l
598
599	stw	r0, 0(r9)	/* Write to CCSRBARH */
600	sync			/* Make sure we write to CCSRBARH first */
601	stw	r1, 4(r9)	/* Write to CCSRBARL */
602	sync
603
604	/*
605	 * Write a 1 to the commit bit (C) of CCSRAR at the old location.
606	 * Follow this with a sync instruction.
607	 */
608	stw	r2, 8(r9)
609	sync
610
611	/* Delete the temporary LAW */
612delete_temp_law:
613	li	r1, 0
614	stw	r1, 0xc08(r8)
615	sync
616	stw	r1, 0xc00(r8)
617	stw	r1, 0xc04(r8)
618	sync
619
620#else /* #ifdef CONFIG_FSL_CORENET */
621
622write_new_ccsrbar:
623	/*
624	 * Read the current value of CCSRBAR using a load word instruction
625	 * followed by an isync. This forces all accesses to configuration
626	 * space to complete.
627	 */
628	sync
629	lwz	r0, 0(r9)
630	isync
631
632/* CONFIG_SYS_CCSRBAR_PHYS right shifted by 12 */
633#define CCSRBAR_PHYS_RS12 ((CONFIG_SYS_CCSRBAR_PHYS_HIGH << 20) | \
634			   (CONFIG_SYS_CCSRBAR_PHYS_LOW >> 12))
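
/*
 * Worked example (hypothetical values): with PHYS_HIGH = 0xf and
 * PHYS_LOW = 0xfe000000, the 36-bit address is 0xf_fe000000 and
 * CCSRBAR_PHYS_RS12 = (0xf << 20) | (0xfe000000 >> 12) = 0x00ffe000,
 * i.e. the physical address right-shifted by 12, which is the format the
 * (non-CoreNet) CCSRBAR register expects.
 */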
635
636	/* Write the new value to CCSRBAR. */
637	lis	r0, CCSRBAR_PHYS_RS12@h
638	ori	r0, r0, CCSRBAR_PHYS_RS12@l
639	stw	r0, 0(r9)
640	sync
641
642	/*
643	 * The manual says to perform a load of an address that does not
644	 * access configuration space or the on-chip SRAM using an existing TLB,
645	 * but that doesn't appear to be necessary.  We will do the isync,
646	 * though.
647	 */
648	isync
649
650	/*
651	 * Read the contents of CCSRBAR from its new location, followed by
652	 * another isync.
653	 */
654	lwz	r0, 0(r8)
655	isync
656
657#endif  /* #ifdef CONFIG_FSL_CORENET */
658
659	/* Delete the temporary TLBs */
660delete_temp_tlbs:
661	delete_tlb0_entry 0, CONFIG_SYS_CCSRBAR, MAS2_I|MAS2_G, r3
662	delete_tlb0_entry 1, CONFIG_SYS_CCSRBAR + 0x1000, MAS2_I|MAS2_G, r3
663
664#endif /* #if (CONFIG_SYS_CCSRBAR_DEFAULT != CONFIG_SYS_CCSRBAR_PHYS) */
665
666#if defined(CONFIG_SYS_FSL_QORIQ_CHASSIS2) && defined(CONFIG_E6500)
667create_ccsr_l2_tlb:
668	/*
669	 * Create a TLB for the MMR location of CCSR
670	 * to access L2CSR0 register
671	 */
672	create_tlb0_entry 0, \
673		0, BOOKE_PAGESZ_4K, \
674		CONFIG_SYS_CCSRBAR + 0xC20000, MAS2_I|MAS2_G, \
675		CONFIG_SYS_CCSRBAR_PHYS_LOW + 0xC20000, MAS3_SW|MAS3_SR, \
676		CONFIG_SYS_CCSRBAR_PHYS_HIGH, r3
677
678enable_l2_cluster_l2:
679	/* enable L2 cache */
680	lis	r3, (CONFIG_SYS_CCSRBAR + 0xC20000)@h
681	ori	r3, r3, (CONFIG_SYS_CCSRBAR + 0xC20000)@l
682	li	r4, 33	/* stash id */
683	stw	r4, 4(r3)
684	lis	r4, (L2CSR0_L2FI|L2CSR0_L2LFC)@h
685	ori	r4, r4, (L2CSR0_L2FI|L2CSR0_L2LFC)@l
686	sync
687	stw	r4, 0(r3)	/* invalidate L2 */
688	/* Poll till the bits are cleared */
6891:	sync
690	lwz	r0, 0(r3)
691	twi	0, r0, 0
692	isync
693	and.	r1, r0, r4
694	bne	1b
695
696	/* L2PE must be set before L2 cache is enabled */
697	lis	r4, (L2CSR0_L2PE)@h
698	ori	r4, r4, (L2CSR0_L2PE)@l
699	sync
700	stw	r4, 0(r3)	/* enable L2 parity/ECC error checking */
701	/* Poll till the bit is set */
7021:	sync
703	lwz	r0, 0(r3)
704	twi	0, r0, 0
705	isync
706	and.	r1, r0, r4
707	beq	1b
708
709	lis	r4, (L2CSR0_L2E|L2CSR0_L2PE)@h
710	ori	r4, r4, (L2CSR0_L2REP_MODE)@l
711	sync
712	stw	r4, 0(r3)	/* enable L2 */
713	/* Poll till the bit is set */
7141:	sync
715	lwz	r0, 0(r3)
716	twi	0, r0, 0
717	isync
718	and.	r1, r0, r4
719	beq	1b
720
721delete_ccsr_l2_tlb:
722	delete_tlb0_entry 0, CONFIG_SYS_CCSRBAR + 0xC20000, MAS2_I|MAS2_G, r3
723#endif
724
725	/*
726	 * Enable the L1. On e6500, this has to be done
727	 * after the L2 is up.
728	 */
729
730#ifdef CONFIG_SYS_CACHE_STASHING
731	/* set stash id to (coreID) * 2 + 32 + L1 CT (0) */
732	li	r2,(32 + 0)
733	mtspr	L1CSR2,r2
734#endif
735
736	/* Enable/invalidate the I-Cache */
737	lis	r2,(L1CSR1_ICFI|L1CSR1_ICLFR)@h
738	ori	r2,r2,(L1CSR1_ICFI|L1CSR1_ICLFR)@l
739	mtspr	SPRN_L1CSR1,r2
7401:
741	mfspr	r3,SPRN_L1CSR1
742	and.	r1,r3,r2
743	bne	1b
744
745	lis	r3,(L1CSR1_CPE|L1CSR1_ICE)@h
746	ori	r3,r3,(L1CSR1_CPE|L1CSR1_ICE)@l
747	mtspr	SPRN_L1CSR1,r3
748	isync
7492:
750	mfspr	r3,SPRN_L1CSR1
751	andi.	r1,r3,L1CSR1_ICE@l
752	beq	2b
753
754	/* Enable/invalidate the D-Cache */
755	lis	r2,(L1CSR0_DCFI|L1CSR0_DCLFR)@h
756	ori	r2,r2,(L1CSR0_DCFI|L1CSR0_DCLFR)@l
757	mtspr	SPRN_L1CSR0,r2
7581:
759	mfspr	r3,SPRN_L1CSR0
760	and.	r1,r3,r2
761	bne	1b
762
763	lis	r3,(L1CSR0_CPE|L1CSR0_DCE)@h
764	ori	r3,r3,(L1CSR0_CPE|L1CSR0_DCE)@l
765	mtspr	SPRN_L1CSR0,r3
766	isync
7672:
768	mfspr	r3,SPRN_L1CSR0
769	andi.	r1,r3,L1CSR0_DCE@l
770	beq	2b
771#ifdef CONFIG_SYS_FSL_ERRATUM_A004510
772#define DCSR_LAWBARH0	(CONFIG_SYS_CCSRBAR + 0x1000)
773#define LAW_SIZE_1M	0x13
774#define DCSRBAR_LAWAR	(LAW_EN | (0x1d << 20) | LAW_SIZE_1M)
775
776	cmpwi	r27,0
777	beq	9f
778
779	/*
780	 * Create a TLB entry for CCSR
781	 *
782	 * We're executing out of TLB1 entry in r14, and that's the only
783	 * TLB entry that exists.  To allocate some TLB entries for our
784	 * own use, flip a bit high enough that we won't flip it again
785	 * via incrementing.
786	 */
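	/*
	 * For example, if the entry we run from is ESEL 3 (r14 = 3), the
	 * xori below yields ESEL 35 for the CCSR mapping, and the DCSR
	 * mapping further down uses ESEL 36 (r9 = r8 + 1).
	 */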
787
788	xori	r8, r14, 32
789	lis	r0, MAS0_TLBSEL(1)@h
790	rlwimi	r0, r8, 16, MAS0_ESEL_MSK
791	lis	r1, FSL_BOOKE_MAS1(1, 1, 0, 0, BOOKE_PAGESZ_16M)@h
792	ori	r1, r1, FSL_BOOKE_MAS1(1, 1, 0, 0, BOOKE_PAGESZ_16M)@l
793	lis	r7, CONFIG_SYS_CCSRBAR@h
794	ori	r7, r7, CONFIG_SYS_CCSRBAR@l
795	ori	r2, r7, MAS2_I|MAS2_G
796	lis	r3, FSL_BOOKE_MAS3(CONFIG_SYS_CCSRBAR_PHYS_LOW, 0, (MAS3_SW|MAS3_SR))@h
797	ori	r3, r3, FSL_BOOKE_MAS3(CONFIG_SYS_CCSRBAR_PHYS_LOW, 0, (MAS3_SW|MAS3_SR))@l
798	lis	r4, CONFIG_SYS_CCSRBAR_PHYS_HIGH@h
799	ori	r4, r4, CONFIG_SYS_CCSRBAR_PHYS_HIGH@l
800	mtspr	MAS0, r0
801	mtspr	MAS1, r1
802	mtspr	MAS2, r2
803	mtspr	MAS3, r3
804	mtspr	MAS7, r4
805	isync
806	tlbwe
807	isync
808	msync
809
810	/* Map DCSR temporarily to physical address zero */
811	li	r0, 0
812	lis	r3, DCSRBAR_LAWAR@h
813	ori	r3, r3, DCSRBAR_LAWAR@l
814
815	stw	r0, 0xc00(r7)	/* LAWBARH0 */
816	stw	r0, 0xc04(r7)	/* LAWBARL0 */
817	sync
818	stw	r3, 0xc08(r7)	/* LAWAR0 */
819
820	/* Read back from LAWAR to ensure the update is complete. */
821	lwz	r3, 0xc08(r7)	/* LAWAR0 */
822	isync
823
824	/* Create a TLB entry for DCSR at zero */
825
826	addi	r9, r8, 1
827	lis	r0, MAS0_TLBSEL(1)@h
828	rlwimi	r0, r9, 16, MAS0_ESEL_MSK
829	lis	r1, FSL_BOOKE_MAS1(1, 1, 0, 0, BOOKE_PAGESZ_1M)@h
830	ori	r1, r1, FSL_BOOKE_MAS1(1, 1, 0, 0, BOOKE_PAGESZ_1M)@l
831	li	r6, 0	/* DCSR effective address */
832	ori	r2, r6, MAS2_I|MAS2_G
833	li	r3, MAS3_SW|MAS3_SR
834	li	r4, 0
835	mtspr	MAS0, r0
836	mtspr	MAS1, r1
837	mtspr	MAS2, r2
838	mtspr	MAS3, r3
839	mtspr	MAS7, r4
840	isync
841	tlbwe
842	isync
843	msync
844
845	/* enable the timebase */
846#define CTBENR	0xe2084
847	li	r3, 1
848	addis	r4, r7, CTBENR@ha
849	stw	r3, CTBENR@l(r4)
850	lwz	r3, CTBENR@l(r4)
851	twi	0,r3,0
852	isync
853
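	/*
	 * Helper macros for the workaround writes: compute the target
	 * address relative to the CCSR base (r7) or the DCSR base (r6)
	 * into r3, load the value into r4, and call erratum_set_value,
	 * which performs the store from inside a locked I-cache line.
	 */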
854	.macro	erratum_set_ccsr offset value
855	addis	r3, r7, \offset@ha
856	lis	r4, \value@h
857	addi	r3, r3, \offset@l
858	ori	r4, r4, \value@l
859	bl	erratum_set_value
860	.endm
861
862	.macro	erratum_set_dcsr offset value
863	addis	r3, r6, \offset@ha
864	lis	r4, \value@h
865	addi	r3, r3, \offset@l
866	ori	r4, r4, \value@l
867	bl	erratum_set_value
868	.endm
869
870	erratum_set_dcsr 0xb0e08 0xe0201800
871	erratum_set_dcsr 0xb0e18 0xe0201800
872	erratum_set_dcsr 0xb0e38 0xe0400000
873	erratum_set_dcsr 0xb0008 0x00900000
874	erratum_set_dcsr 0xb0e40 0xe00a0000
875	erratum_set_ccsr 0x18600 CONFIG_SYS_FSL_CORENET_SNOOPVEC_COREONLY
876#ifdef  CONFIG_RAMBOOT_PBL
877	erratum_set_ccsr 0x10f00 0x495e5000
878#else
879	erratum_set_ccsr 0x10f00 0x415e5000
880#endif
881	erratum_set_ccsr 0x11f00 0x415e5000
882
883	/* Make temp mapping uncacheable again, if it was initially */
884	bl	2f
8852:	mflr	r3
886	tlbsx	0, r3
887	mfspr	r4, MAS2
888	rlwimi	r4, r15, 0, MAS2_I
889	rlwimi	r4, r15, 0, MAS2_G
890	mtspr	MAS2, r4
891	isync
892	tlbwe
893	isync
894	msync
895
896	/* Clear the cache */
897	lis	r3,(L1CSR1_ICFI|L1CSR1_ICLFR)@h
898	ori	r3,r3,(L1CSR1_ICFI|L1CSR1_ICLFR)@l
899	sync
900	isync
901	mtspr	SPRN_L1CSR1,r3
902	isync
9032:	sync
904	mfspr	r4,SPRN_L1CSR1
905	and.	r4,r4,r3
906	bne	2b
907
908	lis	r3,(L1CSR1_CPE|L1CSR1_ICE)@h
909	ori	r3,r3,(L1CSR1_CPE|L1CSR1_ICE)@l
910	sync
911	isync
912	mtspr	SPRN_L1CSR1,r3
913	isync
9142:	sync
915	mfspr	r4,SPRN_L1CSR1
916	and.	r4,r4,r3
917	beq	2b
918
919	/* Remove temporary mappings */
920	lis	r0, MAS0_TLBSEL(1)@h
921	rlwimi	r0, r9, 16, MAS0_ESEL_MSK
922	li	r3, 0
923	mtspr	MAS0, r0
924	mtspr	MAS1, r3
925	isync
926	tlbwe
927	isync
928	msync
929
930	li	r3, 0
931	stw	r3, 0xc08(r7)	/* LAWAR0 */
932	lwz	r3, 0xc08(r7)
933	isync
934
935	lis	r0, MAS0_TLBSEL(1)@h
936	rlwimi	r0, r8, 16, MAS0_ESEL_MSK
937	li	r3, 0
938	mtspr	MAS0, r0
939	mtspr	MAS1, r3
940	isync
941	tlbwe
942	isync
943	msync
944
945	b	9f
946
947	/* r3 = addr, r4 = value, clobbers r5, r11, r12 */
948erratum_set_value:
949	/* Lock two cache lines into I-Cache */
950	sync
951	mfspr	r11, SPRN_L1CSR1
952	rlwinm	r11, r11, 0, ~L1CSR1_ICUL
953	sync
954	isync
955	mtspr	SPRN_L1CSR1, r11
956	isync
957
958	mflr	r12
959	bl	5f
9605:	mflr	r5
961	addi	r5, r5, 2f - 5b
962	icbtls	0, 0, r5
963	addi	r5, r5, 64
964
965	sync
966	mfspr	r11, SPRN_L1CSR1
9673:	andi.	r11, r11, L1CSR1_ICUL
968	bne	3b
969
970	icbtls	0, 0, r5
971	addi	r5, r5, 64
972
973	sync
974	mfspr	r11, SPRN_L1CSR1
9753:	andi.	r11, r11, L1CSR1_ICUL
976	bne	3b
977
978	b	2f
979	.align	6
980	/* Inside a locked cacheline, wait a while, write, then wait a while */
9812:	sync
982
983	mfspr	r5, SPRN_TBRL
984	addis	r11, r5, 0x10000@h /* wait 65536 timebase ticks */
9854:	mfspr	r5, SPRN_TBRL
986	subf.	r5, r5, r11
987	bgt	4b
988
989	stw	r4, 0(r3)
990
991	mfspr	r5, SPRN_TBRL
992	addis	r11, r5, 0x10000@h /* wait 65536 timebase ticks */
9934:	mfspr	r5, SPRN_TBRL
994	subf.	r5, r5, r11
995	bgt	4b
996
997	sync
998
999	/*
1000	 * Fill out the rest of this cache line and the next with nops,
1001	 * to ensure that nothing outside the locked area will be
1002	 * fetched due to a branch.
1003	 */
1004	.rept 19
1005	nop
1006	.endr
1007
1008	sync
1009	mfspr	r11, SPRN_L1CSR1
1010	rlwinm	r11, r11, 0, ~L1CSR1_ICUL
1011	sync
1012	isync
1013	mtspr	SPRN_L1CSR1, r11
1014	isync
1015
1016	mtlr	r12
1017	blr
1018
10199:
1020#endif
1021
1022create_init_ram_area:
1023	lis     r6,FSL_BOOKE_MAS0(1, 15, 0)@h
1024	ori     r6,r6,FSL_BOOKE_MAS0(1, 15, 0)@l
1025
1026#ifdef NOR_BOOT
1027	/* create a temp mapping in AS=1 to the 4M boot window */
1028	create_tlb1_entry 15, \
1029		1, BOOKE_PAGESZ_4M, \
1030		CONFIG_SYS_MONITOR_BASE & 0xffc00000, MAS2_I|MAS2_G, \
1031		0xffc00000, MAS3_SX|MAS3_SW|MAS3_SR, \
1032		0, r6
1033
1034#elif !defined(CONFIG_SYS_RAMBOOT) && defined(CONFIG_NXP_ESBC)
1035	/* create a temp mapping in AS = 1 for Flash mapping
1036	 * created by PBL for ISBC code
1037	 */
1038	create_tlb1_entry 15, \
1039		1, BOOKE_PAGESZ_1M, \
1040		CONFIG_SYS_MONITOR_BASE & 0xfff00000, MAS2_I|MAS2_G, \
1041		CONFIG_SYS_PBI_FLASH_WINDOW & 0xfff00000, MAS3_SX|MAS3_SW|MAS3_SR, \
1042		0, r6
1043
1044/*
 * For targets without CONFIG_SPL (e.g. P3, P5) and, for targets with
 * CONFIG_SPL (e.g. T1, T2, T4), only for u-boot-spl, i.e. CONFIG_SPL_BUILD
1048 */
1049#elif defined(CONFIG_RAMBOOT_PBL) && defined(CONFIG_NXP_ESBC) && \
1050	(!defined(CONFIG_SPL) || defined(CONFIG_SPL_BUILD))
1051	/* create a temp mapping in AS = 1 for mapping CONFIG_SYS_MONITOR_BASE
1052	 * to L3 Address configured by PBL for ISBC code
1053	 */
1054	create_tlb1_entry 15, \
1055		1, BOOKE_PAGESZ_1M, \
1056		CONFIG_SYS_MONITOR_BASE & 0xfff00000, MAS2_I|MAS2_G, \
1057		CONFIG_SYS_INIT_L3_ADDR & 0xfff00000, MAS3_SX|MAS3_SW|MAS3_SR, \
1058		0, r6
1059
1060#else
1061	/*
	 * create a temp mapping in AS=1 to the 1M CONFIG_SYS_MONITOR_BASE space;
	 * the main image has been relocated to CONFIG_SYS_MONITOR_BASE in the
	 * second stage.
1064	 */
1065	create_tlb1_entry 15, \
1066		1, BOOKE_PAGESZ_1M, \
1067		CONFIG_SYS_MONITOR_BASE & 0xfff00000, MAS2_I|MAS2_G, \
1068		CONFIG_SYS_MONITOR_BASE & 0xfff00000, MAS3_SX|MAS3_SW|MAS3_SR, \
1069		0, r6
1070#endif
1071
1072	/* create a temp mapping in AS=1 to the stack */
1073#if defined(CONFIG_SYS_INIT_RAM_ADDR_PHYS_LOW) && \
1074    defined(CONFIG_SYS_INIT_RAM_ADDR_PHYS_HIGH)
1075	create_tlb1_entry 14, \
1076		1, BOOKE_PAGESZ_16K, \
1077		CONFIG_SYS_INIT_RAM_ADDR, 0, \
1078		CONFIG_SYS_INIT_RAM_ADDR_PHYS_LOW, MAS3_SX|MAS3_SW|MAS3_SR, \
1079		CONFIG_SYS_INIT_RAM_ADDR_PHYS_HIGH, r6
1080
1081#else
1082	create_tlb1_entry 14, \
1083		1, BOOKE_PAGESZ_16K, \
1084		CONFIG_SYS_INIT_RAM_ADDR, 0, \
1085		CONFIG_SYS_INIT_RAM_ADDR, MAS3_SX|MAS3_SW|MAS3_SR, \
1086		0, r6
1087#endif
1088
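	/*
	 * Switch to AS=1: the rfi below loads SRR0 into the PC and SRR1
	 * into the MSR, so execution resumes at switch_as with MSR[IS,DS]
	 * set, i.e. using the temporary AS1 mappings created above.
	 */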
1089	lis	r6,MSR_IS|MSR_DS|MSR_DE@h
1090	ori	r6,r6,MSR_IS|MSR_DS|MSR_DE@l
1091	lis	r7,switch_as@h
1092	ori	r7,r7,switch_as@l
1093
1094	mtspr	SPRN_SRR0,r7
1095	mtspr	SPRN_SRR1,r6
1096	rfi
1097
1098switch_as:
1099/* L1 DCache is used for initial RAM */
1100
	/* Allocate initial RAM in the data cache. */
1103	lis	r3,CONFIG_SYS_INIT_RAM_ADDR@h
1104	ori	r3,r3,CONFIG_SYS_INIT_RAM_ADDR@l
1105	mfspr	r2, L1CFG0
1106	andi.	r2, r2, 0x1ff
1107	/* cache size * 1024 / (2 * L1 line size) */
1108	slwi	r2, r2, (10 - 1 - L1_CACHE_SHIFT)
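	/*
	 * E.g. (illustrative numbers) a 32KB L1 with 32-byte lines
	 * (L1_CACHE_SHIFT = 5) gives r2 = 32 << 4 = 512 iterations, i.e.
	 * half of the cache is zeroed and locked as initial RAM.
	 */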
1109	mtctr	r2
1110	li	r0,0
11111:
1112	dcbz	r0,r3
#ifdef CONFIG_E6500	/* Lock/unlock L2 cache along with L1 */
1114	dcbtls	2, r0, r3
1115	dcbtls	0, r0, r3
1116#else
1117	dcbtls	0, r0, r3
1118#endif
1119	addi	r3,r3,CONFIG_SYS_CACHELINE_SIZE
1120	bdnz	1b
1121
	/* Jump out of the last 4K page and continue with the 'normal' start */
1123#if defined(CONFIG_SYS_RAMBOOT) || defined(CONFIG_SPL)
1124	/* We assume that we're already running at the address we're linked at */
1125	b	_start_cont
1126#else
1127	/* Calculate absolute address in FLASH and jump there		*/
1128	/*--------------------------------------------------------------*/
1129	lis	r3,CONFIG_SYS_MONITOR_BASE@h
1130	ori	r3,r3,CONFIG_SYS_MONITOR_BASE@l
1131	addi	r3,r3,_start_cont - _start
1132	mtlr	r3
1133	blr
1134#endif
1135
1136	.text
1137	.globl	_start
1138_start:
1139	.long	0x27051956		/* U-BOOT Magic Number */
1140
1141	.globl	_start_cont
1142_start_cont:
	/* Set up the stack in initial RAM; this could be L2-as-SRAM or the L1 dcache */
1144	lis	r3,(CONFIG_SYS_INIT_RAM_ADDR)@h
1145	ori	r3,r3,((CONFIG_SYS_INIT_SP_OFFSET-16)&~0xf)@l /* Align to 16 */
1146
1147#if CONFIG_VAL(SYS_MALLOC_F_LEN)
1148#if CONFIG_VAL(SYS_MALLOC_F_LEN) + GENERATED_GBL_DATA_SIZE > CONFIG_SYS_INIT_RAM_SIZE
1149#error "SYS_MALLOC_F_LEN too large to fit into initial RAM."
1150#endif
1151
	/* Leave 16+ bytes for back chain termination and a NULL return address */
1153	subi	r3,r3,((CONFIG_VAL(SYS_MALLOC_F_LEN)+16+15)&~0xf)
1154#endif
1155
1156	/* End of RAM */
1157	lis	r4,(CONFIG_SYS_INIT_RAM_ADDR)@h
1158	ori	r4,r4,(CONFIG_SYS_INIT_RAM_SIZE)@l
1159
1160	li	r0,0
1161
11621:	subi	r4,r4,4
1163	stw	r0,0(r4)
1164	cmplw	r4,r3
1165	bne	1b
1166
1167#if CONFIG_VAL(SYS_MALLOC_F_LEN)
1168	lis	r4,(CONFIG_SYS_INIT_RAM_ADDR)@h
1169	ori	r4,r4,(CONFIG_SYS_GBL_DATA_OFFSET)@l
1170
1171	addi	r3,r3,16	/* Pre-relocation malloc area */
1172	stw	r3,GD_MALLOC_BASE(r4)
1173	subi	r3,r3,16
1174#endif
1175	li	r0,0
1176	stw	r0,0(r3)	/* Terminate Back Chain */
1177	stw	r0,+4(r3)	/* NULL return address. */
1178	mr	r1,r3		/* Transfer to SP(r1) */
1179
1180	GET_GOT
1181	/* Needed for -msingle-pic-base */
1182	bl	_GLOBAL_OFFSET_TABLE_@local-4
1183	mflr	r30
1184
1185	/* Pass our potential ePAPR device tree pointer to cpu_init_early_f */
1186	mr	r3, r24
1187
1188	bl	cpu_init_early_f
1189
1190	/* switch back to AS = 0 */
1191	lis	r3,(MSR_CE|MSR_ME|MSR_DE)@h
1192	ori	r3,r3,(MSR_CE|MSR_ME|MSR_DE)@l
1193	mtmsr	r3
1194	isync
1195
1196	bl	cpu_init_f	/* return boot_flag for calling board_init_f */
1197	bl	board_init_f
1198	isync
1199
1200	/* NOTREACHED - board_init_f() does not return */
1201
1202#ifndef MINIMAL_SPL
1203	.globl	_start_of_vectors
1204_start_of_vectors:
1205
1206/* Critical input. */
1207	CRIT_EXCEPTION(0x0100, CriticalInput, CritcalInputException)
1208
1209/* Machine check */
1210	MCK_EXCEPTION(0x200, MachineCheck, MachineCheckException)
1211
1212/* Data Storage exception. */
1213	STD_EXCEPTION(0x0300, DataStorage, UnknownException)
1214
1215/* Instruction Storage exception. */
1216	STD_EXCEPTION(0x0400, InstStorage, UnknownException)
1217
1218/* External Interrupt exception. */
1219	STD_EXCEPTION(0x0500, ExtInterrupt, ExtIntException)
1220
1221/* Alignment exception. */
1222Alignment:
1223	EXCEPTION_PROLOG(SRR0, SRR1)
1224	mfspr	r4,DAR
1225	stw	r4,_DAR(r21)
1226	mfspr	r5,DSISR
1227	stw	r5,_DSISR(r21)
1228	addi	r3,r1,STACK_FRAME_OVERHEAD
1229	EXC_XFER_TEMPLATE(0x600, Alignment, AlignmentException,
1230		MSR_KERNEL, COPY_EE)
1231
1232/* Program check exception */
1233ProgramCheck:
1234	EXCEPTION_PROLOG(SRR0, SRR1)
1235	addi	r3,r1,STACK_FRAME_OVERHEAD
1236	EXC_XFER_TEMPLATE(0x700, ProgramCheck, ProgramCheckException,
1237		MSR_KERNEL, COPY_EE)
1238
	/* No FPU on MPC85xx.  This exception is not supposed to happen. */
1241	STD_EXCEPTION(0x0800, FPUnavailable, UnknownException)
1242	STD_EXCEPTION(0x0900, SystemCall, UnknownException)
1243	STD_EXCEPTION(0x0a00, Decrementer, timer_interrupt)
1244	STD_EXCEPTION(0x0b00, IntervalTimer, UnknownException)
1245	STD_EXCEPTION(0x0c00, WatchdogTimer, UnknownException)
1246
1247	STD_EXCEPTION(0x0d00, DataTLBError, UnknownException)
1248	STD_EXCEPTION(0x0e00, InstructionTLBError, UnknownException)
1249
1250	CRIT_EXCEPTION(0x0f00, DebugBreakpoint, DebugException )
1251
1252	.globl	_end_of_vectors
1253_end_of_vectors:
1254
1255
1256	. = . + (0x100 - ( . & 0xff ))	/* align for debug */
1257
1258/*
1259 * This code finishes saving the registers to the exception frame
1260 * and jumps to the appropriate handler for the exception.
1261 * Register r21 is pointer into trap frame, r1 has new stack pointer.
1262 * r23 is the address of the handler.
1263 */
1264	.globl	transfer_to_handler
1265transfer_to_handler:
1266	SAVE_GPR(7, r21)
1267	SAVE_4GPRS(8, r21)
1268	SAVE_8GPRS(12, r21)
1269	SAVE_8GPRS(24, r21)
1270
1271	li	r22,0
1272	stw	r22,RESULT(r21)
1273	mtspr	SPRG2,r22		/* r1 is now kernel sp */
1274
1275	mtctr	r23			/* virtual address of handler */
1276	mtmsr	r20
1277	bctrl
1278
1279int_return:
1280	mfmsr	r28		/* Disable interrupts */
1281	li	r4,0
1282	ori	r4,r4,MSR_EE
1283	andc	r28,r28,r4
1284	SYNC			/* Some chip revs need this... */
1285	mtmsr	r28
1286	SYNC
1287	lwz	r2,_CTR(r1)
1288	lwz	r0,_LINK(r1)
1289	mtctr	r2
1290	mtlr	r0
1291	lwz	r2,_XER(r1)
1292	lwz	r0,_CCR(r1)
1293	mtspr	XER,r2
1294	mtcrf	0xFF,r0
1295	REST_10GPRS(3, r1)
1296	REST_10GPRS(13, r1)
1297	REST_8GPRS(23, r1)
1298	REST_GPR(31, r1)
1299	lwz	r2,_NIP(r1)	/* Restore environment */
1300	lwz	r0,_MSR(r1)
1301	mtspr	SRR0,r2
1302	mtspr	SRR1,r0
1303	lwz	r0,GPR0(r1)
1304	lwz	r2,GPR2(r1)
1305	lwz	r1,GPR1(r1)
1306	SYNC
1307	rfi
1308
1309/* Cache functions.
1310*/
1311.globl flush_icache
1312flush_icache:
1313.globl invalidate_icache
1314invalidate_icache:
1315	mfspr	r0,L1CSR1
1316	ori	r0,r0,L1CSR1_ICFI
1317	msync
1318	isync
1319	mtspr	L1CSR1,r0
1320	isync
1321	blr				/* entire I cache */
1322
1323.globl invalidate_dcache
1324invalidate_dcache:
1325	mfspr	r0,L1CSR0
1326	ori	r0,r0,L1CSR0_DCFI
1327	msync
1328	isync
1329	mtspr	L1CSR0,r0
1330	isync
1331	blr
1332
1333	.globl	icache_enable
1334icache_enable:
1335	mflr	r8
1336	bl	invalidate_icache
1337	mtlr	r8
1338	isync
1339	mfspr	r4,L1CSR1
1340	ori	r4,r4,(L1CSR1_CPE | L1CSR1_ICE)@l
1341	oris	r4,r4,(L1CSR1_CPE | L1CSR1_ICE)@h
1342	mtspr	L1CSR1,r4
1343	isync
1344	blr
1345
1346	.globl	icache_disable
1347icache_disable:
1348	mfspr	r0,L1CSR1
1349	lis	r3,0
1350	ori	r3,r3,L1CSR1_ICE
1351	andc	r0,r0,r3
1352	mtspr	L1CSR1,r0
1353	isync
1354	blr
1355
1356	.globl	icache_status
1357icache_status:
1358	mfspr	r3,L1CSR1
1359	andi.	r3,r3,L1CSR1_ICE
1360	blr
1361
1362	.globl	dcache_enable
1363dcache_enable:
1364	mflr	r8
1365	bl	invalidate_dcache
1366	mtlr	r8
1367	isync
1368	mfspr	r0,L1CSR0
1369	ori	r0,r0,(L1CSR0_CPE |  L1CSR0_DCE)@l
1370	oris	r0,r0,(L1CSR0_CPE |  L1CSR0_DCE)@h
1371	msync
1372	isync
1373	mtspr	L1CSR0,r0
1374	isync
1375	blr
1376
1377	.globl	dcache_disable
1378dcache_disable:
1379	mfspr	r3,L1CSR0
1380	lis	r4,0
1381	ori	r4,r4,L1CSR0_DCE
1382	andc	r3,r3,r4
1383	mtspr	L1CSR0,r3
1384	isync
1385	blr
1386
1387	.globl	dcache_status
1388dcache_status:
1389	mfspr	r3,L1CSR0
1390	andi.	r3,r3,L1CSR0_DCE
1391	blr
1392
1393/*------------------------------------------------------------------------------- */
1394/* Function:	 in8 */
1395/* Description:	 Input 8 bits */
1396/*------------------------------------------------------------------------------- */
1397	.globl	in8
1398in8:
1399	lbz	r3,0x0000(r3)
1400	blr
1401
1402/*------------------------------------------------------------------------------- */
1403/* Function:	 out8 */
1404/* Description:	 Output 8 bits */
1405/*------------------------------------------------------------------------------- */
1406	.globl	out8
1407out8:
1408	stb	r4,0x0000(r3)
1409	sync
1410	blr
1411
1412/*------------------------------------------------------------------------------- */
1413/* Function:	 out16 */
1414/* Description:	 Output 16 bits */
1415/*------------------------------------------------------------------------------- */
1416	.globl	out16
1417out16:
1418	sth	r4,0x0000(r3)
1419	sync
1420	blr
1421
1422/*------------------------------------------------------------------------------- */
1423/* Function:	 out16r */
1424/* Description:	 Byte reverse and output 16 bits */
1425/*------------------------------------------------------------------------------- */
1426	.globl	out16r
1427out16r:
1428	sthbrx	r4,r0,r3
1429	sync
1430	blr
1431
1432/*------------------------------------------------------------------------------- */
1433/* Function:	 out32 */
1434/* Description:	 Output 32 bits */
1435/*------------------------------------------------------------------------------- */
1436	.globl	out32
1437out32:
1438	stw	r4,0x0000(r3)
1439	sync
1440	blr
1441
1442/*------------------------------------------------------------------------------- */
1443/* Function:	 out32r */
1444/* Description:	 Byte reverse and output 32 bits */
1445/*------------------------------------------------------------------------------- */
1446	.globl	out32r
1447out32r:
1448	stwbrx	r4,r0,r3
1449	sync
1450	blr
1451
1452/*------------------------------------------------------------------------------- */
1453/* Function:	 in16 */
1454/* Description:	 Input 16 bits */
1455/*------------------------------------------------------------------------------- */
1456	.globl	in16
1457in16:
1458	lhz	r3,0x0000(r3)
1459	blr
1460
1461/*------------------------------------------------------------------------------- */
1462/* Function:	 in16r */
1463/* Description:	 Input 16 bits and byte reverse */
1464/*------------------------------------------------------------------------------- */
1465	.globl	in16r
1466in16r:
1467	lhbrx	r3,r0,r3
1468	blr
1469
1470/*------------------------------------------------------------------------------- */
1471/* Function:	 in32 */
1472/* Description:	 Input 32 bits */
1473/*------------------------------------------------------------------------------- */
1474	.globl	in32
1475in32:
1476	lwz	3,0x0000(3)
1477	blr
1478
1479/*------------------------------------------------------------------------------- */
1480/* Function:	 in32r */
1481/* Description:	 Input 32 bits and byte reverse */
1482/*------------------------------------------------------------------------------- */
1483	.globl	in32r
1484in32r:
1485	lwbrx	r3,r0,r3
1486	blr
1487#endif  /* !MINIMAL_SPL */
1488
1489/*------------------------------------------------------------------------------*/
1490
1491/*
1492 * void write_tlb(mas0, mas1, mas2, mas3, mas7)
1493 */
1494	.globl	write_tlb
1495write_tlb:
1496	mtspr	MAS0,r3
1497	mtspr	MAS1,r4
1498	mtspr	MAS2,r5
1499	mtspr	MAS3,r6
1500#ifdef CONFIG_ENABLE_36BIT_PHYS
1501	mtspr	MAS7,r7
1502#endif
1503	li	r3,0
1504#ifdef CONFIG_SYS_BOOK3E_HV
1505	mtspr	MAS8,r3
1506#endif
1507	isync
1508	tlbwe
1509	msync
1510	isync
1511	blr
1512
/*
 * void relocate_code(addr_sp, gd, addr_moni)
 *
 * This "function" does not return, instead it continues in RAM
 * after relocating the monitor code.
 *
 * On entry: r3 = new stack pointer, r4 = global data pointer,
 * r5 = destination address of the relocated monitor.  The copy loop
 * below then uses r3 = dest, r4 = src, r5 = length in bytes and
 * r6 = cache line size.
 */
1524	.globl	relocate_code
1525relocate_code:
1526	mr	r1,r3		/* Set new stack pointer		*/
1527	mr	r9,r4		/* Save copy of Init Data pointer	*/
1528	mr	r10,r5		/* Save copy of Destination Address	*/
1529
1530	GET_GOT
1531#ifndef CONFIG_SPL_SKIP_RELOCATE
1532	mr	r3,r5				/* Destination Address	*/
1533	lis	r4,CONFIG_SYS_MONITOR_BASE@h		/* Source      Address	*/
1534	ori	r4,r4,CONFIG_SYS_MONITOR_BASE@l
1535	lwz	r5,GOT(__init_end)
1536	sub	r5,r5,r4
1537	li	r6,CONFIG_SYS_CACHELINE_SIZE		/* Cache Line Size	*/
1538
1539	/*
1540	 * Fix GOT pointer:
1541	 *
1542	 * New GOT-PTR = (old GOT-PTR - CONFIG_SYS_MONITOR_BASE) + Destination Address
1543	 *
1544	 * Offset:
1545	 */
1546	sub	r15,r10,r4
1547
1548	/* First our own GOT */
1549	add	r12,r12,r15
	/* then the one used by the C code */
1551	add	r30,r30,r15
1552
1553	/*
1554	 * Now relocate code
1555	 */
1556
1557	cmplw	cr1,r3,r4
1558	addi	r0,r5,3
1559	srwi.	r0,r0,2
1560	beq	cr1,4f		/* In place copy is not necessary	*/
1561	beq	7f		/* Protect against 0 count		*/
1562	mtctr	r0
1563	bge	cr1,2f
1564
1565	la	r8,-4(r4)
1566	la	r7,-4(r3)
15671:	lwzu	r0,4(r8)
1568	stwu	r0,4(r7)
1569	bdnz	1b
1570	b	4f
1571
15722:	slwi	r0,r0,2
1573	add	r8,r4,r0
1574	add	r7,r3,r0
15753:	lwzu	r0,-4(r8)
1576	stwu	r0,-4(r7)
1577	bdnz	3b
1578
1579/*
1580 * Now flush the cache: note that we must start from a cache aligned
1581 * address. Otherwise we might miss one cache line.
1582 */
15834:	cmpwi	r6,0
1584	add	r5,r3,r5
1585	beq	7f		/* Always flush prefetch queue in any case */
1586	subi	r0,r6,1
1587	andc	r3,r3,r0
1588	mr	r4,r3
15895:	dcbst	0,r4
1590	add	r4,r4,r6
1591	cmplw	r4,r5
1592	blt	5b
1593	sync			/* Wait for all dcbst to complete on bus */
1594	mr	r4,r3
15956:	icbi	0,r4
1596	add	r4,r4,r6
1597	cmplw	r4,r5
1598	blt	6b
15997:	sync			/* Wait for all icbi to complete on bus */
1600	isync
1601
1602/*
1603 * We are done. Do not return, instead branch to second part of board
1604 * initialization, now running from RAM.
1605 */
1606
1607	addi	r0,r10,in_ram - _start
1608
1609	/*
	 * As IVPR is going to point to a RAM address,
	 * make sure IVOR15 holds a valid opcode to support the debugger
1612	 */
1613	mtspr	IVOR15,r0
1614
1615	/*
1616	 * Re-point the IVPR at RAM
1617	 */
1618	mtspr	IVPR,r10
1619
1620	mtlr	r0
1621	blr				/* NEVER RETURNS! */
1622#endif
1623	.globl	in_ram
1624in_ram:
1625
1626	/*
	 * Relocation function; r12 points to got2+0x8000
	 *
	 * Adjust got2 pointers; there is no need to check for 0, since this
	 * code already puts a few entries in the table.
1631	 */
1632	li	r0,__got2_entries@sectoff@l
1633	la	r3,GOT(_GOT2_TABLE_)
1634	lwz	r11,GOT(_GOT2_TABLE_)
1635	mtctr	r0
1636	sub	r11,r3,r11
1637	addi	r3,r3,-4
16381:	lwzu	r0,4(r3)
1639	cmpwi	r0,0
1640	beq-	2f
1641	add	r0,r0,r11
1642	stw	r0,0(r3)
16432:	bdnz	1b
1644
1645	/*
1646	 * Now adjust the fixups and the pointers to the fixups
1647	 * in case we need to move ourselves again.
1648	 */
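	/*
	 * Each fixup entry holds the address of a word that itself
	 * contains an absolute address: the loop below relocates the
	 * entry by the offset in r11 and, when the word it points to is
	 * non-zero, relocates that stored value as well.
	 */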
1649	li	r0,__fixup_entries@sectoff@l
1650	lwz	r3,GOT(_FIXUP_TABLE_)
1651	cmpwi	r0,0
1652	mtctr	r0
1653	addi	r3,r3,-4
1654	beq	4f
16553:	lwzu	r4,4(r3)
1656	lwzux	r0,r4,r11
1657	cmpwi	r0,0
1658	add	r0,r0,r11
1659	stw	r4,0(r3)
1660	beq-	5f
1661	stw	r0,0(r4)
16625:	bdnz	3b
16634:
1664clear_bss:
1665	/*
1666	 * Now clear BSS segment
1667	 */
1668	lwz	r3,GOT(__bss_start)
1669	lwz	r4,GOT(__bss_end)
1670
1671	cmplw	0,r3,r4
1672	beq	6f
1673
1674	li	r0,0
16755:
1676	stw	r0,0(r3)
1677	addi	r3,r3,4
1678	cmplw	0,r3,r4
1679	blt	5b
16806:
1681
1682	mr	r3,r9		/* Init Data pointer		*/
1683	mr	r4,r10		/* Destination Address		*/
1684	bl	board_init_r
1685
1686#ifndef MINIMAL_SPL
1687	/*
	 * Point IVPR and the IVORs at the relocated exception vectors
	 *
	 * r3: new vector base address (IVPR)
1692	 */
1693	.globl	trap_init
1694trap_init:
1695	mflr	r11
1696	bl	_GLOBAL_OFFSET_TABLE_-4
1697	mflr	r12
1698
1699	/* Update IVORs as per relocation */
1700	mtspr	IVPR,r3
1701
1702	lwz	r4,CriticalInput@got(r12)
1703	mtspr	IVOR0,r4	/* 0: Critical input */
1704	lwz	r4,MachineCheck@got(r12)
1705	mtspr	IVOR1,r4	/* 1: Machine check */
1706	lwz	r4,DataStorage@got(r12)
1707	mtspr	IVOR2,r4	/* 2: Data storage */
1708	lwz	r4,InstStorage@got(r12)
1709	mtspr	IVOR3,r4	/* 3: Instruction storage */
1710	lwz	r4,ExtInterrupt@got(r12)
1711	mtspr	IVOR4,r4	/* 4: External interrupt */
1712	lwz	r4,Alignment@got(r12)
1713	mtspr	IVOR5,r4	/* 5: Alignment */
1714	lwz	r4,ProgramCheck@got(r12)
1715	mtspr	IVOR6,r4	/* 6: Program check */
1716	lwz	r4,FPUnavailable@got(r12)
1717	mtspr	IVOR7,r4	/* 7: floating point unavailable */
1718	lwz	r4,SystemCall@got(r12)
1719	mtspr	IVOR8,r4	/* 8: System call */
	/* 9: Auxiliary processor unavailable (unsupported) */
1721	lwz	r4,Decrementer@got(r12)
1722	mtspr	IVOR10,r4	/* 10: Decrementer */
1723	lwz	r4,IntervalTimer@got(r12)
1724	mtspr	IVOR11,r4	/* 11: Interval timer */
1725	lwz	r4,WatchdogTimer@got(r12)
1726	mtspr	IVOR12,r4	/* 12: Watchdog timer */
1727	lwz	r4,DataTLBError@got(r12)
1728	mtspr	IVOR13,r4	/* 13: Data TLB error */
1729	lwz	r4,InstructionTLBError@got(r12)
1730	mtspr	IVOR14,r4	/* 14: Instruction TLB error */
1731	lwz	r4,DebugBreakpoint@got(r12)
1732	mtspr	IVOR15,r4	/* 15: Debug */
1733
1734	mtlr	r11
1735	blr
1736
1737.globl unlock_ram_in_cache
1738unlock_ram_in_cache:
1739	/* invalidate the INIT_RAM section */
1740	lis	r3,(CONFIG_SYS_INIT_RAM_ADDR & ~(CONFIG_SYS_CACHELINE_SIZE-1))@h
1741	ori	r3,r3,(CONFIG_SYS_INIT_RAM_ADDR & ~(CONFIG_SYS_CACHELINE_SIZE-1))@l
1742	mfspr	r4,L1CFG0
1743	andi.	r4,r4,0x1ff
1744	slwi	r4,r4,(10 - 1 - L1_CACHE_SHIFT)
1745	mtctr	r4
17461:	dcbi	r0,r3
#ifdef CONFIG_E6500	/* lock/unlock L2 cache along with L1 */
1748	dcblc	2, r0, r3
1749	dcblc	0, r0, r3
1750#else
1751	dcblc	r0,r3
1752#endif
1753	addi	r3,r3,CONFIG_SYS_CACHELINE_SIZE
1754	bdnz	1b
1755	sync
1756
1757	/* Invalidate the TLB entries for the cache */
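	/*
	 * The initial RAM was mapped as a 16K region (TLB1 entry 14), so
	 * issue a tlbivax for each 4K page within it.
	 */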
1758	lis	r3,CONFIG_SYS_INIT_RAM_ADDR@h
1759	ori	r3,r3,CONFIG_SYS_INIT_RAM_ADDR@l
1760	tlbivax	0,r3
1761	addi	r3,r3,0x1000
1762	tlbivax	0,r3
1763	addi	r3,r3,0x1000
1764	tlbivax	0,r3
1765	addi	r3,r3,0x1000
1766	tlbivax	0,r3
1767	isync
1768	blr
1769
1770.globl flush_dcache
1771flush_dcache:
1772	mfspr	r3,SPRN_L1CFG0
1773
1774	rlwinm	r5,r3,9,3	/* Extract cache block size */
1775	twlgti	r5,1		/* Only 32 and 64 byte cache blocks
1776				 * are currently defined.
1777				 */
1778	li	r4,32
1779	subfic	r6,r5,2		/* r6 = log2(1KiB / cache block size) -
1780				 *      log2(number of ways)
1781				 */
1782	slw	r5,r4,r5	/* r5 = cache block size */
1783
1784	rlwinm	r7,r3,0,0xff	/* Extract number of KiB in the cache */
1785	mulli	r7,r7,13	/* An 8-way cache will require 13
1786				 * loads per set.
1787				 */
1788	slw	r7,r7,r6
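	/*
	 * E.g. (illustrative numbers) a 32KB, 8-way cache with 32-byte
	 * blocks has 128 sets, so r7 = 32 * 13 << 2 = 1664 loads in the
	 * loop below.
	 */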
1789
1790	/* save off HID0 and set DCFA */
1791	mfspr	r8,SPRN_HID0
1792	ori	r9,r8,HID0_DCFA@l
1793	mtspr	SPRN_HID0,r9
1794	isync
1795
1796	lis	r4,0
1797	mtctr	r7
1798
17991:	lwz	r3,0(r4)	/* Load... */
1800	add	r4,r4,r5
1801	bdnz	1b
1802
1803	msync
1804	lis	r4,0
1805	mtctr	r7
1806
18071:	dcbf	0,r4		/* ...and flush. */
1808	add	r4,r4,r5
1809	bdnz	1b
1810
1811	/* restore HID0 */
1812	mtspr	SPRN_HID0,r8
1813	isync
1814
1815	blr
1816#endif /* !MINIMAL_SPL */
1817