// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2004,2007-2011 Freescale Semiconductor, Inc.
 * (C) Copyright 2002, 2003 Motorola Inc.
 * Xianghua Xiao (X.Xiao@motorola.com)
 *
 * (C) Copyright 2000
 * Wolfgang Denk, DENX Software Engineering, wd@denx.de.
 */

#include <config.h>
#include <common.h>
#include <cpu_func.h>
#include <clock_legacy.h>
#include <init.h>
#include <irq_func.h>
#include <log.h>
#include <time.h>
#include <vsprintf.h>
#include <watchdog.h>
#include <command.h>
#include <fsl_esdhc.h>
#include <asm/cache.h>
#include <asm/global_data.h>
#include <asm/io.h>
#include <asm/mmu.h>
#include <fsl_ifc.h>
#include <asm/fsl_law.h>
#include <asm/fsl_lbc.h>
#include <post.h>
#include <asm/processor.h>
#include <fsl_ddr_sdram.h>
#include <asm/ppc.h>
#include <linux/delay.h>

DECLARE_GLOBAL_DATA_PTR;
/*
 * Default board reset function
 */
static void
__board_reset(void)
{
	/* Do nothing */
}
void board_reset(void) __attribute__((weak, alias("__board_reset")));
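/*
 * A board that needs a board-specific reset sequence can provide its own
 * (non-weak) board_reset(), which replaces the weak alias above and is
 * called from do_reset() before HRESET_REQ is asserted.  Hypothetical
 * sketch (the GPIO register and bit names are illustrative only):
 *
 *	void board_reset(void)
 *	{
 *		setbits_be32(&board_gpio->gpdat, BOARD_RESET_GPIO_BIT);
 *	}
 */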

int checkcpu(void)
{
	sys_info_t sysinfo;
	uint pvr, svr;
	uint ver;
	uint major, minor;
	struct cpu_type *cpu;
	char buf1[32], buf2[32];
#if defined(CONFIG_DYNAMIC_DDR_CLK_FREQ) || \
	defined(CONFIG_STATIC_DDR_CLK_FREQ) || defined(CONFIG_FSL_CORENET)
	ccsr_gur_t __iomem *gur =
		(void __iomem *)(CONFIG_SYS_MPC85xx_GUTS_ADDR);
#endif

	/*
	 * CoreNet platforms use the DDR sync bit in the RCW to indicate
	 * sync vs. async mode.  Earlier platforms use the DDR ratio for the
	 * same purpose.  This information is only used for display here.
	 */
#ifdef CONFIG_FSL_CORENET
#ifdef CONFIG_SYS_FSL_QORIQ_CHASSIS2
	u32 ddr_sync = 0;	/* only async mode is supported */
#else
	u32 ddr_sync = ((gur->rcwsr[5]) & FSL_CORENET_RCWSR5_DDR_SYNC)
		>> FSL_CORENET_RCWSR5_DDR_SYNC_SHIFT;
#endif /* CONFIG_SYS_FSL_QORIQ_CHASSIS2 */
#else /* CONFIG_FSL_CORENET */
#if defined(CONFIG_DYNAMIC_DDR_CLK_FREQ) || defined(CONFIG_STATIC_DDR_CLK_FREQ)
	u32 ddr_ratio = ((gur->porpllsr) & MPC85xx_PORPLLSR_DDR_RATIO)
		>> MPC85xx_PORPLLSR_DDR_RATIO_SHIFT;
#else
	u32 ddr_ratio = 0;
#endif /* CONFIG_DYNAMIC_DDR_CLK_FREQ || CONFIG_STATIC_DDR_CLK_FREQ */
#endif /* CONFIG_FSL_CORENET */

	unsigned int i, core, nr_cores = cpu_numcores();
	u32 mask = cpu_mask();

#ifdef CONFIG_HETROGENOUS_CLUSTERS
	unsigned int j, dsp_core, dsp_numcores = cpu_num_dspcores();
	u32 dsp_mask = cpu_dsp_mask();
#endif

	svr = get_svr();
	major = SVR_MAJ(svr);
	minor = SVR_MIN(svr);

#if defined(CONFIG_SYS_FSL_QORIQ_CHASSIS2) && defined(CONFIG_E6500)
	if (SVR_SOC_VER(svr) == SVR_T4080) {
		ccsr_rcpm_t *rcpm =
			(void __iomem *)(CONFIG_SYS_FSL_CORENET_RCPM_ADDR);

		setbits_be32(&gur->devdisr2, FSL_CORENET_DEVDISR2_DTSEC1_6 |
			     FSL_CORENET_DEVDISR2_DTSEC1_9);
		setbits_be32(&gur->devdisr3, FSL_CORENET_DEVDISR3_PCIE3);
		setbits_be32(&gur->devdisr5, FSL_CORENET_DEVDISR5_DDR3);

		/*
		 * The T4080 hardware design requires software to disable
		 * cores 4-7.
		 */
		for (i = 4; i < 8; i++)
			cpu_disable(i);

		/*
		 * Request cores 4-7 to enter the PH20 state; before the
		 * cluster can enter the PCL10 state, all cores in the
		 * cluster must be in PH20.
		 */
		setbits_be32(&rcpm->pcph20setr, 0xf0);

		/* put the 2nd cluster into PCL10 state */
		setbits_be32(&rcpm->clpcl10setr, 1 << 1);
	}
#endif

	if (cpu_numcores() > 1) {
#ifndef CONFIG_MP
		puts("Unicore software on multiprocessor system!!\n"
		     "To enable multicore build define CONFIG_MP\n");
#endif
		volatile ccsr_pic_t *pic = (void *)(CONFIG_SYS_MPC8xxx_PIC_ADDR);
		printf("CPU%d: ", pic->whoami);
	} else {
		puts("CPU: ");
	}

	cpu = gd->arch.cpu;

	puts(cpu->name);
	if (IS_E_PROCESSOR(svr))
		puts("E");

	printf(", Version: %d.%d, (0x%08x)\n", major, minor, svr);

	pvr = get_pvr();
	ver = PVR_VER(pvr);
	major = PVR_MAJ(pvr);
	minor = PVR_MIN(pvr);

	printf("Core: ");
	switch (ver) {
	case PVR_VER_E500_V1:
	case PVR_VER_E500_V2:
		puts("e500");
		break;
	case PVR_VER_E500MC:
		puts("e500mc");
		break;
	case PVR_VER_E5500:
		puts("e5500");
		break;
	case PVR_VER_E6500:
		puts("e6500");
		break;
	default:
		puts("Unknown");
		break;
	}

	printf(", Version: %d.%d, (0x%08x)\n", major, minor, pvr);

	if (nr_cores > CONFIG_MAX_CPUS) {
		panic("\nUnexpected number of cores: %d, max is %d\n",
		      nr_cores, CONFIG_MAX_CPUS);
	}

	get_sys_info(&sysinfo);

#ifdef CONFIG_SYS_FSL_SINGLE_SOURCE_CLK
	if (sysinfo.diff_sysclk == 1)
		puts("Single Source Clock Configuration\n");
#endif

	puts("Clock Configuration:");
	for_each_cpu(i, core, nr_cores, mask) {
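		/* Print four CPUs per line, each new line indented */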
		if (!(i & 3))
			printf("\n       ");
		printf("CPU%d:%-4s MHz, ", core,
		       strmhz(buf1, sysinfo.freq_processor[core]));
	}

#ifdef CONFIG_HETROGENOUS_CLUSTERS
	for_each_cpu(j, dsp_core, dsp_numcores, dsp_mask) {
		if (!(j & 3))
			printf("\n       ");
		printf("DSP CPU%d:%-4s MHz, ", j,
		       strmhz(buf1, sysinfo.freq_processor_dsp[dsp_core]));
	}
#endif

	printf("\n       CCB:%-4s MHz,", strmhz(buf1, sysinfo.freq_systembus));
	printf("\n");

#ifdef CONFIG_FSL_CORENET
	if (ddr_sync == 1) {
		printf(" DDR:%-4s MHz (%s MT/s data rate) "
		       "(Synchronous), ",
		       strmhz(buf1, sysinfo.freq_ddrbus / 2),
		       strmhz(buf2, sysinfo.freq_ddrbus));
	} else {
		printf(" DDR:%-4s MHz (%s MT/s data rate) "
		       "(Asynchronous), ",
		       strmhz(buf1, sysinfo.freq_ddrbus / 2),
		       strmhz(buf2, sysinfo.freq_ddrbus));
	}
#else
	switch (ddr_ratio) {
	case 0x0:
		printf(" DDR:%-4s MHz (%s MT/s data rate), ",
		       strmhz(buf1, sysinfo.freq_ddrbus / 2),
		       strmhz(buf2, sysinfo.freq_ddrbus));
		break;
	case 0x7:
		printf(" DDR:%-4s MHz (%s MT/s data rate) "
		       "(Synchronous), ",
		       strmhz(buf1, sysinfo.freq_ddrbus / 2),
		       strmhz(buf2, sysinfo.freq_ddrbus));
		break;
	default:
		printf(" DDR:%-4s MHz (%s MT/s data rate) "
		       "(Asynchronous), ",
		       strmhz(buf1, sysinfo.freq_ddrbus / 2),
		       strmhz(buf2, sysinfo.freq_ddrbus));
		break;
	}
#endif

#if defined(CONFIG_FSL_LBC)
	if (sysinfo.freq_localbus > LCRR_CLKDIV) {
		printf("LBC:%-4s MHz\n", strmhz(buf1, sysinfo.freq_localbus));
	} else {
		printf("LBC: unknown (LCRR[CLKDIV] = 0x%02lx)\n",
		       sysinfo.freq_localbus);
	}
#endif

#if defined(CONFIG_FSL_IFC)
	printf("IFC:%-4s MHz\n", strmhz(buf1, sysinfo.freq_localbus));
#endif

#ifdef CONFIG_CPM2
	printf("CPM: %s MHz\n", strmhz(buf1, sysinfo.freq_systembus));
#endif

#ifdef CONFIG_QE
	printf(" QE:%-4s MHz\n", strmhz(buf1, sysinfo.freq_qe));
#endif

#if defined(CONFIG_SYS_CPRI)
	printf(" ");
	printf("CPRI:%-4s MHz", strmhz(buf1, sysinfo.freq_cpri));
#endif

#if defined(CONFIG_SYS_MAPLE)
	printf("\n ");
	printf("MAPLE:%-4s MHz, ", strmhz(buf1, sysinfo.freq_maple));
	printf("MAPLE-ULB:%-4s MHz, ", strmhz(buf1, sysinfo.freq_maple_ulb));
	printf("MAPLE-eTVPE:%-4s MHz\n",
	       strmhz(buf1, sysinfo.freq_maple_etvpe));
#endif

#ifdef CONFIG_SYS_DPAA_FMAN
	for (i = 0; i < CONFIG_SYS_NUM_FMAN; i++) {
		printf(" FMAN%d: %s MHz\n", i + 1,
		       strmhz(buf1, sysinfo.freq_fman[i]));
	}
#endif

#ifdef CONFIG_SYS_DPAA_QBMAN
	printf(" QMAN: %s MHz\n", strmhz(buf1, sysinfo.freq_qman));
#endif

#ifdef CONFIG_SYS_DPAA_PME
	printf(" PME: %s MHz\n", strmhz(buf1, sysinfo.freq_pme));
#endif

	puts("L1: D-cache 32 KiB enabled\n I-cache 32 KiB enabled\n");

#ifdef CONFIG_FSL_CORENET
	/* Display the RCW, so that no one gets confused as to what RCW
	 * we're actually using for this boot.
	 */
	puts("Reset Configuration Word (RCW):");
	for (i = 0; i < ARRAY_SIZE(gur->rcwsr); i++) {
		u32 rcw = in_be32(&gur->rcwsr[i]);

		if ((i % 4) == 0)
			printf("\n       %08x:", i * 4);
		printf(" %08x", rcw);
	}
	puts("\n");
#endif

	return 0;
}


/* ------------------------------------------------------------------------- */

int do_reset(struct cmd_tbl *cmdtp, int flag, int argc, char *const argv[])
{
/* Everything after the first generation of PQ3 parts has RSTCR */
#if defined(CONFIG_ARCH_MPC8540) || defined(CONFIG_ARCH_MPC8560)
	unsigned long val, msr;

	/*
	 * Initiate hard reset in debug control register DBCR0
	 * Make sure MSR[DE] = 1.  This only resets the core.
	 */
	msr = mfmsr();
	msr |= MSR_DE;
	mtmsr(msr);

	val = mfspr(DBCR0);
	val |= 0x70000000;
	mtspr(DBCR0, val);
#else
	volatile ccsr_gur_t *gur = (void *)(CONFIG_SYS_MPC85xx_GUTS_ADDR);

	/* Attempt board-specific reset */
	board_reset();

	/* Next try asserting HRESET_REQ */
	out_be32(&gur->rstcr, 0x2);
	udelay(100);
#endif

	return 1;
}


/*
 * Get timebase clock frequency
 */
#ifndef CONFIG_SYS_FSL_TBCLK_DIV
#define CONFIG_SYS_FSL_TBCLK_DIV	8
#endif
__weak unsigned long get_tbclk(void)
{
	unsigned long tbclk_div = CONFIG_SYS_FSL_TBCLK_DIV;

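	/*
	 * Round to the nearest integer instead of truncating: adding half
	 * of the divisor before dividing rounds bus_clk / tbclk_div to the
	 * nearest Hz, e.g. 533333333 / 8 yields 66666667 rather than
	 * 66666666.
	 */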
	return (gd->bus_clk + (tbclk_div >> 1)) / tbclk_div;
}


#if defined(CONFIG_WATCHDOG)
#define WATCHDOG_MASK (TCR_WP(63) | TCR_WRC(3) | TCR_WIE)
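/*
 * WATCHDOG_MASK covers the Book-E timer control register (TCR) fields
 * touched below: WP selects the watchdog period, WRC the action taken on a
 * second uncleared timeout (programmed here to request a reset), and WIE
 * the watchdog interrupt enable.  init_85xx_watchdog() clears all three
 * fields and then programs the board-configured prescale and reset control.
 */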
void
init_85xx_watchdog(void)
{
	mtspr(SPRN_TCR, (mfspr(SPRN_TCR) & ~WATCHDOG_MASK) |
	      TCR_WP(CONFIG_WATCHDOG_PRESC) | TCR_WRC(CONFIG_WATCHDOG_RC));
}

void
reset_85xx_watchdog(void)
{
	/*
	 * Clear TSR(WIS) bit by writing 1
	 */
	mtspr(SPRN_TSR, TSR_WIS);
}

void
watchdog_reset(void)
{
	int re_enable = disable_interrupts();

	reset_85xx_watchdog();
	if (re_enable)
		enable_interrupts();
}
#endif	/* CONFIG_WATCHDOG */

/*
 * Initializes on-chip MMC controllers.
 * To override, implement board_mmc_init().
 */
int cpu_mmc_init(struct bd_info *bis)
{
#ifdef CONFIG_FSL_ESDHC
	return fsl_esdhc_mmc_init(bis);
#else
	return 0;
#endif
}
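/*
 * A board needing non-default eSDHC setup would provide its own
 * board_mmc_init(); the generic MMC initialization falls back to
 * cpu_mmc_init() above only when no board override succeeds.
 * Hypothetical sketch (the mux helper is illustrative only):
 *
 *	int board_mmc_init(struct bd_info *bis)
 *	{
 *		board_configure_esdhc_mux();
 *		return fsl_esdhc_mmc_init(bis);
 *	}
 */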

/*
 * Print out the state of various machine registers.
 * Currently prints out LAWs, BR0/OR0 for LBC, CSPR/CSOR/Timing
 * parameters for IFC and TLBs
 */
void print_reginfo(void)
{
	print_tlbcam();
#ifdef CONFIG_FSL_LAW
	print_laws();
#endif
#if defined(CONFIG_FSL_LBC)
	print_lbc_regs();
#endif
#ifdef CONFIG_FSL_IFC
	print_ifc_regs();
#endif

}

/* Common DDR init for non-CoreNet Freescale 85xx platforms */
#ifndef CONFIG_FSL_CORENET
#if (defined(CONFIG_SYS_RAMBOOT) || defined(CONFIG_SPL)) && \
	!defined(CONFIG_SYS_INIT_L2_ADDR)
int dram_init(void)
{
#if defined(CONFIG_SPD_EEPROM) || defined(CONFIG_DDR_SPD) || \
	defined(CONFIG_ARCH_QEMU_E500)
	gd->ram_size = fsl_ddr_sdram_size();
#else
	gd->ram_size = (phys_size_t)CONFIG_SYS_SDRAM_SIZE * 1024 * 1024;
#endif

	return 0;
}
#else /* CONFIG_SYS_RAMBOOT */
int dram_init(void)
{
	phys_size_t dram_size = 0;

#if defined(CONFIG_SYS_FSL_ERRATUM_DDR_MSYNC_IN)
	{
		ccsr_gur_t *gur = (void *)(CONFIG_SYS_MPC85xx_GUTS_ADDR);
		unsigned int x = 10;
		unsigned int i;

		/*
		 * Workaround to stabilize the DDR DLL
		 */
		out_be32(&gur->ddrdllcr, 0x81000000);
		asm("sync;isync;msync");
		udelay(200);
		while (in_be32(&gur->ddrdllcr) != 0x81000100) {
			setbits_be32(&gur->devdisr, 0x00010000);
			for (i = 0; i < x; i++)
				;
			clrbits_be32(&gur->devdisr, 0x00010000);
			x++;
		}
	}
#endif

#if defined(CONFIG_SPD_EEPROM) || \
	defined(CONFIG_DDR_SPD) || \
	defined(CONFIG_SYS_DDR_RAW_TIMING)
	dram_size = fsl_ddr_sdram();
#else
	dram_size = fixed_sdram();
#endif
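	/*
	 * setup_ddr_tlbs() works in MiB and returns the amount of memory it
	 * was actually able to map, which may be less than the detected
	 * size; convert back to bytes so gd->ram_size only reflects memory
	 * that is really accessible.
	 */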
	dram_size = setup_ddr_tlbs(dram_size / 0x100000);
	dram_size *= 0x100000;

#if defined(CONFIG_DDR_ECC) && !defined(CONFIG_ECC_INIT_VIA_DDRCONTROLLER)
	/*
	 * Initialize and enable DDR ECC.
	 */
	ddr_enable_ecc(dram_size);
#endif

#if defined(CONFIG_FSL_LBC)
	/* Some boards also have SDRAM on the LBC */
	lbc_sdram_init();
#endif

	debug("DDR: ");
	gd->ram_size = dram_size;

	return 0;
}
#endif /* CONFIG_SYS_RAMBOOT */
#endif

#if CONFIG_POST & CONFIG_SYS_POST_MEMORY

/* Board-specific functions defined in each board's ddr.c */
void fsl_ddr_get_spd(generic_spd_eeprom_t *ctrl_dimms_spd,
		     unsigned int ctrl_num, unsigned int dimm_slots_per_ctrl);
void read_tlbcam_entry(int idx, u32 *valid, u32 *tsize, unsigned long *epn,
		       phys_addr_t *rpn);
unsigned int
	setup_ddr_tlbs_phys(phys_addr_t p_addr, unsigned int memsize_in_meg);

void clear_ddr_tlbs_phys(phys_addr_t p_addr, unsigned int memsize_in_meg);

static void dump_spd_ddr_reg(void)
{
	int i, j, k, m;
	u8 *p_8;
	u32 *p_32;
	struct ccsr_ddr __iomem *ddr[CONFIG_SYS_NUM_DDR_CTLRS];
	generic_spd_eeprom_t
		spd[CONFIG_SYS_NUM_DDR_CTLRS][CONFIG_DIMM_SLOTS_PER_CTLR];

	for (i = 0; i < CONFIG_SYS_NUM_DDR_CTLRS; i++)
		fsl_ddr_get_spd(spd[i], i, CONFIG_DIMM_SLOTS_PER_CTLR);

	puts("SPD data of all dimms (zero value is omitted)...\n");
	puts("Byte (hex) ");
	k = 1;
	for (i = 0; i < CONFIG_SYS_NUM_DDR_CTLRS; i++) {
		for (j = 0; j < CONFIG_DIMM_SLOTS_PER_CTLR; j++)
			printf("Dimm%d ", k++);
	}
	puts("\n");
	for (k = 0; k < sizeof(generic_spd_eeprom_t); k++) {
		m = 0;
		printf("%3d (0x%02x) ", k, k);
		for (i = 0; i < CONFIG_SYS_NUM_DDR_CTLRS; i++) {
			for (j = 0; j < CONFIG_DIMM_SLOTS_PER_CTLR; j++) {
				p_8 = (u8 *)&spd[i][j];
				if (p_8[k]) {
					printf("0x%02x ", p_8[k]);
					m++;
				} else
					puts(" ");
			}
		}
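		/*
		 * Suppress rows where every DIMM byte is zero: "\r" returns
		 * to the start of the line so the next row overwrites the
		 * offset column that was already printed.
		 */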
		if (m)
			puts("\n");
		else
			puts("\r");
	}

	for (i = 0; i < CONFIG_SYS_NUM_DDR_CTLRS; i++) {
		switch (i) {
		case 0:
			ddr[i] = (void *)CONFIG_SYS_FSL_DDR_ADDR;
			break;
#if defined(CONFIG_SYS_FSL_DDR2_ADDR) && (CONFIG_SYS_NUM_DDR_CTLRS > 1)
		case 1:
			ddr[i] = (void *)CONFIG_SYS_FSL_DDR2_ADDR;
			break;
#endif
#if defined(CONFIG_SYS_FSL_DDR3_ADDR) && (CONFIG_SYS_NUM_DDR_CTLRS > 2)
		case 2:
			ddr[i] = (void *)CONFIG_SYS_FSL_DDR3_ADDR;
			break;
#endif
#if defined(CONFIG_SYS_FSL_DDR4_ADDR) && (CONFIG_SYS_NUM_DDR_CTLRS > 3)
		case 3:
			ddr[i] = (void *)CONFIG_SYS_FSL_DDR4_ADDR;
			break;
#endif
		default:
			printf("%s unexpected controller number = %u\n",
			       __func__, i);
			return;
		}
	}
	printf("DDR registers dump for all controllers "
	       "(zero value is omitted)...\n");
	puts("Offset (hex) ");
	for (i = 0; i < CONFIG_SYS_NUM_DDR_CTLRS; i++)
		printf(" Base + 0x%04x", (u32)ddr[i] & 0xFFFF);
	puts("\n");
	for (k = 0; k < sizeof(struct ccsr_ddr) / 4; k++) {
		m = 0;
		printf("%6d (0x%04x)", k * 4, k * 4);
		for (i = 0; i < CONFIG_SYS_NUM_DDR_CTLRS; i++) {
			p_32 = (u32 *)ddr[i];
			if (p_32[k]) {
				printf(" 0x%08x", p_32[k]);
				m++;
			} else
				puts(" ");
		}
		if (m)
			puts("\n");
		else
			puts("\r");
	}
	puts("\n");
}

/* Invalidate the TLBs for DDR and set up new ones to cover p_addr */
static int reset_tlb(phys_addr_t p_addr, u32 size, phys_addr_t *phys_offset)
{
	u32 vstart = CONFIG_SYS_DDR_SDRAM_BASE;
	unsigned long epn;
	u32 tsize, valid, ptr;
	int ddr_esel;

	clear_ddr_tlbs_phys(p_addr, size >> 20);

	/* Setup new tlb to cover the physical address */
	setup_ddr_tlbs_phys(p_addr, size >> 20);

	ptr = vstart;
	ddr_esel = find_tlb_idx((void *)ptr, 1);
	if (ddr_esel != -1) {
		read_tlbcam_entry(ddr_esel, &valid, &tsize, &epn, phys_offset);
	} else {
		printf("TLB error in function %s\n", __func__);
		return -1;
	}
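
	/*
	 * phys_offset now holds the physical base of the newly mapped TLB
	 * window, i.e. the offset the memory test adds to the virtual
	 * window at CONFIG_SYS_DDR_SDRAM_BASE when reporting addresses.
	 */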

	return 0;
}

/*
 * Slide the testing window up to test another area.  For a 32-bit system
 * the maximum testable memory is limited to CONFIG_MAX_MEM_MAPPED.
 */
int arch_memory_test_advance(u32 *vstart, u32 *size, phys_addr_t *phys_offset)
{
	phys_addr_t test_cap, p_addr;
	phys_size_t p_size = min(gd->ram_size, CONFIG_MAX_MEM_MAPPED);

#if !defined(CONFIG_PHYS_64BIT) || \
	!defined(CONFIG_SYS_INIT_RAM_ADDR_PHYS) || \
	(CONFIG_SYS_INIT_RAM_ADDR_PHYS < 0x100000000ull)
	test_cap = p_size;
#else
	test_cap = gd->ram_size;
#endif
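	/*
	 * p_addr is the physical address just above the window tested so
	 * far; if it is still below the testable cap, map and test the
	 * next window of up to CONFIG_MAX_MEM_MAPPED bytes.
	 */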
	p_addr = (*vstart) + (*size) + (*phys_offset);
	if (p_addr < test_cap - 1) {
		p_size = min(test_cap - p_addr, CONFIG_MAX_MEM_MAPPED);
		if (reset_tlb(p_addr, p_size, phys_offset) == -1)
			return -1;
		*vstart = CONFIG_SYS_DDR_SDRAM_BASE;
		*size = (u32)p_size;
		printf("Testing 0x%08llx - 0x%08llx\n",
		       (u64)(*vstart) + (*phys_offset),
		       (u64)(*vstart) + (*phys_offset) + (*size) - 1);
	} else
		return 1;

	return 0;
}

/* initialization for testing area */
int arch_memory_test_prepare(u32 *vstart, u32 *size, phys_addr_t *phys_offset)
{
	phys_size_t p_size = min(gd->ram_size, CONFIG_MAX_MEM_MAPPED);

	*vstart = CONFIG_SYS_DDR_SDRAM_BASE;
	*size = (u32)p_size;	/* CONFIG_MAX_MEM_MAPPED < 4G */
	*phys_offset = 0;

#if !defined(CONFIG_PHYS_64BIT) || \
	!defined(CONFIG_SYS_INIT_RAM_ADDR_PHYS) || \
	(CONFIG_SYS_INIT_RAM_ADDR_PHYS < 0x100000000ull)
	if (gd->ram_size > CONFIG_MAX_MEM_MAPPED) {
		puts("Cannot test more than ");
		print_size(CONFIG_MAX_MEM_MAPPED,
			   " without proper 36BIT support.\n");
	}
#endif
	printf("Testing 0x%08llx - 0x%08llx\n",
	       (u64)(*vstart) + (*phys_offset),
	       (u64)(*vstart) + (*phys_offset) + (*size) - 1);

	return 0;
}

/* Invalidate the TLBs for DDR and remap as normal after testing */
int arch_memory_test_cleanup(u32 *vstart, u32 *size, phys_addr_t *phys_offset)
{
	unsigned long epn;
	u32 tsize, valid, ptr;
	phys_addr_t rpn = 0;
	int ddr_esel;

	/* disable the TLBs for this testing */
	ptr = *vstart;

	while (ptr < (*vstart) + (*size)) {
		ddr_esel = find_tlb_idx((void *)ptr, 1);
		if (ddr_esel != -1) {
			read_tlbcam_entry(ddr_esel, &valid, &tsize, &epn, &rpn);
			disable_tlb(ddr_esel);
		}
		ptr += TSIZE_TO_BYTES(tsize);
	}

	puts("Remap DDR ");
	setup_ddr_tlbs(gd->ram_size >> 20);
	puts("\n");

	return 0;
}

void arch_memory_failure_handle(void)
{
	dump_spd_ddr_reg();
}
#endif