/*
 * xen/arch/arm/setup.c
 *
 * Early bringup code for an ARMv7-A with virt extensions.
 *
 * Tim Deegan <tim@xen.org>
 * Copyright (c) 2011 Citrix Systems.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <xen/compile.h>
#include <xen/device_tree.h>
#include <xen/domain_page.h>
#include <xen/grant_table.h>
#include <xen/types.h>
#include <xen/string.h>
#include <xen/serial.h>
#include <xen/sched.h>
#include <xen/console.h>
#include <xen/err.h>
#include <xen/init.h>
#include <xen/irq.h>
#include <xen/mm.h>
#include <xen/param.h>
#include <xen/softirq.h>
#include <xen/keyhandler.h>
#include <xen/cpu.h>
#include <xen/pfn.h>
#include <xen/virtual_region.h>
#include <xen/vmap.h>
#include <xen/trace.h>
#include <xen/libfdt/libfdt.h>
#include <xen/acpi.h>
#include <xen/warning.h>
#include <asm/alternative.h>
#include <asm/page.h>
#include <asm/current.h>
#include <asm/setup.h>
#include <asm/gic.h>
#include <asm/cpuerrata.h>
#include <asm/cpufeature.h>
#include <asm/platform.h>
#include <asm/procinfo.h>
#include <asm/tee/tee.h>
#include <xsm/xsm.h>
#include <asm/acpi.h>

struct bootinfo __initdata bootinfo;

struct cpuinfo_arm __read_mostly boot_cpu_data;

#ifdef CONFIG_ACPI
bool __read_mostly acpi_disabled;
#endif

#ifdef CONFIG_ARM_32
static unsigned long opt_xenheap_megabytes __initdata;
integer_param("xenheap_megabytes", opt_xenheap_megabytes);
#endif

domid_t __read_mostly max_init_domid;

static __used void init_done(void)
{
    /* Must be done past setting system_state. */
    unregister_init_virtual_region();

    discard_initial_modules();
    free_init_memory();
    startup_cpu_idle_loop();
}

static void __init init_idle_domain(void)
{
    scheduler_init();
    set_current(idle_vcpu[0]);
    /* TODO: setup_idle_pagetable(); */
}

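/*
 * The MIDR "implementer" field is an ASCII character code, so the table
 * below is indexed directly by that character.
 */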
static const char * __initdata processor_implementers[] = {
    ['A'] = "ARM Limited",
    ['B'] = "Broadcom Corporation",
    ['C'] = "Cavium Inc.",
    ['D'] = "Digital Equipment Corp",
    ['M'] = "Motorola, Freescale Semiconductor Inc.",
    ['P'] = "Applied Micro",
    ['Q'] = "Qualcomm Inc.",
    ['V'] = "Marvell Semiconductor Inc.",
    ['i'] = "Intel Corporation",
};

static void __init processor_id(void)
{
    const char *implementer = "Unknown";
    struct cpuinfo_arm *c = &boot_cpu_data;

    identify_cpu(c);
    current_cpu_data = *c;

    if ( c->midr.implementer < ARRAY_SIZE(processor_implementers) &&
         processor_implementers[c->midr.implementer] )
        implementer = processor_implementers[c->midr.implementer];

    if ( c->midr.architecture != 0xf )
        printk("Huh, cpu architecture %x, expected 0xf (defined by cpuid)\n",
               c->midr.architecture);

    printk("Processor: %08"PRIx32": \"%s\", variant: 0x%x, part 0x%03x, rev 0x%x\n",
           c->midr.bits, implementer,
           c->midr.variant, c->midr.part_number, c->midr.revision);

#if defined(CONFIG_ARM_64)
    printk("64-bit Execution:\n");
    printk("  Processor Features: %016"PRIx64" %016"PRIx64"\n",
           boot_cpu_data.pfr64.bits[0], boot_cpu_data.pfr64.bits[1]);
    printk("    Exception Levels: EL3:%s EL2:%s EL1:%s EL0:%s\n",
           cpu_has_el3_32 ? "64+32" : cpu_has_el3_64 ? "64" : "No",
           cpu_has_el2_32 ? "64+32" : cpu_has_el2_64 ? "64" : "No",
           cpu_has_el1_32 ? "64+32" : cpu_has_el1_64 ? "64" : "No",
           cpu_has_el0_32 ? "64+32" : cpu_has_el0_64 ? "64" : "No");
    printk("    Extensions:%s%s%s\n",
           cpu_has_fp ? " FloatingPoint" : "",
           cpu_has_simd ? " AdvancedSIMD" : "",
           cpu_has_gicv3 ? " GICv3-SysReg" : "");

    printk("  Debug Features: %016"PRIx64" %016"PRIx64"\n",
           boot_cpu_data.dbg64.bits[0], boot_cpu_data.dbg64.bits[1]);
    printk("  Auxiliary Features: %016"PRIx64" %016"PRIx64"\n",
           boot_cpu_data.aux64.bits[0], boot_cpu_data.aux64.bits[1]);
    printk("  Memory Model Features: %016"PRIx64" %016"PRIx64"\n",
           boot_cpu_data.mm64.bits[0], boot_cpu_data.mm64.bits[1]);
    printk("  ISA Features:  %016"PRIx64" %016"PRIx64"\n",
           boot_cpu_data.isa64.bits[0], boot_cpu_data.isa64.bits[1]);
#endif

    /*
     * On AArch64 these refer to the capabilities when running in
     * AArch32 mode.
     */
    if ( cpu_has_aarch32 )
    {
        printk("32-bit Execution:\n");
        printk("  Processor Features: %08"PRIx32":%08"PRIx32"\n",
               boot_cpu_data.pfr32.bits[0], boot_cpu_data.pfr32.bits[1]);
        printk("    Instruction Sets:%s%s%s%s%s%s\n",
               cpu_has_aarch32 ? " AArch32" : "",
               cpu_has_arm ? " A32" : "",
               cpu_has_thumb ? " Thumb" : "",
               cpu_has_thumb2 ? " Thumb-2" : "",
               cpu_has_thumbee ? " ThumbEE" : "",
               cpu_has_jazelle ? " Jazelle" : "");
        printk("    Extensions:%s%s\n",
               cpu_has_gentimer ? " GenericTimer" : "",
               cpu_has_security ? " Security" : "");

        printk("  Debug Features: %08"PRIx32"\n",
               boot_cpu_data.dbg32.bits[0]);
        printk("  Auxiliary Features: %08"PRIx32"\n",
               boot_cpu_data.aux32.bits[0]);
        printk("  Memory Model Features: "
               "%08"PRIx32" %08"PRIx32" %08"PRIx32" %08"PRIx32"\n",
               boot_cpu_data.mm32.bits[0], boot_cpu_data.mm32.bits[1],
               boot_cpu_data.mm32.bits[2], boot_cpu_data.mm32.bits[3]);
        printk("  ISA Features: %08x %08x %08x %08x %08x %08x\n",
               boot_cpu_data.isa32.bits[0], boot_cpu_data.isa32.bits[1],
               boot_cpu_data.isa32.bits[2], boot_cpu_data.isa32.bits[3],
               boot_cpu_data.isa32.bits[4], boot_cpu_data.isa32.bits[5]);
    }
    else
    {
        printk("32-bit Execution: Unsupported\n");
    }

    processor_setup();
}

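/*
 * Invoke cb(start, end) on every sub-range of [s, e) that is not covered
 * by an FDT memory reservation or by a reserved-memory region recorded in
 * bootinfo. Overlapping reservations split the range and the pieces are
 * handled recursively; "first" is the reservation index to start scanning
 * from.
 */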
void __init dt_unreserved_regions(paddr_t s, paddr_t e,
                                  void (*cb)(paddr_t, paddr_t), int first)
{
    int i, nr = fdt_num_mem_rsv(device_tree_flattened);

    for ( i = first; i < nr ; i++ )
    {
        paddr_t r_s, r_e;

        if ( fdt_get_mem_rsv(device_tree_flattened, i, &r_s, &r_e ) < 0 )
            /* If we can't read it, pretend it doesn't exist... */
            continue;

        r_e += r_s; /* fdt_get_mem_rsv returns length */

        if ( s < r_e && r_s < e )
        {
            dt_unreserved_regions(r_e, e, cb, i+1);
            dt_unreserved_regions(s, r_s, cb, i+1);
            return;
        }
    }

    /*
     * i is the current bootmodule we are evaluating across all possible
     * kinds.
     *
     * When retrieving the corresponding reserved-memory addresses
     * below, we need to index the bootinfo.reserved_mem bank starting
     * from 0, and only counting the reserved-memory modules. Hence,
     * we need to use i - nr.
     */
    for ( ; i - nr < bootinfo.reserved_mem.nr_banks; i++ )
    {
        paddr_t r_s = bootinfo.reserved_mem.bank[i - nr].start;
        paddr_t r_e = r_s + bootinfo.reserved_mem.bank[i - nr].size;

        if ( s < r_e && r_s < e )
        {
            dt_unreserved_regions(r_e, e, cb, i + 1);
            dt_unreserved_regions(s, r_s, cb, i + 1);
            return;
        }
    }

    cb(s, e);
}

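/*
 * Record a boot module in bootinfo. If a module of the same kind and start
 * address is already registered, the existing entry is reused; a hardware
 * domain registration (domU == false) takes precedence over a domU one.
 */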
struct bootmodule __init *add_boot_module(bootmodule_kind kind,
                                          paddr_t start, paddr_t size,
                                          bool domU)
{
    struct bootmodules *mods = &bootinfo.modules;
    struct bootmodule *mod;
    unsigned int i;

    if ( mods->nr_mods == MAX_MODULES )
    {
        printk("Ignoring %s boot module at %"PRIpaddr"-%"PRIpaddr" (too many)\n",
               boot_module_kind_as_string(kind), start, start + size);
        return NULL;
    }
    for ( i = 0 ; i < mods->nr_mods ; i++ )
    {
        mod = &mods->module[i];
        if ( mod->kind == kind && mod->start == start )
        {
            if ( !domU )
                mod->domU = false;
            return mod;
        }
    }

    mod = &mods->module[mods->nr_mods++];
    mod->kind = kind;
    mod->start = start;
    mod->size = size;
    mod->domU = domU;

    return mod;
}

/*
 * boot_module_find_by_kind can only be used to return Xen modules (e.g.
 * XSM, DTB) or Dom0 modules. This is not suitable for looking up guest
 * modules.
 */
struct bootmodule * __init boot_module_find_by_kind(bootmodule_kind kind)
{
    struct bootmodules *mods = &bootinfo.modules;
    struct bootmodule *mod;
    int i;
    for ( i = 0 ; i < mods->nr_mods ; i++ )
    {
        mod = &mods->module[i];
        if ( mod->kind == kind && !mod->domU )
            return mod;
    }
    return NULL;
}

void __init add_boot_cmdline(const char *name, const char *cmdline,
                             bootmodule_kind kind, paddr_t start, bool domU)
{
    struct bootcmdlines *cmds = &bootinfo.cmdlines;
    struct bootcmdline *cmd;

    if ( cmds->nr_mods == MAX_MODULES )
    {
        printk("Ignoring %s cmdline (too many)\n", name);
        return;
    }

    cmd = &cmds->cmdline[cmds->nr_mods++];
    cmd->kind = kind;
    cmd->domU = domU;
    cmd->start = start;

    ASSERT(strlen(name) <= DT_MAX_NAME);
    safe_strcpy(cmd->dt_name, name);

    if ( strlen(cmdline) > BOOTMOD_MAX_CMDLINE )
        panic("module %s command line too long\n", name);
    safe_strcpy(cmd->cmdline, cmdline);
}

/*
 * boot_cmdline_find_by_kind can only be used to return Xen modules (e.g.
 * XSM, DTB) or Dom0 modules. This is not suitable for looking up guest
 * modules.
 */
struct bootcmdline * __init boot_cmdline_find_by_kind(bootmodule_kind kind)
{
    struct bootcmdlines *cmds = &bootinfo.cmdlines;
    struct bootcmdline *cmd;
    int i;

    for ( i = 0 ; i < cmds->nr_mods ; i++ )
    {
        cmd = &cmds->cmdline[i];
        if ( cmd->kind == kind && !cmd->domU )
            return cmd;
    }
    return NULL;
}

struct bootcmdline * __init boot_cmdline_find_by_name(const char *name)
{
    struct bootcmdlines *mods = &bootinfo.cmdlines;
    struct bootcmdline *mod;
    unsigned int i;

    for ( i = 0 ; i < mods->nr_mods ; i++ )
    {
        mod = &mods->cmdline[i];
        if ( strcmp(mod->dt_name, name) == 0 )
            return mod;
    }
    return NULL;
}

struct bootmodule * __init boot_module_find_by_addr_and_kind(bootmodule_kind kind,
                                                             paddr_t start)
{
    struct bootmodules *mods = &bootinfo.modules;
    struct bootmodule *mod;
    unsigned int i;

    for ( i = 0 ; i < mods->nr_mods ; i++ )
    {
        mod = &mods->module[i];
        if ( mod->kind == kind && mod->start == start )
            return mod;
    }
    return NULL;
}

const char * __init boot_module_kind_as_string(bootmodule_kind kind)
{
    switch ( kind )
    {
    case BOOTMOD_XEN:     return "Xen";
    case BOOTMOD_FDT:     return "Device Tree";
    case BOOTMOD_KERNEL:  return "Kernel";
    case BOOTMOD_RAMDISK: return "Ramdisk";
    case BOOTMOD_XSM:     return "XSM";
    case BOOTMOD_GUEST_DTB:     return "DTB";
    case BOOTMOD_UNKNOWN: return "Unknown";
    default: BUG();
    }
}

void __init discard_initial_modules(void)
{
    struct bootmodules *mi = &bootinfo.modules;
    int i;

    for ( i = 0; i < mi->nr_mods; i++ )
    {
        paddr_t s = mi->module[i].start;
        paddr_t e = s + PAGE_ALIGN(mi->module[i].size);

        if ( mi->module[i].kind == BOOTMOD_XEN )
            continue;

        if ( !mfn_valid(maddr_to_mfn(s)) ||
             !mfn_valid(maddr_to_mfn(e)) )
            continue;

        dt_unreserved_regions(s, e, init_domheap_pages, 0);
    }

    mi->nr_mods = 0;

    remove_early_mappings();
}

/* Relocate the FDT in Xen heap */
static void * __init relocate_fdt(paddr_t dtb_paddr, size_t dtb_size)
{
    void *fdt = xmalloc_bytes(dtb_size);

    if ( !fdt )
        panic("Unable to allocate memory for relocating the Device-Tree.\n");

    copy_from_paddr(fdt, dtb_paddr, dtb_size);

    return fdt;
}

#ifdef CONFIG_ARM_32
/*
 * Returns the end address of the highest region in the range s..e
 * with required size and alignment that does not conflict with the
 * modules from first_mod to nr_modules.
 *
 * For non-recursive callers first_mod should normally be 0 (all
 * modules and Xen itself) or 1 (all modules but not Xen).
 */
static paddr_t __init consider_modules(paddr_t s, paddr_t e,
                                       uint32_t size, paddr_t align,
                                       int first_mod)
{
    const struct bootmodules *mi = &bootinfo.modules;
    int i;
    int nr;

    s = (s+align-1) & ~(align-1);
    e = e & ~(align-1);

    if ( s > e ||  e - s < size )
        return 0;

    /* First check the boot modules */
    for ( i = first_mod; i < mi->nr_mods; i++ )
    {
        paddr_t mod_s = mi->module[i].start;
        paddr_t mod_e = mod_s + mi->module[i].size;

        if ( s < mod_e && mod_s < e )
        {
            mod_e = consider_modules(mod_e, e, size, align, i+1);
            if ( mod_e )
                return mod_e;

            return consider_modules(s, mod_s, size, align, i+1);
        }
    }

    /* Now check any fdt reserved areas. */

    nr = fdt_num_mem_rsv(device_tree_flattened);

    for ( ; i < mi->nr_mods + nr; i++ )
    {
        paddr_t mod_s, mod_e;

        if ( fdt_get_mem_rsv(device_tree_flattened,
                             i - mi->nr_mods,
                             &mod_s, &mod_e ) < 0 )
            /* If we can't read it, pretend it doesn't exist... */
            continue;

        /* fdt_get_mem_rsv returns length */
        mod_e += mod_s;

        if ( s < mod_e && mod_s < e )
        {
            mod_e = consider_modules(mod_e, e, size, align, i+1);
            if ( mod_e )
                return mod_e;

            return consider_modules(s, mod_s, size, align, i+1);
        }
    }

    /*
     * i is the current bootmodule we are evaluating, across all
     * possible kinds of bootmodules.
     *
     * When retrieving the corresponding reserved-memory addresses, we
     * need to index the bootinfo.reserved_mem bank starting from 0, and
     * only counting the reserved-memory modules. Hence, we need to use
     * i - nr.
     */
    nr += mi->nr_mods;
    for ( ; i - nr < bootinfo.reserved_mem.nr_banks; i++ )
    {
        paddr_t r_s = bootinfo.reserved_mem.bank[i - nr].start;
        paddr_t r_e = r_s + bootinfo.reserved_mem.bank[i - nr].size;

        if ( s < r_e && r_s < e )
        {
            r_e = consider_modules(r_e, e, size, align, i + 1);
            if ( r_e )
                return r_e;

            return consider_modules(s, r_s, size, align, i + 1);
        }
    }
    return e;
}
#endif

/*
 * Return the end of the non-module region starting at s. In other
 * words, return the start of the next module after s.
 *
 * On input *end is the end of the region which should be considered
 * and it is updated to reflect the end of the module, clipped to the
 * end of the region if it would run over.
 */
static paddr_t __init next_module(paddr_t s, paddr_t *end)
{
    struct bootmodules *mi = &bootinfo.modules;
    paddr_t lowest = ~(paddr_t)0;
    int i;

    for ( i = 0; i < mi->nr_mods; i++ )
    {
        paddr_t mod_s = mi->module[i].start;
        paddr_t mod_e = mod_s + mi->module[i].size;

        if ( !mi->module[i].size )
            continue;

        if ( mod_s < s )
            continue;
        if ( mod_s > lowest )
            continue;
        if ( mod_s > *end )
            continue;
        lowest = mod_s;
        *end = min(*end, mod_e);
    }
    return lowest;
}

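/*
 * Set up PDX compression: address bits that are zero in every RAM bank
 * are squeezed out of the page index, so that the frame table does not
 * need to cover large holes between banks.
 */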
static void __init init_pdx(void)
{
    paddr_t bank_start, bank_size, bank_end;

    /*
     * Arm does not have any restrictions on the bits to compress. Pass 0 to
     * let the common code further restrict the mask.
     *
     * If the logic changes in pfn_pdx_hole_setup we might have to
     * update this function too.
     */
    uint64_t mask = pdx_init_mask(0x0);
    int bank;

    for ( bank = 0 ; bank < bootinfo.mem.nr_banks; bank++ )
    {
        bank_start = bootinfo.mem.bank[bank].start;
        bank_size = bootinfo.mem.bank[bank].size;

        mask |= bank_start | pdx_region_mask(bank_start, bank_size);
    }

    for ( bank = 0 ; bank < bootinfo.mem.nr_banks; bank++ )
    {
        bank_start = bootinfo.mem.bank[bank].start;
        bank_size = bootinfo.mem.bank[bank].size;

        if (~mask & pdx_region_mask(bank_start, bank_size))
            mask = 0;
    }

    pfn_pdx_hole_setup(mask >> PAGE_SHIFT);

    for ( bank = 0 ; bank < bootinfo.mem.nr_banks; bank++ )
    {
        bank_start = bootinfo.mem.bank[bank].start;
        bank_size = bootinfo.mem.bank[bank].size;
        bank_end = bank_start + bank_size;

        set_pdx_range(paddr_to_pfn(bank_start),
                      paddr_to_pfn(bank_end));
    }
}

#ifdef CONFIG_ARM_32
static void __init setup_mm(void)
{
    paddr_t ram_start, ram_end, ram_size;
    paddr_t s, e;
    unsigned long ram_pages;
    unsigned long heap_pages, xenheap_pages, domheap_pages;
    int i;
    const uint32_t ctr = READ_CP32(CTR);

    if ( !bootinfo.mem.nr_banks )
        panic("No memory bank\n");

    /* We only support instruction caches implementing the IVIPT extension. */
    if ( ((ctr >> CTR_L1Ip_SHIFT) & CTR_L1Ip_MASK) == CTR_L1Ip_AIVIVT )
        panic("AIVIVT instruction cache not supported\n");

    init_pdx();

    ram_start = bootinfo.mem.bank[0].start;
    ram_size  = bootinfo.mem.bank[0].size;
    ram_end   = ram_start + ram_size;

    for ( i = 1; i < bootinfo.mem.nr_banks; i++ )
    {
        paddr_t bank_start = bootinfo.mem.bank[i].start;
        paddr_t bank_size = bootinfo.mem.bank[i].size;
        paddr_t bank_end = bank_start + bank_size;

        ram_size  = ram_size + bank_size;
        ram_start = min(ram_start,bank_start);
        ram_end   = max(ram_end,bank_end);
    }

    total_pages = ram_pages = ram_size >> PAGE_SHIFT;

    /*
     * If the user has not requested otherwise via the command line
     * then locate the xenheap using these constraints:
     *
     *  - must be 32 MiB aligned
     *  - must not include Xen itself or the boot modules
     *  - must be at most 1 GiB, or 1/32 of the total RAM if that is smaller
     *  - must be at least 32 MiB
     *
     * We try to allocate the largest xenheap possible within these
     * constraints.
     */
    heap_pages = ram_pages;
    if ( opt_xenheap_megabytes )
        xenheap_pages = opt_xenheap_megabytes << (20-PAGE_SHIFT);
    else
    {
        xenheap_pages = (heap_pages/32 + 0x1fffUL) & ~0x1fffUL;
        xenheap_pages = max(xenheap_pages, 32UL<<(20-PAGE_SHIFT));
        xenheap_pages = min(xenheap_pages, 1UL<<(30-PAGE_SHIFT));
    }

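    /*
     * Halve the requested xenheap until a free region with the required
     * size and 32 MiB alignment is found, unless the size was fixed on
     * the command line.
     */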
    do
    {
        e = consider_modules(ram_start, ram_end,
                             pfn_to_paddr(xenheap_pages),
                             32<<20, 0);
        if ( e )
            break;

        xenheap_pages >>= 1;
    } while ( !opt_xenheap_megabytes && xenheap_pages > 32<<(20-PAGE_SHIFT) );

    if ( ! e )
        panic("Not enough space for xenheap\n");

    domheap_pages = heap_pages - xenheap_pages;

    printk("Xen heap: %"PRIpaddr"-%"PRIpaddr" (%lu pages%s)\n",
           e - (pfn_to_paddr(xenheap_pages)), e, xenheap_pages,
           opt_xenheap_megabytes ? ", from command-line" : "");
    printk("Dom heap: %lu pages\n", domheap_pages);

    setup_xenheap_mappings((e >> PAGE_SHIFT) - xenheap_pages, xenheap_pages);

    /* Add non-xenheap memory */
    for ( i = 0; i < bootinfo.mem.nr_banks; i++ )
    {
        paddr_t bank_start = bootinfo.mem.bank[i].start;
        paddr_t bank_end = bank_start + bootinfo.mem.bank[i].size;

        s = bank_start;
        while ( s < bank_end )
        {
            paddr_t n = bank_end;

            e = next_module(s, &n);

            if ( e == ~(paddr_t)0 )
            {
                e = n = ram_end;
            }

            /*
             * Module in a RAM bank other than the one which we are
             * dealing with here.
             */
            if ( e > bank_end )
                e = bank_end;

            /* Avoid the xenheap */
            if ( s < mfn_to_maddr(mfn_add(xenheap_mfn_start, xenheap_pages))
                 && mfn_to_maddr(xenheap_mfn_start) < e )
            {
                e = mfn_to_maddr(xenheap_mfn_start);
                n = mfn_to_maddr(mfn_add(xenheap_mfn_start, xenheap_pages));
            }

            dt_unreserved_regions(s, e, init_boot_pages, 0);

            s = n;
        }
    }

    /* Frame table covers all of RAM region, including holes */
    setup_frametable_mappings(ram_start, ram_end);
    max_page = PFN_DOWN(ram_end);

    /* Add xenheap memory that was not already added to the boot allocator. */
    init_xenheap_pages(mfn_to_maddr(xenheap_mfn_start),
                       mfn_to_maddr(xenheap_mfn_end));
}
#else /* CONFIG_ARM_64 */
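/*
 * On arm64 all RAM banks are mapped into the xenheap (direct map), so
 * there is no separate xenheap/domheap split as on arm32.
 */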
static void __init setup_mm(void)
{
    paddr_t ram_start = ~0;
    paddr_t ram_end = 0;
    paddr_t ram_size = 0;
    int bank;

    init_pdx();

    total_pages = 0;
    for ( bank = 0 ; bank < bootinfo.mem.nr_banks; bank++ )
    {
        paddr_t bank_start = bootinfo.mem.bank[bank].start;
        paddr_t bank_size = bootinfo.mem.bank[bank].size;
        paddr_t bank_end = bank_start + bank_size;
        paddr_t s, e;

        ram_size = ram_size + bank_size;
        ram_start = min(ram_start,bank_start);
        ram_end = max(ram_end,bank_end);

        setup_xenheap_mappings(bank_start>>PAGE_SHIFT, bank_size>>PAGE_SHIFT);

        s = bank_start;
        while ( s < bank_end )
        {
            paddr_t n = bank_end;

            e = next_module(s, &n);

            if ( e == ~(paddr_t)0 )
            {
                e = n = bank_end;
            }

            if ( e > bank_end )
                e = bank_end;

            dt_unreserved_regions(s, e, init_boot_pages, 0);
            s = n;
        }
    }

    total_pages += ram_size >> PAGE_SHIFT;

    xenheap_virt_end = XENHEAP_VIRT_START + ram_end - ram_start;
    xenheap_mfn_start = maddr_to_mfn(ram_start);
    xenheap_mfn_end = maddr_to_mfn(ram_end);

    setup_frametable_mappings(ram_start, ram_end);
    max_page = PFN_DOWN(ram_end);
}
#endif

size_t __read_mostly dcache_line_bytes;

/* C entry point for boot CPU */
void __init start_xen(unsigned long boot_phys_offset,
                      unsigned long fdt_paddr)
{
    size_t fdt_size;
    int cpus, i;
    const char *cmdline;
    struct bootmodule *xen_bootmodule;
    struct domain *dom0;
    struct xen_domctl_createdomain dom0_cfg = {
        .flags = XEN_DOMCTL_CDF_hvm | XEN_DOMCTL_CDF_hap,
        .max_evtchn_port = -1,
        .max_grant_frames = gnttab_dom0_frames(),
        .max_maptrack_frames = -1,
    };
    int rc;

    dcache_line_bytes = read_dcache_line_bytes();

    percpu_init_areas();
    set_processor_id(0); /* needed early, for smp_processor_id() */

    setup_virtual_regions(NULL, NULL);
    /* Initializing traps early allows us to get a backtrace when an error occurs */
    init_traps();

    setup_pagetables(boot_phys_offset);

    smp_clear_cpu_maps();

    device_tree_flattened = early_fdt_map(fdt_paddr);
    if ( !device_tree_flattened )
        panic("Invalid device tree blob at physical address %#lx.\n"
              "The DTB must be 8-byte aligned and must not exceed 2 MB in size.\n\n"
              "Please check your bootloader.\n",
              fdt_paddr);

    /* Register Xen's load address as a boot module. */
    xen_bootmodule = add_boot_module(BOOTMOD_XEN,
                             (paddr_t)(uintptr_t)(_start + boot_phys_offset),
                             (paddr_t)(uintptr_t)(_end - _start), false);
    BUG_ON(!xen_bootmodule);

    fdt_size = boot_fdt_info(device_tree_flattened, fdt_paddr);

    cmdline = boot_fdt_cmdline(device_tree_flattened);
    printk("Command line: %s\n", cmdline);
    cmdline_parse(cmdline);

    setup_mm();

    /* Parse the ACPI tables for possible boot-time configuration */
    acpi_boot_table_init();

    end_boot_allocator();

    /*
     * The memory subsystem has been initialized; we can now switch from
     * early_boot -> boot.
     */
    system_state = SYS_STATE_boot;

    vm_init();

    if ( acpi_disabled )
    {
        printk("Booting using Device Tree\n");
        device_tree_flattened = relocate_fdt(fdt_paddr, fdt_size);
        dt_unflatten_host_device_tree();
    }
    else
    {
        printk("Booting using ACPI\n");
        device_tree_flattened = NULL;
    }

    init_IRQ();

    platform_init();

    preinit_xen_time();

    gic_preinit();

    arm_uart_init();
    console_init_preirq();
    console_init_ring();

    processor_id();

    smp_init_cpus();
    cpus = smp_get_max_cpus();
    printk(XENLOG_INFO "SMP: Allowing %u CPUs\n", cpus);
    nr_cpu_ids = cpus;

    /*
     * Some errata rely on the SMCCC version, which is detected by
     * psci_init() (called from smp_init_cpus()).
     */
    check_local_cpu_errata();

    init_xen_time();

    gic_init();

    tasklet_subsys_init();

    if ( xsm_dt_init() != 1 )
        warning_add("WARNING: SILO mode is not enabled.\n"
                    "It has implications on the security of the system,\n"
                    "unless the communications have been forbidden between\n"
                    "untrusted domains.\n");

    init_maintenance_interrupt();
    init_timer_interrupt();

    timer_init();

    init_idle_domain();

    rcu_init();

    setup_system_domains();

    local_irq_enable();
    local_abort_enable();

    smp_prepare_cpus();

    initialize_keytable();

    console_init_postirq();

    do_presmp_initcalls();

    for_each_present_cpu ( i )
    {
        if ( (num_online_cpus() < cpus) && !cpu_online(i) )
        {
            int ret = cpu_up(i);
            if ( ret != 0 )
                printk("Failed to bring up CPU %u (error %d)\n", i, ret);
        }
    }

    printk("Brought up %ld CPUs\n", (long)num_online_cpus());
    /* TODO: smp_cpus_done(); */

    /*
     * The IOMMU subsystem must be initialized before P2M as we need
     * to gather requirements regarding the maximum IPA bits supported by
     * each IOMMU device.
     */
    rc = iommu_setup();
    if ( !iommu_enabled && rc != -ENODEV )
        panic("Couldn't configure all the IOMMUs correctly.\n");

    setup_virt_paging();

    do_initcalls();

    /*
     * This needs to be called after do_initcalls so that stop_machine can
     * be used (the tasklets it relies on are initialized via an initcall).
     */
    apply_alternatives_all();
    enable_errata_workarounds();

    /* Create initial domain 0. */
    /* The vGIC for DOM0 is exactly emulating the hardware GIC */
    dom0_cfg.arch.gic_version = XEN_DOMCTL_CONFIG_GIC_NATIVE;
    /*
     * Xen vGIC supports a maximum of 992 interrupt lines.
     * 32 are subtracted to cover local IRQs.
     */
    dom0_cfg.arch.nr_spis = min(gic_number_lines(), (unsigned int) 992) - 32;
    if ( gic_number_lines() > 992 )
        printk(XENLOG_WARNING "Maximum number of vGIC IRQs exceeded.\n");
    dom0_cfg.arch.tee_type = tee_get_type();
    dom0_cfg.max_vcpus = dom0_max_vcpus();

    if ( iommu_enabled )
        dom0_cfg.flags |= XEN_DOMCTL_CDF_iommu;

    dom0 = domain_create(0, &dom0_cfg, true);
    if ( IS_ERR(dom0) || (alloc_dom0_vcpu0(dom0) == NULL) )
        panic("Error creating domain 0\n");

    if ( construct_dom0(dom0) != 0 )
        panic("Could not set up DOM0 guest OS\n");

    heap_init_late();

    init_trace_bufs();

    init_constructors();

    console_endboot();

    /* Hide UART from DOM0 if we're using it */
    serial_endboot();

    system_state = SYS_STATE_active;

    create_domUs();

    domain_unpause_by_systemcontroller(dom0);

    /*
     * Switch to the dynamically allocated stack for the idle vcpu, since
     * the static one we're running on is about to be freed.
     */
    memcpy(idle_vcpu[0]->arch.cpu_info, get_cpu_info(),
           sizeof(struct cpu_info));
    switch_stack_and_jump(idle_vcpu[0]->arch.cpu_info, init_done);
}

void arch_get_xen_caps(xen_capabilities_info_t *info)
{
    /* Interface name is always xen-3.0-* for Xen-3.x. */
    int major = 3, minor = 0;
    char s[32];

    (*info)[0] = '\0';

#ifdef CONFIG_ARM_64
    snprintf(s, sizeof(s), "xen-%d.%d-aarch64 ", major, minor);
    safe_strcat(*info, s);
#endif
    if ( cpu_has_aarch32 )
    {
        snprintf(s, sizeof(s), "xen-%d.%d-armv7l ", major, minor);
        safe_strcat(*info, s);
    }
}

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 */