// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/setup.c
 *
 * Copyright (C) 1995-2001 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/acpi.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/cache.h>
#include <linux/screen_info.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/fs.h>
#include <linux/panic_notifier.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <linux/efi.h>
#include <linux/psci.h>
#include <linux/sched/task.h>
#include <linux/mm.h>

#include <asm/acpi.h>
#include <asm/fixmap.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/daifflags.h>
#include <asm/elf.h>
#include <asm/cpufeature.h>
#include <asm/cpu_ops.h>
#include <asm/kasan.h>
#include <asm/numa.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>
#include <asm/efi.h>
#include <asm/xen/hypervisor.h>
#include <asm/mmu_context.h>

static int num_standard_resources;
static struct resource *standard_resources;

phys_addr_t __fdt_pointer __initdata;

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_SYSTEM_RAM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_SYSTEM_RAM
	}
};

#define kernel_code mem_res[0]
#define kernel_data mem_res[1]

/*
 * The recorded values of x0 .. x3 upon kernel entry.
 */
u64 __cacheline_aligned boot_args[4];

void __init smp_setup_processor_id(void)
{
	u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
	set_cpu_logical_map(0, mpidr);

	pr_info("Booting Linux on physical CPU 0x%010lx [0x%08x]\n",
		(unsigned long)mpidr, read_cpuid_id());
}

bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
{
	return phys_id == cpu_logical_map(cpu);
}

struct mpidr_hash mpidr_hash;
/**
 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
 *			  level in order to build a linear index from an
 *			  MPIDR value. The resulting algorithm is a
 *			  collision-free hash carried out through shifting
 *			  and ORing.
 */
static void __init smp_build_mpidr_hash(void)
{
	u32 i, affinity, fs[4], bits[4], ls;
	u64 mask = 0;
	/*
	 * Pre-scan the list of MPIDRs and filter out bits that do
	 * not contribute to affinity levels, i.e. they never toggle.
	 */
	for_each_possible_cpu(i)
		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
	pr_debug("mask of set bits %#llx\n", mask);
	/*
	 * Find and stash the last and first bit set at all affinity levels to
	 * check how many bits are required to represent them.
	 */
	for (i = 0; i < 4; i++) {
		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
		/*
		 * Find the MSB and LSB positions to determine how many
		 * bits are required to express the affinity level.
		 */
		ls = fls(affinity);
		fs[i] = affinity ? ffs(affinity) - 1 : 0;
		bits[i] = ls - fs[i];
	}
	/*
	 * An index can be created from the MPIDR_EL1 by isolating the
	 * significant bits at each affinity level and by shifting
	 * them in order to compress the 32-bit value space into a
	 * dense set of values. This is equivalent to hashing
	 * the MPIDR_EL1 through shifting and ORing. It is a collision-free
	 * hash, though not minimal, since some levels might contain a number
	 * of CPUs that is not an exact power of 2 and their bit
	 * representation might contain holes, e.g. MPIDR_EL1[7:0] = {0x2, 0x80}.
	 */
	mpidr_hash.shift_aff[0] = MPIDR_LEVEL_SHIFT(0) + fs[0];
	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_SHIFT(1) + fs[1] - bits[0];
	mpidr_hash.shift_aff[2] = MPIDR_LEVEL_SHIFT(2) + fs[2] -
				  (bits[1] + bits[0]);
	mpidr_hash.shift_aff[3] = MPIDR_LEVEL_SHIFT(3) +
				  fs[3] - (bits[2] + bits[1] + bits[0]);
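	/*
	 * Worked example on a hypothetical system: four CPUs with MPIDRs
	 * 0x000, 0x001, 0x100 and 0x101 give mask = 0x101, so bits[0] =
	 * bits[1] = 1, fs[0] = fs[1] = 0 and shift_aff[] = {0, 7, 14, 30}.
	 * MPIDR 0x101 then hashes to (0x001 >> 0) | (0x100 >> 7) = 0x3,
	 * i.e. the four CPUs map to the dense indices 0..3.
	 */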
	mpidr_hash.mask = mask;
	mpidr_hash.bits = bits[3] + bits[2] + bits[1] + bits[0];
	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] aff3[%u] mask[%#llx] bits[%u]\n",
		mpidr_hash.shift_aff[0],
		mpidr_hash.shift_aff[1],
		mpidr_hash.shift_aff[2],
		mpidr_hash.shift_aff[3],
		mpidr_hash.mask,
		mpidr_hash.bits);
	/*
	 * 4x is an arbitrary value used to warn on a hash table much bigger
	 * than expected on most systems.
	 */
	if (mpidr_hash_size() > 4 * num_possible_cpus())
		pr_warn("Large number of MPIDR hash buckets detected\n");
}

static void *early_fdt_ptr __initdata;

void __init *get_early_fdt_ptr(void)
{
	return early_fdt_ptr;
}

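/*
 * Called from the early boot path, before setup_arch(), so that early
 * consumers such as the KASLR code can parse the device tree. The FDT is
 * mapped writable here and remapped read-only in setup_machine_fdt()
 * once the early fixups are done.
 */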
asmlinkage void __init early_fdt_map(u64 dt_phys)
{
	int fdt_size;

	early_fixmap_init();
	early_fdt_ptr = fixmap_remap_fdt(dt_phys, &fdt_size, PAGE_KERNEL);
}

static void __init setup_machine_fdt(phys_addr_t dt_phys)
{
	int size;
	void *dt_virt = fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL);
	const char *name;

	if (dt_virt)
		memblock_reserve(dt_phys, size);

	if (!dt_virt || !early_init_dt_scan(dt_virt)) {
		pr_crit("\n"
			"Error: invalid device tree blob at physical address %pa (virtual address 0x%p)\n"
			"The dtb must be 8-byte aligned and must not exceed 2 MB in size\n"
			"\nPlease check your bootloader.\n",
			&dt_phys, dt_virt);

		while (true)
			cpu_relax();
	}

	/* Early fixups are done, map the FDT as read-only now */
	fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL_RO);

	name = of_flat_dt_get_machine_name();
	if (!name)
		return;

	pr_info("Machine model: %s\n", name);
	dump_stack_set_arch_desc("%s (DT)", name);
}

static void __init request_standard_resources(void)
{
	struct memblock_region *region;
	struct resource *res;
	unsigned long i = 0;
	size_t res_size;

	kernel_code.start = __pa_symbol(_stext);
	kernel_code.end = __pa_symbol(__init_begin - 1);
	kernel_data.start = __pa_symbol(_sdata);
	kernel_data.end = __pa_symbol(_end - 1);

	num_standard_resources = memblock.memory.cnt;
	res_size = num_standard_resources * sizeof(*standard_resources);
	standard_resources = memblock_alloc(res_size, SMP_CACHE_BYTES);
	if (!standard_resources)
		panic("%s: Failed to allocate %zu bytes\n", __func__, res_size);

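	/*
	 * Example of the resulting /proc/iomem layout (addresses are purely
	 * illustrative):
	 *
	 *   40000000-bfffffff : System RAM
	 *     40210000-40e4ffff : Kernel code
	 *     41040000-412dffff : Kernel data
	 */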
	for_each_mem_region(region) {
		res = &standard_resources[i++];
		if (memblock_is_nomap(region)) {
			res->name = "reserved";
			res->flags = IORESOURCE_MEM;
		} else {
			res->name = "System RAM";
			res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
		}
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
#ifdef CONFIG_KEXEC_CORE
		/* Userspace will find "Crash kernel" region in /proc/iomem. */
		if (crashk_res.end && crashk_res.start >= res->start &&
		    crashk_res.end <= res->end)
			request_resource(res, &crashk_res);
#endif
	}
}

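/*
 * Mark memblock-reserved ranges (e.g. the initrd) as child "reserved"
 * resources of the matching System RAM region, so that they are visible
 * in /proc/iomem.
 */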
static int __init reserve_memblock_reserved_regions(void)
{
	u64 i, j;

	for (i = 0; i < num_standard_resources; ++i) {
		struct resource *mem = &standard_resources[i];
		phys_addr_t r_start, r_end, mem_size = resource_size(mem);

		if (!memblock_is_region_reserved(mem->start, mem_size))
			continue;

		for_each_reserved_mem_range(j, &r_start, &r_end) {
			resource_size_t start, end;

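			/*
			 * Clamp the reserved range to page boundaries and
			 * skip it if it does not intersect this resource.
			 */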
			start = max(PFN_PHYS(PFN_DOWN(r_start)), mem->start);
			end = min(PFN_PHYS(PFN_UP(r_end)) - 1, mem->end);

			if (start > mem->end || end < mem->start)
				continue;

			reserve_region_with_split(mem, start, end, "reserved");
		}
	}

	return 0;
}
arch_initcall(reserve_memblock_reserved_regions);

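/*
 * Logical CPU number to MPIDR (hardware ID) mapping; entries for CPUs
 * that have not been enumerated remain INVALID_HWID.
 */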
u64 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };

u64 cpu_logical_map(unsigned int cpu)
{
	return __cpu_logical_map[cpu];
}

void __init __no_sanitize_address setup_arch(char **cmdline_p)
{
	setup_initial_init_mm(_stext, _etext, _edata, _end);

	*cmdline_p = boot_command_line;

	/*
	 * If we know now that we are going to need KPTI, use non-global
	 * mappings from the start, avoiding the cost of rewriting
	 * everything later.
	 */
	arm64_use_ng_mappings = kaslr_requires_kpti();

	early_fixmap_init();
	early_ioremap_init();

	setup_machine_fdt(__fdt_pointer);

	/*
	 * Initialise the static keys early as they may be enabled by the
	 * cpufeature code and early parameters.
	 */
	jump_label_init();
	parse_early_param();

	/*
	 * Unmask asynchronous aborts and FIQ after bringing up a possible
	 * earlycon, so that any System Errors that occur can actually be
	 * reported.
	 */
	local_daif_restore(DAIF_PROCCTX_NOIRQ);

	/*
	 * TTBR0 is only used for the identity mapping at this stage. Make it
	 * point to the zero page to avoid speculatively fetching new entries.
	 */
	cpu_uninstall_idmap();

	xen_early_init();
	efi_init();

	if (!efi_enabled(EFI_BOOT) && ((u64)_text % MIN_KIMG_ALIGN) != 0)
		pr_warn(FW_BUG "Kernel image misaligned at boot, please fix your bootloader!\n");

	arm64_memblock_init();

	paging_init();

	acpi_table_upgrade();

	/* Parse the ACPI tables for possible boot-time configuration */
	acpi_boot_table_init();

	if (acpi_disabled)
		unflatten_device_tree();

	bootmem_init();

	kasan_init();

	request_standard_resources();

	early_ioremap_reset();

	if (acpi_disabled)
		psci_dt_init();
	else
		psci_acpi_init();

	init_bootcpu_ops();
	smp_init_cpus();
	smp_build_mpidr_hash();

	/* Init percpu seeds for random tags after CPUs are set up. */
	kasan_init_sw_tags();

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
	/*
	 * Make sure init_thread_info.ttbr0 always generates translation
	 * faults in case uaccess_enable() is inadvertently called by the init
	 * thread.
	 */
	init_task.thread_info.ttbr0 = phys_to_ttbr(__pa_symbol(reserved_pg_dir));
#endif

	if (boot_args[1] || boot_args[2] || boot_args[3]) {
		pr_err("WARNING: x1-x3 nonzero in violation of boot protocol:\n"
			"\tx1: %016llx\n\tx2: %016llx\n\tx3: %016llx\n"
			"This indicates a broken bootloader or old kernel\n",
			boot_args[1], boot_args[2], boot_args[3]);
	}
}

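/*
 * Whether a CPU can be taken offline, as reported by its cpu_operations
 * (e.g. PSCI); without CPU hotplug support no CPU can be disabled.
 */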
static inline bool cpu_can_disable(unsigned int cpu)
{
#ifdef CONFIG_HOTPLUG_CPU
	const struct cpu_operations *ops = get_cpu_ops(cpu);

	if (ops && ops->cpu_can_disable)
		return ops->cpu_can_disable(cpu);
#endif
	return false;
}

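/*
 * Register the online NUMA nodes and possible CPUs with the driver core
 * so that they appear under /sys/devices/system.
 */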
static int __init topology_init(void)
{
	int i;

	for_each_online_node(i)
		register_one_node(i);

	for_each_possible_cpu(i) {
		struct cpu *cpu = &per_cpu(cpu_data.cpu, i);

		cpu->hotpluggable = cpu_can_disable(i);
		register_cpu(cpu, i);
	}

	return 0;
}
subsys_initcall(topology_init);

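/* Print the KASLR offset so that panic output can be de-randomized. */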
static void dump_kernel_offset(void)
{
	const unsigned long offset = kaslr_offset();

	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && offset > 0) {
		pr_emerg("Kernel Offset: 0x%lx from 0x%lx\n",
			 offset, KIMAGE_VADDR);
		pr_emerg("PHYS_OFFSET: 0x%llx\n", PHYS_OFFSET);
	} else {
		pr_emerg("Kernel Offset: disabled\n");
	}
}

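/*
 * On panic, dump the information that is most useful for decoding the
 * crash: the KASLR offset, the CPU features and the memory limit.
 */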
static int arm64_panic_block_dump(struct notifier_block *self,
				  unsigned long v, void *p)
{
	dump_kernel_offset();
	dump_cpu_features();
	dump_mem_limit();
	return 0;
}

static struct notifier_block arm64_panic_block = {
	.notifier_call = arm64_panic_block_dump
};

static int __init register_arm64_panic_block(void)
{
	atomic_notifier_chain_register(&panic_notifier_list,
				       &arm64_panic_block);
	return 0;
}
device_initcall(register_arm64_panic_block);
