// SPDX-License-Identifier: GPL-2.0
#include <linux/kasan.h>
#include <linux/sched/task.h>
#include <linux/memblock.h>
#include <linux/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/kasan.h>
#include <asm/mem_detect.h>
#include <asm/processor.h>
#include <asm/sclp.h>
#include <asm/facility.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/uv.h>

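/*
 * Cursors for the two early bump allocators: 1MB segments for shadow
 * memory (when EDAT is available) are handed out top-down from
 * segment_pos towards segment_low, page table pages top-down from
 * pgalloc_pos towards pgalloc_low. pgalloc_freeable marks the start of
 * the allocations made for the early identity mapping, which can be
 * freed again later.
 */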
static unsigned long segment_pos __initdata;
static unsigned long segment_low __initdata;
static unsigned long pgalloc_pos __initdata;
static unsigned long pgalloc_low __initdata;
static unsigned long pgalloc_freeable __initdata;
static bool has_edat __initdata;
static bool has_nx __initdata;

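/* Translate an address to the address of its kasan shadow */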
#define __sha(x) ((unsigned long)kasan_mem_to_shadow((void *)x))

static pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);

static void __init kasan_early_panic(const char *reason)
{
	sclp_early_printk("The Linux kernel failed to boot with the KernelAddressSanitizer:\n");
	sclp_early_printk(reason);
	disabled_wait();
}

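/*
 * Hand out one 1MB segment from the top-down segment allocator. These
 * segments back shadow memory with large pages when EDAT is available.
 */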
static void * __init kasan_early_alloc_segment(void)
{
	segment_pos -= _SEGMENT_SIZE;

	if (segment_pos < segment_low)
		kasan_early_panic("out of memory during initialisation\n");

	return (void *)segment_pos;
}

static void * __init kasan_early_alloc_pages(unsigned int order)
{
	pgalloc_pos -= (PAGE_SIZE << order);

	if (pgalloc_pos < pgalloc_low)
		kasan_early_panic("out of memory during initialisation\n");

	return (void *)pgalloc_pos;
}

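/*
 * Allocate a crst table (region or segment table) and preinitialize
 * all of its entries with the given empty entry value.
 */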
static void * __init kasan_early_crst_alloc(unsigned long val)
{
	unsigned long *table;

	table = kasan_early_alloc_pages(CRST_ALLOC_ORDER);
	if (table)
		crst_table_init(table, val);
	return table;
}

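/*
 * Page tables on s390 are 2KB, i.e. half of a 4KB page, so each
 * allocated page yields two pte tables: hand out one half and keep
 * the other half for the next call.
 */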
static pte_t * __init kasan_early_pte_alloc(void)
{
	static void *pte_leftover;
	pte_t *pte;

	BUILD_BUG_ON(_PAGE_TABLE_SIZE * 2 != PAGE_SIZE);

	if (!pte_leftover) {
		pte_leftover = kasan_early_alloc_pages(0);
		pte = pte_leftover + _PAGE_TABLE_SIZE;
	} else {
		pte = pte_leftover;
		pte_leftover = NULL;
	}
	memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
	return pte;
}

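/*
 * POPULATE_ONE2ONE:	 map virtual addresses to the identical physical ones
 * POPULATE_MAP:	 back shadow memory with freshly allocated pages/segments
 * POPULATE_ZERO_SHADOW: let everything point at the shared zero shadow
 *			 page/tables (such memory is not tracked by kasan)
 * POPULATE_SHALLOW:	 only allocate the top level (p4d) tables, lower
 *			 levels are populated on demand later
 */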
enum populate_mode {
	POPULATE_ONE2ONE,
	POPULATE_MAP,
	POPULATE_ZERO_SHADOW,
	POPULATE_SHALLOW
};
static void __init kasan_early_pgtable_populate(unsigned long address,
						unsigned long end,
						enum populate_mode mode)
{
	unsigned long pgt_prot_zero, pgt_prot, sgt_prot;
	pgd_t *pg_dir;
	p4d_t *p4_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;

	pgt_prot_zero = pgprot_val(PAGE_KERNEL_RO);
	if (!has_nx)
		pgt_prot_zero &= ~_PAGE_NOEXEC;
	pgt_prot = pgprot_val(PAGE_KERNEL);
	sgt_prot = pgprot_val(SEGMENT_KERNEL);
	if (!has_nx || mode == POPULATE_ONE2ONE) {
		pgt_prot &= ~_PAGE_NOEXEC;
		sgt_prot &= ~_SEGMENT_ENTRY_NOEXEC;
	}

	/*
	 * The first 1MB of 1:1 mapping is mapped with 4KB pages
	 */
	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			if (mode == POPULATE_ZERO_SHADOW &&
			    IS_ALIGNED(address, PGDIR_SIZE) &&
			    end - address >= PGDIR_SIZE) {
				pgd_populate(&init_mm, pg_dir,
						kasan_early_shadow_p4d);
				address = (address + PGDIR_SIZE) & PGDIR_MASK;
				continue;
			}
			p4_dir = kasan_early_crst_alloc(_REGION2_ENTRY_EMPTY);
			pgd_populate(&init_mm, pg_dir, p4_dir);
		}

		if (mode == POPULATE_SHALLOW) {
			address = (address + P4D_SIZE) & P4D_MASK;
			continue;
		}

		p4_dir = p4d_offset(pg_dir, address);
		if (p4d_none(*p4_dir)) {
			if (mode == POPULATE_ZERO_SHADOW &&
			    IS_ALIGNED(address, P4D_SIZE) &&
			    end - address >= P4D_SIZE) {
				p4d_populate(&init_mm, p4_dir,
						kasan_early_shadow_pud);
				address = (address + P4D_SIZE) & P4D_MASK;
				continue;
			}
			pu_dir = kasan_early_crst_alloc(_REGION3_ENTRY_EMPTY);
			p4d_populate(&init_mm, p4_dir, pu_dir);
		}

		pu_dir = pud_offset(p4_dir, address);
		if (pud_none(*pu_dir)) {
			if (mode == POPULATE_ZERO_SHADOW &&
			    IS_ALIGNED(address, PUD_SIZE) &&
			    end - address >= PUD_SIZE) {
				pud_populate(&init_mm, pu_dir,
						kasan_early_shadow_pmd);
				address = (address + PUD_SIZE) & PUD_MASK;
				continue;
			}
			pm_dir = kasan_early_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			pud_populate(&init_mm, pu_dir, pm_dir);
		}

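		/*
		 * Whole segments can be mapped at the pmd level: either with
		 * the shared zero shadow pte table, or, given EDAT, with a
		 * 1MB large page.
		 */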
		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			if (IS_ALIGNED(address, PMD_SIZE) &&
			    end - address >= PMD_SIZE) {
				if (mode == POPULATE_ZERO_SHADOW) {
					pmd_populate(&init_mm, pm_dir, kasan_early_shadow_pte);
					address = (address + PMD_SIZE) & PMD_MASK;
					continue;
				} else if (has_edat && address) {
					void *page;

					if (mode == POPULATE_ONE2ONE) {
						page = (void *)address;
					} else {
						page = kasan_early_alloc_segment();
						memset(page, 0, _SEGMENT_SIZE);
					}
					pmd_val(*pm_dir) = __pa(page) | sgt_prot;
					address = (address + PMD_SIZE) & PMD_MASK;
					continue;
				}
			}
			pt_dir = kasan_early_pte_alloc();
			pmd_populate(&init_mm, pm_dir, pt_dir);
		} else if (pmd_large(*pm_dir)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}

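		/*
		 * 4KB granularity: identity-map the page itself, back it
		 * with a freshly allocated page, or point it at the shared
		 * zero shadow page.
		 */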
		pt_dir = pte_offset_kernel(pm_dir, address);
		if (pte_none(*pt_dir)) {
			void *page;

			switch (mode) {
			case POPULATE_ONE2ONE:
				page = (void *)address;
				pte_val(*pt_dir) = __pa(page) | pgt_prot;
				break;
			case POPULATE_MAP:
				page = kasan_early_alloc_pages(0);
				memset(page, 0, PAGE_SIZE);
				pte_val(*pt_dir) = __pa(page) | pgt_prot;
				break;
			case POPULATE_ZERO_SHADOW:
				page = kasan_early_shadow_page;
				pte_val(*pt_dir) = __pa(page) | pgt_prot_zero;
				break;
			case POPULATE_SHALLOW:
				/* should never happen */
				break;
			}
		}
		address += PAGE_SIZE;
	}
}

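/*
 * Make pgd the current kernel and user address space by loading its
 * ASCE into control registers 1, 7 and 13.
 */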
static void __init kasan_set_pgd(pgd_t *pgd, unsigned long asce_type)
{
	unsigned long asce_bits;

	asce_bits = asce_type | _ASCE_TABLE_LENGTH;
	S390_lowcore.kernel_asce = (__pa(pgd) & PAGE_MASK) | asce_bits;
	S390_lowcore.user_asce = S390_lowcore.kernel_asce;

	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
	__ctl_load(S390_lowcore.kernel_asce, 7, 7);
	__ctl_load(S390_lowcore.kernel_asce, 13, 13);
}

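/* Turn on DAT with the home address space as the active space */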
static void __init kasan_enable_dat(void)
{
	psw_t psw;

	psw.mask = __extract_psw();
	psw_bits(psw).dat = 1;
	psw_bits(psw).as = PSW_BITS_AS_HOME;
	__load_psw_mask(psw.mask);
}

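/*
 * Facility 8 is EDAT1 (large pages), facility 130 is the
 * instruction-execution-protection facility behind _PAGE_NOEXEC;
 * flip the matching enablement bits in control register 0.
 */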
static void __init kasan_early_detect_facilities(void)
{
	if (test_facility(8)) {
		has_edat = true;
		__ctl_set_bit(0, 23);
	}
	if (!noexec_disabled && test_facility(130)) {
		has_nx = true;
		__ctl_set_bit(0, 20);
	}
}

void __init kasan_early_init(void)
{
	unsigned long shadow_alloc_size;
	unsigned long initrd_end;
	unsigned long memsize;
	unsigned long pgt_prot = pgprot_val(PAGE_KERNEL_RO);
	pte_t pte_z;
	pmd_t pmd_z = __pmd(__pa(kasan_early_shadow_pte) | _SEGMENT_ENTRY);
	pud_t pud_z = __pud(__pa(kasan_early_shadow_pmd) | _REGION3_ENTRY);
	p4d_t p4d_z = __p4d(__pa(kasan_early_shadow_pud) | _REGION2_ENTRY);

	kasan_early_detect_facilities();
	if (!has_nx)
		pgt_prot &= ~_PAGE_NOEXEC;
	pte_z = __pte(__pa(kasan_early_shadow_page) | pgt_prot);

	memsize = get_mem_detect_end();
	if (!memsize)
		kasan_early_panic("cannot detect physical memory size\n");
	/*
	 * Kasan currently supports standby memory, but only if it follows
	 * online memory (the default allocation), i.e. no memory holes.
	 * - memsize represents the end of online memory
	 * - ident_map_size represents online + standby memory, with memory
	 *   limits taken into account
	 * Kasan maps "memsize" right away.
	 * [0, memsize]			- as identity mapping
	 * [__sha(0), __sha(memsize)]	- shadow memory for identity mapping
	 * The rest, [memsize, ident_map_size], if memsize < ident_map_size,
	 * could be mapped/unmapped dynamically later during memory hotplug.
	 */
	memsize = min(memsize, ident_map_size);

	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, P4D_SIZE));
	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, P4D_SIZE));
	crst_table_init((unsigned long *)early_pg_dir, _REGION2_ENTRY_EMPTY);

	/* init kasan zero shadow */
	crst_table_init((unsigned long *)kasan_early_shadow_p4d,
				p4d_val(p4d_z));
	crst_table_init((unsigned long *)kasan_early_shadow_pud,
				pud_val(pud_z));
	crst_table_init((unsigned long *)kasan_early_shadow_pmd,
				pmd_val(pmd_z));
	memset64((u64 *)kasan_early_shadow_pte, pte_val(pte_z), PTRS_PER_PTE);

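	/* each shadow byte covers 8 bytes of memory, i.e. shadow is 1/8 of ram */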
	shadow_alloc_size = memsize >> KASAN_SHADOW_SCALE_SHIFT;
	pgalloc_low = round_up((unsigned long)_end, _SEGMENT_SIZE);
	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD)) {
		initrd_end =
		    round_up(initrd_data.start + initrd_data.size, _SEGMENT_SIZE);
		pgalloc_low = max(pgalloc_low, initrd_end);
	}

	if (pgalloc_low + shadow_alloc_size > memsize)
		kasan_early_panic("out of memory during initialisation\n");

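	/*
	 * With EDAT the shadow segments are carved from the top of memory,
	 * with page tables allocated right below them; without EDAT all
	 * allocations come from the page allocator starting at the top.
	 */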
	if (has_edat) {
		segment_pos = round_down(memsize, _SEGMENT_SIZE);
		segment_low = segment_pos - shadow_alloc_size;
		pgalloc_pos = segment_low;
	} else {
		pgalloc_pos = memsize;
	}
	init_mm.pgd = early_pg_dir;
	/*
	 * Current memory layout:
	 * +- 0 -------------+	   +- shadow start -+
	 * | 1:1 ram mapping |	  /| 1/8 ram	    |
	 * |		     |	 / |		    |
	 * +- end of ram ----+	/  +----------------+
	 * | ... gap ...     | /   |		    |
	 * |		     |/    |	kasan	    |
	 * +- shadow start --+	   |	zero	    |
	 * | 1/8 addr space  |	   |	page	    |
	 * +- shadow end    -+	   |	mapping	    |
	 * | ... gap ...     |\    |  (untracked)   |
	 * +- vmalloc area  -+ \   |		    |
	 * | vmalloc_size    |	\  |		    |
	 * +- modules vaddr -+	 \ +----------------+
	 * | 2Gb	     |	  \|	  unmapped  | allocated per module
	 * +-----------------+	   +- shadow end ---+
	 *
	 * Current memory layout (KASAN_VMALLOC):
	 * +- 0 -------------+	   +- shadow start -+
	 * | 1:1 ram mapping |	  /| 1/8 ram	    |
	 * |		     |	 / |		    |
	 * +- end of ram ----+	/  +----------------+
	 * | ... gap ...     | /   |	kasan	    |
	 * |		     |/    |	zero	    |
	 * +- shadow start --+	   |	page	    |
	 * | 1/8 addr space  |	   |	mapping     |
	 * +- shadow end    -+	   |  (untracked)   |
	 * | ... gap ...     |\    |		    |
	 * +- vmalloc area  -+ \   +- vmalloc area -+
	 * | vmalloc_size    |	\  |shallow populate|
	 * +- modules vaddr -+	 \ +- modules area -+
	 * | 2Gb	     |	  \|shallow populate|
	 * +-----------------+	   +- shadow end ---+
	 */
	/* populate kasan shadow (for identity mapping and zero page mapping) */
	kasan_early_pgtable_populate(__sha(0), __sha(memsize), POPULATE_MAP);
	if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
		/* shallowly populate kasan shadow for vmalloc and modules */
		kasan_early_pgtable_populate(__sha(VMALLOC_START), __sha(MODULES_END),
					     POPULATE_SHALLOW);
	}
	/* populate kasan shadow for untracked memory */
	kasan_early_pgtable_populate(__sha(ident_map_size),
				     IS_ENABLED(CONFIG_KASAN_VMALLOC) ?
						   __sha(VMALLOC_START) :
						   __sha(MODULES_VADDR),
				     POPULATE_ZERO_SHADOW);
	kasan_early_pgtable_populate(__sha(MODULES_END), __sha(_REGION1_SIZE),
				     POPULATE_ZERO_SHADOW);
	/* memory allocated for identity mapping page tables will be freed later */
	pgalloc_freeable = pgalloc_pos;
	/* populate identity mapping */
	kasan_early_pgtable_populate(0, memsize, POPULATE_ONE2ONE);
	kasan_set_pgd(early_pg_dir, _ASCE_TYPE_REGION2);
	kasan_enable_dat();
	/* enable kasan */
	init_task.kasan_depth = 0;
	memblock_reserve(pgalloc_pos, memsize - pgalloc_pos);
	sclp_early_printk("KernelAddressSanitizer initialized\n");
}

void __init kasan_copy_shadow_mapping(void)
{
	/*
	 * At this point we are still running on the early page tables in
	 * early_pg_dir, while swapper_pg_dir has just been initialized with
	 * the identity mapping. Carry the shadow memory region over from
	 * early_pg_dir to swapper_pg_dir.
	 */

	pgd_t *pg_dir_src;
	pgd_t *pg_dir_dst;
	p4d_t *p4_dir_src;
	p4d_t *p4_dir_dst;

	pg_dir_src = pgd_offset_raw(early_pg_dir, KASAN_SHADOW_START);
	pg_dir_dst = pgd_offset_raw(init_mm.pgd, KASAN_SHADOW_START);
	p4_dir_src = p4d_offset(pg_dir_src, KASAN_SHADOW_START);
	p4_dir_dst = p4d_offset(pg_dir_dst, KASAN_SHADOW_START);
	memcpy(p4_dir_dst, p4_dir_src,
	       (KASAN_SHADOW_SIZE >> P4D_SHIFT) * sizeof(p4d_t));
}

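/*
 * The early identity mapping is superseded by the one set up in
 * swapper_pg_dir; return its page tables to memblock.
 */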
void __init kasan_free_early_identity(void)
{
	memblock_phys_free(pgalloc_pos, pgalloc_freeable - pgalloc_pos);
}