// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Google LLC
 * Author: Quentin Perret <qperret@google.com>
 */

#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h>
#include <asm/stage2_pgtable.h>

#include <hyp/fault.h>

#include <nvhe/gfp.h>
#include <nvhe/memory.h>
#include <nvhe/mem_protect.h>
#include <nvhe/mm.h>

#define KVM_HOST_S2_FLAGS (KVM_PGTABLE_S2_NOFWB | KVM_PGTABLE_S2_IDMAP)

extern unsigned long hyp_nr_cpus;
struct host_kvm host_kvm;

static struct hyp_pool host_s2_pool;

const u8 pkvm_hyp_id = 1;

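/*
 * Allocation and refcounting callbacks backing the host stage-2 page-table.
 * They all draw from host_s2_pool and are wired into host_kvm.mm_ops by
 * prepare_s2_pool().
 */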
static void *host_s2_zalloc_pages_exact(size_t size)
{
	void *addr = hyp_alloc_pages(&host_s2_pool, get_order(size));

	hyp_split_page(hyp_virt_to_page(addr));

	/*
	 * The size of concatenated PGDs is always a power-of-two multiple of
	 * PAGE_SIZE, so there should be no need to free any of the tail pages
	 * to make the allocation exact.
	 */
	WARN_ON(size != (PAGE_SIZE << get_order(size)));

	return addr;
}

static void *host_s2_zalloc_page(void *pool)
{
	return hyp_alloc_pages(pool, 0);
}

static void host_s2_get_page(void *addr)
{
	hyp_get_page(&host_s2_pool, addr);
}

static void host_s2_put_page(void *addr)
{
	hyp_put_page(&host_s2_pool, addr);
}

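/*
 * Initialise host_s2_pool with the pages donated for the host stage-2
 * page-table and install the matching kvm_pgtable_mm_ops.
 */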
static int prepare_s2_pool(void *pgt_pool_base)
{
	unsigned long nr_pages, pfn;
	int ret;

	pfn = hyp_virt_to_pfn(pgt_pool_base);
	nr_pages = host_s2_pgtable_pages();
	ret = hyp_pool_init(&host_s2_pool, pfn, nr_pages, 0);
	if (ret)
		return ret;

	host_kvm.mm_ops = (struct kvm_pgtable_mm_ops) {
		.zalloc_pages_exact = host_s2_zalloc_pages_exact,
		.zalloc_page = host_s2_zalloc_page,
		.phys_to_virt = hyp_phys_to_virt,
		.virt_to_phys = hyp_virt_to_phys,
		.page_count = hyp_page_count,
		.get_page = host_s2_get_page,
		.put_page = host_s2_put_page,
	};

	return 0;
}

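/*
 * Derive the host's VTCR from the sanitised ID registers. As the host stage-2
 * is an identity map, the IPA size is taken directly from the supported
 * physical address range.
 */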
static void prepare_host_vtcr(void)
{
	u32 parange, phys_shift;

	/* The host stage 2 is id-mapped, so use parange for T0SZ */
	parange = kvm_get_parange(id_aa64mmfr0_el1_sys_val);
	phys_shift = id_aa64mmfr0_parange_to_phys_shift(parange);

	host_kvm.arch.vtcr = kvm_get_vtcr(id_aa64mmfr0_el1_sys_val,
					  id_aa64mmfr1_el1_sys_val, phys_shift);
}

static bool host_stage2_force_pte_cb(u64 addr, u64 end, enum kvm_pgtable_prot prot);

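/*
 * Prepare (but do not enable) the host stage-2: compute the VTCR, set up the
 * page-table allocator, create an empty page-table and populate the host's
 * kvm_s2_mmu with VMID 0.
 */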
int kvm_host_prepare_stage2(void *pgt_pool_base)
{
	struct kvm_s2_mmu *mmu = &host_kvm.arch.mmu;
	int ret;

	prepare_host_vtcr();
	hyp_spin_lock_init(&host_kvm.lock);

	ret = prepare_s2_pool(pgt_pool_base);
	if (ret)
		return ret;

	ret = __kvm_pgtable_stage2_init(&host_kvm.pgt, &host_kvm.arch,
					&host_kvm.mm_ops, KVM_HOST_S2_FLAGS,
					host_stage2_force_pte_cb);
	if (ret)
		return ret;

	mmu->pgd_phys = __hyp_pa(host_kvm.pgt.pgd);
	mmu->arch = &host_kvm.arch;
	mmu->pgt = &host_kvm.pgt;
	WRITE_ONCE(mmu->vmid.vmid_gen, 0);
	WRITE_ONCE(mmu->vmid.vmid, 0);

	return 0;
}

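/*
 * Enable the host stage-2 on the calling CPU: program VTTBR/VTCR, set
 * HCR_EL2.VM and invalidate potentially stale TLB entries. Fails with -EPERM
 * if the stage-2 has already been enabled for this CPU.
 */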
int __pkvm_prot_finalize(void)
{
	struct kvm_s2_mmu *mmu = &host_kvm.arch.mmu;
	struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params);

	if (params->hcr_el2 & HCR_VM)
		return -EPERM;

	params->vttbr = kvm_get_vttbr(mmu);
	params->vtcr = host_kvm.arch.vtcr;
	params->hcr_el2 |= HCR_VM;
	kvm_flush_dcache_to_poc(params, sizeof(*params));

	write_sysreg(params->hcr_el2, hcr_el2);
	__load_stage2(&host_kvm.arch.mmu, &host_kvm.arch);

	/*
	 * Make sure to have an ISB before the TLB maintenance below but only
	 * when __load_stage2() doesn't include one already.
	 */
	asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT));

	/* Invalidate stale HCR bits that may be cached in TLBs */
	__tlbi(vmalls12e1);
	dsb(nsh);
	isb();

	return 0;
}

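/*
 * Tear down all MMIO (non-memory) mappings so that their page-table pages can
 * be recycled when the pool runs dry, see host_stage2_try().
 */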
static int host_stage2_unmap_dev_all(void)
{
	struct kvm_pgtable *pgt = &host_kvm.pgt;
	struct memblock_region *reg;
	u64 addr = 0;
	int i, ret;

	/* Unmap all non-memory regions to recycle the pages */
	for (i = 0; i < hyp_memblock_nr; i++, addr = reg->base + reg->size) {
		reg = &hyp_memory[i];
		ret = kvm_pgtable_stage2_unmap(pgt, addr, reg->base - addr);
		if (ret)
			return ret;
	}
	return kvm_pgtable_stage2_unmap(pgt, addr, BIT(pgt->ia_bits) - addr);
}

struct kvm_mem_range {
	u64 start;
	u64 end;
};

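/*
 * Binary-search the sorted hyp_memory array for @addr. On a hit, *range is set
 * to the enclosing memblock and true is returned; otherwise *range is narrowed
 * to the non-memory gap surrounding @addr and false is returned.
 */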
static bool find_mem_range(phys_addr_t addr, struct kvm_mem_range *range)
{
	int cur, left = 0, right = hyp_memblock_nr;
	struct memblock_region *reg;
	phys_addr_t end;

	range->start = 0;
	range->end = ULONG_MAX;

	/* The list of memblock regions is sorted, binary search it */
	while (left < right) {
		cur = (left + right) >> 1;
		reg = &hyp_memory[cur];
		end = reg->base + reg->size;
		if (addr < reg->base) {
			right = cur;
			range->end = reg->base;
		} else if (addr >= end) {
			left = cur + 1;
			range->start = end;
		} else {
			range->start = reg->base;
			range->end = end;
			return true;
		}
	}

	return false;
}

bool addr_is_memory(phys_addr_t phys)
{
	struct kvm_mem_range range;

	return find_mem_range(phys, &range);
}

static bool is_in_mem_range(u64 addr, struct kvm_mem_range *range)
{
	return range->start <= addr && addr < range->end;
}

static bool range_is_memory(u64 start, u64 end)
{
	struct kvm_mem_range r;

	if (!find_mem_range(start, &r))
		return false;

	return is_in_mem_range(end - 1, &r);
}

static inline int __host_stage2_idmap(u64 start, u64 end,
				      enum kvm_pgtable_prot prot)
{
	return kvm_pgtable_stage2_map(&host_kvm.pgt, start, end - start, start,
				      prot, &host_s2_pool);
}

/*
 * The pool has been provided with enough pages to cover all of memory with
 * page granularity, but it is difficult to know how much of the MMIO range
 * we will need to cover upfront, so we may need to 'recycle' the pages if we
 * run out.
 */
#define host_stage2_try(fn, ...)				\
	({							\
		int __ret;					\
		hyp_assert_lock_held(&host_kvm.lock);		\
		__ret = fn(__VA_ARGS__);			\
		if (__ret == -ENOMEM) {				\
			__ret = host_stage2_unmap_dev_all();	\
			if (!__ret)				\
				__ret = fn(__VA_ARGS__);	\
		}						\
		__ret;						\
	})

static inline bool range_included(struct kvm_mem_range *child,
				  struct kvm_mem_range *parent)
{
	return parent->start <= child->start && child->end <= parent->end;
}

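/*
 * Narrow *range to a granule-aligned block around @addr that can be mapped in
 * a single block mapping while still fitting within the original range.
 * Returns -EAGAIN if a valid mapping already exists at @addr (e.g. installed
 * by another CPU), or -EPERM if the invalid PTE carries an ownership
 * annotation and must not be mapped back into the host.
 */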
static int host_stage2_adjust_range(u64 addr, struct kvm_mem_range *range)
{
	struct kvm_mem_range cur;
	kvm_pte_t pte;
	u32 level;
	int ret;

	hyp_assert_lock_held(&host_kvm.lock);
	ret = kvm_pgtable_get_leaf(&host_kvm.pgt, addr, &pte, &level);
	if (ret)
		return ret;

	if (kvm_pte_valid(pte))
		return -EAGAIN;

	if (pte)
		return -EPERM;

	do {
		u64 granule = kvm_granule_size(level);
		cur.start = ALIGN_DOWN(addr, granule);
		cur.end = cur.start + granule;
		level++;
	} while ((level < KVM_PGTABLE_MAX_LEVELS) &&
		 !(kvm_level_supports_block_mapping(level) &&
		   range_included(&cur, range)));

	*range = cur;

	return 0;
}

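/* Identity-map [addr, addr + size) with @prot; the caller must hold host_kvm.lock. */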
int host_stage2_idmap_locked(phys_addr_t addr, u64 size,
			     enum kvm_pgtable_prot prot)
{
	hyp_assert_lock_held(&host_kvm.lock);

	return host_stage2_try(__host_stage2_idmap, addr, addr + size, prot);
}

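/* Mark [addr, addr + size) as owned by @owner_id; the caller must hold host_kvm.lock. */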
int host_stage2_set_owner_locked(phys_addr_t addr, u64 size, u8 owner_id)
{
	hyp_assert_lock_held(&host_kvm.lock);

	return host_stage2_try(kvm_pgtable_stage2_set_owner, &host_kvm.pgt,
			       addr, size, &host_s2_pool, owner_id);
}

static bool host_stage2_force_pte_cb(u64 addr, u64 end, enum kvm_pgtable_prot prot)
{
	/*
	 * Block mappings must be used with care in the host stage-2 as a
	 * kvm_pgtable_stage2_map() operation targeting a page in the range of
	 * an existing block will delete the block under the assumption that
	 * mappings in the rest of the block range can always be rebuilt lazily.
	 * That assumption is correct for the host stage-2 with RWX mappings
	 * targeting memory or RW mappings targeting MMIO ranges (see
	 * host_stage2_idmap() below which implements some of the host memory
	 * abort logic). However, this is not safe for any other mappings where
	 * the host stage-2 page-table is in fact the only place where this
	 * state is stored. In all those cases, it is safer to use page-level
	 * mappings, so that the state is not lost as a side-effect of
	 * kvm_pgtable_stage2_map().
	 */
	if (range_is_memory(addr, end))
		return prot != PKVM_HOST_MEM_PROT;
	else
		return prot != PKVM_HOST_MMIO_PROT;
}

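/*
 * Lazily identity-map the region around the faulting address with the default
 * memory or MMIO permissions, covering the largest block that the memblock
 * layout and the existing page-table entries allow.
 */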
static int host_stage2_idmap(u64 addr)
{
	struct kvm_mem_range range;
	bool is_memory = find_mem_range(addr, &range);
	enum kvm_pgtable_prot prot;
	int ret;

	prot = is_memory ? PKVM_HOST_MEM_PROT : PKVM_HOST_MMIO_PROT;

	hyp_spin_lock(&host_kvm.lock);
	ret = host_stage2_adjust_range(addr, &range);
	if (ret)
		goto unlock;

	ret = host_stage2_idmap_locked(range.start, range.end - range.start, prot);
unlock:
	hyp_spin_unlock(&host_kvm.lock);

	return ret;
}

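/* Check that @prot contains all of @required and none of @denied. */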
static inline bool check_prot(enum kvm_pgtable_prot prot,
			      enum kvm_pgtable_prot required,
			      enum kvm_pgtable_prot denied)
{
	return (prot & (required | denied)) == required;
}

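/*
 * Share the page at @pfn with the hypervisor: mark it SHARED_OWNED in the host
 * stage-2 and map it SHARED_BORROWED in the hyp stage-1. Sharing the same page
 * twice is tolerated provided both page-tables already agree on its state.
 */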
int __pkvm_host_share_hyp(u64 pfn)
{
	phys_addr_t addr = hyp_pfn_to_phys(pfn);
	enum kvm_pgtable_prot prot, cur;
	void *virt = __hyp_va(addr);
	enum pkvm_page_state state;
	kvm_pte_t pte;
	int ret;

	if (!addr_is_memory(addr))
		return -EINVAL;

	hyp_spin_lock(&host_kvm.lock);
	hyp_spin_lock(&pkvm_pgd_lock);

	ret = kvm_pgtable_get_leaf(&host_kvm.pgt, addr, &pte, NULL);
	if (ret)
		goto unlock;
	if (!pte)
		goto map_shared;

	/*
	 * Check attributes in the host stage-2 PTE. We need the page to be:
	 *  - mapped RWX as we're sharing memory;
	 *  - not borrowed, as that implies absence of ownership.
	 * Otherwise, we can't let it go through.
	 */
	cur = kvm_pgtable_stage2_pte_prot(pte);
	prot = pkvm_mkstate(0, PKVM_PAGE_SHARED_BORROWED);
	if (!check_prot(cur, PKVM_HOST_MEM_PROT, prot)) {
		ret = -EPERM;
		goto unlock;
	}

	state = pkvm_getstate(cur);
	if (state == PKVM_PAGE_OWNED)
		goto map_shared;

	/*
	 * Tolerate double-sharing the same page, but this requires
	 * cross-checking the hypervisor stage-1.
	 */
	if (state != PKVM_PAGE_SHARED_OWNED) {
		ret = -EPERM;
		goto unlock;
	}

	ret = kvm_pgtable_get_leaf(&pkvm_pgtable, (u64)virt, &pte, NULL);
	if (ret)
		goto unlock;

	/*
	 * If the page has been shared with the hypervisor, it must already be
	 * mapped as SHARED_BORROWED in its stage-1.
	 */
	cur = kvm_pgtable_hyp_pte_prot(pte);
	prot = pkvm_mkstate(PAGE_HYP, PKVM_PAGE_SHARED_BORROWED);
	if (!check_prot(cur, prot, ~prot))
		ret = -EPERM;
	goto unlock;

map_shared:
	/*
	 * If the page is not yet shared, adjust mappings in both page-tables
	 * while both locks are held.
	 */
	prot = pkvm_mkstate(PAGE_HYP, PKVM_PAGE_SHARED_BORROWED);
	ret = pkvm_create_mappings_locked(virt, virt + PAGE_SIZE, prot);
	BUG_ON(ret);

	prot = pkvm_mkstate(PKVM_HOST_MEM_PROT, PKVM_PAGE_SHARED_OWNED);
	ret = host_stage2_idmap_locked(addr, PAGE_SIZE, prot);
	BUG_ON(ret);

unlock:
	hyp_spin_unlock(&pkvm_pgd_lock);
	hyp_spin_unlock(&host_kvm.lock);

	return ret;
}

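/*
 * Host stage-2 fault handler: read the faulting IPA from HPFAR_EL2 and lazily
 * install the missing identity mapping. Any failure other than -EAGAIN
 * (typically a race with another CPU installing the mapping first) is fatal.
 */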
void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt)
{
	struct kvm_vcpu_fault_info fault;
	u64 esr, addr;
	int ret = 0;

	esr = read_sysreg_el2(SYS_ESR);
	BUG_ON(!__get_fault_info(esr, &fault));

	addr = (fault.hpfar_el2 & HPFAR_MASK) << 8;
	ret = host_stage2_idmap(addr);
	BUG_ON(ret && ret != -EAGAIN);
}