/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 * Copyright SUSE Linux Products GmbH 2010
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#ifndef __ASM_KVM_BOOK3S_64_H__
#define __ASM_KVM_BOOK3S_64_H__

#include <linux/string.h>
#include <asm/bitops.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/cpu_has_feature.h>
#include <asm/ppc-opcode.h>
#include <asm/pte-walk.h>

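/*
 * kvmhv_on_pseries() is true when this kernel itself runs without hypervisor
 * mode (CPU_FTR_HVMODE clear), i.e. KVM-HV is operating as a guest on a
 * pseries platform and must go through the host hypervisor for privileged
 * operations rather than acting as the bare-metal hypervisor.
 */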
#ifdef CONFIG_PPC_PSERIES
static inline bool kvmhv_on_pseries(void)
{
	return !cpu_has_feature(CPU_FTR_HVMODE);
}
#else
static inline bool kvmhv_on_pseries(void)
{
	return false;
}
#endif

/*
 * Structure for a nested guest, that is, for a guest that is managed by
 * one of our guests.
 */
struct kvm_nested_guest {
	struct kvm *l1_host;		/* L1 VM that owns this nested guest */
	int l1_lpid;			/* lpid L1 guest thinks this guest is */
	int shadow_lpid;		/* real lpid of this nested guest */
	pgd_t *shadow_pgtable;		/* our page table for this guest */
	u64 l1_gr_to_hr;		/* L1's addr of part'n-scoped table */
	u64 process_table;		/* process table entry for this guest */
	u64 hfscr;			/* HFSCR that the L1 requested for this nested guest */
	long refcnt;			/* number of pointers to this struct */
	struct mutex tlb_lock;		/* serialize page faults and tlbies */
	struct kvm_nested_guest *next;
	cpumask_t need_tlb_flush;
	cpumask_t cpu_in_guest;
	short prev_cpu[NR_CPUS];
	u8 radix;			/* is this nested guest radix */
};

/*
 * We define a nested rmap entry as a single 64-bit quantity:
 * 0xFFF0000000000000	12-bit lpid field
 * 0x000FFFFFFFFFF000	40-bit guest 4k page frame number
 * 0x0000000000000001	1-bit  single entry flag
 */
#define RMAP_NESTED_LPID_MASK		0xFFF0000000000000UL
#define RMAP_NESTED_LPID_SHIFT		(52)
#define RMAP_NESTED_GPA_MASK		0x000FFFFFFFFFF000UL
#define RMAP_NESTED_IS_SINGLE_ENTRY	0x0000000000000001UL
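
/*
 * For example (values arbitrary and purely illustrative), an entry for
 * lpid 5 covering guest page frame 0x1234 would be built and decoded as:
 *
 *	u64 rmap = ((u64)5 << RMAP_NESTED_LPID_SHIFT) | (0x1234ULL << 12);
 *	int lpid = (rmap & RMAP_NESTED_LPID_MASK) >> RMAP_NESTED_LPID_SHIFT;
 *	u64 gpa  = rmap & RMAP_NESTED_GPA_MASK;
 */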

/* Structure for a nested guest rmap entry */
struct rmap_nested {
	struct llist_node list;
	u64 rmap;
};

/*
 * for_each_nest_rmap_safe - iterate over the list of nested rmap entries
 *			     safe against removal of the list entry or NULL list
 * @pos:	a (struct rmap_nested *) to use as a loop cursor
 * @node:	pointer to the first entry
 *		NOTE: this can be NULL
 * @rmapp:	an (unsigned long *) in which to return the rmap entries on each
 *		iteration
 *		NOTE: this must point to already allocated memory
 *
 * The nested_rmap is a llist of (struct rmap_nested) entries pointed to by the
 * rmap entry in the memslot. The list is always terminated by a "single entry"
 * stored in the list element of the final entry of the llist. If there is ONLY
 * a single entry then this is itself in the rmap entry of the memslot, not a
 * llist head pointer.
 *
 * Note that the iterator below assumes that a nested rmap entry is always
 * non-zero. This is true for our usage because the LPID field is always
 * non-zero (zero is reserved for the host).
 *
 * This should be used to iterate over the list of rmap_nested entries with
 * processing done on the u64 rmap value given by each iteration. This is safe
 * against removal of list entries and it is always safe to call free on (pos).
 *
 * e.g.
 * struct rmap_nested *cursor;
 * struct llist_node *first;
 * unsigned long rmap;
 * for_each_nest_rmap_safe(cursor, first, &rmap) {
 *	do_something(rmap);
 *	free(cursor);
 * }
 */
#define for_each_nest_rmap_safe(pos, node, rmapp)			\
	for ((pos) = llist_entry((node), typeof(*(pos)), list);	\
	     (node) &&							\
	     (*(rmapp) = ((RMAP_NESTED_IS_SINGLE_ENTRY & ((u64) (node))) ?	\
			  ((u64) (node)) : ((pos)->rmap))) &&		\
	     (((node) = ((RMAP_NESTED_IS_SINGLE_ENTRY & ((u64) (node))) ?	\
			 ((struct llist_node *) ((pos) = NULL)) :	\
			 (pos)->list.next)), true);			\
	     (pos) = llist_entry((node), typeof(*(pos)), list))

struct kvm_nested_guest *kvmhv_get_nested(struct kvm *kvm, int l1_lpid,
					  bool create);
void kvmhv_put_nested(struct kvm_nested_guest *gp);
int kvmhv_nested_next_lpid(struct kvm *kvm, int lpid);

/* Encoding of first parameter for H_TLB_INVALIDATE */
#define H_TLBIE_P1_ENC(ric, prs, r)	(___PPC_RIC(ric) | ___PPC_PRS(prs) | \
					 ___PPC_R(r))

/* The Power architecture requires the HPT to be at least 256kiB, at most 64TiB */
#define PPC_MIN_HPT_ORDER	18
#define PPC_MAX_HPT_ORDER	46
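
/*
 * The HPT occupies 2^order bytes, so the limits above correspond to
 * 2^18 = 256kiB and 2^46 = 64TiB respectively.
 */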

#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	return &get_paca()->shadow_vcpu;
}

static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
{
	preempt_enable();
}
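
/*
 * svcpu_get() and svcpu_put() must be used as a pair: the shadow vcpu lives
 * in the PACA, so preemption stays disabled while the pointer is held, e.g.
 *
 *	svcpu = svcpu_get(vcpu);
 *	... access svcpu fields ...
 *	svcpu_put(svcpu);
 */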
#endif

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE

static inline bool kvm_is_radix(struct kvm *kvm)
{
	return kvm->arch.radix;
}

static inline bool kvmhv_vcpu_is_radix(struct kvm_vcpu *vcpu)
{
	bool radix;

	if (vcpu->arch.nested)
		radix = vcpu->arch.nested->radix;
	else
		radix = kvm_is_radix(vcpu->kvm);

	return radix;
}

int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpcr);

#define KVM_DEFAULT_HPT_ORDER	24	/* 16MB HPT by default */
#endif

/*
 * Invalid HDSISR value which is used to indicate when HW has not set the reg.
 * Used to work around an erratum.
 */
#define HDSISR_CANARY	0x7fff

/*
 * We use a lock bit in HPTE dword 0 to synchronize updates and
 * accesses to each HPTE, and another bit to indicate non-present
 * HPTEs.
 */
#define HPTE_V_HVLOCK	0x40UL
#define HPTE_V_ABSENT	0x20UL

/*
 * We use this bit in the guest_rpte field of the revmap entry
 * to indicate a modified HPTE.
 */
#define HPTE_GR_MODIFIED	(1ul << 62)

/* These bits are reserved in the guest view of the HPTE */
#define HPTE_GR_RESERVED	HPTE_GR_MODIFIED

static inline long try_lock_hpte(__be64 *hpte, unsigned long bits)
{
	unsigned long tmp, old;
	__be64 be_lockbit, be_bits;

	/*
	 * We load/store in native endian, but the HTAB is in big endian. If
	 * we byte swap all data we apply on the PTE we're implicitly correct
	 * again.
	 */
	be_lockbit = cpu_to_be64(HPTE_V_HVLOCK);
	be_bits = cpu_to_be64(bits);

	asm volatile("	ldarx	%0,0,%2\n"
		     "	and.	%1,%0,%3\n"
		     "	bne	2f\n"
		     "	or	%0,%0,%4\n"
		     "	stdcx.	%0,0,%2\n"
		     "	beq+	2f\n"
		     "	mr	%1,%3\n"
		     "2:	isync"
		     : "=&r" (tmp), "=&r" (old)
		     : "r" (hpte), "r" (be_bits), "r" (be_lockbit)
		     : "cc", "memory");
	return old == 0;
}

static inline void unlock_hpte(__be64 *hpte, unsigned long hpte_v)
{
	hpte_v &= ~HPTE_V_HVLOCK;
	asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
	hpte[0] = cpu_to_be64(hpte_v);
}

/* Without barrier */
static inline void __unlock_hpte(__be64 *hpte, unsigned long hpte_v)
{
	hpte_v &= ~HPTE_V_HVLOCK;
	hpte[0] = cpu_to_be64(hpte_v);
}
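
/*
 * Illustrative locking sketch: callers spin on try_lock_hpte() until the
 * HVLOCK bit is acquired, update the HPTE, then drop the lock, e.g.
 *
 *	while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
 *		cpu_relax();
 *	... inspect or modify the HPTE ...
 *	unlock_hpte(hptep, be64_to_cpu(hptep[0]));
 *
 * unlock_hpte() clears HPTE_V_HVLOCK itself and includes a release barrier;
 * __unlock_hpte() is for callers that have already ordered their stores.
 */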

/*
 * These functions encode knowledge of the POWER7/8/9 hardware
 * interpretations of the HPTE LP (large page size) field.
 */
static inline int kvmppc_hpte_page_shifts(unsigned long h, unsigned long l)
{
	unsigned int lphi;

	if (!(h & HPTE_V_LARGE))
		return 12;	/* 4kB */
	lphi = (l >> 16) & 0xf;
	switch ((l >> 12) & 0xf) {
	case 0:
		return !lphi ? 24 : 0;		/* 16MB */
	case 1:
		return 16;			/* 64kB */
	case 3:
		return !lphi ? 34 : 0;		/* 16GB */
	case 7:
		return (16 << 8) + 12;		/* 64kB in 4kB */
	case 8:
		if (!lphi)
			return (24 << 8) + 16;	/* 16MB in 64kB */
		if (lphi == 3)
			return (24 << 8) + 12;	/* 16MB in 4kB */
		break;
	}
	return 0;
}
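
/*
 * The value returned above encodes the base page shift in the low byte and,
 * when the actual page size differs from the base, the actual page shift in
 * bits 8-15.  For example, (24 << 8) + 16 describes a 16MB page backed by a
 * 64kB base page size, while a plain 16 is a natively mapped 64kB page.
 * A return value of 0 means the LP encoding was not recognised.
 */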

static inline int kvmppc_hpte_base_page_shift(unsigned long h, unsigned long l)
{
	return kvmppc_hpte_page_shifts(h, l) & 0xff;
}

static inline int kvmppc_hpte_actual_page_shift(unsigned long h, unsigned long l)
{
	int tmp = kvmppc_hpte_page_shifts(h, l);

	if (tmp >= 0x100)
		tmp >>= 8;
	return tmp;
}

static inline unsigned long kvmppc_actual_pgsz(unsigned long v, unsigned long r)
{
	int shift = kvmppc_hpte_actual_page_shift(v, r);

	if (shift)
		return 1ul << shift;
	return 0;
}

static inline int kvmppc_pgsize_lp_encoding(int base_shift, int actual_shift)
{
	switch (base_shift) {
	case 12:
		switch (actual_shift) {
		case 12:
			return 0;
		case 16:
			return 7;
		case 24:
			return 0x38;
		}
		break;
	case 16:
		switch (actual_shift) {
		case 16:
			return 1;
		case 24:
			return 8;
		}
		break;
	case 24:
		return 0;
	}
	return -1;
}
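
/*
 * kvmppc_pgsize_lp_encoding() is the inverse of the decoding above: given
 * the base and actual page shifts it returns the LP field value, e.g.
 * kvmppc_pgsize_lp_encoding(12, 24) == 0x38 (16MB pages on a 4kB base),
 * or -1 if the combination is not supported.
 */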

static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
					     unsigned long pte_index)
{
	int a_pgshift, b_pgshift;
	unsigned long rb = 0, va_low, sllp;

	b_pgshift = a_pgshift = kvmppc_hpte_page_shifts(v, r);
	if (a_pgshift >= 0x100) {
		b_pgshift &= 0xff;
		a_pgshift >>= 8;
	}

	/*
	 * Ignore the top 14 bits of va.
	 * v has its top two bits covering segment size, hence shift
	 * by 16 bits.  Also clear the lower HPTE_V_AVPN_SHIFT (7) bits.
	 * The AVA field in v also has its lower 23 bits ignored.
	 * For base page size 4K we need bits 14..65 (so we need to
	 * collect 11 extra bits); for others we need bits 14..14+i.
	 */
	/* This covers bits 14..54 of va */
	rb = (v & ~0x7fUL) << 16;		/* AVA field */

	/*
	 * The AVA in v had its lower 23 bits cleared; we need to derive
	 * them from the pteg index.
	 */
	va_low = pte_index >> 3;
	if (v & HPTE_V_SECONDARY)
		va_low = ~va_low;
	/*
	 * Get the vpn bits from va_low using the reverse of the hashing.
	 * In v we have the va with 23 bits dropped and then left shifted
	 * by HPTE_V_AVPN_SHIFT (7) bits.  To find the vsid we therefore
	 * right shift it by (SID_SHIFT - (23 - 7)).
	 */
	if (!(v & HPTE_V_1TB_SEG))
		va_low ^= v >> (SID_SHIFT - 16);
	else
		va_low ^= v >> (SID_SHIFT_1T - 16);
	va_low &= 0x7ff;

	if (b_pgshift <= 12) {
		if (a_pgshift > 12) {
			sllp = (a_pgshift == 16) ? 5 : 4;
			rb |= sllp << 5;	/* AP field */
		}
		rb |= (va_low & 0x7ff) << 12;	/* remaining 11 bits of AVA */
	} else {
		int aval_shift;
		/*
		 * The remaining bits of the AVA/LP fields; these also
		 * contain the rr bits of LP.
		 */
		rb |= (va_low << b_pgshift) & 0x7ff000;
		/*
		 * Now clear the LP bits that are not needed, based on the
		 * actual page size.
		 */
		rb &= ~((1ul << a_pgshift) - 1);
		/*
		 * AVAL field: bits 58..77 - base_page_shift of va.
		 * We have space for bits 58..64; missing bits should be
		 * zero filled.  The +1 takes care of the L bit shift.
		 */
		aval_shift = 64 - (77 - b_pgshift) + 1;
		rb |= ((va_low << aval_shift) & 0xfe);

		rb |= 1;		/* L field */
		rb |= r & 0xff000 & ((1ul << a_pgshift) - 1); /* LP field */
	}
	/*
	 * This sets both bits of the B field in the PTE. 0b1x values are
	 * reserved, but those will have been filtered by kvmppc_do_h_enter.
	 */
	rb |= (v >> HPTE_V_SSIZE_SHIFT) << 8;	/* B field */
	return rb;
}

static inline unsigned long hpte_rpn(unsigned long ptel, unsigned long psize)
{
	return ((ptel & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT;
}

static inline int hpte_is_writable(unsigned long ptel)
{
	unsigned long pp = ptel & (HPTE_R_PP0 | HPTE_R_PP);

	return pp != PP_RXRX && pp != PP_RXXX;
}

static inline unsigned long hpte_make_readonly(unsigned long ptel)
{
	if ((ptel & HPTE_R_PP0) || (ptel & HPTE_R_PP) == PP_RWXX)
		ptel = (ptel & ~HPTE_R_PP) | PP_RXXX;
	else
		ptel |= PP_RXRX;
	return ptel;
}

static inline bool hpte_cache_flags_ok(unsigned long hptel, bool is_ci)
{
	unsigned int wimg = hptel & HPTE_R_WIMG;

	/* Handle SAO */
	if (wimg == (HPTE_R_W | HPTE_R_I | HPTE_R_M) &&
	    cpu_has_feature(CPU_FTR_ARCH_206))
		wimg = HPTE_R_M;

	if (!is_ci)
		return wimg == HPTE_R_M;
	/*
	 * If the host mapping is cache inhibited, make sure the hptel is
	 * also cache inhibited.
	 */
	if (wimg & HPTE_R_W) /* FIXME!! is this ok for all guests? */
		return false;
	return !!(wimg & HPTE_R_I);
}

/*
 * If it's present and writable, atomically set dirty and referenced bits and
 * return the PTE, otherwise return 0.
 */
static inline pte_t kvmppc_read_update_linux_pte(pte_t *ptep, int writing)
{
	pte_t old_pte, new_pte = __pte(0);

	while (1) {
		/*
		 * Make sure we don't reload from ptep
		 */
		old_pte = READ_ONCE(*ptep);
		/*
		 * Wait until H_PAGE_BUSY is clear before updating the
		 * pte atomically
		 */
		if (unlikely(pte_val(old_pte) & H_PAGE_BUSY)) {
			cpu_relax();
			continue;
		}
		/* If the pte is not present, return an empty pte */
		if (unlikely(!pte_present(old_pte)))
			return __pte(0);

		new_pte = pte_mkyoung(old_pte);
		if (writing && pte_write(old_pte))
			new_pte = pte_mkdirty(new_pte);

		if (pte_xchg(ptep, old_pte, new_pte))
			break;
	}
	return new_pte;
}

static inline bool hpte_read_permission(unsigned long pp, unsigned long key)
{
	if (key)
		return PP_RWRX <= pp && pp <= PP_RXRX;
	return true;
}

static inline bool hpte_write_permission(unsigned long pp, unsigned long key)
{
	if (key)
		return pp == PP_RWRW;
	return pp <= PP_RWRW;
}

static inline int hpte_get_skey_perm(unsigned long hpte_r, unsigned long amr)
{
	unsigned long skey;

	skey = ((hpte_r & HPTE_R_KEY_HI) >> 57) |
		((hpte_r & HPTE_R_KEY_LO) >> 9);
	return (amr >> (62 - 2 * skey)) & 3;
}
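
/*
 * The storage key is reassembled from HPTE_R_KEY_HI/LO and used to index the
 * AMR, which holds two permission bits per key with key 0 in the two most
 * significant bits; for example, skey 1 selects (amr >> 60) & 3.
 */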

static inline void lock_rmap(unsigned long *rmap)
{
	do {
		while (test_bit(KVMPPC_RMAP_LOCK_BIT, rmap))
			cpu_relax();
	} while (test_and_set_bit_lock(KVMPPC_RMAP_LOCK_BIT, rmap));
}

static inline void unlock_rmap(unsigned long *rmap)
{
	__clear_bit_unlock(KVMPPC_RMAP_LOCK_BIT, rmap);
}
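
/*
 * The lock bit (KVMPPC_RMAP_LOCK_BIT) lives in the rmap word itself, so the
 * usual pattern is simply:
 *
 *	lock_rmap(rmapp);
 *	... walk or modify the rmap chain ...
 *	unlock_rmap(rmapp);
 */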

static inline bool slot_is_aligned(struct kvm_memory_slot *memslot,
				   unsigned long pagesize)
{
	unsigned long mask = (pagesize >> PAGE_SHIFT) - 1;

	if (pagesize <= PAGE_SIZE)
		return true;
	return !(memslot->base_gfn & mask) && !(memslot->npages & mask);
}

/*
 * This works for 4k, 64k and 16M pages on POWER7,
 * and 4k and 16M pages on PPC970.
 */
static inline unsigned long slb_pgsize_encoding(unsigned long psize)
{
	unsigned long senc = 0;

	if (psize > 0x1000) {
		senc = SLB_VSID_L;
		if (psize == 0x10000)
			senc |= SLB_VSID_LP_01;
	}
	return senc;
}
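
/*
 * For example, slb_pgsize_encoding(0x1000000) gives SLB_VSID_L for 16MB
 * pages, slb_pgsize_encoding(0x10000) gives SLB_VSID_L | SLB_VSID_LP_01 for
 * 64kB pages, and a 4kB page size encodes as 0.
 */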

static inline int is_vrma_hpte(unsigned long hpte_v)
{
	return (hpte_v & ~0xffffffUL) ==
		(HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)));
}

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
/*
 * Note modification of an HPTE; set the HPTE modified bit
 * if anyone is interested.
 */
static inline void note_hpte_modification(struct kvm *kvm,
					  struct revmap_entry *rev)
{
	if (atomic_read(&kvm->arch.hpte_mod_interest))
		rev->guest_rpte |= HPTE_GR_MODIFIED;
}

/*
 * Like kvm_memslots(), but for use in real mode when we can't do
 * any RCU stuff (since the secondary threads are offline from the
 * kernel's point of view), and we can't print anything.
 * Thus we use rcu_dereference_raw() rather than rcu_dereference_check().
 */
static inline struct kvm_memslots *kvm_memslots_raw(struct kvm *kvm)
{
	return rcu_dereference_raw_check(kvm->memslots[0]);
}

extern void kvmppc_mmu_debugfs_init(struct kvm *kvm);
extern void kvmhv_radix_debugfs_init(struct kvm *kvm);

extern void kvmhv_rm_send_ipi(int cpu);

static inline unsigned long kvmppc_hpt_npte(struct kvm_hpt_info *hpt)
{
	/* HPTEs are 2**4 bytes long */
	return 1UL << (hpt->order - 4);
}

static inline unsigned long kvmppc_hpt_mask(struct kvm_hpt_info *hpt)
{
	/* 128 (2**7) bytes in each HPTEG */
	return (1UL << (hpt->order - 7)) - 1;
}
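
/*
 * With the default HPT order of 24 (a 16MB HPT), kvmppc_hpt_npte() gives
 * 2^20 HPTEs and kvmppc_hpt_mask() gives 2^17 - 1, the index mask for the
 * 2^17 HPTE groups.
 */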

/* Set bits in a dirty bitmap, which is in LE format */
static inline void set_dirty_bits(unsigned long *map, unsigned long i,
				  unsigned long npages)
{
	if (npages >= 8)
		memset((char *)map + i / 8, 0xff, npages / 8);
	else
		for (; npages; ++i, --npages)
			__set_bit_le(i, map);
}

static inline void set_dirty_bits_atomic(unsigned long *map, unsigned long i,
					 unsigned long npages)
{
	if (npages >= 8)
		memset((char *)map + i / 8, 0xff, npages / 8);
	else
		for (; npages; ++i, --npages)
			set_bit_le(i, map);
}

static inline u64 sanitize_msr(u64 msr)
{
	msr &= ~MSR_HV;
	msr |= MSR_ME;
	return msr;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static inline void copy_from_checkpoint(struct kvm_vcpu *vcpu)
{
	vcpu->arch.regs.ccr = vcpu->arch.cr_tm;
	vcpu->arch.regs.xer = vcpu->arch.xer_tm;
	vcpu->arch.regs.link = vcpu->arch.lr_tm;
	vcpu->arch.regs.ctr = vcpu->arch.ctr_tm;
	vcpu->arch.amr = vcpu->arch.amr_tm;
	vcpu->arch.ppr = vcpu->arch.ppr_tm;
	vcpu->arch.dscr = vcpu->arch.dscr_tm;
	vcpu->arch.tar = vcpu->arch.tar_tm;
	memcpy(vcpu->arch.regs.gpr, vcpu->arch.gpr_tm,
	       sizeof(vcpu->arch.regs.gpr));
	vcpu->arch.fp = vcpu->arch.fp_tm;
	vcpu->arch.vr = vcpu->arch.vr_tm;
	vcpu->arch.vrsave = vcpu->arch.vrsave_tm;
}

static inline void copy_to_checkpoint(struct kvm_vcpu *vcpu)
{
	vcpu->arch.cr_tm = vcpu->arch.regs.ccr;
	vcpu->arch.xer_tm = vcpu->arch.regs.xer;
	vcpu->arch.lr_tm = vcpu->arch.regs.link;
	vcpu->arch.ctr_tm = vcpu->arch.regs.ctr;
	vcpu->arch.amr_tm = vcpu->arch.amr;
	vcpu->arch.ppr_tm = vcpu->arch.ppr;
	vcpu->arch.dscr_tm = vcpu->arch.dscr;
	vcpu->arch.tar_tm = vcpu->arch.tar;
	memcpy(vcpu->arch.gpr_tm, vcpu->arch.regs.gpr,
	       sizeof(vcpu->arch.regs.gpr));
	vcpu->arch.fp_tm = vcpu->arch.fp;
	vcpu->arch.vr_tm = vcpu->arch.vr;
	vcpu->arch.vrsave_tm = vcpu->arch.vrsave;
}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

extern int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
			     unsigned long gpa, unsigned int level,
			     unsigned long mmu_seq, unsigned int lpid,
			     unsigned long *rmapp, struct rmap_nested **n_rmap);
extern void kvmhv_insert_nest_rmap(struct kvm *kvm, unsigned long *rmapp,
				   struct rmap_nested **n_rmap);
extern void kvmhv_update_nest_rmap_rc_list(struct kvm *kvm, unsigned long *rmapp,
					   unsigned long clr, unsigned long set,
					   unsigned long hpa, unsigned long nbytes);
extern void kvmhv_remove_nest_rmap_range(struct kvm *kvm,
					 const struct kvm_memory_slot *memslot,
					 unsigned long gpa, unsigned long hpa,
					 unsigned long nbytes);

static inline pte_t *
find_kvm_secondary_pte_unlocked(struct kvm *kvm, unsigned long ea,
				unsigned *hshift)
{
	pte_t *pte;

	pte = __find_linux_pte(kvm->arch.pgtable, ea, NULL, hshift);
	return pte;
}

static inline pte_t *find_kvm_secondary_pte(struct kvm *kvm, unsigned long ea,
					    unsigned *hshift)
{
	pte_t *pte;

	VM_WARN(!spin_is_locked(&kvm->mmu_lock),
		"%s called with kvm mmu_lock not held\n", __func__);
	pte = __find_linux_pte(kvm->arch.pgtable, ea, NULL, hshift);

	return pte;
}

static inline pte_t *find_kvm_host_pte(struct kvm *kvm, unsigned long mmu_seq,
				       unsigned long ea, unsigned *hshift)
{
	pte_t *pte;

	VM_WARN(!spin_is_locked(&kvm->mmu_lock),
		"%s called with kvm mmu_lock not held\n", __func__);

	if (mmu_notifier_retry(kvm, mmu_seq))
		return NULL;

	pte = __find_linux_pte(kvm->mm->pgd, ea, NULL, hshift);

	return pte;
}

extern pte_t *find_kvm_nested_guest_pte(struct kvm *kvm, unsigned long lpid,
					unsigned long ea, unsigned *hshift);

#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */

#endif /* __ASM_KVM_BOOK3S_64_H__ */