// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *	Atish Patra <atish.patra@wdc.com>
 *	Anup Patel <anup.patel@wdc.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>

#ifdef CONFIG_FPU
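/*
 * Reset the guest's sstatus.FS field: mark the FP unit "Initial" when the
 * vcpu ISA has the F or D extension, "Off" otherwise.
 */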
void kvm_riscv_vcpu_fp_reset(struct kvm_vcpu *vcpu)
{
	unsigned long isa = vcpu->arch.isa;
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;

	cntx->sstatus &= ~SR_FS;
	if (riscv_isa_extension_available(&isa, f) ||
	    riscv_isa_extension_available(&isa, d))
		cntx->sstatus |= SR_FS_INITIAL;
	else
		cntx->sstatus |= SR_FS_OFF;
}

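/*
 * Mark the FP state "Clean" so that the next context switch only saves it
 * again if the guest has dirtied it in the meantime.
 */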
void kvm_riscv_vcpu_fp_clean(struct kvm_cpu_context *cntx)
{
	cntx->sstatus &= ~SR_FS;
	cntx->sstatus |= SR_FS_CLEAN;
}

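/*
 * Save the guest FP registers, but only when the state is "Dirty";
 * prefer the D-extension save path when it is available.
 */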
void kvm_riscv_vcpu_guest_fp_save(struct kvm_cpu_context *cntx,
				  unsigned long isa)
{
	if ((cntx->sstatus & SR_FS) == SR_FS_DIRTY) {
		if (riscv_isa_extension_available(&isa, d))
			__kvm_riscv_fp_d_save(cntx);
		else if (riscv_isa_extension_available(&isa, f))
			__kvm_riscv_fp_f_save(cntx);
		kvm_riscv_vcpu_fp_clean(cntx);
	}
}

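/*
 * Restore the guest FP registers whenever the FP unit is not "Off",
 * again preferring the D-extension path over the F-extension one.
 */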
void kvm_riscv_vcpu_guest_fp_restore(struct kvm_cpu_context *cntx,
				     unsigned long isa)
{
	if ((cntx->sstatus & SR_FS) != SR_FS_OFF) {
		if (riscv_isa_extension_available(&isa, d))
			__kvm_riscv_fp_d_restore(cntx);
		else if (riscv_isa_extension_available(&isa, f))
			__kvm_riscv_fp_f_restore(cntx);
		kvm_riscv_vcpu_fp_clean(cntx);
	}
}

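/* Save the host FP registers based on the host (not vcpu) ISA. */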
void kvm_riscv_vcpu_host_fp_save(struct kvm_cpu_context *cntx)
{
	/* No need to check host sstatus as it can be modified outside */
	if (riscv_isa_extension_available(NULL, d))
		__kvm_riscv_fp_d_save(cntx);
	else if (riscv_isa_extension_available(NULL, f))
		__kvm_riscv_fp_f_save(cntx);
}

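/* Restore the host FP registers, mirroring kvm_riscv_vcpu_host_fp_save(). */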
void kvm_riscv_vcpu_host_fp_restore(struct kvm_cpu_context *cntx)
{
	if (riscv_isa_extension_available(NULL, d))
		__kvm_riscv_fp_d_restore(cntx);
	else if (riscv_isa_extension_available(NULL, f))
		__kvm_riscv_fp_f_restore(cntx);
}
#endif

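/*
 * Handle KVM_GET_ONE_REG for the FP register groups: decode the register
 * number from reg->id, validate the requested size against the group
 * (F registers and fcsr are 32-bit, D registers are 64-bit), then copy
 * the value out to userspace.
 */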
int kvm_riscv_vcpu_get_reg_fp(struct kvm_vcpu *vcpu,
			      const struct kvm_one_reg *reg,
			      unsigned long rtype)
{
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	unsigned long isa = vcpu->arch.isa;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    rtype);
	void *reg_val;

	if ((rtype == KVM_REG_RISCV_FP_F) &&
	    riscv_isa_extension_available(&isa, f)) {
		if (KVM_REG_SIZE(reg->id) != sizeof(u32))
			return -EINVAL;
		if (reg_num == KVM_REG_RISCV_FP_F_REG(fcsr))
			reg_val = &cntx->fp.f.fcsr;
		else if ((KVM_REG_RISCV_FP_F_REG(f[0]) <= reg_num) &&
			  reg_num <= KVM_REG_RISCV_FP_F_REG(f[31]))
			reg_val = &cntx->fp.f.f[reg_num];
		else
			return -EINVAL;
	} else if ((rtype == KVM_REG_RISCV_FP_D) &&
		   riscv_isa_extension_available(&isa, d)) {
		if (reg_num == KVM_REG_RISCV_FP_D_REG(fcsr)) {
			if (KVM_REG_SIZE(reg->id) != sizeof(u32))
				return -EINVAL;
			reg_val = &cntx->fp.d.fcsr;
		} else if ((KVM_REG_RISCV_FP_D_REG(f[0]) <= reg_num) &&
			   reg_num <= KVM_REG_RISCV_FP_D_REG(f[31])) {
			if (KVM_REG_SIZE(reg->id) != sizeof(u64))
				return -EINVAL;
			reg_val = &cntx->fp.d.f[reg_num];
		} else
			return -EINVAL;
	} else
		return -EINVAL;

	if (copy_to_user(uaddr, reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

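/*
 * Illustrative userspace usage (a sketch, not part of this file): reading
 * guest register f[3] as a single-precision value through the one-reg
 * interface would look roughly like the following, assuming a vcpu fd and
 * the register encodings from <linux/kvm.h> and <asm/kvm.h>:
 *
 *	u32 val;
 *	struct kvm_one_reg one_reg = {
 *		.id   = KVM_REG_RISCV | KVM_REG_SIZE_U32 |
 *			KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[3]),
 *		.addr = (unsigned long)&val,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &one_reg);
 *
 * kvm_riscv_vcpu_set_reg_fp() below is the mirror image: it performs the
 * same register-number and size validation, then copies the new value in
 * from userspace with copy_from_user().
 */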
int kvm_riscv_vcpu_set_reg_fp(struct kvm_vcpu *vcpu,
			      const struct kvm_one_reg *reg,
			      unsigned long rtype)
{
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	unsigned long isa = vcpu->arch.isa;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    rtype);
	void *reg_val;

	if ((rtype == KVM_REG_RISCV_FP_F) &&
	    riscv_isa_extension_available(&isa, f)) {
		if (KVM_REG_SIZE(reg->id) != sizeof(u32))
			return -EINVAL;
		if (reg_num == KVM_REG_RISCV_FP_F_REG(fcsr))
			reg_val = &cntx->fp.f.fcsr;
		else if ((KVM_REG_RISCV_FP_F_REG(f[0]) <= reg_num) &&
			  reg_num <= KVM_REG_RISCV_FP_F_REG(f[31]))
			reg_val = &cntx->fp.f.f[reg_num];
		else
			return -EINVAL;
	} else if ((rtype == KVM_REG_RISCV_FP_D) &&
		   riscv_isa_extension_available(&isa, d)) {
		if (reg_num == KVM_REG_RISCV_FP_D_REG(fcsr)) {
			if (KVM_REG_SIZE(reg->id) != sizeof(u32))
				return -EINVAL;
			reg_val = &cntx->fp.d.fcsr;
		} else if ((KVM_REG_RISCV_FP_D_REG(f[0]) <= reg_num) &&
			   reg_num <= KVM_REG_RISCV_FP_D_REG(f[31])) {
			if (KVM_REG_SIZE(reg->id) != sizeof(u64))
				return -EINVAL;
			reg_val = &cntx->fp.d.f[reg_num];
		} else
			return -EINVAL;
	} else
		return -EINVAL;

	if (copy_from_user(reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}