// SPDX-License-Identifier: GPL-2.0
/*
 * Hyper-V Isolation VM interface with paravisor and hypervisor
 *
 * Author:
 *  Tianyu Lan <Tianyu.Lan@microsoft.com>
 */

#include <linux/bitfield.h>
#include <linux/hyperv.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <asm/svm.h>
#include <asm/sev.h>
#include <asm/io.h>
#include <asm/mshyperv.h>
#include <asm/hypervisor.h>

#ifdef CONFIG_AMD_MEM_ENCRYPT

#define GHCB_USAGE_HYPERV_CALL	1

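/*
 * Layout of the GHCB page when it is used for a Hyper-V hypercall
 * (ghcb_usage == GHCB_USAGE_HYPERV_CALL): the hypercall input data,
 * the output GPA and the hypercall input/output control words all
 * live directly in the shared GHCB page.
 */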
union hv_ghcb {
	struct ghcb ghcb;
	struct {
		u64 hypercalldata[509];
		u64 outputgpa;
		union {
			union {
				struct {
					u32 callcode : 16;
					u32 isfast : 1;
					u32 reserved1 : 14;
					u32 isnested : 1;
					u32 countofelements : 12;
					u32 reserved2 : 4;
					u32 repstartindex : 12;
					u32 reserved3 : 4;
				};
				u64 asuint64;
			} hypercallinput;
			union {
				struct {
					u16 callstatus;
					u16 reserved1;
					u32 elementsprocessed : 12;
					u32 reserved2 : 20;
				};
				u64 asuint64;
			} hypercalloutput;
		};
		u64 reserved2;
	} hypercall;
} __packed __aligned(HV_HYP_PAGE_SIZE);

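/*
 * hv_ghcb_hypercall - Issue a Hyper-V hypercall through the per-CPU
 * GHCB page.
 *
 * The hypercall input is copied into the GHCB page, the page is handed
 * to the hypervisor via VMGEXIT(), and the call status is read back from
 * the hypercall output control word. Returns the Hyper-V call status,
 * or -EFAULT if no GHCB page is available for this CPU.
 */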
u64 hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size)
{
	union hv_ghcb *hv_ghcb;
	void **ghcb_base;
	unsigned long flags;
	u64 status;

	if (!hv_ghcb_pg)
		return -EFAULT;

	WARN_ON(in_nmi());

	local_irq_save(flags);
	ghcb_base = (void **)this_cpu_ptr(hv_ghcb_pg);
	hv_ghcb = (union hv_ghcb *)*ghcb_base;
	if (!hv_ghcb) {
		local_irq_restore(flags);
		return -EFAULT;
	}

	hv_ghcb->ghcb.protocol_version = GHCB_PROTOCOL_MAX;
	hv_ghcb->ghcb.ghcb_usage = GHCB_USAGE_HYPERV_CALL;

	hv_ghcb->hypercall.outputgpa = (u64)output;
	hv_ghcb->hypercall.hypercallinput.asuint64 = 0;
	hv_ghcb->hypercall.hypercallinput.callcode = control;

	if (input_size)
		memcpy(hv_ghcb->hypercall.hypercalldata, input, input_size);

	VMGEXIT();

	hv_ghcb->ghcb.ghcb_usage = 0xffffffff;
	memset(hv_ghcb->ghcb.save.valid_bitmap, 0,
	       sizeof(hv_ghcb->ghcb.save.valid_bitmap));

	status = hv_ghcb->hypercall.hypercalloutput.callstatus;

	local_irq_restore(flags);

	return status;
}

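/*
 * hv_ghcb_msr_write - Write an MSR via the GHCB MSR protocol.
 *
 * The MSR index is passed in RCX and the value in RAX/RDX (low/high
 * 32 bits), then an SVM_EXIT_MSR exit is requested with the write flag set.
 */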
void hv_ghcb_msr_write(u64 msr, u64 value)
{
	union hv_ghcb *hv_ghcb;
	void **ghcb_base;
	unsigned long flags;
	struct es_em_ctxt ctxt;

	if (!hv_ghcb_pg)
		return;

	WARN_ON(in_nmi());

	local_irq_save(flags);
	ghcb_base = (void **)this_cpu_ptr(hv_ghcb_pg);
	hv_ghcb = (union hv_ghcb *)*ghcb_base;
	if (!hv_ghcb) {
		local_irq_restore(flags);
		return;
	}

	ghcb_set_rcx(&hv_ghcb->ghcb, msr);
	ghcb_set_rax(&hv_ghcb->ghcb, lower_32_bits(value));
	ghcb_set_rdx(&hv_ghcb->ghcb, upper_32_bits(value));

	if (sev_es_ghcb_hv_call(&hv_ghcb->ghcb, false, &ctxt,
				SVM_EXIT_MSR, 1, 0))
		pr_warn("Failed to write MSR %llx via GHCB.\n", msr);

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(hv_ghcb_msr_write);

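/*
 * hv_ghcb_msr_read - Read an MSR via the GHCB MSR protocol.
 *
 * The MSR index is passed in RCX and the result is assembled from the
 * RAX/RDX values returned by the hypervisor.
 */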
void hv_ghcb_msr_read(u64 msr, u64 *value)
{
	union hv_ghcb *hv_ghcb;
	void **ghcb_base;
	unsigned long flags;
	struct es_em_ctxt ctxt;

	/* Check that union hv_ghcb exactly fills one Hyper-V page. */
	BUILD_BUG_ON(sizeof(union hv_ghcb) != HV_HYP_PAGE_SIZE);

	if (!hv_ghcb_pg)
		return;

	WARN_ON(in_nmi());

	local_irq_save(flags);
	ghcb_base = (void **)this_cpu_ptr(hv_ghcb_pg);
	hv_ghcb = (union hv_ghcb *)*ghcb_base;
	if (!hv_ghcb) {
		local_irq_restore(flags);
		return;
	}

	ghcb_set_rcx(&hv_ghcb->ghcb, msr);
	if (sev_es_ghcb_hv_call(&hv_ghcb->ghcb, false, &ctxt,
				SVM_EXIT_MSR, 0, 0))
		pr_warn("Failed to read MSR %llx via GHCB.\n", msr);
	else
		*value = (u64)lower_32_bits(hv_ghcb->ghcb.save.rax)
			| ((u64)lower_32_bits(hv_ghcb->ghcb.save.rdx) << 32);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(hv_ghcb_msr_read);
#endif

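/*
 * hv_get_isolation_type - Return the isolation type reported by Hyper-V,
 * or HV_ISOLATION_TYPE_NONE if the isolation feature is not advertised.
 */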
enum hv_isolation_type hv_get_isolation_type(void)
{
	if (!(ms_hyperv.priv_high & HV_ISOLATION))
		return HV_ISOLATION_TYPE_NONE;
	return FIELD_GET(HV_ISOLATION_TYPE, ms_hyperv.isolation_config_b);
}
EXPORT_SYMBOL_GPL(hv_get_isolation_type);

/*
 * hv_is_isolation_supported - Check if the system runs in a Hyper-V
 * isolation VM.
 */
bool hv_is_isolation_supported(void)
{
	if (!cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
		return false;

	if (!hypervisor_is_type(X86_HYPER_MS_HYPERV))
		return false;

	return hv_get_isolation_type() != HV_ISOLATION_TYPE_NONE;
}

DEFINE_STATIC_KEY_FALSE(isolation_type_snp);

/*
 * hv_isolation_type_snp - Check if the system runs in an AMD SEV-SNP
 * based Hyper-V isolation VM.
 */
bool hv_isolation_type_snp(void)
{
	return static_branch_unlikely(&isolation_type_snp);
}
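
/*
 * Usage sketch (illustrative only; the callers live outside this file):
 * code elsewhere typically branches on these helpers to pick an
 * isolation-aware path, e.g.
 *
 *	if (hv_isolation_type_snp())
 *		status = hv_ghcb_hypercall(control, input, output, input_size);
 *	else
 *		status = hv_do_hypercall(control, input, output);
 */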

/*
 * hv_mark_gpa_visibility - Set pages visible to the host via hypercall.
 *
 * In an isolation VM all guest memory is encrypted and therefore opaque
 * to the host; the guest must make memory visible to the host via this
 * hypercall before sharing it with the host.
 */
static int hv_mark_gpa_visibility(u16 count, const u64 pfn[],
				  enum hv_mem_host_visibility visibility)
{
	struct hv_gpa_range_for_visibility **input_pcpu, *input;
	u16 pages_processed;
	u64 hv_status;
	unsigned long flags;

	/* no-op if partition isolation is not enabled */
	if (!hv_is_isolation_supported())
		return 0;

	if (count > HV_MAX_MODIFY_GPA_REP_COUNT) {
		pr_err("Hyper-V: GPA count %d exceeds supported maximum %lu\n",
		       count, HV_MAX_MODIFY_GPA_REP_COUNT);
		return -EINVAL;
	}

	local_irq_save(flags);
	input_pcpu = (struct hv_gpa_range_for_visibility **)
			this_cpu_ptr(hyperv_pcpu_input_arg);
	input = *input_pcpu;
	if (unlikely(!input)) {
		local_irq_restore(flags);
		return -EINVAL;
	}

	input->partition_id = HV_PARTITION_ID_SELF;
	input->host_visibility = visibility;
	input->reserved0 = 0;
	input->reserved1 = 0;
	memcpy((void *)input->gpa_page_list, pfn, count * sizeof(*pfn));
	hv_status = hv_do_rep_hypercall(
			HVCALL_MODIFY_SPARSE_GPA_PAGE_HOST_VISIBILITY, count,
			0, input, &pages_processed);
	local_irq_restore(flags);

	if (hv_result_success(hv_status))
		return 0;
	else
		return -EFAULT;
}

/*
 * hv_set_mem_host_visibility - Set the specified memory range visible
 * to the host.
 *
 * In an isolation VM all guest memory is encrypted and therefore opaque
 * to the host; the guest must make memory visible to the host via
 * hypercall before sharing it. This function is a wrapper around
 * hv_mark_gpa_visibility() that takes a memory base and page count and
 * batches the PFNs into hypercall-sized chunks.
 */
int hv_set_mem_host_visibility(unsigned long kbuffer, int pagecount, bool visible)
{
	enum hv_mem_host_visibility visibility = visible ?
			VMBUS_PAGE_VISIBLE_READ_WRITE : VMBUS_PAGE_NOT_VISIBLE;
	u64 *pfn_array;
	int ret = 0;
	int i, pfn;

	if (!hv_is_isolation_supported() || !hv_hypercall_pg)
		return 0;

	pfn_array = kmalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL);
	if (!pfn_array)
		return -ENOMEM;

	for (i = 0, pfn = 0; i < pagecount; i++) {
		pfn_array[pfn] = virt_to_hvpfn((void *)kbuffer + i * HV_HYP_PAGE_SIZE);
		pfn++;

		if (pfn == HV_MAX_MODIFY_GPA_REP_COUNT || i == pagecount - 1) {
			ret = hv_mark_gpa_visibility(pfn, pfn_array,
						     visibility);
			if (ret)
				goto err_free_pfn_array;
			pfn = 0;
		}
	}

 err_free_pfn_array:
	kfree(pfn_array);
	return ret;
}
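
/*
 * Usage sketch (illustrative only; the buffer name and page count below
 * are hypothetical): a driver sharing a page-aligned buffer with the host
 * would typically do something like
 *
 *	ret = hv_set_mem_host_visibility((unsigned long)ring_buf,
 *					 nr_pages, true);
 *	if (ret)
 *		return ret;
 *	...
 *	hv_set_mem_host_visibility((unsigned long)ring_buf, nr_pages, false);
 */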