EMIT_FILE;

#include <xen/types.h>
#include <xen/hypercall.h>
#include <xen/guest_access.h>
#include <xen/sched.h>
#include <xen/event.h>
#include <xen/mem_access.h>
#include <asm/current.h>
#include <compat/memory.h>

#define xen_domid_t domid_t
#define compat_domid_t domid_compat_t
CHECK_TYPE(domid);
#undef compat_domid_t
#undef xen_domid_t

CHECK_vmemrange;

#ifdef CONFIG_HAS_PASSTHROUGH
struct get_reserved_device_memory {
    struct compat_reserved_device_memory_map map;
    unsigned int used_entries;
};

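/*
 * Callback for iommu_get_reserved_device_memory(): skip ranges whose SBDF
 * does not match the requested device (unless XENMEM_RDM_ALL is set) and
 * bounce each remaining entry into the guest's compat buffer, checking that
 * the start PFN and page count still fit the narrower compat fields.
 */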
static int get_reserved_device_memory(xen_pfn_t start, xen_ulong_t nr,
                                      u32 id, void *ctxt)
{
    struct get_reserved_device_memory *grdm = ctxt;
    uint32_t sbdf = PCI_SBDF3(grdm->map.dev.pci.seg, grdm->map.dev.pci.bus,
                              grdm->map.dev.pci.devfn).sbdf;

    if ( !(grdm->map.flags & XENMEM_RDM_ALL) && (sbdf != id) )
        return 0;

    if ( grdm->used_entries < grdm->map.nr_entries )
    {
        struct compat_reserved_device_memory rdm = {
            .start_pfn = start, .nr_pages = nr
        };

        if ( rdm.start_pfn != start || rdm.nr_pages != nr )
            return -ERANGE;

        if ( __copy_to_compat_offset(grdm->map.buffer, grdm->used_entries,
                                     &rdm, 1) )
            return -EFAULT;
    }

    ++grdm->used_entries;

    return 1;
}
#endif

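/*
 * Translate a compat (32-bit) guest's XENMEM_* arguments into their native
 * layout in the hypercall argument translation area, invoke the native
 * handler, and translate the results back.  Requests whose guest arrays do
 * not fit into the translation area are processed in batches, with
 * hypercall continuations used to resume at the next extent.
 */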
int compat_memory_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) compat)
{
    int split, op = cmd & MEMOP_CMD_MASK;
    long rc;
    unsigned int start_extent = cmd >> MEMOP_EXTENT_SHIFT;

    do
    {
        unsigned int i, end_extent = 0;
        union {
            XEN_GUEST_HANDLE_PARAM(void) hnd;
            struct xen_memory_reservation *rsrv;
            struct xen_memory_exchange *xchg;
            struct xen_add_to_physmap *atp;
            struct xen_add_to_physmap_batch *atpb;
            struct xen_remove_from_physmap *xrfp;
            struct xen_vnuma_topology_info *vnuma;
            struct xen_mem_access_op *mao;
            struct xen_mem_acquire_resource *mar;
        } nat;
        union {
            struct compat_memory_reservation rsrv;
            struct compat_memory_exchange xchg;
            struct compat_add_to_physmap atp;
            struct compat_add_to_physmap_batch atpb;
            struct compat_vnuma_topology_info vnuma;
            struct compat_mem_access_op mao;
            struct compat_mem_acquire_resource mar;
        } cmp;

        set_xen_guest_handle(nat.hnd, COMPAT_ARG_XLAT_VIRT_BASE);
        split = 0;
        switch ( op )
        {
            xen_pfn_t *space;

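        /*
         * The extent list holds 32-bit compat_pfn_t entries while the
         * native handler expects xen_pfn_t, so the list is bounced through
         * the space following the native structure (copying the guest's
         * entries in for all but XENMEM_increase_reservation) and
         * nr_extents is clipped so the batch fits.  Any remainder is dealt
         * with on a further loop iteration.
         */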
        case XENMEM_increase_reservation:
        case XENMEM_decrease_reservation:
        case XENMEM_populate_physmap:
            if ( copy_from_guest(&cmp.rsrv, compat, 1) )
                return start_extent;

            /* Is size too large for us to encode a continuation? */
            if ( cmp.rsrv.nr_extents > (UINT_MAX >> MEMOP_EXTENT_SHIFT) )
                return start_extent;

            if ( !compat_handle_is_null(cmp.rsrv.extent_start) &&
                 !compat_handle_okay(cmp.rsrv.extent_start, cmp.rsrv.nr_extents) )
                return start_extent;

            end_extent = start_extent + (COMPAT_ARG_XLAT_SIZE - sizeof(*nat.rsrv)) /
                                        sizeof(*space);
            if ( end_extent > cmp.rsrv.nr_extents )
                end_extent = cmp.rsrv.nr_extents;

            space = (xen_pfn_t *)(nat.rsrv + 1);
#define XLAT_memory_reservation_HNDL_extent_start(_d_, _s_) \
            do \
            { \
                if ( !compat_handle_is_null((_s_)->extent_start) ) \
                { \
                    set_xen_guest_handle((_d_)->extent_start, space - start_extent); \
                    if ( op != XENMEM_increase_reservation ) \
                    { \
                        for ( i = start_extent; i < end_extent; ++i ) \
                        { \
                            compat_pfn_t pfn; \
                            if ( __copy_from_compat_offset(&pfn, (_s_)->extent_start, i, 1) ) \
                            { \
                                end_extent = i; \
                                split = -1; \
                                break; \
                            } \
                            *space++ = pfn; \
                        } \
                    } \
                } \
                else \
                { \
                    set_xen_guest_handle((_d_)->extent_start, NULL); \
                    end_extent = cmp.rsrv.nr_extents; \
                } \
            } while (0)
            XLAT_memory_reservation(nat.rsrv, &cmp.rsrv);
#undef XLAT_memory_reservation_HNDL_extent_start

            if ( end_extent < cmp.rsrv.nr_extents )
            {
                nat.rsrv->nr_extents = end_extent;
                ++split;
            }

            break;

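        /*
         * An exchange bounces both the input and the output PFN lists
         * through the translation area, so the batch size is derived from
         * the space needed per exchange unit.  Translation resumes at
         * nr_exchanged.
         */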
        case XENMEM_exchange:
        {
            int order_delta;

            if ( copy_from_guest(&cmp.xchg, compat, 1) )
                return -EFAULT;

            order_delta = cmp.xchg.out.extent_order - cmp.xchg.in.extent_order;
            /* Various sanity checks. */
            if ( (cmp.xchg.nr_exchanged > cmp.xchg.in.nr_extents) ||
                 (order_delta > 0 && (cmp.xchg.nr_exchanged & ((1U << order_delta) - 1))) ||
                 /* Sizes of input and output lists do not overflow an int? */
                 ((~0U >> cmp.xchg.in.extent_order) < cmp.xchg.in.nr_extents) ||
                 ((~0U >> cmp.xchg.out.extent_order) < cmp.xchg.out.nr_extents) ||
                 /* Sizes of input and output lists match? */
                 ((cmp.xchg.in.nr_extents << cmp.xchg.in.extent_order) !=
                  (cmp.xchg.out.nr_extents << cmp.xchg.out.extent_order)) )
                return -EINVAL;

            if ( !compat_handle_okay(cmp.xchg.in.extent_start,
                                     cmp.xchg.in.nr_extents) ||
                 !compat_handle_okay(cmp.xchg.out.extent_start,
                                     cmp.xchg.out.nr_extents) )
                return -EFAULT;

            start_extent = cmp.xchg.nr_exchanged;
            end_extent = (COMPAT_ARG_XLAT_SIZE - sizeof(*nat.xchg)) /
                         (((1U << ABS(order_delta)) + 1) *
                          sizeof(*space));
            if ( end_extent == 0 )
            {
                printk("Cannot translate compatibility mode XENMEM_exchange extents (%u,%u)\n",
                       cmp.xchg.in.extent_order, cmp.xchg.out.extent_order);
                return -E2BIG;
            }
            if ( order_delta > 0 )
                end_extent <<= order_delta;
            end_extent += start_extent;
            if ( end_extent > cmp.xchg.in.nr_extents )
                end_extent = cmp.xchg.in.nr_extents;

            space = (xen_pfn_t *)(nat.xchg + 1);
            /* Code below depends upon .in preceding .out. */
            BUILD_BUG_ON(offsetof(xen_memory_exchange_t, in) > offsetof(xen_memory_exchange_t, out));
#define XLAT_memory_reservation_HNDL_extent_start(_d_, _s_) \
            do \
            { \
                set_xen_guest_handle((_d_)->extent_start, space - start_extent); \
                for ( i = start_extent; i < end_extent; ++i ) \
                { \
                    compat_pfn_t pfn; \
                    if ( __copy_from_compat_offset(&pfn, (_s_)->extent_start, i, 1) ) \
                        return -EFAULT; \
                    *space++ = pfn; \
                } \
                if ( order_delta > 0 ) \
                { \
                    start_extent >>= order_delta; \
                    end_extent >>= order_delta; \
                } \
                else \
                { \
                    start_extent <<= -order_delta; \
                    end_extent <<= -order_delta; \
                } \
                order_delta = -order_delta; \
            } while (0)
            XLAT_memory_exchange(nat.xchg, &cmp.xchg);
#undef XLAT_memory_reservation_HNDL_extent_start

            if ( end_extent < cmp.xchg.in.nr_extents )
            {
                nat.xchg->in.nr_extents = end_extent;
                if ( order_delta >= 0 )
                    nat.xchg->out.nr_extents = end_extent >> order_delta;
                else
                    nat.xchg->out.nr_extents = end_extent << -order_delta;
                ++split;
            }

            break;
        }

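        /*
         * These operations pass the guest handle straight through: the
         * argument is at most a domid_t, which CHECK_TYPE(domid) above
         * verifies has an identical compat layout.
         */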
        case XENMEM_current_reservation:
        case XENMEM_maximum_reservation:
        case XENMEM_maximum_gpfn:
        case XENMEM_maximum_ram_page:
            nat.hnd = compat;
            break;

        case XENMEM_add_to_physmap:
            BUILD_BUG_ON((typeof(cmp.atp.size))-1 >
                         (UINT_MAX >> MEMOP_EXTENT_SHIFT));

            if ( copy_from_guest(&cmp.atp, compat, 1) )
                return -EFAULT;

            XLAT_add_to_physmap(nat.atp, &cmp.atp);

            break;

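        /*
         * The idxs and gpfns arrays are widened into the translation area
         * side by side, while the errs handle is converted directly since
         * its entries have the same layout in both ABIs.  size is clipped
         * to what fits in one batch.
         */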
        case XENMEM_add_to_physmap_batch:
        {
            unsigned int limit = (COMPAT_ARG_XLAT_SIZE - sizeof(*nat.atpb))
                                 / (sizeof(nat.atpb->idxs.p) + sizeof(nat.atpb->gpfns.p));
            /* Use an intermediate variable to suppress warnings on old gcc: */
            unsigned int size;
            xen_ulong_t *idxs = (void *)(nat.atpb + 1);
            xen_pfn_t *gpfns = (void *)(idxs + limit);
            /*
             * The union is always 16 bits wide, so it is not necessary to
             * pick the exact field corresponding to the space.
             */
            enum XLAT_add_to_physmap_batch_u u =
                XLAT_add_to_physmap_batch_u_res0;

            if ( copy_from_guest(&cmp.atpb, compat, 1) )
                return -EFAULT;
            size = cmp.atpb.size;
            if ( !compat_handle_okay(cmp.atpb.idxs, size) ||
                 !compat_handle_okay(cmp.atpb.gpfns, size) ||
                 !compat_handle_okay(cmp.atpb.errs, size) )
                return -EFAULT;

            end_extent = start_extent + limit;
            if ( end_extent > size )
                end_extent = size;

            idxs -= start_extent;
            gpfns -= start_extent;

            for ( i = start_extent; i < end_extent; ++i )
            {
                compat_ulong_t idx;
                compat_pfn_t gpfn;

                if ( __copy_from_compat_offset(&idx, cmp.atpb.idxs, i, 1) ||
                     __copy_from_compat_offset(&gpfn, cmp.atpb.gpfns, i, 1) )
                    return -EFAULT;
                idxs[i] = idx;
                gpfns[i] = gpfn;
            }

#define XLAT_add_to_physmap_batch_HNDL_idxs(_d_, _s_) \
            set_xen_guest_handle((_d_)->idxs, idxs)
#define XLAT_add_to_physmap_batch_HNDL_gpfns(_d_, _s_) \
            set_xen_guest_handle((_d_)->gpfns, gpfns)
#define XLAT_add_to_physmap_batch_HNDL_errs(_d_, _s_) \
            guest_from_compat_handle((_d_)->errs, (_s_)->errs)

            XLAT_add_to_physmap_batch(nat.atpb, &cmp.atpb);

#undef XLAT_add_to_physmap_batch_HNDL_errs
#undef XLAT_add_to_physmap_batch_HNDL_gpfns
#undef XLAT_add_to_physmap_batch_HNDL_idxs

            if ( end_extent < cmp.atpb.size )
            {
                nat.atpb->size = end_extent;
                ++split;
            }

            break;
        }

        case XENMEM_remove_from_physmap:
        {
            struct compat_remove_from_physmap cmp;

            if ( copy_from_guest(&cmp, compat, 1) )
                return -EFAULT;

            XLAT_remove_from_physmap(nat.xrfp, &cmp);

            break;
        }

        case XENMEM_access_op:
            if ( copy_from_guest(&cmp.mao, compat, 1) )
                return -EFAULT;

#define XLAT_mem_access_op_HNDL_pfn_list(_d_, _s_) \
            guest_from_compat_handle((_d_)->pfn_list, (_s_)->pfn_list)
#define XLAT_mem_access_op_HNDL_access_list(_d_, _s_) \
            guest_from_compat_handle((_d_)->access_list, (_s_)->access_list)

            XLAT_mem_access_op(nat.mao, &cmp.mao);

#undef XLAT_mem_access_op_HNDL_pfn_list
#undef XLAT_mem_access_op_HNDL_access_list

            break;

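        /*
         * The vNUMA output buffers (vdistance, vcpu_to_vnode, vmemrange)
         * keep their guest-provided handles; only the handles themselves
         * are converted, the entry layouts being identical (see
         * CHECK_vmemrange above).
         */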
        case XENMEM_get_vnumainfo:
        {
            enum XLAT_vnuma_topology_info_vdistance vdistance =
                XLAT_vnuma_topology_info_vdistance_h;
            enum XLAT_vnuma_topology_info_vcpu_to_vnode vcpu_to_vnode =
                XLAT_vnuma_topology_info_vcpu_to_vnode_h;
            enum XLAT_vnuma_topology_info_vmemrange vmemrange =
                XLAT_vnuma_topology_info_vmemrange_h;

            if ( copy_from_guest(&cmp.vnuma, compat, 1) )
                return -EFAULT;

#define XLAT_vnuma_topology_info_HNDL_vdistance_h(_d_, _s_) \
            guest_from_compat_handle((_d_)->vdistance.h, (_s_)->vdistance.h)
#define XLAT_vnuma_topology_info_HNDL_vcpu_to_vnode_h(_d_, _s_) \
            guest_from_compat_handle((_d_)->vcpu_to_vnode.h, (_s_)->vcpu_to_vnode.h)
#define XLAT_vnuma_topology_info_HNDL_vmemrange_h(_d_, _s_) \
            guest_from_compat_handle((_d_)->vmemrange.h, (_s_)->vmemrange.h)

            XLAT_vnuma_topology_info(nat.vnuma, &cmp.vnuma);

#undef XLAT_vnuma_topology_info_HNDL_vdistance_h
#undef XLAT_vnuma_topology_info_HNDL_vcpu_to_vnode_h
#undef XLAT_vnuma_topology_info_HNDL_vmemrange_h
            break;
        }

#ifdef CONFIG_HAS_PASSTHROUGH
        case XENMEM_reserved_device_memory_map:
        {
            struct get_reserved_device_memory grdm;

            if ( unlikely(start_extent) )
                return -EINVAL;

            if ( copy_from_guest(&grdm.map, compat, 1) ||
                 !compat_handle_okay(grdm.map.buffer, grdm.map.nr_entries) )
                return -EFAULT;

            if ( grdm.map.flags & ~XENMEM_RDM_ALL )
                return -EINVAL;

            grdm.used_entries = 0;
            rc = iommu_get_reserved_device_memory(get_reserved_device_memory,
                                                  &grdm);

            if ( !rc && grdm.map.nr_entries < grdm.used_entries )
                rc = -ENOBUFS;
            grdm.map.nr_entries = grdm.used_entries;
            if ( __copy_to_guest(compat, &grdm.map, 1) )
                rc = -EFAULT;

            return rc;
        }
#endif

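        /*
         * The (optional) frame list is widened into the translation area.
         * The number of frames per call is small, so a request that does
         * not fit is rejected with -E2BIG rather than split.
         */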
        case XENMEM_acquire_resource:
        {
            xen_pfn_t *xen_frame_list;
            unsigned int max_nr_frames;

            if ( copy_from_guest(&cmp.mar, compat, 1) )
                return -EFAULT;

            /*
             * The number of frames handled is currently limited to a
             * small number by the underlying implementation, so the
             * scratch space should be sufficient for bouncing the
             * frame addresses.
             */
            max_nr_frames = (COMPAT_ARG_XLAT_SIZE - sizeof(*nat.mar)) /
                            sizeof(*xen_frame_list);

            if ( cmp.mar.nr_frames > max_nr_frames )
                return -E2BIG;

            if ( compat_handle_is_null(cmp.mar.frame_list) )
                xen_frame_list = NULL;
            else
            {
                xen_frame_list = (xen_pfn_t *)(nat.mar + 1);

                if ( !compat_handle_okay(cmp.mar.frame_list,
                                         cmp.mar.nr_frames) )
                    return -EFAULT;

                for ( i = 0; i < cmp.mar.nr_frames; i++ )
                {
                    compat_pfn_t frame;

                    if ( __copy_from_compat_offset(
                             &frame, cmp.mar.frame_list, i, 1) )
                        return -EFAULT;

                    xen_frame_list[i] = frame;
                }
            }

#define XLAT_mem_acquire_resource_HNDL_frame_list(_d_, _s_) \
            set_xen_guest_handle((_d_)->frame_list, xen_frame_list)

            XLAT_mem_acquire_resource(nat.mar, &cmp.mar);

#undef XLAT_mem_acquire_resource_HNDL_frame_list

            break;
        }

        default:
            return compat_arch_memory_op(cmd, compat);
        }

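        /* Invoke the native handler on the translated arguments. */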
        rc = do_memory_op(cmd, nat.hnd);
        if ( rc < 0 )
        {
            if ( rc == -ENOBUFS && op == XENMEM_get_vnumainfo )
            {
                cmp.vnuma.nr_vnodes = nat.vnuma->nr_vnodes;
                cmp.vnuma.nr_vcpus = nat.vnuma->nr_vcpus;
                cmp.vnuma.nr_vmemranges = nat.vnuma->nr_vmemranges;
                if ( __copy_to_guest(compat, &cmp.vnuma, 1) )
                    rc = -EFAULT;
            }
            break;
        }

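        /*
         * If the native handler set up a continuation, pick up the updated
         * cmd (encoding the restart extent) and substitute the original
         * compat handle back into the saved hypercall arguments; split = -1
         * records that a continuation is already pending.
         */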
        cmd = 0;
        if ( hypercall_xlat_continuation(&cmd, 2, 0x02, nat.hnd, compat) )
        {
            BUG_ON(rc != __HYPERVISOR_memory_op);
            BUG_ON((cmd & MEMOP_CMD_MASK) != op);
            split = -1;
        }

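        /* Copy results back to the compat guest and advance start_extent. */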
        switch ( op )
        {
        case XENMEM_increase_reservation:
        case XENMEM_decrease_reservation:
        case XENMEM_populate_physmap:
            end_extent = split >= 0 ? rc : cmd >> MEMOP_EXTENT_SHIFT;
            if ( (op != XENMEM_decrease_reservation) &&
                 !guest_handle_is_null(nat.rsrv->extent_start) )
            {
                for ( ; start_extent < end_extent; ++start_extent )
                {
                    compat_pfn_t pfn = nat.rsrv->extent_start.p[start_extent];

                    BUG_ON(pfn != nat.rsrv->extent_start.p[start_extent]);
                    if ( __copy_to_compat_offset(cmp.rsrv.extent_start,
                                                 start_extent, &pfn, 1) )
                    {
                        if ( split >= 0 )
                        {
                            rc = start_extent;
                            split = 0;
                        }
                        else
                            /*
                             * Short of being able to cancel the continuation,
                             * force it to restart here; eventually we shall
                             * get out of this state.
                             */
                            rc = (start_extent << MEMOP_EXTENT_SHIFT) | op;
                        break;
                    }
                }
            }
            else
            {
                start_extent = end_extent;
            }
            /* Bail if there was an error. */
            if ( (split >= 0) && (end_extent != nat.rsrv->nr_extents) )
                split = 0;
            break;

        case XENMEM_exchange:
        {
            DEFINE_XEN_GUEST_HANDLE(compat_memory_exchange_t);
            int order_delta;

            BUG_ON(split >= 0 && rc);
            BUG_ON(end_extent < nat.xchg->nr_exchanged);
            end_extent = nat.xchg->nr_exchanged;

            order_delta = cmp.xchg.out.extent_order - cmp.xchg.in.extent_order;
            if ( order_delta > 0 )
            {
                start_extent >>= order_delta;
                BUG_ON(end_extent & ((1U << order_delta) - 1));
                end_extent >>= order_delta;
            }
            else
            {
                start_extent <<= -order_delta;
                end_extent <<= -order_delta;
            }

            for ( ; start_extent < end_extent; ++start_extent )
            {
                compat_pfn_t pfn = nat.xchg->out.extent_start.p[start_extent];

                BUG_ON(pfn != nat.xchg->out.extent_start.p[start_extent]);
                if ( __copy_to_compat_offset(cmp.xchg.out.extent_start,
                                             start_extent, &pfn, 1) )
                {
                    rc = -EFAULT;
                    break;
                }
            }

            cmp.xchg.nr_exchanged = nat.xchg->nr_exchanged;
            if ( __copy_field_to_guest(guest_handle_cast(compat,
                                                         compat_memory_exchange_t),
                                       &cmp.xchg, nr_exchanged) )
                rc = -EFAULT;

            if ( rc < 0 )
            {
                if ( split < 0 )
                    /* Cannot cancel the continuation... */
                    domain_crash(current->domain);
                return rc;
            }
            break;
        }

        case XENMEM_add_to_physmap_batch:
            start_extent = end_extent;
            break;

        case XENMEM_maximum_ram_page:
        case XENMEM_current_reservation:
        case XENMEM_maximum_reservation:
        case XENMEM_maximum_gpfn:
        case XENMEM_add_to_physmap:
        case XENMEM_remove_from_physmap:
        case XENMEM_access_op:
            break;

        case XENMEM_get_vnumainfo:
            cmp.vnuma.nr_vnodes = nat.vnuma->nr_vnodes;
            cmp.vnuma.nr_vcpus = nat.vnuma->nr_vcpus;
            cmp.vnuma.nr_vmemranges = nat.vnuma->nr_vmemranges;
            if ( __copy_to_guest(compat, &cmp.vnuma, 1) )
                rc = -EFAULT;
            break;

        case XENMEM_acquire_resource:
        {
            const xen_pfn_t *xen_frame_list = (xen_pfn_t *)(nat.mar + 1);
            compat_pfn_t *compat_frame_list = (compat_pfn_t *)(nat.mar + 1);
            DEFINE_XEN_GUEST_HANDLE(compat_mem_acquire_resource_t);

            if ( compat_handle_is_null(cmp.mar.frame_list) )
            {
                if ( __copy_field_to_guest(
                         guest_handle_cast(compat,
                                           compat_mem_acquire_resource_t),
                         &cmp.mar, nr_frames) )
                    return -EFAULT;
            }
            else
            {
                /*
                 * NOTE: the smaller compat array overwrites the native
                 * array.
                 */
                BUILD_BUG_ON(sizeof(compat_pfn_t) > sizeof(xen_pfn_t));

                for ( i = 0; i < cmp.mar.nr_frames; i++ )
                {
                    compat_pfn_t frame = xen_frame_list[i];

                    if ( frame != xen_frame_list[i] )
                        return -ERANGE;

                    compat_frame_list[i] = frame;
                }

                if ( __copy_to_compat_offset(cmp.mar.frame_list, 0,
                                             compat_frame_list,
                                             cmp.mar.nr_frames) )
                    return -EFAULT;
            }

            break;
        }

        default:
            domain_crash(current->domain);
            split = 0;
            break;
        }

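        /*
         * Re-encode the restart point.  If further batches remain and a
         * preemption is due, return a compat continuation; otherwise loop
         * to process the next batch.
         */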
        cmd = op | (start_extent << MEMOP_EXTENT_SHIFT);
        if ( split > 0 && hypercall_preempt_check() )
            return hypercall_create_continuation(
                __HYPERVISOR_memory_op, "ih", cmd, compat);
    } while ( split > 0 );

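    /* Clamp the (long) native return value to the compat guest's int range. */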
    if ( unlikely(rc > INT_MAX) )
        return INT_MAX;

    if ( unlikely(rc < INT_MIN) )
        return INT_MIN;

    return rc;
}