/*
 * Copyright (c) 2016 Citrix Systems Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; If not, see <http://www.gnu.org/licenses/>.
 */

#include <xen/event.h>
#include <xen/guest_access.h>
#include <xen/hypercall.h>
#include <xen/nospec.h>
#include <xen/sched.h>

#include <asm/hap.h>
#include <asm/hvm/cacheattr.h>
#include <asm/hvm/ioreq.h>
#include <asm/shadow.h>

#include <xsm/xsm.h>

#include <public/hvm/hvm_op.h>

struct dmop_args {
    domid_t domid;
    unsigned int nr_bufs;
    /* Reserve enough buf elements for all current hypercalls. */
    struct xen_dm_op_buf buf[2];
};

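/*
 * Copy dst_bytes starting at offset_bytes from the guest buffer selected by
 * buf_idx into dst, validating the buffer index and the requested range
 * (including overflow of offset_bytes + dst_bytes) before copying.
 * Returns true on success, false on any validation or copy failure.
 */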
static bool _raw_copy_from_guest_buf_offset(void *dst,
                                            const struct dmop_args *args,
                                            unsigned int buf_idx,
                                            size_t offset_bytes,
                                            size_t dst_bytes)
{
    size_t buf_bytes;

    if ( buf_idx >= args->nr_bufs )
        return false;

    buf_bytes = args->buf[buf_idx].size;

    if ( (offset_bytes + dst_bytes) < offset_bytes ||
         (offset_bytes + dst_bytes) > buf_bytes )
        return false;

    return !copy_from_guest_offset(dst, args->buf[buf_idx].h,
                                   offset_bytes, dst_bytes);
}
59 
60 #define COPY_FROM_GUEST_BUF_OFFSET(dst, bufs, buf_idx, offset_bytes) \
61     _raw_copy_from_guest_buf_offset(&(dst), bufs, buf_idx, offset_bytes, \
62                                     sizeof(dst))
63 
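/*
 * Enable or disable dirty VRAM tracking for [first_pfn, first_pfn + nr),
 * writing the dirty bitmap into the caller-supplied buffer.  The range is
 * capped at 1GB worth of pages, and the buffer must hold at least one bit
 * per page.
 */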
static int track_dirty_vram(struct domain *d, xen_pfn_t first_pfn,
                            unsigned int nr, const struct xen_dm_op_buf *buf)
{
    if ( nr > (GB(1) >> PAGE_SHIFT) )
        return -EINVAL;

    if ( d->is_dying )
        return -ESRCH;

    if ( !d->max_vcpus || !d->vcpu[0] )
        return -EINVAL;

    if ( ((nr + 7) / 8) > buf->size )
        return -EINVAL;

    return shadow_mode_enabled(d) ?
        shadow_track_dirty_vram(d, first_pfn, nr, buf->h) :
        hap_track_dirty_vram(d, first_pfn, nr, buf->h);
}

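/*
 * Assert or deassert a PCI INTx line for the given device.  Only segment 0,
 * bus 0, devices 0-31 and INTA-INTD (intx 0-3) are accepted; level must be
 * 0 (deassert) or 1 (assert).
 */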
static int set_pci_intx_level(struct domain *d, uint16_t domain,
                              uint8_t bus, uint8_t device,
                              uint8_t intx, uint8_t level)
{
    if ( domain != 0 || bus != 0 || device > 0x1f || intx > 3 )
        return -EINVAL;

    switch ( level )
    {
    case 0:
        hvm_pci_intx_deassert(d, device, intx);
        break;
    case 1:
        hvm_pci_intx_assert(d, device, intx);
        break;
    default:
        return -EINVAL;
    }

    return 0;
}

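/*
 * Assert or deassert one of the 16 emulated ISA IRQ lines; level must be
 * 0 (deassert) or 1 (assert).
 */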
static int set_isa_irq_level(struct domain *d, uint8_t isa_irq,
                             uint8_t level)
{
    if ( isa_irq > 15 )
        return -EINVAL;

    switch ( level )
    {
    case 0:
        hvm_isa_irq_deassert(d, isa_irq);
        break;
    case 1:
        hvm_isa_irq_assert(d, isa_irq, NULL);
        break;
    default:
        return -EINVAL;
    }

    return 0;
}

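/*
 * Mark the guest pages described by the extents in buffer 1 as dirty in the
 * log-dirty bitmap.  Work is done in batches of 256 pfns; progress is kept
 * in header->nr_extents and header->opaque so the hypercall can be restarted
 * (-ERESTART) after a preemption check.
 */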
static int modified_memory(struct domain *d,
                           const struct dmop_args *bufs,
                           struct xen_dm_op_modified_memory *header)
{
#define EXTENTS_BUFFER 1

    /* Process a maximum of 256 pfns before checking for continuation. */
    const unsigned int cont_check_interval = 0x100;
    unsigned int *rem_extents = &header->nr_extents;
    unsigned int batch_rem_pfns = cont_check_interval;
    /* Used for continuation. */
    unsigned int *pfns_done = &header->opaque;

    if ( !paging_mode_log_dirty(d) )
        return 0;

    if ( (bufs->buf[EXTENTS_BUFFER].size /
          sizeof(struct xen_dm_op_modified_memory_extent)) <
         *rem_extents )
        return -EINVAL;

    while ( *rem_extents > 0 )
    {
        struct xen_dm_op_modified_memory_extent extent;
        unsigned int batch_nr;
        xen_pfn_t pfn, end_pfn;

        if ( !COPY_FROM_GUEST_BUF_OFFSET(extent, bufs, EXTENTS_BUFFER,
                                         (*rem_extents - 1) * sizeof(extent)) )
            return -EFAULT;

        if ( extent.pad )
            return -EINVAL;

        end_pfn = extent.first_pfn + extent.nr;

        if ( end_pfn <= extent.first_pfn ||
             end_pfn > domain_get_maximum_gpfn(d) )
            return -EINVAL;

        if ( *pfns_done >= extent.nr )
            return -EINVAL;

        pfn = extent.first_pfn + *pfns_done;
        batch_nr = extent.nr - *pfns_done;

        if ( batch_nr > batch_rem_pfns )
        {
            batch_nr = batch_rem_pfns;
            *pfns_done += batch_nr;
            end_pfn = pfn + batch_nr;
        }
        else
        {
            (*rem_extents)--;
            *pfns_done = 0;
        }

        batch_rem_pfns -= batch_nr;

        for ( ; pfn < end_pfn; pfn++ )
        {
            struct page_info *page;

            page = get_page_from_gfn(d, pfn, NULL, P2M_UNSHARE);
            if ( page )
            {
                paging_mark_pfn_dirty(d, _pfn(pfn));
                /*
                 * These are most probably not page tables any more, so
                 * don't take a long time and don't die either.
                 */
                sh_remove_shadows(d, page_to_mfn(page), 1, 0);
                put_page(page);
            }
        }

        /*
         * After a full batch of cont_check_interval pfns has been
         * processed, and there are still extents remaining to process,
         * check for continuation.
         */
        if ( (batch_rem_pfns == 0) && (*rem_extents > 0) )
        {
            if ( hypercall_preempt_check() )
                return -ERESTART;

            batch_rem_pfns = cont_check_interval;
        }
    }
    return 0;

#undef EXTENTS_BUFFER
}

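/*
 * Restrict the p2m type transitions a device model may request: only
 * p2m_ram_rw may be converted to p2m_ioreq_server (and back), other RAM
 * pages may change type freely, and holes may only become p2m_mmio_dm.
 */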
static bool allow_p2m_type_change(p2m_type_t old, p2m_type_t new)
{
    if ( new == p2m_ioreq_server )
        return old == p2m_ram_rw;

    if ( old == p2m_ioreq_server )
        return new == p2m_ram_rw;

    return p2m_is_ram(old) ||
           (p2m_is_hole(old) && new == p2m_mmio_dm);
}

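/*
 * Change the p2m type of data->nr pages starting at data->first_pfn to the
 * requested HVMMEM_* type.  The loop checks for preemption every 256
 * iterations and, on -ERESTART, updates first_pfn/nr so the operation can
 * be continued.
 */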
static int set_mem_type(struct domain *d,
                        struct xen_dm_op_set_mem_type *data)
{
    xen_pfn_t last_pfn = data->first_pfn + data->nr - 1;
    unsigned int iter = 0, mem_type;
    int rc = 0;

    /* Interface types to internal p2m types */
    static const p2m_type_t memtype[] = {
        [HVMMEM_ram_rw]  = p2m_ram_rw,
        [HVMMEM_ram_ro]  = p2m_ram_ro,
        [HVMMEM_mmio_dm] = p2m_mmio_dm,
        [HVMMEM_unused] = p2m_invalid,
        [HVMMEM_ioreq_server] = p2m_ioreq_server,
    };

    if ( (data->first_pfn > last_pfn) ||
         (last_pfn > domain_get_maximum_gpfn(d)) )
        return -EINVAL;

    if ( data->mem_type >= ARRAY_SIZE(memtype) ||
         unlikely(data->mem_type == HVMMEM_unused) )
        return -EINVAL;

    mem_type = array_index_nospec(data->mem_type, ARRAY_SIZE(memtype));

    if ( mem_type == HVMMEM_ioreq_server )
    {
        unsigned int flags;

        if ( !hap_enabled(d) )
            return -EOPNOTSUPP;

        /* Do not change to HVMMEM_ioreq_server if no ioreq server mapped. */
        if ( !p2m_get_ioreq_server(d, &flags) )
            return -EINVAL;
    }

    while ( iter < data->nr )
    {
        unsigned long pfn = data->first_pfn + iter;
        p2m_type_t t;

        get_gfn_unshare(d, pfn, &t);
        if ( p2m_is_paging(t) )
        {
            put_gfn(d, pfn);
            p2m_mem_paging_populate(d, _gfn(pfn));
            return -EAGAIN;
        }

        if ( p2m_is_shared(t) )
            rc = -EAGAIN;
        else if ( !allow_p2m_type_change(t, memtype[mem_type]) )
            rc = -EINVAL;
        else
            rc = p2m_change_type_one(d, pfn, t, memtype[mem_type]);

        put_gfn(d, pfn);

        if ( rc )
            break;

        iter++;

        /*
         * Check for continuation every 256th iteration and if the
         * iteration is not the last.
         */
        if ( (iter < data->nr) && ((iter & 0xff) == 0) &&
             hypercall_preempt_check() )
        {
            data->first_pfn += iter;
            data->nr -= iter;

            rc = -ERESTART;
            break;
        }
    }

    return rc;
}

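/*
 * Queue a hardware event for injection into the target vCPU.  The vector
 * field doubles as a lock: it is claimed with cmpxchg() and written last,
 * after a write barrier, so the vCPU never observes a partially filled
 * event.
 */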
static int inject_event(struct domain *d,
                        const struct xen_dm_op_inject_event *data)
{
    struct vcpu *v;

    if ( data->vcpuid >= d->max_vcpus || !(v = d->vcpu[data->vcpuid]) )
        return -EINVAL;

    if ( cmpxchg(&v->arch.hvm.inject_event.vector,
                 HVM_EVENT_VECTOR_UNSET, HVM_EVENT_VECTOR_UPDATING) !=
         HVM_EVENT_VECTOR_UNSET )
        return -EBUSY;

    v->arch.hvm.inject_event.type = data->type;
    v->arch.hvm.inject_event.insn_len = data->insn_len;
    v->arch.hvm.inject_event.error_code = data->error_code;
    v->arch.hvm.inject_event.cr2 = data->cr2;
    smp_wmb();
    v->arch.hvm.inject_event.vector = data->vector;

    return 0;
}

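/*
 * Common handler for both the native and compat entry points: validate the
 * target domain and XSM permission, copy the op header and payload from
 * buffer 0, dispatch on op.op, and copy any output back to the guest unless
 * the operation left the payload unmodified (const_op).
 */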
static int dm_op(const struct dmop_args *op_args)
{
    struct domain *d;
    struct xen_dm_op op;
    bool const_op = true;
    long rc;
    size_t offset;

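    /* Size of the op-specific payload expected for each XEN_DMOP_* op. */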
    static const uint8_t op_size[] = {
        [XEN_DMOP_create_ioreq_server]              = sizeof(struct xen_dm_op_create_ioreq_server),
        [XEN_DMOP_get_ioreq_server_info]            = sizeof(struct xen_dm_op_get_ioreq_server_info),
        [XEN_DMOP_map_io_range_to_ioreq_server]     = sizeof(struct xen_dm_op_ioreq_server_range),
        [XEN_DMOP_unmap_io_range_from_ioreq_server] = sizeof(struct xen_dm_op_ioreq_server_range),
        [XEN_DMOP_set_ioreq_server_state]           = sizeof(struct xen_dm_op_set_ioreq_server_state),
        [XEN_DMOP_destroy_ioreq_server]             = sizeof(struct xen_dm_op_destroy_ioreq_server),
        [XEN_DMOP_track_dirty_vram]                 = sizeof(struct xen_dm_op_track_dirty_vram),
        [XEN_DMOP_set_pci_intx_level]               = sizeof(struct xen_dm_op_set_pci_intx_level),
        [XEN_DMOP_set_isa_irq_level]                = sizeof(struct xen_dm_op_set_isa_irq_level),
        [XEN_DMOP_set_pci_link_route]               = sizeof(struct xen_dm_op_set_pci_link_route),
        [XEN_DMOP_modified_memory]                  = sizeof(struct xen_dm_op_modified_memory),
        [XEN_DMOP_set_mem_type]                     = sizeof(struct xen_dm_op_set_mem_type),
        [XEN_DMOP_inject_event]                     = sizeof(struct xen_dm_op_inject_event),
        [XEN_DMOP_inject_msi]                       = sizeof(struct xen_dm_op_inject_msi),
        [XEN_DMOP_map_mem_type_to_ioreq_server]     = sizeof(struct xen_dm_op_map_mem_type_to_ioreq_server),
        [XEN_DMOP_remote_shutdown]                  = sizeof(struct xen_dm_op_remote_shutdown),
        [XEN_DMOP_relocate_memory]                  = sizeof(struct xen_dm_op_relocate_memory),
        [XEN_DMOP_pin_memory_cacheattr]             = sizeof(struct xen_dm_op_pin_memory_cacheattr),
    };

    rc = rcu_lock_remote_domain_by_id(op_args->domid, &d);
    if ( rc )
        return rc;

    if ( !is_hvm_domain(d) )
        goto out;

    rc = xsm_dm_op(XSM_DM_PRIV, d);
    if ( rc )
        goto out;

    offset = offsetof(struct xen_dm_op, u);

    rc = -EFAULT;
    if ( op_args->buf[0].size < offset )
        goto out;

    if ( copy_from_guest_offset((void *)&op, op_args->buf[0].h, 0, offset) )
        goto out;

    if ( op.op >= ARRAY_SIZE(op_size) )
    {
        rc = -EOPNOTSUPP;
        goto out;
    }

    op.op = array_index_nospec(op.op, ARRAY_SIZE(op_size));

    if ( op_args->buf[0].size < offset + op_size[op.op] )
        goto out;

    if ( copy_from_guest_offset((void *)&op.u, op_args->buf[0].h, offset,
                                op_size[op.op]) )
        goto out;

    rc = -EINVAL;
    if ( op.pad )
        goto out;

    switch ( op.op )
    {
    case XEN_DMOP_create_ioreq_server:
    {
        struct xen_dm_op_create_ioreq_server *data =
            &op.u.create_ioreq_server;

        const_op = false;

        rc = -EINVAL;
        if ( data->pad[0] || data->pad[1] || data->pad[2] )
            break;

        rc = hvm_create_ioreq_server(d, data->handle_bufioreq,
                                     &data->id);
        break;
    }

    case XEN_DMOP_get_ioreq_server_info:
    {
        struct xen_dm_op_get_ioreq_server_info *data =
            &op.u.get_ioreq_server_info;
        const uint16_t valid_flags = XEN_DMOP_no_gfns;

        const_op = false;

        rc = -EINVAL;
        if ( data->flags & ~valid_flags )
            break;

        rc = hvm_get_ioreq_server_info(d, data->id,
                                       (data->flags & XEN_DMOP_no_gfns) ?
                                       NULL : &data->ioreq_gfn,
                                       (data->flags & XEN_DMOP_no_gfns) ?
                                       NULL : &data->bufioreq_gfn,
                                       &data->bufioreq_port);
        break;
    }

    case XEN_DMOP_map_io_range_to_ioreq_server:
    {
        const struct xen_dm_op_ioreq_server_range *data =
            &op.u.map_io_range_to_ioreq_server;

        rc = -EINVAL;
        if ( data->pad )
            break;

        rc = hvm_map_io_range_to_ioreq_server(d, data->id, data->type,
                                              data->start, data->end);
        break;
    }

    case XEN_DMOP_unmap_io_range_from_ioreq_server:
    {
        const struct xen_dm_op_ioreq_server_range *data =
            &op.u.unmap_io_range_from_ioreq_server;

        rc = -EINVAL;
        if ( data->pad )
            break;

        rc = hvm_unmap_io_range_from_ioreq_server(d, data->id, data->type,
                                                  data->start, data->end);
        break;
    }

    case XEN_DMOP_map_mem_type_to_ioreq_server:
    {
        struct xen_dm_op_map_mem_type_to_ioreq_server *data =
            &op.u.map_mem_type_to_ioreq_server;
        unsigned long first_gfn = data->opaque;

        const_op = false;

        rc = -EOPNOTSUPP;
        if ( !hap_enabled(d) )
            break;

        if ( first_gfn == 0 )
            rc = hvm_map_mem_type_to_ioreq_server(d, data->id,
                                                  data->type, data->flags);
        else
            rc = 0;

        /*
         * Iterate p2m table when an ioreq server unmaps from p2m_ioreq_server,
         * and reset the remaining p2m_ioreq_server entries back to p2m_ram_rw.
         */
        if ( rc == 0 && data->flags == 0 )
        {
            struct p2m_domain *p2m = p2m_get_hostp2m(d);

            while ( read_atomic(&p2m->ioreq.entry_count) &&
                    first_gfn <= p2m->max_mapped_pfn )
            {
                /* Iterate p2m table for 256 gfns each time. */
                rc = p2m_finish_type_change(d, _gfn(first_gfn), 256);
                if ( rc < 0 )
                    break;

                first_gfn += 256;

                /* Check for continuation if it's not the last iteration. */
                if ( first_gfn <= p2m->max_mapped_pfn &&
                     hypercall_preempt_check() )
                {
                    rc = -ERESTART;
                    data->opaque = first_gfn;
                    break;
                }
            }
        }

        break;
    }

    case XEN_DMOP_set_ioreq_server_state:
    {
        const struct xen_dm_op_set_ioreq_server_state *data =
            &op.u.set_ioreq_server_state;

        rc = -EINVAL;
        if ( data->pad )
            break;

        rc = hvm_set_ioreq_server_state(d, data->id, !!data->enabled);
        break;
    }

    case XEN_DMOP_destroy_ioreq_server:
    {
        const struct xen_dm_op_destroy_ioreq_server *data =
            &op.u.destroy_ioreq_server;

        rc = -EINVAL;
        if ( data->pad )
            break;

        rc = hvm_destroy_ioreq_server(d, data->id);
        break;
    }

    case XEN_DMOP_track_dirty_vram:
    {
        const struct xen_dm_op_track_dirty_vram *data =
            &op.u.track_dirty_vram;

        rc = -EINVAL;
        if ( data->pad )
            break;

        if ( op_args->nr_bufs < 2 )
            break;

        rc = track_dirty_vram(d, data->first_pfn, data->nr, &op_args->buf[1]);
        break;
    }

    case XEN_DMOP_set_pci_intx_level:
    {
        const struct xen_dm_op_set_pci_intx_level *data =
            &op.u.set_pci_intx_level;

        rc = set_pci_intx_level(d, data->domain, data->bus,
                                data->device, data->intx,
                                data->level);
        break;
    }

    case XEN_DMOP_set_isa_irq_level:
    {
        const struct xen_dm_op_set_isa_irq_level *data =
            &op.u.set_isa_irq_level;

        rc = set_isa_irq_level(d, data->isa_irq, data->level);
        break;
    }

    case XEN_DMOP_set_pci_link_route:
    {
        const struct xen_dm_op_set_pci_link_route *data =
            &op.u.set_pci_link_route;

        rc = hvm_set_pci_link_route(d, data->link, data->isa_irq);
        break;
    }

    case XEN_DMOP_modified_memory:
    {
        struct xen_dm_op_modified_memory *data =
            &op.u.modified_memory;

        rc = modified_memory(d, op_args, data);
        const_op = !rc;
        break;
    }

    case XEN_DMOP_set_mem_type:
    {
        struct xen_dm_op_set_mem_type *data =
            &op.u.set_mem_type;

        const_op = false;

        rc = -EINVAL;
        if ( data->pad )
            break;

        rc = set_mem_type(d, data);
        break;
    }

    case XEN_DMOP_inject_event:
    {
        const struct xen_dm_op_inject_event *data =
            &op.u.inject_event;

        rc = -EINVAL;
        if ( data->pad0 || data->pad1 )
            break;

        rc = inject_event(d, data);
        break;
    }

    case XEN_DMOP_inject_msi:
    {
        const struct xen_dm_op_inject_msi *data =
            &op.u.inject_msi;

        rc = -EINVAL;
        if ( data->pad )
            break;

        rc = hvm_inject_msi(d, data->addr, data->data);
        break;
    }

    case XEN_DMOP_remote_shutdown:
    {
        const struct xen_dm_op_remote_shutdown *data =
            &op.u.remote_shutdown;

        domain_shutdown(d, data->reason);
        rc = 0;
        break;
    }

    case XEN_DMOP_relocate_memory:
    {
        struct xen_dm_op_relocate_memory *data = &op.u.relocate_memory;
        struct xen_add_to_physmap xatp = {
            .domid = op_args->domid,
            .size = data->size,
            .space = XENMAPSPACE_gmfn_range,
            .idx = data->src_gfn,
            .gpfn = data->dst_gfn,
        };

        if ( data->pad )
        {
            rc = -EINVAL;
            break;
        }

        rc = xenmem_add_to_physmap(d, &xatp, 0);
        if ( rc == 0 && data->size != xatp.size )
            rc = xatp.size;
        if ( rc > 0 )
        {
            data->size -= rc;
            data->src_gfn += rc;
            data->dst_gfn += rc;
            const_op = false;
            rc = -ERESTART;
        }
        break;
    }

    case XEN_DMOP_pin_memory_cacheattr:
    {
        const struct xen_dm_op_pin_memory_cacheattr *data =
            &op.u.pin_memory_cacheattr;

        if ( data->pad )
        {
            rc = -EINVAL;
            break;
        }

        rc = hvm_set_mem_pinned_cacheattr(d, data->start, data->end,
                                          data->type);
        break;
    }

    default:
        rc = -EOPNOTSUPP;
        break;
    }

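    /*
     * For non-const ops, copy the (possibly updated) payload back to the
     * guest so that output fields and continuation state become visible.
     */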
    if ( (!rc || rc == -ERESTART) &&
         !const_op && copy_to_guest_offset(op_args->buf[0].h, offset,
                                           (void *)&op.u, op_size[op.op]) )
        rc = -EFAULT;

 out:
    rcu_unlock_domain(d);

    return rc;
}

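/* Build-time checks that the compat and native views of these structs match. */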
CHECK_dm_op_create_ioreq_server;
CHECK_dm_op_get_ioreq_server_info;
CHECK_dm_op_ioreq_server_range;
CHECK_dm_op_set_ioreq_server_state;
CHECK_dm_op_destroy_ioreq_server;
CHECK_dm_op_track_dirty_vram;
CHECK_dm_op_set_pci_intx_level;
CHECK_dm_op_set_isa_irq_level;
CHECK_dm_op_set_pci_link_route;
CHECK_dm_op_modified_memory;
CHECK_dm_op_set_mem_type;
CHECK_dm_op_inject_event;
CHECK_dm_op_inject_msi;
CHECK_dm_op_remote_shutdown;
CHECK_dm_op_relocate_memory;
CHECK_dm_op_pin_memory_cacheattr;

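/*
 * 32-bit (compat) entry point: translate each compat buffer descriptor to
 * the native layout before handing off to dm_op(), and turn -ERESTART into
 * a hypercall continuation.
 */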
int compat_dm_op(domid_t domid,
                 unsigned int nr_bufs,
                 XEN_GUEST_HANDLE_PARAM(void) bufs)
{
    struct dmop_args args;
    unsigned int i;
    int rc;

    if ( nr_bufs > ARRAY_SIZE(args.buf) )
        return -E2BIG;

    args.domid = domid;
    args.nr_bufs = array_index_nospec(nr_bufs, ARRAY_SIZE(args.buf) + 1);

    for ( i = 0; i < args.nr_bufs; i++ )
    {
        struct compat_dm_op_buf cmp;

        if ( copy_from_guest_offset(&cmp, bufs, i, 1) )
            return -EFAULT;

#define XLAT_dm_op_buf_HNDL_h(_d_, _s_) \
        guest_from_compat_handle((_d_)->h, (_s_)->h)

        XLAT_dm_op_buf(&args.buf[i], &cmp);

#undef XLAT_dm_op_buf_HNDL_h
    }

    rc = dm_op(&args);

    if ( rc == -ERESTART )
        rc = hypercall_create_continuation(__HYPERVISOR_dm_op, "iih",
                                           domid, nr_bufs, bufs);

    return rc;
}

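/*
 * Native entry point: copy the buffer descriptors straight from the guest
 * and hand off to dm_op(), turning -ERESTART into a hypercall continuation.
 */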
long do_dm_op(domid_t domid,
              unsigned int nr_bufs,
              XEN_GUEST_HANDLE_PARAM(xen_dm_op_buf_t) bufs)
{
    struct dmop_args args;
    int rc;

    if ( nr_bufs > ARRAY_SIZE(args.buf) )
        return -E2BIG;

    args.domid = domid;
    args.nr_bufs = array_index_nospec(nr_bufs, ARRAY_SIZE(args.buf) + 1);

    if ( copy_from_guest_offset(&args.buf[0], bufs, 0, args.nr_bufs) )
        return -EFAULT;

    rc = dm_op(&args);

    if ( rc == -ERESTART )
        rc = hypercall_create_continuation(__HYPERVISOR_dm_op, "iih",
                                           domid, nr_bufs, bufs);

    return rc;
}

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */