#include <xen/event.h>
#include <xen/mem_access.h>
#include <xen/multicall.h>
#include <compat/memory.h>
#include <compat/xen.h>
#include <asm/mem_paging.h>
#include <asm/mem_sharing.h>

#include <asm/pv/mm.h>

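/*
 * Translate a 32-bit (compat) guest's XENMEM_* arguments into their
 * native layout, forward the request to arch_memory_op(), and translate
 * any output back for the guest.
 */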
int compat_arch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
{
    struct compat_machphys_mfn_list xmml;
    l2_pgentry_t l2e;
    unsigned long v;
    compat_pfn_t mfn;
    unsigned int i;
    int rc = 0;

    switch ( cmd )
    {
    case XENMEM_set_memory_map:
    {
        struct compat_foreign_memory_map cmp;
        struct xen_foreign_memory_map *nat = COMPAT_ARG_XLAT_VIRT_BASE;

        if ( copy_from_guest(&cmp, arg, 1) )
            return -EFAULT;

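        /* Widen the 32-bit buffer handle embedded in the structure. */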
#define XLAT_memory_map_HNDL_buffer(_d_, _s_) \
        guest_from_compat_handle((_d_)->buffer, (_s_)->buffer)
        XLAT_foreign_memory_map(nat, &cmp);
#undef XLAT_memory_map_HNDL_buffer

        rc = arch_memory_op(cmd, guest_handle_from_ptr(nat, void));

        break;
    }

    case XENMEM_memory_map:
    case XENMEM_machine_memory_map:
    {
        struct compat_memory_map cmp;
        struct xen_memory_map *nat = COMPAT_ARG_XLAT_VIRT_BASE;

        if ( copy_from_guest(&cmp, arg, 1) )
            return -EFAULT;

#define XLAT_memory_map_HNDL_buffer(_d_, _s_) \
        guest_from_compat_handle((_d_)->buffer, (_s_)->buffer)
        XLAT_memory_map(nat, &cmp);
#undef XLAT_memory_map_HNDL_buffer

        rc = arch_memory_op(cmd, guest_handle_from_ptr(nat, void));
        if ( rc < 0 )
            break;

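        /*
         * Translate the updated nr_entries back; the no-op buffer
         * override leaves the guest's handle untouched.
         */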
#define XLAT_memory_map_HNDL_buffer(_d_, _s_) ((void)0)
        XLAT_memory_map(&cmp, nat);
#undef XLAT_memory_map_HNDL_buffer
        if ( __copy_to_guest(arg, &cmp, 1) )
            rc = -EFAULT;

        break;
    }

    case XENMEM_set_pod_target:
    case XENMEM_get_pod_target:
    {
        struct compat_pod_target cmp;
        struct xen_pod_target *nat = COMPAT_ARG_XLAT_VIRT_BASE;

        if ( copy_from_guest(&cmp, arg, 1) )
            return -EFAULT;

        XLAT_pod_target(nat, &cmp);

        rc = arch_memory_op(cmd, guest_handle_from_ptr(nat, void));
        if ( rc < 0 )
            break;

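        /*
         * A return value equal to the hypercall number means a
         * continuation was created; re-translate its second argument
         * (mask 0x2) from the native pointer back to the guest handle.
         */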
        if ( rc == __HYPERVISOR_memory_op )
            hypercall_xlat_continuation(NULL, 2, 0x2, nat, arg);

        XLAT_pod_target(&cmp, nat);

        if ( __copy_to_guest(arg, &cmp, 1) )
        {
            if ( rc == __HYPERVISOR_memory_op )
                hypercall_cancel_continuation(current);
            rc = -EFAULT;
        }

        break;
    }

    case XENMEM_machphys_mapping:
    {
        struct domain *d = current->domain;
        struct compat_machphys_mapping mapping = {
            .v_start = MACH2PHYS_COMPAT_VIRT_START(d),
            .v_end   = MACH2PHYS_COMPAT_VIRT_END,
            .max_mfn = MACH2PHYS_COMPAT_NR_ENTRIES(d) - 1
        };

        if ( copy_to_guest(arg, &mapping, 1) )
            rc = -EFAULT;

        break;
    }

    case XENMEM_machphys_mfn_list:
    case XENMEM_machphys_compat_mfn_list:
    {
        unsigned long limit;
        compat_pfn_t last_mfn;

        if ( copy_from_guest(&xmml, arg, 1) )
            return -EFAULT;

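        /*
         * Walk the L2 entries backing the writable compat M2P table:
         * each present entry maps one 2Mb superpage whose MFN gets
         * handed to the guest; holes repeat the previously seen MFN.
         */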
        limit = (unsigned long)(compat_machine_to_phys_mapping + max_page);
        if ( limit > RDWR_COMPAT_MPT_VIRT_END )
            limit = RDWR_COMPAT_MPT_VIRT_END;
        for ( i = 0, v = RDWR_COMPAT_MPT_VIRT_START, last_mfn = 0;
              (i != xmml.max_extents) && (v < limit);
              i++, v += 1 << L2_PAGETABLE_SHIFT )
        {
            l2e = compat_idle_pg_table_l2[l2_table_offset(v)];
            if ( l2e_get_flags(l2e) & _PAGE_PRESENT )
                mfn = l2e_get_pfn(l2e);
            else
                mfn = last_mfn;
            ASSERT(mfn);
            if ( copy_to_compat_offset(xmml.extent_start, i, &mfn, 1) )
                return -EFAULT;
            last_mfn = mfn;
        }

        xmml.nr_extents = i;
        if ( __copy_to_guest(arg, &xmml, 1) )
            rc = -EFAULT;

        break;
    }

    case XENMEM_get_sharing_freed_pages:
        return mem_sharing_get_nr_saved_mfns();

    case XENMEM_get_sharing_shared_pages:
        return mem_sharing_get_nr_shared_mfns();

    case XENMEM_paging_op:
        return mem_paging_memop(guest_handle_cast(arg, xen_mem_paging_op_t));

#ifdef CONFIG_MEM_SHARING
    case XENMEM_sharing_op:
        return mem_sharing_memop(guest_handle_cast(arg, xen_mem_sharing_op_t));
#endif

    default:
        rc = -ENOSYS;
        break;
    }

    return rc;
}

#ifdef CONFIG_PV
DEFINE_XEN_GUEST_HANDLE(mmuext_op_compat_t);

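/*
 * Translate a batch of 32-bit mmuext_op structures into native ones in
 * the per-vCPU translation area and feed them to do_mmuext_op() in
 * chunks, preserving preemption state across the pieces.
 */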
int compat_mmuext_op(XEN_GUEST_HANDLE_PARAM(void) arg,
                     unsigned int count,
                     XEN_GUEST_HANDLE_PARAM(uint) pdone,
                     unsigned int foreigndom)
{
    unsigned int i, preempt_mask;
    int rc = 0;
    XEN_GUEST_HANDLE_PARAM(mmuext_op_compat_t) cmp_uops =
        guest_handle_cast(arg, mmuext_op_compat_t);
    XEN_GUEST_HANDLE_PARAM(mmuext_op_t) nat_ops;

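    /*
     * A null handle with just MMU_UPDATE_PREEMPTED set carries no ops to
     * translate; pass it straight through so do_mmuext_op() can update
     * pdone.
     */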
    if ( unlikely(count == MMU_UPDATE_PREEMPTED) &&
         likely(guest_handle_is_null(cmp_uops)) )
    {
        set_xen_guest_handle(nat_ops, NULL);
        return do_mmuext_op(nat_ops, count, pdone, foreigndom);
    }

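    /* Strip the preemption flag from the count; it is re-applied per chunk. */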
    preempt_mask = count & MMU_UPDATE_PREEMPTED;
    count ^= preempt_mask;

    if ( unlikely(!guest_handle_okay(cmp_uops, count)) )
        return -EFAULT;

    set_xen_guest_handle(nat_ops, COMPAT_ARG_XLAT_VIRT_BASE);

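    /* Translate and process the ops in chunks sized to the xlat area. */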
    for ( ; count; count -= i )
    {
        mmuext_op_t *nat_op = nat_ops.p;
        unsigned int limit = COMPAT_ARG_XLAT_SIZE / sizeof(*nat_op);
        int err;

        for ( i = 0; i < min(limit, count); ++i )
        {
            mmuext_op_compat_t cmp_op;
            enum XLAT_mmuext_op_arg1 arg1;
            enum XLAT_mmuext_op_arg2 arg2;

            if ( unlikely(__copy_from_guest(&cmp_op, cmp_uops, 1) != 0) )
            {
                rc = -EFAULT;
                break;
            }

            switch ( cmp_op.cmd )
            {
            case MMUEXT_PIN_L1_TABLE:
            case MMUEXT_PIN_L2_TABLE:
            case MMUEXT_PIN_L3_TABLE:
            case MMUEXT_PIN_L4_TABLE:
            case MMUEXT_UNPIN_TABLE:
            case MMUEXT_NEW_BASEPTR:
            case MMUEXT_CLEAR_PAGE:
            case MMUEXT_COPY_PAGE:
                arg1 = XLAT_mmuext_op_arg1_mfn;
                break;
            default:
                arg1 = XLAT_mmuext_op_arg1_linear_addr;
                break;
            case MMUEXT_NEW_USER_BASEPTR:
                rc = -EINVAL;
                /* fallthrough */
            case MMUEXT_TLB_FLUSH_LOCAL:
            case MMUEXT_TLB_FLUSH_MULTI:
            case MMUEXT_TLB_FLUSH_ALL:
            case MMUEXT_FLUSH_CACHE:
                arg1 = -1;
                break;
            }

            if ( rc )
                break;

            switch ( cmp_op.cmd )
            {
            case MMUEXT_SET_LDT:
                arg2 = XLAT_mmuext_op_arg2_nr_ents;
                break;
            case MMUEXT_TLB_FLUSH_MULTI:
            case MMUEXT_INVLPG_MULTI:
                arg2 = XLAT_mmuext_op_arg2_vcpumask;
                break;
            case MMUEXT_COPY_PAGE:
                arg2 = XLAT_mmuext_op_arg2_src_mfn;
                break;
            default:
                arg2 = -1;
                break;
            }

#define XLAT_mmuext_op_HNDL_arg2_vcpumask(_d_, _s_) \
        guest_from_compat_handle((_d_)->arg2.vcpumask, (_s_)->arg2.vcpumask)
            XLAT_mmuext_op(nat_op, &cmp_op);
#undef XLAT_mmuext_op_HNDL_arg2_vcpumask

            if ( rc || i >= limit )
                break;

            guest_handle_add_offset(cmp_uops, 1);
            ++nat_op;
        }

        err = do_mmuext_op(nat_ops, i | preempt_mask, pdone, foreigndom);

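        /*
         * A positive return value equal to the hypercall number means
         * do_mmuext_op() created a continuation.  Recover the number of
         * ops still pending in this chunk from the saved (multicall or
         * register) argument, rebase both handles to match, and
         * re-translate the continuation's arguments for the compat guest.
         */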
        if ( err )
        {
            BUILD_BUG_ON(__HYPERVISOR_mmuext_op <= 0);
            if ( err == __HYPERVISOR_mmuext_op )
            {
                struct cpu_user_regs *regs = guest_cpu_user_regs();
                struct mc_state *mcs = &current->mc_state;
                unsigned int arg1 = !(mcs->flags & MCSF_in_multicall)
                                    ? regs->ecx
                                    : mcs->call.args[1];
                unsigned int left = arg1 & ~MMU_UPDATE_PREEMPTED;

                BUG_ON(left == arg1 && left != i);
                BUG_ON(left > count);
                guest_handle_add_offset(nat_ops, i - left);
                guest_handle_subtract_offset(cmp_uops, left);
                left = 1;
                if ( arg1 != MMU_UPDATE_PREEMPTED )
                {
                    BUG_ON(!hypercall_xlat_continuation(&left, 4, 0x01, nat_ops,
                                                        cmp_uops));
                    if ( !(mcs->flags & MCSF_in_multicall) )
                        regs->ecx += count - i;
                    else
                        mcs->compat_call.args[1] += count - i;
                }
                else
                    BUG_ON(hypercall_xlat_continuation(&left, 4, 0));
                BUG_ON(left != arg1);
            }
            else
                BUG_ON(err > 0);
            rc = err;
        }

        if ( rc )
            break;

        /* Force do_mmuext_op() to not start counting from zero again. */
        preempt_mask = MMU_UPDATE_PREEMPTED;
    }

    return rc;
}
#endif /* CONFIG_PV */

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */