/*
 * Copyright (c) 2007-2008, D G Murray <Derek.Murray@cl.cam.ac.uk>
 * Copyright (c) 2018, Oleksandr Andrushchenko, EPAM Systems Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation;
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; If not, see <http://www.gnu.org/licenses/>.
 *
 * Split out from xc_linux_osdep.c
 */

#include <fcntl.h>
#include <errno.h>
#include <unistd.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <stddef.h>

#include <sys/ioctl.h>
#include <sys/mman.h>

#include <xen/sys/gntdev.h>
#include <xen/sys/gntalloc.h>

#include <xen-tools/libs.h>

#include "private.h"

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE-1))

#define DEVXEN "/dev/xen/"

#ifndef O_CLOEXEC
#define O_CLOEXEC 0
#endif

int osdep_gnttab_open(xengnttab_handle *xgt)
{
    int fd = open(DEVXEN "gntdev", O_RDWR|O_CLOEXEC);
    if ( fd == -1 )
        return -1;
    xgt->fd = fd;
    return 0;
}

int osdep_gnttab_close(xengnttab_handle *xgt)
{
    if ( xgt->fd == -1 )
        return 0;

    return close(xgt->fd);
}

int osdep_gnttab_set_max_grants(xengnttab_handle *xgt, uint32_t count)
{
    int fd = xgt->fd, rc;
    struct ioctl_gntdev_set_max_grants max_grants = { .count = count };

    rc = ioctl(fd, IOCTL_GNTDEV_SET_MAX_GRANTS, &max_grants);
    if (rc) {
        /*
         * Newer (e.g. pv-ops) kernels don't implement this IOCTL,
         * so ignore the resulting specific failure.
         */
        if (errno == ENOTTY)
            rc = 0;
        else
            GTERROR(xgt->logger, "ioctl SET_MAX_GRANTS failed");
    }

    return rc;
}

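/*
 * A minimal caller-side sketch (illustrative only, not part of this file)
 * of how the public wrapper around the hook above might be used. It assumes
 * the declarations from xengnttab.h and a NULL logger; on kernels without
 * IOCTL_GNTDEV_SET_MAX_GRANTS the hint is silently ignored, as noted above.
 *
 *     xengnttab_handle *xgt = xengnttab_open(NULL, 0);
 *
 *     if ( xgt && xengnttab_set_max_grants(xgt, 32) )
 *         perror("xengnttab_set_max_grants");   // a hint only
 *
 *     xengnttab_close(xgt);
 */
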
void *osdep_gnttab_grant_map(xengnttab_handle *xgt,
                             uint32_t count, int flags, int prot,
                             uint32_t *domids, uint32_t *refs,
                             uint32_t notify_offset,
                             evtchn_port_t notify_port)
{
    int fd = xgt->fd;
    struct ioctl_gntdev_map_grant_ref *map;
    unsigned int map_size = sizeof(*map) + (count - 1) * sizeof(map->refs[0]);
    void *addr = NULL;
    int domids_stride = 1;
    int i;

    if (flags & XENGNTTAB_GRANT_MAP_SINGLE_DOMAIN)
        domids_stride = 0;

    if ( map_size <= PAGE_SIZE )
        map = alloca(map_size);
    else
    {
        map_size = ROUNDUP(map_size, PAGE_SHIFT);
        map = mmap(NULL, map_size, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANON | MAP_POPULATE, -1, 0);
        if ( map == MAP_FAILED )
        {
            GTERROR(xgt->logger, "mmap of map failed");
            return NULL;
        }
    }

    for ( i = 0; i < count; i++ )
    {
        map->refs[i].domid = domids[i * domids_stride];
        map->refs[i].ref = refs[i];
    }

    map->count = count;

    if ( ioctl(fd, IOCTL_GNTDEV_MAP_GRANT_REF, map) ) {
        GTERROR(xgt->logger, "ioctl MAP_GRANT_REF failed");
        goto out;
    }

 retry:
    addr = mmap(NULL, PAGE_SIZE * count, prot, MAP_SHARED, fd,
                map->index);

    if (addr == MAP_FAILED && errno == EAGAIN)
    {
        /*
         * The grant hypercall can return EAGAIN if the granted page
         * is swapped out. Since the paging daemon may be in the same
         * domain, the hypercall cannot block without causing a
         * deadlock.
         *
         * Because there are no notifications when the page is swapped
         * in, wait a bit before retrying, and hope that the page will
         * arrive eventually.
         */
        usleep(1000);
        goto retry;
    }

    if (addr != MAP_FAILED)
    {
        int rv = 0;
        struct ioctl_gntdev_unmap_notify notify;
        notify.index = map->index;
        notify.action = 0;
        if (notify_offset < PAGE_SIZE * count) {
            notify.index += notify_offset;
            notify.action |= UNMAP_NOTIFY_CLEAR_BYTE;
        }
        if (notify_port != -1) {
            notify.event_channel_port = notify_port;
            notify.action |= UNMAP_NOTIFY_SEND_EVENT;
        }
        if (notify.action)
            rv = ioctl(fd, IOCTL_GNTDEV_SET_UNMAP_NOTIFY, &notify);
        if (rv) {
            GTERROR(xgt->logger, "ioctl SET_UNMAP_NOTIFY failed");
            munmap(addr, count * PAGE_SIZE);
            addr = MAP_FAILED;
        }
    }

    if (addr == MAP_FAILED)
    {
        int saved_errno = errno;
        struct ioctl_gntdev_unmap_grant_ref unmap_grant;

        /* Unmap the driver slots used to store the grant information. */
        GTERROR(xgt->logger, "mmap failed");
        unmap_grant.index = map->index;
        unmap_grant.count = count;
        ioctl(fd, IOCTL_GNTDEV_UNMAP_GRANT_REF, &unmap_grant);
        errno = saved_errno;
        addr = NULL;
    }

 out:
    if ( map_size > PAGE_SIZE )
        munmap(map, map_size);

    return addr;
}

int osdep_gnttab_unmap(xengnttab_handle *xgt,
                       void *start_address,
                       uint32_t count)
{
    int fd = xgt->fd;
    struct ioctl_gntdev_get_offset_for_vaddr get_offset;
    struct ioctl_gntdev_unmap_grant_ref unmap_grant;
    int rc;

    if ( start_address == NULL )
    {
        errno = EINVAL;
        return -1;
    }

    /* First, it is necessary to get the offset which was initially used to
     * mmap() the pages.
     */
    get_offset.vaddr = (unsigned long)start_address;
    if ( (rc = ioctl(fd, IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR,
                     &get_offset)) )
        return rc;

    if ( get_offset.count != count )
    {
        errno = EINVAL;
        return -1;
    }

    /* Next, unmap the memory. */
    if ( (rc = munmap(start_address, count * PAGE_SIZE)) )
        return rc;

    /* Finally, unmap the driver slots used to store the grant information. */
    unmap_grant.index = get_offset.offset;
    unmap_grant.count = count;
    if ( (rc = ioctl(fd, IOCTL_GNTDEV_UNMAP_GRANT_REF, &unmap_grant)) )
        return rc;

    return 0;
}

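/*
 * Caller-side sketch (illustrative only, not part of this file): mapping two
 * grant references from one foreign domain through the public xengnttab API
 * that ends up in osdep_gnttab_grant_map()/osdep_gnttab_unmap() above. The
 * peer_domid and ref values are placeholders; error handling is abbreviated.
 *
 *     xengnttab_handle *xgt = xengnttab_open(NULL, 0);
 *     uint32_t refs[2] = { ref0, ref1 };       // grant refs from the peer
 *     void *addr;
 *
 *     addr = xengnttab_map_domain_grant_refs(xgt, 2, peer_domid, refs,
 *                                            PROT_READ | PROT_WRITE);
 *     if ( addr )
 *     {
 *         // ... use the two shared pages at addr ...
 *         xengnttab_unmap(xgt, addr, 2);       // munmap + UNMAP_GRANT_REF
 *     }
 *     xengnttab_close(xgt);
 */
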
int osdep_gnttab_grant_copy(xengnttab_handle *xgt,
                            uint32_t count,
                            xengnttab_grant_copy_segment_t *segs)
{
    int rc;
    int fd = xgt->fd;
    struct ioctl_gntdev_grant_copy copy;

    BUILD_BUG_ON(sizeof(struct ioctl_gntdev_grant_copy_segment) !=
                 sizeof(xengnttab_grant_copy_segment_t));

    BUILD_BUG_ON(__alignof__(struct ioctl_gntdev_grant_copy_segment) !=
                 __alignof__(xengnttab_grant_copy_segment_t));

    BUILD_BUG_ON(offsetof(struct ioctl_gntdev_grant_copy_segment,
                          source.virt) !=
                 offsetof(xengnttab_grant_copy_segment_t,
                          source.virt));
    BUILD_BUG_ON(offsetof(struct ioctl_gntdev_grant_copy_segment,
                          source.foreign) !=
                 offsetof(xengnttab_grant_copy_segment_t,
                          source.foreign));
    BUILD_BUG_ON(offsetof(struct ioctl_gntdev_grant_copy_segment,
                          source.foreign.ref) !=
                 offsetof(xengnttab_grant_copy_segment_t,
                          source.foreign));
    BUILD_BUG_ON(offsetof(struct ioctl_gntdev_grant_copy_segment,
                          source.foreign.offset) !=
                 offsetof(xengnttab_grant_copy_segment_t,
                          source.foreign.offset));
    BUILD_BUG_ON(offsetof(struct ioctl_gntdev_grant_copy_segment,
                          source.foreign.domid) !=
                 offsetof(xengnttab_grant_copy_segment_t,
                          source.foreign.domid));

    BUILD_BUG_ON(offsetof(struct ioctl_gntdev_grant_copy_segment,
                          dest.virt) !=
                 offsetof(xengnttab_grant_copy_segment_t,
                          dest.virt));
    BUILD_BUG_ON(offsetof(struct ioctl_gntdev_grant_copy_segment,
                          dest.foreign) !=
                 offsetof(xengnttab_grant_copy_segment_t,
                          dest.foreign));
    BUILD_BUG_ON(offsetof(struct ioctl_gntdev_grant_copy_segment,
                          dest.foreign.ref) !=
                 offsetof(xengnttab_grant_copy_segment_t,
                          dest.foreign));
    BUILD_BUG_ON(offsetof(struct ioctl_gntdev_grant_copy_segment,
                          dest.foreign.offset) !=
                 offsetof(xengnttab_grant_copy_segment_t,
                          dest.foreign.offset));
    BUILD_BUG_ON(offsetof(struct ioctl_gntdev_grant_copy_segment,
                          dest.foreign.domid) !=
                 offsetof(xengnttab_grant_copy_segment_t,
                          dest.foreign.domid));

    BUILD_BUG_ON(offsetof(struct ioctl_gntdev_grant_copy_segment,
                          len) !=
                 offsetof(xengnttab_grant_copy_segment_t, len));
    BUILD_BUG_ON(offsetof(struct ioctl_gntdev_grant_copy_segment,
                          flags) !=
                 offsetof(xengnttab_grant_copy_segment_t, flags));
    BUILD_BUG_ON(offsetof(struct ioctl_gntdev_grant_copy_segment,
                          status) !=
                 offsetof(xengnttab_grant_copy_segment_t, status));

    copy.segments = (struct ioctl_gntdev_grant_copy_segment *)segs;
    copy.count = count;

    rc = ioctl(fd, IOCTL_GNTDEV_GRANT_COPY, &copy);
    if (rc)
        GTERROR(xgt->logger, "ioctl GRANT_COPY failed: %d", errno);

    return rc;
}

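/*
 * Caller-side sketch (illustrative only): copying one page's worth of data
 * from a foreign grant into a local buffer with the public
 * xengnttab_grant_copy() wrapper, which passes the segment array to the
 * ioctl above unmodified (hence the layout BUILD_BUG_ON checks). peer_domid,
 * ref and local_buf are placeholders; GNTCOPY_source_gref and GNTST_okay
 * come from the Xen grant table interface headers.
 *
 *     xengnttab_grant_copy_segment_t seg = {
 *         .source.foreign = { .ref = ref, .offset = 0, .domid = peer_domid },
 *         .dest.virt      = local_buf,
 *         .len            = 4096,
 *         .flags          = GNTCOPY_source_gref,
 *     };
 *
 *     if ( xengnttab_grant_copy(xgt, 1, &seg) == 0 &&
 *          seg.status == GNTST_okay )
 *     {
 *         // local_buf now holds a copy of the granted page
 *     }
 */
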
int osdep_gnttab_dmabuf_exp_from_refs(xengnttab_handle *xgt, uint32_t domid,
                                      uint32_t flags, uint32_t count,
                                      const uint32_t *refs,
                                      uint32_t *dmabuf_fd)
{
    struct ioctl_gntdev_dmabuf_exp_from_refs *from_refs = NULL;
    int rc = -1;

    if ( !count )
    {
        errno = EINVAL;
        goto out;
    }

    from_refs = malloc(sizeof(*from_refs) +
                       (count - 1) * sizeof(from_refs->refs[0]));
    if ( !from_refs )
    {
        errno = ENOMEM;
        goto out;
    }

    from_refs->flags = flags;
    from_refs->count = count;
    from_refs->domid = domid;

    memcpy(from_refs->refs, refs, count * sizeof(from_refs->refs[0]));

    if ( (rc = ioctl(xgt->fd, IOCTL_GNTDEV_DMABUF_EXP_FROM_REFS, from_refs)) )
    {
        GTERROR(xgt->logger, "ioctl DMABUF_EXP_FROM_REFS failed");
        goto out;
    }

    *dmabuf_fd = from_refs->fd;
    rc = 0;

out:
    free(from_refs);
    return rc;
}

int osdep_gnttab_dmabuf_exp_wait_released(xengnttab_handle *xgt,
                                          uint32_t fd, uint32_t wait_to_ms)
{
    struct ioctl_gntdev_dmabuf_exp_wait_released wait;
    int rc;

    wait.fd = fd;
    wait.wait_to_ms = wait_to_ms;

    if ( (rc = ioctl(xgt->fd, IOCTL_GNTDEV_DMABUF_EXP_WAIT_RELEASED, &wait)) )
    {
        if ( errno == ENOENT )
        {
            /* The buffer may have already been released. */
            errno = 0;
            rc = 0;
        } else
            GTERROR(xgt->logger, "ioctl DMABUF_EXP_WAIT_RELEASED failed");
    }

    return rc;
}

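/*
 * Caller-side sketch (illustrative only): exporting grant references from
 * peer_domid as a dma-buf file descriptor and later waiting for the importer
 * to release it. refs, nr_refs and peer_domid are placeholders; a flags
 * value of 0 is assumed to be acceptable here.
 *
 *     uint32_t dmabuf_fd;
 *
 *     if ( xengnttab_dmabuf_exp_from_refs(xgt, peer_domid, 0, nr_refs, refs,
 *                                         &dmabuf_fd) == 0 )
 *     {
 *         // ... hand dmabuf_fd to the importing driver or process ...
 *
 *         // Later, wait up to 1000 ms for the importer to drop the buffer.
 *         xengnttab_dmabuf_exp_wait_released(xgt, dmabuf_fd, 1000);
 *     }
 */
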
int osdep_gnttab_dmabuf_imp_to_refs(xengnttab_handle *xgt, uint32_t domid,
                                    uint32_t fd, uint32_t count, uint32_t *refs)
{
    struct ioctl_gntdev_dmabuf_imp_to_refs *to_refs = NULL;
    int rc = -1;

    if ( !count )
    {
        errno = EINVAL;
        goto out;
    }

    to_refs = malloc(sizeof(*to_refs) +
                     (count - 1) * sizeof(to_refs->refs[0]));
    if ( !to_refs )
    {
        errno = ENOMEM;
        goto out;
    }

    to_refs->fd = fd;
    to_refs->count = count;
    to_refs->domid = domid;

    if ( (rc = ioctl(xgt->fd, IOCTL_GNTDEV_DMABUF_IMP_TO_REFS, to_refs)) )
    {
        GTERROR(xgt->logger, "ioctl DMABUF_IMP_TO_REFS failed");
        goto out;
    }

    memcpy(refs, to_refs->refs, count * sizeof(*refs));
    rc = 0;

out:
    free(to_refs);
    return rc;
}

int osdep_gnttab_dmabuf_imp_release(xengnttab_handle *xgt, uint32_t fd)
{
    struct ioctl_gntdev_dmabuf_imp_release release;
    int rc;

    release.fd = fd;

    if ( (rc = ioctl(xgt->fd, IOCTL_GNTDEV_DMABUF_IMP_RELEASE, &release)) )
        GTERROR(xgt->logger, "ioctl DMABUF_IMP_RELEASE failed");

    return rc;
}

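/*
 * Caller-side sketch (illustrative only): importing an externally created
 * dma-buf and granting its pages to peer_domid, then dropping the import
 * once the peer is done with the references. dmabuf_fd, nr_pages and
 * peer_domid are placeholders supplied by the caller.
 *
 *     uint32_t refs[nr_pages];
 *
 *     if ( xengnttab_dmabuf_imp_to_refs(xgt, peer_domid, dmabuf_fd,
 *                                       nr_pages, refs) == 0 )
 *     {
 *         // ... advertise refs[] to the peer, e.g. via xenstore; once the
 *         // peer has finished with them, drop the import ...
 *
 *         xengnttab_dmabuf_imp_release(xgt, dmabuf_fd);
 *     }
 */
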
int osdep_gntshr_open(xengntshr_handle *xgs)
{
    int fd = open(DEVXEN "gntalloc", O_RDWR);
    if ( fd == -1 )
        return -1;
    xgs->fd = fd;
    return 0;
}

int osdep_gntshr_close(xengntshr_handle *xgs)
{
    if ( xgs->fd == -1 )
        return 0;

    return close(xgs->fd);
}

void *osdep_gntshr_share_pages(xengntshr_handle *xgs,
                               uint32_t domid, int count,
                               uint32_t *refs, int writable,
                               uint32_t notify_offset,
                               evtchn_port_t notify_port)
{
    struct ioctl_gntalloc_alloc_gref *gref_info = NULL;
    struct ioctl_gntalloc_unmap_notify notify;
    struct ioctl_gntalloc_dealloc_gref gref_drop;
    int fd = xgs->fd;
    int err;
    void *area = NULL;
    gref_info = malloc(sizeof(*gref_info) + count * sizeof(uint32_t));
    if (!gref_info)
        return NULL;
    gref_info->domid = domid;
    gref_info->flags = writable ? GNTALLOC_FLAG_WRITABLE : 0;
    gref_info->count = count;

    err = ioctl(fd, IOCTL_GNTALLOC_ALLOC_GREF, gref_info);
    if (err) {
        GSERROR(xgs->logger, "ioctl ALLOC_GREF failed");
        goto out;
    }

    area = mmap(NULL, count * PAGE_SIZE, PROT_READ | PROT_WRITE,
                MAP_SHARED, fd, gref_info->index);

    if (area == MAP_FAILED) {
        area = NULL;
        GSERROR(xgs->logger, "mmap failed");
        goto out_remove_fdmap;
    }

    notify.index = gref_info->index;
    notify.action = 0;
    if (notify_offset < PAGE_SIZE * count) {
        notify.index += notify_offset;
        notify.action |= UNMAP_NOTIFY_CLEAR_BYTE;
    }
    if (notify_port != -1) {
        notify.event_channel_port = notify_port;
        notify.action |= UNMAP_NOTIFY_SEND_EVENT;
    }
    if (notify.action)
        err = ioctl(fd, IOCTL_GNTALLOC_SET_UNMAP_NOTIFY, &notify);
    if (err) {
        GSERROR(xgs->logger, "ioctl SET_UNMAP_NOTIFY failed");
        munmap(area, count * PAGE_SIZE);
        area = NULL;
    }

    memcpy(refs, gref_info->gref_ids, count * sizeof(uint32_t));

 out_remove_fdmap:
    /* Dropping the grant allocation from the file descriptor does not
     * deallocate the pages: they stay alive until the mmap()ed mapping
     * above is also removed.
     */
    gref_drop.index = gref_info->index;
    gref_drop.count = count;
    ioctl(fd, IOCTL_GNTALLOC_DEALLOC_GREF, &gref_drop);
 out:
    free(gref_info);
    return area;
}

int osdep_gntshr_unshare(xengntshr_handle *xgs,
                         void *start_address, uint32_t count)
{
    return munmap(start_address, count * PAGE_SIZE);
}

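/*
 * Caller-side sketch (illustrative only): allocating two pages, sharing them
 * writably with peer_domid via the gntalloc-backed routines above, and
 * unsharing them again. peer_domid is a placeholder; the refs[] values would
 * be communicated to the peer out of band (e.g. via xenstore).
 *
 *     xengntshr_handle *xgs = xengntshr_open(NULL, 0);
 *     uint32_t refs[2];
 *     void *area;
 *
 *     area = xengntshr_share_pages(xgs, peer_domid, 2, refs, 1);
 *     if ( area )
 *     {
 *         // ... publish refs[0]/refs[1], exchange data through area ...
 *         xengntshr_unshare(xgs, area, 2);
 *     }
 *     xengntshr_close(xgs);
 */
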
/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */