1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES */
3 #include <stdlib.h>
4 #include <sys/mman.h>
5 #include <sys/eventfd.h>
6
7 #define __EXPORTED_HEADERS__
8 #include <linux/vfio.h>
9
10 #include "iommufd_utils.h"
11
12 static void *buffer;
13
14 static unsigned long PAGE_SIZE;
15 static unsigned long HUGEPAGE_SIZE;
16
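/* The mock iommu used by these tests works on a granule of half a CPU page, letting the tests below cover IOVAs and lengths that are not CPU-page aligned. */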
17 #define MOCK_PAGE_SIZE (PAGE_SIZE / 2)
18
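/* Read the THP PMD size from sysfs; fall back to 2MiB if it cannot be read. */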
19 static unsigned long get_huge_page_size(void)
20 {
21 char buf[80];
22 int ret;
23 int fd;
24
25 fd = open("/sys/kernel/mm/transparent_hugepage/hpage_pmd_size",
26 O_RDONLY);
27 if (fd < 0)
28 return 2 * 1024 * 1024;
29
30 ret = read(fd, buf, sizeof(buf));
31 close(fd);
32 if (ret <= 0 || ret == sizeof(buf))
33 return 2 * 1024 * 1024;
34 buf[ret] = 0;
35 return strtoul(buf, NULL, 10);
36 }
37
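/* Constructor run before main(): size the global buffer to 16 pages, align it to the huge page size, and back it with a shared anonymous mapping. */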
38 static __attribute__((constructor)) void setup_sizes(void)
39 {
40 void *vrc;
41 int rc;
42
43 PAGE_SIZE = sysconf(_SC_PAGE_SIZE);
44 HUGEPAGE_SIZE = get_huge_page_size();
45
46 BUFFER_SIZE = PAGE_SIZE * 16;
47 rc = posix_memalign(&buffer, HUGEPAGE_SIZE, BUFFER_SIZE);
48 assert(!rc);
49 assert(buffer);
50 assert((uintptr_t)buffer % HUGEPAGE_SIZE == 0);
51 vrc = mmap(buffer, BUFFER_SIZE, PROT_READ | PROT_WRITE,
52 MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
53 assert(vrc == buffer);
54 }
55
56 FIXTURE(iommufd)
57 {
58 int fd;
59 };
60
61 FIXTURE_SETUP(iommufd)
62 {
63 self->fd = open("/dev/iommu", O_RDWR);
64 ASSERT_NE(-1, self->fd);
65 }
66
67 FIXTURE_TEARDOWN(iommufd)
68 {
69 teardown_iommufd(self->fd, _metadata);
70 }
71
72 TEST_F(iommufd, simple_close)
73 {
74 }
75
76 TEST_F(iommufd, cmd_fail)
77 {
78 struct iommu_destroy cmd = { .size = sizeof(cmd), .id = 0 };
79
80 /* object id is invalid */
81 EXPECT_ERRNO(ENOENT, _test_ioctl_destroy(self->fd, 0));
82 /* Bad pointer */
83 EXPECT_ERRNO(EFAULT, ioctl(self->fd, IOMMU_DESTROY, NULL));
84 /* Unknown ioctl */
85 EXPECT_ERRNO(ENOTTY,
86 ioctl(self->fd, _IO(IOMMUFD_TYPE, IOMMUFD_CMD_BASE - 1),
87 &cmd));
88 }
89
90 TEST_F(iommufd, cmd_length)
91 {
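/* For each ioctl: a size shorter than the struct must fail with EINVAL, a longer size with non-zero trailing bytes must fail with E2BIG, and a longer size with zeroed trailing bytes must behave the same as the exact size. */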
92 #define TEST_LENGTH(_struct, _ioctl) \
93 { \
94 struct { \
95 struct _struct cmd; \
96 uint8_t extra; \
97 } cmd = { .cmd = { .size = sizeof(struct _struct) - 1 }, \
98 .extra = UINT8_MAX }; \
99 int old_errno; \
100 int rc; \
101 \
102 EXPECT_ERRNO(EINVAL, ioctl(self->fd, _ioctl, &cmd)); \
103 cmd.cmd.size = sizeof(struct _struct) + 1; \
104 EXPECT_ERRNO(E2BIG, ioctl(self->fd, _ioctl, &cmd)); \
105 cmd.cmd.size = sizeof(struct _struct); \
106 rc = ioctl(self->fd, _ioctl, &cmd); \
107 old_errno = errno; \
108 cmd.cmd.size = sizeof(struct _struct) + 1; \
109 cmd.extra = 0; \
110 if (rc) { \
111 EXPECT_ERRNO(old_errno, \
112 ioctl(self->fd, _ioctl, &cmd)); \
113 } else { \
114 ASSERT_EQ(0, ioctl(self->fd, _ioctl, &cmd)); \
115 } \
116 }
117
118 TEST_LENGTH(iommu_destroy, IOMMU_DESTROY);
119 TEST_LENGTH(iommu_ioas_alloc, IOMMU_IOAS_ALLOC);
120 TEST_LENGTH(iommu_ioas_iova_ranges, IOMMU_IOAS_IOVA_RANGES);
121 TEST_LENGTH(iommu_ioas_allow_iovas, IOMMU_IOAS_ALLOW_IOVAS);
122 TEST_LENGTH(iommu_ioas_map, IOMMU_IOAS_MAP);
123 TEST_LENGTH(iommu_ioas_copy, IOMMU_IOAS_COPY);
124 TEST_LENGTH(iommu_ioas_unmap, IOMMU_IOAS_UNMAP);
125 TEST_LENGTH(iommu_option, IOMMU_OPTION);
126 TEST_LENGTH(iommu_vfio_ioas, IOMMU_VFIO_IOAS);
127 #undef TEST_LENGTH
128 }
129
130 TEST_F(iommufd, cmd_ex_fail)
131 {
132 struct {
133 struct iommu_destroy cmd;
134 __u64 future;
135 } cmd = { .cmd = { .size = sizeof(cmd), .id = 0 } };
136
137 /* object id is invalid and command is longer */
138 EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_DESTROY, &cmd));
139 /* future area is non-zero */
140 cmd.future = 1;
141 EXPECT_ERRNO(E2BIG, ioctl(self->fd, IOMMU_DESTROY, &cmd));
142 /* Original command "works" */
143 cmd.cmd.size = sizeof(cmd.cmd);
144 EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_DESTROY, &cmd));
145 /* Short command fails */
146 cmd.cmd.size = sizeof(cmd.cmd) - 1;
147 EXPECT_ERRNO(EINVAL, ioctl(self->fd, IOMMU_DESTROY, &cmd));
148 }
149
150 TEST_F(iommufd, global_options)
151 {
152 struct iommu_option cmd = {
153 .size = sizeof(cmd),
154 .option_id = IOMMU_OPTION_RLIMIT_MODE,
155 .op = IOMMU_OPTION_OP_GET,
156 .val64 = 1,
157 };
158
159 cmd.option_id = IOMMU_OPTION_RLIMIT_MODE;
160 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
161 ASSERT_EQ(0, cmd.val64);
162
163 /* This requires root */
164 cmd.op = IOMMU_OPTION_OP_SET;
165 cmd.val64 = 1;
166 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
167 cmd.val64 = 2;
168 EXPECT_ERRNO(EINVAL, ioctl(self->fd, IOMMU_OPTION, &cmd));
169
170 cmd.op = IOMMU_OPTION_OP_GET;
171 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
172 ASSERT_EQ(1, cmd.val64);
173
174 cmd.op = IOMMU_OPTION_OP_SET;
175 cmd.val64 = 0;
176 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
177
178 cmd.op = IOMMU_OPTION_OP_GET;
179 cmd.option_id = IOMMU_OPTION_HUGE_PAGES;
180 EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_OPTION, &cmd));
181 cmd.op = IOMMU_OPTION_OP_SET;
182 EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_OPTION, &cmd));
183 }
184
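/* Fixture for IOAS-level tests: an IOAS with 0, 1 or 2 mock domains attached and, optionally, a reduced temporary memory limit (see the variants below). */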
185 FIXTURE(iommufd_ioas)
186 {
187 int fd;
188 uint32_t ioas_id;
189 uint32_t domain_id;
190 uint64_t base_iova;
191 };
192
193 FIXTURE_VARIANT(iommufd_ioas)
194 {
195 unsigned int mock_domains;
196 unsigned int memory_limit;
197 };
198
199 FIXTURE_SETUP(iommufd_ioas)
200 {
201 unsigned int i;
202
203
204 self->fd = open("/dev/iommu", O_RDWR);
205 ASSERT_NE(-1, self->fd);
206 test_ioctl_ioas_alloc(&self->ioas_id);
207
208 if (!variant->memory_limit) {
209 test_ioctl_set_default_memory_limit();
210 } else {
211 test_ioctl_set_temp_memory_limit(variant->memory_limit);
212 }
213
214 for (i = 0; i != variant->mock_domains; i++) {
215 test_cmd_mock_domain(self->ioas_id, NULL, &self->domain_id);
216 self->base_iova = MOCK_APERTURE_START;
217 }
218 }
219
220 FIXTURE_TEARDOWN(iommufd_ioas)
221 {
222 test_ioctl_set_default_memory_limit();
223 teardown_iommufd(self->fd, _metadata);
224 }
225
226 FIXTURE_VARIANT_ADD(iommufd_ioas, no_domain)
227 {
228 };
229
230 FIXTURE_VARIANT_ADD(iommufd_ioas, mock_domain)
231 {
232 .mock_domains = 1,
233 };
234
235 FIXTURE_VARIANT_ADD(iommufd_ioas, two_mock_domain)
236 {
237 .mock_domains = 2,
238 };
239
240 FIXTURE_VARIANT_ADD(iommufd_ioas, mock_domain_limit)
241 {
242 .mock_domains = 1,
243 .memory_limit = 16,
244 };
245
246 TEST_F(iommufd_ioas, ioas_auto_destroy)
247 {
248 }
249
250 TEST_F(iommufd_ioas, ioas_destroy)
251 {
252 if (self->domain_id) {
253 /* IOAS cannot be freed while a domain is on it */
254 EXPECT_ERRNO(EBUSY,
255 _test_ioctl_destroy(self->fd, self->ioas_id));
256 } else {
257 /* Can allocate and manually free an IOAS table */
258 test_ioctl_destroy(self->ioas_id);
259 }
260 }
261
262 TEST_F(iommufd_ioas, ioas_area_destroy)
263 {
264 /* Adding an area does not change ability to destroy */
265 test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE, self->base_iova);
266 if (self->domain_id)
267 EXPECT_ERRNO(EBUSY,
268 _test_ioctl_destroy(self->fd, self->ioas_id));
269 else
270 test_ioctl_destroy(self->ioas_id);
271 }
272
273 TEST_F(iommufd_ioas, ioas_area_auto_destroy)
274 {
275 int i;
276
277 /* Can allocate and automatically free an IOAS table with many areas */
278 for (i = 0; i != 10; i++) {
279 test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE,
280 self->base_iova + i * PAGE_SIZE);
281 }
282 }
283
284 TEST_F(iommufd_ioas, area)
285 {
286 int i;
287
288 /* Unmap fails if nothing is mapped */
289 for (i = 0; i != 10; i++)
290 test_err_ioctl_ioas_unmap(ENOENT, i * PAGE_SIZE, PAGE_SIZE);
291
292 /* Unmap works */
293 for (i = 0; i != 10; i++)
294 test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE,
295 self->base_iova + i * PAGE_SIZE);
296 for (i = 0; i != 10; i++)
297 test_ioctl_ioas_unmap(self->base_iova + i * PAGE_SIZE,
298 PAGE_SIZE);
299
300 /* Split fails */
301 test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE * 2,
302 self->base_iova + 16 * PAGE_SIZE);
303 test_err_ioctl_ioas_unmap(ENOENT, self->base_iova + 16 * PAGE_SIZE,
304 PAGE_SIZE);
305 test_err_ioctl_ioas_unmap(ENOENT, self->base_iova + 17 * PAGE_SIZE,
306 PAGE_SIZE);
307
308 /* Over map fails */
309 test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE * 2,
310 self->base_iova + 16 * PAGE_SIZE);
311 test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE,
312 self->base_iova + 16 * PAGE_SIZE);
313 test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE,
314 self->base_iova + 17 * PAGE_SIZE);
315 test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE * 2,
316 self->base_iova + 15 * PAGE_SIZE);
317 test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE * 3,
318 self->base_iova + 15 * PAGE_SIZE);
319
320 /* unmap all works */
321 test_ioctl_ioas_unmap(0, UINT64_MAX);
322
323 /* Unmap all succeeds on an empty IOAS */
324 test_ioctl_ioas_unmap(0, UINT64_MAX);
325 }
326
327 TEST_F(iommufd_ioas, unmap_fully_contained_areas)
328 {
329 uint64_t unmap_len;
330 int i;
331
332 /* Give no_domain some space to rewind base_iova */
333 self->base_iova += 4 * PAGE_SIZE;
334
335 for (i = 0; i != 4; i++)
336 test_ioctl_ioas_map_fixed(buffer, 8 * PAGE_SIZE,
337 self->base_iova + i * 16 * PAGE_SIZE);
338
339 /* Unmap not fully contained area doesn't work */
340 test_err_ioctl_ioas_unmap(ENOENT, self->base_iova - 4 * PAGE_SIZE,
341 8 * PAGE_SIZE);
342 test_err_ioctl_ioas_unmap(ENOENT,
343 self->base_iova + 3 * 16 * PAGE_SIZE +
344 8 * PAGE_SIZE - 4 * PAGE_SIZE,
345 8 * PAGE_SIZE);
346
347 /* Unmap fully contained areas works */
348 ASSERT_EQ(0, _test_ioctl_ioas_unmap(self->fd, self->ioas_id,
349 self->base_iova - 4 * PAGE_SIZE,
350 3 * 16 * PAGE_SIZE + 8 * PAGE_SIZE +
351 4 * PAGE_SIZE,
352 &unmap_len));
353 ASSERT_EQ(32 * PAGE_SIZE, unmap_len);
354 }
355
356 TEST_F(iommufd_ioas, area_auto_iova)
357 {
358 struct iommu_test_cmd test_cmd = {
359 .size = sizeof(test_cmd),
360 .op = IOMMU_TEST_OP_ADD_RESERVED,
361 .id = self->ioas_id,
362 .add_reserved = { .start = PAGE_SIZE * 4,
363 .length = PAGE_SIZE * 100 },
364 };
365 struct iommu_iova_range ranges[1] = {};
366 struct iommu_ioas_allow_iovas allow_cmd = {
367 .size = sizeof(allow_cmd),
368 .ioas_id = self->ioas_id,
369 .num_iovas = 1,
370 .allowed_iovas = (uintptr_t)ranges,
371 };
372 __u64 iovas[10];
373 int i;
374
375 /* Simple 4k pages */
376 for (i = 0; i != 10; i++)
377 test_ioctl_ioas_map(buffer, PAGE_SIZE, &iovas[i]);
378 for (i = 0; i != 10; i++)
379 test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE);
380
381 /* Kernel automatically aligns IOVAs properly */
382 for (i = 0; i != 10; i++) {
383 size_t length = PAGE_SIZE * (i + 1);
384
385 if (self->domain_id) {
386 test_ioctl_ioas_map(buffer, length, &iovas[i]);
387 } else {
388 test_ioctl_ioas_map((void *)(1UL << 31), length,
389 &iovas[i]);
390 }
391 EXPECT_EQ(0, iovas[i] % (1UL << (ffs(length) - 1)));
392 }
393 for (i = 0; i != 10; i++)
394 test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE * (i + 1));
395
396 /* Avoids a reserved region */
397 ASSERT_EQ(0,
398 ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED),
399 &test_cmd));
400 for (i = 0; i != 10; i++) {
401 size_t length = PAGE_SIZE * (i + 1);
402
403 test_ioctl_ioas_map(buffer, length, &iovas[i]);
404 EXPECT_EQ(0, iovas[i] % (1UL << (ffs(length) - 1)));
405 EXPECT_EQ(false,
406 iovas[i] > test_cmd.add_reserved.start &&
407 iovas[i] <
408 test_cmd.add_reserved.start +
409 test_cmd.add_reserved.length);
410 }
411 for (i = 0; i != 10; i++)
412 test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE * (i + 1));
413
414 /* Allowed region intersects with a reserved region */
415 ranges[0].start = PAGE_SIZE;
416 ranges[0].last = PAGE_SIZE * 600;
417 EXPECT_ERRNO(EADDRINUSE,
418 ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
419
420 /* Allocate from an allowed region */
421 if (self->domain_id) {
422 ranges[0].start = MOCK_APERTURE_START + PAGE_SIZE;
423 ranges[0].last = MOCK_APERTURE_START + PAGE_SIZE * 600 - 1;
424 } else {
425 ranges[0].start = PAGE_SIZE * 200;
426 ranges[0].last = PAGE_SIZE * 600 - 1;
427 }
428 ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
429 for (i = 0; i != 10; i++) {
430 size_t length = PAGE_SIZE * (i + 1);
431
432 test_ioctl_ioas_map(buffer, length, &iovas[i]);
433 EXPECT_EQ(0, iovas[i] % (1UL << (ffs(length) - 1)));
434 EXPECT_EQ(true, iovas[i] >= ranges[0].start);
435 EXPECT_EQ(true, iovas[i] <= ranges[0].last);
436 EXPECT_EQ(true, iovas[i] + length > ranges[0].start);
437 EXPECT_EQ(true, iovas[i] + length <= ranges[0].last + 1);
438 }
439 for (i = 0; i != 10; i++)
440 test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE * (i + 1));
441 }
442
443 TEST_F(iommufd_ioas, area_allowed)
444 {
445 struct iommu_test_cmd test_cmd = {
446 .size = sizeof(test_cmd),
447 .op = IOMMU_TEST_OP_ADD_RESERVED,
448 .id = self->ioas_id,
449 .add_reserved = { .start = PAGE_SIZE * 4,
450 .length = PAGE_SIZE * 100 },
451 };
452 struct iommu_iova_range ranges[1] = {};
453 struct iommu_ioas_allow_iovas allow_cmd = {
454 .size = sizeof(allow_cmd),
455 .ioas_id = self->ioas_id,
456 .num_iovas = 1,
457 .allowed_iovas = (uintptr_t)ranges,
458 };
459
460 /* Reserved intersects an allowed */
461 allow_cmd.num_iovas = 1;
462 ranges[0].start = self->base_iova;
463 ranges[0].last = ranges[0].start + PAGE_SIZE * 600;
464 ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
465 test_cmd.add_reserved.start = ranges[0].start + PAGE_SIZE;
466 test_cmd.add_reserved.length = PAGE_SIZE;
467 EXPECT_ERRNO(EADDRINUSE,
468 ioctl(self->fd,
469 _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED),
470 &test_cmd));
471 allow_cmd.num_iovas = 0;
472 ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
473
474 /* Allowed intersects a reserved */
475 ASSERT_EQ(0,
476 ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED),
477 &test_cmd));
478 allow_cmd.num_iovas = 1;
479 ranges[0].start = self->base_iova;
480 ranges[0].last = ranges[0].start + PAGE_SIZE * 600;
481 EXPECT_ERRNO(EADDRINUSE,
482 ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
483 }
484
485 TEST_F(iommufd_ioas, copy_area)
486 {
487 struct iommu_ioas_copy copy_cmd = {
488 .size = sizeof(copy_cmd),
489 .flags = IOMMU_IOAS_MAP_FIXED_IOVA,
490 .dst_ioas_id = self->ioas_id,
491 .src_ioas_id = self->ioas_id,
492 .length = PAGE_SIZE,
493 };
494
495 test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE, self->base_iova);
496
497 /* Copy inside a single IOAS */
498 copy_cmd.src_iova = self->base_iova;
499 copy_cmd.dst_iova = self->base_iova + PAGE_SIZE;
500 ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
501
502 /* Copy between IOAS's */
503 copy_cmd.src_iova = self->base_iova;
504 copy_cmd.dst_iova = 0;
505 test_ioctl_ioas_alloc(&copy_cmd.dst_ioas_id);
506 ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
507 }
508
509 TEST_F(iommufd_ioas, iova_ranges)
510 {
511 struct iommu_test_cmd test_cmd = {
512 .size = sizeof(test_cmd),
513 .op = IOMMU_TEST_OP_ADD_RESERVED,
514 .id = self->ioas_id,
515 .add_reserved = { .start = PAGE_SIZE, .length = PAGE_SIZE },
516 };
517 struct iommu_iova_range *ranges = buffer;
518 struct iommu_ioas_iova_ranges ranges_cmd = {
519 .size = sizeof(ranges_cmd),
520 .ioas_id = self->ioas_id,
521 .num_iovas = BUFFER_SIZE / sizeof(*ranges),
522 .allowed_iovas = (uintptr_t)ranges,
523 };
524
525 /* Range can be read */
526 ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd));
527 EXPECT_EQ(1, ranges_cmd.num_iovas);
528 if (!self->domain_id) {
529 EXPECT_EQ(0, ranges[0].start);
530 EXPECT_EQ(SIZE_MAX, ranges[0].last);
531 EXPECT_EQ(1, ranges_cmd.out_iova_alignment);
532 } else {
533 EXPECT_EQ(MOCK_APERTURE_START, ranges[0].start);
534 EXPECT_EQ(MOCK_APERTURE_LAST, ranges[0].last);
535 EXPECT_EQ(MOCK_PAGE_SIZE, ranges_cmd.out_iova_alignment);
536 }
537
538 /* Buffer too small */
539 memset(ranges, 0, BUFFER_SIZE);
540 ranges_cmd.num_iovas = 0;
541 EXPECT_ERRNO(EMSGSIZE,
542 ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd));
543 EXPECT_EQ(1, ranges_cmd.num_iovas);
544 EXPECT_EQ(0, ranges[0].start);
545 EXPECT_EQ(0, ranges[0].last);
546
547 /* 2 ranges */
548 ASSERT_EQ(0,
549 ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED),
550 &test_cmd));
551 ranges_cmd.num_iovas = BUFFER_SIZE / sizeof(*ranges);
552 ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd));
553 if (!self->domain_id) {
554 EXPECT_EQ(2, ranges_cmd.num_iovas);
555 EXPECT_EQ(0, ranges[0].start);
556 EXPECT_EQ(PAGE_SIZE - 1, ranges[0].last);
557 EXPECT_EQ(PAGE_SIZE * 2, ranges[1].start);
558 EXPECT_EQ(SIZE_MAX, ranges[1].last);
559 } else {
560 EXPECT_EQ(1, ranges_cmd.num_iovas);
561 EXPECT_EQ(MOCK_APERTURE_START, ranges[0].start);
562 EXPECT_EQ(MOCK_APERTURE_LAST, ranges[0].last);
563 }
564
565 /* Buffer too small */
566 memset(ranges, 0, BUFFER_SIZE);
567 ranges_cmd.num_iovas = 1;
568 if (!self->domain_id) {
569 EXPECT_ERRNO(EMSGSIZE, ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES,
570 &ranges_cmd));
571 EXPECT_EQ(2, ranges_cmd.num_iovas);
572 EXPECT_EQ(0, ranges[0].start);
573 EXPECT_EQ(PAGE_SIZE - 1, ranges[0].last);
574 } else {
575 ASSERT_EQ(0,
576 ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd));
577 EXPECT_EQ(1, ranges_cmd.num_iovas);
578 EXPECT_EQ(MOCK_APERTURE_START, ranges[0].start);
579 EXPECT_EQ(MOCK_APERTURE_LAST, ranges[0].last);
580 }
581 EXPECT_EQ(0, ranges[1].start);
582 EXPECT_EQ(0, ranges[1].last);
583 }
584
585 TEST_F(iommufd_ioas, access_pin)
586 {
587 struct iommu_test_cmd access_cmd = {
588 .size = sizeof(access_cmd),
589 .op = IOMMU_TEST_OP_ACCESS_PAGES,
590 .access_pages = { .iova = MOCK_APERTURE_START,
591 .length = BUFFER_SIZE,
592 .uptr = (uintptr_t)buffer },
593 };
594 struct iommu_test_cmd check_map_cmd = {
595 .size = sizeof(check_map_cmd),
596 .op = IOMMU_TEST_OP_MD_CHECK_MAP,
597 .check_map = { .iova = MOCK_APERTURE_START,
598 .length = BUFFER_SIZE,
599 .uptr = (uintptr_t)buffer },
600 };
601 uint32_t access_pages_id;
602 unsigned int npages;
603
604 test_cmd_create_access(self->ioas_id, &access_cmd.id,
605 MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES);
606
607 for (npages = 1; npages < BUFFER_SIZE / PAGE_SIZE; npages++) {
608 uint32_t mock_device_id;
609 uint32_t mock_hwpt_id;
610
611 access_cmd.access_pages.length = npages * PAGE_SIZE;
612
613 /* Single map/unmap */
614 test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE,
615 MOCK_APERTURE_START);
616 ASSERT_EQ(0, ioctl(self->fd,
617 _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
618 &access_cmd));
619 test_cmd_destroy_access_pages(
620 access_cmd.id,
621 access_cmd.access_pages.out_access_pages_id);
622
623 /* Double user */
624 ASSERT_EQ(0, ioctl(self->fd,
625 _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
626 &access_cmd));
627 access_pages_id = access_cmd.access_pages.out_access_pages_id;
628 ASSERT_EQ(0, ioctl(self->fd,
629 _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
630 &access_cmd));
631 test_cmd_destroy_access_pages(
632 access_cmd.id,
633 access_cmd.access_pages.out_access_pages_id);
634 test_cmd_destroy_access_pages(access_cmd.id, access_pages_id);
635
636 /* Add/remove a domain with a user */
637 ASSERT_EQ(0, ioctl(self->fd,
638 _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
639 &access_cmd));
640 test_cmd_mock_domain(self->ioas_id, &mock_device_id,
641 &mock_hwpt_id);
642 check_map_cmd.id = mock_hwpt_id;
643 ASSERT_EQ(0, ioctl(self->fd,
644 _IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_MAP),
645 &check_map_cmd));
646
647 test_ioctl_destroy(mock_device_id);
648 test_ioctl_destroy(mock_hwpt_id);
649 test_cmd_destroy_access_pages(
650 access_cmd.id,
651 access_cmd.access_pages.out_access_pages_id);
652
653 test_ioctl_ioas_unmap(MOCK_APERTURE_START, BUFFER_SIZE);
654 }
655 test_cmd_destroy_access(access_cmd.id);
656 }
657
658 TEST_F(iommufd_ioas, access_pin_unmap)
659 {
660 struct iommu_test_cmd access_pages_cmd = {
661 .size = sizeof(access_pages_cmd),
662 .op = IOMMU_TEST_OP_ACCESS_PAGES,
663 .access_pages = { .iova = MOCK_APERTURE_START,
664 .length = BUFFER_SIZE,
665 .uptr = (uintptr_t)buffer },
666 };
667
668 test_cmd_create_access(self->ioas_id, &access_pages_cmd.id,
669 MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES);
670 test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE, MOCK_APERTURE_START);
671 ASSERT_EQ(0,
672 ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
673 &access_pages_cmd));
674
675 /* Trigger the unmap op */
676 test_ioctl_ioas_unmap(MOCK_APERTURE_START, BUFFER_SIZE);
677
678 /* kernel removed the item for us */
679 test_err_destroy_access_pages(
680 ENOENT, access_pages_cmd.id,
681 access_pages_cmd.access_pages.out_access_pages_id);
682 }
683
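/* Sweep IOVAs and lengths around a page boundary and check that ACCESS_RW moves the expected bytes in both directions, then do one multi-page transfer. */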
684 static void check_access_rw(struct __test_metadata *_metadata, int fd,
685 unsigned int access_id, uint64_t iova,
686 unsigned int def_flags)
687 {
688 uint16_t tmp[32];
689 struct iommu_test_cmd access_cmd = {
690 .size = sizeof(access_cmd),
691 .op = IOMMU_TEST_OP_ACCESS_RW,
692 .id = access_id,
693 .access_rw = { .uptr = (uintptr_t)tmp },
694 };
695 uint16_t *buffer16 = buffer;
696 unsigned int i;
697 void *tmp2;
698
699 for (i = 0; i != BUFFER_SIZE / sizeof(*buffer16); i++)
700 buffer16[i] = rand();
701
702 for (access_cmd.access_rw.iova = iova + PAGE_SIZE - 50;
703 access_cmd.access_rw.iova < iova + PAGE_SIZE + 50;
704 access_cmd.access_rw.iova++) {
705 for (access_cmd.access_rw.length = 1;
706 access_cmd.access_rw.length < sizeof(tmp);
707 access_cmd.access_rw.length++) {
708 access_cmd.access_rw.flags = def_flags;
709 ASSERT_EQ(0, ioctl(fd,
710 _IOMMU_TEST_CMD(
711 IOMMU_TEST_OP_ACCESS_RW),
712 &access_cmd));
713 ASSERT_EQ(0,
714 memcmp(buffer + (access_cmd.access_rw.iova -
715 iova),
716 tmp, access_cmd.access_rw.length));
717
718 for (i = 0; i != ARRAY_SIZE(tmp); i++)
719 tmp[i] = rand();
720 access_cmd.access_rw.flags = def_flags |
721 MOCK_ACCESS_RW_WRITE;
722 ASSERT_EQ(0, ioctl(fd,
723 _IOMMU_TEST_CMD(
724 IOMMU_TEST_OP_ACCESS_RW),
725 &access_cmd));
726 ASSERT_EQ(0,
727 memcmp(buffer + (access_cmd.access_rw.iova -
728 iova),
729 tmp, access_cmd.access_rw.length));
730 }
731 }
732
733 /* Multi-page test */
734 tmp2 = malloc(BUFFER_SIZE);
735 ASSERT_NE(NULL, tmp2);
736 access_cmd.access_rw.iova = iova;
737 access_cmd.access_rw.length = BUFFER_SIZE;
738 access_cmd.access_rw.flags = def_flags;
739 access_cmd.access_rw.uptr = (uintptr_t)tmp2;
740 ASSERT_EQ(0, ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_RW),
741 &access_cmd));
742 ASSERT_EQ(0, memcmp(buffer, tmp2, access_cmd.access_rw.length));
743 free(tmp2);
744 }
745
746 TEST_F(iommufd_ioas, access_rw)
747 {
748 __u32 access_id;
749 __u64 iova;
750
751 test_cmd_create_access(self->ioas_id, &access_id, 0);
752 test_ioctl_ioas_map(buffer, BUFFER_SIZE, &iova);
753 check_access_rw(_metadata, self->fd, access_id, iova, 0);
754 check_access_rw(_metadata, self->fd, access_id, iova,
755 MOCK_ACCESS_RW_SLOW_PATH);
756 test_ioctl_ioas_unmap(iova, BUFFER_SIZE);
757 test_cmd_destroy_access(access_id);
758 }
759
760 TEST_F(iommufd_ioas, access_rw_unaligned)
761 {
762 __u32 access_id;
763 __u64 iova;
764
765 test_cmd_create_access(self->ioas_id, &access_id, 0);
766
767 /* Unaligned pages */
768 iova = self->base_iova + MOCK_PAGE_SIZE;
769 test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE, iova);
770 check_access_rw(_metadata, self->fd, access_id, iova, 0);
771 test_ioctl_ioas_unmap(iova, BUFFER_SIZE);
772 test_cmd_destroy_access(access_id);
773 }
774
775 TEST_F(iommufd_ioas, fork_gone)
776 {
777 __u32 access_id;
778 pid_t child;
779
780 test_cmd_create_access(self->ioas_id, &access_id, 0);
781
782 /* Create a mapping with a different mm */
783 child = fork();
784 if (!child) {
785 test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE,
786 MOCK_APERTURE_START);
787 exit(0);
788 }
789 ASSERT_NE(-1, child);
790 ASSERT_EQ(child, waitpid(child, NULL, 0));
791
792 if (self->domain_id) {
793 /*
794 * If a domain already existed then everything was pinned within
795 * the fork, so this copies from one domain to another.
796 */
797 test_cmd_mock_domain(self->ioas_id, NULL, NULL);
798 check_access_rw(_metadata, self->fd, access_id,
799 MOCK_APERTURE_START, 0);
800
801 } else {
802 /*
803 * Otherwise we need to actually pin pages which can't happen
804 * since the fork is gone.
805 */
806 test_err_mock_domain(EFAULT, self->ioas_id, NULL, NULL);
807 }
808
809 test_cmd_destroy_access(access_id);
810 }
811
812 TEST_F(iommufd_ioas, fork_present)
813 {
814 __u32 access_id;
815 int pipefds[2];
816 uint64_t tmp;
817 pid_t child;
818 int efd;
819
820 test_cmd_create_access(self->ioas_id, &access_id, 0);
821
822 ASSERT_EQ(0, pipe2(pipefds, O_CLOEXEC));
823 efd = eventfd(0, EFD_CLOEXEC);
824 ASSERT_NE(-1, efd);
825
826 /* Create a mapping with a different mm */
827 child = fork();
828 if (!child) {
829 __u64 iova;
830 uint64_t one = 1;
831
832 close(pipefds[1]);
833 test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE,
834 MOCK_APERTURE_START);
835 if (write(efd, &one, sizeof(one)) != sizeof(one))
836 exit(100);
837 if (read(pipefds[0], &iova, 1) != 1)
838 exit(100);
839 exit(0);
840 }
841 close(pipefds[0]);
842 ASSERT_NE(-1, child);
843 ASSERT_EQ(8, read(efd, &tmp, sizeof(tmp)));
844
845 /* Read pages from the remote process */
846 test_cmd_mock_domain(self->ioas_id, NULL, NULL);
847 check_access_rw(_metadata, self->fd, access_id, MOCK_APERTURE_START, 0);
848
849 ASSERT_EQ(0, close(pipefds[1]));
850 ASSERT_EQ(child, waitpid(child, NULL, 0));
851
852 test_cmd_destroy_access(access_id);
853 }
854
855 TEST_F(iommufd_ioas, ioas_option_huge_pages)
856 {
857 struct iommu_option cmd = {
858 .size = sizeof(cmd),
859 .option_id = IOMMU_OPTION_HUGE_PAGES,
860 .op = IOMMU_OPTION_OP_GET,
861 .val64 = 3,
862 .object_id = self->ioas_id,
863 };
864
865 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
866 ASSERT_EQ(1, cmd.val64);
867
868 cmd.op = IOMMU_OPTION_OP_SET;
869 cmd.val64 = 0;
870 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
871
872 cmd.op = IOMMU_OPTION_OP_GET;
873 cmd.val64 = 3;
874 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
875 ASSERT_EQ(0, cmd.val64);
876
877 cmd.op = IOMMU_OPTION_OP_SET;
878 cmd.val64 = 2;
879 EXPECT_ERRNO(EINVAL, ioctl(self->fd, IOMMU_OPTION, &cmd));
880
881 cmd.op = IOMMU_OPTION_OP_SET;
882 cmd.val64 = 1;
883 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
884 }
885
886 TEST_F(iommufd_ioas, ioas_iova_alloc)
887 {
888 unsigned int length;
889 __u64 iova;
890
891 for (length = 1; length != PAGE_SIZE * 2; length++) {
892 if (variant->mock_domains && (length % MOCK_PAGE_SIZE)) {
893 test_err_ioctl_ioas_map(EINVAL, buffer, length, &iova);
894 } else {
895 test_ioctl_ioas_map(buffer, length, &iova);
896 test_ioctl_ioas_unmap(iova, length);
897 }
898 }
899 }
900
901 TEST_F(iommufd_ioas, ioas_align_change)
902 {
903 struct iommu_option cmd = {
904 .size = sizeof(cmd),
905 .option_id = IOMMU_OPTION_HUGE_PAGES,
906 .op = IOMMU_OPTION_OP_SET,
907 .object_id = self->ioas_id,
908 /* 0 means everything must be aligned to PAGE_SIZE */
909 .val64 = 0,
910 };
911
912 /*
913 * We cannot upgrade the alignment using OPTION_HUGE_PAGES when a domain
914 * and map are present.
915 */
916 if (variant->mock_domains)
917 return;
918
919 /*
920 * We can upgrade to PAGE_SIZE alignment when things are aligned right
921 */
922 test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE, MOCK_APERTURE_START);
923 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
924
925 /* Misalignment is rejected at map time */
926 test_err_ioctl_ioas_map_fixed(EINVAL, buffer + MOCK_PAGE_SIZE,
927 PAGE_SIZE,
928 MOCK_APERTURE_START + PAGE_SIZE);
929 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
930
931 /* Reduce alignment */
932 cmd.val64 = 1;
933 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
934
935 /* Confirm misalignment is rejected during alignment upgrade */
936 test_ioctl_ioas_map_fixed(buffer + MOCK_PAGE_SIZE, PAGE_SIZE,
937 MOCK_APERTURE_START + PAGE_SIZE);
938 cmd.val64 = 0;
939 EXPECT_ERRNO(EADDRINUSE, ioctl(self->fd, IOMMU_OPTION, &cmd));
940
941 test_ioctl_ioas_unmap(MOCK_APERTURE_START + PAGE_SIZE, PAGE_SIZE);
942 test_ioctl_ioas_unmap(MOCK_APERTURE_START, PAGE_SIZE);
943 }
944
945 TEST_F(iommufd_ioas, copy_sweep)
946 {
947 struct iommu_ioas_copy copy_cmd = {
948 .size = sizeof(copy_cmd),
949 .flags = IOMMU_IOAS_MAP_FIXED_IOVA,
950 .src_ioas_id = self->ioas_id,
951 .dst_iova = MOCK_APERTURE_START,
952 .length = MOCK_PAGE_SIZE,
953 };
954 unsigned int dst_ioas_id;
955 uint64_t last_iova;
956 uint64_t iova;
957
958 test_ioctl_ioas_alloc(&dst_ioas_id);
959 copy_cmd.dst_ioas_id = dst_ioas_id;
960
961 if (variant->mock_domains)
962 last_iova = MOCK_APERTURE_START + BUFFER_SIZE - 1;
963 else
964 last_iova = MOCK_APERTURE_START + BUFFER_SIZE - 2;
965
966 test_ioctl_ioas_map_fixed(buffer, last_iova - MOCK_APERTURE_START + 1,
967 MOCK_APERTURE_START);
968
969 for (iova = MOCK_APERTURE_START - PAGE_SIZE; iova <= last_iova;
970 iova += 511) {
971 copy_cmd.src_iova = iova;
972 if (iova < MOCK_APERTURE_START ||
973 iova + copy_cmd.length - 1 > last_iova) {
974 EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_IOAS_COPY,
975 &copy_cmd));
976 } else {
977 ASSERT_EQ(0,
978 ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
979 test_ioctl_ioas_unmap_id(dst_ioas_id, copy_cmd.dst_iova,
980 copy_cmd.length);
981 }
982 }
983
984 test_ioctl_destroy(dst_ioas_id);
985 }
986
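/* Fixture for tests that always run with one or two mock domains attached; the hugepage variants use a MAP_HUGETLB buffer spanning two huge pages. */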
987 FIXTURE(iommufd_mock_domain)
988 {
989 int fd;
990 uint32_t ioas_id;
991 uint32_t domain_id;
992 uint32_t domain_ids[2];
993 int mmap_flags;
994 size_t mmap_buf_size;
995 };
996
997 FIXTURE_VARIANT(iommufd_mock_domain)
998 {
999 unsigned int mock_domains;
1000 bool hugepages;
1001 };
1002
1003 FIXTURE_SETUP(iommufd_mock_domain)
1004 {
1005 unsigned int i;
1006
1007 self->fd = open("/dev/iommu", O_RDWR);
1008 ASSERT_NE(-1, self->fd);
1009 test_ioctl_ioas_alloc(&self->ioas_id);
1010
1011 ASSERT_GE(ARRAY_SIZE(self->domain_ids), variant->mock_domains);
1012
1013 for (i = 0; i != variant->mock_domains; i++)
1014 test_cmd_mock_domain(self->ioas_id, NULL, &self->domain_ids[i]);
1015 self->domain_id = self->domain_ids[0];
1016
1017 self->mmap_flags = MAP_SHARED | MAP_ANONYMOUS;
1018 self->mmap_buf_size = PAGE_SIZE * 8;
1019 if (variant->hugepages) {
1020 /*
1021 * MAP_POPULATE will cause the kernel to fail mmap if THPs are
1022 * not available.
1023 */
1024 self->mmap_flags |= MAP_HUGETLB | MAP_POPULATE;
1025 self->mmap_buf_size = HUGEPAGE_SIZE * 2;
1026 }
1027 }
1028
1029 FIXTURE_TEARDOWN(iommufd_mock_domain)
1030 {
1031 teardown_iommufd(self->fd, _metadata);
1032 }
1033
1034 FIXTURE_VARIANT_ADD(iommufd_mock_domain, one_domain)
1035 {
1036 .mock_domains = 1,
1037 .hugepages = false,
1038 };
1039
1040 FIXTURE_VARIANT_ADD(iommufd_mock_domain, two_domains)
1041 {
1042 .mock_domains = 2,
1043 .hugepages = false,
1044 };
1045
1046 FIXTURE_VARIANT_ADD(iommufd_mock_domain, one_domain_hugepage)
1047 {
1048 .mock_domains = 1,
1049 .hugepages = true,
1050 };
1051
1052 FIXTURE_VARIANT_ADD(iommufd_mock_domain, two_domains_hugepage)
1053 {
1054 .mock_domains = 2,
1055 .hugepages = true,
1056 };
1057
1058 /* Have the kernel check that the user pages made it to the iommu_domain */
1059 #define check_mock_iova(_ptr, _iova, _length) \
1060 ({ \
1061 struct iommu_test_cmd check_map_cmd = { \
1062 .size = sizeof(check_map_cmd), \
1063 .op = IOMMU_TEST_OP_MD_CHECK_MAP, \
1064 .id = self->domain_id, \
1065 .check_map = { .iova = _iova, \
1066 .length = _length, \
1067 .uptr = (uintptr_t)(_ptr) }, \
1068 }; \
1069 ASSERT_EQ(0, \
1070 ioctl(self->fd, \
1071 _IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_MAP), \
1072 &check_map_cmd)); \
1073 if (self->domain_ids[1]) { \
1074 check_map_cmd.id = self->domain_ids[1]; \
1075 ASSERT_EQ(0, \
1076 ioctl(self->fd, \
1077 _IOMMU_TEST_CMD( \
1078 IOMMU_TEST_OP_MD_CHECK_MAP), \
1079 &check_map_cmd)); \
1080 } \
1081 })
1082
1083 TEST_F(iommufd_mock_domain, basic)
1084 {
1085 size_t buf_size = self->mmap_buf_size;
1086 uint8_t *buf;
1087 __u64 iova;
1088
1089 /* Simple one page map */
1090 test_ioctl_ioas_map(buffer, PAGE_SIZE, &iova);
1091 check_mock_iova(buffer, iova, PAGE_SIZE);
1092
1093 buf = mmap(0, buf_size, PROT_READ | PROT_WRITE, self->mmap_flags, -1,
1094 0);
1095 ASSERT_NE(MAP_FAILED, buf);
1096
1097 /* EFAULT half way through mapping */
1098 ASSERT_EQ(0, munmap(buf + buf_size / 2, buf_size / 2));
1099 test_err_ioctl_ioas_map(EFAULT, buf, buf_size, &iova);
1100
1101 /* EFAULT on first page */
1102 ASSERT_EQ(0, munmap(buf, buf_size / 2));
1103 test_err_ioctl_ioas_map(EFAULT, buf, buf_size, &iova);
1104 }
1105
1106 TEST_F(iommufd_mock_domain, ro_unshare)
1107 {
1108 uint8_t *buf;
1109 __u64 iova;
1110 int fd;
1111
1112 fd = open("/proc/self/exe", O_RDONLY);
1113 ASSERT_NE(-1, fd);
1114
1115 buf = mmap(0, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
1116 ASSERT_NE(MAP_FAILED, buf);
1117 close(fd);
1118
1119 /*
1120 * There have been lots of changes to the "unshare" mechanism in
1121 * get_user_pages(), make sure it works right. The write to the page
1122 * after we map it for reading should not change the assigned PFN.
1123 */
1124 ASSERT_EQ(0,
1125 _test_ioctl_ioas_map(self->fd, self->ioas_id, buf, PAGE_SIZE,
1126 &iova, IOMMU_IOAS_MAP_READABLE));
1127 check_mock_iova(buf, iova, PAGE_SIZE);
1128 memset(buf, 1, PAGE_SIZE);
1129 check_mock_iova(buf, iova, PAGE_SIZE);
1130 ASSERT_EQ(0, munmap(buf, PAGE_SIZE));
1131 }
1132
1133 TEST_F(iommufd_mock_domain, all_aligns)
1134 {
1135 size_t test_step = variant->hugepages ? (self->mmap_buf_size / 16) :
1136 MOCK_PAGE_SIZE;
1137 size_t buf_size = self->mmap_buf_size;
1138 unsigned int start;
1139 unsigned int end;
1140 uint8_t *buf;
1141
1142 buf = mmap(0, buf_size, PROT_READ | PROT_WRITE, self->mmap_flags, -1,
1143 0);
1144 ASSERT_NE(MAP_FAILED, buf);
1145 check_refs(buf, buf_size, 0);
1146
1147 /*
1148 * Map every combination of page size and alignment within a big region,
1149 * doing fewer combinations in the hugepage case since it takes so long to finish.
1150 */
1151 for (start = 0; start < buf_size; start += test_step) {
1152 if (variant->hugepages)
1153 end = buf_size;
1154 else
1155 end = start + MOCK_PAGE_SIZE;
1156 for (; end < buf_size; end += MOCK_PAGE_SIZE) {
1157 size_t length = end - start;
1158 __u64 iova;
1159
1160 test_ioctl_ioas_map(buf + start, length, &iova);
1161 check_mock_iova(buf + start, iova, length);
1162 check_refs(buf + start / PAGE_SIZE * PAGE_SIZE,
1163 end / PAGE_SIZE * PAGE_SIZE -
1164 start / PAGE_SIZE * PAGE_SIZE,
1165 1);
1166
1167 test_ioctl_ioas_unmap(iova, length);
1168 }
1169 }
1170 check_refs(buf, buf_size, 0);
1171 ASSERT_EQ(0, munmap(buf, buf_size));
1172 }
1173
1174 TEST_F(iommufd_mock_domain, all_aligns_copy)
1175 {
1176 size_t test_step = variant->hugepages ? self->mmap_buf_size / 16 :
1177 MOCK_PAGE_SIZE;
1178 size_t buf_size = self->mmap_buf_size;
1179 unsigned int start;
1180 unsigned int end;
1181 uint8_t *buf;
1182
1183 buf = mmap(0, buf_size, PROT_READ | PROT_WRITE, self->mmap_flags, -1,
1184 0);
1185 ASSERT_NE(MAP_FAILED, buf);
1186 check_refs(buf, buf_size, 0);
1187
1188 /*
1189 * Map every combination of page size and alignment within a big region,
1190 * doing fewer combinations in the hugepage case since it takes so long to finish.
1191 */
1192 for (start = 0; start < buf_size; start += test_step) {
1193 if (variant->hugepages)
1194 end = buf_size;
1195 else
1196 end = start + MOCK_PAGE_SIZE;
1197 for (; end < buf_size; end += MOCK_PAGE_SIZE) {
1198 size_t length = end - start;
1199 unsigned int old_id;
1200 uint32_t mock_device_id;
1201 __u64 iova;
1202
1203 test_ioctl_ioas_map(buf + start, length, &iova);
1204
1205 /* Add and destroy a domain while the area exists */
1206 old_id = self->domain_ids[1];
1207 test_cmd_mock_domain(self->ioas_id, &mock_device_id,
1208 &self->domain_ids[1]);
1209
1210 check_mock_iova(buf + start, iova, length);
1211 check_refs(buf + start / PAGE_SIZE * PAGE_SIZE,
1212 end / PAGE_SIZE * PAGE_SIZE -
1213 start / PAGE_SIZE * PAGE_SIZE,
1214 1);
1215
1216 test_ioctl_destroy(mock_device_id);
1217 test_ioctl_destroy(self->domain_ids[1]);
1218 self->domain_ids[1] = old_id;
1219
1220 test_ioctl_ioas_unmap(iova, length);
1221 }
1222 }
1223 check_refs(buf, buf_size, 0);
1224 ASSERT_EQ(0, munmap(buf, buf_size));
1225 }
1226
1227 TEST_F(iommufd_mock_domain, user_copy)
1228 {
1229 struct iommu_test_cmd access_cmd = {
1230 .size = sizeof(access_cmd),
1231 .op = IOMMU_TEST_OP_ACCESS_PAGES,
1232 .access_pages = { .length = BUFFER_SIZE,
1233 .uptr = (uintptr_t)buffer },
1234 };
1235 struct iommu_ioas_copy copy_cmd = {
1236 .size = sizeof(copy_cmd),
1237 .flags = IOMMU_IOAS_MAP_FIXED_IOVA,
1238 .dst_ioas_id = self->ioas_id,
1239 .dst_iova = MOCK_APERTURE_START,
1240 .length = BUFFER_SIZE,
1241 };
1242 unsigned int ioas_id;
1243
1244 /* Pin the pages in an IOAS with no domains then copy to an IOAS with domains */
1245 test_ioctl_ioas_alloc(&ioas_id);
1246 test_ioctl_ioas_map_id(ioas_id, buffer, BUFFER_SIZE,
1247 &copy_cmd.src_iova);
1248
1249 test_cmd_create_access(ioas_id, &access_cmd.id,
1250 MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES);
1251
1252 access_cmd.access_pages.iova = copy_cmd.src_iova;
1253 ASSERT_EQ(0,
1254 ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
1255 &access_cmd));
1256 copy_cmd.src_ioas_id = ioas_id;
1257 ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
1258 check_mock_iova(buffer, MOCK_APERTURE_START, BUFFER_SIZE);
1259
1260 test_cmd_destroy_access_pages(
1261 access_cmd.id, access_cmd.access_pages.out_access_pages_id);
1262 test_cmd_destroy_access(access_cmd.id);
1263
1264 test_ioctl_destroy(ioas_id);
1265 }
1266
1267 /* VFIO compatibility IOCTLs */
1268
1269 TEST_F(iommufd, simple_ioctls)
1270 {
1271 ASSERT_EQ(VFIO_API_VERSION, ioctl(self->fd, VFIO_GET_API_VERSION));
1272 ASSERT_EQ(1, ioctl(self->fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1v2_IOMMU));
1273 }
1274
1275 TEST_F(iommufd, unmap_cmd)
1276 {
1277 struct vfio_iommu_type1_dma_unmap unmap_cmd = {
1278 .iova = MOCK_APERTURE_START,
1279 .size = PAGE_SIZE,
1280 };
1281
1282 unmap_cmd.argsz = 1;
1283 EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
1284
1285 unmap_cmd.argsz = sizeof(unmap_cmd);
1286 unmap_cmd.flags = 1 << 31;
1287 EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
1288
1289 unmap_cmd.flags = 0;
1290 EXPECT_ERRNO(ENODEV, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
1291 }
1292
1293 TEST_F(iommufd, map_cmd)
1294 {
1295 struct vfio_iommu_type1_dma_map map_cmd = {
1296 .iova = MOCK_APERTURE_START,
1297 .size = PAGE_SIZE,
1298 .vaddr = (__u64)buffer,
1299 };
1300
1301 map_cmd.argsz = 1;
1302 EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
1303
1304 map_cmd.argsz = sizeof(map_cmd);
1305 map_cmd.flags = 1 << 31;
1306 EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
1307
1308 /* Requires a domain to be attached */
1309 map_cmd.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE;
1310 EXPECT_ERRNO(ENODEV, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
1311 }
1312
1313 TEST_F(iommufd, info_cmd)
1314 {
1315 struct vfio_iommu_type1_info info_cmd = {};
1316
1317 /* Invalid argsz */
1318 info_cmd.argsz = 1;
1319 EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_GET_INFO, &info_cmd));
1320
1321 info_cmd.argsz = sizeof(info_cmd);
1322 EXPECT_ERRNO(ENODEV, ioctl(self->fd, VFIO_IOMMU_GET_INFO, &info_cmd));
1323 }
1324
1325 TEST_F(iommufd, set_iommu_cmd)
1326 {
1327 /* Requires a domain to be attached */
1328 EXPECT_ERRNO(ENODEV,
1329 ioctl(self->fd, VFIO_SET_IOMMU, VFIO_TYPE1v2_IOMMU));
1330 EXPECT_ERRNO(ENODEV, ioctl(self->fd, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU));
1331 }
1332
1333 TEST_F(iommufd, vfio_ioas)
1334 {
1335 struct iommu_vfio_ioas vfio_ioas_cmd = {
1336 .size = sizeof(vfio_ioas_cmd),
1337 .op = IOMMU_VFIO_IOAS_GET,
1338 };
1339 __u32 ioas_id;
1340
1341 /* ENODEV if there is no compat ioas */
1342 EXPECT_ERRNO(ENODEV, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
1343
1344 /* Invalid id for set */
1345 vfio_ioas_cmd.op = IOMMU_VFIO_IOAS_SET;
1346 EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
1347
1348 /* Valid id for set */
1349 test_ioctl_ioas_alloc(&ioas_id);
1350 vfio_ioas_cmd.ioas_id = ioas_id;
1351 ASSERT_EQ(0, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
1352
1353 /* Same id comes back from get */
1354 vfio_ioas_cmd.op = IOMMU_VFIO_IOAS_GET;
1355 ASSERT_EQ(0, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
1356 ASSERT_EQ(ioas_id, vfio_ioas_cmd.ioas_id);
1357
1358 /* Clear works */
1359 vfio_ioas_cmd.op = IOMMU_VFIO_IOAS_CLEAR;
1360 ASSERT_EQ(0, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
1361 vfio_ioas_cmd.op = IOMMU_VFIO_IOAS_GET;
1362 EXPECT_ERRNO(ENODEV, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
1363 }
1364
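/* Fixture for the VFIO type1/type1v2 compatibility path: a mock domain whose IOAS is installed as the compat IOAS and then attached with VFIO_SET_IOMMU. */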
1365 FIXTURE(vfio_compat_mock_domain)
1366 {
1367 int fd;
1368 uint32_t ioas_id;
1369 };
1370
1371 FIXTURE_VARIANT(vfio_compat_mock_domain)
1372 {
1373 unsigned int version;
1374 };
1375
1376 FIXTURE_SETUP(vfio_compat_mock_domain)
1377 {
1378 struct iommu_vfio_ioas vfio_ioas_cmd = {
1379 .size = sizeof(vfio_ioas_cmd),
1380 .op = IOMMU_VFIO_IOAS_SET,
1381 };
1382
1383 self->fd = open("/dev/iommu", O_RDWR);
1384 ASSERT_NE(-1, self->fd);
1385
1386 /* Create what VFIO would consider a group */
1387 test_ioctl_ioas_alloc(&self->ioas_id);
1388 test_cmd_mock_domain(self->ioas_id, NULL, NULL);
1389
1390 /* Attach it to the vfio compat */
1391 vfio_ioas_cmd.ioas_id = self->ioas_id;
1392 ASSERT_EQ(0, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
1393 ASSERT_EQ(0, ioctl(self->fd, VFIO_SET_IOMMU, variant->version));
1394 }
1395
1396 FIXTURE_TEARDOWN(vfio_compat_mock_domain)
1397 {
1398 teardown_iommufd(self->fd, _metadata);
1399 }
1400
1401 FIXTURE_VARIANT_ADD(vfio_compat_mock_domain, Ver1v2)
1402 {
1403 .version = VFIO_TYPE1v2_IOMMU,
1404 };
1405
1406 FIXTURE_VARIANT_ADD(vfio_compat_mock_domain, Ver1v0)
1407 {
1408 .version = VFIO_TYPE1_IOMMU,
1409 };
1410
1411 TEST_F(vfio_compat_mock_domain, simple_close)
1412 {
1413 }
1414
1415 TEST_F(vfio_compat_mock_domain, option_huge_pages)
1416 {
1417 struct iommu_option cmd = {
1418 .size = sizeof(cmd),
1419 .option_id = IOMMU_OPTION_HUGE_PAGES,
1420 .op = IOMMU_OPTION_OP_GET,
1421 .val64 = 3,
1422 .object_id = self->ioas_id,
1423 };
1424
1425 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
1426 if (variant->version == VFIO_TYPE1_IOMMU) {
1427 ASSERT_EQ(0, cmd.val64);
1428 } else {
1429 ASSERT_EQ(1, cmd.val64);
1430 }
1431 }
1432
1433 /*
1434 * Execute an ioctl command stored in buffer and check that the result does not
1435 * overflow memory.
1436 */
1437 static bool is_filled(const void *buf, uint8_t c, size_t len)
1438 {
1439 const uint8_t *cbuf = buf;
1440
1441 for (; len; cbuf++, len--)
1442 if (*cbuf != c)
1443 return false;
1444 return true;
1445 }
1446
1447 #define ioctl_check_buf(fd, cmd) \
1448 ({ \
1449 size_t _cmd_len = *(__u32 *)buffer; \
1450 \
1451 memset(buffer + _cmd_len, 0xAA, BUFFER_SIZE - _cmd_len); \
1452 ASSERT_EQ(0, ioctl(fd, cmd, buffer)); \
1453 ASSERT_EQ(true, is_filled(buffer + _cmd_len, 0xAA, \
1454 BUFFER_SIZE - _cmd_len)); \
1455 })
1456
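/* Walk the capability chain returned by VFIO_IOMMU_GET_INFO in 'buffer' and sanity check the IOVA range and DMA-avail capabilities. */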
1457 static void check_vfio_info_cap_chain(struct __test_metadata *_metadata,
1458 struct vfio_iommu_type1_info *info_cmd)
1459 {
1460 const struct vfio_info_cap_header *cap;
1461
1462 ASSERT_GE(info_cmd->argsz, info_cmd->cap_offset + sizeof(*cap));
1463 cap = buffer + info_cmd->cap_offset;
1464 while (true) {
1465 size_t cap_size;
1466
1467 if (cap->next)
1468 cap_size = (buffer + cap->next) - (void *)cap;
1469 else
1470 cap_size = (buffer + info_cmd->argsz) - (void *)cap;
1471
1472 switch (cap->id) {
1473 case VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE: {
1474 struct vfio_iommu_type1_info_cap_iova_range *data =
1475 (void *)cap;
1476
1477 ASSERT_EQ(1, data->header.version);
1478 ASSERT_EQ(1, data->nr_iovas);
1479 EXPECT_EQ(MOCK_APERTURE_START,
1480 data->iova_ranges[0].start);
1481 EXPECT_EQ(MOCK_APERTURE_LAST, data->iova_ranges[0].end);
1482 break;
1483 }
1484 case VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL: {
1485 struct vfio_iommu_type1_info_dma_avail *data =
1486 (void *)cap;
1487
1488 ASSERT_EQ(1, data->header.version);
1489 ASSERT_EQ(sizeof(*data), cap_size);
1490 break;
1491 }
1492 default:
1493 ASSERT_EQ(false, true);
1494 break;
1495 }
1496 if (!cap->next)
1497 break;
1498
1499 ASSERT_GE(info_cmd->argsz, cap->next + sizeof(*cap));
1500 ASSERT_GE(buffer + cap->next, (void *)cap);
1501 cap = buffer + cap->next;
1502 }
1503 }
1504
1505 TEST_F(vfio_compat_mock_domain, get_info)
1506 {
1507 struct vfio_iommu_type1_info *info_cmd = buffer;
1508 unsigned int i;
1509 size_t caplen;
1510
1511 /* Pre-cap ABI */
1512 *info_cmd = (struct vfio_iommu_type1_info){
1513 .argsz = offsetof(struct vfio_iommu_type1_info, cap_offset),
1514 };
1515 ioctl_check_buf(self->fd, VFIO_IOMMU_GET_INFO);
1516 ASSERT_NE(0, info_cmd->iova_pgsizes);
1517 ASSERT_EQ(VFIO_IOMMU_INFO_PGSIZES | VFIO_IOMMU_INFO_CAPS,
1518 info_cmd->flags);
1519
1520 /* Read the cap chain size */
1521 *info_cmd = (struct vfio_iommu_type1_info){
1522 .argsz = sizeof(*info_cmd),
1523 };
1524 ioctl_check_buf(self->fd, VFIO_IOMMU_GET_INFO);
1525 ASSERT_NE(0, info_cmd->iova_pgsizes);
1526 ASSERT_EQ(VFIO_IOMMU_INFO_PGSIZES | VFIO_IOMMU_INFO_CAPS,
1527 info_cmd->flags);
1528 ASSERT_EQ(0, info_cmd->cap_offset);
1529 ASSERT_LT(sizeof(*info_cmd), info_cmd->argsz);
1530
1531 /* Read the caps; the kernel should never produce a corrupted cap chain */
1532 caplen = info_cmd->argsz;
1533 for (i = sizeof(*info_cmd); i < caplen; i++) {
1534 *info_cmd = (struct vfio_iommu_type1_info){
1535 .argsz = i,
1536 };
1537 ioctl_check_buf(self->fd, VFIO_IOMMU_GET_INFO);
1538 ASSERT_EQ(VFIO_IOMMU_INFO_PGSIZES | VFIO_IOMMU_INFO_CAPS,
1539 info_cmd->flags);
1540 if (!info_cmd->cap_offset)
1541 continue;
1542 check_vfio_info_cap_chain(_metadata, info_cmd);
1543 }
1544 }
1545
1546 static void shuffle_array(unsigned long *array, size_t nelms)
1547 {
1548 unsigned int i;
1549
1550 /* Shuffle */
1551 for (i = 0; i != nelms; i++) {
1552 unsigned long tmp = array[i];
1553 unsigned int other = rand() % (nelms - i);
1554
1555 array[i] = array[other];
1556 array[other] = tmp;
1557 }
1558 }
1559
1560 TEST_F(vfio_compat_mock_domain, map)
1561 {
1562 struct vfio_iommu_type1_dma_map map_cmd = {
1563 .argsz = sizeof(map_cmd),
1564 .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
1565 .vaddr = (uintptr_t)buffer,
1566 .size = BUFFER_SIZE,
1567 .iova = MOCK_APERTURE_START,
1568 };
1569 struct vfio_iommu_type1_dma_unmap unmap_cmd = {
1570 .argsz = sizeof(unmap_cmd),
1571 .size = BUFFER_SIZE,
1572 .iova = MOCK_APERTURE_START,
1573 };
1574 unsigned long pages_iova[BUFFER_SIZE / PAGE_SIZE];
1575 unsigned int i;
1576
1577 /* Simple map/unmap */
1578 ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
1579 ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
1580 ASSERT_EQ(BUFFER_SIZE, unmap_cmd.size);
1581
1582 /* UNMAP_FLAG_ALL requires 0 iova/size */
1583 ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
1584 unmap_cmd.flags = VFIO_DMA_UNMAP_FLAG_ALL;
1585 EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
1586
1587 unmap_cmd.iova = 0;
1588 unmap_cmd.size = 0;
1589 ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
1590 ASSERT_EQ(BUFFER_SIZE, unmap_cmd.size);
1591
1592 /* Small pages */
1593 for (i = 0; i != ARRAY_SIZE(pages_iova); i++) {
1594 map_cmd.iova = pages_iova[i] =
1595 MOCK_APERTURE_START + i * PAGE_SIZE;
1596 map_cmd.vaddr = (uintptr_t)buffer + i * PAGE_SIZE;
1597 map_cmd.size = PAGE_SIZE;
1598 ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
1599 }
1600 shuffle_array(pages_iova, ARRAY_SIZE(pages_iova));
1601
1602 unmap_cmd.flags = 0;
1603 unmap_cmd.size = PAGE_SIZE;
1604 for (i = 0; i != ARRAY_SIZE(pages_iova); i++) {
1605 unmap_cmd.iova = pages_iova[i];
1606 ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
1607 }
1608 }
1609
1610 TEST_F(vfio_compat_mock_domain, huge_map)
1611 {
1612 size_t buf_size = HUGEPAGE_SIZE * 2;
1613 struct vfio_iommu_type1_dma_map map_cmd = {
1614 .argsz = sizeof(map_cmd),
1615 .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
1616 .size = buf_size,
1617 .iova = MOCK_APERTURE_START,
1618 };
1619 struct vfio_iommu_type1_dma_unmap unmap_cmd = {
1620 .argsz = sizeof(unmap_cmd),
1621 };
1622 unsigned long pages_iova[16];
1623 unsigned int i;
1624 void *buf;
1625
1626 /* Test huge pages and splitting */
1627 buf = mmap(0, buf_size, PROT_READ | PROT_WRITE,
1628 MAP_SHARED | MAP_ANONYMOUS | MAP_HUGETLB | MAP_POPULATE, -1,
1629 0);
1630 ASSERT_NE(MAP_FAILED, buf);
1631 map_cmd.vaddr = (uintptr_t)buf;
1632 ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
1633
1634 unmap_cmd.size = buf_size / ARRAY_SIZE(pages_iova);
1635 for (i = 0; i != ARRAY_SIZE(pages_iova); i++)
1636 pages_iova[i] = MOCK_APERTURE_START + (i * unmap_cmd.size);
1637 shuffle_array(pages_iova, ARRAY_SIZE(pages_iova));
1638
1639 /* type1 mode can cut up larger mappings, type1v2 always fails */
1640 for (i = 0; i != ARRAY_SIZE(pages_iova); i++) {
1641 unmap_cmd.iova = pages_iova[i];
1642 unmap_cmd.size = buf_size / ARRAY_SIZE(pages_iova);
1643 if (variant->version == VFIO_TYPE1_IOMMU) {
1644 ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA,
1645 &unmap_cmd));
1646 } else {
1647 EXPECT_ERRNO(ENOENT,
1648 ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA,
1649 &unmap_cmd));
1650 }
1651 }
1652 }
1653
1654 TEST_HARNESS_MAIN
1655