// SPDX-License-Identifier: GPL-2.0
/*
 * HMM stands for Heterogeneous Memory Management; it is a helper layer inside
 * the Linux kernel that lets device drivers mirror a process address space on
 * the device. This allows the device to use the same address space, which
 * makes communication and data exchange a lot easier.
 *
 * This framework's sole purpose is to exercise various code paths inside
 * the kernel to make sure that HMM performs as expected and to flush out any
 * bugs.
 */

#include "../kselftest_harness.h"
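
/*
 * FIXTURE(), FIXTURE_SETUP(), FIXTURE_TEARDOWN(), TEST_F() and the ASSERT_*
 * macros come from kselftest_harness.h; the harness runs each TEST_F() in
 * its own child process with a freshly set-up fixture.
 */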

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <unistd.h>
#include <string.h>
#include <strings.h>
#include <time.h>
#include <pthread.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <sys/ioctl.h>

#include "./local_config.h"
#ifdef LOCAL_CONFIG_HAVE_LIBHUGETLBFS
#include <hugetlbfs.h>
#endif
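
/*
 * local_config.h is generated by the selftests build; it defines
 * LOCAL_CONFIG_HAVE_LIBHUGETLBFS only when libhugetlbfs was detected, so
 * the hugetlbfs tests compile out cleanly on systems without the library.
 */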

/*
 * This is a private UAPI to the kernel test module so it isn't exported
 * in the usual include/uapi/... directory.
 */
#include "../../../../lib/test_hmm_uapi.h"

struct hmm_buffer {
	void		*ptr;		/* CPU virtual address of the range */
	void		*mirror;	/* device-side copy of the range */
	unsigned long	size;		/* size of the range in bytes */
	int		fd;		/* backing file, or -1 if anonymous */
	uint64_t	cpages;		/* pages touched, set by the driver */
	uint64_t	faults;		/* faults taken, set by the driver */
};

#define TWOMEG		(1 << 21)
#define HMM_BUFFER_SIZE (1024 << 12)
#define HMM_PATH_MAX    64
#define NTIMES          10

#define ALIGN(x, a) (((x) + (a - 1)) & (~((a) - 1)))
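/*
 * ALIGN() rounds x up to the next multiple of a, which must be a power of
 * two: e.g. ALIGN(5000, 4096) == 8192 and ALIGN(8192, 4096) == 8192.
 */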

FIXTURE(hmm)
{
	int		fd;
	unsigned int	page_size;
	unsigned int	page_shift;
};

FIXTURE(hmm2)
{
	int		fd0;
	int		fd1;
	unsigned int	page_size;
	unsigned int	page_shift;
};

static int hmm_open(int unit)
{
	char pathname[HMM_PATH_MAX];
	int fd;

	snprintf(pathname, sizeof(pathname), "/dev/hmm_dmirror%d", unit);
	fd = open(pathname, O_RDWR, 0);
	if (fd < 0)
		fprintf(stderr, "could not open hmm dmirror driver (%s)\n",
			pathname);
	return fd;
}

FIXTURE_SETUP(hmm)
{
	self->page_size = sysconf(_SC_PAGE_SIZE);
	self->page_shift = ffs(self->page_size) - 1;

	self->fd = hmm_open(0);
	ASSERT_GE(self->fd, 0);
}

FIXTURE_SETUP(hmm2)
{
	self->page_size = sysconf(_SC_PAGE_SIZE);
	self->page_shift = ffs(self->page_size) - 1;

	self->fd0 = hmm_open(0);
	ASSERT_GE(self->fd0, 0);
	self->fd1 = hmm_open(1);
	ASSERT_GE(self->fd1, 0);
}

FIXTURE_TEARDOWN(hmm)
{
	int ret = close(self->fd);

	ASSERT_EQ(ret, 0);
	self->fd = -1;
}

FIXTURE_TEARDOWN(hmm2)
{
	int ret = close(self->fd0);

	ASSERT_EQ(ret, 0);
	self->fd0 = -1;

	ret = close(self->fd1);
	ASSERT_EQ(ret, 0);
	self->fd1 = -1;
}

static int hmm_dmirror_cmd(int fd,
			   unsigned long request,
			   struct hmm_buffer *buffer,
			   unsigned long npages)
{
	struct hmm_dmirror_cmd cmd;
	int ret;

	/* Fill in the command structure for the simulated device. */
	cmd.addr = (__u64)buffer->ptr;
	cmd.ptr = (__u64)buffer->mirror;
	cmd.npages = npages;

	for (;;) {
		ret = ioctl(fd, request, &cmd);
		if (ret == 0)
			break;
		if (errno == EINTR)
			continue;
		return -errno;
	}
	buffer->cpages = cmd.cpages;
	buffer->faults = cmd.faults;

	return 0;
}
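
/*
 * A minimal usage sketch of the helper above (illustrative only): map one
 * anonymous page, then have the mirror device read it back into a local
 * buffer.
 *
 *	struct hmm_buffer b = { .fd = -1, .size = 4096 };
 *
 *	b.mirror = malloc(b.size);
 *	b.ptr = mmap(NULL, b.size, PROT_READ | PROT_WRITE,
 *		     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	if (hmm_dmirror_cmd(fd, HMM_DMIRROR_READ, &b, 1) == 0)
 *		printf("device copied %llu page(s)\n",
 *		       (unsigned long long)b.cpages);
 */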

static void hmm_buffer_free(struct hmm_buffer *buffer)
{
	if (buffer == NULL)
		return;

	if (buffer->ptr)
		munmap(buffer->ptr, buffer->size);
	free(buffer->mirror);
	free(buffer);
}

/*
 * Create a temporary file that will be deleted on close.
 */
static int hmm_create_file(unsigned long size)
{
	char path[HMM_PATH_MAX];
	int fd;

	strcpy(path, "/tmp");
	fd = open(path, O_TMPFILE | O_EXCL | O_RDWR, 0600);
	if (fd >= 0) {
		int r;

		do {
			r = ftruncate(fd, size);
		} while (r == -1 && errno == EINTR);
		if (!r)
			return fd;
		close(fd);
	}
	return -1;
}
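
/*
 * O_TMPFILE with a directory path creates an unnamed file on that
 * directory's filesystem; the inode is released automatically when the
 * last file descriptor is closed, so no unlink() is needed.
 */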

/*
 * Return a random unsigned number.
 */
static unsigned int hmm_random(void)
{
	static int fd = -1;
	unsigned int r;

	if (fd < 0) {
		fd = open("/dev/urandom", O_RDONLY);
		if (fd < 0) {
			fprintf(stderr, "%s:%d failed to open /dev/urandom\n",
				__FILE__, __LINE__);
			return ~0U;
		}
	}
	if (read(fd, &r, sizeof(r)) != sizeof(r)) {
		fprintf(stderr, "%s:%d failed to read /dev/urandom\n",
			__FILE__, __LINE__);
		return ~0U;
	}
	return r;
}

static void hmm_nanosleep(unsigned int n)
{
	struct timespec t;

	/* n is a delay in nanoseconds and must be less than one second. */
	t.tv_sec = 0;
	t.tv_nsec = n;
	nanosleep(&t, NULL);
}

/*
 * Simple NULL test of device open/close: the body is empty because
 * FIXTURE_SETUP() and FIXTURE_TEARDOWN() already exercise open() and
 * close() on the device.
 */
TEST_F(hmm, open_close)
{
}

/*
 * Read private anonymous memory.
 */
TEST_F(hmm, anon_read)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;
	int val;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/*
	 * Initialize buffer in system memory but leave the first two pages
	 * zero (pte_none and pfn_zero).
	 */
	i = 2 * self->page_size / sizeof(*ptr);
	for (ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Set buffer permission to read-only. */
	ret = mprotect(buffer->ptr, size, PROT_READ);
	ASSERT_EQ(ret, 0);

	/* Populate the CPU page table with a special zero page. */
	val = *(int *)(buffer->ptr + self->page_size);
	ASSERT_EQ(val, 0);

	/* Simulate a device reading system memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device read. */
	ptr = buffer->mirror;
	for (i = 0; i < 2 * self->page_size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], 0);
	for (; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	hmm_buffer_free(buffer);
}

/*
 * Read private anonymous memory which has been protected with
 * mprotect() PROT_NONE.
 */
TEST_F(hmm, anon_read_prot)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize buffer in system memory. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Initialize mirror buffer so we can verify it isn't written. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = -i;

	/* Protect buffer from reading. */
	ret = mprotect(buffer->ptr, size, PROT_NONE);
	ASSERT_EQ(ret, 0);

	/* Simulate a device reading system memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer, npages);
	ASSERT_EQ(ret, -EFAULT);

	/* Allow CPU to read the buffer so we can check it. */
	ret = mprotect(buffer->ptr, size, PROT_READ);
	ASSERT_EQ(ret, 0);
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	/* Check what the device read. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], -i);

	hmm_buffer_free(buffer);
}

/*
 * Write private anonymous memory.
 */
TEST_F(hmm, anon_write)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize data that the device will write to buffer->ptr. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Simulate a device writing system memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device wrote. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	hmm_buffer_free(buffer);
}

/*
 * Write private anonymous memory which has been protected with
 * mprotect() PROT_READ.
 */
TEST_F(hmm, anon_write_prot)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Simulate a device reading a zero page of memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer, 1);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, 1);
	ASSERT_EQ(buffer->faults, 1);

	/* Initialize data that the device will write to buffer->ptr. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Simulate a device writing system memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, -EPERM);

	/* Check that the device did not write. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], 0);

	/* Now allow writing and see that the zero page is replaced. */
	ret = mprotect(buffer->ptr, size, PROT_WRITE | PROT_READ);
	ASSERT_EQ(ret, 0);

	/* Simulate a device writing system memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device wrote. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	hmm_buffer_free(buffer);
}

/*
 * Check that a device writing an anonymous private mapping
 * will copy-on-write if a child process inherits the mapping.
 */
TEST_F(hmm, anon_write_child)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	pid_t pid;
	int child_fd;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize buffer->ptr so we can tell if it is written. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Initialize data that the device will write to buffer->ptr. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = -i;

	pid = fork();
	if (pid == -1)
		ASSERT_EQ(pid, 0);
	if (pid != 0) {
		waitpid(pid, &ret, 0);
		ASSERT_EQ(WIFEXITED(ret), 1);

		/* Check that the parent's buffer did not change. */
		for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
			ASSERT_EQ(ptr[i], i);
		return;
	}

	/* Check that we see the parent's values. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], -i);

	/* The child process needs its own mirror to its own mm. */
	child_fd = hmm_open(0);
	ASSERT_GE(child_fd, 0);

	/* Simulate a device writing system memory. */
	ret = hmm_dmirror_cmd(child_fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device wrote. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], -i);

	close(child_fd);
	exit(0);
}

/*
 * Check that a device writing an anonymous shared mapping
 * will not copy-on-write if a child process inherits the mapping.
 */
TEST_F(hmm, anon_write_child_shared)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	pid_t pid;
	int child_fd;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_SHARED | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize buffer->ptr so we can tell if it is written. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Initialize data that the device will write to buffer->ptr. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = -i;

	pid = fork();
	if (pid == -1)
		ASSERT_EQ(pid, 0);
	if (pid != 0) {
		waitpid(pid, &ret, 0);
		ASSERT_EQ(WIFEXITED(ret), 1);

		/* Check that the parent's buffer did change. */
		for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
			ASSERT_EQ(ptr[i], -i);
		return;
	}

	/* Check that we see the parent's values. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], -i);

	/* The child process needs its own mirror to its own mm. */
	child_fd = hmm_open(0);
	ASSERT_GE(child_fd, 0);

	/* Simulate a device writing system memory. */
	ret = hmm_dmirror_cmd(child_fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device wrote. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], -i);

	close(child_fd);
	exit(0);
}

/*
 * Write private anonymous huge page.
 */
TEST_F(hmm, anon_write_huge)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	void *old_ptr;
	void *map;
	int *ptr;
	int ret;

	size = 2 * TWOMEG;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/*
	 * A 4MB mapping is guaranteed to contain a 2MB-aligned 2MB range;
	 * round up to it so MADV_HUGEPAGE can back it with a huge page.
	 */
	size = TWOMEG;
	npages = size >> self->page_shift;
	map = (void *)ALIGN((uintptr_t)buffer->ptr, size);
	ret = madvise(map, size, MADV_HUGEPAGE);
	ASSERT_EQ(ret, 0);
	old_ptr = buffer->ptr;
	buffer->ptr = map;

	/* Initialize data that the device will write to buffer->ptr. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Simulate a device writing system memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device wrote. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	buffer->ptr = old_ptr;
	hmm_buffer_free(buffer);
}

#ifdef LOCAL_CONFIG_HAVE_LIBHUGETLBFS
/*
 * Write a hugetlbfs-backed huge page.
 */
TEST_F(hmm, anon_write_hugetlbfs)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;
	long pagesizes[4];
	int n, idx;

	/* Skip test if we can't allocate a hugetlbfs page. */

	n = gethugepagesizes(pagesizes, 4);
	if (n <= 0)
		SKIP(return, "Huge page size could not be determined");
	/* Pick the smallest huge page size. */
	for (idx = 0; --n > 0; ) {
		if (pagesizes[n] < pagesizes[idx])
			idx = n;
	}
	size = ALIGN(TWOMEG, pagesizes[idx]);
	npages = size >> self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->ptr = get_hugepage_region(size, GHR_STRICT);
	if (buffer->ptr == NULL) {
		free(buffer);
		SKIP(return, "Huge page could not be allocated");
	}

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	/* Initialize data that the device will write to buffer->ptr. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Simulate a device writing system memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device wrote. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	free_hugepage_region(buffer->ptr);
	buffer->ptr = NULL;
	hmm_buffer_free(buffer);
}
#endif /* LOCAL_CONFIG_HAVE_LIBHUGETLBFS */

/*
 * Read mmap'ed file memory.
 */
TEST_F(hmm, file_read)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;
	int fd;
	ssize_t len;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	fd = hmm_create_file(size);
	ASSERT_GE(fd, 0);

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = fd;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	/* Write initial contents of the file. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;
	len = pwrite(fd, buffer->mirror, size, 0);
	ASSERT_EQ(len, size);
	memset(buffer->mirror, 0, size);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ,
			   MAP_SHARED,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Simulate a device reading system memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device read. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	hmm_buffer_free(buffer);
}

/*
 * Write mmap'ed file memory.
 */
TEST_F(hmm, file_write)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;
	int fd;
	ssize_t len;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	fd = hmm_create_file(size);
	ASSERT_GE(fd, 0);

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = fd;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_SHARED,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize data that the device will write to buffer->ptr. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Simulate a device writing system memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device wrote. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	/* Check that the device also wrote the file. */
	len = pread(fd, buffer->mirror, size, 0);
	ASSERT_EQ(len, size);
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	hmm_buffer_free(buffer);
}

/*
 * Migrate anonymous memory to device private memory.
 */
TEST_F(hmm, migrate)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize buffer in system memory. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Migrate memory to device. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_MIGRATE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);

	/*
	 * Check what the device read: the MIGRATE ioctl also copies the
	 * migrated data back through buffer->mirror.
	 */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	hmm_buffer_free(buffer);
}

/*
 * Migrate anonymous memory to device private memory and fault some of it back
 * to system memory, then try migrating the resulting mix of system and device
 * private memory to the device.
 */
TEST_F(hmm, migrate_fault)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize buffer in system memory. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Migrate memory to device. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_MIGRATE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);

	/* Check what the device read. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	/*
	 * Fault half the pages back to system memory and check them: a CPU
	 * touch of a device private page faults it back to system memory.
	 */
	for (i = 0, ptr = buffer->ptr; i < size / (2 * sizeof(*ptr)); ++i)
		ASSERT_EQ(ptr[i], i);

	/* Migrate memory to the device again. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_MIGRATE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);

	/* Check what the device read. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	hmm_buffer_free(buffer);
}

/*
 * Migrate anonymous shared memory to device private memory.
 */
TEST_F(hmm, migrate_shared)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_SHARED | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Shared mappings can't be migrated to device private memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_MIGRATE, buffer, npages);
	ASSERT_EQ(ret, -ENOENT);

	hmm_buffer_free(buffer);
}

/*
 * Try to migrate various memory types to device private memory.
 */
TEST_F(hmm2, migrate_mixed)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	int *ptr;
	unsigned char *p;
	int ret;
	int val;

	npages = 6;
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	/* Reserve a range of addresses. */
	buffer->ptr = mmap(NULL, size,
			   PROT_NONE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);
	p = buffer->ptr;

	/* Migrating a protected area should be an error. */
	ret = hmm_dmirror_cmd(self->fd1, HMM_DMIRROR_MIGRATE, buffer, npages);
	ASSERT_EQ(ret, -EINVAL);

	/* Punch a hole after the first page address. */
	ret = munmap(buffer->ptr + self->page_size, self->page_size);
	ASSERT_EQ(ret, 0);

	/* We expect an error if the vma doesn't cover the range. */
	ret = hmm_dmirror_cmd(self->fd1, HMM_DMIRROR_MIGRATE, buffer, 3);
	ASSERT_EQ(ret, -EINVAL);

	/* Page 2 will be a read-only zero page. */
	ret = mprotect(buffer->ptr + 2 * self->page_size, self->page_size,
		       PROT_READ);
	ASSERT_EQ(ret, 0);
	ptr = (int *)(buffer->ptr + 2 * self->page_size);
	val = *ptr + 3;
	ASSERT_EQ(val, 3);

	/* Page 3 will be read-only. */
	ret = mprotect(buffer->ptr + 3 * self->page_size, self->page_size,
		       PROT_READ | PROT_WRITE);
	ASSERT_EQ(ret, 0);
	ptr = (int *)(buffer->ptr + 3 * self->page_size);
	*ptr = val;
	ret = mprotect(buffer->ptr + 3 * self->page_size, self->page_size,
		       PROT_READ);
	ASSERT_EQ(ret, 0);

	/* Page 4-5 will be read-write. */
	ret = mprotect(buffer->ptr + 4 * self->page_size, 2 * self->page_size,
		       PROT_READ | PROT_WRITE);
	ASSERT_EQ(ret, 0);
	ptr = (int *)(buffer->ptr + 4 * self->page_size);
	*ptr = val;
	ptr = (int *)(buffer->ptr + 5 * self->page_size);
	*ptr = val;

	/* Now try to migrate pages 2-5 to device 1. */
	buffer->ptr = p + 2 * self->page_size;
	ret = hmm_dmirror_cmd(self->fd1, HMM_DMIRROR_MIGRATE, buffer, 4);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, 4);

	/* Page 5 won't be migrated to device 0 because it's on device 1. */
	buffer->ptr = p + 5 * self->page_size;
	ret = hmm_dmirror_cmd(self->fd0, HMM_DMIRROR_MIGRATE, buffer, 1);
	ASSERT_EQ(ret, -ENOENT);
	buffer->ptr = p;

	hmm_buffer_free(buffer);
}

/*
 * Migrate anonymous memory to device private memory and fault it back to
 * system memory multiple times.
 */
TEST_F(hmm, migrate_multiple)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	unsigned long c;
	int *ptr;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	for (c = 0; c < NTIMES; c++) {
		buffer = malloc(sizeof(*buffer));
		ASSERT_NE(buffer, NULL);

		buffer->fd = -1;
		buffer->size = size;
		buffer->mirror = malloc(size);
		ASSERT_NE(buffer->mirror, NULL);

		buffer->ptr = mmap(NULL, size,
				   PROT_READ | PROT_WRITE,
				   MAP_PRIVATE | MAP_ANONYMOUS,
				   buffer->fd, 0);
		ASSERT_NE(buffer->ptr, MAP_FAILED);

		/* Initialize buffer in system memory. */
		for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
			ptr[i] = i;

		/* Migrate memory to device. */
		ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_MIGRATE, buffer,
				      npages);
		ASSERT_EQ(ret, 0);
		ASSERT_EQ(buffer->cpages, npages);

		/* Check what the device read. */
		for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
			ASSERT_EQ(ptr[i], i);

		/* Fault pages back to system memory and check them. */
		for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
			ASSERT_EQ(ptr[i], i);

		hmm_buffer_free(buffer);
	}
}

/*
 * Read anonymous memory multiple times.
 */
TEST_F(hmm, anon_read_multiple)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	unsigned long c;
	int *ptr;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	for (c = 0; c < NTIMES; c++) {
		buffer = malloc(sizeof(*buffer));
		ASSERT_NE(buffer, NULL);

		buffer->fd = -1;
		buffer->size = size;
		buffer->mirror = malloc(size);
		ASSERT_NE(buffer->mirror, NULL);

		buffer->ptr = mmap(NULL, size,
				   PROT_READ | PROT_WRITE,
				   MAP_PRIVATE | MAP_ANONYMOUS,
				   buffer->fd, 0);
		ASSERT_NE(buffer->ptr, MAP_FAILED);

		/* Initialize buffer in system memory. */
		for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
			ptr[i] = i + c;

		/* Simulate a device reading system memory. */
		ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer,
				      npages);
		ASSERT_EQ(ret, 0);
		ASSERT_EQ(buffer->cpages, npages);
		ASSERT_EQ(buffer->faults, 1);

		/* Check what the device read. */
		for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
			ASSERT_EQ(ptr[i], i + c);

		hmm_buffer_free(buffer);
	}
}

void *unmap_buffer(void *p)
{
	struct hmm_buffer *buffer = p;

	/* Delay for a bit and then unmap buffer while it is being read. */
	hmm_nanosleep(hmm_random() % 32000);
	munmap(buffer->ptr + buffer->size / 2, buffer->size / 2);
	buffer->ptr = NULL;

	return NULL;
}
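
/*
 * Note the race being set up: unmap_buffer() runs in a second thread and
 * tears down half of the mapping at a random point while the read ioctl
 * below is mirroring it, so the read may either complete or fail; the test
 * only checks the mirrored data when the ioctl reports success.
 */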

/*
 * Try reading anonymous memory while it is being unmapped.
 */
TEST_F(hmm, anon_teardown)
{
	unsigned long npages;
	unsigned long size;
	unsigned long c;
	void *ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	for (c = 0; c < NTIMES; ++c) {
		pthread_t thread;
		struct hmm_buffer *buffer;
		unsigned long i;
		int *ptr;
		int rc;

		buffer = malloc(sizeof(*buffer));
		ASSERT_NE(buffer, NULL);

		buffer->fd = -1;
		buffer->size = size;
		buffer->mirror = malloc(size);
		ASSERT_NE(buffer->mirror, NULL);

		buffer->ptr = mmap(NULL, size,
				   PROT_READ | PROT_WRITE,
				   MAP_PRIVATE | MAP_ANONYMOUS,
				   buffer->fd, 0);
		ASSERT_NE(buffer->ptr, MAP_FAILED);

		/* Initialize buffer in system memory. */
		for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
			ptr[i] = i + c;

		rc = pthread_create(&thread, NULL, unmap_buffer, buffer);
		ASSERT_EQ(rc, 0);

		/* Simulate a device reading system memory. */
		rc = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer,
				     npages);
		if (rc == 0) {
			ASSERT_EQ(buffer->cpages, npages);
			ASSERT_EQ(buffer->faults, 1);

			/* Check what the device read. */
			for (i = 0, ptr = buffer->mirror;
			     i < size / sizeof(*ptr);
			     ++i)
				ASSERT_EQ(ptr[i], i + c);
		}

		pthread_join(thread, &ret);
		hmm_buffer_free(buffer);
	}
}

/*
 * Test memory snapshot without faulting in pages accessed by the device.
 */
TEST_F(hmm2, snapshot)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	int *ptr;
	unsigned char *p;
	unsigned char *m;
	int ret;
	int val;

	npages = 7;
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	/* The snapshot returns one HMM_DMIRROR_PROT_* byte per page. */
	buffer->mirror = malloc(npages);
	ASSERT_NE(buffer->mirror, NULL);

	/* Reserve a range of addresses. */
	buffer->ptr = mmap(NULL, size,
			   PROT_NONE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);
	p = buffer->ptr;

	/* Punch a hole after the first page address. */
	ret = munmap(buffer->ptr + self->page_size, self->page_size);
	ASSERT_EQ(ret, 0);

	/* Page 2 will be read-only zero page. */
	ret = mprotect(buffer->ptr + 2 * self->page_size, self->page_size,
		       PROT_READ);
	ASSERT_EQ(ret, 0);
	ptr = (int *)(buffer->ptr + 2 * self->page_size);
	val = *ptr + 3;
	ASSERT_EQ(val, 3);

	/* Page 3 will be read-only. */
	ret = mprotect(buffer->ptr + 3 * self->page_size, self->page_size,
		       PROT_READ | PROT_WRITE);
	ASSERT_EQ(ret, 0);
	ptr = (int *)(buffer->ptr + 3 * self->page_size);
	*ptr = val;
	ret = mprotect(buffer->ptr + 3 * self->page_size, self->page_size,
		       PROT_READ);
	ASSERT_EQ(ret, 0);

	/* Page 4-6 will be read-write. */
	ret = mprotect(buffer->ptr + 4 * self->page_size, 3 * self->page_size,
		       PROT_READ | PROT_WRITE);
	ASSERT_EQ(ret, 0);
	ptr = (int *)(buffer->ptr + 4 * self->page_size);
	*ptr = val;

	/* Page 5 will be migrated to device 0. */
	buffer->ptr = p + 5 * self->page_size;
	ret = hmm_dmirror_cmd(self->fd0, HMM_DMIRROR_MIGRATE, buffer, 1);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, 1);

	/* Page 6 will be migrated to device 1. */
	buffer->ptr = p + 6 * self->page_size;
	ret = hmm_dmirror_cmd(self->fd1, HMM_DMIRROR_MIGRATE, buffer, 1);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, 1);

	/* Simulate a device snapshotting CPU pagetables. */
	buffer->ptr = p;
	ret = hmm_dmirror_cmd(self->fd0, HMM_DMIRROR_SNAPSHOT, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);

	/* Check what the device saw. */
	m = buffer->mirror;
	ASSERT_EQ(m[0], HMM_DMIRROR_PROT_ERROR);
	ASSERT_EQ(m[1], HMM_DMIRROR_PROT_ERROR);
	ASSERT_EQ(m[2], HMM_DMIRROR_PROT_ZERO | HMM_DMIRROR_PROT_READ);
	ASSERT_EQ(m[3], HMM_DMIRROR_PROT_READ);
	ASSERT_EQ(m[4], HMM_DMIRROR_PROT_WRITE);
	ASSERT_EQ(m[5], HMM_DMIRROR_PROT_DEV_PRIVATE_LOCAL |
			HMM_DMIRROR_PROT_WRITE);
	ASSERT_EQ(m[6], HMM_DMIRROR_PROT_NONE);

	hmm_buffer_free(buffer);
}

#ifdef LOCAL_CONFIG_HAVE_LIBHUGETLBFS
/*
 * Test the hmm_range_fault() HMM_PFN_PMD flag for large pages that
 * should be mapped by a large page table entry.
 */
TEST_F(hmm, compound)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	int *ptr;
	unsigned char *m;
	int ret;
	long pagesizes[4];
	int n, idx;
	unsigned long i;

	/* Skip test if we can't allocate a hugetlbfs page. */

	n = gethugepagesizes(pagesizes, 4);
	if (n <= 0)
		SKIP(return, "Huge page size could not be determined");
	/* Pick the smallest huge page size. */
	for (idx = 0; --n > 0; ) {
		if (pagesizes[n] < pagesizes[idx])
			idx = n;
	}
	size = ALIGN(TWOMEG, pagesizes[idx]);
	npages = size >> self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->ptr = get_hugepage_region(size, GHR_STRICT);
	if (buffer->ptr == NULL) {
		free(buffer);
		SKIP(return, "Huge page could not be allocated");
	}

	buffer->size = size;
	/* The snapshot returns one HMM_DMIRROR_PROT_* byte per page. */
	buffer->mirror = malloc(npages);
	ASSERT_NE(buffer->mirror, NULL);

	/* Initialize the pages the device will snapshot in buffer->ptr. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Simulate a device snapshotting CPU pagetables. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_SNAPSHOT, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);

	/* Check what the device saw. */
	m = buffer->mirror;
	for (i = 0; i < npages; ++i)
		ASSERT_EQ(m[i], HMM_DMIRROR_PROT_WRITE |
				HMM_DMIRROR_PROT_PMD);

	/* Make the region read-only. */
	ret = mprotect(buffer->ptr, size, PROT_READ);
	ASSERT_EQ(ret, 0);

	/* Simulate a device snapshotting CPU pagetables. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_SNAPSHOT, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);

	/* Check what the device saw. */
	m = buffer->mirror;
	for (i = 0; i < npages; ++i)
		ASSERT_EQ(m[i], HMM_DMIRROR_PROT_READ |
				HMM_DMIRROR_PROT_PMD);

	free_hugepage_region(buffer->ptr);
	buffer->ptr = NULL;
	hmm_buffer_free(buffer);
}
#endif /* LOCAL_CONFIG_HAVE_LIBHUGETLBFS */

/*
 * Test two devices reading the same memory (double mapped).
 */
TEST_F(hmm2, double_map)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;

	npages = 6;
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	/* The READ ioctl copies the whole range, so mirror needs size bytes. */
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	/* Reserve a range of addresses. */
	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize buffer in system memory. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Make region read-only. */
	ret = mprotect(buffer->ptr, size, PROT_READ);
	ASSERT_EQ(ret, 0);

	/* Simulate device 0 reading system memory. */
	ret = hmm_dmirror_cmd(self->fd0, HMM_DMIRROR_READ, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device read. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	/* Simulate device 1 reading system memory. */
	ret = hmm_dmirror_cmd(self->fd1, HMM_DMIRROR_READ, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device read. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	/* Punch a hole after the first page address. */
	ret = munmap(buffer->ptr + self->page_size, self->page_size);
	ASSERT_EQ(ret, 0);

	hmm_buffer_free(buffer);
}

/*
 * Basic check of exclusive faulting: map pages for exclusive (atomic)
 * device access, then have the CPU touch them, which must revoke that
 * access.
 */
TEST_F(hmm, exclusive)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize buffer in system memory. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Map memory exclusively for device access. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_EXCLUSIVE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);

	/* Check what the device read. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	/* Fault pages back to system memory and check them. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i]++, i);

	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i+1);

	/* Check that atomic access was revoked. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_CHECK_EXCLUSIVE, buffer, npages);
	ASSERT_EQ(ret, 0);

	hmm_buffer_free(buffer);
}

TEST_F(hmm, exclusive_mprotect)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize buffer in system memory. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Map memory exclusively for device access. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_EXCLUSIVE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);

	/* Check what the device read. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	ret = mprotect(buffer->ptr, size, PROT_READ);
	ASSERT_EQ(ret, 0);

	/* Simulate a device writing system memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, -EPERM);

	hmm_buffer_free(buffer);
}

/*
 * Check copy-on-write works.
 */
TEST_F(hmm, exclusive_cow)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize buffer in system memory. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Map memory exclusively for device access. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_EXCLUSIVE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);

	/* Both parent and child fault the pages back and run the checks. */
	fork();

	/* Fault pages back to system memory and check them. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i]++, i);

	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i+1);

	hmm_buffer_free(buffer);
}

TEST_HARNESS_MAIN
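
/*
 * TEST_HARNESS_MAIN expands to a main() that runs every TEST_F() above.
 * The binary needs the test_hmm mirror module loaded; the vm selftests
 * typically handle this via a runner script (e.g. test_hmm.sh) that loads
 * the module and sets up the /dev/hmm_dmirror* nodes before running it.
 */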