/*
 * Copyright (c) 2020 Travis Geiselbrecht
 *
 * Use of this source code is governed by a MIT-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/MIT
 */
#if ARCH_HAS_MMU

#include <arch/mmu.h>

#include <lk/err.h>
#include <lib/unittest.h>
#include <kernel/vm.h>

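// Exercise the minimal lifecycle of a user address space: initialize it and
// immediately tear it down again.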
static bool create_user_aspace(void) {
    BEGIN_TEST;

    arch_aspace_t as;
    status_t err = arch_mmu_init_aspace(&as, USER_ASPACE_BASE, USER_ASPACE_SIZE, 0);
    ASSERT_EQ(NO_ERROR, err, "init");

    err = arch_mmu_destroy_aspace(&as);
    EXPECT_EQ(NO_ERROR, err, "destroy");

    END_TEST;
}

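// Allocate a batch of physical pages, map them at the base of the user address
// space, and confirm arch_mmu_query reports the expected physical address and
// permission flags for each mapping.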
static bool map_user_pages(void) {
    BEGIN_TEST;

    arch_aspace_t as;
    status_t err = arch_mmu_init_aspace(&as, USER_ASPACE_BASE, USER_ASPACE_SIZE, 0);
    ASSERT_EQ(NO_ERROR, err, "init");

    // allocate a batch of pages
    struct list_node pages = LIST_INITIAL_VALUE(pages);
    size_t count = pmm_alloc_pages(4, &pages);
    EXPECT_EQ(4, count, "alloc pages");
    EXPECT_EQ(4, list_length(&pages), "page list");

    // map the pages into the address space
    vaddr_t va = USER_ASPACE_BASE;
    vm_page_t *p;
    list_for_every_entry(&pages, p, vm_page_t, node) {
        err = arch_mmu_map(&as, va, vm_page_to_paddr(p), 1, ARCH_MMU_FLAG_PERM_USER);
        EXPECT_EQ(NO_ERROR, err, "map page");
        va += PAGE_SIZE;
    }

    // query the pages to make sure they match
    va = USER_ASPACE_BASE;
    list_for_every_entry(&pages, p, vm_page_t, node) {
        paddr_t pa;
        uint flags;
        err = arch_mmu_query(&as, va, &pa, &flags);
        EXPECT_EQ(NO_ERROR, err, "query");
        EXPECT_EQ(vm_page_to_paddr(p), pa, "pa");
        EXPECT_EQ(ARCH_MMU_FLAG_PERM_USER, flags, "flags");
        va += PAGE_SIZE;

        //unittest_printf("\npa %#lx, flags %#x", pa, flags);
    }

    // destroy the aspace with the pages mapped
    err = arch_mmu_destroy_aspace(&as);
    EXPECT_EQ(NO_ERROR, err, "destroy");

    size_t freed = pmm_free(&pages);
    EXPECT_EQ(count, freed, "free");

    END_TEST;
}

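// Switch to a freshly created user address space, map a page into it, and
// verify that data written through one mapping of the page is visible through
// the other.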
static bool context_switch(void) {
    BEGIN_TEST;

    arch_aspace_t as;
    status_t err = arch_mmu_init_aspace(&as, USER_ASPACE_BASE, USER_ASPACE_SIZE, 0);
    ASSERT_EQ(NO_ERROR, err, "init");

    // switch to the address space
    arch_mmu_context_switch(&as);

    // map a page and verify it can be read back through the new user mapping
    vm_page_t *p = pmm_alloc_page();
    EXPECT_NONNULL(p, "page");

    // map it
    err = arch_mmu_map(&as, USER_ASPACE_BASE, vm_page_to_paddr(p), 1, ARCH_MMU_FLAG_PERM_USER);
    EXPECT_EQ(NO_ERROR, err, "map");

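    // The same physical page is now reachable two ways: through the kernel's
    // physmap (kv below) and through the new user mapping at USER_ASPACE_BASE.
    // The readback checks that follow assume the two mappings are coherent
    // aliases of the page.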
    // write a known value to the page via its kernel virtual address (kvaddr)
    volatile int *kv = paddr_to_kvaddr(vm_page_to_paddr(p));
    *kv = 99;

    // read the data back from the page
    volatile int *ptr = (void *)USER_ASPACE_BASE;
    volatile int foo = *ptr;

    EXPECT_EQ(99, foo, "readback");
    *kv = 0xaa;
    foo = *ptr;
    EXPECT_EQ(0xaa, foo, "readback 2");

    // write to the page and read it back from the kernel side
    *ptr = 0x55;
    foo = *kv;
    EXPECT_EQ(0x55, foo, "readback 3");

    // switch back to kernel aspace
    arch_mmu_context_switch(NULL);

    // destroy it
    err = arch_mmu_destroy_aspace(&as);
    EXPECT_EQ(NO_ERROR, err, "destroy");

    // free the page
    size_t c = pmm_free_page(p);
    EXPECT_EQ(1, c, "free");

    END_TEST;
}

BEGIN_TEST_CASE(arch_mmu_tests)
RUN_TEST(create_user_aspace);
RUN_TEST(map_user_pages);
RUN_TEST(context_switch);
END_TEST_CASE(arch_mmu_tests)

#endif // ARCH_HAS_MMU