1 /*
2  * Copyright (c) 2009 Corey Tabaka
3  * Copyright (c) 2015 Intel Corporation
4  *
5  * Use of this source code is governed by a MIT-style
6  * license that can be found in the LICENSE file or at
7  * https://opensource.org/licenses/MIT
8  */
9 
10 #include <lk/debug.h>
11 #include <arch.h>
12 #include <arch/ops.h>
13 #include <arch/x86.h>
14 #include <arch/x86/mmu.h>
15 #include <arch/x86/descriptor.h>
16 #include <arch/fpu.h>
17 #include <arch/mmu.h>
18 #include <platform.h>
19 #include <sys/types.h>
20 #include <string.h>
21 
/* early boot stack, used before the threading system provides per-thread
 * stacks; presumably referenced from the assembly entry point — TODO confirm */
uint8_t _kstack[PAGE_SIZE] __ALIGNED(8);

/* save a pointer to the multiboot information coming in from whoever called us */
/* make sure it lives in .data to avoid it being wiped out by bss clearing */
__SECTION(".data") void *_multiboot_info;

/* main tss: one system-wide task state segment, installed in the GDT and
 * loaded into the task register by arch_early_init() */
static tss_t system_tss;
31 
/*
 * Early, single-threaded architecture bring-up: enable CPU caches, build
 * and load the system TSS, then bootstrap the MMU. Statement order matters:
 * the TSS must be fully initialized before it is installed and LTR is issued.
 */
void arch_early_init(void) {
    /* enable caches here for now */
    clear_in_cr0(X86_CR0_NW | X86_CR0_CD);

    memset(&system_tss, 0, sizeof(system_tss));

#if ARCH_X86_32
    /* esp0/ss0 are the stack loaded on ring transitions; esp0 is left 0 here
     * and presumably filled in per-thread later — TODO confirm */
    system_tss.esp0 = 0;
    system_tss.ss0 = DATA_SELECTOR;
    system_tss.ss1 = 0;
    system_tss.ss2 = 0;
    system_tss.eflags = 0x00003002; // IOPL=3, plus the always-set reserved bit 1
    /* point the I/O permission bitmap at its field within the TSS */
    system_tss.bitmap = offsetof(tss_32_t, tss_bitmap);
    system_tss.trace = 1; // trap on hardware task switch
#endif

    /* install the TSS descriptor in the GDT and load the task register */
    set_global_desc(TSS_SELECTOR, &system_tss, sizeof(system_tss), 1, 0, 0, SEG_TYPE_TSS, 0, 0);
    x86_ltr(TSS_SELECTOR);

    x86_mmu_early_init();
}
53 
/*
 * Later-stage architecture init, run after the kernel heap and threading
 * are available: finish MMU setup, then initialize the FPU if configured.
 */
void arch_init(void) {
    x86_mmu_init();

#ifdef X86_WITH_FPU
    fpu_init();
#endif
}
61 
/*
 * Chain-load another image at |entry| with up to four arguments.
 * Not implemented on x86; panics if called.
 */
void arch_chain_load(void *entry, ulong arg0, ulong arg1, ulong arg2, ulong arg3) {
    PANIC_UNIMPLEMENTED;
}
65 
/*
 * Drop to user space at |entry_point| with |user_stack_top| as the user
 * stack pointer. Not implemented on x86; panics if called.
 *
 * The former #if 0 body here was aarch64 code (sp_el0/elr_el1/spsr_el1/eret)
 * copied from the arm64 port; it could never apply to this architecture and
 * was removed as dead, misleading code. See the arm64 port for the template
 * an x86 implementation (iret to ring 3) would replace.
 */
void arch_enter_uspace(vaddr_t entry_point, vaddr_t user_stack_top) {
    PANIC_UNIMPLEMENTED;
}
101