1 /*
2 * Copyright (c) 2009 Corey Tabaka
3 * Copyright (c) 2014 Travis Geiselbrecht
4 * Copyright (c) 2015 Intel Corporation
5 *
6 * Use of this source code is governed by a MIT-style
7 * license that can be found in the LICENSE file or at
8 * https://opensource.org/licenses/MIT
9 */
10 #include <sys/types.h>
11 #include <string.h>
12 #include <stdlib.h>
13 #include <lk/debug.h>
14 #include <kernel/thread.h>
15 #include <kernel/spinlock.h>
16 #include <arch/x86.h>
17 #include <arch/x86/descriptor.h>
18 #include <arch/fpu.h>
19
/* we're uniprocessor at this point for x86, so store a global pointer to the current thread */
/* NOTE: read by initial_thread_func() below to fetch the thread's entry point and argument */
struct thread *_current_thread;
22
23 static void initial_thread_func(void) __NO_RETURN;
initial_thread_func(void)24 static void initial_thread_func(void) {
25 int ret;
26
27 /* release the thread lock that was implicitly held across the reschedule */
28 spin_unlock(&thread_lock);
29 arch_enable_ints();
30
31 ret = _current_thread->entry(_current_thread->arg);
32
33 thread_exit(ret);
34 }
35
/*
 * Prepare a freshly created thread for its first context switch.
 *
 * Builds a fake context-switch frame at the top of the thread's stack so
 * that the first arch_context_switch() into this thread "returns" into
 * initial_thread_func(). The resulting stack pointer is recorded in
 * t->arch.sp.
 */
void arch_thread_initialize(thread_t *t) {
    vaddr_t sp = (vaddr_t)t->stack + t->stack_size;

#if ARCH_X86_32
    /* the 32-bit ABI requires 8 byte stack alignment */
    sp = ROUNDDOWN(sp, 8);
    struct x86_32_context_switch_frame *frame = (struct x86_32_context_switch_frame *)sp;
#endif
#if ARCH_X86_64
    /* the 64-bit ABI requires 16 byte stack alignment... */
    sp = ROUNDDOWN(sp, 16);

    /* ...but bias it by 8 here, because the context switch pops the return
     * address off the stack. After that pop, the stack sits exactly where a
     * freshly called function expects it. */
    sp -= 8;
    struct x86_64_context_switch_frame *frame = (struct x86_64_context_switch_frame *)sp;
#endif

    /* carve out one frame below the (adjusted) stack top and zero it */
    frame--;
    memset(frame, 0, sizeof(*frame));

#if ARCH_X86_32
    frame->eip = (vaddr_t)&initial_thread_func;
    frame->eflags = 0x3002; /* IF = 0, NT = 0, IOPL = 3 */
#endif

#if ARCH_X86_64
    frame->rip = (vaddr_t)&initial_thread_func;
    frame->rflags = 0x3002; /* IF = 0, NT = 0, IOPL = 3 */
#endif

#if X86_WITH_FPU
    /* set up the thread's initial saved fpu state */
    fpu_init_thread_states(t);
#endif

    /* remember where the fabricated frame lives */
    t->arch.sp = (vaddr_t)frame;
}
78
/* Dump the arch-specific state of a thread: just its saved stack pointer.
 * The saved sp is only meaningful while the thread is switched out, so a
 * currently running thread prints nothing. */
void arch_dump_thread(thread_t *t) {
    if (t->state == THREAD_RUNNING) {
        return;
    }

    dprintf(INFO, "\tarch: ");
    dprintf(INFO, "sp 0x%lx\n", t->arch.sp);
}
85
86 #if ARCH_X86_32
87
/*
 * Switch from oldthread to newthread (32-bit variant).
 *
 * Saves the resume address, eflags, and general-purpose registers on the
 * current stack, records the resulting stack pointer in
 * oldthread->arch.sp, then loads newthread's saved stack pointer and
 * restores its state. Execution resumes either at local label 1 (for a
 * thread previously switched out here) or at the eip planted by
 * arch_thread_initialize() (for a brand new thread).
 */
void arch_context_switch(thread_t *oldthread, thread_t *newthread) {
    //dprintf(DEBUG, "arch_context_switch: old %p (%s), new %p (%s)\n", oldthread, oldthread->name, newthread, newthread->name);

#if X86_WITH_FPU
    fpu_context_switch(oldthread, newthread);
#endif

    __asm__ __volatile__ (
        "pushl $1f          \n\t" /* push the resume address */
        "pushf              \n\t" /* save eflags */
        "pusha              \n\t" /* save general purpose registers */
        "movl %%esp,(%%edx) \n\t" /* store old sp into oldthread->arch.sp */
        "movl %%eax,%%esp   \n\t" /* switch to newthread's saved stack */
        "popa               \n\t" /* restore the new thread's registers */
        "popf               \n\t" /* restore the new thread's eflags */
        "ret                \n\t" /* pop and jump to the new thread's saved eip */
        "1:                 \n\t"

        :
        : "d" (&oldthread->arch.sp), "a" (newthread->arch.sp)
        /* The asm stores through a pointer the compiler cannot see as an
         * output, and a context switch is a full scheduling boundary, so
         * declare a "memory" clobber: without it the compiler is free to
         * cache memory values in registers across the switch. */
        : "memory"
    );
}
132 #endif
133
134 #if ARCH_X86_64
135
/*
 * Switch from oldthread to newthread (64-bit variant).
 *
 * Saves the current thread's stack pointer into oldthread->arch.sp and
 * resumes newthread from its previously saved stack pointer (or from the
 * frame fabricated by arch_thread_initialize() for a new thread).
 */
void arch_context_switch(thread_t *oldthread, thread_t *newthread) {
#if X86_WITH_FPU
    /* hand the fpu state of the outgoing thread off and load the incoming one's */
    fpu_context_switch(oldthread, newthread);
#endif

    /* x86_64_context_switch is implemented outside this file — presumably in
     * assembly, doing the save/restore analogous to the 32-bit inline asm
     * above; confirm against the arch asm sources */
    x86_64_context_switch(&oldthread->arch.sp, newthread->arch.sp);
}
143 #endif
144
145