/*
 * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230)
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */

#include <object.h>
#include <machine.h>
#include <arch/model/statedata.h>
#include <arch/kernel/vspace.h>
#include <arch/kernel/thread.h>
#include <linker.h>

void Arch_switchToThread(tcb_t *tcb)
{
    /* set the page directory (VSpace root) for the new thread */
    setVMRoot(tcb);
#ifdef ENABLE_SMP_SUPPORT
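    /* Publish the new thread's register save area in this core's node-local
     * state (addressed via %gs) so the trap entry code can locate it when
     * saving user context; see traps.S for the consuming side. */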
    asm volatile("movq %[value], %%gs:%c[offset]"
                 :
                 : [value] "r"(&tcb->tcbArch.tcbContext.registers[Error + 1]),
                 [offset] "i"(OFFSETOF(nodeInfo_t, currentThreadUserContext)));
#endif
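    /* Issue an Indirect Branch Prediction Barrier so branch-target state
     * trained by the outgoing thread cannot steer speculation in the incoming
     * one (Spectre v2 / branch target injection mitigation). */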
    if (config_set(CONFIG_KERNEL_X86_IBPB_ON_CONTEXT_SWITCH)) {
        x86_ibpb();
    }

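    /* Overwrite the Return Stack Buffer so the incoming thread cannot consume
     * return-address predictions left behind by the outgoing one. */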
    if (config_set(CONFIG_KERNEL_X86_RSB_ON_CONTEXT_SWITCH)) {
        x86_flush_rsb();
    }
}

BOOT_CODE void Arch_configureIdleThread(tcb_t *tcb)
{
    setRegister(tcb, FLAGS, FLAGS_USER_DEFAULT);
    setRegister(tcb, NextIP, (uint64_t)idleThreadStart);
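    /* The idle thread runs in kernel mode, so it gets the kernel code and
     * data segment selectors rather than the user ones. */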
    setRegister(tcb, CS, SEL_CS_0);
    setRegister(tcb, SS, SEL_DS_0);
    /* We set RSP to 0 even though the idle thread will never touch it, as this
     * lets us distinguish an interrupt taken during the idle thread from one
     * taken during kernel execution just by examining the saved RSP value
     * (kernel execution always has a valid, non-zero RSP). See traps.S for the
     * other side of this. */
    setRegister(tcb, RSP, 0);
}

void Arch_switchToIdleThread(void)
{
    tcb_t *tcb = NODE_STATE(ksIdleThread);
    /* Force the idle thread to run on the kernel page table */
    setVMRoot(tcb);
#ifdef ENABLE_SMP_SUPPORT
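    /* As in Arch_switchToThread: point this core's currentThreadUserContext
     * at the idle thread's register save area. */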
    asm volatile("movq %[value], %%gs:%c[offset]"
                 :
                 : [value] "r"(&tcb->tcbArch.tcbContext.registers[Error + 1]),
                 [offset] "i"(OFFSETOF(nodeInfo_t, currentThreadUserContext)));
#endif
}

void Arch_activateIdleThread(tcb_t *tcb)
{
    /* Don't need to do anything */
}

void Mode_postModifyRegisters(tcb_t *tptr)
{
    /* Setting Error to 0 forces a return via the interrupt path, which does a
     * full register restore. We skip this for the current thread, which must
     * still exit the kernel via sysret. */
    if (tptr != NODE_STATE(ksCurThread)) {
        setRegister(tptr, Error, 0);
    }
}