/*
 * Copyright (c) 2009 Corey Tabaka
 * Copyright (c) 2014 Travis Geiselbrecht
 * Copyright (c) 2015-2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include <sys/types.h>
#include <string.h>
#include <stdlib.h>
#include <debug.h>
#include <kernel/thread.h>
#include <kernel/spinlock.h>
#include <arch/x86.h>
#include <arch/x86/descriptor.h>
#include <arch/fpu.h>

static void initial_thread_func(void) __NO_RETURN;
static void initial_thread_func(void)
{
    int ret;
    thread_t *current_thread = get_current_thread();

    /* release the thread lock that was implicitly held across the reschedule */
    thread_unlock_ints_disabled();
    arch_enable_ints();

    ret = current_thread->entry(current_thread->arg);

    thread_exit(ret);
}

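/*
 * Set up the initial thread for this CPU to run on the static boot stack
 * (_kstack); only the boot CPU (cpu 0) is supported here.
 */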
void arch_init_thread_initialize(struct thread *thread, uint cpu)
{
    extern uint8_t _kstack[];
    size_t stack_size = PAGE_SIZE;
    uint8_t *cpu_stack = _kstack;
    ASSERT(cpu == 0);
    thread->stack = cpu_stack;
    thread->stack_high = cpu_stack + stack_size;
    thread->stack_size = stack_size;
}

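/*
 * Build an initial context switch frame at the top of the thread's stack so
 * that the first context switch into this thread "returns" into
 * initial_thread_func.
 */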
void arch_thread_initialize(thread_t *t)
{
    // create a default stack frame on the stack
    vaddr_t stack_top = (vaddr_t)t->stack + t->stack_size;

#if ARCH_X86_32
    // make sure the top of the stack is 8 byte aligned for ABI compliance
    stack_top = round_down(stack_top, 8);
    struct x86_32_context_switch_frame *frame = (struct x86_32_context_switch_frame *)(stack_top);
#endif
#if ARCH_X86_64
    // make sure the top of the stack is 16 byte aligned for ABI compliance
    stack_top = round_down(stack_top, 16);

    // start the frame 8 bytes off the 16 byte alignment because of the way the
    // context switch will pop the return address off the stack. After the first
    // context switch, this leaves the stack 8 bytes off the 16 byte alignment,
    // exactly as a called function expects it on entry.
    stack_top -= 8;
    struct x86_64_context_switch_frame *frame = (struct x86_64_context_switch_frame *)(stack_top);
#endif

    // move down a frame size and zero it out
    frame--;
    memset(frame, 0, sizeof(*frame));

#if ARCH_X86_32
    frame->eip = (vaddr_t) &initial_thread_func;
    frame->eflags = 0x3002; // IF = 0, NT = 0, IOPL = 3
#endif

#if ARCH_X86_64
    frame->rip = (vaddr_t) &initial_thread_func;
    frame->rflags = 0x3002; /* IF = 0, NT = 0, IOPL = 3 */
#endif
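    /*
     * With IF = 0 in the saved flags, the thread starts in initial_thread_func
     * with interrupts disabled; they are re-enabled there via arch_enable_ints()
     * after the thread lock is released.
     */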

    // initialize the saved fpu state
    fpu_init_thread_states(t);

    // set the stack pointer
    t->arch.sp = (vaddr_t)frame;
}

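/*
 * Only dump the saved stack pointer for threads that are not currently
 * running; a running thread's saved sp is stale.
 */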
void arch_dump_thread(thread_t *t)
{
    if (t->state != THREAD_RUNNING) {
        dprintf(INFO, "\tarch: ");
        dprintf(INFO, "sp 0x%lx\n", t->arch.sp);
    }
}

#if ARCH_X86_32

void arch_context_switch(thread_t *oldthread, thread_t *newthread)
{
    //dprintf(DEBUG, "arch_context_switch: old %p (%s), new %p (%s)\n", oldthread, oldthread->name, newthread, newthread->name);

#if X86_WITH_FPU
    fpu_context_switch(oldthread, newthread);
#endif

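    /*
     * Push a resume address (label 1), the flags, and the general purpose
     * registers onto the old thread's stack, save its stack pointer, switch
     * to the new thread's saved stack, then pop its registers and "ret"
     * through its saved resume address.
     */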
    __asm__ __volatile__ (
        "pushl $1f           \n\t"
        "pushf               \n\t"
        "pusha               \n\t"
        "movl %%esp,(%%edx)  \n\t"
        "movl %%eax,%%esp    \n\t"
        "popa                \n\t"
        "popf                \n\t"
        "ret                 \n\t"
        "1:                  \n\t"

        :
        : "d" (&oldthread->arch.sp), "a" (newthread->arch.sp)
    );
}

#elif ARCH_X86_64

void arch_context_switch(thread_t *oldthread, thread_t *newthread)
{
    uint64_t stack_top = (uint64_t)newthread->stack + newthread->stack_size;
    tss_t *tss_base = get_tss_base();
#if X86_WITH_FPU
    fpu_context_switch(oldthread, newthread);
#endif
    /* Exceptions and interrupts from user-space set RSP to TSS:RSP0 */
    tss_base->rsp0 = stack_top;
    /* The SYSENTER instruction sets RSP to SYSENTER_ESP_MSR */
    write_msr(SYSENTER_ESP_MSR, stack_top);
    /*
     * The SYSCALL instruction does not set RSP, so we also store the stack
     * pointer in GS:SYSCALL_STACK_OFF so the syscall handler can easily get
     * it.
     */
    x86_write_gs_with_offset(SYSCALL_STACK_OFF, stack_top);

    /* Switch the FS base, which is used to store TLS */
    oldthread->arch.fs_base = read_msr(X86_MSR_FS_BASE);
    write_msr(X86_MSR_FS_BASE, newthread->arch.fs_base);

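    /*
     * x86_64_context_switch is expected to save the old thread's callee-saved
     * state on its stack, store the resulting stack pointer through the first
     * argument, then load the new thread's saved stack pointer and restore
     * its state.
     */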
    x86_64_context_switch(&oldthread->arch.sp, newthread->arch.sp);
}
#endif