1 /*
2 * Copyright (C) 2023 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include "berberis/guest_os_primitives/guest_thread.h"
18
19 #include <sys/mman.h> // mprotect
20
21 #if defined(__BIONIC__)
22 #include "private/bionic_constants.h"
23 #include "private/bionic_tls.h"
24 #endif
25
26 #include "berberis/base/checks.h"
27 #include "berberis/base/mmap.h"
28 #include "berberis/base/tracing.h"
29 #include "berberis/guest_state/guest_addr.h" // ToGuestAddr
30 #include "berberis/guest_state/guest_state_opaque.h"
31 #include "berberis/runtime_primitives/host_stack.h"
32 #include "native_bridge_support/linker/static_tls_config.h"
33
34 #if defined(__BIONIC__)
35 #include "get_tls.h"
36 #endif
37
38 extern "C" void berberis_UnmapAndExit(void* ptr, size_t size, int status);
39
40 namespace berberis {
41
42 // Avoid depending on the whole intrinsics module just for this symbol.
43 // TODO(b/232598137): Maybe export an isolated header from intrinsics for this. Or
44 // alternatively export it from runtime_library.h.
45 namespace intrinsics {
46
47 void InitState();
48
49 } // namespace intrinsics
50
51 NativeBridgeStaticTlsConfig g_static_tls_config;
52
namespace {

// Size of GuestThread rounded up to whole pages. GuestThread objects are
// placement-constructed in anonymous mappings (see Create()), so their
// storage size must be page-granular.
const size_t kGuestThreadPageAlignedSize = AlignUpPageSize(sizeof(GuestThread));

}  // namespace
58
59 // static
Create()60 GuestThread* GuestThread::Create() {
61 // ATTENTION: GuestThread is aligned on 16, as fp registers in CPUState are aligned on 16, for
62 // efficient handling with aligned SSE memory access instructions. Thus, avoid using 'new', as
63 // it might not honor alignment! See b/64554026.
64 //
65 // ATTENTION: Bionic allocates thread internal data together with thread stack.
66 // In case of user provided stack, thread internal data goes there.
67 void* thread_storage = Mmap(kGuestThreadPageAlignedSize);
68 if (thread_storage == MAP_FAILED) {
69 return nullptr;
70 }
71
72 GuestThread* thread = new (thread_storage) GuestThread;
73 CHECK(thread);
74
75 thread->state_ = CreateThreadState();
76 if (!thread->state_) {
77 TRACE("failed to allocate thread state");
78 Destroy(thread);
79 return nullptr;
80 }
81 SetGuestThread(*thread->state_, thread);
82
83 intrinsics::InitState();
84
85 return thread;
86 }
87
88 // static
CreateClone(const GuestThread * parent,bool share_signal_actions)89 GuestThread* GuestThread::CreateClone(const GuestThread* parent, bool share_signal_actions) {
90 GuestThread* thread = Create();
91 if (thread == nullptr) {
92 return nullptr;
93 }
94
95 // TODO(156271630): alloc host stack guard?
96 thread->host_stack_ = MmapOrDie(GetStackSizeForTranslation());
97 if (thread->host_stack_ == MAP_FAILED) {
98 TRACE("failed to allocate host stack!");
99 thread->host_stack_ = nullptr;
100 Destroy(thread);
101 return nullptr;
102 }
103
104 SetCPUState(*thread->state(), GetCPUState(*parent->state()));
105 SetTlsAddr(*thread->state(), GetTlsAddr(*parent->state()));
106
107 if (share_signal_actions) {
108 // New shared_ptr user.
109 thread->signal_actions_ = parent->signal_actions_;
110 } else {
111 thread->CloneSignalActionsTableFrom(parent->signal_actions_.get());
112 }
113
114 return thread;
115 }
116
117 // static
CreatePthread(void * stack,size_t stack_size,size_t guard_size)118 GuestThread* GuestThread::CreatePthread(void* stack, size_t stack_size, size_t guard_size) {
119 GuestThread* thread = Create();
120 if (thread == nullptr) {
121 return nullptr;
122 }
123
124 if (!thread->AllocStack(stack, stack_size, guard_size)) {
125 Destroy(thread);
126 return nullptr;
127 }
128
129 SetStackRegister(GetCPUState(*thread->state()), thread->stack_top_);
130
131 if (!thread->AllocShadowCallStack()) {
132 Destroy(thread);
133 return nullptr;
134 }
135
136 SetShadowCallStackPointer(GetCPUState(*thread->state()), thread->scs_base_);
137
138 // Static TLS must be in an independent mapping, because on creation of main thread its config
139 // is yet unknown. Loader sets main thread's static TLS explicitly later.
140 if (!thread->AllocStaticTls()) {
141 Destroy(thread);
142 return nullptr;
143 }
144
145 thread->SetDefaultSignalActionsTable();
146
147 return thread;
148 }
149
150 // static
CreateForTest(ThreadState * state)151 GuestThread* GuestThread::CreateForTest(ThreadState* state) {
152 void* thread_storage = Mmap(kGuestThreadPageAlignedSize);
153 if (thread_storage == MAP_FAILED) {
154 return nullptr;
155 }
156 GuestThread* thread = new (thread_storage) GuestThread;
157 thread->state_ = state;
158 return thread;
159 }
160
161 // static
Destroy(GuestThread * thread)162 void GuestThread::Destroy(GuestThread* thread) {
163 CHECK(thread);
164 // ATTENTION: Don't run guest code from here!
165 if (ArePendingSignalsPresent(*thread->state_)) {
166 TRACE("thread destroyed with pending signals, signals ignored!");
167 }
168 thread->signal_actions_.reset();
169
170 if (thread->host_stack_) {
171 // This happens only on cleanup after failed creation.
172 MunmapOrDie(thread->host_stack_, GetStackSizeForTranslation());
173 }
174 if (thread->mmap_size_) {
175 MunmapOrDie(thread->stack_, thread->mmap_size_);
176 }
177 #if defined(__BIONIC__)
178 if (thread->static_tls_ != nullptr) {
179 MunmapOrDie(thread->static_tls_, AlignUpPageSize(g_static_tls_config.size));
180 }
181 if (thread->scs_region_ != nullptr) {
182 MunmapOrDie(thread->scs_region_, SCS_GUARD_REGION_SIZE);
183 }
184 #endif // defined(__BIONIC__)
185 if (thread->state_) {
186 DestroyThreadState(thread->state_);
187 }
188 MunmapOrDie(thread, kGuestThreadPageAlignedSize);
189 }
190
191 // static
Exit(GuestThread * thread,int status)192 void GuestThread::Exit(GuestThread* thread, int status) {
193 // Destroy the thread without unmapping the host stack.
194 void* host_stack = thread->host_stack_;
195 thread->host_stack_ = nullptr;
196 Destroy(thread);
197
198 if (host_stack) {
199 berberis_UnmapAndExit(host_stack, GetStackSizeForTranslation(), status);
200 } else {
201 syscall(__NR_exit, status);
202 }
203 FATAL("thread didn't exit");
204 }
205
AllocStack(void * stack,size_t stack_size,size_t guard_size)206 bool GuestThread::AllocStack(void* stack, size_t stack_size, size_t guard_size) {
207 // Here is what bionic does, see bionic/pthread_create.cpp:
208 //
209 // For user-provided stack, it assumes guard_size is included in stack size.
210 //
211 // For new stack, it adds given guard and stack sizes to get actual stack size:
212 // |<- guard_size ->|<- stack_size -------------------->|
213 // | guard | stack | pthread_internal_t | tls | GUARD |
214 // |<- actual stack_size --------->|
215 // ^ stack_base ^ stack_top
216
217 if (stack) {
218 // User-provided stack.
219 stack_ = nullptr; // Do not unmap in Destroy!
220 mmap_size_ = 0;
221 guard_size_ = guard_size;
222 stack_size_ = stack_size;
223 stack_top_ = ToGuestAddr(stack) + stack_size_;
224 return true;
225 }
226
227 if (AlignUpPageSizeOverflow(guard_size, &guard_size_)) {
228 return false;
229 }
230
231 size_t aligned_stack_size{};
232 if (AlignUpPageSizeOverflow(stack_size, &aligned_stack_size)) {
233 return false;
234 }
235
236 if (__builtin_add_overflow(aligned_stack_size, guard_size_, &mmap_size_)) {
237 return false;
238 }
239 stack_size_ = mmap_size_;
240
241 if (stack_size_ == 0) {
242 return false;
243 }
244
245 stack_ = Mmap(mmap_size_);
246 if (stack_ == MAP_FAILED) {
247 TRACE("failed to allocate stack!");
248 stack_ = nullptr; // Do not unmap in Destroy!
249 return false;
250 }
251
252 if (mprotect(stack_, guard_size_, PROT_NONE) != 0) {
253 TRACE("failed to protect stack!");
254 return false;
255 }
256
257 // `stack_size_ - 16` is guaranteed to not overflow since it is not 0 and
258 // aligned to the page size.
259 if (__builtin_add_overflow(ToGuestAddr(stack_), stack_size_ - 16, &stack_top_)) {
260 return false;
261 }
262
263 return true;
264 }
265
AllocShadowCallStack()266 bool GuestThread::AllocShadowCallStack() {
267 #if defined(__BIONIC__) && defined(BERBERIS_GUEST_LP64) && !defined(BERBERIS_GUEST_ARCH_X86_64)
268 CHECK(IsAlignedPageSize(SCS_GUARD_REGION_SIZE));
269 CHECK(IsAlignedPageSize(SCS_SIZE));
270
271 scs_region_ = Mmap(SCS_GUARD_REGION_SIZE);
272 if (scs_region_ == MAP_FAILED) {
273 TRACE("failed to allocate shadow call stack!");
274 scs_region_ = nullptr; // do not unmap in Destroy!
275 return false;
276 }
277
278 GuestAddr scs_region_base = ToGuestAddr(scs_region_);
279 // TODO(b/138425729): use random offset!
280 scs_base_ = AlignUp(scs_region_base, SCS_SIZE);
281 GuestAddr scs_top = scs_base_ + SCS_SIZE;
282
283 if (mprotect(scs_region_, scs_base_ - scs_region_base, PROT_NONE) != 0 ||
284 mprotect(ToHostAddr<void>(scs_top),
285 scs_region_base + SCS_GUARD_REGION_SIZE - scs_top,
286 PROT_NONE) != 0) {
287 TRACE("failed to protect shadow call stack!");
288 return false;
289 }
290 #endif // defined(__BIONIC__) && defined(BERBERIS_GUEST_LP64) &&
291 // !defined(BERBERIS_GUEST_ARCH_X86_64)
292 return true;
293 }
294
AllocStaticTls()295 bool GuestThread::AllocStaticTls() {
296 // For the main thread, this function is called twice.
297
298 CHECK_EQ(nullptr, static_tls_);
299
300 #if defined(__BIONIC__)
301 if (g_static_tls_config.size > 0) {
302 static_tls_ = Mmap(AlignUpPageSize(g_static_tls_config.size));
303 if (static_tls_ == MAP_FAILED) {
304 TRACE("failed to allocate static tls!");
305 static_tls_ = nullptr; // Do not unmap in Destroy!
306 return false;
307 }
308 }
309 #endif // defined(__BIONIC__)
310
311 return true;
312 }
313
InitStaticTls()314 void GuestThread::InitStaticTls() {
315 #if defined(__BIONIC__)
316 if (static_tls_ == nullptr) {
317 // Leave the thread pointer unset when starting the main thread.
318 return;
319 }
320 // First initialize static TLS using the initialization image, then update
321 // some of the TLS slots. Reuse the host's pthread_internal_t and
322 // bionic_tls objects. We verify that these structures are safe to use with
323 // checks in berberis/android_api/libc/pthread_translation.h.
324 memcpy(static_tls_, g_static_tls_config.init_img, g_static_tls_config.size);
325 void** tls =
326 reinterpret_cast<void**>(reinterpret_cast<char*>(static_tls_) + g_static_tls_config.tpoff);
327 tls[g_static_tls_config.tls_slot_thread_id] = GetTls()[TLS_SLOT_THREAD_ID];
328 tls[g_static_tls_config.tls_slot_bionic_tls] = GetTls()[TLS_SLOT_BIONIC_TLS];
329 GetTls()[TLS_SLOT_NATIVE_BRIDGE_GUEST_STATE] = GetThreadStateStorage(*state_);
330 SetTlsAddr(*state_, ToGuestAddr(tls));
331 #else
332 // For Glibc we provide stub which is only usable to distinguish different threads.
333 // This is the only thing that many applications need.
334 SetTlsAddr(*state_, GettidSyscall());
335 #endif
336 }
337
ConfigStaticTls(const NativeBridgeStaticTlsConfig * config)338 void GuestThread::ConfigStaticTls(const NativeBridgeStaticTlsConfig* config) {
339 // This function is called during Bionic linker initialization, before any
340 // guest constructor functions run. It should be safe to omit locking.
341 g_static_tls_config = *config;
342
343 // Reinitialize the main thread's static TLS.
344 CHECK_EQ(true, AllocStaticTls());
345 InitStaticTls();
346 }
347
GetHostStackTop() const348 void* GuestThread::GetHostStackTop() const {
349 CHECK(host_stack_);
350 auto top = reinterpret_cast<uintptr_t>(host_stack_) + GetStackSizeForTranslation();
351 return reinterpret_cast<void*>(top);
352 }
353
354 } // namespace berberis
355