1 // Copyright 2016 The SwiftShader Authors. All Rights Reserved.
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
6 //
7 // http://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
14
15 #include "ExecutableMemory.hpp"
16
17 #include "Debug.hpp"
18
19 #if defined(_WIN32)
20 # ifndef WIN32_LEAN_AND_MEAN
21 # define WIN32_LEAN_AND_MEAN
22 # endif
23 # include <Windows.h>
24 # include <intrin.h>
25 #elif defined(__Fuchsia__)
26 # include <unistd.h>
27 # include <zircon/process.h>
28 # include <zircon/syscalls.h>
29 #else
30 # include <errno.h>
31 # include <sys/mman.h>
32 # include <stdlib.h>
33 # include <unistd.h>
34 #endif
35
36 #if defined(__ANDROID__) && !defined(ANDROID_HOST_BUILD) && !defined(ANDROID_NDK_BUILD)
37 # include <sys/prctl.h>
38 #endif
39
40 #include <memory.h>
41
42 #undef allocate
43 #undef deallocate
44
45 #if(defined(__i386__) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)) && !defined(__x86__)
46 # define __x86__
47 #endif
48
49 #define STRINGIFY(x) #x
50 #define MACRO_STRINGIFY(x) STRINGIFY(x)
51
52 // A Clang extension to determine compiler features.
53 // We use it to detect Sanitizer builds (e.g. -fsanitize=memory).
54 #ifndef __has_feature
55 # define __has_feature(x) 0
56 #endif
57
58 namespace rr {
59 namespace {
60
// Bookkeeping header stored immediately before each aligned pointer
// returned by allocateRaw() (non-posix_memalign path), so deallocate()
// can recover the original unaligned block.
struct Allocation
{
	//	size_t bytes;
	unsigned char *block;  // Start of the raw new[] allocation to delete[].
};
66
// Allocates `bytes` of memory aligned to `alignment`, which must be a
// power of two. Returns nullptr on failure. The result must be released
// with deallocate().
void *allocateRaw(size_t bytes, size_t alignment)
{
	ASSERT((alignment & (alignment - 1)) == 0);  // Power of 2 alignment.

#if defined(__linux__) && defined(REACTOR_ANONYMOUS_MMAP_NAME)
	if(alignment < sizeof(void *))
	{
		// malloc() already guarantees at least pointer alignment.
		return malloc(bytes);
	}
	else
	{
		void *allocation;
		int result = posix_memalign(&allocation, alignment, bytes);
		if(result != 0)
		{
			// posix_memalign() returns the error code instead of setting
			// errno; propagate it so callers see malloc-style errno.
			errno = result;
			allocation = nullptr;
		}
		return allocation;
	}
#else
	// Over-allocate so there is room to both align the returned pointer
	// and stash an Allocation header directly in front of it.
	unsigned char *block = new unsigned char[bytes + sizeof(Allocation) + alignment];
	unsigned char *aligned = nullptr;

	if(block)
	{
		// Advance past the header, then round down to the alignment boundary.
		aligned = (unsigned char *)((uintptr_t)(block + sizeof(Allocation) + alignment - 1) & -(intptr_t)alignment);
		Allocation *allocation = (Allocation *)(aligned - sizeof(Allocation));

		//	allocation->bytes = bytes;
		allocation->block = block;  // Remember the raw pointer for deallocate().
	}

	return aligned;
#endif
}
103
#if defined(_WIN32)
// Translates a PERMISSION_* bitmask into the corresponding Win32 PAGE_*
// protection constant. Combinations without a Win32 equivalent yield
// PAGE_NOACCESS.
DWORD permissionsToProtectMode(int permissions)
{
	if(permissions == PERMISSION_READ)
	{
		return PAGE_READONLY;
	}

	if(permissions == PERMISSION_EXECUTE)
	{
		return PAGE_EXECUTE;
	}

	if(permissions == (PERMISSION_READ | PERMISSION_WRITE))
	{
		return PAGE_READWRITE;
	}

	if(permissions == (PERMISSION_READ | PERMISSION_EXECUTE))
	{
		return PAGE_EXECUTE_READ;
	}

	if(permissions == (PERMISSION_READ | PERMISSION_WRITE | PERMISSION_EXECUTE))
	{
		return PAGE_EXECUTE_READWRITE;
	}

	return PAGE_NOACCESS;
}
#endif
123
124 #if !defined(_WIN32) && !defined(__Fuchsia__)
permissionsToMmapProt(int permissions)125 int permissionsToMmapProt(int permissions)
126 {
127 int result = 0;
128 if(permissions & PERMISSION_READ)
129 {
130 result |= PROT_READ;
131 }
132 if(permissions & PERMISSION_WRITE)
133 {
134 result |= PROT_WRITE;
135 }
136 if(permissions & PERMISSION_EXECUTE)
137 {
138 result |= PROT_EXEC;
139 }
140 return result;
141 }
142 #endif // !defined(_WIN32) && !defined(__Fuchsia__)
143
#if defined(__linux__) && defined(REACTOR_ANONYMOUS_MMAP_NAME)
#	if !defined(__ANDROID__) || defined(ANDROID_HOST_BUILD) || defined(ANDROID_NDK_BUILD)
// Create a file descriptor for anonymous memory with the given
// name. Returns -1 on failure.
// TODO: remove once libc wrapper exists.
static int memfd_create(const char *name, unsigned int flags)
{
	// Raw memfd_create syscall numbers, per architecture. Note that the
	// 32-bit ARM (EABI) number is 385; 279 is the arm64 number.
#	if __aarch64__
#		define __NR_memfd_create 279
#	elif __arm__
#		define __NR_memfd_create 385
#	elif __powerpc64__
#		define __NR_memfd_create 360
#	elif __i386__
#		define __NR_memfd_create 356
#	elif __x86_64__
#		define __NR_memfd_create 319
#	endif /* __NR_memfd_create */
#	ifdef __NR_memfd_create
	// In the event of no system call this returns -1 with errno set
	// as ENOSYS.
	return syscall(__NR_memfd_create, name, flags);
#	else
	// Unknown architecture: behave as if the syscall is unavailable.
	return -1;
#	endif
}

// Returns a file descriptor for use with an anonymous mmap, if
// memfd_create fails, -1 is returned. Note, the mappings should be
// MAP_PRIVATE so that underlying pages aren't shared.
int anonymousFd()
{
	// Created once; the same fd backs all executable-memory mappings.
	static int fd = memfd_create(MACRO_STRINGIFY(REACTOR_ANONYMOUS_MMAP_NAME), 0);
	return fd;
}
#	else   // __ANDROID__ && !ANDROID_HOST_BUILD && !ANDROID_NDK_BUILD
// On-device Android builds never use a named fd; mappings fall back to
// MAP_ANONYMOUS and are named via prctl(PR_SET_VMA_ANON_NAME) instead.
int anonymousFd()
{
	return -1;
}
#	endif  // __ANDROID__ && !ANDROID_HOST_BUILD && !ANDROID_NDK_BUILD

// Ensure there is enough space in the "anonymous" fd for length.
void ensureAnonFileSize(int anonFd, size_t length)
{
	// Tracks the high-water mark so the file is only grown, never shrunk.
	static size_t fileSize = 0;
	if(length > fileSize)
	{
		[[maybe_unused]] int result = ftruncate(anonFd, length);
		ASSERT(result == 0);
		fileSize = length;
	}
}
#endif  // defined(__linux__) && defined(REACTOR_ANONYMOUS_MMAP_NAME)
198
#if defined(__Fuchsia__)
// Converts a PERMISSION_* bitmask into the equivalent Zircon VM mapping
// options.
zx_vm_option_t permissionsToZxVmOptions(int permissions)
{
	zx_vm_option_t options = 0;
	options |= (permissions & PERMISSION_READ) ? ZX_VM_PERM_READ : 0;
	options |= (permissions & PERMISSION_WRITE) ? ZX_VM_PERM_WRITE : 0;
	options |= (permissions & PERMISSION_EXECUTE) ? ZX_VM_PERM_EXECUTE : 0;
	return options;
}
#endif  // defined(__Fuchsia__)
218
219 } // anonymous namespace
220
// Returns the operating system's virtual-memory page size, queried once
// and cached for all subsequent calls.
size_t memoryPageSize()
{
	static const size_t cachedPageSize = [] {
#if defined(_WIN32)
		SYSTEM_INFO systemInfo;
		GetSystemInfo(&systemInfo);
		return static_cast<size_t>(systemInfo.dwPageSize);
#else
		return static_cast<size_t>(sysconf(_SC_PAGESIZE));
#endif
	}();

	return cachedPageSize;
}
235
allocate(size_t bytes,size_t alignment)236 void *allocate(size_t bytes, size_t alignment)
237 {
238 void *memory = allocateRaw(bytes, alignment);
239
240 // Zero-initialize the memory, for security reasons.
241 // MemorySanitizer builds skip this so that we can detect when we
242 // inadvertently rely on this, which would indicate a bug.
243 if(memory && !__has_feature(memory_sanitizer))
244 {
245 memset(memory, 0, bytes);
246 }
247
248 return memory;
249 }
250
deallocate(void * memory)251 void deallocate(void *memory)
252 {
253 #if defined(__linux__) && defined(REACTOR_ANONYMOUS_MMAP_NAME)
254 free(memory);
255 #else
256 if(memory)
257 {
258 unsigned char *aligned = (unsigned char *)memory;
259 Allocation *allocation = (Allocation *)(aligned - sizeof(Allocation));
260
261 delete[] allocation->block;
262 }
263 #endif
264 }
265
266 // Rounds |x| up to a multiple of |m|, where |m| is a power of 2.
roundUp(uintptr_t x,uintptr_t m)267 inline uintptr_t roundUp(uintptr_t x, uintptr_t m)
268 {
269 ASSERT(m > 0 && (m & (m - 1)) == 0); // |m| must be a power of 2.
270 return (x + m - 1) & ~(m - 1);
271 }
272
// Allocates `bytes` rounded up to whole pages, with the given
// PERMISSION_* bits applied. `need_exec` signals that the pages will
// eventually hold executable code; it is only consulted on Fuchsia,
// where the backing VMO must be explicitly marked executable.
// Returns nullptr on failure.
void *allocateMemoryPages(size_t bytes, int permissions, bool need_exec)
{
	size_t pageSize = memoryPageSize();
	size_t length = roundUp(bytes, pageSize);
	void *mapping = nullptr;

#if defined(_WIN32)
	return VirtualAlloc(nullptr, length, MEM_COMMIT | MEM_RESERVE,
	                    permissionsToProtectMode(permissions));
#elif defined(__linux__) && defined(REACTOR_ANONYMOUS_MMAP_NAME)
	int flags = MAP_PRIVATE;

	// Try to name the memory region for the executable code,
	// to aid profilers.
	int anonFd = anonymousFd();
	if(anonFd == -1)
	{
		flags |= MAP_ANONYMOUS;
	}
	else
	{
		// The backing file must cover the entire mapped range.
		ensureAnonFileSize(anonFd, length);
	}

	mapping = mmap(
	    nullptr, length, permissionsToMmapProt(permissions), flags, anonFd, 0);

	if(mapping == MAP_FAILED)
	{
		mapping = nullptr;
	}
#	if defined(__ANDROID__) && !defined(ANDROID_HOST_BUILD) && !defined(ANDROID_NDK_BUILD)
	else
	{
		// On Android, prefer to use a non-standard prctl called
		// PR_SET_VMA_ANON_NAME to set the name of a private anonymous
		// mapping, as Android restricts EXECUTE permission on
		// CoW/shared anonymous mappings with sepolicy neverallows.
		prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, mapping, length,
		      MACRO_STRINGIFY(REACTOR_ANONYMOUS_MMAP_NAME));
	}
#	endif  // __ANDROID__ && !ANDROID_HOST_BUILD && !ANDROID_NDK_BUILD
#elif defined(__Fuchsia__)
	zx_handle_t vmo;
	if(zx_vmo_create(length, 0, &vmo) != ZX_OK)
	{
		return nullptr;
	}
	// Executable mappings require the VMO itself to carry execute rights.
	// NOTE(review): no zx_handle_close() on this failure path — presumably
	// zx_vmo_replace_as_executable() invalidates the handle on failure per
	// Zircon handle-replace semantics; verify against the Fuchsia docs.
	if(need_exec &&
	   zx_vmo_replace_as_executable(vmo, ZX_HANDLE_INVALID, &vmo) != ZX_OK)
	{
		return nullptr;
	}
	zx_vaddr_t reservation;
	zx_status_t status = zx_vmar_map(
	    zx_vmar_root_self(), permissionsToZxVmOptions(permissions), 0, vmo,
	    0, length, &reservation);
	// The handle is no longer needed once the range has been mapped.
	zx_handle_close(vmo);
	if(status != ZX_OK)
	{
		return nullptr;
	}

	// zx_vmar_map() returns page-aligned address.
	ASSERT(roundUp(reservation, pageSize) == reservation);

	mapping = reinterpret_cast<void *>(reservation);
#elif defined(__APPLE__)
	int prot = permissionsToMmapProt(permissions);
	int flags = MAP_PRIVATE | MAP_ANONYMOUS;
	// On macOS 10.14 and higher, executables that are code signed with the
	// "runtime" option cannot execute writable memory by default. They can opt
	// into this capability by specifying the "com.apple.security.cs.allow-jit"
	// code signing entitlement and allocating the region with the MAP_JIT flag.
	mapping = mmap(nullptr, length, prot, flags | MAP_JIT, -1, 0);

	if(mapping == MAP_FAILED)
	{
		// Retry without MAP_JIT (for older macOS versions).
		mapping = mmap(nullptr, length, prot, flags, -1, 0);
	}

	if(mapping == MAP_FAILED)
	{
		mapping = nullptr;
	}
#else
	// Generic fallback: page-aligned heap allocation, then apply the
	// requested protection with mprotect() via protectMemoryPages().
	mapping = allocate(length, pageSize);
	protectMemoryPages(mapping, length, permissions);
#endif

	return mapping;
}
366
// Changes the protection of `bytes` (rounded up to whole pages) starting
// at `memory` to the given PERMISSION_* bits. `memory` is expected to be
// page-aligned (as returned by allocateMemoryPages()). Asserts on failure.
void protectMemoryPages(void *memory, size_t bytes, int permissions)
{
	if(bytes == 0)
	{
		return;
	}

	// Protection can only be applied with page granularity.
	bytes = roundUp(bytes, memoryPageSize());

#if defined(_WIN32)
	unsigned long oldProtection;
	BOOL result =
	    VirtualProtect(memory, bytes, permissionsToProtectMode(permissions),
	                   &oldProtection);
	ASSERT(result);
#elif defined(__Fuchsia__)
	zx_status_t status = zx_vmar_protect(
	    zx_vmar_root_self(), permissionsToZxVmOptions(permissions),
	    reinterpret_cast<zx_vaddr_t>(memory), bytes);
	ASSERT(status == ZX_OK);
#else
	int result =
	    mprotect(memory, bytes, permissionsToMmapProt(permissions));
	ASSERT(result == 0);
#endif
}
393
deallocateMemoryPages(void * memory,size_t bytes)394 void deallocateMemoryPages(void *memory, size_t bytes)
395 {
396 #if defined(_WIN32)
397 unsigned long oldProtection;
398 BOOL result;
399 result = VirtualProtect(memory, bytes, PAGE_READWRITE, &oldProtection);
400 ASSERT(result);
401 result = VirtualFree(memory, 0, MEM_RELEASE);
402 ASSERT(result);
403 #elif defined(__APPLE__) || (defined(__linux__) && defined(REACTOR_ANONYMOUS_MMAP_NAME))
404 size_t pageSize = memoryPageSize();
405 size_t length = (bytes + pageSize - 1) & ~(pageSize - 1);
406 int result = munmap(memory, length);
407 ASSERT(result == 0);
408 #elif defined(__Fuchsia__)
409 size_t pageSize = memoryPageSize();
410 size_t length = roundUp(bytes, pageSize);
411 zx_status_t status = zx_vmar_unmap(
412 zx_vmar_root_self(), reinterpret_cast<zx_vaddr_t>(memory), length);
413 ASSERT(status == ZX_OK);
414 #else
415 int result = mprotect(memory, bytes, PROT_READ | PROT_WRITE);
416 ASSERT(result == 0);
417 deallocate(memory);
418 #endif
419 }
420
421 } // namespace rr
422