/*
 * Copyright (C) 2019 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "heap_tagging.h"
#include "malloc_common.h"
#include "malloc_tagged_pointers.h"

#include <bionic/pthread_internal.h>
#include <platform/bionic/malloc.h>
#include <sanitizer/hwasan_interface.h>
#include <sys/auxv.h>
#include <sys/prctl.h>

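// Entry points into the Scudo allocator, used only when it is the native
// allocator (see the USE_SCUDO blocks below): they toggle MTE memory tagging,
// enable recording of allocation/deallocation stacks, and expose the stack
// depot and ring buffer that hold those recordings for fault reporting.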
extern "C" void scudo_malloc_disable_memory_tagging();
extern "C" void scudo_malloc_set_track_allocation_stacks(int);

extern "C" const char* __scudo_get_stack_depot_addr();
extern "C" const char* __scudo_get_ring_buffer_addr();
extern "C" size_t __scudo_get_ring_buffer_size();
extern "C" size_t __scudo_get_stack_depot_size();

// Protected by `g_heap_tagging_lock`.
static HeapTaggingLevel heap_tagging_level = M_HEAP_TAGGING_LEVEL_NONE;

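// Applies the initial heap tagging level recorded in __libc_shared_globals()
// before libc initialization (e.g. by the dynamic linker, based on the
// process's configuration).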
void SetDefaultHeapTaggingLevel() {
#if defined(__aarch64__)
#if !__has_feature(hwaddress_sanitizer)
  heap_tagging_level = __libc_shared_globals()->initial_heap_tagging_level;
#endif

  __libc_memtag_stack_abi = __libc_shared_globals()->initial_memtag_stack_abi;

  __libc_globals.mutate([](libc_globals* globals) {
    switch (heap_tagging_level) {
      case M_HEAP_TAGGING_LEVEL_TBI:
        // Arrange for us to set pointer tags to POINTER_TAG, check tags on
        // deallocation and untag when passing pointers to the allocator.
        globals->heap_pointer_tag = (reinterpret_cast<uintptr_t>(POINTER_TAG) << TAG_SHIFT) |
                                    (0xffull << CHECK_SHIFT) | (0xffull << UNTAG_SHIFT);
        break;
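      // MTE modes: record in the shared globals that memory tagging is
      // enabled, and pick up whether stack tagging was requested as well.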
      case M_HEAP_TAGGING_LEVEL_SYNC:
      case M_HEAP_TAGGING_LEVEL_ASYNC:
        atomic_store(&globals->memtag, true);
        atomic_store(&__libc_memtag_stack, __libc_shared_globals()->initial_memtag_stack);
        break;
      default:
        break;
    };
  });

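  // Configure Scudo to match: disable its memory tagging under NONE/TBI, and
  // in SYNC mode ask it to record allocation/deallocation stack traces so that
  // tag-check faults can be attributed to a specific allocation.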
#if defined(USE_SCUDO) && !__has_feature(hwaddress_sanitizer)
  switch (heap_tagging_level) {
    case M_HEAP_TAGGING_LEVEL_TBI:
    case M_HEAP_TAGGING_LEVEL_NONE:
      scudo_malloc_disable_memory_tagging();
      break;
    case M_HEAP_TAGGING_LEVEL_SYNC:
      scudo_malloc_set_track_allocation_stacks(1);
      break;
    default:
      break;
  }
#endif  // USE_SCUDO
#endif  // aarch64
}

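// Updates the MTE tag-check-fault (TCF) mode of every thread in the process by
// issuing prctl(PR_SET_TAGGED_ADDR_CTRL) on each of them, preserving the other
// tagged-address-control bits. Returns false if any thread could not be
// updated.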
static bool set_tcf_on_all_threads(int tcf) {
  return android_run_on_all_threads(
      [](void* arg) {
        int tcf = *reinterpret_cast<int*>(arg);
        int tagged_addr_ctrl = prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0);
        if (tagged_addr_ctrl < 0) {
          return false;
        }

        tagged_addr_ctrl = (tagged_addr_ctrl & ~PR_MTE_TCF_MASK) | tcf;
        return prctl(PR_SET_TAGGED_ADDR_CTRL, tagged_addr_ctrl, 0, 0, 0) >= 0;
      },
      &tcf);
}

pthread_mutex_t g_heap_tagging_lock = PTHREAD_MUTEX_INITIALIZER;

// Requires `g_heap_tagging_lock` to be held.
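// A sketch of how this is normally reached, assuming the public mallopt()
// interface (whose implementation takes the lock before calling in here):
//
//   #include <malloc.h>
//   // Downgrade from SYNC to ASYNC tag checking; returns non-zero on success.
//   mallopt(M_BIONIC_SET_HEAP_TAGGING_LEVEL, M_HEAP_TAGGING_LEVEL_ASYNC);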
bool SetHeapTaggingLevel(HeapTaggingLevel tag_level) {
  if (tag_level == heap_tagging_level) {
    return true;
  }

  switch (tag_level) {
    case M_HEAP_TAGGING_LEVEL_NONE:
      __libc_globals.mutate([](libc_globals* globals) {
        if (heap_tagging_level == M_HEAP_TAGGING_LEVEL_TBI) {
          // Preserve the untag mask (we still want to untag pointers when passing them to the
          // allocator), but clear the fixed tag and the check mask, so that pointers are no longer
          // tagged and checks no longer happen.
          globals->heap_pointer_tag = static_cast<uintptr_t>(0xffull << UNTAG_SHIFT);
        }
        atomic_store(&__libc_memtag_stack, false);
        atomic_store(&globals->memtag, false);
      });

      if (heap_tagging_level != M_HEAP_TAGGING_LEVEL_TBI) {
        if (!set_tcf_on_all_threads(PR_MTE_TCF_NONE)) {
          error_log("SetHeapTaggingLevel: set_tcf_on_all_threads failed");
          return false;
        }
      }
#if defined(USE_SCUDO) && !__has_feature(hwaddress_sanitizer)
      scudo_malloc_disable_memory_tagging();
#endif
      break;
    case M_HEAP_TAGGING_LEVEL_TBI:
    case M_HEAP_TAGGING_LEVEL_ASYNC:
    case M_HEAP_TAGGING_LEVEL_SYNC:
      if (heap_tagging_level == M_HEAP_TAGGING_LEVEL_NONE) {
#if !__has_feature(hwaddress_sanitizer)
        // Suppress the error message in HWASan builds: apps may try to enable TBI (or even MTE
        // modes) without being aware of HWASan, so fail them silently.
        error_log(
            "SetHeapTaggingLevel: re-enabling tagging after it was disabled is not supported");
#endif
        return false;
      } else if (tag_level == M_HEAP_TAGGING_LEVEL_TBI ||
                 heap_tagging_level == M_HEAP_TAGGING_LEVEL_TBI) {
        error_log("SetHeapTaggingLevel: switching between TBI and ASYNC/SYNC is not supported");
        return false;
      }

      if (tag_level == M_HEAP_TAGGING_LEVEL_ASYNC) {
        // When entering ASYNC mode, specify that we want to allow upgrading to SYNC by OR'ing in
        // the SYNC flag. But if the kernel doesn't support specifying multiple TCF modes, fall back
        // to specifying a single mode.
        if (!set_tcf_on_all_threads(PR_MTE_TCF_ASYNC | PR_MTE_TCF_SYNC)) {
          set_tcf_on_all_threads(PR_MTE_TCF_ASYNC);
        }
#if defined(USE_SCUDO) && !__has_feature(hwaddress_sanitizer)
        scudo_malloc_set_track_allocation_stacks(0);
#endif
      } else if (tag_level == M_HEAP_TAGGING_LEVEL_SYNC) {
        set_tcf_on_all_threads(PR_MTE_TCF_SYNC);
#if defined(USE_SCUDO) && !__has_feature(hwaddress_sanitizer)
        scudo_malloc_set_track_allocation_stacks(1);
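        // Publish Scudo's stack depot and ring buffer locations through the
        // shared globals so the recorded allocation/deallocation stacks can be
        // picked up when an MTE fault is reported (the consumer, presumably
        // the crash-reporting machinery, only reads these pointers).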
        __libc_shared_globals()->scudo_ring_buffer = __scudo_get_ring_buffer_addr();
        __libc_shared_globals()->scudo_ring_buffer_size = __scudo_get_ring_buffer_size();
        __libc_shared_globals()->scudo_stack_depot = __scudo_get_stack_depot_addr();
        __libc_shared_globals()->scudo_stack_depot_size = __scudo_get_stack_depot_size();
#endif
      }
      break;
    default:
      error_log("SetHeapTaggingLevel: unknown tagging level");
      return false;
  }

  heap_tagging_level = tag_level;
  info_log("SetHeapTaggingLevel: tag level set to %d", tag_level);

  return true;
}

#ifdef __aarch64__
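// Resets the MTE allocation tags over [from, to) by storing the logical tag of
// `from` (an untagged stack address, i.e. tag 0) to each 16-byte granule with
// the stg instruction. Callers pass 16-byte-aligned SP values, with from below
// to unless the range is empty.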
static inline __attribute__((no_sanitize("memtag"))) void untag_memory(void* from, void* to) {
  if (from == to) {
    return;
  }
  __asm__ __volatile__(
      ".arch_extension mte\n"
      "1:\n"
      "stg %[Ptr], [%[Ptr]], #16\n"
      "cmp %[Ptr], %[End]\n"
      "b.lt 1b\n"
      : [Ptr] "+&r"(from)
      : [End] "r"(to)
      : "memory");
}
#endif

#ifdef __aarch64__
// 128MiB of stack should be enough for anybody.
static constexpr size_t kUntagLimit = 128 * 1024 * 1024;
#endif  // __aarch64__

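// Called on the longjmp()/siglongjmp() path: when the stack is unwound past
// tagged frames, the abandoned region between the old and new stack pointers
// is untagged (and HWASan's shadow updated) so that later reuse of that memory
// through untagged or differently-tagged pointers doesn't fault.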
extern "C" __LIBC_HIDDEN__ __attribute__((no_sanitize("memtag"))) void memtag_handle_longjmp(
    void* sp_dst __unused, void* sp_src __unused) {
  // A usual longjmp looks like this, where sp_dst is the SP of the frame that
  // called setjmp (i.e. the frame being jumped back to).
  //
  //   ┌─────────────────────┐                    │
  //   │                     │                    │
  //   ├─────────────────────┤◄──────── sp_dst    │ stack
  //   │         ...         │                    │ grows
  //   ├─────────────────────┤                    │ to lower
  //   │         ...         │                    │ addresses
  //   ├─────────────────────┤◄──────── sp_src    │
  //   │siglongjmp           │                    │
  //   ├─────────────────────┤                    │
  //   │memtag_handle_longjmp│                    │
  //   └─────────────────────┘                    ▼
#ifdef __aarch64__
  if (atomic_load(&__libc_memtag_stack)) {
    size_t distance = reinterpret_cast<uintptr_t>(sp_dst) - reinterpret_cast<uintptr_t>(sp_src);
    if (distance > kUntagLimit) {
      async_safe_fatal(
          "memtag_handle_longjmp: stack adjustment too large! %p -> %p, distance %zx > %zx\n",
          sp_src, sp_dst, distance, kUntagLimit);
    } else {
      untag_memory(sp_src, sp_dst);
    }
  }
#endif  // __aarch64__

  // We can use __has_feature here rather than __hwasan_handle_longjmp as a
  // weak symbol because this is part of libc which is always sanitized for a
  // hwasan enabled process.
#if __has_feature(hwaddress_sanitizer)
  __hwasan_handle_longjmp(sp_dst);
#endif  // __has_feature(hwaddress_sanitizer)
}