// Copyright 2021 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "partition_alloc/tagging.h"

#include "build/build_config.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_base/cpu.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_check.h"
#include "partition_alloc/partition_alloc_config.h"

#if BUILDFLAG(HAS_MEMORY_TAGGING)
#include <arm_acle.h>
#include <asm/hwcap.h>
#include <sys/auxv.h>
#include <sys/ifunc.h>
#include <sys/prctl.h>
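// Fallback definitions for toolchains whose <sys/prctl.h> predates the
// tagged-address ABI; the values match the Linux UAPI headers.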
#define PR_SET_TAGGED_ADDR_CTRL 55
#define PR_GET_TAGGED_ADDR_CTRL 56
#define PR_TAGGED_ADDR_ENABLE (1UL << 0)

#if BUILDFLAG(IS_LINUX)
#include <linux/version.h>

// Linux headers have provided these macros since v5.10.
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
#define HAS_PR_MTE_MACROS
#endif
#endif

#ifndef HAS_PR_MTE_MACROS
#define PR_MTE_TCF_SHIFT 1
#define PR_MTE_TCF_NONE (0UL << PR_MTE_TCF_SHIFT)
#define PR_MTE_TCF_SYNC (1UL << PR_MTE_TCF_SHIFT)
#define PR_MTE_TCF_ASYNC (2UL << PR_MTE_TCF_SHIFT)
#define PR_MTE_TCF_MASK (3UL << PR_MTE_TCF_SHIFT)
#define PR_MTE_TAG_SHIFT 3
#define PR_MTE_TAG_MASK (0xffffUL << PR_MTE_TAG_SHIFT)
#define HWCAP2_MTE (1 << 18)
#endif
#endif

#if BUILDFLAG(IS_ANDROID)
#include "partition_alloc/partition_alloc_base/files/file_path.h"
#include "partition_alloc/partition_alloc_base/native_library.h"
#if BUILDFLAG(HAS_MEMORY_TAGGING)
#include <malloc.h>
#endif  // BUILDFLAG(HAS_MEMORY_TAGGING)
#endif  // BUILDFLAG(IS_ANDROID)

namespace partition_alloc {

#if BUILDFLAG(HAS_MEMORY_TAGGING)
namespace {
void ChangeMemoryTaggingModeInternal(unsigned prctl_mask) {
  if (internal::base::CPU::GetInstanceNoAllocation().has_mte()) {
    int status = prctl(PR_SET_TAGGED_ADDR_CTRL, prctl_mask, 0, 0, 0);
    PA_CHECK(status == 0);
  }
}
}  // namespace
#endif  // BUILDFLAG(HAS_MEMORY_TAGGING)

void ChangeMemoryTaggingModeForCurrentThread(TagViolationReportingMode m) {
#if BUILDFLAG(HAS_MEMORY_TAGGING)
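  // The 0xfffe value is the tag-inclusion mask: it permits the hardware to
  // generate logical tags 1-15 but excludes tag 0, which also matches
  // untagged memory.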
  if (m == TagViolationReportingMode::kSynchronous) {
    ChangeMemoryTaggingModeInternal(PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC |
                                    (0xfffe << PR_MTE_TAG_SHIFT));
  } else if (m == TagViolationReportingMode::kAsynchronous) {
    ChangeMemoryTaggingModeInternal(PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_ASYNC |
                                    (0xfffe << PR_MTE_TAG_SHIFT));
  } else {
    ChangeMemoryTaggingModeInternal(PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_NONE);
  }
#endif  // BUILDFLAG(HAS_MEMORY_TAGGING)
}
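
// Example usage (a sketch, assuming an MTE-capable device): switch the
// current thread to synchronous tag checking and confirm the resulting mode:
//
//   ChangeMemoryTaggingModeForCurrentThread(
//       TagViolationReportingMode::kSynchronous);
//   PA_CHECK(internal::GetMemoryTaggingModeForCurrentThread() ==
//            TagViolationReportingMode::kSynchronous);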

namespace internal {

#if BUILDFLAG(IS_ANDROID)
bool ChangeMemoryTaggingModeForAllThreadsPerProcess(
    TagViolationReportingMode m) {
#if BUILDFLAG(HAS_MEMORY_TAGGING)
  // In order to support Android NDK API levels below 26, mallopt must be
  // looked up via the dynamic linker rather than called directly.
  // int mallopt(int param, int value);
  using MalloptSignature = int (*)(int, int);

  static MalloptSignature mallopt_fnptr = []() {
    base::FilePath module_path;
    base::NativeLibraryLoadError load_error;
    base::FilePath library_path = module_path.Append("libc.so");
    base::NativeLibrary library =
        base::LoadNativeLibrary(library_path, &load_error);
    PA_CHECK(library);
    void* func_ptr =
        base::GetFunctionPointerFromNativeLibrary(library, "mallopt");
    PA_CHECK(func_ptr);
    return reinterpret_cast<MalloptSignature>(func_ptr);
  }();

  int status = 0;
  if (m == TagViolationReportingMode::kSynchronous) {
    status = mallopt_fnptr(M_BIONIC_SET_HEAP_TAGGING_LEVEL,
                           M_HEAP_TAGGING_LEVEL_SYNC);
  } else if (m == TagViolationReportingMode::kAsynchronous) {
    status = mallopt_fnptr(M_BIONIC_SET_HEAP_TAGGING_LEVEL,
                           M_HEAP_TAGGING_LEVEL_ASYNC);
  } else {
    status = mallopt_fnptr(M_BIONIC_SET_HEAP_TAGGING_LEVEL,
                           M_HEAP_TAGGING_LEVEL_NONE);
  }
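  // bionic's mallopt() returns a nonzero value on success, so this reports
  // whether the heap-tagging-level change took effect.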
  return status != 0;
#else
  return false;
#endif  // BUILDFLAG(HAS_MEMORY_TAGGING)
}
#endif  // BUILDFLAG(IS_ANDROID)

namespace {
[[maybe_unused]] static bool CheckTagRegionParameters(void* ptr, size_t sz) {
  // MTE can only tag whole granules: both the address and the size must be
  // multiples of the 16-byte tagging granule, and the size must be nonzero.
  uintptr_t ptr_as_uint = reinterpret_cast<uintptr_t>(ptr);
  bool ret = (ptr_as_uint % kMemTagGranuleSize == 0) &&
             (sz % kMemTagGranuleSize == 0) && sz;
  return ret;
}

#if BUILDFLAG(HAS_MEMORY_TAGGING)
void* TagRegionRandomlyForMTE(void* ptr, size_t sz, uint64_t mask) {
  // Randomly tag a region (MTE-enabled systems only). The first 16-byte
  // granule is randomly tagged, and all granules in the region are then
  // assigned that initial tag via __arm_mte_set_tag.
  if (!CheckTagRegionParameters(ptr, sz)) {
    return nullptr;
  }
  // __arm_mte_create_random_tag generates a randomly tagged pointer via the
  // hardware's random number generator, but does not apply it to the memory.
  char* nptr = reinterpret_cast<char*>(__arm_mte_create_random_tag(ptr, mask));
  for (size_t i = 0; i < sz; i += kMemTagGranuleSize) {
    // Tag the first and all subsequent granules with the random tag, which is
    // taken from the top bits of the argument.
    __arm_mte_set_tag(nptr + i);
  }
  return nptr;
}

void* TagRegionIncrementForMTE(void* ptr, size_t sz) {
  // Increment a region's tag (MTE-enabled systems only), using the tag of the
  // first granule.
  if (!CheckTagRegionParameters(ptr, sz)) {
    return nullptr;
  }
  // Increment ptr's tag.
  char* nptr = reinterpret_cast<char*>(__arm_mte_increment_tag(ptr, 1u));
  for (size_t i = 0; i < sz; i += kMemTagGranuleSize) {
    // Apply the tag to the first granule, and all subsequent granules.
    __arm_mte_set_tag(nptr + i);
  }
  return nptr;
}

void* RemaskVoidPtrForMTE(void* ptr) {
  if (PA_LIKELY(ptr)) {
    // Can't look up the tag for a null ptr (segfaults).
    return __arm_mte_get_tag(ptr);
  }
  return nullptr;
}

void* TagRegionIncrementNoOp(void* ptr, size_t sz) {
  // No-op fallback selected when MTE is unavailable; the signature must match
  // TagRegionIncrementForMTE so the ifunc resolver can pick either.
  return ptr;
}

void* TagRegionRandomlyNoOp(void* ptr, size_t sz, uint64_t mask) {
  // No-op fallback selected when MTE is unavailable; matches the signature of
  // TagRegionRandomlyForMTE.
  return ptr;
}

void* RemaskVoidPtrNoOp(void* ptr) {
  return ptr;
}
#endif  // BUILDFLAG(HAS_MEMORY_TAGGING)

}  // namespace

#if BUILDFLAG(HAS_MEMORY_TAGGING)
using RemaskPtrInternalFn = void*(void* ptr);
using TagMemoryRangeIncrementInternalFn = void*(void* ptr, size_t size);

using TagMemoryRangeRandomlyInternalFn = void*(void* ptr,
                                               size_t size,
                                               uint64_t mask);

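// The resolvers below run once at load time via the GNU indirect-function
// (ifunc) mechanism: the dynamic loader passes AT_HWCAP and an extended
// __ifunc_arg_t, and the MTE-backed implementation is selected only when
// HWCAP2_MTE is advertised, avoiding a per-call branch afterwards.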
extern "C" TagMemoryRangeIncrementInternalFn(
    *ResolveTagMemoryRangeIncrement(uint64_t hwcap, struct __ifunc_arg_t* hw)) {
  if ((hwcap & _IFUNC_ARG_HWCAP) && (hw->_hwcap2 & HWCAP2_MTE)) {
    return TagRegionIncrementForMTE;
  }
  return TagRegionIncrementNoOp;
}

extern "C" TagMemoryRangeRandomlyInternalFn(
    *ResolveTagMemoryRandomly(uint64_t hwcap, struct __ifunc_arg_t* hw)) {
  if ((hwcap & _IFUNC_ARG_HWCAP) && (hw->_hwcap2 & HWCAP2_MTE)) {
    return TagRegionRandomlyForMTE;
  }
  return TagRegionRandomlyNoOp;
}

extern "C" RemaskPtrInternalFn(
    *ResolveRemaskPointer(uint64_t hwcap, struct __ifunc_arg_t* hw)) {
  if ((hwcap & _IFUNC_ARG_HWCAP) && (hw->_hwcap2 & HWCAP2_MTE)) {
    return RemaskVoidPtrForMTE;
  }
  return RemaskVoidPtrNoOp;
}

void* TagMemoryRangeIncrementInternal(void* ptr, size_t size)
    __attribute__((ifunc("ResolveTagMemoryRangeIncrement")));
void* TagMemoryRangeRandomlyInternal(void* ptr, size_t size, uint64_t mask)
    __attribute__((ifunc("ResolveTagMemoryRandomly")));
void* RemaskPointerInternal(void* ptr)
    __attribute__((ifunc("ResolveRemaskPointer")));
#endif  // BUILDFLAG(HAS_MEMORY_TAGGING)
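
// Example usage (a sketch; `region` and `addr_without_tag` are placeholder
// names): randomly retag a granule-aligned region, then recover the tagged
// address from an untagged copy of a pointer into it. A mask of 0 excludes
// no tags from random generation:
//
//   void* tagged = TagMemoryRangeRandomlyInternal(region, 32, /*mask=*/0u);
//   void* remasked = RemaskPointerInternal(addr_without_tag);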

TagViolationReportingMode GetMemoryTaggingModeForCurrentThread() {
#if BUILDFLAG(HAS_MEMORY_TAGGING)
  base::CPU cpu;
  if (!cpu.has_mte()) {
    return TagViolationReportingMode::kUndefined;
  }
  int status = prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0);
  PA_CHECK(status >= 0);
  // Check for Asynchronous first because ASYNC on Android sets both
  // PR_MTE_TCF_ASYNC and PR_MTE_TCF_SYNC bits.
  if ((status & PR_TAGGED_ADDR_ENABLE) && (status & PR_MTE_TCF_ASYNC)) {
    return TagViolationReportingMode::kAsynchronous;
  }
  if ((status & PR_TAGGED_ADDR_ENABLE) && (status & PR_MTE_TCF_SYNC)) {
    return TagViolationReportingMode::kSynchronous;
  }
  return TagViolationReportingMode::kDisabled;
#else
  return TagViolationReportingMode::kUndefined;
#endif  // BUILDFLAG(HAS_MEMORY_TAGGING)
}

}  // namespace internal

#if BUILDFLAG(HAS_MEMORY_TAGGING) && BUILDFLAG(IS_ANDROID)
bool PermissiveMte::enabled_ = false;

// static
void PermissiveMte::SetEnabled(bool enabled) {
  PermissiveMte::enabled_ = enabled;
}

// static
bool PermissiveMte::HandleCrash(int signo,
                                siginfo_t* siginfo,
                                ucontext_t* context) {
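  // SEGV_MTESERR and SEGV_MTEAERR are the si_code values the kernel reports
  // for synchronous and asynchronous MTE tag-check faults, respectively.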
  if (siginfo->si_signo == SIGSEGV &&
      (siginfo->si_code == SEGV_MTESERR || siginfo->si_code == SEGV_MTEAERR) &&
      PermissiveMte::enabled_) {
    // In MTE permissive mode, do not crash the process. Instead, disable MTE
    // and let the failing instruction be retried. The second attempt should
    // succeed, unless there is another, non-MTE fault.
    internal::ChangeMemoryTaggingModeForAllThreadsPerProcess(
        partition_alloc::TagViolationReportingMode::kDisabled);
    return true;
  }
  return false;
}
#endif  // BUILDFLAG(HAS_MEMORY_TAGGING) && BUILDFLAG(IS_ANDROID)

}  // namespace partition_alloc