1 /*
2 * Copyright (C) 2008 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #ifndef ART_LIBARTBASE_BASE_MEM_MAP_H_
18 #define ART_LIBARTBASE_BASE_MEM_MAP_H_
19
20 #include <stddef.h>
21 #include <sys/types.h>
22
23 #include <map>
24 #include <mutex>
25 #include <string>
26
27 #include "android-base/thread_annotations.h"
28 #include "bit_utils.h"
29 #include "globals.h"
30 #include "macros.h"
31
32 #ifndef __BIONIC__
33 #ifndef MAP_FIXED_NOREPLACE
34 #define MAP_FIXED_NOREPLACE 0x100000
35 #endif
36 #endif // __BIONIC__
37
38 namespace art {
39
40 #if defined(__LP64__) && !defined(__Fuchsia__) && \
41 (defined(__aarch64__) || defined(__riscv) || defined(__APPLE__))
42 #define USE_ART_LOW_4G_ALLOCATOR 1
43 #else
44 #if defined(__LP64__) && !defined(__Fuchsia__) && !defined(__x86_64__)
45 #error "Unrecognized 64-bit architecture."
46 #endif
47 #define USE_ART_LOW_4G_ALLOCATOR 0
48 #endif
49
50 #ifdef __linux__
51 static constexpr bool kMadviseZeroes = true;
52 #define HAVE_MREMAP_SYSCALL true
53 #else
54 static constexpr bool kMadviseZeroes = false;
55 // We cannot ever perform MemMap::ReplaceWith on non-linux hosts since the syscall is not
56 // present.
57 #define HAVE_MREMAP_SYSCALL false
58 #endif
59
60 // Used to keep track of mmap segments.
61 //
62 // On 64b systems not supporting MAP_32BIT, the implementation of MemMap will do a linear scan
63 // for free pages. For security, the start of this scan should be randomized. This requires a
64 // dynamic initializer.
65 // For this to work, it is paramount that there are no other static initializers that access MemMap.
66 // Otherwise, calls might see uninitialized values.
67 class MemMap {
68 public:
69 static constexpr bool kCanReplaceMapping = HAVE_MREMAP_SYSCALL;
70
71 // Creates an invalid mapping.
MemMap()72 MemMap() {}
73
74 // Creates an invalid mapping. Used when we want to be more explicit than MemMap().
Invalid()75 static MemMap Invalid() {
76 return MemMap();
77 }
78
79 MemMap(MemMap&& other) noexcept REQUIRES(!MemMap::mem_maps_lock_);
80 MemMap& operator=(MemMap&& other) noexcept REQUIRES(!MemMap::mem_maps_lock_) {
81 Reset();
82 swap(other);
83 return *this;
84 }
85
86 // Releases the memory mapping.
87 ~MemMap() REQUIRES(!MemMap::mem_maps_lock_);
88
89 // Swap two MemMaps.
90 void swap(MemMap& other);
91
92 // Releases the mapping (if valid) and returns this MemMap to the invalid state.
Reset()92 void Reset() {
93 if (IsValid()) {
94 DoReset();
95 }
96 }
97
98 // A mapping is valid iff it covers at least one page (base_size_ != 0).
IsValid()98 bool IsValid() const {
99 return base_size_ != 0u;
100 }
101
102 // Replace the data in this memmap with the data in the memmap pointed to by source. The caller
103 // relinquishes ownership of the source mmap.
104 //
105 // For the call to be successful:
106 // * The range [dest->Begin, dest->Begin() + source->Size()] must not overlap with
107 // [source->Begin(), source->End()].
108 // * Neither source nor dest may be 'reused' mappings (they must own all the pages associated
109 // with them).
110 // * kCanReplaceMapping must be true.
111 // * Neither source nor dest may use manual redzones.
112 // * Both source and dest must have the same offset from the nearest page boundary.
113 // * mremap must succeed when called on the mappings.
114 //
115 // If this call succeeds it will return true and:
116 // * Invalidate *source
117 // * The protection of this will remain the same.
118 // * The size of this will be the size of the source
119 // * The data in this will be the data from source.
120 //
121 // If this call fails it will return false and make no changes to *source or this. The ownership
122 // of the source mmap is returned to the caller.
123 bool ReplaceWith(/*in-out*/MemMap* source, /*out*/std::string* error);
124
125 // Set a debug friendly name for a map. It will be prefixed with "dalvik-".
126 static void SetDebugName(void* map_ptr, const char* name, size_t size);
127
128 // Request an anonymous region of length 'byte_count' and a requested base address.
129 // Use null as the requested base address if you don't care.
130 //
131 // `reuse` allows re-mapping an address range from an existing mapping which retains the
132 // ownership of the memory. Alternatively, `reservation` allows re-mapping the start of an
133 // existing reservation mapping, transferring the ownership of the memory to the new MemMap.
134 //
135 // The word "anonymous" in this context means "not backed by a file". The supplied
136 // 'name' will be used -- on systems that support it -- to give the mapping
137 // a name.
138 //
139 // On success, returns a valid MemMap. On failure, returns an invalid MemMap.
140 static MemMap MapAnonymous(const char* name,
141 uint8_t* addr,
142 size_t byte_count,
143 int prot,
144 bool low_4gb,
145 bool reuse,
146 /*inout*/MemMap* reservation,
147 /*out*/std::string* error_msg,
148 bool use_debug_name = true);
149
150 // Request an aligned anonymous region, where the alignment must be higher
151 // than the runtime gPageSize. We can't directly ask for a MAP_SHARED
152 // (anonymous or otherwise) mapping to be aligned as in that case file offset
153 // is involved and could make the starting offset to be out of sync with
154 // another mapping of the same file.
155 static MemMap MapAnonymousAligned(const char* name,
156 size_t byte_count,
157 int prot,
158 bool low_4gb,
159 size_t alignment,
160 /*out=*/std::string* error_msg);
161
162 // Convenience overload: anonymous mapping with no requested address, no reuse
162 // and no reservation.
MapAnonymous(const char * name,size_t byte_count,int prot,bool low_4gb,std::string * error_msg)162 static MemMap MapAnonymous(const char* name,
163 size_t byte_count,
164 int prot,
165 bool low_4gb,
166 /*out*/std::string* error_msg) {
167 return MapAnonymous(name,
168 /*addr=*/ nullptr,
169 byte_count,
170 prot,
171 low_4gb,
172 /*reuse=*/ false,
173 /*reservation=*/ nullptr,
174 error_msg);
175 }
176 // Convenience overload: anonymous mapping placed at the start of `reservation`
176 // (if non-null), transferring ownership of the re-mapped pages.
MapAnonymous(const char * name,size_t byte_count,int prot,bool low_4gb,MemMap * reservation,std::string * error_msg)176 static MemMap MapAnonymous(const char* name,
177 size_t byte_count,
178 int prot,
179 bool low_4gb,
180 MemMap* reservation,
181 /*out*/std::string* error_msg) {
182 return MapAnonymous(name,
183 /*addr=*/ (reservation != nullptr) ? reservation->Begin() : nullptr,
184 byte_count,
185 prot,
186 low_4gb,
187 /*reuse=*/ false,
188 reservation,
189 error_msg);
190 }
191
192 // Create placeholder for a region allocated by direct call to mmap.
193 // This is useful when we do not have control over the code calling mmap,
194 // but when we still want to keep track of it in the list.
195 // The region is not considered to be owned and will not be unmapped.
196 static MemMap MapPlaceholder(const char* name, uint8_t* addr, size_t byte_count);
197
198 // Map part of a file, taking care of non-page aligned offsets. The
199 // "start" offset is absolute, not relative.
200 //
201 // On success, returns a valid MemMap. On failure, returns an invalid MemMap.
MapFile(size_t byte_count,int prot,int flags,int fd,off_t start,bool low_4gb,const char * filename,std::string * error_msg)202 static MemMap MapFile(size_t byte_count,
203 int prot,
204 int flags,
205 int fd,
206 off_t start,
207 bool low_4gb,
208 const char* filename,
209 std::string* error_msg) {
210 return MapFileAtAddress(nullptr,
211 byte_count,
212 prot,
213 flags,
214 fd,
215 start,
216 /*low_4gb=*/ low_4gb,
217 filename,
218 /*reuse=*/ false,
219 /*reservation=*/ nullptr,
220 error_msg);
221 }
222
223 // As above, but additionally allowing `reuse` of an address range from an
223 // existing mapping that retains ownership of the memory.
MapFile(size_t byte_count,int prot,int flags,int fd,off_t start,bool low_4gb,const char * filename,bool reuse,std::string * error_msg)223 static MemMap MapFile(size_t byte_count,
224 int prot,
225 int flags,
226 int fd,
227 off_t start,
228 bool low_4gb,
229 const char* filename,
230 bool reuse,
231 std::string* error_msg) {
232 return MapFileAtAddress(nullptr,
233 byte_count,
234 prot,
235 flags,
236 fd,
237 start,
238 /*low_4gb=*/ low_4gb,
239 filename,
240 reuse,
241 /*reservation=*/ nullptr,
242 error_msg);
243 }
244
245 // Map part of a file, taking care of non-page aligned offsets. The "start" offset is absolute,
246 // not relative. This version allows requesting a specific address for the base of the mapping.
247 //
248 // `reuse` allows re-mapping an address range from an existing mapping which retains the
249 // ownership of the memory. Alternatively, `reservation` allows re-mapping the start of an
250 // existing reservation mapping, transferring the ownership of the memory to the new MemMap.
251 //
252 // If error_msg is null then we do not print /proc/maps to the log if MapFileAtAddress fails.
253 // This helps improve performance of the fail case since reading and printing /proc/maps takes
254 // several milliseconds in the worst case.
255 //
256 // On success, returns a valid MemMap. On failure, returns an invalid MemMap.
257 static MemMap MapFileAtAddress(uint8_t* addr,
258 size_t byte_count,
259 int prot,
260 int flags,
261 int fd,
262 off_t start,
263 bool low_4gb,
264 const char* filename,
265 bool reuse,
266 /*inout*/MemMap* reservation,
267 /*out*/std::string* error_msg);
268
269 // Debug-friendly name of the mapping (see SetDebugName).
GetName()269 const std::string& GetName() const {
270 return name_;
271 }
272
273 bool Sync();
274
275 bool Protect(int prot);
276
277 void FillWithZero(bool release_eagerly);
278 // Zero the mapping and eagerly release the backing pages to the OS.
MadviseDontNeedAndZero()278 void MadviseDontNeedAndZero() {
279 FillWithZero(/* release_eagerly= */ true);
280 }
281 int MadviseDontFork();
282
283 // Current protection of the map (as last set via mmap/Protect).
GetProtect()283 int GetProtect() const {
284 return prot_;
285 }
286
287 // Start of data; may be offset from BaseBegin() and changed by AlignBy.
Begin()287 uint8_t* Begin() const {
288 return begin_;
289 }
290
291 // Length of data; may be smaller than BaseSize().
Size()291 size_t Size() const {
292 return size_;
293 }
294
295 // Resize the mem-map by unmapping pages at the end. Currently only supports shrinking.
296 void SetSize(size_t new_size);
297
298 // One past the last data byte.
End()298 uint8_t* End() const {
299 return Begin() + Size();
300 }
301
302 // Page-aligned base address of the underlying mapping.
BaseBegin()302 void* BaseBegin() const {
303 return base_begin_;
304 }
305
306 // Length of the underlying mapping (page granularity).
BaseSize()306 size_t BaseSize() const {
307 return base_size_;
308 }
309
310 // One past the last byte of the underlying mapping.
BaseEnd()310 void* BaseEnd() const {
311 return reinterpret_cast<uint8_t*>(BaseBegin()) + BaseSize();
312 }
313
314 // Whether `addr` lies within [Begin(), End()).
HasAddress(const void * addr)314 bool HasAddress(const void* addr) const {
315 return Begin() <= addr && addr < End();
316 }
317
318 // Unmap the pages at end and remap them to create another memory map.
319 MemMap RemapAtEnd(uint8_t* new_end,
320 const char* tail_name,
321 int tail_prot,
322 std::string* error_msg,
323 bool use_debug_name = true);
324
325 // Unmap the pages of a file at end and remap them to create another memory map.
326 MemMap RemapAtEnd(uint8_t* new_end,
327 const char* tail_name,
328 int tail_prot,
329 int tail_flags,
330 int fd,
331 off_t offset,
332 std::string* error_msg,
333 bool use_debug_name = true);
334
335 // Take ownership of pages at the beginning of the mapping. The mapping must be an
336 // anonymous reservation mapping, owning entire pages. The `byte_count` must not
337 // exceed the size of this reservation.
338 //
339 // Returns a mapping owning `byte_count` bytes rounded up to entire pages
340 // with size set to the passed `byte_count`. If 'reuse' is true then the caller
341 // is responsible for unmapping the taken pages.
342 MemMap TakeReservedMemory(size_t byte_count, bool reuse = false);
343
344 static bool CheckNoGaps(MemMap& begin_map, MemMap& end_map)
345 REQUIRES(!MemMap::mem_maps_lock_);
346 static void DumpMaps(std::ostream& os, bool terse = false)
347 REQUIRES(!MemMap::mem_maps_lock_);
348
349 // Init and Shutdown are NOT thread safe.
350 // Both may be called multiple times and MemMap objects may be created any
351 // time after the first call to Init and before the first call to Shutdown.
352 static void Init() REQUIRES(!MemMap::mem_maps_lock_);
353 static void Shutdown() REQUIRES(!MemMap::mem_maps_lock_);
354 static bool IsInitialized();
355
356 // If the map is PROT_READ, try to read each page of the map to check it is in fact readable (not
357 // faulting). This is used to diagnose a bug b/19894268 where mprotect doesn't seem to be working
358 // intermittently.
359 void TryReadable();
360
361 // Align the map by unmapping the unaligned part at the lower end and if 'align_both_ends' is
362 // true, then the higher end as well.
363 void AlignBy(size_t alignment, bool align_both_ends = true);
364
365 // For annotation reasons.
GetMemMapsLock()366 static std::mutex* GetMemMapsLock() RETURN_CAPABILITY(mem_maps_lock_) {
367 return nullptr;
368 }
369
370 // Reset in a forked process the MemMap whose memory has been madvised MADV_DONTFORK
371 // in the parent process.
372 void ResetInForkedProcess();
373
374 // 'redzone_size_ == 0' indicates that we are not using memory-tool on this mapping.
GetRedzoneSize()375 size_t GetRedzoneSize() const { return redzone_size_; }
376
377 #ifdef ART_PAGE_SIZE_AGNOSTIC
378 // Runtime page size; requires Init() to have set page_size_ first.
GetPageSize()378 static inline size_t GetPageSize() {
379 DCHECK_NE(page_size_, 0u);
380 return page_size_;
381 }
382 #else
383 // Page size is a compile-time constant in the non-agnostic configuration.
GetPageSize()383 static constexpr size_t GetPageSize() {
384 return GetPageSizeSlow();
385 }
386 #endif
387
388 private:
389 MemMap(const std::string& name,
390 uint8_t* begin,
391 size_t size,
392 void* base_begin,
393 size_t base_size,
394 int prot,
395 bool reuse,
396 size_t redzone_size = 0) REQUIRES(!MemMap::mem_maps_lock_);
397
398 void DoReset();
399 void Invalidate();
400 void SwapMembers(MemMap& other);
401
402 static void DumpMapsLocked(std::ostream& os, bool terse)
403 REQUIRES(MemMap::mem_maps_lock_);
404 static bool HasMemMap(MemMap& map)
405 REQUIRES(MemMap::mem_maps_lock_);
406 static MemMap* GetLargestMemMapAt(void* address)
407 REQUIRES(MemMap::mem_maps_lock_);
408 static bool ContainedWithinExistingMap(uint8_t* ptr, size_t size, std::string* error_msg)
409 REQUIRES(!MemMap::mem_maps_lock_);
410
411 // Internal version of mmap that supports low 4gb emulation.
412 static void* MapInternal(void* addr,
413 size_t length,
414 int prot,
415 int flags,
416 int fd,
417 off_t offset,
418 bool low_4gb)
419 REQUIRES(!MemMap::mem_maps_lock_);
420 static void* MapInternalArtLow4GBAllocator(size_t length,
421 int prot,
422 int flags,
423 int fd,
424 off_t offset)
425 REQUIRES(!MemMap::mem_maps_lock_);
426
427 // Release memory owned by a reservation mapping.
428 void ReleaseReservedMemory(size_t byte_count);
429
430 // Check that the result of an mmap call matches the requested address
430 // (presumably cleans up and reports via error_msg otherwise -- see .cc file).
431 static bool CheckMapRequest(uint8_t* expected_ptr,
432 void* actual_ptr,
433 size_t byte_count,
434 std::string* error_msg);
435
436 static bool CheckReservation(uint8_t* expected_ptr,
437 size_t byte_count,
438 const char* name,
439 const MemMap& reservation,
440 /*out*/std::string* error_msg);
441
442 std::string name_;
443 uint8_t* begin_ = nullptr; // Start of data. May be changed by AlignBy.
444 size_t size_ = 0u; // Length of data.
445
446 void* base_begin_ = nullptr; // Page-aligned base address. May be changed by AlignBy.
447 size_t base_size_ = 0u; // Length of mapping. May be changed by RemapAtEnd (ie Zygote).
448 int prot_ = 0; // Protection of the map.
449
450 // When reuse_ is true, this is a view of a mapping on which
451 // we do not take ownership and are not responsible for
452 // unmapping.
453 bool reuse_ = false;
454
455 // When already_unmapped_ is true the destructor will not call munmap.
456 bool already_unmapped_ = false;
457
458 size_t redzone_size_ = 0u;
459
460 #if USE_ART_LOW_4G_ALLOCATOR
461 static uintptr_t next_mem_pos_; // Next memory location to check for low_4g extent.
462
463 static void* TryMemMapLow4GB(void* ptr,
464 size_t page_aligned_byte_count,
465 int prot,
466 int flags,
467 int fd,
468 off_t offset);
469 #endif
470
471 static void TargetMMapInit();
472 static void* TargetMMap(void* start, size_t len, int prot, int flags, int fd, off_t fd_off);
473 static int TargetMUnmap(void* start, size_t len);
474
475 static std::mutex* mem_maps_lock_;
476
477 #ifdef ART_PAGE_SIZE_AGNOSTIC
478 static size_t page_size_;
479 #endif
480
481 friend class MemMapTest; // To allow access to base_begin_ and base_size_.
482 };
483
// Free-function swap overload (found via ADL), forwarding to MemMap::swap.
swap(MemMap & lhs,MemMap & rhs)484 inline void swap(MemMap& lhs, MemMap& rhs) {
485 lhs.swap(rhs);
486 }
487
488 std::ostream& operator<<(std::ostream& os, const MemMap& mem_map);
489
490 // Zero and maybe release memory if possible, no requirements on alignments.
491 void ZeroMemory(void* address, size_t length, bool release_eagerly);
// Convenience wrapper: zero `length` bytes at `address`, eagerly releasing
// the backing pages (forwards to ZeroMemory with release_eagerly = true).
ZeroAndReleaseMemory(void * address,size_t length)492 inline void ZeroAndReleaseMemory(void* address, size_t length) {
493 ZeroMemory(address, length, /* release_eagerly= */ true);
494 }
495
496 } // namespace art
497
498 #endif // ART_LIBARTBASE_BASE_MEM_MAP_H_
499