xref: /aosp_15_r20/art/libartbase/base/mem_map.cc (revision 795d594fd825385562da6b089ea9b2033f3abf5a)
1 /*
2  * Copyright (C) 2008 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #include "mem_map.h"
18 
19 #include <inttypes.h>
20 #include <stdlib.h>
21 #if !defined(ANDROID_OS) && !defined(__Fuchsia__) && !defined(_WIN32)
22 #include <sys/resource.h>
23 #endif
24 
25 #if defined(__linux__)
26 #include <sys/prctl.h>
27 #endif
28 
29 #include <map>
30 #include <memory>
31 #include <sstream>
32 
33 #include "android-base/stringprintf.h"
34 #include "android-base/unique_fd.h"
35 
36 #include "allocator.h"
37 #include "bit_utils.h"
38 #include "globals.h"
39 #include "logging.h"  // For VLOG_IS_ON.
40 #include "memory_tool.h"
41 #include "mman.h"  // For the PROT_* and MAP_* constants.
42 #include "utils.h"
43 
44 #ifndef MAP_ANONYMOUS
45 #define MAP_ANONYMOUS MAP_ANON
46 #endif
47 
48 namespace art {
49 
50 using android::base::StringPrintf;
51 using android::base::unique_fd;
52 
53 template<class Key, class T, AllocatorTag kTag, class Compare = std::less<Key>>
54 using AllocationTrackingMultiMap =
55     std::multimap<Key, T, Compare, TrackingAllocator<std::pair<const Key, T>, kTag>>;
56 
57 using Maps = AllocationTrackingMultiMap<void*, MemMap*, kAllocatorTagMaps>;
58 
59 // All the non-empty MemMaps. Use a multimap as we do a reserve-and-divide (e.g. ElfMap::Load()).
60 static Maps* gMaps GUARDED_BY(MemMap::GetMemMapsLock()) = nullptr;
61 
62 // A map containing unique strings used for identifying anonymous mappings.
63 static std::map<std::string, int> debugStrMap GUARDED_BY(MemMap::GetMemMapsLock());
64 
65 // Retrieve iterator to a `gMaps` entry that is known to exist.
66 Maps::iterator GetGMapsEntry(const MemMap& map) REQUIRES(MemMap::GetMemMapsLock()) {
67   DCHECK(map.IsValid());
68   DCHECK(gMaps != nullptr);
69   for (auto it = gMaps->lower_bound(map.BaseBegin()), end = gMaps->end();
70        it != end && it->first == map.BaseBegin();
71        ++it) {
72     if (it->second == &map) {
73       return it;
74     }
75   }
76   LOG(FATAL) << "MemMap not found";
77   UNREACHABLE();
78 }
79 
80 std::ostream& operator<<(std::ostream& os, const Maps& mem_maps) {
81   os << "MemMap:" << std::endl;
82   for (auto it = mem_maps.begin(); it != mem_maps.end(); ++it) {
83     void* base = it->first;
84     MemMap* map = it->second;
85     CHECK_EQ(base, map->BaseBegin());
86     os << *map << std::endl;
87   }
88   return os;
89 }
90 
91 std::mutex* MemMap::mem_maps_lock_ = nullptr;
92 #ifdef ART_PAGE_SIZE_AGNOSTIC
93 size_t MemMap::page_size_ = 0;
94 #endif
95 
96 #if USE_ART_LOW_4G_ALLOCATOR
97 // Handling mem_map in 32b address range for 64b architectures that do not support MAP_32BIT.
98 
99 // The regular start of memory allocations. The first 64KB is protected by SELinux.
100 static constexpr uintptr_t LOW_MEM_START = 64 * KB;
101 
102 // Generate random starting position.
103 // To not interfere with image position, take the image's address and only place it below. Current
104 // formula (sketch):
105 //
106 // ART_BASE_ADDR      = 0001XXXXXXXXXXXXXXX
107 // ----------------------------------------
108 //                    = 0000111111111111111
109 // & ~(page_size - 1) =~0000000000000001111
110 // ----------------------------------------
111 // mask               = 0000111111111110000
112 // & random data      = YYYYYYYYYYYYYYYYYYY
113 // -----------------------------------
114 // tmp                = 0000YYYYYYYYYYY0000
115 // + LOW_MEM_START    = 0000000000001000000
116 // --------------------------------------
117 // start
118 //
119 // arc4random as an entropy source is exposed in Bionic, but not in glibc. When we
120 // do not have Bionic, simply start with LOW_MEM_START.
121 
122 // Function is standalone so it can be tested somewhat in mem_map_test.cc.
123 #ifdef __BIONIC__
124 uintptr_t CreateStartPos(uint64_t input, size_t page_size) {
125   CHECK_NE(0, ART_BASE_ADDRESS);
126 
127   // Start with all bits below highest bit in ART_BASE_ADDRESS.
128   constexpr size_t leading_zeros = CLZ(static_cast<uint32_t>(ART_BASE_ADDRESS));
129   constexpr uintptr_t mask_ones = (1 << (31 - leading_zeros)) - 1;
130 
131   // Lowest (usually 12) bits are not used, as aligned by page size.
132   const uintptr_t mask = mask_ones & ~(page_size - 1);
133 
134   // Mask input data.
135   return (input & mask) + LOW_MEM_START;
136 }
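// Worked example of the computation above (illustrative only; assumes the typical
// ART_BASE_ADDRESS of 0x70000000 and a 4 KiB page size): CLZ(0x70000000) == 1, so
// mask_ones == (1 << 30) - 1 == 0x3fffffff and mask == 0x3ffff000. For
// input == 0xdeadbeef12345678 the result is (0x12345678 & 0x3ffff000) + LOW_MEM_START
// == 0x12345000 + 0x10000 == 0x12355000, i.e. a page-aligned start position in the
// low 4GB, above the SELinux-protected first 64KB.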
137 #endif
138 
139 static uintptr_t GenerateNextMemPos(size_t page_size) {
140 #ifdef __BIONIC__
141   uint64_t random_data;
142   arc4random_buf(&random_data, sizeof(random_data));
143   return CreateStartPos(random_data, page_size);
144 #else
145   UNUSED(page_size);
146   // No arc4random on host, see above.
147   return LOW_MEM_START;
148 #endif
149 }
150 
151 uintptr_t MemMap::next_mem_pos_;
152 #endif
153 
154 // Return true if the address range is contained in a single memory map by either reading
155 // the gMaps variable or the /proc/self/map entry.
156 bool MemMap::ContainedWithinExistingMap(uint8_t* ptr, size_t size, std::string* error_msg) {
157   uintptr_t begin = reinterpret_cast<uintptr_t>(ptr);
158   uintptr_t end = begin + size;
159 
160   {
161     std::lock_guard<std::mutex> mu(*mem_maps_lock_);
162     for (auto& pair : *gMaps) {
163       MemMap* const map = pair.second;
164       if (begin >= reinterpret_cast<uintptr_t>(map->Begin()) &&
165           end <= reinterpret_cast<uintptr_t>(map->End())) {
166         return true;
167       }
168     }
169   }
170 
171   if (error_msg != nullptr) {
172     PrintFileToLog("/proc/self/maps", LogSeverity::ERROR);
173     *error_msg = StringPrintf("Requested region 0x%08" PRIxPTR "-0x%08" PRIxPTR " does not overlap "
174                               "any existing map. See process maps in the log.", begin, end);
175   }
176   return false;
177 }
178 
179 // CheckMapRequest to validate a non-MAP_FAILED mmap result based on
180 // the expected value, calling munmap if validation fails, giving the
181 // reason in error_msg.
182 //
183 // If the expected_ptr is null, nothing is checked beyond the fact
184 // that the actual_ptr is not MAP_FAILED. However, if expected_ptr is
185 // non-null, we check that actual_ptr == expected_ptr, and if not,
186 // report in error_msg the conflicting mapping if one was found, or a
187 // generic error otherwise.
188 bool MemMap::CheckMapRequest(uint8_t* expected_ptr, void* actual_ptr, size_t byte_count,
189                             std::string* error_msg) {
190   // Handled first by caller for more specific error messages.
191   CHECK(actual_ptr != MAP_FAILED);
192 
193   if (expected_ptr == nullptr) {
194     return true;
195   }
196 
197   uintptr_t actual = reinterpret_cast<uintptr_t>(actual_ptr);
198   uintptr_t expected = reinterpret_cast<uintptr_t>(expected_ptr);
199 
200   if (expected_ptr == actual_ptr) {
201     return true;
202   }
203 
204   // We asked for an address but didn't get what we wanted; all paths below here should fail.
205   int result = TargetMUnmap(actual_ptr, byte_count);
206   if (result == -1) {
207     PLOG(WARNING) << StringPrintf("munmap(%p, %zd) failed", actual_ptr, byte_count);
208   }
209 
210   if (error_msg != nullptr) {
211     // We call this here so that we can try to generate a full error
212     // message with the overlapping mapping. There's no guarantee
213     // that there will be an overlap though, since
214     // - The kernel is not *required* to honor expected_ptr unless MAP_FIXED is
215     //   true, even if there is no overlap
216     // - There might have been an overlap at the point of mmap, but the
217     //   overlapping region has since been unmapped.
218 
219     // Tell the client the mappings that were in place at the time.
220     if (kIsDebugBuild) {
221       PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
222     }
223 
224     std::ostringstream os;
225     os <<  StringPrintf("Failed to mmap at expected address, mapped at "
226                         "0x%08" PRIxPTR " instead of 0x%08" PRIxPTR,
227                         actual, expected);
228     *error_msg = os.str();
229   }
230   return false;
231 }
232 
233 bool MemMap::CheckReservation(uint8_t* expected_ptr,
234                               size_t byte_count,
235                               const char* name,
236                               const MemMap& reservation,
237                               /*out*/std::string* error_msg) {
238   if (!reservation.IsValid()) {
239     *error_msg = StringPrintf("Invalid reservation for %s", name);
240     return false;
241   }
242   DCHECK_ALIGNED_PARAM(reservation.Begin(), GetPageSize());
243   if (reservation.Begin() != expected_ptr) {
244     *error_msg = StringPrintf("Bad image reservation start for %s: %p instead of %p",
245                               name,
246                               reservation.Begin(),
247                               expected_ptr);
248     return false;
249   }
250   if (byte_count > reservation.Size()) {
251     *error_msg = StringPrintf("Insufficient reservation, required %zu, available %zu",
252                               byte_count,
253                               reservation.Size());
254     return false;
255   }
256   return true;
257 }
258 
259 
260 #if USE_ART_LOW_4G_ALLOCATOR
261 void* MemMap::TryMemMapLow4GB(void* ptr,
262                                     size_t page_aligned_byte_count,
263                                     int prot,
264                                     int flags,
265                                     int fd,
266                                     off_t offset) {
267   void* actual = TargetMMap(ptr, page_aligned_byte_count, prot, flags, fd, offset);
268   if (actual != MAP_FAILED) {
269     // Since we didn't use MAP_FIXED the kernel may have mapped it somewhere not in the low
270     // 4GB. If this is the case, unmap and retry.
271     if (reinterpret_cast<uintptr_t>(actual) + page_aligned_byte_count >= 4 * GB) {
272       TargetMUnmap(actual, page_aligned_byte_count);
273       actual = MAP_FAILED;
274     }
275   }
276   return actual;
277 }
278 #endif
279 
280 void MemMap::SetDebugName(void* map_ptr, const char* name, size_t size) {
281   // Debug naming is only used for Android target builds. For Linux targets,
282   // we'll still call prctl but it won't do anything until we upstream the prctl.
283   if (kIsTargetFuchsia || !kIsTargetBuild) {
284     return;
285   }
286 
287   // lock as std::map is not thread-safe
288   std::lock_guard<std::mutex> mu(*mem_maps_lock_);
289 
290   std::string debug_friendly_name("dalvik-");
291   debug_friendly_name += name;
292   auto it = debugStrMap.find(debug_friendly_name);
293 
294   if (it == debugStrMap.end()) {
295     it = debugStrMap.insert(std::make_pair(std::move(debug_friendly_name), 1)).first;
296   }
297 
298   DCHECK(it != debugStrMap.end());
299 #if defined(PR_SET_VMA) && defined(__linux__)
300   prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, map_ptr, size, it->first.c_str());
301 #else
302   // Prevent variable unused compiler errors.
303   UNUSED(map_ptr, size);
304 #endif
305 }
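// On kernels that support PR_SET_VMA_ANON_NAME, the name set above becomes visible in
// /proc/<pid>/maps. Illustrative line (addresses and name made up for the example):
//   12c00000-32c00000 rw-p 00000000 00:00 0    [anon:dalvik-main space (region space)]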
306 
307 MemMap MemMap::MapAnonymous(const char* name,
308                             uint8_t* addr,
309                             size_t byte_count,
310                             int prot,
311                             bool low_4gb,
312                             bool reuse,
313                             /*inout*/MemMap* reservation,
314                             /*out*/std::string* error_msg,
315                             bool use_debug_name) {
316 #ifndef __LP64__
317   UNUSED(low_4gb);
318 #endif
319   if (byte_count == 0) {
320     *error_msg = "Empty MemMap requested.";
321     return Invalid();
322   }
323   size_t page_aligned_byte_count = RoundUp(byte_count, GetPageSize());
324 
325   int flags = MAP_PRIVATE | MAP_ANONYMOUS;
326   if (reuse) {
327     // reuse means it is okay that it overlaps an existing page mapping.
328     // Only use this if you actually made the page reservation yourself.
329     CHECK(addr != nullptr);
330     DCHECK(reservation == nullptr);
331 
332     DCHECK(ContainedWithinExistingMap(addr, byte_count, error_msg)) << *error_msg;
333     flags |= MAP_FIXED;
334   } else if (reservation != nullptr) {
335     CHECK(addr != nullptr);
336     if (!CheckReservation(addr, byte_count, name, *reservation, error_msg)) {
337       return MemMap::Invalid();
338     }
339     flags |= MAP_FIXED;
340   }
341 
342   unique_fd fd;
343 
344   // We need to store and potentially set an error number for pretty printing of errors
345   int saved_errno = 0;
346 
347   void* actual = nullptr;
348 
349   // Newer Ubuntu Linux kernels seem to ignore the address hint, so make it a firm request.
350   // Whereas old kernels allocated at 'addr' if provided, newer kernels seem to ignore it.
351   // However, MAP_FIXED_NOREPLACE tells the kernel it must allocate at the address or fail.
352   // Do this only on host since Android kernels still obey the hint without the flag (for now).
353   if (!kIsTargetBuild && (flags & MAP_FIXED) == 0 && addr != nullptr) {
354     actual = MapInternal(
355         addr, page_aligned_byte_count, prot, flags | MAP_FIXED_NOREPLACE, fd.get(), 0, low_4gb);
356     // If the fixed-address allocation failed, fall back to the default path (random address).
357   }
358   if (actual == nullptr || actual == MAP_FAILED) {
359     actual = MapInternal(addr, page_aligned_byte_count, prot, flags, fd.get(), 0, low_4gb);
360   }
361   saved_errno = errno;
362 
363   if (actual == MAP_FAILED) {
364     if (error_msg != nullptr) {
365       PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
366       *error_msg = StringPrintf("Failed anonymous mmap(%p, %zd, 0x%x, 0x%x, %d, 0): %s. "
367                                     "See process maps in the log.",
368                                 addr,
369                                 page_aligned_byte_count,
370                                 prot,
371                                 flags,
372                                 fd.get(),
373                                 strerror(saved_errno));
374     }
375     return Invalid();
376   }
377   if (!CheckMapRequest(addr, actual, page_aligned_byte_count, error_msg)) {
378     return Invalid();
379   }
380 
381   if (use_debug_name) {
382     SetDebugName(actual, name, page_aligned_byte_count);
383   }
384 
385   if (reservation != nullptr) {
386     // Re-mapping was successful, transfer the ownership of the memory to the new MemMap.
387     DCHECK_EQ(actual, reservation->Begin());
388     reservation->ReleaseReservedMemory(byte_count);
389   }
390   return MemMap(name,
391                 reinterpret_cast<uint8_t*>(actual),
392                 byte_count,
393                 actual,
394                 page_aligned_byte_count,
395                 prot,
396                 reuse);
397 }
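// Minimal usage sketch (illustrative only; "example" and the size are made-up values,
// and all arguments are spelled out to match the definition above):
//
//   std::string error_msg;
//   MemMap map = MemMap::MapAnonymous("example",
//                                     /*addr=*/ nullptr,
//                                     /*byte_count=*/ 16 * KB,
//                                     PROT_READ | PROT_WRITE,
//                                     /*low_4gb=*/ false,
//                                     /*reuse=*/ false,
//                                     /*reservation=*/ nullptr,
//                                     &error_msg,
//                                     /*use_debug_name=*/ true);
//   if (!map.IsValid()) {
//     LOG(ERROR) << "mapping failed: " << error_msg;
//   }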
398 
399 MemMap MemMap::MapAnonymousAligned(const char* name,
400                                    size_t byte_count,
401                                    int prot,
402                                    bool low_4gb,
403                                    size_t alignment,
404                                    /*out=*/std::string* error_msg) {
405   DCHECK(IsPowerOfTwo(alignment));
406   DCHECK_GT(alignment, GetPageSize());
407 
408   // Allocate extra 'alignment - GetPageSize()' bytes so that the mapping can be aligned.
409   MemMap ret = MapAnonymous(name,
410                             /*addr=*/nullptr,
411                             // AlignBy requires the size to be page-aligned, so
412                             // rounding it here. It is corrected afterwards with
413                             // SetSize after AlignBy.
414                             RoundUp(byte_count, GetPageSize()) + alignment - GetPageSize(),
415                             prot,
416                             low_4gb,
417                             /*reuse=*/false,
418                             /*reservation=*/nullptr,
419                             error_msg);
420   if (LIKELY(ret.IsValid())) {
421     ret.AlignBy(alignment, /*align_both_ends=*/false);
422     ret.SetSize(byte_count);
423     DCHECK_EQ(ret.Size(), byte_count);
424     DCHECK_ALIGNED_PARAM(ret.Begin(), alignment);
425   }
426   return ret;
427 }
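// Example of the over-allocation above (illustrative, assuming a 4 KiB page size):
// for byte_count = 12 KiB and alignment = 16 KiB, 12 KiB + 16 KiB - 4 KiB = 24 KiB is
// mapped, AlignBy() trims at most 12 KiB from the front to reach a 16 KiB boundary,
// and SetSize() then trims the tail so exactly 12 KiB remain, now 16 KiB-aligned.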
428 
429 MemMap MemMap::MapPlaceholder(const char* name, uint8_t* addr, size_t byte_count) {
430   if (byte_count == 0) {
431     return Invalid();
432   }
433   const size_t page_aligned_byte_count = RoundUp(byte_count, GetPageSize());
434   return MemMap(name, addr, byte_count, addr, page_aligned_byte_count, 0, /* reuse= */ true);
435 }
436 
437 template<typename A, typename B>
438 static ptrdiff_t PointerDiff(A* a, B* b) {
439   return static_cast<ptrdiff_t>(reinterpret_cast<intptr_t>(a) - reinterpret_cast<intptr_t>(b));
440 }
441 
442 bool MemMap::ReplaceWith(MemMap* source, /*out*/std::string* error) {
443 #if !HAVE_MREMAP_SYSCALL
444   UNUSED(source);
445   *error = "Cannot perform atomic replace because we are missing the required mremap syscall";
446   return false;
447 #else  // !HAVE_MREMAP_SYSCALL
448   CHECK(source != nullptr);
449   CHECK(source->IsValid());
450   if (!MemMap::kCanReplaceMapping) {
451     *error = "Unable to perform atomic replace due to runtime environment!";
452     return false;
453   }
454   // Neither mapping can be a 'reuse' mapping.
455   if (source->reuse_ || reuse_) {
456     *error = "One or both mappings is not a real mmap!";
457     return false;
458   }
459   // TODO Support redzones.
460   if (source->redzone_size_ != 0 || redzone_size_ != 0) {
461     *error = "source and dest have different redzone sizes";
462     return false;
463   }
464   // Make sure they have the same offset from the actual mmap'd address
465   if (PointerDiff(BaseBegin(), Begin()) != PointerDiff(source->BaseBegin(), source->Begin())) {
466     *error =
467         "source starts at a different offset from the mmap. Cannot atomically replace mappings";
468     return false;
469   }
470   // mremap doesn't allow the final [start, end] to overlap with the initial [start, end] (it's like
471   // memcpy but the check is explicit and actually done).
472   if (source->BaseBegin() > BaseBegin() &&
473       reinterpret_cast<uint8_t*>(BaseBegin()) + source->BaseSize() >
474       reinterpret_cast<uint8_t*>(source->BaseBegin())) {
475     *error = "destination memory pages overlap with source memory pages";
476     return false;
477   }
478   // Change the protection to match the new location.
479   int old_prot = source->GetProtect();
480   if (!source->Protect(GetProtect())) {
481     *error = "Could not change protections for source to those required for dest.";
482     return false;
483   }
484 
485   // Do the mremap.
486   void* res = mremap(/*old_address*/source->BaseBegin(),
487                      /*old_size*/source->BaseSize(),
488                      /*new_size*/source->BaseSize(),
489                      /*flags*/MREMAP_MAYMOVE | MREMAP_FIXED,
490                      /*new_address*/BaseBegin());
491   if (res == MAP_FAILED) {
492     int saved_errno = errno;
493     // Wasn't able to move mapping. Change the protection of source back to the original one and
494     // return.
495     source->Protect(old_prot);
496     *error = std::string("Failed to mremap source to dest. Error was ") + strerror(saved_errno);
497     return false;
498   }
499   CHECK(res == BaseBegin());
500 
501   // The new base_size is all the pages of the 'source' plus any remaining dest pages. We will unmap
502   // them later.
503   size_t new_base_size = std::max(source->base_size_, base_size_);
504 
505   // Invalidate *source, don't unmap it though since it is already gone.
506   size_t source_size = source->size_;
507   source->Invalidate();
508 
509   size_ = source_size;
510   base_size_ = new_base_size;
511   // Reduce base_size if needed (this will unmap the extra pages).
512   SetSize(source_size);
513 
514   return true;
515 #endif  // !HAVE_MREMAP_SYSCALL
516 }
517 
518 MemMap MemMap::MapFileAtAddress(uint8_t* expected_ptr,
519                                 size_t byte_count,
520                                 int prot,
521                                 int flags,
522                                 int fd,
523                                 off_t start,
524                                 bool low_4gb,
525                                 const char* filename,
526                                 bool reuse,
527                                 /*inout*/MemMap* reservation,
528                                 /*out*/std::string* error_msg) {
529   CHECK_NE(0, prot);
530   CHECK_NE(0, flags & (MAP_SHARED | MAP_PRIVATE));
531 
532   // Note that we do not allow MAP_FIXED unless reuse == true or we have an existing
533   // reservation, i.e. we expect this mapping to be contained within an existing map.
534   if (reuse && expected_ptr != nullptr) {
535     // reuse means it is okay that it overlaps an existing page mapping.
536     // Only use this if you actually made the page reservation yourself.
537     DCHECK(reservation == nullptr);
538     DCHECK(error_msg != nullptr);
539     DCHECK(ContainedWithinExistingMap(expected_ptr, byte_count, error_msg))
540         << ((error_msg != nullptr) ? *error_msg : std::string());
541     flags |= MAP_FIXED;
542   } else if (reservation != nullptr) {
543     DCHECK(error_msg != nullptr);
544     if (!CheckReservation(expected_ptr, byte_count, filename, *reservation, error_msg)) {
545       return Invalid();
546     }
547     flags |= MAP_FIXED;
548   } else {
549     CHECK_EQ(0, flags & MAP_FIXED);
550     // Don't bother checking for an overlapping region here. We'll
551     // check this if required after the fact inside CheckMapRequest.
552   }
553 
554   if (byte_count == 0) {
555     *error_msg = "Empty MemMap requested";
556     return Invalid();
557   }
558   // Adjust the file offset ('start') to be page-aligned as required by mmap.
559   int page_offset = start % GetPageSize();
560   off_t page_aligned_offset = start - page_offset;
561   // Adjust 'byte_count' to be page-aligned as we will map this anyway.
562   size_t page_aligned_byte_count = RoundUp(byte_count + page_offset, GetPageSize());
563   // The 'expected_ptr' is modified (if specified, i.e. non-null) to be page-aligned to the file but
564   // not necessarily to virtual memory. mmap will page align 'expected' for us.
565   uint8_t* page_aligned_expected =
566       (expected_ptr == nullptr) ? nullptr : (expected_ptr - page_offset);
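  // Worked example of the arithmetic above (illustrative values only, assuming a
  // 4 KiB page size): for start = 0x1234 and byte_count = 0x5000, page_offset is
  // 0x234, page_aligned_offset is 0x1000, and page_aligned_byte_count is
  // RoundUp(0x5000 + 0x234, 0x1000) = 0x6000. The returned MemMap's Begin() is
  // 'actual + page_offset', so callers still see their original 'start' byte.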
567 
568   size_t redzone_size = 0;
569   if (kRunningOnMemoryTool && kMemoryToolAddsRedzones && expected_ptr == nullptr) {
570     redzone_size = GetPageSize();
571     page_aligned_byte_count += redzone_size;
572   }
573 
574   uint8_t* actual = reinterpret_cast<uint8_t*>(MapInternal(page_aligned_expected,
575                                                            page_aligned_byte_count,
576                                                            prot,
577                                                            flags,
578                                                            fd,
579                                                            page_aligned_offset,
580                                                            low_4gb));
581   if (actual == MAP_FAILED) {
582     if (error_msg != nullptr) {
583       auto saved_errno = errno;
584 
585       if (kIsDebugBuild || VLOG_IS_ON(oat)) {
586         PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
587       }
588 
589       *error_msg = StringPrintf("mmap(%p, %zd, 0x%x, 0x%x, %d, %" PRId64
590                                 ") of file '%s' failed: %s. See process maps in the log.",
591                                 page_aligned_expected, page_aligned_byte_count, prot, flags, fd,
592                                 static_cast<int64_t>(page_aligned_offset), filename,
593                                 strerror(saved_errno));
594     }
595     return Invalid();
596   }
597   if (!CheckMapRequest(expected_ptr, actual, page_aligned_byte_count, error_msg)) {
598     return Invalid();
599   }
600   if (redzone_size != 0) {
601     const uint8_t *real_start = actual + page_offset;
602     const uint8_t *real_end = actual + page_offset + byte_count;
603     const uint8_t *mapping_end = actual + page_aligned_byte_count;
604 
605     MEMORY_TOOL_MAKE_NOACCESS(actual, real_start - actual);
606     MEMORY_TOOL_MAKE_NOACCESS(real_end, mapping_end - real_end);
607     page_aligned_byte_count -= redzone_size;
608   }
609 
610   if (reservation != nullptr) {
611     // Re-mapping was successful, transfer the ownership of the memory to the new MemMap.
612     DCHECK_EQ(actual, reservation->Begin());
613     reservation->ReleaseReservedMemory(byte_count);
614   }
615   return MemMap(filename,
616                 actual + page_offset,
617                 byte_count,
618                 actual,
619                 page_aligned_byte_count,
620                 prot,
621                 reuse,
622                 redzone_size);
623 }
624 
625 MemMap::MemMap(MemMap&& other) noexcept
626     : MemMap() {
627   swap(other);
628 }
629 
630 MemMap::~MemMap() {
631   Reset();
632 }
633 
634 void MemMap::DoReset() {
635   DCHECK(IsValid());
636   size_t real_base_size = base_size_;
637   // Unlike Valgrind, AddressSanitizer requires that all manually poisoned memory is unpoisoned
638   // before it is returned to the system.
639   if (redzone_size_ != 0) {
640     // Add redzone_size_ back to base_size or it will cause an mmap leak.
641     real_base_size += redzone_size_;
642     MEMORY_TOOL_MAKE_UNDEFINED(
643         reinterpret_cast<char*>(base_begin_) + real_base_size - redzone_size_,
644         redzone_size_);
645   }
646 
647   if (!reuse_) {
648     MEMORY_TOOL_MAKE_UNDEFINED(base_begin_, base_size_);
649     if (!already_unmapped_) {
650       int result = TargetMUnmap(base_begin_, real_base_size);
651       if (result == -1) {
652         PLOG(FATAL) << "munmap failed";
653       }
654     }
655   }
656 
657   Invalidate();
658 }
659 
660 void MemMap::ResetInForkedProcess() {
661   // This should be called on a map that has MADV_DONTFORK.
662   // The kernel has already unmapped this.
663   already_unmapped_ = true;
664   Reset();
665 }
666 
667 void MemMap::Invalidate() {
668   DCHECK(IsValid());
669 
670   // Remove it from gMaps.
671   // TODO(b/307704260) Move MemMap::Init MemMap::Shutdown out of Runtime init/shutdown.
672   if (mem_maps_lock_ != nullptr) {  // If null, MemMap::Shutdown() has already run and freed gMaps.
673     std::lock_guard<std::mutex> mu(*mem_maps_lock_);
674     auto it = GetGMapsEntry(*this);
675     gMaps->erase(it);
676   }
677 
678   // Mark it as invalid.
679   base_size_ = 0u;
680   DCHECK(!IsValid());
681 }
682 
683 void MemMap::swap(MemMap& other) {
684   if (IsValid() || other.IsValid()) {
685     std::lock_guard<std::mutex> mu(*mem_maps_lock_);
686     DCHECK(gMaps != nullptr);
687     auto this_it = IsValid() ? GetGMapsEntry(*this) : gMaps->end();
688     auto other_it = other.IsValid() ? GetGMapsEntry(other) : gMaps->end();
689     if (IsValid()) {
690       DCHECK(this_it != gMaps->end());
691       DCHECK_EQ(this_it->second, this);
692       this_it->second = &other;
693     }
694     if (other.IsValid()) {
695       DCHECK(other_it != gMaps->end());
696       DCHECK_EQ(other_it->second, &other);
697       other_it->second = this;
698     }
699     // Swap members with the `mem_maps_lock_` held so that `base_begin_` matches
700     // with the `gMaps` key when other threads try to use `gMaps`.
701     SwapMembers(other);
702   } else {
703     SwapMembers(other);
704   }
705 }
706 
707 void MemMap::SwapMembers(MemMap& other) {
708   name_.swap(other.name_);
709   std::swap(begin_, other.begin_);
710   std::swap(size_, other.size_);
711   std::swap(base_begin_, other.base_begin_);
712   std::swap(base_size_, other.base_size_);
713   std::swap(prot_, other.prot_);
714   std::swap(reuse_, other.reuse_);
715   std::swap(already_unmapped_, other.already_unmapped_);
716   std::swap(redzone_size_, other.redzone_size_);
717 }
718 
719 MemMap::MemMap(const std::string& name, uint8_t* begin, size_t size, void* base_begin,
720                size_t base_size, int prot, bool reuse, size_t redzone_size)
721     : name_(name), begin_(begin), size_(size), base_begin_(base_begin), base_size_(base_size),
722       prot_(prot), reuse_(reuse), already_unmapped_(false), redzone_size_(redzone_size) {
723   if (size_ == 0) {
724     CHECK(begin_ == nullptr);
725     CHECK(base_begin_ == nullptr);
726     CHECK_EQ(base_size_, 0U);
727   } else {
728     CHECK(begin_ != nullptr);
729     CHECK(base_begin_ != nullptr);
730     CHECK_NE(base_size_, 0U);
731 
732     // Add it to gMaps.
733     std::lock_guard<std::mutex> mu(*mem_maps_lock_);
734     DCHECK(gMaps != nullptr);
735     gMaps->insert(std::make_pair(base_begin_, this));
736   }
737 }
738 
739 MemMap MemMap::RemapAtEnd(uint8_t* new_end,
740                           const char* tail_name,
741                           int tail_prot,
742                           std::string* error_msg,
743                           bool use_debug_name) {
744   return RemapAtEnd(new_end,
745                     tail_name,
746                     tail_prot,
747                     MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS,
748                     /* fd= */ -1,
749                     /* offset= */ 0,
750                     error_msg,
751                     use_debug_name);
752 }
753 
754 MemMap MemMap::RemapAtEnd(uint8_t* new_end,
755                           const char* tail_name,
756                           int tail_prot,
757                           int flags,
758                           int fd,
759                           off_t offset,
760                           std::string* error_msg,
761                           bool use_debug_name) {
762   DCHECK_GE(new_end, Begin());
763   DCHECK_LE(new_end, End());
764   DCHECK_LE(begin_ + size_, reinterpret_cast<uint8_t*>(base_begin_) + base_size_);
765   DCHECK_ALIGNED_PARAM(begin_, GetPageSize());
766   DCHECK_ALIGNED_PARAM(base_begin_, GetPageSize());
767   DCHECK_ALIGNED_PARAM(reinterpret_cast<uint8_t*>(base_begin_) + base_size_, GetPageSize());
768   DCHECK_ALIGNED_PARAM(new_end, GetPageSize());
769   uint8_t* old_end = begin_ + size_;
770   uint8_t* old_base_end = reinterpret_cast<uint8_t*>(base_begin_) + base_size_;
771   uint8_t* new_base_end = new_end;
772   DCHECK_LE(new_base_end, old_base_end);
773   if (new_base_end == old_base_end) {
774     return Invalid();
775   }
776   size_t new_size = new_end - reinterpret_cast<uint8_t*>(begin_);
777   size_t new_base_size = new_base_end - reinterpret_cast<uint8_t*>(base_begin_);
778   DCHECK_LE(begin_ + new_size, reinterpret_cast<uint8_t*>(base_begin_) + new_base_size);
779   size_t tail_size = old_end - new_end;
780   uint8_t* tail_base_begin = new_base_end;
781   size_t tail_base_size = old_base_end - new_base_end;
782   DCHECK_EQ(tail_base_begin + tail_base_size, old_base_end);
783   DCHECK_ALIGNED_PARAM(tail_base_size, GetPageSize());
784 
785   MEMORY_TOOL_MAKE_UNDEFINED(tail_base_begin, tail_base_size);
786   // Note: Do not explicitly unmap the tail region; mmap() with MAP_FIXED automatically
787   // removes old mappings for the overlapping region. This makes the operation atomic
788   // and prevents other threads from racing to allocate memory in the requested region.
789   uint8_t* actual = reinterpret_cast<uint8_t*>(TargetMMap(tail_base_begin,
790                                                           tail_base_size,
791                                                           tail_prot,
792                                                           flags,
793                                                           fd,
794                                                           offset));
795   if (actual == MAP_FAILED) {
796     *error_msg = StringPrintf("map(%p, %zd, 0x%x, 0x%x, %d, 0) failed: %s. See process "
797                               "maps in the log.", tail_base_begin, tail_base_size, tail_prot, flags,
798                               fd, strerror(errno));
799     PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
800     return Invalid();
801   }
802   // Update *this.
803   if (new_base_size == 0u) {
804     std::lock_guard<std::mutex> mu(*mem_maps_lock_);
805     auto it = GetGMapsEntry(*this);
806     gMaps->erase(it);
807   }
808 
809   if (use_debug_name) {
810     SetDebugName(actual, tail_name, tail_base_size);
811   }
812 
813   size_ = new_size;
814   base_size_ = new_base_size;
815   // Return the new mapping.
816   return MemMap(tail_name, actual, tail_size, actual, tail_base_size, tail_prot, false);
817 }
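// Illustrative sketch of splitting a mapping (hypothetical names and sizes; assumes
// 'map' came from MapAnonymous and is at least three pages long):
//
//   uint8_t* tail_begin = map.Begin() + 2 * MemMap::GetPageSize();
//   MemMap tail = map.RemapAtEnd(tail_begin, "example-tail", PROT_READ, &error_msg,
//                                /*use_debug_name=*/ true);
//   // 'map' now ends at tail_begin; 'tail' owns [tail_begin, old End()) with PROT_READ.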
818 
819 MemMap MemMap::TakeReservedMemory(size_t byte_count, bool reuse) {
820   uint8_t* begin = Begin();
821   ReleaseReservedMemory(byte_count);  // Performs necessary DCHECK()s on this reservation.
822   size_t base_size = RoundUp(byte_count, GetPageSize());
823   return MemMap(name_, begin, byte_count, begin, base_size, prot_, reuse);
824 }
825 
826 void MemMap::ReleaseReservedMemory(size_t byte_count) {
827   // Check the reservation mapping.
828   DCHECK(IsValid());
829   DCHECK(!reuse_);
830   DCHECK(!already_unmapped_);
831   DCHECK_EQ(redzone_size_, 0u);
832   DCHECK_EQ(begin_, base_begin_);
833   DCHECK_EQ(size_, base_size_);
834   DCHECK_ALIGNED_PARAM(begin_, GetPageSize());
835   DCHECK_ALIGNED_PARAM(size_, GetPageSize());
836 
837   // Check and round up the `byte_count`.
838   DCHECK_NE(byte_count, 0u);
839   DCHECK_LE(byte_count, size_);
840   byte_count = RoundUp(byte_count, GetPageSize());
841 
842   if (byte_count == size_) {
843     Invalidate();
844   } else {
845     // Shrink the reservation MemMap and update its `gMaps` entry.
846     std::lock_guard<std::mutex> mu(*mem_maps_lock_);
847     auto it = GetGMapsEntry(*this);
848     auto node = gMaps->extract(it);
849     begin_ += byte_count;
850     size_ -= byte_count;
851     base_begin_ = begin_;
852     base_size_ = size_;
853     node.key() = base_begin_;
854     gMaps->insert(std::move(node));
855   }
856 }
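// Illustrative reserve-and-divide sketch (hypothetical names and sizes; this is the
// calling pattern the multimap comment at the top of this file refers to, not code
// taken from ART itself):
//
//   std::string error_msg;
//   MemMap reservation = MemMap::MapAnonymous("example-reservation",
//                                             /*addr=*/ nullptr,
//                                             /*byte_count=*/ 8 * MB,
//                                             PROT_NONE,
//                                             /*low_4gb=*/ true,
//                                             /*reuse=*/ false,
//                                             /*reservation=*/ nullptr,
//                                             &error_msg,
//                                             /*use_debug_name=*/ true);
//   // Carve 2 MB off the front; 'reservation' then covers only the remaining 6 MB.
//   MemMap first_part = reservation.TakeReservedMemory(2 * MB, /*reuse=*/ false);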
857 
858 void MemMap::FillWithZero(bool release_eagerly) {
859   if (base_begin_ != nullptr && base_size_ != 0) {
860     ZeroMemory(base_begin_, base_size_, release_eagerly);
861   }
862 }
863 
864 int MemMap::MadviseDontFork() {
865 #if defined(__linux__)
866   if (base_begin_ != nullptr || base_size_ != 0) {
867     return madvise(base_begin_, base_size_, MADV_DONTFORK);
868   }
869 #endif
870   return -1;
871 }
872 
873 bool MemMap::Sync() {
874 #ifdef _WIN32
875   // TODO: add FlushViewOfFile support.
876   PLOG(ERROR) << "MemMap::Sync unsupported on Windows.";
877   return false;
878 #else
879   // Historical note: To avoid Valgrind errors, we temporarily lifted the lower-end noaccess
880   // protection before passing it to msync() when `redzone_size_` was non-zero, as Valgrind
881   // only accepts page-aligned base address, and excludes the higher-end noaccess protection
882   // from the msync range. b/27552451.
883   return msync(BaseBegin(), BaseSize(), MS_SYNC) == 0;
884 #endif
885 }
886 
887 bool MemMap::Protect(int prot) {
888   if (base_begin_ == nullptr && base_size_ == 0) {
889     prot_ = prot;
890     return true;
891   }
892 
893 #ifndef _WIN32
894   if (mprotect(base_begin_, base_size_, prot) == 0) {
895     prot_ = prot;
896     return true;
897   }
898 #endif
899 
900   PLOG(ERROR) << "mprotect(" << reinterpret_cast<void*>(base_begin_) << ", " << base_size_ << ", "
901               << prot << ") failed";
902   return false;
903 }
904 
905 bool MemMap::CheckNoGaps(MemMap& begin_map, MemMap& end_map) {
906   std::lock_guard<std::mutex> mu(*mem_maps_lock_);
907   CHECK(begin_map.IsValid());
908   CHECK(end_map.IsValid());
909   CHECK(HasMemMap(begin_map));
910   CHECK(HasMemMap(end_map));
911   CHECK_LE(begin_map.BaseBegin(), end_map.BaseBegin());
912   MemMap* map = &begin_map;
913   while (map->BaseBegin() != end_map.BaseBegin()) {
914     MemMap* next_map = GetLargestMemMapAt(map->BaseEnd());
915     if (next_map == nullptr) {
916       // Found a gap.
917       return false;
918     }
919     map = next_map;
920   }
921   return true;
922 }
923 
924 void MemMap::DumpMaps(std::ostream& os, bool terse) {
925   std::lock_guard<std::mutex> mu(*mem_maps_lock_);
926   DumpMapsLocked(os, terse);
927 }
928 
929 void MemMap::DumpMapsLocked(std::ostream& os, bool terse) {
930   const auto& mem_maps = *gMaps;
931   if (!terse) {
932     os << mem_maps;
933     return;
934   }
935 
936   // Terse output example:
937   //   [MemMap: 0x409be000+0x20P~0x11dP+0x20P~0x61cP+0x20P prot=0x3 LinearAlloc]
938   //   [MemMap: 0x451d6000+0x6bP(3) prot=0x3 large object space allocation]
939   // The details:
940   //   "+0x20P" means 0x20 pages taken by a single mapping,
941   //   "~0x11dP" means a gap of 0x11d pages,
942   //   "+0x6bP(3)" means 3 mappings one after another, together taking 0x6b pages.
943   os << "MemMap:" << std::endl;
944   for (auto it = mem_maps.begin(), maps_end = mem_maps.end(); it != maps_end;) {
945     MemMap* map = it->second;
946     void* base = it->first;
947     CHECK_EQ(base, map->BaseBegin());
948     os << "[MemMap: " << base;
949     ++it;
950     // Merge consecutive maps with the same protect flags and name.
951     constexpr size_t kMaxGaps = 9;
952     size_t num_gaps = 0;
953     size_t num = 1u;
954     size_t size = map->BaseSize();
955     CHECK_ALIGNED_PARAM(size, GetPageSize());
956     void* end = map->BaseEnd();
957     while (it != maps_end &&
958         it->second->GetProtect() == map->GetProtect() &&
959         it->second->GetName() == map->GetName() &&
960         (it->second->BaseBegin() == end || num_gaps < kMaxGaps)) {
961       if (it->second->BaseBegin() != end) {
962         ++num_gaps;
963         os << "+0x" << std::hex << (size / GetPageSize()) << "P";
964         if (num != 1u) {
965           os << "(" << std::dec << num << ")";
966         }
967         size_t gap =
968             reinterpret_cast<uintptr_t>(it->second->BaseBegin()) - reinterpret_cast<uintptr_t>(end);
969         CHECK_ALIGNED_PARAM(gap, GetPageSize());
970         os << "~0x" << std::hex << (gap / GetPageSize()) << "P";
971         num = 0u;
972         size = 0u;
973       }
974       CHECK_ALIGNED_PARAM(it->second->BaseSize(), GetPageSize());
975       ++num;
976       size += it->second->BaseSize();
977       end = it->second->BaseEnd();
978       ++it;
979     }
980     os << "+0x" << std::hex << (size / GetPageSize()) << "P";
981     if (num != 1u) {
982       os << "(" << std::dec << num << ")";
983     }
984     os << " prot=0x" << std::hex << map->GetProtect() << " " << map->GetName() << "]" << std::endl;
985   }
986 }
987 
988 bool MemMap::HasMemMap(MemMap& map) {
989   void* base_begin = map.BaseBegin();
990   for (auto it = gMaps->lower_bound(base_begin), end = gMaps->end();
991        it != end && it->first == base_begin; ++it) {
992     if (it->second == &map) {
993       return true;
994     }
995   }
996   return false;
997 }
998 
999 MemMap* MemMap::GetLargestMemMapAt(void* address) {
1000   size_t largest_size = 0;
1001   MemMap* largest_map = nullptr;
1002   DCHECK(gMaps != nullptr);
1003   for (auto it = gMaps->lower_bound(address), end = gMaps->end();
1004        it != end && it->first == address; ++it) {
1005     MemMap* map = it->second;
1006     CHECK(map != nullptr);
1007     if (largest_size < map->BaseSize()) {
1008       largest_size = map->BaseSize();
1009       largest_map = map;
1010     }
1011   }
1012   return largest_map;
1013 }
1014 
1015 void MemMap::Init() {
1016   if (mem_maps_lock_ != nullptr) {
1017     // dex2oat calls MemMap::Init twice since it's needed before the runtime is created.
1018     return;
1019   }
1020 
1021   mem_maps_lock_ = new std::mutex();
1022   // Not for thread safety, but for the annotation that gMaps is GUARDED_BY(mem_maps_lock_).
1023   std::lock_guard<std::mutex> mu(*mem_maps_lock_);
1024 #ifdef ART_PAGE_SIZE_AGNOSTIC
1025   page_size_ = GetPageSizeSlow();
1026 #endif
1027   CHECK_GE(GetPageSize(), kMinPageSize);
1028   CHECK_LE(GetPageSize(), kMaxPageSize);
1029 #if USE_ART_LOW_4G_ALLOCATOR
1030   // Initialize linear scan to random position.
1031   CHECK_EQ(next_mem_pos_, 0u);
1032   next_mem_pos_ = GenerateNextMemPos(GetPageSize());
1033 #endif
1034   DCHECK(gMaps == nullptr);
1035   gMaps = new Maps;
1036 
1037   TargetMMapInit();
1038 }
1039 
1040 bool MemMap::IsInitialized() { return mem_maps_lock_ != nullptr; }
1041 
1042 void MemMap::Shutdown() {
1043   if (mem_maps_lock_ == nullptr) {
1044     // If MemMap::Shutdown is called more than once, there is no effect.
1045     return;
1046   }
1047   {
1048     // Not for thread safety, but for the annotation that gMaps is GUARDED_BY(mem_maps_lock_).
1049     std::lock_guard<std::mutex> mu(*mem_maps_lock_);
1050     DCHECK(gMaps != nullptr);
1051     delete gMaps;
1052     gMaps = nullptr;
1053   }
1054 #if USE_ART_LOW_4G_ALLOCATOR
1055   next_mem_pos_ = 0u;
1056 #endif
1057   delete mem_maps_lock_;
1058   mem_maps_lock_ = nullptr;
1059 }
1060 
1061 void MemMap::SetSize(size_t new_size) {
1062   CHECK_LE(new_size, size_);
1063   size_t new_base_size = RoundUp(new_size + static_cast<size_t>(PointerDiff(Begin(), BaseBegin())),
1064                                  GetPageSize());
1065   if (new_base_size == base_size_) {
1066     size_ = new_size;
1067     return;
1068   }
1069   CHECK_LT(new_base_size, base_size_);
1070   MEMORY_TOOL_MAKE_UNDEFINED(
1071       reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(BaseBegin()) +
1072                               new_base_size),
1073       base_size_ - new_base_size);
1074   CHECK_EQ(TargetMUnmap(reinterpret_cast<void*>(
1075                         reinterpret_cast<uintptr_t>(BaseBegin()) + new_base_size),
1076                         base_size_ - new_base_size), 0)
1077                         << new_base_size << " " << base_size_;
1078   base_size_ = new_base_size;
1079   size_ = new_size;
1080 }
1081 
1082 void* MemMap::MapInternalArtLow4GBAllocator(size_t length,
1083                                             int prot,
1084                                             int flags,
1085                                             int fd,
1086                                             off_t offset) {
1087 #if USE_ART_LOW_4G_ALLOCATOR
1088   void* actual = MAP_FAILED;
1089 
1090   bool first_run = true;
1091 
1092   std::lock_guard<std::mutex> mu(*mem_maps_lock_);
1093   for (uintptr_t ptr = next_mem_pos_; ptr < 4 * GB; ptr += GetPageSize()) {
1094     // Use gMaps as an optimization to skip over large maps.
1095     // Find the first map whose address is > ptr.
1096     auto it = gMaps->upper_bound(reinterpret_cast<void*>(ptr));
1097     if (it != gMaps->begin()) {
1098       auto before_it = it;
1099       --before_it;
1100       // Start at the end of the map before the upper bound.
1101       ptr = std::max(ptr, reinterpret_cast<uintptr_t>(before_it->second->BaseEnd()));
1102       CHECK_ALIGNED_PARAM(ptr, GetPageSize());
1103     }
1104     while (it != gMaps->end()) {
1105       // How much space do we have until the next map?
1106       size_t delta = reinterpret_cast<uintptr_t>(it->first) - ptr;
1107       // If the space may be sufficient, break out of the loop.
1108       if (delta >= length) {
1109         break;
1110       }
1111       // Otherwise, skip to the end of the map.
1112       ptr = reinterpret_cast<uintptr_t>(it->second->BaseEnd());
1113       CHECK_ALIGNED_PARAM(ptr, GetPageSize());
1114       ++it;
1115     }
1116 
1117     // Try to see if we get lucky with this address since none of the ART maps overlap.
1118     actual = TryMemMapLow4GB(reinterpret_cast<void*>(ptr), length, prot, flags, fd, offset);
1119     if (actual != MAP_FAILED) {
1120       next_mem_pos_ = reinterpret_cast<uintptr_t>(actual) + length;
1121       return actual;
1122     }
1123 
1124     if (4U * GB - ptr < length) {
1125       // Not enough memory until 4GB.
1126       if (first_run) {
1127         // Try another time from the bottom;
1128         ptr = LOW_MEM_START - GetPageSize();
1129         first_run = false;
1130         continue;
1131       } else {
1132         // Second try failed.
1133         break;
1134       }
1135     }
1136 
1137     uintptr_t tail_ptr;
1138 
1139     // Check pages are free.
1140     bool safe = true;
1141     for (tail_ptr = ptr; tail_ptr < ptr + length; tail_ptr += GetPageSize()) {
1142       if (msync(reinterpret_cast<void*>(tail_ptr), GetPageSize(), 0) == 0) {
1143         safe = false;
1144         break;
1145       } else {
1146         DCHECK_EQ(errno, ENOMEM);
1147       }
1148     }
1149 
1150     next_mem_pos_ = tail_ptr;  // Update early, as we break out when we find and map a region.
1151 
1152     if (safe == true) {
1153       actual = TryMemMapLow4GB(reinterpret_cast<void*>(ptr), length, prot, flags, fd, offset);
1154       if (actual != MAP_FAILED) {
1155         return actual;
1156       }
1157     } else {
1158       // Skip over last page.
1159       ptr = tail_ptr;
1160     }
1161   }
1162 
1163   if (actual == MAP_FAILED) {
1164     LOG(ERROR) << "Could not find contiguous low-memory space.";
1165     errno = ENOMEM;
1166   }
1167   return actual;
1168 #else
1169   UNUSED(length, prot, flags, fd, offset);
1170   LOG(FATAL) << "Unreachable";
1171   UNREACHABLE();
1172 #endif
1173 }
1174 
1175 void* MemMap::MapInternal(void* addr,
1176                           size_t length,
1177                           int prot,
1178                           int flags,
1179                           int fd,
1180                           off_t offset,
1181                           bool low_4gb) {
1182 #ifdef __LP64__
1183   // When requesting low_4g memory and having an expectation, the requested range should fit into
1184   // 4GB.
1185   if (low_4gb && (
1186       // Start out of bounds.
1187       (reinterpret_cast<uintptr_t>(addr) >> 32) != 0 ||
1188       // End out of bounds. For simplicity, this will fail for the last page of memory.
1189       ((reinterpret_cast<uintptr_t>(addr) + length) >> 32) != 0)) {
1190     LOG(ERROR) << "The requested address space (" << addr << ", "
1191                << reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(addr) + length)
1192                << ") cannot fit in low_4gb";
1193     return MAP_FAILED;
1194   }
1195 #else
1196   UNUSED(low_4gb);
1197 #endif
1198   DCHECK_ALIGNED_PARAM(length, GetPageSize());
1199   // TODO:
1200   // A page allocator would be a useful abstraction here, as
1201   // 1) It is doubtful that MAP_32BIT on x86_64 is doing the right job for us
1202   void* actual = MAP_FAILED;
1203 #if USE_ART_LOW_4G_ALLOCATOR
1204   // MAP_32BIT only available on x86_64.
1205   if (low_4gb && addr == nullptr) {
1206     // The linear-scan allocator has an issue when executable pages are denied (e.g., by SELinux
1207     // policies in sensitive processes). In that case, the error code will still be ENOMEM. So
1208     // the allocator will scan all low 4GB twice, and still fail. This is *very* slow.
1209     //
1210     // To avoid the issue, always map non-executable first, and mprotect if necessary.
1211     const int orig_prot = prot;
1212     const int prot_non_exec = prot & ~PROT_EXEC;
1213     actual = MapInternalArtLow4GBAllocator(length, prot_non_exec, flags, fd, offset);
1214 
1215     if (actual == MAP_FAILED) {
1216       return MAP_FAILED;
1217     }
1218 
1219     // See if we need to remap with the executable bit now.
1220     if (orig_prot != prot_non_exec) {
1221       if (mprotect(actual, length, orig_prot) != 0) {
1222         PLOG(ERROR) << "Could not protect to requested prot: " << orig_prot;
1223         TargetMUnmap(actual, length);
1224         errno = ENOMEM;
1225         return MAP_FAILED;
1226       }
1227     }
1228     return actual;
1229   }
1230 
1231   actual = TargetMMap(addr, length, prot, flags, fd, offset);
1232 #else
1233 #if defined(__LP64__)
1234   if (low_4gb && addr == nullptr) {
1235     flags |= MAP_32BIT;
1236   }
1237 #endif
1238   actual = TargetMMap(addr, length, prot, flags, fd, offset);
1239 #endif
1240   return actual;
1241 }
1242 
1243 std::ostream& operator<<(std::ostream& os, const MemMap& mem_map) {
1244   os << StringPrintf("[MemMap: %p-%p prot=0x%x %s]",
1245                      mem_map.BaseBegin(), mem_map.BaseEnd(), mem_map.GetProtect(),
1246                      mem_map.GetName().c_str());
1247   return os;
1248 }
1249 
1250 void MemMap::TryReadable() {
1251   if (base_begin_ == nullptr && base_size_ == 0) {
1252     return;
1253   }
1254   CHECK_NE(prot_ & PROT_READ, 0);
1255   volatile uint8_t* begin = reinterpret_cast<volatile uint8_t*>(base_begin_);
1256   volatile uint8_t* end = begin + base_size_;
1257   DCHECK(IsAlignedParam(begin, GetPageSize()));
1258   DCHECK(IsAlignedParam(end, GetPageSize()));
1259   // Read the first byte of each page. Use volatile to prevent the compiler from optimizing away the
1260   // reads.
1261   for (volatile uint8_t* ptr = begin; ptr < end; ptr += GetPageSize()) {
1262     // This read could fault if protection wasn't set correctly.
1263     uint8_t value = *ptr;
1264     UNUSED(value);
1265   }
1266 }
1267 
1268 static void inline RawClearMemory(uint8_t* begin, uint8_t* end) {
1269   std::fill(begin, end, 0);
1270 }
1271 
1272 #if defined(__linux__)
1273 static inline void ClearMemory(uint8_t* page_begin, size_t size, bool resident, size_t page_size) {
1274   DCHECK(IsAlignedParam(page_begin, page_size));
1275   DCHECK(IsAlignedParam(page_begin + size, page_size));
1276   if (resident) {
1277     RawClearMemory(page_begin, page_begin + size);
1278     // Note we check madvise return value against -1, as it seems old kernels
1279     // can return 1.
1280 #ifdef MADV_FREE
1281     int res = madvise(page_begin, size, MADV_FREE);
1282     CHECK_NE(res, -1) << "madvise failed";
1283 #endif  // MADV_FREE
1284   } else {
1285     int res = madvise(page_begin, size, MADV_DONTNEED);
1286     CHECK_NE(res, -1) << "madvise failed";
1287   }
1288 }
1289 #endif  // __linux__
1290 
1291 void ZeroMemory(void* address, size_t length, bool release_eagerly) {
1292   if (length == 0) {
1293     return;
1294   }
1295   uint8_t* const mem_begin = reinterpret_cast<uint8_t*>(address);
1296   uint8_t* const mem_end = mem_begin + length;
1297   uint8_t* const page_begin = AlignUp(mem_begin, MemMap::GetPageSize());
1298   uint8_t* const page_end = AlignDown(mem_end, MemMap::GetPageSize());
1299   if (!kMadviseZeroes || page_begin >= page_end) {
1300     // No possible area to madvise.
1301     RawClearMemory(mem_begin, mem_end);
1302     return;
1303   }
1304   // Spans one or more pages.
1305   DCHECK_LE(mem_begin, page_begin);
1306   DCHECK_LE(page_begin, page_end);
1307   DCHECK_LE(page_end, mem_end);
1308 #ifdef _WIN32
1309   UNUSED(release_eagerly);
1310   LOG(WARNING) << "ZeroMemory does not madvise on Windows.";
1311   RawClearMemory(mem_begin, mem_end);
1312 #else
1313   RawClearMemory(mem_begin, page_begin);
1314   RawClearMemory(page_end, mem_end);
1315 // mincore() is linux-specific syscall.
1316 #if defined(__linux__)
1317   if (!release_eagerly) {
1318     size_t vec_len = (page_end - page_begin) / MemMap::GetPageSize();
1319     std::unique_ptr<unsigned char[]> vec(new unsigned char[vec_len]);
1320     if (mincore(page_begin, page_end - page_begin, vec.get()) == 0) {
1321       uint8_t* current_page = page_begin;
1322       size_t current_size = MemMap::GetPageSize();
1323       uint32_t old_state = vec[0] & 0x1;
1324       for (size_t i = 1; i < vec_len; ++i) {
1325         uint32_t new_state = vec[i] & 0x1;
1326         if (old_state == new_state) {
1327           current_size += MemMap::GetPageSize();
1328         } else {
1329           ClearMemory(current_page, current_size, old_state, MemMap::GetPageSize());
1330           current_page = current_page + current_size;
1331           current_size = MemMap::GetPageSize();
1332           old_state = new_state;
1333         }
1334       }
1335       ClearMemory(current_page, current_size, old_state, MemMap::GetPageSize());
1336       return;
1337     }
1338     static bool logged_about_mincore = false;
1339     if (!logged_about_mincore) {
1340       PLOG(WARNING) << "mincore failed, falling back to madvise MADV_DONTNEED";
1341       logged_about_mincore = true;
1342     }
1343     // mincore failed, fall through to MADV_DONTNEED.
1344   }
1345 #else
1346   UNUSED(release_eagerly);
1347 #endif  // __linux__
1348   int res = madvise(page_begin, page_end - page_begin, MADV_DONTNEED);
1349   CHECK_NE(res, -1) << "madvise failed";
1350 #endif  // _WIN32
1351 }
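// Worked example of the boundary handling above (illustrative, assuming 4 KiB pages):
// for address = 0x10000800 and length = 0x2000, page_begin = 0x10001000 and
// page_end = 0x10002000, so [0x10000800, 0x10001000) and [0x10002000, 0x10002800)
// are cleared with RawClearMemory() while the single whole page in between is
// released (or madvised) as described above.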
1352 
1353 void MemMap::AlignBy(size_t alignment, bool align_both_ends) {
1354   CHECK_EQ(begin_, base_begin_) << "Unsupported";
1355   CHECK_EQ(size_, base_size_) << "Unsupported";
1356   CHECK_GT(alignment, static_cast<size_t>(GetPageSize()));
1357   CHECK_ALIGNED_PARAM(alignment, GetPageSize());
1358   CHECK(!reuse_);
1359   if (IsAlignedParam(reinterpret_cast<uintptr_t>(base_begin_), alignment) &&
1360       (!align_both_ends || IsAlignedParam(base_size_, alignment))) {
1361     // Already aligned.
1362     return;
1363   }
1364   uint8_t* base_begin = reinterpret_cast<uint8_t*>(base_begin_);
1365   uint8_t* aligned_base_begin = AlignUp(base_begin, alignment);
1366   CHECK_LE(base_begin, aligned_base_begin);
1367   if (base_begin < aligned_base_begin) {
1368     MEMORY_TOOL_MAKE_UNDEFINED(base_begin, aligned_base_begin - base_begin);
1369     CHECK_EQ(TargetMUnmap(base_begin, aligned_base_begin - base_begin), 0)
1370         << "base_begin=" << reinterpret_cast<void*>(base_begin)
1371         << " aligned_base_begin=" << reinterpret_cast<void*>(aligned_base_begin);
1372   }
1373   uint8_t* base_end = base_begin + base_size_;
1374   size_t aligned_base_size;
1375   if (align_both_ends) {
1376     uint8_t* aligned_base_end = AlignDown(base_end, alignment);
1377     CHECK_LE(aligned_base_end, base_end);
1378     CHECK_LT(aligned_base_begin, aligned_base_end)
1379         << "base_begin = " << reinterpret_cast<void*>(base_begin)
1380         << " base_end = " << reinterpret_cast<void*>(base_end);
1381     aligned_base_size = aligned_base_end - aligned_base_begin;
1382     CHECK_GE(aligned_base_size, alignment);
1383     if (aligned_base_end < base_end) {
1384       MEMORY_TOOL_MAKE_UNDEFINED(aligned_base_end, base_end - aligned_base_end);
1385       CHECK_EQ(TargetMUnmap(aligned_base_end, base_end - aligned_base_end), 0)
1386           << "base_end=" << reinterpret_cast<void*>(base_end)
1387           << " aligned_base_end=" << reinterpret_cast<void*>(aligned_base_end);
1388     }
1389   } else {
1390     CHECK_LT(aligned_base_begin, base_end)
1391         << "base_begin = " << reinterpret_cast<void*>(base_begin);
1392     aligned_base_size = base_end - aligned_base_begin;
1393   }
1394   std::lock_guard<std::mutex> mu(*mem_maps_lock_);
1395   if (base_begin < aligned_base_begin) {
1396     auto it = GetGMapsEntry(*this);
1397     auto node = gMaps->extract(it);
1398     node.key() = aligned_base_begin;
1399     gMaps->insert(std::move(node));
1400   }
1401   base_begin_ = aligned_base_begin;
1402   base_size_ = aligned_base_size;
1403   begin_ = aligned_base_begin;
1404   size_ = aligned_base_size;
1405   DCHECK(gMaps != nullptr);
1406 }
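// Worked example of AlignBy (illustrative addresses): for a mapping covering
// [0x10003000, 0x10403000) and alignment = 0x200000 (2 MiB) with
// align_both_ends = false, the head [0x10003000, 0x10200000) is unmapped and the
// map becomes [0x10200000, 0x10403000), whose begin is now 2 MiB-aligned.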
1407 
1408 }  // namespace art
1409