xref: /aosp_15_r20/art/runtime/gc/heap.cc (revision 795d594fd825385562da6b089ea9b2033f3abf5a)
1 /*
2  * Copyright (C) 2011 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #include "heap.h"
18 
19 #include <sys/types.h>
20 #include <unistd.h>
21 
22 #include <limits>
23 #include <memory>
24 #include <random>
25 #include <sstream>
26 #include <vector>
27 
28 #include "allocation_listener.h"
29 #include "android-base/stringprintf.h"
30 #include "android-base/thread_annotations.h"
31 #include "art_field-inl.h"
32 #include "backtrace_helper.h"
33 #include "base/allocator.h"
34 #include "base/arena_allocator.h"
35 #include "base/dumpable.h"
36 #include "base/file_utils.h"
37 #include "base/histogram-inl.h"
38 #include "base/logging.h"  // For VLOG.
39 #include "base/memory_tool.h"
40 #include "base/mutex.h"
41 #include "base/os.h"
42 #include "base/stl_util.h"
43 #include "base/systrace.h"
44 #include "base/time_utils.h"
45 #include "base/utils.h"
46 #include "class_root-inl.h"
47 #include "common_throws.h"
48 #include "debugger.h"
49 #include "dex/dex_file-inl.h"
50 #include "entrypoints/quick/quick_alloc_entrypoints.h"
51 #include "gc/accounting/card_table-inl.h"
52 #include "gc/accounting/heap_bitmap-inl.h"
53 #include "gc/accounting/mod_union_table-inl.h"
54 #include "gc/accounting/read_barrier_table.h"
55 #include "gc/accounting/remembered_set.h"
56 #include "gc/accounting/space_bitmap-inl.h"
57 #include "gc/collector/concurrent_copying.h"
58 #include "gc/collector/mark_compact.h"
59 #include "gc/collector/mark_sweep.h"
60 #include "gc/collector/partial_mark_sweep.h"
61 #include "gc/collector/semi_space.h"
62 #include "gc/collector/sticky_mark_sweep.h"
63 #include "gc/racing_check.h"
64 #include "gc/reference_processor.h"
65 #include "gc/scoped_gc_critical_section.h"
66 #include "gc/space/bump_pointer_space.h"
67 #include "gc/space/dlmalloc_space-inl.h"
68 #include "gc/space/image_space.h"
69 #include "gc/space/large_object_space.h"
70 #include "gc/space/region_space.h"
71 #include "gc/space/rosalloc_space-inl.h"
72 #include "gc/space/space-inl.h"
73 #include "gc/space/zygote_space.h"
74 #include "gc/task_processor.h"
75 #include "gc/verification.h"
76 #include "gc_pause_listener.h"
77 #include "gc_root.h"
78 #include "handle_scope-inl.h"
79 #include "heap-inl.h"
80 #include "heap-visit-objects-inl.h"
81 #include "intern_table.h"
82 #include "jit/jit.h"
83 #include "jit/jit_code_cache.h"
84 #include "jni/java_vm_ext.h"
85 #include "mirror/class-inl.h"
86 #include "mirror/executable-inl.h"
87 #include "mirror/field.h"
88 #include "mirror/method_handle_impl.h"
89 #include "mirror/object-inl.h"
90 #include "mirror/object-refvisitor-inl.h"
91 #include "mirror/object_array-inl.h"
92 #include "mirror/reference-inl.h"
93 #include "mirror/var_handle.h"
94 #include "nativehelper/scoped_local_ref.h"
95 #include "oat/image.h"
96 #include "obj_ptr-inl.h"
97 #ifdef ART_TARGET_ANDROID
98 #include "perfetto/heap_profile.h"
99 #endif
100 #include "reflection.h"
101 #include "runtime.h"
102 #include "javaheapprof/javaheapsampler.h"
103 #include "scoped_thread_state_change-inl.h"
104 #include "thread-inl.h"
105 #include "thread_list.h"
106 #include "verify_object-inl.h"
107 #include "well_known_classes.h"
108 
109 #if defined(__BIONIC__) || defined(__GLIBC__) || defined(ANDROID_HOST_MUSL)
110 #include <malloc.h>  // For mallinfo()
111 #endif
112 
113 namespace art HIDDEN {
114 
115 #ifdef ART_TARGET_ANDROID
116 namespace {
117 
118 // Enable the heap sampler Callback function used by Perfetto.
119 void EnableHeapSamplerCallback(void* enable_ptr,
120                                const AHeapProfileEnableCallbackInfo* enable_info_ptr) {
121   HeapSampler* sampler_self = reinterpret_cast<HeapSampler*>(enable_ptr);
122   // Set the ART profiler sampling interval to the value from Perfetto.
123   uint64_t interval = AHeapProfileEnableCallbackInfo_getSamplingInterval(enable_info_ptr);
124   if (interval > 0) {
125     sampler_self->SetSamplingInterval(interval);
126   }
127   // Else the default 4K sampling interval is used. However, the default case shouldn't happen
128   // for the Perfetto API: AHeapProfileEnableCallbackInfo_getSamplingInterval should always give
129   // the requested (non-negative) sampling interval, which is a uint64_t and gets checked for != 0.
130   // Do not call the heap as a temporary here; it will build, but the test run will silently fail
131   // because the Heap is not fully constructed yet in some cases.
132   sampler_self->EnableHeapSampler();
133 }
134 
135 // Disable the heap sampler Callback function used by Perfetto.
136 void DisableHeapSamplerCallback(void* disable_ptr,
137                                 [[maybe_unused]] const AHeapProfileDisableCallbackInfo* info_ptr) {
138   HeapSampler* sampler_self = reinterpret_cast<HeapSampler*>(disable_ptr);
139   sampler_self->DisableHeapSampler();
140 }
141 
142 }  // namespace
143 #endif
144 
145 namespace gc {
146 
147 DEFINE_RUNTIME_DEBUG_FLAG(Heap, kStressCollectorTransition);
148 
149 // Minimum amount of remaining bytes before a concurrent GC is triggered.
150 static constexpr size_t kMinConcurrentRemainingBytes = 128 * KB;
151 static constexpr size_t kMaxConcurrentRemainingBytes = 512 * KB;
152 // Sticky GC throughput adjustment, divided by 4. Increasing this causes sticky GC to occur more
153 // relative to partial/full GC. This may be desirable since sticky GCs interfere less with mutator
154 // threads (lower pauses, use less memory bandwidth).
155 static double GetStickyGcThroughputAdjustment(bool use_generational_cc) {
156   return use_generational_cc ? 0.5 : 1.0;
157 }
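// Editor's note (non-authoritative sketch): later in this file the heap scales the measured
// throughput of a sticky GC by this factor before comparing it against the mean throughput of the
// next non-sticky collector when deciding whether to schedule another sticky collection. A factor
// below 1.0 (the generational CC case) therefore makes the heap fall back to partial/full
// collections sooner.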
158 // Whether or not we compact the zygote in PreZygoteFork.
159 static constexpr bool kCompactZygote = kMovingCollector;
160 // How many reserve entries are at the end of the allocation stack; these are only needed if the
161 // allocation stack overflows.
162 static constexpr size_t kAllocationStackReserveSize = 1024;
163 // Default mark stack size in bytes.
164 static const size_t kDefaultMarkStackSize = 64 * KB;
165 // Define space names.
166 static const char* kDlMallocSpaceName[2] = {"main dlmalloc space", "main dlmalloc space 1"};
167 static const char* kRosAllocSpaceName[2] = {"main rosalloc space", "main rosalloc space 1"};
168 static const char* kMemMapSpaceName[2] = {"main space", "main space 1"};
169 static const char* kNonMovingSpaceName = "non moving space";
170 static const char* kZygoteSpaceName = "zygote space";
171 static constexpr bool kGCALotMode = false;
172 // GC alot mode uses a small allocation stack to stress test a lot of GC.
173 static constexpr size_t kGcAlotAllocationStackSize = 4 * KB /
174     sizeof(mirror::HeapReference<mirror::Object>);
175 // Verify-object mode has a small allocation stack size since searching the allocation stack is slow.
176 static constexpr size_t kVerifyObjectAllocationStackSize = 16 * KB /
177     sizeof(mirror::HeapReference<mirror::Object>);
178 static constexpr size_t kDefaultAllocationStackSize = 8 * MB /
179     sizeof(mirror::HeapReference<mirror::Object>);
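// Worked sizes for the three stack constants above (hedged; assumes
// mirror::HeapReference<mirror::Object> is a 4-byte compressed reference, as on current ART):
//   kGcAlotAllocationStackSize      = 4 KB / 4  = 1024 entries
//   kVerifyObjectAllocationStackSize = 16 KB / 4 = 4096 entries
//   kDefaultAllocationStackSize     = 8 MB / 4  = 2M entries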
180 
181 // If we violate BOTH of the following constraints, we throw OOME.
182 // They differ due to concurrent allocation.
183 // After a GC (due to allocation failure) we should retrieve at least this
184 // fraction of the current max heap size.
185 static constexpr double kMinFreedHeapAfterGcForAlloc = 0.05;
186 // After a GC (due to allocation failure), at least this fraction of the
187 // heap should be available.
188 static constexpr double kMinFreeHeapAfterGcForAlloc = 0.01;
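// Editor's sketch of the resulting policy (restating the two constants above): after a GC
// triggered by allocation failure, OOME is thrown only if BOTH hold -- the GC recovered less than
// kMinFreedHeapAfterGcForAlloc of the current max heap size AND less than
// kMinFreeHeapAfterGcForAlloc of it is free afterwards. Satisfying either bound lets the
// allocation be retried instead.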
189 
190 // For deterministic compilation, we need the heap to be at a well-known address.
191 static constexpr uint32_t kAllocSpaceBeginForDeterministicAoT = 0x40000000;
192 // Dump the rosalloc stats on SIGQUIT.
193 static constexpr bool kDumpRosAllocStatsOnSigQuit = false;
194 
195 static const char* kRegionSpaceName = "main space (region space)";
196 
197 // If true, we log all GCs in both the foreground and background. Used for debugging.
198 static constexpr bool kLogAllGCs = false;
199 
200 // Use the max heap size for 2 seconds. This is smaller than the usual 5s window since we don't
201 // want to let the process allocate with relaxed ergonomics for that long.
202 static constexpr size_t kPostForkMaxHeapDurationMS = 2000;
203 
204 #if defined(__LP64__) || !defined(ADDRESS_SANITIZER)
205 // 32 MB (0x2000000) is picked to ensure it is aligned to the largest supported PMD
206 // size, which is 32 MB with a 16 KB page size on AArch64.
207 uint8_t* const Heap::kPreferredAllocSpaceBegin = reinterpret_cast<uint8_t*>(([]() constexpr {
208   constexpr size_t kBegin = 32 * MB;
209   constexpr int kMaxPMDSize = (kMaxPageSize / sizeof(uint64_t)) * kMaxPageSize;
210   static_assert(IsAligned<kMaxPMDSize>(kBegin),
211                 "Moving-space's begin should be aligned to the maximum supported PMD size.");
212   return kBegin;
213 })());
214 #else
215 #ifdef __ANDROID__
216 // For 32-bit Android, use 0x20000000 because asan reserves 0x04000000 - 0x20000000.
217 uint8_t* const Heap::kPreferredAllocSpaceBegin = reinterpret_cast<uint8_t*>(0x20000000);
218 #else
219 // For 32-bit host, use 0x40000000 because asan uses most of the space below this.
220 uint8_t* const Heap::kPreferredAllocSpaceBegin = reinterpret_cast<uint8_t*>(0x40000000);
221 #endif
222 #endif
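// Worked example for the LP64 branch above (hedged; assumes kMaxPageSize is 16 KiB): each PMD
// maps (page_size / sizeof(uint64_t)) pages, so kMaxPMDSize = (16384 / 8) * 16384 = 32 MiB, and
// the 32 MB begin address satisfies the static_assert.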
223 
224 // Log GC on regular (but fairly large) intervals during GC stress mode.
225 // It is expected that the other runtime options will be used to reduce the usual logging.
226 // This allows us to make the logging much less verbose while still reporting some
227 // progress (biased towards expensive GCs), and while still reporting pathological cases.
228 static constexpr int64_t kGcStressModeGcLogSampleFrequencyNs = MsToNs(10000);
229 
230 static inline bool CareAboutPauseTimes() {
231   return Runtime::Current()->InJankPerceptibleProcessState();
232 }
233 
234 static void VerifyBootImagesContiguity(const std::vector<gc::space::ImageSpace*>& image_spaces) {
235   uint32_t boot_image_size = 0u;
236   for (size_t i = 0u, num_spaces = image_spaces.size(); i != num_spaces; ) {
237     const ImageHeader& image_header = image_spaces[i]->GetImageHeader();
238     uint32_t reservation_size = image_header.GetImageReservationSize();
239     uint32_t image_count = image_header.GetImageSpaceCount();
240 
241     CHECK_NE(image_count, 0u);
242     CHECK_LE(image_count, num_spaces - i);
243     CHECK_NE(reservation_size, 0u);
244     for (size_t j = 1u; j != image_count; ++j) {
245       CHECK_EQ(image_spaces[i + j]->GetImageHeader().GetComponentCount(), 0u);
246       CHECK_EQ(image_spaces[i + j]->GetImageHeader().GetImageReservationSize(), 0u);
247     }
248 
249     // Check the start of the heap.
250     CHECK_EQ(image_spaces[0]->Begin() + boot_image_size, image_spaces[i]->Begin());
251     // Check contiguous layout of images and oat files.
252     const uint8_t* current_heap = image_spaces[i]->Begin();
253     const uint8_t* current_oat = image_spaces[i]->GetImageHeader().GetOatFileBegin();
254     for (size_t j = 0u; j != image_count; ++j) {
255       const ImageHeader& current_header = image_spaces[i + j]->GetImageHeader();
256       CHECK_EQ(current_heap, image_spaces[i + j]->Begin());
257       CHECK_EQ(current_oat, current_header.GetOatFileBegin());
258       current_heap += RoundUp(current_header.GetImageSize(), kElfSegmentAlignment);
259       CHECK_GT(current_header.GetOatFileEnd(), current_header.GetOatFileBegin());
260       current_oat = current_header.GetOatFileEnd();
261     }
262     // Check that oat files start at the end of images.
263     CHECK_EQ(current_heap, image_spaces[i]->GetImageHeader().GetOatFileBegin());
264     // Check that the reservation size equals the size of images and oat files.
265     CHECK_EQ(reservation_size, static_cast<size_t>(current_oat - image_spaces[i]->Begin()));
266 
267     boot_image_size += reservation_size;
268     i += image_count;
269   }
270 }
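// Layout checked above (editor's sketch): for each image header that starts a multi-image
// component, the reservation is expected to look like
//   [image i][image i+1]...[image i+k-1][oat i][oat i+1]...[oat i+k-1]
// with each image rounded up to kElfSegmentAlignment, the oat files starting exactly where the
// images end, and consecutive reservations packed back-to-back from image_spaces[0]->Begin().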
271 
272 Heap::Heap(size_t initial_size,
273            size_t growth_limit,
274            size_t min_free,
275            size_t max_free,
276            double target_utilization,
277            double foreground_heap_growth_multiplier,
278            size_t stop_for_native_allocs,
279            size_t capacity,
280            size_t non_moving_space_capacity,
281            const std::vector<std::string>& boot_class_path,
282            const std::vector<std::string>& boot_class_path_locations,
283            ArrayRef<File> boot_class_path_files,
284            ArrayRef<File> boot_class_path_image_files,
285            ArrayRef<File> boot_class_path_vdex_files,
286            ArrayRef<File> boot_class_path_oat_files,
287            const std::vector<std::string>& image_file_names,
288            const InstructionSet image_instruction_set,
289            CollectorType foreground_collector_type,
290            CollectorType background_collector_type,
291            space::LargeObjectSpaceType large_object_space_type,
292            size_t large_object_threshold,
293            size_t parallel_gc_threads,
294            size_t conc_gc_threads,
295            bool low_memory_mode,
296            size_t long_pause_log_threshold,
297            size_t long_gc_log_threshold,
298            bool ignore_target_footprint,
299            bool always_log_explicit_gcs,
300            bool use_tlab,
301            bool verify_pre_gc_heap,
302            bool verify_pre_sweeping_heap,
303            bool verify_post_gc_heap,
304            bool verify_pre_gc_rosalloc,
305            bool verify_pre_sweeping_rosalloc,
306            bool verify_post_gc_rosalloc,
307            bool gc_stress_mode,
308            bool measure_gc_performance,
309            bool use_homogeneous_space_compaction_for_oom,
310            bool use_generational_cc,
311            uint64_t min_interval_homogeneous_space_compaction_by_oom,
312            bool dump_region_info_before_gc,
313            bool dump_region_info_after_gc)
314     : non_moving_space_(nullptr),
315       rosalloc_space_(nullptr),
316       dlmalloc_space_(nullptr),
317       main_space_(nullptr),
318       collector_type_(kCollectorTypeNone),
319       foreground_collector_type_(foreground_collector_type),
320       background_collector_type_(background_collector_type),
321       desired_collector_type_(foreground_collector_type_),
322       pending_task_lock_(nullptr),
323       parallel_gc_threads_(parallel_gc_threads),
324       conc_gc_threads_(conc_gc_threads),
325       low_memory_mode_(low_memory_mode),
326       long_pause_log_threshold_(long_pause_log_threshold),
327       long_gc_log_threshold_(long_gc_log_threshold),
328       process_cpu_start_time_ns_(ProcessCpuNanoTime()),
329       pre_gc_last_process_cpu_time_ns_(process_cpu_start_time_ns_),
330       post_gc_last_process_cpu_time_ns_(process_cpu_start_time_ns_),
331       pre_gc_weighted_allocated_bytes_(0.0),
332       post_gc_weighted_allocated_bytes_(0.0),
333       ignore_target_footprint_(ignore_target_footprint),
334       always_log_explicit_gcs_(always_log_explicit_gcs),
335       zygote_creation_lock_("zygote creation lock", kZygoteCreationLock),
336       zygote_space_(nullptr),
337       large_object_threshold_(large_object_threshold),
338       disable_thread_flip_count_(0),
339       thread_flip_running_(false),
340       collector_type_running_(kCollectorTypeNone),
341       last_gc_cause_(kGcCauseNone),
342       thread_running_gc_(nullptr),
343       last_gc_type_(collector::kGcTypeNone),
344       next_gc_type_(collector::kGcTypePartial),
345       capacity_(capacity),
346       growth_limit_(growth_limit),
347       initial_heap_size_(initial_size),
348       target_footprint_(initial_size),
349       // Use kPostMonitorLock, since a lock at kDefaultMutexLevel is acquired after
350       // this one.
351       process_state_update_lock_("process state update lock", kPostMonitorLock),
352       min_foreground_target_footprint_(0),
353       min_foreground_concurrent_start_bytes_(0),
354       concurrent_start_bytes_(std::numeric_limits<size_t>::max()),
355       total_bytes_freed_ever_(0),
356       total_objects_freed_ever_(0),
357       num_bytes_allocated_(0),
358       native_bytes_registered_(0),
359       old_native_bytes_allocated_(0),
360       native_objects_notified_(0),
361       num_bytes_freed_revoke_(0),
362       num_bytes_alive_after_gc_(0),
363       verify_missing_card_marks_(false),
364       verify_system_weaks_(false),
365       verify_pre_gc_heap_(verify_pre_gc_heap),
366       verify_pre_sweeping_heap_(verify_pre_sweeping_heap),
367       verify_post_gc_heap_(verify_post_gc_heap),
368       verify_mod_union_table_(false),
369       verify_pre_gc_rosalloc_(verify_pre_gc_rosalloc),
370       verify_pre_sweeping_rosalloc_(verify_pre_sweeping_rosalloc),
371       verify_post_gc_rosalloc_(verify_post_gc_rosalloc),
372       gc_stress_mode_(gc_stress_mode),
373       /* For GC a lot mode, we limit the allocation stacks to be kGcAlotInterval allocations. This
374        * causes a lot of GC since we do a GC for alloc whenever the stack is full. When heap
375        * verification is enabled, we limit the size of allocation stacks to speed up their
376        * searching.
377        */
378       max_allocation_stack_size_(kGCALotMode
379           ? kGcAlotAllocationStackSize
380           : (kVerifyObjectSupport > kVerifyObjectModeFast)
381               ? kVerifyObjectAllocationStackSize
382               : kDefaultAllocationStackSize),
383       current_allocator_(kAllocatorTypeDlMalloc),
384       current_non_moving_allocator_(kAllocatorTypeNonMoving),
385       bump_pointer_space_(nullptr),
386       temp_space_(nullptr),
387       region_space_(nullptr),
388       min_free_(min_free),
389       max_free_(max_free),
390       target_utilization_(target_utilization),
391       foreground_heap_growth_multiplier_(foreground_heap_growth_multiplier),
392       stop_for_native_allocs_(stop_for_native_allocs),
393       total_wait_time_(0),
394       verify_object_mode_(kVerifyObjectModeDisabled),
395       disable_moving_gc_count_(0),
396       semi_space_collector_(nullptr),
397       active_concurrent_copying_collector_(nullptr),
398       young_concurrent_copying_collector_(nullptr),
399       concurrent_copying_collector_(nullptr),
400       is_running_on_memory_tool_(Runtime::Current()->IsRunningOnMemoryTool()),
401       use_tlab_(use_tlab),
402       main_space_backup_(nullptr),
403       min_interval_homogeneous_space_compaction_by_oom_(
404           min_interval_homogeneous_space_compaction_by_oom),
405       last_time_homogeneous_space_compaction_by_oom_(NanoTime()),
406       gcs_completed_(0u),
407       max_gc_requested_(0u),
408       pending_collector_transition_(nullptr),
409       pending_heap_trim_(nullptr),
410       use_homogeneous_space_compaction_for_oom_(use_homogeneous_space_compaction_for_oom),
411       use_generational_cc_(use_generational_cc),
412       running_collection_is_blocking_(false),
413       blocking_gc_count_(0U),
414       blocking_gc_time_(0U),
415       last_update_time_gc_count_rate_histograms_(  // Round down by the window duration.
416           (NanoTime() / kGcCountRateHistogramWindowDuration) * kGcCountRateHistogramWindowDuration),
417       gc_count_last_window_(0U),
418       blocking_gc_count_last_window_(0U),
419       gc_count_rate_histogram_("gc count rate histogram", 1U, kGcCountRateMaxBucketCount),
420       blocking_gc_count_rate_histogram_(
421           "blocking gc count rate histogram", 1U, kGcCountRateMaxBucketCount),
422       alloc_tracking_enabled_(false),
423       alloc_record_depth_(AllocRecordObjectMap::kDefaultAllocStackDepth),
424       backtrace_lock_(nullptr),
425       seen_backtrace_count_(0u),
426       unique_backtrace_count_(0u),
427       gc_disabled_for_shutdown_(false),
428       dump_region_info_before_gc_(dump_region_info_before_gc),
429       dump_region_info_after_gc_(dump_region_info_after_gc),
430       boot_image_spaces_(),
431       boot_images_start_address_(0u),
432       boot_images_size_(0u),
433       pre_oome_gc_count_(0u) {
434   if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
435     LOG(INFO) << "Heap() entering";
436   }
437 
438   LOG(INFO) << "Using " << foreground_collector_type_ << " GC.";
439   if (gUseUserfaultfd) {
440     CHECK_EQ(foreground_collector_type_, kCollectorTypeCMC);
441     CHECK_EQ(background_collector_type_, kCollectorTypeCMCBackground);
442   } else {
443     // This ensures that userfaultfd syscall is done before any seccomp filter is installed.
444     // TODO(b/266731037): Remove this when we no longer need to collect metric on userfaultfd
445     // support.
446     auto [uffd_supported, minor_fault_supported] = collector::MarkCompact::GetUffdAndMinorFault();
447     // The check is just to ensure that the compiler doesn't eliminate the function call above.
448     // Userfaultfd support is certain to be there if its minor-fault feature is supported.
449     CHECK_IMPLIES(minor_fault_supported, uffd_supported);
450   }
451 
452   if (gUseReadBarrier) {
453     CHECK_EQ(foreground_collector_type_, kCollectorTypeCC);
454     CHECK_EQ(background_collector_type_, kCollectorTypeCCBackground);
455   } else if (background_collector_type_ != gc::kCollectorTypeHomogeneousSpaceCompact) {
456     CHECK_EQ(IsMovingGc(foreground_collector_type_), IsMovingGc(background_collector_type_))
457         << "Changing from " << foreground_collector_type_ << " to "
458         << background_collector_type_ << " (or vice versa) is not supported.";
459   }
460   verification_.reset(new Verification(this));
461   CHECK_GE(large_object_threshold, kMinLargeObjectThreshold);
462   ScopedTrace trace(__FUNCTION__);
463   Runtime* const runtime = Runtime::Current();
464   // If we aren't the zygote, switch to the default non zygote allocator. This may update the
465   // entrypoints.
466   const bool is_zygote = runtime->IsZygote();
467   if (!is_zygote) {
468     // Background compaction is currently not supported for command line runs.
469     if (background_collector_type_ != foreground_collector_type_) {
470       VLOG(heap) << "Disabling background compaction for non zygote";
471       background_collector_type_ = foreground_collector_type_;
472     }
473   }
474   ChangeCollector(desired_collector_type_);
475   live_bitmap_.reset(new accounting::HeapBitmap(this));
476   mark_bitmap_.reset(new accounting::HeapBitmap(this));
477 
478   // We don't have hspace compaction enabled with CC.
479   if (foreground_collector_type_ == kCollectorTypeCC
480       || foreground_collector_type_ == kCollectorTypeCMC) {
481     use_homogeneous_space_compaction_for_oom_ = false;
482   }
483   bool support_homogeneous_space_compaction =
484       background_collector_type_ == gc::kCollectorTypeHomogeneousSpaceCompact ||
485       use_homogeneous_space_compaction_for_oom_;
486   // We may use the same space (the main space) for the non moving space if we don't need to compact
487   // from the main space.
488   // This is not the case if we support homogeneous compaction or have a moving background
489   // collector type.
490   bool separate_non_moving_space = is_zygote ||
491       support_homogeneous_space_compaction || IsMovingGc(foreground_collector_type_) ||
492       IsMovingGc(background_collector_type_);
493 
494   // Requested begin for the alloc space, to follow the mapped image and oat files
495   uint8_t* request_begin = nullptr;
496   // Calculate the extra space required after the boot image, see allocations below.
497   size_t heap_reservation_size = 0u;
498   if (separate_non_moving_space) {
499     heap_reservation_size = non_moving_space_capacity;
500   } else if (foreground_collector_type_ != kCollectorTypeCC && is_zygote) {
501     heap_reservation_size = capacity_;
502   }
503   heap_reservation_size = RoundUp(heap_reservation_size, gPageSize);
504   // Load image space(s).
505   std::vector<std::unique_ptr<space::ImageSpace>> boot_image_spaces;
506   MemMap heap_reservation;
507   if (space::ImageSpace::LoadBootImage(boot_class_path,
508                                        boot_class_path_locations,
509                                        boot_class_path_files,
510                                        boot_class_path_image_files,
511                                        boot_class_path_vdex_files,
512                                        boot_class_path_oat_files,
513                                        image_file_names,
514                                        image_instruction_set,
515                                        runtime->ShouldRelocate(),
516                                        /*executable=*/!runtime->IsAotCompiler(),
517                                        heap_reservation_size,
518                                        runtime->AllowInMemoryCompilation(),
519                                        runtime->GetApexVersions(),
520                                        &boot_image_spaces,
521                                        &heap_reservation)) {
522     DCHECK_EQ(heap_reservation_size, heap_reservation.IsValid() ? heap_reservation.Size() : 0u);
523     DCHECK(!boot_image_spaces.empty());
524     request_begin = boot_image_spaces.back()->GetImageHeader().GetOatFileEnd();
525     DCHECK_IMPLIES(heap_reservation.IsValid(), request_begin == heap_reservation.Begin())
526         << "request_begin=" << static_cast<const void*>(request_begin)
527         << " heap_reservation.Begin()=" << static_cast<const void*>(heap_reservation.Begin());
528     for (std::unique_ptr<space::ImageSpace>& space : boot_image_spaces) {
529       boot_image_spaces_.push_back(space.get());
530       AddSpace(space.release());
531     }
532     boot_images_start_address_ = PointerToLowMemUInt32(boot_image_spaces_.front()->Begin());
533     uint32_t boot_images_end =
534         PointerToLowMemUInt32(boot_image_spaces_.back()->GetImageHeader().GetOatFileEnd());
535     boot_images_size_ = boot_images_end - boot_images_start_address_;
536     if (kIsDebugBuild) {
537       VerifyBootImagesContiguity(boot_image_spaces_);
538     }
539   } else {
540     if (foreground_collector_type_ == kCollectorTypeCC) {
541       // Need to use a low address so that we can allocate a contiguous 2 * Xmx space
542       // when there's no image (dex2oat for target).
543       request_begin = kPreferredAllocSpaceBegin;
544     }
545     // Gross hack to make dex2oat deterministic.
546     if (foreground_collector_type_ == kCollectorTypeMS && Runtime::Current()->IsAotCompiler()) {
547       // Currently only enabled for MS collector since that is what the deterministic dex2oat uses.
548       // b/26849108
549       request_begin = reinterpret_cast<uint8_t*>(kAllocSpaceBeginForDeterministicAoT);
550     }
551   }
552 
553   /*
554   requested_alloc_space_begin ->     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
555                                      +-  nonmoving space (non_moving_space_capacity)+-
556                                      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
557                                      +-????????????????????????????????????????????+-
558                                      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
559                                      +-main alloc space / bump space 1 (capacity_) +-
560                                      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
561                                      +-????????????????????????????????????????????+-
562                                      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
563                                      +-main alloc space2 / bump space 2 (capacity_)+-
564                                      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
565   */
566 
567   MemMap main_mem_map_1;
568   MemMap main_mem_map_2;
569 
570   std::string error_str;
571   MemMap non_moving_space_mem_map;
572   if (separate_non_moving_space) {
573     ScopedTrace trace2("Create separate non moving space");
574     // If we are the zygote, the non moving space becomes the zygote space when we run
575     // PreZygoteFork the first time. In this case, call the map "zygote space" since we can't
576     // rename the mem map later.
577     const char* space_name = is_zygote ? kZygoteSpaceName : kNonMovingSpaceName;
578     // Reserve the non moving mem map before the other two since it needs to be at a specific
579     // address.
580     DCHECK_EQ(heap_reservation.IsValid(), !boot_image_spaces_.empty());
581     if (heap_reservation.IsValid()) {
582       non_moving_space_mem_map = heap_reservation.RemapAtEnd(
583           heap_reservation.Begin(), space_name, PROT_READ | PROT_WRITE, &error_str);
584     } else {
585       non_moving_space_mem_map = MapAnonymousPreferredAddress(
586           space_name, request_begin, non_moving_space_capacity, &error_str);
587     }
588     CHECK(non_moving_space_mem_map.IsValid()) << error_str;
589     DCHECK(!heap_reservation.IsValid());
590     // Try to reserve virtual memory at a lower address if we have a separate non moving space.
591     request_begin = non_moving_space_mem_map.Begin() == kPreferredAllocSpaceBegin
592                         ? non_moving_space_mem_map.End()
593                         : kPreferredAllocSpaceBegin;
594   }
595   // Attempt to create 2 mem maps at or after the requested begin.
596   if (foreground_collector_type_ != kCollectorTypeCC) {
597     ScopedTrace trace2("Create main mem map");
598     if (separate_non_moving_space || !is_zygote) {
599       main_mem_map_1 = MapAnonymousPreferredAddress(
600           kMemMapSpaceName[0], request_begin, capacity_, &error_str);
601     } else {
602       // If no separate non-moving space and we are the zygote, the main space must come right after
603       // the image space to avoid a gap. This is required since we want the zygote space to be
604       // adjacent to the image space.
605       DCHECK_EQ(heap_reservation.IsValid(), !boot_image_spaces_.empty());
606       main_mem_map_1 = MemMap::MapAnonymous(
607           kMemMapSpaceName[0],
608           request_begin,
609           capacity_,
610           PROT_READ | PROT_WRITE,
611           /* low_4gb= */ true,
612           /* reuse= */ false,
613           heap_reservation.IsValid() ? &heap_reservation : nullptr,
614           &error_str);
615     }
616     CHECK(main_mem_map_1.IsValid()) << error_str;
617     DCHECK(!heap_reservation.IsValid());
618   }
619   if (support_homogeneous_space_compaction ||
620       background_collector_type_ == kCollectorTypeSS ||
621       foreground_collector_type_ == kCollectorTypeSS) {
622     ScopedTrace trace2("Create main mem map 2");
623     main_mem_map_2 = MapAnonymousPreferredAddress(
624         kMemMapSpaceName[1], main_mem_map_1.End(), capacity_, &error_str);
625     CHECK(main_mem_map_2.IsValid()) << error_str;
626   }
627 
628   // Create the non moving space first so that bitmaps don't take up the address range.
629   if (separate_non_moving_space) {
630     ScopedTrace trace2("Add non moving space");
631     // Non moving space is always dlmalloc since we currently don't have support for multiple
632     // active rosalloc spaces.
633     const size_t size = non_moving_space_mem_map.Size();
634     const void* non_moving_space_mem_map_begin = non_moving_space_mem_map.Begin();
635     non_moving_space_ = space::DlMallocSpace::CreateFromMemMap(std::move(non_moving_space_mem_map),
636                                                                "zygote / non moving space",
637                                                                GetDefaultStartingSize(),
638                                                                initial_size,
639                                                                size,
640                                                                size,
641                                                                /* can_move_objects= */ false);
642     CHECK(non_moving_space_ != nullptr) << "Failed creating non moving space "
643         << non_moving_space_mem_map_begin;
644     non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
645     AddSpace(non_moving_space_);
646   }
647   // Create other spaces based on whether or not we have a moving GC.
648   if (foreground_collector_type_ == kCollectorTypeCC) {
649     CHECK(separate_non_moving_space);
650     // Reserve twice the capacity, to allow evacuating every region for explicit GCs.
651     MemMap region_space_mem_map =
652         space::RegionSpace::CreateMemMap(kRegionSpaceName, capacity_ * 2, request_begin);
653     CHECK(region_space_mem_map.IsValid()) << "No region space mem map";
654     region_space_ = space::RegionSpace::Create(
655         kRegionSpaceName, std::move(region_space_mem_map), use_generational_cc_);
656     AddSpace(region_space_);
657   } else if (IsMovingGc(foreground_collector_type_)) {
658     // Create bump pointer spaces.
659     // We only create the bump pointer spaces if the foreground collector is a compacting GC.
660     // TODO: Place bump-pointer spaces somewhere to minimize size of card table.
661     bump_pointer_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 1",
662                                                                     std::move(main_mem_map_1));
663     CHECK(bump_pointer_space_ != nullptr) << "Failed to create bump pointer space";
664     AddSpace(bump_pointer_space_);
665     // For Concurrent Mark-compact GC we don't need the temp space to be in
666     // lower 4GB. So its temp space will be created by the GC itself.
667     if (foreground_collector_type_ != kCollectorTypeCMC) {
668       temp_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 2",
669                                                               std::move(main_mem_map_2));
670       CHECK(temp_space_ != nullptr) << "Failed to create bump pointer space";
671       AddSpace(temp_space_);
672     }
673     CHECK(separate_non_moving_space);
674   } else {
675     CreateMainMallocSpace(std::move(main_mem_map_1), initial_size, growth_limit_, capacity_);
676     CHECK(main_space_ != nullptr);
677     AddSpace(main_space_);
678     if (!separate_non_moving_space) {
679       non_moving_space_ = main_space_;
680       CHECK(!non_moving_space_->CanMoveObjects());
681     }
682     if (main_mem_map_2.IsValid()) {
683       const char* name = kUseRosAlloc ? kRosAllocSpaceName[1] : kDlMallocSpaceName[1];
684       main_space_backup_.reset(CreateMallocSpaceFromMemMap(std::move(main_mem_map_2),
685                                                            initial_size,
686                                                            growth_limit_,
687                                                            capacity_,
688                                                            name,
689                                                            /* can_move_objects= */ true));
690       CHECK(main_space_backup_.get() != nullptr);
691       // Add the space so it's accounted for in the heap_begin and heap_end.
692       AddSpace(main_space_backup_.get());
693     }
694   }
695   CHECK(non_moving_space_ != nullptr);
696   CHECK(!non_moving_space_->CanMoveObjects());
697   // Allocate the large object space.
698   if (large_object_space_type == space::LargeObjectSpaceType::kFreeList) {
699     large_object_space_ = space::FreeListSpace::Create("free list large object space", capacity_);
700     CHECK(large_object_space_ != nullptr) << "Failed to create large object space";
701   } else if (large_object_space_type == space::LargeObjectSpaceType::kMap) {
702     large_object_space_ = space::LargeObjectMapSpace::Create("mem map large object space");
703     CHECK(large_object_space_ != nullptr) << "Failed to create large object space";
704   } else {
705     // Disable the large object space by making the cutoff excessively large.
706     large_object_threshold_ = std::numeric_limits<size_t>::max();
707     large_object_space_ = nullptr;
708   }
709   if (large_object_space_ != nullptr) {
710     AddSpace(large_object_space_);
711   }
712   // Compute heap capacity. Continuous spaces are sorted in order of Begin().
713   CHECK(!continuous_spaces_.empty());
714   // Relies on the spaces being sorted.
715   uint8_t* heap_begin = continuous_spaces_.front()->Begin();
716   uint8_t* heap_end = continuous_spaces_.back()->Limit();
717   size_t heap_capacity = heap_end - heap_begin;
718   // Remove the main backup space since it slows down the GC to have unused extra spaces.
719   // TODO: Avoid needing to do this.
720   if (main_space_backup_.get() != nullptr) {
721     RemoveSpace(main_space_backup_.get());
722   }
723   // Allocate the card table.
724   // We currently don't support dynamically resizing the card table.
725   // Since we don't know where in the low_4gb the app image will be located, make the card table
726   // cover the whole low_4gb. TODO: Extend the card table in AddSpace.
727   UNUSED(heap_capacity);
728   // Start at 4 KB, we can be sure there are no spaces mapped this low since the address range is
729   // reserved by the kernel.
730   static constexpr size_t kMinHeapAddress = 4 * KB;
731   card_table_.reset(accounting::CardTable::Create(reinterpret_cast<uint8_t*>(kMinHeapAddress),
732                                                   4 * GB - kMinHeapAddress));
733   CHECK(card_table_.get() != nullptr) << "Failed to create card table";
734   if (foreground_collector_type_ == kCollectorTypeCC && kUseTableLookupReadBarrier) {
735     rb_table_.reset(new accounting::ReadBarrierTable());
736     DCHECK(rb_table_->IsAllCleared());
737   }
738   if (HasBootImageSpace()) {
739     // Don't add the image mod union table if we are running without an image, this can crash if
740     // we use the CardCache implementation.
741     for (space::ImageSpace* image_space : GetBootImageSpaces()) {
742       accounting::ModUnionTable* mod_union_table = new accounting::ModUnionTableToZygoteAllocspace(
743           "Image mod-union table", this, image_space);
744       CHECK(mod_union_table != nullptr) << "Failed to create image mod-union table";
745       AddModUnionTable(mod_union_table);
746     }
747   }
748   if (collector::SemiSpace::kUseRememberedSet && non_moving_space_ != main_space_) {
749     accounting::RememberedSet* non_moving_space_rem_set =
750         new accounting::RememberedSet("Non-moving space remembered set", this, non_moving_space_);
751     CHECK(non_moving_space_rem_set != nullptr) << "Failed to create non-moving space remembered set";
752     AddRememberedSet(non_moving_space_rem_set);
753   }
754   // TODO: Count objects in the image space here?
755   num_bytes_allocated_.store(0, std::memory_order_relaxed);
756   mark_stack_.reset(accounting::ObjectStack::Create("mark stack", kDefaultMarkStackSize,
757                                                     kDefaultMarkStackSize));
758   const size_t alloc_stack_capacity = max_allocation_stack_size_ + kAllocationStackReserveSize;
759   allocation_stack_.reset(accounting::ObjectStack::Create(
760       "allocation stack", max_allocation_stack_size_, alloc_stack_capacity));
761   live_stack_.reset(accounting::ObjectStack::Create(
762       "live stack", max_allocation_stack_size_, alloc_stack_capacity));
763   // It's still too early to take a lock because there are no threads yet, but we can create locks
764   // now. We don't create them earlier to make it clear that you can't use locks during heap
765   // initialization.
766   gc_complete_lock_ = new Mutex("GC complete lock");
767   gc_complete_cond_.reset(new ConditionVariable("GC complete condition variable",
768                                                 *gc_complete_lock_));
769 
770   thread_flip_lock_ = new Mutex("GC thread flip lock");
771   thread_flip_cond_.reset(new ConditionVariable("GC thread flip condition variable",
772                                                 *thread_flip_lock_));
773   task_processor_.reset(new TaskProcessor());
774   reference_processor_.reset(new ReferenceProcessor());
775   pending_task_lock_ = new Mutex("Pending task lock");
776   if (ignore_target_footprint_) {
777     SetIdealFootprint(std::numeric_limits<size_t>::max());
778     concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
779   }
780   CHECK_NE(target_footprint_.load(std::memory_order_relaxed), 0U);
781   // Create our garbage collectors.
782   for (size_t i = 0; i < 2; ++i) {
783     const bool concurrent = i != 0;
784     if ((MayUseCollector(kCollectorTypeCMS) && concurrent) ||
785         (MayUseCollector(kCollectorTypeMS) && !concurrent)) {
786       garbage_collectors_.push_back(new collector::MarkSweep(this, concurrent));
787       garbage_collectors_.push_back(new collector::PartialMarkSweep(this, concurrent));
788       garbage_collectors_.push_back(new collector::StickyMarkSweep(this, concurrent));
789     }
790   }
791   if (kMovingCollector) {
792     if (MayUseCollector(kCollectorTypeSS) ||
793         MayUseCollector(kCollectorTypeHomogeneousSpaceCompact) ||
794         use_homogeneous_space_compaction_for_oom_) {
795       semi_space_collector_ = new collector::SemiSpace(this);
796       garbage_collectors_.push_back(semi_space_collector_);
797     }
798     if (MayUseCollector(kCollectorTypeCMC)) {
799       mark_compact_ = new collector::MarkCompact(this);
800       garbage_collectors_.push_back(mark_compact_);
801     }
802     if (MayUseCollector(kCollectorTypeCC)) {
803       concurrent_copying_collector_ = new collector::ConcurrentCopying(this,
804                                                                        /*young_gen=*/false,
805                                                                        use_generational_cc_,
806                                                                        "",
807                                                                        measure_gc_performance);
808       if (use_generational_cc_) {
809         young_concurrent_copying_collector_ = new collector::ConcurrentCopying(
810             this,
811             /*young_gen=*/true,
812             use_generational_cc_,
813             "young",
814             measure_gc_performance);
815       }
816       active_concurrent_copying_collector_.store(concurrent_copying_collector_,
817                                                  std::memory_order_relaxed);
818       DCHECK(region_space_ != nullptr);
819       concurrent_copying_collector_->SetRegionSpace(region_space_);
820       if (use_generational_cc_) {
821         young_concurrent_copying_collector_->SetRegionSpace(region_space_);
822         // At this point, non-moving space should be created.
823         DCHECK(non_moving_space_ != nullptr);
824         concurrent_copying_collector_->CreateInterRegionRefBitmaps();
825       }
826       garbage_collectors_.push_back(concurrent_copying_collector_);
827       if (use_generational_cc_) {
828         garbage_collectors_.push_back(young_concurrent_copying_collector_);
829       }
830     }
831   }
832   if (!GetBootImageSpaces().empty() && non_moving_space_ != nullptr &&
833       (is_zygote || separate_non_moving_space)) {
834     // Check that there's no gap between the image space and the non moving space so that the
835     // immune region won't break (e.g. due to a large object allocated in the gap). This is only
836     // required when we're the zygote.
837     // Space with smallest Begin().
838     space::ImageSpace* first_space = nullptr;
839     for (space::ImageSpace* space : boot_image_spaces_) {
840       if (first_space == nullptr || space->Begin() < first_space->Begin()) {
841         first_space = space;
842       }
843     }
844     bool no_gap = MemMap::CheckNoGaps(*first_space->GetMemMap(), *non_moving_space_->GetMemMap());
845     if (!no_gap) {
846       PrintFileToLog("/proc/self/maps", LogSeverity::ERROR);
847       MemMap::DumpMaps(LOG_STREAM(ERROR), /* terse= */ true);
848       LOG(FATAL) << "There's a gap between the image space and the non-moving space";
849     }
850   }
851   // Perfetto Java Heap Profiler Support.
852   if (runtime->IsPerfettoJavaHeapStackProfEnabled()) {
853     // Perfetto Plugin is loaded and enabled, initialize the Java Heap Profiler.
854     InitPerfettoJavaHeapProf();
855   } else {
856     // Disable the Java Heap Profiler.
857     GetHeapSampler().DisableHeapSampler();
858   }
859 
860   instrumentation::Instrumentation* const instrumentation = runtime->GetInstrumentation();
861   if (gc_stress_mode_) {
862     backtrace_lock_ = new Mutex("GC complete lock");
863   }
864   if (is_running_on_memory_tool_ || gc_stress_mode_) {
865     instrumentation->InstrumentQuickAllocEntryPoints();
866   }
867   if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
868     LOG(INFO) << "Heap() exiting";
869   }
870 }
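// Editor's summary of the constructor above (non-authoritative): (1) validate the collector-type
// combinations, (2) load boot image spaces and take over the trailing heap reservation,
// (3) reserve the non-moving / main / backup mem maps, (4) create the region, bump-pointer or
// malloc spaces plus the large object space, (5) create the card table, mod-union tables and
// remembered sets, (6) create the mark/allocation/live stacks, locks, and task/reference
// processors, (7) instantiate the garbage collectors, and (8) set up Perfetto heap sampling and
// allocation-entrypoint instrumentation.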
871 
872 MemMap Heap::MapAnonymousPreferredAddress(const char* name,
873                                           uint8_t* request_begin,
874                                           size_t capacity,
875                                           std::string* out_error_str) {
876   while (true) {
877     MemMap map = MemMap::MapAnonymous(name,
878                                       request_begin,
879                                       capacity,
880                                       PROT_READ | PROT_WRITE,
881                                       /*low_4gb=*/ true,
882                                       /*reuse=*/ false,
883                                       /*reservation=*/ nullptr,
884                                       out_error_str);
885     if (map.IsValid() || request_begin == nullptr) {
886       return map;
887     }
888     // Retry a second time with no specified request begin.
889     request_begin = nullptr;
890   }
891 }
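// Illustrative usage (mirrors the call sites in the constructor above): try the preferred address
// first; on failure the loop retries once with request_begin == nullptr so the kernel picks any
// low-4GB address.
//   std::string error;
//   MemMap map = MapAnonymousPreferredAddress(kMemMapSpaceName[0], request_begin, capacity_,
//                                             &error);
//   CHECK(map.IsValid()) << error;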
892 
893 bool Heap::MayUseCollector(CollectorType type) const {
894   return foreground_collector_type_ == type || background_collector_type_ == type;
895 }
896 
897 space::MallocSpace* Heap::CreateMallocSpaceFromMemMap(MemMap&& mem_map,
898                                                       size_t initial_size,
899                                                       size_t growth_limit,
900                                                       size_t capacity,
901                                                       const char* name,
902                                                       bool can_move_objects) {
903   space::MallocSpace* malloc_space = nullptr;
904   if (kUseRosAlloc) {
905     // Create rosalloc space.
906     malloc_space = space::RosAllocSpace::CreateFromMemMap(std::move(mem_map),
907                                                           name,
908                                                           GetDefaultStartingSize(),
909                                                           initial_size,
910                                                           growth_limit,
911                                                           capacity,
912                                                           low_memory_mode_,
913                                                           can_move_objects);
914   } else {
915     malloc_space = space::DlMallocSpace::CreateFromMemMap(std::move(mem_map),
916                                                           name,
917                                                           GetDefaultStartingSize(),
918                                                           initial_size,
919                                                           growth_limit,
920                                                           capacity,
921                                                           can_move_objects);
922   }
923   if (collector::SemiSpace::kUseRememberedSet) {
924     accounting::RememberedSet* rem_set  =
925         new accounting::RememberedSet(std::string(name) + " remembered set", this, malloc_space);
926     CHECK(rem_set != nullptr) << "Failed to create main space remembered set";
927     AddRememberedSet(rem_set);
928   }
929   CHECK(malloc_space != nullptr) << "Failed to create " << name;
930   malloc_space->SetFootprintLimit(malloc_space->Capacity());
931   return malloc_space;
932 }
933 
934 void Heap::CreateMainMallocSpace(MemMap&& mem_map,
935                                  size_t initial_size,
936                                  size_t growth_limit,
937                                  size_t capacity) {
938   // Is background compaction enabled?
939   bool can_move_objects = IsMovingGc(background_collector_type_) !=
940       IsMovingGc(foreground_collector_type_) || use_homogeneous_space_compaction_for_oom_;
941   // If we are the zygote and don't yet have a zygote space, it means that the zygote fork will
942   // happen in the future. If this happens and we have kCompactZygote enabled we wish to compact
943   // from the main space to the zygote space. If background compaction is enabled, always pass in
944   // that we can move objects.
945   if (kCompactZygote && Runtime::Current()->IsZygote() && !can_move_objects) {
946     // After the zygote we want this to be false if we don't have background compaction enabled so
947     // that getting primitive array elements is faster.
948     can_move_objects = !HasZygoteSpace();
949   }
950   if (collector::SemiSpace::kUseRememberedSet && main_space_ != nullptr) {
951     RemoveRememberedSet(main_space_);
952   }
953   const char* name = kUseRosAlloc ? kRosAllocSpaceName[0] : kDlMallocSpaceName[0];
954   main_space_ = CreateMallocSpaceFromMemMap(std::move(mem_map),
955                                             initial_size,
956                                             growth_limit,
957                                             capacity, name,
958                                             can_move_objects);
959   SetSpaceAsDefault(main_space_);
960   VLOG(heap) << "Created main space " << main_space_;
961 }
962 
963 void Heap::ChangeAllocator(AllocatorType allocator) {
964   if (current_allocator_ != allocator) {
965     // These two allocators are only used internally and don't have any entrypoints.
966     CHECK_NE(allocator, kAllocatorTypeLOS);
967     CHECK_NE(allocator, kAllocatorTypeNonMoving);
968     current_allocator_ = allocator;
969     MutexLock mu(nullptr, *Locks::runtime_shutdown_lock_);
970     SetQuickAllocEntryPointsAllocator(current_allocator_);
971     Runtime::Current()->GetInstrumentation()->ResetQuickAllocEntryPoints();
972   }
973 }
974 
975 bool Heap::IsCompilingBoot() const {
976   if (!Runtime::Current()->IsAotCompiler()) {
977     return false;
978   }
979   ScopedObjectAccess soa(Thread::Current());
980   for (const auto& space : continuous_spaces_) {
981     if (space->IsImageSpace() || space->IsZygoteSpace()) {
982       return false;
983     }
984   }
985   return true;
986 }
987 
988 void Heap::IncrementDisableMovingGC(Thread* self) {
989   // Need to do this holding the lock to prevent races where the GC is about to run / running when
990   // we attempt to disable it.
991   ScopedThreadStateChange tsc(self, ThreadState::kWaitingForGcToComplete);
992   MutexLock mu(self, *gc_complete_lock_);
993   ++disable_moving_gc_count_;
994   if (IsMovingGc(collector_type_running_)) {
995     WaitForGcToCompleteLocked(kGcCauseDisableMovingGc, self);
996   }
997 }
998 
999 void Heap::DecrementDisableMovingGC(Thread* self) {
1000   MutexLock mu(self, *gc_complete_lock_);
1001   CHECK_GT(disable_moving_gc_count_, 0U);
1002   --disable_moving_gc_count_;
1003 }
1004 
1005 void Heap::IncrementDisableThreadFlip(Thread* self) {
1006   // Supposed to be called by mutators. If thread_flip_running_ is true, block. Otherwise, go ahead.
1007   bool is_nested = self->GetDisableThreadFlipCount() > 0;
1008   self->IncrementDisableThreadFlipCount();
1009   if (is_nested) {
1010     // If this is a nested JNI critical section enter, we don't need to wait or increment the global
1011     // counter. The global counter is incremented only once for a thread for the outermost enter.
1012     return;
1013   }
1014   ScopedThreadStateChange tsc(self, ThreadState::kWaitingForGcThreadFlip);
1015   MutexLock mu(self, *thread_flip_lock_);
1016   thread_flip_cond_->CheckSafeToWait(self);
1017   bool has_waited = false;
1018   uint64_t wait_start = 0;
1019   if (thread_flip_running_) {
1020     wait_start = NanoTime();
1021     ScopedTrace trace("IncrementDisableThreadFlip");
1022     while (thread_flip_running_) {
1023       has_waited = true;
1024       thread_flip_cond_->Wait(self);
1025     }
1026   }
1027   ++disable_thread_flip_count_;
1028   if (has_waited) {
1029     uint64_t wait_time = NanoTime() - wait_start;
1030     total_wait_time_ += wait_time;
1031     if (wait_time > long_pause_log_threshold_) {
1032       LOG(INFO) << __FUNCTION__ << " blocked for " << PrettyDuration(wait_time);
1033     }
1034   }
1035 }
1036 
1037 void Heap::EnsureObjectUserfaulted(ObjPtr<mirror::Object> obj) {
1038   if (gUseUserfaultfd) {
1039     // Use volatile to ensure that compiler loads from memory to trigger userfaults, if required.
1040     const uint8_t* start = reinterpret_cast<uint8_t*>(obj.Ptr());
1041     const uint8_t* end = AlignUp(start + obj->SizeOf(), gPageSize);
1042     // The first page is already touched by SizeOf().
1043     start += gPageSize;
1044     while (start < end) {
1045       ForceRead(start);
1046       start += gPageSize;
1047     }
1048   }
1049 }
1050 
1051 void Heap::DecrementDisableThreadFlip(Thread* self) {
1052   // Supposed to be called by mutators. Decrement disable_thread_flip_count_ and potentially wake up
1053   // the GC waiting before doing a thread flip.
1054   self->DecrementDisableThreadFlipCount();
1055   bool is_outermost = self->GetDisableThreadFlipCount() == 0;
1056   if (!is_outermost) {
1057     // If this is not an outermost JNI critical exit, we don't need to decrement the global counter.
1058     // The global counter is decremented only once for a thread for the outermost exit.
1059     return;
1060   }
1061   MutexLock mu(self, *thread_flip_lock_);
1062   CHECK_GT(disable_thread_flip_count_, 0U);
1063   --disable_thread_flip_count_;
1064   if (disable_thread_flip_count_ == 0) {
1065     // Potentially notify the GC thread blocking to begin a thread flip.
1066     thread_flip_cond_->Broadcast(self);
1067   }
1068 }
1069 
1070 void Heap::ThreadFlipBegin(Thread* self) {
1071   // Supposed to be called by GC. Set thread_flip_running_ to be true. If disable_thread_flip_count_
1072   // > 0, block. Otherwise, go ahead.
1073   ScopedThreadStateChange tsc(self, ThreadState::kWaitingForGcThreadFlip);
1074   MutexLock mu(self, *thread_flip_lock_);
1075   thread_flip_cond_->CheckSafeToWait(self);
1076   bool has_waited = false;
1077   uint64_t wait_start = NanoTime();
1078   CHECK(!thread_flip_running_);
1079   // Set this to true before waiting so that frequent JNI critical enter/exits won't starve
1080   // GC. This is like a writer preference of a reader-writer lock.
1081   thread_flip_running_ = true;
1082   while (disable_thread_flip_count_ > 0) {
1083     has_waited = true;
1084     thread_flip_cond_->Wait(self);
1085   }
1086   if (has_waited) {
1087     uint64_t wait_time = NanoTime() - wait_start;
1088     total_wait_time_ += wait_time;
1089     if (wait_time > long_pause_log_threshold_) {
1090       LOG(INFO) << __FUNCTION__ << " blocked for " << PrettyDuration(wait_time);
1091     }
1092   }
1093 }
1094 
1095 void Heap::ThreadFlipEnd(Thread* self) {
1096   // Supposed to be called by GC. Set thread_flip_running_ to false and potentially wake up mutators
1097   // waiting before doing a JNI critical.
1098   MutexLock mu(self, *thread_flip_lock_);
1099   CHECK(thread_flip_running_);
1100   thread_flip_running_ = false;
1101   // Potentially notify mutator threads blocking to enter a JNI critical section.
1102   thread_flip_cond_->Broadcast(self);
1103 }
1104 
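// Called when switching to a jank-perceptible (foreground) state: raises target_footprint_ and
// concurrent_start_bytes_ to at least their foreground minimums so the process immediately gets
// foreground allocation headroom.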
1105 void Heap::GrowHeapOnJankPerceptibleSwitch() {
1106   MutexLock mu(Thread::Current(), process_state_update_lock_);
1107   size_t orig_target_footprint = target_footprint_.load(std::memory_order_relaxed);
1108   if (orig_target_footprint < min_foreground_target_footprint_) {
1109     target_footprint_.compare_exchange_strong(orig_target_footprint,
1110                                               min_foreground_target_footprint_,
1111                                               std::memory_order_relaxed);
1112   }
1113   if (IsGcConcurrent() && concurrent_start_bytes_ < min_foreground_concurrent_start_bytes_) {
1114     concurrent_start_bytes_ = min_foreground_concurrent_start_bytes_;
1115   }
1116 }
1117 
1118 void Heap::UpdateProcessState(ProcessState old_process_state, ProcessState new_process_state) {
1119   if (old_process_state != new_process_state) {
1120     const bool jank_perceptible = new_process_state == kProcessStateJankPerceptible;
1121     if (jank_perceptible) {
1122       // Transition back to foreground right away to prevent jank.
1123       RequestCollectorTransition(foreground_collector_type_, 0);
1124       GrowHeapOnJankPerceptibleSwitch();
1125     } else {
1126       // If background_collector_type_ is kCollectorTypeHomogeneousSpaceCompact then we have
1127       // special handling which does a homogeneous space compaction once but then doesn't transition
1128       // the collector. Similarly, we invoke a full compaction for kCollectorTypeCC but don't
1129       // transition the collector.
1130       RequestCollectorTransition(background_collector_type_, 0);
1131     }
1132   }
1133 }
1134 
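// A num_threads of zero means "use the configured GC thread counts": the pool is sized to the
// larger of parallel_gc_threads_ and conc_gc_threads_, and no pool is created if that is also zero.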
1135 void Heap::CreateThreadPool(size_t num_threads) {
1136   if (num_threads == 0) {
1137     num_threads = std::max(parallel_gc_threads_, conc_gc_threads_);
1138   }
1139   if (num_threads != 0) {
1140     thread_pool_.reset(ThreadPool::Create("Heap thread pool", num_threads));
1141   }
1142 }
1143 
1144 void Heap::WaitForWorkersToBeCreated() {
1145   DCHECK(!Runtime::Current()->IsShuttingDown(Thread::Current()))
1146       << "Cannot create new threads during runtime shutdown";
1147   if (thread_pool_ != nullptr) {
1148     thread_pool_->WaitForWorkersToBeCreated();
1149   }
1150 }
1151 
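// Marks every object recorded on the allocation stack as live in the bitmaps of the main
// (or non-moving) space, the non-moving space, and, if present, the large object space.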
1152 void Heap::MarkAllocStackAsLive(accounting::ObjectStack* stack) {
1153   space::ContinuousSpace* space1 = main_space_ != nullptr ? main_space_ : non_moving_space_;
1154   space::ContinuousSpace* space2 = non_moving_space_;
1155   // TODO: Generalize this to n bitmaps?
1156   CHECK(space1 != nullptr);
1157   CHECK(space2 != nullptr);
1158   MarkAllocStack(space1->GetLiveBitmap(), space2->GetLiveBitmap(),
1159                  (large_object_space_ != nullptr ? large_object_space_->GetLiveBitmap() : nullptr),
1160                  stack);
1161 }
1162 
1163 void Heap::DeleteThreadPool() {
1164   thread_pool_.reset(nullptr);
1165 }
1166 
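// Registers a space with the heap: hooks its live/mark bitmaps into the heap bitmaps (the region
// space is skipped, see below), keeps continuous_spaces_ sorted by start address, and tracks
// alloc spaces separately.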
1167 void Heap::AddSpace(space::Space* space) {
1168   CHECK(space != nullptr);
1169   WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
1170   if (space->IsContinuousSpace()) {
1171     DCHECK(!space->IsDiscontinuousSpace());
1172     space::ContinuousSpace* continuous_space = space->AsContinuousSpace();
1173     // Continuous spaces don't necessarily have bitmaps.
1174     accounting::ContinuousSpaceBitmap* live_bitmap = continuous_space->GetLiveBitmap();
1175     accounting::ContinuousSpaceBitmap* mark_bitmap = continuous_space->GetMarkBitmap();
1176     // The region space bitmap is not added since VisitObjects visits the region space objects with
1177     // special handling.
1178     if (live_bitmap != nullptr && !space->IsRegionSpace()) {
1179       CHECK(mark_bitmap != nullptr);
1180       live_bitmap_->AddContinuousSpaceBitmap(live_bitmap);
1181       mark_bitmap_->AddContinuousSpaceBitmap(mark_bitmap);
1182     }
1183     continuous_spaces_.push_back(continuous_space);
1184     // Ensure that spaces remain sorted in increasing order of start address.
1185     std::sort(continuous_spaces_.begin(), continuous_spaces_.end(),
1186               [](const space::ContinuousSpace* a, const space::ContinuousSpace* b) {
1187       return a->Begin() < b->Begin();
1188     });
1189   } else {
1190     CHECK(space->IsDiscontinuousSpace());
1191     space::DiscontinuousSpace* discontinuous_space = space->AsDiscontinuousSpace();
1192     live_bitmap_->AddLargeObjectBitmap(discontinuous_space->GetLiveBitmap());
1193     mark_bitmap_->AddLargeObjectBitmap(discontinuous_space->GetMarkBitmap());
1194     discontinuous_spaces_.push_back(discontinuous_space);
1195   }
1196   if (space->IsAllocSpace()) {
1197     alloc_spaces_.push_back(space->AsAllocSpace());
1198   }
1199 }
1200 
1201 void Heap::SetSpaceAsDefault(space::ContinuousSpace* continuous_space) {
1202   WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
1203   if (continuous_space->IsDlMallocSpace()) {
1204     dlmalloc_space_ = continuous_space->AsDlMallocSpace();
1205   } else if (continuous_space->IsRosAllocSpace()) {
1206     rosalloc_space_ = continuous_space->AsRosAllocSpace();
1207   }
1208 }
1209 
1210 void Heap::RemoveSpace(space::Space* space) {
1211   DCHECK(space != nullptr);
1212   WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
1213   if (space->IsContinuousSpace()) {
1214     DCHECK(!space->IsDiscontinuousSpace());
1215     space::ContinuousSpace* continuous_space = space->AsContinuousSpace();
1216     // Continuous spaces don't necessarily have bitmaps.
1217     accounting::ContinuousSpaceBitmap* live_bitmap = continuous_space->GetLiveBitmap();
1218     accounting::ContinuousSpaceBitmap* mark_bitmap = continuous_space->GetMarkBitmap();
1219     if (live_bitmap != nullptr && !space->IsRegionSpace()) {
1220       DCHECK(mark_bitmap != nullptr);
1221       live_bitmap_->RemoveContinuousSpaceBitmap(live_bitmap);
1222       mark_bitmap_->RemoveContinuousSpaceBitmap(mark_bitmap);
1223     }
1224     auto it = std::find(continuous_spaces_.begin(), continuous_spaces_.end(), continuous_space);
1225     DCHECK(it != continuous_spaces_.end());
1226     continuous_spaces_.erase(it);
1227   } else {
1228     DCHECK(space->IsDiscontinuousSpace());
1229     space::DiscontinuousSpace* discontinuous_space = space->AsDiscontinuousSpace();
1230     live_bitmap_->RemoveLargeObjectBitmap(discontinuous_space->GetLiveBitmap());
1231     mark_bitmap_->RemoveLargeObjectBitmap(discontinuous_space->GetMarkBitmap());
1232     auto it = std::find(discontinuous_spaces_.begin(), discontinuous_spaces_.end(),
1233                         discontinuous_space);
1234     DCHECK(it != discontinuous_spaces_.end());
1235     discontinuous_spaces_.erase(it);
1236   }
1237   if (space->IsAllocSpace()) {
1238     auto it = std::find(alloc_spaces_.begin(), alloc_spaces_.end(), space->AsAllocSpace());
1239     DCHECK(it != alloc_spaces_.end());
1240     alloc_spaces_.erase(it);
1241   }
1242 }
1243 
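// Returns the bytes currently allocated weighted by the process CPU time (in ns) elapsed since
// the last sample, i.e. (current_process_cpu_time - gc_last_process_cpu_time_ns) * bytes_allocated.
// Accumulated by the pre-/post-GC helpers below.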
1244 double Heap::CalculateGcWeightedAllocatedBytes(uint64_t gc_last_process_cpu_time_ns,
1245                                                uint64_t current_process_cpu_time) const {
1246   uint64_t bytes_allocated = GetBytesAllocated();
1247   double weight = current_process_cpu_time - gc_last_process_cpu_time_ns;
1248   return weight * bytes_allocated;
1249 }
1250 
1251 void Heap::CalculatePreGcWeightedAllocatedBytes() {
1252   uint64_t current_process_cpu_time = ProcessCpuNanoTime();
1253   pre_gc_weighted_allocated_bytes_ +=
1254     CalculateGcWeightedAllocatedBytes(pre_gc_last_process_cpu_time_ns_, current_process_cpu_time);
1255   pre_gc_last_process_cpu_time_ns_ = current_process_cpu_time;
1256 }
1257 
1258 void Heap::CalculatePostGcWeightedAllocatedBytes() {
1259   uint64_t current_process_cpu_time = ProcessCpuNanoTime();
1260   post_gc_weighted_allocated_bytes_ +=
1261     CalculateGcWeightedAllocatedBytes(post_gc_last_process_cpu_time_ns_, current_process_cpu_time);
1262   post_gc_last_process_cpu_time_ns_ = current_process_cpu_time;
1263 }
1264 
1265 uint64_t Heap::GetTotalGcCpuTime() {
1266   uint64_t sum = 0;
1267   for (auto* collector : garbage_collectors_) {
1268     sum += collector->GetTotalCpuTime();
1269   }
1270   return sum;
1271 }
1272 
1273 void Heap::DumpGcPerformanceInfo(std::ostream& os) {
1274   // Dump cumulative timings.
1275   os << "Dumping cumulative Gc timings\n";
1276   uint64_t total_duration = 0;
1277   // Dump cumulative loggers for each GC type.
1278   uint64_t total_paused_time = 0;
1279   for (auto* collector : garbage_collectors_) {
1280     total_duration += collector->GetCumulativeTimings().GetTotalNs();
1281     total_paused_time += collector->GetTotalPausedTimeNs();
1282     collector->DumpPerformanceInfo(os);
1283   }
1284   if (total_duration != 0) {
1285     const double total_seconds = total_duration / 1.0e9;
1286     const double total_cpu_seconds = GetTotalGcCpuTime() / 1.0e9;
1287     os << "Total time spent in GC: " << PrettyDuration(total_duration) << "\n";
1288     os << "Mean GC size throughput: "
1289        << PrettySize(GetBytesFreedEver() / total_seconds) << "/s"
1290        << " per cpu-time: "
1291        << PrettySize(GetBytesFreedEver() / total_cpu_seconds) << "/s\n";
1292   }
1293   os << "Total bytes allocated " << PrettySize(GetBytesAllocatedEver()) << "\n";
1294   os << "Total bytes freed " << PrettySize(GetBytesFreedEver()) << "\n";
1295   os << "Free memory " << PrettySize(GetFreeMemory()) << "\n";
1296   os << "Free memory until GC " << PrettySize(GetFreeMemoryUntilGC()) << "\n";
1297   os << "Free memory until OOME " << PrettySize(GetFreeMemoryUntilOOME()) << "\n";
1298   os << "Total memory " << PrettySize(GetTotalMemory()) << "\n";
1299   os << "Max memory " << PrettySize(GetMaxMemory()) << "\n";
1300   if (HasZygoteSpace()) {
1301     os << "Zygote space size " << PrettySize(zygote_space_->Size()) << "\n";
1302   }
1303   os << "Total mutator paused time: " << PrettyDuration(total_paused_time) << "\n";
1304   os << "Total time waiting for GC to complete: " << PrettyDuration(total_wait_time_) << "\n";
1305   os << "Total GC count: " << GetGcCount() << "\n";
1306   os << "Total GC time: " << PrettyDuration(GetGcTime()) << "\n";
1307   os << "Total blocking GC count: " << GetBlockingGcCount() << "\n";
1308   os << "Total blocking GC time: " << PrettyDuration(GetBlockingGcTime()) << "\n";
1309   os << "Total pre-OOME GC count: " << GetPreOomeGcCount() << "\n";
1310   {
1311     MutexLock mu(Thread::Current(), *gc_complete_lock_);
1312     if (gc_count_rate_histogram_.SampleSize() > 0U) {
1313       os << "Histogram of GC count per " << NsToMs(kGcCountRateHistogramWindowDuration) << " ms: ";
1314       gc_count_rate_histogram_.DumpBins(os);
1315       os << "\n";
1316     }
1317     if (blocking_gc_count_rate_histogram_.SampleSize() > 0U) {
1318       os << "Histogram of blocking GC count per "
1319          << NsToMs(kGcCountRateHistogramWindowDuration) << " ms: ";
1320       blocking_gc_count_rate_histogram_.DumpBins(os);
1321       os << "\n";
1322     }
1323   }
1324 
1325   if (kDumpRosAllocStatsOnSigQuit && rosalloc_space_ != nullptr) {
1326     rosalloc_space_->DumpStats(os);
1327   }
1328 
1329   os << "Native bytes total: " << GetNativeBytes()
1330      << " registered: " << native_bytes_registered_.load(std::memory_order_relaxed) << "\n";
1331 
1332   os << "Total native bytes at last GC: "
1333      << old_native_bytes_allocated_.load(std::memory_order_relaxed) << "\n";
1334 
1335   BaseMutex::DumpAll(os);
1336 }
1337 
1338 void Heap::ResetGcPerformanceInfo() {
1339   for (auto* collector : garbage_collectors_) {
1340     collector->ResetMeasurements();
1341   }
1342 
1343   process_cpu_start_time_ns_ = ProcessCpuNanoTime();
1344 
1345   pre_gc_last_process_cpu_time_ns_ = process_cpu_start_time_ns_;
1346   pre_gc_weighted_allocated_bytes_ = 0u;
1347 
1348   post_gc_last_process_cpu_time_ns_ = process_cpu_start_time_ns_;
1349   post_gc_weighted_allocated_bytes_ = 0u;
1350 
1351   total_bytes_freed_ever_.store(0);
1352   total_objects_freed_ever_.store(0);
1353   total_wait_time_ = 0;
1354   blocking_gc_count_ = 0;
1355   blocking_gc_time_ = 0;
1356   pre_oome_gc_count_.store(0, std::memory_order_relaxed);
1357   gc_count_last_window_ = 0;
1358   blocking_gc_count_last_window_ = 0;
1359   last_update_time_gc_count_rate_histograms_ =  // Round down by the window duration.
1360       (NanoTime() / kGcCountRateHistogramWindowDuration) * kGcCountRateHistogramWindowDuration;
1361   {
1362     MutexLock mu(Thread::Current(), *gc_complete_lock_);
1363     gc_count_rate_histogram_.Reset();
1364     blocking_gc_count_rate_histogram_.Reset();
1365   }
1366 }
1367 
1368 uint64_t Heap::GetGcCount() const {
1369   uint64_t gc_count = 0U;
1370   for (auto* collector : garbage_collectors_) {
1371     gc_count += collector->GetCumulativeTimings().GetIterations();
1372   }
1373   return gc_count;
1374 }
1375 
1376 uint64_t Heap::GetGcTime() const {
1377   uint64_t gc_time = 0U;
1378   for (auto* collector : garbage_collectors_) {
1379     gc_time += collector->GetCumulativeTimings().GetTotalNs();
1380   }
1381   return gc_time;
1382 }
1383 
1384 uint64_t Heap::GetBlockingGcCount() const {
1385   return blocking_gc_count_;
1386 }
1387 
1388 uint64_t Heap::GetBlockingGcTime() const {
1389   return blocking_gc_time_;
1390 }
1391 
1392 void Heap::DumpGcCountRateHistogram(std::ostream& os) const {
1393   MutexLock mu(Thread::Current(), *gc_complete_lock_);
1394   if (gc_count_rate_histogram_.SampleSize() > 0U) {
1395     gc_count_rate_histogram_.DumpBins(os);
1396   }
1397 }
1398 
1399 void Heap::DumpBlockingGcCountRateHistogram(std::ostream& os) const {
1400   MutexLock mu(Thread::Current(), *gc_complete_lock_);
1401   if (blocking_gc_count_rate_histogram_.SampleSize() > 0U) {
1402     blocking_gc_count_rate_histogram_.DumpBins(os);
1403   }
1404 }
1405 
1406 uint64_t Heap::GetPreOomeGcCount() const {
1407   return pre_oome_gc_count_.load(std::memory_order_relaxed);
1408 }
1409 
1410 ALWAYS_INLINE
1411 static inline AllocationListener* GetAndOverwriteAllocationListener(
1412     Atomic<AllocationListener*>* storage, AllocationListener* new_value) {
1413   return storage->exchange(new_value);
1414 }
1415 
1416 Heap::~Heap() {
1417   VLOG(heap) << "Starting ~Heap()";
1418   STLDeleteElements(&garbage_collectors_);
1419   // If we don't reset then the mark stack complains in its destructor.
1420   allocation_stack_->Reset();
1421   allocation_records_.reset();
1422   live_stack_->Reset();
1423   STLDeleteValues(&mod_union_tables_);
1424   STLDeleteValues(&remembered_sets_);
1425   STLDeleteElements(&continuous_spaces_);
1426   STLDeleteElements(&discontinuous_spaces_);
1427   delete gc_complete_lock_;
1428   delete thread_flip_lock_;
1429   delete pending_task_lock_;
1430   delete backtrace_lock_;
1431   uint64_t unique_count = unique_backtrace_count_.load();
1432   uint64_t seen_count = seen_backtrace_count_.load();
1433   if (unique_count != 0 || seen_count != 0) {
1434     LOG(INFO) << "gc stress unique=" << unique_count << " total=" << (unique_count + seen_count);
1435   }
1436   VLOG(heap) << "Finished ~Heap()";
1437 }
1438 
1439 
1440 space::ContinuousSpace* Heap::FindContinuousSpaceFromAddress(const mirror::Object* addr) const {
1441   for (const auto& space : continuous_spaces_) {
1442     if (space->Contains(addr)) {
1443       return space;
1444     }
1445   }
1446   return nullptr;
1447 }
1448 
1449 space::ContinuousSpace* Heap::FindContinuousSpaceFromObject(ObjPtr<mirror::Object> obj,
1450                                                             bool fail_ok) const {
1451   space::ContinuousSpace* space = FindContinuousSpaceFromAddress(obj.Ptr());
1452   if (space != nullptr) {
1453     return space;
1454   }
1455   if (!fail_ok) {
1456     LOG(FATAL) << "object " << obj << " not inside any spaces!";
1457   }
1458   return nullptr;
1459 }
1460 
1461 space::DiscontinuousSpace* Heap::FindDiscontinuousSpaceFromObject(ObjPtr<mirror::Object> obj,
1462                                                                   bool fail_ok) const {
1463   for (const auto& space : discontinuous_spaces_) {
1464     if (space->Contains(obj.Ptr())) {
1465       return space;
1466     }
1467   }
1468   if (!fail_ok) {
1469     LOG(FATAL) << "object " << obj << " not inside any spaces!";
1470   }
1471   return nullptr;
1472 }
1473 
1474 space::Space* Heap::FindSpaceFromObject(ObjPtr<mirror::Object> obj, bool fail_ok) const {
1475   space::Space* result = FindContinuousSpaceFromObject(obj, true);
1476   if (result != nullptr) {
1477     return result;
1478   }
1479   return FindDiscontinuousSpaceFromObject(obj, fail_ok);
1480 }
1481 
1482 space::Space* Heap::FindSpaceFromAddress(const void* addr) const {
1483   for (const auto& space : continuous_spaces_) {
1484     if (space->Contains(reinterpret_cast<const mirror::Object*>(addr))) {
1485       return space;
1486     }
1487   }
1488   for (const auto& space : discontinuous_spaces_) {
1489     if (space->Contains(reinterpret_cast<const mirror::Object*>(addr))) {
1490       return space;
1491     }
1492   }
1493   return nullptr;
1494 }
1495 
1496 std::string Heap::DumpSpaceNameFromAddress(const void* addr) const {
1497   space::Space* space = FindSpaceFromAddress(addr);
1498   return (space != nullptr) ? space->GetName() : "no space";
1499 }
1500 
1501 void Heap::ThrowOutOfMemoryError(Thread* self, size_t byte_count, AllocatorType allocator_type) {
1502   // If we're in a stack overflow, do not create a new exception. It would require running the
1503   // constructor, which will of course still be in a stack overflow. Note: we only care if the
1504   // native stack has overflowed. If the simulated stack overflows, it is still possible that the
1505   // native stack has room to create a new exception.
1506   if (self->IsHandlingStackOverflow<kNativeStackType>()) {
1507     self->SetException(
1508         Runtime::Current()->GetPreAllocatedOutOfMemoryErrorWhenHandlingStackOverflow());
1509     return;
1510   }
1511   // Allow plugins to intercept out of memory errors.
1512   Runtime::Current()->OutOfMemoryErrorHook();
1513 
1514   std::ostringstream oss;
1515   size_t total_bytes_free = GetFreeMemory();
1516   oss << "Failed to allocate a " << byte_count << " byte allocation with " << total_bytes_free
1517       << " free bytes and " << PrettySize(GetFreeMemoryUntilOOME()) << " until OOM,"
1518       << " target footprint " << target_footprint_.load(std::memory_order_relaxed)
1519       << ", growth limit "
1520       << growth_limit_;
1521   // If the allocation failed due to fragmentation, print out the largest continuous allocation.
1522   if (total_bytes_free >= byte_count) {
1523     space::AllocSpace* space = nullptr;
1524     if (allocator_type == kAllocatorTypeNonMoving) {
1525       space = non_moving_space_;
1526     } else if (allocator_type == kAllocatorTypeRosAlloc ||
1527                allocator_type == kAllocatorTypeDlMalloc) {
1528       space = main_space_;
1529     } else if (allocator_type == kAllocatorTypeBumpPointer ||
1530                allocator_type == kAllocatorTypeTLAB) {
1531       space = bump_pointer_space_;
1532     } else if (allocator_type == kAllocatorTypeRegion ||
1533                allocator_type == kAllocatorTypeRegionTLAB) {
1534       space = region_space_;
1535     }
1536 
1537     // There is no fragmentation info to log for large-object space.
1538     if (allocator_type != kAllocatorTypeLOS) {
1539       CHECK(space != nullptr) << "allocator_type:" << allocator_type
1540                               << " byte_count:" << byte_count
1541                               << " total_bytes_free:" << total_bytes_free;
1542       // LogFragmentationAllocFailure returns true if byte_count is greater than
1543       // the largest free contiguous chunk in the space. Return value false
1544       // means that we are throwing OOME because the amount of free heap after
1545       // GC is less than kMinFreeHeapAfterGcForAlloc as a proportion of the heap size.
1546       // Log an appropriate message in that case.
1547       if (!space->LogFragmentationAllocFailure(oss, byte_count)) {
1548         oss << "; giving up on allocation because <"
1549             << kMinFreeHeapAfterGcForAlloc * 100
1550             << "% of heap free after GC.";
1551       }
1552     }
1553   }
1554   self->ThrowOutOfMemoryError(oss.str().c_str());
1555 }
1556 
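// Performs a collector transition previously requested via RequestCollectorTransition. For CC and
// CMC, the background transition is skipped when the app has allocated less than a quarter of the
// available headroom (target_footprint_ - num_bytes_alive_after_gc_) since the last GC, unless
// collector transitions are being stress-tested or we are in low-memory mode.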
1557 void Heap::DoPendingCollectorTransition() {
1558   CollectorType desired_collector_type = desired_collector_type_;
1559 
1560   if (collector_type_ == kCollectorTypeCC || collector_type_ == kCollectorTypeCMC) {
1561     // If the app's allocations since the last GC exceed the threshold, do the TransitionGC
1562     // while the app is in the background; otherwise skip the TransitionGC.
1563     // num_bytes_allocated_since_gc should always be non-negative even if
1564     // num_bytes_alive_after_gc_ initially comes from the Zygote; the result is zero or positive.
1565     size_t num_bytes_allocated_since_gc =
1566         UnsignedDifference(GetBytesAllocated(), num_bytes_alive_after_gc_);
1567     if (num_bytes_allocated_since_gc <
1568         (UnsignedDifference(target_footprint_.load(std::memory_order_relaxed),
1569                             num_bytes_alive_after_gc_)/4)
1570         && !kStressCollectorTransition
1571         && !IsLowMemoryMode()) {
1572       return;
1573     }
1574   }
1575 
1576   // Launch homogeneous space compaction if it is desired.
1577   if (desired_collector_type == kCollectorTypeHomogeneousSpaceCompact) {
1578     if (!CareAboutPauseTimes()) {
1579       PerformHomogeneousSpaceCompact();
1580     } else {
1581       VLOG(gc) << "Homogeneous compaction ignored due to jank perceptible process state";
1582     }
1583   } else if (desired_collector_type == kCollectorTypeCCBackground ||
1584              desired_collector_type == kCollectorTypeCMCBackground) {
1585     if (!CareAboutPauseTimes()) {
1586       // Invoke full compaction.
1587       CollectGarbageInternal(collector::kGcTypeFull,
1588                              kGcCauseCollectorTransition,
1589                              /*clear_soft_references=*/false, GetCurrentGcNum() + 1);
1590     } else {
1591       VLOG(gc) << "background compaction ignored due to jank perceptible process state";
1592     }
1593   } else {
1594     CHECK_EQ(desired_collector_type, collector_type_) << "Unsupported collector transition";
1595   }
1596 }
1597 
1598 void Heap::Trim(Thread* self) {
1599   Runtime* const runtime = Runtime::Current();
1600   if (!CareAboutPauseTimes()) {
1601     // Deflate the monitors, this can cause a pause but shouldn't matter since we don't care
1602     // about pauses.
1603     ScopedTrace trace("Deflating monitors");
1604     // Avoid race conditions on the lock word for CC.
1605     ScopedGCCriticalSection gcs(self, kGcCauseTrim, kCollectorTypeHeapTrim);
1606     ScopedSuspendAll ssa(__FUNCTION__);
1607     uint64_t start_time = NanoTime();
1608     size_t count = runtime->GetMonitorList()->DeflateMonitors();
1609     VLOG(heap) << "Deflating " << count << " monitors took "
1610         << PrettyDuration(NanoTime() - start_time);
1611   }
1612   TrimIndirectReferenceTables(self);
1613   TrimSpaces(self);
1614   // Trim arenas that may have been used by JIT or verifier.
1615   runtime->GetArenaPool()->TrimMaps();
1616 }
1617 
1618 class TrimIndirectReferenceTableClosure : public Closure {
1619  public:
1620   explicit TrimIndirectReferenceTableClosure(Barrier* barrier) : barrier_(barrier) {
1621   }
1622   void Run(Thread* thread) override REQUIRES_SHARED(Locks::mutator_lock_) {
1623     thread->GetJniEnv()->TrimLocals();
1624     // If thread is a running mutator, then act on behalf of the trim thread.
1625     // See the code in ThreadList::RunCheckpoint.
1626     barrier_->Pass(Thread::Current());
1627   }
1628 
1629  private:
1630   Barrier* const barrier_;
1631 };
1632 
1633 void Heap::TrimIndirectReferenceTables(Thread* self) {
1634   ScopedObjectAccess soa(self);
1635   ScopedTrace trace(__PRETTY_FUNCTION__);
1636   JavaVMExt* vm = soa.Vm();
1637   // Trim globals indirect reference table.
1638   vm->TrimGlobals();
1639   // Trim locals indirect reference tables.
1640   // TODO: May also want to look for entirely empty pages maintained by SmallIrtAllocator.
1641   Barrier barrier(0);
1642   TrimIndirectReferenceTableClosure closure(&barrier);
1643   size_t barrier_count = Runtime::Current()->GetThreadList()->RunCheckpoint(&closure);
1644   ScopedThreadStateChange tsc(self, ThreadState::kWaitingForCheckPointsToRun);
1645   if (barrier_count != 0) {
1646     barrier.Increment(self, barrier_count);
1647   }
1648 }
1649 
1650 void Heap::StartGC(Thread* self, GcCause cause, CollectorType collector_type) {
1651   // This can be called in either kRunnable or suspended states.
1652   // TODO: Consider fixing that?
1653   ThreadState old_thread_state = self->GetState();
1654   if (old_thread_state == ThreadState::kRunnable) {
1655     Locks::mutator_lock_->AssertSharedHeld(self);
1656     // Manually inlining the following call breaks thread-safety analysis.
1657     StartGCRunnable(self, cause, collector_type);
1658     return;
1659   }
1660   Locks::mutator_lock_->AssertNotHeld(self);
1661   self->SetState(ThreadState::kWaitingForGcToComplete);
1662   MutexLock mu(self, *gc_complete_lock_);
1663   WaitForGcToCompleteLocked(cause, self);
1664   collector_type_running_ = collector_type;
1665   last_gc_cause_ = cause;
1666   thread_running_gc_ = self;
1667   self->SetState(old_thread_state);
1668 }
1669 
1670 void Heap::StartGCRunnable(Thread* self, GcCause cause, CollectorType collector_type) {
1671   Locks::mutator_lock_->AssertSharedHeld(self);
1672   while (true) {
1673     self->TransitionFromRunnableToSuspended(ThreadState::kWaitingForGcToComplete);
1674     {
1675       MutexLock mu(self, *gc_complete_lock_);
1676       // Ensure there is only one GC at a time.
1677       WaitForGcToCompleteLocked(cause, self);
1678       collector_type_running_ = collector_type;
1679       last_gc_cause_ = cause;
1680       thread_running_gc_ = self;
1681     }
1682     // We have to be careful returning to runnable state, since that could cause us to block.
1683     // That would be bad, since collector_type_running_ is set, and hence no GC is possible in this
1684     // state, allowing deadlock.
1685     if (LIKELY(self->TryTransitionFromSuspendedToRunnable())) {
1686       return;
1687     }
1688     {
1689       MutexLock mu(self, *gc_complete_lock_);
1690       collector_type_running_ = kCollectorTypeNone;
1691       thread_running_gc_ = nullptr;
1692     }
1693     self->TransitionFromSuspendedToRunnable();  // Will handle suspension request and block.
1694   }
1695 }
1696 
1697 void Heap::TrimSpaces(Thread* self) {
1698   // Pretend we are doing a GC to prevent background compaction from deleting the space we are
1699   // trimming.
1700   StartGC(self, kGcCauseTrim, kCollectorTypeHeapTrim);
1701   ScopedTrace trace(__PRETTY_FUNCTION__);
1702   const uint64_t start_ns = NanoTime();
1703   // Trim the managed spaces.
1704   uint64_t total_alloc_space_allocated = 0;
1705   uint64_t total_alloc_space_size = 0;
1706   uint64_t managed_reclaimed = 0;
1707   {
1708     ScopedObjectAccess soa(self);
1709     for (const auto& space : continuous_spaces_) {
1710       if (space->IsMallocSpace()) {
1711         gc::space::MallocSpace* malloc_space = space->AsMallocSpace();
1712         if (malloc_space->IsRosAllocSpace() || !CareAboutPauseTimes()) {
1713           // Don't trim dlmalloc spaces if we care about pauses since this can hold the space lock
1714           // for a long period of time.
1715           managed_reclaimed += malloc_space->Trim();
1716         }
1717         total_alloc_space_size += malloc_space->Size();
1718       }
1719     }
1720   }
1721   total_alloc_space_allocated = GetBytesAllocated();
1722   if (large_object_space_ != nullptr) {
1723     total_alloc_space_allocated -= large_object_space_->GetBytesAllocated();
1724   }
1725   if (bump_pointer_space_ != nullptr) {
1726     total_alloc_space_allocated -= bump_pointer_space_->Size();
1727   }
1728   if (region_space_ != nullptr) {
1729     total_alloc_space_allocated -= region_space_->GetBytesAllocated();
1730   }
1731   const float managed_utilization = static_cast<float>(total_alloc_space_allocated) /
1732       static_cast<float>(total_alloc_space_size);
1733   uint64_t gc_heap_end_ns = NanoTime();
1734   // We never move things in the native heap, so we can finish the GC at this point.
1735   FinishGC(self, collector::kGcTypeNone);
1736 
1737   VLOG(heap) << "Heap trim of managed (duration=" << PrettyDuration(gc_heap_end_ns - start_ns)
1738       << ", advised=" << PrettySize(managed_reclaimed) << ") heap. Managed heap utilization of "
1739       << static_cast<int>(100 * managed_utilization) << "%.";
1740 }
1741 
1742 bool Heap::IsValidObjectAddress(const void* addr) const {
1743   if (addr == nullptr) {
1744     return true;
1745   }
1746   return IsAligned<kObjectAlignment>(addr) && FindSpaceFromAddress(addr) != nullptr;
1747 }
1748 
1749 bool Heap::IsNonDiscontinuousSpaceHeapAddress(const void* addr) const {
1750   return FindContinuousSpaceFromAddress(reinterpret_cast<const mirror::Object*>(addr)) != nullptr;
1751 }
1752 
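// Best-effort liveness check. Consults the bump pointer, temp, and region spaces, then the space
// live bitmaps and (optionally) the allocation and live stacks, retrying a few times because the
// stacks can be swapped without suspending mutators.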
1753 bool Heap::IsLiveObjectLocked(ObjPtr<mirror::Object> obj,
1754                               bool search_allocation_stack,
1755                               bool search_live_stack,
1756                               bool sorted) {
1757   if (UNLIKELY(!IsAligned<kObjectAlignment>(obj.Ptr()))) {
1758     return false;
1759   }
1760   if (bump_pointer_space_ != nullptr && bump_pointer_space_->HasAddress(obj.Ptr())) {
1761     mirror::Class* klass = obj->GetClass<kVerifyNone>();
1762     if (obj == klass) {
1763       // This case happens for java.lang.Class.
1764       return true;
1765     }
1766     return VerifyClassClass(klass) && IsLiveObjectLocked(klass);
1767   } else if (temp_space_ != nullptr && temp_space_->HasAddress(obj.Ptr())) {
1768     // If we are in the allocated region of the temp space, then we are probably live (e.g. during
1769     // a GC). When a GC isn't running, End() - Begin() is 0, which means no objects are contained.
1770     return temp_space_->Contains(obj.Ptr());
1771   }
1772   if (region_space_ != nullptr && region_space_->HasAddress(obj.Ptr())) {
1773     return true;
1774   }
1775   space::ContinuousSpace* c_space = FindContinuousSpaceFromObject(obj, true);
1776   space::DiscontinuousSpace* d_space = nullptr;
1777   if (c_space != nullptr) {
1778     if (c_space->GetLiveBitmap()->Test(obj.Ptr())) {
1779       return true;
1780     }
1781   } else {
1782     d_space = FindDiscontinuousSpaceFromObject(obj, true);
1783     if (d_space != nullptr) {
1784       if (d_space->GetLiveBitmap()->Test(obj.Ptr())) {
1785         return true;
1786       }
1787     }
1788   }
1789   // This covers the allocation/live stack swapping that is done without suspending mutators.
1790   for (size_t i = 0; i < (sorted ? 1 : 5); ++i) {
1791     if (i > 0) {
1792       NanoSleep(MsToNs(10));
1793     }
1794     if (search_allocation_stack) {
1795       if (sorted) {
1796         if (allocation_stack_->ContainsSorted(obj.Ptr())) {
1797           return true;
1798         }
1799       } else if (allocation_stack_->Contains(obj.Ptr())) {
1800         return true;
1801       }
1802     }
1803 
1804     if (search_live_stack) {
1805       if (sorted) {
1806         if (live_stack_->ContainsSorted(obj.Ptr())) {
1807           return true;
1808         }
1809       } else if (live_stack_->Contains(obj.Ptr())) {
1810         return true;
1811       }
1812     }
1813   }
1814   // We need to check the bitmaps again since there is a race where we mark something as live and
1815   // then clear the stack containing it.
1816   if (c_space != nullptr) {
1817     if (c_space->GetLiveBitmap()->Test(obj.Ptr())) {
1818       return true;
1819     }
1820   } else {
1821     d_space = FindDiscontinuousSpaceFromObject(obj, true);
1822     if (d_space != nullptr && d_space->GetLiveBitmap()->Test(obj.Ptr())) {
1823       return true;
1824     }
1825   }
1826   return false;
1827 }
1828 
1829 std::string Heap::DumpSpaces() const {
1830   std::ostringstream oss;
1831   DumpSpaces(oss);
1832   return oss.str();
1833 }
1834 
1835 void Heap::DumpSpaces(std::ostream& stream) const {
1836   for (const auto& space : continuous_spaces_) {
1837     accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
1838     accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
1839     stream << space << " " << *space << "\n";
1840     if (live_bitmap != nullptr) {
1841       stream << live_bitmap << " " << *live_bitmap << "\n";
1842     }
1843     if (mark_bitmap != nullptr) {
1844       stream << mark_bitmap << " " << *mark_bitmap << "\n";
1845     }
1846   }
1847   for (const auto& space : discontinuous_spaces_) {
1848     stream << space << " " << *space << "\n";
1849   }
1850 }
1851 
1852 void Heap::VerifyObjectBody(ObjPtr<mirror::Object> obj) {
1853   if (verify_object_mode_ == kVerifyObjectModeDisabled) {
1854     return;
1855   }
1856 
1857   // Ignore early dawn of the universe verifications.
1858   if (UNLIKELY(num_bytes_allocated_.load(std::memory_order_relaxed) < 10 * KB)) {
1859     return;
1860   }
1861   CHECK_ALIGNED(obj.Ptr(), kObjectAlignment) << "Object isn't aligned";
1862   mirror::Class* c = obj->GetFieldObject<mirror::Class, kVerifyNone>(mirror::Object::ClassOffset());
1863   CHECK(c != nullptr) << "Null class in object " << obj;
1864   CHECK_ALIGNED(c, kObjectAlignment) << "Class " << c << " not aligned in object " << obj;
1865   CHECK(VerifyClassClass(c));
1866 
1867   if (verify_object_mode_ > kVerifyObjectModeFast) {
1868     // Note: the bitmap tests below are racy since we don't hold the heap bitmap lock.
1869     CHECK(IsLiveObjectLocked(obj)) << "Object is dead " << obj << "\n" << DumpSpaces();
1870   }
1871 }
1872 
1873 void Heap::VerifyHeap() {
1874   ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
1875   auto visitor = [&](mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS {
1876     VerifyObjectBody(obj);
1877   };
1878   // Technically we need the mutator lock here to call Visit. However, VerifyObjectBody is already
1879   // NO_THREAD_SAFETY_ANALYSIS.
1880   auto no_thread_safety_analysis = [&]() NO_THREAD_SAFETY_ANALYSIS {
1881     GetLiveBitmap()->Visit(visitor);
1882   };
1883   no_thread_safety_analysis();
1884 }
1885 
1886 void Heap::RecordFree(uint64_t freed_objects, int64_t freed_bytes) {
1887   // Use signed comparison since freed bytes can be negative when background compaction to
1888   // foreground transitions occur. This is typically due to objects moving from a bump pointer space to a
1889   // free list backed space, which may increase memory footprint due to padding and binning.
1890   RACING_DCHECK_LE(freed_bytes,
1891                    static_cast<int64_t>(num_bytes_allocated_.load(std::memory_order_relaxed)));
1892   // Note: This relies on 2s complement for handling negative freed_bytes.
1893   num_bytes_allocated_.fetch_sub(static_cast<ssize_t>(freed_bytes), std::memory_order_relaxed);
1894   if (Runtime::Current()->HasStatsEnabled()) {
1895     RuntimeStats* thread_stats = Thread::Current()->GetStats();
1896     thread_stats->freed_objects += freed_objects;
1897     thread_stats->freed_bytes += freed_bytes;
1898     // TODO: Do this concurrently.
1899     RuntimeStats* global_stats = Runtime::Current()->GetStats();
1900     global_stats->freed_objects += freed_objects;
1901     global_stats->freed_bytes += freed_bytes;
1902   }
1903 }
1904 
1905 void Heap::RecordFreeRevoke() {
1906   // Subtract num_bytes_freed_revoke_ from num_bytes_allocated_ to cancel out the
1907   // ahead-of-time, bulk counting of bytes allocated in rosalloc thread-local buffers.
1908   // If there's a concurrent revoke, it's OK not to reset num_bytes_freed_revoke_
1909   // all the way to zero, as the remainder will be subtracted at the next GC.
1910   size_t bytes_freed = num_bytes_freed_revoke_.load(std::memory_order_relaxed);
1911   CHECK_GE(num_bytes_freed_revoke_.fetch_sub(bytes_freed, std::memory_order_relaxed),
1912            bytes_freed) << "num_bytes_freed_revoke_ underflow";
1913   CHECK_GE(num_bytes_allocated_.fetch_sub(bytes_freed, std::memory_order_relaxed),
1914            bytes_freed) << "num_bytes_allocated_ underflow";
1915   GetCurrentGcIteration()->SetFreedRevoke(bytes_freed);
1916 }
1917 
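// Maps a RosAlloc allocator back to the RosAllocSpace that owns it: the main rosalloc space is
// checked first, then all continuous spaces are scanned.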
1918 space::RosAllocSpace* Heap::GetRosAllocSpace(gc::allocator::RosAlloc* rosalloc) const {
1919   if (rosalloc_space_ != nullptr && rosalloc_space_->GetRosAlloc() == rosalloc) {
1920     return rosalloc_space_;
1921   }
1922   for (const auto& space : continuous_spaces_) {
1923     if (space->AsContinuousSpace()->IsRosAllocSpace()) {
1924       if (space->AsContinuousSpace()->AsRosAllocSpace()->GetRosAlloc() == rosalloc) {
1925         return space->AsContinuousSpace()->AsRosAllocSpace();
1926       }
1927     }
1928   }
1929   return nullptr;
1930 }
1931 
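// Returns whether the quick allocation entrypoints are currently the instrumented variants. Used
// below to abandon a non-instrumented slow-path allocation if instrumentation was enabled while
// the thread was suspended.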
1932 static inline bool EntrypointsInstrumented() REQUIRES_SHARED(Locks::mutator_lock_) {
1933   instrumentation::Instrumentation* const instrumentation =
1934       Runtime::Current()->GetInstrumentation();
1935   return instrumentation != nullptr && instrumentation->AllocEntrypointsInstrumented();
1936 }
1937 
1938 mirror::Object* Heap::AllocateInternalWithGc(Thread* self,
1939                                              AllocatorType allocator,
1940                                              bool instrumented,
1941                                              size_t alloc_size,
1942                                              size_t* bytes_allocated,
1943                                              size_t* usable_size,
1944                                              size_t* bytes_tl_bulk_allocated,
1945                                              ObjPtr<mirror::Class>* klass) {
1946   bool was_default_allocator = allocator == GetCurrentAllocator();
1947   // Make sure there is no pending exception since we may need to throw an OOME.
1948   self->AssertNoPendingException();
1949   DCHECK(klass != nullptr);
1950 
1951   StackHandleScope<1> hs(self);
1952   HandleWrapperObjPtr<mirror::Class> h_klass(hs.NewHandleWrapper(klass));
1953 
1954   auto send_object_pre_alloc =
1955       [&]() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_) {
1956         if (UNLIKELY(instrumented)) {
1957           AllocationListener* l = alloc_listener_.load(std::memory_order_seq_cst);
1958           if (UNLIKELY(l != nullptr) && UNLIKELY(l->HasPreAlloc())) {
1959             l->PreObjectAllocated(self, h_klass, &alloc_size);
1960           }
1961         }
1962       };
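// Runs `op` in a scope where thread suspension is allowed, then re-delivers the pre-allocation
// event (which may update alloc_size), since an allocation listener may have been installed or
// changed while the thread was suspended.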
1963 #define PERFORM_SUSPENDING_OPERATION(op)                                          \
1964   [&]() REQUIRES(Roles::uninterruptible_) REQUIRES_SHARED(Locks::mutator_lock_) { \
1965     ScopedAllowThreadSuspension ats;                                              \
1966     auto res = (op);                                                              \
1967     send_object_pre_alloc();                                                      \
1968     return res;                                                                   \
1969   }()
1970 
1971   // The allocation failed. If the GC is running, block until it completes, and then retry the
1972   // allocation.
1973   collector::GcType last_gc =
1974       PERFORM_SUSPENDING_OPERATION(WaitForGcToComplete(kGcCauseForAlloc, self));
1975   // If we were the default allocator but the allocator changed while we were suspended,
1976   // abort the allocation.
1977   if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
1978       (!instrumented && EntrypointsInstrumented())) {
1979     return nullptr;
1980   }
1981   uint32_t starting_gc_num = GetCurrentGcNum();
1982   if (last_gc != collector::kGcTypeNone) {
1983     // A GC was in progress and we blocked, retry allocation now that memory has been freed.
1984     mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
1985                                                      usable_size, bytes_tl_bulk_allocated);
1986     if (ptr != nullptr) {
1987       return ptr;
1988     }
1989   }
1990   if (IsGCDisabledForShutdown()) {
1991     // We're just shutting down and GCs don't work anymore. Try a different allocator.
1992     mirror::Object* ptr = TryToAllocate<true, false>(self,
1993                                                      kAllocatorTypeNonMoving,
1994                                                      alloc_size,
1995                                                      bytes_allocated,
1996                                                      usable_size,
1997                                                      bytes_tl_bulk_allocated);
1998     if (ptr != nullptr) {
1999       return ptr;
2000     }
2001   }
2002 
2003   int64_t bytes_freed_before = GetBytesFreedEver();
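  // Heuristic used below: retrying the allocation is only worthwhile if either at least
  // kMinFreeHeapAfterGcForAlloc of growth_limit_ is free, or the GC(s) since bytes_freed_before
  // freed at least kMinFreedHeapAfterGcForAlloc of growth_limit_.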
2004   auto have_reclaimed_enough = [&]() {
2005     size_t curr_bytes_allocated = GetBytesAllocated();
2006     size_t free_heap = UnsignedDifference(growth_limit_, curr_bytes_allocated);
2007     int64_t newly_freed = GetBytesFreedEver() - bytes_freed_before;
2008     double free_heap_ratio = static_cast<double>(free_heap) / growth_limit_;
2009     double newly_freed_ratio = static_cast<double>(newly_freed) / growth_limit_;
2010     return free_heap_ratio >= kMinFreeHeapAfterGcForAlloc ||
2011            newly_freed_ratio >= kMinFreedHeapAfterGcForAlloc;
2012   };
2013   // We perform one GC as per the next_gc_type_ (chosen in GrowForUtilization),
2014   // if it hasn't already been tried. If that doesn't succeed, go for the most
2015   // exhaustive option. Perform a full-heap collection including clearing
2016   // SoftReferences. In case of ConcurrentCopying, it will also ensure that
2017   // all regions are evacuated. If allocation doesn't succeed even after that
2018   // then there is no hope, so we throw OOME.
2019   collector::GcType tried_type = next_gc_type_;
2020   if (last_gc < tried_type) {
2021     VLOG(gc) << "Starting a blocking GC " << kGcCauseForAlloc;
2022     PERFORM_SUSPENDING_OPERATION(
2023         CollectGarbageInternal(tried_type, kGcCauseForAlloc, false, starting_gc_num + 1));
2024 
2025     if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
2026         (!instrumented && EntrypointsInstrumented())) {
2027       return nullptr;
2028     }
2029     // Check this even if we didn't actually run a GC; if we didn't someone else probably did.
2030     if (have_reclaimed_enough()) {
2031       mirror::Object* ptr = TryToAllocate<true, false>(self, allocator,
2032                                                        alloc_size, bytes_allocated,
2033                                                        usable_size, bytes_tl_bulk_allocated);
2034       if (ptr != nullptr) {
2035         return ptr;
2036       }
2037     }
2038   }
2039   // Most allocations should have succeeded by now, so the heap is really full, really fragmented,
2040   // or the requested size is really big. Do another GC, collecting SoftReferences this time. The
2041   // VM spec requires that all SoftReferences have been collected and cleared before throwing
2042   // OOME.
2043   VLOG(gc) << "Forcing collection of SoftReferences for " << PrettySize(alloc_size)
2044            << " allocation";
2045   // TODO: Run finalization, but this may cause more allocations to occur.
2046   // We don't need a WaitForGcToComplete here either.
2047   // TODO: Should check whether another thread already just ran a GC with soft
2048   // references.
2049 
2050   DCHECK(!gc_plan_.empty());
2051 
2052   int64_t min_freed_to_continue =
2053       static_cast<int64_t>(kMinFreedHeapAfterGcForAlloc * growth_limit_ + alloc_size);
2054   // Repeatedly collect the entire heap until either
2055   // (a) this was insufficiently productive at reclaiming memory and we should give up to avoid
2056   // "GC thrashing", or
2057   // (b) GC was sufficiently productive (reclaimed min_freed_to_continue bytes) AND allowed us to
2058   // satisfy the allocation request.
2059   bool gc_ran;
2060   int gc_attempts = 0;
2061   // A requested GC can fail to run because either someone else beat us to it, or because we can't
2062   // run a GC in this state. In the latter case, we return quickly. Just try a small number of
2063   // times.
2064   static constexpr int kMaxGcAttempts = 5;
2065   do {
2066     bytes_freed_before = GetBytesFreedEver();
2067     pre_oome_gc_count_.fetch_add(1, std::memory_order_relaxed);
2068     // TODO(b/353333767): Do this only if nobody else beats us to it. If we're having trouble
2069     // allocating, probably other threads are in the same boat.
2070     starting_gc_num = GetCurrentGcNum();
2071     gc_ran = PERFORM_SUSPENDING_OPERATION(
2072         CollectGarbageInternal(gc_plan_.back(), kGcCauseForAlloc, true, starting_gc_num + 1) !=
2073         collector::kGcTypeNone);
2074     ++gc_attempts;
2075     if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
2076         (!instrumented && EntrypointsInstrumented())) {
2077       return nullptr;
2078     }
2079     bool ran_homogeneous_space_compaction = false;
2080     bool immediately_reclaimed_enough = have_reclaimed_enough();
2081     if (!immediately_reclaimed_enough) {
2082       const uint64_t current_time = NanoTime();
2083       if (allocator == kAllocatorTypeRosAlloc || allocator == kAllocatorTypeDlMalloc) {
2084         if (use_homogeneous_space_compaction_for_oom_ &&
2085             current_time - last_time_homogeneous_space_compaction_by_oom_ >
2086             min_interval_homogeneous_space_compaction_by_oom_) {
2087           last_time_homogeneous_space_compaction_by_oom_ = current_time;
2088           ran_homogeneous_space_compaction =
2089               (PERFORM_SUSPENDING_OPERATION(PerformHomogeneousSpaceCompact()) ==
2090                HomogeneousSpaceCompactResult::kSuccess);
2091           // Thread suspension could have occurred.
2092           if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
2093               (!instrumented && EntrypointsInstrumented())) {
2094             return nullptr;
2095           }
2096           // Always print that we ran homogeneous space compaction since this can cause jank.
2097           VLOG(heap) << "Ran heap homogeneous space compaction, "
2098                     << " requested defragmentation "
2099                     << count_requested_homogeneous_space_compaction_.load()
2100                     << " performed defragmentation "
2101                     << count_performed_homogeneous_space_compaction_.load()
2102                     << " ignored homogeneous space compaction "
2103                     << count_ignored_homogeneous_space_compaction_.load()
2104                     << " delayed count = "
2105                     << count_delayed_oom_.load();
2106         }
2107       }
2108     }
2109     if (immediately_reclaimed_enough ||
2110         (ran_homogeneous_space_compaction && have_reclaimed_enough())) {
2111       mirror::Object* ptr = TryToAllocate<true, true>(
2112           self, allocator, alloc_size, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
2113       if (ptr != nullptr) {
2114         if (ran_homogeneous_space_compaction) {
2115           count_delayed_oom_++;
2116         }
2117         return ptr;
2118       }
2119     }
2120     // This loops only if we reclaimed plenty of memory, but presumably some other thread beat us
2121     // to allocating it. In the very unlikely case that we're running into a serious fragmentation
2122     // issue, and there is no other thread allocating, GCs will quickly become unsuccessful, and we
2123     // will stop then. If another thread is allocating aggressively, this may go on for a while,
2124     // but we are still making progress somewhere.
2125   } while ((!gc_ran && gc_attempts < kMaxGcAttempts) ||
2126            GetBytesFreedEver() - bytes_freed_before > min_freed_to_continue);
2127 #undef PERFORM_SUSPENDING_OPERATION
2128   // Throw an OOM error.
2129   {
2130     ScopedAllowThreadSuspension ats;
2131     ThrowOutOfMemoryError(self, alloc_size, allocator);
2132   }
2133   return nullptr;
2134 }
2135 
2136 void Heap::SetTargetHeapUtilization(float target) {
2137   DCHECK_GT(target, 0.1f);  // asserted in Java code
2138   DCHECK_LT(target, 1.0f);
2139   target_utilization_ = target;
2140 }
2141 
2142 size_t Heap::GetObjectsAllocated() const {
2143   Thread* const self = Thread::Current();
2144   ScopedThreadStateChange tsc(self, ThreadState::kWaitingForGetObjectsAllocated);
2145   // Prevent GC running during GetObjectsAllocated since we may get a checkpoint request that tells
2146   // us to suspend while we are doing SuspendAll. b/35232978
2147   gc::ScopedGCCriticalSection gcs(Thread::Current(),
2148                                   gc::kGcCauseGetObjectsAllocated,
2149                                   gc::kCollectorTypeGetObjectsAllocated);
2150   // Need SuspendAll here to prevent lock violation if RosAlloc does it during InspectAll.
2151   ScopedSuspendAll ssa(__FUNCTION__);
2152   ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
2153   size_t total = 0;
2154   for (space::AllocSpace* space : alloc_spaces_) {
2155     total += space->GetObjectsAllocated();
2156   }
2157   return total;
2158 }
2159 
2160 uint64_t Heap::GetBytesAllocatedEver() const {
2161   // Force the returned value to be monotonically increasing, in the sense that if this is called
2162   // at A and B, such that A happens-before B, then the call at B returns a value no smaller than
2163   // that at A. This is not otherwise guaranteed, since num_bytes_allocated_ is decremented first,
2164   // and total_bytes_freed_ever_ is incremented later.
2165   static std::atomic<uint64_t> max_bytes_so_far(0);
2166   uint64_t so_far = max_bytes_so_far.load(std::memory_order_relaxed);
2167   uint64_t current_bytes = GetBytesFreedEver(std::memory_order_acquire) + GetBytesAllocated();
2168   DCHECK(current_bytes < (static_cast<uint64_t>(1) << 63));  // result is "positive".
2169   do {
2170     if (current_bytes <= so_far) {
2171       return so_far;
2172     }
2173   } while (!max_bytes_so_far.compare_exchange_weak(so_far /* updated */,
2174                                                    current_bytes, std::memory_order_relaxed));
2175   return current_bytes;
2176 }
2177 
2178 // Check whether the given object is an instance of the given class.
2179 static bool MatchesClass(mirror::Object* obj,
2180                          Handle<mirror::Class> h_class,
2181                          bool use_is_assignable_from) REQUIRES_SHARED(Locks::mutator_lock_) {
2182   mirror::Class* instance_class = obj->GetClass();
2183   CHECK(instance_class != nullptr);
2184   ObjPtr<mirror::Class> klass = h_class.Get();
2185   if (use_is_assignable_from) {
2186     return klass != nullptr && klass->IsAssignableFrom(instance_class);
2187   }
2188   return instance_class == klass;
2189 }
2190 
2191 void Heap::CountInstances(const std::vector<Handle<mirror::Class>>& classes,
2192                           bool use_is_assignable_from,
2193                           uint64_t* counts) {
2194   auto instance_counter = [&](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
2195     for (size_t i = 0; i < classes.size(); ++i) {
2196       if (MatchesClass(obj, classes[i], use_is_assignable_from)) {
2197         ++counts[i];
2198       }
2199     }
2200   };
2201   VisitObjects(instance_counter);
2202 }
2203 
2204 void Heap::CollectGarbage(bool clear_soft_references, GcCause cause) {
2205   // Even if we waited for a GC, we still need to do another GC since weaks allocated during the
2206   // last GC will not necessarily have been cleared.
2207   CollectGarbageInternal(gc_plan_.back(), cause, clear_soft_references, GC_NUM_ANY);
2208 }
2209 
2210 bool Heap::SupportHomogeneousSpaceCompactAndCollectorTransitions() const {
2211   return main_space_backup_.get() != nullptr && main_space_ != nullptr &&
2212       foreground_collector_type_ == kCollectorTypeCMS;
2213 }
2214 
2215 HomogeneousSpaceCompactResult Heap::PerformHomogeneousSpaceCompact() {
2216   Thread* self = Thread::Current();
2217   // Increment the requested homogeneous space compaction count.
2218   count_requested_homogeneous_space_compaction_++;
2219   // Store performed homogeneous space compaction at a new request arrival.
2220   ScopedThreadStateChange tsc(self, ThreadState::kWaitingPerformingGc);
2221   Locks::mutator_lock_->AssertNotHeld(self);
2222   {
2223     ScopedThreadStateChange tsc2(self, ThreadState::kWaitingForGcToComplete);
2224     MutexLock mu(self, *gc_complete_lock_);
2225     // Ensure there is only one GC at a time.
2226     WaitForGcToCompleteLocked(kGcCauseHomogeneousSpaceCompact, self);
2227     // Homogeneous space compaction is a copying transition, so we can't run it if the moving GC
2228     // disable count is non-zero.
2229     // If the collector type changed to something which doesn't benefit from homogeneous space
2230     // compaction, exit.
2231     if (disable_moving_gc_count_ != 0 || IsMovingGc(collector_type_) ||
2232         !main_space_->CanMoveObjects()) {
2233       return kErrorReject;
2234     }
2235     if (!SupportHomogeneousSpaceCompactAndCollectorTransitions()) {
2236       return kErrorUnsupported;
2237     }
2238     collector_type_running_ = kCollectorTypeHomogeneousSpaceCompact;
2239   }
2240   if (Runtime::Current()->IsShuttingDown(self)) {
2241     // Don't allow heap transitions to happen if the runtime is shutting down since these can
2242     // cause objects to get finalized.
2243     FinishGC(self, collector::kGcTypeNone);
2244     return HomogeneousSpaceCompactResult::kErrorVMShuttingDown;
2245   }
2246   collector::GarbageCollector* collector;
2247   {
2248     ScopedSuspendAll ssa(__FUNCTION__);
2249     uint64_t start_time = NanoTime();
2250     // Launch compaction.
2251     space::MallocSpace* to_space = main_space_backup_.release();
2252     space::MallocSpace* from_space = main_space_;
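    // The spaces swap roles: objects are copied from the current main space into the backup
    // space, which becomes the new main space below; the old main space is kept as the next
    // backup.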
2253     to_space->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2254     const uint64_t space_size_before_compaction = from_space->Size();
2255     AddSpace(to_space);
2256     // Make sure that we will have enough room to copy.
2257     CHECK_GE(to_space->GetFootprintLimit(), from_space->GetFootprintLimit());
2258     collector = Compact(to_space, from_space, kGcCauseHomogeneousSpaceCompact);
2259     const uint64_t space_size_after_compaction = to_space->Size();
2260     main_space_ = to_space;
2261     main_space_backup_.reset(from_space);
2262     RemoveSpace(from_space);
2263     SetSpaceAsDefault(main_space_);  // Set as default to reset the proper dlmalloc space.
2264     // Update performed homogeneous space compaction count.
2265     count_performed_homogeneous_space_compaction_++;
2266     // Print the statistics log and resume all threads.
2267     uint64_t duration = NanoTime() - start_time;
2268     VLOG(heap) << "Heap homogeneous space compaction took " << PrettyDuration(duration) << " size: "
2269                << PrettySize(space_size_before_compaction) << " -> "
2270                << PrettySize(space_size_after_compaction) << " compact-ratio: "
2271                << std::fixed << static_cast<double>(space_size_after_compaction) /
2272                static_cast<double>(space_size_before_compaction);
2273   }
2274   // Finish GC.
2275   // Get the references we need to enqueue.
2276   SelfDeletingTask* clear = reference_processor_->CollectClearedReferences(self);
2277   GrowForUtilization(semi_space_collector_);
2278   LogGC(kGcCauseHomogeneousSpaceCompact, collector);
2279   FinishGC(self, collector::kGcTypeFull);
2280   // Enqueue any references after losing the GC locks.
2281   clear->Run(self);
2282   clear->Finalize();
2283   {
2284     ScopedObjectAccess soa(self);
2285     soa.Vm()->UnloadNativeLibraries();
2286   }
2287   return HomogeneousSpaceCompactResult::kSuccess;
2288 }
2289 
2290 void Heap::SetDefaultConcurrentStartBytes() {
2291   MutexLock mu(Thread::Current(), *gc_complete_lock_);
2292   if (collector_type_running_ != kCollectorTypeNone) {
2293     // If a collector is already running, just let it set concurrent_start_bytes_.
2294     return;
2295   }
2296   SetDefaultConcurrentStartBytesLocked();
2297 }
2298 
2299 void Heap::SetDefaultConcurrentStartBytesLocked() {
2300   if (IsGcConcurrent()) {
2301     size_t target_footprint = target_footprint_.load(std::memory_order_relaxed);
2302     size_t reserve_bytes = target_footprint / 4;
2303     reserve_bytes = std::min(reserve_bytes, kMaxConcurrentRemainingBytes);
2304     reserve_bytes = std::max(reserve_bytes, kMinConcurrentRemainingBytes);
2305     concurrent_start_bytes_ = UnsignedDifference(target_footprint, reserve_bytes);
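    // A concurrent collection is requested once allocation reaches this threshold, so the
    // clamped reserve leaves headroom for allocations made while the collector is running.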
2306   } else {
2307     concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
2308   }
2309 }
2310 
2311 void Heap::ChangeCollector(CollectorType collector_type) {
2312   // TODO: Only do this with all mutators suspended to avoid races.
2313   if (collector_type != collector_type_) {
2314     collector_type_ = collector_type;
2315     gc_plan_.clear();
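    // gc_plan_ is rebuilt below, ordered from the cheapest GC type to the most thorough one;
    // CollectGarbage() always runs gc_plan_.back(), i.e. the most thorough entry.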
2316     switch (collector_type_) {
2317       case kCollectorTypeCC: {
2318         if (use_generational_cc_) {
2319           gc_plan_.push_back(collector::kGcTypeSticky);
2320         }
2321         gc_plan_.push_back(collector::kGcTypeFull);
2322         if (use_tlab_) {
2323           ChangeAllocator(kAllocatorTypeRegionTLAB);
2324         } else {
2325           ChangeAllocator(kAllocatorTypeRegion);
2326         }
2327         break;
2328       }
2329       case kCollectorTypeCMC: {
2330         gc_plan_.push_back(collector::kGcTypeFull);
2331         if (use_tlab_) {
2332           ChangeAllocator(kAllocatorTypeTLAB);
2333         } else {
2334           ChangeAllocator(kAllocatorTypeBumpPointer);
2335         }
2336         break;
2337       }
2338       case kCollectorTypeSS: {
2339         gc_plan_.push_back(collector::kGcTypeFull);
2340         if (use_tlab_) {
2341           ChangeAllocator(kAllocatorTypeTLAB);
2342         } else {
2343           ChangeAllocator(kAllocatorTypeBumpPointer);
2344         }
2345         break;
2346       }
2347       case kCollectorTypeMS: {
2348         gc_plan_.push_back(collector::kGcTypeSticky);
2349         gc_plan_.push_back(collector::kGcTypePartial);
2350         gc_plan_.push_back(collector::kGcTypeFull);
2351         ChangeAllocator(kUseRosAlloc ? kAllocatorTypeRosAlloc : kAllocatorTypeDlMalloc);
2352         break;
2353       }
2354       case kCollectorTypeCMS: {
2355         gc_plan_.push_back(collector::kGcTypeSticky);
2356         gc_plan_.push_back(collector::kGcTypePartial);
2357         gc_plan_.push_back(collector::kGcTypeFull);
2358         ChangeAllocator(kUseRosAlloc ? kAllocatorTypeRosAlloc : kAllocatorTypeDlMalloc);
2359         break;
2360       }
2361       default: {
2362         UNIMPLEMENTED(FATAL);
2363         UNREACHABLE();
2364       }
2365     }
2366     SetDefaultConcurrentStartBytesLocked();
2367   }
2368 }
2369 
2370 // Special compacting collector which uses sub-optimal bin packing to reduce zygote space size.
2371 class ZygoteCompactingCollector final : public collector::SemiSpace {
2372  public:
2373   ZygoteCompactingCollector(gc::Heap* heap, bool is_running_on_memory_tool)
2374       : SemiSpace(heap, "zygote collector"),
2375         bin_live_bitmap_(nullptr),
2376         bin_mark_bitmap_(nullptr),
2377         is_running_on_memory_tool_(is_running_on_memory_tool) {}
2378 
2379   void BuildBins(space::ContinuousSpace* space) REQUIRES_SHARED(Locks::mutator_lock_) {
2380     bin_live_bitmap_ = space->GetLiveBitmap();
2381     bin_mark_bitmap_ = space->GetMarkBitmap();
2382     uintptr_t prev = reinterpret_cast<uintptr_t>(space->Begin());
2383     WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
2384     // Note: This requires traversing the space in increasing order of object addresses.
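    // Each bin is the gap between the end of one live object and the start of the next (plus the
    // trailing gap to the end of the space); MarkNonForwardedObject() later best-fits forwarded
    // objects into these gaps via bins_.lower_bound().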
2385     auto visitor = [&](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
2386       uintptr_t object_addr = reinterpret_cast<uintptr_t>(obj);
2387       size_t bin_size = object_addr - prev;
2388       // Add the bin spanning from the end of the previous object to the start of the current object.
2389       AddBin(bin_size, prev);
2390       prev = object_addr + RoundUp(obj->SizeOf<kDefaultVerifyFlags>(), kObjectAlignment);
2391     };
2392     bin_live_bitmap_->Walk(visitor);
2393     // Add the last bin which spans after the last object to the end of the space.
2394     AddBin(reinterpret_cast<uintptr_t>(space->End()) - prev, prev);
2395   }
2396 
2397  private:
2398   // Maps from bin sizes to locations.
2399   std::multimap<size_t, uintptr_t> bins_;
2400   // Live bitmap of the space which contains the bins.
2401   accounting::ContinuousSpaceBitmap* bin_live_bitmap_;
2402   // Mark bitmap of the space which contains the bins.
2403   accounting::ContinuousSpaceBitmap* bin_mark_bitmap_;
2404   const bool is_running_on_memory_tool_;
2405 
2406   void AddBin(size_t size, uintptr_t position) {
2407     if (is_running_on_memory_tool_) {
2408       MEMORY_TOOL_MAKE_DEFINED(reinterpret_cast<void*>(position), size);
2409     }
2410     if (size != 0) {
2411       bins_.insert(std::make_pair(size, position));
2412     }
2413   }
2414 
2415   bool ShouldSweepSpace([[maybe_unused]] space::ContinuousSpace* space) const override {
2416     // Don't sweep any spaces since we probably blasted the internal accounting of the free list
2417     // allocator.
2418     return false;
2419   }
2420 
2421   mirror::Object* MarkNonForwardedObject(mirror::Object* obj) override
2422       REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
2423     size_t obj_size = obj->SizeOf<kDefaultVerifyFlags>();
2424     size_t alloc_size = RoundUp(obj_size, kObjectAlignment);
2425     mirror::Object* forward_address;
2426     // Find the smallest bin into which obj fits.
2427     auto it = bins_.lower_bound(alloc_size);
2428     if (it == bins_.end()) {
2429       // No available space in the bins, place it in the target space instead (grows the zygote
2430       // space).
2431       size_t bytes_allocated, unused_bytes_tl_bulk_allocated;
2432       forward_address = to_space_->Alloc(
2433           self_, alloc_size, &bytes_allocated, nullptr, &unused_bytes_tl_bulk_allocated);
2434       if (to_space_live_bitmap_ != nullptr) {
2435         to_space_live_bitmap_->Set(forward_address);
2436       } else {
2437         GetHeap()->GetNonMovingSpace()->GetLiveBitmap()->Set(forward_address);
2438         GetHeap()->GetNonMovingSpace()->GetMarkBitmap()->Set(forward_address);
2439       }
2440     } else {
2441       size_t size = it->first;
2442       uintptr_t pos = it->second;
2443       bins_.erase(it);  // Erase the old bin which we replace with the new smaller bin.
2444       forward_address = reinterpret_cast<mirror::Object*>(pos);
2445       // Set the live and mark bits so that sweeping system weaks works properly.
2446       bin_live_bitmap_->Set(forward_address);
2447       bin_mark_bitmap_->Set(forward_address);
2448       DCHECK_GE(size, alloc_size);
2449       // Add a new bin with the remaining space.
2450       AddBin(size - alloc_size, pos + alloc_size);
2451     }
2452     // Copy the object over to its new location.
2453     // Historical note: We did not use `alloc_size` to avoid a Valgrind error.
2454     memcpy(reinterpret_cast<void*>(forward_address), obj, obj_size);
2455     if (kUseBakerReadBarrier) {
2456       obj->AssertReadBarrierState();
2457       forward_address->AssertReadBarrierState();
2458     }
2459     return forward_address;
2460   }
2461 };
2462 
2463 void Heap::UnBindBitmaps() {
2464   TimingLogger::ScopedTiming t("UnBindBitmaps", GetCurrentGcIteration()->GetTimings());
2465   for (const auto& space : GetContinuousSpaces()) {
2466     if (space->IsContinuousMemMapAllocSpace()) {
2467       space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
2468       if (alloc_space->GetLiveBitmap() != nullptr && alloc_space->HasBoundBitmaps()) {
2469         alloc_space->UnBindBitmaps();
2470       }
2471     }
2472   }
2473 }
2474 
2475 void Heap::IncrementFreedEver() {
2476   // Counters are updated only by us, but may be read concurrently.
2477   // The updates should become visible after the corresponding live object info.
2478   total_objects_freed_ever_.store(total_objects_freed_ever_.load(std::memory_order_relaxed)
2479                                   + GetCurrentGcIteration()->GetFreedObjects()
2480                                   + GetCurrentGcIteration()->GetFreedLargeObjects(),
2481                                   std::memory_order_release);
2482   total_bytes_freed_ever_.store(total_bytes_freed_ever_.load(std::memory_order_relaxed)
2483                                 + GetCurrentGcIteration()->GetFreedBytes()
2484                                 + GetCurrentGcIteration()->GetFreedLargeObjectBytes(),
2485                                 std::memory_order_release);
2486 }
2487 
2488 #pragma clang diagnostic push
2489 #if !ART_USE_FUTEXES
2490 // Frame gets too large, perhaps due to Bionic pthread_mutex_lock size. We don't care.
2491 #  pragma clang diagnostic ignored "-Wframe-larger-than="
2492 #endif
2493 // This has a large frame, but shouldn't be run anywhere near the stack limit.
2494 // FIXME: BUT it did exceed... http://b/197647048
2495 #  pragma clang diagnostic ignored "-Wframe-larger-than="
2496 void Heap::PreZygoteFork() {
2497   if (!HasZygoteSpace()) {
2498     // We still want to GC in case there are some unreachable non-moving objects that could cause
2499     // suboptimal bin packing when we compact the zygote space.
2500     CollectGarbageInternal(collector::kGcTypeFull, kGcCauseBackground, false, GC_NUM_ANY);
2501     // Trim the pages at the end of the non-moving space. Trim while not holding the zygote lock,
2502     // since the trim process may require locking the mutator lock.
2503     non_moving_space_->Trim();
2504   }
2505   // We need to close userfaultfd fd for app/webview zygotes to avoid getattr
2506   // (stat) on the fd during fork.
2507   Thread* self = Thread::Current();
2508   MutexLock mu(self, zygote_creation_lock_);
2509   // Try to see if we have any Zygote spaces.
2510   if (HasZygoteSpace()) {
2511     return;
2512   }
2513   Runtime* runtime = Runtime::Current();
2514   // Setup linear-alloc pool for post-zygote fork allocations before freezing
2515   // snapshots of intern-table and class-table.
2516   runtime->SetupLinearAllocForPostZygoteFork(self);
2517   runtime->GetInternTable()->AddNewTable();
2518   runtime->GetClassLinker()->MoveClassTableToPreZygote();
2519   VLOG(heap) << "Starting PreZygoteFork";
2520   // The end of the non-moving space may be protected, unprotect it so that we can copy the zygote
2521   // there.
2522   non_moving_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2523   const bool same_space = non_moving_space_ == main_space_;
2524   if (kCompactZygote) {
2525     // Temporarily disable rosalloc verification because the zygote
2526     // compaction will mess up the rosalloc internal metadata.
2527     ScopedDisableRosAllocVerification disable_rosalloc_verif(this);
2528     ZygoteCompactingCollector zygote_collector(this, is_running_on_memory_tool_);
2529     zygote_collector.BuildBins(non_moving_space_);
2530     // Create a new bump pointer space which we will compact into.
2531     space::BumpPointerSpace target_space("zygote bump space", non_moving_space_->End(),
2532                                          non_moving_space_->Limit());
2533     // Compact the bump pointer space to a new zygote bump pointer space.
2534     bool reset_main_space = false;
2535     if (IsMovingGc(collector_type_)) {
2536       if (collector_type_ == kCollectorTypeCC) {
2537         zygote_collector.SetFromSpace(region_space_);
2538       } else {
2539         zygote_collector.SetFromSpace(bump_pointer_space_);
2540       }
2541     } else {
2542       CHECK(main_space_ != nullptr);
2543       CHECK_NE(main_space_, non_moving_space_)
2544           << "Does not make sense to compact within the same space";
2545       // Copy from the main space.
2546       zygote_collector.SetFromSpace(main_space_);
2547       reset_main_space = true;
2548     }
2549     zygote_collector.SetToSpace(&target_space);
2550     zygote_collector.SetSwapSemiSpaces(false);
2551     zygote_collector.Run(kGcCauseCollectorTransition, false);
2552     if (reset_main_space) {
2553       main_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2554       madvise(main_space_->Begin(), main_space_->Capacity(), MADV_DONTNEED);
2555       MemMap mem_map = main_space_->ReleaseMemMap();
2556       RemoveSpace(main_space_);
2557       space::Space* old_main_space = main_space_;
2558       CreateMainMallocSpace(std::move(mem_map),
2559                             kDefaultInitialSize,
2560                             std::min(mem_map.Size(), growth_limit_),
2561                             mem_map.Size());
2562       delete old_main_space;
2563       AddSpace(main_space_);
2564     } else {
2565       if (collector_type_ == kCollectorTypeCC) {
2566         region_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2567         // Evacuated everything out of the region space, clear the mark bitmap.
2568         region_space_->GetMarkBitmap()->Clear();
2569       } else {
2570         bump_pointer_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2571       }
2572     }
2573     if (temp_space_ != nullptr) {
2574       CHECK(temp_space_->IsEmpty());
2575     }
2576     IncrementFreedEver();
2577     // Update the end and write out image.
2578     non_moving_space_->SetEnd(target_space.End());
2579     non_moving_space_->SetLimit(target_space.Limit());
2580     VLOG(heap) << "Create zygote space with size=" << non_moving_space_->Size() << " bytes";
2581   }
2582   // Change the collector to the post zygote one.
2583   ChangeCollector(foreground_collector_type_);
2584   // Save the old space so that we can remove it after we complete creating the zygote space.
2585   space::MallocSpace* old_alloc_space = non_moving_space_;
2586   // Turn the current alloc space into a zygote space and obtain the new alloc space composed of
2587   // the remaining available space.
2588   // Remove the old space before creating the zygote space since creating the zygote space sets
2589   // the old alloc space's bitmaps to null.
2590   RemoveSpace(old_alloc_space);
2591   if (collector::SemiSpace::kUseRememberedSet) {
2592     // Consistency bound check.
2593     FindRememberedSetFromSpace(old_alloc_space)->AssertAllDirtyCardsAreWithinSpace();
2594     // Remove the remembered set for the now zygote space (the old
2595     // non-moving space). Note now that we have compacted objects into
2596     // the zygote space, the data in the remembered set is no longer
2597     // needed. The zygote space will instead have a mod-union table
2598     // from this point on.
2599     RemoveRememberedSet(old_alloc_space);
2600   }
2601   // Remaining space becomes the new non moving space.
2602   zygote_space_ = old_alloc_space->CreateZygoteSpace(kNonMovingSpaceName, low_memory_mode_,
2603                                                      &non_moving_space_);
2604   CHECK(!non_moving_space_->CanMoveObjects());
2605   if (same_space) {
2606     main_space_ = non_moving_space_;
2607     SetSpaceAsDefault(main_space_);
2608   }
2609   delete old_alloc_space;
2610   CHECK(HasZygoteSpace()) << "Failed creating zygote space";
2611   AddSpace(zygote_space_);
2612   non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
2613   AddSpace(non_moving_space_);
2614   constexpr bool set_mark_bit = kUseBakerReadBarrier
2615                                 && gc::collector::ConcurrentCopying::kGrayDirtyImmuneObjects;
2616   if (set_mark_bit) {
2617     // Treat all of the objects in the zygote as marked to avoid unnecessary dirty pages. This is
2618     // safe since we mark all of the objects that may reference non immune objects as gray.
2619     zygote_space_->SetMarkBitInLiveObjects();
2620   }
2621 
2622   // Create the zygote space mod union table.
2623   accounting::ModUnionTable* mod_union_table =
2624       new accounting::ModUnionTableCardCache("zygote space mod-union table", this, zygote_space_);
2625   CHECK(mod_union_table != nullptr) << "Failed to create zygote space mod-union table";
2626 
2627   if (collector_type_ != kCollectorTypeCC && collector_type_ != kCollectorTypeCMC) {
2628     // Set all the cards in the mod-union table since we don't know which objects contain references
2629     // to large objects.
2630     mod_union_table->SetCards();
2631   } else {
2632     // Make sure to clear the zygote space cards so that we don't dirty pages in the next GC. There
2633     // may be dirty cards from the zygote compaction or reference processing. These cards are not
2634     // necessary to have marked since the zygote space may not refer to any objects not in the
2635     // zygote or image spaces at this point.
2636     mod_union_table->ProcessCards();
2637     mod_union_table->ClearTable();
2638 
2639     // For CC and CMC we never collect zygote large objects. This means we do not need to set the
2640     // cards for the zygote mod-union table and we can also clear all of the existing image
2641     // mod-union tables. The existing mod-union tables are only for image spaces and may only
2642     // reference zygote and image objects.
2643     for (auto& pair : mod_union_tables_) {
2644       CHECK(pair.first->IsImageSpace());
2645       CHECK(!pair.first->AsImageSpace()->GetImageHeader().IsAppImage());
2646       accounting::ModUnionTable* table = pair.second;
2647       table->ClearTable();
2648     }
2649   }
2650   AddModUnionTable(mod_union_table);
2651   large_object_space_->SetAllLargeObjectsAsZygoteObjects(self, set_mark_bit);
2652   if (collector::SemiSpace::kUseRememberedSet) {
2653     // Add a new remembered set for the post-zygote non-moving space.
2654     accounting::RememberedSet* post_zygote_non_moving_space_rem_set =
2655         new accounting::RememberedSet("Post-zygote non-moving space remembered set", this,
2656                                       non_moving_space_);
2657     CHECK(post_zygote_non_moving_space_rem_set != nullptr)
2658         << "Failed to create post-zygote non-moving space remembered set";
2659     AddRememberedSet(post_zygote_non_moving_space_rem_set);
2660   }
2661 }
2662 #pragma clang diagnostic pop
2663 
2664 void Heap::FlushAllocStack() {
2665   MarkAllocStackAsLive(allocation_stack_.get());
2666   allocation_stack_->Reset();
2667 }
2668 
2669 void Heap::MarkAllocStack(accounting::ContinuousSpaceBitmap* bitmap1,
2670                           accounting::ContinuousSpaceBitmap* bitmap2,
2671                           accounting::LargeObjectBitmap* large_objects,
2672                           accounting::ObjectStack* stack) {
2673   DCHECK(bitmap1 != nullptr);
2674   DCHECK(bitmap2 != nullptr);
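  // Objects in the stack that are not covered by either continuous-space bitmap are assumed to
  // be large objects and are marked in the large object bitmap instead.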
2675   const auto* limit = stack->End();
2676   for (auto* it = stack->Begin(); it != limit; ++it) {
2677     const mirror::Object* obj = it->AsMirrorPtr();
2678     if (!kUseThreadLocalAllocationStack || obj != nullptr) {
2679       if (bitmap1->HasAddress(obj)) {
2680         bitmap1->Set(obj);
2681       } else if (bitmap2->HasAddress(obj)) {
2682         bitmap2->Set(obj);
2683       } else {
2684         DCHECK(large_objects != nullptr);
2685         large_objects->Set(obj);
2686       }
2687     }
2688   }
2689 }
2690 
2691 void Heap::SwapSemiSpaces() {
2692   CHECK(bump_pointer_space_ != nullptr);
2693   CHECK(temp_space_ != nullptr);
2694   std::swap(bump_pointer_space_, temp_space_);
2695 }
2696 
2697 collector::GarbageCollector* Heap::Compact(space::ContinuousMemMapAllocSpace* target_space,
2698                                            space::ContinuousMemMapAllocSpace* source_space,
2699                                            GcCause gc_cause) {
2700   CHECK(kMovingCollector);
2701   if (target_space != source_space) {
2702     // Don't swap spaces since this isn't a typical semi space collection.
2703     semi_space_collector_->SetSwapSemiSpaces(false);
2704     semi_space_collector_->SetFromSpace(source_space);
2705     semi_space_collector_->SetToSpace(target_space);
2706     semi_space_collector_->Run(gc_cause, false);
2707     return semi_space_collector_;
2708   }
2709   LOG(FATAL) << "Unsupported";
2710   UNREACHABLE();
2711 }
2712 
2713 void Heap::TraceHeapSize(size_t heap_size) {
2714   ATraceIntegerValue("Heap size (KB)", heap_size / KB);
2715 }
2716 
2717 #if defined(__GLIBC__)
2718 # define IF_GLIBC(x) x
2719 #else
2720 # define IF_GLIBC(x)
2721 #endif
2722 
2723 size_t Heap::GetNativeBytes() {
2724   size_t malloc_bytes;
2725 #if defined(__BIONIC__) || defined(__GLIBC__) || defined(ANDROID_HOST_MUSL)
2726   IF_GLIBC(size_t mmapped_bytes;)
2727   struct mallinfo mi = mallinfo();
2728   // In spite of the documentation, the jemalloc version of this call seems to do what we want,
2729   // and it is thread-safe.
2730   if (sizeof(size_t) > sizeof(mi.uordblks) && sizeof(size_t) > sizeof(mi.hblkhd)) {
2731     // Shouldn't happen, but glibc declares uordblks as int.
2732     // Avoiding sign extension gets us correct behavior for another 2 GB.
2733     malloc_bytes = (unsigned int)mi.uordblks;
2734     IF_GLIBC(mmapped_bytes = (unsigned int)mi.hblkhd;)
2735   } else {
2736     malloc_bytes = mi.uordblks;
2737     IF_GLIBC(mmapped_bytes = mi.hblkhd;)
2738   }
2739   // From the spec, it appeared mmapped_bytes <= malloc_bytes. Reality was sometimes
2740   // dramatically different. (b/119580449 was an early bug.) If so, we try to fudge it.
2741   // However, malloc implementations seem to interpret hblkhd differently, namely as
2742   // mapped blocks backing the entire heap (e.g. jemalloc) vs. large objects directly
2743   // allocated via mmap (e.g. glibc). Thus we now only do this for glibc, where it
2744   // previously helped, and which appears to use a reading of the spec compatible
2745   // with our adjustment.
2746 #if defined(__GLIBC__)
2747   if (mmapped_bytes > malloc_bytes) {
2748     malloc_bytes = mmapped_bytes;
2749   }
2750 #endif  // GLIBC
2751 #else  // Neither Bionic nor Glibc
2752   // We should hit this case only in contexts in which GC triggering is not critical. Effectively
2753   // disable GC triggering based on malloc().
2754   malloc_bytes = 1000;
2755 #endif
2756   return malloc_bytes + native_bytes_registered_.load(std::memory_order_relaxed);
2757   // An alternative would be to get RSS from /proc/self/statm. Empirically, that's no
2758   // more expensive, and it would allow us to count memory allocated by means other than malloc.
2759   // However it would change as pages are unmapped and remapped due to memory pressure, among
2760   // other things. It seems risky to trigger GCs as a result of such changes.
2761 }
2762 
2763 static inline bool GCNumberLt(uint32_t gc_num1, uint32_t gc_num2) {
2764   // Unsigned comparison, assuming a non-huge difference, but dealing correctly with wrapping.
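  // E.g. GCNumberLt(5, 7) and GCNumberLt(0xffffffff, 2) are true (the difference wraps to 3),
  // while GCNumberLt(7, 5) is false because the unsigned difference exceeds 0x80000000.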
2765   uint32_t difference = gc_num2 - gc_num1;
2766   bool completed_more_than_requested = difference > 0x80000000;
2767   return difference > 0 && !completed_more_than_requested;
2768 }
2769 
2770 
2771 collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type,
2772                                                GcCause gc_cause,
2773                                                bool clear_soft_references,
2774                                                uint32_t requested_gc_num) {
2775   Thread* self = Thread::Current();
2776   Runtime* runtime = Runtime::Current();
2777   // If the heap can't run the GC, silently fail and return that no GC was run.
2778   switch (gc_type) {
2779     case collector::kGcTypePartial: {
2780       if (!HasZygoteSpace()) {
2781         // Do not increment gcs_completed_. We should retry with kGcTypeFull.
2782         return collector::kGcTypeNone;
2783       }
2784       break;
2785     }
2786     default: {
2787       // Other GC types don't have any special cases that make them unrunnable. The main case
2788       // here is full GC.
2789     }
2790   }
2791   ScopedThreadStateChange tsc(self, ThreadState::kWaitingPerformingGc);
2792   Locks::mutator_lock_->AssertNotHeld(self);
2793   SelfDeletingTask* clear;  // Unconditionally set below.
2794   {
2795     // We should not ever become runnable and re-suspend while executing a GC.
2796     // This would likely cause a deadlock if we acted on a suspension request.
2797     // TODO: We really want to assert that we don't transition to kRunnable.
2798     ScopedAssertNoThreadSuspension scoped_assert("Performing GC");
2799     if (self->IsHandlingStackOverflow<kNativeStackType>()) {
2800       // If we are throwing a stack overflow error we probably don't have enough remaining stack
2801       // space to run the GC. Note: we only care if the native stack has overflowed. If the
2802       // simulated stack overflows it is still possible that the native stack has room to run the
2803       // GC.
2804 
2805       // Count this as a GC in case someone is waiting for it to complete.
2806       gcs_completed_.fetch_add(1, std::memory_order_release);
2807       return collector::kGcTypeNone;
2808     }
2809     bool compacting_gc;
2810     {
2811       gc_complete_lock_->AssertNotHeld(self);
2812       // Already not runnable; just switch suspended states. We remain in a suspended state until
2813       // FinishGC(). This avoids the complicated dance in StartGC().
2814       ScopedThreadStateChange tsc2(self, ThreadState::kWaitingForGcToComplete);
2815       MutexLock mu(self, *gc_complete_lock_);
2816       // Ensure there is only one GC at a time.
2817       WaitForGcToCompleteLocked(gc_cause, self);
2818       if (requested_gc_num != GC_NUM_ANY && !GCNumberLt(GetCurrentGcNum(), requested_gc_num)) {
2819         // The appropriate GC was already triggered elsewhere.
2820         return collector::kGcTypeNone;
2821       }
2822       compacting_gc = IsMovingGc(collector_type_);
2823       // GC can be disabled if someone has used GetPrimitiveArrayCritical.
2824       if (compacting_gc && disable_moving_gc_count_ != 0) {
2825         LOG(WARNING) << "Skipping GC due to disable moving GC count " << disable_moving_gc_count_;
2826         // Again count this as a GC.
2827         gcs_completed_.fetch_add(1, std::memory_order_release);
2828         return collector::kGcTypeNone;
2829       }
2830       if (gc_disabled_for_shutdown_) {
2831         gcs_completed_.fetch_add(1, std::memory_order_release);
2832         return collector::kGcTypeNone;
2833       }
2834       collector_type_running_ = collector_type_;
2835       last_gc_cause_ = gc_cause;
2836     }
2837     if (gc_cause == kGcCauseForAlloc && runtime->HasStatsEnabled()) {
2838       ++runtime->GetStats()->gc_for_alloc_count;
2839       ++self->GetStats()->gc_for_alloc_count;
2840     }
2841     const size_t bytes_allocated_before_gc = GetBytesAllocated();
2842 
2843     DCHECK_LT(gc_type, collector::kGcTypeMax);
2844     DCHECK_NE(gc_type, collector::kGcTypeNone);
2845 
2846     collector::GarbageCollector* collector = nullptr;
2847     // TODO: Clean this up.
2848     if (compacting_gc) {
2849       DCHECK(current_allocator_ == kAllocatorTypeBumpPointer ||
2850              current_allocator_ == kAllocatorTypeTLAB ||
2851              current_allocator_ == kAllocatorTypeRegion ||
2852              current_allocator_ == kAllocatorTypeRegionTLAB);
2853       switch (collector_type_) {
2854         case kCollectorTypeSS:
2855           semi_space_collector_->SetFromSpace(bump_pointer_space_);
2856           semi_space_collector_->SetToSpace(temp_space_);
2857           semi_space_collector_->SetSwapSemiSpaces(true);
2858           collector = semi_space_collector_;
2859           break;
2860         case kCollectorTypeCMC:
2861           collector = mark_compact_;
2862           break;
2863         case kCollectorTypeCC:
2864           collector::ConcurrentCopying* active_cc_collector;
2865           if (use_generational_cc_) {
2866             // TODO: Other threads must do the flip checkpoint before they start poking at
2867             // active_concurrent_copying_collector_. So there should be no concurrency here.
2868             active_cc_collector = (gc_type == collector::kGcTypeSticky) ?
2869                                       young_concurrent_copying_collector_ :
2870                                       concurrent_copying_collector_;
2871             active_concurrent_copying_collector_.store(active_cc_collector,
2872                                                        std::memory_order_relaxed);
2873             DCHECK(active_cc_collector->RegionSpace() == region_space_);
2874             collector = active_cc_collector;
2875           } else {
2876             collector = active_concurrent_copying_collector_.load(std::memory_order_relaxed);
2877           }
2878           break;
2879         default:
2880           LOG(FATAL) << "Invalid collector type " << static_cast<size_t>(collector_type_);
2881       }
2882       // temp_space_ will be null for kCollectorTypeCMC.
2883       if (temp_space_ != nullptr &&
2884           collector != active_concurrent_copying_collector_.load(std::memory_order_relaxed)) {
2885         temp_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2886         if (kIsDebugBuild) {
2887           // Try to read each page of the memory map in case mprotect didn't work properly
2888           // b/19894268.
2889           temp_space_->GetMemMap()->TryReadable();
2890         }
2891         CHECK(temp_space_->IsEmpty());
2892       }
2893     } else if (current_allocator_ == kAllocatorTypeRosAlloc ||
2894                current_allocator_ == kAllocatorTypeDlMalloc) {
2895       collector = FindCollectorByGcType(gc_type);
2896     } else {
2897       LOG(FATAL) << "Invalid current allocator " << current_allocator_;
2898     }
2899 
2900     CHECK(collector != nullptr) << "Could not find garbage collector with collector_type="
2901                                 << static_cast<size_t>(collector_type_)
2902                                 << " and gc_type=" << gc_type;
2903     collector->Run(gc_cause, clear_soft_references || runtime->IsZygote());
2904     IncrementFreedEver();
2905     RequestTrim(self);
2906     // Collect cleared references.
2907     clear = reference_processor_->CollectClearedReferences(self);
2908     // Grow the heap so that we know when to perform the next GC.
2909     GrowForUtilization(collector, bytes_allocated_before_gc);
2910     old_native_bytes_allocated_.store(GetNativeBytes());
2911     LogGC(gc_cause, collector);
2912     FinishGC(self, gc_type);
2913     // We're suspended up to this point.
2914   }
2915   // Actually enqueue all cleared references. Do this after the GC has officially finished since
2916   // otherwise we can deadlock.
2917   clear->Run(self);
2918   clear->Finalize();
2919   // Inform DDMS that a GC completed.
2920   Dbg::GcDidFinish();
2921 
2922   // Unload native libraries for class unloading. We do this after calling FinishGC to prevent
2923   // deadlocks in case the JNI_OnUnload function does allocations.
2924   {
2925     ScopedObjectAccess soa(self);
2926     soa.Vm()->UnloadNativeLibraries();
2927   }
2928   return gc_type;
2929 }
2930 
2931 void Heap::LogGC(GcCause gc_cause, collector::GarbageCollector* collector) {
2932   const size_t duration = GetCurrentGcIteration()->GetDurationNs();
2933   const std::vector<uint64_t>& pause_times = GetCurrentGcIteration()->GetPauseTimes();
2934   // Print the GC if it is an explicit GC (e.g. Runtime.gc()) or a slow GC
2935   // (mutator time blocked >= long_pause_log_threshold_).
2936   bool log_gc = kLogAllGCs || (gc_cause == kGcCauseExplicit && always_log_explicit_gcs_);
2937   if (!log_gc && CareAboutPauseTimes()) {
2938     // GC for alloc pauses the allocating thread, so consider it as a pause.
2939     log_gc = duration > long_gc_log_threshold_ ||
2940         (gc_cause == kGcCauseForAlloc && duration > long_pause_log_threshold_);
2941     for (uint64_t pause : pause_times) {
2942       log_gc = log_gc || pause >= long_pause_log_threshold_;
2943     }
2944   }
2945   bool is_sampled = false;
2946   if (UNLIKELY(gc_stress_mode_)) {
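    // In GC stress mode, sample roughly one logged GC per kGcStressModeGcLogSampleFrequencyNs of
    // accumulated GC duration rather than logging every collection.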
2947     static std::atomic_int64_t accumulated_duration_ns = 0;
2948     accumulated_duration_ns += duration;
2949     if (accumulated_duration_ns >= kGcStressModeGcLogSampleFrequencyNs) {
2950       accumulated_duration_ns -= kGcStressModeGcLogSampleFrequencyNs;
2951       log_gc = true;
2952       is_sampled = true;
2953     }
2954   }
2955   if (log_gc) {
2956     const size_t percent_free = GetPercentFree();
2957     const size_t current_heap_size = GetBytesAllocated();
2958     const size_t total_memory = GetTotalMemory();
2959     std::ostringstream pause_string;
2960     for (size_t i = 0; i < pause_times.size(); ++i) {
2961       pause_string << PrettyDuration((pause_times[i] / 1000) * 1000)
2962                    << ((i != pause_times.size() - 1) ? "," : "");
2963     }
2964     LOG(INFO) << gc_cause << " " << collector->GetName()
2965               << (is_sampled ? " (sampled)" : "")
2966               << " GC freed "
2967               << PrettySize(current_gc_iteration_.GetFreedBytes()) << " AllocSpace bytes, "
2968               << current_gc_iteration_.GetFreedLargeObjects() << "("
2969               << PrettySize(current_gc_iteration_.GetFreedLargeObjectBytes()) << ") LOS objects, "
2970               << percent_free << "% free, " << PrettySize(current_heap_size) << "/"
2971               << PrettySize(total_memory) << ", " << "paused " << pause_string.str()
2972               << " total " << PrettyDuration((duration / 1000) * 1000);
2973     VLOG(heap) << Dumpable<TimingLogger>(*current_gc_iteration_.GetTimings());
2974   }
2975 }
2976 
2977 void Heap::FinishGC(Thread* self, collector::GcType gc_type) {
2978   MutexLock mu(self, *gc_complete_lock_);
2979   collector_type_running_ = kCollectorTypeNone;
2980   if (gc_type != collector::kGcTypeNone) {
2981     last_gc_type_ = gc_type;
2982 
2983     // Update stats.
2984     ++gc_count_last_window_;
2985     if (running_collection_is_blocking_) {
2986       // If the currently running collection was a blocking one,
2987       // increment the counters and reset the flag.
2988       ++blocking_gc_count_;
2989       blocking_gc_time_ += GetCurrentGcIteration()->GetDurationNs();
2990       ++blocking_gc_count_last_window_;
2991     }
2992     // Update the gc count rate histograms if due.
2993     UpdateGcCountRateHistograms();
2994   }
2995   // Reset.
2996   running_collection_is_blocking_ = false;
2997   thread_running_gc_ = nullptr;
2998   if (gc_type != collector::kGcTypeNone) {
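    // Publish the incremented completed-GC count; callers that passed a specific requested_gc_num
    // (compared via GCNumberLt in CollectGarbageInternal) observe completion through this counter.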
2999     gcs_completed_.fetch_add(1, std::memory_order_release);
3000   }
3001   // Wake anyone who may have been waiting for the GC to complete.
3002   gc_complete_cond_->Broadcast(self);
3003 }
3004 
3005 void Heap::UpdateGcCountRateHistograms() {
3006   // Invariant: if the time since the last update spans more than
3007   // one window, all the GC runs (if > 0) must have happened in the first
3008   // window, because otherwise the update would already have taken place
3009   // at an earlier GC run. So, we report the non-first windows with
3010   // zero counts to the histograms.
3011   DCHECK_EQ(last_update_time_gc_count_rate_histograms_ % kGcCountRateHistogramWindowDuration, 0U);
3012   uint64_t now = NanoTime();
3013   DCHECK_GE(now, last_update_time_gc_count_rate_histograms_);
3014   uint64_t time_since_last_update = now - last_update_time_gc_count_rate_histograms_;
3015   uint64_t num_of_windows = time_since_last_update / kGcCountRateHistogramWindowDuration;
3016 
3017   // The computed number of windows can be incoherently high if NanoTime() is not monotonic.
3018   // Setting a limit on its maximum value reduces the impact on CPU time in such cases.
3019   if (num_of_windows > kGcCountRateHistogramMaxNumMissedWindows) {
3020     LOG(WARNING) << "Reducing the number of considered missed Gc histogram windows from "
3021                  << num_of_windows << " to " << kGcCountRateHistogramMaxNumMissedWindows;
3022     num_of_windows = kGcCountRateHistogramMaxNumMissedWindows;
3023   }
3024 
3025   if (time_since_last_update >= kGcCountRateHistogramWindowDuration) {
3026     // Record the first window.
3027     gc_count_rate_histogram_.AddValue(gc_count_last_window_ - 1);  // Exclude the current run.
3028     blocking_gc_count_rate_histogram_.AddValue(running_collection_is_blocking_ ?
3029         blocking_gc_count_last_window_ - 1 : blocking_gc_count_last_window_);
3030     // Record the other windows (with zero counts).
3031     for (uint64_t i = 0; i < num_of_windows - 1; ++i) {
3032       gc_count_rate_histogram_.AddValue(0);
3033       blocking_gc_count_rate_histogram_.AddValue(0);
3034     }
3035     // Update the last update time and reset the counters.
3036     last_update_time_gc_count_rate_histograms_ =
3037         (now / kGcCountRateHistogramWindowDuration) * kGcCountRateHistogramWindowDuration;
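    // Rounding down to a window boundary preserves the invariant (checked by the DCHECKs above
    // and at the end of this function) that the timestamp is a multiple of the window duration.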
3038     gc_count_last_window_ = 1;  // Include the current run.
3039     blocking_gc_count_last_window_ = running_collection_is_blocking_ ? 1 : 0;
3040   }
3041   DCHECK_EQ(last_update_time_gc_count_rate_histograms_ % kGcCountRateHistogramWindowDuration, 0U);
3042 }
3043 
3044 class RootMatchesObjectVisitor : public SingleRootVisitor {
3045  public:
3046   explicit RootMatchesObjectVisitor(const mirror::Object* obj) : obj_(obj) { }
3047 
3048   void VisitRoot(mirror::Object* root, const RootInfo& info)
3049       override REQUIRES_SHARED(Locks::mutator_lock_) {
3050     if (root == obj_) {
3051       LOG(INFO) << "Object " << obj_ << " is a root " << info.ToString();
3052     }
3053   }
3054 
3055  private:
3056   const mirror::Object* const obj_;
3057 };
3058 
3059 
3060 class ScanVisitor {
3061  public:
3062   void operator()(const mirror::Object* obj) const {
3063     LOG(ERROR) << "Would have rescanned object " << obj;
3064   }
3065 };
3066 
3067 // Verify a reference from an object.
3068 class VerifyReferenceVisitor : public SingleRootVisitor {
3069  public:
3070   VerifyReferenceVisitor(Thread* self, Heap* heap, size_t* fail_count, bool verify_referent)
3071       REQUIRES_SHARED(Locks::mutator_lock_)
3072       : self_(self), heap_(heap), fail_count_(fail_count), verify_referent_(verify_referent) {
3073     CHECK_EQ(self_, Thread::Current());
3074   }
3075 
3076   void operator()([[maybe_unused]] ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref) const
3077       REQUIRES_SHARED(Locks::mutator_lock_) {
3078     if (verify_referent_) {
3079       VerifyReference(ref.Ptr(), ref->GetReferent(), mirror::Reference::ReferentOffset());
3080     }
3081   }
3082 
3083   void operator()(ObjPtr<mirror::Object> obj,
3084                   MemberOffset offset,
3085                   [[maybe_unused]] bool is_static) const REQUIRES_SHARED(Locks::mutator_lock_) {
3086     VerifyReference(obj.Ptr(), obj->GetFieldObject<mirror::Object>(offset), offset);
3087   }
3088 
3089   bool IsLive(ObjPtr<mirror::Object> obj) const NO_THREAD_SAFETY_ANALYSIS {
3090     return heap_->IsLiveObjectLocked(obj, true, false, true);
3091   }
3092 
3093   void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
3094       REQUIRES_SHARED(Locks::mutator_lock_) {
3095     if (!root->IsNull()) {
3096       VisitRoot(root);
3097     }
3098   }
3099   void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
3100       REQUIRES_SHARED(Locks::mutator_lock_) {
3101     const_cast<VerifyReferenceVisitor*>(this)->VisitRoot(
3102         root->AsMirrorPtr(), RootInfo(kRootVMInternal));
3103   }
3104 
3105   void VisitRoot(mirror::Object* root, const RootInfo& root_info) override
3106       REQUIRES_SHARED(Locks::mutator_lock_) {
3107     if (root == nullptr) {
3108       LOG(ERROR) << "Root is null with info " << root_info.GetType();
3109     } else if (!VerifyReference(nullptr, root, MemberOffset(0))) {
3110       LOG(ERROR) << "Root " << root << " is dead with type " << mirror::Object::PrettyTypeOf(root)
3111           << " thread_id= " << root_info.GetThreadId() << " root_type= " << root_info.GetType();
3112     }
3113   }
3114 
3115  private:
3116   // TODO: Fix the no thread safety analysis.
3117   // Returns false on failure.
3118   bool VerifyReference(mirror::Object* obj, mirror::Object* ref, MemberOffset offset) const
3119       NO_THREAD_SAFETY_ANALYSIS {
3120     if (ref == nullptr || IsLive(ref)) {
3121       // Verify that the reference is live.
3122       return true;
3123     }
3124     CHECK_EQ(self_, Thread::Current());  // fail_count_ is private to the calling thread.
3125     *fail_count_ += 1;
3126     if (*fail_count_ == 1) {
3127       // Only print message for the first failure to prevent spam.
3128       LOG(ERROR) << "!!!!!!!!!!!!!!Heap corruption detected!!!!!!!!!!!!!!!!!!!";
3129     }
3130     if (obj != nullptr) {
3131       // Only do this part for non roots.
3132       accounting::CardTable* card_table = heap_->GetCardTable();
3133       accounting::ObjectStack* alloc_stack = heap_->allocation_stack_.get();
3134       accounting::ObjectStack* live_stack = heap_->live_stack_.get();
3135       uint8_t* card_addr = card_table->CardFromAddr(obj);
3136       LOG(ERROR) << "Object " << obj << " references dead object " << ref << " at offset "
3137                  << offset << "\n card value = " << static_cast<int>(*card_addr);
3138       if (heap_->IsValidObjectAddress(obj->GetClass())) {
3139         LOG(ERROR) << "Obj type " << obj->PrettyTypeOf();
3140       } else {
3141         LOG(ERROR) << "Object " << obj << " class(" << obj->GetClass() << ") not a heap address";
3142       }
3143 
3144       // Attempt to find the class inside of the recently freed objects.
3145       space::ContinuousSpace* ref_space = heap_->FindContinuousSpaceFromObject(ref, true);
3146       if (ref_space != nullptr && ref_space->IsMallocSpace()) {
3147         space::MallocSpace* space = ref_space->AsMallocSpace();
3148         mirror::Class* ref_class = space->FindRecentFreedObject(ref);
3149         if (ref_class != nullptr) {
3150           LOG(ERROR) << "Reference " << ref << " found as a recently freed object with class "
3151                      << ref_class->PrettyClass();
3152         } else {
3153           LOG(ERROR) << "Reference " << ref << " not found as a recently freed object";
3154         }
3155       }
3156 
3157       if (ref->GetClass() != nullptr && heap_->IsValidObjectAddress(ref->GetClass()) &&
3158           ref->GetClass()->IsClass()) {
3159         LOG(ERROR) << "Ref type " << ref->PrettyTypeOf();
3160       } else {
3161         LOG(ERROR) << "Ref " << ref << " class(" << ref->GetClass()
3162                    << ") is not a valid heap address";
3163       }
3164 
3165       card_table->CheckAddrIsInCardTable(reinterpret_cast<const uint8_t*>(obj));
3166       void* cover_begin = card_table->AddrFromCard(card_addr);
3167       void* cover_end = reinterpret_cast<void*>(reinterpret_cast<size_t>(cover_begin) +
3168           accounting::CardTable::kCardSize);
3169       LOG(ERROR) << "Card " << reinterpret_cast<void*>(card_addr) << " covers " << cover_begin
3170           << "-" << cover_end;
3171       accounting::ContinuousSpaceBitmap* bitmap =
3172           heap_->GetLiveBitmap()->GetContinuousSpaceBitmap(obj);
3173 
3174       if (bitmap == nullptr) {
3175         LOG(ERROR) << "Object " << obj << " has no bitmap";
3176         if (!VerifyClassClass(obj->GetClass())) {
3177           LOG(ERROR) << "Object " << obj << " failed class verification!";
3178         }
3179       } else {
3180         // Print out how the object is live.
3181         if (bitmap->Test(obj)) {
3182           LOG(ERROR) << "Object " << obj << " found in live bitmap";
3183         }
3184         if (alloc_stack->Contains(const_cast<mirror::Object*>(obj))) {
3185           LOG(ERROR) << "Object " << obj << " found in allocation stack";
3186         }
3187         if (live_stack->Contains(const_cast<mirror::Object*>(obj))) {
3188           LOG(ERROR) << "Object " << obj << " found in live stack";
3189         }
3190         if (alloc_stack->Contains(const_cast<mirror::Object*>(ref))) {
3191           LOG(ERROR) << "Ref " << ref << " found in allocation stack";
3192         }
3193         if (live_stack->Contains(const_cast<mirror::Object*>(ref))) {
3194           LOG(ERROR) << "Ref " << ref << " found in live stack";
3195         }
3196         // Attempt to see if the card table missed the reference.
3197         ScanVisitor scan_visitor;
3198         uint8_t* byte_cover_begin = reinterpret_cast<uint8_t*>(card_table->AddrFromCard(card_addr));
3199         card_table->Scan<false>(bitmap, byte_cover_begin,
3200                                 byte_cover_begin + accounting::CardTable::kCardSize, scan_visitor);
3201       }
3202 
3203       // Search to see if any of the roots reference our object.
3204       RootMatchesObjectVisitor visitor1(obj);
3205       Runtime::Current()->VisitRoots(&visitor1);
3206       // Search to see if any of the roots reference our reference.
3207       RootMatchesObjectVisitor visitor2(ref);
3208       Runtime::Current()->VisitRoots(&visitor2);
3209     }
3210     return false;
3211   }
3212 
3213   Thread* const self_;
3214   Heap* const heap_;
3215   size_t* const fail_count_;
3216   const bool verify_referent_;
3217 };
3218 
3219 // Verify all references within an object, for use with HeapBitmap::Visit.
3220 class VerifyObjectVisitor {
3221  public:
3222   VerifyObjectVisitor(Thread* self, Heap* heap, size_t* fail_count, bool verify_referent)
3223       : self_(self), heap_(heap), fail_count_(fail_count), verify_referent_(verify_referent) {}
3224 
3225   void operator()(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
3226     // Note: we are verifying the references in obj but not obj itself; obj must be live, or else
3227     // we would not have found it in the live bitmap.
3228     VerifyReferenceVisitor visitor(self_, heap_, fail_count_, verify_referent_);
3229     // The class doesn't count as a reference but we should verify it anyway.
3230     obj->VisitReferences(visitor, visitor);
3231   }
3232 
3233   void VerifyRoots() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::heap_bitmap_lock_) {
3234     ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
3235     VerifyReferenceVisitor visitor(self_, heap_, fail_count_, verify_referent_);
3236     Runtime::Current()->VisitRoots(&visitor);
3237   }
3238 
3239   uint32_t GetFailureCount() const REQUIRES(Locks::mutator_lock_) {
3240     CHECK_EQ(self_, Thread::Current());
3241     return *fail_count_;
3242   }
3243 
3244  private:
3245   Thread* const self_;
3246   Heap* const heap_;
3247   size_t* const fail_count_;
3248   const bool verify_referent_;
3249 };
3250 
3251 void Heap::PushOnAllocationStackWithInternalGC(Thread* self, ObjPtr<mirror::Object>* obj) {
3252   // Slow path, the allocation stack push back must have already failed.
3253   DCHECK(!allocation_stack_->AtomicPushBack(obj->Ptr()));
3254   do {
3255     // TODO: Add handle VerifyObject.
3256     StackHandleScope<1> hs(self);
3257     HandleWrapperObjPtr<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
3258     // Push our object into the reserve region of the allocation stack. This is only required due
3259     // to heap verification requiring that roots are live (either in the live bitmap or in the
3260     // allocation stack).
3261     CHECK(allocation_stack_->AtomicPushBackIgnoreGrowthLimit(obj->Ptr()));
3262     CollectGarbageInternal(collector::kGcTypeSticky,
3263                            kGcCauseForAlloc,
3264                            false,
3265                            GetCurrentGcNum() + 1);
3266   } while (!allocation_stack_->AtomicPushBack(obj->Ptr()));
3267 }
3268 
3269 void Heap::PushOnThreadLocalAllocationStackWithInternalGC(Thread* self,
3270                                                           ObjPtr<mirror::Object>* obj) {
3271   // Slow path, the allocation stack push back must have already failed.
3272   DCHECK(!self->PushOnThreadLocalAllocationStack(obj->Ptr()));
3273   StackReference<mirror::Object>* start_address;
3274   StackReference<mirror::Object>* end_address;
3275   while (!allocation_stack_->AtomicBumpBack(kThreadLocalAllocationStackSize, &start_address,
3276                                             &end_address)) {
3277     // TODO: Add handle VerifyObject.
3278     StackHandleScope<1> hs(self);
3279     HandleWrapperObjPtr<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
3280     // Push our object into the reserve region of the allocation stack. This is only required due
3281     // to heap verification requiring that roots are live (either in the live bitmap or in the
3282     // allocation stack).
3283     CHECK(allocation_stack_->AtomicPushBackIgnoreGrowthLimit(obj->Ptr()));
3284     // Push into the reserve allocation stack.
3285     CollectGarbageInternal(collector::kGcTypeSticky,
3286                            kGcCauseForAlloc,
3287                            false,
3288                            GetCurrentGcNum() + 1);
3289   }
3290   self->SetThreadLocalAllocationStack(start_address, end_address);
3291   // Retry on the new thread-local allocation stack.
3292   CHECK(self->PushOnThreadLocalAllocationStack(obj->Ptr()));  // Must succeed.
3293 }
3294 
3295 // Must do this with mutators suspended since we are directly accessing the allocation stacks.
3296 size_t Heap::VerifyHeapReferences(bool verify_referents) {
3297   Thread* self = Thread::Current();
3298   Locks::mutator_lock_->AssertExclusiveHeld(self);
3299   // Let's sort our allocation stacks so that we can efficiently binary search them.
3300   allocation_stack_->Sort();
3301   live_stack_->Sort();
3302   // Since we sorted the allocation stack content, need to revoke all
3303   // thread-local allocation stacks.
3304   RevokeAllThreadLocalAllocationStacks(self);
3305   size_t fail_count = 0;
3306   VerifyObjectVisitor visitor(self, this, &fail_count, verify_referents);
3307   // Verify objects in the allocation stack since these will be objects which were:
3308   // 1. Allocated prior to the GC (pre GC verification).
3309   // 2. Allocated during the GC (pre sweep GC verification).
3310   // We don't want to verify the objects in the live stack since they themselves may be
3311   // pointing to dead objects if they are not reachable.
3312   VisitObjectsPaused(visitor);
3313   // Verify the roots:
3314   visitor.VerifyRoots();
3315   if (visitor.GetFailureCount() > 0) {
3316     // Dump mod-union tables.
3317     for (const auto& table_pair : mod_union_tables_) {
3318       accounting::ModUnionTable* mod_union_table = table_pair.second;
3319       mod_union_table->Dump(LOG_STREAM(ERROR) << mod_union_table->GetName() << ": ");
3320     }
3321     // Dump remembered sets.
3322     for (const auto& table_pair : remembered_sets_) {
3323       accounting::RememberedSet* remembered_set = table_pair.second;
3324       remembered_set->Dump(LOG_STREAM(ERROR) << remembered_set->GetName() << ": ");
3325     }
3326     DumpSpaces(LOG_STREAM(ERROR));
3327   }
3328   return visitor.GetFailureCount();
3329 }
3330 
3331 class VerifyReferenceCardVisitor {
3332  public:
3333   VerifyReferenceCardVisitor(Heap* heap, bool* failed)
3334       REQUIRES_SHARED(Locks::mutator_lock_,
3335                             Locks::heap_bitmap_lock_)
3336       : heap_(heap), failed_(failed) {
3337   }
3338 
3339   // There are no card marks for native roots on a class.
3340   void VisitRootIfNonNull(
3341       [[maybe_unused]] mirror::CompressedReference<mirror::Object>* root) const {}
3342   void VisitRoot([[maybe_unused]] mirror::CompressedReference<mirror::Object>* root) const {}
3343 
3344   // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
3345   // annotalysis on visitors.
3346   void operator()(mirror::Object* obj, MemberOffset offset, bool is_static) const
3347       NO_THREAD_SAFETY_ANALYSIS {
3348     mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
3349     // Filter out class references since changing an object's class does not mark the card as dirty.
3350     // Also handles large objects, since the only reference they hold is a class reference.
3351     if (ref != nullptr && !ref->IsClass()) {
3352       accounting::CardTable* card_table = heap_->GetCardTable();
3353       // If the object is not dirty and it is referencing something in the live stack other than
3354       // class, then it must be on a dirty card.
3355       if (!card_table->AddrIsInCardTable(obj)) {
3356         LOG(ERROR) << "Object " << obj << " is not in the address range of the card table";
3357         *failed_ = true;
3358       } else if (!card_table->IsDirty(obj)) {
3359         // TODO: Check mod-union tables.
3360         // Card should be either kCardDirty if it got re-dirtied after we aged it, or
3361         // kCardDirty - 1 if it didn't get touched since we aged it.
3362         accounting::ObjectStack* live_stack = heap_->live_stack_.get();
3363         if (live_stack->ContainsSorted(ref)) {
3364           if (live_stack->ContainsSorted(obj)) {
3365             LOG(ERROR) << "Object " << obj << " found in live stack";
3366           }
3367           if (heap_->GetLiveBitmap()->Test(obj)) {
3368             LOG(ERROR) << "Object " << obj << " found in live bitmap";
3369           }
3370           LOG(ERROR) << "Object " << obj << " " << mirror::Object::PrettyTypeOf(obj)
3371                     << " references " << ref << " " << mirror::Object::PrettyTypeOf(ref)
3372                     << " in live stack";
3373 
3374           // Print which field of the object is dead.
3375           if (!obj->IsObjectArray()) {
3376             ObjPtr<mirror::Class> klass = is_static ? obj->AsClass() : obj->GetClass();
3377             CHECK(klass != nullptr);
3378             for (ArtField& field : (is_static ? klass->GetSFields() : klass->GetIFields())) {
3379               if (field.GetOffset().Int32Value() == offset.Int32Value()) {
3380                 LOG(ERROR) << (is_static ? "Static " : "") << "field in the live stack is "
3381                            << field.PrettyField();
3382                 break;
3383               }
3384             }
3385           } else {
3386             ObjPtr<mirror::ObjectArray<mirror::Object>> object_array =
3387                 obj->AsObjectArray<mirror::Object>();
3388             for (int32_t i = 0; i < object_array->GetLength(); ++i) {
3389               if (object_array->Get(i) == ref) {
3390                 LOG(ERROR) << (is_static ? "Static " : "") << "obj[" << i << "] = ref";
3391               }
3392             }
3393           }
3394 
3395           *failed_ = true;
3396         }
3397       }
3398     }
3399   }
3400 
3401  private:
3402   Heap* const heap_;
3403   bool* const failed_;
3404 };
3405 
3406 class VerifyLiveStackReferences {
3407  public:
3408   explicit VerifyLiveStackReferences(Heap* heap)
3409       : heap_(heap),
3410         failed_(false) {}
3411 
3412   void operator()(mirror::Object* obj) const
3413       REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
3414     VerifyReferenceCardVisitor visitor(heap_, const_cast<bool*>(&failed_));
3415     obj->VisitReferences(visitor, VoidFunctor());
3416   }
3417 
3418   bool Failed() const {
3419     return failed_;
3420   }
3421 
3422  private:
3423   Heap* const heap_;
3424   bool failed_;
3425 };
3426 
3427 bool Heap::VerifyMissingCardMarks() {
3428   Thread* self = Thread::Current();
3429   Locks::mutator_lock_->AssertExclusiveHeld(self);
3430   // We need to sort the live stack since we binary search it.
3431   live_stack_->Sort();
3432   // Since we sorted the live stack content, we need to revoke all
3433   // thread-local allocation stacks.
3434   RevokeAllThreadLocalAllocationStacks(self);
3435   VerifyLiveStackReferences visitor(this);
3436   GetLiveBitmap()->Visit(visitor);
3437   // We can verify objects in the live stack since none of these should reference dead objects.
3438   for (auto* it = live_stack_->Begin(); it != live_stack_->End(); ++it) {
3439     if (!kUseThreadLocalAllocationStack || it->AsMirrorPtr() != nullptr) {
3440       visitor(it->AsMirrorPtr());
3441     }
3442   }
3443   return !visitor.Failed();
3444 }
3445 
3446 void Heap::SwapStacks() {
3447   if (kUseThreadLocalAllocationStack) {
3448     live_stack_->AssertAllZero();
3449   }
3450   allocation_stack_.swap(live_stack_);
3451 }
3452 
3453 void Heap::RevokeAllThreadLocalAllocationStacks(Thread* self) {
3454   // This must be called only during the pause.
3455   DCHECK(Locks::mutator_lock_->IsExclusiveHeld(self));
3456   MutexLock mu(self, *Locks::runtime_shutdown_lock_);
3457   MutexLock mu2(self, *Locks::thread_list_lock_);
3458   std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
3459   for (Thread* t : thread_list) {
3460     t->RevokeThreadLocalAllocationStack();
3461   }
3462 }
3463 
3464 void Heap::AssertThreadLocalBuffersAreRevoked(Thread* thread) {
3465   if (kIsDebugBuild) {
3466     if (rosalloc_space_ != nullptr) {
3467       rosalloc_space_->AssertThreadLocalBuffersAreRevoked(thread);
3468     }
3469     if (bump_pointer_space_ != nullptr) {
3470       bump_pointer_space_->AssertThreadLocalBuffersAreRevoked(thread);
3471     }
3472   }
3473 }
3474 
3475 void Heap::AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked() {
3476   if (kIsDebugBuild) {
3477     if (bump_pointer_space_ != nullptr) {
3478       bump_pointer_space_->AssertAllThreadLocalBuffersAreRevoked();
3479     }
3480   }
3481 }
3482 
3483 accounting::ModUnionTable* Heap::FindModUnionTableFromSpace(space::Space* space) {
3484   auto it = mod_union_tables_.find(space);
3485   if (it == mod_union_tables_.end()) {
3486     return nullptr;
3487   }
3488   return it->second;
3489 }
3490 
3491 accounting::RememberedSet* Heap::FindRememberedSetFromSpace(space::Space* space) {
3492   auto it = remembered_sets_.find(space);
3493   if (it == remembered_sets_.end()) {
3494     return nullptr;
3495   }
3496   return it->second;
3497 }
3498 
3499 void Heap::ProcessCards(TimingLogger* timings,
3500                         bool use_rem_sets,
3501                         bool process_alloc_space_cards,
3502                         bool clear_alloc_space_cards) {
3503   TimingLogger::ScopedTiming t(__FUNCTION__, timings);
3504   // Clear cards and keep track of cards cleared in the mod-union table.
3505   for (const auto& space : continuous_spaces_) {
3506     accounting::ModUnionTable* table = FindModUnionTableFromSpace(space);
3507     accounting::RememberedSet* rem_set = FindRememberedSetFromSpace(space);
3508     if (table != nullptr) {
3509       const char* name = space->IsZygoteSpace() ? "ZygoteModUnionClearCards" :
3510           "ImageModUnionClearCards";
3511       TimingLogger::ScopedTiming t2(name, timings);
3512       table->ProcessCards();
3513     } else if (use_rem_sets && rem_set != nullptr) {
3514       DCHECK(collector::SemiSpace::kUseRememberedSet) << static_cast<int>(collector_type_);
3515       TimingLogger::ScopedTiming t2("AllocSpaceRemSetClearCards", timings);
3516       rem_set->ClearCards();
3517     } else if (process_alloc_space_cards) {
3518       TimingLogger::ScopedTiming t2("AllocSpaceClearCards", timings);
3519       if (clear_alloc_space_cards) {
3520         uint8_t* end = space->End();
3521         if (space->IsImageSpace()) {
3522           // Image space end is the end of the mirror objects, it is not necessarily page or card
3523           // aligned. Align up so that the check in ClearCardRange does not fail.
3524           end = AlignUp(end, accounting::CardTable::kCardSize);
3525         }
3526         card_table_->ClearCardRange(space->Begin(), end);
3527       } else {
3528         // No mod union table for the AllocSpace. Age the cards so that the GC knows that these
3529         // cards were dirty before the GC started.
3530         // TODO: Need to use atomic for the case where aged(cleaning thread) -> dirty(other thread)
3531         // -> clean(cleaning thread).
3532         // The races leave us with either an aged or an unaged card. Since we checkpoint the
3533         // roots and then scan / update mod-union tables afterwards, we will always scan one of
3534         // the two cards. If we end up with the non-aged card, we scan it in the pause.
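        // For example (illustrative interleaving): if a mutator dirties a card just before the
        // aging visitor reaches it, the card ends up aged and is scanned via the usual card scan;
        // if the mutator dirties it just after, the card stays kCardDirty and is picked up during
        // the pause instead.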
3535         card_table_->ModifyCardsAtomic(space->Begin(), space->End(), AgeCardVisitor(),
3536                                        VoidFunctor());
3537       }
3538     }
3539   }
3540 }
3541 
3542 struct IdentityMarkHeapReferenceVisitor : public MarkObjectVisitor {
3543   mirror::Object* MarkObject(mirror::Object* obj) override {
3544     return obj;
3545   }
3546   void MarkHeapReference(mirror::HeapReference<mirror::Object>*, bool) override {
3547   }
3548 };
3549 
3550 void Heap::PreGcVerificationPaused(collector::GarbageCollector* gc) {
3551   Thread* const self = Thread::Current();
3552   TimingLogger* const timings = current_gc_iteration_.GetTimings();
3553   TimingLogger::ScopedTiming t(__FUNCTION__, timings);
3554   if (verify_pre_gc_heap_) {
3555     TimingLogger::ScopedTiming t2("(Paused)PreGcVerifyHeapReferences", timings);
3556     size_t failures = VerifyHeapReferences();
3557     if (failures > 0) {
3558       LOG(FATAL) << "Pre " << gc->GetName() << " heap verification failed with " << failures
3559           << " failures";
3560     }
3561   }
3562   // Check that all objects which reference things in the live stack are on dirty cards.
3563   if (verify_missing_card_marks_) {
3564     TimingLogger::ScopedTiming t2("(Paused)PreGcVerifyMissingCardMarks", timings);
3565     ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
3566     SwapStacks();
3567     // Sort the live stack so that we can quickly binary search it later.
3568     CHECK(VerifyMissingCardMarks()) << "Pre " << gc->GetName()
3569                                     << " missing card mark verification failed\n" << DumpSpaces();
3570     SwapStacks();
3571   }
3572   if (verify_mod_union_table_) {
3573     TimingLogger::ScopedTiming t2("(Paused)PreGcVerifyModUnionTables", timings);
3574     ReaderMutexLock reader_lock(self, *Locks::heap_bitmap_lock_);
3575     for (const auto& table_pair : mod_union_tables_) {
3576       accounting::ModUnionTable* mod_union_table = table_pair.second;
3577       IdentityMarkHeapReferenceVisitor visitor;
3578       mod_union_table->UpdateAndMarkReferences(&visitor);
3579       mod_union_table->Verify();
3580     }
3581   }
3582 }
3583 
3584 void Heap::PreGcVerification(collector::GarbageCollector* gc) {
3585   if (verify_pre_gc_heap_ || verify_missing_card_marks_ || verify_mod_union_table_) {
3586     collector::GarbageCollector::ScopedPause pause(gc, false);
3587     PreGcVerificationPaused(gc);
3588   }
3589 }
3590 
3591 void Heap::PrePauseRosAllocVerification([[maybe_unused]] collector::GarbageCollector* gc) {
3592   // TODO: Add a new runtime option for this?
3593   if (verify_pre_gc_rosalloc_) {
3594     RosAllocVerification(current_gc_iteration_.GetTimings(), "PreGcRosAllocVerification");
3595   }
3596 }
3597 
3598 void Heap::PreSweepingGcVerification(collector::GarbageCollector* gc) {
3599   Thread* const self = Thread::Current();
3600   TimingLogger* const timings = current_gc_iteration_.GetTimings();
3601   TimingLogger::ScopedTiming t(__FUNCTION__, timings);
3602   // Called before sweeping occurs since we want to make sure we are not going to reclaim any
3603   // reachable objects.
3604   if (verify_pre_sweeping_heap_) {
3605     TimingLogger::ScopedTiming t2("(Paused)PostSweepingVerifyHeapReferences", timings);
3606     CHECK_NE(self->GetState(), ThreadState::kRunnable);
3607     {
3608       WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
3609       // Swapping bound bitmaps does nothing.
3610       gc->SwapBitmaps();
3611     }
3612     // Pass in false since concurrent reference processing can mean that the reference referents
3613     // may point to dead objects at the point at which PreSweepingGcVerification is called.
3614     size_t failures = VerifyHeapReferences(false);
3615     if (failures > 0) {
3616       LOG(FATAL) << "Pre sweeping " << gc->GetName() << " GC verification failed with " << failures
3617           << " failures";
3618     }
3619     {
3620       WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
3621       gc->SwapBitmaps();
3622     }
3623   }
3624   if (verify_pre_sweeping_rosalloc_) {
3625     RosAllocVerification(timings, "PreSweepingRosAllocVerification");
3626   }
3627 }
3628 
3629 void Heap::PostGcVerificationPaused(collector::GarbageCollector* gc) {
3630   // Only pause if we have to do some verification.
3631   Thread* const self = Thread::Current();
3632   TimingLogger* const timings = GetCurrentGcIteration()->GetTimings();
3633   TimingLogger::ScopedTiming t(__FUNCTION__, timings);
3634   if (verify_system_weaks_) {
3635     ReaderMutexLock mu2(self, *Locks::heap_bitmap_lock_);
3636     collector::MarkSweep* mark_sweep = down_cast<collector::MarkSweep*>(gc);
3637     mark_sweep->VerifySystemWeaks();
3638   }
3639   if (verify_post_gc_rosalloc_) {
3640     RosAllocVerification(timings, "(Paused)PostGcRosAllocVerification");
3641   }
3642   if (verify_post_gc_heap_) {
3643     TimingLogger::ScopedTiming t2("(Paused)PostGcVerifyHeapReferences", timings);
3644     size_t failures = VerifyHeapReferences();
3645     if (failures > 0) {
3646       LOG(FATAL) << "Pre " << gc->GetName() << " heap verification failed with " << failures
3647           << " failures";
3648     }
3649   }
3650 }
3651 
3652 void Heap::PostGcVerification(collector::GarbageCollector* gc) {
3653   if (verify_system_weaks_ || verify_post_gc_rosalloc_ || verify_post_gc_heap_) {
3654     collector::GarbageCollector::ScopedPause pause(gc, false);
3655     PostGcVerificationPaused(gc);
3656   }
3657 }
3658 
3659 void Heap::RosAllocVerification(TimingLogger* timings, const char* name) {
3660   TimingLogger::ScopedTiming t(name, timings);
3661   for (const auto& space : continuous_spaces_) {
3662     if (space->IsRosAllocSpace()) {
3663       VLOG(heap) << name << " : " << space->GetName();
3664       space->AsRosAllocSpace()->Verify();
3665     }
3666   }
3667 }
3668 
3669 collector::GcType Heap::WaitForGcToComplete(GcCause cause, Thread* self) {
3670   ScopedThreadStateChange tsc(self, ThreadState::kWaitingForGcToComplete);
3671   MutexLock mu(self, *gc_complete_lock_);
3672   return WaitForGcToCompleteLocked(cause, self, /* only_one= */ true);
3673 }
3674 
3675 collector::GcType Heap::WaitForGcToCompleteLocked(GcCause cause, Thread* self, bool only_one) {
3676   gc_complete_cond_->CheckSafeToWait(self);
3677   collector::GcType last_gc_type = collector::kGcTypeNone;
3678   GcCause last_gc_cause = kGcCauseNone;
3679   if (collector_type_running_ != kCollectorTypeNone) {
3680     uint64_t wait_start = NanoTime();
3681     uint32_t starting_gc_num = GetCurrentGcNum();
3682     while (collector_type_running_ != kCollectorTypeNone &&
3683            (!only_one || GCNumberLt(GetCurrentGcNum(), starting_gc_num + 1))) {
3684       if (!task_processor_->IsRunningThread(self)) {
3685         // The current thread is about to wait for a currently running
3686         // collection to finish. If the waiting thread is not the heap
3687         // task daemon thread, the currently running collection is
3688         // considered as a blocking GC.
3689         running_collection_is_blocking_ = true;
3690         VLOG(gc) << "Waiting for a blocking GC " << cause;
3691       }
3692       SCOPED_TRACE << "GC: Wait For Completion " << cause;
3693       // We must wait, change thread state then sleep on gc_complete_cond_;
3694       gc_complete_cond_->Wait(self);
3695       last_gc_type = last_gc_type_;
3696       last_gc_cause = last_gc_cause_;
3697     }
3698     uint64_t wait_time = NanoTime() - wait_start;
3699     total_wait_time_ += wait_time;
3700     if (wait_time > long_pause_log_threshold_) {
3701       LOG(INFO) << "WaitForGcToComplete blocked " << cause << " on " << last_gc_cause << " for "
3702                 << PrettyDuration(wait_time);
3703     }
3704   }
3705   if (!task_processor_->IsRunningThread(self)) {
3706     // The current thread is about to run a collection. If the thread
3707     // is not the heap task daemon thread, it's considered as a
3708     // blocking GC (i.e., blocking itself).
3709     running_collection_is_blocking_ = true;
3710   }
3711   DCHECK(only_one || collector_type_running_ == kCollectorTypeNone);
3712   return last_gc_type;
3713 }
3714 
3715 void Heap::DumpForSigQuit(std::ostream& os) {
3716   os << "Heap: " << GetPercentFree() << "% free, " << PrettySize(GetBytesAllocated()) << "/"
3717      << PrettySize(GetTotalMemory()) << "\n";
3718   {
3719     os << "Image spaces:\n";
3720     ScopedObjectAccess soa(Thread::Current());
3721     for (const auto& space : continuous_spaces_) {
3722       if (space->IsImageSpace()) {
3723         os << space->GetName() << "\n";
3724       }
3725     }
3726   }
3727   DumpGcPerformanceInfo(os);
3728 }
3729 
3730 size_t Heap::GetPercentFree() {
3731   return static_cast<size_t>(100.0f * static_cast<float>(
3732       GetFreeMemory()) / target_footprint_.load(std::memory_order_relaxed));
3733 }
3734 
3735 void Heap::SetIdealFootprint(size_t target_footprint) {
3736   if (target_footprint > GetMaxMemory()) {
3737     VLOG(gc) << "Clamp target GC heap from " << PrettySize(target_footprint) << " to "
3738              << PrettySize(GetMaxMemory());
3739     target_footprint = GetMaxMemory();
3740   }
3741   target_footprint_.store(target_footprint, std::memory_order_relaxed);
3742 }
3743 
3744 bool Heap::IsMovableObject(ObjPtr<mirror::Object> obj) const {
3745   if (kMovingCollector) {
3746     space::Space* space = FindContinuousSpaceFromObject(obj.Ptr(), true);
3747     if (space != nullptr) {
3748       // TODO: Check large object?
3749       return space->CanMoveObjects();
3750     }
3751   }
3752   return false;
3753 }
3754 
3755 collector::GarbageCollector* Heap::FindCollectorByGcType(collector::GcType gc_type) {
3756   for (auto* collector : garbage_collectors_) {
3757     if (collector->GetCollectorType() == collector_type_ &&
3758         collector->GetGcType() == gc_type) {
3759       return collector;
3760     }
3761   }
3762   return nullptr;
3763 }
3764 
3765 double Heap::HeapGrowthMultiplier() const {
3766   // If we don't care about pause times we are background, so return 1.0.
3767   if (!CareAboutPauseTimes()) {
3768     return 1.0;
3769   }
3770   return foreground_heap_growth_multiplier_;
3771 }
3772 
3773 void Heap::GrowForUtilization(collector::GarbageCollector* collector_ran,
3774                               size_t bytes_allocated_before_gc) {
3775   // We're running in the thread that set collector_type_running_ to something other than none,
3776   // thus ensuring that there is only one of us running. Thus
3777   // collector_type_running_ != kCollectorTypeNone, but that's a little tricky to turn into a
3778   // DCHECK.
3779 
3780   // We know what our utilization is at this moment.
3781   // This doesn't actually resize any memory. It just lets the heap grow more when necessary.
3782   const size_t bytes_allocated = GetBytesAllocated();
3783   // Trace the new heap size after the GC is finished.
3784   TraceHeapSize(bytes_allocated);
3785   uint64_t target_size, grow_bytes;
3786   collector::GcType gc_type = collector_ran->GetGcType();
3787   MutexLock mu(Thread::Current(), process_state_update_lock_);
3788   // Use the multiplier to grow more for foreground.
3789   const double multiplier = HeapGrowthMultiplier();
3790   if (gc_type != collector::kGcTypeSticky) {
3791     // Grow the heap for non sticky GC.
3792     uint64_t delta = bytes_allocated * (1.0 / GetTargetHeapUtilization() - 1.0);
3793     DCHECK_LE(delta, std::numeric_limits<size_t>::max()) << "bytes_allocated=" << bytes_allocated
3794         << " target_utilization_=" << target_utilization_;
3795     grow_bytes = std::min(delta, static_cast<uint64_t>(max_free_));
3796     grow_bytes = std::max(grow_bytes, static_cast<uint64_t>(min_free_));
3797     target_size = bytes_allocated + static_cast<uint64_t>(grow_bytes * multiplier);
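    // For example (illustrative values only): with GetTargetHeapUtilization() == 0.75 and
    // bytes_allocated == 60 MiB, delta == 60 MiB * (1 / 0.75 - 1) == 20 MiB; clamping to
    // [min_free_, max_free_] (say 512 KiB and 8 MiB) gives grow_bytes == 8 MiB, so
    // target_size == 60 MiB + 8 MiB * multiplier.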
3798     next_gc_type_ = collector::kGcTypeSticky;
3799   } else {
3800     collector::GcType non_sticky_gc_type = NonStickyGcType();
3801     // Find what the next non sticky collector will be.
3802     collector::GarbageCollector* non_sticky_collector = FindCollectorByGcType(non_sticky_gc_type);
3803     if (use_generational_cc_) {
3804       if (non_sticky_collector == nullptr) {
3805         non_sticky_collector = FindCollectorByGcType(collector::kGcTypePartial);
3806       }
3807       CHECK(non_sticky_collector != nullptr);
3808     }
3809     double sticky_gc_throughput_adjustment = GetStickyGcThroughputAdjustment(use_generational_cc_);
3810 
3811     // If the throughput of the current sticky GC >= throughput of the non sticky collector, then
3812     // do another sticky collection next.
3813     // We also check that the bytes allocated aren't over the target_footprint, or
3814     // concurrent_start_bytes in case of concurrent GCs, in order to prevent a
3815     // pathological case where dead objects which aren't reclaimed by sticky could get accumulated
3816     // if the sticky GC throughput always remained >= the full/partial throughput.
3817     size_t target_footprint = target_footprint_.load(std::memory_order_relaxed);
3818     if (current_gc_iteration_.GetEstimatedThroughput() * sticky_gc_throughput_adjustment >=
3819         non_sticky_collector->GetEstimatedMeanThroughput() &&
3820         non_sticky_collector->NumberOfIterations() > 0 &&
3821         bytes_allocated <= (IsGcConcurrent() ? concurrent_start_bytes_ : target_footprint)) {
3822       next_gc_type_ = collector::kGcTypeSticky;
3823     } else {
3824       next_gc_type_ = non_sticky_gc_type;
3825     }
3826     // If we have freed enough memory, shrink the heap back down.
3827     const size_t adjusted_max_free = static_cast<size_t>(max_free_ * multiplier);
3828     if (bytes_allocated + adjusted_max_free < target_footprint) {
3829       target_size = bytes_allocated + adjusted_max_free;
3830       grow_bytes = max_free_;
3831     } else {
3832       target_size = std::max(bytes_allocated, target_footprint);
3833       // The same whether jank perceptible or not; just avoid the adjustment.
3834       grow_bytes = 0;
3835     }
3836   }
3837   CHECK_LE(target_size, std::numeric_limits<size_t>::max())
3838       << " bytes_allocated:" << bytes_allocated
3839       << " bytes_freed:" << current_gc_iteration_.GetFreedBytes()
3840       << " large_obj_bytes_freed:" << current_gc_iteration_.GetFreedLargeObjectBytes();
3841   if (!ignore_target_footprint_) {
3842     SetIdealFootprint(target_size);
3843     // Store target size (computed with foreground heap growth multiplier) for updating
3844     // target_footprint_ when process state switches to foreground.
3845     // target_size = 0 ensures that target_footprint_ is not updated on
3846     // process-state switch.
3847     min_foreground_target_footprint_ =
3848         (multiplier <= 1.0 && grow_bytes > 0)
3849         ? std::min(
3850           bytes_allocated + static_cast<size_t>(grow_bytes * foreground_heap_growth_multiplier_),
3851           GetMaxMemory())
3852         : 0;
3853 
3854     if (IsGcConcurrent()) {
3855       const uint64_t freed_bytes = current_gc_iteration_.GetFreedBytes() +
3856           current_gc_iteration_.GetFreedLargeObjectBytes() +
3857           current_gc_iteration_.GetFreedRevokeBytes();
3858       // Records the number of bytes allocated at the time of GC finish, excluding the number of
3859       // bytes allocated during GC.
3860       num_bytes_alive_after_gc_ = UnsignedDifference(bytes_allocated_before_gc, freed_bytes);
3861       // Bytes allocated will shrink by freed_bytes after the GC runs, so if we want to figure out
3862       // how many bytes were allocated during the GC we need to add freed_bytes back on.
3863       // Almost always bytes_allocated + freed_bytes >= bytes_allocated_before_gc.
3864       const size_t bytes_allocated_during_gc =
3865           UnsignedDifference(bytes_allocated + freed_bytes, bytes_allocated_before_gc);
3866       // Calculate when to perform the next ConcurrentGC.
3867       // Estimate how many remaining bytes we will have when we need to start the next GC.
3868       size_t remaining_bytes = bytes_allocated_during_gc;
3869       remaining_bytes = std::min(remaining_bytes, kMaxConcurrentRemainingBytes);
3870       remaining_bytes = std::max(remaining_bytes, kMinConcurrentRemainingBytes);
3871       size_t target_footprint = target_footprint_.load(std::memory_order_relaxed);
3872       if (UNLIKELY(remaining_bytes > target_footprint)) {
3873         // A situation that should never happen: the estimated allocation rate implies we
3874         // would exceed the application's entire footprint with the given allocation rate.
3875         // Schedule another GC nearly straight away.
3876         remaining_bytes = std::min(kMinConcurrentRemainingBytes, target_footprint);
3877       }
3878       DCHECK_LE(target_footprint_.load(std::memory_order_relaxed), GetMaxMemory());
3879       // Start a concurrent GC when we get close to the estimated remaining bytes. When the
3880       // allocation rate is very high, remaining_bytes could tell us that we should start a GC
3881       // right away.
3882       concurrent_start_bytes_ = std::max(target_footprint - remaining_bytes, bytes_allocated);
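      // For example (illustrative values only): with target_footprint == 96 MiB and
      // remaining_bytes == 4 MiB (the clamped estimate of allocation during the last GC),
      // the next concurrent GC is requested once allocation reaches roughly 92 MiB.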
3883       // Store concurrent_start_bytes_ (computed with the foreground heap growth multiplier) so
3884       // that it can be updated when the process state switches to foreground.
3885       min_foreground_concurrent_start_bytes_ =
3886           min_foreground_target_footprint_ != 0
3887           ? std::max(min_foreground_target_footprint_ - remaining_bytes, bytes_allocated)
3888           : 0;
3889     }
3890   }
3891 }
3892 
3893 void Heap::ClampGrowthLimit() {
3894   // Use heap bitmap lock to guard against races with BindLiveToMarkBitmap.
3895   ScopedObjectAccess soa(Thread::Current());
3896   WriterMutexLock mu(soa.Self(), *Locks::heap_bitmap_lock_);
3897   capacity_ = growth_limit_;
3898   for (const auto& space : continuous_spaces_) {
3899     if (space->IsMallocSpace()) {
3900       gc::space::MallocSpace* malloc_space = space->AsMallocSpace();
3901       malloc_space->ClampGrowthLimit();
3902     }
3903   }
3904   if (large_object_space_ != nullptr) {
3905     large_object_space_->ClampGrowthLimit(capacity_);
3906   }
3907   if (collector_type_ == kCollectorTypeCC) {
3908     DCHECK(region_space_ != nullptr);
3909     // Twice the capacity as CC needs extra space for evacuating objects.
3910     region_space_->ClampGrowthLimit(2 * capacity_);
3911   } else if (collector_type_ == kCollectorTypeCMC) {
3912     DCHECK(gUseUserfaultfd);
3913     DCHECK_NE(mark_compact_, nullptr);
3914     DCHECK_NE(bump_pointer_space_, nullptr);
3915     mark_compact_->ClampGrowthLimit(capacity_);
3916   }
3917   // This space isn't added for performance reasons.
3918   if (main_space_backup_.get() != nullptr) {
3919     main_space_backup_->ClampGrowthLimit();
3920   }
3921 }
3922 
3923 void Heap::ClearGrowthLimit() {
3924   if (target_footprint_.load(std::memory_order_relaxed) == growth_limit_
3925       && growth_limit_ < capacity_) {
3926     target_footprint_.store(capacity_, std::memory_order_relaxed);
3927     SetDefaultConcurrentStartBytes();
3928   }
3929   growth_limit_ = capacity_;
3930   ScopedObjectAccess soa(Thread::Current());
3931   for (const auto& space : continuous_spaces_) {
3932     if (space->IsMallocSpace()) {
3933       gc::space::MallocSpace* malloc_space = space->AsMallocSpace();
3934       malloc_space->ClearGrowthLimit();
3935       malloc_space->SetFootprintLimit(malloc_space->Capacity());
3936     }
3937   }
3938   // This space isn't added for performance reasons.
3939   if (main_space_backup_.get() != nullptr) {
3940     main_space_backup_->ClearGrowthLimit();
3941     main_space_backup_->SetFootprintLimit(main_space_backup_->Capacity());
3942   }
3943 }
3944 
3945 void Heap::AddFinalizerReference(Thread* self, ObjPtr<mirror::Object>* object) {
3946   ScopedObjectAccess soa(self);
3947   StackHandleScope<1u> hs(self);
3948   // Use handle wrapper to update the `*object` if the object gets moved.
3949   HandleWrapperObjPtr<mirror::Object> h_object = hs.NewHandleWrapper(object);
3950   WellKnownClasses::java_lang_ref_FinalizerReference_add->InvokeStatic<'V', 'L'>(
3951       self, h_object.Get());
3952 }
3953 
3954 void Heap::RequestConcurrentGCAndSaveObject(Thread* self,
3955                                             bool force_full,
3956                                             uint32_t observed_gc_num,
3957                                             ObjPtr<mirror::Object>* obj) {
3958   StackHandleScope<1> hs(self);
3959   HandleWrapperObjPtr<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
3960   RequestConcurrentGC(self, kGcCauseBackground, force_full, observed_gc_num);
3961 }
3962 
3963 class Heap::ConcurrentGCTask : public HeapTask {
3964  public:
3965   ConcurrentGCTask(uint64_t target_time, GcCause cause, bool force_full, uint32_t gc_num)
3966       : HeapTask(target_time), cause_(cause), force_full_(force_full), my_gc_num_(gc_num) {}
3967   void Run(Thread* self) override {
3968     Runtime* runtime = Runtime::Current();
3969     gc::Heap* heap = runtime->GetHeap();
3970     DCHECK(GCNumberLt(my_gc_num_, heap->GetCurrentGcNum() + 2));  // <= current_gc_num + 1
3971     heap->ConcurrentGC(self, cause_, force_full_, my_gc_num_);
3972     CHECK_IMPLIES(GCNumberLt(heap->GetCurrentGcNum(), my_gc_num_), runtime->IsShuttingDown(self));
3973   }
3974 
3975  private:
3976   const GcCause cause_;
3977   const bool force_full_;  // If true, force full (or partial) collection.
3978   const uint32_t my_gc_num_;  // Sequence number of requested GC.
3979 };
3980 
3981 static bool CanAddHeapTask(Thread* self) REQUIRES(!Locks::runtime_shutdown_lock_) {
3982   Runtime* runtime = Runtime::Current();
3983   // We only care if the native stack has overflowed. If the simulated stack overflows, it is still
3984   // possible that the native stack has room to add a heap task.
3985   return runtime != nullptr && runtime->IsFinishedStarting() && !runtime->IsShuttingDown(self) &&
3986       !self->IsHandlingStackOverflow<kNativeStackType>();
3987 }
3988 
3989 bool Heap::RequestConcurrentGC(Thread* self,
3990                                GcCause cause,
3991                                bool force_full,
3992                                uint32_t observed_gc_num) {
3993   uint32_t max_gc_requested = max_gc_requested_.load(std::memory_order_relaxed);
3994   if (!GCNumberLt(observed_gc_num, max_gc_requested)) {
3995     // observed_gc_num >= max_gc_requested: Nobody beat us to requesting the next gc.
3996     if (CanAddHeapTask(self)) {
3997       // Since observed_gc_num >= max_gc_requested, this increases max_gc_requested_, if successful.
3998       if (max_gc_requested_.CompareAndSetStrongRelaxed(max_gc_requested, observed_gc_num + 1)) {
3999         task_processor_->AddTask(self, new ConcurrentGCTask(NanoTime(),  // Start straight away.
4000                                                             cause,
4001                                                             force_full,
4002                                                             observed_gc_num + 1));
4003       }
4004       DCHECK(GCNumberLt(observed_gc_num, max_gc_requested_.load(std::memory_order_relaxed)));
4005       // If we increased max_gc_requested_, then we added a task that will eventually cause
4006       // gcs_completed_ to be incremented (to at least observed_gc_num + 1).
4007       // If the CAS failed, somebody else did.
4008       return true;
4009     }
4010     return false;
4011   }
4012   return true;  // Vacuously.
4013 }
4014 
4015 void Heap::ConcurrentGC(Thread* self, GcCause cause, bool force_full, uint32_t requested_gc_num) {
4016   if (!Runtime::Current()->IsShuttingDown(self)) {
4017     // Wait for any GCs currently running to finish. If this incremented GC number, we're done.
4018     WaitForGcToComplete(cause, self);
4019     if (GCNumberLt(GetCurrentGcNum(), requested_gc_num)) {
4020       collector::GcType next_gc_type = next_gc_type_;
4021       // If forcing full and next gc type is sticky, override with a non-sticky type.
4022       if (force_full && next_gc_type == collector::kGcTypeSticky) {
4023         next_gc_type = NonStickyGcType();
4024       }
4025       // If we can't run the GC type we wanted to run, find the next appropriate one and try
4026       // that instead. E.g. can't do partial, so do full instead.
4027       // We must ensure that we run something that ends up incrementing gcs_completed_.
4028       // In the kGcTypePartial case, the initial CollectGarbageInternal call may not have that
4029       // effect, but the subsequent kGcTypeFull call will.
4030       if (CollectGarbageInternal(next_gc_type, cause, false, requested_gc_num)
4031           == collector::kGcTypeNone) {
4032         for (collector::GcType gc_type : gc_plan_) {
4033           if (!GCNumberLt(GetCurrentGcNum(), requested_gc_num)) {
4034             // Somebody did it for us.
4035             break;
4036           }
4037           // Attempt to run the collector, if we succeed, we are done.
4038           if (gc_type > next_gc_type &&
4039               CollectGarbageInternal(gc_type, cause, false, requested_gc_num)
4040               != collector::kGcTypeNone) {
4041             break;
4042           }
4043         }
4044       }
4045     }
4046   }
4047 }
4048 
4049 class Heap::CollectorTransitionTask : public HeapTask {
4050  public:
4051   explicit CollectorTransitionTask(uint64_t target_time) : HeapTask(target_time) {}
4052 
4053   void Run(Thread* self) override {
4054     gc::Heap* heap = Runtime::Current()->GetHeap();
4055     heap->DoPendingCollectorTransition();
4056     heap->ClearPendingCollectorTransition(self);
4057   }
4058 };
4059 
4060 void Heap::ClearPendingCollectorTransition(Thread* self) {
4061   MutexLock mu(self, *pending_task_lock_);
4062   pending_collector_transition_ = nullptr;
4063 }
4064 
4065 void Heap::RequestCollectorTransition(CollectorType desired_collector_type, uint64_t delta_time) {
4066   Thread* self = Thread::Current();
4067   desired_collector_type_ = desired_collector_type;
4068   if (desired_collector_type_ == collector_type_ || !CanAddHeapTask(self)) {
4069     return;
4070   }
4071   if (collector_type_ == kCollectorTypeCC) {
4072     // For CC, we invoke a full compaction when going to the background, but the collector type
4073     // doesn't change.
4074     DCHECK_EQ(desired_collector_type_, kCollectorTypeCCBackground);
4075   }
4076   if (collector_type_ == kCollectorTypeCMC) {
4077     // For CMC collector type doesn't change.
4078     DCHECK_EQ(desired_collector_type_, kCollectorTypeCMCBackground);
4079   }
4080   DCHECK_NE(collector_type_, kCollectorTypeCCBackground);
4081   DCHECK_NE(collector_type_, kCollectorTypeCMCBackground);
4082   CollectorTransitionTask* added_task = nullptr;
4083   const uint64_t target_time = NanoTime() + delta_time;
4084   {
4085     MutexLock mu(self, *pending_task_lock_);
4086     // If we have an existing collector transition, update the target time to be the new target.
4087     if (pending_collector_transition_ != nullptr) {
4088       task_processor_->UpdateTargetRunTime(self, pending_collector_transition_, target_time);
4089       return;
4090     }
4091     added_task = new CollectorTransitionTask(target_time);
4092     pending_collector_transition_ = added_task;
4093   }
4094   task_processor_->AddTask(self, added_task);
4095 }
4096 
4097 class Heap::HeapTrimTask : public HeapTask {
4098  public:
4099   explicit HeapTrimTask(uint64_t delta_time) : HeapTask(NanoTime() + delta_time) { }
4100   void Run(Thread* self) override {
4101     gc::Heap* heap = Runtime::Current()->GetHeap();
4102     heap->Trim(self);
4103     heap->ClearPendingTrim(self);
4104   }
4105 };
4106 
4107 void Heap::ClearPendingTrim(Thread* self) {
4108   MutexLock mu(self, *pending_task_lock_);
4109   pending_heap_trim_ = nullptr;
4110 }
4111 
4112 void Heap::RequestTrim(Thread* self) {
4113   if (!CanAddHeapTask(self)) {
4114     return;
4115   }
4116   // GC completed and now we must decide whether to request a heap trim (advising pages back to the
4117   // kernel) or not. Issuing a request will also cause trimming of the libc heap. As a trim scans
4118   // a space it will hold its lock and can become a cause of jank.
4119   // Note that the large object space trims itself, and the Zygote space was trimmed at fork
4120   // time and is unchanging since.
4121 
4122   // We don't have a good measure of how worthwhile a trim might be. We can't use the live bitmap
4123   // because that only marks object heads, so a large array looks like lots of empty space. We
4124   // don't just call dlmalloc all the time, because the cost of an _attempted_ trim is proportional
4125   // to utilization (which is probably inversely proportional to how much benefit we can expect).
4126   // We could try mincore(2) but that's only a measure of how many pages we haven't given away,
4127   // not how much use we're making of those pages.
4128   HeapTrimTask* added_task = nullptr;
4129   {
4130     MutexLock mu(self, *pending_task_lock_);
4131     if (pending_heap_trim_ != nullptr) {
4132       // Already have a heap trim request in task processor, ignore this request.
4133       return;
4134     }
4135     added_task = new HeapTrimTask(kHeapTrimWait);
4136     pending_heap_trim_ = added_task;
4137   }
4138   task_processor_->AddTask(self, added_task);
4139 }
4140 
4141 void Heap::IncrementNumberOfBytesFreedRevoke(size_t freed_bytes_revoke) {
4142   size_t previous_num_bytes_freed_revoke =
4143       num_bytes_freed_revoke_.fetch_add(freed_bytes_revoke, std::memory_order_relaxed);
4144   // Check the updated value is less than the number of bytes allocated. There is a risk of
4145   // execution being suspended between the increment above and the CHECK below, leading to
4146   // the use of previous_num_bytes_freed_revoke in the comparison.
4147   CHECK_GE(num_bytes_allocated_.load(std::memory_order_relaxed),
4148            previous_num_bytes_freed_revoke + freed_bytes_revoke);
4149 }
4150 
4151 void Heap::RevokeThreadLocalBuffers(Thread* thread) {
4152   if (rosalloc_space_ != nullptr) {
4153     size_t freed_bytes_revoke = rosalloc_space_->RevokeThreadLocalBuffers(thread);
4154     if (freed_bytes_revoke > 0U) {
4155       IncrementNumberOfBytesFreedRevoke(freed_bytes_revoke);
4156     }
4157   }
4158   if (bump_pointer_space_ != nullptr) {
4159     CHECK_EQ(bump_pointer_space_->RevokeThreadLocalBuffers(thread), 0U);
4160   }
4161   if (region_space_ != nullptr) {
4162     CHECK_EQ(region_space_->RevokeThreadLocalBuffers(thread), 0U);
4163   }
4164 }
4165 
4166 void Heap::RevokeRosAllocThreadLocalBuffers(Thread* thread) {
4167   if (rosalloc_space_ != nullptr) {
4168     size_t freed_bytes_revoke = rosalloc_space_->RevokeThreadLocalBuffers(thread);
4169     if (freed_bytes_revoke > 0U) {
4170       IncrementNumberOfBytesFreedRevoke(freed_bytes_revoke);
4171     }
4172   }
4173 }
4174 
4175 void Heap::RevokeAllThreadLocalBuffers() {
4176   if (rosalloc_space_ != nullptr) {
4177     size_t freed_bytes_revoke = rosalloc_space_->RevokeAllThreadLocalBuffers();
4178     if (freed_bytes_revoke > 0U) {
4179       IncrementNumberOfBytesFreedRevoke(freed_bytes_revoke);
4180     }
4181   }
4182   if (bump_pointer_space_ != nullptr) {
4183     CHECK_EQ(bump_pointer_space_->RevokeAllThreadLocalBuffers(), 0U);
4184   }
4185   if (region_space_ != nullptr) {
4186     CHECK_EQ(region_space_->RevokeAllThreadLocalBuffers(), 0U);
4187   }
4188 }
4189 
4190 // For GC triggering purposes, we count old (pre-last-GC) and new native allocations as
4191 // different fractions of Java allocations.
4192 // For now, we essentially do not count old native allocations at all, so that we can preserve the
4193 // existing behavior of not limiting native heap size. If we seriously considered it, we would
4194 // have to adjust collection thresholds when we encounter large amounts of old native memory,
4195 // and handle native out-of-memory situations.
4196 
4197 static constexpr size_t kOldNativeDiscountFactor = 65536;  // Approximately infinite for now.
4198 static constexpr size_t kNewNativeDiscountFactor = 2;
4199 
4200 // If weighted java + native memory use exceeds our target by kStopForNativeFactor, and
4201 // newly allocated memory exceeds stop_for_native_allocs_, we wait for GC to complete to avoid
4202 // running out of memory.
4203 static constexpr float kStopForNativeFactor = 4.0;
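// For example (illustrative): a NativeMemoryOverTarget() ratio above 4.0 combined with more
// than stop_for_native_allocs_ registered native bytes causes the allocating thread to block in
// CheckGCForNative() until a requested GC completes, giving up after a bounded number of waits.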
4204 
4205 // Return the ratio of the weighted native + java allocated bytes to its target value.
4206 // A return value > 1.0 means we should collect. Significantly larger values mean we're falling
4207 // behind.
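// For example (illustrative numbers only): with 40 MiB of Java bytes allocated, 8 MiB of new
// native bytes (discounted by kNewNativeDiscountFactor == 2 to 4 MiB), a negligible old-native
// contribution, and adj_start_bytes == 50 MiB, the ratio is (40 + 4) / 50 == 0.88, so no
// collection is triggered yet.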
4208 inline float Heap::NativeMemoryOverTarget(size_t current_native_bytes, bool is_gc_concurrent) {
4209   // Collection check for native allocation. Does not enforce Java heap bounds.
4210   // With adj_start_bytes defined below, effectively checks
4211   // <java bytes allocd> + c1*<old native allocd> + c2*<new native allocd> >= adj_start_bytes,
4212   // where currently c1 and c2 are 1 divided by the discount factors defined above.
4213   size_t old_native_bytes = old_native_bytes_allocated_.load(std::memory_order_relaxed);
4214   if (old_native_bytes > current_native_bytes) {
4215     // Net decrease; skip the check, but update old value.
4216     // It's OK to lose an update if two stores race.
4217     old_native_bytes_allocated_.store(current_native_bytes, std::memory_order_relaxed);
4218     return 0.0;
4219   } else {
4220     size_t new_native_bytes = UnsignedDifference(current_native_bytes, old_native_bytes);
4221     size_t weighted_native_bytes = new_native_bytes / kNewNativeDiscountFactor
4222         + old_native_bytes / kOldNativeDiscountFactor;
4223     size_t add_bytes_allowed = static_cast<size_t>(
4224         NativeAllocationGcWatermark() * HeapGrowthMultiplier());
4225     size_t java_gc_start_bytes = is_gc_concurrent
4226         ? concurrent_start_bytes_
4227         : target_footprint_.load(std::memory_order_relaxed);
4228     size_t adj_start_bytes = UnsignedSum(java_gc_start_bytes,
4229                                          add_bytes_allowed / kNewNativeDiscountFactor);
4230     return static_cast<float>(GetBytesAllocated() + weighted_native_bytes)
4231          / static_cast<float>(adj_start_bytes);
4232   }
4233 }
4234 
4235 inline void Heap::CheckGCForNative(Thread* self) {
4236   bool is_gc_concurrent = IsGcConcurrent();
4237   uint32_t starting_gc_num = GetCurrentGcNum();
4238   size_t current_native_bytes = GetNativeBytes();
4239   float gc_urgency = NativeMemoryOverTarget(current_native_bytes, is_gc_concurrent);
4240   if (UNLIKELY(gc_urgency >= 1.0)) {
4241     if (is_gc_concurrent) {
4242       bool requested =
4243           RequestConcurrentGC(self, kGcCauseForNativeAlloc, /*force_full=*/true, starting_gc_num);
4244       if (requested && gc_urgency > kStopForNativeFactor
4245           && current_native_bytes > stop_for_native_allocs_) {
4246         // We're in danger of running out of memory due to rampant native allocation.
4247         if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
4248           LOG(INFO) << "Stopping for native allocation, urgency: " << gc_urgency;
4249         }
4250         // Count how many times we do this, so we can warn if this becomes excessive.
4251         // Stop after a while, out of excessive caution.
4252         static constexpr int kGcWaitIters = 20;
4253         for (int i = 1; i <= kGcWaitIters; ++i) {
4254           if (!GCNumberLt(GetCurrentGcNum(), max_gc_requested_.load(std::memory_order_relaxed))
4255               || WaitForGcToComplete(kGcCauseForNativeAlloc, self) != collector::kGcTypeNone) {
4256             break;
4257           }
4258           CHECK(GCNumberLt(starting_gc_num, max_gc_requested_.load(std::memory_order_relaxed)));
4259           if (i % 10 == 0) {
4260             LOG(WARNING) << "Slept " << i << " times in native allocation, waiting for GC";
4261           }
4262           static constexpr int kGcWaitSleepMicros = 2000;
4263           usleep(kGcWaitSleepMicros);  // Encourage our requested GC to start.
4264         }
4265       }
4266     } else {
4267       CollectGarbageInternal(NonStickyGcType(), kGcCauseForNativeAlloc, false, starting_gc_num + 1);
4268     }
4269   }
4270 }
4271 
4272 // About kNotifyNativeInterval allocations have occurred. Check whether we should garbage collect.
4273 void Heap::NotifyNativeAllocations(JNIEnv* env) {
4274   native_objects_notified_.fetch_add(kNotifyNativeInterval, std::memory_order_relaxed);
4275   CheckGCForNative(Thread::ForEnv(env));
4276 }
4277 
4278 // Register a native allocation with an explicit size.
4279 // This should only be done for large allocations of non-malloc memory, which we wouldn't
4280 // otherwise see.
4281 void Heap::RegisterNativeAllocation(JNIEnv* env, size_t bytes) {
4282   // Cautiously check for a wrapped negative bytes argument.
4283   DCHECK(sizeof(size_t) < 8 || bytes < (std::numeric_limits<size_t>::max() / 2));
4284   native_bytes_registered_.fetch_add(bytes, std::memory_order_relaxed);
4285   uint32_t objects_notified =
4286       native_objects_notified_.fetch_add(1, std::memory_order_relaxed);
4287   if (objects_notified % kNotifyNativeInterval == kNotifyNativeInterval - 1
4288       || bytes > kCheckImmediatelyThreshold) {
4289     CheckGCForNative(Thread::ForEnv(env));
4290   }
4291   // Heap profiler treats this as a Java allocation with a null object.
4292   if (GetHeapSampler().IsEnabled()) {
4293     JHPCheckNonTlabSampleAllocation(Thread::Current(), nullptr, bytes);
4294   }
4295 }
4296 
4297 void Heap::RegisterNativeFree(JNIEnv*, size_t bytes) {
4298   size_t allocated;
4299   size_t new_freed_bytes;
4300   do {
4301     allocated = native_bytes_registered_.load(std::memory_order_relaxed);
4302     new_freed_bytes = std::min(allocated, bytes);
4303     // We should not be registering more free than allocated bytes.
4304     // But correctly keep going in non-debug builds.
4305     DCHECK_EQ(new_freed_bytes, bytes);
4306   } while (!native_bytes_registered_.CompareAndSetWeakRelaxed(allocated,
4307                                                               allocated - new_freed_bytes));
4308 }
4309 
4310 size_t Heap::GetTotalMemory() const {
4311   return std::max(target_footprint_.load(std::memory_order_relaxed), GetBytesAllocated());
4312 }
4313 
4314 void Heap::AddModUnionTable(accounting::ModUnionTable* mod_union_table) {
4315   DCHECK(mod_union_table != nullptr);
4316   mod_union_tables_.Put(mod_union_table->GetSpace(), mod_union_table);
4317 }
4318 
4319 void Heap::CheckPreconditionsForAllocObject(ObjPtr<mirror::Class> c, size_t byte_count) {
4320   // Compare rounded sizes since the allocation may have been retried after rounding the size.
4321   // See b/37885600
4322   CHECK(c == nullptr || (c->IsClassClass() && byte_count >= sizeof(mirror::Class)) ||
4323         (c->IsVariableSize() ||
4324             RoundUp(c->GetObjectSize(), kObjectAlignment) ==
4325                 RoundUp(byte_count, kObjectAlignment)))
4326       << "ClassFlags=" << c->GetClassFlags()
4327       << " IsClassClass=" << c->IsClassClass()
4328       << " byte_count=" << byte_count
4329       << " IsVariableSize=" << c->IsVariableSize()
4330       << " ObjectSize=" << c->GetObjectSize()
4331       << " sizeof(Class)=" << sizeof(mirror::Class)
4332       << " " << verification_->DumpObjectInfo(c.Ptr(), /*tag=*/ "klass");
4333   CHECK_GE(byte_count, sizeof(mirror::Object));
4334 }
4335 
4336 void Heap::AddRememberedSet(accounting::RememberedSet* remembered_set) {
4337   CHECK(remembered_set != nullptr);
4338   space::Space* space = remembered_set->GetSpace();
4339   CHECK(space != nullptr);
4340   CHECK(remembered_sets_.find(space) == remembered_sets_.end()) << space;
4341   remembered_sets_.Put(space, remembered_set);
4342   CHECK(remembered_sets_.find(space) != remembered_sets_.end()) << space;
4343 }
4344 
4345 void Heap::RemoveRememberedSet(space::Space* space) {
4346   CHECK(space != nullptr);
4347   auto it = remembered_sets_.find(space);
4348   CHECK(it != remembered_sets_.end());
4349   delete it->second;
4350   remembered_sets_.erase(it);
4351   CHECK(remembered_sets_.find(space) == remembered_sets_.end());
4352 }
4353 
4354 void Heap::ClearMarkedObjects(bool release_eagerly) {
4355   // Clear all of the spaces' mark bitmaps.
4356   for (const auto& space : GetContinuousSpaces()) {
4357     if (space->GetLiveBitmap() != nullptr && !space->HasBoundBitmaps()) {
4358       space->GetMarkBitmap()->Clear(release_eagerly);
4359     }
4360   }
4361   // Clear the marked objects in the discontinous space object sets.
4362   for (const auto& space : GetDiscontinuousSpaces()) {
4363     space->GetMarkBitmap()->Clear(release_eagerly);
4364   }
4365 }
4366 
4367 void Heap::SetAllocationRecords(AllocRecordObjectMap* records) {
4368   allocation_records_.reset(records);
4369 }
4370 
4371 void Heap::VisitAllocationRecords(RootVisitor* visitor) const {
4372   if (IsAllocTrackingEnabled()) {
4373     MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
4374     if (IsAllocTrackingEnabled()) {
4375       GetAllocationRecords()->VisitRoots(visitor);
4376     }
4377   }
4378 }
4379 
4380 void Heap::SweepAllocationRecords(IsMarkedVisitor* visitor) const {
4381   if (IsAllocTrackingEnabled()) {
4382     MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
4383     if (IsAllocTrackingEnabled()) {
4384       GetAllocationRecords()->SweepAllocationRecords(visitor);
4385     }
4386   }
4387 }
4388 
4389 void Heap::AllowNewAllocationRecords() const {
4390   CHECK(!gUseReadBarrier);
4391   MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
4392   AllocRecordObjectMap* allocation_records = GetAllocationRecords();
4393   if (allocation_records != nullptr) {
4394     allocation_records->AllowNewAllocationRecords();
4395   }
4396 }
4397 
4398 void Heap::DisallowNewAllocationRecords() const {
4399   CHECK(!gUseReadBarrier);
4400   MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
4401   AllocRecordObjectMap* allocation_records = GetAllocationRecords();
4402   if (allocation_records != nullptr) {
4403     allocation_records->DisallowNewAllocationRecords();
4404   }
4405 }
4406 
4407 void Heap::BroadcastForNewAllocationRecords() const {
4408   // Always broadcast without checking IsAllocTrackingEnabled() because IsAllocTrackingEnabled() may
4409   // be set to false while some threads are waiting for system weak access in
4410   // AllocRecordObjectMap::RecordAllocation() and we may fail to wake them up. b/27467554.
4411   MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
4412   AllocRecordObjectMap* allocation_records = GetAllocationRecords();
4413   if (allocation_records != nullptr) {
4414     allocation_records->BroadcastForNewAllocationRecords();
4415   }
4416 }
4417 
4418 // Perfetto Java Heap Profiler Support.
4419 
4420 // Perfetto initialization.
4421 void Heap::InitPerfettoJavaHeapProf() {
4422   // Initialize Perfetto Heap info and Heap id.
4423   uint32_t heap_id = 1;  // Initialize to 1, to be overwritten by Perfetto heap id.
4424 #ifdef ART_TARGET_ANDROID
4425   // Register the heap and create the heapid.
4426   // Use a Perfetto heap name = "com.android.art" for the Java Heap Profiler.
4427   AHeapInfo* info = AHeapInfo_create("com.android.art");
4428   // Set the Enable Callback, there is no callback data ("nullptr").
4429   AHeapInfo_setEnabledCallback(info, &EnableHeapSamplerCallback, &heap_sampler_);
4430   // Set the Disable Callback.
4431   AHeapInfo_setDisabledCallback(info, &DisableHeapSamplerCallback, &heap_sampler_);
4432   heap_id = AHeapProfile_registerHeap(info);
4433   // Do not enable the Java Heap Profiler in this case; wait for Perfetto to enable it through
4434   // the callback function.
4435 #else
4436   // This is the host case, enable the Java Heap Profiler for host testing.
4437   // Perfetto API is currently not available on host.
4438   heap_sampler_.EnableHeapSampler();
4439 #endif
4440   heap_sampler_.SetHeapID(heap_id);
4441   VLOG(heap) << "Java Heap Profiler Initialized";
4442 }
4443 
4444 void Heap::JHPCheckNonTlabSampleAllocation(Thread* self, mirror::Object* obj, size_t alloc_size) {
4445   bool take_sample = false;
4446   size_t bytes_until_sample = 0;
4447   HeapSampler& prof_heap_sampler = GetHeapSampler();
4448   // An allocation occurred; sample it, even if it is non-TLAB.
4449   // take_sample may already be set from a previous GetSampleOffset call
4450   // (because the TLAB allocation was tried first), but that value is not used
4451   // here: a new value is generated below, and bytes_until_sample is updated.
4452   // Note that the return value of GetSampleOffset is not used in the
4453   // non-TLAB case.
4454   prof_heap_sampler.GetSampleOffset(
4455       alloc_size, self->GetTlabPosOffset(), &take_sample, &bytes_until_sample);
4456   prof_heap_sampler.SetBytesUntilSample(bytes_until_sample);
4457   if (take_sample) {
4458     prof_heap_sampler.ReportSample(obj, alloc_size);
4459   }
4460   VLOG(heap) << "JHP:NonTlab Non-moving or Large Allocation or RegisterNativeAllocation";
4461 }
4462 
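// Computes the size of the next TLAB when the Java Heap Profiler is enabled: the smaller of
// jhp_def_tlab_size and the next sample offset, so the TLAB ends no later than the next
// sample point. Also updates *take_sample and *bytes_until_sample via GetSampleOffset.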
4463 size_t Heap::JHPCalculateNextTlabSize(Thread* self,
4464                                       size_t jhp_def_tlab_size,
4465                                       size_t alloc_size,
4466                                       bool* take_sample,
4467                                       size_t* bytes_until_sample) {
4468   size_t next_sample_point = GetHeapSampler().GetSampleOffset(
4469       alloc_size, self->GetTlabPosOffset(), take_sample, bytes_until_sample);
4470   return std::min(next_sample_point, jhp_def_tlab_size);
4471 }
4472 
4473 void Heap::AdjustSampleOffset(size_t adjustment) {
4474   GetHeapSampler().AdjustSampleOffset(adjustment);
4475 }
4476 
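// GC stress mode: force a collection the first time a given allocation back-trace is seen;
// allocations from already-seen back-traces only increment a counter.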
4477 void Heap::CheckGcStressMode(Thread* self, ObjPtr<mirror::Object>* obj) {
4478   DCHECK(gc_stress_mode_);
4479   auto* const runtime = Runtime::Current();
4480   if (runtime->GetClassLinker()->IsInitialized() && !runtime->IsActiveTransaction()) {
4481     // Check if we should GC.
4482     bool new_backtrace = false;
4483     {
4484       static constexpr size_t kMaxFrames = 16u;
4485       MutexLock mu(self, *backtrace_lock_);
4486       FixedSizeBacktrace<kMaxFrames> backtrace;
4487       backtrace.Collect(/* skip_count= */ 2);
4488       uint64_t hash = backtrace.Hash();
4489       new_backtrace = seen_backtraces_.find(hash) == seen_backtraces_.end();
4490       if (new_backtrace) {
4491         seen_backtraces_.insert(hash);
4492       }
4493     }
4494     if (new_backtrace) {
4495       StackHandleScope<1> hs(self);
4496       auto h = hs.NewHandleWrapper(obj);
4497       CollectGarbage(/* clear_soft_references= */ false);
4498       unique_backtrace_count_.fetch_add(1);
4499     } else {
4500       seen_backtrace_count_.fetch_add(1);
4501     }
4502   }
4503 }
4504 
4505 void Heap::DisableGCForShutdown() {
4506   MutexLock mu(Thread::Current(), *gc_complete_lock_);
4507   gc_disabled_for_shutdown_ = true;
4508 }
4509 
4510 bool Heap::IsGCDisabledForShutdown() const {
4511   MutexLock mu(Thread::Current(), *gc_complete_lock_);
4512   return gc_disabled_for_shutdown_;
4513 }
4514 
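// IsBootImageAddress() is a fast address-range check; the DCHECKs in the two functions below
// cross-check it against an explicit scan of the boot image spaces (and their oat files).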
4515 bool Heap::ObjectIsInBootImageSpace(ObjPtr<mirror::Object> obj) const {
4516   DCHECK_EQ(IsBootImageAddress(obj.Ptr()),
4517             any_of(boot_image_spaces_.begin(),
4518                    boot_image_spaces_.end(),
4519                    [obj](gc::space::ImageSpace* space) REQUIRES_SHARED(Locks::mutator_lock_) {
4520                      return space->HasAddress(obj.Ptr());
4521                    }));
4522   return IsBootImageAddress(obj.Ptr());
4523 }
4524 
4525 bool Heap::IsInBootImageOatFile(const void* p) const {
4526   DCHECK_EQ(IsBootImageAddress(p),
4527             any_of(boot_image_spaces_.begin(),
4528                    boot_image_spaces_.end(),
4529                    [p](gc::space::ImageSpace* space) REQUIRES_SHARED(Locks::mutator_lock_) {
4530                      return space->GetOatFile()->Contains(p);
4531                    }));
4532   return IsBootImageAddress(p);
4533 }
4534 
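// The quick alloc entry points are instrumented when the first allocation listener is
// installed and uninstrumented again when it is removed.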
4535 void Heap::SetAllocationListener(AllocationListener* l) {
4536   AllocationListener* old = GetAndOverwriteAllocationListener(&alloc_listener_, l);
4537 
4538   if (old == nullptr) {
4539     Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints();
4540   }
4541 }
4542 
4543 void Heap::RemoveAllocationListener() {
4544   AllocationListener* old = GetAndOverwriteAllocationListener(&alloc_listener_, nullptr);
4545 
4546   if (old != nullptr) {
4547     Runtime::Current()->GetInstrumentation()->UninstrumentQuickAllocEntryPoints();
4548   }
4549 }
4550 
4551 void Heap::SetGcPauseListener(GcPauseListener* l) {
4552   gc_pause_listener_.store(l, std::memory_order_relaxed);
4553 }
4554 
4555 void Heap::RemoveGcPauseListener() {
4556   gc_pause_listener_.store(nullptr, std::memory_order_relaxed);
4557 }
4558 
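// Slow path used when the current TLAB cannot satisfy the allocation: expand a partial TLAB,
// carve a new TLAB out of the bump-pointer or region space, or fall back to a direct
// (non-TLAB) region allocation. The JHP branches decide whether to sample this allocation.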
4559 mirror::Object* Heap::AllocWithNewTLAB(Thread* self,
4560                                        AllocatorType allocator_type,
4561                                        size_t alloc_size,
4562                                        bool grow,
4563                                        size_t* bytes_allocated,
4564                                        size_t* usable_size,
4565                                        size_t* bytes_tl_bulk_allocated) {
4566   mirror::Object* ret = nullptr;
4567   bool take_sample = false;
4568   size_t bytes_until_sample = 0;
4569   bool jhp_enabled = GetHeapSampler().IsEnabled();
4570 
4571   if (kUsePartialTlabs && alloc_size <= self->TlabRemainingCapacity()) {
4572     DCHECK_GT(alloc_size, self->TlabSize());
4573     // There is enough space if we grow the TLAB. Let's do that. This increases the
4574     // TLAB bytes.
4575     const size_t min_expand_size = alloc_size - self->TlabSize();
4576     size_t next_tlab_size =
4577         jhp_enabled ? JHPCalculateNextTlabSize(
4578                           self, kPartialTlabSize, alloc_size, &take_sample, &bytes_until_sample) :
4579                       kPartialTlabSize;
4580     const size_t expand_bytes = std::max(
4581         min_expand_size,
4582         std::min(self->TlabRemainingCapacity() - self->TlabSize(), next_tlab_size));
4583     if (UNLIKELY(IsOutOfMemoryOnAllocation(allocator_type, expand_bytes, grow))) {
4584       return nullptr;
4585     }
4586     *bytes_tl_bulk_allocated = expand_bytes;
4587     self->ExpandTlab(expand_bytes);
4588     DCHECK_LE(alloc_size, self->TlabSize());
4589   } else if (allocator_type == kAllocatorTypeTLAB) {
4590     DCHECK(bump_pointer_space_ != nullptr);
4591     // Try to allocate a page-aligned TLAB (not necessary though).
4592     // TODO: for large allocations, which are rare, maybe we should allocate
4593     // that object and return. There is no need to revoke the current TLAB,
4594     // particularly if it's mostly unutilized.
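    // next_tlab_size is chosen so that alloc_size + next_tlab_size is a multiple of gPageSize
    // (before any adjustment by the heap sampler below).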
4595     size_t next_tlab_size = RoundDown(alloc_size + kDefaultTLABSize, gPageSize) - alloc_size;
4596     if (jhp_enabled) {
4597       next_tlab_size = JHPCalculateNextTlabSize(
4598           self, next_tlab_size, alloc_size, &take_sample, &bytes_until_sample);
4599     }
4600     const size_t new_tlab_size = alloc_size + next_tlab_size;
4601     if (UNLIKELY(IsOutOfMemoryOnAllocation(allocator_type, new_tlab_size, grow))) {
4602       return nullptr;
4603     }
4604     // Try allocating a new thread local buffer, if the allocation fails the space must be
4605     // full so return null.
4606     if (!bump_pointer_space_->AllocNewTlab(self, new_tlab_size, bytes_tl_bulk_allocated)) {
4607       return nullptr;
4608     }
4609     if (jhp_enabled) {
4610       VLOG(heap) << "JHP:kAllocatorTypeTLAB, New Tlab bytes allocated= " << new_tlab_size;
4611     }
4612   } else {
4613     DCHECK(allocator_type == kAllocatorTypeRegionTLAB);
4614     DCHECK(region_space_ != nullptr);
4615     if (space::RegionSpace::kRegionSize >= alloc_size) {
4616       // Non-large. Check OOME for a tlab.
4617       if (LIKELY(!IsOutOfMemoryOnAllocation(allocator_type,
4618                                             space::RegionSpace::kRegionSize,
4619                                             grow))) {
4620         size_t next_pr_tlab_size =
4621             kUsePartialTlabs ? kPartialTlabSize : gc::space::RegionSpace::kRegionSize;
4622         if (jhp_enabled) {
4623           next_pr_tlab_size = JHPCalculateNextTlabSize(
4624               self, next_pr_tlab_size, alloc_size, &take_sample, &bytes_until_sample);
4625         }
4626         const size_t new_tlab_size = kUsePartialTlabs
4627             ? std::max(alloc_size, next_pr_tlab_size)
4628             : next_pr_tlab_size;
4629         // Try to allocate a tlab.
4630         if (!region_space_->AllocNewTlab(self, new_tlab_size, bytes_tl_bulk_allocated)) {
4631           // Failed to allocate a tlab. Try non-tlab.
4632           ret = region_space_->AllocNonvirtual<false>(alloc_size,
4633                                                       bytes_allocated,
4634                                                       usable_size,
4635                                                       bytes_tl_bulk_allocated);
4636           if (jhp_enabled) {
4637             JHPCheckNonTlabSampleAllocation(self, ret, alloc_size);
4638           }
4639           return ret;
4640         }
4641         // Fall-through to using the TLAB below.
4642       } else {
4643         // Check OOME for a non-tlab allocation.
4644         if (!IsOutOfMemoryOnAllocation(allocator_type, alloc_size, grow)) {
4645           ret = region_space_->AllocNonvirtual<false>(alloc_size,
4646                                                       bytes_allocated,
4647                                                       usable_size,
4648                                                       bytes_tl_bulk_allocated);
4649           if (jhp_enabled) {
4650             JHPCheckNonTlabSampleAllocation(self, ret, alloc_size);
4651           }
4652           return ret;
4653         }
4654         // Neither tlab nor non-tlab works. Give up.
4655         return nullptr;
4656       }
4657     } else {
4658       // Large. Check OOME.
4659       if (LIKELY(!IsOutOfMemoryOnAllocation(allocator_type, alloc_size, grow))) {
4660         ret = region_space_->AllocNonvirtual<false>(alloc_size,
4661                                                     bytes_allocated,
4662                                                     usable_size,
4663                                                     bytes_tl_bulk_allocated);
4664         if (jhp_enabled) {
4665           JHPCheckNonTlabSampleAllocation(self, ret, alloc_size);
4666         }
4667         return ret;
4668       }
4669       return nullptr;
4670     }
4671   }
4672   // Refilled TLAB, return.
4673   ret = self->AllocTlab(alloc_size);
4674   DCHECK(ret != nullptr);
4675   *bytes_allocated = alloc_size;
4676   *usable_size = alloc_size;
4677 
4678   // JavaHeapProfiler: Send the thread information about this allocation in case a sample is
4679   // requested.
4680   // This is the fallthrough from both the if and else if above cases => Cases that use TLAB.
4681   if (jhp_enabled) {
4682     if (take_sample) {
4683       GetHeapSampler().ReportSample(ret, alloc_size);
4684       // Update the bytes_until_sample now that the allocation is already done.
4685       GetHeapSampler().SetBytesUntilSample(bytes_until_sample);
4686     }
4687     VLOG(heap) << "JHP:Fallthrough Tlab allocation";
4688   }
4689 
4690   return ret;
4691 }
4692 
4693 const Verification* Heap::GetVerification() const {
4694   return verification_.get();
4695 }
4696 
4697 void Heap::VlogHeapGrowth(size_t old_footprint, size_t new_footprint, size_t alloc_size) {
4698   VLOG(heap) << "Growing heap from " << PrettySize(old_footprint) << " to "
4699              << PrettySize(new_footprint) << " for a " << PrettySize(alloc_size) << " allocation";
4700 }
4701 
4702 // Run a gc if we haven't run one since initial_gc_num. This forces processes to
4703 // reclaim memory allocated during startup, even if they don't do much
4704 // allocation post startup. If the process is actively allocating and triggering
4705 // GCs, or has moved to the background and hence forced a GC, this does nothing.
4706 class Heap::TriggerPostForkCCGcTask : public HeapTask {
4707  public:
4708   explicit TriggerPostForkCCGcTask(uint64_t target_time, uint32_t initial_gc_num) :
4709       HeapTask(target_time), initial_gc_num_(initial_gc_num) {}
4710   void Run(Thread* self) override {
4711     gc::Heap* heap = Runtime::Current()->GetHeap();
4712     if (heap->GetCurrentGcNum() == initial_gc_num_) {
4713       if (kLogAllGCs) {
4714         LOG(INFO) << "Forcing GC for allocation-inactive process";
4715       }
4716       heap->RequestConcurrentGC(self, kGcCauseBackground, false, initial_gc_num_);
4717     }
4718   }
4719  private:
4720   uint32_t initial_gc_num_;
4721 };
4722 
4723 // Reduce target footprint, if no GC has occurred since initial_gc_num.
4724 // If a GC already occurred, it will have done this for us.
4725 class Heap::ReduceTargetFootprintTask : public HeapTask {
4726  public:
4727   explicit ReduceTargetFootprintTask(uint64_t target_time, size_t new_target_sz,
4728                                      uint32_t initial_gc_num) :
4729       HeapTask(target_time), new_target_sz_(new_target_sz), initial_gc_num_(initial_gc_num) {}
4730   void Run(Thread* self) override {
4731     gc::Heap* heap = Runtime::Current()->GetHeap();
4732     MutexLock mu(self, *(heap->gc_complete_lock_));
4733     if (heap->GetCurrentGcNum() == initial_gc_num_
4734         && heap->collector_type_running_ == kCollectorTypeNone) {
4735       size_t target_footprint = heap->target_footprint_.load(std::memory_order_relaxed);
4736       if (target_footprint > new_target_sz_) {
4737         if (heap->target_footprint_.CompareAndSetStrongRelaxed(target_footprint, new_target_sz_)) {
4738           heap->SetDefaultConcurrentStartBytesLocked();
4739         }
4740       }
4741     }
4742   }
4743  private:
4744   size_t new_target_sz_;
4745   uint32_t initial_gc_num_;
4746 };
4747 
4748 // Return a pseudo-random integer between 0 and 19999, using the uid as a seed.  We want this to
4749 // be deterministic for a given process, but to vary randomly across processes. Empirically, the
4750 // uids for processes for which this matters are distinct.
4751 static uint32_t GetPseudoRandomFromUid() {
4752   std::default_random_engine rng(getuid());
4753   std::uniform_int_distribution<int> dist(0, 19999);
4754   return dist(rng);
4755 }
4756 
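// Runs in the child process after fork: start with a non-sticky GC, raise the footprint
// limits for launch, then schedule tasks that shrink the target footprint and eventually
// force a GC if the process itself never triggers one.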
4757 void Heap::PostForkChildAction(Thread* self) {
4758   uint32_t starting_gc_num = GetCurrentGcNum();
4759   uint64_t last_adj_time = NanoTime();
4760   next_gc_type_ = NonStickyGcType();  // Always start with a full gc.
4761 
4762   LOG(INFO) << "Using " << foreground_collector_type_ << " GC.";
4763   if (gUseUserfaultfd) {
4764     DCHECK_NE(mark_compact_, nullptr);
4765     mark_compact_->CreateUserfaultfd(/*post_fork*/true);
4766   }
4767 
4768   // Temporarily increase target_footprint_ and concurrent_start_bytes_ to
4769   // max values to avoid GC during app launch.
4770   // Set target_footprint_ to the largest allowed value.
4771   SetIdealFootprint(growth_limit_);
4772   SetDefaultConcurrentStartBytes();
4773 
4774   // Shrink heap after kPostForkMaxHeapDurationMS, to force a memory hog process to GC.
4775   // This remains high enough that many processes will continue without a GC.
4776   if (initial_heap_size_ < growth_limit_) {
4777     size_t first_shrink_size = std::max(growth_limit_ / 4, initial_heap_size_);
4778     last_adj_time += MsToNs(kPostForkMaxHeapDurationMS);
4779     GetTaskProcessor()->AddTask(
4780         self, new ReduceTargetFootprintTask(last_adj_time, first_shrink_size, starting_gc_num));
4781     // Shrink to a small value after a substantial time period. This will typically force a
4782     // GC if none has occurred yet. Has no effect if there was a GC before this anyway, which
4783     // is commonly the case, e.g. because of a process transition.
4784     if (initial_heap_size_ < first_shrink_size) {
4785       last_adj_time += MsToNs(4 * kPostForkMaxHeapDurationMS);
4786       GetTaskProcessor()->AddTask(
4787           self,
4788           new ReduceTargetFootprintTask(last_adj_time, initial_heap_size_, starting_gc_num));
4789     }
4790   }
4791   // Schedule a GC after a substantial period of time. This will become a no-op if another GC is
4792   // scheduled in the interim. If not, we want to avoid holding onto start-up garbage.
4793   uint64_t post_fork_gc_time = last_adj_time
4794       + MsToNs(4 * kPostForkMaxHeapDurationMS + GetPseudoRandomFromUid());
4795   GetTaskProcessor()->AddTask(self,
4796                               new TriggerPostForkCCGcTask(post_fork_gc_time, starting_gc_num));
4797 }
4798 
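// With mutators paused, visit every heap object that can hold reflective targets
// (Executable, Field, MethodHandle, VarHandle, DexCache) and forward to its visitor method.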
4799 void Heap::VisitReflectiveTargets(ReflectiveValueVisitor *visit) {
4800   VisitObjectsPaused([&visit](mirror::Object* ref) NO_THREAD_SAFETY_ANALYSIS {
4801     art::ObjPtr<mirror::Class> klass(ref->GetClass());
4802     // All these classes are in the BootstrapClassLoader.
4803     if (!klass->IsBootStrapClassLoaded()) {
4804       return;
4805     }
4806     if (GetClassRoot<mirror::Method>()->IsAssignableFrom(klass) ||
4807         GetClassRoot<mirror::Constructor>()->IsAssignableFrom(klass)) {
4808       down_cast<mirror::Executable*>(ref)->VisitTarget(visit);
4809     } else if (art::GetClassRoot<art::mirror::Field>() == klass) {
4810       down_cast<mirror::Field*>(ref)->VisitTarget(visit);
4811     } else if (art::GetClassRoot<art::mirror::MethodHandle>()->IsAssignableFrom(klass)) {
4812       down_cast<mirror::MethodHandle*>(ref)->VisitTarget(visit);
4813     } else if (art::GetClassRoot<art::mirror::StaticFieldVarHandle>()->IsAssignableFrom(klass)) {
4814       down_cast<mirror::StaticFieldVarHandle*>(ref)->VisitTarget(visit);
4815     } else if (art::GetClassRoot<art::mirror::FieldVarHandle>()->IsAssignableFrom(klass)) {
4816       down_cast<mirror::FieldVarHandle*>(ref)->VisitTarget(visit);
4817     } else if (art::GetClassRoot<art::mirror::DexCache>()->IsAssignableFrom(klass)) {
4818       down_cast<mirror::DexCache*>(ref)->VisitReflectiveTargets(visit);
4819     }
4820   });
4821 }
4822 
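// Enqueues the task on the heap's TaskProcessor; returns false if heap tasks cannot
// currently be added.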
4823 bool Heap::AddHeapTask(gc::HeapTask* task) {
4824   Thread* const self = Thread::Current();
4825   if (!CanAddHeapTask(self)) {
4826     return false;
4827   }
4828   GetTaskProcessor()->AddTask(self, task);
4829   return true;
4830 }
4831 
4832 std::string Heap::GetForegroundCollectorName() {
4833   std::ostringstream oss;
4834   oss << foreground_collector_type_;
4835   return oss.str();
4836 }
4837 
4838 bool Heap::HasAppImageSpaceFor(const std::string& dex_location) const {
4839   ScopedObjectAccess soa(Thread::Current());
4840   for (space::ContinuousSpace* space : continuous_spaces_) {
4841     // An image space is either a boot image space or an app image space.
4842     if (space->IsImageSpace() &&
4843         !IsBootImageAddress(space->Begin()) &&
4844         (space->AsImageSpace()->GetOatFile()->GetOatDexFiles()[0]->GetDexFileLocation() ==
4845               dex_location)) {
4846       return true;
4847     }
4848   }
4849   return false;
4850 }
4851 
4852 }  // namespace gc
4853 }  // namespace art
4854