/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_H_
#define ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_H_

#include "base/macros.h"
#include "garbage_collector.h"
#include "gc/accounting/space_bitmap.h"
#include "immune_spaces.h"
#include "offsets.h"

#include <map>
#include <memory>
#include <unordered_map>
#include <vector>

namespace art HIDDEN {
class Barrier;
class Closure;
class RootInfo;

namespace mirror {
template<class MirrorType> class CompressedReference;
template<class MirrorType> class HeapReference;
class Object;
}  // namespace mirror

namespace gc {

namespace accounting {
template <size_t kAlignment> class SpaceBitmap;
using ContinuousSpaceBitmap = SpaceBitmap<kObjectAlignment>;
class HeapBitmap;
class ReadBarrierTable;
}  // namespace accounting

namespace space {
class RegionSpace;
}  // namespace space

namespace collector {

class ConcurrentCopying : public GarbageCollector {
 public:
  // Enable the no-from-space-refs verification at the pause.
  static constexpr bool kEnableNoFromSpaceRefsVerification = kIsDebugBuild;
  // Enable the from-space bytes/objects check.
  static constexpr bool kEnableFromSpaceAccountingCheck = kIsDebugBuild;
  // Enable verbose mode.
  static constexpr bool kVerboseMode = false;
  // If kGrayDirtyImmuneObjects is true then we gray dirty objects in the GC pause to prevent dirty
  // pages.
  static constexpr bool kGrayDirtyImmuneObjects = true;

  ConcurrentCopying(Heap* heap,
                    bool young_gen,
                    bool use_generational_cc,
                    const std::string& name_prefix = "",
                    bool measure_read_barrier_slow_path = false);
  ~ConcurrentCopying();

  void RunPhases() override
      REQUIRES(!immune_gray_stack_lock_,
               !mark_stack_lock_,
               !rb_slow_path_histogram_lock_,
               !skipped_blocks_lock_);
  void InitializePhase() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !immune_gray_stack_lock_);
  void MarkingPhase() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void CopyingPhase() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  void ReclaimPhase() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  void FinishPhase() REQUIRES(!mark_stack_lock_,
                              !rb_slow_path_histogram_lock_,
                              !skipped_blocks_lock_);

  void CaptureRssAtPeak() REQUIRES(!mark_stack_lock_);
  void BindBitmaps() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::heap_bitmap_lock_);
  GcType GetGcType() const override {
    return (use_generational_cc_ && young_gen_)
        ? kGcTypeSticky
        : kGcTypePartial;
  }
  CollectorType GetCollectorType() const override {
    return kCollectorTypeCC;
  }
  void RevokeAllThreadLocalBuffers() override;
  // Creates inter-region ref bitmaps for region-space and non-moving-space.
  // Gets called in Heap construction after the two spaces are created.
  void CreateInterRegionRefBitmaps();
  void SetRegionSpace(space::RegionSpace* region_space) {
    DCHECK(region_space != nullptr);
    region_space_ = region_space;
  }
  space::RegionSpace* RegionSpace() {
    return region_space_;
  }
  // Assert the to-space invariant for a heap reference `ref` held in `obj` at offset `offset`.
  void AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset, mirror::Object* ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  // Assert the to-space invariant for a GC root reference `ref`.
  void AssertToSpaceInvariant(GcRootSource* gc_root_source, mirror::Object* ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  bool IsInToSpace(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_) {
    DCHECK(ref != nullptr);
    return IsMarked(ref) == ref;
  }
  // Mark object `from_ref`, copying it to the to-space if needed.
  template<bool kGrayImmuneObject = true, bool kNoUnEvac = false, bool kFromGCThread = false>
  ALWAYS_INLINE mirror::Object* Mark(Thread* const self,
                                     mirror::Object* from_ref,
                                     mirror::Object* holder = nullptr,
                                     MemberOffset offset = MemberOffset(0))
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
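  // Illustrative call shape (a sketch, not a quote from the implementation): a caller
  // marking a field's referent typically does
  //
  //   mirror::Object* to_ref = Mark(self, from_ref, /*holder=*/obj, offset);
  //
  // where `holder` and `offset` feed diagnostics (e.g. DumpHeapReference below) if the
  // to-space invariant turns out to be violated.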
  ALWAYS_INLINE mirror::Object* MarkFromReadBarrier(mirror::Object* from_ref)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  bool IsMarking() const {
    return is_marking_;
  }
  // We may want to use read barrier entrypoints before is_marking_ is true since concurrent graying
  // creates a small window where we might dispatch on these entrypoints.
  bool IsUsingReadBarrierEntrypoints() const {
    return is_using_read_barrier_entrypoints_;
  }
  bool IsActive() const {
    return is_active_;
  }
  Barrier& GetBarrier() {
    return *gc_barrier_;
  }
  bool IsWeakRefAccessEnabled() REQUIRES(Locks::thread_list_lock_) {
    return weak_ref_access_enabled_;
  }
  void RevokeThreadLocalMarkStack(Thread* thread) REQUIRES(!mark_stack_lock_);

  // Blindly return the forwarding pointer from the lockword, or null if there is none.
  static mirror::Object* GetFwdPtrUnchecked(mirror::Object* from_ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
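  // A minimal sketch of the lockword-based lookup, assuming the LockWord API from
  // runtime/lock_word.h (the actual definition lives in the implementation file):
  //
  //   LockWord lw = from_ref->GetLockWord(/*as_volatile=*/false);
  //   return lw.GetState() == LockWord::kForwardingAddress
  //       ? reinterpret_cast<mirror::Object*>(lw.ForwardingAddress())
  //       : nullptr;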

  // If marked, return the to-space object, otherwise null.
  mirror::Object* IsMarked(mirror::Object* from_ref) override
      REQUIRES_SHARED(Locks::mutator_lock_);

  void AssertNoThreadMarkStackMapping(Thread* thread) REQUIRES(!mark_stack_lock_);
  // Dump information about reference `ref` and return it as a string.
  // Use `ref_name` to name the reference in messages. Each message is prefixed with `indent`.
  std::string DumpReferenceInfo(mirror::Object* ref, const char* ref_name, const char* indent = "")
      REQUIRES_SHARED(Locks::mutator_lock_);

 private:
  EXPORT void PushOntoMarkStack(Thread* const self, mirror::Object* obj)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  // Returns a to-space copy of the from-space object from_ref, and atomically installs a
  // forwarding pointer. Ensures that the forwarding reference is visible to other threads before
  // the returned to-space pointer becomes visible to them.
  EXPORT mirror::Object* Copy(Thread* const self,
                              mirror::Object* from_ref,
                              mirror::Object* holder,
                              MemberOffset offset) REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
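  // A sketch of the publication ordering described above (illustrative only; local
  // names such as `obj_size` and `old_lock_word` are assumptions, not the real code):
  //
  //   memcpy(to_ref, from_ref, obj_size);  // Fill the to-space copy first.
  //   LockWord fwd = LockWord::FromForwardingAddress(reinterpret_cast<size_t>(to_ref));
  //   // Release ordering: a thread that reads the forwarding address also observes
  //   // the copied contents.
  //   if (from_ref->CasLockWord(old_lock_word, fwd, CASMode::kWeak, std::memory_order_release)) {
  //     return to_ref;  // We won the race to install the forwarding pointer.
  //   }
  //   // Lost the race: recycle the unused copy (see skipped_blocks_map_) and return
  //   // the winner's to-space object instead.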
  // Scan the reference fields of object `to_ref`.
  template <bool kNoUnEvac>
  void Scan(mirror::Object* to_ref, size_t obj_size = 0) REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  // Scan the reference fields of object `obj` in the dirty cards during
  // card-table scan. In addition to visiting the references, it also sets the
  // read-barrier state to gray for Reference-type objects to ensure that
  // GetReferent() called on these objects calls the read-barrier on the referent.
  template <bool kNoUnEvac>
  void ScanDirtyObject(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  // Process a field.
  template <bool kNoUnEvac>
  void Process(mirror::Object* obj, MemberOffset offset)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) override
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  template<bool kGrayImmuneObject>
  void MarkRoot(Thread* const self, mirror::CompressedReference<mirror::Object>* root)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
                  size_t count,
                  const RootInfo& info) override
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  void VerifyNoFromSpaceReferences() REQUIRES(Locks::mutator_lock_);
  accounting::ObjectStack* GetAllocationStack();
  accounting::ObjectStack* GetLiveStack();
  void ProcessMarkStack() override REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  bool ProcessMarkStackOnce() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  void ProcessMarkStackRef(mirror::Object* to_ref) REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void GrayAllDirtyImmuneObjects()
      REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void GrayAllNewlyDirtyImmuneObjects()
      REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void VerifyGrayImmuneObjects()
      REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void VerifyNoMissingCardMarks()
      REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  template <typename Processor>
  size_t ProcessThreadLocalMarkStacks(bool disable_weak_ref_access,
                                      Closure* checkpoint_callback,
                                      const Processor& processor)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  void RevokeThreadLocalMarkStacks(bool disable_weak_ref_access, Closure* checkpoint_callback)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void SwitchToSharedMarkStackMode() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void SwitchToGcExclusiveMarkStackMode() REQUIRES_SHARED(Locks::mutator_lock_);
  void DelayReferenceReferent(ObjPtr<mirror::Class> klass,
                              ObjPtr<mirror::Reference> reference) override
      REQUIRES_SHARED(Locks::mutator_lock_);
  void ProcessReferences(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
  mirror::Object* MarkObject(mirror::Object* from_ref) override
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  void MarkHeapReference(mirror::HeapReference<mirror::Object>* from_ref,
                         bool do_atomic_update) override
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  bool IsMarkedInUnevacFromSpace(mirror::Object* from_ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  bool IsMarkedInNonMovingSpace(mirror::Object* from_ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* field,
                                   bool do_atomic_update) override
      REQUIRES_SHARED(Locks::mutator_lock_);
  void SweepSystemWeaks(Thread* self)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::heap_bitmap_lock_);
  // Sweep unmarked objects to complete the garbage collection. Full GCs sweep
  // all allocation spaces (except the region space). Sticky-bit GCs just sweep
  // a subset of the heap.
  void Sweep(bool swap_bitmaps)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_);
  void SweepArray(accounting::ObjectStack* obj_arr, bool swap_bitmaps)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_);
  void SweepLargeObjects(bool swap_bitmaps)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);
  void MarkZygoteLargeObjects()
      REQUIRES_SHARED(Locks::mutator_lock_);
  void FillWithFakeObject(Thread* const self, mirror::Object* fake_obj, size_t byte_size)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);
  mirror::Object* AllocateInSkippedBlock(Thread* const self, size_t alloc_size)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void CheckEmptyMarkStack() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  void IssueEmptyCheckpoint() REQUIRES_SHARED(Locks::mutator_lock_);
  bool IsOnAllocStack(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_);
  // Return the forwarding pointer from the lockword. The argument must be in from space.
  mirror::Object* GetFwdPtr(mirror::Object* from_ref) REQUIRES_SHARED(Locks::mutator_lock_);
  void FlipThreadRoots() REQUIRES(!Locks::mutator_lock_);
  void SwapStacks() REQUIRES_SHARED(Locks::mutator_lock_);
  void RecordLiveStackFreezeSize(Thread* self);
  void ComputeUnevacFromSpaceLiveRatio();
  void LogFromSpaceRefHolder(mirror::Object* obj, MemberOffset offset)
      REQUIRES_SHARED(Locks::mutator_lock_);
  // Dump information about heap reference `ref`, referenced from object `obj` at offset `offset`,
  // and return it as a string.
  EXPORT std::string DumpHeapReference(mirror::Object* obj,
                                       MemberOffset offset,
                                       mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_);
  // Dump information about GC root `ref` and return it as a string.
  std::string DumpGcRoot(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_);
  void AssertToSpaceInvariantInNonMovingSpace(mirror::Object* obj, mirror::Object* ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void ReenableWeakRefAccess(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
  void DisableMarking() REQUIRES_SHARED(Locks::mutator_lock_);
  void IssueDisableMarkingCheckpoint() REQUIRES_SHARED(Locks::mutator_lock_);
  void ExpandGcMarkStack() REQUIRES_SHARED(Locks::mutator_lock_);
  EXPORT mirror::Object* MarkNonMoving(Thread* const self,
                                       mirror::Object* from_ref,
                                       mirror::Object* holder = nullptr,
                                       MemberOffset offset = MemberOffset(0))
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
  ALWAYS_INLINE mirror::Object* MarkUnevacFromSpaceRegion(Thread* const self,
      mirror::Object* from_ref,
      accounting::SpaceBitmap<kObjectAlignment>* bitmap)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
  template<bool kGrayImmuneObject>
  ALWAYS_INLINE mirror::Object* MarkImmuneSpace(Thread* const self,
                                                mirror::Object* from_ref)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!immune_gray_stack_lock_);
  void ScanImmuneObject(mirror::Object* obj)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  EXPORT mirror::Object* MarkFromReadBarrierWithMeasurements(Thread* const self,
                                                             mirror::Object* from_ref)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  void DumpPerformanceInfo(std::ostream& os) override REQUIRES(!rb_slow_path_histogram_lock_);
  // Set the read barrier mark entrypoints to non-null.
  void ActivateReadBarrierEntrypoints();

  void CaptureThreadRootsForMarking() REQUIRES_SHARED(Locks::mutator_lock_);
  void AddLiveBytesAndScanRef(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_);
  bool TestMarkBitmapForRef(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_);
  template <bool kAtomic = false>
  bool TestAndSetMarkBitForRef(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_);
  void PushOntoLocalMarkStack(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_);
  void ProcessMarkStackForMarkingAndComputeLiveBytes() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);

  void RemoveThreadMarkStackMapping(Thread* thread, accounting::ObjectStack* tl_mark_stack)
      REQUIRES(mark_stack_lock_);
  void AddThreadMarkStackMapping(Thread* thread, accounting::ObjectStack* tl_mark_stack)
      REQUIRES(mark_stack_lock_);
  void AssertEmptyThreadMarkStackMap() REQUIRES(mark_stack_lock_);

  space::RegionSpace* region_space_;      // The underlying region space.
  std::unique_ptr<Barrier> gc_barrier_;
  std::unique_ptr<accounting::ObjectStack> gc_mark_stack_;

  // If true, enable generational collection when using the Concurrent Copying
  // (CC) collector, i.e. use sticky-bit CC for minor collections and (full) CC
  // for major collections. Generational CC collection is currently only
  // compatible with Baker read barriers. Set in Heap constructor.
  const bool use_generational_cc_;

  // Generational "sticky", only trace through dirty objects in region space.
  const bool young_gen_;

  // If true, the GC thread is done scanning marked objects on dirty and aged
  // cards (see ConcurrentCopying::CopyingPhase).
  Atomic<bool> done_scanning_;

  // The read-barrier mark-bit stack. Stores object references whose
  // mark bit has been set by ConcurrentCopying::MarkFromReadBarrier,
  // so that this bit can be reset at the end of the collection in
  // ConcurrentCopying::FinishPhase. The mark bit of an object can be
  // used by mutator read barrier code to quickly test whether that
  // object has already been marked.
  std::unique_ptr<accounting::ObjectStack> rb_mark_bit_stack_;
  // Thread-unsafe Boolean value hinting that `rb_mark_bit_stack_` is
  // full. A thread-safe test of whether the read-barrier mark-bit
  // stack is full is implemented by `rb_mark_bit_stack_->AtomicPushBack(ref)`
  // (see use case in ConcurrentCopying::MarkFromReadBarrier).
  bool rb_mark_bit_stack_full_;
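  // A sketch of the push pattern, assuming AtomicPushBack returns false when the
  // stack is full and that mirror::Object exposes AtomicSetMarkBit:
  //
  //   if (rb_mark_bit_stack_full_ || !rb_mark_bit_stack_->AtomicPushBack(ref)) {
  //     // The ref cannot be recorded for the end-of-collection reset, so undo its
  //     // mark bit now and remember (racily, as a hint) that the stack overflowed.
  //     ref->AtomicSetMarkBit(1, 0);
  //     rb_mark_bit_stack_full_ = true;
  //   }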

  // Guards access to pooled_mark_stacks_ and revoked_mark_stacks_ vectors.
  // Also guards destruction and revocation of thread-local mark stacks.
  // Clearing a thread-local mark stack (by other threads or during destruction)
  // should be guarded by it.
  Mutex mark_stack_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  std::vector<accounting::ObjectStack*> revoked_mark_stacks_
      GUARDED_BY(mark_stack_lock_);
  // Size of thread local mark stack.
  static size_t GetMarkStackSize() {
    return gPageSize;
  }
  static constexpr size_t kMarkStackPoolSize = 256;
  std::vector<accounting::ObjectStack*> pooled_mark_stacks_
      GUARDED_BY(mark_stack_lock_);
  Thread* thread_running_gc_;
  bool is_marking_;                       // True while marking is ongoing.
  // True while we might dispatch on the read barrier entrypoints.
  bool is_using_read_barrier_entrypoints_;
  bool is_active_;                        // True while the collection is ongoing.
  bool is_asserting_to_space_invariant_;  // True while asserting the to-space invariant.
  ImmuneSpaces immune_spaces_;
  accounting::ContinuousSpaceBitmap* region_space_bitmap_;
  // A cache of Heap::GetMarkBitmap().
  accounting::HeapBitmap* heap_mark_bitmap_;
  size_t live_stack_freeze_size_;
  size_t from_space_num_bytes_at_first_pause_;  // Computed if kEnableFromSpaceAccountingCheck
  Atomic<int> is_mark_stack_push_disallowed_;   // Debug only.
  enum MarkStackMode {
    kMarkStackModeOff = 0,      // Mark stack is off.
    kMarkStackModeThreadLocal,  // All threads except for the GC-running thread push refs onto
                                // thread-local mark stacks. The GC-running thread pushes onto and
                                // pops off the GC mark stack without a lock.
    kMarkStackModeShared,       // All threads share the GC mark stack with a lock.
    kMarkStackModeGcExclusive   // The GC-running thread pushes onto and pops from the GC mark stack
                                // without a lock. Other threads won't access the mark stack.
  };
  // mark_stack_mode_ is updated asynchronously by the GC. We cannot assume that another thread
  // has seen it until it has run some kind of checkpoint. We generally access this using
  // acquire/release ordering, to ensure that any relevant prior changes are visible to readers of
  // the flag, and to ensure that CHECKs prior to a state change cannot be delayed past the state
  // change.
  Atomic<MarkStackMode> mark_stack_mode_;
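  // A sketch of the ordering discipline described above (illustrative only):
  //
  //   // Reader, e.g. a mutator deciding where to push a reference:
  //   MarkStackMode mode = mark_stack_mode_.load(std::memory_order_acquire);
  //   // Writer, GC thread only, e.g. when switching to the shared mode:
  //   mark_stack_mode_.store(kMarkStackModeShared, std::memory_order_release);
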
  bool weak_ref_access_enabled_ GUARDED_BY(Locks::thread_list_lock_);

  // How many objects and bytes we moved. The GC thread moves many more objects
  // than mutators. Therefore, we separate the two to avoid CAS. bytes_moved_ and
  // bytes_moved_gc_thread_ are critical for GC triggering; the others are just informative.
  Atomic<size_t> bytes_moved_;    // Used by mutators.
  Atomic<size_t> objects_moved_;  // Used by mutators.

  // copied_live_bytes_ratio_sum_ is read and written by CC per GC, in
  // ReclaimPhase, and is read by DumpPerformanceInfo (potentially from another
  // thread). However, at present, DumpPerformanceInfo is only called when the
  // runtime shuts down, so there is no concurrent access. The same reasoning goes
  // for gc_count_ and reclaimed_bytes_ratio_sum_.

  // The sum of all copied-live-bytes ratios (to_bytes/from_bytes).
  float copied_live_bytes_ratio_sum_;
  // The number of GCs counted, used to calculate the average above. (It doesn't
  // include GCs where from_bytes is zero, i.e. where from-space is empty, which is
  // possible for a minor GC if all allocated objects are in the non-moving
  // space.)
  size_t gc_count_;
  // Bit is set if the corresponding object has inter-region references that
  // were found during the marking phase of a two-phase full-heap GC cycle.
  accounting::ContinuousSpaceBitmap region_space_inter_region_bitmap_;
  accounting::ContinuousSpaceBitmap non_moving_space_inter_region_bitmap_;

  // reclaimed_bytes_ratio = reclaimed_bytes/num_allocated_bytes per GC cycle
  float reclaimed_bytes_ratio_sum_;

  // Used only by GC thread, so need not be atomic. Also, should be kept
  // in a different cacheline than bytes/objects_moved_ (above) to avoid false
  // cacheline sharing.
  size_t bytes_moved_gc_thread_;
  size_t objects_moved_gc_thread_;
  uint64_t bytes_scanned_;
  uint64_t cumulative_bytes_moved_;

  // The skipped blocks are memory blocks/chunks that were copies of
  // objects that were unused due to lost races (CAS failures) at
  // object copy/forward pointer install. They may be reused.
  // Skipped blocks are always in region space. Their size is included directly
  // in num_bytes_allocated_, i.e. they are treated as allocated, but may be directly
  // used without going through a GC cycle like other objects. They are reused only
  // if we run out of region space. TODO: Revisit this design.
  Mutex skipped_blocks_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  std::multimap<size_t, uint8_t*> skipped_blocks_map_ GUARDED_BY(skipped_blocks_lock_);
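  // A sketch of the reuse path, roughly what AllocateInSkippedBlock does (local
  // names are assumptions):
  //
  //   MutexLock mu(self, skipped_blocks_lock_);
  //   auto it = skipped_blocks_map_.lower_bound(alloc_size);  // Smallest block >= alloc_size.
  //   if (it == skipped_blocks_map_.end()) {
  //     return nullptr;  // No skipped block is large enough; take the normal path.
  //   }
  //   uint8_t* addr = it->second;
  //   skipped_blocks_map_.erase(it);
  //   // Any unused tail of the block must be refilled with a fake object (see
  //   // FillWithFakeObject) so the region space stays parseable.
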
  Atomic<size_t> to_space_bytes_skipped_;
  Atomic<size_t> to_space_objects_skipped_;

  // If measure_read_barrier_slow_path_ is true, we count how much time is spent in
  // MarkFromReadBarrier and also log it.
  bool measure_read_barrier_slow_path_;
  // mark_from_read_barrier_measurements_ is true if systrace is enabled or
  // measure_read_barrier_slow_path_ is true.
  bool mark_from_read_barrier_measurements_;
  Atomic<uint64_t> rb_slow_path_ns_;
  Atomic<uint64_t> rb_slow_path_count_;
  Atomic<uint64_t> rb_slow_path_count_gc_;
  mutable Mutex rb_slow_path_histogram_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  Histogram<uint64_t> rb_slow_path_time_histogram_ GUARDED_BY(rb_slow_path_histogram_lock_);
  uint64_t rb_slow_path_count_total_ GUARDED_BY(rb_slow_path_histogram_lock_);
  uint64_t rb_slow_path_count_gc_total_ GUARDED_BY(rb_slow_path_histogram_lock_);

  accounting::ReadBarrierTable* rb_table_;
  bool force_evacuate_all_;  // True if all regions are evacuated.
  Atomic<bool> updated_all_immune_objects_;
  bool gc_grays_immune_objects_;
  Mutex immune_gray_stack_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  std::vector<mirror::Object*> immune_gray_stack_ GUARDED_BY(immune_gray_stack_lock_);

  // Class of java.lang.Object. Filled in from WellKnownClasses in FlipCallback. Must
  // be filled in before flipping thread roots so that FillWithFakeObject can run. Not
  // ObjPtr since the GC may transition to suspended and runnable between phases.
  mirror::Class* java_lang_Object_;

  // Use signed because after_gc may be larger than before_gc.
  int64_t num_bytes_allocated_before_gc_;

  class ActivateReadBarrierEntrypointsCallback;
  class ActivateReadBarrierEntrypointsCheckpoint;
  class AssertToSpaceInvariantFieldVisitor;
  class AssertToSpaceInvariantRefsVisitor;
  class ClearBlackPtrsVisitor;
  class ComputeUnevacFromSpaceLiveRatioVisitor;
  class DisableMarkingCallback;
  class DisableMarkingCheckpoint;
  class DisableWeakRefAccessCallback;
  class FlipCallback;
  template <bool kConcurrent> class GrayImmuneObjectVisitor;
  class ImmuneSpaceScanObjVisitor;
  class LostCopyVisitor;
  template <bool kNoUnEvac> class RefFieldsVisitor;
  class RevokeThreadLocalMarkStackCheckpoint;
  class ScopedGcGraysImmuneObjects;
  class ThreadFlipVisitor;
  class VerifyGrayImmuneObjectsVisitor;
  class VerifyNoFromSpaceRefsFieldVisitor;
  class VerifyNoFromSpaceRefsVisitor;
  class VerifyNoMissingCardMarkVisitor;
  class ImmuneSpaceCaptureRefsVisitor;
  template <bool kAtomicTestAndSet = false> class CaptureRootsForMarkingVisitor;
  class CaptureThreadRootsForMarkingAndCheckpoint;
  template <bool kHandleInterRegionRefs> class ComputeLiveBytesAndMarkRefFieldsVisitor;

  DISALLOW_IMPLICIT_CONSTRUCTORS(ConcurrentCopying);
};

}  // namespace collector
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_H_