/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_H_
#define ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_H_

#include "garbage_collector.h"
#include "gc/accounting/space_bitmap.h"
#include "immune_spaces.h"
#include "offsets.h"

#include <map>
#include <memory>
#include <unordered_map>
#include <vector>

namespace art {
class Barrier;
class Closure;
class RootInfo;

namespace mirror {
template<class MirrorType> class CompressedReference;
template<class MirrorType> class HeapReference;
class Object;
}  // namespace mirror

namespace gc {

namespace accounting {
template<typename T> class AtomicStack;
typedef AtomicStack<mirror::Object> ObjectStack;
template <size_t kAlignment> class SpaceBitmap;
typedef SpaceBitmap<kObjectAlignment> ContinuousSpaceBitmap;
class HeapBitmap;
class ReadBarrierTable;
}  // namespace accounting

namespace space {
class RegionSpace;
}  // namespace space

namespace collector {

class ConcurrentCopying : public GarbageCollector {
 public:
  // Enable the no-from-space-refs verification at the pause.
  static constexpr bool kEnableNoFromSpaceRefsVerification = kIsDebugBuild;
  // Enable the from-space bytes/objects check.
  static constexpr bool kEnableFromSpaceAccountingCheck = kIsDebugBuild;
  // Enable verbose mode.
  static constexpr bool kVerboseMode = false;
  // If kGrayDirtyImmuneObjects is true then we gray dirty objects in the GC pause to prevent dirty
  // pages.
  static constexpr bool kGrayDirtyImmuneObjects = true;

  ConcurrentCopying(Heap* heap,
                    bool young_gen,
                    bool use_generational_cc,
                    const std::string& name_prefix = "",
                    bool measure_read_barrier_slow_path = false);
  ~ConcurrentCopying();

  void RunPhases() override
      REQUIRES(!immune_gray_stack_lock_,
               !mark_stack_lock_,
               !rb_slow_path_histogram_lock_,
               !skipped_blocks_lock_);
  void InitializePhase() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !immune_gray_stack_lock_);
  void MarkingPhase() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void CopyingPhase() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  void ReclaimPhase() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  void FinishPhase() REQUIRES(!mark_stack_lock_,
                              !rb_slow_path_histogram_lock_,
                              !skipped_blocks_lock_);
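
  // Illustrative phase sequence (a sketch inferred from the declarations above; RunPhases() in
  // the .cc file is the authoritative driver and also handles the thread-flip pause):
  //
  //   InitializePhase();
  //   MarkingPhase();
  //   CopyingPhase();
  //   ReclaimPhase();
  //   FinishPhase();
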
  void CaptureRssAtPeak() REQUIRES(!mark_stack_lock_);
  void BindBitmaps() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::heap_bitmap_lock_);
  GcType GetGcType() const override {
    return (use_generational_cc_ && young_gen_)
        ? kGcTypeSticky
        : kGcTypePartial;
  }
  CollectorType GetCollectorType() const override {
    return kCollectorTypeCC;
  }
  void RevokeAllThreadLocalBuffers() override;
  // Creates inter-region ref bitmaps for region-space and non-moving-space.
  // Gets called in Heap construction after the two spaces are created.
  void CreateInterRegionRefBitmaps();
  void SetRegionSpace(space::RegionSpace* region_space) {
    DCHECK(region_space != nullptr);
    region_space_ = region_space;
  }
  space::RegionSpace* RegionSpace() {
    return region_space_;
  }
  // Assert the to-space invariant for a heap reference `ref` held in `obj` at offset `offset`.
  void AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset, mirror::Object* ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  // Assert the to-space invariant for a GC root reference `ref`.
  void AssertToSpaceInvariant(GcRootSource* gc_root_source, mirror::Object* ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  bool IsInToSpace(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_) {
    DCHECK(ref != nullptr);
    return IsMarked(ref) == ref;
  }
  // Mark object `from_ref`, copying it to the to-space if needed.
  template<bool kGrayImmuneObject = true, bool kNoUnEvac = false, bool kFromGCThread = false>
  ALWAYS_INLINE mirror::Object* Mark(Thread* const self,
                                     mirror::Object* from_ref,
                                     mirror::Object* holder = nullptr,
                                     MemberOffset offset = MemberOffset(0))
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  ALWAYS_INLINE mirror::Object* MarkFromReadBarrier(mirror::Object* from_ref)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  bool IsMarking() const {
    return is_marking_;
  }
  // We may want to use read barrier entrypoints before is_marking_ is true, since concurrent
  // graying creates a small window where we might dispatch on these entrypoints.
  bool IsUsingReadBarrierEntrypoints() const {
    return is_using_read_barrier_entrypoints_;
  }
  bool IsActive() const {
    return is_active_;
  }
  Barrier& GetBarrier() {
    return *gc_barrier_;
  }
  bool IsWeakRefAccessEnabled() REQUIRES(Locks::thread_list_lock_) {
    return weak_ref_access_enabled_;
  }
  void RevokeThreadLocalMarkStack(Thread* thread) REQUIRES(!mark_stack_lock_);

  mirror::Object* IsMarked(mirror::Object* from_ref) override
      REQUIRES_SHARED(Locks::mutator_lock_);

  void AssertNoThreadMarkStackMapping(Thread* thread) REQUIRES(!mark_stack_lock_);
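
  // Illustrative sketch of how the public marking API above is typically reached (assumed caller
  // shape; `collector` and `ref` are hypothetical locals, not declared by this class): a Baker
  // read-barrier slow path hands a possibly-from-space reference to the collector and expects a
  // to-space reference back.
  //
  //   if (collector->IsMarking()) {
  //     ref = collector->MarkFromReadBarrier(ref);
  //     DCHECK(collector->IsInToSpace(ref));  // to-space invariant for the returned reference
  //   }
  //
  // The real entrypoints live in the read-barrier runtime support, not in this header.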

 private:
  void PushOntoMarkStack(Thread* const self, mirror::Object* obj)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  mirror::Object* Copy(Thread* const self,
                       mirror::Object* from_ref,
                       mirror::Object* holder,
                       MemberOffset offset)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  // Scan the reference fields of object `to_ref`.
  template <bool kNoUnEvac>
  void Scan(mirror::Object* to_ref) REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  // Scan the reference fields of object `obj` in the dirty cards during
  // card-table scan. In addition to visiting the references, it also sets the
  // read-barrier state to gray for Reference-type objects to ensure that
  // GetReferent() called on these objects calls the read-barrier on the referent.
  template <bool kNoUnEvac>
  void ScanDirtyObject(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  // Process a field.
  template <bool kNoUnEvac>
  void Process(mirror::Object* obj, MemberOffset offset)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) override
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  template<bool kGrayImmuneObject>
  void MarkRoot(Thread* const self, mirror::CompressedReference<mirror::Object>* root)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
                  size_t count,
                  const RootInfo& info) override
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  void VerifyNoFromSpaceReferences() REQUIRES(Locks::mutator_lock_);
  accounting::ObjectStack* GetAllocationStack();
  accounting::ObjectStack* GetLiveStack();
  void ProcessMarkStack() override REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  bool ProcessMarkStackOnce() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  void ProcessMarkStackRef(mirror::Object* to_ref) REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void GrayAllDirtyImmuneObjects()
      REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void GrayAllNewlyDirtyImmuneObjects()
      REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void VerifyGrayImmuneObjects()
      REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void VerifyNoMissingCardMarks()
      REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  template <typename Processor>
  size_t ProcessThreadLocalMarkStacks(bool disable_weak_ref_access,
                                      Closure* checkpoint_callback,
                                      const Processor& processor)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  void RevokeThreadLocalMarkStacks(bool disable_weak_ref_access, Closure* checkpoint_callback)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void SwitchToSharedMarkStackMode() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void SwitchToGcExclusiveMarkStackMode() REQUIRES_SHARED(Locks::mutator_lock_);
  void DelayReferenceReferent(ObjPtr<mirror::Class> klass,
                              ObjPtr<mirror::Reference> reference) override
      REQUIRES_SHARED(Locks::mutator_lock_);
  void ProcessReferences(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
  mirror::Object* MarkObject(mirror::Object* from_ref) override
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  void MarkHeapReference(mirror::HeapReference<mirror::Object>* from_ref,
                         bool do_atomic_update) override
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  bool IsMarkedInUnevacFromSpace(mirror::Object* from_ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  bool IsMarkedInNonMovingSpace(mirror::Object* from_ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* field,
                                   bool do_atomic_update) override
      REQUIRES_SHARED(Locks::mutator_lock_);
  void SweepSystemWeaks(Thread* self)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::heap_bitmap_lock_);
  // Sweep unmarked objects to complete the garbage collection. Full GCs sweep
  // all allocation spaces (except the region space). Sticky-bit GCs just sweep
  // a subset of the heap.
  void Sweep(bool swap_bitmaps)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_);
  // Sweep only pointers within an array.
  void SweepArray(accounting::ObjectStack* allocation_stack_, bool swap_bitmaps)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_);
  void SweepLargeObjects(bool swap_bitmaps)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);
  void MarkZygoteLargeObjects()
      REQUIRES_SHARED(Locks::mutator_lock_);
  void FillWithFakeObject(Thread* const self, mirror::Object* fake_obj, size_t byte_size)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);
  mirror::Object* AllocateInSkippedBlock(Thread* const self, size_t alloc_size)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void CheckEmptyMarkStack() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  void IssueEmptyCheckpoint() REQUIRES_SHARED(Locks::mutator_lock_);
  bool IsOnAllocStack(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_);
  mirror::Object* GetFwdPtr(mirror::Object* from_ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void FlipThreadRoots() REQUIRES(!Locks::mutator_lock_);
  void SwapStacks() REQUIRES_SHARED(Locks::mutator_lock_);
  void RecordLiveStackFreezeSize(Thread* self);
  void ComputeUnevacFromSpaceLiveRatio();
  void LogFromSpaceRefHolder(mirror::Object* obj, MemberOffset offset)
      REQUIRES_SHARED(Locks::mutator_lock_);
  // Dump information about reference `ref` and return it as a string.
  // Use `ref_name` to name the reference in messages. Each message is prefixed with `indent`.
  std::string DumpReferenceInfo(mirror::Object* ref, const char* ref_name, const char* indent = "")
      REQUIRES_SHARED(Locks::mutator_lock_);
  // Dump information about heap reference `ref`, referenced from object `obj` at offset `offset`,
  // and return it as a string.
  std::string DumpHeapReference(mirror::Object* obj, MemberOffset offset, mirror::Object* ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  // Dump information about GC root `ref` and return it as a string.
  std::string DumpGcRoot(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_);
  void AssertToSpaceInvariantInNonMovingSpace(mirror::Object* obj, mirror::Object* ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void ReenableWeakRefAccess(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
  void DisableMarking() REQUIRES_SHARED(Locks::mutator_lock_);
  void IssueDisableMarkingCheckpoint() REQUIRES_SHARED(Locks::mutator_lock_);
  void ExpandGcMarkStack() REQUIRES_SHARED(Locks::mutator_lock_);
  mirror::Object* MarkNonMoving(Thread* const self,
                                mirror::Object* from_ref,
                                mirror::Object* holder = nullptr,
                                MemberOffset offset = MemberOffset(0))
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
  ALWAYS_INLINE mirror::Object* MarkUnevacFromSpaceRegion(
      Thread* const self,
      mirror::Object* from_ref,
      accounting::SpaceBitmap<kObjectAlignment>* bitmap)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
  template<bool kGrayImmuneObject>
  ALWAYS_INLINE mirror::Object* MarkImmuneSpace(Thread* const self,
                                                mirror::Object* from_ref)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!immune_gray_stack_lock_);
  void ScanImmuneObject(mirror::Object* obj)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  mirror::Object* MarkFromReadBarrierWithMeasurements(Thread* const self,
                                                      mirror::Object* from_ref)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  void DumpPerformanceInfo(std::ostream& os) override REQUIRES(!rb_slow_path_histogram_lock_);
  // Set the read barrier mark entrypoints to non-null.
  void ActivateReadBarrierEntrypoints();

  void CaptureThreadRootsForMarking() REQUIRES_SHARED(Locks::mutator_lock_);
  void AddLiveBytesAndScanRef(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_);
  bool TestMarkBitmapForRef(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_);
  template <bool kAtomic = false>
  bool TestAndSetMarkBitForRef(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_);
  void PushOntoLocalMarkStack(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_);
  void ProcessMarkStackForMarkingAndComputeLiveBytes() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);

  void RemoveThreadMarkStackMapping(Thread* thread, accounting::ObjectStack* tl_mark_stack)
      REQUIRES(mark_stack_lock_);
  void AddThreadMarkStackMapping(Thread* thread, accounting::ObjectStack* tl_mark_stack)
      REQUIRES(mark_stack_lock_);
  void AssertEmptyThreadMarkStackMap() REQUIRES(mark_stack_lock_);

  space::RegionSpace* region_space_;  // The underlying region space.
  std::unique_ptr<Barrier> gc_barrier_;
  std::unique_ptr<accounting::ObjectStack> gc_mark_stack_;

  // If true, enable generational collection when using the Concurrent Copying
  // (CC) collector, i.e. use sticky-bit CC for minor collections and (full) CC
  // for major collections. Generational CC collection is currently only
  // compatible with Baker read barriers. Set in Heap constructor.
  const bool use_generational_cc_;

  // Generational "sticky", only trace through dirty objects in region space.
  const bool young_gen_;

  // If true, the GC thread is done scanning marked objects on dirty and aged
  // cards (see ConcurrentCopying::CopyingPhase).
  Atomic<bool> done_scanning_;

  // The read-barrier mark-bit stack. Stores object references whose
  // mark bit has been set by ConcurrentCopying::MarkFromReadBarrier,
  // so that this bit can be reset at the end of the collection in
  // ConcurrentCopying::FinishPhase. The mark bit of an object can be
  // used by mutator read barrier code to quickly test whether that
  // object has already been marked.
  std::unique_ptr<accounting::ObjectStack> rb_mark_bit_stack_;
  // Thread-unsafe Boolean value hinting that `rb_mark_bit_stack_` is
  // full. A thread-safe test of whether the read-barrier mark-bit
  // stack is full is implemented by `rb_mark_bit_stack_->AtomicPushBack(ref)`
  // (see use case in ConcurrentCopying::MarkFromReadBarrier).
  bool rb_mark_bit_stack_full_;
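
  // Illustrative use of the pair above (a sketch of the pattern the preceding comment describes,
  // with `ref` a hypothetical local; ConcurrentCopying::MarkFromReadBarrier in the .cc file is
  // authoritative):
  //
  //   if (!rb_mark_bit_stack_->AtomicPushBack(ref)) {
  //     rb_mark_bit_stack_full_ = true;  // hint only; the push itself is the thread-safe test
  //   }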

  // Guards access to the `pooled_mark_stacks_` and `revoked_mark_stacks_` vectors.
  // Also guards destruction and revocation of thread-local mark stacks:
  // clearing a thread-local mark stack (by other threads or during destruction)
  // must be done while holding this lock.
  Mutex mark_stack_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  std::vector<accounting::ObjectStack*> revoked_mark_stacks_
      GUARDED_BY(mark_stack_lock_);
  static constexpr size_t kMarkStackSize = kPageSize;
  static constexpr size_t kMarkStackPoolSize = 256;
  std::vector<accounting::ObjectStack*> pooled_mark_stacks_
      GUARDED_BY(mark_stack_lock_);
  Thread* thread_running_gc_;
  bool is_marking_;                       // True while marking is ongoing.
  // True while we might dispatch on the read barrier entrypoints.
  bool is_using_read_barrier_entrypoints_;
  bool is_active_;                        // True while the collection is ongoing.
  bool is_asserting_to_space_invariant_;  // True while asserting the to-space invariant.
  ImmuneSpaces immune_spaces_;
  accounting::ContinuousSpaceBitmap* region_space_bitmap_;
  // A cache of Heap::GetMarkBitmap().
  accounting::HeapBitmap* heap_mark_bitmap_;
  size_t live_stack_freeze_size_;
  size_t from_space_num_objects_at_first_pause_;  // Computed if kEnableFromSpaceAccountingCheck.
  size_t from_space_num_bytes_at_first_pause_;    // Computed if kEnableFromSpaceAccountingCheck.
  Atomic<int> is_mark_stack_push_disallowed_;
  enum MarkStackMode {
    kMarkStackModeOff = 0,      // Mark stack is off.
    kMarkStackModeThreadLocal,  // All threads except for the GC-running thread push refs onto
                                // thread-local mark stacks. The GC-running thread pushes onto and
                                // pops off the GC mark stack without a lock.
    kMarkStackModeShared,       // All threads share the GC mark stack with a lock.
    kMarkStackModeGcExclusive   // The GC-running thread pushes onto and pops from the GC mark
                                // stack without a lock. Other threads won't access the mark stack.
  };
  Atomic<MarkStackMode> mark_stack_mode_;
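
  // Illustrative mode progression over a collection (a sketch inferred from the enum and the
  // SwitchTo*MarkStackMode() declarations above; the .cc file defines the exact transitions):
  //
  //   kMarkStackModeOff
  //     -> kMarkStackModeThreadLocal   // mutators push onto thread-local stacks
  //     -> kMarkStackModeShared        // SwitchToSharedMarkStackMode(): pushes take the lock
  //     -> kMarkStackModeGcExclusive   // SwitchToGcExclusiveMarkStackMode(): GC thread only
  //     -> kMarkStackModeOff           // once marking is complete
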
  bool weak_ref_access_enabled_ GUARDED_BY(Locks::thread_list_lock_);

  // How many objects and bytes we moved. The GC thread moves many more objects
  // than mutators. Therefore, we separate the two to avoid CAS. bytes_moved_ and
  // bytes_moved_gc_thread_ are critical for GC triggering; the others are just informative.
  Atomic<size_t> bytes_moved_;      // Used by mutators.
  Atomic<size_t> objects_moved_;    // Used by mutators.
  size_t bytes_moved_gc_thread_;    // Used by the GC thread.
  size_t objects_moved_gc_thread_;  // Used by the GC thread.
  Atomic<uint64_t> cumulative_bytes_moved_;
  Atomic<uint64_t> cumulative_objects_moved_;

  // copied_live_bytes_ratio_sum_ is read and written by CC per GC, in
  // ReclaimPhase, and is read by DumpPerformanceInfo (potentially from another
  // thread). However, at present, DumpPerformanceInfo is only called when the
  // runtime shuts down, so there is no concurrent access. The same reasoning
  // applies to gc_count_ and reclaimed_bytes_ratio_sum_.

  // The sum of all copied live bytes ratios (to_bytes/from_bytes).
  float copied_live_bytes_ratio_sum_;
  // The number of GCs counted, used to calculate the average above. (It doesn't
  // include GCs where from_bytes is zero, IOW, where from-space is empty, which is
  // possible for minor GCs if all allocated objects are in non-moving
  // space.)
  size_t gc_count_;
  // Bit is set if the corresponding object has inter-region references that
  // were found during the marking phase of the two-phase full-heap GC cycle.
  accounting::ContinuousSpaceBitmap region_space_inter_region_bitmap_;
  accounting::ContinuousSpaceBitmap non_moving_space_inter_region_bitmap_;

  // reclaimed_bytes_ratio = reclaimed_bytes/num_allocated_bytes per GC cycle.
  float reclaimed_bytes_ratio_sum_;
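
  // For reference, the averages derived from the sums above have the form (assumed reporting
  // shape; DumpPerformanceInfo in the .cc file is authoritative):
  //
  //   average copied-live-bytes ratio = copied_live_bytes_ratio_sum_ / gc_count_
  //   average reclaimed-bytes ratio   = reclaimed_bytes_ratio_sum_ / gc_count_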

  // The skipped blocks are memory blocks/chunks that were copies of
  // objects that went unused due to lost races (CAS failures) when installing
  // the object copy/forwarding pointer. They may be reused.
  // Skipped blocks are always in region space. Their size is included directly
  // in num_bytes_allocated_, i.e. they are treated as allocated, but may be directly
  // used without going through a GC cycle like other objects. They are reused only
  // if we run out of region space. TODO: Revisit this design.
  Mutex skipped_blocks_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  std::multimap<size_t, uint8_t*> skipped_blocks_map_ GUARDED_BY(skipped_blocks_lock_);
  Atomic<size_t> to_space_bytes_skipped_;
  Atomic<size_t> to_space_objects_skipped_;

  // If measure_read_barrier_slow_path_ is true, we count how long is spent in
  // MarkFromReadBarrier and also log the results.
  bool measure_read_barrier_slow_path_;
  // mark_from_read_barrier_measurements_ is true if systrace is enabled or
  // measure_read_barrier_time_ is true.
  bool mark_from_read_barrier_measurements_;
  Atomic<uint64_t> rb_slow_path_ns_;
  Atomic<uint64_t> rb_slow_path_count_;
  Atomic<uint64_t> rb_slow_path_count_gc_;
  mutable Mutex rb_slow_path_histogram_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  Histogram<uint64_t> rb_slow_path_time_histogram_ GUARDED_BY(rb_slow_path_histogram_lock_);
  uint64_t rb_slow_path_count_total_ GUARDED_BY(rb_slow_path_histogram_lock_);
  uint64_t rb_slow_path_count_gc_total_ GUARDED_BY(rb_slow_path_histogram_lock_);

  accounting::ReadBarrierTable* rb_table_;
  bool force_evacuate_all_;  // True if all regions are evacuated.
  Atomic<bool> updated_all_immune_objects_;
  bool gc_grays_immune_objects_;
  Mutex immune_gray_stack_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  std::vector<mirror::Object*> immune_gray_stack_ GUARDED_BY(immune_gray_stack_lock_);

  // Class of java.lang.Object. Filled in from WellKnownClasses in FlipCallback. Must
  // be filled in before flipping thread roots so that FillWithFakeObject can run. Not
  // an ObjPtr since the GC may transition to suspended and runnable between phases.
  mirror::Class* java_lang_Object_;

  // Sweep array free buffer, used to sweep the spaces based on an array more
  // efficiently, by recording dead objects to be freed in batches (see
  // ConcurrentCopying::SweepArray).
  MemMap sweep_array_free_buffer_mem_map_;

  // Use signed because after_gc may be larger than before_gc.
  int64_t num_bytes_allocated_before_gc_;

  class ActivateReadBarrierEntrypointsCallback;
  class ActivateReadBarrierEntrypointsCheckpoint;
  class AssertToSpaceInvariantFieldVisitor;
  class AssertToSpaceInvariantRefsVisitor;
  class ClearBlackPtrsVisitor;
  class ComputeUnevacFromSpaceLiveRatioVisitor;
  class DisableMarkingCallback;
  class DisableMarkingCheckpoint;
  class DisableWeakRefAccessCallback;
  class FlipCallback;
  template <bool kConcurrent> class GrayImmuneObjectVisitor;
  class ImmuneSpaceScanObjVisitor;
  class LostCopyVisitor;
  template <bool kNoUnEvac> class RefFieldsVisitor;
  class RevokeThreadLocalMarkStackCheckpoint;
  class ScopedGcGraysImmuneObjects;
  class ThreadFlipVisitor;
  class VerifyGrayImmuneObjectsVisitor;
  class VerifyNoFromSpaceRefsFieldVisitor;
  class VerifyNoFromSpaceRefsVisitor;
  class VerifyNoMissingCardMarkVisitor;
  class ImmuneSpaceCaptureRefsVisitor;
  template <bool kAtomicTestAndSet = false> class CaptureRootsForMarkingVisitor;
  class CaptureThreadRootsForMarkingAndCheckpoint;
  template <bool kHandleInterRegionRefs> class ComputeLiveBytesAndMarkRefFieldsVisitor;

  DISALLOW_IMPLICIT_CONSTRUCTORS(ConcurrentCopying);
};

}  // namespace collector
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_H_