1 /*
2 * Copyright (C) 2014 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include "concurrent_copying.h"
18
19 #include "art_field-inl.h"
20 #include "barrier.h"
21 #include "base/enums.h"
22 #include "base/file_utils.h"
23 #include "base/histogram-inl.h"
24 #include "base/quasi_atomic.h"
25 #include "base/stl_util.h"
26 #include "base/systrace.h"
27 #include "class_root-inl.h"
28 #include "debugger.h"
29 #include "gc/accounting/atomic_stack.h"
30 #include "gc/accounting/heap_bitmap-inl.h"
31 #include "gc/accounting/mod_union_table-inl.h"
32 #include "gc/accounting/read_barrier_table.h"
33 #include "gc/accounting/space_bitmap-inl.h"
34 #include "gc/gc_pause_listener.h"
35 #include "gc/reference_processor.h"
36 #include "gc/space/image_space.h"
37 #include "gc/space/space-inl.h"
38 #include "gc/verification.h"
39 #include "image-inl.h"
40 #include "intern_table.h"
41 #include "mirror/class-inl.h"
42 #include "mirror/object-inl.h"
43 #include "mirror/object-refvisitor-inl.h"
44 #include "mirror/object_reference.h"
45 #include "scoped_thread_state_change-inl.h"
46 #include "thread-inl.h"
47 #include "thread_list.h"
48 #include "well_known_classes.h"
49
50 namespace art {
51 namespace gc {
52 namespace collector {
53
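// Size passed as both the growth limit and the capacity of the global GC mark stack created below.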
54 static constexpr size_t kDefaultGcMarkStackSize = 2 * MB;
55 // If kFilterModUnionCards is true, we attempt to filter out cards that don't need to be dirty in
56 // the mod union table. Disabled since it does not seem to help the pause much.
57 static constexpr bool kFilterModUnionCards = kIsDebugBuild;
58 // If kDisallowReadBarrierDuringScan is true then the GC aborts if any read barriers occur during
59 // ConcurrentCopying::Scan in the GC thread. May be used to diagnose possibly unnecessary read
60 // barriers. Only enabled for kIsDebugBuild to avoid the performance hit.
61 static constexpr bool kDisallowReadBarrierDuringScan = kIsDebugBuild;
62 // Slow path mark stack size; increase this if the stack is getting full and it is causing
63 // performance problems.
64 static constexpr size_t kReadBarrierMarkStackSize = 512 * KB;
65 // Size (in the number of objects) of the sweep array free buffer.
66 static constexpr size_t kSweepArrayChunkFreeSize = 1024;
67 // Verify that there are no missing card marks.
68 static constexpr bool kVerifyNoMissingCardMarks = kIsDebugBuild;
69
70 ConcurrentCopying::ConcurrentCopying(Heap* heap,
71 bool young_gen,
72 bool use_generational_cc,
73 const std::string& name_prefix,
74 bool measure_read_barrier_slow_path)
75 : GarbageCollector(heap,
76 name_prefix + (name_prefix.empty() ? "" : " ") +
77 "concurrent copying"),
78 region_space_(nullptr),
79 gc_barrier_(new Barrier(0)),
80 gc_mark_stack_(accounting::ObjectStack::Create("concurrent copying gc mark stack",
81 kDefaultGcMarkStackSize,
82 kDefaultGcMarkStackSize)),
83 use_generational_cc_(use_generational_cc),
84 young_gen_(young_gen),
85 rb_mark_bit_stack_(accounting::ObjectStack::Create("rb copying gc mark stack",
86 kReadBarrierMarkStackSize,
87 kReadBarrierMarkStackSize)),
88 rb_mark_bit_stack_full_(false),
89 mark_stack_lock_("concurrent copying mark stack lock", kMarkSweepMarkStackLock),
90 thread_running_gc_(nullptr),
91 is_marking_(false),
92 is_using_read_barrier_entrypoints_(false),
93 is_active_(false),
94 is_asserting_to_space_invariant_(false),
95 region_space_bitmap_(nullptr),
96 heap_mark_bitmap_(nullptr),
97 live_stack_freeze_size_(0),
98 from_space_num_objects_at_first_pause_(0),
99 from_space_num_bytes_at_first_pause_(0),
100 mark_stack_mode_(kMarkStackModeOff),
101 weak_ref_access_enabled_(true),
102 copied_live_bytes_ratio_sum_(0.f),
103 gc_count_(0),
104 reclaimed_bytes_ratio_sum_(0.f),
105 skipped_blocks_lock_("concurrent copying bytes blocks lock", kMarkSweepMarkStackLock),
106 measure_read_barrier_slow_path_(measure_read_barrier_slow_path),
107 mark_from_read_barrier_measurements_(false),
108 rb_slow_path_ns_(0),
109 rb_slow_path_count_(0),
110 rb_slow_path_count_gc_(0),
111 rb_slow_path_histogram_lock_("Read barrier histogram lock"),
112 rb_slow_path_time_histogram_("Mutator time in read barrier slow path", 500, 32),
113 rb_slow_path_count_total_(0),
114 rb_slow_path_count_gc_total_(0),
115 rb_table_(heap_->GetReadBarrierTable()),
116 force_evacuate_all_(false),
117 gc_grays_immune_objects_(false),
118 immune_gray_stack_lock_("concurrent copying immune gray stack lock",
119 kMarkSweepMarkStackLock),
120 num_bytes_allocated_before_gc_(0) {
121 static_assert(space::RegionSpace::kRegionSize == accounting::ReadBarrierTable::kRegionSize,
122 "The region space size and the read barrier table region size must match");
123 CHECK(use_generational_cc_ || !young_gen_);
124 Thread* self = Thread::Current();
125 {
126 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
127 // Cache this so that we won't have to lock heap_bitmap_lock_ in
128 // Mark(), which could otherwise cause a nested lock on heap_bitmap_lock_
129 // when the GC triggers a read barrier while running, or a lock order
130 // violation (class_linker_lock_ and heap_bitmap_lock_).
131 heap_mark_bitmap_ = heap->GetMarkBitmap();
132 }
133 {
134 MutexLock mu(self, mark_stack_lock_);
135 for (size_t i = 0; i < kMarkStackPoolSize; ++i) {
136 accounting::AtomicStack<mirror::Object>* mark_stack =
137 accounting::AtomicStack<mirror::Object>::Create(
138 "thread local mark stack", kMarkStackSize, kMarkStackSize);
139 pooled_mark_stacks_.push_back(mark_stack);
140 }
141 }
142 if (use_generational_cc_) {
143 // Allocate sweep array free buffer.
144 std::string error_msg;
145 sweep_array_free_buffer_mem_map_ = MemMap::MapAnonymous(
146 "concurrent copying sweep array free buffer",
147 RoundUp(kSweepArrayChunkFreeSize * sizeof(mirror::Object*), kPageSize),
148 PROT_READ | PROT_WRITE,
149 /*low_4gb=*/ false,
150 &error_msg);
151 CHECK(sweep_array_free_buffer_mem_map_.IsValid())
152 << "Couldn't allocate sweep array free buffer: " << error_msg;
153 }
154 }
155
156 void ConcurrentCopying::MarkHeapReference(mirror::HeapReference<mirror::Object>* field,
157 bool do_atomic_update) {
158 Thread* const self = Thread::Current();
159 if (UNLIKELY(do_atomic_update)) {
160 // Used to mark the referent in DelayReferenceReferent in transaction mode.
161 mirror::Object* from_ref = field->AsMirrorPtr();
162 if (from_ref == nullptr) {
163 return;
164 }
165 mirror::Object* to_ref = Mark(self, from_ref);
166 if (from_ref != to_ref) {
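// Install to_ref with a weak CAS. A weak CAS may fail spuriously, so retry until it either
// succeeds or the field is observed to no longer hold from_ref (concurrently overwritten).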
167 do {
168 if (field->AsMirrorPtr() != from_ref) {
169 // Concurrently overwritten by a mutator.
170 break;
171 }
172 } while (!field->CasWeakRelaxed(from_ref, to_ref));
173 }
174 } else {
175 // Used for preserving soft references. It is OK to skip the CAS here since no other thread can
176 // trigger read barriers on the same referent during reference processing.
178 field->Assign(Mark(self, field->AsMirrorPtr()));
179 }
180 }
181
182 ConcurrentCopying::~ConcurrentCopying() {
183 STLDeleteElements(&pooled_mark_stacks_);
184 }
185
186 void ConcurrentCopying::RunPhases() {
187 CHECK(kUseBakerReadBarrier || kUseTableLookupReadBarrier);
188 CHECK(!is_active_);
189 is_active_ = true;
190 Thread* self = Thread::Current();
191 thread_running_gc_ = self;
192 Locks::mutator_lock_->AssertNotHeld(self);
193 {
194 ReaderMutexLock mu(self, *Locks::mutator_lock_);
195 InitializePhase();
196 // In case of forced evacuation, all regions are evacuated and hence there is
197 // no need to compute live_bytes.
198 if (use_generational_cc_ && !young_gen_ && !force_evacuate_all_) {
199 MarkingPhase();
200 }
201 }
202 if (kUseBakerReadBarrier && kGrayDirtyImmuneObjects) {
203 // Switch to read barrier mark entrypoints before we gray the objects. This is required in case
204 // a mutator sees a gray bit and dispatches on the entrypoint. (b/37876887).
205 ActivateReadBarrierEntrypoints();
206 // Gray dirty immune objects concurrently to reduce GC pause times. We re-process gray cards in
207 // the pause.
208 ReaderMutexLock mu(self, *Locks::mutator_lock_);
209 GrayAllDirtyImmuneObjects();
210 }
211 FlipThreadRoots();
212 {
213 ReaderMutexLock mu(self, *Locks::mutator_lock_);
214 CopyingPhase();
215 }
216 // Verify no from space refs. This causes a pause.
217 if (kEnableNoFromSpaceRefsVerification) {
218 TimingLogger::ScopedTiming split("(Paused)VerifyNoFromSpaceReferences", GetTimings());
219 ScopedPause pause(this, false);
220 CheckEmptyMarkStack();
221 if (kVerboseMode) {
222 LOG(INFO) << "Verifying no from-space refs";
223 }
224 VerifyNoFromSpaceReferences();
225 if (kVerboseMode) {
226 LOG(INFO) << "Done verifying no from-space refs";
227 }
228 CheckEmptyMarkStack();
229 }
230 {
231 ReaderMutexLock mu(self, *Locks::mutator_lock_);
232 ReclaimPhase();
233 }
234 FinishPhase();
235 CHECK(is_active_);
236 is_active_ = false;
237 thread_running_gc_ = nullptr;
238 }
239
240 class ConcurrentCopying::ActivateReadBarrierEntrypointsCheckpoint : public Closure {
241 public:
242 explicit ActivateReadBarrierEntrypointsCheckpoint(ConcurrentCopying* concurrent_copying)
243 : concurrent_copying_(concurrent_copying) {}
244
245 void Run(Thread* thread) override NO_THREAD_SAFETY_ANALYSIS {
246 // Note: self is not necessarily equal to thread since thread may be suspended.
247 Thread* self = Thread::Current();
248 DCHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
249 << thread->GetState() << " thread " << thread << " self " << self;
250 // Switch to the read barrier entrypoints.
251 thread->SetReadBarrierEntrypoints();
252 // If thread is a running mutator, then act on behalf of the garbage collector.
253 // See the code in ThreadList::RunCheckpoint.
254 concurrent_copying_->GetBarrier().Pass(self);
255 }
256
257 private:
258 ConcurrentCopying* const concurrent_copying_;
259 };
260
261 class ConcurrentCopying::ActivateReadBarrierEntrypointsCallback : public Closure {
262 public:
263 explicit ActivateReadBarrierEntrypointsCallback(ConcurrentCopying* concurrent_copying)
264 : concurrent_copying_(concurrent_copying) {}
265
266 void Run(Thread* self ATTRIBUTE_UNUSED) override REQUIRES(Locks::thread_list_lock_) {
267 // This needs to run under the thread_list_lock_ critical section in ThreadList::RunCheckpoint()
268 // to avoid a race with ThreadList::Register().
269 CHECK(!concurrent_copying_->is_using_read_barrier_entrypoints_);
270 concurrent_copying_->is_using_read_barrier_entrypoints_ = true;
271 }
272
273 private:
274 ConcurrentCopying* const concurrent_copying_;
275 };
276
277 void ConcurrentCopying::ActivateReadBarrierEntrypoints() {
278 Thread* const self = Thread::Current();
279 ActivateReadBarrierEntrypointsCheckpoint checkpoint(this);
280 ThreadList* thread_list = Runtime::Current()->GetThreadList();
281 gc_barrier_->Init(self, 0);
282 ActivateReadBarrierEntrypointsCallback callback(this);
283 const size_t barrier_count = thread_list->RunCheckpoint(&checkpoint, &callback);
284 // If there are no threads to wait for, which implies that all the checkpoint functions have
285 // finished, then there is no need to wait on the barrier.
286 if (barrier_count == 0) {
287 return;
288 }
289 ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
290 gc_barrier_->Increment(self, barrier_count);
291 }
292
293 void ConcurrentCopying::CreateInterRegionRefBitmaps() {
294 DCHECK(use_generational_cc_);
295 DCHECK(!region_space_inter_region_bitmap_.IsValid());
296 DCHECK(!non_moving_space_inter_region_bitmap_.IsValid());
297 DCHECK(region_space_ != nullptr);
298 DCHECK(heap_->non_moving_space_ != nullptr);
299 // Region-space
300 region_space_inter_region_bitmap_ = accounting::ContinuousSpaceBitmap::Create(
301 "region-space inter region ref bitmap",
302 reinterpret_cast<uint8_t*>(region_space_->Begin()),
303 region_space_->Limit() - region_space_->Begin());
304 CHECK(region_space_inter_region_bitmap_.IsValid())
305 << "Couldn't allocate region-space inter region ref bitmap";
306
307 // non-moving-space
308 non_moving_space_inter_region_bitmap_ = accounting::ContinuousSpaceBitmap::Create(
309 "non-moving-space inter region ref bitmap",
310 reinterpret_cast<uint8_t*>(heap_->non_moving_space_->Begin()),
311 heap_->non_moving_space_->Limit() - heap_->non_moving_space_->Begin());
312 CHECK(non_moving_space_inter_region_bitmap_.IsValid())
313 << "Couldn't allocate non-moving-space inter region ref bitmap";
314 }
315
316 void ConcurrentCopying::BindBitmaps() {
317 Thread* self = Thread::Current();
318 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
319 // Mark all of the spaces we never collect as immune.
320 for (const auto& space : heap_->GetContinuousSpaces()) {
321 if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect ||
322 space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect) {
323 CHECK(space->IsZygoteSpace() || space->IsImageSpace());
324 immune_spaces_.AddSpace(space);
325 } else {
326 CHECK(!space->IsZygoteSpace());
327 CHECK(!space->IsImageSpace());
328 CHECK(space == region_space_ || space == heap_->non_moving_space_);
329 if (use_generational_cc_) {
330 if (space == region_space_) {
331 region_space_bitmap_ = region_space_->GetMarkBitmap();
332 } else if (young_gen_ && space->IsContinuousMemMapAllocSpace()) {
333 DCHECK_EQ(space->GetGcRetentionPolicy(), space::kGcRetentionPolicyAlwaysCollect);
334 space->AsContinuousMemMapAllocSpace()->BindLiveToMarkBitmap();
335 }
336 if (young_gen_) {
337 // Age all of the cards for the region space so that we know which evac regions to scan.
338 heap_->GetCardTable()->ModifyCardsAtomic(space->Begin(),
339 space->End(),
340 AgeCardVisitor(),
341 VoidFunctor());
342 } else {
343 // In a full-heap GC cycle, the card-table corresponding to region-space and
344 // non-moving space can be cleared, because this cycle only needs to
345 // capture writes during the marking phase of this cycle to catch
346 // objects that skipped marking due to heap mutation. Furthermore,
347 // if the next GC is a young-gen cycle, then it only needs writes to
348 // be captured after the thread-flip of this GC cycle, as that is when
349 // the young-gen for the next GC cycle starts getting populated.
350 heap_->GetCardTable()->ClearCardRange(space->Begin(), space->Limit());
351 }
352 } else {
353 if (space == region_space_) {
354 // It is OK to clear the bitmap with mutators running since the only place it is read is
355 // VisitObjects, which is mutually exclusive with CC.
356 region_space_bitmap_ = region_space_->GetMarkBitmap();
357 region_space_bitmap_->Clear();
358 }
359 }
360 }
361 }
362 if (use_generational_cc_ && young_gen_) {
363 for (const auto& space : GetHeap()->GetDiscontinuousSpaces()) {
364 CHECK(space->IsLargeObjectSpace());
365 space->AsLargeObjectSpace()->CopyLiveToMarked();
366 }
367 }
368 }
369
370 void ConcurrentCopying::InitializePhase() {
371 TimingLogger::ScopedTiming split("InitializePhase", GetTimings());
372 num_bytes_allocated_before_gc_ = static_cast<int64_t>(heap_->GetBytesAllocated());
373 if (kVerboseMode) {
374 LOG(INFO) << "GC InitializePhase";
375 LOG(INFO) << "Region-space : " << reinterpret_cast<void*>(region_space_->Begin()) << "-"
376 << reinterpret_cast<void*>(region_space_->Limit());
377 }
378 CheckEmptyMarkStack();
379 rb_mark_bit_stack_full_ = false;
380 mark_from_read_barrier_measurements_ = measure_read_barrier_slow_path_;
381 if (measure_read_barrier_slow_path_) {
382 rb_slow_path_ns_.store(0, std::memory_order_relaxed);
383 rb_slow_path_count_.store(0, std::memory_order_relaxed);
384 rb_slow_path_count_gc_.store(0, std::memory_order_relaxed);
385 }
386
387 immune_spaces_.Reset();
388 bytes_moved_.store(0, std::memory_order_relaxed);
389 objects_moved_.store(0, std::memory_order_relaxed);
390 bytes_moved_gc_thread_ = 0;
391 objects_moved_gc_thread_ = 0;
392 GcCause gc_cause = GetCurrentIteration()->GetGcCause();
393
394 force_evacuate_all_ = false;
395 if (!use_generational_cc_ || !young_gen_) {
396 if (gc_cause == kGcCauseExplicit ||
397 gc_cause == kGcCauseCollectorTransition ||
398 GetCurrentIteration()->GetClearSoftReferences()) {
399 force_evacuate_all_ = true;
400 }
401 }
402 if (kUseBakerReadBarrier) {
403 updated_all_immune_objects_.store(false, std::memory_order_relaxed);
404 // GC may gray immune objects in the thread flip.
405 gc_grays_immune_objects_ = true;
406 if (kIsDebugBuild) {
407 MutexLock mu(Thread::Current(), immune_gray_stack_lock_);
408 DCHECK(immune_gray_stack_.empty());
409 }
410 }
411 if (use_generational_cc_) {
412 done_scanning_.store(false, std::memory_order_release);
413 }
414 BindBitmaps();
415 if (kVerboseMode) {
416 LOG(INFO) << "young_gen=" << std::boolalpha << young_gen_ << std::noboolalpha;
417 LOG(INFO) << "force_evacuate_all=" << std::boolalpha << force_evacuate_all_ << std::noboolalpha;
418 LOG(INFO) << "Largest immune region: " << immune_spaces_.GetLargestImmuneRegion().Begin()
419 << "-" << immune_spaces_.GetLargestImmuneRegion().End();
420 for (space::ContinuousSpace* space : immune_spaces_.GetSpaces()) {
421 LOG(INFO) << "Immune space: " << *space;
422 }
423 LOG(INFO) << "GC end of InitializePhase";
424 }
425 if (use_generational_cc_ && !young_gen_) {
426 region_space_bitmap_->Clear();
427 }
428 mark_stack_mode_.store(ConcurrentCopying::kMarkStackModeThreadLocal, std::memory_order_relaxed);
429 // Mark all of the zygote large objects without graying them.
430 MarkZygoteLargeObjects();
431 }
432
433 // Used to switch the thread roots of a thread from from-space refs to to-space refs.
434 class ConcurrentCopying::ThreadFlipVisitor : public Closure, public RootVisitor {
435 public:
436 ThreadFlipVisitor(ConcurrentCopying* concurrent_copying, bool use_tlab)
437 : concurrent_copying_(concurrent_copying), use_tlab_(use_tlab) {
438 }
439
440 void Run(Thread* thread) override REQUIRES_SHARED(Locks::mutator_lock_) {
441 // Note: self is not necessarily equal to thread since thread may be suspended.
442 Thread* self = Thread::Current();
443 CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
444 << thread->GetState() << " thread " << thread << " self " << self;
445 thread->SetIsGcMarkingAndUpdateEntrypoints(true);
446 if (use_tlab_ && thread->HasTlab()) {
447 // We should not reuse the partially utilized TLABs revoked here as they
448 // are going to be part of from-space.
449 if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) {
450 // This must come before the revoke.
451 size_t thread_local_objects = thread->GetThreadLocalObjectsAllocated();
452 concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread, /*reuse=*/ false);
453 reinterpret_cast<Atomic<size_t>*>(
454 &concurrent_copying_->from_space_num_objects_at_first_pause_)->
455 fetch_add(thread_local_objects, std::memory_order_relaxed);
456 } else {
457 concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread, /*reuse=*/ false);
458 }
459 }
460 if (kUseThreadLocalAllocationStack) {
461 thread->RevokeThreadLocalAllocationStack();
462 }
463 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
464 // We can use the non-CAS VisitRoots functions below because we update thread-local GC roots
465 // only.
466 thread->VisitRoots(this, kVisitRootFlagAllRoots);
467 concurrent_copying_->GetBarrier().Pass(self);
468 }
469
470 void VisitRoots(mirror::Object*** roots,
471 size_t count,
472 const RootInfo& info ATTRIBUTE_UNUSED) override
473 REQUIRES_SHARED(Locks::mutator_lock_) {
474 Thread* self = Thread::Current();
475 for (size_t i = 0; i < count; ++i) {
476 mirror::Object** root = roots[i];
477 mirror::Object* ref = *root;
478 if (ref != nullptr) {
479 mirror::Object* to_ref = concurrent_copying_->Mark(self, ref);
480 if (to_ref != ref) {
481 *root = to_ref;
482 }
483 }
484 }
485 }
486
487 void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
488 size_t count,
489 const RootInfo& info ATTRIBUTE_UNUSED) override
490 REQUIRES_SHARED(Locks::mutator_lock_) {
491 Thread* self = Thread::Current();
492 for (size_t i = 0; i < count; ++i) {
493 mirror::CompressedReference<mirror::Object>* const root = roots[i];
494 if (!root->IsNull()) {
495 mirror::Object* ref = root->AsMirrorPtr();
496 mirror::Object* to_ref = concurrent_copying_->Mark(self, ref);
497 if (to_ref != ref) {
498 root->Assign(to_ref);
499 }
500 }
501 }
502 }
503
504 private:
505 ConcurrentCopying* const concurrent_copying_;
506 const bool use_tlab_;
507 };
508
509 // Called back from Runtime::FlipThreadRoots() during a pause.
510 class ConcurrentCopying::FlipCallback : public Closure {
511 public:
512 explicit FlipCallback(ConcurrentCopying* concurrent_copying)
513 : concurrent_copying_(concurrent_copying) {
514 }
515
516 void Run(Thread* thread) override REQUIRES(Locks::mutator_lock_) {
517 ConcurrentCopying* cc = concurrent_copying_;
518 TimingLogger::ScopedTiming split("(Paused)FlipCallback", cc->GetTimings());
519 // Note: self is not necessarily equal to thread since thread may be suspended.
520 Thread* self = Thread::Current();
521 if (kVerifyNoMissingCardMarks && cc->young_gen_) {
522 cc->VerifyNoMissingCardMarks();
523 }
524 CHECK_EQ(thread, self);
525 Locks::mutator_lock_->AssertExclusiveHeld(self);
526 space::RegionSpace::EvacMode evac_mode = space::RegionSpace::kEvacModeLivePercentNewlyAllocated;
527 if (cc->young_gen_) {
528 CHECK(!cc->force_evacuate_all_);
529 evac_mode = space::RegionSpace::kEvacModeNewlyAllocated;
530 } else if (cc->force_evacuate_all_) {
531 evac_mode = space::RegionSpace::kEvacModeForceAll;
532 }
533 {
534 TimingLogger::ScopedTiming split2("(Paused)SetFromSpace", cc->GetTimings());
535 // Only change live bytes for 1-phase full heap CC.
536 cc->region_space_->SetFromSpace(
537 cc->rb_table_,
538 evac_mode,
539 /*clear_live_bytes=*/ !cc->use_generational_cc_);
540 }
541 cc->SwapStacks();
542 if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) {
543 cc->RecordLiveStackFreezeSize(self);
544 cc->from_space_num_objects_at_first_pause_ = cc->region_space_->GetObjectsAllocated();
545 cc->from_space_num_bytes_at_first_pause_ = cc->region_space_->GetBytesAllocated();
546 }
547 cc->is_marking_ = true;
548 if (kIsDebugBuild && !cc->use_generational_cc_) {
549 cc->region_space_->AssertAllRegionLiveBytesZeroOrCleared();
550 }
551 if (UNLIKELY(Runtime::Current()->IsActiveTransaction())) {
552 CHECK(Runtime::Current()->IsAotCompiler());
553 TimingLogger::ScopedTiming split3("(Paused)VisitTransactionRoots", cc->GetTimings());
554 Runtime::Current()->VisitTransactionRoots(cc);
555 }
556 if (kUseBakerReadBarrier && kGrayDirtyImmuneObjects) {
557 cc->GrayAllNewlyDirtyImmuneObjects();
558 if (kIsDebugBuild) {
559 // Check that all non-gray immune objects only reference immune objects.
560 cc->VerifyGrayImmuneObjects();
561 }
562 }
563 // May be null during runtime creation; in this case leave java_lang_Object_ null.
564 // This is safe since single-threaded behavior should mean FillWithFakeObject does not
565 // happen when java_lang_Object_ is null.
566 if (WellKnownClasses::java_lang_Object != nullptr) {
567 cc->java_lang_Object_ = down_cast<mirror::Class*>(cc->Mark(thread,
568 WellKnownClasses::ToClass(WellKnownClasses::java_lang_Object).Ptr()));
569 } else {
570 cc->java_lang_Object_ = nullptr;
571 }
572 }
573
574 private:
575 ConcurrentCopying* const concurrent_copying_;
576 };
577
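// Debug visitor: checks that a non-gray immune-space object only references other immune-space
// objects or zygote large objects.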
578 class ConcurrentCopying::VerifyGrayImmuneObjectsVisitor {
579 public:
580 explicit VerifyGrayImmuneObjectsVisitor(ConcurrentCopying* collector)
581 : collector_(collector) {}
582
583 void operator()(ObjPtr<mirror::Object> obj, MemberOffset offset, bool /* is_static */)
584 const ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_)
585 REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
586 CheckReference(obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>(offset),
587 obj, offset);
588 }
589
590 void operator()(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref) const
591 REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
592 CHECK(klass->IsTypeOfReferenceClass());
593 CheckReference(ref->GetReferent<kWithoutReadBarrier>(),
594 ref,
595 mirror::Reference::ReferentOffset());
596 }
597
598 void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
599 ALWAYS_INLINE
600 REQUIRES_SHARED(Locks::mutator_lock_) {
601 if (!root->IsNull()) {
602 VisitRoot(root);
603 }
604 }
605
606 void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
607 ALWAYS_INLINE
608 REQUIRES_SHARED(Locks::mutator_lock_) {
609 CheckReference(root->AsMirrorPtr(), nullptr, MemberOffset(0));
610 }
611
612 private:
613 ConcurrentCopying* const collector_;
614
615 void CheckReference(ObjPtr<mirror::Object> ref,
616 ObjPtr<mirror::Object> holder,
617 MemberOffset offset) const
618 REQUIRES_SHARED(Locks::mutator_lock_) {
619 if (ref != nullptr) {
620 if (!collector_->immune_spaces_.ContainsObject(ref.Ptr())) {
621 // Not immune, must be a zygote large object.
622 space::LargeObjectSpace* large_object_space =
623 Runtime::Current()->GetHeap()->GetLargeObjectsSpace();
624 CHECK(large_object_space->Contains(ref.Ptr()) &&
625 large_object_space->IsZygoteLargeObject(Thread::Current(), ref.Ptr()))
626 << "Non gray object references non immune, non zygote large object "<< ref << " "
627 << mirror::Object::PrettyTypeOf(ref) << " in holder " << holder << " "
628 << mirror::Object::PrettyTypeOf(holder) << " offset=" << offset.Uint32Value();
629 } else {
630 // Make sure the large object class is immune since we will never scan the large object.
631 CHECK(collector_->immune_spaces_.ContainsObject(
632 ref->GetClass<kVerifyNone, kWithoutReadBarrier>()));
633 }
634 }
635 }
636 };
637
638 void ConcurrentCopying::VerifyGrayImmuneObjects() {
639 TimingLogger::ScopedTiming split(__FUNCTION__, GetTimings());
640 for (auto& space : immune_spaces_.GetSpaces()) {
641 DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
642 accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
643 VerifyGrayImmuneObjectsVisitor visitor(this);
644 live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
645 reinterpret_cast<uintptr_t>(space->Limit()),
646 [&visitor](mirror::Object* obj)
647 REQUIRES_SHARED(Locks::mutator_lock_) {
648 // If an object is not gray, it should only have references to things in the immune spaces.
649 if (obj->GetReadBarrierState() != ReadBarrier::GrayState()) {
650 obj->VisitReferences</*kVisitNativeRoots=*/true,
651 kDefaultVerifyFlags,
652 kWithoutReadBarrier>(visitor, visitor);
653 }
654 });
655 }
656 }
657
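// Debug visitor: checks that an object whose card is clean does not reference any object in a
// newly allocated region (which would indicate a missing card mark).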
658 class ConcurrentCopying::VerifyNoMissingCardMarkVisitor {
659 public:
660 VerifyNoMissingCardMarkVisitor(ConcurrentCopying* cc, ObjPtr<mirror::Object> holder)
661 : cc_(cc),
662 holder_(holder) {}
663
664 void operator()(ObjPtr<mirror::Object> obj,
665 MemberOffset offset,
666 bool is_static ATTRIBUTE_UNUSED) const
667 REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
668 if (offset.Uint32Value() != mirror::Object::ClassOffset().Uint32Value()) {
669 CheckReference(obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(
670 offset), offset.Uint32Value());
671 }
672 }
673 void operator()(ObjPtr<mirror::Class> klass,
674 ObjPtr<mirror::Reference> ref) const
675 REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
676 CHECK(klass->IsTypeOfReferenceClass());
677 this->operator()(ref, mirror::Reference::ReferentOffset(), false);
678 }
679
680 void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
681 REQUIRES_SHARED(Locks::mutator_lock_) {
682 if (!root->IsNull()) {
683 VisitRoot(root);
684 }
685 }
686
687 void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
688 REQUIRES_SHARED(Locks::mutator_lock_) {
689 CheckReference(root->AsMirrorPtr());
690 }
691
692 void CheckReference(mirror::Object* ref, int32_t offset = -1) const
693 REQUIRES_SHARED(Locks::mutator_lock_) {
694 if (ref != nullptr && cc_->region_space_->IsInNewlyAllocatedRegion(ref)) {
695 LOG(FATAL_WITHOUT_ABORT)
696 << holder_->PrettyTypeOf() << "(" << holder_.Ptr() << ") references object "
697 << ref->PrettyTypeOf() << "(" << ref << ") in newly allocated region at offset=" << offset;
698 LOG(FATAL_WITHOUT_ABORT) << "time=" << cc_->region_space_->Time();
699 constexpr const char* kIndent = " ";
700 LOG(FATAL_WITHOUT_ABORT) << cc_->DumpReferenceInfo(holder_.Ptr(), "holder_", kIndent);
701 LOG(FATAL_WITHOUT_ABORT) << cc_->DumpReferenceInfo(ref, "ref", kIndent);
702 LOG(FATAL) << "Unexpected reference to newly allocated region.";
703 }
704 }
705
706 private:
707 ConcurrentCopying* const cc_;
708 const ObjPtr<mirror::Object> holder_;
709 };
710
711 void ConcurrentCopying::VerifyNoMissingCardMarks() {
712 auto visitor = [&](mirror::Object* obj)
713 REQUIRES(Locks::mutator_lock_)
714 REQUIRES(!mark_stack_lock_) {
715 // Objects on clean cards should never have references to newly allocated regions. Note
716 // that aged cards are also not clean.
717 if (heap_->GetCardTable()->GetCard(obj) == gc::accounting::CardTable::kCardClean) {
718 VerifyNoMissingCardMarkVisitor internal_visitor(this, /*holder=*/ obj);
719 obj->VisitReferences</*kVisitNativeRoots=*/true, kVerifyNone, kWithoutReadBarrier>(
720 internal_visitor, internal_visitor);
721 }
722 };
723 TimingLogger::ScopedTiming split(__FUNCTION__, GetTimings());
724 region_space_->Walk(visitor);
725 {
726 ReaderMutexLock rmu(Thread::Current(), *Locks::heap_bitmap_lock_);
727 heap_->GetLiveBitmap()->Visit(visitor);
728 }
729 }
730
731 // Switch thread roots from from-space refs to to-space refs. Forward/mark the thread roots.
732 void ConcurrentCopying::FlipThreadRoots() {
733 TimingLogger::ScopedTiming split("FlipThreadRoots", GetTimings());
734 if (kVerboseMode || heap_->dump_region_info_before_gc_) {
735 LOG(INFO) << "time=" << region_space_->Time();
736 region_space_->DumpNonFreeRegions(LOG_STREAM(INFO));
737 }
738 Thread* self = Thread::Current();
739 Locks::mutator_lock_->AssertNotHeld(self);
740 gc_barrier_->Init(self, 0);
741 ThreadFlipVisitor thread_flip_visitor(this, heap_->use_tlab_);
742 FlipCallback flip_callback(this);
743
744 size_t barrier_count = Runtime::Current()->GetThreadList()->FlipThreadRoots(
745 &thread_flip_visitor, &flip_callback, this, GetHeap()->GetGcPauseListener());
746
747 {
748 ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
749 gc_barrier_->Increment(self, barrier_count);
750 }
751 is_asserting_to_space_invariant_ = true;
752 QuasiAtomic::ThreadFenceForConstructor();
753 if (kVerboseMode) {
754 LOG(INFO) << "time=" << region_space_->Time();
755 region_space_->DumpNonFreeRegions(LOG_STREAM(INFO));
756 LOG(INFO) << "GC end of FlipThreadRoots";
757 }
758 }
759
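// Visitor that grays objects in the immune spaces (reached via mod-union tables or dirty/aged
// cards). kConcurrent selects the concurrent (atomic) vs. paused (non-atomic) read barrier state
// update.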
760 template <bool kConcurrent>
761 class ConcurrentCopying::GrayImmuneObjectVisitor {
762 public:
763 explicit GrayImmuneObjectVisitor(Thread* self) : self_(self) {}
764
765 ALWAYS_INLINE void operator()(mirror::Object* obj) const REQUIRES_SHARED(Locks::mutator_lock_) {
766 if (kUseBakerReadBarrier && obj->GetReadBarrierState() == ReadBarrier::NonGrayState()) {
767 if (kConcurrent) {
768 Locks::mutator_lock_->AssertSharedHeld(self_);
769 obj->AtomicSetReadBarrierState(ReadBarrier::NonGrayState(), ReadBarrier::GrayState());
770 // Mod union table VisitObjects may visit the same object multiple times so we can't check
771 // the result of the atomic set.
772 } else {
773 Locks::mutator_lock_->AssertExclusiveHeld(self_);
774 obj->SetReadBarrierState(ReadBarrier::GrayState());
775 }
776 }
777 }
778
779 static void Callback(mirror::Object* obj, void* arg) REQUIRES_SHARED(Locks::mutator_lock_) {
780 reinterpret_cast<GrayImmuneObjectVisitor<kConcurrent>*>(arg)->operator()(obj);
781 }
782
783 private:
784 Thread* const self_;
785 };
786
787 void ConcurrentCopying::GrayAllDirtyImmuneObjects() {
788 TimingLogger::ScopedTiming split("GrayAllDirtyImmuneObjects", GetTimings());
789 accounting::CardTable* const card_table = heap_->GetCardTable();
790 Thread* const self = Thread::Current();
791 using VisitorType = GrayImmuneObjectVisitor</* kIsConcurrent= */ true>;
792 VisitorType visitor(self);
793 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
794 for (space::ContinuousSpace* space : immune_spaces_.GetSpaces()) {
795 DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
796 accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
797 // Mark all the objects on dirty cards since these may point to objects in other spaces.
798 // Once these are marked, the GC will eventually clear them later.
799 // Table is non null for boot image and zygote spaces. It is only null for application image
800 // spaces.
801 if (table != nullptr) {
802 table->ProcessCards();
803 table->VisitObjects(&VisitorType::Callback, &visitor);
804 // Don't clear cards here since we need to rescan in the pause. If we cleared the cards here,
805 // there would be races with the mutator marking new cards.
806 } else {
807 // Keep cards aged if we don't have a mod-union table since we may need to scan them in future
808 // GCs. This case is for app images.
809 card_table->ModifyCardsAtomic(
810 space->Begin(),
811 space->End(),
812 [](uint8_t card) {
813 return (card != gc::accounting::CardTable::kCardClean)
814 ? gc::accounting::CardTable::kCardAged
815 : card;
816 },
817 /* card modified visitor */ VoidFunctor());
818 card_table->Scan</*kClearCard=*/ false>(space->GetMarkBitmap(),
819 space->Begin(),
820 space->End(),
821 visitor,
822 gc::accounting::CardTable::kCardAged);
823 }
824 }
825 }
826
827 void ConcurrentCopying::GrayAllNewlyDirtyImmuneObjects() {
828 TimingLogger::ScopedTiming split("(Paused)GrayAllNewlyDirtyImmuneObjects", GetTimings());
829 accounting::CardTable* const card_table = heap_->GetCardTable();
830 using VisitorType = GrayImmuneObjectVisitor</* kIsConcurrent= */ false>;
831 Thread* const self = Thread::Current();
832 VisitorType visitor(self);
833 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
834 for (space::ContinuousSpace* space : immune_spaces_.GetSpaces()) {
835 DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
836 accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
837
838 // Don't need to scan aged cards since we already scanned them before the pause. Note that
839 // scanning cards also handles the mod-union table cards.
840 card_table->Scan</*kClearCard=*/ false>(space->GetMarkBitmap(),
841 space->Begin(),
842 space->End(),
843 visitor,
844 gc::accounting::CardTable::kCardDirty);
845 if (table != nullptr) {
846 // Add the cards to the mod-union table so that we can clear cards to save RAM.
847 table->ProcessCards();
848 TimingLogger::ScopedTiming split2("(Paused)ClearCards", GetTimings());
849 card_table->ClearCardRange(space->Begin(),
850 AlignDown(space->End(), accounting::CardTable::kCardSize));
851 }
852 }
853 // Since all of the objects that may point to other spaces are gray, we can avoid all the read
854 // barriers in the immune spaces.
855 updated_all_immune_objects_.store(true, std::memory_order_relaxed);
856 }
857
858 void ConcurrentCopying::SwapStacks() {
859 heap_->SwapStacks();
860 }
861
862 void ConcurrentCopying::RecordLiveStackFreezeSize(Thread* self) {
863 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
864 live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
865 }
866
867 // Used to visit objects in the immune spaces.
868 inline void ConcurrentCopying::ScanImmuneObject(mirror::Object* obj) {
869 DCHECK(obj != nullptr);
870 DCHECK(immune_spaces_.ContainsObject(obj));
871 // Update the object's fields without graying it or pushing it onto the mark stack.
872 if (use_generational_cc_ && young_gen_) {
873 // Young GC does not care about references to unevac space. It is safe not to gray these as
874 // long as scanning immune objects happens after scanning the dirty cards.
875 Scan<true>(obj);
876 } else {
877 Scan<false>(obj);
878 }
879 }
880
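// Visitor that scans objects in the immune spaces. With Baker read barriers and
// kGrayDirtyImmuneObjects, only gray objects are scanned and are flipped back to non-gray once
// done.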
881 class ConcurrentCopying::ImmuneSpaceScanObjVisitor {
882 public:
883 explicit ImmuneSpaceScanObjVisitor(ConcurrentCopying* cc)
884 : collector_(cc) {}
885
886 ALWAYS_INLINE void operator()(mirror::Object* obj) const REQUIRES_SHARED(Locks::mutator_lock_) {
887 if (kUseBakerReadBarrier && kGrayDirtyImmuneObjects) {
888 // Only need to scan gray objects.
889 if (obj->GetReadBarrierState() == ReadBarrier::GrayState()) {
890 collector_->ScanImmuneObject(obj);
891 // Done scanning the object, go back to black (non-gray).
892 bool success = obj->AtomicSetReadBarrierState(ReadBarrier::GrayState(),
893 ReadBarrier::NonGrayState());
894 CHECK(success)
895 << Runtime::Current()->GetHeap()->GetVerification()->DumpObjectInfo(obj, "failed CAS");
896 }
897 } else {
898 collector_->ScanImmuneObject(obj);
899 }
900 }
901
902 static void Callback(mirror::Object* obj, void* arg) REQUIRES_SHARED(Locks::mutator_lock_) {
903 reinterpret_cast<ImmuneSpaceScanObjVisitor*>(arg)->operator()(obj);
904 }
905
906 private:
907 ConcurrentCopying* const collector_;
908 };
909
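// Root visitor used while capturing roots for the marking phase: sets the mark bit for each
// non-null root referent and pushes newly marked objects onto the mark stack.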
910 template <bool kAtomicTestAndSet>
911 class ConcurrentCopying::CaptureRootsForMarkingVisitor : public RootVisitor {
912 public:
913 explicit CaptureRootsForMarkingVisitor(ConcurrentCopying* cc, Thread* self)
914 : collector_(cc), self_(self) {}
915
916 void VisitRoots(mirror::Object*** roots,
917 size_t count,
918 const RootInfo& info ATTRIBUTE_UNUSED) override
919 REQUIRES_SHARED(Locks::mutator_lock_) {
920 for (size_t i = 0; i < count; ++i) {
921 mirror::Object** root = roots[i];
922 mirror::Object* ref = *root;
923 if (ref != nullptr && !collector_->TestAndSetMarkBitForRef<kAtomicTestAndSet>(ref)) {
924 collector_->PushOntoMarkStack(self_, ref);
925 }
926 }
927 }
928
929 void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
930 size_t count,
931 const RootInfo& info ATTRIBUTE_UNUSED) override
932 REQUIRES_SHARED(Locks::mutator_lock_) {
933 for (size_t i = 0; i < count; ++i) {
934 mirror::CompressedReference<mirror::Object>* const root = roots[i];
935 if (!root->IsNull()) {
936 mirror::Object* ref = root->AsMirrorPtr();
937 if (!collector_->TestAndSetMarkBitForRef<kAtomicTestAndSet>(ref)) {
938 collector_->PushOntoMarkStack(self_, ref);
939 }
940 }
941 }
942 }
943
944 private:
945 ConcurrentCopying* const collector_;
946 Thread* const self_;
947 };
948
949 class ConcurrentCopying::RevokeThreadLocalMarkStackCheckpoint : public Closure {
950 public:
951 RevokeThreadLocalMarkStackCheckpoint(ConcurrentCopying* concurrent_copying,
952 bool disable_weak_ref_access)
953 : concurrent_copying_(concurrent_copying),
954 disable_weak_ref_access_(disable_weak_ref_access) {
955 }
956
957 void Run(Thread* thread) override NO_THREAD_SAFETY_ANALYSIS {
958 // Note: self is not necessarily equal to thread since thread may be suspended.
959 Thread* const self = Thread::Current();
960 CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
961 << thread->GetState() << " thread " << thread << " self " << self;
962 // Revoke thread local mark stacks.
963 {
964 MutexLock mu(self, concurrent_copying_->mark_stack_lock_);
965 accounting::AtomicStack<mirror::Object>* tl_mark_stack = thread->GetThreadLocalMarkStack();
966 if (tl_mark_stack != nullptr) {
967 concurrent_copying_->revoked_mark_stacks_.push_back(tl_mark_stack);
968 thread->SetThreadLocalMarkStack(nullptr);
969 }
970 }
971 // Disable weak ref access.
972 if (disable_weak_ref_access_) {
973 thread->SetWeakRefAccessEnabled(false);
974 }
975 // If thread is a running mutator, then act on behalf of the garbage collector.
976 // See the code in ThreadList::RunCheckpoint.
977 concurrent_copying_->GetBarrier().Pass(self);
978 }
979
980 protected:
981 ConcurrentCopying* const concurrent_copying_;
982
983 private:
984 const bool disable_weak_ref_access_;
985 };
986
987 class ConcurrentCopying::CaptureThreadRootsForMarkingAndCheckpoint :
988 public RevokeThreadLocalMarkStackCheckpoint {
989 public:
990 explicit CaptureThreadRootsForMarkingAndCheckpoint(ConcurrentCopying* cc) :
991 RevokeThreadLocalMarkStackCheckpoint(cc, /* disable_weak_ref_access */ false) {}
992
993 void Run(Thread* thread) override
994 REQUIRES_SHARED(Locks::mutator_lock_) {
995 Thread* const self = Thread::Current();
996 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
997 // We can use the non-CAS VisitRoots functions below because we update thread-local GC roots
998 // only.
999 CaptureRootsForMarkingVisitor</*kAtomicTestAndSet*/ true> visitor(concurrent_copying_, self);
1000 thread->VisitRoots(&visitor, kVisitRootFlagAllRoots);
1001 // If thread_running_gc_ performed the root visit then its thread-local
1002 // mark-stack should be null as we directly push to gc_mark_stack_.
1003 CHECK(self == thread || self->GetThreadLocalMarkStack() == nullptr);
1004 // Barrier handling is done in the base class' Run() below.
1005 RevokeThreadLocalMarkStackCheckpoint::Run(thread);
1006 }
1007 };
1008
1009 void ConcurrentCopying::CaptureThreadRootsForMarking() {
1010 TimingLogger::ScopedTiming split("CaptureThreadRootsForMarking", GetTimings());
1011 if (kVerboseMode) {
1012 LOG(INFO) << "time=" << region_space_->Time();
1013 region_space_->DumpNonFreeRegions(LOG_STREAM(INFO));
1014 }
1015 Thread* const self = Thread::Current();
1016 CaptureThreadRootsForMarkingAndCheckpoint check_point(this);
1017 ThreadList* thread_list = Runtime::Current()->GetThreadList();
1018 gc_barrier_->Init(self, 0);
1019 size_t barrier_count = thread_list->RunCheckpoint(&check_point, /* callback */ nullptr);
1020 // If there are no threads to wait for, which implies that all the checkpoint functions are
1021 // finished, then there is no need to release the mutator lock.
1022 if (barrier_count == 0) {
1023 return;
1024 }
1025 Locks::mutator_lock_->SharedUnlock(self);
1026 {
1027 ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
1028 gc_barrier_->Increment(self, barrier_count);
1029 }
1030 Locks::mutator_lock_->SharedLock(self);
1031 if (kVerboseMode) {
1032 LOG(INFO) << "time=" << region_space_->Time();
1033 region_space_->DumpNonFreeRegions(LOG_STREAM(INFO));
1034 LOG(INFO) << "GC end of CaptureThreadRootsForMarking";
1035 }
1036 }
1037
1038 // Used to scan ref fields of an object.
1039 template <bool kHandleInterRegionRefs>
1040 class ConcurrentCopying::ComputeLiveBytesAndMarkRefFieldsVisitor {
1041 public:
1042 explicit ComputeLiveBytesAndMarkRefFieldsVisitor(ConcurrentCopying* collector,
1043 size_t obj_region_idx)
1044 : collector_(collector),
1045 obj_region_idx_(obj_region_idx),
1046 contains_inter_region_idx_(false) {}
1047
1048 void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) const
1049 ALWAYS_INLINE
1050 REQUIRES_SHARED(Locks::mutator_lock_)
1051 REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
1052 DCHECK_EQ(collector_->RegionSpace()->RegionIdxForRef(obj), obj_region_idx_);
1053 DCHECK(kHandleInterRegionRefs || collector_->immune_spaces_.ContainsObject(obj));
1054 CheckReference(obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>(offset));
1055 }
1056
1057 void operator()(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref) const
1058 REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
1059 DCHECK(klass->IsTypeOfReferenceClass());
1060 // If the referent is not null, then we must re-visit the object during the
1061 // copying phase to enqueue it for delayed processing and to set its
1062 // read-barrier state to gray, ensuring that a call to GetReferent() triggers
1063 // the read barrier. We use the same data structure that is used to remember
1064 // objects with inter-region refs for this purpose too.
1065 if (kHandleInterRegionRefs
1066 && !contains_inter_region_idx_
1067 && ref->AsReference()->GetReferent<kWithoutReadBarrier>() != nullptr) {
1068 contains_inter_region_idx_ = true;
1069 }
1070 }
1071
1072 void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
1073 ALWAYS_INLINE
1074 REQUIRES_SHARED(Locks::mutator_lock_) {
1075 if (!root->IsNull()) {
1076 VisitRoot(root);
1077 }
1078 }
1079
1080 void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
1081 ALWAYS_INLINE
1082 REQUIRES_SHARED(Locks::mutator_lock_) {
1083 CheckReference(root->AsMirrorPtr());
1084 }
1085
1086 bool ContainsInterRegionRefs() const ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
1087 return contains_inter_region_idx_;
1088 }
1089
1090 private:
1091 void CheckReference(mirror::Object* ref) const
1092 REQUIRES_SHARED(Locks::mutator_lock_) {
1093 if (ref == nullptr) {
1094 // Nothing to do.
1095 return;
1096 }
1097 if (!collector_->TestAndSetMarkBitForRef(ref)) {
1098 collector_->PushOntoLocalMarkStack(ref);
1099 }
1100 if (kHandleInterRegionRefs && !contains_inter_region_idx_) {
1101 size_t ref_region_idx = collector_->RegionSpace()->RegionIdxForRef(ref);
1102 // If a region-space object refers to an object outside the region space, we will have a
1103 // mismatch of region idx, but the object need not be re-visited in the
1104 // copying phase.
1105 if (ref_region_idx != static_cast<size_t>(-1) && obj_region_idx_ != ref_region_idx) {
1106 contains_inter_region_idx_ = true;
1107 }
1108 }
1109 }
1110
1111 ConcurrentCopying* const collector_;
1112 const size_t obj_region_idx_;
1113 mutable bool contains_inter_region_idx_;
1114 };
1115
1116 void ConcurrentCopying::AddLiveBytesAndScanRef(mirror::Object* ref) {
1117 DCHECK(ref != nullptr);
1118 DCHECK(!immune_spaces_.ContainsObject(ref));
1119 DCHECK(TestMarkBitmapForRef(ref));
1120 size_t obj_region_idx = static_cast<size_t>(-1);
1121 if (LIKELY(region_space_->HasAddress(ref))) {
1122 obj_region_idx = region_space_->RegionIdxForRefUnchecked(ref);
1123 // Add live bytes to the corresponding region
1124 if (!region_space_->IsRegionNewlyAllocated(obj_region_idx)) {
1125 // Newly allocated regions are always chosen for evacuation, so there is no need
1126 // to update live_bytes_.
1127 size_t obj_size = ref->SizeOf<kDefaultVerifyFlags>();
1128 size_t alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment);
1129 region_space_->AddLiveBytes(ref, alloc_size);
1130 }
1131 }
1132 ComputeLiveBytesAndMarkRefFieldsVisitor</*kHandleInterRegionRefs*/ true>
1133 visitor(this, obj_region_idx);
1134 ref->VisitReferences</*kVisitNativeRoots=*/ true, kDefaultVerifyFlags, kWithoutReadBarrier>(
1135 visitor, visitor);
1136 // Mark the corresponding card dirty if the object contains any
1137 // inter-region reference.
1138 if (visitor.ContainsInterRegionRefs()) {
1139 if (obj_region_idx == static_cast<size_t>(-1)) {
1140 // If an inter-region ref has been found in a non-region-space, then it
1141 // must be in the non-moving space. This is because this function cannot be
1142 // called on an immune-space object, and a large-object-space object has
1143 // only a class object reference, which is either in some immune space or
1144 // in the non-moving space.
1145 DCHECK(heap_->non_moving_space_->HasAddress(ref));
1146 non_moving_space_inter_region_bitmap_.Set(ref);
1147 } else {
1148 region_space_inter_region_bitmap_.Set(ref);
1149 }
1150 }
1151 }
1152
1153 template <bool kAtomic>
1154 bool ConcurrentCopying::TestAndSetMarkBitForRef(mirror::Object* ref) {
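// Pick the mark bitmap that covers |ref|: the region space bitmap, the non-moving space bitmap,
// or (by elimination) the large object space bitmap. Immune-space objects are always live, so
// they are reported as already marked.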
1155 accounting::ContinuousSpaceBitmap* bitmap = nullptr;
1156 accounting::LargeObjectBitmap* los_bitmap = nullptr;
1157 if (LIKELY(region_space_->HasAddress(ref))) {
1158 bitmap = region_space_bitmap_;
1159 } else if (heap_->GetNonMovingSpace()->HasAddress(ref)) {
1160 bitmap = heap_->GetNonMovingSpace()->GetMarkBitmap();
1161 } else if (immune_spaces_.ContainsObject(ref)) {
1162 // References to immune space objects are always live.
1163 DCHECK(heap_mark_bitmap_->GetContinuousSpaceBitmap(ref)->Test(ref));
1164 return true;
1165 } else {
1166 // Should be a large object. Must be page aligned and the LOS must exist.
1167 if (kIsDebugBuild
1168 && (!IsAligned<kPageSize>(ref) || heap_->GetLargeObjectsSpace() == nullptr)) {
1169 // It must be heap corruption. Remove memory protection and dump data.
1170 region_space_->Unprotect();
1171 heap_->GetVerification()->LogHeapCorruption(/* obj */ nullptr,
1172 MemberOffset(0),
1173 ref,
1174 /* fatal */ true);
1175 }
1176 los_bitmap = heap_->GetLargeObjectsSpace()->GetMarkBitmap();
1177 }
1178 if (kAtomic) {
1179 return (bitmap != nullptr) ? bitmap->AtomicTestAndSet(ref) : los_bitmap->AtomicTestAndSet(ref);
1180 } else {
1181 return (bitmap != nullptr) ? bitmap->Set(ref) : los_bitmap->Set(ref);
1182 }
1183 }
1184
1185 bool ConcurrentCopying::TestMarkBitmapForRef(mirror::Object* ref) {
1186 if (LIKELY(region_space_->HasAddress(ref))) {
1187 return region_space_bitmap_->Test(ref);
1188 } else if (heap_->GetNonMovingSpace()->HasAddress(ref)) {
1189 return heap_->GetNonMovingSpace()->GetMarkBitmap()->Test(ref);
1190 } else if (immune_spaces_.ContainsObject(ref)) {
1191 // References to immune space objects are always live.
1192 DCHECK(heap_mark_bitmap_->GetContinuousSpaceBitmap(ref)->Test(ref));
1193 return true;
1194 } else {
1195 // Should be a large object. Must be page aligned and the LOS must exist.
1196 if (kIsDebugBuild
1197 && (!IsAligned<kPageSize>(ref) || heap_->GetLargeObjectsSpace() == nullptr)) {
1198 // It must be heap corruption. Remove memory protection and dump data.
1199 region_space_->Unprotect();
1200 heap_->GetVerification()->LogHeapCorruption(/* obj */ nullptr,
1201 MemberOffset(0),
1202 ref,
1203 /* fatal */ true);
1204 }
1205 return heap_->GetLargeObjectsSpace()->GetMarkBitmap()->Test(ref);
1206 }
1207 }
1208
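// Push |ref| onto the GC mark stack during the marking phase. Only the thread running the GC may
// call this, and it pushes directly onto gc_mark_stack_ (no thread-local mark stack is used).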
1209 void ConcurrentCopying::PushOntoLocalMarkStack(mirror::Object* ref) {
1210 if (kIsDebugBuild) {
1211 Thread *self = Thread::Current();
1212 DCHECK_EQ(thread_running_gc_, self);
1213 DCHECK(self->GetThreadLocalMarkStack() == nullptr);
1214 }
1215 DCHECK_EQ(mark_stack_mode_.load(std::memory_order_relaxed), kMarkStackModeThreadLocal);
1216 if (UNLIKELY(gc_mark_stack_->IsFull())) {
1217 ExpandGcMarkStack();
1218 }
1219 gc_mark_stack_->PushBack(ref);
1220 }
1221
1222 void ConcurrentCopying::ProcessMarkStackForMarkingAndComputeLiveBytes() {
1223 // Process thread-local mark stack containing thread roots
1224 ProcessThreadLocalMarkStacks(/* disable_weak_ref_access */ false,
1225 /* checkpoint_callback */ nullptr,
1226 [this] (mirror::Object* ref)
1227 REQUIRES_SHARED(Locks::mutator_lock_) {
1228 AddLiveBytesAndScanRef(ref);
1229 });
1230 {
1231 MutexLock mu(thread_running_gc_, mark_stack_lock_);
1232 CHECK(revoked_mark_stacks_.empty());
1233 CHECK_EQ(pooled_mark_stacks_.size(), kMarkStackPoolSize);
1234 }
1235
1236 while (!gc_mark_stack_->IsEmpty()) {
1237 mirror::Object* ref = gc_mark_stack_->PopBack();
1238 AddLiveBytesAndScanRef(ref);
1239 }
1240 }
1241
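// Visitor used in the marking phase to capture references held by immune-space objects;
// inter-region ref tracking is skipped for them (kHandleInterRegionRefs is false).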
1242 class ConcurrentCopying::ImmuneSpaceCaptureRefsVisitor {
1243 public:
1244 explicit ImmuneSpaceCaptureRefsVisitor(ConcurrentCopying* cc) : collector_(cc) {}
1245
1246 ALWAYS_INLINE void operator()(mirror::Object* obj) const REQUIRES_SHARED(Locks::mutator_lock_) {
1247 ComputeLiveBytesAndMarkRefFieldsVisitor</*kHandleInterRegionRefs*/ false>
1248 visitor(collector_, /*obj_region_idx*/ static_cast<size_t>(-1));
1249 obj->VisitReferences</*kVisitNativeRoots=*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
1250 visitor, visitor);
1251 }
1252
1253 static void Callback(mirror::Object* obj, void* arg) REQUIRES_SHARED(Locks::mutator_lock_) {
1254 reinterpret_cast<ImmuneSpaceCaptureRefsVisitor*>(arg)->operator()(obj);
1255 }
1256
1257 private:
1258 ConcurrentCopying* const collector_;
1259 };
1260
1261 /* Invariants for two-phase CC
1262 * ===========================
1263 * A) Definitions
1264 * ---------------
1265 * 1) Black: marked in bitmap, rb_state is non-gray, and not in mark stack
1266 * 2) Black-clean: marked in bitmap, and corresponding card is clean/aged
1267 * 3) Black-dirty: marked in bitmap, and corresponding card is dirty
1268 * 4) Gray: marked in bitmap, and exists in mark stack
1269 * 5) Gray-dirty: marked in bitmap, rb_state is gray, corresponding card is
1270 * dirty, and exists in mark stack
1271 * 6) White: unmarked in bitmap, rb_state is non-gray, and not in mark stack
1272 *
1273 * B) Before marking phase
1274 * -----------------------
1275 * 1) All objects are white
1276 * 2) Cards are either clean or aged (cannot be asserted without a STW pause)
1277 * 3) Mark bitmap is cleared
1278 * 4) Mark stack is empty
1279 *
1280 * C) During marking phase
1281 * ------------------------
1282 * 1) If a black object holds an inter-region or white reference, then its
1283 * corresponding card is dirty. In other words, it changes from being
1284 * black-clean to black-dirty
1285 * 2) No black-clean object points to a white object
1286 *
1287 * D) After marking phase
1288 * -----------------------
1289 * 1) There are no gray objects
1290 * 2) All newly allocated objects are in from space
1291 * 3) No white object can be reachable, directly or otherwise, from a
1292 * black-clean object
1293 *
1294 * E) During copying phase
1295 * ------------------------
1296 * 1) Mutators cannot observe white and black-dirty objects
1297 * 2) New allocations are in to-space (newly allocated regions are part of to-space)
1298 * 3) An object in mark stack must have its rb_state = Gray
1299 *
1300 * F) During card table scan
1301 * --------------------------
1302 * 1) Referents corresponding to root references are gray or in to-space
1303 * 2) Every path from an object that is read or written by a mutator during
1304 * this period to a dirty black object goes through some gray object.
1305 * Mutators preserve this by graying black objects as needed during this
1306 *    period. This ensures that a mutator never encounters a dirty black object.
1307 *
1308 * G) After card table scan
1309 * ------------------------
1310 * 1) There are no black-dirty objects
1311 * 2) Referents corresponding to root references are gray, black-clean or in
1312 * to-space
1313 *
1314 * H) After copying phase
1315 * -----------------------
1316 * 1) Mark stack is empty
1317 * 2) No references into evacuated from-space
1318 * 3) No reference to an object which is unmarked and is also not in a newly
1319 *    allocated region. In other words, no references to white objects.
1320 */
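//
// A minimal sketch of definitions A.1, A.4 and A.6 above expressed as predicates (illustrative
// only; OnMarkStack() and bitmap are hypothetical stand-ins, not collector APIs):
//
//   bool IsBlack(mirror::Object* obj) {   // A.1
//     return bitmap->Test(obj) &&
//            obj->GetReadBarrierState() != ReadBarrier::GrayState() &&
//            !OnMarkStack(obj);
//   }
//   bool IsGray(mirror::Object* obj) {    // A.4
//     return bitmap->Test(obj) && OnMarkStack(obj);
//   }
//   bool IsWhite(mirror::Object* obj) {   // A.6
//     return !bitmap->Test(obj) &&
//            obj->GetReadBarrierState() != ReadBarrier::GrayState() &&
//            !OnMarkStack(obj);
//   }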
1321
MarkingPhase()1322 void ConcurrentCopying::MarkingPhase() {
1323 TimingLogger::ScopedTiming split("MarkingPhase", GetTimings());
1324 if (kVerboseMode) {
1325 LOG(INFO) << "GC MarkingPhase";
1326 }
1327 accounting::CardTable* const card_table = heap_->GetCardTable();
1328 Thread* const self = Thread::Current();
1329 CHECK_EQ(self, thread_running_gc_);
1330 // Clear live_bytes_ of every non-free region, except the ones that are newly
1331 // allocated.
1332 region_space_->SetAllRegionLiveBytesZero();
1333 if (kIsDebugBuild) {
1334 region_space_->AssertAllRegionLiveBytesZeroOrCleared();
1335 }
1336 // Scan immune spaces
1337 {
1338 TimingLogger::ScopedTiming split2("ScanImmuneSpaces", GetTimings());
1339 for (auto& space : immune_spaces_.GetSpaces()) {
1340 DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
1341 accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
1342 accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
1343 ImmuneSpaceCaptureRefsVisitor visitor(this);
1344 if (table != nullptr) {
1345 table->VisitObjects(ImmuneSpaceCaptureRefsVisitor::Callback, &visitor);
1346 } else {
1347 WriterMutexLock rmu(Thread::Current(), *Locks::heap_bitmap_lock_);
1348 card_table->Scan<false>(
1349 live_bitmap,
1350 space->Begin(),
1351 space->Limit(),
1352 visitor,
1353 accounting::CardTable::kCardDirty - 1);
1354 }
1355 }
1356 }
1357 // Scan runtime roots
1358 {
1359 TimingLogger::ScopedTiming split2("VisitConcurrentRoots", GetTimings());
1360 CaptureRootsForMarkingVisitor visitor(this, self);
1361 Runtime::Current()->VisitConcurrentRoots(&visitor, kVisitRootFlagAllRoots);
1362 }
1363 {
1364 // TODO: don't visit the transaction roots if it's not active.
1365 TimingLogger::ScopedTiming split2("VisitNonThreadRoots", GetTimings());
1366 CaptureRootsForMarkingVisitor visitor(this, self);
1367 Runtime::Current()->VisitNonThreadRoots(&visitor);
1368 }
1369 // Capture thread roots
1370 CaptureThreadRootsForMarking();
1371 // Process mark stack
1372 ProcessMarkStackForMarkingAndComputeLiveBytes();
1373
1374 if (kVerboseMode) {
1375 LOG(INFO) << "GC end of MarkingPhase";
1376 }
1377 }
1378
1379 template <bool kNoUnEvac>
ScanDirtyObject(mirror::Object * obj)1380 void ConcurrentCopying::ScanDirtyObject(mirror::Object* obj) {
1381 Scan<kNoUnEvac>(obj);
1382 // Set the read-barrier state of a reference-type object to gray if its
1383 // referent is not marked yet. This is to ensure that if GetReferent() is
1384 // called, it triggers the read-barrier to process the referent before use.
1385 if (UNLIKELY((obj->GetClass<kVerifyNone, kWithoutReadBarrier>()->IsTypeOfReferenceClass()))) {
1386 mirror::Object* referent =
1387 obj->AsReference<kVerifyNone, kWithoutReadBarrier>()->GetReferent<kWithoutReadBarrier>();
1388 if (referent != nullptr && !IsInToSpace(referent)) {
1389 obj->AtomicSetReadBarrierState(ReadBarrier::NonGrayState(), ReadBarrier::GrayState());
1390 }
1391 }
1392 }
1393
1394 // Concurrently mark roots that are guarded by read barriers and process the mark stack.
CopyingPhase()1395 void ConcurrentCopying::CopyingPhase() {
1396 TimingLogger::ScopedTiming split("CopyingPhase", GetTimings());
1397 if (kVerboseMode) {
1398 LOG(INFO) << "GC CopyingPhase";
1399 }
1400 Thread* self = Thread::Current();
1401 accounting::CardTable* const card_table = heap_->GetCardTable();
1402 if (kIsDebugBuild) {
1403 MutexLock mu(self, *Locks::thread_list_lock_);
1404 CHECK(weak_ref_access_enabled_);
1405 }
1406
1407 // Scan immune spaces.
1408 // Update all the fields in the immune spaces first without graying the objects so that we
1409 // minimize dirty pages in the immune spaces. Note mutators can concurrently access and gray some
1410 // of the objects.
1411 if (kUseBakerReadBarrier) {
1412 gc_grays_immune_objects_ = false;
1413 }
1414 if (use_generational_cc_) {
1415 if (kVerboseMode) {
1416 LOG(INFO) << "GC ScanCardsForSpace";
1417 }
1418 TimingLogger::ScopedTiming split2("ScanCardsForSpace", GetTimings());
1419 WriterMutexLock rmu(Thread::Current(), *Locks::heap_bitmap_lock_);
1420 CHECK(!done_scanning_.load(std::memory_order_relaxed));
1421 if (kIsDebugBuild) {
1422 // Leave some time for mutators to race ahead to try and find races between the GC card
1423 // scanning and mutators reading references.
1424 usleep(10 * 1000);
1425 }
1426 for (space::ContinuousSpace* space : GetHeap()->GetContinuousSpaces()) {
1427 if (space->IsImageSpace() || space->IsZygoteSpace()) {
1428 // Image and zygote spaces are already handled since we gray the objects in the pause.
1429 continue;
1430 }
1431 // Scan all of the objects on dirty cards in unevac from-space and the non-moving space. These
1432 // are from previous GCs (or from marking phase of 2-phase full GC) and may reference things
1433 // in the from space.
1434 //
1435 // Note that we do not need to process the large-object space (the only discontinuous space)
1436 // as it contains only large string objects and large primitive array objects, which have no
1437 // references to other objects except their class. There is no need to scan these large
1438 // objects, as the String class and the primitive array classes are expected to never move
1439 // during a collection:
1440 // - In the case where we run with a boot image, these classes are part of the image space,
1441 // which is an immune space.
1442 // - In the case where we run without a boot image, these classes are allocated in the
1443 // non-moving space (see art::ClassLinker::InitWithoutImage).
1444 card_table->Scan<false>(
1445 space->GetMarkBitmap(),
1446 space->Begin(),
1447 space->End(),
1448 [this, space](mirror::Object* obj)
1449 REQUIRES(Locks::heap_bitmap_lock_)
1450 REQUIRES_SHARED(Locks::mutator_lock_) {
1451 // TODO: This code may be refactored to avoid scanning object while
1452 // done_scanning_ is false by setting rb_state to gray, and pushing the
1453 // object on mark stack. However, it will also require clearing the
1454 // corresponding mark-bit and, for region space objects,
1455 // decrementing the object's size from the corresponding region's
1456 // live_bytes.
1457 if (young_gen_) {
1458 // Don't push or gray unevac refs.
1459 if (kIsDebugBuild && space == region_space_) {
1460 // We may get unevac large objects.
1461 if (!region_space_->IsInUnevacFromSpace(obj)) {
1462 CHECK(region_space_bitmap_->Test(obj));
1463 region_space_->DumpRegionForObject(LOG_STREAM(FATAL_WITHOUT_ABORT), obj);
1464 LOG(FATAL) << "Scanning " << obj << " not in unevac space";
1465 }
1466 }
1467 ScanDirtyObject</*kNoUnEvac*/ true>(obj);
1468 } else if (space != region_space_) {
1469 DCHECK(space == heap_->non_moving_space_);
1470 // We need to process un-evac references as they may be unprocessed,
1471 // if they skipped the marking phase due to heap mutation.
1472 ScanDirtyObject</*kNoUnEvac*/ false>(obj);
1473 non_moving_space_inter_region_bitmap_.Clear(obj);
1474 } else if (region_space_->IsInUnevacFromSpace(obj)) {
1475 ScanDirtyObject</*kNoUnEvac*/ false>(obj);
1476 region_space_inter_region_bitmap_.Clear(obj);
1477 }
1478 },
1479 accounting::CardTable::kCardAged);
1480
1481 if (!young_gen_) {
1482 auto visitor = [this](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
1483 // We don't need to process un-evac references as any unprocessed
1484 // ones will be taken care of in the card-table scan above.
1485 ScanDirtyObject</*kNoUnEvac*/ true>(obj);
1486 };
1487 if (space == region_space_) {
1488 region_space_->ScanUnevacFromSpace(&region_space_inter_region_bitmap_, visitor);
1489 } else {
1490 DCHECK(space == heap_->non_moving_space_);
1491 non_moving_space_inter_region_bitmap_.VisitMarkedRange(
1492 reinterpret_cast<uintptr_t>(space->Begin()),
1493 reinterpret_cast<uintptr_t>(space->End()),
1494 visitor);
1495 }
1496 }
1497 }
1498 // Done scanning unevac space.
1499 done_scanning_.store(true, std::memory_order_release);
1500 // NOTE: inter-region-ref bitmaps can be cleared here to release memory, if needed.
1501 // Currently we do it in ReclaimPhase().
1502 if (kVerboseMode) {
1503 LOG(INFO) << "GC end of ScanCardsForSpace";
1504 }
1505 }
1506 {
1507 // For a sticky-bit collection, this phase needs to be after the card scanning since the
1508 // mutator may read an unevac space object out of an image object. If the image object is no
1509 // longer gray it will trigger a read barrier for the unevac space object.
1510 TimingLogger::ScopedTiming split2("ScanImmuneSpaces", GetTimings());
1511 for (auto& space : immune_spaces_.GetSpaces()) {
1512 DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
1513 accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
1514 accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
1515 ImmuneSpaceScanObjVisitor visitor(this);
1516 if (kUseBakerReadBarrier && kGrayDirtyImmuneObjects && table != nullptr) {
1517 table->VisitObjects(ImmuneSpaceScanObjVisitor::Callback, &visitor);
1518 } else {
1519 WriterMutexLock rmu(Thread::Current(), *Locks::heap_bitmap_lock_);
1520 card_table->Scan<false>(
1521 live_bitmap,
1522 space->Begin(),
1523 space->Limit(),
1524 visitor,
1525 accounting::CardTable::kCardDirty - 1);
1526 }
1527 }
1528 }
1529 if (kUseBakerReadBarrier) {
1530 // This release fence makes the field updates in the above loop visible before allowing
1531 // mutators to access immune objects without graying them first.
1532 updated_all_immune_objects_.store(true, std::memory_order_release);
1533 // Now "un-gray" (conceptually blacken) immune objects concurrently accessed and grayed by
1534 // mutators. We can't do this in the above loop because we would incorrectly disable the read
1535 // barrier by un-graying (conceptually blackening) an object which may point to an unscanned,
1536 // white object, breaking the to-space invariant (a mutator shall never observe a from-space
1537 // (white) object).
1538 //
1539 // Make sure no mutators are in the middle of marking an immune object before un-graying
1540 // (blackening) immune objects.
1541 IssueEmptyCheckpoint();
1542 MutexLock mu(Thread::Current(), immune_gray_stack_lock_);
1543 if (kVerboseMode) {
1544 LOG(INFO) << "immune gray stack size=" << immune_gray_stack_.size();
1545 }
1546 for (mirror::Object* obj : immune_gray_stack_) {
1547 DCHECK_EQ(obj->GetReadBarrierState(), ReadBarrier::GrayState());
1548 bool success = obj->AtomicSetReadBarrierState(ReadBarrier::GrayState(),
1549 ReadBarrier::NonGrayState());
1550 DCHECK(success);
1551 }
1552 immune_gray_stack_.clear();
1553 }
1554
1555 {
1556 TimingLogger::ScopedTiming split2("VisitConcurrentRoots", GetTimings());
1557 Runtime::Current()->VisitConcurrentRoots(this, kVisitRootFlagAllRoots);
1558 }
1559 {
1560 // TODO: don't visit the transaction roots if it's not active.
1561 TimingLogger::ScopedTiming split5("VisitNonThreadRoots", GetTimings());
1562 Runtime::Current()->VisitNonThreadRoots(this);
1563 }
1564
1565 {
1566 TimingLogger::ScopedTiming split7("ProcessMarkStack", GetTimings());
1567 // We transition through three mark stack modes (thread-local, shared, GC-exclusive). The
1568 // primary reasons are that we need a checkpoint to process thread-local mark stacks, but
1569 // after we disable weak ref accesses we can no longer use a checkpoint (running threads may
1570 // be blocked at WaitHoldingLocks, which would deadlock), and that once we reach the point
1571 // where we process weak references, we can avoid taking a lock when accessing the GC mark
1572 // stack, which makes mark stack processing more efficient.
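//
// A condensed view of the sequence below, assuming nothing else changes the mode in between
// (illustrative only):
//
//   kMarkStackModeThreadLocal  -- ProcessMarkStack()
//     | SwitchToSharedMarkStackMode()   (also disables weak ref access)
//     v
//   kMarkStackModeShared       -- ProcessMarkStack()
//     | SwitchToGcExclusiveMarkStackMode()
//     v
//   kMarkStackModeGcExclusive  -- ProcessReferences(), SweepSystemWeaks(), ProcessMarkStack()
//     | DisableMarking()
//     v
//   kMarkStackModeOff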
1573
1574 // Process the mark stack once in the thread-local stack mode. This marks most of the live
1575 // objects, aside from weak ref accesses with read barriers (Reference::GetReferent() and
1576 // system weaks) that may happen concurrently while we are processing the mark stack and that
1577 // newly mark/gray objects and push refs onto the mark stack.
1578 ProcessMarkStack();
1579 // Switch to the shared mark stack mode. That is, revoke and process the thread-local mark
1580 // stacks one last time before transitioning to the shared mark stack mode; this also processes
1581 // new refs that may have been concurrently pushed onto the mark stack during the
1582 // ProcessMarkStack() call above. At the same time, disable weak ref accesses using a per-thread
1583 // flag. It's important to do these together in a single checkpoint so that, without a race,
1584 // mutators won't newly gray objects and push new refs onto the mark stack due to weak ref
1585 // accesses, and they safely transition to the shared mark stack mode (without leaving
1586 // unprocessed refs on the thread-local mark stacks). This is why we use the thread-local weak
1587 // ref access flag Thread::tls32_.weak_ref_access_enabled_ instead of the global one.
1588 SwitchToSharedMarkStackMode();
1589 CHECK(!self->GetWeakRefAccessEnabled());
1590 // Now that weak ref accesses are disabled, once we exhaust the shared mark stack again here
1591 // (which may be non-empty if there were refs found on thread-local mark stacks during the above
1592 // SwitchToSharedMarkStackMode() call), we won't have new refs to process; that is, mutators
1593 // (via read barriers) have no way to produce any more refs to process. Marking thus converges
1594 // before we process weak refs below.
1595 ProcessMarkStack();
1596 CheckEmptyMarkStack();
1597 // Switch to the GC exclusive mark stack mode so that we can process the mark stack without a
1598 // lock from this point on.
1599 SwitchToGcExclusiveMarkStackMode();
1600 CheckEmptyMarkStack();
1601 if (kVerboseMode) {
1602 LOG(INFO) << "ProcessReferences";
1603 }
1604 // Process weak references. This may produce new refs to process and have them processed via
1605 // ProcessMarkStack (in the GC exclusive mark stack mode).
1606 ProcessReferences(self);
1607 CheckEmptyMarkStack();
1608 if (kVerboseMode) {
1609 LOG(INFO) << "SweepSystemWeaks";
1610 }
1611 SweepSystemWeaks(self);
1612 if (kVerboseMode) {
1613 LOG(INFO) << "SweepSystemWeaks done";
1614 }
1615 // Process the mark stack here one last time because the above SweepSystemWeaks() call may have
1616 // marked some objects (strings) alive, as hash_set::Erase() can call the hash function for
1617 // arbitrary elements in the weak intern table in InternTable::Table::SweepWeaks().
1618 ProcessMarkStack();
1619 CheckEmptyMarkStack();
1620 // Re-enable weak ref accesses.
1621 ReenableWeakRefAccess(self);
1622 // Free data for class loaders that we unloaded.
1623 Runtime::Current()->GetClassLinker()->CleanupClassLoaders();
1624 // Marking is done. Disable marking.
1625 DisableMarking();
1626 CheckEmptyMarkStack();
1627 }
1628
1629 if (kIsDebugBuild) {
1630 MutexLock mu(self, *Locks::thread_list_lock_);
1631 CHECK(weak_ref_access_enabled_);
1632 }
1633 if (kVerboseMode) {
1634 LOG(INFO) << "GC end of CopyingPhase";
1635 }
1636 }
1637
ReenableWeakRefAccess(Thread * self)1638 void ConcurrentCopying::ReenableWeakRefAccess(Thread* self) {
1639 if (kVerboseMode) {
1640 LOG(INFO) << "ReenableWeakRefAccess";
1641 }
1642 // Iterate all threads (we don't need to, and can't, use a checkpoint) and re-enable weak ref access.
1643 {
1644 MutexLock mu(self, *Locks::thread_list_lock_);
1645 weak_ref_access_enabled_ = true; // This is for new threads.
1646 std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
1647 for (Thread* thread : thread_list) {
1648 thread->SetWeakRefAccessEnabled(true);
1649 }
1650 }
1651 // Unblock blocking threads.
1652 GetHeap()->GetReferenceProcessor()->BroadcastForSlowPath(self);
1653 Runtime::Current()->BroadcastForNewSystemWeaks();
1654 }
1655
1656 class ConcurrentCopying::DisableMarkingCheckpoint : public Closure {
1657 public:
DisableMarkingCheckpoint(ConcurrentCopying * concurrent_copying)1658 explicit DisableMarkingCheckpoint(ConcurrentCopying* concurrent_copying)
1659 : concurrent_copying_(concurrent_copying) {
1660 }
1661
Run(Thread * thread)1662 void Run(Thread* thread) override NO_THREAD_SAFETY_ANALYSIS {
1663 // Note: self is not necessarily equal to thread since thread may be suspended.
1664 Thread* self = Thread::Current();
1665 DCHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
1666 << thread->GetState() << " thread " << thread << " self " << self;
1667 // Disable the thread-local is_gc_marking flag.
1668 // Note that a thread that has just started right before this checkpoint may already have this
1669 // flag set to false, which is ok.
1670 thread->SetIsGcMarkingAndUpdateEntrypoints(false);
1671 // If thread is a running mutator, then act on behalf of the garbage collector.
1672 // See the code in ThreadList::RunCheckpoint.
1673 concurrent_copying_->GetBarrier().Pass(self);
1674 }
1675
1676 private:
1677 ConcurrentCopying* const concurrent_copying_;
1678 };
1679
1680 class ConcurrentCopying::DisableMarkingCallback : public Closure {
1681 public:
DisableMarkingCallback(ConcurrentCopying * concurrent_copying)1682 explicit DisableMarkingCallback(ConcurrentCopying* concurrent_copying)
1683 : concurrent_copying_(concurrent_copying) {
1684 }
1685
Run(Thread * self ATTRIBUTE_UNUSED)1686 void Run(Thread* self ATTRIBUTE_UNUSED) override REQUIRES(Locks::thread_list_lock_) {
1687 // This needs to run under the thread_list_lock_ critical section in ThreadList::RunCheckpoint()
1688 // to avoid a race with ThreadList::Register().
1689 CHECK(concurrent_copying_->is_marking_);
1690 concurrent_copying_->is_marking_ = false;
1691 if (kUseBakerReadBarrier && kGrayDirtyImmuneObjects) {
1692 CHECK(concurrent_copying_->is_using_read_barrier_entrypoints_);
1693 concurrent_copying_->is_using_read_barrier_entrypoints_ = false;
1694 } else {
1695 CHECK(!concurrent_copying_->is_using_read_barrier_entrypoints_);
1696 }
1697 }
1698
1699 private:
1700 ConcurrentCopying* const concurrent_copying_;
1701 };
1702
IssueDisableMarkingCheckpoint()1703 void ConcurrentCopying::IssueDisableMarkingCheckpoint() {
1704 Thread* self = Thread::Current();
1705 DisableMarkingCheckpoint check_point(this);
1706 ThreadList* thread_list = Runtime::Current()->GetThreadList();
1707 gc_barrier_->Init(self, 0);
1708 DisableMarkingCallback dmc(this);
1709 size_t barrier_count = thread_list->RunCheckpoint(&check_point, &dmc);
1710 // If there are no threads to wait for, which implies that all the checkpoint functions have
1711 // finished, then there is no need to release the mutator lock.
1712 if (barrier_count == 0) {
1713 return;
1714 }
1715 // Release locks then wait for all mutator threads to pass the barrier.
1716 Locks::mutator_lock_->SharedUnlock(self);
1717 {
1718 ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
1719 gc_barrier_->Increment(self, barrier_count);
1720 }
1721 Locks::mutator_lock_->SharedLock(self);
1722 }
1723
DisableMarking()1724 void ConcurrentCopying::DisableMarking() {
1725 // Use a checkpoint to turn off the global is_marking and the thread-local is_gc_marking flags and
1726 // to ensure no threads are still in the middle of a read barrier which may have a from-space ref
1727 // cached in a local variable.
1728 IssueDisableMarkingCheckpoint();
1729 if (kUseTableLookupReadBarrier) {
1730 heap_->rb_table_->ClearAll();
1731 DCHECK(heap_->rb_table_->IsAllCleared());
1732 }
1733 is_mark_stack_push_disallowed_.store(1, std::memory_order_seq_cst);
1734 mark_stack_mode_.store(kMarkStackModeOff, std::memory_order_seq_cst);
1735 }
1736
IssueEmptyCheckpoint()1737 void ConcurrentCopying::IssueEmptyCheckpoint() {
1738 Thread* self = Thread::Current();
1739 ThreadList* thread_list = Runtime::Current()->GetThreadList();
1740 // Release locks then wait for all mutator threads to pass the barrier.
1741 Locks::mutator_lock_->SharedUnlock(self);
1742 thread_list->RunEmptyCheckpoint();
1743 Locks::mutator_lock_->SharedLock(self);
1744 }
1745
ExpandGcMarkStack()1746 void ConcurrentCopying::ExpandGcMarkStack() {
1747 DCHECK(gc_mark_stack_->IsFull());
1748 const size_t new_size = gc_mark_stack_->Capacity() * 2;
1749 std::vector<StackReference<mirror::Object>> temp(gc_mark_stack_->Begin(),
1750 gc_mark_stack_->End());
1751 gc_mark_stack_->Resize(new_size);
1752 for (auto& ref : temp) {
1753 gc_mark_stack_->PushBack(ref.AsMirrorPtr());
1754 }
1755 DCHECK(!gc_mark_stack_->IsFull());
1756 }
1757
PushOntoMarkStack(Thread * const self,mirror::Object * to_ref)1758 void ConcurrentCopying::PushOntoMarkStack(Thread* const self, mirror::Object* to_ref) {
1759 CHECK_EQ(is_mark_stack_push_disallowed_.load(std::memory_order_relaxed), 0)
1760 << " " << to_ref << " " << mirror::Object::PrettyTypeOf(to_ref);
1761 CHECK(thread_running_gc_ != nullptr);
1762 MarkStackMode mark_stack_mode = mark_stack_mode_.load(std::memory_order_relaxed);
1763 if (LIKELY(mark_stack_mode == kMarkStackModeThreadLocal)) {
1764 if (LIKELY(self == thread_running_gc_)) {
1765 // If GC-running thread, use the GC mark stack instead of a thread-local mark stack.
1766 CHECK(self->GetThreadLocalMarkStack() == nullptr);
1767 if (UNLIKELY(gc_mark_stack_->IsFull())) {
1768 ExpandGcMarkStack();
1769 }
1770 gc_mark_stack_->PushBack(to_ref);
1771 } else {
1772 // Otherwise, use a thread-local mark stack.
1773 accounting::AtomicStack<mirror::Object>* tl_mark_stack = self->GetThreadLocalMarkStack();
1774 if (UNLIKELY(tl_mark_stack == nullptr || tl_mark_stack->IsFull())) {
1775 MutexLock mu(self, mark_stack_lock_);
1776 // Get a new thread local mark stack.
1777 accounting::AtomicStack<mirror::Object>* new_tl_mark_stack;
1778 if (!pooled_mark_stacks_.empty()) {
1779 // Use a pooled mark stack.
1780 new_tl_mark_stack = pooled_mark_stacks_.back();
1781 pooled_mark_stacks_.pop_back();
1782 } else {
1783 // None pooled. Create a new one.
1784 new_tl_mark_stack =
1785 accounting::AtomicStack<mirror::Object>::Create(
1786 "thread local mark stack", 4 * KB, 4 * KB);
1787 }
1788 DCHECK(new_tl_mark_stack != nullptr);
1789 DCHECK(new_tl_mark_stack->IsEmpty());
1790 new_tl_mark_stack->PushBack(to_ref);
1791 self->SetThreadLocalMarkStack(new_tl_mark_stack);
1792 if (tl_mark_stack != nullptr) {
1793 // Store the old full stack into a vector.
1794 revoked_mark_stacks_.push_back(tl_mark_stack);
1795 }
1796 } else {
1797 tl_mark_stack->PushBack(to_ref);
1798 }
1799 }
1800 } else if (mark_stack_mode == kMarkStackModeShared) {
1801 // Access the shared GC mark stack with a lock.
1802 MutexLock mu(self, mark_stack_lock_);
1803 if (UNLIKELY(gc_mark_stack_->IsFull())) {
1804 ExpandGcMarkStack();
1805 }
1806 gc_mark_stack_->PushBack(to_ref);
1807 } else {
1808 CHECK_EQ(static_cast<uint32_t>(mark_stack_mode),
1809 static_cast<uint32_t>(kMarkStackModeGcExclusive))
1810 << "ref=" << to_ref
1811 << " self->gc_marking=" << self->GetIsGcMarking()
1812 << " cc->is_marking=" << is_marking_;
1813 CHECK(self == thread_running_gc_)
1814 << "Only GC-running thread should access the mark stack "
1815 << "in the GC exclusive mark stack mode";
1816 // Access the GC mark stack without a lock.
1817 if (UNLIKELY(gc_mark_stack_->IsFull())) {
1818 ExpandGcMarkStack();
1819 }
1820 gc_mark_stack_->PushBack(to_ref);
1821 }
1822 }
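// In summary, PushOntoMarkStack() above routes a reference to one of three stacks depending on
// the current mark stack mode and the calling thread (condensed from the logic above,
// illustrative only):
//
//   kMarkStackModeThreadLocal, self == thread_running_gc_  -> gc_mark_stack_ (no lock)
//   kMarkStackModeThreadLocal, self != thread_running_gc_  -> thread-local stack (pooled;
//                                                             revoked when full)
//   kMarkStackModeShared                                   -> gc_mark_stack_ (under mark_stack_lock_)
//   kMarkStackModeGcExclusive (GC thread only)             -> gc_mark_stack_ (no lock)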
1823
GetAllocationStack()1824 accounting::ObjectStack* ConcurrentCopying::GetAllocationStack() {
1825 return heap_->allocation_stack_.get();
1826 }
1827
GetLiveStack()1828 accounting::ObjectStack* ConcurrentCopying::GetLiveStack() {
1829 return heap_->live_stack_.get();
1830 }
1831
1832 // The following visitors are used to verify that there are no references to the from-space left
1833 // after marking.
1834 class ConcurrentCopying::VerifyNoFromSpaceRefsVisitor : public SingleRootVisitor {
1835 public:
VerifyNoFromSpaceRefsVisitor(ConcurrentCopying * collector)1836 explicit VerifyNoFromSpaceRefsVisitor(ConcurrentCopying* collector)
1837 : collector_(collector) {}
1838
operator ()(mirror::Object * ref,MemberOffset offset=MemberOffset (0),mirror::Object * holder=nullptr) const1839 void operator()(mirror::Object* ref,
1840 MemberOffset offset = MemberOffset(0),
1841 mirror::Object* holder = nullptr) const
1842 REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
1843 if (ref == nullptr) {
1844 // OK.
1845 return;
1846 }
1847 collector_->AssertToSpaceInvariant(holder, offset, ref);
1848 if (kUseBakerReadBarrier) {
1849 CHECK_EQ(ref->GetReadBarrierState(), ReadBarrier::NonGrayState())
1850 << "Ref " << ref << " " << ref->PrettyTypeOf() << " has gray rb_state";
1851 }
1852 }
1853
VisitRoot(mirror::Object * root,const RootInfo & info ATTRIBUTE_UNUSED)1854 void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED)
1855 override REQUIRES_SHARED(Locks::mutator_lock_) {
1856 DCHECK(root != nullptr);
1857 operator()(root);
1858 }
1859
1860 private:
1861 ConcurrentCopying* const collector_;
1862 };
1863
1864 class ConcurrentCopying::VerifyNoFromSpaceRefsFieldVisitor {
1865 public:
VerifyNoFromSpaceRefsFieldVisitor(ConcurrentCopying * collector)1866 explicit VerifyNoFromSpaceRefsFieldVisitor(ConcurrentCopying* collector)
1867 : collector_(collector) {}
1868
operator ()(ObjPtr<mirror::Object> obj,MemberOffset offset,bool is_static ATTRIBUTE_UNUSED) const1869 void operator()(ObjPtr<mirror::Object> obj,
1870 MemberOffset offset,
1871 bool is_static ATTRIBUTE_UNUSED) const
1872 REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
1873 mirror::Object* ref =
1874 obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
1875 VerifyNoFromSpaceRefsVisitor visitor(collector_);
1876 visitor(ref, offset, obj.Ptr());
1877 }
operator ()(ObjPtr<mirror::Class> klass,ObjPtr<mirror::Reference> ref) const1878 void operator()(ObjPtr<mirror::Class> klass,
1879 ObjPtr<mirror::Reference> ref) const
1880 REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
1881 CHECK(klass->IsTypeOfReferenceClass());
1882 this->operator()(ref, mirror::Reference::ReferentOffset(), false);
1883 }
1884
VisitRootIfNonNull(mirror::CompressedReference<mirror::Object> * root) const1885 void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
1886 REQUIRES_SHARED(Locks::mutator_lock_) {
1887 if (!root->IsNull()) {
1888 VisitRoot(root);
1889 }
1890 }
1891
VisitRoot(mirror::CompressedReference<mirror::Object> * root) const1892 void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
1893 REQUIRES_SHARED(Locks::mutator_lock_) {
1894 VerifyNoFromSpaceRefsVisitor visitor(collector_);
1895 visitor(root->AsMirrorPtr());
1896 }
1897
1898 private:
1899 ConcurrentCopying* const collector_;
1900 };
1901
1902 // Verify there are no from-space references left after the marking phase.
VerifyNoFromSpaceReferences()1903 void ConcurrentCopying::VerifyNoFromSpaceReferences() {
1904 Thread* self = Thread::Current();
1905 DCHECK(Locks::mutator_lock_->IsExclusiveHeld(self));
1906 // Verify all threads have is_gc_marking to be false
1907 {
1908 MutexLock mu(self, *Locks::thread_list_lock_);
1909 std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
1910 for (Thread* thread : thread_list) {
1911 CHECK(!thread->GetIsGcMarking());
1912 }
1913 }
1914
1915 auto verify_no_from_space_refs_visitor = [&](mirror::Object* obj)
1916 REQUIRES_SHARED(Locks::mutator_lock_) {
1917 CHECK(obj != nullptr);
1918 space::RegionSpace* region_space = RegionSpace();
1919 CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space";
1920 VerifyNoFromSpaceRefsFieldVisitor visitor(this);
1921 obj->VisitReferences</*kVisitNativeRoots=*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
1922 visitor,
1923 visitor);
1924 if (kUseBakerReadBarrier) {
1925 CHECK_EQ(obj->GetReadBarrierState(), ReadBarrier::NonGrayState())
1926 << "obj=" << obj << " has gray rb_state " << obj->GetReadBarrierState();
1927 }
1928 };
1929 // Roots.
1930 {
1931 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
1932 VerifyNoFromSpaceRefsVisitor ref_visitor(this);
1933 Runtime::Current()->VisitRoots(&ref_visitor);
1934 }
1935 // The to-space.
1936 region_space_->WalkToSpace(verify_no_from_space_refs_visitor);
1937 // Non-moving spaces.
1938 {
1939 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
1940 heap_->GetMarkBitmap()->Visit(verify_no_from_space_refs_visitor);
1941 }
1942 // The alloc stack.
1943 {
1944 VerifyNoFromSpaceRefsVisitor ref_visitor(this);
1945 for (auto* it = heap_->allocation_stack_->Begin(), *end = heap_->allocation_stack_->End();
1946 it < end; ++it) {
1947 mirror::Object* const obj = it->AsMirrorPtr();
1948 if (obj != nullptr && obj->GetClass() != nullptr) {
1949 // TODO: need to call this only if obj is alive?
1950 ref_visitor(obj);
1951 verify_no_from_space_refs_visitor(obj);
1952 }
1953 }
1954 }
1955 // TODO: LOS. But only refs in LOS are classes.
1956 }
1957
1958 // The following visitors are used to assert the to-space invariant.
1959 class ConcurrentCopying::AssertToSpaceInvariantFieldVisitor {
1960 public:
AssertToSpaceInvariantFieldVisitor(ConcurrentCopying * collector)1961 explicit AssertToSpaceInvariantFieldVisitor(ConcurrentCopying* collector)
1962 : collector_(collector) {}
1963
operator ()(ObjPtr<mirror::Object> obj,MemberOffset offset,bool is_static ATTRIBUTE_UNUSED) const1964 void operator()(ObjPtr<mirror::Object> obj,
1965 MemberOffset offset,
1966 bool is_static ATTRIBUTE_UNUSED) const
1967 REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
1968 mirror::Object* ref =
1969 obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
1970 collector_->AssertToSpaceInvariant(obj.Ptr(), offset, ref);
1971 }
operator ()(ObjPtr<mirror::Class> klass,ObjPtr<mirror::Reference> ref ATTRIBUTE_UNUSED) const1972 void operator()(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref ATTRIBUTE_UNUSED) const
1973 REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
1974 CHECK(klass->IsTypeOfReferenceClass());
1975 }
1976
VisitRootIfNonNull(mirror::CompressedReference<mirror::Object> * root) const1977 void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
1978 REQUIRES_SHARED(Locks::mutator_lock_) {
1979 if (!root->IsNull()) {
1980 VisitRoot(root);
1981 }
1982 }
1983
VisitRoot(mirror::CompressedReference<mirror::Object> * root) const1984 void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
1985 REQUIRES_SHARED(Locks::mutator_lock_) {
1986 mirror::Object* ref = root->AsMirrorPtr();
1987 collector_->AssertToSpaceInvariant(/* obj */ nullptr, MemberOffset(0), ref);
1988 }
1989
1990 private:
1991 ConcurrentCopying* const collector_;
1992 };
1993
RevokeThreadLocalMarkStacks(bool disable_weak_ref_access,Closure * checkpoint_callback)1994 void ConcurrentCopying::RevokeThreadLocalMarkStacks(bool disable_weak_ref_access,
1995 Closure* checkpoint_callback) {
1996 Thread* self = Thread::Current();
1997 RevokeThreadLocalMarkStackCheckpoint check_point(this, disable_weak_ref_access);
1998 ThreadList* thread_list = Runtime::Current()->GetThreadList();
1999 gc_barrier_->Init(self, 0);
2000 size_t barrier_count = thread_list->RunCheckpoint(&check_point, checkpoint_callback);
2001 // If there are no threads to wait for, which implies that all the checkpoint functions have
2002 // finished, then there is no need to release the mutator lock.
2003 if (barrier_count == 0) {
2004 return;
2005 }
2006 Locks::mutator_lock_->SharedUnlock(self);
2007 {
2008 ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
2009 gc_barrier_->Increment(self, barrier_count);
2010 }
2011 Locks::mutator_lock_->SharedLock(self);
2012 }
2013
RevokeThreadLocalMarkStack(Thread * thread)2014 void ConcurrentCopying::RevokeThreadLocalMarkStack(Thread* thread) {
2015 Thread* self = Thread::Current();
2016 CHECK_EQ(self, thread);
2017 MutexLock mu(self, mark_stack_lock_);
2018 accounting::AtomicStack<mirror::Object>* tl_mark_stack = thread->GetThreadLocalMarkStack();
2019 if (tl_mark_stack != nullptr) {
2020 CHECK(is_marking_);
2021 revoked_mark_stacks_.push_back(tl_mark_stack);
2022 thread->SetThreadLocalMarkStack(nullptr);
2023 }
2024 }
2025
ProcessMarkStack()2026 void ConcurrentCopying::ProcessMarkStack() {
2027 if (kVerboseMode) {
2028 LOG(INFO) << "ProcessMarkStack. ";
2029 }
2030 bool empty_prev = false;
2031 while (true) {
2032 bool empty = ProcessMarkStackOnce();
2033 if (empty_prev && empty) {
2034 // Saw empty mark stack for a second time, done.
2035 break;
2036 }
2037 empty_prev = empty;
2038 }
2039 }
2040
ProcessMarkStackOnce()2041 bool ConcurrentCopying::ProcessMarkStackOnce() {
2042 DCHECK(thread_running_gc_ != nullptr);
2043 Thread* const self = Thread::Current();
2044 DCHECK(self == thread_running_gc_);
2045 DCHECK(thread_running_gc_->GetThreadLocalMarkStack() == nullptr);
2046 size_t count = 0;
2047 MarkStackMode mark_stack_mode = mark_stack_mode_.load(std::memory_order_relaxed);
2048 if (mark_stack_mode == kMarkStackModeThreadLocal) {
2049 // Process the thread-local mark stacks and the GC mark stack.
2050 count += ProcessThreadLocalMarkStacks(/* disable_weak_ref_access= */ false,
2051 /* checkpoint_callback= */ nullptr,
2052 [this] (mirror::Object* ref)
2053 REQUIRES_SHARED(Locks::mutator_lock_) {
2054 ProcessMarkStackRef(ref);
2055 });
2056 while (!gc_mark_stack_->IsEmpty()) {
2057 mirror::Object* to_ref = gc_mark_stack_->PopBack();
2058 ProcessMarkStackRef(to_ref);
2059 ++count;
2060 }
2061 gc_mark_stack_->Reset();
2062 } else if (mark_stack_mode == kMarkStackModeShared) {
2063 // Do an empty checkpoint to avoid a race with a mutator preempted in the middle of a read
2064 // barrier but before pushing onto the mark stack. b/32508093. Note the weak ref access is
2065 // disabled at this point.
2066 IssueEmptyCheckpoint();
2067 // Process the shared GC mark stack with a lock.
2068 {
2069 MutexLock mu(thread_running_gc_, mark_stack_lock_);
2070 CHECK(revoked_mark_stacks_.empty());
2071 CHECK_EQ(pooled_mark_stacks_.size(), kMarkStackPoolSize);
2072 }
2073 while (true) {
2074 std::vector<mirror::Object*> refs;
2075 {
2076 // Copy refs with lock. Note the number of refs should be small.
2077 MutexLock mu(thread_running_gc_, mark_stack_lock_);
2078 if (gc_mark_stack_->IsEmpty()) {
2079 break;
2080 }
2081 for (StackReference<mirror::Object>* p = gc_mark_stack_->Begin();
2082 p != gc_mark_stack_->End(); ++p) {
2083 refs.push_back(p->AsMirrorPtr());
2084 }
2085 gc_mark_stack_->Reset();
2086 }
2087 for (mirror::Object* ref : refs) {
2088 ProcessMarkStackRef(ref);
2089 ++count;
2090 }
2091 }
2092 } else {
2093 CHECK_EQ(static_cast<uint32_t>(mark_stack_mode),
2094 static_cast<uint32_t>(kMarkStackModeGcExclusive));
2095 {
2096 MutexLock mu(thread_running_gc_, mark_stack_lock_);
2097 CHECK(revoked_mark_stacks_.empty());
2098 CHECK_EQ(pooled_mark_stacks_.size(), kMarkStackPoolSize);
2099 }
2100 // Process the GC mark stack in the exclusive mode. No need to take the lock.
2101 while (!gc_mark_stack_->IsEmpty()) {
2102 mirror::Object* to_ref = gc_mark_stack_->PopBack();
2103 ProcessMarkStackRef(to_ref);
2104 ++count;
2105 }
2106 gc_mark_stack_->Reset();
2107 }
2108
2109 // Return true if the stack was empty.
2110 return count == 0;
2111 }
2112
2113 template <typename Processor>
ProcessThreadLocalMarkStacks(bool disable_weak_ref_access,Closure * checkpoint_callback,const Processor & processor)2114 size_t ConcurrentCopying::ProcessThreadLocalMarkStacks(bool disable_weak_ref_access,
2115 Closure* checkpoint_callback,
2116 const Processor& processor) {
2117 // Run a checkpoint to collect all thread local mark stacks and iterate over them all.
2118 RevokeThreadLocalMarkStacks(disable_weak_ref_access, checkpoint_callback);
2119 if (disable_weak_ref_access) {
2120 CHECK_EQ(static_cast<uint32_t>(mark_stack_mode_.load(std::memory_order_relaxed)),
2121 static_cast<uint32_t>(kMarkStackModeShared));
2122 }
2123 size_t count = 0;
2124 std::vector<accounting::AtomicStack<mirror::Object>*> mark_stacks;
2125 {
2126 MutexLock mu(thread_running_gc_, mark_stack_lock_);
2127 // Make a copy of the mark stack vector.
2128 mark_stacks = revoked_mark_stacks_;
2129 revoked_mark_stacks_.clear();
2130 }
2131 for (accounting::AtomicStack<mirror::Object>* mark_stack : mark_stacks) {
2132 for (StackReference<mirror::Object>* p = mark_stack->Begin(); p != mark_stack->End(); ++p) {
2133 mirror::Object* to_ref = p->AsMirrorPtr();
2134 processor(to_ref);
2135 ++count;
2136 }
2137 {
2138 MutexLock mu(thread_running_gc_, mark_stack_lock_);
2139 if (pooled_mark_stacks_.size() >= kMarkStackPoolSize) {
2140 // The pool already has enough stacks. Delete this one.
2141 delete mark_stack;
2142 } else {
2143 // Otherwise, put it into the pool for later reuse.
2144 mark_stack->Reset();
2145 pooled_mark_stacks_.push_back(mark_stack);
2146 }
2147 }
2148 }
2149 if (disable_weak_ref_access) {
2150 MutexLock mu(thread_running_gc_, mark_stack_lock_);
2151 CHECK(revoked_mark_stacks_.empty());
2152 CHECK_EQ(pooled_mark_stacks_.size(), kMarkStackPoolSize);
2153 }
2154 return count;
2155 }
2156
ProcessMarkStackRef(mirror::Object * to_ref)2157 inline void ConcurrentCopying::ProcessMarkStackRef(mirror::Object* to_ref) {
2158 DCHECK(!region_space_->IsInFromSpace(to_ref));
2159 space::RegionSpace::RegionType rtype = region_space_->GetRegionType(to_ref);
2160 if (kUseBakerReadBarrier) {
2161 DCHECK(to_ref->GetReadBarrierState() == ReadBarrier::GrayState())
2162 << " to_ref=" << to_ref
2163 << " rb_state=" << to_ref->GetReadBarrierState()
2164 << " is_marked=" << IsMarked(to_ref)
2165 << " type=" << to_ref->PrettyTypeOf()
2166 << " young_gen=" << std::boolalpha << young_gen_ << std::noboolalpha
2167 << " space=" << heap_->DumpSpaceNameFromAddress(to_ref)
2168 << " region_type=" << rtype
2169 // TODO: Temporary; remove this when this is no longer needed (b/116087961).
2170 << " runtime->sentinel=" << Runtime::Current()->GetSentinel().Read<kWithoutReadBarrier>();
2171 }
2172 bool add_to_live_bytes = false;
2173 // Invariant: There should be no object from a newly-allocated
2174 // region (either large or non-large) on the mark stack.
2175 DCHECK(!region_space_->IsInNewlyAllocatedRegion(to_ref)) << to_ref;
2176 bool perform_scan = false;
2177 switch (rtype) {
2178 case space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace:
2179 // Mark the bitmap only in the GC thread here so that we don't need a CAS.
2180 if (!kUseBakerReadBarrier || !region_space_bitmap_->Set(to_ref)) {
2181 // It may be already marked if we accidentally pushed the same object twice due to the racy
2182 // bitmap read in MarkUnevacFromSpaceRegion.
2183 if (use_generational_cc_ && young_gen_) {
2184 CHECK(region_space_->IsLargeObject(to_ref));
2185 region_space_->ZeroLiveBytesForLargeObject(to_ref);
2186 }
2187 perform_scan = true;
2188 // Only add to the live bytes if the object was not already marked and we are not the young
2189 // GC.
2190 // Why add live bytes even after 2-phase GC?
2191 // We need to ensure that if there is a unevac region with any live
2192 // objects, then its live_bytes must be non-zero. Otherwise,
2193 // ClearFromSpace() will clear the region. Considering, that we may skip
2194 // live objects during marking phase of 2-phase GC, we have to take care
2195 // of such objects here.
2196 add_to_live_bytes = true;
2197 }
2198 break;
2199 case space::RegionSpace::RegionType::kRegionTypeToSpace:
2200 if (use_generational_cc_) {
2201 // Copied to to-space, set the bit so that the next GC can scan objects.
2202 region_space_bitmap_->Set(to_ref);
2203 }
2204 perform_scan = true;
2205 break;
2206 default:
2207 DCHECK(!region_space_->HasAddress(to_ref)) << to_ref;
2208 DCHECK(!immune_spaces_.ContainsObject(to_ref));
2209 // Non-moving or large-object space.
2210 if (kUseBakerReadBarrier) {
2211 accounting::ContinuousSpaceBitmap* mark_bitmap =
2212 heap_->GetNonMovingSpace()->GetMarkBitmap();
2213 const bool is_los = !mark_bitmap->HasAddress(to_ref);
2214 if (is_los) {
2215 if (!IsAligned<kPageSize>(to_ref)) {
2216 // Ref is a large object that is not aligned, it must be heap
2217 // corruption. Remove memory protection and dump data before
2218 // AtomicSetReadBarrierState since it will fault if the address is not
2219 // valid.
2220 region_space_->Unprotect();
2221 heap_->GetVerification()->LogHeapCorruption(/* obj */ nullptr,
2222 MemberOffset(0),
2223 to_ref,
2224 /* fatal */ true);
2225 }
2226 DCHECK(heap_->GetLargeObjectsSpace())
2227 << "ref=" << to_ref
2228 << " doesn't belong to non-moving space and large object space doesn't exist";
2229 accounting::LargeObjectBitmap* los_bitmap =
2230 heap_->GetLargeObjectsSpace()->GetMarkBitmap();
2231 DCHECK(los_bitmap->HasAddress(to_ref));
2232 // Only the GC thread could be setting the LOS bitmap, hence this
2233 // doesn't need to be done atomically.
2234 perform_scan = !los_bitmap->Set(to_ref);
2235 } else {
2236 // Only the GC thread could be setting the non-moving space bitmap,
2237 // hence this doesn't need to be done atomically.
2238 perform_scan = !mark_bitmap->Set(to_ref);
2239 }
2240 } else {
2241 perform_scan = true;
2242 }
2243 }
2244 if (perform_scan) {
2245 if (use_generational_cc_ && young_gen_) {
2246 Scan<true>(to_ref);
2247 } else {
2248 Scan<false>(to_ref);
2249 }
2250 }
2251 if (kUseBakerReadBarrier) {
2252 DCHECK(to_ref->GetReadBarrierState() == ReadBarrier::GrayState())
2253 << " to_ref=" << to_ref
2254 << " rb_state=" << to_ref->GetReadBarrierState()
2255 << " is_marked=" << IsMarked(to_ref)
2256 << " type=" << to_ref->PrettyTypeOf()
2257 << " young_gen=" << std::boolalpha << young_gen_ << std::noboolalpha
2258 << " space=" << heap_->DumpSpaceNameFromAddress(to_ref)
2259 << " region_type=" << rtype
2260 // TODO: Temporary; remove this when this is no longer needed (b/116087961).
2261 << " runtime->sentinel=" << Runtime::Current()->GetSentinel().Read<kWithoutReadBarrier>();
2262 }
2263 #ifdef USE_BAKER_OR_BROOKS_READ_BARRIER
2264 mirror::Object* referent = nullptr;
2265 if (UNLIKELY((to_ref->GetClass<kVerifyNone, kWithoutReadBarrier>()->IsTypeOfReferenceClass() &&
2266 (referent = to_ref->AsReference()->GetReferent<kWithoutReadBarrier>()) != nullptr &&
2267 !IsInToSpace(referent)))) {
2268 // Leave this reference gray in the queue so that GetReferent() will trigger a read barrier. We
2269 // will change it to non-gray later in ReferenceQueue::DisableReadBarrierForReference.
2270 DCHECK(to_ref->AsReference()->GetPendingNext() != nullptr)
2271 << "Left unenqueued ref gray " << to_ref;
2272 } else {
2273 // We may occasionally leave a reference non-gray in the queue if its referent happens to be
2274 // concurrently marked after the Scan() call above has enqueued the Reference, in which case the
2275 // above IsInToSpace() evaluates to true and we change the color from gray to non-gray here in
2276 // this else block.
2277 if (kUseBakerReadBarrier) {
2278 bool success = to_ref->AtomicSetReadBarrierState<std::memory_order_release>(
2279 ReadBarrier::GrayState(),
2280 ReadBarrier::NonGrayState());
2281 DCHECK(success) << "Must succeed as we won the race.";
2282 }
2283 }
2284 #else
2285 DCHECK(!kUseBakerReadBarrier);
2286 #endif
2287
2288 if (add_to_live_bytes) {
2289 // Add to the live bytes per unevacuated from-space. Note this code is always run by the
2290 // GC-running thread (no synchronization required).
2291 DCHECK(region_space_bitmap_->Test(to_ref));
2292 size_t obj_size = to_ref->SizeOf<kDefaultVerifyFlags>();
2293 size_t alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment);
2294 region_space_->AddLiveBytes(to_ref, alloc_size);
2295 }
2296 if (ReadBarrier::kEnableToSpaceInvariantChecks) {
2297 CHECK(to_ref != nullptr);
2298 space::RegionSpace* region_space = RegionSpace();
2299 CHECK(!region_space->IsInFromSpace(to_ref)) << "Scanning object " << to_ref << " in from space";
2300 AssertToSpaceInvariant(nullptr, MemberOffset(0), to_ref);
2301 AssertToSpaceInvariantFieldVisitor visitor(this);
2302 to_ref->VisitReferences</*kVisitNativeRoots=*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
2303 visitor,
2304 visitor);
2305 }
2306 }
2307
2308 class ConcurrentCopying::DisableWeakRefAccessCallback : public Closure {
2309 public:
DisableWeakRefAccessCallback(ConcurrentCopying * concurrent_copying)2310 explicit DisableWeakRefAccessCallback(ConcurrentCopying* concurrent_copying)
2311 : concurrent_copying_(concurrent_copying) {
2312 }
2313
Run(Thread * self ATTRIBUTE_UNUSED)2314 void Run(Thread* self ATTRIBUTE_UNUSED) override REQUIRES(Locks::thread_list_lock_) {
2315 // This needs to run under the thread_list_lock_ critical section in ThreadList::RunCheckpoint()
2316 // to avoid a deadlock b/31500969.
2317 CHECK(concurrent_copying_->weak_ref_access_enabled_);
2318 concurrent_copying_->weak_ref_access_enabled_ = false;
2319 }
2320
2321 private:
2322 ConcurrentCopying* const concurrent_copying_;
2323 };
2324
SwitchToSharedMarkStackMode()2325 void ConcurrentCopying::SwitchToSharedMarkStackMode() {
2326 Thread* self = Thread::Current();
2327 DCHECK(thread_running_gc_ != nullptr);
2328 DCHECK(self == thread_running_gc_);
2329 DCHECK(thread_running_gc_->GetThreadLocalMarkStack() == nullptr);
2330 MarkStackMode before_mark_stack_mode = mark_stack_mode_.load(std::memory_order_relaxed);
2331 CHECK_EQ(static_cast<uint32_t>(before_mark_stack_mode),
2332 static_cast<uint32_t>(kMarkStackModeThreadLocal));
2333 mark_stack_mode_.store(kMarkStackModeShared, std::memory_order_relaxed);
2334 DisableWeakRefAccessCallback dwrac(this);
2335 // Process the thread local mark stacks one last time after switching to the shared mark stack
2336 // mode and disable weak ref accesses.
2337 ProcessThreadLocalMarkStacks(/* disable_weak_ref_access= */ true,
2338 &dwrac,
2339 [this] (mirror::Object* ref)
2340 REQUIRES_SHARED(Locks::mutator_lock_) {
2341 ProcessMarkStackRef(ref);
2342 });
2343 if (kVerboseMode) {
2344 LOG(INFO) << "Switched to shared mark stack mode and disabled weak ref access";
2345 }
2346 }
2347
SwitchToGcExclusiveMarkStackMode()2348 void ConcurrentCopying::SwitchToGcExclusiveMarkStackMode() {
2349 Thread* self = Thread::Current();
2350 DCHECK(thread_running_gc_ != nullptr);
2351 DCHECK(self == thread_running_gc_);
2352 DCHECK(thread_running_gc_->GetThreadLocalMarkStack() == nullptr);
2353 MarkStackMode before_mark_stack_mode = mark_stack_mode_.load(std::memory_order_relaxed);
2354 CHECK_EQ(static_cast<uint32_t>(before_mark_stack_mode),
2355 static_cast<uint32_t>(kMarkStackModeShared));
2356 mark_stack_mode_.store(kMarkStackModeGcExclusive, std::memory_order_relaxed);
2357 QuasiAtomic::ThreadFenceForConstructor();
2358 if (kVerboseMode) {
2359 LOG(INFO) << "Switched to GC exclusive mark stack mode";
2360 }
2361 }
2362
CheckEmptyMarkStack()2363 void ConcurrentCopying::CheckEmptyMarkStack() {
2364 Thread* self = Thread::Current();
2365 DCHECK(thread_running_gc_ != nullptr);
2366 DCHECK(self == thread_running_gc_);
2367 DCHECK(thread_running_gc_->GetThreadLocalMarkStack() == nullptr);
2368 MarkStackMode mark_stack_mode = mark_stack_mode_.load(std::memory_order_relaxed);
2369 if (mark_stack_mode == kMarkStackModeThreadLocal) {
2370 // Thread-local mark stack mode.
2371 RevokeThreadLocalMarkStacks(false, nullptr);
2372 MutexLock mu(thread_running_gc_, mark_stack_lock_);
2373 if (!revoked_mark_stacks_.empty()) {
2374 for (accounting::AtomicStack<mirror::Object>* mark_stack : revoked_mark_stacks_) {
2375 while (!mark_stack->IsEmpty()) {
2376 mirror::Object* obj = mark_stack->PopBack();
2377 if (kUseBakerReadBarrier) {
2378 uint32_t rb_state = obj->GetReadBarrierState();
2379 LOG(INFO) << "On mark queue : " << obj << " " << obj->PrettyTypeOf() << " rb_state="
2380 << rb_state << " is_marked=" << IsMarked(obj);
2381 } else {
2382 LOG(INFO) << "On mark queue : " << obj << " " << obj->PrettyTypeOf()
2383 << " is_marked=" << IsMarked(obj);
2384 }
2385 }
2386 }
2387 LOG(FATAL) << "mark stack is not empty";
2388 }
2389 } else {
2390 // Shared, GC-exclusive, or off.
2391 MutexLock mu(thread_running_gc_, mark_stack_lock_);
2392 CHECK(gc_mark_stack_->IsEmpty());
2393 CHECK(revoked_mark_stacks_.empty());
2394 CHECK_EQ(pooled_mark_stacks_.size(), kMarkStackPoolSize);
2395 }
2396 }
2397
SweepSystemWeaks(Thread * self)2398 void ConcurrentCopying::SweepSystemWeaks(Thread* self) {
2399 TimingLogger::ScopedTiming split("SweepSystemWeaks", GetTimings());
2400 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
2401 Runtime::Current()->SweepSystemWeaks(this);
2402 }
2403
Sweep(bool swap_bitmaps)2404 void ConcurrentCopying::Sweep(bool swap_bitmaps) {
2405 if (use_generational_cc_ && young_gen_) {
2406 // Only sweep objects on the live stack.
2407 SweepArray(heap_->GetLiveStack(), /* swap_bitmaps= */ false);
2408 } else {
2409 {
2410 TimingLogger::ScopedTiming t("MarkStackAsLive", GetTimings());
2411 accounting::ObjectStack* live_stack = heap_->GetLiveStack();
2412 if (kEnableFromSpaceAccountingCheck) {
2413 // Ensure that nobody inserted items in the live stack after we swapped the stacks.
2414 CHECK_GE(live_stack_freeze_size_, live_stack->Size());
2415 }
2416 heap_->MarkAllocStackAsLive(live_stack);
2417 live_stack->Reset();
2418 }
2419 CheckEmptyMarkStack();
2420 TimingLogger::ScopedTiming split("Sweep", GetTimings());
2421 for (const auto& space : GetHeap()->GetContinuousSpaces()) {
2422 if (space->IsContinuousMemMapAllocSpace() && space != region_space_
2423 && !immune_spaces_.ContainsSpace(space)) {
2424 space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
2425 TimingLogger::ScopedTiming split2(
2426 alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", GetTimings());
2427 RecordFree(alloc_space->Sweep(swap_bitmaps));
2428 }
2429 }
2430 SweepLargeObjects(swap_bitmaps);
2431 }
2432 }
2433
2434 // Copied and adapted from MarkSweep::SweepArray.
SweepArray(accounting::ObjectStack * allocations,bool swap_bitmaps)2435 void ConcurrentCopying::SweepArray(accounting::ObjectStack* allocations, bool swap_bitmaps) {
2436 // This method is only used when Generational CC collection is enabled.
2437 DCHECK(use_generational_cc_);
2438 CheckEmptyMarkStack();
2439 TimingLogger::ScopedTiming t("SweepArray", GetTimings());
2440 Thread* self = Thread::Current();
2441 mirror::Object** chunk_free_buffer = reinterpret_cast<mirror::Object**>(
2442 sweep_array_free_buffer_mem_map_.BaseBegin());
2443 size_t chunk_free_pos = 0;
2444 ObjectBytePair freed;
2445 ObjectBytePair freed_los;
2446 // How many objects are left in the array, modified after each space is swept.
2447 StackReference<mirror::Object>* objects = allocations->Begin();
2448 size_t count = allocations->Size();
2449 // Start by sweeping the continuous spaces.
2450 for (space::ContinuousSpace* space : heap_->GetContinuousSpaces()) {
2451 if (!space->IsAllocSpace() ||
2452 space == region_space_ ||
2453 immune_spaces_.ContainsSpace(space) ||
2454 space->GetLiveBitmap() == nullptr) {
2455 continue;
2456 }
2457 space::AllocSpace* alloc_space = space->AsAllocSpace();
2458 accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
2459 accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
2460 if (swap_bitmaps) {
2461 std::swap(live_bitmap, mark_bitmap);
2462 }
2463 StackReference<mirror::Object>* out = objects;
2464 for (size_t i = 0; i < count; ++i) {
2465 mirror::Object* const obj = objects[i].AsMirrorPtr();
2466 if (kUseThreadLocalAllocationStack && obj == nullptr) {
2467 continue;
2468 }
2469 if (space->HasAddress(obj)) {
2470 // This object is in the space, remove it from the array and add it to the sweep buffer
2471 // if needed.
2472 if (!mark_bitmap->Test(obj)) {
2473 if (chunk_free_pos >= kSweepArrayChunkFreeSize) {
2474 TimingLogger::ScopedTiming t2("FreeList", GetTimings());
2475 freed.objects += chunk_free_pos;
2476 freed.bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
2477 chunk_free_pos = 0;
2478 }
2479 chunk_free_buffer[chunk_free_pos++] = obj;
2480 }
2481 } else {
2482 (out++)->Assign(obj);
2483 }
2484 }
2485 if (chunk_free_pos > 0) {
2486 TimingLogger::ScopedTiming t2("FreeList", GetTimings());
2487 freed.objects += chunk_free_pos;
2488 freed.bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
2489 chunk_free_pos = 0;
2490 }
2491 // All of the references which the space contained are no longer in the allocation stack;
2492 // update the count.
2493 count = out - objects;
2494 }
2495 // Handle the large object space.
2496 space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
2497 if (large_object_space != nullptr) {
2498 accounting::LargeObjectBitmap* large_live_objects = large_object_space->GetLiveBitmap();
2499 accounting::LargeObjectBitmap* large_mark_objects = large_object_space->GetMarkBitmap();
2500 if (swap_bitmaps) {
2501 std::swap(large_live_objects, large_mark_objects);
2502 }
2503 for (size_t i = 0; i < count; ++i) {
2504 mirror::Object* const obj = objects[i].AsMirrorPtr();
2505 // Handle large objects.
2506 if (kUseThreadLocalAllocationStack && obj == nullptr) {
2507 continue;
2508 }
2509 if (!large_mark_objects->Test(obj)) {
2510 ++freed_los.objects;
2511 freed_los.bytes += large_object_space->Free(self, obj);
2512 }
2513 }
2514 }
2515 {
2516 TimingLogger::ScopedTiming t2("RecordFree", GetTimings());
2517 RecordFree(freed);
2518 RecordFreeLOS(freed_los);
2519 t2.NewTiming("ResetStack");
2520 allocations->Reset();
2521 }
2522 sweep_array_free_buffer_mem_map_.MadviseDontNeedAndZero();
2523 }
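// A minimal sketch of the chunked-free pattern used by SweepArray() above (illustrative only):
// unmarked objects are batched into the pre-mapped buffer and released with one FreeList() call
// per batch of up to kSweepArrayChunkFreeSize objects, rather than one Free() call per object.
//
//   mirror::Object* buffer[kSweepArrayChunkFreeSize];
//   size_t pos = 0;
//   auto flush = [&](space::AllocSpace* alloc_space, Thread* self) {
//     if (pos != 0) {
//       alloc_space->FreeList(self, pos, buffer);  // Frees the whole batch in one call.
//       pos = 0;
//     }
//   };
//   // For each unmarked object obj in the space:
//   //   buffer[pos++] = obj;
//   //   if (pos == kSweepArrayChunkFreeSize) flush(alloc_space, self);
//   // Flush the remainder after the space has been scanned.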
2524
MarkZygoteLargeObjects()2525 void ConcurrentCopying::MarkZygoteLargeObjects() {
2526 TimingLogger::ScopedTiming split(__FUNCTION__, GetTimings());
2527 Thread* const self = Thread::Current();
2528 WriterMutexLock rmu(self, *Locks::heap_bitmap_lock_);
2529 space::LargeObjectSpace* const los = heap_->GetLargeObjectsSpace();
2530 if (los != nullptr) {
2531 // Pick the current live bitmap (mark bitmap if swapped).
2532 accounting::LargeObjectBitmap* const live_bitmap = los->GetLiveBitmap();
2533 accounting::LargeObjectBitmap* const mark_bitmap = los->GetMarkBitmap();
2534 // Walk through all of the objects and explicitly mark the zygote ones so they don't get swept.
2535 std::pair<uint8_t*, uint8_t*> range = los->GetBeginEndAtomic();
2536 live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(range.first),
2537 reinterpret_cast<uintptr_t>(range.second),
2538 [mark_bitmap, los, self](mirror::Object* obj)
2539 REQUIRES(Locks::heap_bitmap_lock_)
2540 REQUIRES_SHARED(Locks::mutator_lock_) {
2541 if (los->IsZygoteLargeObject(self, obj)) {
2542 mark_bitmap->Set(obj);
2543 }
2544 });
2545 }
2546 }
2547
SweepLargeObjects(bool swap_bitmaps)2548 void ConcurrentCopying::SweepLargeObjects(bool swap_bitmaps) {
2549 TimingLogger::ScopedTiming split("SweepLargeObjects", GetTimings());
2550 if (heap_->GetLargeObjectsSpace() != nullptr) {
2551 RecordFreeLOS(heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps));
2552 }
2553 }
2554
CaptureRssAtPeak()2555 void ConcurrentCopying::CaptureRssAtPeak() {
2556 using range_t = std::pair<void*, void*>;
2557 // This operation is expensive as several calls to mincore() are performed.
2558 // Also, this must be called before clearing regions in ReclaimPhase().
2559 // Therefore, we make it conditional on the flag that enables dumping GC
2560 // performance info on shutdown.
2561 if (Runtime::Current()->GetDumpGCPerformanceOnShutdown()) {
2562 std::list<range_t> gc_ranges;
2563 auto add_gc_range = [&gc_ranges](void* start, size_t size) {
2564 void* end = static_cast<char*>(start) + RoundUp(size, kPageSize);
2565 gc_ranges.emplace_back(range_t(start, end));
2566 };
2567
2568 // region space
2569 DCHECK(IsAligned<kPageSize>(region_space_->Limit()));
2570 gc_ranges.emplace_back(range_t(region_space_->Begin(), region_space_->Limit()));
2571 // mark bitmap
2572 add_gc_range(region_space_bitmap_->Begin(), region_space_bitmap_->Size());
2573
2574 // non-moving space
2575 {
2576 DCHECK(IsAligned<kPageSize>(heap_->non_moving_space_->Limit()));
2577 gc_ranges.emplace_back(range_t(heap_->non_moving_space_->Begin(),
2578 heap_->non_moving_space_->Limit()));
2579 // mark bitmap
2580 accounting::ContinuousSpaceBitmap *bitmap = heap_->non_moving_space_->GetMarkBitmap();
2581 add_gc_range(bitmap->Begin(), bitmap->Size());
2582 // live bitmap. Deal with bound bitmaps.
2583 ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
2584 if (heap_->non_moving_space_->HasBoundBitmaps()) {
2585 DCHECK_EQ(bitmap, heap_->non_moving_space_->GetLiveBitmap());
2586 bitmap = heap_->non_moving_space_->GetTempBitmap();
2587 } else {
2588 bitmap = heap_->non_moving_space_->GetLiveBitmap();
2589 }
2590 add_gc_range(bitmap->Begin(), bitmap->Size());
2591 }
2592 // large-object space
2593 if (heap_->GetLargeObjectsSpace()) {
2594 heap_->GetLargeObjectsSpace()->ForEachMemMap([&add_gc_range](const MemMap& map) {
2595 DCHECK(IsAligned<kPageSize>(map.BaseSize()));
2596 add_gc_range(map.BaseBegin(), map.BaseSize());
2597 });
2598 // mark bitmap
2599 accounting::LargeObjectBitmap* bitmap = heap_->GetLargeObjectsSpace()->GetMarkBitmap();
2600 add_gc_range(bitmap->Begin(), bitmap->Size());
2601 // live bitmap
2602 bitmap = heap_->GetLargeObjectsSpace()->GetLiveBitmap();
2603 add_gc_range(bitmap->Begin(), bitmap->Size());
2604 }
2605 // card table
2606 add_gc_range(heap_->GetCardTable()->MemMapBegin(), heap_->GetCardTable()->MemMapSize());
2607 // inter-region refs
2608 if (use_generational_cc_ && !young_gen_) {
2609 // region space
2610 add_gc_range(region_space_inter_region_bitmap_.Begin(),
2611 region_space_inter_region_bitmap_.Size());
2612 // non-moving space
2613 add_gc_range(non_moving_space_inter_region_bitmap_.Begin(),
2614 non_moving_space_inter_region_bitmap_.Size());
2615 }
2616 // Extract RSS using mincore(). Updates the cumulative RSS counter.
2617 ExtractRssFromMincore(&gc_ranges);
2618 }
2619 }
2620
2621 void ConcurrentCopying::ReclaimPhase() {
2622 TimingLogger::ScopedTiming split("ReclaimPhase", GetTimings());
2623 if (kVerboseMode) {
2624 LOG(INFO) << "GC ReclaimPhase";
2625 }
2626 Thread* self = Thread::Current();
2627
2628 {
2629 // Double-check that the mark stack is empty.
2630 // Note: need to set this after VerifyNoFromSpaceRef().
2631 is_asserting_to_space_invariant_ = false;
2632 QuasiAtomic::ThreadFenceForConstructor();
2633 if (kVerboseMode) {
2634 LOG(INFO) << "Issue an empty checkpoint.";
2635 }
2636 IssueEmptyCheckpoint();
2637 // Disable the check.
2638 is_mark_stack_push_disallowed_.store(0, std::memory_order_seq_cst);
2639 if (kUseBakerReadBarrier) {
2640 updated_all_immune_objects_.store(false, std::memory_order_seq_cst);
2641 }
2642 CheckEmptyMarkStack();
2643 }
2644
2645 // Capture RSS at the time when memory usage is at its peak. All GC related
2646 // memory ranges like java heap, card table, bitmap etc. are taken into
2647 // account.
2648 // TODO: We can fetch resident memory for region space directly by going
2649 // through list of allocated regions. This way we can avoid calling mincore on
2650 // the biggest memory range, thereby reducing the cost of this function.
2651 CaptureRssAtPeak();
2652
2653 // Sweep the malloc spaces before clearing the from space since the memory tool mode might
2654 // access the object classes in the from space for dead objects.
2655 {
2656 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
2657 Sweep(/* swap_bitmaps= */ false);
2658 SwapBitmaps();
2659 heap_->UnBindBitmaps();
2660
2661 // The bitmap was cleared at the start of the GC, so there is nothing we need to do here.
2662 DCHECK(region_space_bitmap_ != nullptr);
2663 region_space_bitmap_ = nullptr;
2664 }
2665
2666
2667 {
2668 // Record freed objects.
2669 TimingLogger::ScopedTiming split2("RecordFree", GetTimings());
2670 // Don't include thread-locals that are in the to-space.
2671 const uint64_t from_bytes = region_space_->GetBytesAllocatedInFromSpace();
2672 const uint64_t from_objects = region_space_->GetObjectsAllocatedInFromSpace();
2673 const uint64_t unevac_from_bytes = region_space_->GetBytesAllocatedInUnevacFromSpace();
2674 const uint64_t unevac_from_objects = region_space_->GetObjectsAllocatedInUnevacFromSpace();
2675 uint64_t to_bytes = bytes_moved_.load(std::memory_order_relaxed) + bytes_moved_gc_thread_;
2676 cumulative_bytes_moved_.fetch_add(to_bytes, std::memory_order_relaxed);
2677 uint64_t to_objects = objects_moved_.load(std::memory_order_relaxed) + objects_moved_gc_thread_;
2678 cumulative_objects_moved_.fetch_add(to_objects, std::memory_order_relaxed);
2679 if (kEnableFromSpaceAccountingCheck) {
2680 CHECK_EQ(from_space_num_objects_at_first_pause_, from_objects + unevac_from_objects);
2681 CHECK_EQ(from_space_num_bytes_at_first_pause_, from_bytes + unevac_from_bytes);
2682 }
2683 CHECK_LE(to_objects, from_objects);
2684 // to_bytes <= from_bytes is only approximately true, because objects expand a little when
2685 // copying to non-moving space in near-OOM situations.
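    // copied_live_bytes_ratio_sum_ accumulates, for each GC, the fraction of evacuated from-space
    // bytes that were actually live and copied; DumpPerformanceInfo() later reports its average
    // over gc_count_ as the "copied live bytes ratio".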
2686 if (from_bytes > 0) {
2687 copied_live_bytes_ratio_sum_ += static_cast<float>(to_bytes) / from_bytes;
2688 gc_count_++;
2689 }
2690
2691 // Cleared bytes and objects, populated by the call to RegionSpace::ClearFromSpace below.
2692 uint64_t cleared_bytes;
2693 uint64_t cleared_objects;
2694 {
2695 TimingLogger::ScopedTiming split4("ClearFromSpace", GetTimings());
2696 region_space_->ClearFromSpace(&cleared_bytes, &cleared_objects, /*clear_bitmap*/ !young_gen_);
2697 // `cleared_bytes` and `cleared_objects` may be greater than the from space equivalents since
2698 // RegionSpace::ClearFromSpace may clear empty unevac regions.
2699 CHECK_GE(cleared_bytes, from_bytes);
2700 CHECK_GE(cleared_objects, from_objects);
2701 }
2702 // freed_bytes could conceivably be negative if we fall back to nonmoving space and have to
2703 // pad to a larger size.
2704 int64_t freed_bytes = (int64_t)cleared_bytes - (int64_t)to_bytes;
2705 uint64_t freed_objects = cleared_objects - to_objects;
2706 if (kVerboseMode) {
2707 LOG(INFO) << "RecordFree:"
2708 << " from_bytes=" << from_bytes << " from_objects=" << from_objects
2709 << " unevac_from_bytes=" << unevac_from_bytes
2710 << " unevac_from_objects=" << unevac_from_objects
2711 << " to_bytes=" << to_bytes << " to_objects=" << to_objects
2712 << " freed_bytes=" << freed_bytes << " freed_objects=" << freed_objects
2713 << " from_space size=" << region_space_->FromSpaceSize()
2714 << " unevac_from_space size=" << region_space_->UnevacFromSpaceSize()
2715 << " to_space size=" << region_space_->ToSpaceSize();
2716 LOG(INFO) << "(before) num_bytes_allocated="
2717 << heap_->num_bytes_allocated_.load();
2718 }
2719 RecordFree(ObjectBytePair(freed_objects, freed_bytes));
2720 if (kVerboseMode) {
2721 LOG(INFO) << "(after) num_bytes_allocated="
2722 << heap_->num_bytes_allocated_.load();
2723 }
2724
2725 float reclaimed_bytes_ratio = static_cast<float>(freed_bytes) / num_bytes_allocated_before_gc_;
2726 reclaimed_bytes_ratio_sum_ += reclaimed_bytes_ratio;
2727 }
2728
2729 CheckEmptyMarkStack();
2730
2731 if (heap_->dump_region_info_after_gc_) {
2732 LOG(INFO) << "time=" << region_space_->Time();
2733 region_space_->DumpNonFreeRegions(LOG_STREAM(INFO));
2734 }
2735
2736 if (kVerboseMode) {
2737 LOG(INFO) << "GC end of ReclaimPhase";
2738 }
2739 }
2740
2741 std::string ConcurrentCopying::DumpReferenceInfo(mirror::Object* ref,
2742 const char* ref_name,
2743 const char* indent) {
2744 std::ostringstream oss;
2745 oss << indent << heap_->GetVerification()->DumpObjectInfo(ref, ref_name) << '\n';
2746 if (ref != nullptr) {
2747 if (kUseBakerReadBarrier) {
2748 oss << indent << ref_name << "->GetMarkBit()=" << ref->GetMarkBit() << '\n';
2749 oss << indent << ref_name << "->GetReadBarrierState()=" << ref->GetReadBarrierState() << '\n';
2750 }
2751 }
2752 if (region_space_->HasAddress(ref)) {
2753 oss << indent << "Region containing " << ref_name << ":" << '\n';
2754 region_space_->DumpRegionForObject(oss, ref);
2755 if (region_space_bitmap_ != nullptr) {
2756 oss << indent << "region_space_bitmap_->Test(" << ref_name << ")="
2757 << std::boolalpha << region_space_bitmap_->Test(ref) << std::noboolalpha;
2758 }
2759 }
2760 return oss.str();
2761 }
2762
2763 std::string ConcurrentCopying::DumpHeapReference(mirror::Object* obj,
2764 MemberOffset offset,
2765 mirror::Object* ref) {
2766 std::ostringstream oss;
2767 constexpr const char* kIndent = " ";
2768 oss << kIndent << "Invalid reference: ref=" << ref
2769 << " referenced from: object=" << obj << " offset= " << offset << '\n';
2770 // Information about `obj`.
2771 oss << DumpReferenceInfo(obj, "obj", kIndent) << '\n';
2772 // Information about `ref`.
2773 oss << DumpReferenceInfo(ref, "ref", kIndent);
2774 return oss.str();
2775 }
2776
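// To-space invariant: once the flip is done, no reachable reference may point into an evacuated
// from-space region; every reference must resolve to a to-space object, a marked object in an
// unevac from-space region, or a marked/immune object in a non-moving space. The two
// AssertToSpaceInvariant() overloads below check this for heap references and for GC roots,
// dumping extensive debug state before aborting when a violation is found.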
2777 void ConcurrentCopying::AssertToSpaceInvariant(mirror::Object* obj,
2778 MemberOffset offset,
2779 mirror::Object* ref) {
2780 CHECK_EQ(heap_->collector_type_, kCollectorTypeCC) << static_cast<size_t>(heap_->collector_type_);
2781 if (is_asserting_to_space_invariant_) {
2782 if (ref == nullptr) {
2783 // OK.
2784 return;
2785 } else if (region_space_->HasAddress(ref)) {
2786 // Check to-space invariant in region space (moving space).
2787 using RegionType = space::RegionSpace::RegionType;
2788 space::RegionSpace::RegionType type = region_space_->GetRegionTypeUnsafe(ref);
2789 if (type == RegionType::kRegionTypeToSpace) {
2790 // OK.
2791 return;
2792 } else if (type == RegionType::kRegionTypeUnevacFromSpace) {
2793 if (!IsMarkedInUnevacFromSpace(ref)) {
2794 LOG(FATAL_WITHOUT_ABORT) << "Found unmarked reference in unevac from-space:";
2795 // Remove memory protection from the region space and log debugging information.
2796 region_space_->Unprotect();
2797 LOG(FATAL_WITHOUT_ABORT) << DumpHeapReference(obj, offset, ref);
2798 Thread::Current()->DumpJavaStack(LOG_STREAM(FATAL_WITHOUT_ABORT));
2799 }
2800 CHECK(IsMarkedInUnevacFromSpace(ref)) << ref;
2801 } else {
2802 // Not OK: either a from-space ref or a reference in an unused region.
2803 if (type == RegionType::kRegionTypeFromSpace) {
2804 LOG(FATAL_WITHOUT_ABORT) << "Found from-space reference:";
2805 } else {
2806 LOG(FATAL_WITHOUT_ABORT) << "Found reference in region with type " << type << ":";
2807 }
2808 // Remove memory protection from the region space and log debugging information.
2809 region_space_->Unprotect();
2810 LOG(FATAL_WITHOUT_ABORT) << DumpHeapReference(obj, offset, ref);
2811 if (obj != nullptr) {
2812 LogFromSpaceRefHolder(obj, offset);
2813 LOG(FATAL_WITHOUT_ABORT) << "UNEVAC " << region_space_->IsInUnevacFromSpace(obj) << " "
2814 << obj << " " << obj->GetMarkBit();
2815 if (region_space_->HasAddress(obj)) {
2816 region_space_->DumpRegionForObject(LOG_STREAM(FATAL_WITHOUT_ABORT), obj);
2817 }
2818 LOG(FATAL_WITHOUT_ABORT) << "CARD " << static_cast<size_t>(
2819 *Runtime::Current()->GetHeap()->GetCardTable()->CardFromAddr(
2820 reinterpret_cast<uint8_t*>(obj)));
2821 if (region_space_->HasAddress(obj)) {
2822 LOG(FATAL_WITHOUT_ABORT) << "BITMAP " << region_space_bitmap_->Test(obj);
2823 } else {
2824 accounting::ContinuousSpaceBitmap* mark_bitmap =
2825 heap_mark_bitmap_->GetContinuousSpaceBitmap(obj);
2826 if (mark_bitmap != nullptr) {
2827 LOG(FATAL_WITHOUT_ABORT) << "BITMAP " << mark_bitmap->Test(obj);
2828 } else {
2829 accounting::LargeObjectBitmap* los_bitmap =
2830 heap_mark_bitmap_->GetLargeObjectBitmap(obj);
2831 LOG(FATAL_WITHOUT_ABORT) << "BITMAP " << los_bitmap->Test(obj);
2832 }
2833 }
2834 }
2835 ref->GetLockWord(false).Dump(LOG_STREAM(FATAL_WITHOUT_ABORT));
2836 LOG(FATAL_WITHOUT_ABORT) << "Non-free regions:";
2837 region_space_->DumpNonFreeRegions(LOG_STREAM(FATAL_WITHOUT_ABORT));
2838 PrintFileToLog("/proc/self/maps", LogSeverity::FATAL_WITHOUT_ABORT);
2839 MemMap::DumpMaps(LOG_STREAM(FATAL_WITHOUT_ABORT), /* terse= */ true);
2840 LOG(FATAL) << "Invalid reference " << ref
2841 << " referenced from object " << obj << " at offset " << offset;
2842 }
2843 } else {
2844 // Check to-space invariant in non-moving space.
2845 AssertToSpaceInvariantInNonMovingSpace(obj, ref);
2846 }
2847 }
2848 }
2849
2850 class RootPrinter {
2851 public:
2852 RootPrinter() { }
2853
2854 template <class MirrorType>
2855 ALWAYS_INLINE void VisitRootIfNonNull(mirror::CompressedReference<MirrorType>* root)
2856 REQUIRES_SHARED(Locks::mutator_lock_) {
2857 if (!root->IsNull()) {
2858 VisitRoot(root);
2859 }
2860 }
2861
2862 template <class MirrorType>
2863 void VisitRoot(mirror::Object** root)
2864 REQUIRES_SHARED(Locks::mutator_lock_) {
2865 LOG(FATAL_WITHOUT_ABORT) << "root=" << root << " ref=" << *root;
2866 }
2867
2868 template <class MirrorType>
2869 void VisitRoot(mirror::CompressedReference<MirrorType>* root)
2870 REQUIRES_SHARED(Locks::mutator_lock_) {
2871 LOG(FATAL_WITHOUT_ABORT) << "root=" << root << " ref=" << root->AsMirrorPtr();
2872 }
2873 };
2874
2875 std::string ConcurrentCopying::DumpGcRoot(mirror::Object* ref) {
2876 std::ostringstream oss;
2877 constexpr const char* kIndent = " ";
2878 oss << kIndent << "Invalid GC root: ref=" << ref << '\n';
2879 // Information about `ref`.
2880 oss << DumpReferenceInfo(ref, "ref", kIndent);
2881 return oss.str();
2882 }
2883
2884 void ConcurrentCopying::AssertToSpaceInvariant(GcRootSource* gc_root_source,
2885 mirror::Object* ref) {
2886 CHECK_EQ(heap_->collector_type_, kCollectorTypeCC) << static_cast<size_t>(heap_->collector_type_);
2887 if (is_asserting_to_space_invariant_) {
2888 if (ref == nullptr) {
2889 // OK.
2890 return;
2891 } else if (region_space_->HasAddress(ref)) {
2892 // Check to-space invariant in region space (moving space).
2893 using RegionType = space::RegionSpace::RegionType;
2894 space::RegionSpace::RegionType type = region_space_->GetRegionTypeUnsafe(ref);
2895 if (type == RegionType::kRegionTypeToSpace) {
2896 // OK.
2897 return;
2898 } else if (type == RegionType::kRegionTypeUnevacFromSpace) {
2899 if (!IsMarkedInUnevacFromSpace(ref)) {
2900 LOG(FATAL_WITHOUT_ABORT) << "Found unmarked reference in unevac from-space:";
2901 // Remove memory protection from the region space and log debugging information.
2902 region_space_->Unprotect();
2903 LOG(FATAL_WITHOUT_ABORT) << DumpGcRoot(ref);
2904 }
2905 CHECK(IsMarkedInUnevacFromSpace(ref)) << ref;
2906 } else {
2907 // Not OK: either a from-space ref or a reference in an unused region.
2908 if (type == RegionType::kRegionTypeFromSpace) {
2909 LOG(FATAL_WITHOUT_ABORT) << "Found from-space reference:";
2910 } else {
2911 LOG(FATAL_WITHOUT_ABORT) << "Found reference in region with type " << type << ":";
2912 }
2913 // Remove memory protection from the region space and log debugging information.
2914 region_space_->Unprotect();
2915 LOG(FATAL_WITHOUT_ABORT) << DumpGcRoot(ref);
2916 if (gc_root_source == nullptr) {
2917 // No info.
2918 } else if (gc_root_source->HasArtField()) {
2919 ArtField* field = gc_root_source->GetArtField();
2920 LOG(FATAL_WITHOUT_ABORT) << "gc root in field " << field << " "
2921 << ArtField::PrettyField(field);
2922 RootPrinter root_printer;
2923 field->VisitRoots(root_printer);
2924 } else if (gc_root_source->HasArtMethod()) {
2925 ArtMethod* method = gc_root_source->GetArtMethod();
2926 LOG(FATAL_WITHOUT_ABORT) << "gc root in method " << method << " "
2927 << ArtMethod::PrettyMethod(method);
2928 RootPrinter root_printer;
2929 method->VisitRoots(root_printer, kRuntimePointerSize);
2930 }
2931 ref->GetLockWord(false).Dump(LOG_STREAM(FATAL_WITHOUT_ABORT));
2932 LOG(FATAL_WITHOUT_ABORT) << "Non-free regions:";
2933 region_space_->DumpNonFreeRegions(LOG_STREAM(FATAL_WITHOUT_ABORT));
2934 PrintFileToLog("/proc/self/maps", LogSeverity::FATAL_WITHOUT_ABORT);
2935 MemMap::DumpMaps(LOG_STREAM(FATAL_WITHOUT_ABORT), /* terse= */ true);
2936 LOG(FATAL) << "Invalid reference " << ref;
2937 }
2938 } else {
2939 // Check to-space invariant in non-moving space.
2940 AssertToSpaceInvariantInNonMovingSpace(/* obj= */ nullptr, ref);
2941 }
2942 }
2943 }
2944
2945 void ConcurrentCopying::LogFromSpaceRefHolder(mirror::Object* obj, MemberOffset offset) {
2946 if (kUseBakerReadBarrier) {
2947 LOG(INFO) << "holder=" << obj << " " << obj->PrettyTypeOf()
2948 << " holder rb_state=" << obj->GetReadBarrierState();
2949 } else {
2950 LOG(INFO) << "holder=" << obj << " " << obj->PrettyTypeOf();
2951 }
2952 if (region_space_->IsInFromSpace(obj)) {
2953 LOG(INFO) << "holder is in the from-space.";
2954 } else if (region_space_->IsInToSpace(obj)) {
2955 LOG(INFO) << "holder is in the to-space.";
2956 } else if (region_space_->IsInUnevacFromSpace(obj)) {
2957 LOG(INFO) << "holder is in the unevac from-space.";
2958 if (IsMarkedInUnevacFromSpace(obj)) {
2959 LOG(INFO) << "holder is marked in the region space bitmap.";
2960 } else {
2961 LOG(INFO) << "holder is not marked in the region space bitmap.";
2962 }
2963 } else {
2964 // In a non-moving space.
2965 if (immune_spaces_.ContainsObject(obj)) {
2966 LOG(INFO) << "holder is in an immune image or the zygote space.";
2967 } else {
2968 LOG(INFO) << "holder is in a non-immune, non-moving (or main) space.";
2969 accounting::ContinuousSpaceBitmap* mark_bitmap = heap_->GetNonMovingSpace()->GetMarkBitmap();
2970 accounting::LargeObjectBitmap* los_bitmap = nullptr;
2971 const bool is_los = !mark_bitmap->HasAddress(obj);
2972 if (is_los) {
2973 DCHECK(heap_->GetLargeObjectsSpace() && heap_->GetLargeObjectsSpace()->Contains(obj))
2974 << "obj=" << obj
2975 << " LOS bit map covers the entire lower 4GB address range";
2976 los_bitmap = heap_->GetLargeObjectsSpace()->GetMarkBitmap();
2977 }
2978 if (!is_los && mark_bitmap->Test(obj)) {
2979 LOG(INFO) << "holder is marked in the non-moving space mark bit map.";
2980 } else if (is_los && los_bitmap->Test(obj)) {
2981 LOG(INFO) << "holder is marked in the los bit map.";
2982 } else {
2983 // If ref is on the allocation stack, then it is considered
2984 // mark/alive (but not necessarily on the live stack.)
2985 if (IsOnAllocStack(obj)) {
2986 LOG(INFO) << "holder is on the alloc stack.";
2987 } else {
2988 LOG(INFO) << "holder is not marked or on the alloc stack.";
2989 }
2990 }
2991 }
2992 }
2993 LOG(INFO) << "offset=" << offset.SizeValue();
2994 }
2995
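// An object outside the region space counts as marked if any of the following holds: its Baker
// read-barrier state is gray; its bit is set in the non-moving-space or LOS mark bitmap (the
// bitmaps are only consulted when generational CC is disabled or once done_scanning_ is set); or
// it is still on the thread-local allocation stack, in which case it is newly allocated and
// treated as live.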
2996 bool ConcurrentCopying::IsMarkedInNonMovingSpace(mirror::Object* from_ref) {
2997 DCHECK(!region_space_->HasAddress(from_ref)) << "ref=" << from_ref;
2998 DCHECK(!immune_spaces_.ContainsObject(from_ref)) << "ref=" << from_ref;
2999 if (kUseBakerReadBarrier && from_ref->GetReadBarrierStateAcquire() == ReadBarrier::GrayState()) {
3000 return true;
3001 } else if (!use_generational_cc_ || done_scanning_.load(std::memory_order_acquire)) {
3002 // Read the comment in IsMarkedInUnevacFromSpace()
3003 accounting::ContinuousSpaceBitmap* mark_bitmap = heap_->GetNonMovingSpace()->GetMarkBitmap();
3004 accounting::LargeObjectBitmap* los_bitmap = nullptr;
3005 const bool is_los = !mark_bitmap->HasAddress(from_ref);
3006 if (is_los) {
3007 DCHECK(heap_->GetLargeObjectsSpace() && heap_->GetLargeObjectsSpace()->Contains(from_ref))
3008 << "ref=" << from_ref
3009 << " doesn't belong to non-moving space and large object space doesn't exist";
3010 los_bitmap = heap_->GetLargeObjectsSpace()->GetMarkBitmap();
3011 }
3012 if (is_los ? los_bitmap->Test(from_ref) : mark_bitmap->Test(from_ref)) {
3013 return true;
3014 }
3015 }
3016 return IsOnAllocStack(from_ref);
3017 }
3018
3019 void ConcurrentCopying::AssertToSpaceInvariantInNonMovingSpace(mirror::Object* obj,
3020 mirror::Object* ref) {
3021 CHECK(ref != nullptr);
3022 CHECK(!region_space_->HasAddress(ref)) << "obj=" << obj << " ref=" << ref;
3023 // In a non-moving space. Check that the ref is marked.
3024 if (immune_spaces_.ContainsObject(ref)) {
3025 // Immune space case.
3026 if (kUseBakerReadBarrier) {
3027 // Immune object may not be gray if called from the GC.
3028 if (Thread::Current() == thread_running_gc_ && !gc_grays_immune_objects_) {
3029 return;
3030 }
3031 bool updated_all_immune_objects = updated_all_immune_objects_.load(std::memory_order_seq_cst);
3032 CHECK(updated_all_immune_objects || ref->GetReadBarrierState() == ReadBarrier::GrayState())
3033 << "Unmarked immune space ref. obj=" << obj << " rb_state="
3034 << (obj != nullptr ? obj->GetReadBarrierState() : 0U)
3035 << " ref=" << ref << " ref rb_state=" << ref->GetReadBarrierState()
3036 << " updated_all_immune_objects=" << updated_all_immune_objects;
3037 }
3038 } else {
3039 // Non-moving space and large-object space (LOS) cases.
3040 // If `ref` is on the allocation stack, then it may not be
3041 // marked live, but considered marked/alive (but not
3042 // necessarily on the live stack).
3043 CHECK(IsMarkedInNonMovingSpace(ref))
3044 << "Unmarked ref that's not on the allocation stack."
3045 << " obj=" << obj
3046 << " ref=" << ref
3047 << " rb_state=" << ref->GetReadBarrierState()
3048 << " is_marking=" << std::boolalpha << is_marking_ << std::noboolalpha
3049 << " young_gen=" << std::boolalpha << young_gen_ << std::noboolalpha
3050 << " done_scanning="
3051 << std::boolalpha << done_scanning_.load(std::memory_order_acquire) << std::noboolalpha
3052 << " self=" << Thread::Current();
3053 }
3054 }
3055
3056 // Used to scan ref fields of an object.
3057 template <bool kNoUnEvac>
3058 class ConcurrentCopying::RefFieldsVisitor {
3059 public:
3060 explicit RefFieldsVisitor(ConcurrentCopying* collector, Thread* const thread)
3061 : collector_(collector), thread_(thread) {
3062 // Cannot have `kNoUnEvac` when Generational CC collection is disabled.
3063 DCHECK(!kNoUnEvac || collector_->use_generational_cc_);
3064 }
3065
3066 void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */)
3067 const ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_)
3068 REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
3069 collector_->Process<kNoUnEvac>(obj, offset);
3070 }
3071
3072 void operator()(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref) const
3073 REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
3074 CHECK(klass->IsTypeOfReferenceClass());
3075 collector_->DelayReferenceReferent(klass, ref);
3076 }
3077
3078 void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
3079 ALWAYS_INLINE
3080 REQUIRES_SHARED(Locks::mutator_lock_) {
3081 if (!root->IsNull()) {
3082 VisitRoot(root);
3083 }
3084 }
3085
3086 void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
3087 ALWAYS_INLINE
3088 REQUIRES_SHARED(Locks::mutator_lock_) {
3089 collector_->MarkRoot</*kGrayImmuneObject=*/false>(thread_, root);
3090 }
3091
3092 private:
3093 ConcurrentCopying* const collector_;
3094 Thread* const thread_;
3095 };
3096
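// Scan() runs only on the GC thread (see the DCHECK below) for objects that are already in the
// to-space or marked in an unevac region. It visits every reference field without read barriers
// and funnels each referent through Process(), which marks it and fixes up the field if the
// referent has moved.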
3097 template <bool kNoUnEvac>
3098 inline void ConcurrentCopying::Scan(mirror::Object* to_ref) {
3099 // Cannot have `kNoUnEvac` when Generational CC collection is disabled.
3100 DCHECK(!kNoUnEvac || use_generational_cc_);
3101 if (kDisallowReadBarrierDuringScan && !Runtime::Current()->IsActiveTransaction()) {
3102 // Avoid all read barriers during visit references to help performance.
3103 // Don't do this in transaction mode because we may read the old value of a field which may
3104 // trigger read barriers.
3105 Thread::Current()->ModifyDebugDisallowReadBarrier(1);
3106 }
3107 DCHECK(!region_space_->IsInFromSpace(to_ref));
3108 DCHECK_EQ(Thread::Current(), thread_running_gc_);
3109 RefFieldsVisitor<kNoUnEvac> visitor(this, thread_running_gc_);
3110 // Disable the read barrier for a performance reason.
3111 to_ref->VisitReferences</*kVisitNativeRoots=*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
3112 visitor, visitor);
3113 if (kDisallowReadBarrierDuringScan && !Runtime::Current()->IsActiveTransaction()) {
3114 thread_running_gc_->ModifyDebugDisallowReadBarrier(-1);
3115 }
3116 }
3117
3118 template <bool kNoUnEvac>
3119 inline void ConcurrentCopying::Process(mirror::Object* obj, MemberOffset offset) {
3120 // Cannot have `kNoUnEvac` when Generational CC collection is disabled.
3121 DCHECK(!kNoUnEvac || use_generational_cc_);
3122 DCHECK_EQ(Thread::Current(), thread_running_gc_);
3123 mirror::Object* ref = obj->GetFieldObject<
3124 mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset);
3125 mirror::Object* to_ref = Mark</*kGrayImmuneObject=*/false, kNoUnEvac, /*kFromGCThread=*/true>(
3126 thread_running_gc_,
3127 ref,
3128 /*holder=*/ obj,
3129 offset);
3130 if (to_ref == ref) {
3131 return;
3132 }
3133 // This may fail if the mutator writes to the field at the same time. But it's ok.
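  // If a mutator races with us and stores into the same field, the value it stores was itself
  // obtained through a read barrier, so it should already satisfy the to-space invariant;
  // abandoning our update via the break below therefore still leaves a valid reference behind.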
3134 mirror::Object* expected_ref = ref;
3135 mirror::Object* new_ref = to_ref;
3136 do {
3137 if (expected_ref !=
3138 obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset)) {
3139 // It was updated by the mutator.
3140 break;
3141 }
3142 // Use release CAS to make sure threads reading the reference see contents of copied objects.
3143 } while (!obj->CasFieldObjectWithoutWriteBarrier<false, false, kVerifyNone>(
3144 offset,
3145 expected_ref,
3146 new_ref,
3147 CASMode::kWeak,
3148 std::memory_order_release));
3149 }
3150
3151 // Process some roots.
3152 inline void ConcurrentCopying::VisitRoots(
3153 mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED) {
3154 Thread* const self = Thread::Current();
3155 for (size_t i = 0; i < count; ++i) {
3156 mirror::Object** root = roots[i];
3157 mirror::Object* ref = *root;
3158 mirror::Object* to_ref = Mark(self, ref);
3159 if (to_ref == ref) {
3160 continue;
3161 }
3162 Atomic<mirror::Object*>* addr = reinterpret_cast<Atomic<mirror::Object*>*>(root);
3163 mirror::Object* expected_ref = ref;
3164 mirror::Object* new_ref = to_ref;
3165 do {
3166 if (expected_ref != addr->load(std::memory_order_relaxed)) {
3167 // It was updated by the mutator.
3168 break;
3169 }
3170 } while (!addr->CompareAndSetWeakRelaxed(expected_ref, new_ref));
3171 }
3172 }
3173
3174 template<bool kGrayImmuneObject>
3175 inline void ConcurrentCopying::MarkRoot(Thread* const self,
3176 mirror::CompressedReference<mirror::Object>* root) {
3177 DCHECK(!root->IsNull());
3178 mirror::Object* const ref = root->AsMirrorPtr();
3179 mirror::Object* to_ref = Mark<kGrayImmuneObject>(self, ref);
3180 if (to_ref != ref) {
3181 auto* addr = reinterpret_cast<Atomic<mirror::CompressedReference<mirror::Object>>*>(root);
3182 auto expected_ref = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(ref);
3183 auto new_ref = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(to_ref);
3184 // If the cas fails, then it was updated by the mutator.
3185 do {
3186 if (ref != addr->load(std::memory_order_relaxed).AsMirrorPtr()) {
3187 // It was updated by the mutator.
3188 break;
3189 }
3190 } while (!addr->CompareAndSetWeakRelaxed(expected_ref, new_ref));
3191 }
3192 }
3193
3194 inline void ConcurrentCopying::VisitRoots(
3195 mirror::CompressedReference<mirror::Object>** roots, size_t count,
3196 const RootInfo& info ATTRIBUTE_UNUSED) {
3197 Thread* const self = Thread::Current();
3198 for (size_t i = 0; i < count; ++i) {
3199 mirror::CompressedReference<mirror::Object>* const root = roots[i];
3200 if (!root->IsNull()) {
3201 // kGrayImmuneObject is true because this is used for the thread flip.
3202 MarkRoot</*kGrayImmuneObject=*/true>(self, root);
3203 }
3204 }
3205 }
3206
3207 // Temporarily set gc_grays_immune_objects_ to true in a scope if the current thread is the GC thread.
3208 class ConcurrentCopying::ScopedGcGraysImmuneObjects {
3209 public:
3210 explicit ScopedGcGraysImmuneObjects(ConcurrentCopying* collector)
3211 : collector_(collector), enabled_(false) {
3212 if (kUseBakerReadBarrier &&
3213 collector_->thread_running_gc_ == Thread::Current() &&
3214 !collector_->gc_grays_immune_objects_) {
3215 collector_->gc_grays_immune_objects_ = true;
3216 enabled_ = true;
3217 }
3218 }
3219
3220 ~ScopedGcGraysImmuneObjects() {
3221 if (kUseBakerReadBarrier &&
3222 collector_->thread_running_gc_ == Thread::Current() &&
3223 enabled_) {
3224 DCHECK(collector_->gc_grays_immune_objects_);
3225 collector_->gc_grays_immune_objects_ = false;
3226 }
3227 }
3228
3229 private:
3230 ConcurrentCopying* const collector_;
3231 bool enabled_;
3232 };
3233
3234 // Fill the given memory block with a fake object. Used to fill in a copy of an object that
3235 // was lost in a race.
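// The filler is an int[] whose length is chosen to cover the block, or java.lang.Object for
// blocks too small to hold an array header, so that heap walkers still see a valid (but dead)
// object at that address.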
3236 void ConcurrentCopying::FillWithFakeObject(Thread* const self,
3237 mirror::Object* fake_obj,
3238 size_t byte_size) {
3239 // GC doesn't gray immune objects while scanning immune objects. But we need to trigger the read
3240 // barriers here because we need the updated reference to the int array class, etc. Temporarily set
3241 // gc_grays_immune_objects_ to true so that we won't cause a DCHECK failure in MarkImmuneSpace().
3242 ScopedGcGraysImmuneObjects scoped_gc_gray_immune_objects(this);
3243 CHECK_ALIGNED(byte_size, kObjectAlignment);
3244 memset(fake_obj, 0, byte_size);
3245 // Avoid going through the read barrier since kDisallowReadBarrierDuringScan may be enabled.
3246 // Explicitly mark to make sure to get an object in the to-space.
3247 mirror::Class* int_array_class = down_cast<mirror::Class*>(
3248 Mark(self, GetClassRoot<mirror::IntArray, kWithoutReadBarrier>().Ptr()));
3249 CHECK(int_array_class != nullptr);
3250 if (ReadBarrier::kEnableToSpaceInvariantChecks) {
3251 AssertToSpaceInvariant(nullptr, MemberOffset(0), int_array_class);
3252 }
3253 size_t component_size = int_array_class->GetComponentSize();
3254 CHECK_EQ(component_size, sizeof(int32_t));
3255 size_t data_offset = mirror::Array::DataOffset(component_size).SizeValue();
3256 if (data_offset > byte_size) {
3257 // An int array is too big. Use java.lang.Object.
3258 CHECK(java_lang_Object_ != nullptr);
3259 if (ReadBarrier::kEnableToSpaceInvariantChecks) {
3260 AssertToSpaceInvariant(nullptr, MemberOffset(0), java_lang_Object_);
3261 }
3262 CHECK_EQ(byte_size, java_lang_Object_->GetObjectSize<kVerifyNone>());
3263 fake_obj->SetClass(java_lang_Object_);
3264 CHECK_EQ(byte_size, (fake_obj->SizeOf<kVerifyNone>()));
3265 } else {
3266 // Use an int array.
3267 fake_obj->SetClass(int_array_class);
3268 CHECK(fake_obj->IsArrayInstance<kVerifyNone>());
3269 int32_t length = (byte_size - data_offset) / component_size;
3270 ObjPtr<mirror::Array> fake_arr = fake_obj->AsArray<kVerifyNone>();
3271 fake_arr->SetLength(length);
3272 CHECK_EQ(fake_arr->GetLength(), length)
3273 << "byte_size=" << byte_size << " length=" << length
3274 << " component_size=" << component_size << " data_offset=" << data_offset;
3275 CHECK_EQ(byte_size, (fake_obj->SizeOf<kVerifyNone>()))
3276 << "byte_size=" << byte_size << " length=" << length
3277 << " component_size=" << component_size << " data_offset=" << data_offset;
3278 }
3279 }
3280
3281 // Reuse the memory blocks that held copies of objects lost in copy races.
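// skipped_blocks_map_ maps candidate block sizes to block start addresses, so lower_bound() below
// finds the smallest recorded block that can satisfy `alloc_size`. Any leftover tail is either
// large enough to be refilled with a fake object and re-inserted into the map, or a larger block
// is chosen instead.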
3282 mirror::Object* ConcurrentCopying::AllocateInSkippedBlock(Thread* const self, size_t alloc_size) {
3283 // Try to reuse the blocks that were unused due to CAS failures.
3284 CHECK_ALIGNED(alloc_size, space::RegionSpace::kAlignment);
3285 size_t min_object_size = RoundUp(sizeof(mirror::Object), space::RegionSpace::kAlignment);
3286 size_t byte_size;
3287 uint8_t* addr;
3288 {
3289 MutexLock mu(self, skipped_blocks_lock_);
3290 auto it = skipped_blocks_map_.lower_bound(alloc_size);
3291 if (it == skipped_blocks_map_.end()) {
3292 // Not found.
3293 return nullptr;
3294 }
3295 byte_size = it->first;
3296 CHECK_GE(byte_size, alloc_size);
3297 if (byte_size > alloc_size && byte_size - alloc_size < min_object_size) {
3298 // If remainder would be too small for a fake object, retry with a larger request size.
3299 it = skipped_blocks_map_.lower_bound(alloc_size + min_object_size);
3300 if (it == skipped_blocks_map_.end()) {
3301 // Not found.
3302 return nullptr;
3303 }
3304 CHECK_ALIGNED(it->first - alloc_size, space::RegionSpace::kAlignment);
3305 CHECK_GE(it->first - alloc_size, min_object_size)
3306 << "byte_size=" << byte_size << " it->first=" << it->first << " alloc_size=" << alloc_size;
3307 }
3308 // Found a block.
3309 CHECK(it != skipped_blocks_map_.end());
3310 byte_size = it->first;
3311 addr = it->second;
3312 CHECK_GE(byte_size, alloc_size);
3313 CHECK(region_space_->IsInToSpace(reinterpret_cast<mirror::Object*>(addr)));
3314 CHECK_ALIGNED(byte_size, space::RegionSpace::kAlignment);
3315 if (kVerboseMode) {
3316 LOG(INFO) << "Reusing skipped bytes : " << reinterpret_cast<void*>(addr) << ", " << byte_size;
3317 }
3318 skipped_blocks_map_.erase(it);
3319 }
3320 memset(addr, 0, byte_size);
3321 if (byte_size > alloc_size) {
3322 // Return the remainder to the map.
3323 CHECK_ALIGNED(byte_size - alloc_size, space::RegionSpace::kAlignment);
3324 CHECK_GE(byte_size - alloc_size, min_object_size);
3325 // FillWithFakeObject may mark an object, avoid holding skipped_blocks_lock_ to prevent lock
3326 // violation and possible deadlock. The deadlock case is a recursive case:
3327 // FillWithFakeObject -> Mark(IntArray.class) -> Copy -> AllocateInSkippedBlock.
3328 FillWithFakeObject(self,
3329 reinterpret_cast<mirror::Object*>(addr + alloc_size),
3330 byte_size - alloc_size);
3331 CHECK(region_space_->IsInToSpace(reinterpret_cast<mirror::Object*>(addr + alloc_size)));
3332 {
3333 MutexLock mu(self, skipped_blocks_lock_);
3334 skipped_blocks_map_.insert(std::make_pair(byte_size - alloc_size, addr + alloc_size));
3335 }
3336 }
3337 return reinterpret_cast<mirror::Object*>(addr);
3338 }
3339
3340 mirror::Object* ConcurrentCopying::Copy(Thread* const self,
3341 mirror::Object* from_ref,
3342 mirror::Object* holder,
3343 MemberOffset offset) {
3344 DCHECK(region_space_->IsInFromSpace(from_ref));
3345 // If the class pointer is null, the object is invalid. This could occur for a dangling pointer
3346 // from a previous GC that is either inside or outside the allocated region.
3347 mirror::Class* klass = from_ref->GetClass<kVerifyNone, kWithoutReadBarrier>();
3348 if (UNLIKELY(klass == nullptr)) {
3349 // Remove memory protection from the region space and log debugging information.
3350 region_space_->Unprotect();
3351 heap_->GetVerification()->LogHeapCorruption(holder, offset, from_ref, /* fatal= */ true);
3352 }
3353 // There must not be a read barrier to avoid nested RB that might violate the to-space invariant.
3354 // Note that from_ref is a from space ref so the SizeOf() call will access the from-space meta
3355 // objects, but it's ok and necessary.
3356 size_t obj_size = from_ref->SizeOf<kDefaultVerifyFlags>();
3357 size_t region_space_alloc_size = (obj_size <= space::RegionSpace::kRegionSize)
3358 ? RoundUp(obj_size, space::RegionSpace::kAlignment)
3359 : RoundUp(obj_size, space::RegionSpace::kRegionSize);
3360 size_t region_space_bytes_allocated = 0U;
3361 size_t non_moving_space_bytes_allocated = 0U;
3362 size_t bytes_allocated = 0U;
3363 size_t unused_size;
3364 bool fall_back_to_non_moving = false;
3365 mirror::Object* to_ref = region_space_->AllocNonvirtual</*kForEvac=*/ true>(
3366 region_space_alloc_size, &region_space_bytes_allocated, nullptr, &unused_size);
3367 bytes_allocated = region_space_bytes_allocated;
3368 if (LIKELY(to_ref != nullptr)) {
3369 DCHECK_EQ(region_space_alloc_size, region_space_bytes_allocated);
3370 } else {
3371 // Failed to allocate in the region space. Try the skipped blocks.
3372 to_ref = AllocateInSkippedBlock(self, region_space_alloc_size);
3373 if (to_ref != nullptr) {
3374 // Succeeded in allocating in a skipped block.
3375 if (heap_->use_tlab_) {
3376 // This is necessary for the tlab case as it's not accounted in the space.
3377 region_space_->RecordAlloc(to_ref);
3378 }
3379 bytes_allocated = region_space_alloc_size;
3380 heap_->num_bytes_allocated_.fetch_sub(bytes_allocated, std::memory_order_relaxed);
3381 to_space_bytes_skipped_.fetch_sub(bytes_allocated, std::memory_order_relaxed);
3382 to_space_objects_skipped_.fetch_sub(1, std::memory_order_relaxed);
3383 } else {
3384 // Fall back to the non-moving space.
3385 fall_back_to_non_moving = true;
3386 if (kVerboseMode) {
3387 LOG(INFO) << "Out of memory in the to-space. Fall back to non-moving. skipped_bytes="
3388 << to_space_bytes_skipped_.load(std::memory_order_relaxed)
3389 << " skipped_objects="
3390 << to_space_objects_skipped_.load(std::memory_order_relaxed);
3391 }
3392 to_ref = heap_->non_moving_space_->Alloc(
3393 self, obj_size, &non_moving_space_bytes_allocated, nullptr, &unused_size);
3394 if (UNLIKELY(to_ref == nullptr)) {
3395 LOG(FATAL_WITHOUT_ABORT) << "Fall-back non-moving space allocation failed for a "
3396 << obj_size << " byte object in region type "
3397 << region_space_->GetRegionType(from_ref);
3398 LOG(FATAL) << "Object address=" << from_ref << " type=" << from_ref->PrettyTypeOf();
3399 }
3400 bytes_allocated = non_moving_space_bytes_allocated;
3401 }
3402 }
3403 DCHECK(to_ref != nullptr);
3404
3405 // Copy the object excluding the lock word since that is handled in the loop.
3406 to_ref->SetClass(klass);
3407 const size_t kObjectHeaderSize = sizeof(mirror::Object);
3408 DCHECK_GE(obj_size, kObjectHeaderSize);
3409 static_assert(kObjectHeaderSize == sizeof(mirror::HeapReference<mirror::Class>) +
3410 sizeof(LockWord),
3411 "Object header size does not match");
3412 // Memcpy can tear words since it may copy byte by byte. It is only safe to do this since the
3413 // object in the from space is immutable other than the lock word. b/31423258
3414 memcpy(reinterpret_cast<uint8_t*>(to_ref) + kObjectHeaderSize,
3415 reinterpret_cast<const uint8_t*>(from_ref) + kObjectHeaderSize,
3416 obj_size - kObjectHeaderSize);
3417
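// Several threads (GC or mutators taking the read-barrier slow path) may race to evacuate the
// same from-space object. Whoever first installs a forwarding-address lock word in the from-space
// object wins; the losers discard their copy by turning it into a fake object (recycling or
// freeing the memory) and adopt the winner's to-space address instead.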
3418 // Attempt to install the forward pointer. This is in a loop as the
3419 // lock word atomic write can fail.
3420 while (true) {
3421 LockWord old_lock_word = from_ref->GetLockWord(false);
3422
3423 if (old_lock_word.GetState() == LockWord::kForwardingAddress) {
3424 // Lost the race. Another thread (either GC or mutator) stored
3425 // the forwarding pointer first. Make the lost copy (to_ref)
3426 // look like a valid but dead (fake) object and keep it for
3427 // future reuse.
3428 FillWithFakeObject(self, to_ref, bytes_allocated);
3429 if (!fall_back_to_non_moving) {
3430 DCHECK(region_space_->IsInToSpace(to_ref));
3431 if (bytes_allocated > space::RegionSpace::kRegionSize) {
3432 // Free the large alloc.
3433 region_space_->FreeLarge</*kForEvac=*/ true>(to_ref, bytes_allocated);
3434 } else {
3435 // Record the lost copy for later reuse.
3436 heap_->num_bytes_allocated_.fetch_add(bytes_allocated, std::memory_order_relaxed);
3437 to_space_bytes_skipped_.fetch_add(bytes_allocated, std::memory_order_relaxed);
3438 to_space_objects_skipped_.fetch_add(1, std::memory_order_relaxed);
3439 MutexLock mu(self, skipped_blocks_lock_);
3440 skipped_blocks_map_.insert(std::make_pair(bytes_allocated,
3441 reinterpret_cast<uint8_t*>(to_ref)));
3442 }
3443 } else {
3444 DCHECK(heap_->non_moving_space_->HasAddress(to_ref));
3445 DCHECK_EQ(bytes_allocated, non_moving_space_bytes_allocated);
3446 // Free the non-moving-space chunk.
3447 heap_->non_moving_space_->Free(self, to_ref);
3448 }
3449
3450 // Get the winner's forward ptr.
3451 mirror::Object* lost_fwd_ptr = to_ref;
3452 to_ref = reinterpret_cast<mirror::Object*>(old_lock_word.ForwardingAddress());
3453 CHECK(to_ref != nullptr);
3454 CHECK_NE(to_ref, lost_fwd_ptr);
3455 CHECK(region_space_->IsInToSpace(to_ref) || heap_->non_moving_space_->HasAddress(to_ref))
3456 << "to_ref=" << to_ref << " " << heap_->DumpSpaces();
3457 CHECK_NE(to_ref->GetLockWord(false).GetState(), LockWord::kForwardingAddress);
3458 return to_ref;
3459 }
3460
3461 // Copy the old lock word over since we did not copy it yet.
3462 to_ref->SetLockWord(old_lock_word, false);
3463 // Set the gray ptr.
3464 if (kUseBakerReadBarrier) {
3465 to_ref->SetReadBarrierState(ReadBarrier::GrayState());
3466 }
3467
3468 // Do a fence to prevent the field CAS in ConcurrentCopying::Process from possibly reordering
3469 // before the object copy.
3470 std::atomic_thread_fence(std::memory_order_release);
3471
3472 LockWord new_lock_word = LockWord::FromForwardingAddress(reinterpret_cast<size_t>(to_ref));
3473
3474 // Try to atomically write the fwd ptr.
3475 bool success = from_ref->CasLockWord(old_lock_word,
3476 new_lock_word,
3477 CASMode::kWeak,
3478 std::memory_order_relaxed);
3479 if (LIKELY(success)) {
3480 // The CAS succeeded.
3481 DCHECK(thread_running_gc_ != nullptr);
3482 if (LIKELY(self == thread_running_gc_)) {
3483 objects_moved_gc_thread_ += 1;
3484 bytes_moved_gc_thread_ += bytes_allocated;
3485 } else {
3486 objects_moved_.fetch_add(1, std::memory_order_relaxed);
3487 bytes_moved_.fetch_add(bytes_allocated, std::memory_order_relaxed);
3488 }
3489
3490 if (LIKELY(!fall_back_to_non_moving)) {
3491 DCHECK(region_space_->IsInToSpace(to_ref));
3492 } else {
3493 DCHECK(heap_->non_moving_space_->HasAddress(to_ref));
3494 DCHECK_EQ(bytes_allocated, non_moving_space_bytes_allocated);
3495 if (!use_generational_cc_ || !young_gen_) {
3496 // Mark it in the live bitmap.
3497 CHECK(!heap_->non_moving_space_->GetLiveBitmap()->AtomicTestAndSet(to_ref));
3498 }
3499 if (!kUseBakerReadBarrier) {
3500 // Mark it in the mark bitmap.
3501 CHECK(!heap_->non_moving_space_->GetMarkBitmap()->AtomicTestAndSet(to_ref));
3502 }
3503 }
3504 if (kUseBakerReadBarrier) {
3505 DCHECK(to_ref->GetReadBarrierState() == ReadBarrier::GrayState());
3506 }
3507 DCHECK(GetFwdPtr(from_ref) == to_ref);
3508 CHECK_NE(to_ref->GetLockWord(false).GetState(), LockWord::kForwardingAddress);
3509 PushOntoMarkStack(self, to_ref);
3510 return to_ref;
3511 } else {
3512 // The CAS failed. It may have lost the race or may have failed
3513 // due to monitor/hashcode ops. Either way, retry.
3514 }
3515 }
3516 }
3517
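// Returns the to-space address if `from_ref` is already marked or forwarded, and nullptr if it is
// not yet known to be marked; unlike Mark(), this never marks or copies anything itself.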
3518 mirror::Object* ConcurrentCopying::IsMarked(mirror::Object* from_ref) {
3519 DCHECK(from_ref != nullptr);
3520 space::RegionSpace::RegionType rtype = region_space_->GetRegionType(from_ref);
3521 if (rtype == space::RegionSpace::RegionType::kRegionTypeToSpace) {
3522 // It's already marked.
3523 return from_ref;
3524 }
3525 mirror::Object* to_ref;
3526 if (rtype == space::RegionSpace::RegionType::kRegionTypeFromSpace) {
3527 to_ref = GetFwdPtr(from_ref);
3528 DCHECK(to_ref == nullptr || region_space_->IsInToSpace(to_ref) ||
3529 heap_->non_moving_space_->HasAddress(to_ref))
3530 << "from_ref=" << from_ref << " to_ref=" << to_ref;
3531 } else if (rtype == space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace) {
3532 if (IsMarkedInUnevacFromSpace(from_ref)) {
3533 to_ref = from_ref;
3534 } else {
3535 to_ref = nullptr;
3536 }
3537 } else {
3538 // At this point, `from_ref` should not be in the region space
3539 // (i.e. within an "unused" region).
3540 DCHECK(!region_space_->HasAddress(from_ref)) << from_ref;
3541 // from_ref is in a non-moving space.
3542 if (immune_spaces_.ContainsObject(from_ref)) {
3543 // An immune object is alive.
3544 to_ref = from_ref;
3545 } else {
3546 // Non-immune non-moving space. Use the mark bitmap.
3547 if (IsMarkedInNonMovingSpace(from_ref)) {
3548 // Already marked.
3549 to_ref = from_ref;
3550 } else {
3551 to_ref = nullptr;
3552 }
3553 }
3554 }
3555 return to_ref;
3556 }
3557
3558 bool ConcurrentCopying::IsOnAllocStack(mirror::Object* ref) {
3559 // TODO: Explain why this is here. What release operation does it pair with?
3560 std::atomic_thread_fence(std::memory_order_acquire);
3561 accounting::ObjectStack* alloc_stack = GetAllocationStack();
3562 return alloc_stack->Contains(ref);
3563 }
3564
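// Marking for objects that never move: while generational CC has not finished scanning
// (done_scanning_ is false), the Baker read-barrier state itself is used as the mark bit;
// otherwise marking is recorded in the non-moving-space or LOS mark bitmap. Objects still on the
// allocation stack are left untouched since they are implicitly live.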
3565 mirror::Object* ConcurrentCopying::MarkNonMoving(Thread* const self,
3566 mirror::Object* ref,
3567 mirror::Object* holder,
3568 MemberOffset offset) {
3569 // ref is in a non-moving space (from_ref == to_ref).
3570 DCHECK(!region_space_->HasAddress(ref)) << ref;
3571 DCHECK(!immune_spaces_.ContainsObject(ref));
3572 // Use the mark bitmap.
3573 accounting::ContinuousSpaceBitmap* mark_bitmap = heap_->GetNonMovingSpace()->GetMarkBitmap();
3574 accounting::LargeObjectBitmap* los_bitmap = nullptr;
3575 const bool is_los = !mark_bitmap->HasAddress(ref);
3576 if (is_los) {
3577 if (!IsAligned<kPageSize>(ref)) {
3578 // Ref is a large object that is not aligned, it must be heap
3579 // corruption. Remove memory protection and dump data before
3580 // AtomicSetReadBarrierState since it will fault if the address is not
3581 // valid.
3582 region_space_->Unprotect();
3583 heap_->GetVerification()->LogHeapCorruption(holder, offset, ref, /* fatal= */ true);
3584 }
3585 DCHECK(heap_->GetLargeObjectsSpace())
3586 << "ref=" << ref
3587 << " doesn't belong to non-moving space and large object space doesn't exist";
3588 los_bitmap = heap_->GetLargeObjectsSpace()->GetMarkBitmap();
3589 DCHECK(los_bitmap->HasAddress(ref));
3590 }
3591 if (use_generational_cc_) {
3592 // The sticky-bit CC collector is only compatible with Baker-style read barriers.
3593 DCHECK(kUseBakerReadBarrier);
3594 // Not done scanning, use AtomicSetReadBarrierState.
3595 if (!done_scanning_.load(std::memory_order_acquire)) {
3596 // Since the mark bitmap is still filled in from the last GC, we cannot use it or else the
3597 // mutator may see references to the from space. Instead, use the Baker pointer itself as
3598 // the mark bit.
3599 //
3600 // We need to avoid marking objects that are on allocation stack as that will lead to a
3601 // situation (after this GC cycle is finished) where some object(s) are on both allocation
3602 // stack and live bitmap. This leads to visiting the same object(s) twice during a heapdump
3603 // (b/117426281).
3604 if (!IsOnAllocStack(ref) &&
3605 ref->AtomicSetReadBarrierState(ReadBarrier::NonGrayState(), ReadBarrier::GrayState())) {
3606 // TODO: We don't actually need to scan this object later, we just need to clear the gray
3607 // bit.
3608 // We don't need to mark newly allocated objects (those in allocation stack) as they can
3609 // only point to to-space objects. Also, they are considered live till the next GC cycle.
3610 PushOntoMarkStack(self, ref);
3611 }
3612 return ref;
3613 }
3614 }
3615 if (!is_los && mark_bitmap->Test(ref)) {
3616 // Already marked.
3617 } else if (is_los && los_bitmap->Test(ref)) {
3618 // Already marked in LOS.
3619 } else if (IsOnAllocStack(ref)) {
3620 // If it's on the allocation stack, it's considered marked. Keep it white (non-gray).
3621 // Objects on the allocation stack need not be marked.
3622 if (!is_los) {
3623 DCHECK(!mark_bitmap->Test(ref));
3624 } else {
3625 DCHECK(!los_bitmap->Test(ref));
3626 }
3627 if (kUseBakerReadBarrier) {
3628 DCHECK_EQ(ref->GetReadBarrierState(), ReadBarrier::NonGrayState());
3629 }
3630 } else {
3631 // Not marked nor on the allocation stack. Try to mark it.
3632 // This may or may not succeed, which is ok.
3633 bool success = false;
3634 if (kUseBakerReadBarrier) {
3635 success = ref->AtomicSetReadBarrierState(ReadBarrier::NonGrayState(),
3636 ReadBarrier::GrayState());
3637 } else {
3638 success = is_los ?
3639 !los_bitmap->AtomicTestAndSet(ref) :
3640 !mark_bitmap->AtomicTestAndSet(ref);
3641 }
3642 if (success) {
3643 if (kUseBakerReadBarrier) {
3644 DCHECK_EQ(ref->GetReadBarrierState(), ReadBarrier::GrayState());
3645 }
3646 PushOntoMarkStack(self, ref);
3647 }
3648 }
3649 return ref;
3650 }
3651
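// Runs on the GC thread after reclamation: checks that the mark stacks have drained, clears
// region space cards or the inter-region bitmaps depending on configuration, drops the
// skipped-block pool, clears mark bitmaps, optionally filters mod-union table cards, resets the
// read-barrier mark-bit stack, and folds the read-barrier slow-path measurements into the
// cumulative histogram.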
3652 void ConcurrentCopying::FinishPhase() {
3653 Thread* const self = Thread::Current();
3654 {
3655 MutexLock mu(self, mark_stack_lock_);
3656 CHECK(revoked_mark_stacks_.empty());
3657 CHECK_EQ(pooled_mark_stacks_.size(), kMarkStackPoolSize);
3658 }
3659 // kVerifyNoMissingCardMarks relies on the region space cards not being cleared to avoid false
3660 // positives.
3661 if (!kVerifyNoMissingCardMarks && !use_generational_cc_) {
3662 TimingLogger::ScopedTiming split("ClearRegionSpaceCards", GetTimings());
3663 // We do not currently use the region space cards at all, madvise them away to save ram.
3664 heap_->GetCardTable()->ClearCardRange(region_space_->Begin(), region_space_->Limit());
3665 } else if (use_generational_cc_ && !young_gen_) {
3666 region_space_inter_region_bitmap_.Clear();
3667 non_moving_space_inter_region_bitmap_.Clear();
3668 }
3669 {
3670 MutexLock mu(self, skipped_blocks_lock_);
3671 skipped_blocks_map_.clear();
3672 }
3673 {
3674 ReaderMutexLock mu(self, *Locks::mutator_lock_);
3675 {
3676 WriterMutexLock mu2(self, *Locks::heap_bitmap_lock_);
3677 heap_->ClearMarkedObjects();
3678 }
3679 if (kUseBakerReadBarrier && kFilterModUnionCards) {
3680 TimingLogger::ScopedTiming split("FilterModUnionCards", GetTimings());
3681 ReaderMutexLock mu2(self, *Locks::heap_bitmap_lock_);
3682 for (space::ContinuousSpace* space : immune_spaces_.GetSpaces()) {
3683 DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
3684 accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
3685 // Filter out cards that don't need to be set.
3686 if (table != nullptr) {
3687 table->FilterCards();
3688 }
3689 }
3690 }
3691 if (kUseBakerReadBarrier) {
3692 TimingLogger::ScopedTiming split("EmptyRBMarkBitStack", GetTimings());
3693 DCHECK(rb_mark_bit_stack_ != nullptr);
3694 const auto* limit = rb_mark_bit_stack_->End();
3695 for (StackReference<mirror::Object>* it = rb_mark_bit_stack_->Begin(); it != limit; ++it) {
3696 CHECK(it->AsMirrorPtr()->AtomicSetMarkBit(1, 0))
3697 << "rb_mark_bit_stack_->Begin()" << rb_mark_bit_stack_->Begin() << '\n'
3698 << "rb_mark_bit_stack_->End()" << rb_mark_bit_stack_->End() << '\n'
3699 << "rb_mark_bit_stack_->IsFull()"
3700 << std::boolalpha << rb_mark_bit_stack_->IsFull() << std::noboolalpha << '\n'
3701 << DumpReferenceInfo(it->AsMirrorPtr(), "*it");
3702 }
3703 rb_mark_bit_stack_->Reset();
3704 }
3705 }
3706 if (measure_read_barrier_slow_path_) {
3707 MutexLock mu(self, rb_slow_path_histogram_lock_);
3708 rb_slow_path_time_histogram_.AdjustAndAddValue(
3709 rb_slow_path_ns_.load(std::memory_order_relaxed));
3710 rb_slow_path_count_total_ += rb_slow_path_count_.load(std::memory_order_relaxed);
3711 rb_slow_path_count_gc_total_ += rb_slow_path_count_gc_.load(std::memory_order_relaxed);
3712 }
3713 }
3714
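// Used by the reference processor: returns true if the referent is null or already marked, and in
// that case also updates the field in place when the referent has moved (with a weak CAS loop if
// `do_atomic_update`, otherwise a volatile store); returns false if the referent is not yet
// marked.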
3715 bool ConcurrentCopying::IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* field,
3716 bool do_atomic_update) {
3717 mirror::Object* from_ref = field->AsMirrorPtr();
3718 if (from_ref == nullptr) {
3719 return true;
3720 }
3721 mirror::Object* to_ref = IsMarked(from_ref);
3722 if (to_ref == nullptr) {
3723 return false;
3724 }
3725 if (from_ref != to_ref) {
3726 if (do_atomic_update) {
3727 do {
3728 if (field->AsMirrorPtr() != from_ref) {
3729 // Concurrently overwritten by a mutator.
3730 break;
3731 }
3732 } while (!field->CasWeakRelaxed(from_ref, to_ref));
3733 } else {
3734 // TODO: Why is this seq_cst when the above is relaxed? Document memory ordering.
3735 field->Assign</* kIsVolatile= */ true>(to_ref);
3736 }
3737 }
3738 return true;
3739 }
3740
3741 mirror::Object* ConcurrentCopying::MarkObject(mirror::Object* from_ref) {
3742 return Mark(Thread::Current(), from_ref);
3743 }
3744
3745 void ConcurrentCopying::DelayReferenceReferent(ObjPtr<mirror::Class> klass,
3746 ObjPtr<mirror::Reference> reference) {
3747 heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, reference, this);
3748 }
3749
3750 void ConcurrentCopying::ProcessReferences(Thread* self) {
3751 TimingLogger::ScopedTiming split("ProcessReferences", GetTimings());
3752 // We don't really need to hold the heap bitmap lock as we use CAS to mark in bitmaps.
3753 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
3754 GetHeap()->GetReferenceProcessor()->ProcessReferences(
3755 /*concurrent=*/ true, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(), this);
3756 }
3757
3758 void ConcurrentCopying::RevokeAllThreadLocalBuffers() {
3759 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
3760 region_space_->RevokeAllThreadLocalBuffers();
3761 }
3762
3763 mirror::Object* ConcurrentCopying::MarkFromReadBarrierWithMeasurements(Thread* const self,
3764 mirror::Object* from_ref) {
3765 if (self != thread_running_gc_) {
3766 rb_slow_path_count_.fetch_add(1u, std::memory_order_relaxed);
3767 } else {
3768 rb_slow_path_count_gc_.fetch_add(1u, std::memory_order_relaxed);
3769 }
3770 ScopedTrace tr(__FUNCTION__);
3771 const uint64_t start_time = measure_read_barrier_slow_path_ ? NanoTime() : 0u;
3772 mirror::Object* ret =
3773 Mark</*kGrayImmuneObject=*/true, /*kNoUnEvac=*/false, /*kFromGCThread=*/false>(self,
3774 from_ref);
3775 if (measure_read_barrier_slow_path_) {
3776 rb_slow_path_ns_.fetch_add(NanoTime() - start_time, std::memory_order_relaxed);
3777 }
3778 return ret;
3779 }
3780
3781 void ConcurrentCopying::DumpPerformanceInfo(std::ostream& os) {
3782 GarbageCollector::DumpPerformanceInfo(os);
3783 size_t num_gc_cycles = GetCumulativeTimings().GetIterations();
3784 MutexLock mu(Thread::Current(), rb_slow_path_histogram_lock_);
3785 if (rb_slow_path_time_histogram_.SampleSize() > 0) {
3786 Histogram<uint64_t>::CumulativeData cumulative_data;
3787 rb_slow_path_time_histogram_.CreateHistogram(&cumulative_data);
3788 rb_slow_path_time_histogram_.PrintConfidenceIntervals(os, 0.99, cumulative_data);
3789 }
3790 if (rb_slow_path_count_total_ > 0) {
3791 os << "Slow path count " << rb_slow_path_count_total_ << "\n";
3792 }
3793 if (rb_slow_path_count_gc_total_ > 0) {
3794 os << "GC slow path count " << rb_slow_path_count_gc_total_ << "\n";
3795 }
3796
3797 os << "Average " << (young_gen_ ? "minor" : "major") << " GC reclaim bytes ratio "
3798 << (reclaimed_bytes_ratio_sum_ / num_gc_cycles) << " over " << num_gc_cycles
3799 << " GC cycles\n";
3800
3801 os << "Average " << (young_gen_ ? "minor" : "major") << " GC copied live bytes ratio "
3802 << (copied_live_bytes_ratio_sum_ / gc_count_) << " over " << gc_count_
3803 << " " << (young_gen_ ? "minor" : "major") << " GCs\n";
3804
3805 os << "Cumulative bytes moved "
3806 << cumulative_bytes_moved_.load(std::memory_order_relaxed) << "\n";
3807 os << "Cumulative objects moved "
3808 << cumulative_objects_moved_.load(std::memory_order_relaxed) << "\n";
3809
3810 os << "Peak regions allocated "
3811 << region_space_->GetMaxPeakNumNonFreeRegions() << " ("
3812 << PrettySize(region_space_->GetMaxPeakNumNonFreeRegions() * space::RegionSpace::kRegionSize)
3813 << ") / " << region_space_->GetNumRegions() / 2 << " ("
3814 << PrettySize(region_space_->GetNumRegions() * space::RegionSpace::kRegionSize / 2)
3815 << ")\n";
3816 if (!young_gen_) {
3817 os << "Total madvise time " << PrettyDuration(region_space_->GetMadviseTime()) << "\n";
3818 }
3819 }
3820
3821 } // namespace collector
3822 } // namespace gc
3823 } // namespace art
3824