Lines Matching refs:self

124 Thread* self = Thread::Current(); in ConcurrentCopying() local
126 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_); in ConcurrentCopying()
134 MutexLock mu(self, mark_stack_lock_); in ConcurrentCopying()
158 Thread* const self = Thread::Current(); in MarkHeapReference() local
165 mirror::Object* to_ref = Mark(self, from_ref); in MarkHeapReference()
178 field->Assign(Mark(self, field->AsMirrorPtr())); in MarkHeapReference()
190 Thread* self = Thread::Current(); in RunPhases() local
191 thread_running_gc_ = self; in RunPhases()
192 Locks::mutator_lock_->AssertNotHeld(self); in RunPhases()
194 ReaderMutexLock mu(self, *Locks::mutator_lock_); in RunPhases()
208 ReaderMutexLock mu(self, *Locks::mutator_lock_); in RunPhases()
213 ReaderMutexLock mu(self, *Locks::mutator_lock_); in RunPhases()
231 ReaderMutexLock mu(self, *Locks::mutator_lock_); in RunPhases()
247 Thread* self = Thread::Current(); in Run() local
248 DCHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc) in Run()
249 << thread->GetState() << " thread " << thread << " self " << self; in Run()
254 concurrent_copying_->GetBarrier().Pass(self); in Run()
266 void Run(Thread* self ATTRIBUTE_UNUSED) override REQUIRES(Locks::thread_list_lock_) { in Run()
278 Thread* const self = Thread::Current(); in ActivateReadBarrierEntrypoints() local
281 gc_barrier_->Init(self, 0); in ActivateReadBarrierEntrypoints()
289 ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun); in ActivateReadBarrierEntrypoints()
290 gc_barrier_->Increment(self, barrier_count); in ActivateReadBarrierEntrypoints()
317 Thread* self = Thread::Current(); in BindBitmaps() local
318 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); in BindBitmaps()
442 Thread* self = Thread::Current(); in Run() local
443 CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc) in Run()
444 << thread->GetState() << " thread " << thread << " self " << self; in Run()
463 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_); in Run()
467 concurrent_copying_->GetBarrier().Pass(self); in Run()
474 Thread* self = Thread::Current(); in VisitRoots() local
479 mirror::Object* to_ref = concurrent_copying_->Mark(self, ref); in VisitRoots()
491 Thread* self = Thread::Current(); in VisitRoots() local
496 mirror::Object* to_ref = concurrent_copying_->Mark(self, ref); in VisitRoots()
520 Thread* self = Thread::Current(); in Run() local
524 CHECK_EQ(thread, self); in Run()
525 Locks::mutator_lock_->AssertExclusiveHeld(self); in Run()
543 cc->RecordLiveStackFreezeSize(self); in Run()
738 Thread* self = Thread::Current(); in FlipThreadRoots() local
739 Locks::mutator_lock_->AssertNotHeld(self); in FlipThreadRoots()
740 gc_barrier_->Init(self, 0); in FlipThreadRoots()
748 ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun); in FlipThreadRoots()
749 gc_barrier_->Increment(self, barrier_count); in FlipThreadRoots()
763 explicit GrayImmuneObjectVisitor(Thread* self) : self_(self) {} in GrayImmuneObjectVisitor() argument
790 Thread* const self = Thread::Current(); in GrayAllDirtyImmuneObjects() local
792 VisitorType visitor(self); in GrayAllDirtyImmuneObjects()
793 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); in GrayAllDirtyImmuneObjects()
831 Thread* const self = Thread::Current(); in GrayAllNewlyDirtyImmuneObjects() local
832 VisitorType visitor(self); in GrayAllNewlyDirtyImmuneObjects()
862 void ConcurrentCopying::RecordLiveStackFreezeSize(Thread* self) { in RecordLiveStackFreezeSize() argument
863 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); in RecordLiveStackFreezeSize()
913 explicit CaptureRootsForMarkingVisitor(ConcurrentCopying* cc, Thread* self) in CaptureRootsForMarkingVisitor() argument
914 : collector_(cc), self_(self) {} in CaptureRootsForMarkingVisitor()
959 Thread* const self = Thread::Current(); in Run() local
960 CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc) in Run()
961 << thread->GetState() << " thread " << thread << " self " << self; in Run()
964 MutexLock mu(self, concurrent_copying_->mark_stack_lock_); in Run()
977 concurrent_copying_->GetBarrier().Pass(self); in Run()
995 Thread* const self = Thread::Current(); in Run() local
996 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_); in Run()
999 CaptureRootsForMarkingVisitor</*kAtomicTestAndSet*/ true> visitor(concurrent_copying_, self); in Run()
1003 CHECK(self == thread || self->GetThreadLocalMarkStack() == nullptr); in Run()
1015 Thread* const self = Thread::Current(); in CaptureThreadRootsForMarking() local
1018 gc_barrier_->Init(self, 0); in CaptureThreadRootsForMarking()
1025 Locks::mutator_lock_->SharedUnlock(self); in CaptureThreadRootsForMarking()
1027 ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun); in CaptureThreadRootsForMarking()
1028 gc_barrier_->Increment(self, barrier_count); in CaptureThreadRootsForMarking()
1030 Locks::mutator_lock_->SharedLock(self); in CaptureThreadRootsForMarking()
1211 Thread *self = Thread::Current(); in PushOntoLocalMarkStack() local
1212 DCHECK_EQ(thread_running_gc_, self); in PushOntoLocalMarkStack()
1213 DCHECK(self->GetThreadLocalMarkStack() == nullptr); in PushOntoLocalMarkStack()
1328 Thread* const self = Thread::Current(); in MarkingPhase() local
1329 CHECK_EQ(self, thread_running_gc_); in MarkingPhase()
1360 CaptureRootsForMarkingVisitor visitor(this, self); in MarkingPhase()
1366 CaptureRootsForMarkingVisitor visitor(this, self); in MarkingPhase()
1400 Thread* self = Thread::Current(); in CopyingPhase() local
1403 MutexLock mu(self, *Locks::thread_list_lock_); in CopyingPhase()
1589 CHECK(!self->GetWeakRefAccessEnabled()); in CopyingPhase()
1606 ProcessReferences(self); in CopyingPhase()
1611 SweepSystemWeaks(self); in CopyingPhase()
1621 ReenableWeakRefAccess(self); in CopyingPhase()
1630 MutexLock mu(self, *Locks::thread_list_lock_); in CopyingPhase()
1638 void ConcurrentCopying::ReenableWeakRefAccess(Thread* self) { in ReenableWeakRefAccess() argument
1644 MutexLock mu(self, *Locks::thread_list_lock_); in ReenableWeakRefAccess()
1652 GetHeap()->GetReferenceProcessor()->BroadcastForSlowPath(self); in ReenableWeakRefAccess()
1664 Thread* self = Thread::Current(); in Run() local
1665 DCHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc) in Run()
1666 << thread->GetState() << " thread " << thread << " self " << self; in Run()
1673 concurrent_copying_->GetBarrier().Pass(self); in Run()
1686 void Run(Thread* self ATTRIBUTE_UNUSED) override REQUIRES(Locks::thread_list_lock_) { in Run()
1704 Thread* self = Thread::Current(); in IssueDisableMarkingCheckpoint() local
1707 gc_barrier_->Init(self, 0); in IssueDisableMarkingCheckpoint()
1716 Locks::mutator_lock_->SharedUnlock(self); in IssueDisableMarkingCheckpoint()
1718 ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun); in IssueDisableMarkingCheckpoint()
1719 gc_barrier_->Increment(self, barrier_count); in IssueDisableMarkingCheckpoint()
1721 Locks::mutator_lock_->SharedLock(self); in IssueDisableMarkingCheckpoint()
1738 Thread* self = Thread::Current(); in IssueEmptyCheckpoint() local
1741 Locks::mutator_lock_->SharedUnlock(self); in IssueEmptyCheckpoint()
1743 Locks::mutator_lock_->SharedLock(self); in IssueEmptyCheckpoint()
1758 void ConcurrentCopying::PushOntoMarkStack(Thread* const self, mirror::Object* to_ref) { in PushOntoMarkStack() argument
1764 if (LIKELY(self == thread_running_gc_)) { in PushOntoMarkStack()
1766 CHECK(self->GetThreadLocalMarkStack() == nullptr); in PushOntoMarkStack()
1773 accounting::AtomicStack<mirror::Object>* tl_mark_stack = self->GetThreadLocalMarkStack(); in PushOntoMarkStack()
1775 MutexLock mu(self, mark_stack_lock_); in PushOntoMarkStack()
1791 self->SetThreadLocalMarkStack(new_tl_mark_stack); in PushOntoMarkStack()
1802 MutexLock mu(self, mark_stack_lock_); in PushOntoMarkStack()
1811 << " self->gc_marking=" << self->GetIsGcMarking() in PushOntoMarkStack()
1813 CHECK(self == thread_running_gc_) in PushOntoMarkStack()
1904 Thread* self = Thread::Current(); in VerifyNoFromSpaceReferences() local
1905 DCHECK(Locks::mutator_lock_->IsExclusiveHeld(self)); in VerifyNoFromSpaceReferences()
1908 MutexLock mu(self, *Locks::thread_list_lock_); in VerifyNoFromSpaceReferences()
1931 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_); in VerifyNoFromSpaceReferences()
1939 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); in VerifyNoFromSpaceReferences()
1996 Thread* self = Thread::Current(); in RevokeThreadLocalMarkStacks() local
1999 gc_barrier_->Init(self, 0); in RevokeThreadLocalMarkStacks()
2006 Locks::mutator_lock_->SharedUnlock(self); in RevokeThreadLocalMarkStacks()
2008 ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun); in RevokeThreadLocalMarkStacks()
2009 gc_barrier_->Increment(self, barrier_count); in RevokeThreadLocalMarkStacks()
2011 Locks::mutator_lock_->SharedLock(self); in RevokeThreadLocalMarkStacks()
2015 Thread* self = Thread::Current(); in RevokeThreadLocalMarkStack() local
2016 CHECK_EQ(self, thread); in RevokeThreadLocalMarkStack()
2017 MutexLock mu(self, mark_stack_lock_); in RevokeThreadLocalMarkStack()
2043 Thread* const self = Thread::Current(); in ProcessMarkStackOnce() local
2044 DCHECK(self == thread_running_gc_); in ProcessMarkStackOnce()
2314 void Run(Thread* self ATTRIBUTE_UNUSED) override REQUIRES(Locks::thread_list_lock_) { in Run()
2326 Thread* self = Thread::Current(); in SwitchToSharedMarkStackMode() local
2328 DCHECK(self == thread_running_gc_); in SwitchToSharedMarkStackMode()
2349 Thread* self = Thread::Current(); in SwitchToGcExclusiveMarkStackMode() local
2351 DCHECK(self == thread_running_gc_); in SwitchToGcExclusiveMarkStackMode()
2364 Thread* self = Thread::Current(); in CheckEmptyMarkStack() local
2366 DCHECK(self == thread_running_gc_); in CheckEmptyMarkStack()
2398 void ConcurrentCopying::SweepSystemWeaks(Thread* self) { in SweepSystemWeaks() argument
2400 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_); in SweepSystemWeaks()
2440 Thread* self = Thread::Current(); in SweepArray() local
2476 freed.bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer); in SweepArray()
2488 freed.bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer); in SweepArray()
2511 freed_los.bytes += large_object_space->Free(self, obj); in SweepArray()
2527 Thread* const self = Thread::Current(); in MarkZygoteLargeObjects() local
2528 WriterMutexLock rmu(self, *Locks::heap_bitmap_lock_); in MarkZygoteLargeObjects()
2538 [mark_bitmap, los, self](mirror::Object* obj) in MarkZygoteLargeObjects()
2541 if (los->IsZygoteLargeObject(self, obj)) { in MarkZygoteLargeObjects()
2626 Thread* self = Thread::Current(); in ReclaimPhase() local
2656 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); in ReclaimPhase()
3154 Thread* const self = Thread::Current(); in VisitRoots() local
3158 mirror::Object* to_ref = Mark(self, ref); in VisitRoots()
3175 inline void ConcurrentCopying::MarkRoot(Thread* const self, in MarkRoot() argument
3179 mirror::Object* to_ref = Mark<kGrayImmuneObject>(self, ref); in MarkRoot()
3197 Thread* const self = Thread::Current(); in VisitRoots() local
3202 MarkRoot</*kGrayImmuneObject=*/true>(self, root); in VisitRoots()
3236 void ConcurrentCopying::FillWithFakeObject(Thread* const self, in FillWithFakeObject() argument
3248 Mark(self, GetClassRoot<mirror::IntArray, kWithoutReadBarrier>().Ptr())); in FillWithFakeObject()
3282 mirror::Object* ConcurrentCopying::AllocateInSkippedBlock(Thread* const self, size_t alloc_size) { in AllocateInSkippedBlock() argument
3289 MutexLock mu(self, skipped_blocks_lock_); in AllocateInSkippedBlock()
3328 FillWithFakeObject(self, in AllocateInSkippedBlock()
3333 MutexLock mu(self, skipped_blocks_lock_); in AllocateInSkippedBlock()
3340 mirror::Object* ConcurrentCopying::Copy(Thread* const self, in Copy() argument
3372 to_ref = AllocateInSkippedBlock(self, region_space_alloc_size); in Copy()
3393 self, obj_size, &non_moving_space_bytes_allocated, nullptr, &unused_size); in Copy()
3428 FillWithFakeObject(self, to_ref, bytes_allocated); in Copy()
3439 MutexLock mu(self, skipped_blocks_lock_); in Copy()
3447 heap_->non_moving_space_->Free(self, to_ref); in Copy()
3482 if (LIKELY(self == thread_running_gc_)) { in Copy()
3509 PushOntoMarkStack(self, to_ref); in Copy()
3565 mirror::Object* ConcurrentCopying::MarkNonMoving(Thread* const self, in MarkNonMoving() argument
3610 PushOntoMarkStack(self, ref); in MarkNonMoving()
3646 PushOntoMarkStack(self, ref); in MarkNonMoving()
3653 Thread* const self = Thread::Current(); in FinishPhase() local
3655 MutexLock mu(self, mark_stack_lock_); in FinishPhase()
3670 MutexLock mu(self, skipped_blocks_lock_); in FinishPhase()
3674 ReaderMutexLock mu(self, *Locks::mutator_lock_); in FinishPhase()
3676 WriterMutexLock mu2(self, *Locks::heap_bitmap_lock_); in FinishPhase()
3681 ReaderMutexLock mu2(self, *Locks::heap_bitmap_lock_); in FinishPhase()
3707 MutexLock mu(self, rb_slow_path_histogram_lock_); in FinishPhase()
3750 void ConcurrentCopying::ProcessReferences(Thread* self) { in ProcessReferences() argument
3753 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); in ProcessReferences()
3763 mirror::Object* ConcurrentCopying::MarkFromReadBarrierWithMeasurements(Thread* const self, in MarkFromReadBarrierWithMeasurements() argument
3765 if (self != thread_running_gc_) { in MarkFromReadBarrierWithMeasurements()
3773 Mark</*kGrayImmuneObject=*/true, /*kNoUnEvac=*/false, /*kFromGCThread=*/false>(self, in MarkFromReadBarrierWithMeasurements()
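Two coordination idioms account for most of the `self` references above: the checkpoint barrier hand-off (gc_barrier_->Init(self, 0) on the GC thread, GetBarrier().Pass(self) inside each checkpoint closure's Run(), then ScopedThreadStateChange plus gc_barrier_->Increment(self, barrier_count) to wait), and the thread-local mark stack push in PushOntoMarkStack(), where a mutator only takes mark_stack_lock_ once its private stack fills. The two sketches below are minimal stand-ins written with the standard C++ library under assumed, simplified semantics; CountBarrier, MarkStackPool, kTlStackCapacity and the other names are illustrative and are not ART APIs.

// Illustrative sketch only: a simplified analogue of the Init / Pass / Increment
// rendezvous seen in ActivateReadBarrierEntrypoints, FlipThreadRoots,
// CaptureThreadRootsForMarking, IssueDisableMarkingCheckpoint and
// RevokeThreadLocalMarkStacks. This is NOT ART's Barrier class; it only mimics
// the assumed protocol (reset, per-thread Pass, GC-thread Increment-and-wait).
#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>
#include <vector>

class CountBarrier {
 public:
  // Mirrors gc_barrier_->Init(self, 0): reset the outstanding count before a checkpoint.
  void Init(int count) {
    std::lock_guard<std::mutex> lock(mu_);
    count_ = count;
  }
  // Mirrors GetBarrier().Pass(self): a checkpointed thread decrements the count
  // after its closure has run.
  void Pass() {
    std::lock_guard<std::mutex> lock(mu_);
    --count_;
    cv_.notify_all();
  }
  // Mirrors gc_barrier_->Increment(self, barrier_count): add the number of threads
  // that were checkpointed and block until all of them have called Pass().
  void Increment(int n) {
    std::unique_lock<std::mutex> lock(mu_);
    count_ += n;
    cv_.wait(lock, [this] { return count_ <= 0; });
  }

 private:
  std::mutex mu_;
  std::condition_variable cv_;
  int count_ = 0;
};

int main() {
  CountBarrier barrier;
  barrier.Init(0);  // like gc_barrier_->Init(self, 0)

  constexpr int kThreads = 4;
  std::vector<std::thread> workers;
  for (int i = 0; i < kThreads; ++i) {
    workers.emplace_back([&barrier, i] {
      // Stand-in for a checkpoint closure's Run(): do per-thread work, then Pass().
      std::printf("thread %d ran its checkpoint closure\n", i);
      barrier.Pass();
    });
  }

  // Stand-in for ScopedThreadStateChange(self, kWaitingForCheckPointsToRun)
  // followed by gc_barrier_->Increment(self, barrier_count).
  barrier.Increment(kThreads);
  std::printf("all checkpoints passed\n");

  for (auto& t : workers) {
    t.join();
  }
  return 0;
}

The second sketch shows the lock-avoidance idea behind the PushOntoMarkStack hits: a mutator pushes into its own buffer and contends on the shared lock only when that buffer is exchanged, which is the rough shape suggested by GetThreadLocalMarkStack / SetThreadLocalMarkStack and the MutexLock on mark_stack_lock_ above.

// Simplified analogue of the thread-local mark stack push; buffer size, pool layout,
// and names are assumptions, not ART internals.
#include <mutex>
#include <vector>

struct MarkStackPool {
  std::mutex lock;                              // stands in for mark_stack_lock_
  std::vector<std::vector<void*>> full_stacks;  // filled thread-local stacks handed to the GC
};

// Per-thread buffer; stands in for Thread::GetThreadLocalMarkStack().
thread_local std::vector<void*> tl_mark_stack;

constexpr size_t kTlStackCapacity = 256;  // illustrative capacity

void PushOntoLocalMarkStack(MarkStackPool& pool, void* ref) {
  if (tl_mark_stack.size() == kTlStackCapacity) {
    // Buffer is full: hand it to the shared pool under the lock and start a fresh one,
    // mirroring the swap done under MutexLock mu(self, mark_stack_lock_).
    std::lock_guard<std::mutex> mu(pool.lock);
    pool.full_stacks.push_back(std::move(tl_mark_stack));
    tl_mark_stack.clear();
    tl_mark_stack.reserve(kTlStackCapacity);
  }
  tl_mark_stack.push_back(ref);
}

int main() {
  MarkStackPool pool;
  int dummy[3] = {};
  for (int& x : dummy) {
    PushOntoLocalMarkStack(pool, &x);
  }
  return 0;
}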