Lines matching refs:self — references to the Thread* self argument or local in Heap methods

875 void Heap::IncrementDisableMovingGC(Thread* self) { in IncrementDisableMovingGC() argument
878 ScopedThreadStateChange tsc(self, kWaitingForGcToComplete); in IncrementDisableMovingGC()
879 MutexLock mu(self, *gc_complete_lock_); in IncrementDisableMovingGC()
882 WaitForGcToCompleteLocked(kGcCauseDisableMovingGc, self); in IncrementDisableMovingGC()
886 void Heap::DecrementDisableMovingGC(Thread* self) { in DecrementDisableMovingGC() argument
887 MutexLock mu(self, *gc_complete_lock_); in DecrementDisableMovingGC()
892 void Heap::IncrementDisableThreadFlip(Thread* self) { in IncrementDisableThreadFlip() argument
895 bool is_nested = self->GetDisableThreadFlipCount() > 0; in IncrementDisableThreadFlip()
896 self->IncrementDisableThreadFlipCount(); in IncrementDisableThreadFlip()
902 ScopedThreadStateChange tsc(self, kWaitingForGcThreadFlip); in IncrementDisableThreadFlip()
903 MutexLock mu(self, *thread_flip_lock_); in IncrementDisableThreadFlip()
904 thread_flip_cond_->CheckSafeToWait(self); in IncrementDisableThreadFlip()
912 thread_flip_cond_->Wait(self); in IncrementDisableThreadFlip()
925 void Heap::DecrementDisableThreadFlip(Thread* self) { in DecrementDisableThreadFlip() argument
929 self->DecrementDisableThreadFlipCount(); in DecrementDisableThreadFlip()
930 bool is_outermost = self->GetDisableThreadFlipCount() == 0; in DecrementDisableThreadFlip()
936 MutexLock mu(self, *thread_flip_lock_); in DecrementDisableThreadFlip()
941 thread_flip_cond_->Broadcast(self); in DecrementDisableThreadFlip()
945 void Heap::ThreadFlipBegin(Thread* self) { in ThreadFlipBegin() argument
949 ScopedThreadStateChange tsc(self, kWaitingForGcThreadFlip); in ThreadFlipBegin()
950 MutexLock mu(self, *thread_flip_lock_); in ThreadFlipBegin()
951 thread_flip_cond_->CheckSafeToWait(self); in ThreadFlipBegin()
960 thread_flip_cond_->Wait(self); in ThreadFlipBegin()
971 void Heap::ThreadFlipEnd(Thread* self) { in ThreadFlipEnd() argument
975 MutexLock mu(self, *thread_flip_lock_); in ThreadFlipEnd()
979 thread_flip_cond_->Broadcast(self); in ThreadFlipEnd()
1369 void Heap::ThrowOutOfMemoryError(Thread* self, size_t byte_count, AllocatorType allocator_type) { in ThrowOutOfMemoryError() argument
1372 if (self->IsHandlingStackOverflow()) { in ThrowOutOfMemoryError()
1373 self->SetException( in ThrowOutOfMemoryError()
1409 self->ThrowOutOfMemoryError(oss.str().c_str()); in ThrowOutOfMemoryError()
1436 void Heap::Trim(Thread* self) { in Trim() argument
1443 ScopedGCCriticalSection gcs(self, kGcCauseTrim, kCollectorTypeHeapTrim); in Trim()
1450 TrimIndirectReferenceTables(self); in Trim()
1451 TrimSpaces(self); in Trim()
1471 void Heap::TrimIndirectReferenceTables(Thread* self) { in TrimIndirectReferenceTables() argument
1472 ScopedObjectAccess soa(self); in TrimIndirectReferenceTables()
1480 ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun); in TrimIndirectReferenceTables()
1483 barrier.Increment(self, barrier_count); in TrimIndirectReferenceTables()
1487 void Heap::StartGC(Thread* self, GcCause cause, CollectorType collector_type) { in StartGC() argument
1490 ScopedThreadStateChange tsc(self, kWaitingForGcToComplete); in StartGC()
1491 MutexLock mu(self, *gc_complete_lock_); in StartGC()
1493 WaitForGcToCompleteLocked(cause, self); in StartGC()
1496 thread_running_gc_ = self; in StartGC()
1499 void Heap::TrimSpaces(Thread* self) { in TrimSpaces() argument
1502 StartGC(self, kGcCauseTrim, kCollectorTypeHeapTrim); in TrimSpaces()
1510 ScopedObjectAccess soa(self); in TrimSpaces()
1537 FinishGC(self, collector::kGcTypeNone); in TrimSpaces()
1740 mirror::Object* Heap::AllocateInternalWithGc(Thread* self, in AllocateInternalWithGc() argument
1753 self->AssertNoPendingException(); in AllocateInternalWithGc()
1756 StackHandleScope<1> hs(self); in AllocateInternalWithGc()
1764 l->PreObjectAllocated(self, h_klass, &alloc_size); in AllocateInternalWithGc()
1779 PERFORM_SUSPENDING_OPERATION(WaitForGcToComplete(kGcCauseForAlloc, self)); in AllocateInternalWithGc()
1788 mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated, in AllocateInternalWithGc()
1817 mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, in AllocateInternalWithGc()
1841 ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated, in AllocateInternalWithGc()
1865 ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated, in AllocateInternalWithGc()
1905 ThrowOutOfMemoryError(self, alloc_size, allocator); in AllocateInternalWithGc()
1917 Thread* const self = Thread::Current(); in GetObjectsAllocated() local
1918 ScopedThreadStateChange tsc(self, kWaitingForGetObjectsAllocated); in GetObjectsAllocated()
1926 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_); in GetObjectsAllocated()
2060 Thread* self = Thread::Current(); in PerformHomogeneousSpaceCompact() local
2064 ScopedThreadStateChange tsc(self, kWaitingPerformingGc); in PerformHomogeneousSpaceCompact()
2065 Locks::mutator_lock_->AssertNotHeld(self); in PerformHomogeneousSpaceCompact()
2067 ScopedThreadStateChange tsc2(self, kWaitingForGcToComplete); in PerformHomogeneousSpaceCompact()
2068 MutexLock mu(self, *gc_complete_lock_); in PerformHomogeneousSpaceCompact()
2070 WaitForGcToCompleteLocked(kGcCauseHomogeneousSpaceCompact, self); in PerformHomogeneousSpaceCompact()
2084 if (Runtime::Current()->IsShuttingDown(self)) { in PerformHomogeneousSpaceCompact()
2087 FinishGC(self, collector::kGcTypeNone); in PerformHomogeneousSpaceCompact()
2120 SelfDeletingTask* clear = reference_processor_->CollectClearedReferences(self); in PerformHomogeneousSpaceCompact()
2123 FinishGC(self, collector::kGcTypeFull); in PerformHomogeneousSpaceCompact()
2125 clear->Run(self); in PerformHomogeneousSpaceCompact()
2128 ScopedObjectAccess soa(self); in PerformHomogeneousSpaceCompact()
2323 Thread* self = Thread::Current(); in PreZygoteFork() local
2324 MutexLock mu(self, zygote_creation_lock_); in PreZygoteFork()
2463 large_object_space_->SetAllLargeObjectsAsZygoteObjects(self, set_mark_bit); in PreZygoteFork()
2578 Thread* self = Thread::Current(); in CollectGarbageInternal() local
2593 ScopedThreadStateChange tsc(self, kWaitingPerformingGc); in CollectGarbageInternal()
2594 Locks::mutator_lock_->AssertNotHeld(self); in CollectGarbageInternal()
2595 if (self->IsHandlingStackOverflow()) { in CollectGarbageInternal()
2602 gc_complete_lock_->AssertNotHeld(self); in CollectGarbageInternal()
2603 ScopedThreadStateChange tsc2(self, kWaitingForGcToComplete); in CollectGarbageInternal()
2604 MutexLock mu(self, *gc_complete_lock_); in CollectGarbageInternal()
2606 WaitForGcToCompleteLocked(gc_cause, self); in CollectGarbageInternal()
2620 ++self->GetStats()->gc_for_alloc_count; in CollectGarbageInternal()
2675 RequestTrim(self); in CollectGarbageInternal()
2677 SelfDeletingTask* clear = reference_processor_->CollectClearedReferences(self); in CollectGarbageInternal()
2681 FinishGC(self, gc_type); in CollectGarbageInternal()
2684 clear->Run(self); in CollectGarbageInternal()
2694 ScopedObjectAccess soa(self); in CollectGarbageInternal()
2735 void Heap::FinishGC(Thread* self, collector::GcType gc_type) { in FinishGC() argument
2736 MutexLock mu(self, *gc_complete_lock_); in FinishGC()
2757 gc_complete_cond_->Broadcast(self); in FinishGC()
2825 VerifyReferenceVisitor(Thread* self, Heap* heap, size_t* fail_count, bool verify_referent) in VerifyReferenceVisitor() argument
2827 : self_(self), heap_(heap), fail_count_(fail_count), verify_referent_(verify_referent) { in VerifyReferenceVisitor()
2978 VerifyObjectVisitor(Thread* self, Heap* heap, size_t* fail_count, bool verify_referent) in VerifyObjectVisitor() argument
2979 : self_(self), heap_(heap), fail_count_(fail_count), verify_referent_(verify_referent) {} in VerifyObjectVisitor()
3007 void Heap::PushOnAllocationStackWithInternalGC(Thread* self, ObjPtr<mirror::Object>* obj) { in PushOnAllocationStackWithInternalGC() argument
3012 StackHandleScope<1> hs(self); in PushOnAllocationStackWithInternalGC()
3022 void Heap::PushOnThreadLocalAllocationStackWithInternalGC(Thread* self, in PushOnThreadLocalAllocationStackWithInternalGC() argument
3025 DCHECK(!self->PushOnThreadLocalAllocationStack(obj->Ptr())); in PushOnThreadLocalAllocationStackWithInternalGC()
3031 StackHandleScope<1> hs(self); in PushOnThreadLocalAllocationStackWithInternalGC()
3040 self->SetThreadLocalAllocationStack(start_address, end_address); in PushOnThreadLocalAllocationStackWithInternalGC()
3042 CHECK(self->PushOnThreadLocalAllocationStack(obj->Ptr())); // Must succeed. in PushOnThreadLocalAllocationStackWithInternalGC()
3047 Thread* self = Thread::Current(); in VerifyHeapReferences() local
3048 Locks::mutator_lock_->AssertExclusiveHeld(self); in VerifyHeapReferences()
3054 RevokeAllThreadLocalAllocationStacks(self); in VerifyHeapReferences()
3056 VerifyObjectVisitor visitor(self, this, &fail_count, verify_referents); in VerifyHeapReferences()
3178 Thread* self = Thread::Current(); in VerifyMissingCardMarks() local
3179 Locks::mutator_lock_->AssertExclusiveHeld(self); in VerifyMissingCardMarks()
3184 RevokeAllThreadLocalAllocationStacks(self); in VerifyMissingCardMarks()
3203 void Heap::RevokeAllThreadLocalAllocationStacks(Thread* self) { in RevokeAllThreadLocalAllocationStacks() argument
3205 DCHECK(Locks::mutator_lock_->IsExclusiveHeld(self)); in RevokeAllThreadLocalAllocationStacks()
3206 MutexLock mu(self, *Locks::runtime_shutdown_lock_); in RevokeAllThreadLocalAllocationStacks()
3207 MutexLock mu2(self, *Locks::thread_list_lock_); in RevokeAllThreadLocalAllocationStacks()
3301 Thread* const self = Thread::Current(); in PreGcVerificationPaused() local
3315 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_); in PreGcVerificationPaused()
3324 ReaderMutexLock reader_lock(self, *Locks::heap_bitmap_lock_); in PreGcVerificationPaused()
3349 Thread* const self = Thread::Current(); in PreSweepingGcVerification() local
3356 CHECK_NE(self->GetState(), kRunnable); in PreSweepingGcVerification()
3358 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); in PreSweepingGcVerification()
3370 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); in PreSweepingGcVerification()
3381 Thread* const self = Thread::Current(); in PostGcVerificationPaused() local
3385 ReaderMutexLock mu2(self, *Locks::heap_bitmap_lock_); in PostGcVerificationPaused()
3419 collector::GcType Heap::WaitForGcToComplete(GcCause cause, Thread* self) { in WaitForGcToComplete() argument
3420 ScopedThreadStateChange tsc(self, kWaitingForGcToComplete); in WaitForGcToComplete()
3421 MutexLock mu(self, *gc_complete_lock_); in WaitForGcToComplete()
3422 return WaitForGcToCompleteLocked(cause, self); in WaitForGcToComplete()
3425 collector::GcType Heap::WaitForGcToCompleteLocked(GcCause cause, Thread* self) { in WaitForGcToCompleteLocked() argument
3426 gc_complete_cond_->CheckSafeToWait(self); in WaitForGcToCompleteLocked()
3431 if (self != task_processor_->GetRunningThread()) { in WaitForGcToCompleteLocked()
3441 gc_complete_cond_->Wait(self); in WaitForGcToCompleteLocked()
3451 if (self != task_processor_->GetRunningThread()) { in WaitForGcToCompleteLocked()
3662 void Heap::AddFinalizerReference(Thread* self, ObjPtr<mirror::Object>* object) { in AddFinalizerReference() argument
3663 ScopedObjectAccess soa(self); in AddFinalizerReference()
3664 ScopedLocalRef<jobject> arg(self->GetJniEnv(), soa.AddLocalReference<jobject>(*object)); in AddFinalizerReference()
3672 void Heap::RequestConcurrentGCAndSaveObject(Thread* self, in RequestConcurrentGCAndSaveObject() argument
3675 StackHandleScope<1> hs(self); in RequestConcurrentGCAndSaveObject()
3677 RequestConcurrentGC(self, kGcCauseBackground, force_full); in RequestConcurrentGCAndSaveObject()
3684 void Run(Thread* self) override { in Run() argument
3686 heap->ConcurrentGC(self, cause_, force_full_); in Run()
3695 static bool CanAddHeapTask(Thread* self) REQUIRES(!Locks::runtime_shutdown_lock_) { in CanAddHeapTask() argument
3697 return runtime != nullptr && runtime->IsFinishedStarting() && !runtime->IsShuttingDown(self) && in CanAddHeapTask()
3698 !self->IsHandlingStackOverflow(); in CanAddHeapTask()
3705 void Heap::RequestConcurrentGC(Thread* self, GcCause cause, bool force_full) { in RequestConcurrentGC() argument
3706 if (CanAddHeapTask(self) && in RequestConcurrentGC()
3708 task_processor_->AddTask(self, new ConcurrentGCTask(NanoTime(), // Start straight away. in RequestConcurrentGC()
3714 void Heap::ConcurrentGC(Thread* self, GcCause cause, bool force_full) { in ConcurrentGC() argument
3715 if (!Runtime::Current()->IsShuttingDown(self)) { in ConcurrentGC()
3717 if (WaitForGcToComplete(cause, self) == collector::kGcTypeNone) { in ConcurrentGC()
3742 void Run(Thread* self) override { in Run() argument
3745 heap->ClearPendingCollectorTransition(self); in Run()
3749 void Heap::ClearPendingCollectorTransition(Thread* self) { in ClearPendingCollectorTransition() argument
3750 MutexLock mu(self, *pending_task_lock_); in ClearPendingCollectorTransition()
3755 Thread* self = Thread::Current(); in RequestCollectorTransition() local
3757 if (desired_collector_type_ == collector_type_ || !CanAddHeapTask(self)) { in RequestCollectorTransition()
3769 MutexLock mu(self, *pending_task_lock_); in RequestCollectorTransition()
3772 task_processor_->UpdateTargetRunTime(self, pending_collector_transition_, target_time); in RequestCollectorTransition()
3778 task_processor_->AddTask(self, added_task); in RequestCollectorTransition()
3784 void Run(Thread* self) override { in Run() argument
3786 heap->Trim(self); in Run()
3787 heap->ClearPendingTrim(self); in Run()
3791 void Heap::ClearPendingTrim(Thread* self) { in ClearPendingTrim() argument
3792 MutexLock mu(self, *pending_task_lock_); in ClearPendingTrim()
3796 void Heap::RequestTrim(Thread* self) { in RequestTrim() argument
3797 if (!CanAddHeapTask(self)) { in RequestTrim()
3814 MutexLock mu(self, *pending_task_lock_); in RequestTrim()
3822 task_processor_->AddTask(self, added_task); in RequestTrim()
3929 inline void Heap::CheckGCForNative(Thread* self) { in CheckGCForNative() argument
3935 RequestConcurrentGC(self, kGcCauseForNativeAlloc, /*force_full=*/true); in CheckGCForNative()
3942 WaitForGcToComplete(kGcCauseForNativeAlloc, self); in CheckGCForNative()
4092 void Heap::CheckGcStressMode(Thread* self, ObjPtr<mirror::Object>* obj) { in CheckGcStressMode() argument
4100 MutexLock mu(self, *backtrace_lock_); in CheckGcStressMode()
4110 StackHandleScope<1> hs(self); in CheckGcStressMode()
4121 Thread* const self = Thread::Current(); in DisableGCForShutdown() local
4122 CHECK(Runtime::Current()->IsShuttingDown(self)); in DisableGCForShutdown()
4123 MutexLock mu(self, *gc_complete_lock_); in DisableGCForShutdown()
4171 mirror::Object* Heap::AllocWithNewTLAB(Thread* self, in AllocWithNewTLAB() argument
4178 if (kUsePartialTlabs && alloc_size <= self->TlabRemainingCapacity()) { in AllocWithNewTLAB()
4179 DCHECK_GT(alloc_size, self->TlabSize()); in AllocWithNewTLAB()
4182 const size_t min_expand_size = alloc_size - self->TlabSize(); in AllocWithNewTLAB()
4185 std::min(self->TlabRemainingCapacity() - self->TlabSize(), kPartialTlabSize)); in AllocWithNewTLAB()
4190 self->ExpandTlab(expand_bytes); in AllocWithNewTLAB()
4191 DCHECK_LE(alloc_size, self->TlabSize()); in AllocWithNewTLAB()
4200 if (!bump_pointer_space_->AllocNewTlab(self, new_tlab_size)) { in AllocWithNewTLAB()
4216 if (!region_space_->AllocNewTlab(self, new_tlab_size, bytes_tl_bulk_allocated)) { in AllocWithNewTLAB()
4247 mirror::Object* ret = self->AllocTlab(alloc_size); in AllocWithNewTLAB()
4266 void Run(Thread* self) override { in Run() argument
4271 heap->RequestConcurrentGC(self, kGcCauseBackground, false); in Run()
4276 void Heap::PostForkChildAction(Thread* self) { in PostForkChildAction() argument
4287 self, new TriggerPostForkCCGcTask(NanoTime() + MsToNs(kPostForkMaxHeapDurationMS))); in PostForkChildAction()
4314 Thread* const self = Thread::Current(); in AddHeapTask() local
4315 if (!CanAddHeapTask(self)) { in AddHeapTask()
4318 GetTaskProcessor()->AddTask(self, task); in AddHeapTask()
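
Nearly every hit above follows the same convention: the caller's Thread* self is passed explicitly into the lock, condition-variable, and state-change primitives (MutexLock mu(self, *gc_complete_lock_), gc_complete_cond_->Wait(self), ScopedThreadStateChange tsc(self, ...)) rather than re-querying Thread::Current() inside the callee. The sketch below is a minimal, self-contained illustration of the wait-for-GC shape seen in WaitForGcToComplete / WaitForGcToCompleteLocked / FinishGC above; it is not the ART implementation. std::mutex and std::condition_variable stand in for ART's Mutex and ConditionVariable, and the ToyHeap / Thread names are hypothetical.

// Minimal sketch, assuming only standard C++: a caller-supplied Thread*
// is threaded through explicitly, waiters block on a condition variable
// under gc_complete_lock_, and FinishGC broadcasts completion.
#include <condition_variable>
#include <iostream>
#include <mutex>
#include <thread>

struct Thread { int id; };  // hypothetical stand-in for art::Thread

class ToyHeap {             // hypothetical; not art::gc::Heap
 public:
  // Shape of Heap::WaitForGcToComplete(cause, self), cause omitted:
  // take gc_complete_lock_, then wait under the lock.
  void WaitForGcToComplete(Thread* self) {
    std::unique_lock<std::mutex> mu(gc_complete_lock_);  // MutexLock mu(self, *gc_complete_lock_)
    WaitForGcToCompleteLocked(self, mu);
  }

  // Shape of Heap::StartGC: mark a collection as running.
  void StartGC(Thread* self) {
    std::lock_guard<std::mutex> mu(gc_complete_lock_);
    collector_running_ = true;
    (void)self;  // kept only to mirror the explicit self-passing convention
  }

  // Shape of Heap::FinishGC: clear the running flag and wake every thread
  // blocked in WaitForGcToCompleteLocked.
  void FinishGC(Thread* self) {
    std::lock_guard<std::mutex> mu(gc_complete_lock_);
    collector_running_ = false;
    gc_complete_cond_.notify_all();                      // gc_complete_cond_->Broadcast(self)
    (void)self;
  }

 private:
  void WaitForGcToCompleteLocked(Thread* self, std::unique_lock<std::mutex>& mu) {
    while (collector_running_) {                         // loop guards against spurious wakeups
      gc_complete_cond_.wait(mu);                        // gc_complete_cond_->Wait(self)
    }
    (void)self;
  }

  std::mutex gc_complete_lock_;
  std::condition_variable gc_complete_cond_;
  bool collector_running_ = false;
};

int main() {
  ToyHeap heap;
  Thread gc_thread{0};
  Thread mutator{1};
  heap.StartGC(&gc_thread);
  std::thread waiter([&] {
    heap.WaitForGcToComplete(&mutator);
    std::cout << "mutator resumed after GC\n";
  });
  heap.FinishGC(&gc_thread);
  waiter.join();
  return 0;
}

One design point the listing makes visible: passing self explicitly lets the locking primitives record the owning thread (and assert on it, as in Locks::mutator_lock_->AssertNotHeld(self)) without a thread-local lookup on every acquisition; the toy version above drops that bookkeeping and only preserves the calling convention.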