Lines Matching refs:self

235 mirror::Object* AllocObject(Thread* self,
245 return AllocObjectWithAllocator<kInstrumented>(self,
253 mirror::Object* AllocNonMovableObject(Thread* self,
263 return AllocObjectWithAllocator<kInstrumented>(self,
271 ALWAYS_INLINE mirror::Object* AllocObjectWithAllocator(Thread* self,
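The three allocation entry points above all funnel through AllocObjectWithAllocator and pass the calling Thread* straight through. A minimal sketch of that forwarding, assuming a PreFenceVisitor template parameter and a GetCurrentAllocator() accessor that do not appear in the matched lines:

// Sketch only: how AllocObject might forward to the templated entry point.
// The PreFenceVisitor parameter and GetCurrentAllocator() are assumptions.
template <bool kInstrumented, typename PreFenceVisitor>
inline mirror::Object* Heap::AllocObject(Thread* self,
                                         ObjPtr<mirror::Class> klass,
                                         size_t num_bytes,
                                         const PreFenceVisitor& pre_fence_visitor) {
  return AllocObjectWithAllocator<kInstrumented>(self, klass, num_bytes,
                                                 GetCurrentAllocator(),
                                                 pre_fence_visitor);
}
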
365 void IncrementDisableMovingGC(Thread* self) REQUIRES(!*gc_complete_lock_);
366 void DecrementDisableMovingGC(Thread* self) REQUIRES(!*gc_complete_lock_);
369 void IncrementDisableThreadFlip(Thread* self) REQUIRES(!*thread_flip_lock_);
370 void DecrementDisableThreadFlip(Thread* self) REQUIRES(!*thread_flip_lock_);
371 void ThreadFlipBegin(Thread* self) REQUIRES(!*thread_flip_lock_);
372 void ThreadFlipEnd(Thread* self) REQUIRES(!*thread_flip_lock_);
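The Increment/Decrement pairs for disabling moving GC and thread flips are meant to be called in matched pairs around a critical region. A hypothetical RAII wrapper (not part of the listing; ART may provide its own scoped equivalent) that keeps the calls balanced could look like:

// Hypothetical helper built only on the Increment/DecrementDisableMovingGC
// declarations shown above.
class ScopedDisableMovingGC {
 public:
  ScopedDisableMovingGC(gc::Heap* heap, Thread* self)
      : heap_(heap), self_(self) {
    heap_->IncrementDisableMovingGC(self_);  // bump the disable count
  }
  ~ScopedDisableMovingGC() {
    heap_->DecrementDisableMovingGC(self_);  // restore it on scope exit
  }

 private:
  gc::Heap* const heap_;
  Thread* const self_;
};
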
386 void ConcurrentGC(Thread* self, GcCause cause, bool force_full)
474 collector::GcType WaitForGcToComplete(GcCause cause, Thread* self) REQUIRES(!*gc_complete_lock_);
543 void AddFinalizerReference(Thread* self, ObjPtr<mirror::Object>* object);
642 void Trim(Thread* self) REQUIRES(!*gc_complete_lock_);
672 void RevokeAllThreadLocalAllocationStacks(Thread* self)
823 bool IsMovingGCDisabled(Thread* self) REQUIRES(!*gc_complete_lock_) {
824 MutexLock mu(self, *gc_complete_lock_);
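Only the first two lines of the inline body match the query. A plausible completion, with the counter member name (disable_moving_gc_count_) assumed rather than taken from the hits:

bool IsMovingGCDisabled(Thread* self) REQUIRES(!*gc_complete_lock_) {
  MutexLock mu(self, *gc_complete_lock_);  // caller must not already hold the lock
  return disable_moving_gc_count_ > 0;     // member name assumed
}
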
829 void RequestTrim(Thread* self) REQUIRES(!*pending_task_lock_);
832 void RequestConcurrentGC(Thread* self, GcCause cause, bool force_full)
922 void PostForkChildAction(Thread* self);
941 void StartGC(Thread* self, GcCause cause, CollectorType collector_type)
943 void FinishGC(Thread* self, collector::GcType gc_type) REQUIRES(!*gc_complete_lock_);
996 ALWAYS_INLINE void CheckConcurrentGCForJava(Thread* self,
1001 void CheckGCForNative(Thread* self)
1010 mirror::Object* AllocLargeObject(Thread* self,
1020 mirror::Object* AllocateInternalWithGc(Thread* self,
1033 mirror::Object* AllocateInto(Thread* self,
1046 ALWAYS_INLINE mirror::Object* TryToAllocate(Thread* self,
1054 mirror::Object* AllocWithNewTLAB(Thread* self,
1063 void ThrowOutOfMemoryError(Thread* self, size_t byte_count, AllocatorType allocator_type)
1080 collector::GcType WaitForGcToCompleteLocked(GcCause cause, Thread* self)
1086 void RequestConcurrentGCAndSaveObject(Thread* self, bool force_full, ObjPtr<mirror::Object>* obj)
1152 void PushOnAllocationStack(Thread* self, ObjPtr<mirror::Object>* obj)
1155 void PushOnAllocationStackWithInternalGC(Thread* self, ObjPtr<mirror::Object>* obj)
1163 void ClearPendingTrim(Thread* self) REQUIRES(!*pending_task_lock_);
1164 void ClearPendingCollectorTransition(Thread* self) REQUIRES(!*pending_task_lock_);
1175 void TrimSpaces(Thread* self) REQUIRES(!*gc_complete_lock_);
1178 void TrimIndirectReferenceTables(Thread* self);
1191 void CheckGcStressMode(Thread* self, ObjPtr<mirror::Object>* obj)
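
Throughout the listing, every method takes the calling Thread* explicitly, and lock discipline is declared with Clang-style thread-safety annotations: REQUIRES(!*gc_complete_lock_) means the caller must not hold gc_complete_lock_, because the method acquires it internally (as the MutexLock in IsMovingGCDisabled shows). A standalone sketch of the same convention, using std::mutex instead of ART's Mutex and annotation macros:

// Standalone illustration (not ART code): the lock named in a
// REQUIRES(!*lock_) annotation is acquired inside the method, so the
// caller must enter without holding it.
#include <mutex>

class GcStateSketch {
 public:
  bool IsGcRunning() {  // analogue of a REQUIRES(!*lock_) method
    std::lock_guard<std::mutex> mu(gc_complete_lock_);
    return gc_running_;
  }

 private:
  std::mutex gc_complete_lock_;
  bool gc_running_ = false;
};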