1 /*
2  * Copyright (C) 2011 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #ifndef ART_RUNTIME_THREAD_H_
18 #define ART_RUNTIME_THREAD_H_
19 
20 #include <atomic>
21 #include <bitset>
22 #include <deque>
23 #include <iosfwd>
24 #include <list>
25 #include <memory>
26 #include <string>
27 
28 #include "base/atomic.h"
29 #include "base/enums.h"
30 #include "base/locks.h"
31 #include "base/macros.h"
32 #include "base/safe_map.h"
33 #include "base/value_object.h"
34 #include "entrypoints/jni/jni_entrypoints.h"
35 #include "entrypoints/quick/quick_entrypoints.h"
36 #include "handle.h"
37 #include "handle_scope.h"
38 #include "interpreter/interpreter_cache.h"
39 #include "jvalue.h"
40 #include "managed_stack.h"
41 #include "offsets.h"
42 #include "read_barrier_config.h"
43 #include "reflective_handle_scope.h"
44 #include "runtime_globals.h"
45 #include "runtime_stats.h"
46 #include "thread_state.h"
47 
48 class BacktraceMap;
49 
50 namespace art {
51 
52 namespace gc {
53 namespace accounting {
54 template<class T> class AtomicStack;
55 }  // namespace accounting
56 namespace collector {
57 class SemiSpace;
58 }  // namespace collector
59 }  // namespace gc
60 
61 namespace instrumentation {
62 struct InstrumentationStackFrame;
63 }  // namespace instrumentation
64 
65 namespace mirror {
66 class Array;
67 class Class;
68 class ClassLoader;
69 class Object;
70 template<class T> class ObjectArray;
71 template<class T> class PrimitiveArray;
72 typedef PrimitiveArray<int32_t> IntArray;
73 class StackTraceElement;
74 class String;
75 class Throwable;
76 }  // namespace mirror
77 
78 namespace verifier {
79 class MethodVerifier;
80 class VerifierDeps;
81 }  // namespace verifier
82 
83 class ArtMethod;
84 class BaseMutex;
85 class ClassLinker;
86 class Closure;
87 class Context;
88 class DeoptimizationContextRecord;
89 class DexFile;
90 class FrameIdToShadowFrame;
91 class IsMarkedVisitor;
92 class JavaVMExt;
93 class JNIEnvExt;
94 class Monitor;
95 class RootVisitor;
96 class ScopedObjectAccessAlreadyRunnable;
97 class ShadowFrame;
98 class StackedShadowFrameRecord;
99 enum class SuspendReason : char;
100 class Thread;
101 class ThreadList;
102 enum VisitRootFlags : uint8_t;
103 
104 // A piece of data that can be held in the CustomTls. The destructor will be called during thread
105 // shutdown. The thread the destructor is called on is not necessarily the same thread it was stored
106 // on.
107 class TLSData {
108  public:
109   virtual ~TLSData() {}
110 };
111 
112 // Thread priorities. These must match the Thread.MIN_PRIORITY,
113 // Thread.NORM_PRIORITY, and Thread.MAX_PRIORITY constants.
114 enum ThreadPriority {
115   kMinThreadPriority = 1,
116   kNormThreadPriority = 5,
117   kMaxThreadPriority = 10,
118 };
119 
120 enum ThreadFlag {
121   kSuspendRequest   = 1,  // If set implies that suspend_count_ > 0 and the Thread should enter the
122                           // safepoint handler.
123   kCheckpointRequest = 2,  // Request that the thread do some checkpoint work and then continue.
124   kEmptyCheckpointRequest = 4,  // Request that the thread do empty checkpoint and then continue.
125   kActiveSuspendBarrier = 8,  // Register that at least 1 suspend barrier needs to be passed.
126 };
127 
128 enum class StackedShadowFrameType {
129   kShadowFrameUnderConstruction,
130   kDeoptimizationShadowFrame,
131 };
132 
133 // The type of method that triggers deoptimization. It contains info on whether
134 // the deoptimized method should advance dex_pc.
135 enum class DeoptimizationMethodType {
136   kKeepDexPc,  // dex pc is required to be kept upon deoptimization.
137   kDefault     // dex pc may or may not advance depending on other conditions.
138 };
139 
140 // This should match RosAlloc::kNumThreadLocalSizeBrackets.
141 static constexpr size_t kNumRosAllocThreadLocalSizeBracketsInThread = 16;
142 
143 // Thread's stack layout for implicit stack overflow checks:
144 //
145 //   +---------------------+  <- highest address of stack memory
146 //   |                     |
147 //   .                     .  <- SP
148 //   |                     |
149 //   |                     |
150 //   +---------------------+  <- stack_end
151 //   |                     |
152 //   |  Gap                |
153 //   |                     |
154 //   +---------------------+  <- stack_begin
155 //   |                     |
156 //   | Protected region    |
157 //   |                     |
158 //   +---------------------+  <- lowest address of stack memory
159 //
160 // The stack always grows down in memory.  At the lowest address is a region of memory
161 // that is set mprotect(PROT_NONE).  Any attempt to read/write to this region will
162 // result in a segmentation fault signal.  At any point, the thread's SP will be somewhere
163 // between the stack_end and the highest address in stack memory.  An implicit stack
164 // overflow check is a read of memory at a certain offset below the current SP (4K typically).
165 // If the thread's SP is below the stack_end address this will be a read into the protected
166 // region.  If the SP is above the stack_end address, the thread is guaranteed to have
167 // at least 4K of space.  Because stack overflow checks are only performed in generated code,
168 // if the thread makes a call out to a native function (through JNI), that native function
169 // might only have 4K of memory (if the SP is adjacent to stack_end).
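//
// A rough sketch of what the implicit check amounts to (illustrative only; the real check is a
// single load emitted in generated code, and kProbeOffset is a made-up name for the probe
// distance, typically ~4K):
//
//   uint8_t* sp = GetCurrentStackPointer();        // hypothetical helper
//   volatile uint8_t probe = *(sp - kProbeOffset);
//   // If SP has dropped below stack_end, the probe lands in the gap/protected region, the load
//   // faults, and the fault handler raises StackOverflowError instead of crashing the process.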
170 
171 class Thread {
172  public:
173   static const size_t kStackOverflowImplicitCheckSize;
174   static constexpr bool kVerifyStack = kIsDebugBuild;
175 
176   // Creates a new native thread corresponding to the given managed peer.
177   // Used to implement Thread.start.
178   static void CreateNativeThread(JNIEnv* env, jobject peer, size_t stack_size, bool daemon);
179 
180   // Attaches the calling native thread to the runtime, returning the new native peer.
181   // Used to implement JNI AttachCurrentThread and AttachCurrentThreadAsDaemon calls.
182   static Thread* Attach(const char* thread_name, bool as_daemon, jobject thread_group,
183                         bool create_peer);
184   // Attaches the calling native thread to the runtime, returning the new native peer.
185   static Thread* Attach(const char* thread_name, bool as_daemon, jobject thread_peer);
186 
187   // Reset internal state of child thread after fork.
188   void InitAfterFork();
189 
190   // Get the currently executing thread, frequently referred to as 'self'. This call has reasonably
191   // high cost and so we favor passing self around when possible.
192   // TODO: mark as PURE so the compiler may coalesce and remove?
193   static Thread* Current();
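  // Illustrative sketch (not runtime code): hoist Current() out of hot paths and pass `self`.
  //
  //   Thread* self = Thread::Current();
  //   for (ArtMethod* method : methods) {
  //     ProcessMethod(self, method);  // ProcessMethod is a hypothetical callee
  //   }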
194 
195   // On a runnable thread, check for pending thread suspension request and handle if pending.
196   void AllowThreadSuspension() REQUIRES_SHARED(Locks::mutator_lock_);
197 
198   // Process pending thread suspension request and handle if pending.
199   void CheckSuspend() REQUIRES_SHARED(Locks::mutator_lock_);
200 
201   // Process a pending empty checkpoint if pending.
202   void CheckEmptyCheckpointFromWeakRefAccess(BaseMutex* cond_var_mutex);
203   void CheckEmptyCheckpointFromMutex();
204 
205   static Thread* FromManagedThread(const ScopedObjectAccessAlreadyRunnable& ts,
206                                    ObjPtr<mirror::Object> thread_peer)
207       REQUIRES(Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
208       REQUIRES_SHARED(Locks::mutator_lock_);
209   static Thread* FromManagedThread(const ScopedObjectAccessAlreadyRunnable& ts, jobject thread)
210       REQUIRES(Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
211       REQUIRES_SHARED(Locks::mutator_lock_);
212 
213   // Translates 172 to pAllocArrayFromCode and so on.
214   template<PointerSize size_of_pointers>
215   static void DumpThreadOffset(std::ostream& os, uint32_t offset);
216 
217   // Dumps a one-line summary of thread state (used for operator<<).
218   void ShortDump(std::ostream& os) const;
219 
220   // Dumps the detailed thread state and the thread stack (used for SIGQUIT).
221   void Dump(std::ostream& os,
222             bool dump_native_stack = true,
223             BacktraceMap* backtrace_map = nullptr,
224             bool force_dump_stack = false) const
225       REQUIRES_SHARED(Locks::mutator_lock_);
226 
227   void DumpJavaStack(std::ostream& os,
228                      bool check_suspended = true,
229                      bool dump_locks = true) const
230       REQUIRES_SHARED(Locks::mutator_lock_);
231 
232   // Dumps the SIGQUIT per-thread header. 'thread' can be null for a non-attached thread, in which
233   // case we use 'tid' to identify the thread, and we'll include as much information as we can.
234   static void DumpState(std::ostream& os, const Thread* thread, pid_t tid)
235       REQUIRES_SHARED(Locks::mutator_lock_);
236 
237   ThreadState GetState() const {
238     DCHECK_GE(tls32_.state_and_flags.as_struct.state, kTerminated);
239     DCHECK_LE(tls32_.state_and_flags.as_struct.state, kSuspended);
240     return static_cast<ThreadState>(tls32_.state_and_flags.as_struct.state);
241   }
242 
243   ThreadState SetState(ThreadState new_state);
244 
245   int GetSuspendCount() const REQUIRES(Locks::thread_suspend_count_lock_) {
246     return tls32_.suspend_count;
247   }
248 
249   int GetUserCodeSuspendCount() const REQUIRES(Locks::thread_suspend_count_lock_,
250                                                Locks::user_code_suspension_lock_) {
251     return tls32_.user_code_suspend_count;
252   }
253 
254   bool IsSuspended() const {
255     union StateAndFlags state_and_flags;
256     state_and_flags.as_int = tls32_.state_and_flags.as_int;
257     return state_and_flags.as_struct.state != kRunnable &&
258         (state_and_flags.as_struct.flags & kSuspendRequest) != 0;
259   }
260 
261   void DecrDefineClassCount() {
262     tls32_.define_class_counter--;
263   }
264 
265   void IncrDefineClassCount() {
266     tls32_.define_class_counter++;
267   }
268   uint32_t GetDefineClassCount() const {
269     return tls32_.define_class_counter;
270   }
271 
272   // If delta > 0 and (this != self or suspend_barrier is not null), this function may temporarily
273   // release thread_suspend_count_lock_ internally.
274   ALWAYS_INLINE
275   bool ModifySuspendCount(Thread* self,
276                           int delta,
277                           AtomicInteger* suspend_barrier,
278                           SuspendReason reason)
279       WARN_UNUSED
280       REQUIRES(Locks::thread_suspend_count_lock_);
281 
282   // Requests a checkpoint closure to run on another thread. The closure will be run when the thread
283   // gets suspended. This will return true if the closure was added and will (eventually) be
284   // executed. It returns false otherwise.
285   //
286   // Since multiple closures can be queued and some closures can delay other threads from running, no
287   // closure should attempt to suspend another thread while running.
288   // TODO We should add some debug option that verifies this.
289   bool RequestCheckpoint(Closure* function)
290       REQUIRES(Locks::thread_suspend_count_lock_);
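  // Sketch of a checkpoint closure (illustrative only; assumes the usual Closure::Run(Thread*)
  // interface and is not a closure the runtime actually defines):
  //
  //   class DumpClosure : public Closure {
  //    public:
  //     void Run(Thread* thread) override {
  //       thread->ShortDump(std::cout);  // does its work without suspending other threads
  //     }
  //   };
  //   // With thread_suspend_count_lock_ held:
  //   //   bool queued = target->RequestCheckpoint(&dump_closure);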
291 
292   // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution. This is
293   // due to the fact that Thread::Current() needs to go to sleep to allow the targeted thread to
294   // execute the checkpoint for us if it is Runnable. The suspend_state is the state that the thread
295   // will go into while it is awaiting the checkpoint to be run.
296   // NB Passing ThreadState::kRunnable may cause the current thread to wait in a condition variable
297   // while holding the mutator_lock_.  Callers should ensure that this will not cause any problems
298   // for the closure or the rest of the system.
299   // NB Since multiple closures can be queued and some closures can delay other threads from running,
300   // no closure should attempt to suspend another thread while running.
301   bool RequestSynchronousCheckpoint(Closure* function,
302                                     ThreadState suspend_state = ThreadState::kWaiting)
303       REQUIRES_SHARED(Locks::mutator_lock_)
304       RELEASE(Locks::thread_list_lock_)
305       REQUIRES(!Locks::thread_suspend_count_lock_);
306 
307   bool RequestEmptyCheckpoint()
308       REQUIRES(Locks::thread_suspend_count_lock_);
309 
310   void SetFlipFunction(Closure* function);
311   Closure* GetFlipFunction();
312 
313   gc::accounting::AtomicStack<mirror::Object>* GetThreadLocalMarkStack() {
314     CHECK(kUseReadBarrier);
315     return tlsPtr_.thread_local_mark_stack;
316   }
317   void SetThreadLocalMarkStack(gc::accounting::AtomicStack<mirror::Object>* stack) {
318     CHECK(kUseReadBarrier);
319     tlsPtr_.thread_local_mark_stack = stack;
320   }
321 
322   // Called when the thread detects that thread_suspend_count_ is non-zero. Gives up its share of the
323   // mutator_lock_ and waits until it is resumed and thread_suspend_count_ is zero.
324   void FullSuspendCheck()
325       REQUIRES(!Locks::thread_suspend_count_lock_)
326       REQUIRES_SHARED(Locks::mutator_lock_);
327 
328   // Transition from non-runnable to runnable state acquiring share on mutator_lock_.
329   ALWAYS_INLINE ThreadState TransitionFromSuspendedToRunnable()
330       REQUIRES(!Locks::thread_suspend_count_lock_)
331       SHARED_LOCK_FUNCTION(Locks::mutator_lock_);
332 
333   // Transition from runnable into a state where mutator privileges are denied. Releases share of
334   // mutator lock.
335   ALWAYS_INLINE void TransitionFromRunnableToSuspended(ThreadState new_state)
336       REQUIRES(!Locks::thread_suspend_count_lock_, !Roles::uninterruptible_)
337       UNLOCK_FUNCTION(Locks::mutator_lock_);
338 
339   // Once called thread suspension will cause an assertion failure.
340   const char* StartAssertNoThreadSuspension(const char* cause) ACQUIRE(Roles::uninterruptible_) {
341     Roles::uninterruptible_.Acquire();  // No-op.
342     if (kIsDebugBuild) {
343       CHECK(cause != nullptr);
344       const char* previous_cause = tlsPtr_.last_no_thread_suspension_cause;
345       tls32_.no_thread_suspension++;
346       tlsPtr_.last_no_thread_suspension_cause = cause;
347       return previous_cause;
348     } else {
349       return nullptr;
350     }
351   }
352 
353   // End region where no thread suspension is expected.
354   void EndAssertNoThreadSuspension(const char* old_cause) RELEASE(Roles::uninterruptible_) {
355     if (kIsDebugBuild) {
356       CHECK(old_cause != nullptr || tls32_.no_thread_suspension == 1);
357       CHECK_GT(tls32_.no_thread_suspension, 0U);
358       tls32_.no_thread_suspension--;
359       tlsPtr_.last_no_thread_suspension_cause = old_cause;
360     }
361     Roles::uninterruptible_.Release();  // No-op.
362   }
363 
364   // End region where no thread suspension is expected. Returns the current open region in case we
365   // want to reopen it. Used for ScopedAllowThreadSuspension. Not supported if no_thread_suspension
366   // is larger than one.
367   const char* EndAssertNoThreadSuspension() RELEASE(Roles::uninterruptible_) WARN_UNUSED {
368     const char* ret = nullptr;
369     if (kIsDebugBuild) {
370       CHECK_EQ(tls32_.no_thread_suspension, 1u);
371       tls32_.no_thread_suspension--;
372       ret = tlsPtr_.last_no_thread_suspension_cause;
373       tlsPtr_.last_no_thread_suspension_cause = nullptr;
374     }
375     Roles::uninterruptible_.Release();  // No-op.
376     return ret;
377   }
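  // Typical pairing, shown as a sketch (the runtime normally uses a scoped helper such as
  // ScopedAssertNoThreadSuspension rather than calling these directly):
  //
  //   const char* old_cause = self->StartAssertNoThreadSuspension("my critical region");
  //   ...  // code that must not suspend
  //   self->EndAssertNoThreadSuspension(old_cause);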
378 
379   void AssertThreadSuspensionIsAllowable(bool check_locks = true) const;
380 
381   // Return true if thread suspension is allowable.
382   bool IsThreadSuspensionAllowable() const;
383 
384   bool IsDaemon() const {
385     return tls32_.daemon;
386   }
387 
388   size_t NumberOfHeldMutexes() const;
389 
390   bool HoldsLock(ObjPtr<mirror::Object> object) const REQUIRES_SHARED(Locks::mutator_lock_);
391 
392   /*
393    * Changes the priority of this thread to match that of the java.lang.Thread object.
394    *
395    * We map a priority value from 1-10 to Linux "nice" values, where lower
396    * numbers indicate higher priority.
397    */
398   void SetNativePriority(int newPriority);
399 
400   /*
401    * Returns the priority of this thread by querying the system.
402    * This is useful when attaching a thread through JNI.
403    *
404    * Returns a value from 1 to 10 (compatible with java.lang.Thread values).
405    */
406   int GetNativePriority() const;
407 
408   // Guaranteed to be non-zero.
409   uint32_t GetThreadId() const {
410     return tls32_.thin_lock_thread_id;
411   }
412 
413   pid_t GetTid() const {
414     return tls32_.tid;
415   }
416 
417   // Returns the java.lang.Thread's name, or null if this Thread* doesn't have a peer.
418   ObjPtr<mirror::String> GetThreadName() const REQUIRES_SHARED(Locks::mutator_lock_);
419 
420   // Sets 'name' to the java.lang.Thread's name. This requires no transition to managed code,
421   // allocation, or locking.
422   void GetThreadName(std::string& name) const;
423 
424   // Sets the thread's name.
425   void SetThreadName(const char* name) REQUIRES_SHARED(Locks::mutator_lock_);
426 
427   // Returns the thread-specific CPU-time clock in microseconds or -1 if unavailable.
428   uint64_t GetCpuMicroTime() const;
429 
430   mirror::Object* GetPeer() const REQUIRES_SHARED(Locks::mutator_lock_) {
431     DCHECK(Thread::Current() == this) << "Use GetPeerFromOtherThread instead";
432     CHECK(tlsPtr_.jpeer == nullptr);
433     return tlsPtr_.opeer;
434   }
435   // GetPeer is not safe if called on another thread in the middle of the CC thread flip, since the
436   // thread's stack may not have been flipped yet and the peer may be a from-space (stale) ref.
437   // This function will explicitly mark/forward it.
438   mirror::Object* GetPeerFromOtherThread() const REQUIRES_SHARED(Locks::mutator_lock_);
439 
440   bool HasPeer() const {
441     return tlsPtr_.jpeer != nullptr || tlsPtr_.opeer != nullptr;
442   }
443 
444   RuntimeStats* GetStats() {
445     return &tls64_.stats;
446   }
447 
448   bool IsStillStarting() const;
449 
450   bool IsExceptionPending() const {
451     return tlsPtr_.exception != nullptr;
452   }
453 
454   bool IsAsyncExceptionPending() const {
455     return tlsPtr_.async_exception != nullptr;
456   }
457 
458   mirror::Throwable* GetException() const REQUIRES_SHARED(Locks::mutator_lock_) {
459     return tlsPtr_.exception;
460   }
461 
462   void AssertPendingException() const;
463   void AssertPendingOOMException() const REQUIRES_SHARED(Locks::mutator_lock_);
464   void AssertNoPendingException() const;
465   void AssertNoPendingExceptionForNewException(const char* msg) const;
466 
467   void SetException(ObjPtr<mirror::Throwable> new_exception) REQUIRES_SHARED(Locks::mutator_lock_);
468 
469   // Set an exception that is asynchronously thrown from a different thread. This will be checked
470   // periodically and might overwrite the current 'Exception'. This can only be called from a
471   // checkpoint.
472   //
473   // The caller should also make sure that the thread has been deoptimized so that the exception
474   // could be detected on back-edges.
475   void SetAsyncException(ObjPtr<mirror::Throwable> new_exception)
476       REQUIRES_SHARED(Locks::mutator_lock_);
477 
478   void ClearException() REQUIRES_SHARED(Locks::mutator_lock_) {
479     tlsPtr_.exception = nullptr;
480   }
481 
482   // Move the current async-exception to the main exception. This should be called when the current
483   // thread is ready to deal with any async exceptions. Returns true if there is an async exception
484   // that needs to be dealt with, false otherwise.
485   bool ObserveAsyncException() REQUIRES_SHARED(Locks::mutator_lock_);
486 
487   // Find the catch block and perform a long jump to the appropriate exception handler.
488   NO_RETURN void QuickDeliverException() REQUIRES_SHARED(Locks::mutator_lock_);
489 
490   Context* GetLongJumpContext();
491   void ReleaseLongJumpContext(Context* context) {
492     if (tlsPtr_.long_jump_context != nullptr) {
493       ReleaseLongJumpContextInternal();
494     }
495     tlsPtr_.long_jump_context = context;
496   }
497 
498   // Get the current method and dex pc. If there are errors in retrieving the dex pc, this will
499   // abort the runtime iff abort_on_error is true.
500   ArtMethod* GetCurrentMethod(uint32_t* dex_pc,
501                               bool check_suspended = true,
502                               bool abort_on_error = true) const
503       REQUIRES_SHARED(Locks::mutator_lock_);
504 
505   // Returns whether the given exception was thrown by the current Java method being executed
506   // (Note that this includes native Java methods).
507   bool IsExceptionThrownByCurrentMethod(ObjPtr<mirror::Throwable> exception) const
508       REQUIRES_SHARED(Locks::mutator_lock_);
509 
510   void SetTopOfStack(ArtMethod** top_method) {
511     tlsPtr_.managed_stack.SetTopQuickFrame(top_method);
512   }
513 
514   void SetTopOfStackTagged(ArtMethod** top_method) {
515     tlsPtr_.managed_stack.SetTopQuickFrameTagged(top_method);
516   }
517 
518   void SetTopOfShadowStack(ShadowFrame* top) {
519     tlsPtr_.managed_stack.SetTopShadowFrame(top);
520   }
521 
522   bool HasManagedStack() const {
523     return tlsPtr_.managed_stack.HasTopQuickFrame() || tlsPtr_.managed_stack.HasTopShadowFrame();
524   }
525 
526   // If 'msg' is null, no detail message is set.
527   void ThrowNewException(const char* exception_class_descriptor, const char* msg)
528       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
529 
530   // If 'msg' is null, no detail message is set. An exception must be pending, and will be
531   // used as the new exception's cause.
532   void ThrowNewWrappedException(const char* exception_class_descriptor, const char* msg)
533       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
534 
535   void ThrowNewExceptionF(const char* exception_class_descriptor, const char* fmt, ...)
536       __attribute__((format(printf, 3, 4)))
537       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
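  // Illustrative sketch: exception class descriptors are given in JNI descriptor form, e.g.
  //
  //   self->ThrowNewExceptionF("Ljava/lang/IllegalArgumentException;",
  //                            "Unexpected value: %d", value);
  //
  // (Descriptor and message above are made up for illustration.)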
538 
539   void ThrowNewExceptionV(const char* exception_class_descriptor, const char* fmt, va_list ap)
540       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
541 
542   // OutOfMemoryError is special, because we need to pre-allocate an instance.
543   // Only the GC should call this.
544   void ThrowOutOfMemoryError(const char* msg) REQUIRES_SHARED(Locks::mutator_lock_)
545       REQUIRES(!Roles::uninterruptible_);
546 
547   static void Startup();
548   static void FinishStartup();
549   static void Shutdown();
550 
551   // Notify this thread's thread-group that this thread has started.
552   // Note: the given thread-group is used as a fast path and verified in debug build. If the value
553   //       is null, the thread's thread-group is loaded from the peer.
554   void NotifyThreadGroup(ScopedObjectAccessAlreadyRunnable& soa, jobject thread_group = nullptr)
555       REQUIRES_SHARED(Locks::mutator_lock_);
556 
557   // JNI methods
558   JNIEnvExt* GetJniEnv() const {
559     return tlsPtr_.jni_env;
560   }
561 
562   // Convert a jobject into an ObjPtr<mirror::Object>.
563   ObjPtr<mirror::Object> DecodeJObject(jobject obj) const REQUIRES_SHARED(Locks::mutator_lock_);
564   // Checks if the weak global ref has been cleared by the GC without decoding it.
565   bool IsJWeakCleared(jweak obj) const REQUIRES_SHARED(Locks::mutator_lock_);
566 
567   mirror::Object* GetMonitorEnterObject() const REQUIRES_SHARED(Locks::mutator_lock_) {
568     return tlsPtr_.monitor_enter_object;
569   }
570 
571   void SetMonitorEnterObject(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
572     tlsPtr_.monitor_enter_object = obj;
573   }
574 
575   // Implements java.lang.Thread.interrupted.
576   bool Interrupted();
577   // Implements java.lang.Thread.isInterrupted.
578   bool IsInterrupted();
579   void Interrupt(Thread* self) REQUIRES(!wait_mutex_);
580   void SetInterrupted(bool i) {
581     tls32_.interrupted.store(i, std::memory_order_seq_cst);
582   }
583   void Notify() REQUIRES(!wait_mutex_);
584 
585   ALWAYS_INLINE void PoisonObjectPointers() {
586     ++poison_object_cookie_;
587   }
588 
589   ALWAYS_INLINE static void PoisonObjectPointersIfDebug();
590 
591   ALWAYS_INLINE uintptr_t GetPoisonObjectCookie() const {
592     return poison_object_cookie_;
593   }
594 
595   // Parking for 0ns of relative time means an untimed park; a negative time (though this should be
596   // handled in Java code) returns immediately.
597   void Park(bool is_absolute, int64_t time) REQUIRES_SHARED(Locks::mutator_lock_);
598   void Unpark();
599 
600  private:
601   void NotifyLocked(Thread* self) REQUIRES(wait_mutex_);
602 
603  public:
604   Mutex* GetWaitMutex() const LOCK_RETURNED(wait_mutex_) {
605     return wait_mutex_;
606   }
607 
608   ConditionVariable* GetWaitConditionVariable() const REQUIRES(wait_mutex_) {
609     return wait_cond_;
610   }
611 
612   Monitor* GetWaitMonitor() const REQUIRES(wait_mutex_) {
613     return wait_monitor_;
614   }
615 
616   void SetWaitMonitor(Monitor* mon) REQUIRES(wait_mutex_) {
617     wait_monitor_ = mon;
618   }
619 
620   // Waiter link-list support.
621   Thread* GetWaitNext() const {
622     return tlsPtr_.wait_next;
623   }
624 
625   void SetWaitNext(Thread* next) {
626     tlsPtr_.wait_next = next;
627   }
628 
629   jobject GetClassLoaderOverride() {
630     return tlsPtr_.class_loader_override;
631   }
632 
633   void SetClassLoaderOverride(jobject class_loader_override);
634 
635   // Create the internal representation of a stack trace, which is more time- and space-efficient to
636   // compute than the StackTraceElement[].
637   jobject CreateInternalStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const
638       REQUIRES_SHARED(Locks::mutator_lock_);
639 
640   // Convert an internal stack trace representation (returned by CreateInternalStackTrace) to a
641   // StackTraceElement[]. If output_array is null, a new array is created, otherwise as many
642   // frames as will fit are written into the given array. If stack_depth is non-null, it's updated
643   // with the number of valid frames in the returned array.
644   static jobjectArray InternalStackTraceToStackTraceElementArray(
645       const ScopedObjectAccessAlreadyRunnable& soa, jobject internal,
646       jobjectArray output_array = nullptr, int* stack_depth = nullptr)
647       REQUIRES_SHARED(Locks::mutator_lock_);
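  // Sketch of the intended pairing (illustrative only):
  //
  //   jobject internal = soa.Self()->CreateInternalStackTrace(soa);
  //   jobjectArray elements =
  //       Thread::InternalStackTraceToStackTraceElementArray(soa, internal);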
648 
649   jobjectArray CreateAnnotatedStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const
650       REQUIRES_SHARED(Locks::mutator_lock_);
651 
652   bool HasDebuggerShadowFrames() const {
653     return tlsPtr_.frame_id_to_shadow_frame != nullptr;
654   }
655 
656   void VisitRoots(RootVisitor* visitor, VisitRootFlags flags)
657       REQUIRES_SHARED(Locks::mutator_lock_);
658 
659   void VisitReflectiveTargets(ReflectiveValueVisitor* visitor)
660       REQUIRES(Locks::mutator_lock_);
661 
662   void VerifyStack() REQUIRES_SHARED(Locks::mutator_lock_) {
663     if (kVerifyStack) {
664       VerifyStackImpl();
665     }
666   }
667 
668   //
669   // Offsets of various members of native Thread class, used by compiled code.
670   //
671 
672   template<PointerSize pointer_size>
673   static constexpr ThreadOffset<pointer_size> ThinLockIdOffset() {
674     return ThreadOffset<pointer_size>(
675         OFFSETOF_MEMBER(Thread, tls32_) +
676         OFFSETOF_MEMBER(tls_32bit_sized_values, thin_lock_thread_id));
677   }
678 
679   template<PointerSize pointer_size>
680   static constexpr ThreadOffset<pointer_size> InterruptedOffset() {
681     return ThreadOffset<pointer_size>(
682         OFFSETOF_MEMBER(Thread, tls32_) +
683         OFFSETOF_MEMBER(tls_32bit_sized_values, interrupted));
684   }
685 
686   template<PointerSize pointer_size>
687   static constexpr ThreadOffset<pointer_size> ThreadFlagsOffset() {
688     return ThreadOffset<pointer_size>(
689         OFFSETOF_MEMBER(Thread, tls32_) +
690         OFFSETOF_MEMBER(tls_32bit_sized_values, state_and_flags));
691   }
692 
693   template<PointerSize pointer_size>
694   static constexpr ThreadOffset<pointer_size> UseMterpOffset() {
695     return ThreadOffset<pointer_size>(
696         OFFSETOF_MEMBER(Thread, tls32_) +
697         OFFSETOF_MEMBER(tls_32bit_sized_values, use_mterp));
698   }
699 
700   template<PointerSize pointer_size>
701   static constexpr ThreadOffset<pointer_size> IsGcMarkingOffset() {
702     return ThreadOffset<pointer_size>(
703         OFFSETOF_MEMBER(Thread, tls32_) +
704         OFFSETOF_MEMBER(tls_32bit_sized_values, is_gc_marking));
705   }
706 
707   static constexpr size_t IsGcMarkingSize() {
708     return sizeof(tls32_.is_gc_marking);
709   }
710 
711   // Deoptimize the Java stack.
712   void DeoptimizeWithDeoptimizationException(JValue* result) REQUIRES_SHARED(Locks::mutator_lock_);
713 
714  private:
715   template<PointerSize pointer_size>
716   static constexpr ThreadOffset<pointer_size> ThreadOffsetFromTlsPtr(size_t tls_ptr_offset) {
717     size_t base = OFFSETOF_MEMBER(Thread, tlsPtr_);
718     size_t scale = (pointer_size > kRuntimePointerSize) ?
719       static_cast<size_t>(pointer_size) / static_cast<size_t>(kRuntimePointerSize) : 1;
720     size_t shrink = (kRuntimePointerSize > pointer_size) ?
721       static_cast<size_t>(kRuntimePointerSize) / static_cast<size_t>(pointer_size) : 1;
722     return ThreadOffset<pointer_size>(base + ((tls_ptr_offset * scale) / shrink));
723   }
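  // Worked example (illustrative): with a 64-bit runtime (kRuntimePointerSize == k64) computing
  // offsets for a 32-bit target (pointer_size == k32), scale == 1 and shrink == 2, so an offset
  // expressed in 64-bit pointer slots is halved; when runtime and target pointer sizes match,
  // both factors are 1 and tls_ptr_offset is used unchanged.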
724 
725  public:
726   template<PointerSize pointer_size>
727   static constexpr ThreadOffset<pointer_size> QuickEntryPointOffset(
728       size_t quick_entrypoint_offset) {
729     return ThreadOffsetFromTlsPtr<pointer_size>(
730         OFFSETOF_MEMBER(tls_ptr_sized_values, quick_entrypoints) + quick_entrypoint_offset);
731   }
732 
733   static constexpr uint32_t QuickEntryPointOffsetWithSize(size_t quick_entrypoint_offset,
734                                                           PointerSize pointer_size) {
735     if (pointer_size == PointerSize::k32) {
736       return QuickEntryPointOffset<PointerSize::k32>(quick_entrypoint_offset).
737           Uint32Value();
738     } else {
739       return QuickEntryPointOffset<PointerSize::k64>(quick_entrypoint_offset).
740           Uint32Value();
741     }
742   }
743 
744   template<PointerSize pointer_size>
745   static ThreadOffset<pointer_size> JniEntryPointOffset(size_t jni_entrypoint_offset) {
746     return ThreadOffsetFromTlsPtr<pointer_size>(
747         OFFSETOF_MEMBER(tls_ptr_sized_values, jni_entrypoints) + jni_entrypoint_offset);
748   }
749 
750   // Return the entry point offset integer value for ReadBarrierMarkRegX, where X is `reg`.
751   template <PointerSize pointer_size>
752   static constexpr int32_t ReadBarrierMarkEntryPointsOffset(size_t reg) {
753     // The entry point list defines 30 ReadBarrierMarkRegX entry points.
754     DCHECK_LT(reg, 30u);
755     // The ReadBarrierMarkRegX entry points are ordered by increasing
756   // register number in Thread::tlsPtr_.quick_entrypoints.
757     return QUICK_ENTRYPOINT_OFFSET(pointer_size, pReadBarrierMarkReg00).Int32Value()
758         + static_cast<size_t>(pointer_size) * reg;
759   }
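  // Worked example (illustrative): for pointer_size == PointerSize::k64 and reg == 3, this is the
  // offset of pReadBarrierMarkReg00 plus 8 * 3 bytes, i.e. the slot for pReadBarrierMarkReg03.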
760 
761   template<PointerSize pointer_size>
762   static constexpr ThreadOffset<pointer_size> SelfOffset() {
763     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, self));
764   }
765 
766   template<PointerSize pointer_size>
767   static constexpr ThreadOffset<pointer_size> MterpCurrentIBaseOffset() {
768     return ThreadOffsetFromTlsPtr<pointer_size>(
769         OFFSETOF_MEMBER(tls_ptr_sized_values, mterp_current_ibase));
770   }
771 
772   template<PointerSize pointer_size>
773   static constexpr ThreadOffset<pointer_size> ExceptionOffset() {
774     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, exception));
775   }
776 
777   template<PointerSize pointer_size>
778   static constexpr ThreadOffset<pointer_size> PeerOffset() {
779     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, opeer));
780   }
781 
782 
783   template<PointerSize pointer_size>
784   static constexpr ThreadOffset<pointer_size> CardTableOffset() {
785     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, card_table));
786   }
787 
788   template<PointerSize pointer_size>
789   static constexpr ThreadOffset<pointer_size> ThreadSuspendTriggerOffset() {
790     return ThreadOffsetFromTlsPtr<pointer_size>(
791         OFFSETOF_MEMBER(tls_ptr_sized_values, suspend_trigger));
792   }
793 
794   template<PointerSize pointer_size>
795   static constexpr ThreadOffset<pointer_size> ThreadLocalPosOffset() {
796     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
797                                                                 thread_local_pos));
798   }
799 
800   template<PointerSize pointer_size>
801   static constexpr ThreadOffset<pointer_size> ThreadLocalEndOffset() {
802     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
803                                                                 thread_local_end));
804   }
805 
806   template<PointerSize pointer_size>
807   static constexpr ThreadOffset<pointer_size> ThreadLocalObjectsOffset() {
808     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
809                                                                 thread_local_objects));
810   }
811 
812   template<PointerSize pointer_size>
813   static constexpr ThreadOffset<pointer_size> RosAllocRunsOffset() {
814     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
815                                                                 rosalloc_runs));
816   }
817 
818   template<PointerSize pointer_size>
819   static constexpr ThreadOffset<pointer_size> ThreadLocalAllocStackTopOffset() {
820     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
821                                                                 thread_local_alloc_stack_top));
822   }
823 
824   template<PointerSize pointer_size>
825   static constexpr ThreadOffset<pointer_size> ThreadLocalAllocStackEndOffset() {
826     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
827                                                                 thread_local_alloc_stack_end));
828   }
829 
830   // Size of stack less any space reserved for stack overflow
831   size_t GetStackSize() const {
832     return tlsPtr_.stack_size - (tlsPtr_.stack_end - tlsPtr_.stack_begin);
833   }
834 
835   ALWAYS_INLINE uint8_t* GetStackEndForInterpreter(bool implicit_overflow_check) const;
836 
837   uint8_t* GetStackEnd() const {
838     return tlsPtr_.stack_end;
839   }
840 
841   // Set the stack end to the value to be used while handling a stack overflow.
842   void SetStackEndForStackOverflow() REQUIRES_SHARED(Locks::mutator_lock_);
843 
844   // Set the stack end to the value to be used during regular execution.
845   ALWAYS_INLINE void ResetDefaultStackEnd();
846 
847   bool IsHandlingStackOverflow() const {
848     return tlsPtr_.stack_end == tlsPtr_.stack_begin;
849   }
850 
851   template<PointerSize pointer_size>
852   static constexpr ThreadOffset<pointer_size> StackEndOffset() {
853     return ThreadOffsetFromTlsPtr<pointer_size>(
854         OFFSETOF_MEMBER(tls_ptr_sized_values, stack_end));
855   }
856 
857   template<PointerSize pointer_size>
858   static constexpr ThreadOffset<pointer_size> JniEnvOffset() {
859     return ThreadOffsetFromTlsPtr<pointer_size>(
860         OFFSETOF_MEMBER(tls_ptr_sized_values, jni_env));
861   }
862 
863   template<PointerSize pointer_size>
864   static constexpr ThreadOffset<pointer_size> TopOfManagedStackOffset() {
865     return ThreadOffsetFromTlsPtr<pointer_size>(
866         OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) +
867         ManagedStack::TaggedTopQuickFrameOffset());
868   }
869 
870   const ManagedStack* GetManagedStack() const {
871     return &tlsPtr_.managed_stack;
872   }
873 
874   // Linked list recording fragments of managed stack.
875   void PushManagedStackFragment(ManagedStack* fragment) {
876     tlsPtr_.managed_stack.PushManagedStackFragment(fragment);
877   }
878   void PopManagedStackFragment(const ManagedStack& fragment) {
879     tlsPtr_.managed_stack.PopManagedStackFragment(fragment);
880   }
881 
882   ALWAYS_INLINE ShadowFrame* PushShadowFrame(ShadowFrame* new_top_frame);
883   ALWAYS_INLINE ShadowFrame* PopShadowFrame();
884 
885   template<PointerSize pointer_size>
886   static constexpr ThreadOffset<pointer_size> TopShadowFrameOffset() {
887     return ThreadOffsetFromTlsPtr<pointer_size>(
888         OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) +
889         ManagedStack::TopShadowFrameOffset());
890   }
891 
892   // Is the given obj in this thread's stack indirect reference table?
893   bool HandleScopeContains(jobject obj) const;
894 
895   void HandleScopeVisitRoots(RootVisitor* visitor, pid_t thread_id)
896       REQUIRES_SHARED(Locks::mutator_lock_);
897 
898   BaseHandleScope* GetTopHandleScope() {
899     return tlsPtr_.top_handle_scope;
900   }
901 
902   void PushHandleScope(BaseHandleScope* handle_scope) {
903     DCHECK_EQ(handle_scope->GetLink(), tlsPtr_.top_handle_scope);
904     tlsPtr_.top_handle_scope = handle_scope;
905   }
906 
907   BaseHandleScope* PopHandleScope() {
908     BaseHandleScope* handle_scope = tlsPtr_.top_handle_scope;
909     DCHECK(handle_scope != nullptr);
910     tlsPtr_.top_handle_scope = tlsPtr_.top_handle_scope->GetLink();
911     return handle_scope;
912   }
913 
914   template<PointerSize pointer_size>
915   static constexpr ThreadOffset<pointer_size> TopHandleScopeOffset() {
916     return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
917                                                                 top_handle_scope));
918   }
919 
920   BaseReflectiveHandleScope* GetTopReflectiveHandleScope() {
921     return tlsPtr_.top_reflective_handle_scope;
922   }
923 
924   void PushReflectiveHandleScope(BaseReflectiveHandleScope* scope) {
925     DCHECK_EQ(scope->GetLink(), tlsPtr_.top_reflective_handle_scope);
926     DCHECK_EQ(scope->GetThread(), this);
927     tlsPtr_.top_reflective_handle_scope = scope;
928   }
929 
930   BaseReflectiveHandleScope* PopReflectiveHandleScope() {
931     BaseReflectiveHandleScope* handle_scope = tlsPtr_.top_reflective_handle_scope;
932     DCHECK(handle_scope != nullptr);
933     tlsPtr_.top_reflective_handle_scope = tlsPtr_.top_reflective_handle_scope->GetLink();
934     return handle_scope;
935   }
936 
937   // Indicates whether this thread is ready to invoke a method for debugging. This
938   // is only true if the thread has been suspended by a debug event.
939   bool IsReadyForDebugInvoke() const {
940     return tls32_.ready_for_debug_invoke;
941   }
942 
943   void SetReadyForDebugInvoke(bool ready) {
944     tls32_.ready_for_debug_invoke = ready;
945   }
946 
947   bool IsDebugMethodEntry() const {
948     return tls32_.debug_method_entry_;
949   }
950 
951   void SetDebugMethodEntry() {
952     tls32_.debug_method_entry_ = true;
953   }
954 
955   void ClearDebugMethodEntry() {
956     tls32_.debug_method_entry_ = false;
957   }
958 
959   bool GetIsGcMarking() const {
960     CHECK(kUseReadBarrier);
961     return tls32_.is_gc_marking;
962   }
963 
964   void SetIsGcMarkingAndUpdateEntrypoints(bool is_marking);
965 
966   bool GetWeakRefAccessEnabled() const {
967     CHECK(kUseReadBarrier);
968     return tls32_.weak_ref_access_enabled;
969   }
970 
971   void SetWeakRefAccessEnabled(bool enabled) {
972     CHECK(kUseReadBarrier);
973     tls32_.weak_ref_access_enabled = enabled;
974   }
975 
976   uint32_t GetDisableThreadFlipCount() const {
977     CHECK(kUseReadBarrier);
978     return tls32_.disable_thread_flip_count;
979   }
980 
981   void IncrementDisableThreadFlipCount() {
982     CHECK(kUseReadBarrier);
983     ++tls32_.disable_thread_flip_count;
984   }
985 
986   void DecrementDisableThreadFlipCount() {
987     CHECK(kUseReadBarrier);
988     DCHECK_GT(tls32_.disable_thread_flip_count, 0U);
989     --tls32_.disable_thread_flip_count;
990   }
991 
992   // Returns true if the thread is a runtime thread (eg from a ThreadPool).
993   bool IsRuntimeThread() const {
994     return is_runtime_thread_;
995   }
996 
997   void SetIsRuntimeThread(bool is_runtime_thread) {
998     is_runtime_thread_ = is_runtime_thread;
999   }
1000 
1001   uint32_t CorePlatformApiCookie() {
1002     return core_platform_api_cookie_;
1003   }
1004 
1005   void SetCorePlatformApiCookie(uint32_t cookie) {
1006     core_platform_api_cookie_ = cookie;
1007   }
1008 
1009   // Returns true if the thread is allowed to load java classes.
1010   bool CanLoadClasses() const;
1011 
1012   // Returns the fake exception used to activate deoptimization.
1013   static mirror::Throwable* GetDeoptimizationException() {
1014     // Note that the mirror::Throwable must be aligned to kObjectAlignment or else it cannot be
1015     // represented by ObjPtr.
1016     return reinterpret_cast<mirror::Throwable*>(0x100);
1017   }
1018 
1019   // Currently deoptimization invokes the verifier, which can trigger class loading
1020   // and execute Java code, so there might be nested deoptimizations happening.
1021   // We need to save the ongoing deoptimization shadow frames and return
1022   // values on stacks.
1023   // 'from_code' denotes whether the deoptimization was explicitly made from
1024   // compiled code.
1025   // 'method_type' contains info on whether deoptimization should advance
1026   // dex_pc.
1027   void PushDeoptimizationContext(const JValue& return_value,
1028                                  bool is_reference,
1029                                  ObjPtr<mirror::Throwable> exception,
1030                                  bool from_code,
1031                                  DeoptimizationMethodType method_type)
1032       REQUIRES_SHARED(Locks::mutator_lock_);
1033   void PopDeoptimizationContext(JValue* result,
1034                                 ObjPtr<mirror::Throwable>* exception,
1035                                 bool* from_code,
1036                                 DeoptimizationMethodType* method_type)
1037       REQUIRES_SHARED(Locks::mutator_lock_);
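  // Sketch of the intended pairing (illustrative only; real callers sit in the deoptimization
  // and exception-delivery paths):
  //
  //   self->PushDeoptimizationContext(return_value, /* is_reference= */ false, exception,
  //                                   /* from_code= */ false, DeoptimizationMethodType::kDefault);
  //   ...
  //   JValue result;
  //   ObjPtr<mirror::Throwable> pending;
  //   bool from_code;
  //   DeoptimizationMethodType method_type;
  //   self->PopDeoptimizationContext(&result, &pending, &from_code, &method_type);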
1038   void AssertHasDeoptimizationContext()
1039       REQUIRES_SHARED(Locks::mutator_lock_);
1040   void PushStackedShadowFrame(ShadowFrame* sf, StackedShadowFrameType type);
1041   ShadowFrame* PopStackedShadowFrame(StackedShadowFrameType type, bool must_be_present = true);
1042 
1043   // For debugger, find the shadow frame that corresponds to a frame id.
1044   // Or return null if there is none.
1045   ShadowFrame* FindDebuggerShadowFrame(size_t frame_id)
1046       REQUIRES_SHARED(Locks::mutator_lock_);
1047   // For debugger, find the bool array that keeps track of the updated vreg set
1048   // for a frame id.
1049   bool* GetUpdatedVRegFlags(size_t frame_id) REQUIRES_SHARED(Locks::mutator_lock_);
1050   // For debugger, find the shadow frame that corresponds to a frame id. If
1051   // one doesn't exist yet, create one and track it in frame_id_to_shadow_frame.
1052   ShadowFrame* FindOrCreateDebuggerShadowFrame(size_t frame_id,
1053                                                uint32_t num_vregs,
1054                                                ArtMethod* method,
1055                                                uint32_t dex_pc)
1056       REQUIRES_SHARED(Locks::mutator_lock_);
1057 
1058   // Delete the entry that maps from frame_id to shadow_frame.
1059   void RemoveDebuggerShadowFrameMapping(size_t frame_id)
1060       REQUIRES_SHARED(Locks::mutator_lock_);
1061 
1062   // While getting this map requires holding the mutator lock in shared mode, manipulating it
1063   // should actually follow these rules:
1064   // (1) The owner of this map (the thread) can change it with its mutator lock.
1065   // (2) Other threads can read this map when the owner is suspended and they
1066   //     hold the mutator lock.
1067   // (3) Other threads can change this map when owning the mutator lock exclusively.
1068   //
1069   // The reason why (3) needs the mutator lock exclusively (and not just having
1070   // the owner suspended) is that we don't want other threads to concurrently read the map.
1071   //
1072   // TODO: Add a class abstraction to express these rules.
1073   std::map<uintptr_t, instrumentation::InstrumentationStackFrame>* GetInstrumentationStack()
1074       REQUIRES_SHARED(Locks::mutator_lock_) {
1075     return tlsPtr_.instrumentation_stack;
1076   }
1077 
1078   std::vector<ArtMethod*>* GetStackTraceSample() const {
1079     DCHECK(!IsAotCompiler());
1080     return tlsPtr_.deps_or_stack_trace_sample.stack_trace_sample;
1081   }
1082 
1083   void SetStackTraceSample(std::vector<ArtMethod*>* sample) {
1084     DCHECK(!IsAotCompiler());
1085     tlsPtr_.deps_or_stack_trace_sample.stack_trace_sample = sample;
1086   }
1087 
1088   verifier::VerifierDeps* GetVerifierDeps() const {
1089     DCHECK(IsAotCompiler());
1090     return tlsPtr_.deps_or_stack_trace_sample.verifier_deps;
1091   }
1092 
1093   // It is the responsibility of the caller to make sure the verifier_deps
1094   // entry in the thread is cleared before destruction of the actual VerifierDeps
1095   // object, or the thread.
1096   void SetVerifierDeps(verifier::VerifierDeps* verifier_deps) {
1097     DCHECK(IsAotCompiler());
1098     DCHECK(verifier_deps == nullptr || tlsPtr_.deps_or_stack_trace_sample.verifier_deps == nullptr);
1099     tlsPtr_.deps_or_stack_trace_sample.verifier_deps = verifier_deps;
1100   }
1101 
1102   uint64_t GetTraceClockBase() const {
1103     return tls64_.trace_clock_base;
1104   }
1105 
1106   void SetTraceClockBase(uint64_t clock_base) {
1107     tls64_.trace_clock_base = clock_base;
1108   }
1109 
1110   BaseMutex* GetHeldMutex(LockLevel level) const {
1111     return tlsPtr_.held_mutexes[level];
1112   }
1113 
1114   void SetHeldMutex(LockLevel level, BaseMutex* mutex) {
1115     tlsPtr_.held_mutexes[level] = mutex;
1116   }
1117 
1118   void ClearSuspendBarrier(AtomicInteger* target)
1119       REQUIRES(Locks::thread_suspend_count_lock_);
1120 
1121   bool ReadFlag(ThreadFlag flag) const {
1122     return (tls32_.state_and_flags.as_struct.flags & flag) != 0;
1123   }
1124 
1125   bool TestAllFlags() const {
1126     return (tls32_.state_and_flags.as_struct.flags != 0);
1127   }
1128 
1129   void AtomicSetFlag(ThreadFlag flag) {
1130     tls32_.state_and_flags.as_atomic_int.fetch_or(flag, std::memory_order_seq_cst);
1131   }
1132 
1133   void AtomicClearFlag(ThreadFlag flag) {
1134     tls32_.state_and_flags.as_atomic_int.fetch_and(-1 ^ flag, std::memory_order_seq_cst);
1135   }
1136 
1137   bool UseMterp() const {
1138     return tls32_.use_mterp.load();
1139   }
1140 
1141   void ResetQuickAllocEntryPointsForThread();
1142 
1143   // Returns the remaining space in the TLAB.
1144   size_t TlabSize() const {
1145     return tlsPtr_.thread_local_end - tlsPtr_.thread_local_pos;
1146   }
1147 
1148   // Returns the remaining space in the TLAB if we were to expand it to maximum capacity.
1149   size_t TlabRemainingCapacity() const {
1150     return tlsPtr_.thread_local_limit - tlsPtr_.thread_local_pos;
1151   }
1152 
1153   // Expand the TLAB by a fixed number of bytes. There must be enough capacity to do so.
1154   void ExpandTlab(size_t bytes) {
1155     tlsPtr_.thread_local_end += bytes;
1156     DCHECK_LE(tlsPtr_.thread_local_end, tlsPtr_.thread_local_limit);
1157   }
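  // Illustrative sketch of the TLAB arithmetic (not runtime code):
  //
  //   TlabSize()              == thread_local_end   - thread_local_pos
  //   TlabRemainingCapacity() == thread_local_limit - thread_local_pos
  //
  // so ExpandTlab(n) is only valid while thread_local_end + n <= thread_local_limit, and a bump
  // allocation of `bytes` simply advances thread_local_pos by `bytes`.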
1158 
1159   // Doesn't check that there is room.
1160   mirror::Object* AllocTlab(size_t bytes);
1161   void SetTlab(uint8_t* start, uint8_t* end, uint8_t* limit);
1162   bool HasTlab() const;
1163   void ResetTlab();
1164   uint8_t* GetTlabStart() {
1165     return tlsPtr_.thread_local_start;
1166   }
1167   uint8_t* GetTlabPos() {
1168     return tlsPtr_.thread_local_pos;
1169   }
1170   uint8_t* GetTlabEnd() {
1171     return tlsPtr_.thread_local_end;
1172   }
1173   // Remove the suspend trigger for this thread by making the suspend_trigger_ TLS value
1174   // equal to a valid pointer.
1175   // TODO: does this need to be atomic?  I don't think so.
1176   void RemoveSuspendTrigger() {
1177     tlsPtr_.suspend_trigger = reinterpret_cast<uintptr_t*>(&tlsPtr_.suspend_trigger);
1178   }
1179 
1180   // Trigger a suspend check by making the suspend_trigger_ TLS value an invalid pointer.
1181   // The next time a suspend check is done, it will load from the value at this address
1182   // and trigger a SIGSEGV.
1183   void TriggerSuspend() {
1184     tlsPtr_.suspend_trigger = nullptr;
1185   }
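  // Sketch of the mechanism (illustrative; only relevant where implicit suspend checks are in
  // use): generated code periodically performs roughly
  //
  //   volatile uintptr_t unused = *tlsPtr_.suspend_trigger;
  //
  // With the trigger "removed" the pointer refers to itself and the load is harmless; after
  // TriggerSuspend() it is null, the load faults, and the fault handler runs the suspend check.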
1186 
1187 
1188   // Push an object onto the allocation stack.
1189   bool PushOnThreadLocalAllocationStack(mirror::Object* obj)
1190       REQUIRES_SHARED(Locks::mutator_lock_);
1191 
1192   // Set the thread local allocation pointers to the given pointers.
1193   void SetThreadLocalAllocationStack(StackReference<mirror::Object>* start,
1194                                      StackReference<mirror::Object>* end);
1195 
1196   // Resets the thread local allocation pointers.
1197   void RevokeThreadLocalAllocationStack();
1198 
1199   size_t GetThreadLocalBytesAllocated() const {
1200     return tlsPtr_.thread_local_end - tlsPtr_.thread_local_start;
1201   }
1202 
1203   size_t GetThreadLocalObjectsAllocated() const {
1204     return tlsPtr_.thread_local_objects;
1205   }
1206 
1207   void* GetRosAllocRun(size_t index) const {
1208     return tlsPtr_.rosalloc_runs[index];
1209   }
1210 
1211   void SetRosAllocRun(size_t index, void* run) {
1212     tlsPtr_.rosalloc_runs[index] = run;
1213   }
1214 
1215   bool ProtectStack(bool fatal_on_error = true);
1216   bool UnprotectStack();
1217 
1218   void SetMterpCurrentIBase(void* ibase) {
1219     tlsPtr_.mterp_current_ibase = ibase;
1220   }
1221 
1222   const void* GetMterpCurrentIBase() const {
1223     return tlsPtr_.mterp_current_ibase;
1224   }
1225 
1226   bool HandlingSignal() const {
1227     return tls32_.handling_signal_;
1228   }
1229 
1230   void SetHandlingSignal(bool handling_signal) {
1231     tls32_.handling_signal_ = handling_signal;
1232   }
1233 
1234   bool IsTransitioningToRunnable() const {
1235     return tls32_.is_transitioning_to_runnable;
1236   }
1237 
1238   void SetIsTransitioningToRunnable(bool value) {
1239     tls32_.is_transitioning_to_runnable = value;
1240   }
1241 
1242   uint32_t DecrementForceInterpreterCount() REQUIRES(Locks::thread_list_lock_) {
1243     return --tls32_.force_interpreter_count;
1244   }
1245 
1246   uint32_t IncrementForceInterpreterCount() REQUIRES(Locks::thread_list_lock_) {
1247     return ++tls32_.force_interpreter_count;
1248   }
1249 
1250   void SetForceInterpreterCount(uint32_t value) REQUIRES(Locks::thread_list_lock_) {
1251     tls32_.force_interpreter_count = value;
1252   }
1253 
1254   uint32_t ForceInterpreterCount() const {
1255     return tls32_.force_interpreter_count;
1256   }
1257 
1258   bool IsForceInterpreter() const {
1259     return tls32_.force_interpreter_count != 0;
1260   }
1261 
1262   bool IncrementMakeVisiblyInitializedCounter() {
1263     tls32_.make_visibly_initialized_counter += 1u;
1264     return tls32_.make_visibly_initialized_counter == kMakeVisiblyInitializedCounterTriggerCount;
1265   }
1266 
1267   void ClearMakeVisiblyInitializedCounter() {
1268     tls32_.make_visibly_initialized_counter = 0u;
1269   }
1270 
1271   void PushVerifier(verifier::MethodVerifier* verifier);
1272   void PopVerifier(verifier::MethodVerifier* verifier);
1273 
1274   void InitStringEntryPoints();
1275 
1276   void ModifyDebugDisallowReadBarrier(int8_t delta) {
1277     debug_disallow_read_barrier_ += delta;
1278   }
1279 
1280   uint8_t GetDebugDisallowReadBarrierCount() const {
1281     return debug_disallow_read_barrier_;
1282   }
1283 
1284   // Gets the current TLSData associated with the key or nullptr if there isn't any. Note that users
1285   // do not gain ownership of TLSData and must synchronize with SetCustomTls themselves to prevent
1286   // it from being deleted.
1287   TLSData* GetCustomTLS(const char* key) REQUIRES(!Locks::custom_tls_lock_);
1288 
1289   // Sets the tls entry at 'key' to data. The thread takes ownership of the TLSData. The destructor
1290   // will be run when the thread exits or when SetCustomTLS is called again with the same key.
1291   void SetCustomTLS(const char* key, TLSData* data) REQUIRES(!Locks::custom_tls_lock_);
1292 
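  // Usage sketch for the custom TLS hooks above (illustrative only; the key string and the
  // CounterTLS type are hypothetical, not part of the runtime):
  //
  //   class CounterTLS : public TLSData {
  //    public:
  //     uint32_t count = 0;
  //   };
  //
  //   static constexpr const char* kMyPluginKey = "my-plugin-counter";
  //   Thread* self = Thread::Current();
  //   if (self->GetCustomTLS(kMyPluginKey) == nullptr) {
  //     self->SetCustomTLS(kMyPluginKey, new CounterTLS());  // The thread now owns the TLSData.
  //   }
  //   static_cast<CounterTLS*>(self->GetCustomTLS(kMyPluginKey))->count++;
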
  // Returns true if the current thread is the jit sensitive thread.
  bool IsJitSensitiveThread() const {
    return this == jit_sensitive_thread_;
  }

  bool IsSystemDaemon() const REQUIRES_SHARED(Locks::mutator_lock_);

  // Returns true if StrictMode events are traced for the current thread.
  static bool IsSensitiveThread() {
    if (is_sensitive_thread_hook_ != nullptr) {
      return (*is_sensitive_thread_hook_)();
    }
    return false;
  }

  // Sets the read barrier marking entrypoints to be non-null.
  void SetReadBarrierEntrypoints();

  static jobject CreateCompileTimePeer(JNIEnv* env,
                                       const char* name,
                                       bool as_daemon,
                                       jobject thread_group)
      REQUIRES_SHARED(Locks::mutator_lock_);

  ALWAYS_INLINE InterpreterCache* GetInterpreterCache() {
    return &interpreter_cache_;
  }

  // Clear all thread-local interpreter caches.
  //
  // Since the caches are keyed by memory pointer to dex instructions, this must be
  // called when any dex code is unloaded (before different code gets loaded at the
  // same memory location).
  //
  // If the presence of a cache entry implies some pre-conditions, this must also be
  // called if the pre-conditions might no longer hold true.
  static void ClearAllInterpreterCaches();

  template<PointerSize pointer_size>
  static constexpr ThreadOffset<pointer_size> InterpreterCacheOffset() {
    return ThreadOffset<pointer_size>(OFFSETOF_MEMBER(Thread, interpreter_cache_));
  }

  static constexpr int InterpreterCacheSizeLog2() {
    return WhichPowerOf2(InterpreterCache::kSize);
  }

 private:
  explicit Thread(bool daemon);
  ~Thread() REQUIRES(!Locks::mutator_lock_, !Locks::thread_suspend_count_lock_);
  void Destroy();

  // Deletes and clears the tlsPtr_.jpeer field. Done in such a way that instrumentation cannot
  // observe both it and opeer set at the same time.
  void DeleteJPeer(JNIEnv* env);

  void NotifyInTheadList()
      REQUIRES_SHARED(Locks::thread_list_lock_);

  // Attaches the calling native thread to the runtime, returning the new native peer.
  // Used to implement JNI AttachCurrentThread and AttachCurrentThreadAsDaemon calls.
  template <typename PeerAction>
  static Thread* Attach(const char* thread_name,
                        bool as_daemon,
                        PeerAction p);

  void CreatePeer(const char* name, bool as_daemon, jobject thread_group);

  template<bool kTransactionActive>
  static void InitPeer(ScopedObjectAccessAlreadyRunnable& soa,
                       ObjPtr<mirror::Object> peer,
                       jboolean thread_is_daemon,
                       jobject thread_group,
                       jobject thread_name,
                       jint thread_priority)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Avoid use; callers should use SetState. Used only by SignalCatcher::HandleSigQuit and ~Thread.
  ThreadState SetStateUnsafe(ThreadState new_state) {
    ThreadState old_state = GetState();
    if (old_state == kRunnable && new_state != kRunnable) {
      // Need to run pending checkpoint and suspend barriers. Run checkpoints in runnable state in
      // case they need to use a ScopedObjectAccess. If we are holding the mutator lock and a SOA
      // attempts to TransitionFromSuspendedToRunnable, it results in a deadlock.
      TransitionToSuspendedAndRunCheckpoints(new_state);
      // Since we transitioned to a suspended state, pass any pending suspend barrier requests.
      PassActiveSuspendBarriers();
    } else {
      tls32_.state_and_flags.as_struct.state = new_state;
    }
    return old_state;
  }

  void VerifyStackImpl() REQUIRES_SHARED(Locks::mutator_lock_);

  void DumpState(std::ostream& os) const REQUIRES_SHARED(Locks::mutator_lock_);
  void DumpStack(std::ostream& os,
                 bool dump_native_stack = true,
                 BacktraceMap* backtrace_map = nullptr,
                 bool force_dump_stack = false) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Out-of-line conveniences for debugging in gdb.
  static Thread* CurrentFromGdb();  // Like Thread::Current.
  // Like Thread::Dump(std::cerr).
  void DumpFromGdb() const REQUIRES_SHARED(Locks::mutator_lock_);

  static void* CreateCallback(void* arg);

  void HandleUncaughtExceptions(ScopedObjectAccessAlreadyRunnable& soa)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void RemoveFromThreadGroup(ScopedObjectAccessAlreadyRunnable& soa)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Initialize a thread.
  //
  // The third parameter is optional. If given, the thread will use this JNIEnvExt. If Init
  // succeeds, the thread takes ownership of it. If Init fails, it is the caller's responsibility
  // to destroy the given JNIEnvExt. If the parameter is null, Init will try to create a JNIEnvExt
  // on its own (and may fail at that stage, indicated by a return value of false).
  bool Init(ThreadList*, JavaVMExt*, JNIEnvExt* jni_env_ext = nullptr)
      REQUIRES(Locks::runtime_shutdown_lock_);
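
  // Ownership sketch for the jni_env_ext parameter (illustrative only; the JNIEnvExt::Create
  // factory and its exact signature are assumptions, not guaranteed by this header):
  //
  //   std::string error_msg;
  //   JNIEnvExt* env_ext = JNIEnvExt::Create(self, java_vm, &error_msg);
  //   if (env_ext != nullptr && !self->Init(thread_list, java_vm, env_ext)) {
  //     delete env_ext;  // Init failed, so ownership stays with the caller.
  //   }
  //   // On success the Thread owns env_ext and destroys it when the thread exits.
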
  void InitCardTable();
  void InitCpu();
  void CleanupCpu();
  void InitTlsEntryPoints();
  void InitTid();
  void InitPthreadKeySelf();
  bool InitStackHwm();

  void SetUpAlternateSignalStack();
  void TearDownAlternateSignalStack();

  ALWAYS_INLINE void TransitionToSuspendedAndRunCheckpoints(ThreadState new_state)
      REQUIRES(!Locks::thread_suspend_count_lock_, !Roles::uninterruptible_);

  ALWAYS_INLINE void PassActiveSuspendBarriers()
      REQUIRES(!Locks::thread_suspend_count_lock_, !Roles::uninterruptible_);

  // Registers the current thread as the jit sensitive thread. Should be called just once.
  static void SetJitSensitiveThread() {
    if (jit_sensitive_thread_ == nullptr) {
      jit_sensitive_thread_ = Thread::Current();
    } else {
      LOG(WARNING) << "Attempt to set the sensitive thread twice. Tid:"
          << Thread::Current()->GetTid();
    }
  }

  static void SetSensitiveThreadHook(bool (*is_sensitive_thread_hook)()) {
    is_sensitive_thread_hook_ = is_sensitive_thread_hook;
  }

  bool ModifySuspendCountInternal(Thread* self,
                                  int delta,
                                  AtomicInteger* suspend_barrier,
                                  SuspendReason reason)
      WARN_UNUSED
      REQUIRES(Locks::thread_suspend_count_lock_);

  // Runs a single checkpoint function. If there are no more pending checkpoint functions it will
  // clear the kCheckpointRequest flag. The caller is responsible for calling this in a loop until
  // the kCheckpointRequest flag is cleared.
  void RunCheckpointFunction();
  void RunEmptyCheckpoint();

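  // The checkpoint contract above, as a caller-side sketch (illustrative only; ReadFlag is
  // assumed here to be the flag accessor this class declares elsewhere):
  //
  //   while (self->ReadFlag(kCheckpointRequest)) {
  //     self->RunCheckpointFunction();  // Clears the flag once no checkpoints remain.
  //   }
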
  bool PassActiveSuspendBarriers(Thread* self)
      REQUIRES(!Locks::thread_suspend_count_lock_);

  // Install the protected region for implicit stack checks.
  void InstallImplicitProtection();

  template <bool kPrecise>
  void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);

  void SweepInterpreterCache(IsMarkedVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);

  static bool IsAotCompiler();

  void ReleaseLongJumpContextInternal();

  // 32 bits of atomically changed state and flags. Keeping it at 32 bits allows an atomic CAS to
  // change from being Suspended to Runnable without a suspend request occurring.
  union PACKED(4) StateAndFlags {
    StateAndFlags() {}
    struct PACKED(4) {
      // Bitfield of flag values. Must be changed atomically so that flag values aren't lost. See
      // ThreadFlag for bit field meanings.
      volatile uint16_t flags;
      // Holds the ThreadState. May be changed non-atomically between Suspended (i.e. not Runnable)
      // transitions. Changing to Runnable requires that the suspend_request be part of the atomic
      // operation. If a thread is suspended and a suspend_request is present, the thread may not
      // change to Runnable as a GC or other operation is in progress.
      volatile uint16_t state;
    } as_struct;
    AtomicInteger as_atomic_int;
    volatile int32_t as_int;

   private:
    // gcc does not handle structs with volatile member assignments correctly.
    // See http://gcc.gnu.org/bugzilla/show_bug.cgi?id=47409
    DISALLOW_COPY_AND_ASSIGN(StateAndFlags);
  };
  static_assert(sizeof(StateAndFlags) == sizeof(int32_t), "Weird state_and_flags size");

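  // Sketch of the Suspended -> Runnable transition the comment above describes (illustrative
  // only; the real transition code lives elsewhere and also inspects the flag bits first):
  //
  //   StateAndFlags old_state_and_flags;
  //   old_state_and_flags.as_int = tls32_.state_and_flags.as_int;
  //   StateAndFlags new_state_and_flags;
  //   new_state_and_flags.as_int = old_state_and_flags.as_int;
  //   new_state_and_flags.as_struct.state = kRunnable;
  //   // The CAS fails if a suspend request (or any other flag change) appeared in the meantime.
  //   bool success = tls32_.state_and_flags.as_atomic_int.CompareAndSetWeakAcquire(
  //       old_state_and_flags.as_int, new_state_and_flags.as_int);
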
  static void ThreadExitCallback(void* arg);

  // Maximum number of suspend barriers.
  static constexpr uint32_t kMaxSuspendBarriers = 3;

  // Has Thread::Startup been called?
  static bool is_started_;

  // TLS key used to retrieve the Thread*.
  static pthread_key_t pthread_key_self_;

  // Used to notify threads that they should attempt to resume; they will suspend again if
  // their suspend count is > 0.
  static ConditionVariable* resume_cond_ GUARDED_BY(Locks::thread_suspend_count_lock_);

  // Hook passed by the framework which returns true
  // when StrictMode events are traced for the current thread.
  static bool (*is_sensitive_thread_hook_)();
  // Stores the jit sensitive thread (which for now is the UI thread).
  static Thread* jit_sensitive_thread_;

  static constexpr uint32_t kMakeVisiblyInitializedCounterTriggerCount = 128;

  /***********************************************************************************************/
  // Thread local storage. Fields are grouped by size so that offsets can be mapped between the
  // 32-bit and 64-bit layouts despite pointer size differences. To encourage shorter encoding,
  // more frequently used values appear first if possible.
  /***********************************************************************************************/

  struct PACKED(4) tls_32bit_sized_values {
    // We have no control over the size of 'bool', but want our boolean fields
    // to be 4-byte quantities.
    typedef uint32_t bool32_t;

    explicit tls_32bit_sized_values(bool is_daemon)
        : suspend_count(0),
          thin_lock_thread_id(0),
          tid(0),
          daemon(is_daemon),
          throwing_OutOfMemoryError(false),
          no_thread_suspension(0),
          thread_exit_check_count(0),
          handling_signal_(false),
          is_transitioning_to_runnable(false),
          ready_for_debug_invoke(false),
          debug_method_entry_(false),
          is_gc_marking(false),
          weak_ref_access_enabled(true),
          disable_thread_flip_count(0),
          user_code_suspend_count(0),
          force_interpreter_count(0),
          use_mterp(0),
          make_visibly_initialized_counter(0),
          define_class_counter(0) {}

    union StateAndFlags state_and_flags;
    static_assert(sizeof(union StateAndFlags) == sizeof(int32_t),
                  "Size of state_and_flags and int32 are different");

    // A non-zero value is used to tell the current thread to enter a safe point
    // at the next poll.
    int suspend_count GUARDED_BY(Locks::thread_suspend_count_lock_);

    // Thin lock thread id. This is a small integer used by the thin lock implementation.
    // This is not to be confused with the native thread's tid, nor is it the value returned
    // by java.lang.Thread.getId --- this is a distinct value, used only for locking. One
    // important difference between this id and the ids visible to managed code is that these
    // ones get reused (to ensure that they fit in the number of bits available).
    uint32_t thin_lock_thread_id;

    // System thread id.
    uint32_t tid;

    // Is the thread a daemon?
    const bool32_t daemon;

    // A boolean telling us whether we're recursively throwing OOME.
    bool32_t throwing_OutOfMemoryError;

    // A positive value implies we're in a region where thread suspension isn't expected.
    uint32_t no_thread_suspension;

    // How many times has our pthread key's destructor been called?
    uint32_t thread_exit_check_count;

    // True if a signal is being handled by this thread.
    bool32_t handling_signal_;

    // True if the thread is in TransitionFromSuspendedToRunnable(). This is used to distinguish the
    // non-runnable threads (e.g. kNative, kWaiting) that are about to transition to runnable from
    // the rest of them.
    bool32_t is_transitioning_to_runnable;

    // True if the thread has been suspended by a debugger event. This is
    // used to invoke a method from the debugger, which is only allowed when
    // the thread is suspended by an event.
    bool32_t ready_for_debug_invoke;

    // True if the thread is entering a method. This is used to detect the method
    // entry event for the debugger.
    bool32_t debug_method_entry_;

    // True if the GC is in the marking phase. This is used for the CC collector only. This is
    // thread local so that we can simplify the read barrier fast-path check for GC roots.
    bool32_t is_gc_marking;

    // Thread "interrupted" status; stays raised until queried or thrown.
    Atomic<bool32_t> interrupted;

    AtomicInteger park_state_;

    // True if the thread is allowed to access a weak ref (Reference::GetReferent() and system
    // weaks) and to potentially mark an object alive/gray. This is used for concurrent reference
    // processing of the CC collector only. This is thread local so that we can enable/disable weak
    // ref access by using a checkpoint and avoid a race around the time weak ref access gets
    // disabled and concurrent reference processing begins (if weak ref access is disabled during a
    // pause, this is not an issue). Other collectors use Runtime::DisallowNewSystemWeaks() and
    // ReferenceProcessor::EnableSlowPath().
    bool32_t weak_ref_access_enabled;

    // A thread local version of Heap::disable_thread_flip_count_. This keeps track of how many
    // levels of (nested) JNI critical sections the thread is in and is used to detect a nested JNI
    // critical section entry.
    uint32_t disable_thread_flip_count;

    // How much of 'suspend_count_' is by request of user code, used to distinguish threads
    // suspended by the runtime from those suspended by user code.
    // This should have GUARDED_BY(Locks::user_code_suspension_lock_), but the automatic analysis
    // cannot be told that AssertHeld should be good enough.
    int user_code_suspend_count GUARDED_BY(Locks::thread_suspend_count_lock_);

    // Count of how many times this thread has been forced to the interpreter. If this is not 0,
    // the thread must remain in interpreted code as much as possible.
    uint32_t force_interpreter_count;

    // True if everything is in the ideal state for fast interpretation.
    // False if we need to switch to the C++ interpreter to handle special cases.
    std::atomic<bool32_t> use_mterp;

    // Counter for calls to initialize a class that's initialized but not visibly initialized.
    // When this reaches kMakeVisiblyInitializedCounterTriggerCount, we call the runtime to
    // make initialized classes visibly initialized. This is needed because we usually make
    // classes visibly initialized in batches, but we do not want a class to remain
    // initialized-but-not-visibly-initialized for a long time when no further classes are
    // being initialized.
    uint32_t make_visibly_initialized_counter;

    // Counter for how many nested define-classes are ongoing in this thread. Used to allow waiting
    // for threads to be done with class-definition work.
    uint32_t define_class_counter;
  } tls32_;

  struct PACKED(8) tls_64bit_sized_values {
    tls_64bit_sized_values() : trace_clock_base(0) {
    }

    // The clock base used for tracing.
    uint64_t trace_clock_base;

    RuntimeStats stats;
  } tls64_;

  struct PACKED(sizeof(void*)) tls_ptr_sized_values {
      tls_ptr_sized_values() : card_table(nullptr), exception(nullptr), stack_end(nullptr),
      managed_stack(), suspend_trigger(nullptr), jni_env(nullptr), tmp_jni_env(nullptr),
      self(nullptr), opeer(nullptr), jpeer(nullptr), stack_begin(nullptr), stack_size(0),
      deps_or_stack_trace_sample(), wait_next(nullptr), monitor_enter_object(nullptr),
      top_handle_scope(nullptr), class_loader_override(nullptr), long_jump_context(nullptr),
      instrumentation_stack(nullptr),
      stacked_shadow_frame_record(nullptr), deoptimization_context_stack(nullptr),
      frame_id_to_shadow_frame(nullptr), name(nullptr), pthread_self(0),
      last_no_thread_suspension_cause(nullptr), checkpoint_function(nullptr),
      thread_local_start(nullptr), thread_local_pos(nullptr), thread_local_end(nullptr),
      thread_local_limit(nullptr),
      thread_local_objects(0), mterp_current_ibase(nullptr), thread_local_alloc_stack_top(nullptr),
      thread_local_alloc_stack_end(nullptr),
      flip_function(nullptr), method_verifier(nullptr), thread_local_mark_stack(nullptr),
      async_exception(nullptr), top_reflective_handle_scope(nullptr) {
      std::fill(held_mutexes, held_mutexes + kLockLevelCount, nullptr);
    }

    // The biased card table, see CardTable for details.
    uint8_t* card_table;

    // The pending exception or null.
    mirror::Throwable* exception;

    // The end of this thread's stack. This is the lowest safely-addressable address on the stack.
    // We leave extra space so there's room for the code that throws StackOverflowError.
    uint8_t* stack_end;

    // The top of the managed stack, often manipulated directly by compiler-generated code.
    ManagedStack managed_stack;

    // In certain modes, setting this to 0 will trigger a SEGV and thus a suspend check. It is
    // normally set to the address of itself.
    uintptr_t* suspend_trigger;

    // Every thread may have an associated JNI environment.
    JNIEnvExt* jni_env;

    // Temporary storage to transfer a pre-allocated JNIEnvExt from the creating thread to the
    // created thread.
    JNIEnvExt* tmp_jni_env;

    // Initialized to "this". On certain architectures (such as x86) reading off of Thread::Current
    // is easy but getting the address of Thread::Current is hard. This field can be read off of
    // Thread::Current to give the address.
    Thread* self;

    // Our managed peer (an instance of java.lang.Thread). The jobject version is used during thread
    // startup, until the thread is registered and the local opeer_ is used.
    mirror::Object* opeer;
    jobject jpeer;

    // The "lowest addressable byte" of the stack.
    uint8_t* stack_begin;

    // Size of the stack.
    size_t stack_size;

    // Sampling profiler and AOT verification cannot happen on the same run, so we share
    // the same entry for the stack trace and the verifier deps.
    union DepsOrStackTraceSample {
      DepsOrStackTraceSample() {
        verifier_deps = nullptr;
        stack_trace_sample = nullptr;
      }
      // Pointer to previous stack trace captured by the sampling profiler.
      std::vector<ArtMethod*>* stack_trace_sample;
      // When doing AOT verification, per-thread VerifierDeps.
      verifier::VerifierDeps* verifier_deps;
    } deps_or_stack_trace_sample;

    // The next thread in the wait set this thread is part of, or null if not waiting.
    Thread* wait_next;

    // If we're blocked in MonitorEnter, this is the object we're trying to lock.
    mirror::Object* monitor_enter_object;

    // Top of linked list of handle scopes or null for none.
    BaseHandleScope* top_handle_scope;

    // Needed to get the right ClassLoader in JNI_OnLoad, but also
    // useful for testing.
    jobject class_loader_override;

    // Thread local, lazily allocated, long jump context. Used to deliver exceptions.
    Context* long_jump_context;

    // Additional stack used by method instrumentation to store method and return pc values.
    // Stored as a pointer since std::map is not PACKED.
    // !DO NOT CHANGE! to std::unordered_map: the users of this map require an
    // ordered iteration on the keys (which are stack addresses).
    // Also see Thread::GetInstrumentationStack for the requirements on
    // manipulating and reading this map.
    std::map<uintptr_t, instrumentation::InstrumentationStackFrame>* instrumentation_stack;

    // For GC purposes, a shadow frame record stack that keeps track of:
    // 1) shadow frames under construction.
    // 2) deoptimization shadow frames.
    StackedShadowFrameRecord* stacked_shadow_frame_record;

    // Deoptimization return value record stack.
    DeoptimizationContextRecord* deoptimization_context_stack;

    // For the debugger, a linked list that keeps the mapping from frame_id to shadow frame.
    // Shadow frames may be created before deoptimization happens so that the debugger can
    // set local values there first.
    FrameIdToShadowFrame* frame_id_to_shadow_frame;

    // A cached copy of the java.lang.Thread's name.
    std::string* name;

    // A cached pthread_t for the pthread underlying this Thread*.
    pthread_t pthread_self;

    // If no_thread_suspension_ is > 0, what is causing that assertion.
    const char* last_no_thread_suspension_cause;

    // Pending checkpoint function or null if non-pending. If this checkpoint is set and someone
    // requests another checkpoint, it goes to the checkpoint overflow list.
    Closure* checkpoint_function GUARDED_BY(Locks::thread_suspend_count_lock_);

    // Pending barriers that require passing, or null if non-pending. Installation is guarded by
    // Locks::thread_suspend_count_lock_.
    // They work effectively as art::Barrier, but are implemented directly using AtomicInteger and
    // futex to avoid the additional cost of a mutex and a condition variable, as used in
    // art::Barrier.
    AtomicInteger* active_suspend_barriers[kMaxSuspendBarriers];

    // Thread-local allocation pointer. Moved here to force alignment for thread_local_pos on ARM.
    uint8_t* thread_local_start;

    // thread_local_pos and thread_local_end must be consecutive for ldrd and are 8-byte aligned for
    // potentially better performance.
    uint8_t* thread_local_pos;
    uint8_t* thread_local_end;

    // Thread local limit is how far we can expand the thread local buffer; it is greater than or
    // equal to thread_local_end.
    uint8_t* thread_local_limit;

    size_t thread_local_objects;

    // Entrypoint function pointers.
    // TODO: move this to more of a global offset table model to avoid per-thread duplication.
    JniEntryPoints jni_entrypoints;
    QuickEntryPoints quick_entrypoints;

    // Mterp jump table base.
    void* mterp_current_ibase;

    // There are RosAlloc::kNumThreadLocalSizeBrackets thread-local size brackets per thread.
    void* rosalloc_runs[kNumRosAllocThreadLocalSizeBracketsInThread];

    // Thread-local allocation stack data/routines.
    StackReference<mirror::Object>* thread_local_alloc_stack_top;
    StackReference<mirror::Object>* thread_local_alloc_stack_end;

    // Support for Mutex lock hierarchy bug detection.
    BaseMutex* held_mutexes[kLockLevelCount];

    // The function used for thread flip.
    Closure* flip_function;

    // Current method verifier, used for root marking.
    verifier::MethodVerifier* method_verifier;

    // Thread-local mark stack for the concurrent copying collector.
    gc::accounting::AtomicStack<mirror::Object>* thread_local_mark_stack;

    // The pending async-exception or null.
    mirror::Throwable* async_exception;

    // Top of the linked-list for reflective-handle scopes or null if none.
    BaseReflectiveHandleScope* top_reflective_handle_scope;
  } tlsPtr_;

  // Small thread-local cache to be used from the interpreter.
  // It is keyed by dex instruction pointer.
  // The value is opcode-dependent (e.g. a field offset).
  InterpreterCache interpreter_cache_;

  // All fields below this line should not be accessed by native code. This means these fields can
  // be modified, rearranged, added, or removed without having to modify asm_support.h.

  // Guards the 'wait_cond_' and 'wait_monitor_' members.
  Mutex* wait_mutex_ DEFAULT_MUTEX_ACQUIRED_AFTER;

  // Condition variable waited upon during a wait.
  ConditionVariable* wait_cond_ GUARDED_BY(wait_mutex_);
  // Pointer to the monitor lock we're currently waiting on, or null if not waiting.
  Monitor* wait_monitor_ GUARDED_BY(wait_mutex_);

  // Debug disable read barrier count, only checked in debug builds and only in the runtime.
  uint8_t debug_disallow_read_barrier_ = 0;

  // Note that this is not in the packed struct; it may not be accessed for cross compilation.
  uintptr_t poison_object_cookie_ = 0;

  // Pending extra checkpoints if checkpoint_function_ is already used.
  std::list<Closure*> checkpoint_overflow_ GUARDED_BY(Locks::thread_suspend_count_lock_);

  // Custom TLS field that can be used by plugins or the runtime. Should not be accessed directly by
  // compiled code or entrypoints.
  SafeMap<std::string, std::unique_ptr<TLSData>> custom_tls_ GUARDED_BY(Locks::custom_tls_lock_);

#ifndef __BIONIC__
  __attribute__((tls_model("initial-exec")))
  static thread_local Thread* self_tls_;
#endif

  // True if the thread is some form of runtime thread (e.g. GC or JIT).
  bool is_runtime_thread_;

  // Set during execution of JNI methods that get field and method ids as part of determining if
  // the caller is allowed to access all fields and methods in the Core Platform API.
  uint32_t core_platform_api_cookie_ = 0;

  friend class gc::collector::SemiSpace;  // For getting stack traces.
  friend class Runtime;  // For CreatePeer.
  friend class QuickExceptionHandler;  // For dumping the stack.
  friend class ScopedThreadStateChange;
  friend class StubTest;  // For accessing entrypoints.
  friend class ThreadList;  // For ~Thread and Destroy.

  friend class EntrypointsOrderTest;  // To test the order of tls entries.

  DISALLOW_COPY_AND_ASSIGN(Thread);
};

class SCOPED_CAPABILITY ScopedAssertNoThreadSuspension {
 public:
  ALWAYS_INLINE ScopedAssertNoThreadSuspension(const char* cause,
                                               bool enabled = true)
      ACQUIRE(Roles::uninterruptible_)
      : enabled_(enabled) {
    if (!enabled_) {
      return;
    }
    if (kIsDebugBuild) {
      self_ = Thread::Current();
      old_cause_ = self_->StartAssertNoThreadSuspension(cause);
    } else {
      Roles::uninterruptible_.Acquire();  // No-op.
    }
  }
  ALWAYS_INLINE ~ScopedAssertNoThreadSuspension() RELEASE(Roles::uninterruptible_) {
    if (!enabled_) {
      return;
    }
    if (kIsDebugBuild) {
      self_->EndAssertNoThreadSuspension(old_cause_);
    } else {
      Roles::uninterruptible_.Release();  // No-op.
    }
  }

 private:
  Thread* self_;
  const bool enabled_;
  const char* old_cause_;
};

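// Usage sketch for the scope above (illustrative only; the cause string is arbitrary):
//
//   {
//     ScopedAssertNoThreadSuspension sants("Visiting roots");
//     // Code here must not transition to a suspended state; in debug builds any attempt
//     // to suspend will trip the assertion with the cause given above.
//   }  // The assertion is released when the scope ends.
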
class ScopedAllowThreadSuspension {
 public:
  ALWAYS_INLINE ScopedAllowThreadSuspension() RELEASE(Roles::uninterruptible_) {
    if (kIsDebugBuild) {
      self_ = Thread::Current();
      old_cause_ = self_->EndAssertNoThreadSuspension();
    } else {
      Roles::uninterruptible_.Release();  // No-op.
    }
  }
  ALWAYS_INLINE ~ScopedAllowThreadSuspension() ACQUIRE(Roles::uninterruptible_) {
    if (kIsDebugBuild) {
      CHECK(self_->StartAssertNoThreadSuspension(old_cause_) == nullptr);
    } else {
      Roles::uninterruptible_.Acquire();  // No-op.
    }
  }

 private:
  Thread* self_;
  const char* old_cause_;
};


class ScopedStackedShadowFramePusher {
 public:
  ScopedStackedShadowFramePusher(Thread* self, ShadowFrame* sf, StackedShadowFrameType type)
    : self_(self), type_(type) {
    self_->PushStackedShadowFrame(sf, type);
  }
  ~ScopedStackedShadowFramePusher() {
    self_->PopStackedShadowFrame(type_);
  }

 private:
  Thread* const self_;
  const StackedShadowFrameType type_;

  DISALLOW_COPY_AND_ASSIGN(ScopedStackedShadowFramePusher);
};

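// Usage sketch for the pusher above (illustrative only; the shadow frame and the specific
// StackedShadowFrameType value are assumptions made for the example):
//
//   {
//     ScopedStackedShadowFramePusher pusher(
//         self, shadow_frame, StackedShadowFrameType::kShadowFrameUnderConstruction);
//     // The frame stays registered with the thread (e.g. for GC visiting) until scope exit.
//   }
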
// Only works for debug builds.
class ScopedDebugDisallowReadBarriers {
 public:
  explicit ScopedDebugDisallowReadBarriers(Thread* self) : self_(self) {
    self_->ModifyDebugDisallowReadBarrier(1);
  }
  ~ScopedDebugDisallowReadBarriers() {
    self_->ModifyDebugDisallowReadBarrier(-1);
  }

 private:
  Thread* const self_;
};

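// Usage sketch (illustrative only): bracket a region that is expected not to execute read
// barriers. The assumption here is that debug-build read-barrier checks consult the count set
// by this scope; in release builds the scope is effectively a no-op.
//
//   {
//     ScopedDebugDisallowReadBarriers sddrb(Thread::Current());
//     // Code here should not trigger read barriers.
//   }
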
class ScopedTransitioningToRunnable : public ValueObject {
 public:
  explicit ScopedTransitioningToRunnable(Thread* self)
      : self_(self) {
    DCHECK_EQ(self, Thread::Current());
    if (kUseReadBarrier) {
      self_->SetIsTransitioningToRunnable(true);
    }
  }

  ~ScopedTransitioningToRunnable() {
    if (kUseReadBarrier) {
      self_->SetIsTransitioningToRunnable(false);
    }
  }

 private:
  Thread* const self_;
};

class ThreadLifecycleCallback {
 public:
  virtual ~ThreadLifecycleCallback() {}

  virtual void ThreadStart(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) = 0;
  virtual void ThreadDeath(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) = 0;
};

// Stores an exception from the thread and suppresses it for the lifetime of this object.
class ScopedExceptionStorage {
 public:
  explicit ScopedExceptionStorage(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
  void SuppressOldException(const char* message = "") REQUIRES_SHARED(Locks::mutator_lock_);
  ~ScopedExceptionStorage() REQUIRES_SHARED(Locks::mutator_lock_);

 private:
  Thread* self_;
  StackHandleScope<1> hs_;
  MutableHandle<mirror::Throwable> excp_;
};

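// Usage sketch for the storage scope above (illustrative only; the helper name is a placeholder
// for any code that must run with a clean exception slot, and the destructor is assumed to put
// the stored exception back on the thread as thread.cc defines it):
//
//   {
//     ScopedExceptionStorage saved(self);
//     DoWorkThatMustNotSeeAPendingException(self);  // Hypothetical helper.
//     // If the old exception has been superseded, it can be dropped explicitly:
//     // saved.SuppressOldException("superseded by a new failure");
//   }
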
std::ostream& operator<<(std::ostream& os, const Thread& thread);
std::ostream& operator<<(std::ostream& os, StackedShadowFrameType type);

}  // namespace art

#endif  // ART_RUNTIME_THREAD_H_