/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "monitor-inl.h"

#include <vector>

#include "android-base/stringprintf.h"

#include "art_method-inl.h"
#include "base/logging.h"  // For VLOG.
#include "base/mutex.h"
#include "base/quasi_atomic.h"
#include "base/stl_util.h"
#include "base/systrace.h"
#include "base/time_utils.h"
#include "class_linker.h"
#include "dex/dex_file-inl.h"
#include "dex/dex_file_types.h"
#include "dex/dex_instruction-inl.h"
#include "lock_word-inl.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "object_callbacks.h"
#include "scoped_thread_state_change-inl.h"
#include "stack.h"
#include "thread.h"
#include "thread_list.h"
#include "verifier/method_verifier.h"
#include "well_known_classes.h"

namespace art {

using android::base::StringPrintf;

static constexpr uint64_t kDebugThresholdFudgeFactor = kIsDebugBuild ? 10 : 1;
static constexpr uint64_t kLongWaitMs = 100 * kDebugThresholdFudgeFactor;

/*
 * Every Object has a monitor associated with it, but not every Object is actually locked.  Even
 * the ones that are locked do not need a full-fledged monitor until (a) there is actual
 * contention, (b) wait() is called on the Object, or (c) we need to lock an object that also has
 * an identity hashcode.
 *
 * For Android, we have implemented a scheme similar to the one described in Bacon et al.'s
 * "Thin locks: featherweight synchronization for Java" (ACM 1998).  Things are even easier for us,
 * though, because we have a full 32 bits to work with.
 *
 * The two states of an Object's lock are referred to as "thin" and "fat".  A lock may transition
 * from the "thin" state to the "fat" state and this transition is referred to as inflation. We
 * deflate locks from time to time as part of heap trimming.
 *
 * The lock value itself is stored in mirror::Object::monitor_ and the representation is described
 * in the LockWord value type.
 *
 * Monitors provide:
 *  - mutually exclusive access to resources
 *  - a way for multiple threads to wait for notification
 *
 * In effect, they fill the role of both mutexes and condition variables.
 *
 * Only one thread can own the monitor at any time.  There may be several threads waiting on it
 * (the wait call unlocks it).  One or more waiting threads may be getting interrupted or notified
 * at any given time.
 */
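// An illustrative sketch (not part of the implementation): in the uncontended case, acquiring a
// thin lock reduces to a single CAS on the object's lock word, roughly
//   LockWord expected = LockWord::FromDefault(gc_state);
//   LockWord desired = LockWord::FromThinLockId(thread_id, /* count= */ 0, gc_state);
//   obj->CasLockWord(expected, desired, CASMode::kWeak, std::memory_order_acquire);
// MonitorEnter() below implements the full state machine around this fast path.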

uint32_t Monitor::lock_profiling_threshold_ = 0;
uint32_t Monitor::stack_dump_lock_profiling_threshold_ = 0;

void Monitor::Init(uint32_t lock_profiling_threshold,
                   uint32_t stack_dump_lock_profiling_threshold) {
  // It isn't great to always include the debug build fudge factor for command-
  // line driven arguments, but it's easier to adjust here than in the build.
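  // For example, a command-line threshold of 500ms stays 500ms on a release build but becomes
  // 5000ms on a debug build, where kDebugThresholdFudgeFactor is 10.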
  lock_profiling_threshold_ =
      lock_profiling_threshold * kDebugThresholdFudgeFactor;
  stack_dump_lock_profiling_threshold_ =
      stack_dump_lock_profiling_threshold * kDebugThresholdFudgeFactor;
}

Monitor::Monitor(Thread* self, Thread* owner, ObjPtr<mirror::Object> obj, int32_t hash_code)
    : monitor_lock_("a monitor lock", kMonitorLock),
      num_waiters_(0),
      owner_(owner),
      lock_count_(0),
      obj_(GcRoot<mirror::Object>(obj)),
      wait_set_(nullptr),
      wake_set_(nullptr),
      hash_code_(hash_code),
      lock_owner_(nullptr),
      lock_owner_method_(nullptr),
      lock_owner_dex_pc_(0),
      lock_owner_sum_(0),
      lock_owner_request_(nullptr),
      monitor_id_(MonitorPool::ComputeMonitorId(this, self)) {
#ifdef __LP64__
  DCHECK(false) << "Should not be reached in 64b";
  next_free_ = nullptr;
#endif
  // We should only inflate a lock if the owner is ourselves or suspended. This avoids a race
  // with the owner unlocking the thin-lock.
  CHECK(owner == nullptr || owner == self || owner->IsSuspended());
  // The identity hash code is set for the lifetime of the monitor.
}

Monitor::Monitor(Thread* self,
                 Thread* owner,
                 ObjPtr<mirror::Object> obj,
                 int32_t hash_code,
                 MonitorId id)
    : monitor_lock_("a monitor lock", kMonitorLock),
      num_waiters_(0),
      owner_(owner),
      lock_count_(0),
      obj_(GcRoot<mirror::Object>(obj)),
      wait_set_(nullptr),
      wake_set_(nullptr),
      hash_code_(hash_code),
      lock_owner_(nullptr),
      lock_owner_method_(nullptr),
      lock_owner_dex_pc_(0),
      lock_owner_sum_(0),
      lock_owner_request_(nullptr),
      monitor_id_(id) {
#ifdef __LP64__
  next_free_ = nullptr;
#endif
  // We should only inflate a lock if the owner is ourselves or suspended. This avoids a race
  // with the owner unlocking the thin-lock.
  CHECK(owner == nullptr || owner == self || owner->IsSuspended());
  // The identity hash code is set for the lifetime of the monitor.
}

int32_t Monitor::GetHashCode() {
  int32_t hc = hash_code_.load(std::memory_order_relaxed);
  if (!HasHashCode()) {
    // Use a strong CAS to prevent spurious failures since these can make the boot image
    // non-deterministic.
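    // Note: a hash_code_ of 0 is the "not yet computed" sentinel that the CAS below replaces;
    // HasHashCode() (declared in monitor.h) presumably just tests for that sentinel.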
    hash_code_.CompareAndSetStrongRelaxed(0, mirror::Object::GenerateIdentityHashCode());
    hc = hash_code_.load(std::memory_order_relaxed);
  }
  DCHECK(HasHashCode());
  return hc;
}

void Monitor::SetLockingMethod(Thread* owner) {
  DCHECK(owner == Thread::Current() || owner->IsSuspended());
  // Do not abort on dex pc errors. This can easily happen when we want to dump a stack trace on
  // abort.
  ArtMethod* lock_owner_method;
  uint32_t lock_owner_dex_pc;
  lock_owner_method = owner->GetCurrentMethod(&lock_owner_dex_pc, false);
  if (lock_owner_method != nullptr && UNLIKELY(lock_owner_method->IsProxyMethod())) {
    // Grab another frame. Proxy methods are not helpful for lock profiling. This should be rare
    // enough that it's OK to walk the stack twice.
    struct NextMethodVisitor final : public StackVisitor {
      explicit NextMethodVisitor(Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_)
          : StackVisitor(thread,
                         nullptr,
                         StackVisitor::StackWalkKind::kIncludeInlinedFrames,
                         false),
            count_(0),
            method_(nullptr),
            dex_pc_(0) {}
      bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
        ArtMethod* m = GetMethod();
        if (m->IsRuntimeMethod()) {
          // Continue if this is a runtime method.
          return true;
        }
        count_++;
        if (count_ == 2u) {
          method_ = m;
          dex_pc_ = GetDexPc(false);
          return false;
        }
        return true;
      }
      size_t count_;
      ArtMethod* method_;
      uint32_t dex_pc_;
    };
    NextMethodVisitor nmv(owner_.load(std::memory_order_relaxed));
    nmv.WalkStack();
    lock_owner_method = nmv.method_;
    lock_owner_dex_pc = nmv.dex_pc_;
  }
  SetLockOwnerInfo(lock_owner_method, lock_owner_dex_pc, owner);
  DCHECK(lock_owner_method == nullptr || !lock_owner_method->IsProxyMethod());
}

void Monitor::SetLockingMethodNoProxy(Thread* owner) {
  DCHECK(owner == Thread::Current());
  uint32_t lock_owner_dex_pc;
  ArtMethod* lock_owner_method = owner->GetCurrentMethod(&lock_owner_dex_pc);
  // We don't expect a proxy method here.
  DCHECK(lock_owner_method == nullptr || !lock_owner_method->IsProxyMethod());
  SetLockOwnerInfo(lock_owner_method, lock_owner_dex_pc, owner);
}

bool Monitor::Install(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
  // This may or may not result in acquiring monitor_lock_. Its behavior is much more complicated
  // than what clang thread safety analysis understands.
  // Monitor is not yet public.
  Thread* owner = owner_.load(std::memory_order_relaxed);
  CHECK(owner == nullptr || owner == self || (ART_USE_FUTEXES && owner->IsSuspended()));
  // Propagate the lock state.
  LockWord lw(GetObject()->GetLockWord(false));
  switch (lw.GetState()) {
    case LockWord::kThinLocked: {
      DCHECK(owner != nullptr);
      CHECK_EQ(owner->GetThreadId(), lw.ThinLockOwner());
      DCHECK_EQ(monitor_lock_.GetExclusiveOwnerTid(), 0) << " my tid = " << SafeGetTid(self);
      lock_count_ = lw.ThinLockCount();
#if ART_USE_FUTEXES
      monitor_lock_.ExclusiveLockUncontendedFor(owner);
#else
      monitor_lock_.ExclusiveLock(owner);
#endif
      DCHECK_EQ(monitor_lock_.GetExclusiveOwnerTid(), owner->GetTid())
          << " my tid = " << SafeGetTid(self);
      LockWord fat(this, lw.GCState());
      // Publish the updated lock word, which may race with other threads.
      bool success = GetObject()->CasLockWord(lw, fat, CASMode::kWeak, std::memory_order_release);
      if (success) {
        if (ATraceEnabled()) {
          SetLockingMethod(owner);
        }
        return true;
      } else {
#if ART_USE_FUTEXES
        monitor_lock_.ExclusiveUnlockUncontended();
#else
        for (uint32_t i = 0; i <= lock_count_; ++i) {
          monitor_lock_.ExclusiveUnlock(owner);
        }
#endif
        return false;
      }
    }
    case LockWord::kHashCode: {
      CHECK_EQ(hash_code_.load(std::memory_order_relaxed), static_cast<int32_t>(lw.GetHashCode()));
      DCHECK_EQ(monitor_lock_.GetExclusiveOwnerTid(), 0) << " my tid = " << SafeGetTid(self);
      LockWord fat(this, lw.GCState());
      return GetObject()->CasLockWord(lw, fat, CASMode::kWeak, std::memory_order_release);
    }
    case LockWord::kFatLocked: {
      // The owner_ is suspended but another thread beat us to install a monitor.
      return false;
    }
    case LockWord::kUnlocked: {
      LOG(FATAL) << "Inflating unlocked lock word";
      UNREACHABLE();
    }
    default: {
      LOG(FATAL) << "Invalid monitor state " << lw.GetState();
      UNREACHABLE();
    }
  }
}

Monitor::~Monitor() {
  // Deflated monitors have a null object.
}

void Monitor::AppendToWaitSet(Thread* thread) {
  // Not checking that the owner is equal to this thread, since we've released
  // the monitor by the time this method is called.
  DCHECK(thread != nullptr);
  DCHECK(thread->GetWaitNext() == nullptr) << thread->GetWaitNext();
  if (wait_set_ == nullptr) {
    wait_set_ = thread;
    return;
  }

  // push_back.
  Thread* t = wait_set_;
  while (t->GetWaitNext() != nullptr) {
    t = t->GetWaitNext();
  }
  t->SetWaitNext(thread);
}

void Monitor::RemoveFromWaitSet(Thread* thread) {
  DCHECK(owner_ == Thread::Current());
  DCHECK(thread != nullptr);
  auto remove = [&](Thread*& set) {
    if (set != nullptr) {
      if (set == thread) {
        set = thread->GetWaitNext();
        thread->SetWaitNext(nullptr);
        return true;
      }
      Thread* t = set;
      while (t->GetWaitNext() != nullptr) {
        if (t->GetWaitNext() == thread) {
          t->SetWaitNext(thread->GetWaitNext());
          thread->SetWaitNext(nullptr);
          return true;
        }
        t = t->GetWaitNext();
      }
    }
    return false;
  };
  if (remove(wait_set_)) {
    return;
  }
  remove(wake_set_);
}

void Monitor::SetObject(ObjPtr<mirror::Object> object) {
  obj_ = GcRoot<mirror::Object>(object);
}

// This function is inlined and just helps avoid repeating the VLOG and ATRACE check at all the
// potential tracing points.
void Monitor::AtraceMonitorLock(Thread* self, ObjPtr<mirror::Object> obj, bool is_wait) {
  if (UNLIKELY(VLOG_IS_ON(systrace_lock_logging) && ATraceEnabled())) {
    AtraceMonitorLockImpl(self, obj, is_wait);
  }
}

void Monitor::AtraceMonitorLockImpl(Thread* self, ObjPtr<mirror::Object> obj, bool is_wait) {
  // Wait() requires a deeper call stack to be useful. Otherwise you'll see "Waiting at
  // Object.java". Assume that we'll wait a nontrivial amount, so it's OK to do a longer
  // stack walk than if !is_wait.
  const size_t wanted_frame_number = is_wait ? 1U : 0U;

  ArtMethod* method = nullptr;
  uint32_t dex_pc = 0u;

  size_t current_frame_number = 0u;
  StackVisitor::WalkStack(
      // Note: Adapted from CurrentMethodVisitor in thread.cc. We must not resolve here.
      [&](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
        ArtMethod* m = stack_visitor->GetMethod();
        if (m == nullptr || m->IsRuntimeMethod()) {
          // Runtime method, upcall, or resolution issue. Skip.
          return true;
        }

        // Is this the requested frame?
        if (current_frame_number == wanted_frame_number) {
          method = m;
          dex_pc = stack_visitor->GetDexPc(false /* abort_on_error */);
          return false;
        }

        // Look for more.
        current_frame_number++;
        return true;
      },
      self,
      /* context= */ nullptr,
      art::StackVisitor::StackWalkKind::kIncludeInlinedFrames);

  const char* prefix = is_wait ? "Waiting on " : "Locking ";

  const char* filename;
  int32_t line_number;
  TranslateLocation(method, dex_pc, &filename, &line_number);

  // It would be nice to have a stable "ID" for the object here. However, the only stable thing
  // would be the identity hashcode. But we cannot use IdentityHashcode here: For one, there are
  // times when it is unsafe to make that call (see stack dumping for an explanation). More
  // importantly, we would have to give up on thin-locking when adding systrace locks, as the
  // identity hashcode is stored in the lockword normally (so can't be used with thin-locks).
  //
  // Because of thin-locks we also cannot use the monitor id (as there is no monitor). Monitor ids
  // also do not have to be stable, as the monitor may be deflated.
  std::string tmp = StringPrintf("%s %d at %s:%d",
      prefix,
      (obj == nullptr ? -1 : static_cast<int32_t>(reinterpret_cast<uintptr_t>(obj.Ptr()))),
      (filename != nullptr ? filename : "null"),
      line_number);
  ATraceBegin(tmp.c_str());
}

void Monitor::AtraceMonitorUnlock() {
  if (UNLIKELY(VLOG_IS_ON(systrace_lock_logging))) {
    ATraceEnd();
  }
}

std::string Monitor::PrettyContentionInfo(const std::string& owner_name,
                                          pid_t owner_tid,
                                          ArtMethod* owners_method,
                                          uint32_t owners_dex_pc,
                                          size_t num_waiters) {
  Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
  const char* owners_filename;
  int32_t owners_line_number = 0;
  if (owners_method != nullptr) {
    TranslateLocation(owners_method, owners_dex_pc, &owners_filename, &owners_line_number);
  }
  std::ostringstream oss;
  oss << "monitor contention with owner " << owner_name << " (" << owner_tid << ")";
  if (owners_method != nullptr) {
    oss << " at " << owners_method->PrettyMethod();
    oss << "(" << owners_filename << ":" << owners_line_number << ")";
  }
  oss << " waiters=" << num_waiters;
  return oss.str();
}

bool Monitor::TryLock(Thread* self, bool spin) {
  Thread* owner = owner_.load(std::memory_order_relaxed);
  if (owner == self) {
    lock_count_++;
    CHECK_NE(lock_count_, 0u);  // Abort on overflow.
  } else {
    bool success = spin ? monitor_lock_.ExclusiveTryLockWithSpinning(self)
        : monitor_lock_.ExclusiveTryLock(self);
    if (!success) {
      return false;
    }
    DCHECK(owner_.load(std::memory_order_relaxed) == nullptr);
    owner_.store(self, std::memory_order_relaxed);
    CHECK_EQ(lock_count_, 0u);
    if (ATraceEnabled()) {
      SetLockingMethodNoProxy(self);
    }
  }
  DCHECK(monitor_lock_.IsExclusiveHeld(self));
  AtraceMonitorLock(self, GetObject(), /* is_wait= */ false);
  return true;
}
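// Note on the recursion count (behavior as implemented above): the first acquisition by a thread
// leaves lock_count_ == 0, and each reentrant acquisition increments it, so after two nested
// acquisitions by the owner, lock_count_ == 1. Unlock() below decrements it back to 0 before the
// final unlock actually releases monitor_lock_.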

template <LockReason reason>
void Monitor::Lock(Thread* self) {
  bool called_monitors_callback = false;
  if (TryLock(self, /*spin=*/ true)) {
    // TODO: This preserves original behavior. Correct?
    if (called_monitors_callback) {
      CHECK(reason == LockReason::kForLock);
      Runtime::Current()->GetRuntimeCallbacks()->MonitorContendedLocked(this);
    }
    return;
  }
  // Contended; not reentrant. We hold no locks, so tread carefully.
  const bool log_contention = (lock_profiling_threshold_ != 0);
  uint64_t wait_start_ms = log_contention ? MilliTime() : 0;

  Thread* orig_owner = nullptr;
  ArtMethod* owners_method;
  uint32_t owners_dex_pc;

  // Do this before releasing the mutator lock so that we don't get deflated.
  size_t num_waiters = num_waiters_.fetch_add(1, std::memory_order_relaxed);

  bool started_trace = false;
  if (ATraceEnabled() && owner_.load(std::memory_order_relaxed) != nullptr) {
    // Acquiring thread_list_lock_ ensures that owner doesn't disappear while
    // we're looking at it.
    Locks::thread_list_lock_->ExclusiveLock(self);
    orig_owner = owner_.load(std::memory_order_relaxed);
    if (orig_owner != nullptr) {  // Did the owner_ give the lock up?
      const uint32_t orig_owner_thread_id = orig_owner->GetThreadId();
      GetLockOwnerInfo(&owners_method, &owners_dex_pc, orig_owner);
      std::ostringstream oss;
      std::string name;
      orig_owner->GetThreadName(name);
      oss << PrettyContentionInfo(name,
                                  orig_owner_thread_id,
                                  owners_method,
                                  owners_dex_pc,
                                  num_waiters);
      Locks::thread_list_lock_->ExclusiveUnlock(self);
      // Add info for contending thread.
      uint32_t pc;
      ArtMethod* m = self->GetCurrentMethod(&pc);
      const char* filename;
      int32_t line_number;
      TranslateLocation(m, pc, &filename, &line_number);
      oss << " blocking from "
          << ArtMethod::PrettyMethod(m) << "(" << (filename != nullptr ? filename : "null")
          << ":" << line_number << ")";
      ATraceBegin(oss.str().c_str());
      started_trace = true;
    } else {
      Locks::thread_list_lock_->ExclusiveUnlock(self);
    }
  }
  if (log_contention) {
    // Request the current holder to set lock_owner_info.
    // Do this even if tracing is enabled, so we semi-consistently get the information
    // corresponding to MonitorExit.
    // TODO: Consider optionally obtaining a stack trace here via a checkpoint.  That would allow
    // us to see what the other thread is doing while we're waiting.
    orig_owner = owner_.load(std::memory_order_relaxed);
    lock_owner_request_.store(orig_owner, std::memory_order_relaxed);
  }
  // Call the contended locking callback once and only once. Also only call it if we are locking
  // for the first time, not during a Wait wakeup.
  if (reason == LockReason::kForLock && !called_monitors_callback) {
    called_monitors_callback = true;
    Runtime::Current()->GetRuntimeCallbacks()->MonitorContendedLocking(this);
  }
  self->SetMonitorEnterObject(GetObject().Ptr());
  {
    ScopedThreadSuspension tsc(self, kBlocked);  // Change to blocked and give up mutator_lock_.

    // Acquire monitor_lock_ without mutator_lock_, expecting to block this time.
    // We already tried spinning above. The shutdown procedure currently assumes we stop
    // touching monitors shortly after we suspend, so don't spin again here.
    monitor_lock_.ExclusiveLock(self);

    if (log_contention && orig_owner != nullptr) {
      // Woken from contention.
      uint64_t wait_ms = MilliTime() - wait_start_ms;
      uint32_t sample_percent;
      if (wait_ms >= lock_profiling_threshold_) {
        sample_percent = 100;
      } else {
        sample_percent = 100 * wait_ms / lock_profiling_threshold_;
      }
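      // Worked example (no new behavior): with lock_profiling_threshold_ == 500ms, a 250ms wait
      // yields sample_percent == 50, so roughly half of such contention events get logged; any
      // wait at or above the threshold is always logged (sample_percent == 100).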
      if (sample_percent != 0 && (static_cast<uint32_t>(rand() % 100) < sample_percent)) {
        // Do this unconditionally for consistency. It's possible another thread
        // snuck in in the middle, and tracing was enabled. In that case, we may get its
        // MonitorEnter information. We can live with that.
        GetLockOwnerInfo(&owners_method, &owners_dex_pc, orig_owner);

        // Reacquire mutator_lock_ for logging.
        ScopedObjectAccess soa(self);

        const bool should_dump_stacks = stack_dump_lock_profiling_threshold_ > 0 &&
            wait_ms > stack_dump_lock_profiling_threshold_;

        // Acquire thread-list lock to find thread and keep it from dying until we've got all
        // the info we need.
        Locks::thread_list_lock_->ExclusiveLock(self);

        // Is there still a thread at the same address as the original owner?
        // We tolerate the fact that it may occasionally be the wrong one.
        if (Runtime::Current()->GetThreadList()->Contains(orig_owner)) {
          uint32_t original_owner_tid = orig_owner->GetTid();  // System thread id.
          std::string original_owner_name;
          orig_owner->GetThreadName(original_owner_name);
          std::string owner_stack_dump;

          if (should_dump_stacks) {
            // Very long contention. Dump stacks.
            struct CollectStackTrace : public Closure {
              void Run(art::Thread* thread) override
                  REQUIRES_SHARED(art::Locks::mutator_lock_) {
                thread->DumpJavaStack(oss);
              }

              std::ostringstream oss;
            };
            CollectStackTrace owner_trace;
            // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its
            // execution.
            orig_owner->RequestSynchronousCheckpoint(&owner_trace);
            owner_stack_dump = owner_trace.oss.str();
          } else {
            Locks::thread_list_lock_->ExclusiveUnlock(self);
          }

          // This is all the data we need. We dropped the thread-list lock, it's OK for the
          // owner to go away now.

          if (should_dump_stacks) {
            // Give the detailed traces for really long contention.
            // This must be here (and not above) because we cannot hold the thread-list lock
            // while running the checkpoint.
            std::ostringstream self_trace_oss;
            self->DumpJavaStack(self_trace_oss);

            uint32_t pc;
            ArtMethod* m = self->GetCurrentMethod(&pc);

            LOG(WARNING) << "Long "
                << PrettyContentionInfo(original_owner_name,
                                        original_owner_tid,
                                        owners_method,
                                        owners_dex_pc,
                                        num_waiters)
                << " in " << ArtMethod::PrettyMethod(m) << " for "
                << PrettyDuration(MsToNs(wait_ms)) << "\n"
                << "Current owner stack:\n" << owner_stack_dump
                << "Contender stack:\n" << self_trace_oss.str();
          } else if (wait_ms > kLongWaitMs && owners_method != nullptr) {
            uint32_t pc;
            ArtMethod* m = self->GetCurrentMethod(&pc);
            // TODO: We should maybe check that original_owner is still a live thread.
            LOG(WARNING) << "Long "
                << PrettyContentionInfo(original_owner_name,
                                        original_owner_tid,
                                        owners_method,
                                        owners_dex_pc,
                                        num_waiters)
                << " in " << ArtMethod::PrettyMethod(m) << " for "
                << PrettyDuration(MsToNs(wait_ms));
          }
          LogContentionEvent(self,
                             wait_ms,
                             sample_percent,
                             owners_method,
                             owners_dex_pc);
        } else {
          Locks::thread_list_lock_->ExclusiveUnlock(self);
        }
      }
    }
  }
  // We've successfully acquired monitor_lock_, released thread_list_lock_, and are runnable.

  // We avoided touching monitor fields while suspended, so set owner_ here.
  owner_.store(self, std::memory_order_relaxed);
  DCHECK_EQ(lock_count_, 0u);

  if (ATraceEnabled()) {
    SetLockingMethodNoProxy(self);
  }
  if (started_trace) {
    ATraceEnd();
  }
  self->SetMonitorEnterObject(nullptr);
  num_waiters_.fetch_sub(1, std::memory_order_relaxed);
  DCHECK(monitor_lock_.IsExclusiveHeld(self));
  // We need to pair this with a single contended locking call. NB we match the RI behavior and
  // call this even if MonitorEnter failed.
  if (called_monitors_callback) {
    CHECK(reason == LockReason::kForLock);
    Runtime::Current()->GetRuntimeCallbacks()->MonitorContendedLocked(this);
  }
}

template void Monitor::Lock<LockReason::kForLock>(Thread* self);
template void Monitor::Lock<LockReason::kForWait>(Thread* self);

static void ThrowIllegalMonitorStateExceptionF(const char* fmt, ...)
                                              __attribute__((format(printf, 1, 2)));

static void ThrowIllegalMonitorStateExceptionF(const char* fmt, ...)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  va_list args;
  va_start(args, fmt);
  Thread* self = Thread::Current();
  self->ThrowNewExceptionV("Ljava/lang/IllegalMonitorStateException;", fmt, args);
  if (!Runtime::Current()->IsStarted() || VLOG_IS_ON(monitor)) {
    std::ostringstream ss;
    self->Dump(ss);
    LOG(Runtime::Current()->IsStarted() ? ::android::base::INFO : ::android::base::ERROR)
        << self->GetException()->Dump() << "\n" << ss.str();
  }
  va_end(args);
}

static std::string ThreadToString(Thread* thread) {
  if (thread == nullptr) {
    return "nullptr";
  }
  std::ostringstream oss;
  // TODO: alternatively, we could just return the thread's name.
  oss << *thread;
  return oss.str();
}

void Monitor::FailedUnlock(ObjPtr<mirror::Object> o,
                           uint32_t expected_owner_thread_id,
                           uint32_t found_owner_thread_id,
                           Monitor* monitor) {
  std::string current_owner_string;
  std::string expected_owner_string;
  std::string found_owner_string;
  uint32_t current_owner_thread_id = 0u;
  {
    MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
    ThreadList* const thread_list = Runtime::Current()->GetThreadList();
    Thread* expected_owner = thread_list->FindThreadByThreadId(expected_owner_thread_id);
    Thread* found_owner = thread_list->FindThreadByThreadId(found_owner_thread_id);

    // Re-read owner now that we hold lock.
    Thread* current_owner = (monitor != nullptr) ? monitor->GetOwner() : nullptr;
    if (current_owner != nullptr) {
      current_owner_thread_id = current_owner->GetThreadId();
    }
    // Get short descriptions of the threads involved.
    current_owner_string = ThreadToString(current_owner);
    expected_owner_string = expected_owner != nullptr ? ThreadToString(expected_owner) : "unnamed";
    found_owner_string = found_owner != nullptr ? ThreadToString(found_owner) : "unnamed";
  }

  if (current_owner_thread_id == 0u) {
    if (found_owner_thread_id == 0u) {
      ThrowIllegalMonitorStateExceptionF("unlock of unowned monitor on object of type '%s'"
                                         " on thread '%s'",
                                         mirror::Object::PrettyTypeOf(o).c_str(),
                                         expected_owner_string.c_str());
    } else {
      // Race: the original read found an owner, but now there is none.
      ThrowIllegalMonitorStateExceptionF("unlock of monitor owned by '%s' on object of type '%s'"
                                         " (where now the monitor appears unowned) on thread '%s'",
                                         found_owner_string.c_str(),
                                         mirror::Object::PrettyTypeOf(o).c_str(),
                                         expected_owner_string.c_str());
    }
  } else {
    if (found_owner_thread_id == 0u) {
      // Race: originally there was no owner, but there is one now.
      ThrowIllegalMonitorStateExceptionF("unlock of monitor owned by '%s' on object of type '%s'"
                                         " (originally believed to be unowned) on thread '%s'",
                                         current_owner_string.c_str(),
                                         mirror::Object::PrettyTypeOf(o).c_str(),
                                         expected_owner_string.c_str());
    } else {
      if (found_owner_thread_id != current_owner_thread_id) {
        // Race: the originally found owner and the current owner differ.
        ThrowIllegalMonitorStateExceptionF("unlock of monitor originally owned by '%s' (now"
                                           " owned by '%s') on object of type '%s' on thread '%s'",
                                           found_owner_string.c_str(),
                                           current_owner_string.c_str(),
                                           mirror::Object::PrettyTypeOf(o).c_str(),
                                           expected_owner_string.c_str());
      } else {
        ThrowIllegalMonitorStateExceptionF("unlock of monitor owned by '%s' on object of type '%s'"
                                           " on thread '%s'",
                                           current_owner_string.c_str(),
                                           mirror::Object::PrettyTypeOf(o).c_str(),
                                           expected_owner_string.c_str());
      }
    }
  }
}

bool Monitor::Unlock(Thread* self) {
  DCHECK(self != nullptr);
  Thread* owner = owner_.load(std::memory_order_relaxed);
  if (owner == self) {
    // We own the monitor, so nobody else can be in here.
    CheckLockOwnerRequest(self);
    AtraceMonitorUnlock();
    if (lock_count_ == 0) {
      owner_.store(nullptr, std::memory_order_relaxed);
      SignalWaiterAndReleaseMonitorLock(self);
    } else {
      --lock_count_;
      DCHECK(monitor_lock_.IsExclusiveHeld(self));
      DCHECK_EQ(owner_.load(std::memory_order_relaxed), self);
      // Keep monitor_lock_, but pretend we released it.
      FakeUnlockMonitorLock();
    }
    return true;
  }
  // We don't own this, so we're not allowed to unlock it.
  // The JNI spec says that we should throw IllegalMonitorStateException in this case.
  uint32_t owner_thread_id = 0u;
  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    owner = owner_.load(std::memory_order_relaxed);
    if (owner != nullptr) {
      owner_thread_id = owner->GetThreadId();
    }
  }
  FailedUnlock(GetObject(), self->GetThreadId(), owner_thread_id, this);
  // Pretend to release monitor_lock_, even though we never actually acquired it.
  FakeUnlockMonitorLock();
  return false;
}

void Monitor::SignalWaiterAndReleaseMonitorLock(Thread* self) {
  // We want to release the monitor and signal up to one thread that was waiting
  // but has since been notified.
  DCHECK_EQ(lock_count_, 0u);
  DCHECK(monitor_lock_.IsExclusiveHeld(self));
  while (wake_set_ != nullptr) {
    // No risk of waking ourselves here; since monitor_lock_ is not released until we're ready to
    // return, notify can't move the current thread from wait_set_ to wake_set_ until this
    // method is done checking wake_set_.
    Thread* thread = wake_set_;
    wake_set_ = thread->GetWaitNext();
    thread->SetWaitNext(nullptr);
    DCHECK(owner_.load(std::memory_order_relaxed) == nullptr);

    // Check to see if the thread is still waiting.
    {
      // In the case of wait(), we'll be acquiring another thread's GetWaitMutex with
      // self's GetWaitMutex held. This does not risk deadlock, because we only acquire this lock
      // for threads in the wake_set_. A thread can only enter wake_set_ from Notify or NotifyAll,
      // and those hold monitor_lock_. Thus, the threads whose wait mutexes we acquire here must
      // have already been released from wait(), since we have not released monitor_lock_ until
      // after we've chosen our thread to wake, so there is no risk of the following lock ordering
      // leading to deadlock:
      // Thread 1 waits
      // Thread 2 waits
      // Thread 3 moves threads 1 and 2 from wait_set_ to wake_set_
      // Thread 1 enters this block, and attempts to acquire Thread 2's GetWaitMutex to wake it
      // Thread 2 enters this block, and attempts to acquire Thread 1's GetWaitMutex to wake it
      //
      // Since monitor_lock_ is not released until the thread-to-be-woken-up's GetWaitMutex is
      // acquired, two threads cannot attempt to acquire each other's GetWaitMutex while holding
      // their own and cause deadlock.
      MutexLock wait_mu(self, *thread->GetWaitMutex());
      if (thread->GetWaitMonitor() != nullptr) {
        // Release the lock, so that a potentially awakened thread will not
        // immediately contend on it. The lock ordering here is:
        // monitor_lock_, self->GetWaitMutex, thread->GetWaitMutex
        monitor_lock_.Unlock(self);  // Releases contenders.
        thread->GetWaitConditionVariable()->Signal(self);
        return;
      }
    }
  }
  monitor_lock_.Unlock(self);
  DCHECK(!monitor_lock_.IsExclusiveHeld(self));
}

void Monitor::Wait(Thread* self, int64_t ms, int32_t ns,
                   bool interruptShouldThrow, ThreadState why) {
  DCHECK(self != nullptr);
  DCHECK(why == kTimedWaiting || why == kWaiting || why == kSleeping);

  // Make sure that we hold the lock.
  if (owner_.load(std::memory_order_relaxed) != self) {
    ThrowIllegalMonitorStateExceptionF("object not locked by thread before wait()");
    return;
  }

  // We need to turn a zero-length timed wait into a regular wait because
  // Object.wait(0, 0) is defined as Object.wait(0), which is defined as Object.wait().
  if (why == kTimedWaiting && (ms == 0 && ns == 0)) {
    why = kWaiting;
  }

  // Enforce the timeout range.
  if (ms < 0 || ns < 0 || ns > 999999) {
    self->ThrowNewExceptionF("Ljava/lang/IllegalArgumentException;",
                             "timeout arguments out of range: ms=%" PRId64 " ns=%d", ms, ns);
    return;
  }

  CheckLockOwnerRequest(self);

  /*
   * Release our hold - we need to let it go even if we're a few levels
   * deep in a recursive lock, and we need to restore that later.
   */
  unsigned int prev_lock_count = lock_count_;
  lock_count_ = 0;

  AtraceMonitorUnlock();  // For the implicit Unlock() just above. This will only end the deepest
                          // nesting, but that is enough for the visualization, and corresponds to
                          // the single Lock() we do afterwards.
  AtraceMonitorLock(self, GetObject(), /* is_wait= */ true);

  bool was_interrupted = false;
  bool timed_out = false;
  // Update monitor state now; it's not safe once we're "suspended".
  owner_.store(nullptr, std::memory_order_relaxed);
  num_waiters_.fetch_add(1, std::memory_order_relaxed);
  {
    // Update thread state. If the GC wakes up, it'll ignore us, knowing
    // that we won't touch any references in this state, and we'll check
    // our suspend mode before we transition out.
    ScopedThreadSuspension sts(self, why);

    // Pseudo-atomically wait on self's wait_cond_ and release the monitor lock.
    MutexLock mu(self, *self->GetWaitMutex());

    /*
     * Add ourselves to the set of threads waiting on this monitor.
     * It's important that we are only added to the wait set after
     * acquiring our GetWaitMutex, so that calls to Notify() that occur after we
     * have released monitor_lock_ will not move us from wait_set_ to wake_set_
     * until we've signalled contenders on this monitor.
     */
    AppendToWaitSet(self);

    // Set wait_monitor_ to the monitor object we will be waiting on. When wait_monitor_ is
    // non-null a notifying or interrupting thread must signal the thread's wait_cond_ to wake it
    // up.
    DCHECK(self->GetWaitMonitor() == nullptr);
    self->SetWaitMonitor(this);

    // Release the monitor lock.
    DCHECK(monitor_lock_.IsExclusiveHeld(self));
    SignalWaiterAndReleaseMonitorLock(self);

    // Handle the case where the thread was interrupted before we called wait().
    if (self->IsInterrupted()) {
      was_interrupted = true;
    } else {
      // Wait for a notification or a timeout to occur.
      if (why == kWaiting) {
        self->GetWaitConditionVariable()->Wait(self);
      } else {
        DCHECK(why == kTimedWaiting || why == kSleeping) << why;
        timed_out = self->GetWaitConditionVariable()->TimedWait(self, ms, ns);
      }
      was_interrupted = self->IsInterrupted();
    }
  }

  {
    // We reset the thread's wait_monitor_ field after transitioning back to runnable so
    // that a thread in a waiting/sleeping state has a non-null wait_monitor_ for debugging
    // and diagnostic purposes. (If you reset this earlier, stack dumps will claim that threads
    // are waiting on "null".)
    MutexLock mu(self, *self->GetWaitMutex());
    DCHECK(self->GetWaitMonitor() != nullptr);
    self->SetWaitMonitor(nullptr);
  }

  // Allocate the interrupted exception not holding the monitor lock since it may cause a GC.
  // If the GC requires acquiring the monitor for enqueuing cleared references, this would
  // cause a deadlock if the monitor is held.
  if (was_interrupted && interruptShouldThrow) {
    /*
     * We were interrupted while waiting, or somebody interrupted an
     * un-interruptible thread earlier and we're bailing out immediately.
     *
     * The doc sayeth: "The interrupted status of the current thread is
     * cleared when this exception is thrown."
     */
    self->SetInterrupted(false);
    self->ThrowNewException("Ljava/lang/InterruptedException;", nullptr);
  }

  AtraceMonitorUnlock();  // End Wait().

  // We just slept, tell the runtime callbacks about this.
  Runtime::Current()->GetRuntimeCallbacks()->MonitorWaitFinished(this, timed_out);

  // Re-acquire the monitor and lock.
  Lock<LockReason::kForWait>(self);
  lock_count_ = prev_lock_count;
  DCHECK(monitor_lock_.IsExclusiveHeld(self));
  self->GetWaitMutex()->AssertNotHeld(self);

  num_waiters_.fetch_sub(1, std::memory_order_relaxed);
  RemoveFromWaitSet(self);
}

void Monitor::Notify(Thread* self) {
  DCHECK(self != nullptr);
  // Make sure that we hold the lock.
  if (owner_.load(std::memory_order_relaxed) != self) {
    ThrowIllegalMonitorStateExceptionF("object not locked by thread before notify()");
    return;
  }
  // Move one thread from waiters to wake set.
  Thread* to_move = wait_set_;
  if (to_move != nullptr) {
    wait_set_ = to_move->GetWaitNext();
    to_move->SetWaitNext(wake_set_);
    wake_set_ = to_move;
  }
}
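// Worked example (no new behavior): with wait_set_ == T1 -> T2 and wake_set_ == T3, Notify()
// moves the head of the wait set, leaving wait_set_ == T2 and wake_set_ == T1 -> T3. NotifyAll()
// below instead splices the entire wait set onto the tail of the wake set.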

void Monitor::NotifyAll(Thread* self) {
  DCHECK(self != nullptr);
  // Make sure that we hold the lock.
  if (owner_.load(std::memory_order_relaxed) != self) {
    ThrowIllegalMonitorStateExceptionF("object not locked by thread before notifyAll()");
    return;
  }

  // Move all threads from waiters to wake set.
  Thread* to_move = wait_set_;
  if (to_move != nullptr) {
    wait_set_ = nullptr;
    Thread* move_to = wake_set_;
    if (move_to == nullptr) {
      wake_set_ = to_move;
      return;
    }
    while (move_to->GetWaitNext() != nullptr) {
      move_to = move_to->GetWaitNext();
    }
    move_to->SetWaitNext(to_move);
  }
}

bool Monitor::Deflate(Thread* self, ObjPtr<mirror::Object> obj) {
  DCHECK(obj != nullptr);
  // Don't need volatile since we only deflate with mutators suspended.
  LockWord lw(obj->GetLockWord(false));
  // If the lock isn't an inflated monitor, then we don't need to deflate anything.
  if (lw.GetState() == LockWord::kFatLocked) {
    Monitor* monitor = lw.FatLockMonitor();
    DCHECK(monitor != nullptr);
    // Can't deflate if we have anybody waiting on the CV or trying to acquire the monitor.
    if (monitor->num_waiters_.load(std::memory_order_relaxed) > 0) {
      return false;
    }
    if (!monitor->monitor_lock_.ExclusiveTryLock(self)) {
      // We cannot deflate a monitor that's currently held. It's unclear whether we should if
      // we could.
      return false;
    }
    DCHECK_EQ(monitor->lock_count_, 0u);
    DCHECK_EQ(monitor->owner_.load(std::memory_order_relaxed), static_cast<Thread*>(nullptr));
    if (monitor->HasHashCode()) {
      LockWord new_lw = LockWord::FromHashCode(monitor->GetHashCode(), lw.GCState());
      // Assume no concurrent read barrier state changes as mutators are suspended.
      obj->SetLockWord(new_lw, false);
      VLOG(monitor) << "Deflated " << obj << " to hash monitor " << monitor->GetHashCode();
    } else {
      // No lock and no hash, just put an empty lock word inside the object.
      LockWord new_lw = LockWord::FromDefault(lw.GCState());
      // Assume no concurrent read barrier state changes as mutators are suspended.
      obj->SetLockWord(new_lw, false);
      VLOG(monitor) << "Deflated " << obj << " to empty lock word";
    }
    monitor->monitor_lock_.ExclusiveUnlock(self);
    DCHECK(!(monitor->monitor_lock_.IsExclusiveHeld(self)));
    // The monitor is deflated, mark the object as null so that we know to delete it during the
    // next GC.
    monitor->obj_ = GcRoot<mirror::Object>(nullptr);
  }
  return true;
}
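// For example (behavior as implemented above): a fat monitor with no waiters, no owner, and a
// cached identity hash deflates to a kHashCode lock word, and a later MonitorEnter re-inflates
// using that saved hash (see the kHashCode case in MonitorEnter below); a monitor with no hash
// code deflates to an empty lock word and can be thin-locked again.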

void Monitor::Inflate(Thread* self, Thread* owner, ObjPtr<mirror::Object> obj, int32_t hash_code) {
  DCHECK(self != nullptr);
  DCHECK(obj != nullptr);
  // Allocate and acquire a new monitor.
  Monitor* m = MonitorPool::CreateMonitor(self, owner, obj, hash_code);
  DCHECK(m != nullptr);
  if (m->Install(self)) {
    if (owner != nullptr) {
      VLOG(monitor) << "monitor: thread " << owner->GetThreadId()
          << " created monitor " << m << " for object " << obj;
    } else {
      VLOG(monitor) << "monitor: Inflate with hashcode " << hash_code
          << " created monitor " << m << " for object " << obj;
    }
    Runtime::Current()->GetMonitorList()->Add(m);
    CHECK_EQ(obj->GetLockWord(true).GetState(), LockWord::kFatLocked);
  } else {
    MonitorPool::ReleaseMonitor(self, m);
  }
}

void Monitor::InflateThinLocked(Thread* self, Handle<mirror::Object> obj, LockWord lock_word,
                                uint32_t hash_code) {
  DCHECK_EQ(lock_word.GetState(), LockWord::kThinLocked);
  uint32_t owner_thread_id = lock_word.ThinLockOwner();
  if (owner_thread_id == self->GetThreadId()) {
    // We own the monitor, we can easily inflate it.
    Inflate(self, self, obj.Get(), hash_code);
  } else {
    ThreadList* thread_list = Runtime::Current()->GetThreadList();
    // Suspend the owner, inflate. First change to blocked and give up mutator_lock_.
    self->SetMonitorEnterObject(obj.Get());
    bool timed_out;
    Thread* owner;
    {
      ScopedThreadSuspension sts(self, kWaitingForLockInflation);
      owner = thread_list->SuspendThreadByThreadId(owner_thread_id,
                                                   SuspendReason::kInternal,
                                                   &timed_out);
    }
    if (owner != nullptr) {
      // We succeeded in suspending the thread, check the lock's status didn't change.
      lock_word = obj->GetLockWord(true);
      if (lock_word.GetState() == LockWord::kThinLocked &&
          lock_word.ThinLockOwner() == owner_thread_id) {
        // Go ahead and inflate the lock.
        Inflate(self, owner, obj.Get(), hash_code);
      }
      bool resumed = thread_list->Resume(owner, SuspendReason::kInternal);
      DCHECK(resumed);
    }
    self->SetMonitorEnterObject(nullptr);
  }
}

// Fool annotalysis into thinking that the lock on obj is acquired.
static ObjPtr<mirror::Object> FakeLock(ObjPtr<mirror::Object> obj)
    EXCLUSIVE_LOCK_FUNCTION(obj.Ptr()) NO_THREAD_SAFETY_ANALYSIS {
  return obj;
}

// Fool annotalysis into thinking that the lock on obj is released.
static ObjPtr<mirror::Object> FakeUnlock(ObjPtr<mirror::Object> obj)
    UNLOCK_FUNCTION(obj.Ptr()) NO_THREAD_SAFETY_ANALYSIS {
  return obj;
}

ObjPtr<mirror::Object> Monitor::MonitorEnter(Thread* self,
                                             ObjPtr<mirror::Object> obj,
                                             bool trylock) {
  DCHECK(self != nullptr);
  DCHECK(obj != nullptr);
  self->AssertThreadSuspensionIsAllowable();
  obj = FakeLock(obj);
  uint32_t thread_id = self->GetThreadId();
  size_t contention_count = 0;
  constexpr size_t kExtraSpinIters = 100;
  StackHandleScope<1> hs(self);
  Handle<mirror::Object> h_obj(hs.NewHandle(obj));
#if !ART_USE_FUTEXES
  // In this case we cannot inflate an unowned monitor, so we sometimes defer inflation.
  bool should_inflate = false;
#endif
  while (true) {
    // We initially read the lockword with ordinary Java/relaxed semantics. When stronger
    // semantics are needed, we address it below. Since GetLockWord bottoms out to a relaxed load,
    // we can fix it later, in an infrequently executed case, with a fence.
    LockWord lock_word = h_obj->GetLockWord(false);
    switch (lock_word.GetState()) {
      case LockWord::kUnlocked: {
        // No ordering required for preceding lockword read, since we retest.
        LockWord thin_locked(LockWord::FromThinLockId(thread_id, 0, lock_word.GCState()));
        if (h_obj->CasLockWord(lock_word, thin_locked, CASMode::kWeak, std::memory_order_acquire)) {
#if !ART_USE_FUTEXES
          if (should_inflate) {
            InflateThinLocked(self, h_obj, lock_word, 0);
          }
#endif
          AtraceMonitorLock(self, h_obj.Get(), /* is_wait= */ false);
          return h_obj.Get();  // Success!
        }
        continue;  // Go again.
      }
      case LockWord::kThinLocked: {
        uint32_t owner_thread_id = lock_word.ThinLockOwner();
        if (owner_thread_id == thread_id) {
          // No ordering required for initial lockword read.
          // We own the lock, increase the recursion count.
          uint32_t new_count = lock_word.ThinLockCount() + 1;
          if (LIKELY(new_count <= LockWord::kThinLockMaxCount)) {
            LockWord thin_locked(LockWord::FromThinLockId(thread_id,
                                                          new_count,
                                                          lock_word.GCState()));
            // Only this thread pays attention to the count. Thus there is no need for stronger
            // than relaxed memory ordering.
            if (!kUseReadBarrier) {
              h_obj->SetLockWord(thin_locked, /* as_volatile= */ false);
              AtraceMonitorLock(self, h_obj.Get(), /* is_wait= */ false);
              return h_obj.Get();  // Success!
            } else {
              // Use CAS to preserve the read barrier state.
              if (h_obj->CasLockWord(lock_word,
                                     thin_locked,
                                     CASMode::kWeak,
                                     std::memory_order_relaxed)) {
                AtraceMonitorLock(self, h_obj.Get(), /* is_wait= */ false);
                return h_obj.Get();  // Success!
              }
            }
            continue;  // Go again.
          } else {
            // We'd overflow the recursion count, so inflate the monitor.
            InflateThinLocked(self, h_obj, lock_word, 0);
          }
        } else {
          if (trylock) {
            return nullptr;
          }
          // Contention.
          contention_count++;
          Runtime* runtime = Runtime::Current();
          if (contention_count
              <= kExtraSpinIters + runtime->GetMaxSpinsBeforeThinLockInflation()) {
            // TODO: Consider switching the thread state to kWaitingForLockInflation when we are
            // yielding.  Use sched_yield instead of NanoSleep since NanoSleep can wait much longer
            // than the parameter you pass in. This can cause thread suspension to take excessively
            // long and make long pauses. See b/16307460.
            if (contention_count > kExtraSpinIters) {
              sched_yield();
            }
          } else {
#if ART_USE_FUTEXES
            contention_count = 0;
            // No ordering required for initial lockword read. Install rereads it anyway.
            InflateThinLocked(self, h_obj, lock_word, 0);
#else
            // Can't inflate from a non-owning thread. Keep waiting. Bad for power, but this code
            // isn't used on-device.
            should_inflate = true;
            usleep(10);
#endif
          }
        }
        continue;  // Start from the beginning.
      }
      case LockWord::kFatLocked: {
        // We should have done an acquire read of the lockword initially, to ensure
        // visibility of the monitor data structure. Use an explicit fence instead.
        std::atomic_thread_fence(std::memory_order_acquire);
        Monitor* mon = lock_word.FatLockMonitor();
        if (trylock) {
          return mon->TryLock(self) ? h_obj.Get() : nullptr;
        } else {
          mon->Lock(self);
          DCHECK(mon->monitor_lock_.IsExclusiveHeld(self));
          return h_obj.Get();  // Success!
        }
      }
      case LockWord::kHashCode:
        // Inflate with the existing hashcode.
        // Again no ordering required for initial lockword read, since we don't rely
        // on the visibility of any prior computation.
        Inflate(self, nullptr, h_obj.Get(), lock_word.GetHashCode());
        continue;  // Start from the beginning.
      default: {
        LOG(FATAL) << "Invalid monitor state " << lock_word.GetState();
        UNREACHABLE();
      }
    }
  }
}
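// Illustrative pairing (a sketch, not a call site in this file): a runtime-internal
// monitorenter/monitorexit sequence looks roughly like
//   ObjPtr<mirror::Object> locked = Monitor::MonitorEnter(self, obj, /* trylock= */ false);
//   ...  // critical section
//   Monitor::MonitorExit(self, locked);
// With trylock == true, MonitorEnter() returns null instead of blocking when the lock is
// contended, as the kThinLocked and kFatLocked cases above show.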

bool Monitor::MonitorExit(Thread* self, ObjPtr<mirror::Object> obj) {
  DCHECK(self != nullptr);
  DCHECK(obj != nullptr);
  self->AssertThreadSuspensionIsAllowable();
  obj = FakeUnlock(obj);
  StackHandleScope<1> hs(self);
  Handle<mirror::Object> h_obj(hs.NewHandle(obj));
  while (true) {
    LockWord lock_word = obj->GetLockWord(true);
    switch (lock_word.GetState()) {
      case LockWord::kHashCode:
        // Fall-through.
      case LockWord::kUnlocked:
        FailedUnlock(h_obj.Get(), self->GetThreadId(), 0u, nullptr);
        return false;  // Failure.
      case LockWord::kThinLocked: {
        uint32_t thread_id = self->GetThreadId();
        uint32_t owner_thread_id = lock_word.ThinLockOwner();
        if (owner_thread_id != thread_id) {
          FailedUnlock(h_obj.Get(), thread_id, owner_thread_id, nullptr);
          return false;  // Failure.
        } else {
          // We own the lock, decrease the recursion count.
          LockWord new_lw = LockWord::Default();
          if (lock_word.ThinLockCount() != 0) {
            uint32_t new_count = lock_word.ThinLockCount() - 1;
            new_lw = LockWord::FromThinLockId(thread_id, new_count, lock_word.GCState());
          } else {
            new_lw = LockWord::FromDefault(lock_word.GCState());
          }
1251           if (!kUseReadBarrier) {
1252             DCHECK_EQ(new_lw.ReadBarrierState(), 0U);
1253             // TODO: This really only needs memory_order_release, but we currently have
1254             // no way to specify that. In fact there seem to be no legitimate uses of SetLockWord
1255             // with a final argument of true. This slows down x86 and ARMv7, but probably not v8.
1256             h_obj->SetLockWord(new_lw, true);
1257             AtraceMonitorUnlock();
1258             // Success!
1259             return true;
1260           } else {
1261             // Use CAS to preserve the read barrier state.
1262             if (h_obj->CasLockWord(lock_word, new_lw, CASMode::kWeak, std::memory_order_release)) {
1263               AtraceMonitorUnlock();
1264               // Success!
1265               return true;
1266             }
1267           }
1268           continue;  // Go again.
1269         }
1270       }
1271       case LockWord::kFatLocked: {
1272         Monitor* mon = lock_word.FatLockMonitor();
1273         return mon->Unlock(self);
1274       }
1275       default: {
1276         LOG(FATAL) << "Invalid monitor state " << lock_word.GetState();
1277         UNREACHABLE();
1278       }
1279     }
1280   }
1281 }
1282 
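// Illustrative sketch (not part of the runtime): the Java-level pattern that
// reaches Monitor::Wait below. Calling wait() on a thin-locked object first
// inflates the lock, since only a fat Monitor has a wait queue.
//
//   synchronized (obj) {          // MonitorEnter
//     while (!condition) {
//       obj.wait();               // Monitor::Wait
//     }
//   }                             // MonitorExit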
void Monitor::Wait(Thread* self,
                   ObjPtr<mirror::Object> obj,
                   int64_t ms,
                   int32_t ns,
                   bool interruptShouldThrow,
                   ThreadState why) {
  DCHECK(self != nullptr);
  DCHECK(obj != nullptr);
  StackHandleScope<1> hs(self);
  Handle<mirror::Object> h_obj(hs.NewHandle(obj));

  Runtime::Current()->GetRuntimeCallbacks()->ObjectWaitStart(h_obj, ms);
  if (UNLIKELY(self->ObserveAsyncException() || self->IsExceptionPending())) {
    // See b/65558434 for information on handling of exceptions here.
    return;
  }

  LockWord lock_word = h_obj->GetLockWord(true);
  while (lock_word.GetState() != LockWord::kFatLocked) {
    switch (lock_word.GetState()) {
      case LockWord::kHashCode:
        // Fall-through.
      case LockWord::kUnlocked:
        ThrowIllegalMonitorStateExceptionF("object not locked by thread before wait()");
        return;  // Failure.
      case LockWord::kThinLocked: {
        uint32_t thread_id = self->GetThreadId();
        uint32_t owner_thread_id = lock_word.ThinLockOwner();
        if (owner_thread_id != thread_id) {
          ThrowIllegalMonitorStateExceptionF("object not locked by thread before wait()");
          return;  // Failure.
        } else {
          // We own the lock, inflate to enqueue ourself on the Monitor. May fail spuriously so
          // re-load.
          Inflate(self, self, h_obj.Get(), 0);
          lock_word = h_obj->GetLockWord(true);
        }
        break;
      }
      case LockWord::kFatLocked:  // Unreachable given the loop condition above. Fall-through.
      default: {
        LOG(FATAL) << "Invalid monitor state " << lock_word.GetState();
        UNREACHABLE();
      }
    }
  }
  Monitor* mon = lock_word.FatLockMonitor();
  mon->Wait(self, ms, ns, interruptShouldThrow, why);
}

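// Illustrative sketch (not part of the runtime): the notifying side of the
// pattern above, which lands in DoNotify below.
//
//   synchronized (obj) {          // MonitorEnter
//     condition = true;
//     obj.notifyAll();            // DoNotify with notify_all == true
//   }                             // MonitorExit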
void Monitor::DoNotify(Thread* self, ObjPtr<mirror::Object> obj, bool notify_all) {
  DCHECK(self != nullptr);
  DCHECK(obj != nullptr);
  LockWord lock_word = obj->GetLockWord(true);
  switch (lock_word.GetState()) {
    case LockWord::kHashCode:
      // Fall-through.
    case LockWord::kUnlocked:
      ThrowIllegalMonitorStateExceptionF("object not locked by thread before notify()");
      return;  // Failure.
    case LockWord::kThinLocked: {
      uint32_t thread_id = self->GetThreadId();
      uint32_t owner_thread_id = lock_word.ThinLockOwner();
      if (owner_thread_id != thread_id) {
        ThrowIllegalMonitorStateExceptionF("object not locked by thread before notify()");
        return;  // Failure.
      } else {
        // We own the lock but there's no Monitor and therefore no waiters.
        return;  // Success.
      }
    }
    case LockWord::kFatLocked: {
      Monitor* mon = lock_word.FatLockMonitor();
      if (notify_all) {
        mon->NotifyAll(self);
      } else {
        mon->Notify(self);
      }
      return;  // Success.
    }
    default: {
      LOG(FATAL) << "Invalid monitor state " << lock_word.GetState();
      UNREACHABLE();
    }
  }
}

uint32_t Monitor::GetLockOwnerThreadId(ObjPtr<mirror::Object> obj) {
  DCHECK(obj != nullptr);
  LockWord lock_word = obj->GetLockWord(true);
  switch (lock_word.GetState()) {
    case LockWord::kHashCode:
      // Fall-through.
    case LockWord::kUnlocked:
      return ThreadList::kInvalidThreadId;
    case LockWord::kThinLocked:
      return lock_word.ThinLockOwner();
    case LockWord::kFatLocked: {
      Monitor* mon = lock_word.FatLockMonitor();
      return mon->GetOwnerThreadId();
    }
    default: {
      LOG(FATAL) << "Unreachable";
      UNREACHABLE();
    }
  }
}

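// Report the thread's current state and, when it is waiting on or blocked
// entering a monitor, which object is involved and (when known) which thread
// owns it. The out-parameters default to nullptr / kInvalidThreadId.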
ThreadState Monitor::FetchState(const Thread* thread,
                                /* out */ ObjPtr<mirror::Object>* monitor_object,
                                /* out */ uint32_t* lock_owner_tid) {
  DCHECK(monitor_object != nullptr);
  DCHECK(lock_owner_tid != nullptr);

  *monitor_object = nullptr;
  *lock_owner_tid = ThreadList::kInvalidThreadId;

  ThreadState state = thread->GetState();

  switch (state) {
    case kWaiting:
    case kTimedWaiting:
    case kSleeping:
    {
      Thread* self = Thread::Current();
      MutexLock mu(self, *thread->GetWaitMutex());
      Monitor* monitor = thread->GetWaitMonitor();
      if (monitor != nullptr) {
        *monitor_object = monitor->GetObject();
      }
    }
    break;

    case kBlocked:
    case kWaitingForLockInflation:
    {
      ObjPtr<mirror::Object> lock_object = thread->GetMonitorEnterObject();
      if (lock_object != nullptr) {
        if (kUseReadBarrier && Thread::Current()->GetIsGcMarking()) {
          // We may call Thread::Dump() in the middle of the CC thread flip, and this thread's
          // stack may not have been flipped yet, so "lock_object" may be a from-space (stale)
          // ref, in which case the GetLockOwnerThreadId() call below would crash. So explicitly
          // mark/forward it here.
          lock_object = ReadBarrier::Mark(lock_object.Ptr());
        }
        *monitor_object = lock_object;
        *lock_owner_tid = lock_object->GetLockOwnerThreadId();
      }
    }
    break;

    default:
      break;
  }

  return state;
}

ObjPtr<mirror::Object> Monitor::GetContendedMonitor(Thread* thread) {
  // This is used to implement JDWP's ThreadReference.CurrentContendedMonitor, and has a bizarre
  // definition of contended that includes a monitor a thread is trying to enter...
  ObjPtr<mirror::Object> result = thread->GetMonitorEnterObject();
  if (result == nullptr) {
    // ...but also a monitor that the thread is waiting on.
    MutexLock mu(Thread::Current(), *thread->GetWaitMutex());
    Monitor* monitor = thread->GetWaitMonitor();
    if (monitor != nullptr) {
      result = monitor->GetObject();
    }
  }
  return result;
}

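// Illustrative sketch (hypothetical callback, not part of the runtime): how a
// caller might use VisitLocks to collect the locks held in a stack frame.
//
//   static void CollectLockCallback(ObjPtr<mirror::Object> o, void* context) {
//     reinterpret_cast<std::vector<ObjPtr<mirror::Object>>*>(context)->push_back(o);
//   }
//   ...
//   std::vector<ObjPtr<mirror::Object>> locks;
//   Monitor::VisitLocks(stack_visitor, CollectLockCallback, &locks);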
void Monitor::VisitLocks(StackVisitor* stack_visitor,
                         void (*callback)(ObjPtr<mirror::Object>, void*),
                         void* callback_context,
                         bool abort_on_failure) {
  ArtMethod* m = stack_visitor->GetMethod();
  CHECK(m != nullptr);

  // Native methods are an easy special case.
  // TODO: use the JNI implementation's table of explicit MonitorEnter calls and dump those too.
  if (m->IsNative()) {
    if (m->IsSynchronized()) {
      ObjPtr<mirror::Object> jni_this =
          stack_visitor->GetCurrentHandleScope(sizeof(void*))->GetReference(0);
      callback(jni_this, callback_context);
    }
    return;
  }

  // Proxy methods should not be synchronized.
  if (m->IsProxyMethod()) {
    CHECK(!m->IsSynchronized());
    return;
  }

  // Is there any reason to believe there's any synchronization in this method?
  CHECK(m->GetCodeItem() != nullptr) << m->PrettyMethod();
  CodeItemDataAccessor accessor(m->DexInstructionData());
  if (accessor.TriesSize() == 0) {
    return;  // No "tries" implies no synchronization, so no held locks to report.
  }

  // Get the dex pc. If abort_on_failure is false, GetDexPc will not abort when it cannot
  // find the dex pc, and will instead return kDexNoIndex. Then bail out, as it indicates we
  // have an inconsistent stack anyway.
  uint32_t dex_pc = stack_visitor->GetDexPc(abort_on_failure);
  if (!abort_on_failure && dex_pc == dex::kDexNoIndex) {
    LOG(ERROR) << "Could not find dex_pc for " << m->PrettyMethod();
    return;
  }

  // Ask the verifier for the dex pcs of all the monitor-enter instructions corresponding to
  // the locks held in this stack frame.
  std::vector<verifier::MethodVerifier::DexLockInfo> monitor_enter_dex_pcs;
  verifier::MethodVerifier::FindLocksAtDexPc(m,
                                             dex_pc,
                                             &monitor_enter_dex_pcs,
                                             Runtime::Current()->GetTargetSdkVersion());
  for (verifier::MethodVerifier::DexLockInfo& dex_lock_info : monitor_enter_dex_pcs) {
    // As a debug check, check that the dex PC corresponds to a monitor-enter.
    if (kIsDebugBuild) {
      const Instruction& monitor_enter_instruction = accessor.InstructionAt(dex_lock_info.dex_pc);
      CHECK_EQ(monitor_enter_instruction.Opcode(), Instruction::MONITOR_ENTER)
          << "expected monitor-enter @" << dex_lock_info.dex_pc << "; was "
          << reinterpret_cast<const void*>(&monitor_enter_instruction);
    }

    // Iterate through the set of dex registers, as the compiler may not have kept all of them
    // live.
    bool success = false;
    for (uint32_t dex_reg : dex_lock_info.dex_registers) {
      uint32_t value;

      // For optimized code we expect the DexRegisterMap to be present - monitor information
      // should not be optimized out.
      success = stack_visitor->GetVReg(m, dex_reg, kReferenceVReg, &value);
      if (success) {
        ObjPtr<mirror::Object> o = reinterpret_cast<mirror::Object*>(value);
        callback(o, callback_context);
        break;
      }
    }
    DCHECK(success) << "Failed to find/read reference for monitor-enter at dex pc "
                    << dex_lock_info.dex_pc
                    << " in method "
                    << m->PrettyMethod();
    if (!success) {
      LOG(WARNING) << "Had a lock reported for dex pc " << dex_lock_info.dex_pc
                   << " but was not able to fetch a corresponding object!";
    }
  }
}

bool Monitor::IsValidLockWord(LockWord lock_word) {
  switch (lock_word.GetState()) {
    case LockWord::kUnlocked:
      // Nothing to check.
      return true;
    case LockWord::kThinLocked:
      // Basic consistency check of owner.
      return lock_word.ThinLockOwner() != ThreadList::kInvalidThreadId;
    case LockWord::kFatLocked: {
      // Check that the monitor appears in the monitor list.
      Monitor* mon = lock_word.FatLockMonitor();
      MonitorList* list = Runtime::Current()->GetMonitorList();
      MutexLock mu(Thread::Current(), list->monitor_list_lock_);
      for (Monitor* list_mon : list->list_) {
        if (mon == list_mon) {
          return true;  // Found our monitor.
        }
      }
      return false;  // Fail - unowned monitor in an object.
    }
    case LockWord::kHashCode:
      return true;
    default:
      LOG(FATAL) << "Unreachable";
      UNREACHABLE();
  }
}

bool Monitor::IsLocked() REQUIRES_SHARED(Locks::mutator_lock_) {
  return GetOwner() != nullptr;
}

void Monitor::TranslateLocation(ArtMethod* method,
                                uint32_t dex_pc,
                                const char** source_file,
                                int32_t* line_number) {
  // If method is null, the location is unknown.
  if (method == nullptr) {
    *source_file = "";
    *line_number = 0;
    return;
  }
  *source_file = method->GetDeclaringClassSourceFile();
  if (*source_file == nullptr) {
    *source_file = "";
  }
  *line_number = method->GetLineNumFromDexPC(dex_pc);
}

uint32_t Monitor::GetOwnerThreadId() {
  // Make sure owner is not deallocated during access.
  MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
  Thread* owner = GetOwner();
  if (owner != nullptr) {
    return owner->GetThreadId();
  } else {
    return ThreadList::kInvalidThreadId;
  }
}

MonitorList::MonitorList()
    : allow_new_monitors_(true), monitor_list_lock_("MonitorList lock", kMonitorListLock),
      monitor_add_condition_("MonitorList disallow condition", monitor_list_lock_) {
}

MonitorList::~MonitorList() {
  Thread* self = Thread::Current();
  MutexLock mu(self, monitor_list_lock_);
  // Release all monitors to the pool.
  // TODO: Is it an invariant that *all* open monitors are in the list? Then we could
  // clear faster in the pool.
  MonitorPool::ReleaseMonitors(self, &list_);
}

void MonitorList::DisallowNewMonitors() {
  CHECK(!kUseReadBarrier);
  MutexLock mu(Thread::Current(), monitor_list_lock_);
  allow_new_monitors_ = false;
}

void MonitorList::AllowNewMonitors() {
  CHECK(!kUseReadBarrier);
  Thread* self = Thread::Current();
  MutexLock mu(self, monitor_list_lock_);
  allow_new_monitors_ = true;
  monitor_add_condition_.Broadcast(self);
}

void MonitorList::BroadcastForNewMonitors() {
  Thread* self = Thread::Current();
  MutexLock mu(self, monitor_list_lock_);
  monitor_add_condition_.Broadcast(self);
}

void MonitorList::Add(Monitor* m) {
  Thread* self = Thread::Current();
  MutexLock mu(self, monitor_list_lock_);
  // CMS needs this to block for concurrent reference processing because an object allocated during
  // the GC won't be marked and concurrent reference processing would incorrectly clear the JNI weak
  // ref. But CC (kUseReadBarrier == true) doesn't because of the to-space invariant.
  while (!kUseReadBarrier && UNLIKELY(!allow_new_monitors_)) {
    // Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
    // presence of threads blocking for weak ref access.
    self->CheckEmptyCheckpointFromWeakRefAccess(&monitor_list_lock_);
    monitor_add_condition_.WaitHoldingLocks(self);
  }
  list_.push_front(m);
}

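// Walk the monitor list on behalf of the GC: monitors whose objects are no
// longer reachable (the visitor returns null) are released back to the pool;
// for objects that moved, the monitor's object pointer is updated.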
void MonitorList::SweepMonitorList(IsMarkedVisitor* visitor) {
  Thread* self = Thread::Current();
  MutexLock mu(self, monitor_list_lock_);
  for (auto it = list_.begin(); it != list_.end(); ) {
    Monitor* m = *it;
    // Disable the read barrier in GetObject() as this is called by GC.
    ObjPtr<mirror::Object> obj = m->GetObject<kWithoutReadBarrier>();
    // The object of a monitor can be null if we have deflated it.
    ObjPtr<mirror::Object> new_obj = obj != nullptr ? visitor->IsMarked(obj.Ptr()) : nullptr;
    if (new_obj == nullptr) {
      VLOG(monitor) << "freeing monitor " << m << " belonging to unmarked object "
                    << obj;
      MonitorPool::ReleaseMonitor(self, m);
      it = list_.erase(it);
    } else {
      m->SetObject(new_obj);
      ++it;
    }
  }
}

size_t MonitorList::Size() {
  Thread* self = Thread::Current();
  MutexLock mu(self, monitor_list_lock_);
  return list_.size();
}

class MonitorDeflateVisitor : public IsMarkedVisitor {
 public:
  MonitorDeflateVisitor() : self_(Thread::Current()), deflate_count_(0) {}

  mirror::Object* IsMarked(mirror::Object* object) override
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (Monitor::Deflate(self_, object)) {
      DCHECK_NE(object->GetLockWord(true).GetState(), LockWord::kFatLocked);
      ++deflate_count_;
      // If we deflated, return null so that the monitor gets removed from the list.
      return nullptr;
    }
    return object;  // Monitor was not deflated.
  }

  Thread* const self_;
  size_t deflate_count_;
};

size_t MonitorList::DeflateMonitors() {
  MonitorDeflateVisitor visitor;
  Locks::mutator_lock_->AssertExclusiveHeld(visitor.self_);
  SweepMonitorList(&visitor);
  return visitor.deflate_count_;
}

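// Illustrative sketch (not part of the runtime): MonitorInfo takes a snapshot
// of an object's lock state, e.g. for a thread dump.
//
//   MonitorInfo info(obj);
//   if (info.owner_ != nullptr) {
//     // Held info.entry_count_ times by info.owner_; info.waiters_ lists the
//     // threads blocked in wait() on the object.
//   }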
MonitorInfo::MonitorInfo(ObjPtr<mirror::Object> obj) : owner_(nullptr), entry_count_(0) {
  DCHECK(obj != nullptr);
  LockWord lock_word = obj->GetLockWord(true);
  switch (lock_word.GetState()) {
    case LockWord::kUnlocked:
      // Fall-through.
    case LockWord::kForwardingAddress:
      // Fall-through.
    case LockWord::kHashCode:
      break;
    case LockWord::kThinLocked:
      owner_ = Runtime::Current()->GetThreadList()->FindThreadByThreadId(lock_word.ThinLockOwner());
      DCHECK(owner_ != nullptr) << "Thin-locked without owner!";
      entry_count_ = 1 + lock_word.ThinLockCount();
      // Thin locks have no waiters.
      break;
    case LockWord::kFatLocked: {
      Monitor* mon = lock_word.FatLockMonitor();
      owner_ = mon->owner_.load(std::memory_order_relaxed);
      // Here it is okay for the owner to be null since we don't reset the LockWord back to
      // kUnlocked until we get a GC. In cases where this hasn't happened yet we will have a fat
      // lock without an owner.
      // Neither owner_ nor entry_count_ is touched by threads in "suspended" state, so
      // we must see consistent values.
      if (owner_ != nullptr) {
        entry_count_ = 1 + mon->lock_count_;
      } else {
        DCHECK_EQ(mon->lock_count_, 0u) << "Monitor is fat-locked without any owner!";
      }
      for (Thread* waiter = mon->wait_set_; waiter != nullptr; waiter = waiter->GetWaitNext()) {
        waiters_.push_back(waiter);
      }
      break;
    }
  }
}

}  // namespace art