/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "allocation_record.h"

#include "art_method-inl.h"
#include "base/enums.h"
#include "base/logging.h"  // For VLOG
#include "base/stl_util.h"
#include "obj_ptr-inl.h"
#include "object_callbacks.h"
#include "stack.h"

#include <android-base/properties.h>

namespace art {
namespace gc {

int32_t AllocRecordStackTraceElement::ComputeLineNumber() const {
  DCHECK(method_ != nullptr);
  return method_->GetLineNumFromDexPC(dex_pc_);
}

const char* AllocRecord::GetClassDescriptor(std::string* storage) const {
  // klass_ could contain null only if we implement class unloading.
  return klass_.IsNull() ? "null" : klass_.Read()->GetDescriptor(storage);
}

void AllocRecordObjectMap::SetMaxStackDepth(size_t max_stack_depth) {
  // Log fatal since this should already be checked when calling VMDebug.setAllocTrackerStackDepth.
  CHECK_LE(max_stack_depth, kMaxSupportedStackDepth)
      << "Allocation record max stack depth is too large";
  max_stack_depth_ = max_stack_depth;
}

AllocRecordObjectMap::~AllocRecordObjectMap() {
  Clear();
}

void AllocRecordObjectMap::VisitRoots(RootVisitor* visitor) {
  CHECK_LE(recent_record_max_, alloc_record_max_);
  BufferedRootVisitor<kDefaultBufferedRootCount> buffered_visitor(visitor, RootInfo(kRootDebugger));
  size_t count = recent_record_max_;
  // Only visit the last recent_record_max_ allocation records in entries_ and mark the
  // klass_ fields as strong roots.
  for (auto it = entries_.rbegin(), end = entries_.rend(); it != end; ++it) {
    AllocRecord& record = it->second;
    if (count > 0) {
      buffered_visitor.VisitRootIfNonNull(record.GetClassGcRoot());
      --count;
    }
    // Visit all of the stack frames to make sure no methods in the stack traces get unloaded by
    // class unloading.
    for (size_t i = 0, depth = record.GetDepth(); i < depth; ++i) {
      const AllocRecordStackTraceElement& element = record.StackElement(i);
      DCHECK(element.GetMethod() != nullptr);
      element.GetMethod()->VisitRoots(buffered_visitor, kRuntimePointerSize);
    }
  }
}

static inline void SweepClassObject(AllocRecord* record, IsMarkedVisitor* visitor)
    REQUIRES_SHARED(Locks::mutator_lock_)
    REQUIRES(Locks::alloc_tracker_lock_) {
  GcRoot<mirror::Class>& klass = record->GetClassGcRoot();
  // This does not need a read barrier because this is called by GC.
  mirror::Object* old_object = klass.Read<kWithoutReadBarrier>();
  if (old_object != nullptr) {
    // The class object can become null if we implement class unloading.
    // In that case we might still want to keep the class name string (not implemented).
    mirror::Object* new_object = visitor->IsMarked(old_object);
    DCHECK(new_object != nullptr);
    if (UNLIKELY(old_object != new_object)) {
      klass = GcRoot<mirror::Class>(new_object->AsClass());
    }
  }
}

void AllocRecordObjectMap::SweepAllocationRecords(IsMarkedVisitor* visitor) {
  VLOG(heap) << "Start SweepAllocationRecords()";
  size_t count_deleted = 0, count_moved = 0, count = 0;
  // Only the first (size - recent_record_max_) records can be deleted.
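  // entries_ is expected to be kept in insertion order (oldest records first), so the newest
  // recent_record_max_ records are retained even if their objects have died, allowing them to
  // still be reported to the debugger.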
  const size_t delete_bound = std::max(entries_.size(), recent_record_max_) - recent_record_max_;
  for (auto it = entries_.begin(), end = entries_.end(); it != end;) {
    ++count;
    // This does not need a read barrier because this is called by GC.
    mirror::Object* old_object = it->first.Read<kWithoutReadBarrier>();
    AllocRecord& record = it->second;
    mirror::Object* new_object = old_object == nullptr ? nullptr : visitor->IsMarked(old_object);
    if (new_object == nullptr) {
      if (count > delete_bound) {
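        // The object is dead, but this record is within the recent window: clear the object root
        // and keep the record so its class and stack trace can still be reported.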
        it->first = GcRoot<mirror::Object>(nullptr);
        SweepClassObject(&record, visitor);
        ++it;
      } else {
        it = entries_.erase(it);
        ++count_deleted;
      }
    } else {
      if (old_object != new_object) {
        it->first = GcRoot<mirror::Object>(new_object);
        ++count_moved;
      }
      SweepClassObject(&record, visitor);
      ++it;
    }
  }
  VLOG(heap) << "Deleted " << count_deleted << " allocation records";
  VLOG(heap) << "Updated " << count_moved << " allocation records";
}

void AllocRecordObjectMap::AllowNewAllocationRecords() {
  CHECK(!kUseReadBarrier);
  allow_new_record_ = true;
  new_record_condition_.Broadcast(Thread::Current());
}

void AllocRecordObjectMap::DisallowNewAllocationRecords() {
  CHECK(!kUseReadBarrier);
  allow_new_record_ = false;
}

void AllocRecordObjectMap::BroadcastForNewAllocationRecords() {
  new_record_condition_.Broadcast(Thread::Current());
}

void AllocRecordObjectMap::SetAllocTrackingEnabled(bool enable) {
  Thread* self = Thread::Current();
  Heap* heap = Runtime::Current()->GetHeap();
  if (enable) {
    {
      MutexLock mu(self, *Locks::alloc_tracker_lock_);
      if (heap->IsAllocTrackingEnabled()) {
        return;  // Already enabled, bail.
      }
      AllocRecordObjectMap* records = heap->GetAllocationRecords();
      if (records == nullptr) {
        records = new AllocRecordObjectMap;
        heap->SetAllocationRecords(records);
      }
      CHECK(records != nullptr);
      records->SetMaxStackDepth(heap->GetAllocTrackerStackDepth());
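      // Rough upper bound on the per-record footprint (record, trace header, and a full stack of
      // trace elements); used only for the informational log message below.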
      size_t sz = sizeof(AllocRecordStackTraceElement) * records->max_stack_depth_ +
                  sizeof(AllocRecord) + sizeof(AllocRecordStackTrace);
      LOG(INFO) << "Enabling alloc tracker (" << records->alloc_record_max_ << " entries of "
                << records->max_stack_depth_ << " frames, taking up to "
                << PrettySize(sz * records->alloc_record_max_) << ")";
    }
    Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints();
    {
      MutexLock mu(self, *Locks::alloc_tracker_lock_);
      heap->SetAllocTrackingEnabled(true);
    }
  } else {
    // Delete outside of the critical section to avoid possible lock order violations involving,
    // for example, the runtime shutdown lock.
    {
      MutexLock mu(self, *Locks::alloc_tracker_lock_);
      if (!heap->IsAllocTrackingEnabled()) {
        return;  // Already disabled, bail.
      }
      heap->SetAllocTrackingEnabled(false);
      LOG(INFO) << "Disabling alloc tracker";
      AllocRecordObjectMap* records = heap->GetAllocationRecords();
      records->Clear();
    }
    // If an allocation comes in before we uninstrument, we will safely drop it on the floor.
    Runtime::Current()->GetInstrumentation()->UninstrumentQuickAllocEntryPoints();
  }
}

void AllocRecordObjectMap::RecordAllocation(Thread* self,
                                            ObjPtr<mirror::Object>* obj,
                                            size_t byte_count) {
  // Get stack trace outside of lock in case there are allocations during the stack walk.
  // b/27858645.
  AllocRecordStackTrace trace;
  {
    StackHandleScope<1> hs(self);
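    // Wrapping obj in a handle keeps the pointer up to date if a moving GC relocates the object
    // while the stack walk below is running.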
    auto obj_wrapper = hs.NewHandleWrapper(obj);

    StackVisitor::WalkStack(
        [&](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
          if (trace.GetDepth() >= max_stack_depth_) {
            return false;
          }
          ArtMethod* m = stack_visitor->GetMethod();
          // m may be null if we have inlined methods of unresolved classes. b/27858645
          if (m != nullptr && !m->IsRuntimeMethod()) {
            m = m->GetInterfaceMethodIfProxy(kRuntimePointerSize);
            trace.AddStackElement(AllocRecordStackTraceElement(m, stack_visitor->GetDexPc()));
          }
          return true;
        },
        self,
        /* context= */ nullptr,
        art::StackVisitor::StackWalkKind::kIncludeInlinedFrames);
  }

  MutexLock mu(self, *Locks::alloc_tracker_lock_);
  Heap* const heap = Runtime::Current()->GetHeap();
  if (!heap->IsAllocTrackingEnabled()) {
    // In the process of shutting down recording, bail.
    return;
  }

  // TODO: Skip recording allocations associated with DDMS. This was a feature of the old debugger,
  // but when we switched to the JVMTI-based debugger the feature was (unintentionally) broken.
  // Since nobody seemed to really notice or care, it might not be worth the trouble.

  // Wait for GC's sweeping to complete and allow new records.
  while (UNLIKELY((!kUseReadBarrier && !allow_new_record_) ||
                  (kUseReadBarrier && !self->GetWeakRefAccessEnabled()))) {
    // Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
    // presence of threads blocking for weak ref access.
    self->CheckEmptyCheckpointFromWeakRefAccess(Locks::alloc_tracker_lock_);
    new_record_condition_.WaitHoldingLocks(self);
  }

  if (!heap->IsAllocTrackingEnabled()) {
    // Return if allocation tracking has been disabled while waiting for system weak access
    // above.
    return;
  }

  DCHECK_LE(Size(), alloc_record_max_);

  // Tag the completed trace with the allocating thread's id.
  trace.SetTid(self->GetTid());

  // Add the record.
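  // Put() is expected to evict the oldest entry once alloc_record_max_ is reached (see
  // allocation_record.h), which keeps Size() within the bound checked below.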
  Put(obj->Ptr(), AllocRecord(byte_count, (*obj)->GetClass(), std::move(trace)));
  DCHECK_LE(Size(), alloc_record_max_);
}

void AllocRecordObjectMap::Clear() {
  entries_.clear();
}

AllocRecordObjectMap::AllocRecordObjectMap()
    : new_record_condition_("New allocation record condition", *Locks::alloc_tracker_lock_) {}

}  // namespace gc
}  // namespace art