/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "stack.h"
#include <limits>

#include "android-base/stringprintf.h"

#include "arch/context.h"
#include "art_method-inl.h"
#include "base/callee_save_type.h"
#include "base/enums.h"
#include "base/hex_dump.h"
#include "dex/dex_file_types.h"
#include "entrypoints/entrypoint_utils-inl.h"
#include "entrypoints/quick/callee_save_frame.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "gc/space/image_space.h"
#include "gc/space/space-inl.h"
#include "interpreter/mterp/nterp.h"
#include "interpreter/shadow_frame-inl.h"
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
#include "linear_alloc.h"
#include "managed_stack.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "nterp_helpers.h"
#include "oat_quick_method_header.h"
#include "obj_ptr-inl.h"
#include "quick/quick_method_frame_info.h"
#include "runtime.h"
#include "thread.h"
#include "thread_list.h"

namespace art {

using android::base::StringPrintf;

static constexpr bool kDebugStackWalk = false;

StackVisitor::StackVisitor(Thread* thread,
                           Context* context,
                           StackWalkKind walk_kind,
                           bool check_suspended)
    : StackVisitor(thread, context, walk_kind, 0, check_suspended) {}

StackVisitor::StackVisitor(Thread* thread,
                           Context* context,
                           StackWalkKind walk_kind,
                           size_t num_frames,
                           bool check_suspended)
    : thread_(thread),
      walk_kind_(walk_kind),
      cur_shadow_frame_(nullptr),
      cur_quick_frame_(nullptr),
      cur_quick_frame_pc_(0),
      cur_oat_quick_method_header_(nullptr),
      num_frames_(num_frames),
      cur_depth_(0),
      cur_inline_info_(nullptr, CodeInfo()),
      cur_stack_map_(0, StackMap()),
      context_(context),
      check_suspended_(check_suspended) {
  if (check_suspended_) {
    DCHECK(thread == Thread::Current() || thread->IsSuspended()) << *thread;
  }
}

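// Returns the decoded inline info of the current method header. Decoding is
// memoized in `cur_inline_info_`, keyed by the header, so repeated queries on
// the same frame (e.g. one per visited inlined frame) decode the CodeInfo
// only once.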
CodeInfo* StackVisitor::GetCurrentInlineInfo() const {
  DCHECK(!(*cur_quick_frame_)->IsNative());
  const OatQuickMethodHeader* header = GetCurrentOatQuickMethodHeader();
  if (cur_inline_info_.first != header) {
    cur_inline_info_ = std::make_pair(header, CodeInfo::DecodeInlineInfoOnly(header));
  }
  return &cur_inline_info_.second;
}

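// Returns the stack map for the current quick frame PC, memoized in
// `cur_stack_map_` and keyed by the PC so that the stack map lookup runs at
// most once per visited native PC.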
StackMap* StackVisitor::GetCurrentStackMap() const {
  DCHECK(!(*cur_quick_frame_)->IsNative());
  const OatQuickMethodHeader* header = GetCurrentOatQuickMethodHeader();
  if (cur_stack_map_.first != cur_quick_frame_pc_) {
    uint32_t pc = header->NativeQuickPcOffset(cur_quick_frame_pc_);
    cur_stack_map_ = std::make_pair(cur_quick_frame_pc_,
                                    GetCurrentInlineInfo()->GetStackMapForNativePcOffset(pc));
  }
  return &cur_stack_map_.second;
}

ArtMethod* StackVisitor::GetMethod() const {
  if (cur_shadow_frame_ != nullptr) {
    return cur_shadow_frame_->GetMethod();
  } else if (cur_quick_frame_ != nullptr) {
    if (IsInInlinedFrame()) {
      CodeInfo* code_info = GetCurrentInlineInfo();
      DCHECK(walk_kind_ != StackWalkKind::kSkipInlinedFrames);
      return GetResolvedMethod(*GetCurrentQuickFrame(), *code_info, current_inline_frames_);
    } else {
      return *cur_quick_frame_;
    }
  }
  return nullptr;
}

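// Returns the dex PC of the current frame. The lookup depends on the frame
// type: shadow frames record it directly, inlined frames read it from the
// innermost InlineInfo, optimized frames map the native PC through the stack
// map, and nterp frames derive it from the interpreter's frame layout.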
uint32_t StackVisitor::GetDexPc(bool abort_on_failure) const {
  if (cur_shadow_frame_ != nullptr) {
    return cur_shadow_frame_->GetDexPC();
  } else if (cur_quick_frame_ != nullptr) {
    if (IsInInlinedFrame()) {
      return current_inline_frames_.back().GetDexPc();
    } else if (cur_oat_quick_method_header_ == nullptr) {
      return dex::kDexNoIndex;
    } else if ((*GetCurrentQuickFrame())->IsNative()) {
      return cur_oat_quick_method_header_->ToDexPc(
          GetCurrentQuickFrame(), cur_quick_frame_pc_, abort_on_failure);
    } else if (cur_oat_quick_method_header_->IsOptimized()) {
      StackMap* stack_map = GetCurrentStackMap();
      DCHECK(stack_map->IsValid());
      return stack_map->GetDexPc();
    } else {
      DCHECK(cur_oat_quick_method_header_->IsNterpMethodHeader());
      return NterpGetDexPC(cur_quick_frame_);
    }
  } else {
    return 0;
  }
}

extern "C" mirror::Object* artQuickGetProxyThisObject(ArtMethod** sp)
    REQUIRES_SHARED(Locks::mutator_lock_);

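// Returns the `this` reference of the current frame. Where it lives depends
// on the method: native methods keep it as the first handle in the frame's
// HandleScope, proxy methods fetch it via artQuickGetProxyThisObject(), and
// regular dex methods hold it in the first input register, i.e. vreg
// `registers_size - ins_size`.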
ObjPtr<mirror::Object> StackVisitor::GetThisObject() const {
  DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
  ArtMethod* m = GetMethod();
  if (m->IsStatic()) {
    return nullptr;
  } else if (m->IsNative()) {
    if (cur_quick_frame_ != nullptr) {
      HandleScope* hs;
      if (cur_oat_quick_method_header_ != nullptr) {
        hs = reinterpret_cast<HandleScope*>(
            reinterpret_cast<char*>(cur_quick_frame_) + sizeof(ArtMethod*));
      } else {
        // GenericJNI frames have the HandleScope under the managed frame.
        uint32_t shorty_len;
        const char* shorty = m->GetShorty(&shorty_len);
        const size_t num_handle_scope_references =
            /* this */ 1u + std::count(shorty + 1, shorty + shorty_len, 'L');
        hs = GetGenericJniHandleScope(cur_quick_frame_, num_handle_scope_references);
      }
      return hs->GetReference(0);
    } else {
      return cur_shadow_frame_->GetVRegReference(0);
    }
  } else if (m->IsProxyMethod()) {
    if (cur_quick_frame_ != nullptr) {
      return artQuickGetProxyThisObject(cur_quick_frame_);
    } else {
      return cur_shadow_frame_->GetVRegReference(0);
    }
  } else {
    CodeItemDataAccessor accessor(m->DexInstructionData());
    if (!accessor.HasCodeItem()) {
      UNIMPLEMENTED(ERROR) << "Failed to determine this object of abstract or proxy method: "
                           << ArtMethod::PrettyMethod(m);
      return nullptr;
    } else {
      uint16_t reg = accessor.RegistersSize() - accessor.InsSize();
      uint32_t value = 0;
      if (!GetVReg(m, reg, kReferenceVReg, &value)) {
        return nullptr;
      }
      return reinterpret_cast<mirror::Object*>(value);
    }
  }
}

size_t StackVisitor::GetNativePcOffset() const {
  DCHECK(!IsShadowFrame());
  return GetCurrentOatQuickMethodHeader()->NativeQuickPcOffset(cur_quick_frame_pc_);
}

bool StackVisitor::GetVRegFromDebuggerShadowFrame(uint16_t vreg,
                                                  VRegKind kind,
                                                  uint32_t* val) const {
  size_t frame_id = const_cast<StackVisitor*>(this)->GetFrameId();
  ShadowFrame* shadow_frame = thread_->FindDebuggerShadowFrame(frame_id);
  if (shadow_frame != nullptr) {
    bool* updated_vreg_flags = thread_->GetUpdatedVRegFlags(frame_id);
    DCHECK(updated_vreg_flags != nullptr);
    if (updated_vreg_flags[vreg]) {
      // Value is set by the debugger.
      if (kind == kReferenceVReg) {
        *val = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(
            shadow_frame->GetVRegReference(vreg)));
      } else {
        *val = shadow_frame->GetVReg(vreg);
      }
      return true;
    }
  }
  // No value is set by the debugger.
  return false;
}

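// Reads virtual register `vreg` of method `m` in the current frame into
// `*val`. Debugger-updated values take precedence; otherwise the value comes
// from the nterp frame layout or from the optimized code's stack maps. A
// minimal sketch of a caller (hypothetical, for illustration only):
//
//   uint32_t value = 0;
//   if (visitor.GetVReg(method, /*vreg=*/ 0, kIntVReg, &value)) {
//     LOG(INFO) << "v0 = " << value;
//   }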
bool StackVisitor::GetVReg(ArtMethod* m,
                           uint16_t vreg,
                           VRegKind kind,
                           uint32_t* val,
                           std::optional<DexRegisterLocation> location) const {
  if (cur_quick_frame_ != nullptr) {
    DCHECK(context_ != nullptr);  // You can't reliably read registers without a context.
    DCHECK(m == GetMethod());
    // Check if there is a value set by the debugger.
    if (GetVRegFromDebuggerShadowFrame(vreg, kind, val)) {
      return true;
    }
    bool result = false;
    if (cur_oat_quick_method_header_->IsNterpMethodHeader()) {
      result = true;
      *val = (kind == kReferenceVReg)
          ? NterpGetVRegReference(cur_quick_frame_, vreg)
          : NterpGetVReg(cur_quick_frame_, vreg);
    } else {
      DCHECK(cur_oat_quick_method_header_->IsOptimized());
      if (location.has_value() && kind != kReferenceVReg) {
        uint32_t val2 = *val;
        // The caller already knows the register location, so we can use the faster overload
        // which does not decode the stack maps.
        result = GetVRegFromOptimizedCode(location.value(), kind, val);
        // Compare to the slower overload.
        DCHECK_EQ(result, GetVRegFromOptimizedCode(m, vreg, kind, &val2));
        DCHECK_EQ(*val, val2);
      } else {
        result = GetVRegFromOptimizedCode(m, vreg, kind, val);
      }
    }
    if (kind == kReferenceVReg) {
      // Perform a read barrier in case we are in a different thread and GC is ongoing.
      mirror::Object* out = reinterpret_cast<mirror::Object*>(static_cast<uintptr_t>(*val));
      uintptr_t ptr_out = reinterpret_cast<uintptr_t>(GcRoot<mirror::Object>(out).Read());
      DCHECK_LT(ptr_out, std::numeric_limits<uint32_t>::max());
      *val = static_cast<uint32_t>(ptr_out);
    }
    return result;
  } else {
    DCHECK(cur_shadow_frame_ != nullptr);
    if (kind == kReferenceVReg) {
      *val = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(
          cur_shadow_frame_->GetVRegReference(vreg)));
    } else {
      *val = cur_shadow_frame_->GetVReg(vreg);
    }
    return true;
  }
}

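// Slow-path vreg read for optimized code: decode the CodeInfo, find the stack
// map for the current native PC, then consult the dex register map (of the
// innermost inlined frame, if any) to learn where `vreg` lives: a stack slot,
// a machine register, or an encoded constant.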
bool StackVisitor::GetVRegFromOptimizedCode(ArtMethod* m, uint16_t vreg, VRegKind kind,
                                            uint32_t* val) const {
  DCHECK_EQ(m, GetMethod());
  // Can't be null or how would we compile its instructions?
  DCHECK(m->GetCodeItem() != nullptr) << m->PrettyMethod();
  CodeItemDataAccessor accessor(m->DexInstructionData());
  uint16_t number_of_dex_registers = accessor.RegistersSize();
  DCHECK_LT(vreg, number_of_dex_registers);
  const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
  CodeInfo code_info(method_header);

  uint32_t native_pc_offset = method_header->NativeQuickPcOffset(cur_quick_frame_pc_);
  StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
  DCHECK(stack_map.IsValid());

  DexRegisterMap dex_register_map = IsInInlinedFrame()
      ? code_info.GetInlineDexRegisterMapOf(stack_map, current_inline_frames_.back())
      : code_info.GetDexRegisterMapOf(stack_map);
  if (dex_register_map.empty()) {
    return false;
  }
  DCHECK_EQ(dex_register_map.size(), number_of_dex_registers);
  DexRegisterLocation::Kind location_kind = dex_register_map[vreg].GetKind();
  switch (location_kind) {
    case DexRegisterLocation::Kind::kInStack: {
      const int32_t offset = dex_register_map[vreg].GetStackOffsetInBytes();
      BitMemoryRegion stack_mask = code_info.GetStackMaskOf(stack_map);
      if (kind == kReferenceVReg && !stack_mask.LoadBit(offset / kFrameSlotSize)) {
        return false;
      }
      const uint8_t* addr = reinterpret_cast<const uint8_t*>(cur_quick_frame_) + offset;
      *val = *reinterpret_cast<const uint32_t*>(addr);
      return true;
    }
    case DexRegisterLocation::Kind::kInRegister: {
      uint32_t register_mask = code_info.GetRegisterMaskOf(stack_map);
      uint32_t reg = dex_register_map[vreg].GetMachineRegister();
      if (kind == kReferenceVReg && !(register_mask & (1 << reg))) {
        return false;
      }
      return GetRegisterIfAccessible(reg, kind, val);
    }
    case DexRegisterLocation::Kind::kInRegisterHigh:
    case DexRegisterLocation::Kind::kInFpuRegister:
    case DexRegisterLocation::Kind::kInFpuRegisterHigh: {
      if (kind == kReferenceVReg) {
        return false;
      }
      uint32_t reg = dex_register_map[vreg].GetMachineRegister();
      return GetRegisterIfAccessible(reg, kind, val);
    }
    case DexRegisterLocation::Kind::kConstant: {
      uint32_t result = dex_register_map[vreg].GetConstant();
      if (kind == kReferenceVReg && result != 0) {
        return false;
      }
      *val = result;
      return true;
    }
    case DexRegisterLocation::Kind::kNone:
      return false;
    default:
      LOG(FATAL) << "Unexpected location kind " << dex_register_map[vreg].GetKind();
      UNREACHABLE();
  }
}

bool StackVisitor::GetVRegFromOptimizedCode(DexRegisterLocation location,
                                            VRegKind kind,
                                            uint32_t* val) const {
  switch (location.GetKind()) {
    case DexRegisterLocation::Kind::kInvalid:
      break;
    case DexRegisterLocation::Kind::kInStack: {
      const uint8_t* sp = reinterpret_cast<const uint8_t*>(cur_quick_frame_);
      *val = *reinterpret_cast<const uint32_t*>(sp + location.GetStackOffsetInBytes());
      return true;
    }
    case DexRegisterLocation::Kind::kInRegister:
    case DexRegisterLocation::Kind::kInRegisterHigh:
    case DexRegisterLocation::Kind::kInFpuRegister:
    case DexRegisterLocation::Kind::kInFpuRegisterHigh:
      return GetRegisterIfAccessible(location.GetMachineRegister(), kind, val);
    case DexRegisterLocation::Kind::kConstant:
      *val = location.GetConstant();
      return true;
    case DexRegisterLocation::Kind::kNone:
      return false;
  }
  LOG(FATAL) << "Unexpected location kind " << location.GetKind();
  UNREACHABLE();
}

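// Reads machine register `reg` from the context if the unwind context
// captured it. On x86 the context exposes each 64-bit XMM register as two
// 32-bit halves, so e.g. the double in XMM2 is read as context registers 4
// (low) and 5 (high); on 64-bit targets the requested 32-bit half of a wide
// vreg is extracted from the 64-bit register value instead.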
bool StackVisitor::GetRegisterIfAccessible(uint32_t reg, VRegKind kind, uint32_t* val) const {
  const bool is_float = (kind == kFloatVReg) || (kind == kDoubleLoVReg) || (kind == kDoubleHiVReg);

  if (kRuntimeISA == InstructionSet::kX86 && is_float) {
    // X86 float registers are 64-bit and each XMM register is provided as two separate
    // 32-bit registers by the context.
    reg = (kind == kDoubleHiVReg) ? (2 * reg + 1) : (2 * reg);
  }

  if (!IsAccessibleRegister(reg, is_float)) {
    return false;
  }
  uintptr_t ptr_val = GetRegister(reg, is_float);
  const bool target64 = Is64BitInstructionSet(kRuntimeISA);
  if (target64) {
    const bool wide_lo = (kind == kLongLoVReg) || (kind == kDoubleLoVReg);
    const bool wide_hi = (kind == kLongHiVReg) || (kind == kDoubleHiVReg);
    int64_t value_long = static_cast<int64_t>(ptr_val);
    if (wide_lo) {
      ptr_val = static_cast<uintptr_t>(Low32Bits(value_long));
    } else if (wide_hi) {
      ptr_val = static_cast<uintptr_t>(High32Bits(value_long));
    }
  }
  *val = ptr_val;
  return true;
}

bool StackVisitor::GetVRegPairFromDebuggerShadowFrame(uint16_t vreg,
                                                      VRegKind kind_lo,
                                                      VRegKind kind_hi,
                                                      uint64_t* val) const {
  uint32_t low_32bits;
  uint32_t high_32bits;
  bool success = GetVRegFromDebuggerShadowFrame(vreg, kind_lo, &low_32bits);
  success &= GetVRegFromDebuggerShadowFrame(vreg + 1, kind_hi, &high_32bits);
  if (success) {
    *val = (static_cast<uint64_t>(high_32bits) << 32) | static_cast<uint64_t>(low_32bits);
  }
  return success;
}

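// Reads a long or double out of the vreg pair (vreg, vreg + 1). The two
// 32-bit halves are recombined as (hi << 32) | lo, matching how dex bytecode
// splits wide values across consecutive virtual registers.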
bool StackVisitor::GetVRegPair(ArtMethod* m, uint16_t vreg, VRegKind kind_lo,
                               VRegKind kind_hi, uint64_t* val) const {
  if (kind_lo == kLongLoVReg) {
    DCHECK_EQ(kind_hi, kLongHiVReg);
  } else if (kind_lo == kDoubleLoVReg) {
    DCHECK_EQ(kind_hi, kDoubleHiVReg);
  } else {
    LOG(FATAL) << "Expected long or double: kind_lo=" << kind_lo << ", kind_hi=" << kind_hi;
    UNREACHABLE();
  }
  // Check if there is a value set by the debugger.
  if (GetVRegPairFromDebuggerShadowFrame(vreg, kind_lo, kind_hi, val)) {
    return true;
  }
  if (cur_quick_frame_ == nullptr) {
    DCHECK(cur_shadow_frame_ != nullptr);
    *val = cur_shadow_frame_->GetVRegLong(vreg);
    return true;
  }
  if (cur_oat_quick_method_header_->IsNterpMethodHeader()) {
    uint64_t val_lo = NterpGetVReg(cur_quick_frame_, vreg);
    uint64_t val_hi = NterpGetVReg(cur_quick_frame_, vreg + 1);
    *val = (val_hi << 32) + val_lo;
    return true;
  }

  DCHECK(context_ != nullptr);  // You can't reliably read registers without a context.
  DCHECK(m == GetMethod());
  DCHECK(cur_oat_quick_method_header_->IsOptimized());
  return GetVRegPairFromOptimizedCode(m, vreg, kind_lo, kind_hi, val);
}

bool StackVisitor::GetVRegPairFromOptimizedCode(ArtMethod* m, uint16_t vreg,
                                                VRegKind kind_lo, VRegKind kind_hi,
                                                uint64_t* val) const {
  uint32_t low_32bits;
  uint32_t high_32bits;
  bool success = GetVRegFromOptimizedCode(m, vreg, kind_lo, &low_32bits);
  success &= GetVRegFromOptimizedCode(m, vreg + 1, kind_hi, &high_32bits);
  if (success) {
    *val = (static_cast<uint64_t>(high_32bits) << 32) | static_cast<uint64_t>(low_32bits);
  }
  return success;
}

bool StackVisitor::GetRegisterPairIfAccessible(uint32_t reg_lo, uint32_t reg_hi,
                                               VRegKind kind_lo, uint64_t* val) const {
  const bool is_float = (kind_lo == kDoubleLoVReg);
  if (!IsAccessibleRegister(reg_lo, is_float) || !IsAccessibleRegister(reg_hi, is_float)) {
    return false;
  }
  uintptr_t ptr_val_lo = GetRegister(reg_lo, is_float);
  uintptr_t ptr_val_hi = GetRegister(reg_hi, is_float);
  bool target64 = Is64BitInstructionSet(kRuntimeISA);
  if (target64) {
    int64_t value_long_lo = static_cast<int64_t>(ptr_val_lo);
    int64_t value_long_hi = static_cast<int64_t>(ptr_val_hi);
    ptr_val_lo = static_cast<uintptr_t>(Low32Bits(value_long_lo));
    ptr_val_hi = static_cast<uintptr_t>(High32Bits(value_long_hi));
  }
  *val = (static_cast<uint64_t>(ptr_val_hi) << 32) | static_cast<uint32_t>(ptr_val_lo);
  return true;
}

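// Writes to compiled frames go through a debugger shadow frame: the updated
// vregs are recorded on the thread, marked in GetUpdatedVRegFlags(), and only
// take effect once the stack is deoptimized and the interpreter resumes from
// that shadow frame.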
ShadowFrame* StackVisitor::PrepareSetVReg(ArtMethod* m, uint16_t vreg, bool wide) {
  CodeItemDataAccessor accessor(m->DexInstructionData());
  if (!accessor.HasCodeItem()) {
    return nullptr;
  }
  ShadowFrame* shadow_frame = GetCurrentShadowFrame();
  if (shadow_frame == nullptr) {
    // This is a compiled frame: we must prepare and update a shadow frame that will
    // be executed by the interpreter after deoptimization of the stack.
    const size_t frame_id = GetFrameId();
    const uint16_t num_regs = accessor.RegistersSize();
    shadow_frame = thread_->FindOrCreateDebuggerShadowFrame(frame_id, num_regs, m, GetDexPc());
    CHECK(shadow_frame != nullptr);
    // Remember that the vreg(s) have been set for debugging and must not be overwritten
    // by the original value during deoptimization of the stack.
    thread_->GetUpdatedVRegFlags(frame_id)[vreg] = true;
    if (wide) {
      thread_->GetUpdatedVRegFlags(frame_id)[vreg + 1] = true;
    }
  }
  return shadow_frame;
}

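// A minimal sketch of a (hypothetical) debugger-side caller; the new value is
// only observed once the frame is deoptimized and interpreted:
//
//   visitor.SetVReg(method, /*vreg=*/ 3, /*new_value=*/ 42u, kIntVReg);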
bool StackVisitor::SetVReg(ArtMethod* m, uint16_t vreg, uint32_t new_value, VRegKind kind) {
  DCHECK(kind == kIntVReg || kind == kFloatVReg);
  ShadowFrame* shadow_frame = PrepareSetVReg(m, vreg, /* wide= */ false);
  if (shadow_frame == nullptr) {
    return false;
  }
  shadow_frame->SetVReg(vreg, new_value);
  return true;
}

bool StackVisitor::SetVRegReference(ArtMethod* m, uint16_t vreg, ObjPtr<mirror::Object> new_value) {
  ShadowFrame* shadow_frame = PrepareSetVReg(m, vreg, /* wide= */ false);
  if (shadow_frame == nullptr) {
    return false;
  }
  shadow_frame->SetVRegReference(vreg, new_value);
  return true;
}

bool StackVisitor::SetVRegPair(ArtMethod* m,
                               uint16_t vreg,
                               uint64_t new_value,
                               VRegKind kind_lo,
                               VRegKind kind_hi) {
  if (kind_lo == kLongLoVReg) {
    DCHECK_EQ(kind_hi, kLongHiVReg);
  } else if (kind_lo == kDoubleLoVReg) {
    DCHECK_EQ(kind_hi, kDoubleHiVReg);
  } else {
    LOG(FATAL) << "Expected long or double: kind_lo=" << kind_lo << ", kind_hi=" << kind_hi;
    UNREACHABLE();
  }
  ShadowFrame* shadow_frame = PrepareSetVReg(m, vreg, /* wide= */ true);
  if (shadow_frame == nullptr) {
    return false;
  }
  shadow_frame->SetVRegLong(vreg, new_value);
  return true;
}

bool StackVisitor::IsAccessibleGPR(uint32_t reg) const {
  DCHECK(context_ != nullptr);
  return context_->IsAccessibleGPR(reg);
}

uintptr_t* StackVisitor::GetGPRAddress(uint32_t reg) const {
  DCHECK(cur_quick_frame_ != nullptr) << "This is a quick frame routine";
  DCHECK(context_ != nullptr);
  return context_->GetGPRAddress(reg);
}

uintptr_t StackVisitor::GetGPR(uint32_t reg) const {
  DCHECK(cur_quick_frame_ != nullptr) << "This is a quick frame routine";
  DCHECK(context_ != nullptr);
  return context_->GetGPR(reg);
}

bool StackVisitor::IsAccessibleFPR(uint32_t reg) const {
  DCHECK(context_ != nullptr);
  return context_->IsAccessibleFPR(reg);
}

uintptr_t StackVisitor::GetFPR(uint32_t reg) const {
  DCHECK(cur_quick_frame_ != nullptr) << "This is a quick frame routine";
  DCHECK(context_ != nullptr);
  return context_->GetFPR(reg);
}

uintptr_t StackVisitor::GetReturnPcAddr() const {
  uintptr_t sp = reinterpret_cast<uintptr_t>(GetCurrentQuickFrame());
  DCHECK_NE(sp, 0u);
  return sp + GetCurrentQuickFrameInfo().GetReturnPcOffset();
}

uintptr_t StackVisitor::GetReturnPc() const {
  return *reinterpret_cast<uintptr_t*>(GetReturnPcAddr());
}

void StackVisitor::SetReturnPc(uintptr_t new_ret_pc) {
  *reinterpret_cast<uintptr_t*>(GetReturnPcAddr()) = new_ret_pc;
}

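// Counts frames by running a trivial visitor over the whole stack. Subclasses
// of StackVisitor only override VisitFrame(); returning true continues the
// walk and returning false stops it, as in the local visitors below.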
size_t StackVisitor::ComputeNumFrames(Thread* thread, StackWalkKind walk_kind) {
  struct NumFramesVisitor : public StackVisitor {
    NumFramesVisitor(Thread* thread_in, StackWalkKind walk_kind_in)
        : StackVisitor(thread_in, nullptr, walk_kind_in), frames(0) {}

    bool VisitFrame() override {
      frames++;
      return true;
    }

    size_t frames;
  };
  NumFramesVisitor visitor(thread, walk_kind);
  visitor.WalkStack(true);
  return visitor.frames;
}

bool StackVisitor::GetNextMethodAndDexPc(ArtMethod** next_method, uint32_t* next_dex_pc) {
  struct HasMoreFramesVisitor : public StackVisitor {
    HasMoreFramesVisitor(Thread* thread,
                         StackWalkKind walk_kind,
                         size_t num_frames,
                         size_t frame_height)
        : StackVisitor(thread, nullptr, walk_kind, num_frames),
          frame_height_(frame_height),
          found_frame_(false),
          has_more_frames_(false),
          next_method_(nullptr),
          next_dex_pc_(0) {
    }

    bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
      if (found_frame_) {
        ArtMethod* method = GetMethod();
        if (method != nullptr && !method->IsRuntimeMethod()) {
          has_more_frames_ = true;
          next_method_ = method;
          next_dex_pc_ = GetDexPc();
          return false;  // End stack walk once next method is found.
        }
      } else if (GetFrameHeight() == frame_height_) {
        found_frame_ = true;
      }
      return true;
    }

    size_t frame_height_;
    bool found_frame_;
    bool has_more_frames_;
    ArtMethod* next_method_;
    uint32_t next_dex_pc_;
  };
  HasMoreFramesVisitor visitor(thread_, walk_kind_, GetNumFrames(), GetFrameHeight());
  visitor.WalkStack(true);
  *next_method = visitor.next_method_;
  *next_dex_pc = visitor.next_dex_pc_;
  return visitor.has_more_frames_;
}

void StackVisitor::DescribeStack(Thread* thread) {
  struct DescribeStackVisitor : public StackVisitor {
    explicit DescribeStackVisitor(Thread* thread_in)
        : StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {}

    bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
      LOG(INFO) << "Frame Id=" << GetFrameId() << " " << DescribeLocation();
      return true;
    }
  };
  DescribeStackVisitor visitor(thread);
  visitor.WalkStack(true);
}

std::string StackVisitor::DescribeLocation() const {
  std::string result("Visiting method '");
  ArtMethod* m = GetMethod();
  if (m == nullptr) {
    return "upcall";
  }
  result += m->PrettyMethod();
  result += StringPrintf("' at dex PC 0x%04x", GetDexPc());
  if (!IsShadowFrame()) {
    result += StringPrintf(" (native PC %p)", reinterpret_cast<void*>(GetCurrentQuickFramePc()));
  }
  return result;
}

void StackVisitor::SetMethod(ArtMethod* method) {
  DCHECK(GetMethod() != nullptr);
  if (cur_shadow_frame_ != nullptr) {
    cur_shadow_frame_->SetMethod(method);
  } else {
    DCHECK(cur_quick_frame_ != nullptr);
    CHECK(!IsInInlinedFrame()) << "We do not support setting inlined method's ArtMethod: "
                               << GetMethod()->PrettyMethod() << " is inlined into "
                               << GetOuterMethod()->PrettyMethod();
    *cur_quick_frame_ = method;
  }
}

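// Debug-build check that `pc` actually lies within the compiled code of
// `method`. The early returns filter out the cases with no fixed code range:
// native/runtime/proxy methods, instrumentation and resolution stubs, and
// JIT-compiled code, which is looked up in the JIT code cache instead.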
static void AssertPcIsWithinQuickCode(ArtMethod* method, uintptr_t pc)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  if (method->IsNative() || method->IsRuntimeMethod() || method->IsProxyMethod()) {
    return;
  }

  if (pc == reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc())) {
    return;
  }

  Runtime* runtime = Runtime::Current();
  if (runtime->UseJitCompilation() &&
      runtime->GetJit()->GetCodeCache()->ContainsPc(reinterpret_cast<const void*>(pc))) {
    return;
  }

  const void* code = method->GetEntryPointFromQuickCompiledCode();
  if (code == GetQuickInstrumentationEntryPoint() || code == GetInvokeObsoleteMethodStub()) {
    return;
  }

  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
  if (class_linker->IsQuickToInterpreterBridge(code) ||
      class_linker->IsQuickResolutionStub(code)) {
    return;
  }

  if (runtime->UseJitCompilation() && runtime->GetJit()->GetCodeCache()->ContainsPc(code)) {
    return;
  }

  uint32_t code_size = OatQuickMethodHeader::FromEntryPoint(code)->GetCodeSize();
  uintptr_t code_start = reinterpret_cast<uintptr_t>(code);
  CHECK(code_start <= pc && pc <= (code_start + code_size))
      << method->PrettyMethod()
      << " pc=" << std::hex << pc
      << " code_start=" << code_start
      << " code_size=" << code_size;
}

void StackVisitor::ValidateFrame() const {
  if (kIsDebugBuild) {
    ArtMethod* method = GetMethod();
    ObjPtr<mirror::Class> declaring_class = method->GetDeclaringClass();
    // Runtime methods have null declaring class.
    if (!method->IsRuntimeMethod()) {
      CHECK(declaring_class != nullptr);
      CHECK_EQ(declaring_class->GetClass(), declaring_class->GetClass()->GetClass())
          << declaring_class;
    } else {
      CHECK(declaring_class == nullptr);
    }
    Runtime* const runtime = Runtime::Current();
    LinearAlloc* const linear_alloc = runtime->GetLinearAlloc();
    if (!linear_alloc->Contains(method)) {
      // Check class linker linear allocs.
      // We get the canonical method as copied methods may have their declaring
      // class from another class loader.
      const PointerSize ptr_size = runtime->GetClassLinker()->GetImagePointerSize();
      ArtMethod* canonical = method->GetCanonicalMethod(ptr_size);
      ObjPtr<mirror::Class> klass = canonical->GetDeclaringClass();
      LinearAlloc* const class_linear_alloc = (klass != nullptr)
          ? runtime->GetClassLinker()->GetAllocatorForClassLoader(klass->GetClassLoader())
          : linear_alloc;
      if (!class_linear_alloc->Contains(canonical)) {
        // Check image space.
        bool in_image = false;
        for (auto& space : runtime->GetHeap()->GetContinuousSpaces()) {
          if (space->IsImageSpace()) {
            auto* image_space = space->AsImageSpace();
            const auto& header = image_space->GetImageHeader();
            const ImageSection& methods = header.GetMethodsSection();
            const ImageSection& runtime_methods = header.GetRuntimeMethodsSection();
            const size_t offset = reinterpret_cast<const uint8_t*>(canonical) - image_space->Begin();
            if (methods.Contains(offset) || runtime_methods.Contains(offset)) {
              in_image = true;
              break;
            }
          }
        }
        CHECK(in_image) << canonical->PrettyMethod() << " not in linear alloc or image";
      }
    }
    if (cur_quick_frame_ != nullptr) {
      AssertPcIsWithinQuickCode(method, cur_quick_frame_pc_);
      // Frame consistency checks.
      size_t frame_size = GetCurrentQuickFrameInfo().FrameSizeInBytes();
      CHECK_NE(frame_size, 0u);
      // For compiled code, we could try to have a rough guess at an upper size we expect
      // to see for a frame:
      // 256 registers
      // 2 words HandleScope overhead
      // 3+3 register spills
      // const size_t kMaxExpectedFrameSize = (256 + 2 + 3 + 3) * sizeof(word);
      const size_t kMaxExpectedFrameSize = interpreter::kNterpMaxFrame;
      CHECK_LE(frame_size, kMaxExpectedFrameSize) << method->PrettyMethod();
      size_t return_pc_offset = GetCurrentQuickFrameInfo().GetReturnPcOffset();
      CHECK_LT(return_pc_offset, frame_size);
    }
  }
}

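// Determines the frame layout of the current quick frame. Frames with a
// method header carry their own frame info; the remaining cases (abstract,
// runtime, proxy, and generic JNI frames) reuse one of the runtime's
// callee-save frame layouts.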
QuickMethodFrameInfo StackVisitor::GetCurrentQuickFrameInfo() const {
  if (cur_oat_quick_method_header_ != nullptr) {
    if (cur_oat_quick_method_header_->IsOptimized()) {
      return cur_oat_quick_method_header_->GetFrameInfo();
    } else {
      DCHECK(cur_oat_quick_method_header_->IsNterpMethodHeader());
      return NterpFrameInfo(cur_quick_frame_);
    }
  }

  ArtMethod* method = GetMethod();
  Runtime* runtime = Runtime::Current();

  if (method->IsAbstract()) {
    return RuntimeCalleeSaveFrame::GetMethodFrameInfo(CalleeSaveType::kSaveRefsAndArgs);
  }

  // This goes before IsProxyMethod since runtime methods have a null declaring class.
  if (method->IsRuntimeMethod()) {
    return runtime->GetRuntimeMethodFrameInfo(method);
  }

  if (method->IsProxyMethod()) {
    // There is only one direct method of a proxy class: the constructor. A direct method is
    // cloned from the original java.lang.reflect.Proxy and is executed as a usual
    // quick-compiled method without any stubs; therefore it must have an OatQuickMethodHeader.
    DCHECK(!method->IsDirect() && !method->IsConstructor())
        << "Constructors of proxy classes must have an OatQuickMethodHeader";
    return RuntimeCalleeSaveFrame::GetMethodFrameInfo(CalleeSaveType::kSaveRefsAndArgs);
  }

  // The only remaining cases are for native methods that either
  //   - use the Generic JNI stub, called either directly or through some
  //     (resolution, instrumentation) trampoline; or
  //   - fake a Generic JNI frame in art_jni_dlsym_lookup_critical_stub.
  DCHECK(method->IsNative());
  if (kIsDebugBuild && !method->IsCriticalNative()) {
    ClassLinker* class_linker = runtime->GetClassLinker();
    const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(method,
                                                                             kRuntimePointerSize);
    CHECK(class_linker->IsQuickGenericJniStub(entry_point) ||
          // The current entrypoint (after filtering out trampolines) may have changed
          // from GenericJNI to JIT-compiled stub since we have entered this frame.
          (runtime->GetJit() != nullptr &&
           runtime->GetJit()->GetCodeCache()->ContainsPc(entry_point))) << method->PrettyMethod();
  }
  // Generic JNI frame is just like the SaveRefsAndArgs frame.
  // Note that HandleScope, if any, is below the frame.
  return RuntimeCalleeSaveFrame::GetMethodFrameInfo(CalleeSaveType::kSaveRefsAndArgs);
}

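// Walks the thread's stack, fragment by fragment. Within each ManagedStack
// fragment, quick frames are advanced by adding the frame size to the current
// frame pointer and re-reading the return PC, i.e. roughly:
//
//   cur_quick_frame_pc_ = return_pc;
//   cur_quick_frame_ = reinterpret_cast<ArtMethod**>(
//       reinterpret_cast<uint8_t*>(cur_quick_frame_) + frame_size);
//
// while shadow frames are simply followed through their link pointers.
// Inlined frames, if requested, are visited before their enclosing frame.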
template <StackVisitor::CountTransitions kCount>
void StackVisitor::WalkStack(bool include_transitions) {
  if (check_suspended_) {
    DCHECK(thread_ == Thread::Current() || thread_->IsSuspended());
  }
  CHECK_EQ(cur_depth_, 0U);
  size_t inlined_frames_count = 0;

  for (const ManagedStack* current_fragment = thread_->GetManagedStack();
       current_fragment != nullptr; current_fragment = current_fragment->GetLink()) {
    cur_shadow_frame_ = current_fragment->GetTopShadowFrame();
    cur_quick_frame_ = current_fragment->GetTopQuickFrame();
    cur_quick_frame_pc_ = 0;
    DCHECK(cur_oat_quick_method_header_ == nullptr);
    if (cur_quick_frame_ != nullptr) {  // Handle quick stack frames.
      // Can't be both a shadow and a quick fragment.
      DCHECK(current_fragment->GetTopShadowFrame() == nullptr);
      ArtMethod* method = *cur_quick_frame_;
      DCHECK(method != nullptr);
      bool header_retrieved = false;
      if (method->IsNative()) {
        // We do not have a PC for the first frame, so we cannot simply use
        // ArtMethod::GetOatQuickMethodHeader() as it cannot distinguish between
        // a GenericJNI frame and a JIT-compiled JNI stub; the entrypoint may have
        // changed since the frame was entered. The top quick frame tag indicates
        // GenericJNI here, otherwise it's either an AOT-compiled or JIT-compiled JNI stub.
        if (UNLIKELY(current_fragment->GetTopQuickFrameTag())) {
          // The generic JNI does not have any method header.
          cur_oat_quick_method_header_ = nullptr;
        } else {
          const void* existing_entry_point = method->GetEntryPointFromQuickCompiledCode();
          CHECK(existing_entry_point != nullptr);
          Runtime* runtime = Runtime::Current();
          ClassLinker* class_linker = runtime->GetClassLinker();
          // Check whether we can quickly get the header from the current entrypoint.
          if (!class_linker->IsQuickGenericJniStub(existing_entry_point) &&
              !class_linker->IsQuickResolutionStub(existing_entry_point) &&
              existing_entry_point != GetQuickInstrumentationEntryPoint()) {
            cur_oat_quick_method_header_ =
                OatQuickMethodHeader::FromEntryPoint(existing_entry_point);
          } else {
            const void* code = method->GetOatMethodQuickCode(class_linker->GetImagePointerSize());
            if (code != nullptr) {
              cur_oat_quick_method_header_ = OatQuickMethodHeader::FromEntryPoint(code);
            } else {
              // This must be a JITted JNI stub frame.
              CHECK(runtime->GetJit() != nullptr);
              code = runtime->GetJit()->GetCodeCache()->GetJniStubCode(method);
              CHECK(code != nullptr) << method->PrettyMethod();
              cur_oat_quick_method_header_ = OatQuickMethodHeader::FromCodePointer(code);
            }
          }
        }
        header_retrieved = true;
      }
      while (method != nullptr) {
        if (!header_retrieved) {
          cur_oat_quick_method_header_ = method->GetOatQuickMethodHeader(cur_quick_frame_pc_);
        }
        header_retrieved = false;  // Force header retrieval in next iteration.
        ValidateFrame();

        if ((walk_kind_ == StackWalkKind::kIncludeInlinedFrames)
            && (cur_oat_quick_method_header_ != nullptr)
            && cur_oat_quick_method_header_->IsOptimized()
            && !method->IsNative()  // JNI methods cannot have any inlined frames.
            && CodeInfo::HasInlineInfo(cur_oat_quick_method_header_->GetOptimizedCodeInfoPtr())) {
          DCHECK_NE(cur_quick_frame_pc_, 0u);
          CodeInfo* code_info = GetCurrentInlineInfo();
          StackMap* stack_map = GetCurrentStackMap();
          if (stack_map->IsValid() && stack_map->HasInlineInfo()) {
            DCHECK_EQ(current_inline_frames_.size(), 0u);
            for (current_inline_frames_ = code_info->GetInlineInfosOf(*stack_map);
                 !current_inline_frames_.empty();
                 current_inline_frames_.pop_back()) {
              bool should_continue = VisitFrame();
              if (UNLIKELY(!should_continue)) {
                return;
              }
              cur_depth_++;
              inlined_frames_count++;
            }
          }
        }

        bool should_continue = VisitFrame();
        if (UNLIKELY(!should_continue)) {
          return;
        }

        QuickMethodFrameInfo frame_info = GetCurrentQuickFrameInfo();
        if (context_ != nullptr) {
          context_->FillCalleeSaves(reinterpret_cast<uint8_t*>(cur_quick_frame_), frame_info);
        }
        // Compute PC for next stack frame from return PC.
        size_t frame_size = frame_info.FrameSizeInBytes();
        uintptr_t return_pc_addr = GetReturnPcAddr();
        uintptr_t return_pc = *reinterpret_cast<uintptr_t*>(return_pc_addr);

        if (UNLIKELY(reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc()) == return_pc)) {
          // While profiling, the return pc is restored from the side stack, except when walking
          // the stack for an exception where the side stack will be unwound in VisitFrame.
          const std::map<uintptr_t, instrumentation::InstrumentationStackFrame>&
              instrumentation_stack = *thread_->GetInstrumentationStack();
          auto it = instrumentation_stack.find(return_pc_addr);
          CHECK(it != instrumentation_stack.end());
          const instrumentation::InstrumentationStackFrame& instrumentation_frame = it->second;
          if (GetMethod() ==
              Runtime::Current()->GetCalleeSaveMethod(CalleeSaveType::kSaveAllCalleeSaves)) {
            // Skip runtime save all callee frames which are used to deliver exceptions.
          } else if (instrumentation_frame.interpreter_entry_) {
            ArtMethod* callee =
                Runtime::Current()->GetCalleeSaveMethod(CalleeSaveType::kSaveRefsAndArgs);
            CHECK_EQ(GetMethod(), callee) << "Expected: " << ArtMethod::PrettyMethod(callee)
                                          << " Found: " << ArtMethod::PrettyMethod(GetMethod());
          } else if (!instrumentation_frame.method_->IsRuntimeMethod()) {
            // Trampolines get replaced with their actual method in the stack,
            // so don't do the check below for runtime methods.
            // Instrumentation generally doesn't distinguish between a method's obsolete and
            // non-obsolete version.
            CHECK_EQ(instrumentation_frame.method_->GetNonObsoleteMethod(),
                     GetMethod()->GetNonObsoleteMethod())
                << "Expected: "
                << ArtMethod::PrettyMethod(instrumentation_frame.method_->GetNonObsoleteMethod())
                << " Found: " << ArtMethod::PrettyMethod(GetMethod()->GetNonObsoleteMethod());
          }
          return_pc = instrumentation_frame.return_pc_;
        }

        cur_quick_frame_pc_ = return_pc;
        uint8_t* next_frame = reinterpret_cast<uint8_t*>(cur_quick_frame_) + frame_size;
        cur_quick_frame_ = reinterpret_cast<ArtMethod**>(next_frame);

        if (kDebugStackWalk) {
          LOG(INFO) << ArtMethod::PrettyMethod(method) << "@" << method << " size=" << frame_size
                    << std::boolalpha
                    << " optimized=" << (cur_oat_quick_method_header_ != nullptr &&
                                         cur_oat_quick_method_header_->IsOptimized())
                    << " native=" << method->IsNative()
                    << std::noboolalpha
                    << " entrypoints=" << method->GetEntryPointFromQuickCompiledCode()
                    << "," << (method->IsNative() ? method->GetEntryPointFromJni() : nullptr)
                    << " next=" << *cur_quick_frame_;
        }

        if (kCount == CountTransitions::kYes || !method->IsRuntimeMethod()) {
          cur_depth_++;
        }
        method = *cur_quick_frame_;
      }
      // We reached a transition frame; it doesn't have a method header.
      cur_oat_quick_method_header_ = nullptr;
    } else if (cur_shadow_frame_ != nullptr) {
      do {
        ValidateFrame();
        bool should_continue = VisitFrame();
        if (UNLIKELY(!should_continue)) {
          return;
        }
        cur_depth_++;
        cur_shadow_frame_ = cur_shadow_frame_->GetLink();
      } while (cur_shadow_frame_ != nullptr);
    }
    if (include_transitions) {
      bool should_continue = VisitFrame();
      if (!should_continue) {
        return;
      }
    }
    if (kCount == CountTransitions::kYes) {
      cur_depth_++;
    }
  }
  if (num_frames_ != 0) {
    CHECK_EQ(cur_depth_, num_frames_);
  }
}

template void StackVisitor::WalkStack<StackVisitor::CountTransitions::kYes>(bool);
template void StackVisitor::WalkStack<StackVisitor::CountTransitions::kNo>(bool);

}  // namespace art