1 /* Copyright (C) 2016 The Android Open Source Project
2  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
3  *
4  * This file implements interfaces from the file jvmti.h. This implementation
5  * is licensed under the same terms as the file jvmti.h.  The
6  * copyright and license information for the file jvmti.h follows.
7  *
8  * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
9  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
10  *
11  * This code is free software; you can redistribute it and/or modify it
12  * under the terms of the GNU General Public License version 2 only, as
13  * published by the Free Software Foundation.  Oracle designates this
14  * particular file as subject to the "Classpath" exception as provided
15  * by Oracle in the LICENSE file that accompanied this code.
16  *
17  * This code is distributed in the hope that it will be useful, but WITHOUT
18  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
19  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
20  * version 2 for more details (a copy is included in the LICENSE file that
21  * accompanied this code).
22  *
23  * You should have received a copy of the GNU General Public License version
24  * 2 along with this work; if not, write to the Free Software Foundation,
25  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
26  *
27  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
28  * or visit www.oracle.com if you need additional information or have any
29  * questions.
30  */
31 
32 #include "ti_stack.h"
33 
34 #include <algorithm>
35 #include <initializer_list>
36 #include <list>
37 #include <unordered_map>
38 #include <vector>
39 
40 #include "android-base/macros.h"
41 #include "android-base/thread_annotations.h"
42 #include "arch/context.h"
43 #include "art_field-inl.h"
44 #include "art_jvmti.h"
45 #include "art_method-inl.h"
47 #include "barrier.h"
48 #include "base/bit_utils.h"
49 #include "base/enums.h"
50 #include "base/locks.h"
51 #include "base/macros.h"
52 #include "base/mutex.h"
53 #include "deopt_manager.h"
54 #include "dex/code_item_accessors-inl.h"
55 #include "dex/dex_file.h"
56 #include "dex/dex_file_annotations.h"
57 #include "dex/dex_file_types.h"
58 #include "dex/dex_instruction-inl.h"
59 #include "dex/primitive.h"
60 #include "events.h"
61 #include "gc_root.h"
62 #include "handle_scope-inl.h"
63 #include "instrumentation.h"
64 #include "interpreter/shadow_frame-inl.h"
65 #include "interpreter/shadow_frame.h"
66 #include "jni/jni_env_ext.h"
67 #include "jni/jni_internal.h"
68 #include "jvalue-inl.h"
69 #include "jvalue.h"
70 #include "jvmti.h"
71 #include "mirror/class.h"
72 #include "mirror/dex_cache.h"
73 #include "nativehelper/scoped_local_ref.h"
74 #include "scoped_thread_state_change-inl.h"
75 #include "scoped_thread_state_change.h"
76 #include "stack.h"
77 #include "thread.h"
78 #include "thread_state.h"
79 #include "ti_logging.h"
80 #include "ti_thread.h"
81 #include "thread-current-inl.h"
82 #include "thread_list.h"
83 #include "thread_pool.h"
85 #include "well_known_classes.h"
86 
87 namespace openjdkjvmti {
88 
89 template <typename FrameFn>
90 struct GetStackTraceVisitor : public art::StackVisitor {
91   GetStackTraceVisitor(art::Thread* thread_in,
92                        size_t start_,
93                        size_t stop_,
94                        FrameFn fn_)
95       : StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
96         fn(fn_),
97         start(start_),
98         stop(stop_) {}
99   GetStackTraceVisitor(const GetStackTraceVisitor&) = default;
100   GetStackTraceVisitor(GetStackTraceVisitor&&) noexcept = default;
101 
102   bool VisitFrame() override REQUIRES_SHARED(art::Locks::mutator_lock_) {
103     art::ArtMethod* m = GetMethod();
104     if (m->IsRuntimeMethod()) {
105       return true;
106     }
107 
108     if (start == 0) {
109       m = m->GetInterfaceMethodIfProxy(art::kRuntimePointerSize);
110       jmethodID id = art::jni::EncodeArtMethod(m);
111 
112       uint32_t dex_pc = GetDexPc(false);
113       jlong dex_location = (dex_pc == art::dex::kDexNoIndex) ? -1 : static_cast<jlong>(dex_pc);
114 
115       jvmtiFrameInfo info = { id, dex_location };
116       fn(info);
117 
118       if (stop == 1) {
119         return false;  // We're done.
120       } else if (stop > 0) {
121         stop--;
122       }
123     } else {
124       start--;
125     }
126 
127     return true;
128   }
129 
130   FrameFn fn;
131   size_t start;
132   size_t stop;
133 };
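
// Illustrative sketch (added; not part of the original file): the visitor is intended to be built
// through MakeStackTraceVisitor below and driven with WalkStack, mirroring what the closures in
// this file do. The first 'start' frames are skipped and at most 'stop' frames are reported
// (stop == 0 means no limit).
//
//   auto visitor = MakeStackTraceVisitor(thread, /* start= */ 0, /* stop= */ 5,
//                                        [&](jvmtiFrameInfo info) { frames.push_back(info); });
//   visitor.WalkStack(/* include_transitions= */ false);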
134 
135 art::ShadowFrame* FindFrameAtDepthVisitor::GetOrCreateShadowFrame(bool* created_frame) {
136   art::ShadowFrame* cur = GetCurrentShadowFrame();
137   if (cur == nullptr) {
138     *created_frame = true;
139     art::ArtMethod* method = GetMethod();
140     const uint16_t num_regs = method->DexInstructionData().RegistersSize();
141     cur = GetThread()->FindOrCreateDebuggerShadowFrame(GetFrameId(),
142                                                        num_regs,
143                                                        method,
144                                                        GetDexPc());
145     DCHECK(cur != nullptr);
146   } else {
147     *created_frame = false;
148   }
149   return cur;
150 }
151 
152 template <typename FrameFn>
153 GetStackTraceVisitor<FrameFn> MakeStackTraceVisitor(art::Thread* thread_in,
154                                                     size_t start,
155                                                     size_t stop,
156                                                     FrameFn fn) {
157   return GetStackTraceVisitor<FrameFn>(thread_in, start, stop, fn);
158 }
159 
160 struct GetStackTraceVectorClosure : public art::Closure {
161  public:
162   GetStackTraceVectorClosure(size_t start, size_t stop)
163       : start_input(start),
164         stop_input(stop),
165         start_result(0),
166         stop_result(0) {}
167 
168   void Run(art::Thread* self) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
169     auto frames_fn = [&](jvmtiFrameInfo info) {
170       frames.push_back(info);
171     };
172     auto visitor = MakeStackTraceVisitor(self, start_input, stop_input, frames_fn);
173     visitor.WalkStack(/* include_transitions= */ false);
174 
175     start_result = visitor.start;
176     stop_result = visitor.stop;
177   }
178 
179   const size_t start_input;
180   const size_t stop_input;
181 
182   std::vector<jvmtiFrameInfo> frames;
183   size_t start_result;
184   size_t stop_result;
185 };
186 
187 static jvmtiError TranslateFrameVector(const std::vector<jvmtiFrameInfo>& frames,
188                                        jint start_depth,
189                                        size_t start_result,
190                                        jint max_frame_count,
191                                        jvmtiFrameInfo* frame_buffer,
192                                        jint* count_ptr) {
193   size_t collected_frames = frames.size();
194 
195   // Assume we're here having collected something.
196   DCHECK_GT(max_frame_count, 0);
197 
198   // Frames from the top.
199   if (start_depth >= 0) {
200     if (start_result != 0) {
201       // Not enough frames.
202       return ERR(ILLEGAL_ARGUMENT);
203     }
204     DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));
205     if (frames.size() > 0) {
206       memcpy(frame_buffer, frames.data(), collected_frames * sizeof(jvmtiFrameInfo));
207     }
208     *count_ptr = static_cast<jint>(frames.size());
209     return ERR(NONE);
210   }
211 
212   // Frames from the bottom.
213   if (collected_frames < static_cast<size_t>(-start_depth)) {
214     return ERR(ILLEGAL_ARGUMENT);
215   }
216 
217   size_t count = std::min(static_cast<size_t>(-start_depth), static_cast<size_t>(max_frame_count));
218   memcpy(frame_buffer,
219          &frames.data()[collected_frames + start_depth],
220          count * sizeof(jvmtiFrameInfo));
221   *count_ptr = static_cast<jint>(count);
222   return ERR(NONE);
223 }
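
// Worked example for the negative start_depth path above (added for clarity): with
// frames.size() == 10, start_depth == -3 and max_frame_count == 5, count becomes min(3, 5) == 3
// and the copy starts at &frames.data()[10 + (-3)], i.e. the last three collected frames are
// returned and *count_ptr is set to 3.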
224 
225 struct GetStackTraceDirectClosure : public art::Closure {
226  public:
227   GetStackTraceDirectClosure(jvmtiFrameInfo* frame_buffer_, size_t start, size_t stop)
228       : frame_buffer(frame_buffer_),
229         start_input(start),
230         stop_input(stop),
231         index(0) {
232     DCHECK_GE(start_input, 0u);
233   }
234 
235   void Run(art::Thread* self) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
236     auto frames_fn = [&](jvmtiFrameInfo info) {
237       frame_buffer[index] = info;
238       ++index;
239     };
240     auto visitor = MakeStackTraceVisitor(self, start_input, stop_input, frames_fn);
241     visitor.WalkStack(/* include_transitions= */ false);
242   }
243 
244   jvmtiFrameInfo* frame_buffer;
245 
246   const size_t start_input;
247   const size_t stop_input;
248 
249   size_t index = 0;
250 };
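
// Note (added for clarity): unlike GetStackTraceVectorClosure above, this closure writes straight
// into the caller-provided frame_buffer, so it is only used on the non-negative start_depth path
// where the number of frames written is bounded by max_frame_count up front.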
251 
252 jvmtiError StackUtil::GetStackTrace(jvmtiEnv* jvmti_env,
253                                     jthread java_thread,
254                                     jint start_depth,
255                                     jint max_frame_count,
256                                     jvmtiFrameInfo* frame_buffer,
257                                     jint* count_ptr) {
258   // It is not great that we have to hold these locks for so long, but it is necessary to ensure
259   // that the thread isn't dying on us.
260   art::ScopedObjectAccess soa(art::Thread::Current());
261   art::Locks::thread_list_lock_->ExclusiveLock(soa.Self());
262 
263   art::Thread* thread;
264   jvmtiError thread_error = ERR(INTERNAL);
265   if (!ThreadUtil::GetAliveNativeThread(java_thread, soa, &thread, &thread_error)) {
266     art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
267     return thread_error;
268   }
269   DCHECK(thread != nullptr);
270 
271   art::ThreadState state = thread->GetState();
272   if (state == art::ThreadState::kStarting || thread->IsStillStarting()) {
273     art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
274     return ERR(THREAD_NOT_ALIVE);
275   }
276 
277   if (max_frame_count < 0) {
278     art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
279     return ERR(ILLEGAL_ARGUMENT);
280   }
281   if (frame_buffer == nullptr || count_ptr == nullptr) {
282     art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
283     return ERR(NULL_POINTER);
284   }
285 
286   if (max_frame_count == 0) {
287     art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
288     *count_ptr = 0;
289     return ERR(NONE);
290   }
291 
292   if (start_depth >= 0) {
293     // Fast path: Regular order of stack trace. Fill into the frame_buffer directly.
294     GetStackTraceDirectClosure closure(frame_buffer,
295                                        static_cast<size_t>(start_depth),
296                                        static_cast<size_t>(max_frame_count));
297     // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
298     if (!thread->RequestSynchronousCheckpoint(&closure)) {
299       return ERR(THREAD_NOT_ALIVE);
300     }
301     *count_ptr = static_cast<jint>(closure.index);
302     if (closure.index == 0) {
303       JVMTI_LOG(INFO, jvmti_env) << "The stack is not large enough for a start_depth of "
304                                  << start_depth << ".";
305       return ERR(ILLEGAL_ARGUMENT);
306     }
307     return ERR(NONE);
308   } else {
309     GetStackTraceVectorClosure closure(0, 0);
310     // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
311     if (!thread->RequestSynchronousCheckpoint(&closure)) {
312       return ERR(THREAD_NOT_ALIVE);
313     }
314 
315     return TranslateFrameVector(closure.frames,
316                                 start_depth,
317                                 closure.start_result,
318                                 max_frame_count,
319                                 frame_buffer,
320                                 count_ptr);
321   }
322 }
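
// Illustrative agent-side call (hypothetical agent code using the C++ jvmti.h binding, shown only
// to document the contract of the entry point above): frames are filled starting at 'start_depth'
// and *count_ptr receives the number of frames actually written.
//
//   jvmtiFrameInfo frames[16];
//   jint count = 0;
//   jvmtiError err = jvmti->GetStackTrace(thread, /* start_depth= */ 0,
//                                         /* max_frame_count= */ 16, frames, &count);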
323 
324 template <typename Data>
325 struct GetAllStackTracesVectorClosure : public art::Closure {
326   GetAllStackTracesVectorClosure(size_t stop, Data* data_)
327       : barrier(0), stop_input(stop), data(data_) {}
328 
329   void Run(art::Thread* thread) override
330       REQUIRES_SHARED(art::Locks::mutator_lock_)
331       REQUIRES(!data->mutex) {
332     art::Thread* self = art::Thread::Current();
333     Work(thread, self);
334     barrier.Pass(self);
335   }
336 
337   void Work(art::Thread* thread, art::Thread* self)
338       REQUIRES_SHARED(art::Locks::mutator_lock_)
339       REQUIRES(!data->mutex) {
340     // Skip threads that are still starting.
341     if (thread->IsStillStarting()) {
342       return;
343     }
344 
345     std::vector<jvmtiFrameInfo>* thread_frames = data->GetFrameStorageFor(self, thread);
346     if (thread_frames == nullptr) {
347       return;
348     }
349 
350     // Now collect the data.
351     auto frames_fn = [&](jvmtiFrameInfo info) {
352       thread_frames->push_back(info);
353     };
354     auto visitor = MakeStackTraceVisitor(thread, 0u, stop_input, frames_fn);
355     visitor.WalkStack(/* include_transitions= */ false);
356   }
357 
358   art::Barrier barrier;
359   const size_t stop_input;
360   Data* data;
361 };
362 
363 template <typename Data>
364 static void RunCheckpointAndWait(Data* data, size_t max_frame_count)
365     REQUIRES_SHARED(art::Locks::mutator_lock_) {
366   // Note: requires the mutator lock as the checkpoint requires the mutator lock.
367   GetAllStackTracesVectorClosure<Data> closure(max_frame_count, data);
368   size_t barrier_count = art::Runtime::Current()->GetThreadList()->RunCheckpoint(&closure, nullptr);
369   if (barrier_count == 0) {
370     return;
371   }
372   art::Thread* self = art::Thread::Current();
373   art::ScopedThreadStateChange tsc(self, art::ThreadState::kWaitingForCheckPointsToRun);
374   closure.barrier.Increment(self, barrier_count);
375 }
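
// Note on the Data template parameter (restating the contract used above and by the callers
// below): it must provide a 'mutex' member and a GetFrameStorageFor(self, thread) method that
// hands back a per-thread frame vector (or nullptr to skip the thread). The closure runs once per
// live thread and the Barrier::Increment call above blocks until every asynchronous run has
// passed the barrier.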
376 
377 jvmtiError StackUtil::GetAllStackTraces(jvmtiEnv* env,
378                                         jint max_frame_count,
379                                         jvmtiStackInfo** stack_info_ptr,
380                                         jint* thread_count_ptr) {
381   if (max_frame_count < 0) {
382     return ERR(ILLEGAL_ARGUMENT);
383   }
384   if (stack_info_ptr == nullptr || thread_count_ptr == nullptr) {
385     return ERR(NULL_POINTER);
386   }
387 
388   struct AllStackTracesData {
389     AllStackTracesData() : mutex("GetAllStackTraces", art::LockLevel::kAbortLock) {}
390     ~AllStackTracesData() {
391       JNIEnv* jni_env = art::Thread::Current()->GetJniEnv();
392       for (jthread global_thread_ref : thread_peers) {
393         jni_env->DeleteGlobalRef(global_thread_ref);
394       }
395     }
396 
397     std::vector<jvmtiFrameInfo>* GetFrameStorageFor(art::Thread* self, art::Thread* thread)
398         REQUIRES_SHARED(art::Locks::mutator_lock_)
399         REQUIRES(!mutex) {
400       art::MutexLock mu(self, mutex);
401 
402       threads.push_back(thread);
403 
404       jthread peer = art::Runtime::Current()->GetJavaVM()->AddGlobalRef(
405           self, thread->GetPeerFromOtherThread());
406       thread_peers.push_back(peer);
407 
408       frames.emplace_back(new std::vector<jvmtiFrameInfo>());
409       return frames.back().get();
410     }
411 
412     art::Mutex mutex;
413 
414     // Storage. Only access directly after completion.
415 
416     std::vector<art::Thread*> threads;
417     // "thread_peers" contains global references to their peers.
418     std::vector<jthread> thread_peers;
419 
420     std::vector<std::unique_ptr<std::vector<jvmtiFrameInfo>>> frames;
421   };
422 
423   AllStackTracesData data;
424   art::Thread* current = art::Thread::Current();
425   {
426     art::ScopedObjectAccess soa(current);
427     RunCheckpointAndWait(&data, static_cast<size_t>(max_frame_count));
428   }
429 
430   // Convert the data into our output format.
431 
432   // Note: we use an array of jvmtiStackInfo for convenience. The spec says we need to
433   //       allocate one big chunk for this and the actual frames, which means we need
434   //       to either be conservative or rearrange things later (the latter is implemented).
435   std::unique_ptr<jvmtiStackInfo[]> stack_info_array(new jvmtiStackInfo[data.frames.size()]);
436   std::vector<std::unique_ptr<jvmtiFrameInfo[]>> frame_infos;
437   frame_infos.reserve(data.frames.size());
438 
439   // Now run through and add data for each thread.
440   size_t sum_frames = 0;
441   for (size_t index = 0; index < data.frames.size(); ++index) {
442     jvmtiStackInfo& stack_info = stack_info_array.get()[index];
443     memset(&stack_info, 0, sizeof(jvmtiStackInfo));
444 
445     const std::vector<jvmtiFrameInfo>& thread_frames = *data.frames[index].get();
446 
447     // For the time being, set the thread to null. We'll fix it up in the second stage.
448     stack_info.thread = nullptr;
449     stack_info.state = JVMTI_THREAD_STATE_SUSPENDED;
450 
451     size_t collected_frames = thread_frames.size();
452     if (max_frame_count == 0 || collected_frames == 0) {
453       stack_info.frame_count = 0;
454       stack_info.frame_buffer = nullptr;
455       continue;
456     }
457     DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));
458 
459     jvmtiFrameInfo* frame_info = new jvmtiFrameInfo[collected_frames];
460     frame_infos.emplace_back(frame_info);
461 
462     jint count;
463     jvmtiError translate_result = TranslateFrameVector(thread_frames,
464                                                        0,
465                                                        0,
466                                                        static_cast<jint>(collected_frames),
467                                                        frame_info,
468                                                        &count);
469     DCHECK(translate_result == JVMTI_ERROR_NONE);
470     stack_info.frame_count = static_cast<jint>(collected_frames);
471     stack_info.frame_buffer = frame_info;
472     sum_frames += static_cast<size_t>(count);
473   }
474 
475   // No errors, yet. Now put it all into an output buffer.
476   size_t rounded_stack_info_size = art::RoundUp(sizeof(jvmtiStackInfo) * data.frames.size(),
477                                                 alignof(jvmtiFrameInfo));
478   size_t chunk_size = rounded_stack_info_size + sum_frames * sizeof(jvmtiFrameInfo);
479   unsigned char* chunk_data;
480   jvmtiError alloc_result = env->Allocate(chunk_size, &chunk_data);
481   if (alloc_result != ERR(NONE)) {
482     return alloc_result;
483   }
484 
485   jvmtiStackInfo* stack_info = reinterpret_cast<jvmtiStackInfo*>(chunk_data);
486   // First copy in all the basic data.
487   memcpy(stack_info, stack_info_array.get(), sizeof(jvmtiStackInfo) * data.frames.size());
488 
489   // Now copy the frames and fix up the pointers.
490   jvmtiFrameInfo* frame_info = reinterpret_cast<jvmtiFrameInfo*>(
491       chunk_data + rounded_stack_info_size);
492   for (size_t i = 0; i < data.frames.size(); ++i) {
493     jvmtiStackInfo& old_stack_info = stack_info_array.get()[i];
494     jvmtiStackInfo& new_stack_info = stack_info[i];
495 
496     // Translate the global ref into a local ref.
497     new_stack_info.thread =
498         static_cast<JNIEnv*>(current->GetJniEnv())->NewLocalRef(data.thread_peers[i]);
499 
500     if (old_stack_info.frame_count > 0) {
501       // Only copy when there's data - leave the nullptr alone.
502       size_t frames_size = static_cast<size_t>(old_stack_info.frame_count) * sizeof(jvmtiFrameInfo);
503       memcpy(frame_info, old_stack_info.frame_buffer, frames_size);
504       new_stack_info.frame_buffer = frame_info;
505       frame_info += old_stack_info.frame_count;
506     }
507   }
508 
509   *stack_info_ptr = stack_info;
510   *thread_count_ptr = static_cast<jint>(data.frames.size());
511 
512   return ERR(NONE);
513 }
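
// Layout of the single allocation returned above (as built by the code, restated for clarity):
//
//   [ jvmtiStackInfo x thread_count ][ padding to alignof(jvmtiFrameInfo) ][ jvmtiFrameInfo x sum_frames ]
//
// Each stack_info[i].frame_buffer points into the trailing jvmtiFrameInfo region (or is nullptr
// for threads with no collected frames), so the caller releases everything with a single
// Deallocate of *stack_info_ptr.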
514 
515 jvmtiError StackUtil::GetThreadListStackTraces(jvmtiEnv* env,
516                                                jint thread_count,
517                                                const jthread* thread_list,
518                                                jint max_frame_count,
519                                                jvmtiStackInfo** stack_info_ptr) {
520   if (max_frame_count < 0) {
521     return ERR(ILLEGAL_ARGUMENT);
522   }
523   if (thread_count < 0) {
524     return ERR(ILLEGAL_ARGUMENT);
525   }
526   if (thread_count == 0) {
527     *stack_info_ptr = nullptr;
528     return ERR(NONE);
529   }
530   if (thread_list == nullptr || stack_info_ptr == nullptr) {
531     return ERR(NULL_POINTER);
532   }
533 
534   art::Thread* current = art::Thread::Current();
535   art::ScopedObjectAccess soa(current);      // Now we know we have the shared lock.
536 
537   struct SelectStackTracesData {
538     SelectStackTracesData() : mutex("GetSelectStackTraces", art::LockLevel::kAbortLock) {}
539 
540     std::vector<jvmtiFrameInfo>* GetFrameStorageFor(art::Thread* self, art::Thread* thread)
541               REQUIRES_SHARED(art::Locks::mutator_lock_)
542               REQUIRES(!mutex) {
543       art::ObjPtr<art::mirror::Object> peer = thread->GetPeerFromOtherThread();
544       for (size_t index = 0; index != handles.size(); ++index) {
545         if (peer == handles[index].Get()) {
546           // Found the thread.
547           art::MutexLock mu(self, mutex);
548 
549           threads.push_back(thread);
550           thread_list_indices.push_back(index);
551 
552           frames.emplace_back(new std::vector<jvmtiFrameInfo>());
553           return frames.back().get();
554         }
555       }
556       return nullptr;
557     }
558 
559     art::Mutex mutex;
560 
561     // Selection data.
562 
563     std::vector<art::Handle<art::mirror::Object>> handles;
564 
565     // Storage. Only access directly after completion.
566 
567     std::vector<art::Thread*> threads;
568     std::vector<size_t> thread_list_indices;
569 
570     std::vector<std::unique_ptr<std::vector<jvmtiFrameInfo>>> frames;
571   };
572 
573   SelectStackTracesData data;
574 
575   // Decode all threads to raw pointers. Put them into a handle scope to avoid any moving GC bugs.
576   art::VariableSizedHandleScope hs(current);
577   for (jint i = 0; i != thread_count; ++i) {
578     if (thread_list[i] == nullptr) {
579       return ERR(INVALID_THREAD);
580     }
581     if (!soa.Env()->IsInstanceOf(thread_list[i], art::WellKnownClasses::java_lang_Thread)) {
582       return ERR(INVALID_THREAD);
583     }
584     data.handles.push_back(hs.NewHandle(soa.Decode<art::mirror::Object>(thread_list[i])));
585   }
586 
587   RunCheckpointAndWait(&data, static_cast<size_t>(max_frame_count));
588 
589   // Convert the data into our output format.
590 
591   // Note: we use an array of jvmtiStackInfo for convenience. The spec says we need to
592   //       allocate one big chunk for this and the actual frames, which means we need
593   //       to either be conservative or rearrange things later (the latter is implemented).
594   std::unique_ptr<jvmtiStackInfo[]> stack_info_array(new jvmtiStackInfo[data.frames.size()]);
595   std::vector<std::unique_ptr<jvmtiFrameInfo[]>> frame_infos;
596   frame_infos.reserve(data.frames.size());
597 
598   // Now run through and add data for each thread.
599   size_t sum_frames = 0;
600   for (size_t index = 0; index < data.frames.size(); ++index) {
601     jvmtiStackInfo& stack_info = stack_info_array.get()[index];
602     memset(&stack_info, 0, sizeof(jvmtiStackInfo));
603 
604     art::Thread* self = data.threads[index];
605     const std::vector<jvmtiFrameInfo>& thread_frames = *data.frames[index].get();
606 
607     // For the time being, set the thread to null. We don't have good ScopedLocalRef
608     // infrastructure.
609     DCHECK(self->GetPeerFromOtherThread() != nullptr);
610     stack_info.thread = nullptr;
611     stack_info.state = JVMTI_THREAD_STATE_SUSPENDED;
612 
613     size_t collected_frames = thread_frames.size();
614     if (max_frame_count == 0 || collected_frames == 0) {
615       stack_info.frame_count = 0;
616       stack_info.frame_buffer = nullptr;
617       continue;
618     }
619     DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));
620 
621     jvmtiFrameInfo* frame_info = new jvmtiFrameInfo[collected_frames];
622     frame_infos.emplace_back(frame_info);
623 
624     jint count;
625     jvmtiError translate_result = TranslateFrameVector(thread_frames,
626                                                        0,
627                                                        0,
628                                                        static_cast<jint>(collected_frames),
629                                                        frame_info,
630                                                        &count);
631     DCHECK(translate_result == JVMTI_ERROR_NONE);
632     stack_info.frame_count = static_cast<jint>(collected_frames);
633     stack_info.frame_buffer = frame_info;
634     sum_frames += static_cast<size_t>(count);
635   }
636 
637   // No errors, yet. Now put it all into an output buffer. Note that the entry count here is
638   // thread_count, which may be larger than data.frames.size().
639   size_t rounded_stack_info_size = art::RoundUp(sizeof(jvmtiStackInfo) * thread_count,
640                                                 alignof(jvmtiFrameInfo));
641   size_t chunk_size = rounded_stack_info_size + sum_frames * sizeof(jvmtiFrameInfo);
642   unsigned char* chunk_data;
643   jvmtiError alloc_result = env->Allocate(chunk_size, &chunk_data);
644   if (alloc_result != ERR(NONE)) {
645     return alloc_result;
646   }
647 
648   jvmtiStackInfo* stack_info = reinterpret_cast<jvmtiStackInfo*>(chunk_data);
649   jvmtiFrameInfo* frame_info = reinterpret_cast<jvmtiFrameInfo*>(
650       chunk_data + rounded_stack_info_size);
651 
652   for (size_t i = 0; i < static_cast<size_t>(thread_count); ++i) {
653     // Check whether we found a running thread for this.
654     // Note: For simplicity, and with the expectation that the list is usually small, use a simple
655     //       search. (The list is *not* sorted!)
656     auto it = std::find(data.thread_list_indices.begin(), data.thread_list_indices.end(), i);
657     if (it == data.thread_list_indices.end()) {
658       // No native thread. Must be new or dead. We need to fill out the stack info now.
659       // (Need to read the Java "started" field to know whether this is starting or terminated.)
660       art::ObjPtr<art::mirror::Object> peer = soa.Decode<art::mirror::Object>(thread_list[i]);
661       art::ObjPtr<art::mirror::Class> klass = peer->GetClass();
662       art::ArtField* started_field = klass->FindDeclaredInstanceField("started", "Z");
663       CHECK(started_field != nullptr);
664       bool started = started_field->GetBoolean(peer) != 0;
665       constexpr jint kStartedState = JVMTI_JAVA_LANG_THREAD_STATE_NEW;
666       constexpr jint kTerminatedState = JVMTI_THREAD_STATE_TERMINATED |
667           JVMTI_JAVA_LANG_THREAD_STATE_TERMINATED;
668       stack_info[i].thread = reinterpret_cast<JNIEnv*>(soa.Env())->NewLocalRef(thread_list[i]);
669       stack_info[i].state = started ? kTerminatedState : kStartedState;
670       stack_info[i].frame_count = 0;
671       stack_info[i].frame_buffer = nullptr;
672     } else {
673       // Had a native thread and frames.
674       size_t f_index = it - data.thread_list_indices.begin();
675 
676       jvmtiStackInfo& old_stack_info = stack_info_array.get()[f_index];
677       jvmtiStackInfo& new_stack_info = stack_info[i];
678 
679       memcpy(&new_stack_info, &old_stack_info, sizeof(jvmtiStackInfo));
680       new_stack_info.thread = reinterpret_cast<JNIEnv*>(soa.Env())->NewLocalRef(thread_list[i]);
681       if (old_stack_info.frame_count > 0) {
682         // Only copy when there's data - leave the nullptr alone.
683         size_t frames_size =
684             static_cast<size_t>(old_stack_info.frame_count) * sizeof(jvmtiFrameInfo);
685         memcpy(frame_info, old_stack_info.frame_buffer, frames_size);
686         new_stack_info.frame_buffer = frame_info;
687         frame_info += old_stack_info.frame_count;
688       }
689     }
690   }
691 
692   *stack_info_ptr = stack_info;
693 
694   return ERR(NONE);
695 }
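
// Note (added for clarity): entries in thread_list with no live native thread are still reported
// above; their state is derived from the java.lang.Thread 'started' field (never started =>
// JVMTI_JAVA_LANG_THREAD_STATE_NEW, otherwise terminated) and they get a zero-length frame buffer.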
696 
697 struct GetFrameCountClosure : public art::Closure {
698  public:
699   GetFrameCountClosure() : count(0) {}
700 
701   void Run(art::Thread* self) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
702     // This is not StackVisitor::ComputeNumFrames, as runtime methods and transitions must not be
703     // counted.
704     art::StackVisitor::WalkStack(
705         [&](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(art::Locks::mutator_lock_) {
706           art::ArtMethod* m = stack_visitor->GetMethod();
707           if (m != nullptr && !m->IsRuntimeMethod()) {
708             count++;
709           }
710           return true;
711         },
712         self,
713         /* context= */ nullptr,
714         art::StackVisitor::StackWalkKind::kIncludeInlinedFrames);
715   }
716 
717   size_t count;
718 };
719 
720 jvmtiError StackUtil::GetFrameCount(jvmtiEnv* env ATTRIBUTE_UNUSED,
721                                     jthread java_thread,
722                                     jint* count_ptr) {
723   // It is not great that we have to hold these locks for so long, but it is necessary to ensure
724   // that the thread isn't dying on us.
725   art::ScopedObjectAccess soa(art::Thread::Current());
726   art::Locks::thread_list_lock_->ExclusiveLock(soa.Self());
727 
728   art::Thread* thread;
729   jvmtiError thread_error = ERR(INTERNAL);
730   if (!ThreadUtil::GetAliveNativeThread(java_thread, soa, &thread, &thread_error)) {
731     art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
732     return thread_error;
733   }
734 
735   DCHECK(thread != nullptr);
736   art::ThreadState state = thread->GetState();
737   if (state == art::ThreadState::kStarting || thread->IsStillStarting()) {
738     art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
739     return ERR(THREAD_NOT_ALIVE);
740   }
741 
742   if (count_ptr == nullptr) {
743     art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
744     return ERR(NULL_POINTER);
745   }
746 
747   GetFrameCountClosure closure;
748   // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
749   if (!thread->RequestSynchronousCheckpoint(&closure)) {
750     return ERR(THREAD_NOT_ALIVE);
751   }
752 
753   *count_ptr = closure.count;
754   return ERR(NONE);
755 }
756 
757 struct GetLocationClosure : public art::Closure {
758  public:
759   explicit GetLocationClosure(size_t n_in) : n(n_in), method(nullptr), dex_pc(0) {}
760 
761   void Run(art::Thread* self) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
762     // Walks up the stack 'n' callers.
763     size_t count = 0u;
764     art::StackVisitor::WalkStack(
765         [&](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(art::Locks::mutator_lock_) {
766           art::ArtMethod* m = stack_visitor->GetMethod();
767           if (m != nullptr && !m->IsRuntimeMethod()) {
768             DCHECK(method == nullptr);
769             if (count == n) {
770               method = m;
771               dex_pc = stack_visitor->GetDexPc(/*abort_on_failure=*/false);
772               return false;
773             }
774             count++;
775           }
776           return true;
777         },
778         self,
779         /* context= */ nullptr,
780         art::StackVisitor::StackWalkKind::kIncludeInlinedFrames);
781   }
782 
783   const size_t n;
784   art::ArtMethod* method;
785   uint32_t dex_pc;
786 };
787 
788 jvmtiError StackUtil::GetFrameLocation(jvmtiEnv* env ATTRIBUTE_UNUSED,
789                                        jthread java_thread,
790                                        jint depth,
791                                        jmethodID* method_ptr,
792                                        jlocation* location_ptr) {
793   // It is not great that we have to hold these locks for so long, but it is necessary to ensure
794   // that the thread isn't dying on us.
795   art::ScopedObjectAccess soa(art::Thread::Current());
796   art::Locks::thread_list_lock_->ExclusiveLock(soa.Self());
797 
798   art::Thread* thread;
799   jvmtiError thread_error = ERR(INTERNAL);
800   if (!ThreadUtil::GetAliveNativeThread(java_thread, soa, &thread, &thread_error)) {
801     art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
802     return thread_error;
803   }
804   DCHECK(thread != nullptr);
805 
806   art::ThreadState state = thread->GetState();
807   if (state == art::ThreadState::kStarting || thread->IsStillStarting()) {
808     art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
809     return ERR(THREAD_NOT_ALIVE);
810   }
811 
812   if (depth < 0) {
813     art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
814     return ERR(ILLEGAL_ARGUMENT);
815   }
816   if (method_ptr == nullptr || location_ptr == nullptr) {
817     art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
818     return ERR(NULL_POINTER);
819   }
820 
821   GetLocationClosure closure(static_cast<size_t>(depth));
822   // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
823   if (!thread->RequestSynchronousCheckpoint(&closure)) {
824     return ERR(THREAD_NOT_ALIVE);
825   }
826 
827   if (closure.method == nullptr) {
828     return ERR(NO_MORE_FRAMES);
829   }
830 
831   *method_ptr = art::jni::EncodeArtMethod(closure.method);
832   if (closure.method->IsNative() || closure.method->IsProxyMethod()) {
833     *location_ptr = -1;
834   } else {
835     if (closure.dex_pc == art::dex::kDexNoIndex) {
836       return ERR(INTERNAL);
837     }
838     *location_ptr = static_cast<jlocation>(closure.dex_pc);
839   }
840 
841   return ERR(NONE);
842 }
843 
844 struct MonitorVisitor : public art::StackVisitor, public art::SingleRootVisitor {
845   // We need a context because VisitLocks needs it to retrieve the monitor objects.
846   explicit MonitorVisitor(art::Thread* thread)
847       REQUIRES_SHARED(art::Locks::mutator_lock_)
848       : art::StackVisitor(thread,
849                           art::Context::Create(),
850                           art::StackVisitor::StackWalkKind::kIncludeInlinedFrames),
851         hs(art::Thread::Current()),
852         current_stack_depth(0) {}
853 
854   ~MonitorVisitor() {
855     delete context_;
856   }
857 
858   bool VisitFrame() override REQUIRES_SHARED(art::Locks::mutator_lock_) {
859     art::Locks::mutator_lock_->AssertSharedHeld(art::Thread::Current());
860     if (!GetMethod()->IsRuntimeMethod()) {
861       art::Monitor::VisitLocks(this, AppendOwnedMonitors, this);
862       ++current_stack_depth;
863     }
864     return true;
865   }
866 
867   static void AppendOwnedMonitors(art::ObjPtr<art::mirror::Object> owned_monitor, void* arg)
868       REQUIRES_SHARED(art::Locks::mutator_lock_) {
869     art::Locks::mutator_lock_->AssertSharedHeld(art::Thread::Current());
870     MonitorVisitor* visitor = reinterpret_cast<MonitorVisitor*>(arg);
871     // Filter out duplicates.
872     for (const art::Handle<art::mirror::Object>& monitor : visitor->monitors) {
873       if (monitor.Get() == owned_monitor) {
874         return;
875       }
876     }
877     visitor->monitors.push_back(visitor->hs.NewHandle(owned_monitor));
878     visitor->stack_depths.push_back(visitor->current_stack_depth);
879   }
880 
881   void VisitRoot(art::mirror::Object* obj, const art::RootInfo& info ATTRIBUTE_UNUSED)
882       override REQUIRES_SHARED(art::Locks::mutator_lock_) {
883     for (const art::Handle<art::mirror::Object>& m : monitors) {
884       if (m.Get() == obj) {
885         return;
886       }
887     }
888     monitors.push_back(hs.NewHandle(obj));
889     stack_depths.push_back(-1);
890   }
891 
892   art::VariableSizedHandleScope hs;
893   jint current_stack_depth;
894   std::vector<art::Handle<art::mirror::Object>> monitors;
895   std::vector<jint> stack_depths;
896 };
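
// Note (added for clarity): 'monitors' and 'stack_depths' are parallel vectors. Monitors found
// while walking managed frames record the frame depth at which they are locked; monitors found
// only through the JNI monitor roots (VisitRoot above) are recorded with a depth of -1.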
897 
898 template<typename Fn>
899 struct MonitorInfoClosure : public art::Closure {
900  public:
901   explicit MonitorInfoClosure(Fn handle_results)
902       : err_(OK), handle_results_(handle_results) {}
903 
904   void Run(art::Thread* target) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
905     art::Locks::mutator_lock_->AssertSharedHeld(art::Thread::Current());
906     // Find the monitors on the stack.
907     MonitorVisitor visitor(target);
908     visitor.WalkStack(/* include_transitions= */ false);
909     // Find any other monitors, including ones acquired in native code.
910     art::RootInfo root_info(art::kRootVMInternal);
911     target->GetJniEnv()->VisitMonitorRoots(&visitor, root_info);
912     err_ = handle_results_(visitor);
913   }
914 
915   jvmtiError GetError() {
916     return err_;
917   }
918 
919  private:
920   jvmtiError err_;
921   Fn handle_results_;
922 };
923 
924 
925 template <typename Fn>
926 static jvmtiError GetOwnedMonitorInfoCommon(const art::ScopedObjectAccessAlreadyRunnable& soa,
927                                             jthread thread,
928                                             Fn handle_results)
929     REQUIRES_SHARED(art::Locks::mutator_lock_) {
930   art::Thread* self = art::Thread::Current();
931   MonitorInfoClosure<Fn> closure(handle_results);
932   bool called_method = false;
933   {
934     art::Locks::thread_list_lock_->ExclusiveLock(self);
935     art::Thread* target = nullptr;
936     jvmtiError err = ERR(INTERNAL);
937     if (!ThreadUtil::GetAliveNativeThread(thread, soa, &target, &err)) {
938       art::Locks::thread_list_lock_->ExclusiveUnlock(self);
939       return err;
940     }
941     if (target != self) {
942       called_method = true;
943       // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
944       // Since this deals with object references we need to avoid going to sleep.
945       art::ScopedAssertNoThreadSuspension sants("Getting owned monitor usage");
946       if (!target->RequestSynchronousCheckpoint(&closure, art::ThreadState::kRunnable)) {
947         return ERR(THREAD_NOT_ALIVE);
948       }
949     } else {
950       art::Locks::thread_list_lock_->ExclusiveUnlock(self);
951     }
952   }
953   // Cannot call the closure on the current thread if we have thread_list_lock since we need to call
954   // into the verifier which can cause the current thread to suspend for gc. Suspending would be a
955   // bad thing to do if we hold the ThreadListLock. For other threads since we are running it on a
956   // checkpoint we are fine but if the thread is the current one we need to drop the mutex first.
957   if (!called_method) {
958     closure.Run(self);
959   }
960   return closure.GetError();
961 }
962 
963 jvmtiError StackUtil::GetOwnedMonitorStackDepthInfo(jvmtiEnv* env,
964                                                     jthread thread,
965                                                     jint* info_cnt,
966                                                     jvmtiMonitorStackDepthInfo** info_ptr) {
967   if (info_cnt == nullptr || info_ptr == nullptr) {
968     return ERR(NULL_POINTER);
969   }
970   art::ScopedObjectAccess soa(art::Thread::Current());
971   std::vector<art::GcRoot<art::mirror::Object>> mons;
972   std::vector<uint32_t> depths;
973   auto handle_fun = [&] (MonitorVisitor& visitor) REQUIRES_SHARED(art::Locks::mutator_lock_) {
974     for (size_t i = 0; i < visitor.monitors.size(); i++) {
975       mons.push_back(art::GcRoot<art::mirror::Object>(visitor.monitors[i].Get()));
976       depths.push_back(visitor.stack_depths[i]);
977     }
978     return OK;
979   };
980   jvmtiError err = GetOwnedMonitorInfoCommon(soa, thread, handle_fun);
981   if (err != OK) {
982     return err;
983   }
984   auto nbytes = sizeof(jvmtiMonitorStackDepthInfo) * mons.size();
985   err = env->Allocate(nbytes, reinterpret_cast<unsigned char**>(info_ptr));
986   if (err != OK) {
987     return err;
988   }
989   *info_cnt = mons.size();
990   for (uint32_t i = 0; i < mons.size(); i++) {
991     (*info_ptr)[i] = {
992       soa.AddLocalReference<jobject>(mons[i].Read()),
993       static_cast<jint>(depths[i])
994     };
995   }
996   return err;
997 }
998 
999 jvmtiError StackUtil::GetOwnedMonitorInfo(jvmtiEnv* env,
1000                                           jthread thread,
1001                                           jint* owned_monitor_count_ptr,
1002                                           jobject** owned_monitors_ptr) {
1003   if (owned_monitor_count_ptr == nullptr || owned_monitors_ptr == nullptr) {
1004     return ERR(NULL_POINTER);
1005   }
1006   art::ScopedObjectAccess soa(art::Thread::Current());
1007   std::vector<art::GcRoot<art::mirror::Object>> mons;
1008   auto handle_fun = [&] (MonitorVisitor& visitor) REQUIRES_SHARED(art::Locks::mutator_lock_) {
1009     for (size_t i = 0; i < visitor.monitors.size(); i++) {
1010       mons.push_back(art::GcRoot<art::mirror::Object>(visitor.monitors[i].Get()));
1011     }
1012     return OK;
1013   };
1014   jvmtiError err = GetOwnedMonitorInfoCommon(soa, thread, handle_fun);
1015   if (err != OK) {
1016     return err;
1017   }
1018   auto nbytes = sizeof(jobject) * mons.size();
1019   err = env->Allocate(nbytes, reinterpret_cast<unsigned char**>(owned_monitors_ptr));
1020   if (err != OK) {
1021     return err;
1022   }
1023   *owned_monitor_count_ptr = mons.size();
1024   for (uint32_t i = 0; i < mons.size(); i++) {
1025     (*owned_monitors_ptr)[i] = soa.AddLocalReference<jobject>(mons[i].Read());
1026   }
1027   return err;
1028 }
1029 
1030 jvmtiError StackUtil::NotifyFramePop(jvmtiEnv* env, jthread thread, jint depth) {
1031   if (depth < 0) {
1032     return ERR(ILLEGAL_ARGUMENT);
1033   }
1034   ArtJvmTiEnv* tienv = ArtJvmTiEnv::AsArtJvmTiEnv(env);
1035   art::Thread* self = art::Thread::Current();
1036   art::Thread* target;
1037 
1038   ScopedNoUserCodeSuspension snucs(self);
1039   // From now on we know we cannot get suspended by user-code.
1040   // NB This does a SuspendCheck (during thread state change) so we need to make
1041   // sure we don't have the 'suspend_lock' locked here.
1042   art::ScopedObjectAccess soa(self);
1043   art::Locks::thread_list_lock_->ExclusiveLock(self);
1044   jvmtiError err = ERR(INTERNAL);
1045   if (!ThreadUtil::GetAliveNativeThread(thread, soa, &target, &err)) {
1046     art::Locks::thread_list_lock_->ExclusiveUnlock(self);
1047     return err;
1048   }
1049   if (target != self) {
1050     // TODO This is part of the spec but we could easily avoid needing to do it.
1051     // We would just put all the logic into a sync-checkpoint.
1052     art::Locks::thread_suspend_count_lock_->ExclusiveLock(self);
1053     if (target->GetUserCodeSuspendCount() == 0) {
1054       art::Locks::thread_suspend_count_lock_->ExclusiveUnlock(self);
1055       art::Locks::thread_list_lock_->ExclusiveUnlock(self);
1056       return ERR(THREAD_NOT_SUSPENDED);
1057     }
1058     art::Locks::thread_suspend_count_lock_->ExclusiveUnlock(self);
1059   }
1060   // We hold the user_code_suspension_lock_ so the target thread is staying
1061   // suspended until we are done (unless it's 'self' in which case we don't care
1062   // since we aren't going to be returning).
1063   // TODO We could implement this using a synchronous checkpoint and not bother
1064   // with any of the suspension stuff. The spec does specifically say to return
1065   // THREAD_NOT_SUSPENDED though. Find the requested stack frame.
1066   std::unique_ptr<art::Context> context(art::Context::Create());
1067   FindFrameAtDepthVisitor visitor(target, context.get(), depth);
1068   visitor.WalkStack();
1069   if (!visitor.FoundFrame()) {
1070     art::Locks::thread_list_lock_->ExclusiveUnlock(self);
1071     return ERR(NO_MORE_FRAMES);
1072   }
1073   art::ArtMethod* method = visitor.GetMethod();
1074   if (method->IsNative()) {
1075     art::Locks::thread_list_lock_->ExclusiveUnlock(self);
1076     return ERR(OPAQUE_FRAME);
1077   }
1078   // From here we are sure to succeed.
1079   bool needs_instrument = false;
1080   // Get/create a shadow frame
1081   art::ShadowFrame* shadow_frame =
1082       visitor.GetOrCreateShadowFrame(&needs_instrument);
1083   {
1084     art::WriterMutexLock lk(self, tienv->event_info_mutex_);
1085     if (LIKELY(!shadow_frame->NeedsNotifyPop())) {
1086       // Ensure we won't miss exceptions being thrown if we get jit-compiled. We
1087       // only do this for the first NotifyPopFrame.
1088       target->IncrementForceInterpreterCount();
1089 
1090       // Mark shadow frame as needs_notify_pop_
1091       shadow_frame->SetNotifyPop(true);
1092     }
1093     tienv->notify_frames.insert(shadow_frame);
1094   }
1095   // Make sure we will go to the interpreter and use the shadow frames.
1096   if (needs_instrument) {
1097     art::FunctionClosure fc([](art::Thread* self) REQUIRES_SHARED(art::Locks::mutator_lock_) {
1098       DeoptManager::Get()->DeoptimizeThread(self);
1099     });
1100     target->RequestSynchronousCheckpoint(&fc);
1101   } else {
1102     art::Locks::thread_list_lock_->ExclusiveUnlock(self);
1103   }
1104   return OK;
1105 }
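
// Summary of the mechanism above (added for clarity): the frame at 'depth' is materialized as a
// shadow frame, marked with SetNotifyPop(true) and remembered in tienv->notify_frames; forcing
// the target into the interpreter (and deoptimizing it via a checkpoint when a new shadow frame
// had to be created) is what guarantees the FramePop event can be delivered when that frame
// unwinds.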
1106 
1107 namespace {
1108 
1109 enum class NonStandardExitType {
1110   kPopFrame,
1111   kForceReturn,
1112 };
1113 
1114 template<NonStandardExitType kExitType>
1115 class NonStandardExitFrames {
1116  public:
1117   NonStandardExitFrames(art::Thread* self, jvmtiEnv* env, jthread thread)
1118       REQUIRES(!art::Locks::thread_suspend_count_lock_)
1119       ACQUIRE_SHARED(art::Locks::mutator_lock_)
1120       ACQUIRE(art::Locks::thread_list_lock_, art::Locks::user_code_suspension_lock_)
1121       : snucs_(self) {
1122     // We keep the user-code-suspend-count lock.
1123     art::Locks::user_code_suspension_lock_->AssertExclusiveHeld(self);
1124 
1125     // From now on we know we cannot get suspended by user-code.
1126     // NB This does a SuspendCheck (during thread state change) so we need to make sure we don't
1127     // have the 'suspend_lock' locked here.
1128     old_state_ = self->TransitionFromSuspendedToRunnable();
1129     art::ScopedObjectAccessUnchecked soau(self);
1130 
1131     art::Locks::thread_list_lock_->ExclusiveLock(self);
1132 
1133     if (!ThreadUtil::GetAliveNativeThread(thread, soau, &target_, &result_)) {
1134       return;
1135     }
1136     {
1137       art::MutexLock tscl_mu(self, *art::Locks::thread_suspend_count_lock_);
1138       if (target_ != self && target_->GetUserCodeSuspendCount() == 0) {
1139         // We cannot be the current thread for this function.
1140         result_ = ERR(THREAD_NOT_SUSPENDED);
1141         return;
1142       }
1143     }
1144     JvmtiGlobalTLSData* tls_data = ThreadUtil::GetGlobalTLSData(target_);
1145     constexpr art::StackVisitor::StackWalkKind kWalkKind =
1146         art::StackVisitor::StackWalkKind::kIncludeInlinedFrames;
1147     if (tls_data != nullptr &&
1148         tls_data->disable_pop_frame_depth != JvmtiGlobalTLSData::kNoDisallowedPopFrame &&
1149         tls_data->disable_pop_frame_depth ==
1150             art::StackVisitor::ComputeNumFrames(target_, kWalkKind)) {
1151       JVMTI_LOG(WARNING, env) << "Disallowing frame pop due to in-progress class-load/prepare. "
1152                               << "Frame at depth " << tls_data->disable_pop_frame_depth << " was "
1153                               << "marked as un-poppable by the jvmti plugin. See b/117615146 for "
1154                               << "more information.";
1155       result_ = ERR(OPAQUE_FRAME);
1156       return;
1157     }
1158     // We hold the user_code_suspension_lock_ so the target thread is staying suspended until we are
1159     // done.
1160     std::unique_ptr<art::Context> context(art::Context::Create());
1161     FindFrameAtDepthVisitor final_frame(target_, context.get(), 0);
1162     FindFrameAtDepthVisitor penultimate_frame(target_, context.get(), 1);
1163     final_frame.WalkStack();
1164     penultimate_frame.WalkStack();
1165 
1166     if (!final_frame.FoundFrame() || !penultimate_frame.FoundFrame()) {
1167       // Cannot do it if there is only one frame!
1168       JVMTI_LOG(INFO, env) << "Can not pop final frame off of a stack";
1169       result_ = ERR(NO_MORE_FRAMES);
1170       return;
1171     }
1172 
1173     art::ArtMethod* called_method = final_frame.GetMethod();
1174     art::ArtMethod* calling_method = penultimate_frame.GetMethod();
1175     if (!CheckFunctions(env, calling_method, called_method)) {
1176       return;
1177     }
1178     DCHECK(!called_method->IsNative()) << called_method->PrettyMethod();
1179 
1180     // From here we are sure to succeed.
1181     result_ = OK;
1182 
1183     // Get/create a shadow frame
1184     final_frame_ = final_frame.GetOrCreateShadowFrame(&created_final_frame_);
1185     penultimate_frame_ =
1186         (calling_method->IsNative()
1187              ? nullptr
1188              : penultimate_frame.GetOrCreateShadowFrame(&created_penultimate_frame_));
1189 
1190     final_frame_id_ = final_frame.GetFrameId();
1191     penultimate_frame_id_ = penultimate_frame.GetFrameId();
1192 
1193     CHECK_NE(final_frame_, penultimate_frame_) << "Frames at different depths not different!";
1194   }
1195 
1196   bool CheckFunctions(jvmtiEnv* env, art::ArtMethod* calling, art::ArtMethod* called)
1197       REQUIRES(art::Locks::thread_list_lock_, art::Locks::user_code_suspension_lock_)
1198       REQUIRES_SHARED(art::Locks::mutator_lock_);
1199 
1200   ~NonStandardExitFrames() RELEASE_SHARED(art::Locks::mutator_lock_)
1201       REQUIRES(!art::Locks::thread_list_lock_)
1202       RELEASE(art::Locks::user_code_suspension_lock_) {
1203     art::Thread* self = art::Thread::Current();
1204     DCHECK_EQ(old_state_, art::ThreadState::kNative)
1205         << "Unexpected thread state on entering PopFrame!";
1206     self->TransitionFromRunnableToSuspended(old_state_);
1207   }
1208 
1209   ScopedNoUserCodeSuspension snucs_;
1210   art::ShadowFrame* final_frame_ GUARDED_BY(art::Locks::user_code_suspension_lock_) = nullptr;
1211   art::ShadowFrame* penultimate_frame_ GUARDED_BY(art::Locks::user_code_suspension_lock_) = nullptr;
1212   bool created_final_frame_ GUARDED_BY(art::Locks::user_code_suspension_lock_) = false;
1213   bool created_penultimate_frame_ GUARDED_BY(art::Locks::user_code_suspension_lock_) = false;
1214   uint32_t final_frame_id_ GUARDED_BY(art::Locks::user_code_suspension_lock_) = -1;
1215   uint32_t penultimate_frame_id_ GUARDED_BY(art::Locks::user_code_suspension_lock_) = -1;
1216   art::Thread* target_ GUARDED_BY(art::Locks::thread_list_lock_) = nullptr;
1217   art::ThreadState old_state_ = art::ThreadState::kTerminated;
1218   jvmtiError result_ = ERR(INTERNAL);
1219 };
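
// Note (added for clarity): this RAII helper is shared by the PopFrame and ForceEarlyReturn
// paths. When it finishes with result_ == OK, final_frame_ holds a shadow frame for the frame at
// depth 0 of the suspended target thread and penultimate_frame_ one for the frame at depth 1, or
// nullptr when that calling method is native (only permitted in the force-return case).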
1220 
1221 template <>
1222 bool NonStandardExitFrames<NonStandardExitType::kForceReturn>::CheckFunctions(
1223     jvmtiEnv* env, art::ArtMethod* calling ATTRIBUTE_UNUSED, art::ArtMethod* called) {
1224   if (UNLIKELY(called->IsNative())) {
1225     result_ = ERR(OPAQUE_FRAME);
1226     JVMTI_LOG(INFO, env) << "Cannot force early return from " << called->PrettyMethod()
1227                          << " because it is native.";
1228     return false;
1229   } else {
1230     return true;
1231   }
1232 }
1233 
1234 template <>
1235 bool NonStandardExitFrames<NonStandardExitType::kPopFrame>::CheckFunctions(
1236     jvmtiEnv* env, art::ArtMethod* calling, art::ArtMethod* called) {
1237   if (UNLIKELY(calling->IsNative() || called->IsNative())) {
1238     result_ = ERR(OPAQUE_FRAME);
1239     JVMTI_LOG(INFO, env) << "Cannot force early return from " << called->PrettyMethod() << " to "
1240                          << calling->PrettyMethod() << " because at least one of them is native.";
1241     return false;
1242   } else {
1243     return true;
1244   }
1245 }
1246 
1247 class SetupMethodExitEvents {
1248  public:
1249   SetupMethodExitEvents(art::Thread* self,
1250                         EventHandler* event_handler,
1251                         jthread target) REQUIRES(!art::Locks::mutator_lock_,
1252                                                  !art::Locks::user_code_suspension_lock_,
1253                                                  !art::Locks::thread_list_lock_)
1254       : self_(self), event_handler_(event_handler), target_(target) {
1255     DCHECK(target != nullptr);
1256     art::Locks::mutator_lock_->AssertNotHeld(self_);
1257     art::Locks::user_code_suspension_lock_->AssertNotHeld(self_);
1258     art::Locks::thread_list_lock_->AssertNotHeld(self_);
1259     event_handler_->SetInternalEvent(
1260         target_, ArtJvmtiEvent::kForceEarlyReturnUpdateReturnValue, JVMTI_ENABLE);
1261   }
1262 
1263   ~SetupMethodExitEvents() REQUIRES(!art::Locks::mutator_lock_,
1264                                     !art::Locks::user_code_suspension_lock_,
1265                                     !art::Locks::thread_list_lock_) {
1266     art::Locks::mutator_lock_->AssertNotHeld(self_);
1267     art::Locks::user_code_suspension_lock_->AssertNotHeld(self_);
1268     art::Locks::thread_list_lock_->AssertNotHeld(self_);
1269     if (failed_) {
1270       event_handler_->SetInternalEvent(
1271           target_, ArtJvmtiEvent::kForceEarlyReturnUpdateReturnValue, JVMTI_DISABLE);
1272     }
1273   }
1274 
1275   void NotifyFailure() {
1276     failed_ = true;
1277   }
1278 
1279  private:
1280   art::Thread* self_;
1281   EventHandler* event_handler_;
1282   jthread target_;
1283   bool failed_ = false;
1284 };
1285 
template <typename T>
void AddDelayedMethodExitEvent(EventHandler* handler, art::ShadowFrame* frame, T value)
    REQUIRES_SHARED(art::Locks::mutator_lock_)
    REQUIRES(art::Locks::user_code_suspension_lock_, art::Locks::thread_list_lock_);

template <typename T>
void AddDelayedMethodExitEvent(EventHandler* handler, art::ShadowFrame* frame, T value) {
  art::JValue val = art::JValue::FromPrimitive(value);
  jvalue jval{ .j = val.GetJ() };
  handler->AddDelayedNonStandardExitEvent(frame, false, jval);
}

template <>
void AddDelayedMethodExitEvent<std::nullptr_t>(EventHandler* handler,
                                               art::ShadowFrame* frame,
                                               std::nullptr_t null_val ATTRIBUTE_UNUSED) {
  jvalue jval;
  memset(&jval, 0, sizeof(jval));
  handler->AddDelayedNonStandardExitEvent(frame, false, jval);
}

template <>
void AddDelayedMethodExitEvent<jobject>(EventHandler* handler,
                                        art::ShadowFrame* frame,
                                        jobject obj) {
  jvalue jval{ .l = art::Thread::Current()->GetJniEnv()->NewGlobalRef(obj) };
  handler->AddDelayedNonStandardExitEvent(frame, true, jval);
}

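// Checks that the supplied return value is compatible with the method's declared return type.
// The SIMPLE_VALID_RETURN_TYPE specializations below only compare primitive kinds; the jobject
// specialization additionally verifies assignability for non-null references.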
template <typename T>
bool ValidReturnType(art::Thread* self, art::ObjPtr<art::mirror::Class> return_type, T value)
    REQUIRES_SHARED(art::Locks::mutator_lock_)
        REQUIRES(art::Locks::user_code_suspension_lock_, art::Locks::thread_list_lock_);

#define SIMPLE_VALID_RETURN_TYPE(type, ...)                                                        \
  template <>                                                                                      \
  bool ValidReturnType<type>(art::Thread * self ATTRIBUTE_UNUSED,                                  \
                             art::ObjPtr<art::mirror::Class> return_type,                          \
                             type value ATTRIBUTE_UNUSED) {                                        \
    static constexpr std::initializer_list<art::Primitive::Type> types{ __VA_ARGS__ };             \
    return std::find(types.begin(), types.end(), return_type->GetPrimitiveType()) != types.end();  \
  }

SIMPLE_VALID_RETURN_TYPE(jlong, art::Primitive::kPrimLong);
SIMPLE_VALID_RETURN_TYPE(jfloat, art::Primitive::kPrimFloat);
SIMPLE_VALID_RETURN_TYPE(jdouble, art::Primitive::kPrimDouble);
SIMPLE_VALID_RETURN_TYPE(nullptr_t, art::Primitive::kPrimVoid);
SIMPLE_VALID_RETURN_TYPE(jint,
                         art::Primitive::kPrimInt,
                         art::Primitive::kPrimChar,
                         art::Primitive::kPrimBoolean,
                         art::Primitive::kPrimShort,
                         art::Primitive::kPrimByte);
#undef SIMPLE_VALID_RETURN_TYPE

template <>
bool ValidReturnType<jobject>(art::Thread* self,
                              art::ObjPtr<art::mirror::Class> return_type,
                              jobject return_value) {
  if (return_type->IsPrimitive()) {
    return false;
  }
  if (return_value == nullptr) {
    // Null can be used for anything.
    return true;
  }
  return return_type->IsAssignableFrom(self->DecodeJObject(return_value)->GetClass());
}

}  // namespace

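// Pops the most-recently pushed frame of the target thread. The popped frame returns without
// executing any further instructions and without sending method-exit events, and the caller is
// set up to re-execute the invoke that created the popped frame.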
jvmtiError StackUtil::PopFrame(jvmtiEnv* env, jthread thread) {
  art::Thread* self = art::Thread::Current();
  NonStandardExitFrames<NonStandardExitType::kPopFrame> frames(self, env, thread);
  if (frames.result_ != OK) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(self);
    return frames.result_;
  }
  // Tell the shadow-frame to return immediately and skip all exit events.
  frames.penultimate_frame_->SetForceRetryInstruction(true);
  frames.final_frame_->SetForcePopFrame(true);
  frames.final_frame_->SetSkipMethodExitEvents(true);
  if (frames.created_final_frame_ || frames.created_penultimate_frame_) {
    art::FunctionClosure fc([](art::Thread* self) REQUIRES_SHARED(art::Locks::mutator_lock_) {
      DeoptManager::Get()->DeoptimizeThread(self);
    });
    frames.target_->RequestSynchronousCheckpoint(&fc);
  } else {
    art::Locks::thread_list_lock_->ExclusiveUnlock(self);
  }
  return OK;
}

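// Forces the topmost frame of the target thread to return 'value' immediately. Unlike PopFrame,
// method-exit events are still delivered, which is why the update-return-value event is enabled
// up front via SetupMethodExitEvents.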
template <typename T>
jvmtiError
StackUtil::ForceEarlyReturn(jvmtiEnv* env, EventHandler* event_handler, jthread thread, T value) {
  art::Thread* self = art::Thread::Current();
  // We don't want to use the null == current-thread idiom here, since for the events we use
  // internally to implement force-early-return, null instead means all threads. Just get the
  // current jthread explicitly if needed.
  ScopedLocalRef<jthread> cur_thread(self->GetJniEnv(), nullptr);
  if (UNLIKELY(thread == nullptr)) {
    art::ScopedObjectAccess soa(self);
    cur_thread.reset(soa.AddLocalReference<jthread>(self->GetPeer()));
    thread = cur_thread.get();
  }
  // Set up the exit events we use to implement early return before taking the locks; thanks to
  // destructor ordering they are torn down again if anything below fails.
  SetupMethodExitEvents smee(self, event_handler, thread);
  NonStandardExitFrames<NonStandardExitType::kForceReturn> frames(self, env, thread);
  if (frames.result_ != OK) {
    smee.NotifyFailure();
    art::Locks::thread_list_lock_->ExclusiveUnlock(self);
    return frames.result_;
  } else if (!ValidReturnType<T>(
                 self, frames.final_frame_->GetMethod()->ResolveReturnType(), value)) {
    smee.NotifyFailure();
    art::Locks::thread_list_lock_->ExclusiveUnlock(self);
    return ERR(TYPE_MISMATCH);
  } else if (frames.final_frame_->GetForcePopFrame()) {
    // TODO We should really support this.
    smee.NotifyFailure();
    std::string thread_name;
    frames.target_->GetThreadName(thread_name);
    JVMTI_LOG(WARNING, env) << "PopFrame or force-return already pending on thread " << thread_name;
    art::Locks::thread_list_lock_->ExclusiveUnlock(self);
    return ERR(OPAQUE_FRAME);
  }
  // Tell the shadow-frame to return immediately.
  frames.final_frame_->SetForcePopFrame(true);
  AddDelayedMethodExitEvent<T>(event_handler, frames.final_frame_, value);
  if (frames.created_final_frame_ || frames.created_penultimate_frame_) {
    art::FunctionClosure fc([](art::Thread* self) REQUIRES_SHARED(art::Locks::mutator_lock_) {
      DeoptManager::Get()->DeoptimizeThread(self);
    });
    frames.target_->RequestSynchronousCheckpoint(&fc);
  } else {
    art::Locks::thread_list_lock_->ExclusiveUnlock(self);
  }
  return OK;
}

// Instantiate the ForceEarlyReturn templates.
template jvmtiError StackUtil::ForceEarlyReturn(jvmtiEnv*, EventHandler*, jthread, jint);
template jvmtiError StackUtil::ForceEarlyReturn(jvmtiEnv*, EventHandler*, jthread, jlong);
template jvmtiError StackUtil::ForceEarlyReturn(jvmtiEnv*, EventHandler*, jthread, jfloat);
template jvmtiError StackUtil::ForceEarlyReturn(jvmtiEnv*, EventHandler*, jthread, jdouble);
template jvmtiError StackUtil::ForceEarlyReturn(jvmtiEnv*, EventHandler*, jthread, jobject);
template jvmtiError StackUtil::ForceEarlyReturn(jvmtiEnv*, EventHandler*, jthread, nullptr_t);

}  // namespace openjdkjvmti