/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * Mterp entry point and support functions.
 */
#include "mterp.h"

#include "base/quasi_atomic.h"
#include "debugger.h"
#include "entrypoints/entrypoint_utils-inl.h"
#include "interpreter/interpreter_common.h"
#include "interpreter/interpreter_intrinsics.h"
#include "interpreter/shadow_frame-inl.h"
#include "mirror/string-alloc-inl.h"

namespace art {
namespace interpreter {
/*
 * Verify some constants used by the mterp interpreter.
 */
void CheckMterpAsmConstants() {
  /*
   * If we're using computed goto instruction transitions, make sure
   * none of the handlers overflows the byte limit.  This won't tell
   * which one did, but if any one is too big the total size will
   * overflow.
   */
  const int width = kMterpHandlerSize;
  int interp_size = (uintptr_t) artMterpAsmInstructionEnd -
                    (uintptr_t) artMterpAsmInstructionStart;
  if ((interp_size == 0) || (interp_size != (art::kNumPackedOpcodes * width))) {
      LOG(FATAL) << "ERROR: unexpected asm interp size " << interp_size
                 << " (did an instruction handler exceed " << width << " bytes?)";
  }
}
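
// For scale (illustrative numbers only, not necessarily the configured
// values): with a handler size of 128 bytes and 256 packed opcodes, the span
// checked above would have to be exactly 256 * 128 = 32768 bytes. A single
// oversized handler shifts every subsequent handler and inflates the total.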

void InitMterpTls(Thread* self) {
  self->SetMterpCurrentIBase(artMterpAsmInstructionStart);
}

/*
 * Find the matching case.  Returns the offset to the handler instructions.
 *
 * Returns 3 if we don't find a match (it's the size of the sparse-switch
 * instruction).
 */
extern "C" ssize_t MterpDoSparseSwitch(const uint16_t* switchData, int32_t testVal) {
  const int kInstrLen = 3;
  uint16_t size;
  const int32_t* keys;
  const int32_t* entries;

  /*
   * Sparse switch data format:
   *  ushort ident = 0x0200   magic value
   *  ushort size             number of entries in the table; > 0
   *  int keys[size]          keys, sorted low-to-high; 32-bit aligned
   *  int targets[size]       branch targets, relative to switch opcode
   *
   * Total size is (2+size*4) 16-bit code units.
   */

  uint16_t signature = *switchData++;
  DCHECK_EQ(signature, static_cast<uint16_t>(art::Instruction::kSparseSwitchSignature));

  size = *switchData++;

  /* The keys are guaranteed to be aligned on a 32-bit boundary;
   * we can treat them as a native int array.
   */
  keys = reinterpret_cast<const int32_t*>(switchData);

  /* The entries are guaranteed to be aligned on a 32-bit boundary;
   * we can treat them as a native int array.
   */
  entries = keys + size;

  /*
   * Binary-search through the array of keys, which are guaranteed to
   * be sorted low-to-high.
   */
  int lo = 0;
  int hi = size - 1;
  while (lo <= hi) {
    int mid = (lo + hi) >> 1;

    int32_t foundVal = keys[mid];
    if (testVal < foundVal) {
      hi = mid - 1;
    } else if (testVal > foundVal) {
      lo = mid + 1;
    } else {
      return entries[mid];
    }
  }
  return kInstrLen;
}
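
// Illustrative sketch of the payload layout, not used by the interpreter
// itself (assumes a little-endian host, so adjacent uint16_t pairs read back
// as int32_t values; the key/target numbers are hypothetical — real payloads
// are emitted by the dex compiler into the code item):
//
//   alignas(4) static const uint16_t payload[] = {
//       0x0200,          // ident: kSparseSwitchSignature
//       2,               // size
//       10, 0, 100, 0,   // keys[2]    = {10, 100}
//       4, 0, 9, 0,      // targets[2] = {+4, +9}
//   };
//   MterpDoSparseSwitch(payload, 100);  // returns 9
//   MterpDoSparseSwitch(payload, 42);   // no match: returns 3 (kInstrLen)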

extern "C" ssize_t MterpDoPackedSwitch(const uint16_t* switchData, int32_t testVal) {
  const int kInstrLen = 3;

  /*
   * Packed switch data format:
   *  ushort ident = 0x0100   magic value
   *  ushort size             number of entries in the table
   *  int first_key           first (and lowest) switch case value
   *  int targets[size]       branch targets, relative to switch opcode
   *
   * Total size is (4+size*2) 16-bit code units.
   */
  uint16_t signature = *switchData++;
  DCHECK_EQ(signature, static_cast<uint16_t>(art::Instruction::kPackedSwitchSignature));

  uint16_t size = *switchData++;

  int32_t firstKey = *switchData++;
  firstKey |= (*switchData++) << 16;

  int index = testVal - firstKey;
  if (index < 0 || index >= size) {
    return kInstrLen;
  }

  /*
   * The entries are guaranteed to be aligned on a 32-bit boundary;
   * we can treat them as a native int array.
   */
  const int32_t* entries = reinterpret_cast<const int32_t*>(switchData);
  return entries[index];
}
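
// A matching sketch for the packed variant (same little-endian caveat,
// hypothetical values): first_key = 3 with three consecutive cases:
//
//   alignas(4) static const uint16_t payload[] = {
//       0x0100,             // ident: kPackedSwitchSignature
//       3,                  // size
//       3, 0,               // first_key = 3
//       4, 0, 7, 0, 10, 0,  // targets[3] = {+4, +7, +10}
//   };
//   MterpDoPackedSwitch(payload, 4);   // index 1: returns 7
//   MterpDoPackedSwitch(payload, 99);  // out of range: returns 3 (kInstrLen)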

bool CanUseMterp()
    REQUIRES_SHARED(Locks::mutator_lock_) {
  const Runtime* const runtime = Runtime::Current();
  return
      !runtime->IsAotCompiler() &&
      !runtime->GetInstrumentation()->IsActive() &&
      // Mterp only knows how to deal with the normal exits. It cannot handle any of the
      // non-standard force-returns.
      !runtime->AreNonStandardExitsEnabled() &&
      // If an async exception has been thrown, we need to go to the switch interpreter.
      // Mterp doesn't know how to deal with these, so we could end up never handling it
      // if we are in an infinite loop.
      !runtime->AreAsyncExceptionsThrown() &&
      (runtime->GetJit() == nullptr || !runtime->GetJit()->JitAtFirstUse());
}


extern "C" size_t MterpInvokeVirtual(Thread* self,
                                     ShadowFrame* shadow_frame,
                                     uint16_t* dex_pc_ptr,
                                     uint16_t inst_data)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoInvoke<kVirtual, /*is_range=*/ false, /*do_access_check=*/ false, /*is_mterp=*/ true>(
      self, *shadow_frame, inst, inst_data, result_register) ? 1u : 0u;
}

extern "C" size_t MterpInvokeSuper(Thread* self,
                                   ShadowFrame* shadow_frame,
                                   uint16_t* dex_pc_ptr,
                                   uint16_t inst_data)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoInvoke<kSuper, /*is_range=*/ false, /*do_access_check=*/ false, /*is_mterp=*/ true>(
      self, *shadow_frame, inst, inst_data, result_register) ? 1u : 0u;
}

extern "C" size_t MterpInvokeInterface(Thread* self,
                                       ShadowFrame* shadow_frame,
                                       uint16_t* dex_pc_ptr,
                                       uint16_t inst_data)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoInvoke<kInterface, /*is_range=*/ false, /*do_access_check=*/ false, /*is_mterp=*/ true>(
      self, *shadow_frame, inst, inst_data, result_register) ? 1u : 0u;
}

extern "C" size_t MterpInvokeDirect(Thread* self,
                                    ShadowFrame* shadow_frame,
                                    uint16_t* dex_pc_ptr,
                                    uint16_t inst_data)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoInvoke<kDirect, /*is_range=*/ false, /*do_access_check=*/ false, /*is_mterp=*/ true>(
      self, *shadow_frame, inst, inst_data, result_register) ? 1u : 0u;
}

extern "C" size_t MterpInvokeStatic(Thread* self,
                                    ShadowFrame* shadow_frame,
                                    uint16_t* dex_pc_ptr,
                                    uint16_t inst_data)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoInvoke<kStatic, /*is_range=*/ false, /*do_access_check=*/ false, /*is_mterp=*/ true>(
      self, *shadow_frame, inst, inst_data, result_register) ? 1u : 0u;
}

extern "C" size_t MterpInvokeCustom(Thread* self,
                                    ShadowFrame* shadow_frame,
                                    uint16_t* dex_pc_ptr,
                                    uint16_t inst_data)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoInvokeCustom</*is_range=*/ false>(
      self, *shadow_frame, inst, inst_data, result_register) ? 1u : 0u;
}

extern "C" size_t MterpInvokePolymorphic(Thread* self,
                                         ShadowFrame* shadow_frame,
                                         uint16_t* dex_pc_ptr,
                                         uint16_t inst_data)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoInvokePolymorphic</*is_range=*/ false>(
      self, *shadow_frame, inst, inst_data, result_register) ? 1u : 0u;
}

extern "C" size_t MterpInvokeVirtualRange(Thread* self,
                                          ShadowFrame* shadow_frame,
                                          uint16_t* dex_pc_ptr,
                                          uint16_t inst_data)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoInvoke<kVirtual, /*is_range=*/ true, /*do_access_check=*/ false, /*is_mterp=*/ true>(
      self, *shadow_frame, inst, inst_data, result_register) ? 1u : 0u;
}

extern "C" size_t MterpInvokeSuperRange(Thread* self,
                                        ShadowFrame* shadow_frame,
                                        uint16_t* dex_pc_ptr,
                                        uint16_t inst_data)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoInvoke<kSuper, /*is_range=*/ true, /*do_access_check=*/ false, /*is_mterp=*/ true>(
      self, *shadow_frame, inst, inst_data, result_register) ? 1u : 0u;
}

extern "C" size_t MterpInvokeInterfaceRange(Thread* self,
                                            ShadowFrame* shadow_frame,
                                            uint16_t* dex_pc_ptr,
                                            uint16_t inst_data)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoInvoke<kInterface, /*is_range=*/ true, /*do_access_check=*/ false, /*is_mterp=*/ true>(
      self, *shadow_frame, inst, inst_data, result_register) ? 1u : 0u;
}

extern "C" size_t MterpInvokeDirectRange(Thread* self,
                                         ShadowFrame* shadow_frame,
                                         uint16_t* dex_pc_ptr,
                                         uint16_t inst_data)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoInvoke<kDirect, /*is_range=*/ true, /*do_access_check=*/ false, /*is_mterp=*/ true>(
      self, *shadow_frame, inst, inst_data, result_register) ? 1u : 0u;
}

extern "C" size_t MterpInvokeStaticRange(Thread* self,
                                         ShadowFrame* shadow_frame,
                                         uint16_t* dex_pc_ptr,
                                         uint16_t inst_data)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoInvoke<kStatic, /*is_range=*/ true, /*do_access_check=*/ false, /*is_mterp=*/ true>(
      self, *shadow_frame, inst, inst_data, result_register) ? 1u : 0u;
}

extern "C" size_t MterpInvokeCustomRange(Thread* self,
                                         ShadowFrame* shadow_frame,
                                         uint16_t* dex_pc_ptr,
                                         uint16_t inst_data)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoInvokeCustom</*is_range=*/ true>(
      self, *shadow_frame, inst, inst_data, result_register) ? 1u : 0u;
}

extern "C" size_t MterpInvokePolymorphicRange(Thread* self,
                                              ShadowFrame* shadow_frame,
                                              uint16_t* dex_pc_ptr,
                                              uint16_t inst_data)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoInvokePolymorphic</*is_range=*/ true>(
      self, *shadow_frame, inst, inst_data, result_register) ? 1u : 0u;
}

extern "C" size_t MterpInvokeVirtualQuick(Thread* self,
                                          ShadowFrame* shadow_frame,
                                          uint16_t* dex_pc_ptr,
                                          uint16_t inst_data)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoInvoke<kVirtual, /*is_range=*/ false, /*do_access_check=*/ false, /*is_mterp=*/ true,
      /*is_quick=*/ true>(self, *shadow_frame, inst, inst_data, result_register) ? 1u : 0u;
}

extern "C" size_t MterpInvokeVirtualQuickRange(Thread* self,
                                               ShadowFrame* shadow_frame,
                                               uint16_t* dex_pc_ptr,
                                               uint16_t inst_data)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoInvoke<kVirtual, /*is_range=*/ true, /*do_access_check=*/ false, /*is_mterp=*/ true,
      /*is_quick=*/ true>(self, *shadow_frame, inst, inst_data, result_register) ? 1u : 0u;
}

extern "C" void MterpThreadFenceForConstructor() {
  QuasiAtomic::ThreadFenceForConstructor();
}

extern "C" size_t MterpConstString(uint32_t index,
                                   uint32_t tgt_vreg,
                                   ShadowFrame* shadow_frame,
                                   Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ObjPtr<mirror::String> s = ResolveString(self, *shadow_frame, dex::StringIndex(index));
  if (UNLIKELY(s == nullptr)) {
    return 1u;
  }
  shadow_frame->SetVRegReference(tgt_vreg, s);
  return 0u;
}

extern "C" size_t MterpConstClass(uint32_t index,
                                  uint32_t tgt_vreg,
                                  ShadowFrame* shadow_frame,
                                  Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ObjPtr<mirror::Class> c = ResolveVerifyAndClinit(dex::TypeIndex(index),
                                                   shadow_frame->GetMethod(),
                                                   self,
                                                   /* can_run_clinit= */ false,
                                                   /* verify_access= */ false);
  if (UNLIKELY(c == nullptr)) {
    return 1u;
  }
  shadow_frame->SetVRegReference(tgt_vreg, c);
  return 0u;
}

extern "C" size_t MterpConstMethodHandle(uint32_t index,
                                         uint32_t tgt_vreg,
                                         ShadowFrame* shadow_frame,
                                         Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ObjPtr<mirror::MethodHandle> mh = ResolveMethodHandle(self, index, shadow_frame->GetMethod());
  if (UNLIKELY(mh == nullptr)) {
    return 1u;
  }
  shadow_frame->SetVRegReference(tgt_vreg, mh);
  return 0u;
}

extern "C" size_t MterpConstMethodType(uint32_t index,
                                       uint32_t tgt_vreg,
                                       ShadowFrame* shadow_frame,
                                       Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ObjPtr<mirror::MethodType> mt =
      ResolveMethodType(self, dex::ProtoIndex(index), shadow_frame->GetMethod());
  if (UNLIKELY(mt == nullptr)) {
    return 1u;
  }
  shadow_frame->SetVRegReference(tgt_vreg, mt);
  return 0u;
}

extern "C" size_t MterpCheckCast(uint32_t index,
                                 StackReference<mirror::Object>* vreg_addr,
                                 art::ArtMethod* method,
                                 Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ObjPtr<mirror::Class> c = ResolveVerifyAndClinit(dex::TypeIndex(index),
                                                   method,
                                                   self,
                                                   false,
                                                   false);
  if (UNLIKELY(c == nullptr)) {
    return 1u;
  }
  // Must load obj from vreg following ResolveVerifyAndClinit due to moving gc.
  ObjPtr<mirror::Object> obj = vreg_addr->AsMirrorPtr();
  if (UNLIKELY(obj != nullptr && !obj->InstanceOf(c))) {
    ThrowClassCastException(c, obj->GetClass());
    return 1u;
  }
  return 0u;
}

extern "C" size_t MterpInstanceOf(uint32_t index,
                                  StackReference<mirror::Object>* vreg_addr,
                                  art::ArtMethod* method,
                                  Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ObjPtr<mirror::Class> c = ResolveVerifyAndClinit(dex::TypeIndex(index),
                                                   method,
                                                   self,
                                                   false,
                                                   false);
  if (UNLIKELY(c == nullptr)) {
    return 0u;  // Caller will check for pending exception.  Return value unimportant.
  }
  // Must load obj from vreg following ResolveVerifyAndClinit due to moving gc.
  ObjPtr<mirror::Object> obj = vreg_addr->AsMirrorPtr();
  return (obj != nullptr) && obj->InstanceOf(c) ? 1u : 0u;
}

extern "C" size_t MterpFillArrayData(mirror::Object* obj,
                                     const Instruction::ArrayDataPayload* payload)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  return FillArrayData(obj, payload) ? 1u : 0u;
}

extern "C" size_t MterpNewInstance(ShadowFrame* shadow_frame, Thread* self, uint32_t inst_data)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
  ObjPtr<mirror::Object> obj = nullptr;
  ObjPtr<mirror::Class> c = ResolveVerifyAndClinit(dex::TypeIndex(inst->VRegB_21c()),
                                                   shadow_frame->GetMethod(),
                                                   self,
                                                   /* can_run_clinit= */ false,
                                                   /* verify_access= */ false);
  if (LIKELY(c != nullptr)) {
    if (UNLIKELY(c->IsStringClass())) {
      gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
      obj = mirror::String::AllocEmptyString(self, allocator_type);
    } else {
      obj = AllocObjectFromCode(c, self, Runtime::Current()->GetHeap()->GetCurrentAllocator());
    }
  }
  if (UNLIKELY(obj == nullptr)) {
    return 0u;
  }
  obj->GetClass()->AssertInitializedOrInitializingInThread(self);
  shadow_frame->SetVRegReference(inst->VRegA_21c(inst_data), obj);
  return 1u;
}

extern "C" size_t MterpIputObjectQuick(ShadowFrame* shadow_frame,
                                       uint16_t* dex_pc_ptr,
                                       uint32_t inst_data)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoIPutQuick<Primitive::kPrimNot, false>(*shadow_frame, inst, inst_data) ? 1u : 0u;
}

extern "C" size_t MterpAputObject(ShadowFrame* shadow_frame,
                                  uint16_t* dex_pc_ptr,
                                  uint32_t inst_data)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  ObjPtr<mirror::Object> a = shadow_frame->GetVRegReference(inst->VRegB_23x());
  if (UNLIKELY(a == nullptr)) {
    return 0u;
  }
  int32_t index = shadow_frame->GetVReg(inst->VRegC_23x());
  ObjPtr<mirror::Object> val = shadow_frame->GetVRegReference(inst->VRegA_23x(inst_data));
  ObjPtr<mirror::ObjectArray<mirror::Object>> array = a->AsObjectArray<mirror::Object>();
  if (array->CheckIsValidIndex(index) && array->CheckAssignable(val)) {
    array->SetWithoutChecks<false>(index, val);
    return 1u;
  }
  return 0u;
}

extern "C" size_t MterpFilledNewArray(ShadowFrame* shadow_frame,
                                      uint16_t* dex_pc_ptr,
                                      Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoFilledNewArray<false, false, false>(inst, *shadow_frame, self,
                                               shadow_frame->GetResultRegister()) ? 1u : 0u;
}

extern "C" size_t MterpFilledNewArrayRange(ShadowFrame* shadow_frame,
                                           uint16_t* dex_pc_ptr,
                                           Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoFilledNewArray<true, false, false>(inst, *shadow_frame, self,
                                              shadow_frame->GetResultRegister()) ? 1u : 0u;
}

extern "C" size_t MterpNewArray(ShadowFrame* shadow_frame,
                                uint16_t* dex_pc_ptr,
                                uint32_t inst_data, Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  int32_t length = shadow_frame->GetVReg(inst->VRegB_22c(inst_data));
  ObjPtr<mirror::Object> obj = AllocArrayFromCode</*kAccessCheck=*/ false>(
      dex::TypeIndex(inst->VRegC_22c()), length, shadow_frame->GetMethod(), self,
      Runtime::Current()->GetHeap()->GetCurrentAllocator());
  if (UNLIKELY(obj == nullptr)) {
    return 0u;
  }
  shadow_frame->SetVRegReference(inst->VRegA_22c(inst_data), obj);
  return 1u;
}

extern "C" size_t MterpHandleException(Thread* self, ShadowFrame* shadow_frame)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  DCHECK(self->IsExceptionPending());
  const instrumentation::Instrumentation* const instrumentation =
      Runtime::Current()->GetInstrumentation();
  return MoveToExceptionHandler(self, *shadow_frame, instrumentation) ? 1u : 0u;
}

struct MterpCheckHelper {
  DECLARE_RUNTIME_DEBUG_FLAG(kSlowMode);
};
DEFINE_RUNTIME_DEBUG_FLAG(MterpCheckHelper, kSlowMode);

extern "C" void MterpCheckBefore(Thread* self, ShadowFrame* shadow_frame, uint16_t* dex_pc_ptr)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  // Check that we are using the right interpreter.
  if (kIsDebugBuild && self->UseMterp() != CanUseMterp()) {
    // The flag might be currently being updated on all threads. Retry with lock.
    MutexLock tll_mu(self, *Locks::thread_list_lock_);
    DCHECK_EQ(self->UseMterp(), CanUseMterp());
  }
  DCHECK(!Runtime::Current()->IsActiveTransaction());
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  uint16_t inst_data = inst->Fetch16(0);
  if (inst->Opcode(inst_data) == Instruction::MOVE_EXCEPTION) {
    self->AssertPendingException();
  } else {
    self->AssertNoPendingException();
  }
  if (kTraceExecutionEnabled) {
    uint32_t dex_pc = dex_pc_ptr - shadow_frame->GetDexInstructions();
    TraceExecution(*shadow_frame, inst, dex_pc);
  }
  if (kTestExportPC) {
    // Save invalid dex pc to force segfault if improperly used.
    shadow_frame->SetDexPCPtr(reinterpret_cast<uint16_t*>(kExportPCPoison));
  }
  if (MterpCheckHelper::kSlowMode) {
    shadow_frame->CheckConsistentVRegs();
  }
}

extern "C" void MterpLogDivideByZeroException(Thread* self, ShadowFrame* shadow_frame)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  UNUSED(self);
  const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
  uint16_t inst_data = inst->Fetch16(0);
  LOG(INFO) << "DivideByZero: " << inst->Opcode(inst_data);
}

extern "C" void MterpLogArrayIndexException(Thread* self, ShadowFrame* shadow_frame)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  UNUSED(self);
  const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
  uint16_t inst_data = inst->Fetch16(0);
  LOG(INFO) << "ArrayIndex: " << inst->Opcode(inst_data);
}

extern "C" void MterpLogNegativeArraySizeException(Thread* self, ShadowFrame* shadow_frame)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  UNUSED(self);
  const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
  uint16_t inst_data = inst->Fetch16(0);
  LOG(INFO) << "NegativeArraySize: " << inst->Opcode(inst_data);
}

extern "C" void MterpLogNoSuchMethodException(Thread* self, ShadowFrame* shadow_frame)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  UNUSED(self);
  const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
  uint16_t inst_data = inst->Fetch16(0);
  LOG(INFO) << "NoSuchMethod: " << inst->Opcode(inst_data);
}

extern "C" void MterpLogExceptionThrownException(Thread* self, ShadowFrame* shadow_frame)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  UNUSED(self);
  const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
  uint16_t inst_data = inst->Fetch16(0);
  LOG(INFO) << "ExceptionThrown: " << inst->Opcode(inst_data);
}

extern "C" void MterpLogNullObjectException(Thread* self, ShadowFrame* shadow_frame)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  UNUSED(self);
  const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
  uint16_t inst_data = inst->Fetch16(0);
  LOG(INFO) << "NullObject: " << inst->Opcode(inst_data);
}

extern "C" void MterpLogFallback(Thread* self, ShadowFrame* shadow_frame)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
  uint16_t inst_data = inst->Fetch16(0);
  LOG(INFO) << "Fallback: " << inst->Opcode(inst_data) << ", Exception Pending?: "
            << self->IsExceptionPending();
}

extern "C" void MterpLogOSR(Thread* self, ShadowFrame* shadow_frame, int32_t offset)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  UNUSED(self);
  const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
  uint16_t inst_data = inst->Fetch16(0);
  LOG(INFO) << "OSR: " << inst->Opcode(inst_data) << ", offset = " << offset;
}

extern "C" void MterpLogSuspendFallback(Thread* self, ShadowFrame* shadow_frame, uint32_t flags)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  UNUSED(self);
  const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
  uint16_t inst_data = inst->Fetch16(0);
  if (flags & kCheckpointRequest) {
    LOG(INFO) << "Checkpoint fallback: " << inst->Opcode(inst_data);
  } else if (flags & kSuspendRequest) {
    LOG(INFO) << "Suspend fallback: " << inst->Opcode(inst_data);
  } else if (flags & kEmptyCheckpointRequest) {
    LOG(INFO) << "Empty checkpoint fallback: " << inst->Opcode(inst_data);
  }
}

extern "C" size_t MterpSuspendCheck(Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  self->AllowThreadSuspension();
  return !self->UseMterp();
}

// Execute a single field access instruction (get/put, static/instance).
// The template arguments reduce this to a fairly small amount of code.
// It requires the target object and field to be already resolved.
template<typename PrimType, FindFieldType kAccessType>
ALWAYS_INLINE void MterpFieldAccess(Instruction* inst,
                                    uint16_t inst_data,
                                    ShadowFrame* shadow_frame,
                                    ObjPtr<mirror::Object> obj,
                                    MemberOffset offset,
                                    bool is_volatile)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  static_assert(std::is_integral<PrimType>::value, "Unexpected primitive type");
  constexpr bool kIsStatic = (kAccessType & FindFieldFlags::StaticBit) != 0;
  constexpr bool kIsPrimitive = (kAccessType & FindFieldFlags::PrimitiveBit) != 0;
  constexpr bool kIsRead = (kAccessType & FindFieldFlags::ReadBit) != 0;

  uint16_t vRegA = kIsStatic ? inst->VRegA_21c(inst_data) : inst->VRegA_22c(inst_data);
  if (kIsPrimitive) {
    if (kIsRead) {
      PrimType value = UNLIKELY(is_volatile)
          ? obj->GetFieldPrimitive<PrimType, /*kIsVolatile=*/ true>(offset)
          : obj->GetFieldPrimitive<PrimType, /*kIsVolatile=*/ false>(offset);
      if (sizeof(PrimType) == sizeof(uint64_t)) {
        shadow_frame->SetVRegLong(vRegA, value);  // Set two consecutive registers.
      } else {
        shadow_frame->SetVReg(vRegA, static_cast<int32_t>(value));  // Sign/zero extend.
      }
    } else {  // Write.
      uint64_t value = (sizeof(PrimType) == sizeof(uint64_t))
          ? shadow_frame->GetVRegLong(vRegA)
          : shadow_frame->GetVReg(vRegA);
      if (UNLIKELY(is_volatile)) {
        obj->SetFieldPrimitive<PrimType, /*kIsVolatile=*/ true>(offset, value);
      } else {
        obj->SetFieldPrimitive<PrimType, /*kIsVolatile=*/ false>(offset, value);
      }
    }
  } else {  // Object.
    if (kIsRead) {
      ObjPtr<mirror::Object> value = UNLIKELY(is_volatile)
          ? obj->GetFieldObjectVolatile<mirror::Object>(offset)
          : obj->GetFieldObject<mirror::Object>(offset);
      shadow_frame->SetVRegReference(vRegA, value);
    } else {  // Write.
      ObjPtr<mirror::Object> value = shadow_frame->GetVRegReference(vRegA);
      if (UNLIKELY(is_volatile)) {
        obj->SetFieldObjectVolatile</*kTransactionActive=*/ false>(offset, value);
      } else {
        obj->SetFieldObject</*kTransactionActive=*/ false>(offset, value);
      }
    }
  }
}
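
// For example, an iget-byte handler ends up in
// MterpFieldAccess<int8_t, InstancePrimitiveRead> (reached via the MterpIGetI8
// entry point generated below), while sput-object ends up in
// MterpFieldAccess<uint32_t, StaticObjectWrite>.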

template<typename PrimType, FindFieldType kAccessType>
NO_INLINE bool MterpFieldAccessSlow(Instruction* inst,
                                    uint16_t inst_data,
                                    ShadowFrame* shadow_frame,
                                    Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  constexpr bool kIsStatic = (kAccessType & FindFieldFlags::StaticBit) != 0;
  constexpr bool kIsRead = (kAccessType & FindFieldFlags::ReadBit) != 0;

  // Update the dex pc in shadow frame, just in case anything throws.
  shadow_frame->SetDexPCPtr(reinterpret_cast<uint16_t*>(inst));
  ArtMethod* referrer = shadow_frame->GetMethod();
  uint32_t field_idx = kIsStatic ? inst->VRegB_21c() : inst->VRegC_22c();
  ArtField* field = FindFieldFromCode<kAccessType, /* access_checks= */ false>(
      field_idx, referrer, self, sizeof(PrimType));
  if (UNLIKELY(field == nullptr)) {
    DCHECK(self->IsExceptionPending());
    return false;
  }
  ObjPtr<mirror::Object> obj = kIsStatic
      ? field->GetDeclaringClass().Ptr()
      : shadow_frame->GetVRegReference(inst->VRegB_22c(inst_data));
  if (UNLIKELY(obj == nullptr)) {
    ThrowNullPointerExceptionForFieldAccess(field, kIsRead);
    return false;
  }
  MterpFieldAccess<PrimType, kAccessType>(
      inst, inst_data, shadow_frame, obj, field->GetOffset(), field->IsVolatile());
  return true;
}

// This method is called from assembly to handle field access instructions.
//
// This method is fairly hot.  It is long, but it has been carefully optimized.
// It contains only fully inlined methods -> no spills -> no prologue/epilogue.
template<typename PrimType, FindFieldType kAccessType>
ALWAYS_INLINE bool MterpFieldAccessFast(Instruction* inst,
                                        uint16_t inst_data,
                                        ShadowFrame* shadow_frame,
                                        Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  constexpr bool kIsStatic = (kAccessType & FindFieldFlags::StaticBit) != 0;

  // Try to find the field in the small thread-local cache first.
  InterpreterCache* tls_cache = self->GetInterpreterCache();
  size_t tls_value;
  if (LIKELY(tls_cache->Get(inst, &tls_value))) {
    // The meaning of the cache value is opcode-specific.
    // It is ArtField* for static fields and the raw offset for instance fields.
    size_t offset = kIsStatic
        ? reinterpret_cast<ArtField*>(tls_value)->GetOffset().SizeValue()
        : tls_value;
    if (kIsDebugBuild) {
      uint32_t field_idx = kIsStatic ? inst->VRegB_21c() : inst->VRegC_22c();
      ArtField* field = FindFieldFromCode<kAccessType, /* access_checks= */ false>(
          field_idx, shadow_frame->GetMethod(), self, sizeof(PrimType));
      DCHECK_EQ(offset, field->GetOffset().SizeValue());
    }
    ObjPtr<mirror::Object> obj = kIsStatic
        ? reinterpret_cast<ArtField*>(tls_value)->GetDeclaringClass()
        : ObjPtr<mirror::Object>(shadow_frame->GetVRegReference(inst->VRegB_22c(inst_data)));
    if (LIKELY(obj != nullptr)) {
      MterpFieldAccess<PrimType, kAccessType>(
          inst, inst_data, shadow_frame, obj, MemberOffset(offset), /* is_volatile= */ false);
      return true;
    }
  }

  // This effectively inlines the fast path from ArtMethod::GetDexCache.
  ArtMethod* referrer = shadow_frame->GetMethod();
  if (LIKELY(!referrer->IsObsolete())) {
    // Avoid read barriers, since we need only the pointer to the native (non-movable)
    // DexCache field array which we can get even through from-space objects.
    ObjPtr<mirror::Class> klass = referrer->GetDeclaringClass<kWithoutReadBarrier>();
    ObjPtr<mirror::DexCache> dex_cache =
        klass->GetDexCache<kDefaultVerifyFlags, kWithoutReadBarrier>();

    // Try to find the desired field in DexCache.
    uint32_t field_idx = kIsStatic ? inst->VRegB_21c() : inst->VRegC_22c();
    ArtField* field = dex_cache->GetResolvedField(field_idx, kRuntimePointerSize);
    if (LIKELY(field != nullptr)) {
      bool visibly_initialized = !kIsStatic || field->GetDeclaringClass()->IsVisiblyInitialized();
      if (LIKELY(visibly_initialized)) {
        DCHECK_EQ(field, (FindFieldFromCode<kAccessType, /* access_checks= */ false>(
            field_idx, referrer, self, sizeof(PrimType))));
        ObjPtr<mirror::Object> obj = kIsStatic
            ? field->GetDeclaringClass().Ptr()
            : shadow_frame->GetVRegReference(inst->VRegB_22c(inst_data));
        if (LIKELY(kIsStatic || obj != nullptr)) {
          // Only non-volatile fields are allowed in the thread-local cache.
          if (LIKELY(!field->IsVolatile())) {
            if (kIsStatic) {
              tls_cache->Set(inst, reinterpret_cast<uintptr_t>(field));
            } else {
              tls_cache->Set(inst, field->GetOffset().SizeValue());
            }
          }
          MterpFieldAccess<PrimType, kAccessType>(
              inst, inst_data, shadow_frame, obj, field->GetOffset(), field->IsVolatile());
          return true;
        }
      }
    }
  }

  // Slow path. Last and with identical arguments so that it becomes a single-instruction
  // tail call.
  return MterpFieldAccessSlow<PrimType, kAccessType>(inst, inst_data, shadow_frame, self);
}

#define MTERP_FIELD_ACCESSOR(Name, PrimType, AccessType)                                          \
extern "C" bool Name(Instruction* inst, uint16_t inst_data, ShadowFrame* sf, Thread* self)        \
    REQUIRES_SHARED(Locks::mutator_lock_) {                                                       \
  return MterpFieldAccessFast<PrimType, AccessType>(inst, inst_data, sf, self);                   \
}

#define MTERP_FIELD_ACCESSORS_FOR_TYPE(Suffix, PrimType, Kind)                                    \
  MTERP_FIELD_ACCESSOR(MterpIGet##Suffix, PrimType, Instance##Kind##Read)                         \
  MTERP_FIELD_ACCESSOR(MterpIPut##Suffix, PrimType, Instance##Kind##Write)                        \
  MTERP_FIELD_ACCESSOR(MterpSGet##Suffix, PrimType, Static##Kind##Read)                           \
  MTERP_FIELD_ACCESSOR(MterpSPut##Suffix, PrimType, Static##Kind##Write)
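
// For reference, MTERP_FIELD_ACCESSORS_FOR_TYPE(I8, int8_t, Primitive) below
// expands to four C entry points; the first of them is:
//
//   extern "C" bool MterpIGetI8(Instruction* inst, uint16_t inst_data,
//                               ShadowFrame* sf, Thread* self)
//       REQUIRES_SHARED(Locks::mutator_lock_) {
//     return MterpFieldAccessFast<int8_t, InstancePrimitiveRead>(
//         inst, inst_data, sf, self);
//   }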

MTERP_FIELD_ACCESSORS_FOR_TYPE(I8, int8_t, Primitive)
MTERP_FIELD_ACCESSORS_FOR_TYPE(U8, uint8_t, Primitive)
MTERP_FIELD_ACCESSORS_FOR_TYPE(I16, int16_t, Primitive)
MTERP_FIELD_ACCESSORS_FOR_TYPE(U16, uint16_t, Primitive)
MTERP_FIELD_ACCESSORS_FOR_TYPE(U32, uint32_t, Primitive)
MTERP_FIELD_ACCESSORS_FOR_TYPE(U64, uint64_t, Primitive)
MTERP_FIELD_ACCESSORS_FOR_TYPE(Obj, uint32_t, Object)

// Check that the primitive type for the Obj variant above is correct.
// It really must be a primitive type for the templates to compile.
// In the case of objects, it is only used to get the field size.
static_assert(kHeapReferenceSize == sizeof(uint32_t), "Unexpected kHeapReferenceSize");

#undef MTERP_FIELD_ACCESSORS_FOR_TYPE
#undef MTERP_FIELD_ACCESSOR

extern "C" mirror::Object* artAGetObjectFromMterp(mirror::Object* arr,
                                                  int32_t index)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  if (UNLIKELY(arr == nullptr)) {
    ThrowNullPointerExceptionFromInterpreter();
    return nullptr;
  }
  ObjPtr<mirror::ObjectArray<mirror::Object>> array = arr->AsObjectArray<mirror::Object>();
  if (LIKELY(array->CheckIsValidIndex(index))) {
    return array->GetWithoutChecks(index).Ptr();
  } else {
    return nullptr;
  }
}

extern "C" mirror::Object* artIGetObjectFromMterp(mirror::Object* obj,
                                                  uint32_t field_offset)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  if (UNLIKELY(obj == nullptr)) {
    ThrowNullPointerExceptionFromInterpreter();
    return nullptr;
  }
  return obj->GetFieldObject<mirror::Object>(MemberOffset(field_offset));
}

/*
 * Create a hotness_countdown based on the current method hotness_count and profiling
 * mode.  In short, determine how many hotness events we hit before reporting back
 * to the full instrumentation via MterpAddHotnessBatch.  Called once on entry to the method,
 * and regenerated following batch updates.
 */
extern "C" ssize_t MterpSetUpHotnessCountdown(ArtMethod* method,
                                              ShadowFrame* shadow_frame,
                                              Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  uint16_t hotness_count = method->GetCounter();
  int32_t countdown_value = jit::kJitHotnessDisabled;
  jit::Jit* jit = Runtime::Current()->GetJit();
  if (jit != nullptr) {
    int32_t warm_threshold = jit->WarmMethodThreshold();
    int32_t hot_threshold = jit->HotMethodThreshold();
    int32_t osr_threshold = jit->OSRMethodThreshold();
    if (hotness_count < warm_threshold) {
      countdown_value = warm_threshold - hotness_count;
    } else if (hotness_count < hot_threshold) {
      countdown_value = hot_threshold - hotness_count;
    } else if (hotness_count < osr_threshold) {
      countdown_value = osr_threshold - hotness_count;
    } else {
      countdown_value = jit::kJitCheckForOSR;
    }
    if (jit::Jit::ShouldUsePriorityThreadWeight(self)) {
      int32_t priority_thread_weight = jit->PriorityThreadWeight();
      countdown_value = std::min(countdown_value, countdown_value / priority_thread_weight);
    }
  }
  /*
   * The actual hotness threshold may exceed the range of our int16_t countdown value.  This is
   * not a problem, though.  We can just break it down into smaller chunks.
   */
  countdown_value = std::min(countdown_value,
                             static_cast<int32_t>(std::numeric_limits<int16_t>::max()));
  shadow_frame->SetCachedHotnessCountdown(countdown_value);
  shadow_frame->SetHotnessCountdown(countdown_value);
  return countdown_value;
}
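
// Worked example (the thresholds are runtime-configurable; these numbers are
// purely illustrative): with warm/hot/OSR thresholds of 5000/10000/60000 and
// hotness_count == 7000, the countdown is 10000 - 7000 = 3000. If the priority
// thread weight (say, 20) applies, it shrinks to min(3000, 3000 / 20) = 150,
// comfortably within the int16_t clamp above.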

/*
 * Report a batch of hotness events to the instrumentation and then return the new
 * countdown value to the next time we should report.
 */
extern "C" ssize_t MterpAddHotnessBatch(ArtMethod* method,
                                        ShadowFrame* shadow_frame,
                                        Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  jit::Jit* jit = Runtime::Current()->GetJit();
  if (jit != nullptr) {
    int16_t count = shadow_frame->GetCachedHotnessCountdown() - shadow_frame->GetHotnessCountdown();
    jit->AddSamples(self, method, count, /*with_backedges=*/ true);
  }
  return MterpSetUpHotnessCountdown(method, shadow_frame, self);
}

extern "C" size_t MterpMaybeDoOnStackReplacement(Thread* self,
                                                 ShadowFrame* shadow_frame,
                                                 int32_t offset)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  int16_t osr_countdown = shadow_frame->GetCachedHotnessCountdown() - 1;
  bool did_osr = false;
  /*
   * To reduce the cost of polling the compiler to determine whether the requested OSR
   * compilation has completed, only check every Nth time.  NOTE: the "osr_countdown <= 0"
   * condition is satisfied either by the decrement above or by the initial setting of
   * the cached countdown field to kJitCheckForOSR, which elsewhere is asserted to be -1.
   */
  if (osr_countdown <= 0) {
    ArtMethod* method = shadow_frame->GetMethod();
    JValue* result = shadow_frame->GetResultRegister();
    uint32_t dex_pc = shadow_frame->GetDexPC();
    jit::Jit* jit = Runtime::Current()->GetJit();
    osr_countdown = jit::Jit::kJitRecheckOSRThreshold;
    if (offset <= 0) {
      // Keep updating hotness in case a compilation request was dropped.  Eventually it will retry.
      jit->AddSamples(self, method, osr_countdown, /*with_backedges=*/ true);
    }
    did_osr = jit::Jit::MaybeDoOnStackReplacement(self, method, dex_pc, offset, result);
  }
  shadow_frame->SetCachedHotnessCountdown(osr_countdown);
  return did_osr ? 1u : 0u;
}

}  // namespace interpreter
}  // namespace art