/*
 * Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * Nterp entry point and support functions.
 */
#include "nterp.h"

#include "base/quasi_atomic.h"
#include "debugger.h"
#include "dex/dex_instruction_utils.h"
#include "entrypoints/entrypoint_utils-inl.h"
#include "interpreter/interpreter_common.h"
#include "interpreter/interpreter_intrinsics.h"
#include "interpreter/shadow_frame-inl.h"
#include "mirror/string-alloc-inl.h"
#include "nterp_helpers.h"

namespace art {
namespace interpreter {

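// Nterp is only supported on configurations without heap reference poisoning
// and with read barriers enabled.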
bool IsNterpSupported() {
  return !kPoisonHeapReferences && kUseReadBarrier;
}

bool CanRuntimeUseNterp() REQUIRES_SHARED(Locks::mutator_lock_) {
  Runtime* runtime = Runtime::Current();
  instrumentation::Instrumentation* instr = runtime->GetInstrumentation();
  // Nterp shares the same restrictions as Mterp.
  // If the runtime is interpreter-only, we currently don't use nterp, as some
  // parts of the runtime (like instrumentation) assume that an
  // interpreter-only runtime always runs the switch-based interpreter.
  return IsNterpSupported() && CanUseMterp() && !instr->InterpretOnly();
}

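// Filters out methods that nterp cannot execute: native, non-invokable, and
// proxy methods, methods that require access checks or lock counting, methods
// from non-standard dex files, and methods whose frame would be too large.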
bool CanMethodUseNterp(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_) {
  return !method->IsNative() &&
      method->SkipAccessChecks() &&
      method->IsInvokable() &&
      !method->MustCountLocks() &&
      method->GetDexFile()->IsStandardDexFile() &&
      // Proxy methods do not go through the JIT like other methods, so we
      // don't run them with nterp.
      !method->IsProxyMethod() &&
      NterpGetFrameSize(method) < kNterpMaxFrame;
}

const void* GetNterpEntryPoint() {
  return reinterpret_cast<const void*>(interpreter::ExecuteNterpImpl);
}

/*
 * Verify some constants used by the nterp interpreter.
 */
void CheckNterpAsmConstants() {
  /*
   * If we're using computed goto instruction transitions, make sure
   * none of the handlers overflows the byte limit. This won't tell
   * us which one did, but if any one is too big the total size will
   * overflow.
   */
  const int width = kMterpHandlerSize;
  ptrdiff_t interp_size = reinterpret_cast<uintptr_t>(artNterpAsmInstructionEnd) -
                          reinterpret_cast<uintptr_t>(artNterpAsmInstructionStart);
  if ((interp_size == 0) || (interp_size != (art::kNumPackedOpcodes * width))) {
    LOG(FATAL) << "ERROR: unexpected asm interp size " << interp_size
               << " (did an instruction handler exceed " << width << " bytes?)";
  }
}

inline void UpdateHotness(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_) {
  // The hotness we will add to a method when we perform a
  // field/method/class/string lookup.
  constexpr uint16_t kNterpHotnessLookup = 0xf;

  // Convert to uint32_t to handle uint16_t overflow.
  uint32_t counter = method->GetCounter();
  uint32_t new_counter = counter + kNterpHotnessLookup;
  if (new_counter > kNterpHotnessMask) {
    // Saturate the counter and let the nterp code itself request the
    // compilation: we want to make sure there's at least a second execution of
    // the method or a back-edge to avoid compiling straight-line
    // initialization methods.
    method->SetCounter(kNterpHotnessMask);
  } else {
    method->SetCounter(new_counter);
  }
}

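// Cache a resolution result in the thread-local interpreter cache, keyed by
// the dex PC pointer of the instruction that required the lookup.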
template<typename T>
inline void UpdateCache(Thread* self, uint16_t* dex_pc_ptr, T value) {
  DCHECK(kUseReadBarrier) << "Nterp only works with read barriers";
  // For simplicity, only update the cache if weak ref accesses are enabled. If
  // they are disabled, this means the GC is processing the cache, and is
  // reading it concurrently.
  if (self->GetWeakRefAccessEnabled()) {
    self->GetInterpreterCache()->Set(dex_pc_ptr, value);
  }
}

template<typename T>
inline void UpdateCache(Thread* self, uint16_t* dex_pc_ptr, T* value) {
  UpdateCache(self, dex_pc_ptr, reinterpret_cast<size_t>(value));
}

extern "C" const dex::CodeItem* NterpGetCodeItem(ArtMethod* method)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ScopedAssertNoThreadSuspension sants("In nterp");
  return method->GetCodeItem();
}

extern "C" const char* NterpGetShorty(ArtMethod* method)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ScopedAssertNoThreadSuspension sants("In nterp");
  return method->GetInterfaceMethodIfProxy(kRuntimePointerSize)->GetShorty();
}

extern "C" const char* NterpGetShortyFromMethodId(ArtMethod* caller, uint32_t method_index)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ScopedAssertNoThreadSuspension sants("In nterp");
  return caller->GetDexFile()->GetMethodShorty(method_index);
}

extern "C" const char* NterpGetShortyFromInvokePolymorphic(ArtMethod* caller, uint16_t* dex_pc_ptr)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ScopedAssertNoThreadSuspension sants("In nterp");
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  dex::ProtoIndex proto_idx(inst->Opcode() == Instruction::INVOKE_POLYMORPHIC
                                ? inst->VRegH_45cc()
                                : inst->VRegH_4rcc());
  return caller->GetDexFile()->GetShorty(proto_idx);
}

extern "C" const char* NterpGetShortyFromInvokeCustom(ArtMethod* caller, uint16_t* dex_pc_ptr)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ScopedAssertNoThreadSuspension sants("In nterp");
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  uint16_t call_site_index = (inst->Opcode() == Instruction::INVOKE_CUSTOM
                                  ? inst->VRegB_35c()
                                  : inst->VRegB_3rc());
  const DexFile* dex_file = caller->GetDexFile();
  dex::ProtoIndex proto_idx = dex_file->GetProtoIndexForCallSite(call_site_index);
  return dex_file->GetShorty(proto_idx);
}

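// Resolves the method referenced by the invoke instruction at `dex_pc_ptr` and
// returns it encoded for nterp's fast paths: the IMT index for interface
// invokes (or the vtable index with the high bit set when the interface method
// is declared by j.l.Object), the vtable index for virtual invokes, the
// ArtMethod* with its least significant bit set for String.<init> calls, and
// the plain ArtMethod* otherwise. Returns 0 with a pending exception on
// failure.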
extern "C" size_t NterpGetMethod(Thread* self, ArtMethod* caller, uint16_t* dex_pc_ptr)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  UpdateHotness(caller);
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  InvokeType invoke_type = kStatic;
  uint16_t method_index = 0;
  switch (inst->Opcode()) {
    case Instruction::INVOKE_DIRECT: {
      method_index = inst->VRegB_35c();
      invoke_type = kDirect;
      break;
    }

    case Instruction::INVOKE_INTERFACE: {
      method_index = inst->VRegB_35c();
      invoke_type = kInterface;
      break;
    }

    case Instruction::INVOKE_STATIC: {
      method_index = inst->VRegB_35c();
      invoke_type = kStatic;
      break;
    }

    case Instruction::INVOKE_SUPER: {
      method_index = inst->VRegB_35c();
      invoke_type = kSuper;
      break;
    }

    case Instruction::INVOKE_VIRTUAL: {
      method_index = inst->VRegB_35c();
      invoke_type = kVirtual;
      break;
    }

    case Instruction::INVOKE_DIRECT_RANGE: {
      method_index = inst->VRegB_3rc();
      invoke_type = kDirect;
      break;
    }

    case Instruction::INVOKE_INTERFACE_RANGE: {
      method_index = inst->VRegB_3rc();
      invoke_type = kInterface;
      break;
    }

    case Instruction::INVOKE_STATIC_RANGE: {
      method_index = inst->VRegB_3rc();
      invoke_type = kStatic;
      break;
    }

    case Instruction::INVOKE_SUPER_RANGE: {
      method_index = inst->VRegB_3rc();
      invoke_type = kSuper;
      break;
    }

    case Instruction::INVOKE_VIRTUAL_RANGE: {
      method_index = inst->VRegB_3rc();
      invoke_type = kVirtual;
      break;
    }

    default:
      LOG(FATAL) << "Unknown instruction " << inst->Opcode();
  }

  ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
  ArtMethod* resolved_method = caller->SkipAccessChecks()
      ? class_linker->ResolveMethod<ClassLinker::ResolveMode::kNoChecks>(
            self, method_index, caller, invoke_type)
      : class_linker->ResolveMethod<ClassLinker::ResolveMode::kCheckICCEAndIAE>(
            self, method_index, caller, invoke_type);
  if (resolved_method == nullptr) {
    DCHECK(self->IsExceptionPending());
    return 0;
  }

  // ResolveMethod returns the method based on the method_id. For super invokes
  // we must use the executing class's context to find the right method.
  if (invoke_type == kSuper) {
    ObjPtr<mirror::Class> executing_class = caller->GetDeclaringClass();
    ObjPtr<mirror::Class> referenced_class = class_linker->LookupResolvedType(
        executing_class->GetDexFile().GetMethodId(method_index).class_idx_,
        executing_class->GetDexCache(),
        executing_class->GetClassLoader());
    DCHECK(referenced_class != nullptr);  // We have already resolved a method from this class.
    if (!referenced_class->IsAssignableFrom(executing_class)) {
      // We cannot determine the target method.
      ThrowNoSuchMethodError(invoke_type,
                             resolved_method->GetDeclaringClass(),
                             resolved_method->GetName(),
                             resolved_method->GetSignature());
      return 0;
    }
    if (referenced_class->IsInterface()) {
      resolved_method = referenced_class->FindVirtualMethodForInterfaceSuper(
          resolved_method, class_linker->GetImagePointerSize());
    } else {
      uint16_t vtable_index = resolved_method->GetMethodIndex();
      ObjPtr<mirror::Class> super_class = executing_class->GetSuperClass();
      if (super_class == nullptr ||
          !super_class->HasVTable() ||
          vtable_index >= static_cast<uint32_t>(super_class->GetVTableLength())) {
        // Behavior to agree with that of the verifier.
        ThrowNoSuchMethodError(invoke_type,
                               resolved_method->GetDeclaringClass(),
                               resolved_method->GetName(),
                               resolved_method->GetSignature());
        return 0;
      } else {
        resolved_method = executing_class->GetSuperClass()->GetVTableEntry(
            vtable_index, class_linker->GetImagePointerSize());
      }
    }
  }

  if (invoke_type == kInterface) {
    if (resolved_method->GetDeclaringClass()->IsObjectClass()) {
      // Don't update the cache; return a value with the high bit set to notify
      // the interpreter that it should do a vtable call instead.
      DCHECK_LT(resolved_method->GetMethodIndex(), 0x10000);
      return resolved_method->GetMethodIndex() | (1U << 31);
    } else {
      DCHECK(resolved_method->GetDeclaringClass()->IsInterface());
      UpdateCache(self, dex_pc_ptr, resolved_method->GetImtIndex());
      return resolved_method->GetImtIndex();
    }
  } else if (resolved_method->GetDeclaringClass()->IsStringClass() &&
             !resolved_method->IsStatic() &&
             resolved_method->IsConstructor()) {
    resolved_method = WellKnownClasses::StringInitToStringFactory(resolved_method);
    // Or the result with 1 to notify nterp that this is a string init method.
    // We don't cache the result, as we don't want nterp's fast path to always
    // check for it, and we expect a lot more regular calls than string init
    // calls.
    return reinterpret_cast<size_t>(resolved_method) | 1;
  } else if (invoke_type == kVirtual) {
    UpdateCache(self, dex_pc_ptr, resolved_method->GetMethodIndex());
    return resolved_method->GetMethodIndex();
  } else {
    UpdateCache(self, dex_pc_ptr, resolved_method);
    return reinterpret_cast<size_t>(resolved_method);
  }
}

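// Resolves the field referenced by `field_index`, performing access checks and
// static/instance mismatch checks unless the caller is allowed to skip them.
// Returns null with a pending exception on failure.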
static ArtField* ResolveFieldWithAccessChecks(Thread* self,
                                              ClassLinker* class_linker,
                                              uint16_t field_index,
                                              ArtMethod* caller,
                                              bool is_static,
                                              bool is_put)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  if (caller->SkipAccessChecks()) {
    return class_linker->ResolveField(field_index, caller, is_static);
  }

  caller = caller->GetInterfaceMethodIfProxy(kRuntimePointerSize);

  StackHandleScope<2> hs(self);
  Handle<mirror::DexCache> h_dex_cache(hs.NewHandle(caller->GetDexCache()));
  Handle<mirror::ClassLoader> h_class_loader(hs.NewHandle(caller->GetClassLoader()));

  ArtField* resolved_field = class_linker->ResolveFieldJLS(field_index,
                                                           h_dex_cache,
                                                           h_class_loader);
  if (resolved_field == nullptr) {
    return nullptr;
  }

  ObjPtr<mirror::Class> fields_class = resolved_field->GetDeclaringClass();
  if (UNLIKELY(resolved_field->IsStatic() != is_static)) {
    ThrowIncompatibleClassChangeErrorField(resolved_field, is_static, caller);
    return nullptr;
  }
  ObjPtr<mirror::Class> referring_class = caller->GetDeclaringClass();
  if (UNLIKELY(!referring_class->CheckResolvedFieldAccess(fields_class,
                                                          resolved_field,
                                                          caller->GetDexCache(),
                                                          field_index))) {
    return nullptr;
  }
  if (UNLIKELY(is_put && resolved_field->IsFinal() && (fields_class != referring_class))) {
    ThrowIllegalAccessErrorFinalField(caller, resolved_field);
    return nullptr;
  }
  return resolved_field;
}

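// Resolves the static field for the sget/sput instruction at `dex_pc_ptr`,
// ensuring its declaring class is initialized (or being initialized by this
// thread). Returns the ArtField*, with the least significant bit set if the
// field is volatile, or 0 with a pending exception on failure.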
extern "C" size_t NterpGetStaticField(Thread* self, ArtMethod* caller, uint16_t* dex_pc_ptr)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  UpdateHotness(caller);
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  uint16_t field_index = inst->VRegB_21c();
  ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
  ArtField* resolved_field = ResolveFieldWithAccessChecks(
      self,
      class_linker,
      field_index,
      caller,
      /* is_static= */ true,
      /* is_put= */ IsInstructionSPut(inst->Opcode()));

  if (resolved_field == nullptr) {
    DCHECK(self->IsExceptionPending());
    return 0;
  }
  if (UNLIKELY(!resolved_field->GetDeclaringClass()->IsVisiblyInitialized())) {
    StackHandleScope<1> hs(self);
    Handle<mirror::Class> h_class(hs.NewHandle(resolved_field->GetDeclaringClass()));
    if (UNLIKELY(!class_linker->EnsureInitialized(
            self, h_class, /*can_init_fields=*/ true, /*can_init_parents=*/ true))) {
      DCHECK(self->IsExceptionPending());
      return 0;
    }
    DCHECK(h_class->IsInitializing());
  }
  if (resolved_field->IsVolatile()) {
    // Or the result with 1 to notify nterp that this is a volatile field. We
    // don't cache the result, as we don't want nterp's fast path to always
    // check for it.
    return reinterpret_cast<size_t>(resolved_field) | 1;
  } else {
    UpdateCache(self, dex_pc_ptr, resolved_field);
    return reinterpret_cast<size_t>(resolved_field);
  }
}

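// Resolves the instance field for the iget/iput instruction at `dex_pc_ptr`
// and returns its offset; volatile fields are signaled by returning the
// negated offset, and 0 with a pending exception signals failure.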
extern "C" uint32_t NterpGetInstanceFieldOffset(Thread* self,
                                                ArtMethod* caller,
                                                uint16_t* dex_pc_ptr)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  UpdateHotness(caller);
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  uint16_t field_index = inst->VRegC_22c();
  ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
  ArtField* resolved_field = ResolveFieldWithAccessChecks(
      self,
      class_linker,
      field_index,
      caller,
      /* is_static= */ false,
      /* is_put= */ IsInstructionIPut(inst->Opcode()));
  if (resolved_field == nullptr) {
    DCHECK(self->IsExceptionPending());
    return 0;
  }
  if (resolved_field->IsVolatile()) {
    // Don't cache for a volatile field, and return a negated offset as a
    // volatile marker.
    return -resolved_field->GetOffset().Uint32Value();
  }
  UpdateCache(self, dex_pc_ptr, resolved_field->GetOffset().Uint32Value());
  return resolved_field->GetOffset().Uint32Value();
}

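// Resolves the class referenced by the instruction at `dex_pc_ptr` and, for
// NEW_INSTANCE, allocates and returns a new object; for all other opcodes the
// resolved class itself is returned. Returns null with a pending exception on
// failure.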
extern "C" mirror::Object* NterpGetClassOrAllocateObject(Thread* self,
                                                         ArtMethod* caller,
                                                         uint16_t* dex_pc_ptr)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  UpdateHotness(caller);
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  dex::TypeIndex index;
  switch (inst->Opcode()) {
    case Instruction::NEW_INSTANCE:
      index = dex::TypeIndex(inst->VRegB_21c());
      break;
    case Instruction::CHECK_CAST:
      index = dex::TypeIndex(inst->VRegB_21c());
      break;
    case Instruction::INSTANCE_OF:
      index = dex::TypeIndex(inst->VRegC_22c());
      break;
    case Instruction::CONST_CLASS:
      index = dex::TypeIndex(inst->VRegB_21c());
      break;
    case Instruction::NEW_ARRAY:
      index = dex::TypeIndex(inst->VRegC_22c());
      break;
    default:
      LOG(FATAL) << "Unreachable";
  }
  ObjPtr<mirror::Class> c =
      ResolveVerifyAndClinit(index,
                             caller,
                             self,
                             /* can_run_clinit= */ false,
                             /* verify_access= */ !caller->SkipAccessChecks());
  if (c == nullptr) {
    DCHECK(self->IsExceptionPending());
    return nullptr;
  }

  if (inst->Opcode() == Instruction::NEW_INSTANCE) {
    gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
    if (UNLIKELY(c->IsStringClass())) {
      // We don't cache the class for strings as we need to special case their
      // allocation.
      return mirror::String::AllocEmptyString(self, allocator_type).Ptr();
    } else {
      if (!c->IsFinalizable() && c->IsInstantiable()) {
        // Cache non-finalizable classes for next calls.
        UpdateCache(self, dex_pc_ptr, c.Ptr());
      }
      return AllocObjectFromCode(c, self, allocator_type).Ptr();
    }
  } else {
    // For all other cases, cache the class.
    UpdateCache(self, dex_pc_ptr, c.Ptr());
  }
  return c.Ptr();
}

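// Loads the constant object (string, method handle, or method type) referenced
// by the const-* instruction at `dex_pc_ptr`.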
extern "C" mirror::Object* NterpLoadObject(Thread* self, ArtMethod* caller, uint16_t* dex_pc_ptr)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
  switch (inst->Opcode()) {
    case Instruction::CONST_STRING:
    case Instruction::CONST_STRING_JUMBO: {
      UpdateHotness(caller);
      dex::StringIndex string_index(
          (inst->Opcode() == Instruction::CONST_STRING)
              ? inst->VRegB_21c()
              : inst->VRegB_31c());
      ObjPtr<mirror::String> str = class_linker->ResolveString(string_index, caller);
      if (str == nullptr) {
        DCHECK(self->IsExceptionPending());
        return nullptr;
      }
      UpdateCache(self, dex_pc_ptr, str.Ptr());
      return str.Ptr();
    }
    case Instruction::CONST_METHOD_HANDLE: {
      // Don't cache: we don't expect this to be performance sensitive, and we
      // don't want the cache to conflict with a performance sensitive entry.
      return class_linker->ResolveMethodHandle(self, inst->VRegB_21c(), caller).Ptr();
    }
    case Instruction::CONST_METHOD_TYPE: {
      // Don't cache: we don't expect this to be performance sensitive, and we
      // don't want the cache to conflict with a performance sensitive entry.
      return class_linker->ResolveMethodType(
          self, dex::ProtoIndex(inst->VRegB_21c()), caller).Ptr();
    }
    default:
      LOG(FATAL) << "Unreachable";
  }
  return nullptr;
}

extern "C" void NterpUnimplemented() {
  LOG(FATAL) << "Unimplemented";
}

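// Implements filled-new-array and filled-new-array/range: resolves the array
// class, allocates the array, and fills it from the source registers. Only
// reference arrays and 'int' component types are supported; other primitive
// component types throw.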
static mirror::Object* DoFilledNewArray(Thread* self,
                                        ArtMethod* caller,
                                        uint16_t* dex_pc_ptr,
                                        uint32_t* regs,
                                        bool is_range)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  if (kIsDebugBuild) {
    if (is_range) {
      DCHECK_EQ(inst->Opcode(), Instruction::FILLED_NEW_ARRAY_RANGE);
    } else {
      DCHECK_EQ(inst->Opcode(), Instruction::FILLED_NEW_ARRAY);
    }
  }
  const int32_t length = is_range ? inst->VRegA_3rc() : inst->VRegA_35c();
  DCHECK_GE(length, 0);
  if (!is_range) {
    // Check that FILLED_NEW_ARRAY's length does not exceed 5 arguments.
    DCHECK_LE(length, 5);
  }
  uint16_t type_idx = is_range ? inst->VRegB_3rc() : inst->VRegB_35c();
  ObjPtr<mirror::Class> array_class =
      ResolveVerifyAndClinit(dex::TypeIndex(type_idx),
                             caller,
                             self,
                             /* can_run_clinit= */ true,
                             /* verify_access= */ !caller->SkipAccessChecks());
  if (UNLIKELY(array_class == nullptr)) {
    DCHECK(self->IsExceptionPending());
    return nullptr;
  }
  DCHECK(array_class->IsArrayClass());
  ObjPtr<mirror::Class> component_class = array_class->GetComponentType();
  const bool is_primitive_int_component = component_class->IsPrimitiveInt();
  if (UNLIKELY(component_class->IsPrimitive() && !is_primitive_int_component)) {
    if (component_class->IsPrimitiveLong() || component_class->IsPrimitiveDouble()) {
      ThrowRuntimeException("Bad filled array request for type %s",
                            component_class->PrettyDescriptor().c_str());
    } else {
      self->ThrowNewExceptionF(
          "Ljava/lang/InternalError;",
          "Found type %s; filled-new-array not implemented for anything but 'int'",
          component_class->PrettyDescriptor().c_str());
    }
    return nullptr;
  }
  ObjPtr<mirror::Object> new_array = mirror::Array::Alloc(
      self,
      array_class,
      length,
      array_class->GetComponentSizeShift(),
      Runtime::Current()->GetHeap()->GetCurrentAllocator());
  if (UNLIKELY(new_array == nullptr)) {
    self->AssertPendingOOMException();
    return nullptr;
  }
  uint32_t arg[Instruction::kMaxVarArgRegs];  // Only used in filled-new-array.
  uint32_t vregC = 0;  // Only used in filled-new-array-range.
  if (is_range) {
    vregC = inst->VRegC_3rc();
  } else {
    inst->GetVarArgs(arg);
  }
  for (int32_t i = 0; i < length; ++i) {
    size_t src_reg = is_range ? vregC + i : arg[i];
    if (is_primitive_int_component) {
      new_array->AsIntArray()->SetWithoutChecks</* kTransactionActive= */ false>(i, regs[src_reg]);
    } else {
      new_array->AsObjectArray<mirror::Object>()->SetWithoutChecks</* kTransactionActive= */ false>(
          i, reinterpret_cast<mirror::Object*>(regs[src_reg]));
    }
  }
  return new_array.Ptr();
}

extern "C" mirror::Object* NterpFilledNewArray(Thread* self,
                                               ArtMethod* caller,
                                               uint32_t* registers,
                                               uint16_t* dex_pc_ptr)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  return DoFilledNewArray(self, caller, dex_pc_ptr, registers, /* is_range= */ false);
}

extern "C" mirror::Object* NterpFilledNewArrayRange(Thread* self,
                                                    ArtMethod* caller,
                                                    uint32_t* registers,
                                                    uint16_t* dex_pc_ptr)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  return DoFilledNewArray(self, caller, dex_pc_ptr, registers, /* is_range= */ true);
}

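// Called when a method's hotness counter saturates. Returns OSR data if we can
// jump into compiled code at the current dex PC; otherwise enqueues the method
// for JIT compilation and returns null.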
extern "C" jit::OsrData* NterpHotMethod(ArtMethod* method, uint16_t* dex_pc_ptr, uint32_t* vregs)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ScopedAssertNoThreadSuspension sants("In nterp");
  jit::Jit* jit = Runtime::Current()->GetJit();
  if (jit != nullptr && jit->UseJitCompilation()) {
    // Nterp passes a null dex_pc_ptr on entry points where we don't want to OSR.
    if (dex_pc_ptr != nullptr) {
      // This could be a loop back edge; check if we can OSR.
      CodeItemInstructionAccessor accessor(method->DexInstructions());
      uint32_t dex_pc = dex_pc_ptr - accessor.Insns();
      jit::OsrData* osr_data = jit->PrepareForOsr(
          method->GetInterfaceMethodIfProxy(kRuntimePointerSize), dex_pc, vregs);
      if (osr_data != nullptr) {
        return osr_data;
      }
    }
    jit->EnqueueCompilationFromNterp(method, Thread::Current());
  }
  return nullptr;
}

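// Packed and sparse switch lookups are shared with the Mterp implementation;
// nterp only wraps them with the no-thread-suspension assertion.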
extern "C" ssize_t MterpDoPackedSwitch(const uint16_t* switchData, int32_t testVal);
extern "C" ssize_t NterpDoPackedSwitch(const uint16_t* switchData, int32_t testVal)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ScopedAssertNoThreadSuspension sants("In nterp");
  return MterpDoPackedSwitch(switchData, testVal);
}

extern "C" ssize_t MterpDoSparseSwitch(const uint16_t* switchData, int32_t testVal);
extern "C" ssize_t NterpDoSparseSwitch(const uint16_t* switchData, int32_t testVal)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ScopedAssertNoThreadSuspension sants("In nterp");
  return MterpDoSparseSwitch(switchData, testVal);
}

}  // namespace interpreter
}  // namespace art