1 /*
2 * Copyright (C) 2012 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include "art_method-inl.h"
18 #include "base/callee_save_type.h"
19 #include "base/enums.h"
20 #include "callee_save_frame.h"
21 #include "common_throws.h"
22 #include "class_root-inl.h"
23 #include "debug_print.h"
24 #include "debugger.h"
25 #include "dex/dex_file-inl.h"
26 #include "dex/dex_file_types.h"
27 #include "dex/dex_instruction-inl.h"
28 #include "dex/method_reference.h"
29 #include "entrypoints/entrypoint_utils-inl.h"
30 #include "entrypoints/quick/callee_save_frame.h"
31 #include "entrypoints/runtime_asm_entrypoints.h"
32 #include "gc/accounting/card_table-inl.h"
33 #include "imt_conflict_table.h"
34 #include "imtable-inl.h"
35 #include "instrumentation.h"
36 #include "interpreter/interpreter.h"
37 #include "interpreter/interpreter_common.h"
38 #include "interpreter/shadow_frame-inl.h"
39 #include "jit/jit.h"
40 #include "jit/jit_code_cache.h"
41 #include "linear_alloc.h"
42 #include "method_handles.h"
43 #include "mirror/class-inl.h"
44 #include "mirror/dex_cache-inl.h"
45 #include "mirror/method.h"
46 #include "mirror/method_handle_impl.h"
47 #include "mirror/object-inl.h"
48 #include "mirror/object_array-inl.h"
49 #include "mirror/var_handle.h"
50 #include "oat.h"
51 #include "oat_file.h"
52 #include "oat_quick_method_header.h"
53 #include "quick_exception_handler.h"
54 #include "runtime.h"
55 #include "scoped_thread_state_change-inl.h"
56 #include "stack.h"
57 #include "thread-inl.h"
58 #include "var_handles.h"
59 #include "well_known_classes.h"
60
61 namespace art {
62
63 // Visits the arguments as saved to the stack by a CalleeSaveType::kSaveRefsAndArgs callee save frame.
64 class QuickArgumentVisitor {
65 // Number of bytes for each out register in the caller method's frame.
66 static constexpr size_t kBytesStackArgLocation = 4;
67 // Frame size in bytes of a callee-save frame for RefsAndArgs.
68 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_FrameSize =
69 RuntimeCalleeSaveFrame::GetFrameSize(CalleeSaveType::kSaveRefsAndArgs);
70 // Offset of first GPR arg.
71 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset =
72 RuntimeCalleeSaveFrame::GetGpr1Offset(CalleeSaveType::kSaveRefsAndArgs);
73 // Offset of first FPR arg.
74 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset =
75 RuntimeCalleeSaveFrame::GetFpr1Offset(CalleeSaveType::kSaveRefsAndArgs);
76 // Offset of return address.
77 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_ReturnPcOffset =
78 RuntimeCalleeSaveFrame::GetReturnPcOffset(CalleeSaveType::kSaveRefsAndArgs);
79 #if defined(__arm__)
80 // The callee save frame is pointed to by SP.
81 // | argN | |
82 // | ... | |
83 // | arg4 | |
84 // | arg3 spill | | Caller's frame
85 // | arg2 spill | |
86 // | arg1 spill | |
87 // | Method* | ---
88 // | LR |
89 // | ... | 4x6 bytes callee saves
90 // | R3 |
91 // | R2 |
92 // | R1 |
93 // | S15 |
94 // | : |
95 // | S0 |
96 // | | 4x2 bytes padding
97 // | Method* | <- sp
98 static constexpr bool kSplitPairAcrossRegisterAndStack = false;
99 static constexpr bool kAlignPairRegister = true;
100 static constexpr bool kQuickSoftFloatAbi = false;
101 static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = true;
102 static constexpr bool kQuickSkipOddFpRegisters = false;
103 static constexpr size_t kNumQuickGprArgs = 3;  // 3 arguments passed in GPRs (r1-r3).
104 static constexpr size_t kNumQuickFprArgs = 16;  // 16 arguments passed in FPRs (s0-s15).
105 static constexpr bool kGprFprLockstep = false;
106 static size_t GprIndexToGprOffset(uint32_t gpr_index) {
107 return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
108 }
109 #elif defined(__aarch64__)
110 // The callee save frame is pointed to by SP.
111 // | argN | |
112 // | ... | |
113 // | arg4 | |
114 // | arg3 spill | | Caller's frame
115 // | arg2 spill | |
116 // | arg1 spill | |
117 // | Method* | ---
118 // | LR |
119 // | X29 |
120 // | : |
121 // | X20 |
122 // | X7 |
123 // | : |
124 // | X1 |
125 // | D7 |
126 // | : |
127 // | D0 |
128 // | | padding
129 // | Method* | <- sp
130 static constexpr bool kSplitPairAcrossRegisterAndStack = false;
131 static constexpr bool kAlignPairRegister = false;
132 static constexpr bool kQuickSoftFloatAbi = false; // This is a hard float ABI.
133 static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
134 static constexpr bool kQuickSkipOddFpRegisters = false;
135 static constexpr size_t kNumQuickGprArgs = 7; // 7 arguments passed in GPRs.
136 static constexpr size_t kNumQuickFprArgs = 8; // 8 arguments passed in FPRs.
137 static constexpr bool kGprFprLockstep = false;
138 static size_t GprIndexToGprOffset(uint32_t gpr_index) {
139 return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
140 }
141 #elif defined(__i386__)
142 // The callee save frame is pointed to by SP.
143 // | argN | |
144 // | ... | |
145 // | arg4 | |
146 // | arg3 spill | | Caller's frame
147 // | arg2 spill | |
148 // | arg1 spill | |
149 // | Method* | ---
150 // | Return |
151 // | EBP,ESI,EDI | callee saves
152 // | EBX | arg3
153 // | EDX | arg2
154 // | ECX | arg1
155 // | XMM3 | float arg 4
156 // | XMM2 | float arg 3
157 // | XMM1 | float arg 2
158 // | XMM0 | float arg 1
159 // | EAX/Method* | <- sp
160 static constexpr bool kSplitPairAcrossRegisterAndStack = false;
161 static constexpr bool kAlignPairRegister = false;
162 static constexpr bool kQuickSoftFloatAbi = false; // This is a hard float ABI.
163 static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
164 static constexpr bool kQuickSkipOddFpRegisters = false;
165 static constexpr size_t kNumQuickGprArgs = 3; // 3 arguments passed in GPRs.
166 static constexpr size_t kNumQuickFprArgs = 4; // 4 arguments passed in FPRs.
167 static constexpr bool kGprFprLockstep = false;
168 static size_t GprIndexToGprOffset(uint32_t gpr_index) {
169 return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
170 }
171 #elif defined(__x86_64__)
172 // The callee save frame is pointed to by SP.
173 // | argN | |
174 // | ... | |
175 // | reg. arg spills | | Caller's frame
176 // | Method* | ---
177 // | Return |
178 // | R15 | callee save
179 // | R14 | callee save
180 // | R13 | callee save
181 // | R12 | callee save
182 // | R9 | arg5
183 // | R8 | arg4
184 // | RSI/R6 | arg1
185 // | RBP/R5 | callee save
186 // | RBX/R3 | callee save
187 // | RDX/R2 | arg2
188 // | RCX/R1 | arg3
189 // | XMM7 | float arg 8
190 // | XMM6 | float arg 7
191 // | XMM5 | float arg 6
192 // | XMM4 | float arg 5
193 // | XMM3 | float arg 4
194 // | XMM2 | float arg 3
195 // | XMM1 | float arg 2
196 // | XMM0 | float arg 1
197 // | Padding |
198 // | RDI/Method* | <- sp
199 static constexpr bool kSplitPairAcrossRegisterAndStack = false;
200 static constexpr bool kAlignPairRegister = false;
201 static constexpr bool kQuickSoftFloatAbi = false; // This is a hard float ABI.
202 static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
203 static constexpr bool kQuickSkipOddFpRegisters = false;
204 static constexpr size_t kNumQuickGprArgs = 5; // 5 arguments passed in GPRs.
205 static constexpr size_t kNumQuickFprArgs = 8; // 8 arguments passed in FPRs.
206 static constexpr bool kGprFprLockstep = false;
207 static size_t GprIndexToGprOffset(uint32_t gpr_index) {
208 switch (gpr_index) {
209 case 0: return (4 * GetBytesPerGprSpillLocation(kRuntimeISA));
210 case 1: return (1 * GetBytesPerGprSpillLocation(kRuntimeISA));
211 case 2: return (0 * GetBytesPerGprSpillLocation(kRuntimeISA));
212 case 3: return (5 * GetBytesPerGprSpillLocation(kRuntimeISA));
213 case 4: return (6 * GetBytesPerGprSpillLocation(kRuntimeISA));
214 default:
215 LOG(FATAL) << "Unexpected GPR index: " << gpr_index;
216 UNREACHABLE();
217 }
218 }
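// Note: per the frame layout above, the GPR spill area (starting at Gpr1Offset) holds RCX, RDX,
// RBX, RBP, RSI, R8, R9 in that order (callee saves interleaved with argument registers), while
// the managed argument order is RSI, RDX, RCX, R8, R9; hence the table lookup above instead of
// a simple multiply.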
219 #else
220 #error "Unsupported architecture"
221 #endif
222
223 public:
224 // Special handling for proxy methods. Proxy methods are instance methods so the
225 // 'this' object is the 1st argument. They also have the same frame layout as the
226 // kRefAndArgs runtime method. Since 'this' is a reference, it is located in the
227 // 1st GPR.
228 static StackReference<mirror::Object>* GetProxyThisObjectReference(ArtMethod** sp)
229 REQUIRES_SHARED(Locks::mutator_lock_) {
230 CHECK((*sp)->IsProxyMethod());
231 CHECK_GT(kNumQuickGprArgs, 0u);
232 constexpr uint32_t kThisGprIndex = 0u; // 'this' is in the 1st GPR.
233 size_t this_arg_offset = kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset +
234 GprIndexToGprOffset(kThisGprIndex);
235 uint8_t* this_arg_address = reinterpret_cast<uint8_t*>(sp) + this_arg_offset;
236 return reinterpret_cast<StackReference<mirror::Object>*>(this_arg_address);
237 }
238
239 static ArtMethod* GetCallingMethod(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) {
240 DCHECK((*sp)->IsCalleeSaveMethod());
241 return GetCalleeSaveMethodCaller(sp, CalleeSaveType::kSaveRefsAndArgs);
242 }
243
244 static ArtMethod* GetOuterMethod(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) {
245 DCHECK((*sp)->IsCalleeSaveMethod());
246 uint8_t* previous_sp =
247 reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize;
248 return *reinterpret_cast<ArtMethod**>(previous_sp);
249 }
250
251 static uint32_t GetCallingDexPc(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) {
252 DCHECK((*sp)->IsCalleeSaveMethod());
253 constexpr size_t callee_frame_size =
254 RuntimeCalleeSaveFrame::GetFrameSize(CalleeSaveType::kSaveRefsAndArgs);
255 ArtMethod** caller_sp = reinterpret_cast<ArtMethod**>(
256 reinterpret_cast<uintptr_t>(sp) + callee_frame_size);
257 uintptr_t outer_pc = QuickArgumentVisitor::GetCallingPc(sp);
258 const OatQuickMethodHeader* current_code = (*caller_sp)->GetOatQuickMethodHeader(outer_pc);
259 uintptr_t outer_pc_offset = current_code->NativeQuickPcOffset(outer_pc);
260
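// Compiled (optimized) code is described by stack maps; if the call site was inlined, report
// the dex pc of the innermost inlined frame rather than that of the outer frame.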
261 if (current_code->IsOptimized()) {
262 CodeInfo code_info = CodeInfo::DecodeInlineInfoOnly(current_code);
263 StackMap stack_map = code_info.GetStackMapForNativePcOffset(outer_pc_offset);
264 DCHECK(stack_map.IsValid());
265 BitTableRange<InlineInfo> inline_infos = code_info.GetInlineInfosOf(stack_map);
266 if (!inline_infos.empty()) {
267 return inline_infos.back().GetDexPc();
268 } else {
269 return stack_map.GetDexPc();
270 }
271 } else {
272 return current_code->ToDexPc(caller_sp, outer_pc);
273 }
274 }
275
276 static uint8_t* GetCallingPcAddr(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) {
277 DCHECK((*sp)->IsCalleeSaveMethod());
278 uint8_t* return_address_spill =
279 reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_ReturnPcOffset;
280 return return_address_spill;
281 }
282
283 // For the given quick ref and args quick frame, return the caller's PC.
284 static uintptr_t GetCallingPc(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) {
285 return *reinterpret_cast<uintptr_t*>(GetCallingPcAddr(sp));
286 }
287
288 QuickArgumentVisitor(ArtMethod** sp, bool is_static, const char* shorty,
289 uint32_t shorty_len) REQUIRES_SHARED(Locks::mutator_lock_) :
290 is_static_(is_static), shorty_(shorty), shorty_len_(shorty_len),
291 gpr_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset),
292 fpr_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset),
293 stack_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize
294 + sizeof(ArtMethod*)), // Skip ArtMethod*.
295 gpr_index_(0), fpr_index_(0), fpr_double_index_(0), stack_index_(0),
296 cur_type_(Primitive::kPrimVoid), is_split_long_or_double_(false) {
297 static_assert(kQuickSoftFloatAbi == (kNumQuickFprArgs == 0),
298 "Number of Quick FPR arguments unexpected");
299 static_assert(!(kQuickSoftFloatAbi && kQuickDoubleRegAlignedFloatBackFilled),
300 "Double alignment unexpected");
301 // For register alignment, the back-fill logic assumes the double counter (fpr_double_index_)
302 // stays even, which requires an even number of FPR argument registers.
303 static_assert(!kQuickDoubleRegAlignedFloatBackFilled || kNumQuickFprArgs % 2 == 0,
304 "Number of Quick FPR arguments not even");
305 DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
306 }
307
308 virtual ~QuickArgumentVisitor() {}
309
310 virtual void Visit() = 0;
311
312 Primitive::Type GetParamPrimitiveType() const {
313 return cur_type_;
314 }
315
316 uint8_t* GetParamAddress() const {
317 if (!kQuickSoftFloatAbi) {
318 Primitive::Type type = GetParamPrimitiveType();
319 if (UNLIKELY((type == Primitive::kPrimDouble) || (type == Primitive::kPrimFloat))) {
320 if (type == Primitive::kPrimDouble && kQuickDoubleRegAlignedFloatBackFilled) {
321 if (fpr_double_index_ + 2 < kNumQuickFprArgs + 1) {
322 return fpr_args_ + (fpr_double_index_ * GetBytesPerFprSpillLocation(kRuntimeISA));
323 }
324 } else if (fpr_index_ + 1 < kNumQuickFprArgs + 1) {
325 return fpr_args_ + (fpr_index_ * GetBytesPerFprSpillLocation(kRuntimeISA));
326 }
327 return stack_args_ + (stack_index_ * kBytesStackArgLocation);
328 }
329 }
330 if (gpr_index_ < kNumQuickGprArgs) {
331 return gpr_args_ + GprIndexToGprOffset(gpr_index_);
332 }
333 return stack_args_ + (stack_index_ * kBytesStackArgLocation);
334 }
335
336 bool IsSplitLongOrDouble() const {
337 if ((GetBytesPerGprSpillLocation(kRuntimeISA) == 4) ||
338 (GetBytesPerFprSpillLocation(kRuntimeISA) == 4)) {
339 return is_split_long_or_double_;
340 } else {
341 return false; // An optimization for when GPR and FPRs are 64bit.
342 }
343 }
344
345 bool IsParamAReference() const {
346 return GetParamPrimitiveType() == Primitive::kPrimNot;
347 }
348
349 bool IsParamALongOrDouble() const {
350 Primitive::Type type = GetParamPrimitiveType();
351 return type == Primitive::kPrimLong || type == Primitive::kPrimDouble;
352 }
353
354 uint64_t ReadSplitLongParam() const {
355 // The split long is always available on the stack.
356 return *reinterpret_cast<uint64_t*>(stack_args_
357 + stack_index_ * kBytesStackArgLocation);
358 }
359
360 void IncGprIndex() {
361 gpr_index_++;
362 if (kGprFprLockstep) {
363 fpr_index_++;
364 }
365 }
366
367 void IncFprIndex() {
368 fpr_index_++;
369 if (kGprFprLockstep) {
370 gpr_index_++;
371 }
372 }
373
374 void VisitArguments() REQUIRES_SHARED(Locks::mutator_lock_) {
375 // (a) 'stack_args_' should point to the method's first argument.
376 // (b) Whatever the argument type, 'stack_index_' should be
377 // advanced with every visit.
378 gpr_index_ = 0;
379 fpr_index_ = 0;
380 if (kQuickDoubleRegAlignedFloatBackFilled) {
381 fpr_double_index_ = 0;
382 }
383 stack_index_ = 0;
384 if (!is_static_) { // Handle this.
385 cur_type_ = Primitive::kPrimNot;
386 is_split_long_or_double_ = false;
387 Visit();
388 stack_index_++;
389 if (kNumQuickGprArgs > 0) {
390 IncGprIndex();
391 }
392 }
393 for (uint32_t shorty_index = 1; shorty_index < shorty_len_; ++shorty_index) {
394 cur_type_ = Primitive::GetType(shorty_[shorty_index]);
395 switch (cur_type_) {
396 case Primitive::kPrimNot:
397 case Primitive::kPrimBoolean:
398 case Primitive::kPrimByte:
399 case Primitive::kPrimChar:
400 case Primitive::kPrimShort:
401 case Primitive::kPrimInt:
402 is_split_long_or_double_ = false;
403 Visit();
404 stack_index_++;
405 if (gpr_index_ < kNumQuickGprArgs) {
406 IncGprIndex();
407 }
408 break;
409 case Primitive::kPrimFloat:
410 is_split_long_or_double_ = false;
411 Visit();
412 stack_index_++;
413 if (kQuickSoftFloatAbi) {
414 if (gpr_index_ < kNumQuickGprArgs) {
415 IncGprIndex();
416 }
417 } else {
418 if (fpr_index_ + 1 < kNumQuickFprArgs + 1) {
419 IncFprIndex();
420 if (kQuickDoubleRegAlignedFloatBackFilled) {
421 // Double should not overlap with float.
422 // For example, if fpr_index_ = 3, fpr_double_index_ should be at least 4.
423 fpr_double_index_ = std::max(fpr_double_index_, RoundUp(fpr_index_, 2));
424 // Float should not overlap with double.
425 if (fpr_index_ % 2 == 0) {
426 fpr_index_ = std::max(fpr_double_index_, fpr_index_);
427 }
428 } else if (kQuickSkipOddFpRegisters) {
429 IncFprIndex();
430 }
431 }
432 }
433 break;
434 case Primitive::kPrimDouble:
435 case Primitive::kPrimLong:
436 if (kQuickSoftFloatAbi || (cur_type_ == Primitive::kPrimLong)) {
437 if (cur_type_ == Primitive::kPrimLong &&
438 gpr_index_ == 0 &&
439 kAlignPairRegister) {
440 // Currently, this is only for ARM, where we align long parameters with
441 // even-numbered registers by skipping R1 and using R2 instead.
442 IncGprIndex();
443 }
444 is_split_long_or_double_ = (GetBytesPerGprSpillLocation(kRuntimeISA) == 4) &&
445 ((gpr_index_ + 1) == kNumQuickGprArgs);
446 if (!kSplitPairAcrossRegisterAndStack && is_split_long_or_double_) {
447 // We don't want to split this. Pass over this register.
448 gpr_index_++;
449 is_split_long_or_double_ = false;
450 }
451 Visit();
452 if (kBytesStackArgLocation == 4) {
453 stack_index_+= 2;
454 } else {
455 CHECK_EQ(kBytesStackArgLocation, 8U);
456 stack_index_++;
457 }
458 if (gpr_index_ < kNumQuickGprArgs) {
459 IncGprIndex();
460 if (GetBytesPerGprSpillLocation(kRuntimeISA) == 4) {
461 if (gpr_index_ < kNumQuickGprArgs) {
462 IncGprIndex();
463 }
464 }
465 }
466 } else {
467 is_split_long_or_double_ = (GetBytesPerFprSpillLocation(kRuntimeISA) == 4) &&
468 ((fpr_index_ + 1) == kNumQuickFprArgs) && !kQuickDoubleRegAlignedFloatBackFilled;
469 Visit();
470 if (kBytesStackArgLocation == 4) {
471 stack_index_+= 2;
472 } else {
473 CHECK_EQ(kBytesStackArgLocation, 8U);
474 stack_index_++;
475 }
476 if (kQuickDoubleRegAlignedFloatBackFilled) {
477 if (fpr_double_index_ + 2 < kNumQuickFprArgs + 1) {
478 fpr_double_index_ += 2;
479 // Float should not overlap with double.
480 if (fpr_index_ % 2 == 0) {
481 fpr_index_ = std::max(fpr_double_index_, fpr_index_);
482 }
483 }
484 } else if (fpr_index_ + 1 < kNumQuickFprArgs + 1) {
485 IncFprIndex();
486 if (GetBytesPerFprSpillLocation(kRuntimeISA) == 4) {
487 if (fpr_index_ + 1 < kNumQuickFprArgs + 1) {
488 IncFprIndex();
489 }
490 }
491 }
492 }
493 break;
494 default:
495 LOG(FATAL) << "Unexpected type: " << cur_type_ << " in " << shorty_;
496 }
497 }
498 }
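
// Worked example: for a non-static method with shorty "JLD" (long return, Object and double
// parameters), the visit order is 'this' (kPrimNot), the Object (kPrimNot), then the double
// (kPrimDouble). On arm64 these are read from the X1, X2 and D0 spill slots respectively,
// while stack_index_ advances by 1, 1 and 2 slots.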
499
500 protected:
501 const bool is_static_;
502 const char* const shorty_;
503 const uint32_t shorty_len_;
504
505 private:
506 uint8_t* const gpr_args_; // Address of GPR arguments in callee save frame.
507 uint8_t* const fpr_args_; // Address of FPR arguments in callee save frame.
508 uint8_t* const stack_args_; // Address of stack arguments in caller's frame.
509 uint32_t gpr_index_; // Index into spilled GPRs.
510 // Index into spilled FPRs.
511 // In case kQuickDoubleRegAlignedFloatBackFilled, it may index a hole while fpr_double_index_
512 // holds a higher register number.
513 uint32_t fpr_index_;
514 // Index into spilled FPRs for aligned double.
515 // Only used when kQuickDoubleRegAlignedFloatBackFilled. Next available double register indexed in
516 // terms of singles, may be behind fpr_index.
517 uint32_t fpr_double_index_;
518 uint32_t stack_index_; // Index into arguments on the stack.
519 // The current type of argument during VisitArguments.
520 Primitive::Type cur_type_;
521 // Does a 64bit parameter straddle the register and stack arguments?
522 bool is_split_long_or_double_;
523 };
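
// Illustrative sketch (not part of the runtime, class name hypothetical): subclasses override
// Visit() and drive the iteration with VisitArguments(), reading the current argument through
// GetParamPrimitiveType()/GetParamAddress(), e.g.:
//
//   class CountReferenceArgsVisitor final : public QuickArgumentVisitor {
//    public:
//     CountReferenceArgsVisitor(ArtMethod** sp, bool is_static, const char* shorty,
//                               uint32_t shorty_len)
//         : QuickArgumentVisitor(sp, is_static, shorty, shorty_len) {}
//     void Visit() REQUIRES_SHARED(Locks::mutator_lock_) override {
//       if (IsParamAReference()) {
//         ++ref_count_;
//       }
//     }
//     size_t ref_count_ = 0;
//   };
//
// The real visitors below (BuildQuickShadowFrameVisitor, BuildQuickArgumentVisitor,
// RememberForGcArgumentVisitor, ...) follow this pattern.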
524
525 // Returns the 'this' object of a proxy method. This function is only used by StackVisitor. It
526 // allows using the QuickArgumentVisitor constants without moving all of that code into its own module.
527 extern "C" mirror::Object* artQuickGetProxyThisObject(ArtMethod** sp)
528 REQUIRES_SHARED(Locks::mutator_lock_) {
529 return QuickArgumentVisitor::GetProxyThisObjectReference(sp)->AsMirrorPtr();
530 }
531
532 // Visits arguments on the stack placing them into the shadow frame.
533 class BuildQuickShadowFrameVisitor final : public QuickArgumentVisitor {
534 public:
535 BuildQuickShadowFrameVisitor(ArtMethod** sp, bool is_static, const char* shorty,
536 uint32_t shorty_len, ShadowFrame* sf, size_t first_arg_reg) :
537 QuickArgumentVisitor(sp, is_static, shorty, shorty_len), sf_(sf), cur_reg_(first_arg_reg) {}
538
539 void Visit() REQUIRES_SHARED(Locks::mutator_lock_) override;
540
541 private:
542 ShadowFrame* const sf_;
543 uint32_t cur_reg_;
544
545 DISALLOW_COPY_AND_ASSIGN(BuildQuickShadowFrameVisitor);
546 };
547
548 void BuildQuickShadowFrameVisitor::Visit() {
549 Primitive::Type type = GetParamPrimitiveType();
550 switch (type) {
551 case Primitive::kPrimLong: // Fall-through.
552 case Primitive::kPrimDouble:
553 if (IsSplitLongOrDouble()) {
554 sf_->SetVRegLong(cur_reg_, ReadSplitLongParam());
555 } else {
556 sf_->SetVRegLong(cur_reg_, *reinterpret_cast<jlong*>(GetParamAddress()));
557 }
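// Wide values occupy two vregs in the shadow frame: bump the register index once here and once
// more after the switch.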
558 ++cur_reg_;
559 break;
560 case Primitive::kPrimNot: {
561 StackReference<mirror::Object>* stack_ref =
562 reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
563 sf_->SetVRegReference(cur_reg_, stack_ref->AsMirrorPtr());
564 }
565 break;
566 case Primitive::kPrimBoolean: // Fall-through.
567 case Primitive::kPrimByte: // Fall-through.
568 case Primitive::kPrimChar: // Fall-through.
569 case Primitive::kPrimShort: // Fall-through.
570 case Primitive::kPrimInt: // Fall-through.
571 case Primitive::kPrimFloat:
572 sf_->SetVReg(cur_reg_, *reinterpret_cast<jint*>(GetParamAddress()));
573 break;
574 case Primitive::kPrimVoid:
575 LOG(FATAL) << "UNREACHABLE";
576 UNREACHABLE();
577 }
578 ++cur_reg_;
579 }
580
581 // Don't inline. See b/65159206.
582 NO_INLINE
583 static void HandleDeoptimization(JValue* result,
584 ArtMethod* method,
585 ShadowFrame* deopt_frame,
586 ManagedStack* fragment)
587 REQUIRES_SHARED(Locks::mutator_lock_) {
588 // Coming from partial-fragment deopt.
589 Thread* self = Thread::Current();
590 if (kIsDebugBuild) {
591 // Consistency-check: are the methods as expected? We check that the last shadow frame
592 // (the bottom of the call-stack) corresponds to the called method.
593 ShadowFrame* linked = deopt_frame;
594 while (linked->GetLink() != nullptr) {
595 linked = linked->GetLink();
596 }
597 CHECK_EQ(method, linked->GetMethod()) << method->PrettyMethod() << " "
598 << ArtMethod::PrettyMethod(linked->GetMethod());
599 }
600
601 if (VLOG_IS_ON(deopt)) {
602 // Print out the stack to verify that it was a partial-fragment deopt.
603 LOG(INFO) << "Continue-ing from deopt. Stack is:";
604 QuickExceptionHandler::DumpFramesWithType(self, true);
605 }
606
607 ObjPtr<mirror::Throwable> pending_exception;
608 bool from_code = false;
609 DeoptimizationMethodType method_type;
610 self->PopDeoptimizationContext(/* out */ result,
611 /* out */ &pending_exception,
612 /* out */ &from_code,
613 /* out */ &method_type);
614
615 // Push a transition back into managed code onto the linked list in thread.
616 self->PushManagedStackFragment(fragment);
617
618 // Ensure that the stack is still in order.
619 if (kIsDebugBuild) {
620 class EntireStackVisitor : public StackVisitor {
621 public:
622 explicit EntireStackVisitor(Thread* self_in) REQUIRES_SHARED(Locks::mutator_lock_)
623 : StackVisitor(self_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {}
624
625 bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
626 // Nothing to do here. In a debug build, ValidateFrame will do the work in the walking
627 // logic. Just always say we want to continue.
628 return true;
629 }
630 };
631 EntireStackVisitor esv(self);
632 esv.WalkStack();
633 }
634
635 // Restore the exception that was pending before deoptimization then interpret the
636 // deoptimized frames.
637 if (pending_exception != nullptr) {
638 self->SetException(pending_exception);
639 }
640 interpreter::EnterInterpreterFromDeoptimize(self,
641 deopt_frame,
642 result,
643 from_code,
644 DeoptimizationMethodType::kDefault);
645 }
646
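// Entry point for running a method in the interpreter: builds a shadow frame from the quick
// arguments, performs the class initialization check if needed, runs the interpreter and
// returns the result as a uint64_t. Also handles resuming from a partial-fragment
// deoptimization and requesting deoptimization of the caller on return.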
647 extern "C" uint64_t artQuickToInterpreterBridge(ArtMethod* method, Thread* self, ArtMethod** sp)
648 REQUIRES_SHARED(Locks::mutator_lock_) {
649 // Ensure we don't get thread suspension until the object arguments are safely in the shadow
650 // frame.
651 ScopedQuickEntrypointChecks sqec(self);
652
653 if (UNLIKELY(!method->IsInvokable())) {
654 method->ThrowInvocationTimeError();
655 return 0;
656 }
657
658 JValue tmp_value;
659 ShadowFrame* deopt_frame = self->PopStackedShadowFrame(
660 StackedShadowFrameType::kDeoptimizationShadowFrame, false);
661 ManagedStack fragment;
662
663 DCHECK(!method->IsNative()) << method->PrettyMethod();
664 uint32_t shorty_len = 0;
665 ArtMethod* non_proxy_method = method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
666 DCHECK(non_proxy_method->GetCodeItem() != nullptr) << method->PrettyMethod();
667 CodeItemDataAccessor accessor(non_proxy_method->DexInstructionData());
668 const char* shorty = non_proxy_method->GetShorty(&shorty_len);
669
670 JValue result;
671 bool force_frame_pop = false;
672
673 if (UNLIKELY(deopt_frame != nullptr)) {
674 HandleDeoptimization(&result, method, deopt_frame, &fragment);
675 } else {
676 const char* old_cause = self->StartAssertNoThreadSuspension(
677 "Building interpreter shadow frame");
678 uint16_t num_regs = accessor.RegistersSize();
679 // No last shadow coming from quick.
680 ShadowFrameAllocaUniquePtr shadow_frame_unique_ptr =
681 CREATE_SHADOW_FRAME(num_regs, /* link= */ nullptr, method, /* dex_pc= */ 0);
682 ShadowFrame* shadow_frame = shadow_frame_unique_ptr.get();
683 size_t first_arg_reg = accessor.RegistersSize() - accessor.InsSize();
684 BuildQuickShadowFrameVisitor shadow_frame_builder(sp, method->IsStatic(), shorty, shorty_len,
685 shadow_frame, first_arg_reg);
686 shadow_frame_builder.VisitArguments();
687 // Push a transition back into managed code onto the linked list in thread.
688 self->PushManagedStackFragment(&fragment);
689 self->PushShadowFrame(shadow_frame);
690 self->EndAssertNoThreadSuspension(old_cause);
691
692 if (NeedsClinitCheckBeforeCall(method)) {
693 ObjPtr<mirror::Class> declaring_class = method->GetDeclaringClass();
694 if (UNLIKELY(!declaring_class->IsVisiblyInitialized())) {
695 // Ensure static method's class is initialized.
696 StackHandleScope<1> hs(self);
697 Handle<mirror::Class> h_class(hs.NewHandle(declaring_class));
698 if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(self, h_class, true, true)) {
699 DCHECK(Thread::Current()->IsExceptionPending()) << method->PrettyMethod();
700 self->PopManagedStackFragment(fragment);
701 return 0;
702 }
703 }
704 }
705
706 result = interpreter::EnterInterpreterFromEntryPoint(self, accessor, shadow_frame);
707 force_frame_pop = shadow_frame->GetForcePopFrame();
708 }
709
710 // Pop transition.
711 self->PopManagedStackFragment(fragment);
712
713 // Request a stack deoptimization if needed
714 ArtMethod* caller = QuickArgumentVisitor::GetCallingMethod(sp);
715 uintptr_t caller_pc = QuickArgumentVisitor::GetCallingPc(sp);
716 // If caller_pc is the instrumentation exit stub, the stub will check to see if deoptimization
717 // should be done and it knows the real return pc. NB If the upcall is null we don't need to do
718 // anything. This can happen during shutdown or early startup.
719 if (UNLIKELY(
720 caller != nullptr &&
721 caller_pc != reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc()) &&
722 (self->IsForceInterpreter() || Dbg::IsForcedInterpreterNeededForUpcall(self, caller)))) {
723 if (!Runtime::Current()->IsAsyncDeoptimizeable(caller_pc)) {
724 LOG(WARNING) << "Got a deoptimization request on un-deoptimizable method "
725 << caller->PrettyMethod();
726 } else {
727 VLOG(deopt) << "Forcing deoptimization on return from method " << method->PrettyMethod()
728 << " to " << caller->PrettyMethod()
729 << (force_frame_pop ? " for frame-pop" : "");
730 DCHECK(!force_frame_pop || result.GetJ() == 0) << "Force frame pop should have no result.";
731 if (force_frame_pop && self->GetException() != nullptr) {
732 LOG(WARNING) << "Suppressing exception for instruction-retry: "
733 << self->GetException()->Dump();
734 }
735 // Push the context of the deoptimization stack so we can restore the return value and the
736 // exception before executing the deoptimized frames.
737 self->PushDeoptimizationContext(
738 result,
739 shorty[0] == 'L' || shorty[0] == '[', /* class or array */
740 force_frame_pop ? nullptr : self->GetException(),
741 /* from_code= */ false,
742 DeoptimizationMethodType::kDefault);
743
744 // Set special exception to cause deoptimization.
745 self->SetException(Thread::GetDeoptimizationException());
746 }
747 }
748
749 // No need to restore the args since the method has already been run by the interpreter.
750 return result.GetJ();
751 }
752
753 // Visits arguments on the stack, placing them into the args vector; Object* arguments are
754 // converted to jobjects.
755 class BuildQuickArgumentVisitor final : public QuickArgumentVisitor {
756 public:
757 BuildQuickArgumentVisitor(ArtMethod** sp, bool is_static, const char* shorty, uint32_t shorty_len,
758 ScopedObjectAccessUnchecked* soa, std::vector<jvalue>* args) :
759 QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa), args_(args) {}
760
761 void Visit() REQUIRES_SHARED(Locks::mutator_lock_) override;
762
763 private:
764 ScopedObjectAccessUnchecked* const soa_;
765 std::vector<jvalue>* const args_;
766
767 DISALLOW_COPY_AND_ASSIGN(BuildQuickArgumentVisitor);
768 };
769
770 void BuildQuickArgumentVisitor::Visit() {
771 jvalue val;
772 Primitive::Type type = GetParamPrimitiveType();
773 switch (type) {
774 case Primitive::kPrimNot: {
775 StackReference<mirror::Object>* stack_ref =
776 reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
777 val.l = soa_->AddLocalReference<jobject>(stack_ref->AsMirrorPtr());
778 break;
779 }
780 case Primitive::kPrimLong: // Fall-through.
781 case Primitive::kPrimDouble:
782 if (IsSplitLongOrDouble()) {
783 val.j = ReadSplitLongParam();
784 } else {
785 val.j = *reinterpret_cast<jlong*>(GetParamAddress());
786 }
787 break;
788 case Primitive::kPrimBoolean: // Fall-through.
789 case Primitive::kPrimByte: // Fall-through.
790 case Primitive::kPrimChar: // Fall-through.
791 case Primitive::kPrimShort: // Fall-through.
792 case Primitive::kPrimInt: // Fall-through.
793 case Primitive::kPrimFloat:
794 val.i = *reinterpret_cast<jint*>(GetParamAddress());
795 break;
796 case Primitive::kPrimVoid:
797 LOG(FATAL) << "UNREACHABLE";
798 UNREACHABLE();
799 }
800 args_->push_back(val);
801 }
802
803 // Handler for invocation on proxy methods. On entry a frame will exist for the proxy object method
804 // which is responsible for recording callee save registers. We explicitly place into jobjects the
805 // incoming reference arguments (so they survive GC). We invoke the invocation handler, which is a
806 // field within the proxy object, which will box the primitive arguments and deal with error cases.
807 extern "C" uint64_t artQuickProxyInvokeHandler(
808 ArtMethod* proxy_method, mirror::Object* receiver, Thread* self, ArtMethod** sp)
809 REQUIRES_SHARED(Locks::mutator_lock_) {
810 DCHECK(proxy_method->IsProxyMethod()) << proxy_method->PrettyMethod();
811 DCHECK(receiver->GetClass()->IsProxyClass()) << proxy_method->PrettyMethod();
812 // Ensure we don't get thread suspension until the object arguments are safely in jobjects.
813 const char* old_cause =
814 self->StartAssertNoThreadSuspension("Adding to IRT proxy object arguments");
815 // Register the top of the managed stack, making stack crawlable.
816 DCHECK_EQ((*sp), proxy_method) << proxy_method->PrettyMethod();
817 self->VerifyStack();
818 // Start new JNI local reference state.
819 JNIEnvExt* env = self->GetJniEnv();
820 ScopedObjectAccessUnchecked soa(env);
821 ScopedJniEnvLocalRefState env_state(env);
822 // Create local ref. copies of proxy method and the receiver.
823 jobject rcvr_jobj = soa.AddLocalReference<jobject>(receiver);
824
825 // Place the arguments into the args vector and remove the receiver.
826 ArtMethod* non_proxy_method = proxy_method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
827 CHECK(!non_proxy_method->IsStatic()) << proxy_method->PrettyMethod() << " "
828 << non_proxy_method->PrettyMethod();
829 std::vector<jvalue> args;
830 uint32_t shorty_len = 0;
831 const char* shorty = non_proxy_method->GetShorty(&shorty_len);
832 BuildQuickArgumentVisitor local_ref_visitor(
833 sp, /* is_static= */ false, shorty, shorty_len, &soa, &args);
834
835 local_ref_visitor.VisitArguments();
836 DCHECK_GT(args.size(), 0U) << proxy_method->PrettyMethod();
837 args.erase(args.begin());
838
839 // Convert proxy method into expected interface method.
840 ArtMethod* interface_method = proxy_method->FindOverriddenMethod(kRuntimePointerSize);
841 DCHECK(interface_method != nullptr) << proxy_method->PrettyMethod();
842 DCHECK(!interface_method->IsProxyMethod()) << interface_method->PrettyMethod();
843 self->EndAssertNoThreadSuspension(old_cause);
844 DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
845 DCHECK(!Runtime::Current()->IsActiveTransaction());
846 ObjPtr<mirror::Method> interface_reflect_method =
847 mirror::Method::CreateFromArtMethod<kRuntimePointerSize>(soa.Self(), interface_method);
848 if (interface_reflect_method == nullptr) {
849 soa.Self()->AssertPendingOOMException();
850 return 0;
851 }
852 jobject interface_method_jobj = soa.AddLocalReference<jobject>(interface_reflect_method);
853
854 // All naked Object*s should now be in jobjects, so it's safe to go into the main invoke code
855 // that performs allocations or instrumentation events.
856 instrumentation::Instrumentation* instr = Runtime::Current()->GetInstrumentation();
857 if (instr->HasMethodEntryListeners()) {
858 instr->MethodEnterEvent(soa.Self(),
859 soa.Decode<mirror::Object>(rcvr_jobj),
860 proxy_method,
861 0);
862 if (soa.Self()->IsExceptionPending()) {
863 instr->MethodUnwindEvent(self,
864 soa.Decode<mirror::Object>(rcvr_jobj),
865 proxy_method,
866 0);
867 return 0;
868 }
869 }
870 JValue result = InvokeProxyInvocationHandler(soa, shorty, rcvr_jobj, interface_method_jobj, args);
871 if (soa.Self()->IsExceptionPending()) {
872 if (instr->HasMethodUnwindListeners()) {
873 instr->MethodUnwindEvent(self,
874 soa.Decode<mirror::Object>(rcvr_jobj),
875 proxy_method,
876 0);
877 }
878 } else if (instr->HasMethodExitListeners()) {
879 instr->MethodExitEvent(self,
880 soa.Decode<mirror::Object>(rcvr_jobj),
881 proxy_method,
882 0,
883 {},
884 result);
885 }
886 return result.GetJ();
887 }
888
889 // Visitor returning a reference argument at a given position in a Quick stack frame.
890 // NOTE: Only used for testing purposes.
891 class GetQuickReferenceArgumentAtVisitor final : public QuickArgumentVisitor {
892 public:
893 GetQuickReferenceArgumentAtVisitor(ArtMethod** sp,
894 const char* shorty,
895 uint32_t shorty_len,
896 size_t arg_pos)
897 : QuickArgumentVisitor(sp, /* is_static= */ false, shorty, shorty_len),
898 cur_pos_(0u),
899 arg_pos_(arg_pos),
900 ref_arg_(nullptr) {
901 CHECK_LT(arg_pos, shorty_len) << "Argument position greater than the number of arguments";
902 }
903
904 void Visit() REQUIRES_SHARED(Locks::mutator_lock_) override {
905 if (cur_pos_ == arg_pos_) {
906 Primitive::Type type = GetParamPrimitiveType();
907 CHECK_EQ(type, Primitive::kPrimNot) << "Argument at searched position is not a reference";
908 ref_arg_ = reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
909 }
910 ++cur_pos_;
911 }
912
913 StackReference<mirror::Object>* GetReferenceArgument() {
914 return ref_arg_;
915 }
916
917 private:
918 // The position of the currently visited argument.
919 size_t cur_pos_;
920 // The position of the searched argument.
921 const size_t arg_pos_;
922 // The reference argument, if found.
923 StackReference<mirror::Object>* ref_arg_;
924
925 DISALLOW_COPY_AND_ASSIGN(GetQuickReferenceArgumentAtVisitor);
926 };
927
928 // Returns the reference argument at position `arg_pos` in the Quick stack frame at address `sp`.
929 // NOTE: Only used for testing purposes.
930 extern "C" StackReference<mirror::Object>* artQuickGetProxyReferenceArgumentAt(size_t arg_pos,
931 ArtMethod** sp)
932 REQUIRES_SHARED(Locks::mutator_lock_) {
933 ArtMethod* proxy_method = *sp;
934 ArtMethod* non_proxy_method = proxy_method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
935 CHECK(!non_proxy_method->IsStatic())
936 << proxy_method->PrettyMethod() << " " << non_proxy_method->PrettyMethod();
937 uint32_t shorty_len = 0;
938 const char* shorty = non_proxy_method->GetShorty(&shorty_len);
939 GetQuickReferenceArgumentAtVisitor ref_arg_visitor(sp, shorty, shorty_len, arg_pos);
940 ref_arg_visitor.VisitArguments();
941 StackReference<mirror::Object>* ref_arg = ref_arg_visitor.GetReferenceArgument();
942 return ref_arg;
943 }
944
945 // Visitor returning all the reference arguments in a Quick stack frame.
946 class GetQuickReferenceArgumentsVisitor final : public QuickArgumentVisitor {
947 public:
948 GetQuickReferenceArgumentsVisitor(ArtMethod** sp,
949 bool is_static,
950 const char* shorty,
951 uint32_t shorty_len)
952 : QuickArgumentVisitor(sp, is_static, shorty, shorty_len) {}
953
954 void Visit() REQUIRES_SHARED(Locks::mutator_lock_) override {
955 Primitive::Type type = GetParamPrimitiveType();
956 if (type == Primitive::kPrimNot) {
957 StackReference<mirror::Object>* ref_arg =
958 reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
959 ref_args_.push_back(ref_arg);
960 }
961 }
962
963 std::vector<StackReference<mirror::Object>*> GetReferenceArguments() {
964 return ref_args_;
965 }
966
967 private:
968 // The reference arguments.
969 std::vector<StackReference<mirror::Object>*> ref_args_;
970
971 DISALLOW_COPY_AND_ASSIGN(GetQuickReferenceArgumentsVisitor);
972 };
973
974 // Returns all reference arguments in the Quick stack frame at address `sp`.
975 std::vector<StackReference<mirror::Object>*> GetProxyReferenceArguments(ArtMethod** sp)
976 REQUIRES_SHARED(Locks::mutator_lock_) {
977 ArtMethod* proxy_method = *sp;
978 ArtMethod* non_proxy_method = proxy_method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
979 CHECK(!non_proxy_method->IsStatic())
980 << proxy_method->PrettyMethod() << " " << non_proxy_method->PrettyMethod();
981 uint32_t shorty_len = 0;
982 const char* shorty = non_proxy_method->GetShorty(&shorty_len);
983 GetQuickReferenceArgumentsVisitor ref_args_visitor(sp, /*is_static=*/ false, shorty, shorty_len);
984 ref_args_visitor.VisitArguments();
985 std::vector<StackReference<mirror::Object>*> ref_args = ref_args_visitor.GetReferenceArguments();
986 return ref_args;
987 }
988
989 // Reads object references held in arguments from quick frames and places them in JNI local
990 // references, so they don't get garbage collected.
991 class RememberForGcArgumentVisitor final : public QuickArgumentVisitor {
992 public:
993 RememberForGcArgumentVisitor(ArtMethod** sp, bool is_static, const char* shorty,
994 uint32_t shorty_len, ScopedObjectAccessUnchecked* soa) :
995 QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa) {}
996
997 void Visit() REQUIRES_SHARED(Locks::mutator_lock_) override;
998
999 void FixupReferences() REQUIRES_SHARED(Locks::mutator_lock_);
1000
1001 private:
1002 ScopedObjectAccessUnchecked* const soa_;
1003 // References which we must update when exiting in case the GC moved the objects.
1004 std::vector<std::pair<jobject, StackReference<mirror::Object>*> > references_;
1005
1006 DISALLOW_COPY_AND_ASSIGN(RememberForGcArgumentVisitor);
1007 };
1008
1009 void RememberForGcArgumentVisitor::Visit() {
1010 if (IsParamAReference()) {
1011 StackReference<mirror::Object>* stack_ref =
1012 reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
1013 jobject reference =
1014 soa_->AddLocalReference<jobject>(stack_ref->AsMirrorPtr());
1015 references_.push_back(std::make_pair(reference, stack_ref));
1016 }
1017 }
1018
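// A GC may move the referenced objects while the trampolines that use this visitor run (method
// resolution and instrumentation can allocate and suspend). The local references created in
// Visit() act as GC roots, so FixupReferences() writes any moved objects back into the spilled
// argument slots.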
1019 void RememberForGcArgumentVisitor::FixupReferences() {
1020 // Fixup any references which may have changed.
1021 for (const auto& pair : references_) {
1022 pair.second->Assign(soa_->Decode<mirror::Object>(pair.first));
1023 soa_->Env()->DeleteLocalRef(pair.first);
1024 }
1025 }
1026
1027 extern "C" const void* artInstrumentationMethodEntryFromCode(ArtMethod* method,
1028 mirror::Object* this_object,
1029 Thread* self,
1030 ArtMethod** sp)
1031 REQUIRES_SHARED(Locks::mutator_lock_) {
1032 const void* result;
1033 // Instrumentation changes the stack. Thus, when exiting, the stack cannot be verified, so skip
1034 // that part.
1035 ScopedQuickEntrypointChecks sqec(self, kIsDebugBuild, false);
1036 instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
1037 DCHECK(!method->IsProxyMethod())
1038 << "Proxy method " << method->PrettyMethod()
1039 << " (declaring class: " << method->GetDeclaringClass()->PrettyClass() << ")"
1040 << " should not hit instrumentation entrypoint.";
1041 if (instrumentation->IsDeoptimized(method)) {
1042 result = GetQuickToInterpreterBridge();
1043 } else {
1044 // This will get the entry point either from the oat file, the JIT or the appropriate bridge
1045 // method if none of those can be found.
1046 result = instrumentation->GetCodeForInvoke(method);
1047 jit::Jit* jit = Runtime::Current()->GetJit();
1048 DCHECK_NE(result, GetQuickInstrumentationEntryPoint()) << method->PrettyMethod();
1049 DCHECK(jit == nullptr ||
1050 // Native methods come through here in Interpreter entrypoints. We might not have
1051 // disabled jit-gc but that is fine since we won't return jit-code for native methods.
1052 method->IsNative() ||
1053 !jit->GetCodeCache()->GetGarbageCollectCode());
1054 DCHECK(!method->IsNative() ||
1055 jit == nullptr ||
1056 !jit->GetCodeCache()->ContainsPc(result))
1057 << method->PrettyMethod() << " code will jump to possibly cleaned up jit code!";
1058 }
1059
1060 bool interpreter_entry = (result == GetQuickToInterpreterBridge());
1061 bool is_static = method->IsStatic();
1062 uint32_t shorty_len;
1063 const char* shorty =
1064 method->GetInterfaceMethodIfProxy(kRuntimePointerSize)->GetShorty(&shorty_len);
1065
1066 ScopedObjectAccessUnchecked soa(self);
1067 RememberForGcArgumentVisitor visitor(sp, is_static, shorty, shorty_len, &soa);
1068 visitor.VisitArguments();
1069
1070 instrumentation->PushInstrumentationStackFrame(self,
1071 is_static ? nullptr : this_object,
1072 method,
1073 reinterpret_cast<uintptr_t>(
1074 QuickArgumentVisitor::GetCallingPcAddr(sp)),
1075 QuickArgumentVisitor::GetCallingPc(sp),
1076 interpreter_entry);
1077
1078 visitor.FixupReferences();
1079 if (UNLIKELY(self->IsExceptionPending())) {
1080 return nullptr;
1081 }
1082 CHECK(result != nullptr) << method->PrettyMethod();
1083 return result;
1084 }
1085
1086 extern "C" TwoWordReturn artInstrumentationMethodExitFromCode(Thread* self,
1087 ArtMethod** sp,
1088 uint64_t* gpr_result,
1089 uint64_t* fpr_result)
1090 REQUIRES_SHARED(Locks::mutator_lock_) {
1091 DCHECK_EQ(reinterpret_cast<uintptr_t>(self), reinterpret_cast<uintptr_t>(Thread::Current()));
1092 CHECK(gpr_result != nullptr);
1093 CHECK(fpr_result != nullptr);
1094 // Instrumentation exit stub must not be entered with a pending exception.
1095 CHECK(!self->IsExceptionPending()) << "Enter instrumentation exit stub with pending exception "
1096 << self->GetException()->Dump();
1097 // Compute address of return PC and check that it currently holds 0.
1098 constexpr size_t return_pc_offset =
1099 RuntimeCalleeSaveFrame::GetReturnPcOffset(CalleeSaveType::kSaveEverything);
1100 uintptr_t* return_pc_addr = reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(sp) +
1101 return_pc_offset);
1102 CHECK_EQ(*return_pc_addr, 0U);
1103
1104 // Pop the frame, filling in the return pc. The low half of the return value is 0 when
1105 // deoptimization shouldn't be performed, with the high half holding the return address. When
1106 // deoptimization should be performed, the low half is zero and the high half holds the
1107 // address of the deoptimization entry point.
1108 instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
1109 TwoWordReturn return_or_deoptimize_pc = instrumentation->PopInstrumentationStackFrame(
1110 self, return_pc_addr, gpr_result, fpr_result);
1111 if (self->IsExceptionPending() || self->ObserveAsyncException()) {
1112 return GetTwoWordFailureValue();
1113 }
1114 return return_or_deoptimize_pc;
1115 }
1116
1117 static std::string DumpInstruction(ArtMethod* method, uint32_t dex_pc)
1118 REQUIRES_SHARED(Locks::mutator_lock_) {
1119 if (dex_pc == static_cast<uint32_t>(-1)) {
1120 CHECK(method == jni::DecodeArtMethod(WellKnownClasses::java_lang_String_charAt));
1121 return "<native>";
1122 } else {
1123 CodeItemInstructionAccessor accessor = method->DexInstructions();
1124 CHECK_LT(dex_pc, accessor.InsnsSizeInCodeUnits());
1125 return accessor.InstructionAt(dex_pc).DumpString(method->GetDexFile());
1126 }
1127 }
1128
1129 static void DumpB74410240ClassData(ObjPtr<mirror::Class> klass)
1130 REQUIRES_SHARED(Locks::mutator_lock_) {
1131 std::string storage;
1132 const char* descriptor = klass->GetDescriptor(&storage);
1133 LOG(FATAL_WITHOUT_ABORT) << " " << DescribeLoaders(klass->GetClassLoader(), descriptor);
1134 const OatDexFile* oat_dex_file = klass->GetDexFile().GetOatDexFile();
1135 if (oat_dex_file != nullptr) {
1136 const OatFile* oat_file = oat_dex_file->GetOatFile();
1137 const char* dex2oat_cmdline =
1138 oat_file->GetOatHeader().GetStoreValueByKey(OatHeader::kDex2OatCmdLineKey);
1139 LOG(FATAL_WITHOUT_ABORT) << " OatFile: " << oat_file->GetLocation()
1140 << "; " << (dex2oat_cmdline != nullptr ? dex2oat_cmdline : "<not recorded>");
1141 }
1142 }
1143
1144 static void DumpB74410240DebugData(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) {
1145 // Mimic the search for the caller and dump some data while doing so.
1146 LOG(FATAL_WITHOUT_ABORT) << "Dumping debugging data, please attach a bugreport to b/74410240.";
1147
1148 constexpr CalleeSaveType type = CalleeSaveType::kSaveRefsAndArgs;
1149 CHECK_EQ(*sp, Runtime::Current()->GetCalleeSaveMethod(type));
1150
1151 constexpr size_t callee_frame_size = RuntimeCalleeSaveFrame::GetFrameSize(type);
1152 auto** caller_sp = reinterpret_cast<ArtMethod**>(
1153 reinterpret_cast<uintptr_t>(sp) + callee_frame_size);
1154 constexpr size_t callee_return_pc_offset = RuntimeCalleeSaveFrame::GetReturnPcOffset(type);
1155 uintptr_t caller_pc = *reinterpret_cast<uintptr_t*>(
1156 (reinterpret_cast<uint8_t*>(sp) + callee_return_pc_offset));
1157 ArtMethod* outer_method = *caller_sp;
1158
1159 if (UNLIKELY(caller_pc == reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc()))) {
1160 LOG(FATAL_WITHOUT_ABORT) << "Method: " << outer_method->PrettyMethod()
1161 << " native pc: " << caller_pc << " Instrumented!";
1162 return;
1163 }
1164
1165 const OatQuickMethodHeader* current_code = outer_method->GetOatQuickMethodHeader(caller_pc);
1166 CHECK(current_code != nullptr);
1167 CHECK(current_code->IsOptimized());
1168 uintptr_t native_pc_offset = current_code->NativeQuickPcOffset(caller_pc);
1169 CodeInfo code_info(current_code);
1170 StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
1171 CHECK(stack_map.IsValid());
1172 uint32_t dex_pc = stack_map.GetDexPc();
1173
1174 // Log the outer method and its associated dex file and class table pointer which can be used
1175 // to find out if the inlined methods were defined by other dex file(s) or class loader(s).
1176 ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
1177 LOG(FATAL_WITHOUT_ABORT) << "Outer: " << outer_method->PrettyMethod()
1178 << " native pc: " << caller_pc
1179 << " dex pc: " << dex_pc
1180 << " dex file: " << outer_method->GetDexFile()->GetLocation()
1181 << " class table: " << class_linker->ClassTableForClassLoader(outer_method->GetClassLoader());
1182 DumpB74410240ClassData(outer_method->GetDeclaringClass());
1183 LOG(FATAL_WITHOUT_ABORT) << " instruction: " << DumpInstruction(outer_method, dex_pc);
1184
1185 ArtMethod* caller = outer_method;
1186 BitTableRange<InlineInfo> inline_infos = code_info.GetInlineInfosOf(stack_map);
1187 for (InlineInfo inline_info : inline_infos) {
1188 const char* tag = "";
1189 dex_pc = inline_info.GetDexPc();
1190 if (inline_info.EncodesArtMethod()) {
1191 tag = "encoded ";
1192 caller = inline_info.GetArtMethod();
1193 } else {
1194 uint32_t method_index = code_info.GetMethodIndexOf(inline_info);
1195 if (dex_pc == static_cast<uint32_t>(-1)) {
1196 tag = "special ";
1197 CHECK(inline_info.Equals(inline_infos.back()));
1198 caller = jni::DecodeArtMethod(WellKnownClasses::java_lang_String_charAt);
1199 CHECK_EQ(caller->GetDexMethodIndex(), method_index);
1200 } else {
1201 ObjPtr<mirror::DexCache> dex_cache = caller->GetDexCache();
1202 ObjPtr<mirror::ClassLoader> class_loader = caller->GetClassLoader();
1203 caller = class_linker->LookupResolvedMethod(method_index, dex_cache, class_loader);
1204 CHECK(caller != nullptr);
1205 }
1206 }
1207 LOG(FATAL_WITHOUT_ABORT) << "InlineInfo #" << inline_info.Row()
1208 << ": " << tag << caller->PrettyMethod()
1209 << " dex pc: " << dex_pc
1210 << " dex file: " << caller->GetDexFile()->GetLocation()
1211 << " class table: "
1212 << class_linker->ClassTableForClassLoader(caller->GetClassLoader());
1213 DumpB74410240ClassData(caller->GetDeclaringClass());
1214 LOG(FATAL_WITHOUT_ABORT) << " instruction: " << DumpInstruction(caller, dex_pc);
1215 }
1216 }
1217
1218 // Lazily resolve a method for quick. Called by stub code.
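// The trampoline (1) recovers the invoke type and method index from the caller's dex
// instruction (or uses the method passed in when it is already known), (2) resolves the method,
// updating any .bss entry, (3) refines the target using the receiver's class for virtual and
// interface calls or the superclass for super calls, (4) ensures the declaring class is
// initialized for static calls, and (5) stores the resolved method at *sp so the stub can pass
// it as the first argument when jumping to the returned code.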
1219 extern "C" const void* artQuickResolutionTrampoline(
1220 ArtMethod* called, mirror::Object* receiver, Thread* self, ArtMethod** sp)
1221 REQUIRES_SHARED(Locks::mutator_lock_) {
1222 // The resolution trampoline stashes the resolved method into the callee-save frame to transport
1223 // it. Thus, when exiting, the stack cannot be verified (as the resolved method most likely
1224 // does not have the same stack layout as the callee-save method).
1225 ScopedQuickEntrypointChecks sqec(self, kIsDebugBuild, false);
1226 // Start new JNI local reference state
1227 JNIEnvExt* env = self->GetJniEnv();
1228 ScopedObjectAccessUnchecked soa(env);
1229 ScopedJniEnvLocalRefState env_state(env);
1230 const char* old_cause = self->StartAssertNoThreadSuspension("Quick method resolution set up");
1231
1232 // Compute details about the called method (avoid GCs)
1233 ClassLinker* linker = Runtime::Current()->GetClassLinker();
1234 InvokeType invoke_type;
1235 MethodReference called_method(nullptr, 0);
1236 const bool called_method_known_on_entry = !called->IsRuntimeMethod();
1237 ArtMethod* caller = nullptr;
1238 if (!called_method_known_on_entry) {
1239 caller = QuickArgumentVisitor::GetCallingMethod(sp);
1240 called_method.dex_file = caller->GetDexFile();
1241
1242 {
1243 uint32_t dex_pc = QuickArgumentVisitor::GetCallingDexPc(sp);
1244 CodeItemInstructionAccessor accessor(caller->DexInstructions());
1245 CHECK_LT(dex_pc, accessor.InsnsSizeInCodeUnits());
1246 const Instruction& instr = accessor.InstructionAt(dex_pc);
1247 Instruction::Code instr_code = instr.Opcode();
1248 bool is_range;
1249 switch (instr_code) {
1250 case Instruction::INVOKE_DIRECT:
1251 invoke_type = kDirect;
1252 is_range = false;
1253 break;
1254 case Instruction::INVOKE_DIRECT_RANGE:
1255 invoke_type = kDirect;
1256 is_range = true;
1257 break;
1258 case Instruction::INVOKE_STATIC:
1259 invoke_type = kStatic;
1260 is_range = false;
1261 break;
1262 case Instruction::INVOKE_STATIC_RANGE:
1263 invoke_type = kStatic;
1264 is_range = true;
1265 break;
1266 case Instruction::INVOKE_SUPER:
1267 invoke_type = kSuper;
1268 is_range = false;
1269 break;
1270 case Instruction::INVOKE_SUPER_RANGE:
1271 invoke_type = kSuper;
1272 is_range = true;
1273 break;
1274 case Instruction::INVOKE_VIRTUAL:
1275 invoke_type = kVirtual;
1276 is_range = false;
1277 break;
1278 case Instruction::INVOKE_VIRTUAL_RANGE:
1279 invoke_type = kVirtual;
1280 is_range = true;
1281 break;
1282 case Instruction::INVOKE_INTERFACE:
1283 invoke_type = kInterface;
1284 is_range = false;
1285 break;
1286 case Instruction::INVOKE_INTERFACE_RANGE:
1287 invoke_type = kInterface;
1288 is_range = true;
1289 break;
1290 default:
1291 DumpB74410240DebugData(sp);
1292 LOG(FATAL) << "Unexpected call into trampoline: " << instr.DumpString(nullptr);
1293 UNREACHABLE();
1294 }
1295 called_method.index = (is_range) ? instr.VRegB_3rc() : instr.VRegB_35c();
1296 VLOG(dex) << "Accessed dex file for invoke " << invoke_type << " "
1297 << called_method.index;
1298 }
1299 } else {
1300 invoke_type = kStatic;
1301 called_method.dex_file = called->GetDexFile();
1302 called_method.index = called->GetDexMethodIndex();
1303 }
1304 uint32_t shorty_len;
1305 const char* shorty =
1306 called_method.dex_file->GetMethodShorty(called_method.GetMethodId(), &shorty_len);
1307 RememberForGcArgumentVisitor visitor(sp, invoke_type == kStatic, shorty, shorty_len, &soa);
1308 visitor.VisitArguments();
1309 self->EndAssertNoThreadSuspension(old_cause);
1310 const bool virtual_or_interface = invoke_type == kVirtual || invoke_type == kInterface;
1311 // Resolve method filling in dex cache.
1312 if (!called_method_known_on_entry) {
1313 StackHandleScope<1> hs(self);
1314 mirror::Object* fake_receiver = nullptr;
1315 HandleWrapper<mirror::Object> h_receiver(
1316 hs.NewHandleWrapper(virtual_or_interface ? &receiver : &fake_receiver));
1317 DCHECK_EQ(caller->GetDexFile(), called_method.dex_file);
1318 called = linker->ResolveMethod<ClassLinker::ResolveMode::kCheckICCEAndIAE>(
1319 self, called_method.index, caller, invoke_type);
1320
1321 // If successful, update .bss entry in oat file if any.
1322 if (called != nullptr) {
1323 MaybeUpdateBssMethodEntry(called, called_method);
1324 }
1325 }
1326 const void* code = nullptr;
1327 if (LIKELY(!self->IsExceptionPending())) {
1328 // Incompatible class change should have been handled in resolve method.
1329 CHECK(!called->CheckIncompatibleClassChange(invoke_type))
1330 << called->PrettyMethod() << " " << invoke_type;
1331 if (virtual_or_interface || invoke_type == kSuper) {
1332 // Refine called method based on receiver for kVirtual/kInterface, and
1333 // caller for kSuper.
1334 ArtMethod* orig_called = called;
1335 if (invoke_type == kVirtual) {
1336 CHECK(receiver != nullptr) << invoke_type;
1337 called = receiver->GetClass()->FindVirtualMethodForVirtual(called, kRuntimePointerSize);
1338 } else if (invoke_type == kInterface) {
1339 CHECK(receiver != nullptr) << invoke_type;
1340 called = receiver->GetClass()->FindVirtualMethodForInterface(called, kRuntimePointerSize);
1341 } else {
1342 DCHECK_EQ(invoke_type, kSuper);
1343 CHECK(caller != nullptr) << invoke_type;
1344 ObjPtr<mirror::Class> ref_class = linker->LookupResolvedType(
1345 caller->GetDexFile()->GetMethodId(called_method.index).class_idx_, caller);
1346 if (ref_class->IsInterface()) {
1347 called = ref_class->FindVirtualMethodForInterfaceSuper(called, kRuntimePointerSize);
1348 } else {
1349 called = caller->GetDeclaringClass()->GetSuperClass()->GetVTableEntry(
1350 called->GetMethodIndex(), kRuntimePointerSize);
1351 }
1352 }
1353
1354 CHECK(called != nullptr) << orig_called->PrettyMethod() << " "
1355 << mirror::Object::PrettyTypeOf(receiver) << " "
1356 << invoke_type << " " << orig_called->GetVtableIndex();
1357 }
1358
1359 // Static invokes need class initialization check but instance invokes can proceed even if
1360 // the class is erroneous, i.e. in the edge case of escaping instances of erroneous classes.
1361 bool success = true;
1362 ObjPtr<mirror::Class> called_class = called->GetDeclaringClass();
1363 if (NeedsClinitCheckBeforeCall(called) && !called_class->IsVisiblyInitialized()) {
1364 // Ensure that the called method's class is initialized.
1365 StackHandleScope<1> hs(soa.Self());
1366 HandleWrapperObjPtr<mirror::Class> h_called_class(hs.NewHandleWrapper(&called_class));
1367 success = linker->EnsureInitialized(soa.Self(), h_called_class, true, true);
1368 }
1369 if (success) {
1370 code = called->GetEntryPointFromQuickCompiledCode();
1371 if (linker->IsQuickResolutionStub(code)) {
1372 DCHECK_EQ(invoke_type, kStatic);
1373 // Go to JIT or oat and grab code.
1374 code = linker->GetQuickOatCodeFor(called);
1375 }
1376 if (linker->ShouldUseInterpreterEntrypoint(called, code)) {
1377 code = GetQuickToInterpreterBridge();
1378 }
1379 } else {
1380 DCHECK(called_class->IsErroneous());
1381 DCHECK(self->IsExceptionPending());
1382 }
1383 }
1384 CHECK_EQ(code == nullptr, self->IsExceptionPending());
1385 // Fix up any locally saved objects that may have moved during a GC.
1386 visitor.FixupReferences();
1387 // Place called method in callee-save frame to be placed as first argument to quick method.
1388 *sp = called;
1389
1390 return code;
1391 }
1392
1393 /*
1394 * This class uses a couple of observations to unite the different calling conventions through
1395 * a few constants.
1396 *
1397 * 1) Number of registers used for passing is normally even, so counting down has no penalty for
1398 * possible alignment.
1399 * 2) Known 64b architectures store 8B units on the stack, both for integral and floating point
1400 * types, so using uintptr_t is OK. This also means that we can use kRegistersNeededX to
1401 * denote when we have to split things.
1402 * 3) The only soft-float ABI, 32b Arm, needs no widening for floats, so we can use the int
1403 * handling directly.
1404 * 4) Only 64b architectures widen, and their stack is aligned 8B anyways, so no padding code
1405 * necessary when widening. Also, widening of Ints will take place implicitly, and the
1406 * extension should be compatible with Aarch64, which mandates copying the available bits
1407 * into LSB and leaving the rest unspecified.
1408 * 5) Aligning longs and doubles is necessary on arm only, and it's the same in registers and on
1409 * the stack.
1410 * 6) There is only little endian.
1411 *
1412 *
1413 * Actual work is supposed to be done in a delegate of the template type. The interface is as
1414 * follows:
1415 *
1416 * void PushGpr(uintptr_t): Add a value for the next GPR
1417 *
1418 * void PushFpr4(float): Add a value for the next FPR of size 32b. Only called when we need
1419 * padding, that is, when the architecture is 32b and requires 64b alignment.
1420 *
1421 * void PushFpr8(uint64_t): Push a double. We _will_ call this on 32b; it is the callee's job
1422 * to split it if necessary. The state machine will already have applied any
1423 * required alignment.
1424 *
1425 * void PushStack(uintptr_t): Push a value to the stack.
1426 *
1427 * uintptr_t PushHandle(mirror::Object* ref): Add a reference to the HandleScope. This may be
1428 * called with nullptr, which matters for null initialization.
1429 * Must return the jobject, that is, the reference to the
1430 * entry in the HandleScope (nullptr if necessary).
1431 *
1432 */
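// A minimal sketch of a delegate satisfying the interface above (illustrative only, not part of
// this file; the real delegates are ComputeNativeCallFrameSize and FillNativeCall below):
//
//   class CountingDelegate {
//    public:
//     void PushGpr(uintptr_t) { gprs_++; }
//     void PushFpr4(float) { fprs_++; }
//     void PushFpr8(uint64_t) { fprs_++; }
//     void PushStack(uintptr_t) { stack_++; }
//     uintptr_t PushHandle(mirror::Object*) REQUIRES_SHARED(Locks::mutator_lock_) {
//       return 0u;  // Would normally return the jobject for the new HandleScope entry.
//     }
//    private:
//     size_t gprs_ = 0u, fprs_ = 0u, stack_ = 0u;
//   };
//
//   CountingDelegate delegate;
//   BuildNativeCallFrameStateMachine<CountingDelegate> sm(&delegate);
//   sm.AdvanceInt(42);         // Consumes a GPR, or a stack slot once GPRs are exhausted.
//   sm.AdvanceDouble(bits);    // Consumes an FPR (or GPRs/stack slots under a soft-float ABI).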
1433 template<class T> class BuildNativeCallFrameStateMachine {
1434 public:
1435 #if defined(__arm__)
1436 static constexpr bool kNativeSoftFloatAbi = true;
1437 static constexpr size_t kNumNativeGprArgs = 4; // 4 arguments passed in GPRs, r0-r3
1438 static constexpr size_t kNumNativeFprArgs = 0; // 0 arguments passed in FPRs.
1439
1440 static constexpr size_t kRegistersNeededForLong = 2;
1441 static constexpr size_t kRegistersNeededForDouble = 2;
1442 static constexpr bool kMultiRegistersAligned = true;
1443 static constexpr bool kMultiFPRegistersWidened = false;
1444 static constexpr bool kMultiGPRegistersWidened = false;
1445 static constexpr bool kAlignLongOnStack = true;
1446 static constexpr bool kAlignDoubleOnStack = true;
1447 #elif defined(__aarch64__)
1448 static constexpr bool kNativeSoftFloatAbi = false; // This is a hard float ABI.
1449 static constexpr size_t kNumNativeGprArgs = 8; // 8 arguments passed in GPRs.
1450 static constexpr size_t kNumNativeFprArgs = 8; // 8 arguments passed in FPRs.
1451
1452 static constexpr size_t kRegistersNeededForLong = 1;
1453 static constexpr size_t kRegistersNeededForDouble = 1;
1454 static constexpr bool kMultiRegistersAligned = false;
1455 static constexpr bool kMultiFPRegistersWidened = false;
1456 static constexpr bool kMultiGPRegistersWidened = false;
1457 static constexpr bool kAlignLongOnStack = false;
1458 static constexpr bool kAlignDoubleOnStack = false;
1459 #elif defined(__i386__)
1460 static constexpr bool kNativeSoftFloatAbi = false; // Not using int registers for fp
1461 static constexpr size_t kNumNativeGprArgs = 0; // 0 arguments passed in GPRs.
1462 static constexpr size_t kNumNativeFprArgs = 0; // 0 arguments passed in FPRs.
1463
1464 static constexpr size_t kRegistersNeededForLong = 2;
1465 static constexpr size_t kRegistersNeededForDouble = 2;
1466 static constexpr bool kMultiRegistersAligned = false; // x86 not using regs, anyways
1467 static constexpr bool kMultiFPRegistersWidened = false;
1468 static constexpr bool kMultiGPRegistersWidened = false;
1469 static constexpr bool kAlignLongOnStack = false;
1470 static constexpr bool kAlignDoubleOnStack = false;
1471 #elif defined(__x86_64__)
1472 static constexpr bool kNativeSoftFloatAbi = false; // This is a hard float ABI.
1473 static constexpr size_t kNumNativeGprArgs = 6; // 6 arguments passed in GPRs.
1474 static constexpr size_t kNumNativeFprArgs = 8; // 8 arguments passed in FPRs.
1475
1476 static constexpr size_t kRegistersNeededForLong = 1;
1477 static constexpr size_t kRegistersNeededForDouble = 1;
1478 static constexpr bool kMultiRegistersAligned = false;
1479 static constexpr bool kMultiFPRegistersWidened = false;
1480 static constexpr bool kMultiGPRegistersWidened = false;
1481 static constexpr bool kAlignLongOnStack = false;
1482 static constexpr bool kAlignDoubleOnStack = false;
1483 #else
1484 #error "Unsupported architecture"
1485 #endif
1486
1487 public:
1488 explicit BuildNativeCallFrameStateMachine(T* delegate)
1489 : gpr_index_(kNumNativeGprArgs),
1490 fpr_index_(kNumNativeFprArgs),
1491 stack_entries_(0),
1492 delegate_(delegate) {
1493 // For register alignment, we want to assume that counters (gpr_index_, fpr_index_) are even iff
1494 // the next register is even; counting down is just to make the compiler happy...
1495 static_assert(kNumNativeGprArgs % 2 == 0U, "Number of native GPR arguments not even");
1496 static_assert(kNumNativeFprArgs % 2 == 0U, "Number of native FPR arguments not even");
1497 }
1498
1499 virtual ~BuildNativeCallFrameStateMachine() {}
1500
1501 bool HavePointerGpr() const {
1502 return gpr_index_ > 0;
1503 }
1504
1505 void AdvancePointer(const void* val) {
1506 if (HavePointerGpr()) {
1507 gpr_index_--;
1508 PushGpr(reinterpret_cast<uintptr_t>(val));
1509 } else {
1510 stack_entries_++; // TODO: have a field for pointer length as multiple of 32b
1511 PushStack(reinterpret_cast<uintptr_t>(val));
1512 gpr_index_ = 0;
1513 }
1514 }
1515
1516 bool HaveHandleScopeGpr() const {
1517 return gpr_index_ > 0;
1518 }
1519
1520 void AdvanceHandleScope(mirror::Object* ptr) REQUIRES_SHARED(Locks::mutator_lock_) {
1521 uintptr_t handle = PushHandle(ptr);
1522 if (HaveHandleScopeGpr()) {
1523 gpr_index_--;
1524 PushGpr(handle);
1525 } else {
1526 stack_entries_++;
1527 PushStack(handle);
1528 gpr_index_ = 0;
1529 }
1530 }
1531
1532 bool HaveIntGpr() const {
1533 return gpr_index_ > 0;
1534 }
1535
1536 void AdvanceInt(uint32_t val) {
1537 if (HaveIntGpr()) {
1538 gpr_index_--;
1539 if (kMultiGPRegistersWidened) {
1540 DCHECK_EQ(sizeof(uintptr_t), sizeof(int64_t));
1541 PushGpr(static_cast<int64_t>(bit_cast<int32_t, uint32_t>(val)));
1542 } else {
1543 PushGpr(val);
1544 }
1545 } else {
1546 stack_entries_++;
1547 if (kMultiGPRegistersWidened) {
1548 DCHECK_EQ(sizeof(uintptr_t), sizeof(int64_t));
1549 PushStack(static_cast<int64_t>(bit_cast<int32_t, uint32_t>(val)));
1550 } else {
1551 PushStack(val);
1552 }
1553 gpr_index_ = 0;
1554 }
1555 }
1556
1557 bool HaveLongGpr() const {
1558 return gpr_index_ >= kRegistersNeededForLong + (LongGprNeedsPadding() ? 1 : 0);
1559 }
1560
1561 bool LongGprNeedsPadding() const {
1562 return kRegistersNeededForLong > 1 && // only pad when using multiple registers
1563 kAlignLongOnStack && // and when it needs alignment
1564 (gpr_index_ & 1) == 1; // counter is odd, see constructor
1565 }
1566
1567 bool LongStackNeedsPadding() const {
1568 return kRegistersNeededForLong > 1 && // only pad when using multiple registers
1569 kAlignLongOnStack && // and when it needs 8B alignment
1570 (stack_entries_ & 1) == 1; // counter is odd
1571 }
1572
1573 void AdvanceLong(uint64_t val) {
1574 if (HaveLongGpr()) {
1575 if (LongGprNeedsPadding()) {
1576 PushGpr(0);
1577 gpr_index_--;
1578 }
1579 if (kRegistersNeededForLong == 1) {
1580 PushGpr(static_cast<uintptr_t>(val));
1581 } else {
1582 PushGpr(static_cast<uintptr_t>(val & 0xFFFFFFFF));
1583 PushGpr(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF));
1584 }
1585 gpr_index_ -= kRegistersNeededForLong;
1586 } else {
1587 if (LongStackNeedsPadding()) {
1588 PushStack(0);
1589 stack_entries_++;
1590 }
1591 if (kRegistersNeededForLong == 1) {
1592 PushStack(static_cast<uintptr_t>(val));
1593 stack_entries_++;
1594 } else {
1595 PushStack(static_cast<uintptr_t>(val & 0xFFFFFFFF));
1596 PushStack(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF));
1597 stack_entries_ += 2;
1598 }
1599 gpr_index_ = 0;
1600 }
1601 }
1602
1603 bool HaveFloatFpr() const {
1604 return fpr_index_ > 0;
1605 }
1606
1607 void AdvanceFloat(float val) {
1608 if (kNativeSoftFloatAbi) {
1609 AdvanceInt(bit_cast<uint32_t, float>(val));
1610 } else {
1611 if (HaveFloatFpr()) {
1612 fpr_index_--;
1613 if (kRegistersNeededForDouble == 1) {
1614 if (kMultiFPRegistersWidened) {
1615 PushFpr8(bit_cast<uint64_t, double>(val));
1616 } else {
1617 // No widening, just use the bits.
1618 PushFpr8(static_cast<uint64_t>(bit_cast<uint32_t, float>(val)));
1619 }
1620 } else {
1621 PushFpr4(val);
1622 }
1623 } else {
1624 stack_entries_++;
1625 if (kRegistersNeededForDouble == 1 && kMultiFPRegistersWidened) {
1626 // Need to widen before storing: Note the "double" in the template instantiation.
1627 // Note: We need to jump through those hoops to make the compiler happy.
1628 DCHECK_EQ(sizeof(uintptr_t), sizeof(uint64_t));
1629 PushStack(static_cast<uintptr_t>(bit_cast<uint64_t, double>(val)));
1630 } else {
1631 PushStack(static_cast<uintptr_t>(bit_cast<uint32_t, float>(val)));
1632 }
1633 fpr_index_ = 0;
1634 }
1635 }
1636 }
1637
1638 bool HaveDoubleFpr() const {
1639 return fpr_index_ >= kRegistersNeededForDouble + (DoubleFprNeedsPadding() ? 1 : 0);
1640 }
1641
1642 bool DoubleFprNeedsPadding() const {
1643 return kRegistersNeededForDouble > 1 && // only pad when using multiple registers
1644 kAlignDoubleOnStack && // and when it needs alignment
1645 (fpr_index_ & 1) == 1; // counter is odd, see constructor
1646 }
1647
1648 bool DoubleStackNeedsPadding() const {
1649 return kRegistersNeededForDouble > 1 && // only pad when using multiple registers
1650 kAlignDoubleOnStack && // and when it needs 8B alignment
1651 (stack_entries_ & 1) == 1; // counter is odd
1652 }
1653
1654 void AdvanceDouble(uint64_t val) {
1655 if (kNativeSoftFloatAbi) {
1656 AdvanceLong(val);
1657 } else {
1658 if (HaveDoubleFpr()) {
1659 if (DoubleFprNeedsPadding()) {
1660 PushFpr4(0);
1661 fpr_index_--;
1662 }
1663 PushFpr8(val);
1664 fpr_index_ -= kRegistersNeededForDouble;
1665 } else {
1666 if (DoubleStackNeedsPadding()) {
1667 PushStack(0);
1668 stack_entries_++;
1669 }
1670 if (kRegistersNeededForDouble == 1) {
1671 PushStack(static_cast<uintptr_t>(val));
1672 stack_entries_++;
1673 } else {
1674 PushStack(static_cast<uintptr_t>(val & 0xFFFFFFFF));
1675 PushStack(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF));
1676 stack_entries_ += 2;
1677 }
1678 fpr_index_ = 0;
1679 }
1680 }
1681 }
1682
1683 uint32_t GetStackEntries() const {
1684 return stack_entries_;
1685 }
1686
1687 uint32_t GetNumberOfUsedGprs() const {
1688 return kNumNativeGprArgs - gpr_index_;
1689 }
1690
1691 uint32_t GetNumberOfUsedFprs() const {
1692 return kNumNativeFprArgs - fpr_index_;
1693 }
1694
1695 private:
1696 void PushGpr(uintptr_t val) {
1697 delegate_->PushGpr(val);
1698 }
1699 void PushFpr4(float val) {
1700 delegate_->PushFpr4(val);
1701 }
1702 void PushFpr8(uint64_t val) {
1703 delegate_->PushFpr8(val);
1704 }
1705 void PushStack(uintptr_t val) {
1706 delegate_->PushStack(val);
1707 }
1708 uintptr_t PushHandle(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_) {
1709 return delegate_->PushHandle(ref);
1710 }
1711
1712 uint32_t gpr_index_; // Number of free GPRs
1713 uint32_t fpr_index_; // Number of free FPRs
1714 uint32_t stack_entries_; // Stack entries are in multiples of 32b, as floats are usually not
1715 // extended
1716 T* const delegate_; // What Push implementation gets called
1717 };
1718
1719 // Computes the sizes of register stacks and call stack area. Handling of references can be extended
1720 // in subclasses.
1721 //
1722 // To handle native pointers, use "L" in the shorty for an object reference, which simulates
1723 // them with handles.
1724 class ComputeNativeCallFrameSize {
1725 public:
1726 ComputeNativeCallFrameSize() : num_stack_entries_(0) {}
1727
1728 virtual ~ComputeNativeCallFrameSize() {}
1729
1730 uint32_t GetStackSize() const {
1731 return num_stack_entries_ * sizeof(uintptr_t);
1732 }
1733
1734 uint8_t* LayoutStackArgs(uint8_t* sp8) const {
1735 sp8 -= GetStackSize();
1736 // Align by kStackAlignment; it is at least as strict as native stack alignment.
1737 sp8 = reinterpret_cast<uint8_t*>(RoundDown(reinterpret_cast<uintptr_t>(sp8), kStackAlignment));
1738 return sp8;
1739 }
1740
1741 virtual void WalkHeader(
1742 BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm ATTRIBUTE_UNUSED)
1743 REQUIRES_SHARED(Locks::mutator_lock_) {
1744 }
1745
1746 void Walk(const char* shorty, uint32_t shorty_len) REQUIRES_SHARED(Locks::mutator_lock_) {
1747 BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize> sm(this);
1748
1749 WalkHeader(&sm);
1750
1751 for (uint32_t i = 1; i < shorty_len; ++i) {
1752 Primitive::Type cur_type_ = Primitive::GetType(shorty[i]);
1753 switch (cur_type_) {
1754 case Primitive::kPrimNot:
1755 // TODO: fix abuse of mirror types.
1756 sm.AdvanceHandleScope(
1757 reinterpret_cast<mirror::Object*>(0x12345678));
1758 break;
1759
1760 case Primitive::kPrimBoolean:
1761 case Primitive::kPrimByte:
1762 case Primitive::kPrimChar:
1763 case Primitive::kPrimShort:
1764 case Primitive::kPrimInt:
1765 sm.AdvanceInt(0);
1766 break;
1767 case Primitive::kPrimFloat:
1768 sm.AdvanceFloat(0);
1769 break;
1770 case Primitive::kPrimDouble:
1771 sm.AdvanceDouble(0);
1772 break;
1773 case Primitive::kPrimLong:
1774 sm.AdvanceLong(0);
1775 break;
1776 default:
1777 LOG(FATAL) << "Unexpected type: " << cur_type_ << " in " << shorty;
1778 UNREACHABLE();
1779 }
1780 }
1781
1782 num_stack_entries_ = sm.GetStackEntries();
1783 }
1784
1785 void PushGpr(uintptr_t /* val */) {
1786 // not optimizing registers, yet
1787 }
1788
1789 void PushFpr4(float /* val */) {
1790 // not optimizing registers, yet
1791 }
1792
1793 void PushFpr8(uint64_t /* val */) {
1794 // not optimizing registers, yet
1795 }
1796
1797 void PushStack(uintptr_t /* val */) {
1798 // counting is already done in the superclass
1799 }
1800
1801 virtual uintptr_t PushHandle(mirror::Object* /* ptr */) {
1802 return reinterpret_cast<uintptr_t>(nullptr);
1803 }
1804
1805 protected:
1806 uint32_t num_stack_entries_;
1807 };
1808
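// Illustrative use of ComputeNativeCallFrameSize (a sketch, not called anywhere in this file;
// real callers go through ComputeGenericJniFrameSize::ComputeLayout below). For a hypothetical
// native method with shorty "ILJD" (returns int; takes a reference, a long and a double):
//
//   ComputeNativeCallFrameSize fsc;
//   fsc.Walk("ILJD", 4);                           // Simulate the native calling convention.
//   uint32_t out_args_bytes = fsc.GetStackSize();  // Bytes needed for stack-passed arguments.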
1809 class ComputeGenericJniFrameSize final : public ComputeNativeCallFrameSize {
1810 public:
1811 explicit ComputeGenericJniFrameSize(bool critical_native)
1812 : num_handle_scope_references_(0), critical_native_(critical_native) {}
1813
1814 uintptr_t* ComputeLayout(Thread* self,
1815 ArtMethod** managed_sp,
1816 const char* shorty,
1817 uint32_t shorty_len,
1818 HandleScope** handle_scope) REQUIRES_SHARED(Locks::mutator_lock_) {
1819 DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
1820
1821 Walk(shorty, shorty_len);
1822
1823 // Add space for cookie and HandleScope.
1824 void* storage = GetGenericJniHandleScope(managed_sp, num_handle_scope_references_);
1825 DCHECK_ALIGNED(storage, sizeof(uintptr_t));
1826 *handle_scope =
1827 HandleScope::Create(storage, self->GetTopHandleScope(), num_handle_scope_references_);
1828 DCHECK_EQ(*handle_scope, storage);
1829 uint8_t* sp8 = reinterpret_cast<uint8_t*>(*handle_scope);
1830 DCHECK_GE(static_cast<size_t>(reinterpret_cast<uint8_t*>(managed_sp) - sp8),
1831 HandleScope::SizeOf(num_handle_scope_references_) + kJniCookieSize);
1832
1833 // Layout stack arguments.
1834 sp8 = LayoutStackArgs(sp8);
1835
1836 // Return the new bottom.
1837 DCHECK_ALIGNED(sp8, sizeof(uintptr_t));
1838 return reinterpret_cast<uintptr_t*>(sp8);
1839 }
1840
1841 static uintptr_t* GetStartGprRegs(uintptr_t* reserved_area) {
1842 return reserved_area;
1843 }
1844
1845 static uint32_t* GetStartFprRegs(uintptr_t* reserved_area) {
1846 constexpr size_t num_gprs =
1847 BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>::kNumNativeGprArgs;
1848 return reinterpret_cast<uint32_t*>(GetStartGprRegs(reserved_area) + num_gprs);
1849 }
1850
1851 static uintptr_t* GetHiddenArgSlot(uintptr_t* reserved_area) {
1852 // Note: `num_fprs` is 0 on architectures where sizeof(uintptr_t) does not match the
1853 // FP register size (it is actually 0 on all supported 32-bit architectures).
1854 constexpr size_t num_fprs =
1855 BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>::kNumNativeFprArgs;
1856 return reinterpret_cast<uintptr_t*>(GetStartFprRegs(reserved_area)) + num_fprs;
1857 }
1858
1859 static uintptr_t* GetOutArgsSpSlot(uintptr_t* reserved_area) {
1860 return GetHiddenArgSlot(reserved_area) + 1;
1861 }
1862
1863 uintptr_t PushHandle(mirror::Object* /* ptr */) override;
1864
1865 // Add JNIEnv* and jobj/jclass before the shorty-derived elements.
1866 void WalkHeader(BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm) override
1867 REQUIRES_SHARED(Locks::mutator_lock_);
1868
1869 private:
1870 uint32_t num_handle_scope_references_;
1871 const bool critical_native_;
1872 };
1873
1874 uintptr_t ComputeGenericJniFrameSize::PushHandle(mirror::Object* /* ptr */) {
1875 num_handle_scope_references_++;
1876 return reinterpret_cast<uintptr_t>(nullptr);
1877 }
1878
1879 void ComputeGenericJniFrameSize::WalkHeader(
1880 BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm) {
1881 // First 2 parameters are always excluded for @CriticalNative.
1882 if (UNLIKELY(critical_native_)) {
1883 return;
1884 }
1885
1886 // JNIEnv
1887 sm->AdvancePointer(nullptr);
1888
1889 // Class object or this as first argument
1890 sm->AdvanceHandleScope(reinterpret_cast<mirror::Object*>(0x12345678));
1891 }
1892
1893 // Class to push values to three separate regions. Used to fill the native call part. Adheres to
1894 // the template requirements of BuildNativeCallFrameStateMachine.
1895 class FillNativeCall {
1896 public:
1897 FillNativeCall(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args) :
1898 cur_gpr_reg_(gpr_regs), cur_fpr_reg_(fpr_regs), cur_stack_arg_(stack_args) {}
1899
1900 virtual ~FillNativeCall() {}
1901
1902 void Reset(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args) {
1903 cur_gpr_reg_ = gpr_regs;
1904 cur_fpr_reg_ = fpr_regs;
1905 cur_stack_arg_ = stack_args;
1906 }
1907
1908 void PushGpr(uintptr_t val) {
1909 *cur_gpr_reg_ = val;
1910 cur_gpr_reg_++;
1911 }
1912
1913 void PushFpr4(float val) {
1914 *cur_fpr_reg_ = val;
1915 cur_fpr_reg_++;
1916 }
1917
1918 void PushFpr8(uint64_t val) {
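// Note: the FPR area is an array of 32-bit slots, so a 64-bit value occupies two of them.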
1919 uint64_t* tmp = reinterpret_cast<uint64_t*>(cur_fpr_reg_);
1920 *tmp = val;
1921 cur_fpr_reg_ += 2;
1922 }
1923
1924 void PushStack(uintptr_t val) {
1925 *cur_stack_arg_ = val;
1926 cur_stack_arg_++;
1927 }
1928
1929 virtual uintptr_t PushHandle(mirror::Object*) REQUIRES_SHARED(Locks::mutator_lock_) {
1930 LOG(FATAL) << "(Non-JNI) Native call does not use handles.";
1931 UNREACHABLE();
1932 }
1933
1934 private:
1935 uintptr_t* cur_gpr_reg_;
1936 uint32_t* cur_fpr_reg_;
1937 uintptr_t* cur_stack_arg_;
1938 };
1939
1940 // Visits arguments on the stack placing them into a region lower down the stack for the benefit
1941 // of transitioning into native code.
1942 class BuildGenericJniFrameVisitor final : public QuickArgumentVisitor {
1943 public:
1944 BuildGenericJniFrameVisitor(Thread* self,
1945 bool is_static,
1946 bool critical_native,
1947 const char* shorty,
1948 uint32_t shorty_len,
1949 ArtMethod** managed_sp,
1950 uintptr_t* reserved_area)
1951 : QuickArgumentVisitor(managed_sp, is_static, shorty, shorty_len),
1952 jni_call_(nullptr, nullptr, nullptr, nullptr, critical_native),
1953 sm_(&jni_call_) {
1954 DCHECK_ALIGNED(managed_sp, kStackAlignment);
1955 DCHECK_ALIGNED(reserved_area, sizeof(uintptr_t));
1956
1957 ComputeGenericJniFrameSize fsc(critical_native);
1958 uintptr_t* out_args_sp =
1959 fsc.ComputeLayout(self, managed_sp, shorty, shorty_len, &handle_scope_);
1960
1961 // Store hidden argument for @CriticalNative.
1962 uintptr_t* hidden_arg_slot = fsc.GetHiddenArgSlot(reserved_area);
1963 constexpr uintptr_t kGenericJniTag = 1u;
1964 ArtMethod* method = *managed_sp;
1965 *hidden_arg_slot = critical_native ? (reinterpret_cast<uintptr_t>(method) | kGenericJniTag)
1966 : 0xebad6a89u; // Bad value.
1967
1968 // Set out args SP.
1969 uintptr_t* out_args_sp_slot = fsc.GetOutArgsSpSlot(reserved_area);
1970 *out_args_sp_slot = reinterpret_cast<uintptr_t>(out_args_sp);
1971
1972 jni_call_.Reset(fsc.GetStartGprRegs(reserved_area),
1973 fsc.GetStartFprRegs(reserved_area),
1974 out_args_sp,
1975 handle_scope_);
1976
1977 // First 2 parameters are always excluded for CriticalNative methods.
1978 if (LIKELY(!critical_native)) {
1979 // The JNI environment is always the first argument.
1980 sm_.AdvancePointer(self->GetJniEnv());
1981
1982 if (is_static) {
1983 sm_.AdvanceHandleScope(method->GetDeclaringClass().Ptr());
1984 } // else "this" reference is already handled by QuickArgumentVisitor.
1985 }
1986 }
1987
1988 void Visit() REQUIRES_SHARED(Locks::mutator_lock_) override;
1989
1990 void FinalizeHandleScope(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
1991
1992 StackReference<mirror::Object>* GetFirstHandleScopeEntry() {
1993 return handle_scope_->GetHandle(0).GetReference();
1994 }
1995
1996 jobject GetFirstHandleScopeJObject() const REQUIRES_SHARED(Locks::mutator_lock_) {
1997 return handle_scope_->GetHandle(0).ToJObject();
1998 }
1999
2000 private:
2001 // A class to fill a JNI call. Adds reference/handle-scope management to FillNativeCall.
2002 class FillJniCall final : public FillNativeCall {
2003 public:
2004 FillJniCall(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args,
2005 HandleScope* handle_scope, bool critical_native)
2006 : FillNativeCall(gpr_regs, fpr_regs, stack_args),
2007 handle_scope_(handle_scope),
2008 cur_entry_(0),
2009 critical_native_(critical_native) {}
2010
2011 uintptr_t PushHandle(mirror::Object* ref) override REQUIRES_SHARED(Locks::mutator_lock_);
2012
2013 void Reset(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args, HandleScope* scope) {
2014 FillNativeCall::Reset(gpr_regs, fpr_regs, stack_args);
2015 handle_scope_ = scope;
2016 cur_entry_ = 0U;
2017 }
2018
2019 void ResetRemainingScopeSlots() REQUIRES_SHARED(Locks::mutator_lock_) {
2020 // Initialize padding entries.
2021 size_t expected_slots = handle_scope_->NumberOfReferences();
2022 while (cur_entry_ < expected_slots) {
2023 handle_scope_->GetMutableHandle(cur_entry_++).Assign(nullptr);
2024 }
2025
2026 if (!critical_native_) {
2027 // Non-critical natives have at least the self class (jclass) or this (jobject).
2028 DCHECK_NE(cur_entry_, 0U);
2029 }
2030 }
2031
2032 bool CriticalNative() const {
2033 return critical_native_;
2034 }
2035
2036 private:
2037 HandleScope* handle_scope_;
2038 size_t cur_entry_;
2039 const bool critical_native_;
2040 };
2041
2042 HandleScope* handle_scope_;
2043 FillJniCall jni_call_;
2044
2045 BuildNativeCallFrameStateMachine<FillJniCall> sm_;
2046
2047 DISALLOW_COPY_AND_ASSIGN(BuildGenericJniFrameVisitor);
2048 };
2049
2050 uintptr_t BuildGenericJniFrameVisitor::FillJniCall::PushHandle(mirror::Object* ref) {
2051 uintptr_t tmp;
2052 MutableHandle<mirror::Object> h = handle_scope_->GetMutableHandle(cur_entry_);
2053 h.Assign(ref);
2054 tmp = reinterpret_cast<uintptr_t>(h.ToJObject());
2055 cur_entry_++;
2056 return tmp;
2057 }
2058
2059 void BuildGenericJniFrameVisitor::Visit() {
2060 Primitive::Type type = GetParamPrimitiveType();
2061 switch (type) {
2062 case Primitive::kPrimLong: {
2063 jlong long_arg;
2064 if (IsSplitLongOrDouble()) {
2065 long_arg = ReadSplitLongParam();
2066 } else {
2067 long_arg = *reinterpret_cast<jlong*>(GetParamAddress());
2068 }
2069 sm_.AdvanceLong(long_arg);
2070 break;
2071 }
2072 case Primitive::kPrimDouble: {
2073 uint64_t double_arg;
2074 if (IsSplitLongOrDouble()) {
2075 // Read the raw bits so that we don't cast to a double.
2076 double_arg = ReadSplitLongParam();
2077 } else {
2078 double_arg = *reinterpret_cast<uint64_t*>(GetParamAddress());
2079 }
2080 sm_.AdvanceDouble(double_arg);
2081 break;
2082 }
2083 case Primitive::kPrimNot: {
2084 StackReference<mirror::Object>* stack_ref =
2085 reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
2086 sm_.AdvanceHandleScope(stack_ref->AsMirrorPtr());
2087 break;
2088 }
2089 case Primitive::kPrimFloat:
2090 sm_.AdvanceFloat(*reinterpret_cast<float*>(GetParamAddress()));
2091 break;
2092 case Primitive::kPrimBoolean: // Fall-through.
2093 case Primitive::kPrimByte: // Fall-through.
2094 case Primitive::kPrimChar: // Fall-through.
2095 case Primitive::kPrimShort: // Fall-through.
2096 case Primitive::kPrimInt: // Fall-through.
2097 sm_.AdvanceInt(*reinterpret_cast<jint*>(GetParamAddress()));
2098 break;
2099 case Primitive::kPrimVoid:
2100 LOG(FATAL) << "UNREACHABLE";
2101 UNREACHABLE();
2102 }
2103 }
2104
2105 void BuildGenericJniFrameVisitor::FinalizeHandleScope(Thread* self) {
2106 // Clear out rest of the scope.
2107 jni_call_.ResetRemainingScopeSlots();
2108 if (!jni_call_.CriticalNative()) {
2109 // Install HandleScope.
2110 self->PushHandleScope(handle_scope_);
2111 }
2112 }
2113
2114 /*
2115 * Initializes the reserved area assumed to be directly below `managed_sp` for a native call:
2116 *
2117 * On entry, the stack has a standard callee-save frame above `managed_sp`,
2118 * and the reserved area below it. Starting below `managed_sp`, we reserve space
2119 * for local reference cookie (not present for @CriticalNative), HandleScope
2120 * (not present for @CriticalNative) and stack args (if args do not fit into
2121 * registers). At the bottom of the reserved area, there is space for register
2122 * arguments, hidden arg (for @CriticalNative) and the SP for the native call
2123 * (i.e. pointer to the stack args area), which the calling stub shall load
2124 * to perform the native call. We fill all these fields, perform class init
2125 * check (for static methods) and/or locking (for synchronized methods) if
2126 * needed and return to the stub.
2127 *
2128 * The return value is the pointer to the native code, null on failure.
2129 */
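// A rough sketch of the resulting layout (higher addresses at the top; the cookie and
// HandleScope are absent for @CriticalNative, and stack args are present only when arguments
// spill out of registers):
//
//   managed_sp ->   | callee-save frame (ArtMethod*) |
//                   | local ref cookie               |
//                   | HandleScope                    |
//                   | stack args for the native call |  <- the stored out-args SP points here
//                   | out-args SP slot               |
//                   | hidden arg slot                |  (tagged ArtMethod* for @CriticalNative)
//                   | FPR register arguments         |
//   reserved_area ->| GPR register arguments         |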
2130 extern "C" const void* artQuickGenericJniTrampoline(Thread* self,
2131 ArtMethod** managed_sp,
2132 uintptr_t* reserved_area)
2133 REQUIRES_SHARED(Locks::mutator_lock_) {
2134 // Note: We cannot walk the stack properly until fixed up below.
2135 ArtMethod* called = *managed_sp;
2136 DCHECK(called->IsNative()) << called->PrettyMethod(true);
2137 Runtime* runtime = Runtime::Current();
2138 uint32_t shorty_len = 0;
2139 const char* shorty = called->GetShorty(&shorty_len);
2140 bool critical_native = called->IsCriticalNative();
2141 bool fast_native = called->IsFastNative();
2142 bool normal_native = !critical_native && !fast_native;
2143
2144 // Run the visitor and update sp.
2145 BuildGenericJniFrameVisitor visitor(self,
2146 called->IsStatic(),
2147 critical_native,
2148 shorty,
2149 shorty_len,
2150 managed_sp,
2151 reserved_area);
2152 {
2153 ScopedAssertNoThreadSuspension sants(__FUNCTION__);
2154 visitor.VisitArguments();
2155 // FinalizeHandleScope pushes the handle scope on the thread.
2156 visitor.FinalizeHandleScope(self);
2157 }
2158
2159 // Fix up managed-stack things in Thread. After this we can walk the stack.
2160 self->SetTopOfStackTagged(managed_sp);
2161
2162 self->VerifyStack();
2163
2164 // We can now walk the stack if needed by JIT GC from MethodEntered() for JIT-on-first-use.
2165 jit::Jit* jit = runtime->GetJit();
2166 if (jit != nullptr) {
2167 jit->MethodEntered(self, called);
2168 }
2169
2170 // We can set the entrypoint of a native method to generic JNI even when the
2171 // class hasn't been initialized, so we need to do the initialization check
2172 // before invoking the native code.
2173 if (NeedsClinitCheckBeforeCall(called)) {
2174 ObjPtr<mirror::Class> declaring_class = called->GetDeclaringClass();
2175 if (UNLIKELY(!declaring_class->IsVisiblyInitialized())) {
2176 // Ensure static method's class is initialized.
2177 StackHandleScope<1> hs(self);
2178 Handle<mirror::Class> h_class(hs.NewHandle(declaring_class));
2179 if (!runtime->GetClassLinker()->EnsureInitialized(self, h_class, true, true)) {
2180 DCHECK(Thread::Current()->IsExceptionPending()) << called->PrettyMethod();
2181 self->PopHandleScope();
2182 return nullptr; // Report error.
2183 }
2184 }
2185 }
2186
2187 uint32_t cookie;
2188 uint32_t* sp32;
2189 // Skip calling JniMethodStart for @CriticalNative.
2190 if (LIKELY(!critical_native)) {
2191 // Start JNI, save the cookie.
2192 if (called->IsSynchronized()) {
2193 DCHECK(normal_native) << " @FastNative with synchronized is not supported";
2194 cookie = JniMethodStartSynchronized(visitor.GetFirstHandleScopeJObject(), self);
2195 if (self->IsExceptionPending()) {
2196 self->PopHandleScope();
2197 return nullptr; // Report error.
2198 }
2199 } else {
2200 if (fast_native) {
2201 cookie = JniMethodFastStart(self);
2202 } else {
2203 DCHECK(normal_native);
2204 cookie = JniMethodStart(self);
2205 }
2206 }
2207 sp32 = reinterpret_cast<uint32_t*>(managed_sp);
2208 *(sp32 - 1) = cookie;
2209 }
2210
2211 // Retrieve the stored native code.
2212 // Note that it may point to the lookup stub or trampoline.
2213 // FIXME: This is broken for @CriticalNative as the art_jni_dlsym_lookup_stub
2214 // does not handle that case. Calls from compiled stubs are also broken.
2215 void const* nativeCode = called->GetEntryPointFromJni();
2216
2217 VLOG(third_party_jni) << "GenericJNI: "
2218 << called->PrettyMethod()
2219 << " -> "
2220 << std::hex << reinterpret_cast<uintptr_t>(nativeCode);
2221
2222 // Return native code.
2223 return nativeCode;
2224 }
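// (Illustrative control flow: the calling assembly stub then loads the GPR/FPR arguments and the
// out-args SP prepared in the reserved area, calls the returned native code, and finally calls
// artQuickGenericJniEndTrampoline below to clean up and unlock.)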
2225
2226 // Defined in quick_jni_entrypoints.cc.
2227 extern uint64_t GenericJniMethodEnd(Thread* self,
2228 uint32_t saved_local_ref_cookie,
2229 jvalue result,
2230 uint64_t result_f,
2231 ArtMethod* called);
2232
2233 /*
2234 * Called after the native JNI code returns. Responsible for cleanup (handle scope, saved state) and
2235 * unlocking.
2236 */
2237 extern "C" uint64_t artQuickGenericJniEndTrampoline(Thread* self,
2238 jvalue result,
2239 uint64_t result_f) {
2240 // We're here just back from a native call. We don't have the shared mutator lock at this point
2241 // yet until we call GoToRunnable() later in GenericJniMethodEnd(). Accessing objects or doing
2242 // anything that requires a mutator lock before that would cause problems as GC may have the
2243 // exclusive mutator lock and may be moving objects, etc.
2244 ArtMethod** sp = self->GetManagedStack()->GetTopQuickFrame();
2245 DCHECK(self->GetManagedStack()->GetTopQuickFrameTag());
2246 uint32_t* sp32 = reinterpret_cast<uint32_t*>(sp);
2247 ArtMethod* called = *sp;
2248 uint32_t cookie = *(sp32 - 1);
2249 if (kIsDebugBuild && !called->IsCriticalNative()) {
2250 BaseHandleScope* handle_scope = self->GetTopHandleScope();
2251 DCHECK(handle_scope != nullptr);
2252 DCHECK(!handle_scope->IsVariableSized());
2253 // Note: We do not hold mutator lock here for normal JNI, so we cannot use the method's shorty
2254 // to determine the number of references. Instead rely on the value from the HandleScope.
2255 DCHECK_EQ(handle_scope, GetGenericJniHandleScope(sp, handle_scope->NumberOfReferences()));
2256 }
2257 return GenericJniMethodEnd(self, cookie, result, result_f, called);
2258 }
2259
2260 // We use TwoWordReturn to optimize scalar returns. We use the hi value for code, and the lo value
2261 // for the method pointer.
2262 //
2263 // It is valid to use this, as at the usage points here (returns from C functions) we are assuming
2264 // to hold the mutator lock (see REQUIRES_SHARED(Locks::mutator_lock_) annotations).
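// For instance (illustrative), a successful lookup below returns
//   GetTwoWordSuccessValue(reinterpret_cast<uintptr_t>(code), reinterpret_cast<uintptr_t>(method));
// and the assembly stub branches to the hi word (the code entrypoint) with the lo word (the
// ArtMethod*) in the method register.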
2265
2266 template <InvokeType type, bool access_check>
2267 static TwoWordReturn artInvokeCommon(uint32_t method_idx,
2268 ObjPtr<mirror::Object> this_object,
2269 Thread* self,
2270 ArtMethod** sp) {
2271 ScopedQuickEntrypointChecks sqec(self);
2272 DCHECK_EQ(*sp, Runtime::Current()->GetCalleeSaveMethod(CalleeSaveType::kSaveRefsAndArgs));
2273 ArtMethod* caller_method = QuickArgumentVisitor::GetCallingMethod(sp);
2274 ArtMethod* method = FindMethodFast<type, access_check>(method_idx, this_object, caller_method);
2275 if (UNLIKELY(method == nullptr)) {
2276 const DexFile* dex_file = caller_method->GetDexFile();
2277 uint32_t shorty_len;
2278 const char* shorty = dex_file->GetMethodShorty(dex_file->GetMethodId(method_idx), &shorty_len);
2279 {
2280 // Remember the args in case a GC happens in FindMethodFromCode.
2281 ScopedObjectAccessUnchecked soa(self->GetJniEnv());
2282 RememberForGcArgumentVisitor visitor(sp, type == kStatic, shorty, shorty_len, &soa);
2283 visitor.VisitArguments();
2284 method = FindMethodFromCode<type, access_check>(method_idx,
2285 &this_object,
2286 caller_method,
2287 self);
2288 visitor.FixupReferences();
2289 }
2290
2291 if (UNLIKELY(method == nullptr)) {
2292 CHECK(self->IsExceptionPending());
2293 return GetTwoWordFailureValue(); // Failure.
2294 }
2295 }
2296 DCHECK(!self->IsExceptionPending());
2297 const void* code = method->GetEntryPointFromQuickCompiledCode();
2298
2299 // When we return, the caller will branch to this address, so it had better not be 0!
2300 DCHECK(code != nullptr) << "Code was null in method: " << method->PrettyMethod()
2301 << " location: "
2302 << method->GetDexFile()->GetLocation();
2303
2304 return GetTwoWordSuccessValue(reinterpret_cast<uintptr_t>(code),
2305 reinterpret_cast<uintptr_t>(method));
2306 }
2307
2308 // Explicit artInvokeCommon template function declarations to please analysis tool.
2309 #define EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(type, access_check) \
2310 template REQUIRES_SHARED(Locks::mutator_lock_) \
2311 TwoWordReturn artInvokeCommon<type, access_check>( \
2312 uint32_t method_idx, ObjPtr<mirror::Object> this_object, Thread* self, ArtMethod** sp)
2313
2314 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kVirtual, false);
2315 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kVirtual, true);
2316 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kInterface, false);
2317 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kInterface, true);
2318 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kDirect, false);
2319 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kDirect, true);
2320 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kStatic, false);
2321 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kStatic, true);
2322 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kSuper, false);
2323 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kSuper, true);
2324 #undef EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL
2325
2326 // See comments in runtime_support_asm.S
2327 extern "C" TwoWordReturn artInvokeInterfaceTrampolineWithAccessCheck(
2328 uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
2329 REQUIRES_SHARED(Locks::mutator_lock_) {
2330 return artInvokeCommon<kInterface, true>(method_idx, this_object, self, sp);
2331 }
2332
2333 extern "C" TwoWordReturn artInvokeDirectTrampolineWithAccessCheck(
2334 uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
2335 REQUIRES_SHARED(Locks::mutator_lock_) {
2336 return artInvokeCommon<kDirect, true>(method_idx, this_object, self, sp);
2337 }
2338
2339 extern "C" TwoWordReturn artInvokeStaticTrampolineWithAccessCheck(
2340 uint32_t method_idx,
2341 mirror::Object* this_object ATTRIBUTE_UNUSED,
2342 Thread* self,
2343 ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) {
2344 // For static, this_object is not required and may be random garbage. Don't pass it down, so
2345 // that it does not trip the ObjPtr alignment check.
2346 return artInvokeCommon<kStatic, true>(method_idx, nullptr, self, sp);
2347 }
2348
2349 extern "C" TwoWordReturn artInvokeSuperTrampolineWithAccessCheck(
2350 uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
2351 REQUIRES_SHARED(Locks::mutator_lock_) {
2352 return artInvokeCommon<kSuper, true>(method_idx, this_object, self, sp);
2353 }
2354
2355 extern "C" TwoWordReturn artInvokeVirtualTrampolineWithAccessCheck(
2356 uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
2357 REQUIRES_SHARED(Locks::mutator_lock_) {
2358 return artInvokeCommon<kVirtual, true>(method_idx, this_object, self, sp);
2359 }
2360
2361 // Helper function for art_quick_imt_conflict_trampoline to look up the interface method.
2362 extern "C" ArtMethod* artLookupResolvedMethod(uint32_t method_index, ArtMethod* referrer)
2363 REQUIRES_SHARED(Locks::mutator_lock_) {
2364 ScopedAssertNoThreadSuspension ants(__FUNCTION__);
2365 DCHECK(!referrer->IsProxyMethod());
2366 ArtMethod* result = Runtime::Current()->GetClassLinker()->LookupResolvedMethod(
2367 method_index, referrer->GetDexCache(), referrer->GetClassLoader());
2368 DCHECK(result == nullptr ||
2369 result->GetDeclaringClass()->IsInterface() ||
2370 result->GetDeclaringClass() ==
2371 WellKnownClasses::ToClass(WellKnownClasses::java_lang_Object))
2372 << result->PrettyMethod();
2373 return result;
2374 }
2375
2376 // Determine target of interface dispatch. The interface method and this object are known non-null.
2377 // The interface method is the method returned by the dex cache in the conflict trampoline.
2378 extern "C" TwoWordReturn artInvokeInterfaceTrampoline(ArtMethod* interface_method,
2379 mirror::Object* raw_this_object,
2380 Thread* self,
2381 ArtMethod** sp)
2382 REQUIRES_SHARED(Locks::mutator_lock_) {
2383 ScopedQuickEntrypointChecks sqec(self);
2384 StackHandleScope<2> hs(self);
2385 Handle<mirror::Object> this_object = hs.NewHandle(raw_this_object);
2386 Handle<mirror::Class> cls = hs.NewHandle(this_object->GetClass());
2387
2388 ArtMethod* caller_method = QuickArgumentVisitor::GetCallingMethod(sp);
2389 ArtMethod* method = nullptr;
2390 ImTable* imt = cls->GetImt(kRuntimePointerSize);
2391
2392 if (UNLIKELY(interface_method == nullptr)) {
2393 // The interface method is unresolved, so resolve it in the dex file of the caller.
2394 // Fetch the dex_method_idx of the target interface method from the caller.
2395 uint32_t dex_method_idx;
2396 uint32_t dex_pc = QuickArgumentVisitor::GetCallingDexPc(sp);
2397 const Instruction& instr = caller_method->DexInstructions().InstructionAt(dex_pc);
2398 Instruction::Code instr_code = instr.Opcode();
2399 DCHECK(instr_code == Instruction::INVOKE_INTERFACE ||
2400 instr_code == Instruction::INVOKE_INTERFACE_RANGE)
2401 << "Unexpected call into interface trampoline: " << instr.DumpString(nullptr);
2402 if (instr_code == Instruction::INVOKE_INTERFACE) {
2403 dex_method_idx = instr.VRegB_35c();
2404 } else {
2405 DCHECK_EQ(instr_code, Instruction::INVOKE_INTERFACE_RANGE);
2406 dex_method_idx = instr.VRegB_3rc();
2407 }
2408
2409 const DexFile& dex_file = *caller_method->GetDexFile();
2410 uint32_t shorty_len;
2411 const char* shorty = dex_file.GetMethodShorty(dex_file.GetMethodId(dex_method_idx),
2412 &shorty_len);
2413 {
2414 // Remember the args in case a GC happens in ClassLinker::ResolveMethod().
2415 ScopedObjectAccessUnchecked soa(self->GetJniEnv());
2416 RememberForGcArgumentVisitor visitor(sp, false, shorty, shorty_len, &soa);
2417 visitor.VisitArguments();
2418 ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
2419 interface_method = class_linker->ResolveMethod<ClassLinker::ResolveMode::kNoChecks>(
2420 self, dex_method_idx, caller_method, kInterface);
2421 visitor.FixupReferences();
2422 }
2423
2424 if (UNLIKELY(interface_method == nullptr)) {
2425 CHECK(self->IsExceptionPending());
2426 return GetTwoWordFailureValue(); // Failure.
2427 }
2428 }
2429
2430 // The compiler and interpreter make sure the conflict trampoline is never
2431 // called on a method that resolves to j.l.Object.
2432 CHECK(!interface_method->GetDeclaringClass()->IsObjectClass());
2433 CHECK(interface_method->GetDeclaringClass()->IsInterface());
2434
2435 DCHECK(!interface_method->IsRuntimeMethod());
2436 // Look whether we have a match in the ImtConflictTable.
2437 uint32_t imt_index = interface_method->GetImtIndex();
2438 ArtMethod* conflict_method = imt->Get(imt_index, kRuntimePointerSize);
2439 if (LIKELY(conflict_method->IsRuntimeMethod())) {
2440 ImtConflictTable* current_table = conflict_method->GetImtConflictTable(kRuntimePointerSize);
2441 DCHECK(current_table != nullptr);
2442 method = current_table->Lookup(interface_method, kRuntimePointerSize);
2443 } else {
2444 // It seems we aren't really a conflict method!
2445 if (kIsDebugBuild) {
2446 ArtMethod* m = cls->FindVirtualMethodForInterface(interface_method, kRuntimePointerSize);
2447 CHECK_EQ(conflict_method, m)
2448 << interface_method->PrettyMethod() << " / " << conflict_method->PrettyMethod()
2449 << " / " << ArtMethod::PrettyMethod(m) << " / " << cls->PrettyClass();
2450 }
2451 method = conflict_method;
2452 }
2453 if (method != nullptr) {
2454 return GetTwoWordSuccessValue(
2455 reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCode()),
2456 reinterpret_cast<uintptr_t>(method));
2457 }
2458
2459 // No match, use the IfTable.
2460 method = cls->FindVirtualMethodForInterface(interface_method, kRuntimePointerSize);
2461 if (UNLIKELY(method == nullptr)) {
2462 ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(
2463 interface_method, this_object.Get(), caller_method);
2464 return GetTwoWordFailureValue(); // Failure.
2465 }
2466
2467 // We arrive here if we have found an implementation, and it is not in the ImtConflictTable.
2468 // We create a new table with the new pair { interface_method, method }.
2469 DCHECK(conflict_method->IsRuntimeMethod());
2470
2471 // Classes in the boot image should never need to update conflict methods in
2472 // their IMT.
2473 CHECK(!Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(cls.Get())) << cls->PrettyClass();
2474 ArtMethod* new_conflict_method = Runtime::Current()->GetClassLinker()->AddMethodToConflictTable(
2475 cls.Get(),
2476 conflict_method,
2477 interface_method,
2478 method,
2479 /*force_new_conflict_method=*/false);
2480 if (new_conflict_method != conflict_method) {
2481 // Update the IMT if we create a new conflict method. No fence needed here, as the
2482 // data is consistent.
2483 imt->Set(imt_index,
2484 new_conflict_method,
2485 kRuntimePointerSize);
2486 }
2487
2488 const void* code = method->GetEntryPointFromQuickCompiledCode();
2489
2490 // When we return, the caller will branch to this address, so it had better not be 0!
2491 DCHECK(code != nullptr) << "Code was null in method: " << method->PrettyMethod()
2492 << " location: " << method->GetDexFile()->GetLocation();
2493
2494 return GetTwoWordSuccessValue(reinterpret_cast<uintptr_t>(code),
2495 reinterpret_cast<uintptr_t>(method));
2496 }
2497
2498 // Returns uint64_t representing raw bits from JValue.
2499 extern "C" uint64_t artInvokePolymorphic(mirror::Object* raw_receiver, Thread* self, ArtMethod** sp)
2500 REQUIRES_SHARED(Locks::mutator_lock_) {
2501 ScopedQuickEntrypointChecks sqec(self);
2502 DCHECK(raw_receiver != nullptr);
2503 DCHECK_EQ(*sp, Runtime::Current()->GetCalleeSaveMethod(CalleeSaveType::kSaveRefsAndArgs));
2504
2505 // Start new JNI local reference state
2506 JNIEnvExt* env = self->GetJniEnv();
2507 ScopedObjectAccessUnchecked soa(env);
2508 ScopedJniEnvLocalRefState env_state(env);
2509 const char* old_cause = self->StartAssertNoThreadSuspension("Making stack arguments safe.");
2510
2511 // From the instruction, get the |callsite_shorty| and expose arguments on the stack to the GC.
2512 ArtMethod* caller_method = QuickArgumentVisitor::GetCallingMethod(sp);
2513 uint32_t dex_pc = QuickArgumentVisitor::GetCallingDexPc(sp);
2514 const Instruction& inst = caller_method->DexInstructions().InstructionAt(dex_pc);
2515 DCHECK(inst.Opcode() == Instruction::INVOKE_POLYMORPHIC ||
2516 inst.Opcode() == Instruction::INVOKE_POLYMORPHIC_RANGE);
2517 const dex::ProtoIndex proto_idx(inst.VRegH());
2518 const char* shorty = caller_method->GetDexFile()->GetShorty(proto_idx);
2519 const size_t shorty_length = strlen(shorty);
2520 static const bool kMethodIsStatic = false; // invoke() and invokeExact() are not static.
2521 RememberForGcArgumentVisitor gc_visitor(sp, kMethodIsStatic, shorty, shorty_length, &soa);
2522 gc_visitor.VisitArguments();
2523
2524 // Wrap raw_receiver in a Handle for safety.
2525 StackHandleScope<3> hs(self);
2526 Handle<mirror::Object> receiver_handle(hs.NewHandle(raw_receiver));
2527 raw_receiver = nullptr;
2528 self->EndAssertNoThreadSuspension(old_cause);
2529
2530 // Resolve method.
2531 ClassLinker* linker = Runtime::Current()->GetClassLinker();
2532 ArtMethod* resolved_method = linker->ResolveMethod<ClassLinker::ResolveMode::kCheckICCEAndIAE>(
2533 self, inst.VRegB(), caller_method, kVirtual);
2534
2535 Handle<mirror::MethodType> method_type(
2536 hs.NewHandle(linker->ResolveMethodType(self, proto_idx, caller_method)));
2537 if (UNLIKELY(method_type.IsNull())) {
2538 // This implies we couldn't resolve one or more types in this method handle.
2539 CHECK(self->IsExceptionPending());
2540 return 0UL;
2541 }
2542
2543 DCHECK_EQ(ArtMethod::NumArgRegisters(shorty) + 1u, (uint32_t)inst.VRegA());
2544 DCHECK_EQ(resolved_method->IsStatic(), kMethodIsStatic);
2545
2546 // Fix references before constructing the shadow frame.
2547 gc_visitor.FixupReferences();
2548
2549 // Construct shadow frame placing arguments consecutively from |first_arg|.
2550 const bool is_range = (inst.Opcode() == Instruction::INVOKE_POLYMORPHIC_RANGE);
2551 const size_t num_vregs = is_range ? inst.VRegA_4rcc() : inst.VRegA_45cc();
2552 const size_t first_arg = 0;
2553 ShadowFrameAllocaUniquePtr shadow_frame_unique_ptr =
2554 CREATE_SHADOW_FRAME(num_vregs, /* link= */ nullptr, resolved_method, dex_pc);
2555 ShadowFrame* shadow_frame = shadow_frame_unique_ptr.get();
2556 ScopedStackedShadowFramePusher
2557 frame_pusher(self, shadow_frame, StackedShadowFrameType::kShadowFrameUnderConstruction);
2558 BuildQuickShadowFrameVisitor shadow_frame_builder(sp,
2559 kMethodIsStatic,
2560 shorty,
2561 strlen(shorty),
2562 shadow_frame,
2563 first_arg);
2564 shadow_frame_builder.VisitArguments();
2565
2566 // Push a transition back into managed code onto the linked list in thread.
2567 ManagedStack fragment;
2568 self->PushManagedStackFragment(&fragment);
2569
2570 // Perform the polymorphic invoke with |is_range| = true, as the shadow frame has the argument
2571 // registers in consecutive order.
2572 RangeInstructionOperands operands(first_arg + 1, num_vregs - 1);
2573 Intrinsics intrinsic = static_cast<Intrinsics>(resolved_method->GetIntrinsic());
2574 JValue result;
2575 bool success = false;
2576 if (resolved_method->GetDeclaringClass() == GetClassRoot<mirror::MethodHandle>(linker)) {
2577 Handle<mirror::MethodHandle> method_handle(hs.NewHandle(
2578 ObjPtr<mirror::MethodHandle>::DownCast(receiver_handle.Get())));
2579 if (intrinsic == Intrinsics::kMethodHandleInvokeExact) {
2580 success = MethodHandleInvokeExact(self,
2581 *shadow_frame,
2582 method_handle,
2583 method_type,
2584 &operands,
2585 &result);
2586 } else {
2587 DCHECK_EQ(static_cast<uint32_t>(intrinsic),
2588 static_cast<uint32_t>(Intrinsics::kMethodHandleInvoke));
2589 success = MethodHandleInvoke(self,
2590 *shadow_frame,
2591 method_handle,
2592 method_type,
2593 &operands,
2594 &result);
2595 }
2596 } else {
2597 DCHECK_EQ(GetClassRoot<mirror::VarHandle>(linker), resolved_method->GetDeclaringClass());
2598 Handle<mirror::VarHandle> var_handle(hs.NewHandle(
2599 ObjPtr<mirror::VarHandle>::DownCast(receiver_handle.Get())));
2600 mirror::VarHandle::AccessMode access_mode =
2601 mirror::VarHandle::GetAccessModeByIntrinsic(intrinsic);
2602 success = VarHandleInvokeAccessor(self,
2603 *shadow_frame,
2604 var_handle,
2605 method_type,
2606 access_mode,
2607 &operands,
2608 &result);
2609 }
2610
2611 DCHECK(success || self->IsExceptionPending());
2612
2613 // Pop transition record.
2614 self->PopManagedStackFragment(fragment);
2615
2616 return result.GetJ();
2617 }
2618
2619 // Returns uint64_t representing raw bits from JValue.
2620 extern "C" uint64_t artInvokeCustom(uint32_t call_site_idx, Thread* self, ArtMethod** sp)
2621 REQUIRES_SHARED(Locks::mutator_lock_) {
2622 ScopedQuickEntrypointChecks sqec(self);
2623 DCHECK_EQ(*sp, Runtime::Current()->GetCalleeSaveMethod(CalleeSaveType::kSaveRefsAndArgs));
2624
2625 // invoke-custom is effectively a static call (no receiver).
2626 static constexpr bool kMethodIsStatic = true;
2627
2628 // Start new JNI local reference state
2629 JNIEnvExt* env = self->GetJniEnv();
2630 ScopedObjectAccessUnchecked soa(env);
2631 ScopedJniEnvLocalRefState env_state(env);
2632
2633 const char* old_cause = self->StartAssertNoThreadSuspension("Making stack arguments safe.");
2634
2635 // From the instruction, get the |callsite_shorty| and expose arguments on the stack to the GC.
2636 ArtMethod* caller_method = QuickArgumentVisitor::GetCallingMethod(sp);
2637 uint32_t dex_pc = QuickArgumentVisitor::GetCallingDexPc(sp);
2638 const DexFile* dex_file = caller_method->GetDexFile();
2639 const dex::ProtoIndex proto_idx(dex_file->GetProtoIndexForCallSite(call_site_idx));
2640 const char* shorty = caller_method->GetDexFile()->GetShorty(proto_idx);
2641 const uint32_t shorty_len = strlen(shorty);
2642
2643 // Construct the shadow frame placing arguments consecutively from |first_arg|.
2644 const size_t first_arg = 0;
2645 const size_t num_vregs = ArtMethod::NumArgRegisters(shorty);
2646 ShadowFrameAllocaUniquePtr shadow_frame_unique_ptr =
2647 CREATE_SHADOW_FRAME(num_vregs, /* link= */ nullptr, caller_method, dex_pc);
2648 ShadowFrame* shadow_frame = shadow_frame_unique_ptr.get();
2649 ScopedStackedShadowFramePusher
2650 frame_pusher(self, shadow_frame, StackedShadowFrameType::kShadowFrameUnderConstruction);
2651 BuildQuickShadowFrameVisitor shadow_frame_builder(sp,
2652 kMethodIsStatic,
2653 shorty,
2654 shorty_len,
2655 shadow_frame,
2656 first_arg);
2657 shadow_frame_builder.VisitArguments();
2658
2659 // Push a transition back into managed code onto the linked list in thread.
2660 ManagedStack fragment;
2661 self->PushManagedStackFragment(&fragment);
2662 self->EndAssertNoThreadSuspension(old_cause);
2663
2664 // Perform the invoke-custom operation.
2665 RangeInstructionOperands operands(first_arg, num_vregs);
2666 JValue result;
2667 bool success =
2668 interpreter::DoInvokeCustom(self, *shadow_frame, call_site_idx, &operands, &result);
2669 DCHECK(success || self->IsExceptionPending());
2670
2671 // Pop transition record.
2672 self->PopManagedStackFragment(fragment);
2673
2674 return result.GetJ();
2675 }
2676
2677 } // namespace art
2678