1 /*
2  * Copyright (C) 2011 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #include "calling_convention_arm.h"
18 
19 #include <android-base/logging.h>
20 
21 #include "arch/arm/jni_frame_arm.h"
22 #include "arch/instruction_set.h"
23 #include "base/macros.h"
24 #include "handle_scope-inl.h"
25 #include "utils/arm/managed_register_arm.h"
26 
27 namespace art {
28 namespace arm {
29 
30 //
31 // JNI calling convention constants.
32 //
33 
// List of parameters passed via registers for JNI.
// JNI uses soft-float, so there is only a GPR list: all JNI arguments,
// including floats and doubles, are marshaled through r0-r3 and the stack.
static const Register kJniArgumentRegisters[] = {
  R0, R1, R2, R3
};

// kJniArgumentRegisterCount (from jni_frame_arm.h) must match the table above.
static_assert(kJniArgumentRegisterCount == arraysize(kJniArgumentRegisters));
41 
42 //
43 // Managed calling convention constants.
44 //
45 
46 // Used by hard float. (General purpose registers.)
47 static const Register kHFCoreArgumentRegisters[] = {
48   R0, R1, R2, R3
49 };
50 static constexpr size_t kHFCoreArgumentRegistersCount = arraysize(kHFCoreArgumentRegisters);
51 
52 // (VFP single-precision registers.)
53 static const SRegister kHFSArgumentRegisters[] = {
54   S0, S1, S2, S3, S4, S5, S6, S7, S8, S9, S10, S11, S12, S13, S14, S15
55 };
56 static constexpr size_t kHFSArgumentRegistersCount = arraysize(kHFSArgumentRegisters);
57 
58 // (VFP double-precision registers.)
59 static const DRegister kHFDArgumentRegisters[] = {
60   D0, D1, D2, D3, D4, D5, D6, D7
61 };
62 static constexpr size_t kHFDArgumentRegistersCount = arraysize(kHFDArgumentRegisters);
63 
64 static_assert(kHFDArgumentRegistersCount * 2 == kHFSArgumentRegistersCount,
65     "ks d argument registers mismatch");
66 
//
// Shared managed+JNI calling convention constants.
//

// Registers the managed ABI treats as callee-save for these stubs.
// Note the core list omits r4 and r9 (r4 is used below as the @CriticalNative
// hidden argument register; r9's omission here is intentional — see the AAPCS
// list further down, which does include both). LR is included so it is
// spilled/restored together with the frame.
static constexpr ManagedRegister kCalleeSaveRegisters[] = {
    // Core registers.
    ArmManagedRegister::FromCoreRegister(R5),
    ArmManagedRegister::FromCoreRegister(R6),
    ArmManagedRegister::FromCoreRegister(R7),
    ArmManagedRegister::FromCoreRegister(R8),
    ArmManagedRegister::FromCoreRegister(R10),
    ArmManagedRegister::FromCoreRegister(R11),
    ArmManagedRegister::FromCoreRegister(LR),
    // Hard float registers (s16-s31).
    ArmManagedRegister::FromSRegister(S16),
    ArmManagedRegister::FromSRegister(S17),
    ArmManagedRegister::FromSRegister(S18),
    ArmManagedRegister::FromSRegister(S19),
    ArmManagedRegister::FromSRegister(S20),
    ArmManagedRegister::FromSRegister(S21),
    ArmManagedRegister::FromSRegister(S22),
    ArmManagedRegister::FromSRegister(S23),
    ArmManagedRegister::FromSRegister(S24),
    ArmManagedRegister::FromSRegister(S25),
    ArmManagedRegister::FromSRegister(S26),
    ArmManagedRegister::FromSRegister(S27),
    ArmManagedRegister::FromSRegister(S28),
    ArmManagedRegister::FromSRegister(S29),
    ArmManagedRegister::FromSRegister(S30),
    ArmManagedRegister::FromSRegister(S31)
};
98 
99 template <size_t size>
CalculateCoreCalleeSpillMask(const ManagedRegister (& callee_saves)[size])100 static constexpr uint32_t CalculateCoreCalleeSpillMask(
101     const ManagedRegister (&callee_saves)[size]) {
102   // LR is a special callee save which is not reported by CalleeSaveRegisters().
103   uint32_t result = 0u;
104   for (auto&& r : callee_saves) {
105     if (r.AsArm().IsCoreRegister()) {
106       result |= (1u << r.AsArm().AsCoreRegister());
107     }
108   }
109   return result;
110 }
111 
112 template <size_t size>
CalculateFpCalleeSpillMask(const ManagedRegister (& callee_saves)[size])113 static constexpr uint32_t CalculateFpCalleeSpillMask(const ManagedRegister (&callee_saves)[size]) {
114   uint32_t result = 0u;
115   for (auto&& r : callee_saves) {
116     if (r.AsArm().IsSRegister()) {
117       result |= (1u << r.AsArm().AsSRegister());
118     }
119   }
120   return result;
121 }
122 
// Compile-time spill masks for the managed-ABI callee saves listed above.
static constexpr uint32_t kCoreCalleeSpillMask = CalculateCoreCalleeSpillMask(kCalleeSaveRegisters);
static constexpr uint32_t kFpCalleeSpillMask = CalculateFpCalleeSpillMask(kCalleeSaveRegisters);
125 
// Full AAPCS callee-save set (used for @CriticalNative tail-call reasoning
// below). Unlike kCalleeSaveRegisters, this list also contains r4 and r9.
static constexpr ManagedRegister kAapcsCalleeSaveRegisters[] = {
    // Core registers.
    ArmManagedRegister::FromCoreRegister(R4),
    ArmManagedRegister::FromCoreRegister(R5),
    ArmManagedRegister::FromCoreRegister(R6),
    ArmManagedRegister::FromCoreRegister(R7),
    ArmManagedRegister::FromCoreRegister(R8),
    ArmManagedRegister::FromCoreRegister(R9),  // The platform register is callee-save on Android.
    ArmManagedRegister::FromCoreRegister(R10),
    ArmManagedRegister::FromCoreRegister(R11),
    ArmManagedRegister::FromCoreRegister(LR),
    // Hard float registers (s16-s31).
    ArmManagedRegister::FromSRegister(S16),
    ArmManagedRegister::FromSRegister(S17),
    ArmManagedRegister::FromSRegister(S18),
    ArmManagedRegister::FromSRegister(S19),
    ArmManagedRegister::FromSRegister(S20),
    ArmManagedRegister::FromSRegister(S21),
    ArmManagedRegister::FromSRegister(S22),
    ArmManagedRegister::FromSRegister(S23),
    ArmManagedRegister::FromSRegister(S24),
    ArmManagedRegister::FromSRegister(S25),
    ArmManagedRegister::FromSRegister(S26),
    ArmManagedRegister::FromSRegister(S27),
    ArmManagedRegister::FromSRegister(S28),
    ArmManagedRegister::FromSRegister(S29),
    ArmManagedRegister::FromSRegister(S30),
    ArmManagedRegister::FromSRegister(S31)
};

// Spill masks for the AAPCS callee-save set.
static constexpr uint32_t kAapcsCoreCalleeSpillMask =
    CalculateCoreCalleeSpillMask(kAapcsCalleeSaveRegisters);
static constexpr uint32_t kAapcsFpCalleeSpillMask =
    CalculateFpCalleeSpillMask(kAapcsCalleeSaveRegisters);
160 
161 // Calling convention
162 
ReturnRegister()163 ManagedRegister ArmManagedRuntimeCallingConvention::ReturnRegister() {
164   switch (GetShorty()[0]) {
165     case 'V':
166       return ArmManagedRegister::NoRegister();
167     case 'D':
168       return ArmManagedRegister::FromDRegister(D0);
169     case 'F':
170       return ArmManagedRegister::FromSRegister(S0);
171     case 'J':
172       return ArmManagedRegister::FromRegisterPair(R0_R1);
173     default:
174       return ArmManagedRegister::FromCoreRegister(R0);
175   }
176 }
177 
ReturnRegister()178 ManagedRegister ArmJniCallingConvention::ReturnRegister() {
179   switch (GetShorty()[0]) {
180   case 'V':
181     return ArmManagedRegister::NoRegister();
182   case 'D':
183   case 'J':
184     return ArmManagedRegister::FromRegisterPair(R0_R1);
185   default:
186     return ArmManagedRegister::FromCoreRegister(R0);
187   }
188 }
189 
// Register holding a 32-bit integer return value in the native ABI.
ManagedRegister ArmJniCallingConvention::IntReturnRegister() {
  return ArmManagedRegister::FromCoreRegister(R0);
}
193 
// Managed runtime calling convention

// Register carrying the ArtMethod* on entry to managed code.
ManagedRegister ArmManagedRuntimeCallingConvention::MethodRegister() {
  return ArmManagedRegister::FromCoreRegister(R0);
}
199 
ResetIterator(FrameOffset displacement)200 void ArmManagedRuntimeCallingConvention::ResetIterator(FrameOffset displacement) {
201   ManagedRuntimeCallingConvention::ResetIterator(displacement);
202   gpr_index_ = 1u;  // Skip r0 for ArtMethod*
203   float_index_ = 0u;
204   double_index_ = 0u;
205 }
206 
// Advances the iterator past the current parameter, updating the ARM-specific
// register cursors. Floats and doubles are tracked with two cursors because a
// D register corresponds to two consecutive S-register slots; the DCHECKs
// below maintain the invariant float_index_ <= double_index_ * 2. When a
// double forces alignment, one odd S slot is left behind, and a later lone
// float may consume ("back-fill") it.
void ArmManagedRuntimeCallingConvention::Next() {
  if (IsCurrentParamAFloatOrDouble()) {
    if (float_index_ % 2 == 0) {
      // The register for the current float is the same as the first register for double.
      DCHECK_EQ(float_index_, double_index_ * 2u);
    } else {
      // There is a space for an extra float before space for a double.
      DCHECK_LT(float_index_, double_index_ * 2u);
    }
    if (IsCurrentParamADouble()) {
      double_index_ += 1u;
      if (float_index_ % 2 == 0) {
        // No pending odd float slot: keep the float cursor in sync with doubles.
        float_index_ = double_index_ * 2u;
      }
    } else {
      if (float_index_ % 2 == 0) {
        float_index_ += 1u;
        double_index_ += 1u;  // Leaves space for one more float before the next double.
      } else {
        // Consumed the pending odd float slot; jump to the next aligned slot.
        float_index_ = double_index_ * 2u;
      }
    }
  } else {  // Not a float/double.
    if (IsCurrentParamALong()) {
      // Note that the alignment to even register is done lazily.
      gpr_index_ = RoundUp(gpr_index_, 2u) + 2u;
    } else {
      gpr_index_ += 1u;
    }
  }
  ManagedRuntimeCallingConvention::Next();
}
239 
IsCurrentParamInRegister()240 bool ArmManagedRuntimeCallingConvention::IsCurrentParamInRegister() {
241   if (IsCurrentParamAFloatOrDouble()) {
242     if (IsCurrentParamADouble()) {
243       return double_index_ < kHFDArgumentRegistersCount;
244     } else {
245       return float_index_ < kHFSArgumentRegistersCount;
246     }
247   } else {
248     if (IsCurrentParamALong()) {
249       // Round up to even register and do not split a long between the last register and the stack.
250       return RoundUp(gpr_index_, 2u) + 1u < kHFCoreArgumentRegistersCount;
251     } else {
252       return gpr_index_ < kHFCoreArgumentRegistersCount;
253     }
254   }
255 }
256 
IsCurrentParamOnStack()257 bool ArmManagedRuntimeCallingConvention::IsCurrentParamOnStack() {
258   return !IsCurrentParamInRegister();
259 }
260 
CurrentParamRegister()261 ManagedRegister ArmManagedRuntimeCallingConvention::CurrentParamRegister() {
262   DCHECK(IsCurrentParamInRegister());
263   if (IsCurrentParamAFloatOrDouble()) {
264     if (IsCurrentParamADouble()) {
265       return ArmManagedRegister::FromDRegister(kHFDArgumentRegisters[double_index_]);
266     } else {
267       return ArmManagedRegister::FromSRegister(kHFSArgumentRegisters[float_index_]);
268     }
269   } else {
270     if (IsCurrentParamALong()) {
271       // Currently the only register pair for a long parameter is r2-r3.
272       // Note that the alignment to even register is done lazily.
273       CHECK_EQ(RoundUp(gpr_index_, 2u), 2u);
274       return ArmManagedRegister::FromRegisterPair(R2_R3);
275     } else {
276       return ArmManagedRegister::FromCoreRegister(kHFCoreArgumentRegisters[gpr_index_]);
277     }
278   }
279 }
280 
CurrentParamStackOffset()281 FrameOffset ArmManagedRuntimeCallingConvention::CurrentParamStackOffset() {
282   return FrameOffset(displacement_.Int32Value() +        // displacement
283                      kFramePointerSize +                 // Method*
284                      (itr_slots_ * kFramePointerSize));  // offset into in args
285 }
286 
287 // JNI calling convention
288 
// Constructor. Besides forwarding to the base class, this precomputes
// padding_: the number of stack bytes needed so that no long/double argument
// is split across an odd/even slot boundary under AAPCS.
ArmJniCallingConvention::ArmJniCallingConvention(bool is_static,
                                                 bool is_synchronized,
                                                 bool is_critical_native,
                                                 const char* shorty)
    : JniCallingConvention(is_static,
                           is_synchronized,
                           is_critical_native,
                           shorty,
                           kArmPointerSize) {
  // AAPCS 4.1 specifies fundamental alignments for each type. All of our stack arguments are
  // usually 4-byte aligned, however longs and doubles must be 8 bytes aligned. Add padding to
  // maintain 8-byte alignment invariant.
  //
  // Compute padding to ensure longs and doubles are not split in AAPCS.
  size_t shift = 0;

  size_t cur_arg, cur_reg;
  if (LIKELY(HasExtraArgumentsForJni())) {
    // Ignore the 'this' jobject or jclass for static methods and the JNIEnv.
    // We start at the aligned register r2.
    //
    // Ignore the first 2 parameters because they are guaranteed to be aligned.
    cur_arg = NumImplicitArgs();  // skip the "this" arg.
    cur_reg = 2;  // skip {r0=JNIEnv, r1=jobject} / {r0=JNIEnv, r1=jclass} parameters (start at r2).
  } else {
    // @CriticalNative has no JNIEnv/jclass arguments: check every parameter.
    cur_arg = 0;
    cur_reg = 0;
  }

  // TODO: Maybe should just use IsCurrentParamALongOrDouble instead to be cleaner?
  // (this just seems like an unnecessary micro-optimization).

  // Shift across a logical register mapping that looks like:
  //
  //   | r0 | r1 | r2 | r3 | SP | SP+4| SP+8 | SP+12 | ... | SP+n | SP+n+4 |
  //
  //   (where SP is some arbitrary stack pointer that our 0th stack arg would go into).
  //
  // Any time there would normally be a long/double in an odd logical register,
  // we have to push out the rest of the mappings by 4 bytes to maintain an 8-byte alignment.
  //
  // This works for both physical register pairs {r0, r1}, {r2, r3} and for when
  // the value is on the stack.
  //
  // For example:
  // (a) long would normally go into r1, but we shift it into r2
  //  | INT | (PAD) | LONG      |
  //  | r0  |  r1   |  r2  | r3 |
  //
  // (b) long would normally go into r3, but we shift it into SP
  //  | INT | INT | INT | (PAD) | LONG     |
  //  | r0  |  r1 |  r2 |  r3   | SP+4 SP+8|
  //
  // where INT is any <=4 byte arg, and LONG is any 8-byte arg.
  for (; cur_arg < NumArgs(); cur_arg++) {
    if (IsParamALongOrDouble(cur_arg)) {
      if ((cur_reg & 1) != 0) {  // check that it's in a logical contiguous register pair
        shift += 4;
        cur_reg++;  // additional bump to ensure alignment
      }
      cur_reg += 2;  // bump the iterator twice for every long argument
    } else {
      cur_reg++;  // bump the iterator for every non-long argument
    }
  }

  if (cur_reg <= kJniArgumentRegisterCount) {
    // As a special case when, as a result of shifting (or not) there are no arguments on the stack,
    // we actually have 0 stack padding.
    //
    // For example with @CriticalNative and:
    // (int, long) -> shifts the long but doesn't need to pad the stack
    //
    //          shift
    //           \/
    //  | INT | (PAD) | LONG      | (EMPTY) ...
    //  | r0  |  r1   |  r2  | r3 |   SP    ...
    //                                /\
    //                          no stack padding
    padding_ = 0;
  } else {
    padding_ = shift;
  }

  // TODO: add some new JNI tests for @CriticalNative that introduced new edge cases
  // (a) Using r0,r1 pair = f(long,...)
  // (b) Shifting r1 long into r2,r3 pair = f(int, long, int, ...);
  // (c) Shifting but not introducing a stack padding = f(int, long);
}
379 
CoreSpillMask() const380 uint32_t ArmJniCallingConvention::CoreSpillMask() const {
381   // Compute spill mask to agree with callee saves initialized in the constructor
382   return is_critical_native_ ? 0u : kCoreCalleeSpillMask;
383 }
384 
FpSpillMask() const385 uint32_t ArmJniCallingConvention::FpSpillMask() const {
386   return is_critical_native_ ? 0u : kFpCalleeSpillMask;
387 }
388 
// Scratch register available while handling the return value (r2 is neither
// a return register nor callee-save here).
ManagedRegister ArmJniCallingConvention::ReturnScratchRegister() const {
  return ArmManagedRegister::FromCoreRegister(R2);
}
392 
// Total size of the managed frame for this JNI stub: ArtMethod*, callee-save
// spills, local reference segment state, handle scope and a return-value
// spill slot, rounded up to kStackAlignment. @CriticalNative stubs have no
// managed frame at all.
size_t ArmJniCallingConvention::FrameSize() const {
  if (UNLIKELY(is_critical_native_)) {
    CHECK(!SpillsMethod());
    CHECK(!HasLocalReferenceSegmentState());
    CHECK(!HasHandleScope());
    CHECK(!SpillsReturnValue());
    return 0u;  // There is no managed frame for @CriticalNative.
  }

  // Method*, callee save area size, local reference segment state
  CHECK(SpillsMethod());
  const size_t method_ptr_size = static_cast<size_t>(kArmPointerSize);
  const size_t callee_save_area_size = CalleeSaveRegisters().size() * kFramePointerSize;
  size_t total_size = method_ptr_size + callee_save_area_size;

  CHECK(HasLocalReferenceSegmentState());
  // local reference segment state
  total_size += kFramePointerSize;
  // TODO: Probably better to use sizeof(IRTSegmentState) here...

  CHECK(HasHandleScope());
  total_size += HandleScope::SizeOf(kArmPointerSize, ReferenceCount());

  // Plus return value spill area size
  CHECK(SpillsReturnValue());
  total_size += SizeOfReturnValue();

  return RoundUp(total_size, kStackAlignment);
}
422 
// Size of the outgoing-argument area at the bottom of the frame (plus, for
// non-tail-call @CriticalNative, an LR spill slot), rounded up to
// kAapcsStackAlignment.
size_t ArmJniCallingConvention::OutFrameSize() const {
  // Count param args, including JNIEnv* and jclass*; count 8-byte args twice.
  size_t all_args = NumberOfExtraArgumentsForJni() + NumArgs() + NumLongOrDoubleArgs();
  // Account for arguments passed through r0-r3. (No FP args, AAPCS32 is soft-float.)
  size_t stack_args = all_args - std::min(kJniArgumentRegisterCount, all_args);
  // The size of outgoing arguments, including the alignment padding computed in the constructor.
  size_t size = stack_args * kFramePointerSize + padding_;

  // @CriticalNative can use tail call as all managed callee saves are preserved by AAPCS.
  static_assert((kCoreCalleeSpillMask & ~kAapcsCoreCalleeSpillMask) == 0u);
  static_assert((kFpCalleeSpillMask & ~kAapcsFpCalleeSpillMask) == 0u);

  // For @CriticalNative, we can make a tail call if there are no stack args and the
  // return type is not an FP type (otherwise we need to move the result to FP register).
  DCHECK(!RequiresSmallResultTypeExtension());
  if (is_critical_native_ && (size != 0u || GetShorty()[0] == 'F' || GetShorty()[0] == 'D')) {
    size += kFramePointerSize;  // We need to spill LR with the args.
  }
  size_t out_args_size = RoundUp(size, kAapcsStackAlignment);
  if (UNLIKELY(IsCriticalNative())) {
    // Cross-check against the size the @CriticalNative stub compiler computes.
    DCHECK_EQ(out_args_size, GetCriticalNativeStubFrameSize(GetShorty(), NumArgs() + 1u));
  }
  return out_args_size;
}
447 
// Registers this stub must spill: the full managed callee-save set for normal
// JNI, only LR for non-tail-call @CriticalNative, and nothing when tail
// calling.
ArrayRef<const ManagedRegister> ArmJniCallingConvention::CalleeSaveRegisters() const {
  if (UNLIKELY(IsCriticalNative())) {
    if (UseTailCall()) {
      return ArrayRef<const ManagedRegister>();  // Do not spill anything.
    } else {
      // Spill LR with out args.
      static_assert((kCoreCalleeSpillMask >> LR) == 1u);  // Contains LR as the highest bit.
      constexpr size_t lr_index = POPCOUNT(kCoreCalleeSpillMask) - 1u;
      static_assert(kCalleeSaveRegisters[lr_index].Equals(
                        ArmManagedRegister::FromCoreRegister(LR)));
      return ArrayRef<const ManagedRegister>(kCalleeSaveRegisters).SubArray(
          /*pos*/ lr_index, /*length=*/ 1u);
    }
  } else {
    return ArrayRef<const ManagedRegister>(kCalleeSaveRegisters);
  }
}
465 
466 // JniCallingConvention ABI follows AAPCS where longs and doubles must occur
467 // in even register numbers and stack slots
Next()468 void ArmJniCallingConvention::Next() {
469   // Update the iterator by usual JNI rules.
470   JniCallingConvention::Next();
471 
472   if (LIKELY(HasNext())) {  // Avoid CHECK failure for IsCurrentParam
473     // Ensure slot is 8-byte aligned for longs/doubles (AAPCS).
474     if (IsCurrentParamALongOrDouble() && ((itr_slots_ & 0x1u) != 0)) {
475       // itr_slots_ needs to be an even number, according to AAPCS.
476       itr_slots_++;
477     }
478   }
479 }
480 
IsCurrentParamInRegister()481 bool ArmJniCallingConvention::IsCurrentParamInRegister() {
482   return itr_slots_ < kJniArgumentRegisterCount;
483 }
484 
IsCurrentParamOnStack()485 bool ArmJniCallingConvention::IsCurrentParamOnStack() {
486   return !IsCurrentParamInRegister();
487 }
488 
CurrentParamRegister()489 ManagedRegister ArmJniCallingConvention::CurrentParamRegister() {
490   CHECK_LT(itr_slots_, kJniArgumentRegisterCount);
491   if (IsCurrentParamALongOrDouble()) {
492     // AAPCS 5.1.1 requires 64-bit values to be in a consecutive register pair:
493     // "A double-word sized type is passed in two consecutive registers (e.g., r0 and r1, or r2 and
494     // r3). The content of the registers is as if the value had been loaded from memory
495     // representation with a single LDM instruction."
496     if (itr_slots_ == 0u) {
497       return ArmManagedRegister::FromRegisterPair(R0_R1);
498     } else if (itr_slots_ == 2u) {
499       return ArmManagedRegister::FromRegisterPair(R2_R3);
500     } else {
501       // The register can either be R0 (+R1) or R2 (+R3). Cannot be other values.
502       LOG(FATAL) << "Invalid iterator register position for a long/double " << itr_args_;
503       UNREACHABLE();
504     }
505   } else {
506     // All other types can fit into one register.
507     return ArmManagedRegister::FromCoreRegister(kJniArgumentRegisters[itr_slots_]);
508   }
509 }
510 
CurrentParamStackOffset()511 FrameOffset ArmJniCallingConvention::CurrentParamStackOffset() {
512   CHECK_GE(itr_slots_, kJniArgumentRegisterCount);
513   size_t offset =
514       displacement_.Int32Value()
515           - OutFrameSize()
516           + ((itr_slots_ - kJniArgumentRegisterCount) * kFramePointerSize);
517   CHECK_LT(offset, OutFrameSize());
518   return FrameOffset(offset);
519 }
520 
// Register carrying the hidden argument for @CriticalNative stubs. r4 is
// chosen because it conflicts with nothing this stub uses, as the DCHECKs
// below verify.
ManagedRegister ArmJniCallingConvention::HiddenArgumentRegister() const {
  CHECK(IsCriticalNative());
  // R4 is neither managed callee-save, nor argument register, nor scratch register.
  // (It is native callee-save but the value coming from managed code can be clobbered.)
  // TODO: Change to static_assert; std::none_of should be constexpr since C++20.
  DCHECK(std::none_of(kCalleeSaveRegisters,
                      kCalleeSaveRegisters + std::size(kCalleeSaveRegisters),
                      [](ManagedRegister callee_save) constexpr {
                        return callee_save.Equals(ArmManagedRegister::FromCoreRegister(R4));
                      }));
  DCHECK(std::none_of(kJniArgumentRegisters,
                      kJniArgumentRegisters + std::size(kJniArgumentRegisters),
                      [](Register reg) { return reg == R4; }));
  return ArmManagedRegister::FromCoreRegister(R4);
}
536 
537 // Whether to use tail call (used only for @CriticalNative).
UseTailCall() const538 bool ArmJniCallingConvention::UseTailCall() const {
539   CHECK(IsCriticalNative());
540   return OutFrameSize() == 0u;
541 }
542 
543 }  // namespace arm
544 }  // namespace art
545