/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "calling_convention_x86.h"

#include <android-base/logging.h>

#include "arch/instruction_set.h"
#include "arch/x86/jni_frame_x86.h"
#include "handle_scope-inl.h"
#include "utils/x86/managed_register_x86.h"

namespace art {
namespace x86 {

static constexpr Register kManagedCoreArgumentRegisters[] = {
    EAX, ECX, EDX, EBX
};
static constexpr size_t kManagedCoreArgumentRegistersCount =
    arraysize(kManagedCoreArgumentRegisters);
static constexpr size_t kManagedFpArgumentRegistersCount = 4u;

static constexpr ManagedRegister kCalleeSaveRegisters[] = {
    // Core registers.
    X86ManagedRegister::FromCpuRegister(EBP),
    X86ManagedRegister::FromCpuRegister(ESI),
    X86ManagedRegister::FromCpuRegister(EDI),
    // No hard float callee saves.
};

template <size_t size>
static constexpr uint32_t CalculateCoreCalleeSpillMask(
    const ManagedRegister (&callee_saves)[size]) {
  // The spilled PC gets a special marker.
  uint32_t result = 1 << kNumberOfCpuRegisters;
  for (auto&& r : callee_saves) {
    if (r.AsX86().IsCpuRegister()) {
      result |= (1 << r.AsX86().AsCpuRegister());
    }
  }
  return result;
}

static constexpr uint32_t kCoreCalleeSpillMask = CalculateCoreCalleeSpillMask(kCalleeSaveRegisters);
static constexpr uint32_t kFpCalleeSpillMask = 0u;
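// For reference: with ART's x86 register numbering (EAX = 0 through EDI = 7, matching
// the hardware encoding, and kNumberOfCpuRegisters = 8), kCoreCalleeSpillMask above
// works out to (1u << 8) | (1u << 5) | (1u << 6) | (1u << 7) = 0x1e0.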

static constexpr ManagedRegister kNativeCalleeSaveRegisters[] = {
    // Core registers.
    X86ManagedRegister::FromCpuRegister(EBX),
    X86ManagedRegister::FromCpuRegister(EBP),
    X86ManagedRegister::FromCpuRegister(ESI),
    X86ManagedRegister::FromCpuRegister(EDI),
    // No hard float callee saves.
};
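// Note that the managed callee saves (EBP, ESI, EDI) are a subset of the native ones
// above; the static_asserts in OutFrameSize() rely on this to allow the @CriticalNative
// tail call.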

static constexpr uint32_t kNativeCoreCalleeSpillMask =
    CalculateCoreCalleeSpillMask(kNativeCalleeSaveRegisters);
static constexpr uint32_t kNativeFpCalleeSpillMask = 0u;

// Calling convention

ManagedRegister X86JniCallingConvention::ReturnScratchRegister() const {
  return ManagedRegister::NoRegister();  // No free regs, so assembler uses push/pop.
}

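// The first character of a shorty encodes the return type and the remaining characters
// the argument types: 'V' void, 'Z' boolean, 'B' byte, 'C' char, 'S' short, 'I' int,
// 'J' long, 'F' float, 'D' double, 'L' reference.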
static ManagedRegister ReturnRegisterForShorty(const char* shorty, bool jni) {
  if (shorty[0] == 'F' || shorty[0] == 'D') {
    if (jni) {
      return X86ManagedRegister::FromX87Register(ST0);
    } else {
      return X86ManagedRegister::FromXmmRegister(XMM0);
    }
  } else if (shorty[0] == 'J') {
    return X86ManagedRegister::FromRegisterPair(EAX_EDX);
  } else if (shorty[0] == 'V') {
    return ManagedRegister::NoRegister();
  } else {
    return X86ManagedRegister::FromCpuRegister(EAX);
  }
}

ManagedRegister X86ManagedRuntimeCallingConvention::ReturnRegister() {
  return ReturnRegisterForShorty(GetShorty(), false);
}

ManagedRegister X86JniCallingConvention::ReturnRegister() {
  return ReturnRegisterForShorty(GetShorty(), true);
}

ManagedRegister X86JniCallingConvention::IntReturnRegister() {
  return X86ManagedRegister::FromCpuRegister(EAX);
}

// Managed runtime calling convention

ManagedRegister X86ManagedRuntimeCallingConvention::MethodRegister() {
  return X86ManagedRegister::FromCpuRegister(EAX);
}

void X86ManagedRuntimeCallingConvention::ResetIterator(FrameOffset displacement) {
  ManagedRuntimeCallingConvention::ResetIterator(displacement);
  gpr_arg_count_ = 1u;  // Skip EAX for ArtMethod*.
}

void X86ManagedRuntimeCallingConvention::Next() {
  if (!IsCurrentParamAFloatOrDouble()) {
    gpr_arg_count_ += IsCurrentParamALong() ? 2u : 1u;
  }
  ManagedRuntimeCallingConvention::Next();
}

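// A worked example, derived from the rules below: for an instance method with shorty
// "VJI", gpr_arg_count_ starts at 1u (EAX holds the ArtMethod*), so the receiver is
// passed in ECX, the long argument in the EDX/EBX pair, and the trailing int argument
// on the stack.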
bool X86ManagedRuntimeCallingConvention::IsCurrentParamInRegister() {
  if (IsCurrentParamAFloatOrDouble()) {
    return itr_float_and_doubles_ < kManagedFpArgumentRegistersCount;
  } else {
    // Don't split a long between the last register and the stack.
    size_t extra_regs = IsCurrentParamALong() ? 1u : 0u;
    return gpr_arg_count_ + extra_regs < kManagedCoreArgumentRegistersCount;
  }
}

bool X86ManagedRuntimeCallingConvention::IsCurrentParamOnStack() {
  return !IsCurrentParamInRegister();
}

ManagedRegister X86ManagedRuntimeCallingConvention::CurrentParamRegister() {
  DCHECK(IsCurrentParamInRegister());
  if (IsCurrentParamAFloatOrDouble()) {
    // The first four float parameters are passed via XMM0..XMM3.
    XmmRegister reg = static_cast<XmmRegister>(XMM0 + itr_float_and_doubles_);
    return X86ManagedRegister::FromXmmRegister(reg);
  } else {
    if (IsCurrentParamALong()) {
      switch (gpr_arg_count_) {
        case 1:
          static_assert(kManagedCoreArgumentRegisters[1] == ECX);
          static_assert(kManagedCoreArgumentRegisters[2] == EDX);
          return X86ManagedRegister::FromRegisterPair(ECX_EDX);
        case 2:
          static_assert(kManagedCoreArgumentRegisters[2] == EDX);
          static_assert(kManagedCoreArgumentRegisters[3] == EBX);
          return X86ManagedRegister::FromRegisterPair(EDX_EBX);
        default:
          LOG(FATAL) << "UNREACHABLE";
          UNREACHABLE();
      }
    } else {
      Register core_reg = kManagedCoreArgumentRegisters[gpr_arg_count_];
      return X86ManagedRegister::FromCpuRegister(core_reg);
    }
  }
}

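// Note: itr_slots_ counts 32-bit vreg slots, so longs and doubles advance it by two;
// the offset below skips the ArtMethod* slot that sits at displacement_.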
FrameOffset X86ManagedRuntimeCallingConvention::CurrentParamStackOffset() {
  return FrameOffset(displacement_.Int32Value() +        // displacement
                     kFramePointerSize +                 // Method*
                     (itr_slots_ * kFramePointerSize));  // offset into in args
}

// JNI calling convention

X86JniCallingConvention::X86JniCallingConvention(bool is_static,
                                                 bool is_synchronized,
                                                 bool is_critical_native,
                                                 const char* shorty)
    : JniCallingConvention(is_static,
                           is_synchronized,
                           is_critical_native,
                           shorty,
                           kX86PointerSize) {
}

uint32_t X86JniCallingConvention::CoreSpillMask() const {
  return is_critical_native_ ? 0u : kCoreCalleeSpillMask;
}

uint32_t X86JniCallingConvention::FpSpillMask() const {
  return is_critical_native_ ? 0u : kFpCalleeSpillMask;
}

size_t X86JniCallingConvention::FrameSize() const {
  if (is_critical_native_) {
    CHECK(!SpillsMethod());
    CHECK(!HasLocalReferenceSegmentState());
    CHECK(!HasHandleScope());
    CHECK(!SpillsReturnValue());
    return 0u;  // There is no managed frame for @CriticalNative.
  }

  // Method*, PC return address, callee-save area size, and local reference segment state.
  CHECK(SpillsMethod());
  const size_t method_ptr_size = static_cast<size_t>(kX86PointerSize);
  const size_t pc_return_addr_size = kFramePointerSize;
  const size_t callee_save_area_size = CalleeSaveRegisters().size() * kFramePointerSize;
  size_t total_size = method_ptr_size + pc_return_addr_size + callee_save_area_size;

  CHECK(HasLocalReferenceSegmentState());
  total_size += kFramePointerSize;

  CHECK(HasHandleScope());
  total_size += HandleScope::SizeOf(kX86PointerSize, ReferenceCount());

  // Plus return value spill area size.
  CHECK(SpillsReturnValue());
  total_size += SizeOfReturnValue();

  return RoundUp(total_size, kStackAlignment);
}

size_t X86JniCallingConvention::OutFrameSize() const {
  // The size of outgoing arguments.
  size_t size = GetNativeOutArgsSize(/*num_args=*/ NumberOfExtraArgumentsForJni() + NumArgs(),
                                     NumLongOrDoubleArgs());
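  // (The native x86 cdecl convention passes every argument on the stack, one 4-byte
  // slot per argument and an extra slot for each long or double, which is why
  // NumLongOrDoubleArgs() is passed separately.)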

  // @CriticalNative can use a tail call: every managed callee save is also a callee
  // save in the native ABI.
  static_assert((kCoreCalleeSpillMask & ~kNativeCoreCalleeSpillMask) == 0u);
  static_assert((kFpCalleeSpillMask & ~kNativeFpCalleeSpillMask) == 0u);

  if (UNLIKELY(IsCriticalNative())) {
    // Add return address size for @CriticalNative.
    // For normal native the return PC is part of the managed stack frame instead of out args.
    size += kFramePointerSize;
    // For @CriticalNative, we can make a tail call if there are no stack args
    // and the return type is not an FP type (needs moving from ST0 to XMM0) and
    // we do not need to extend the result.
    bool return_type_ok = GetShorty()[0] == 'I' || GetShorty()[0] == 'J' || GetShorty()[0] == 'V';
    DCHECK_EQ(
        return_type_ok,
        GetShorty()[0] != 'F' && GetShorty()[0] != 'D' && !RequiresSmallResultTypeExtension());
    if (return_type_ok && size == kFramePointerSize) {
      // Note: This is not aligned to kNativeStackAlignment but that's OK for a tail call.
      static_assert(kFramePointerSize < kNativeStackAlignment);
      // The stub frame size is considered 0 in the callee, where the return PC is a part of
      // the callee frame, but it is kPointerSize in the compiled stub before the tail call.
      DCHECK_EQ(0u, GetCriticalNativeStubFrameSize(GetShorty(), NumArgs() + 1u));
      return kFramePointerSize;
    }
  }

  size_t out_args_size = RoundUp(size, kNativeStackAlignment);
  if (UNLIKELY(IsCriticalNative())) {
    DCHECK_EQ(out_args_size, GetCriticalNativeStubFrameSize(GetShorty(), NumArgs() + 1u));
  }
  return out_args_size;
}

ArrayRef<const ManagedRegister> X86JniCallingConvention::CalleeSaveRegisters() const {
  if (UNLIKELY(IsCriticalNative())) {
    // Do not spill anything, whether tail call or not (return PC is already on the stack).
    return ArrayRef<const ManagedRegister>();
  } else {
    return ArrayRef<const ManagedRegister>(kCalleeSaveRegisters);
  }
}

bool X86JniCallingConvention::IsCurrentParamInRegister() {
  return false;  // Everything is passed by stack.
}

bool X86JniCallingConvention::IsCurrentParamOnStack() {
  return true;  // Everything is passed by stack.
}

ManagedRegister X86JniCallingConvention::CurrentParamRegister() {
  LOG(FATAL) << "Should not reach here";
  UNREACHABLE();
}

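// Note: for JNI the iterator runs over the native argument list, which also covers
// JNIEnv* and, for static methods, the jclass (unless @CriticalNative, which has no
// extra arguments); the out-args area sits OutFrameSize() below the managed frame,
// hence the subtraction below.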
FrameOffset X86JniCallingConvention::CurrentParamStackOffset() {
  return FrameOffset(
      displacement_.Int32Value() - OutFrameSize() + (itr_slots_ * kFramePointerSize));
}

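// The @CriticalNative native signature has no ArtMethod* slot, so when the call target
// cannot be bound statically, the method pointer is handed to the stub in a hidden
// argument register instead.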
ManagedRegister X86JniCallingConvention::HiddenArgumentRegister() const {
  CHECK(IsCriticalNative());
  // EAX is neither managed callee-save, nor argument register, nor scratch register.
  DCHECK(std::none_of(kCalleeSaveRegisters,
                      kCalleeSaveRegisters + std::size(kCalleeSaveRegisters),
                      [](ManagedRegister callee_save) constexpr {
                        return callee_save.Equals(X86ManagedRegister::FromCpuRegister(EAX));
                      }));
  return X86ManagedRegister::FromCpuRegister(EAX);
}

bool X86JniCallingConvention::UseTailCall() const {
  CHECK(IsCriticalNative());
  return OutFrameSize() == kFramePointerSize;
}

}  // namespace x86
}  // namespace art