/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "jni_macro_assembler_arm_vixl.h"

#include <iostream>
#include <type_traits>

#include "entrypoints/quick/quick_entrypoints.h"
#include "thread.h"

using namespace vixl::aarch32;  // NOLINT(build/namespaces)
namespace vixl32 = vixl::aarch32;

using vixl::ExactAssemblyScope;
using vixl::CodeBufferCheckScope;

namespace art {
namespace arm {

#ifdef ___
#error "ARM Assembler macro already defined."
#else
#define ___ asm_.GetVIXLAssembler()->
#endif

// The AAPCS requires 8-byte alignment. This is not as strict as the Managed ABI stack alignment.
static constexpr size_t kAapcsStackAlignment = 8u;
static_assert(kAapcsStackAlignment < kStackAlignment);

// STRD immediate can encode any 4-byte aligned offset smaller than this cutoff.
static constexpr size_t kStrdOffsetCutoff = 1024u;

vixl::aarch32::Register AsVIXLRegister(ArmManagedRegister reg) {
  CHECK(reg.IsCoreRegister());
  return vixl::aarch32::Register(reg.RegId());
}

static inline vixl::aarch32::SRegister AsVIXLSRegister(ArmManagedRegister reg) {
  CHECK(reg.IsSRegister());
  return vixl::aarch32::SRegister(reg.RegId() - kNumberOfCoreRegIds);
}

static inline vixl::aarch32::DRegister AsVIXLDRegister(ArmManagedRegister reg) {
  CHECK(reg.IsDRegister());
  return vixl::aarch32::DRegister(reg.RegId() - kNumberOfCoreRegIds - kNumberOfSRegIds);
}

static inline vixl::aarch32::Register AsVIXLRegisterPairLow(ArmManagedRegister reg) {
  return vixl::aarch32::Register(reg.AsRegisterPairLow());
}

static inline vixl::aarch32::Register AsVIXLRegisterPairHigh(ArmManagedRegister reg) {
  return vixl::aarch32::Register(reg.AsRegisterPairHigh());
}

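// Emit the deferred exception slow paths collected by ExceptionPoll() and then
// finalize the underlying VIXL assembler.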
void ArmVIXLJNIMacroAssembler::FinalizeCode() {
  for (const std::unique_ptr<
      ArmVIXLJNIMacroAssembler::ArmException>& exception : exception_blocks_) {
    EmitExceptionPoll(exception.get());
  }
  asm_.FinalizeCode();
}

static constexpr size_t kFramePointerSize = static_cast<size_t>(kArmPointerSize);

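// Set up the JNI stub frame: push the core callee saves and LR, VPUSH the FP callee
// saves, grow the stack to `frame_size` and, if a method register is given, store the
// Method* at SP + 0. CFI is updated for every adjustment.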
void ArmVIXLJNIMacroAssembler::BuildFrame(size_t frame_size,
                                          ManagedRegister method_reg,
                                          ArrayRef<const ManagedRegister> callee_save_regs) {
  // If we're creating an actual frame with the method, enforce managed stack alignment,
  // otherwise only the native stack alignment.
  if (method_reg.IsNoRegister()) {
    CHECK_ALIGNED_PARAM(frame_size, kAapcsStackAlignment);
  } else {
    CHECK_ALIGNED_PARAM(frame_size, kStackAlignment);
  }

  // Push callee saves and link register.
  RegList core_spill_mask = 0;
  uint32_t fp_spill_mask = 0;
  for (const ManagedRegister& reg : callee_save_regs) {
    if (reg.AsArm().IsCoreRegister()) {
      core_spill_mask |= 1 << reg.AsArm().AsCoreRegister();
    } else {
      fp_spill_mask |= 1 << reg.AsArm().AsSRegister();
    }
  }
  if (core_spill_mask != 0u) {
    ___ Push(RegisterList(core_spill_mask));
    cfi().AdjustCFAOffset(POPCOUNT(core_spill_mask) * kFramePointerSize);
    cfi().RelOffsetForMany(DWARFReg(r0), 0, core_spill_mask, kFramePointerSize);
  }
  if (fp_spill_mask != 0) {
    uint32_t first = CTZ(fp_spill_mask);

    // Check that list is contiguous.
    DCHECK_EQ(fp_spill_mask >> CTZ(fp_spill_mask), ~0u >> (32 - POPCOUNT(fp_spill_mask)));

    ___ Vpush(SRegisterList(vixl32::SRegister(first), POPCOUNT(fp_spill_mask)));
    cfi().AdjustCFAOffset(POPCOUNT(fp_spill_mask) * kFramePointerSize);
    cfi().RelOffsetForMany(DWARFReg(s0), 0, fp_spill_mask, kFramePointerSize);
  }

  // Increase frame to required size.
  int pushed_values = POPCOUNT(core_spill_mask) + POPCOUNT(fp_spill_mask);
  // Must at least have space for Method* if we're going to spill it.
  CHECK_GE(frame_size, (pushed_values + (method_reg.IsRegister() ? 1u : 0u)) * kFramePointerSize);
  IncreaseFrameSize(frame_size - pushed_values * kFramePointerSize);  // handles CFI as well.

  if (method_reg.IsRegister()) {
    // Write out Method*.
    CHECK(r0.Is(AsVIXLRegister(method_reg.AsArm())));
    asm_.StoreToOffset(kStoreWord, r0, sp, 0);
  }
}

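// Tear down the frame built by BuildFrame(): shrink the stack back to the callee-save
// area, VPOP the FP callee saves, pop the core callee saves and LR, optionally refresh
// the Marking Register for Baker read barriers, and return with BX LR. The CFI state is
// remembered and restored so that code following the exit block unwinds correctly.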
void ArmVIXLJNIMacroAssembler::RemoveFrame(size_t frame_size,
                                           ArrayRef<const ManagedRegister> callee_save_regs,
                                           bool may_suspend) {
  CHECK_ALIGNED(frame_size, kAapcsStackAlignment);
  cfi().RememberState();

  // Compute callee saves to pop.
  RegList core_spill_mask = 0u;
  uint32_t fp_spill_mask = 0u;
  for (const ManagedRegister& reg : callee_save_regs) {
    if (reg.AsArm().IsCoreRegister()) {
      core_spill_mask |= 1u << reg.AsArm().AsCoreRegister();
    } else {
      fp_spill_mask |= 1u << reg.AsArm().AsSRegister();
    }
  }

  // Decrease frame to start of callee saves.
  size_t pop_values = POPCOUNT(core_spill_mask) + POPCOUNT(fp_spill_mask);
  CHECK_GE(frame_size, pop_values * kFramePointerSize);
  DecreaseFrameSize(frame_size - (pop_values * kFramePointerSize));  // handles CFI as well.

  // Pop FP callee saves.
  if (fp_spill_mask != 0u) {
    uint32_t first = CTZ(fp_spill_mask);
    // Check that list is contiguous.
    DCHECK_EQ(fp_spill_mask >> CTZ(fp_spill_mask), ~0u >> (32 - POPCOUNT(fp_spill_mask)));

    ___ Vpop(SRegisterList(vixl32::SRegister(first), POPCOUNT(fp_spill_mask)));
    cfi().AdjustCFAOffset(-kFramePointerSize * POPCOUNT(fp_spill_mask));
    cfi().RestoreMany(DWARFReg(s0), fp_spill_mask);
  }

  // Pop core callee saves and LR.
  if (core_spill_mask != 0u) {
    ___ Pop(RegisterList(core_spill_mask));
  }

  if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
    if (may_suspend) {
      // The method may be suspended; refresh the Marking Register.
      ___ Ldr(mr, MemOperand(tr, Thread::IsGcMarkingOffset<kArmPointerSize>().Int32Value()));
    } else {
      // The method shall not be suspended; no need to refresh the Marking Register.

      // The Marking Register is a callee-save register, and thus has been
      // preserved by native code following the AAPCS calling convention.

      // The following condition is a compile-time one, so it does not have a run-time cost.
      if (kIsDebugBuild) {
        // The following condition is a run-time one; it is executed after the
        // previous compile-time test, to avoid penalizing non-debug builds.
        if (emit_run_time_checks_in_debug_mode_) {
          // Emit a run-time check verifying that the Marking Register is up-to-date.
          UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
          vixl32::Register temp = temps.Acquire();
          // Ensure we are not clobbering a callee-save register that was restored before.
          DCHECK_EQ(core_spill_mask & (1 << temp.GetCode()), 0)
              << "core_spill_mask should not contain scratch register R" << temp.GetCode();
          asm_.GenerateMarkingRegisterCheck(temp);
        }
      }
    }
  }

  // Return to LR.
  ___ Bx(vixl32::lr);

  // The CFI should be restored for any code that follows the exit block.
  cfi().RestoreState();
  cfi().DefCFAOffset(frame_size);
}


void ArmVIXLJNIMacroAssembler::IncreaseFrameSize(size_t adjust) {
  if (adjust != 0u) {
    asm_.AddConstant(sp, -adjust);
    cfi().AdjustCFAOffset(adjust);
  }
}

void ArmVIXLJNIMacroAssembler::DecreaseFrameSize(size_t adjust) {
  if (adjust != 0u) {
    asm_.AddConstant(sp, adjust);
    cfi().AdjustCFAOffset(-adjust);
  }
}

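// Store a managed register to a stack slot, dispatching on the register kind: a single
// word store for a core register, STRD for a register pair, and an FP store for an
// S- or D-register. `size` must match the width of the source register.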
void ArmVIXLJNIMacroAssembler::Store(FrameOffset dest, ManagedRegister m_src, size_t size) {
  ArmManagedRegister src = m_src.AsArm();
  if (src.IsNoRegister()) {
    CHECK_EQ(0u, size);
  } else if (src.IsCoreRegister()) {
    CHECK_EQ(4u, size);
    UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
    temps.Exclude(AsVIXLRegister(src));
    asm_.StoreToOffset(kStoreWord, AsVIXLRegister(src), sp, dest.Int32Value());
  } else if (src.IsRegisterPair()) {
    CHECK_EQ(8u, size);
    ___ Strd(AsVIXLRegisterPairLow(src),
             AsVIXLRegisterPairHigh(src),
             MemOperand(sp, dest.Int32Value()));
  } else if (src.IsSRegister()) {
    CHECK_EQ(4u, size);
    asm_.StoreSToOffset(AsVIXLSRegister(src), sp, dest.Int32Value());
  } else {
    CHECK_EQ(8u, size);
    CHECK(src.IsDRegister()) << src;
    asm_.StoreDToOffset(AsVIXLDRegister(src), sp, dest.Int32Value());
  }
}

void ArmVIXLJNIMacroAssembler::StoreRef(FrameOffset dest, ManagedRegister msrc) {
  vixl::aarch32::Register src = AsVIXLRegister(msrc.AsArm());
  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
  temps.Exclude(src);
  asm_.StoreToOffset(kStoreWord, src, sp, dest.Int32Value());
}

void ArmVIXLJNIMacroAssembler::StoreRawPtr(FrameOffset dest, ManagedRegister msrc) {
  vixl::aarch32::Register src = AsVIXLRegister(msrc.AsArm());
  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
  temps.Exclude(src);
  asm_.StoreToOffset(kStoreWord, src, sp, dest.Int32Value());
}

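// Store `msrc` at `dest` and copy the word at `in_off` into the following slot
// (dest + 4), so the two words end up adjacent on the stack.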
void ArmVIXLJNIMacroAssembler::StoreSpanning(FrameOffset dest,
                                             ManagedRegister msrc,
                                             FrameOffset in_off) {
  vixl::aarch32::Register src = AsVIXLRegister(msrc.AsArm());
  asm_.StoreToOffset(kStoreWord, src, sp, dest.Int32Value());
  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
  vixl32::Register scratch = temps.Acquire();
  asm_.LoadFromOffset(kLoadWord, scratch, sp, in_off.Int32Value());
  asm_.StoreToOffset(kStoreWord, scratch, sp, dest.Int32Value() + 4);
}

void ArmVIXLJNIMacroAssembler::CopyRef(FrameOffset dest, FrameOffset src) {
  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
  vixl32::Register scratch = temps.Acquire();
  asm_.LoadFromOffset(kLoadWord, scratch, sp, src.Int32Value());
  asm_.StoreToOffset(kStoreWord, scratch, sp, dest.Int32Value());
}

void ArmVIXLJNIMacroAssembler::CopyRef(FrameOffset dest,
                                       ManagedRegister base,
                                       MemberOffset offs,
                                       bool unpoison_reference) {
  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
  vixl32::Register scratch = temps.Acquire();
  asm_.LoadFromOffset(kLoadWord, scratch, AsVIXLRegister(base.AsArm()), offs.Int32Value());
  if (unpoison_reference) {
    asm_.MaybeUnpoisonHeapReference(scratch);
  }
  asm_.StoreToOffset(kStoreWord, scratch, sp, dest.Int32Value());
}

void ArmVIXLJNIMacroAssembler::LoadRef(ManagedRegister mdest,
                                       ManagedRegister mbase,
                                       MemberOffset offs,
                                       bool unpoison_reference) {
  vixl::aarch32::Register dest = AsVIXLRegister(mdest.AsArm());
  vixl::aarch32::Register base = AsVIXLRegister(mbase.AsArm());
  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
  temps.Exclude(dest, base);
  asm_.LoadFromOffset(kLoadWord, dest, base, offs.Int32Value());

  if (unpoison_reference) {
    asm_.MaybeUnpoisonHeapReference(dest);
  }
}

void ArmVIXLJNIMacroAssembler::LoadRef(ManagedRegister dest ATTRIBUTE_UNUSED,
                                       FrameOffset src ATTRIBUTE_UNUSED) {
  UNIMPLEMENTED(FATAL);
}

void ArmVIXLJNIMacroAssembler::LoadRawPtr(ManagedRegister dest ATTRIBUTE_UNUSED,
                                          ManagedRegister base ATTRIBUTE_UNUSED,
                                          Offset offs ATTRIBUTE_UNUSED) {
  UNIMPLEMENTED(FATAL);
}

void ArmVIXLJNIMacroAssembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm) {
  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
  vixl32::Register scratch = temps.Acquire();
  asm_.LoadImmediate(scratch, imm);
  asm_.StoreToOffset(kStoreWord, scratch, sp, dest.Int32Value());
}

void ArmVIXLJNIMacroAssembler::Load(ManagedRegister m_dst, FrameOffset src, size_t size) {
  return Load(m_dst.AsArm(), sp, src.Int32Value(), size);
}

void ArmVIXLJNIMacroAssembler::LoadFromThread(ManagedRegister m_dst,
                                              ThreadOffset32 src,
                                              size_t size) {
  return Load(m_dst.AsArm(), tr, src.Int32Value(), size);
}

void ArmVIXLJNIMacroAssembler::LoadRawPtrFromThread(ManagedRegister mdest, ThreadOffset32 offs) {
  vixl::aarch32::Register dest = AsVIXLRegister(mdest.AsArm());
  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
  temps.Exclude(dest);
  asm_.LoadFromOffset(kLoadWord, dest, tr, offs.Int32Value());
}

void ArmVIXLJNIMacroAssembler::CopyRawPtrFromThread(FrameOffset fr_offs, ThreadOffset32 thr_offs) {
  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
  vixl32::Register scratch = temps.Acquire();
  asm_.LoadFromOffset(kLoadWord, scratch, tr, thr_offs.Int32Value());
  asm_.StoreToOffset(kStoreWord, scratch, sp, fr_offs.Int32Value());
}

void ArmVIXLJNIMacroAssembler::CopyRawPtrToThread(ThreadOffset32 thr_offs ATTRIBUTE_UNUSED,
                                                  FrameOffset fr_offs ATTRIBUTE_UNUSED,
                                                  ManagedRegister mscratch ATTRIBUTE_UNUSED) {
  UNIMPLEMENTED(FATAL);
}

void ArmVIXLJNIMacroAssembler::StoreStackOffsetToThread(ThreadOffset32 thr_offs,
                                                        FrameOffset fr_offs) {
  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
  vixl32::Register scratch = temps.Acquire();
  asm_.AddConstant(scratch, sp, fr_offs.Int32Value());
  asm_.StoreToOffset(kStoreWord, scratch, tr, thr_offs.Int32Value());
}

void ArmVIXLJNIMacroAssembler::StoreStackPointerToThread(ThreadOffset32 thr_offs) {
  asm_.StoreToOffset(kStoreWord, sp, tr, thr_offs.Int32Value());
}

void ArmVIXLJNIMacroAssembler::SignExtend(ManagedRegister mreg ATTRIBUTE_UNUSED,
                                          size_t size ATTRIBUTE_UNUSED) {
  UNIMPLEMENTED(FATAL) << "no sign extension necessary for arm";
}

void ArmVIXLJNIMacroAssembler::ZeroExtend(ManagedRegister mreg ATTRIBUTE_UNUSED,
                                          size_t size ATTRIBUTE_UNUSED) {
  UNIMPLEMENTED(FATAL) << "no zero extension necessary for arm";
}

static inline bool IsCoreRegisterOrPair(ArmManagedRegister reg) {
  return reg.IsCoreRegister() || reg.IsRegisterPair();
}

static inline bool NoSpillGap(const ArgumentLocation& loc1, const ArgumentLocation& loc2) {
  DCHECK(!loc1.IsRegister());
  DCHECK(!loc2.IsRegister());
  uint32_t loc1_offset = loc1.GetFrameOffset().Uint32Value();
  uint32_t loc2_offset = loc2.GetFrameOffset().Uint32Value();
  DCHECK_LT(loc1_offset, loc2_offset);
  return loc1_offset + loc1.GetSize() == loc2_offset;
}

static inline uint32_t GetSRegisterNumber(ArmManagedRegister reg) {
  if (reg.IsSRegister()) {
    return static_cast<uint32_t>(reg.AsSRegister());
  } else {
    DCHECK(reg.IsDRegister());
    return 2u * static_cast<uint32_t>(reg.AsDRegister());
  }
}

// Get the number of locations to spill together.
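// A chunk is a run of consecutive arguments whose destination stack slots are adjacent
// and whose sources are either all core registers/pairs, a contiguous sequence of
// S/D registers, or (with an extra temp) a pair of stack words that LDRD/STRD can move.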
static inline size_t GetSpillChunkSize(ArrayRef<ArgumentLocation> dests,
                                       ArrayRef<ArgumentLocation> srcs,
                                       size_t start,
                                       bool have_extra_temp) {
  DCHECK_LT(start, dests.size());
  DCHECK_ALIGNED(dests[start].GetFrameOffset().Uint32Value(), 4u);
  const ArgumentLocation& first_src = srcs[start];
  if (!first_src.IsRegister()) {
    DCHECK_ALIGNED(first_src.GetFrameOffset().Uint32Value(), 4u);
    // If we have an extra temporary, look for opportunities to move 2 words
    // at a time with LDRD/STRD when the source types are word-sized.
    if (have_extra_temp &&
        start + 1u != dests.size() &&
        !srcs[start + 1u].IsRegister() &&
        first_src.GetSize() == 4u &&
        srcs[start + 1u].GetSize() == 4u &&
        NoSpillGap(first_src, srcs[start + 1u]) &&
        NoSpillGap(dests[start], dests[start + 1u]) &&
        dests[start].GetFrameOffset().Uint32Value() < kStrdOffsetCutoff) {
      // Note: The source and destination may not be 8B aligned (but they are 4B aligned).
      return 2u;
    }
    return 1u;
  }
  ArmManagedRegister first_src_reg = first_src.GetRegister().AsArm();
  size_t end = start + 1u;
  if (IsCoreRegisterOrPair(first_src_reg)) {
    while (end != dests.size() &&
           NoSpillGap(dests[end - 1u], dests[end]) &&
           srcs[end].IsRegister() &&
           IsCoreRegisterOrPair(srcs[end].GetRegister().AsArm())) {
      ++end;
    }
  } else {
    DCHECK(first_src_reg.IsSRegister() || first_src_reg.IsDRegister());
    uint32_t next_sreg = GetSRegisterNumber(first_src_reg) + first_src.GetSize() / kSRegSizeInBytes;
    while (end != dests.size() &&
           NoSpillGap(dests[end - 1u], dests[end]) &&
           srcs[end].IsRegister() &&
           !IsCoreRegisterOrPair(srcs[end].GetRegister().AsArm()) &&
           GetSRegisterNumber(srcs[end].GetRegister().AsArm()) == next_sreg) {
      next_sreg += srcs[end].GetSize() / kSRegSizeInBytes;
      ++end;
    }
  }
  return end - start;
}

static inline uint32_t GetCoreRegisterMask(ArmManagedRegister reg) {
  if (reg.IsCoreRegister()) {
    return 1u << static_cast<size_t>(reg.AsCoreRegister());
  } else {
    DCHECK(reg.IsRegisterPair());
    DCHECK_LT(reg.AsRegisterPairLow(), reg.AsRegisterPairHigh());
    return (1u << static_cast<size_t>(reg.AsRegisterPairLow())) |
           (1u << static_cast<size_t>(reg.AsRegisterPairHigh()));
  }
}

static inline uint32_t GetCoreRegisterMask(ArrayRef<ArgumentLocation> srcs) {
  uint32_t mask = 0u;
  for (const ArgumentLocation& loc : srcs) {
    DCHECK(loc.IsRegister());
    mask |= GetCoreRegisterMask(loc.GetRegister().AsArm());
  }
  return mask;
}

static inline bool UseStrdForChunk(ArrayRef<ArgumentLocation> srcs, size_t start, size_t length) {
  DCHECK_GE(length, 2u);
  DCHECK(srcs[start].IsRegister());
  DCHECK(srcs[start + 1u].IsRegister());
  // The destination may not be 8B aligned (but it is 4B aligned).
  // Allow arbitrary destination offset, macro assembler will use a temp if needed.
  // Note: T32 allows unrelated registers in STRD. (A32 does not.)
  return length == 2u &&
         srcs[start].GetRegister().AsArm().IsCoreRegister() &&
         srcs[start + 1u].GetRegister().AsArm().IsCoreRegister();
}

static inline bool UseVstrForChunk(ArrayRef<ArgumentLocation> srcs, size_t start, size_t length) {
  DCHECK_GE(length, 2u);
  DCHECK(srcs[start].IsRegister());
  DCHECK(srcs[start + 1u].IsRegister());
  // The destination may not be 8B aligned (but it is 4B aligned).
  // Allow arbitrary destination offset, macro assembler will use a temp if needed.
  return length == 2u &&
         srcs[start].GetRegister().AsArm().IsSRegister() &&
         srcs[start + 1u].GetRegister().AsArm().IsSRegister() &&
         IsAligned<2u>(static_cast<size_t>(srcs[start].GetRegister().AsArm().AsSRegister()));
}

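// Shuffle the native call arguments from managed locations into the locations required
// by the native ABI. This runs in three phases: (1) spill arguments to their stack slots,
// coalescing neighbours into STRD/VSTR/STM/VSTM where possible, (2) move core registers
// into core register destinations in an order that never clobbers a still-pending source,
// and (3) fill the remaining register destinations from FP registers or stack slots,
// pairing loads into LDRD/VMOV where possible.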
void ArmVIXLJNIMacroAssembler::MoveArguments(ArrayRef<ArgumentLocation> dests,
                                             ArrayRef<ArgumentLocation> srcs) {
  DCHECK_EQ(dests.size(), srcs.size());

  // Native ABI is soft-float, so all destinations should be core registers or stack offsets.
  // And register locations should be first, followed by stack locations with increasing offset.
  auto is_register = [](const ArgumentLocation& loc) { return loc.IsRegister(); };
  DCHECK(std::is_partitioned(dests.begin(), dests.end(), is_register));
  size_t num_reg_dests =
      std::distance(dests.begin(), std::partition_point(dests.begin(), dests.end(), is_register));
  DCHECK(std::is_sorted(
      dests.begin() + num_reg_dests,
      dests.end(),
      [](const ArgumentLocation& lhs, const ArgumentLocation& rhs) {
        return lhs.GetFrameOffset().Uint32Value() < rhs.GetFrameOffset().Uint32Value();
      }));

  // Collect registers to move. No need to record FP regs as destinations are only core regs.
  uint32_t src_regs = 0u;
  uint32_t dest_regs = 0u;
  for (size_t i = 0; i != num_reg_dests; ++i) {
    const ArgumentLocation& src = srcs[i];
    const ArgumentLocation& dest = dests[i];
    DCHECK(dest.IsRegister() && IsCoreRegisterOrPair(dest.GetRegister().AsArm()));
    if (src.IsRegister() && IsCoreRegisterOrPair(src.GetRegister().AsArm())) {
      if (src.GetRegister().Equals(dest.GetRegister())) {
        continue;
      }
      src_regs |= GetCoreRegisterMask(src.GetRegister().AsArm());
    }
    dest_regs |= GetCoreRegisterMask(dest.GetRegister().AsArm());
  }

  // Spill args first. Look for opportunities to spill multiple arguments at once.
  {
    UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
    vixl32::Register xtemp;  // Extra temp register.
    if ((dest_regs & ~src_regs) != 0u) {
      xtemp = vixl32::Register(CTZ(dest_regs & ~src_regs));
      DCHECK(!temps.IsAvailable(xtemp));
    }
    auto move_two_words = [&](FrameOffset dest_offset, FrameOffset src_offset) {
      DCHECK(xtemp.IsValid());
      DCHECK_LT(dest_offset.Uint32Value(), kStrdOffsetCutoff);
      // VIXL macro assembler can use destination registers for loads from large offsets.
      UseScratchRegisterScope temps2(asm_.GetVIXLAssembler());
      vixl32::Register temp2 = temps2.Acquire();
      ___ Ldrd(xtemp, temp2, MemOperand(sp, src_offset.Uint32Value()));
      ___ Strd(xtemp, temp2, MemOperand(sp, dest_offset.Uint32Value()));
    };
    for (size_t i = num_reg_dests, arg_count = dests.size(); i != arg_count; ) {
      const ArgumentLocation& src = srcs[i];
      const ArgumentLocation& dest = dests[i];
      DCHECK_EQ(src.GetSize(), dest.GetSize());
      DCHECK(!dest.IsRegister());
      uint32_t frame_offset = dest.GetFrameOffset().Uint32Value();
      size_t chunk_size = GetSpillChunkSize(dests, srcs, i, xtemp.IsValid());
      DCHECK_NE(chunk_size, 0u);
      if (chunk_size == 1u) {
        if (src.IsRegister()) {
          Store(dest.GetFrameOffset(), src.GetRegister(), dest.GetSize());
        } else if (dest.GetSize() == 8u && xtemp.IsValid() && frame_offset < kStrdOffsetCutoff) {
          move_two_words(dest.GetFrameOffset(), src.GetFrameOffset());
        } else {
          Copy(dest.GetFrameOffset(), src.GetFrameOffset(), dest.GetSize());
        }
      } else if (!src.IsRegister()) {
        DCHECK_EQ(chunk_size, 2u);
        DCHECK_EQ(dest.GetSize(), 4u);
        DCHECK_EQ(dests[i + 1u].GetSize(), 4u);
        move_two_words(dest.GetFrameOffset(), src.GetFrameOffset());
      } else if (UseStrdForChunk(srcs, i, chunk_size)) {
        ___ Strd(AsVIXLRegister(srcs[i].GetRegister().AsArm()),
                 AsVIXLRegister(srcs[i + 1u].GetRegister().AsArm()),
                 MemOperand(sp, frame_offset));
      } else if (UseVstrForChunk(srcs, i, chunk_size)) {
        size_t sreg = GetSRegisterNumber(src.GetRegister().AsArm());
        DCHECK_ALIGNED(sreg, 2u);
        ___ Vstr(vixl32::DRegister(sreg / 2u), MemOperand(sp, frame_offset));
      } else {
        UseScratchRegisterScope temps2(asm_.GetVIXLAssembler());
        vixl32::Register base_reg;
        if (frame_offset == 0u) {
          base_reg = sp;
        } else {
          base_reg = temps2.Acquire();
          ___ Add(base_reg, sp, frame_offset);
        }

        ArmManagedRegister src_reg = src.GetRegister().AsArm();
        if (IsCoreRegisterOrPair(src_reg)) {
          uint32_t core_reg_mask = GetCoreRegisterMask(srcs.SubArray(i, chunk_size));
          ___ Stm(base_reg, NO_WRITE_BACK, RegisterList(core_reg_mask));
        } else {
          uint32_t start_sreg = GetSRegisterNumber(src_reg);
          const ArgumentLocation& last_dest = dests[i + chunk_size - 1u];
          uint32_t total_size =
              last_dest.GetFrameOffset().Uint32Value() + last_dest.GetSize() - frame_offset;
          if (IsAligned<2u>(start_sreg) &&
              IsAligned<kDRegSizeInBytes>(frame_offset) &&
              IsAligned<kDRegSizeInBytes>(total_size)) {
            uint32_t dreg_count = total_size / kDRegSizeInBytes;
            DRegisterList dreg_list(vixl32::DRegister(start_sreg / 2u), dreg_count);
            ___ Vstm(F64, base_reg, NO_WRITE_BACK, dreg_list);
          } else {
            uint32_t sreg_count = total_size / kSRegSizeInBytes;
            SRegisterList sreg_list(vixl32::SRegister(start_sreg), sreg_count);
            ___ Vstm(F32, base_reg, NO_WRITE_BACK, sreg_list);
          }
        }
      }
      i += chunk_size;
    }
  }

  // Fill destination registers from source core registers.
  // There should be no cycles, so this algorithm should make progress.
  while (src_regs != 0u) {
    uint32_t old_src_regs = src_regs;
    for (size_t i = 0; i != num_reg_dests; ++i) {
      DCHECK(dests[i].IsRegister() && IsCoreRegisterOrPair(dests[i].GetRegister().AsArm()));
      if (!srcs[i].IsRegister() || !IsCoreRegisterOrPair(srcs[i].GetRegister().AsArm())) {
        continue;
      }
      uint32_t dest_reg_mask = GetCoreRegisterMask(dests[i].GetRegister().AsArm());
      if ((dest_reg_mask & dest_regs) == 0u) {
        continue;  // Equals source, or already filled in one of previous iterations.
      }
      // There are no partial overlaps of 8-byte arguments, otherwise we would have to
      // tweak this check; Move() can deal with partial overlap for historical reasons.
      if ((dest_reg_mask & src_regs) != 0u) {
        continue;  // Cannot clobber this register yet.
      }
      Move(dests[i].GetRegister(), srcs[i].GetRegister(), dests[i].GetSize());
      uint32_t src_reg_mask = GetCoreRegisterMask(srcs[i].GetRegister().AsArm());
      DCHECK_EQ(src_regs & src_reg_mask, src_reg_mask);
      src_regs &= ~src_reg_mask;  // Allow clobbering the source register or pair.
      dest_regs &= ~dest_reg_mask;  // Destination register or pair was filled.
    }
    CHECK_NE(old_src_regs, src_regs);
    DCHECK_EQ(0u, src_regs & ~old_src_regs);
  }

  // Now fill destination registers from FP registers or stack slots, looking for
  // opportunities to use LDRD/VMOV to fill 2 registers with one instruction.
  for (size_t i = 0, j; i != num_reg_dests; i = j) {
    j = i + 1u;
    DCHECK(dests[i].IsRegister() && IsCoreRegisterOrPair(dests[i].GetRegister().AsArm()));
    if (srcs[i].IsRegister() && IsCoreRegisterOrPair(srcs[i].GetRegister().AsArm())) {
      DCHECK_EQ(GetCoreRegisterMask(dests[i].GetRegister().AsArm()) & dest_regs, 0u);
      continue;  // Equals destination or moved above.
    }
    DCHECK_NE(GetCoreRegisterMask(dests[i].GetRegister().AsArm()) & dest_regs, 0u);
    if (dests[i].GetSize() == 4u) {
      // Find next register to load.
      while (j != num_reg_dests &&
             (srcs[j].IsRegister() && IsCoreRegisterOrPair(srcs[j].GetRegister().AsArm()))) {
        DCHECK_EQ(GetCoreRegisterMask(dests[j].GetRegister().AsArm()) & dest_regs, 0u);
        ++j;  // Equals destination or moved above.
      }
      if (j != num_reg_dests && dests[j].GetSize() == 4u) {
        if (!srcs[i].IsRegister() && !srcs[j].IsRegister() && NoSpillGap(srcs[i], srcs[j])) {
          ___ Ldrd(AsVIXLRegister(dests[i].GetRegister().AsArm()),
                   AsVIXLRegister(dests[j].GetRegister().AsArm()),
                   MemOperand(sp, srcs[i].GetFrameOffset().Uint32Value()));
          ++j;
          continue;
        }
        if (srcs[i].IsRegister() && srcs[j].IsRegister()) {
          uint32_t first_sreg = GetSRegisterNumber(srcs[i].GetRegister().AsArm());
          if (IsAligned<2u>(first_sreg) &&
              first_sreg + 1u == GetSRegisterNumber(srcs[j].GetRegister().AsArm())) {
            ___ Vmov(AsVIXLRegister(dests[i].GetRegister().AsArm()),
                     AsVIXLRegister(dests[j].GetRegister().AsArm()),
                     vixl32::DRegister(first_sreg / 2u));
            ++j;
            continue;
          }
        }
      }
    }
    if (srcs[i].IsRegister()) {
      Move(dests[i].GetRegister(), srcs[i].GetRegister(), dests[i].GetSize());
    } else {
      Load(dests[i].GetRegister(), srcs[i].GetFrameOffset(), dests[i].GetSize());
    }
  }
}

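// Register-to-register move between managed registers of the same size. Handles moves
// between core registers, S/D registers and register pairs, including core<->S and
// D<->pair transfers, and orders the two MOVs of a pair-to-pair move so the first
// never clobbers the second's input.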
void ArmVIXLJNIMacroAssembler::Move(ManagedRegister mdst,
                                    ManagedRegister msrc,
                                    size_t size ATTRIBUTE_UNUSED) {
  ArmManagedRegister dst = mdst.AsArm();
  if (kIsDebugBuild) {
    // Check that the destination is not a scratch register.
    UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
    if (dst.IsCoreRegister()) {
      CHECK(!temps.IsAvailable(AsVIXLRegister(dst)));
    } else if (dst.IsDRegister()) {
      CHECK(!temps.IsAvailable(AsVIXLDRegister(dst)));
    } else if (dst.IsSRegister()) {
      CHECK(!temps.IsAvailable(AsVIXLSRegister(dst)));
    } else {
      CHECK(dst.IsRegisterPair()) << dst;
      CHECK(!temps.IsAvailable(AsVIXLRegisterPairLow(dst)));
      CHECK(!temps.IsAvailable(AsVIXLRegisterPairHigh(dst)));
    }
  }
  ArmManagedRegister src = msrc.AsArm();
  if (!dst.Equals(src)) {
    if (dst.IsCoreRegister()) {
      if (src.IsCoreRegister()) {
        ___ Mov(AsVIXLRegister(dst), AsVIXLRegister(src));
      } else {
        CHECK(src.IsSRegister()) << src;
        ___ Vmov(AsVIXLRegister(dst), AsVIXLSRegister(src));
      }
    } else if (dst.IsDRegister()) {
      if (src.IsDRegister()) {
        ___ Vmov(F64, AsVIXLDRegister(dst), AsVIXLDRegister(src));
      } else {
        // VMOV Dn, Rlo, Rhi (Dn = {Rlo, Rhi})
        CHECK(src.IsRegisterPair()) << src;
        ___ Vmov(AsVIXLDRegister(dst), AsVIXLRegisterPairLow(src), AsVIXLRegisterPairHigh(src));
      }
    } else if (dst.IsSRegister()) {
      if (src.IsSRegister()) {
        ___ Vmov(F32, AsVIXLSRegister(dst), AsVIXLSRegister(src));
      } else {
        // VMOV Sn, Rn (Sn = Rn)
        CHECK(src.IsCoreRegister()) << src;
        ___ Vmov(AsVIXLSRegister(dst), AsVIXLRegister(src));
      }
    } else {
      CHECK(dst.IsRegisterPair()) << dst;
      if (src.IsRegisterPair()) {
        // Ensure that the first move doesn't clobber the input of the second.
        if (src.AsRegisterPairHigh() != dst.AsRegisterPairLow()) {
          ___ Mov(AsVIXLRegisterPairLow(dst), AsVIXLRegisterPairLow(src));
          ___ Mov(AsVIXLRegisterPairHigh(dst), AsVIXLRegisterPairHigh(src));
        } else {
          ___ Mov(AsVIXLRegisterPairHigh(dst), AsVIXLRegisterPairHigh(src));
          ___ Mov(AsVIXLRegisterPairLow(dst), AsVIXLRegisterPairLow(src));
        }
      } else {
        CHECK(src.IsDRegister()) << src;
        ___ Vmov(AsVIXLRegisterPairLow(dst), AsVIXLRegisterPairHigh(dst), AsVIXLDRegister(src));
      }
    }
  }
}

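// Copy 4 or 8 bytes between stack slots word by word through a single scratch register.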
void ArmVIXLJNIMacroAssembler::Copy(FrameOffset dest, FrameOffset src, size_t size) {
  DCHECK(size == 4 || size == 8) << size;
  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
  vixl32::Register scratch = temps.Acquire();
  if (size == 4) {
    asm_.LoadFromOffset(kLoadWord, scratch, sp, src.Int32Value());
    asm_.StoreToOffset(kStoreWord, scratch, sp, dest.Int32Value());
  } else if (size == 8) {
    asm_.LoadFromOffset(kLoadWord, scratch, sp, src.Int32Value());
    asm_.StoreToOffset(kStoreWord, scratch, sp, dest.Int32Value());
    asm_.LoadFromOffset(kLoadWord, scratch, sp, src.Int32Value() + 4);
    asm_.StoreToOffset(kStoreWord, scratch, sp, dest.Int32Value() + 4);
  }
}

void ArmVIXLJNIMacroAssembler::Copy(FrameOffset dest ATTRIBUTE_UNUSED,
                                    ManagedRegister src_base ATTRIBUTE_UNUSED,
                                    Offset src_offset ATTRIBUTE_UNUSED,
                                    ManagedRegister mscratch ATTRIBUTE_UNUSED,
                                    size_t size ATTRIBUTE_UNUSED) {
  UNIMPLEMENTED(FATAL);
}

void ArmVIXLJNIMacroAssembler::Copy(ManagedRegister dest_base ATTRIBUTE_UNUSED,
                                    Offset dest_offset ATTRIBUTE_UNUSED,
                                    FrameOffset src ATTRIBUTE_UNUSED,
                                    ManagedRegister mscratch ATTRIBUTE_UNUSED,
                                    size_t size ATTRIBUTE_UNUSED) {
  UNIMPLEMENTED(FATAL);
}

void ArmVIXLJNIMacroAssembler::Copy(FrameOffset dst ATTRIBUTE_UNUSED,
                                    FrameOffset src_base ATTRIBUTE_UNUSED,
                                    Offset src_offset ATTRIBUTE_UNUSED,
                                    ManagedRegister mscratch ATTRIBUTE_UNUSED,
                                    size_t size ATTRIBUTE_UNUSED) {
  UNIMPLEMENTED(FATAL);
}

void ArmVIXLJNIMacroAssembler::Copy(ManagedRegister dest ATTRIBUTE_UNUSED,
                                    Offset dest_offset ATTRIBUTE_UNUSED,
                                    ManagedRegister src ATTRIBUTE_UNUSED,
                                    Offset src_offset ATTRIBUTE_UNUSED,
                                    ManagedRegister mscratch ATTRIBUTE_UNUSED,
                                    size_t size ATTRIBUTE_UNUSED) {
  UNIMPLEMENTED(FATAL);
}

void ArmVIXLJNIMacroAssembler::Copy(FrameOffset dst ATTRIBUTE_UNUSED,
                                    Offset dest_offset ATTRIBUTE_UNUSED,
                                    FrameOffset src ATTRIBUTE_UNUSED,
                                    Offset src_offset ATTRIBUTE_UNUSED,
                                    ManagedRegister scratch ATTRIBUTE_UNUSED,
                                    size_t size ATTRIBUTE_UNUSED) {
  UNIMPLEMENTED(FATAL);
}

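// Compute a handle scope entry address for `min_reg` into `mout_reg`. When null is
// allowed, the result is 0 for a null reference and SP + handle_scope_offset otherwise,
// selected with an IT block; when null is not allowed it is always the entry address.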
void ArmVIXLJNIMacroAssembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
                                                      FrameOffset handle_scope_offset,
                                                      ManagedRegister min_reg,
                                                      bool null_allowed) {
  vixl::aarch32::Register out_reg = AsVIXLRegister(mout_reg.AsArm());
  vixl::aarch32::Register in_reg =
      min_reg.AsArm().IsNoRegister() ? vixl::aarch32::Register() : AsVIXLRegister(min_reg.AsArm());
  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
  temps.Exclude(out_reg);
  if (null_allowed) {
    // Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is
    // the address in the handle scope holding the reference.
    // e.g. out_reg = (handle == 0) ? 0 : (SP+handle_offset)
    if (!in_reg.IsValid()) {
      asm_.LoadFromOffset(kLoadWord, out_reg, sp, handle_scope_offset.Int32Value());
      in_reg = out_reg;
    }

    temps.Exclude(in_reg);
    ___ Cmp(in_reg, 0);

    if (asm_.ShifterOperandCanHold(ADD, handle_scope_offset.Int32Value())) {
      if (!out_reg.Is(in_reg)) {
        ExactAssemblyScope guard(asm_.GetVIXLAssembler(),
                                 3 * vixl32::kMaxInstructionSizeInBytes,
                                 CodeBufferCheckScope::kMaximumSize);
        ___ it(eq, 0xc);
        ___ mov(eq, out_reg, 0);
        asm_.AddConstantInIt(out_reg, sp, handle_scope_offset.Int32Value(), ne);
      } else {
        ExactAssemblyScope guard(asm_.GetVIXLAssembler(),
                                 2 * vixl32::kMaxInstructionSizeInBytes,
                                 CodeBufferCheckScope::kMaximumSize);
        ___ it(ne, 0x8);
        asm_.AddConstantInIt(out_reg, sp, handle_scope_offset.Int32Value(), ne);
      }
    } else {
      // TODO: Implement this (old arm assembler would have crashed here).
      UNIMPLEMENTED(FATAL);
    }
  } else {
    asm_.AddConstant(out_reg, sp, handle_scope_offset.Int32Value());
  }
}

void ArmVIXLJNIMacroAssembler::CreateHandleScopeEntry(FrameOffset out_off,
                                                      FrameOffset handle_scope_offset,
                                                      bool null_allowed) {
  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
  vixl32::Register scratch = temps.Acquire();
  if (null_allowed) {
    asm_.LoadFromOffset(kLoadWord, scratch, sp, handle_scope_offset.Int32Value());
    // Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is
    // the address in the handle scope holding the reference.
    // e.g. scratch = (scratch == 0) ? 0 : (SP+handle_scope_offset)
    ___ Cmp(scratch, 0);

    if (asm_.ShifterOperandCanHold(ADD, handle_scope_offset.Int32Value())) {
      ExactAssemblyScope guard(asm_.GetVIXLAssembler(),
                               2 * vixl32::kMaxInstructionSizeInBytes,
                               CodeBufferCheckScope::kMaximumSize);
      ___ it(ne, 0x8);
      asm_.AddConstantInIt(scratch, sp, handle_scope_offset.Int32Value(), ne);
    } else {
      // TODO: Implement this (old arm assembler would have crashed here).
      UNIMPLEMENTED(FATAL);
    }
  } else {
    asm_.AddConstant(scratch, sp, handle_scope_offset.Int32Value());
  }
  asm_.StoreToOffset(kStoreWord, scratch, sp, out_off.Int32Value());
}

void ArmVIXLJNIMacroAssembler::LoadReferenceFromHandleScope(
    ManagedRegister mout_reg ATTRIBUTE_UNUSED,
    ManagedRegister min_reg ATTRIBUTE_UNUSED) {
  UNIMPLEMENTED(FATAL);
}

void ArmVIXLJNIMacroAssembler::VerifyObject(ManagedRegister src ATTRIBUTE_UNUSED,
                                            bool could_be_null ATTRIBUTE_UNUSED) {
  // TODO: not validating references.
}

void ArmVIXLJNIMacroAssembler::VerifyObject(FrameOffset src ATTRIBUTE_UNUSED,
                                            bool could_be_null ATTRIBUTE_UNUSED) {
  // TODO: not validating references.
}

void ArmVIXLJNIMacroAssembler::Jump(ManagedRegister mbase, Offset offset) {
  vixl::aarch32::Register base = AsVIXLRegister(mbase.AsArm());
  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
  vixl32::Register scratch = temps.Acquire();
  asm_.LoadFromOffset(kLoadWord, scratch, base, offset.Int32Value());
  ___ Bx(scratch);
}

void ArmVIXLJNIMacroAssembler::Call(ManagedRegister mbase, Offset offset) {
  vixl::aarch32::Register base = AsVIXLRegister(mbase.AsArm());
  asm_.LoadFromOffset(kLoadWord, lr, base, offset.Int32Value());
  ___ Blx(lr);
  // TODO: place reference map on call.
}

void ArmVIXLJNIMacroAssembler::Call(FrameOffset base, Offset offset) {
  // Call *(*(SP + base) + offset)
  asm_.LoadFromOffset(kLoadWord, lr, sp, base.Int32Value());
  asm_.LoadFromOffset(kLoadWord, lr, lr, offset.Int32Value());
  ___ Blx(lr);
  // TODO: place reference map on call.
}

void ArmVIXLJNIMacroAssembler::CallFromThread(ThreadOffset32 offset ATTRIBUTE_UNUSED) {
  UNIMPLEMENTED(FATAL);
}

void ArmVIXLJNIMacroAssembler::GetCurrentThread(ManagedRegister dest) {
  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
  temps.Exclude(AsVIXLRegister(dest.AsArm()));
  ___ Mov(AsVIXLRegister(dest.AsArm()), tr);
}

void ArmVIXLJNIMacroAssembler::GetCurrentThread(FrameOffset dest_offset) {
  asm_.StoreToOffset(kStoreWord, tr, sp, dest_offset.Int32Value());
}

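// Check Thread::exception_ and, if it is non-null, branch to a deferred slow path that
// delivers the exception. The slow path itself is emitted later, in FinalizeCode().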
void ArmVIXLJNIMacroAssembler::ExceptionPoll(size_t stack_adjust) {
  CHECK_ALIGNED(stack_adjust, kAapcsStackAlignment);
  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
  vixl32::Register scratch = temps.Acquire();
  exception_blocks_.emplace_back(
      new ArmVIXLJNIMacroAssembler::ArmException(scratch, stack_adjust));
  asm_.LoadFromOffset(kLoadWord,
                      scratch,
                      tr,
                      Thread::ExceptionOffset<kArmPointerSize>().Int32Value());

  ___ Cmp(scratch, 0);
  vixl32::Label* label = exception_blocks_.back()->Entry();
  ___ BPreferNear(ne, label);
  // TODO: think about using CBNZ here.
}

std::unique_ptr<JNIMacroLabel> ArmVIXLJNIMacroAssembler::CreateLabel() {
  return std::unique_ptr<JNIMacroLabel>(new ArmVIXLJNIMacroLabel());
}

void ArmVIXLJNIMacroAssembler::Jump(JNIMacroLabel* label) {
  CHECK(label != nullptr);
  ___ B(ArmVIXLJNIMacroLabel::Cast(label)->AsArm());
}

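// Branch to `label` depending on Thread::is_gc_marking: kZero branches when the flag is
// zero (GC not marking), kNotZero when it is non-zero.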
void ArmVIXLJNIMacroAssembler::TestGcMarking(JNIMacroLabel* label, JNIMacroUnaryCondition cond) {
  CHECK(label != nullptr);

  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
  vixl32::Register scratch = temps.Acquire();
  DCHECK_EQ(Thread::IsGcMarkingSize(), 4u);
  ___ Ldr(scratch, MemOperand(tr, Thread::IsGcMarkingOffset<kArmPointerSize>().Int32Value()));
  switch (cond) {
    case JNIMacroUnaryCondition::kZero:
      ___ CompareAndBranchIfZero(scratch, ArmVIXLJNIMacroLabel::Cast(label)->AsArm());
      break;
    case JNIMacroUnaryCondition::kNotZero:
      ___ CompareAndBranchIfNonZero(scratch, ArmVIXLJNIMacroLabel::Cast(label)->AsArm());
      break;
    default:
      LOG(FATAL) << "Not implemented unary condition: " << static_cast<int>(cond);
      UNREACHABLE();
  }
}

void ArmVIXLJNIMacroAssembler::Bind(JNIMacroLabel* label) {
  CHECK(label != nullptr);
  ___ Bind(ArmVIXLJNIMacroLabel::Cast(label)->AsArm());
}

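// Emit the slow path for an ExceptionPoll(): bind its entry label, undo any extra stack
// adjustment, move the exception object to r0 and call the pDeliverException entrypoint,
// which does not return.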
void ArmVIXLJNIMacroAssembler::EmitExceptionPoll(
    ArmVIXLJNIMacroAssembler::ArmException* exception) {
  ___ Bind(exception->Entry());
  if (exception->stack_adjust_ != 0) {  // Fix up the frame.
    DecreaseFrameSize(exception->stack_adjust_);
  }

  vixl32::Register scratch = exception->scratch_;
  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
  temps.Exclude(scratch);
  // Pass exception object as argument.
  // Don't care about preserving r0 as this won't return.
  ___ Mov(r0, scratch);
  ___ Ldr(lr,
          MemOperand(tr,
                     QUICK_ENTRYPOINT_OFFSET(kArmPointerSize, pDeliverException).Int32Value()));
  ___ Blx(lr);
}

void ArmVIXLJNIMacroAssembler::MemoryBarrier(ManagedRegister scratch ATTRIBUTE_UNUSED) {
  UNIMPLEMENTED(FATAL);
}

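// Common load helper used by Load() and LoadFromThread(): load `size` bytes from
// `base` + `offset` into `dest`, picking a byte load, word load, pair of word loads
// or FP load based on the destination register kind.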
void ArmVIXLJNIMacroAssembler::Load(ArmManagedRegister dest,
                                    vixl32::Register base,
                                    int32_t offset,
                                    size_t size) {
  if (dest.IsNoRegister()) {
    CHECK_EQ(0u, size) << dest;
  } else if (dest.IsCoreRegister()) {
    vixl::aarch32::Register dst = AsVIXLRegister(dest);
    CHECK(!dst.Is(sp)) << dest;

    UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
    temps.Exclude(dst);

    if (size == 1u) {
      ___ Ldrb(dst, MemOperand(base, offset));
    } else {
      CHECK_EQ(4u, size) << dest;
      ___ Ldr(dst, MemOperand(base, offset));
    }
  } else if (dest.IsRegisterPair()) {
    CHECK_EQ(8u, size) << dest;
    ___ Ldr(AsVIXLRegisterPairLow(dest), MemOperand(base, offset));
    ___ Ldr(AsVIXLRegisterPairHigh(dest), MemOperand(base, offset + 4));
  } else if (dest.IsSRegister()) {
    ___ Vldr(AsVIXLSRegister(dest), MemOperand(base, offset));
  } else {
    CHECK(dest.IsDRegister()) << dest;
    ___ Vldr(AsVIXLDRegister(dest), MemOperand(base, offset));
  }
}

}  // namespace arm
}  // namespace art