/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_OPTIMIZING_CODE_GENERATOR_ARM64_H_
#define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_ARM64_H_

#include "base/bit_field.h"
#include "code_generator.h"
#include "common_arm64.h"
#include "dex/dex_file_types.h"
#include "dex/string_reference.h"
#include "dex/type_reference.h"
#include "driver/compiler_options.h"
#include "nodes.h"
#include "parallel_move_resolver.h"
#include "utils/arm64/assembler_arm64.h"

// TODO(VIXL): Make VIXL compile with -Wshadow.
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wshadow"
#include "aarch64/disasm-aarch64.h"
#include "aarch64/macro-assembler-aarch64.h"
#pragma GCC diagnostic pop

namespace art {

namespace linker {
class Arm64RelativePatcherTest;
}  // namespace linker

namespace arm64 {

class CodeGeneratorARM64;

// Use a local definition to prevent copying mistakes.
static constexpr size_t kArm64WordSize = static_cast<size_t>(kArm64PointerSize);

// These constants are used as an approximate margin when emission of veneer and literal pools
// must be blocked.
static constexpr int kMaxMacroInstructionSizeInBytes = 15 * vixl::aarch64::kInstructionSize;
static constexpr int kInvokeCodeMarginSizeInBytes = 6 * kMaxMacroInstructionSizeInBytes;
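// With vixl::aarch64::kInstructionSize == 4 bytes, this works out to a 60-byte
// allowance per macro instruction and a 360-byte margin around an invoke.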

// SVE is currently not enabled.
static constexpr bool kArm64AllowSVE = false;

static const vixl::aarch64::Register kParameterCoreRegisters[] = {
  vixl::aarch64::x1,
  vixl::aarch64::x2,
  vixl::aarch64::x3,
  vixl::aarch64::x4,
  vixl::aarch64::x5,
  vixl::aarch64::x6,
  vixl::aarch64::x7
};
static constexpr size_t kParameterCoreRegistersLength = arraysize(kParameterCoreRegisters);
static const vixl::aarch64::VRegister kParameterFPRegisters[] = {
  vixl::aarch64::d0,
  vixl::aarch64::d1,
  vixl::aarch64::d2,
  vixl::aarch64::d3,
  vixl::aarch64::d4,
  vixl::aarch64::d5,
  vixl::aarch64::d6,
  vixl::aarch64::d7
};
static constexpr size_t kParameterFPRegistersLength = arraysize(kParameterFPRegisters);

// Thread Register.
const vixl::aarch64::Register tr = vixl::aarch64::x19;
// Marking Register.
const vixl::aarch64::Register mr = vixl::aarch64::x20;
// Method register on invoke.
static const vixl::aarch64::Register kArtMethodRegister = vixl::aarch64::x0;
const vixl::aarch64::CPURegList vixl_reserved_core_registers(vixl::aarch64::ip0,
                                                             vixl::aarch64::ip1);
const vixl::aarch64::CPURegList vixl_reserved_fp_registers(vixl::aarch64::d31);

const vixl::aarch64::CPURegList runtime_reserved_core_registers =
    vixl::aarch64::CPURegList(
        tr,
        // Reserve X20 as Marking Register when emitting Baker read barriers.
        ((kEmitCompilerReadBarrier && kUseBakerReadBarrier) ? mr : vixl::aarch64::NoCPUReg),
        vixl::aarch64::lr);
// Some instructions have special requirements for a temporary. For example,
// LoadClass/kBssEntry and LoadString/kBssEntry for Baker read barriers require
// a temp that's not R0 (to avoid an extra move), and Baker read barrier field
// loads with large offsets need a fixed register to limit the number of link-time
// thunks we generate. For these and similar cases, we want to reserve a specific
// register that's neither callee-save nor an argument register. We choose x15.
inline Location FixedTempLocation() {
  return Location::RegisterLocation(vixl::aarch64::x15.GetCode());
}
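// Typical use when building a LocationSummary (an illustrative sketch):
//   locations->AddTemp(FixedTempLocation());  // Reserve x15 as the extra temp.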

// AAPCS64 callee-saved registers, excluding x19 (Thread Register) and,
// when emitting Baker read barriers, x20 (Marking Register).
const vixl::aarch64::CPURegList callee_saved_core_registers(
    vixl::aarch64::CPURegister::kRegister,
    vixl::aarch64::kXRegSize,
    ((kEmitCompilerReadBarrier && kUseBakerReadBarrier)
         ? vixl::aarch64::x21.GetCode()
         : vixl::aarch64::x20.GetCode()),
    vixl::aarch64::x30.GetCode());
const vixl::aarch64::CPURegList callee_saved_fp_registers(vixl::aarch64::CPURegister::kVRegister,
                                                          vixl::aarch64::kDRegSize,
                                                          vixl::aarch64::d8.GetCode(),
                                                          vixl::aarch64::d15.GetCode());
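
// Returns the return-value location for the given type: d0 for floating-point
// types and x0 for core types (no location for void).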
Location ARM64ReturnLocation(DataType::Type return_type);

class SlowPathCodeARM64 : public SlowPathCode {
 public:
  explicit SlowPathCodeARM64(HInstruction* instruction)
      : SlowPathCode(instruction), entry_label_(), exit_label_() {}

  vixl::aarch64::Label* GetEntryLabel() { return &entry_label_; }
  vixl::aarch64::Label* GetExitLabel() { return &exit_label_; }

  void SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) override;
  void RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) override;

 private:
  vixl::aarch64::Label entry_label_;
  vixl::aarch64::Label exit_label_;

  DISALLOW_COPY_AND_ASSIGN(SlowPathCodeARM64);
};

class JumpTableARM64 : public DeletableArenaObject<kArenaAllocSwitchTable> {
 public:
  explicit JumpTableARM64(HPackedSwitch* switch_instr)
      : switch_instr_(switch_instr), table_start_() {}

  vixl::aarch64::Label* GetTableStartLabel() { return &table_start_; }

  void EmitTable(CodeGeneratorARM64* codegen);

 private:
  HPackedSwitch* const switch_instr_;
  vixl::aarch64::Label table_start_;

  DISALLOW_COPY_AND_ASSIGN(JumpTableARM64);
};

static const vixl::aarch64::Register kRuntimeParameterCoreRegisters[] =
    { vixl::aarch64::x0,
      vixl::aarch64::x1,
      vixl::aarch64::x2,
      vixl::aarch64::x3,
      vixl::aarch64::x4,
      vixl::aarch64::x5,
      vixl::aarch64::x6,
      vixl::aarch64::x7 };
static constexpr size_t kRuntimeParameterCoreRegistersLength =
    arraysize(kRuntimeParameterCoreRegisters);
static const vixl::aarch64::VRegister kRuntimeParameterFpuRegisters[] =
    { vixl::aarch64::d0,
      vixl::aarch64::d1,
      vixl::aarch64::d2,
      vixl::aarch64::d3,
      vixl::aarch64::d4,
      vixl::aarch64::d5,
      vixl::aarch64::d6,
      vixl::aarch64::d7 };
static constexpr size_t kRuntimeParameterFpuRegistersLength =
    arraysize(kRuntimeParameterFpuRegisters);

class InvokeRuntimeCallingConvention : public CallingConvention<vixl::aarch64::Register,
                                                                vixl::aarch64::VRegister> {
 public:
  static constexpr size_t kParameterCoreRegistersLength = arraysize(kParameterCoreRegisters);

  InvokeRuntimeCallingConvention()
      : CallingConvention(kRuntimeParameterCoreRegisters,
                          kRuntimeParameterCoreRegistersLength,
                          kRuntimeParameterFpuRegisters,
                          kRuntimeParameterFpuRegistersLength,
                          kArm64PointerSize) {}

  Location GetReturnLocation(DataType::Type return_type);

 private:
  DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConvention);
};

class InvokeDexCallingConvention : public CallingConvention<vixl::aarch64::Register,
                                                            vixl::aarch64::VRegister> {
 public:
  InvokeDexCallingConvention()
      : CallingConvention(kParameterCoreRegisters,
                          kParameterCoreRegistersLength,
                          kParameterFPRegisters,
                          kParameterFPRegistersLength,
                          kArm64PointerSize) {}

  Location GetReturnLocation(DataType::Type return_type) const {
    return ARM64ReturnLocation(return_type);
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConvention);
};
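
// For example (an illustrative reading of the convention above): a managed
// method `int f(long a, float b)` has its ArtMethod* in x0, receives `a` in
// x1 and `b` in s0, and returns its result in w0.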

class InvokeDexCallingConventionVisitorARM64 : public InvokeDexCallingConventionVisitor {
 public:
  InvokeDexCallingConventionVisitorARM64() {}
  virtual ~InvokeDexCallingConventionVisitorARM64() {}

  Location GetNextLocation(DataType::Type type) override;
  Location GetReturnLocation(DataType::Type return_type) const override {
    return calling_convention.GetReturnLocation(return_type);
  }
  Location GetMethodLocation() const override;

 private:
  InvokeDexCallingConvention calling_convention;

  DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionVisitorARM64);
};

class CriticalNativeCallingConventionVisitorARM64 : public InvokeDexCallingConventionVisitor {
 public:
  explicit CriticalNativeCallingConventionVisitorARM64(bool for_register_allocation)
      : for_register_allocation_(for_register_allocation) {}

  virtual ~CriticalNativeCallingConventionVisitorARM64() {}

  Location GetNextLocation(DataType::Type type) override;
  Location GetReturnLocation(DataType::Type type) const override;
  Location GetMethodLocation() const override;

  size_t GetStackOffset() const { return stack_offset_; }

 private:
  // The register allocator does not support adjusting the frame size, so we cannot provide
  // final locations of stack arguments for register allocation. We ask the register allocator
  // for any location and move these arguments to the right place after adjusting the SP when
  // generating the call.
  const bool for_register_allocation_;
  size_t gpr_index_ = 0u;
  size_t fpr_index_ = 0u;
  size_t stack_offset_ = 0u;

  DISALLOW_COPY_AND_ASSIGN(CriticalNativeCallingConventionVisitorARM64);
};

class FieldAccessCallingConventionARM64 : public FieldAccessCallingConvention {
 public:
  FieldAccessCallingConventionARM64() {}

  Location GetObjectLocation() const override {
    return helpers::LocationFrom(vixl::aarch64::x1);
  }
  Location GetFieldIndexLocation() const override {
    return helpers::LocationFrom(vixl::aarch64::x0);
  }
  Location GetReturnLocation(DataType::Type type ATTRIBUTE_UNUSED) const override {
    return helpers::LocationFrom(vixl::aarch64::x0);
  }
  Location GetSetValueLocation(DataType::Type type ATTRIBUTE_UNUSED,
                               bool is_instance) const override {
    return is_instance
        ? helpers::LocationFrom(vixl::aarch64::x2)
        : helpers::LocationFrom(vixl::aarch64::x1);
  }
  Location GetFpuLocation(DataType::Type type ATTRIBUTE_UNUSED) const override {
    return helpers::LocationFrom(vixl::aarch64::d0);
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(FieldAccessCallingConventionARM64);
};
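
// Under this convention, then, an instance field set receives the field index
// in x0, the object in x1, and the new value in x2 (x1 for a static field),
// with floating-point values in d0 (an illustrative reading of the accessors
// above).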

class InstructionCodeGeneratorARM64 : public InstructionCodeGenerator {
 public:
  InstructionCodeGeneratorARM64(HGraph* graph, CodeGeneratorARM64* codegen);

#define DECLARE_VISIT_INSTRUCTION(name, super) \
  void Visit##name(H##name* instr) override;

  FOR_EACH_CONCRETE_INSTRUCTION_SCALAR_COMMON(DECLARE_VISIT_INSTRUCTION)
  FOR_EACH_CONCRETE_INSTRUCTION_ARM64(DECLARE_VISIT_INSTRUCTION)
  FOR_EACH_CONCRETE_INSTRUCTION_SHARED(DECLARE_VISIT_INSTRUCTION)

#undef DECLARE_VISIT_INSTRUCTION

  void VisitInstruction(HInstruction* instruction) override {
    LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
               << " (id " << instruction->GetId() << ")";
  }

  Arm64Assembler* GetAssembler() const { return assembler_; }
  vixl::aarch64::MacroAssembler* GetVIXLAssembler() { return GetAssembler()->GetVIXLAssembler(); }

  // SIMD helpers.
  virtual Location AllocateSIMDScratchLocation(vixl::aarch64::UseScratchRegisterScope* scope) = 0;
  virtual void FreeSIMDScratchLocation(Location loc,
                                       vixl::aarch64::UseScratchRegisterScope* scope) = 0;
  virtual void LoadSIMDRegFromStack(Location destination, Location source) = 0;
  virtual void MoveSIMDRegToSIMDReg(Location destination, Location source) = 0;
  virtual void MoveToSIMDStackSlot(Location destination, Location source) = 0;

 protected:
  void GenerateClassInitializationCheck(SlowPathCodeARM64* slow_path,
                                        vixl::aarch64::Register class_reg);
  void GenerateBitstringTypeCheckCompare(HTypeCheckInstruction* check,
                                         vixl::aarch64::Register temp);
  void GenerateSuspendCheck(HSuspendCheck* instruction, HBasicBlock* successor);
  void HandleBinaryOp(HBinaryOperation* instr);

  void HandleFieldSet(HInstruction* instruction,
                      const FieldInfo& field_info,
                      bool value_can_be_null);
  void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);
  void HandleCondition(HCondition* instruction);

  // Generate a heap reference load using one register `out`:
  //
  //   out <- *(out + offset)
  //
  // while honoring heap poisoning and/or read barriers (if any).
  //
  // Location `maybe_temp` is used when generating a read barrier and
  // shall be a register in that case; it may be an invalid location
  // otherwise.
  void GenerateReferenceLoadOneRegister(HInstruction* instruction,
                                        Location out,
                                        uint32_t offset,
                                        Location maybe_temp,
                                        ReadBarrierOption read_barrier_option);
  // Generate a heap reference load using two different registers
  // `out` and `obj`:
  //
  //   out <- *(obj + offset)
  //
  // while honoring heap poisoning and/or read barriers (if any).
  //
  // Location `maybe_temp` is used when generating a Baker's (fast
  // path) read barrier and shall be a register in that case; it may
  // be an invalid location otherwise.
  void GenerateReferenceLoadTwoRegisters(HInstruction* instruction,
                                         Location out,
                                         Location obj,
                                         uint32_t offset,
                                         Location maybe_temp,
                                         ReadBarrierOption read_barrier_option);

  // Generate a floating-point comparison.
  void GenerateFcmp(HInstruction* instruction);

  void HandleShift(HBinaryOperation* instr);
  void GenerateTestAndBranch(HInstruction* instruction,
                             size_t condition_input_index,
                             vixl::aarch64::Label* true_target,
                             vixl::aarch64::Label* false_target);
  void DivRemOneOrMinusOne(HBinaryOperation* instruction);
  void DivRemByPowerOfTwo(HBinaryOperation* instruction);
  void GenerateIncrementNegativeByOne(vixl::aarch64::Register out,
                                      vixl::aarch64::Register in,
                                      bool use_cond_inc);
  void GenerateResultRemWithAnyConstant(vixl::aarch64::Register out,
                                        vixl::aarch64::Register dividend,
                                        vixl::aarch64::Register quotient,
                                        int64_t divisor,
                                        // This function may acquire a scratch register.
                                        vixl::aarch64::UseScratchRegisterScope* temps_scope);
  void GenerateInt64DivRemWithAnyConstant(HBinaryOperation* instruction);
  void GenerateInt32DivRemWithAnyConstant(HBinaryOperation* instruction);
  void GenerateDivRemWithAnyConstant(HBinaryOperation* instruction);
  void GenerateIntDiv(HDiv* instruction);
  void GenerateIntDivForConstDenom(HDiv* instruction);
  void GenerateIntDivForPower2Denom(HDiv* instruction);
  void GenerateIntRem(HRem* instruction);
  void GenerateIntRemForConstDenom(HRem* instruction);
  void GenerateIntRemForPower2Denom(HRem* instruction);
  void HandleGoto(HInstruction* got, HBasicBlock* successor);

  // Helper to set up locations for vector memory operations. Returns the memory operand and,
  // if used, sets the output parameter scratch to a temporary register used in this operand,
  // so that the client can release it right after the memory operand use.
  // Neon version.
  vixl::aarch64::MemOperand VecNeonAddress(
      HVecMemoryOperation* instruction,
      // This function may acquire a scratch register.
      vixl::aarch64::UseScratchRegisterScope* temps_scope,
      size_t size,
      bool is_string_char_at,
      /*out*/ vixl::aarch64::Register* scratch);

  Arm64Assembler* const assembler_;
  CodeGeneratorARM64* const codegen_;

  DISALLOW_COPY_AND_ASSIGN(InstructionCodeGeneratorARM64);
};

class LocationsBuilderARM64 : public HGraphVisitor {
 public:
  LocationsBuilderARM64(HGraph* graph, CodeGeneratorARM64* codegen)
      : HGraphVisitor(graph), codegen_(codegen) {}

#define DECLARE_VISIT_INSTRUCTION(name, super) \
  void Visit##name(H##name* instr) override;

  FOR_EACH_CONCRETE_INSTRUCTION_SCALAR_COMMON(DECLARE_VISIT_INSTRUCTION)
  FOR_EACH_CONCRETE_INSTRUCTION_ARM64(DECLARE_VISIT_INSTRUCTION)
  FOR_EACH_CONCRETE_INSTRUCTION_SHARED(DECLARE_VISIT_INSTRUCTION)

#undef DECLARE_VISIT_INSTRUCTION

  void VisitInstruction(HInstruction* instruction) override {
    LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
               << " (id " << instruction->GetId() << ")";
  }

 protected:
  void HandleBinaryOp(HBinaryOperation* instr);
  void HandleFieldSet(HInstruction* instruction);
  void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);
  void HandleInvoke(HInvoke* instr);
  void HandleCondition(HCondition* instruction);
  void HandleShift(HBinaryOperation* instr);

  CodeGeneratorARM64* const codegen_;
  InvokeDexCallingConventionVisitorARM64 parameter_visitor_;

  DISALLOW_COPY_AND_ASSIGN(LocationsBuilderARM64);
};

class InstructionCodeGeneratorARM64Neon : public InstructionCodeGeneratorARM64 {
 public:
  InstructionCodeGeneratorARM64Neon(HGraph* graph, CodeGeneratorARM64* codegen)
      : InstructionCodeGeneratorARM64(graph, codegen) {}

#define DECLARE_VISIT_INSTRUCTION(name, super) \
  void Visit##name(H##name* instr) override;

  FOR_EACH_CONCRETE_INSTRUCTION_VECTOR_COMMON(DECLARE_VISIT_INSTRUCTION)

#undef DECLARE_VISIT_INSTRUCTION

  Location AllocateSIMDScratchLocation(vixl::aarch64::UseScratchRegisterScope* scope) override;
  void FreeSIMDScratchLocation(Location loc,
                               vixl::aarch64::UseScratchRegisterScope* scope) override;
  void LoadSIMDRegFromStack(Location destination, Location source) override;
  void MoveSIMDRegToSIMDReg(Location destination, Location source) override;
  void MoveToSIMDStackSlot(Location destination, Location source) override;
};

class LocationsBuilderARM64Neon : public LocationsBuilderARM64 {
 public:
  LocationsBuilderARM64Neon(HGraph* graph, CodeGeneratorARM64* codegen)
      : LocationsBuilderARM64(graph, codegen) {}

#define DECLARE_VISIT_INSTRUCTION(name, super) \
  void Visit##name(H##name* instr) override;

  FOR_EACH_CONCRETE_INSTRUCTION_VECTOR_COMMON(DECLARE_VISIT_INSTRUCTION)

#undef DECLARE_VISIT_INSTRUCTION
};

class InstructionCodeGeneratorARM64Sve : public InstructionCodeGeneratorARM64 {
 public:
  InstructionCodeGeneratorARM64Sve(HGraph* graph, CodeGeneratorARM64* codegen)
      : InstructionCodeGeneratorARM64(graph, codegen) {}

#define DECLARE_VISIT_INSTRUCTION(name, super) \
  void Visit##name(H##name* instr) override;

  FOR_EACH_CONCRETE_INSTRUCTION_VECTOR_COMMON(DECLARE_VISIT_INSTRUCTION)

#undef DECLARE_VISIT_INSTRUCTION

  Location AllocateSIMDScratchLocation(vixl::aarch64::UseScratchRegisterScope* scope) override;
  void FreeSIMDScratchLocation(Location loc,
                               vixl::aarch64::UseScratchRegisterScope* scope) override;
  void LoadSIMDRegFromStack(Location destination, Location source) override;
  void MoveSIMDRegToSIMDReg(Location destination, Location source) override;
  void MoveToSIMDStackSlot(Location destination, Location source) override;
};

class LocationsBuilderARM64Sve : public LocationsBuilderARM64 {
 public:
  LocationsBuilderARM64Sve(HGraph* graph, CodeGeneratorARM64* codegen)
      : LocationsBuilderARM64(graph, codegen) {}

#define DECLARE_VISIT_INSTRUCTION(name, super) \
  void Visit##name(H##name* instr) override;

  FOR_EACH_CONCRETE_INSTRUCTION_VECTOR_COMMON(DECLARE_VISIT_INSTRUCTION)

#undef DECLARE_VISIT_INSTRUCTION
};

class ParallelMoveResolverARM64 : public ParallelMoveResolverNoSwap {
 public:
  ParallelMoveResolverARM64(ArenaAllocator* allocator, CodeGeneratorARM64* codegen)
      : ParallelMoveResolverNoSwap(allocator), codegen_(codegen), vixl_temps_() {}

 protected:
  void PrepareForEmitNativeCode() override;
  void FinishEmitNativeCode() override;
  Location AllocateScratchLocationFor(Location::Kind kind) override;
  void FreeScratchLocation(Location loc) override;
  void EmitMove(size_t index) override;

 private:
  Arm64Assembler* GetAssembler() const;
  vixl::aarch64::MacroAssembler* GetVIXLAssembler() const {
    return GetAssembler()->GetVIXLAssembler();
  }

  CodeGeneratorARM64* const codegen_;
  vixl::aarch64::UseScratchRegisterScope vixl_temps_;

  DISALLOW_COPY_AND_ASSIGN(ParallelMoveResolverARM64);
};

class CodeGeneratorARM64 : public CodeGenerator {
 public:
  CodeGeneratorARM64(HGraph* graph,
                     const CompilerOptions& compiler_options,
                     OptimizingCompilerStats* stats = nullptr);
  virtual ~CodeGeneratorARM64() {}

  void GenerateFrameEntry() override;
  void GenerateFrameExit() override;

  vixl::aarch64::CPURegList GetFramePreservedCoreRegisters() const;
  vixl::aarch64::CPURegList GetFramePreservedFPRegisters() const;

  void Bind(HBasicBlock* block) override;

  vixl::aarch64::Label* GetLabelOf(HBasicBlock* block) {
    block = FirstNonEmptyBlock(block);
    return &(block_labels_[block->GetBlockId()]);
  }

  size_t GetWordSize() const override {
    return kArm64WordSize;
  }

  bool SupportsPredicatedSIMD() const override { return ShouldUseSVE(); }

  size_t GetSlowPathFPWidth() const override {
    return GetGraph()->HasSIMD()
        ? GetSIMDRegisterWidth()
        : vixl::aarch64::kDRegSizeInBytes;
  }

  size_t GetCalleePreservedFPWidth() const override {
    return vixl::aarch64::kDRegSizeInBytes;
  }

  size_t GetSIMDRegisterWidth() const override {
    return vixl::aarch64::kQRegSizeInBytes;
  }

  uintptr_t GetAddressOf(HBasicBlock* block) override {
    vixl::aarch64::Label* block_entry_label = GetLabelOf(block);
    DCHECK(block_entry_label->IsBound());
    return block_entry_label->GetLocation();
  }

  HGraphVisitor* GetLocationBuilder() override { return location_builder_; }
  InstructionCodeGeneratorARM64* GetInstructionCodeGeneratorArm64() {
    return instruction_visitor_;
  }
  HGraphVisitor* GetInstructionVisitor() override { return GetInstructionCodeGeneratorArm64(); }
  Arm64Assembler* GetAssembler() override { return &assembler_; }
  const Arm64Assembler& GetAssembler() const override { return assembler_; }
  vixl::aarch64::MacroAssembler* GetVIXLAssembler() { return GetAssembler()->GetVIXLAssembler(); }

  // Emit a write barrier.
  void MarkGCCard(vixl::aarch64::Register object,
                  vixl::aarch64::Register value,
                  bool value_can_be_null);
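  // The emitted sequence is conceptually (an illustrative sketch):
  //   ldr  card, [tr, #card_table_offset]   ; card table base from the Thread
  //   lsr  temp, object, #kCardShift        ; card index for `object`
  //   strb card, [card, temp]               ; dirty the card
  // `value` only feeds the null check performed when `value_can_be_null` is set.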

  void GenerateMemoryBarrier(MemBarrierKind kind);

  // Register allocation.

  void SetupBlockedRegisters() const override;

  size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) override;
  size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) override;
  size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) override;
  size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) override;

  // The number of registers that can be allocated. The register allocator may
  // decide to reserve and not use a few of them.
  // We do not consider registers sp, xzr, wzr. They are either not allocatable
  // (xzr, wzr) or make for poor allocatable registers (sp has alignment
  // requirements, etc.). This also facilitates our task as all other registers
  // can easily be mapped to or from their type and index or code.
  static const int kNumberOfAllocatableRegisters = vixl::aarch64::kNumberOfRegisters - 1;
  static const int kNumberOfAllocatableFPRegisters = vixl::aarch64::kNumberOfVRegisters;
  static constexpr int kNumberOfAllocatableRegisterPairs = 0;

  void DumpCoreRegister(std::ostream& stream, int reg) const override;
  void DumpFloatingPointRegister(std::ostream& stream, int reg) const override;

  InstructionSet GetInstructionSet() const override {
    return InstructionSet::kArm64;
  }

  const Arm64InstructionSetFeatures& GetInstructionSetFeatures() const;

  void Initialize() override {
    block_labels_.resize(GetGraph()->GetBlocks().size());
  }

  // We want to use the STP and LDP instructions to spill and restore registers for slow paths.
  // These instructions can only encode offsets that are multiples of the register size accessed.
  uint32_t GetPreferredSlotsAlignment() const override { return vixl::aarch64::kXRegSizeInBytes; }
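  // For example, `stp x2, x3, [sp, #16]` is encodable, whereas an offset of
  // #12 is not valid for X-register STP/LDP; hence the 8-byte slot alignment.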

  JumpTableARM64* CreateJumpTable(HPackedSwitch* switch_instr) {
    jump_tables_.emplace_back(new (GetGraph()->GetAllocator()) JumpTableARM64(switch_instr));
    return jump_tables_.back().get();
  }
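
  // Illustrative use from a packed-switch lowering (a sketch): create the
  // table, address it with `Adr(table_base, table->GetTableStartLabel())`,
  // and rely on Finalize() to emit the table contents via EmitJumpTables().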

  void Finalize(CodeAllocator* allocator) override;

  // Code generation helpers.
  void MoveConstant(vixl::aarch64::CPURegister destination, HConstant* constant);
  void MoveConstant(Location destination, int32_t value) override;
  void MoveLocation(Location dst, Location src, DataType::Type dst_type) override;
  void AddLocationAsTemp(Location location, LocationSummary* locations) override;

  void Load(DataType::Type type,
            vixl::aarch64::CPURegister dst,
            const vixl::aarch64::MemOperand& src);
  void Store(DataType::Type type,
             vixl::aarch64::CPURegister src,
             const vixl::aarch64::MemOperand& dst);
  void LoadAcquire(HInstruction* instruction,
                   vixl::aarch64::CPURegister dst,
                   const vixl::aarch64::MemOperand& src,
                   bool needs_null_check);
  void StoreRelease(HInstruction* instruction,
                    DataType::Type type,
                    vixl::aarch64::CPURegister src,
                    const vixl::aarch64::MemOperand& dst,
                    bool needs_null_check);

  // Generate code to invoke a runtime entry point.
  void InvokeRuntime(QuickEntrypointEnum entrypoint,
                     HInstruction* instruction,
                     uint32_t dex_pc,
                     SlowPathCode* slow_path = nullptr) override;

  // Generate code to invoke a runtime entry point, but do not record
  // PC-related information in a stack map.
  void InvokeRuntimeWithoutRecordingPcInfo(int32_t entry_point_offset,
                                           HInstruction* instruction,
                                           SlowPathCode* slow_path);

  ParallelMoveResolverARM64* GetMoveResolver() override { return &move_resolver_; }

  bool NeedsTwoRegisters(DataType::Type type ATTRIBUTE_UNUSED) const override {
    return false;
  }

  // Check if the desired_string_load_kind is supported. If it is, return it,
  // otherwise return a fall-back kind that should be used instead.
  HLoadString::LoadKind GetSupportedLoadStringKind(
      HLoadString::LoadKind desired_string_load_kind) override;

  // Check if the desired_class_load_kind is supported. If it is, return it,
  // otherwise return a fall-back kind that should be used instead.
  HLoadClass::LoadKind GetSupportedLoadClassKind(
      HLoadClass::LoadKind desired_class_load_kind) override;

  // Check if the desired_dispatch_info is supported. If it is, return it,
  // otherwise return a fall-back info that should be used instead.
  HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
      const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
      ArtMethod* method) override;

  void GenerateStaticOrDirectCall(
      HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
  void GenerateVirtualCall(
      HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;

  void MoveFromReturnRegister(Location trg, DataType::Type type) override;

  // Add a new boot image intrinsic patch for an instruction and return the label
  // to be bound before the instruction. The instruction will be either the
  // ADRP (pass `adrp_label = null`) or the ADD (pass `adrp_label` pointing
  // to the associated ADRP patch label).
  vixl::aarch64::Label* NewBootImageIntrinsicPatch(uint32_t intrinsic_data,
                                                   vixl::aarch64::Label* adrp_label = nullptr);

  // Add a new boot image relocation patch for an instruction and return the label
  // to be bound before the instruction. The instruction will be either the
  // ADRP (pass `adrp_label = null`) or the LDR (pass `adrp_label` pointing
  // to the associated ADRP patch label).
  vixl::aarch64::Label* NewBootImageRelRoPatch(uint32_t boot_image_offset,
                                               vixl::aarch64::Label* adrp_label = nullptr);

  // Add a new boot image method patch for an instruction and return the label
  // to be bound before the instruction. The instruction will be either the
  // ADRP (pass `adrp_label = null`) or the ADD (pass `adrp_label` pointing
  // to the associated ADRP patch label).
  vixl::aarch64::Label* NewBootImageMethodPatch(MethodReference target_method,
                                                vixl::aarch64::Label* adrp_label = nullptr);

  // Add a new .bss entry method patch for an instruction and return
  // the label to be bound before the instruction. The instruction will be
  // either the ADRP (pass `adrp_label = null`) or the LDR (pass `adrp_label`
  // pointing to the associated ADRP patch label).
  vixl::aarch64::Label* NewMethodBssEntryPatch(MethodReference target_method,
                                               vixl::aarch64::Label* adrp_label = nullptr);

  // Add a new boot image type patch for an instruction and return the label
  // to be bound before the instruction. The instruction will be either the
  // ADRP (pass `adrp_label = null`) or the ADD (pass `adrp_label` pointing
  // to the associated ADRP patch label).
  vixl::aarch64::Label* NewBootImageTypePatch(const DexFile& dex_file,
                                              dex::TypeIndex type_index,
                                              vixl::aarch64::Label* adrp_label = nullptr);

  // Add a new .bss entry type patch for an instruction and return the label
  // to be bound before the instruction. The instruction will be either the
  // ADRP (pass `adrp_label = null`) or the LDR (pass `adrp_label` pointing
  // to the associated ADRP patch label).
  vixl::aarch64::Label* NewBssEntryTypePatch(const DexFile& dex_file,
                                             dex::TypeIndex type_index,
                                             vixl::aarch64::Label* adrp_label = nullptr);

  // Add a new boot image string patch for an instruction and return the label
  // to be bound before the instruction. The instruction will be either the
  // ADRP (pass `adrp_label = null`) or the ADD (pass `adrp_label` pointing
  // to the associated ADRP patch label).
  vixl::aarch64::Label* NewBootImageStringPatch(const DexFile& dex_file,
                                                dex::StringIndex string_index,
                                                vixl::aarch64::Label* adrp_label = nullptr);

  // Add a new .bss entry string patch for an instruction and return the label
  // to be bound before the instruction. The instruction will be either the
  // ADRP (pass `adrp_label = null`) or the LDR (pass `adrp_label` pointing
  // to the associated ADRP patch label).
  vixl::aarch64::Label* NewStringBssEntryPatch(const DexFile& dex_file,
                                               dex::StringIndex string_index,
                                               vixl::aarch64::Label* adrp_label = nullptr);

  // Emit the BL instruction for entrypoint thunk call and record the associated patch for AOT.
  void EmitEntrypointThunkCall(ThreadOffset64 entrypoint_offset);

  // Emit the CBNZ instruction for Baker read barrier and record
  // the associated patch for AOT or slow path for JIT.
  void EmitBakerReadBarrierCbnz(uint32_t custom_data);

  vixl::aarch64::Literal<uint32_t>* DeduplicateBootImageAddressLiteral(uint64_t address);
  vixl::aarch64::Literal<uint32_t>* DeduplicateJitStringLiteral(const DexFile& dex_file,
                                                                dex::StringIndex string_index,
                                                                Handle<mirror::String> handle);
  vixl::aarch64::Literal<uint32_t>* DeduplicateJitClassLiteral(const DexFile& dex_file,
                                                               dex::TypeIndex type_index,
                                                               Handle<mirror::Class> handle);

  void EmitAdrpPlaceholder(vixl::aarch64::Label* fixup_label, vixl::aarch64::Register reg);
  void EmitAddPlaceholder(vixl::aarch64::Label* fixup_label,
                          vixl::aarch64::Register out,
                          vixl::aarch64::Register base);
  void EmitLdrOffsetPlaceholder(vixl::aarch64::Label* fixup_label,
                                vixl::aarch64::Register out,
                                vixl::aarch64::Register base);
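
  // The patch/placeholder pairs above are used in a two-instruction pattern.
  // For a .bss string load, for example (an illustrative sketch):
  //   vixl::aarch64::Label* adrp_label = NewStringBssEntryPatch(dex_file, string_index);
  //   EmitAdrpPlaceholder(adrp_label, temp);
  //   vixl::aarch64::Label* ldr_label =
  //       NewStringBssEntryPatch(dex_file, string_index, adrp_label);
  //   EmitLdrOffsetPlaceholder(ldr_label, out, temp);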

  void LoadBootImageAddress(vixl::aarch64::Register reg, uint32_t boot_image_reference);
  void AllocateInstanceForIntrinsic(HInvokeStaticOrDirect* invoke, uint32_t boot_image_offset);

  void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) override;
  bool NeedsThunkCode(const linker::LinkerPatch& patch) const override;
  void EmitThunkCode(const linker::LinkerPatch& patch,
                     /*out*/ ArenaVector<uint8_t>* code,
                     /*out*/ std::string* debug_name) override;

  void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) override;

  // Generate a GC root reference load:
  //
  //   root <- *(obj + offset)
  //
  // while honoring read barriers based on read_barrier_option.
  void GenerateGcRootFieldLoad(HInstruction* instruction,
                               Location root,
                               vixl::aarch64::Register obj,
                               uint32_t offset,
                               vixl::aarch64::Label* fixup_label,
                               ReadBarrierOption read_barrier_option);
  // Generate MOV for the `old_value` in UnsafeCASObject and mark it with Baker read barrier.
  void GenerateUnsafeCasOldValueMovWithBakerReadBarrier(vixl::aarch64::Register marked,
                                                        vixl::aarch64::Register old_value);
  // Fast path implementation of ReadBarrier::Barrier for a heap
  // reference field load when Baker's read barriers are used.
  // Overload suitable for Unsafe.getObject/-Volatile() intrinsic.
  void GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
                                             Location ref,
                                             vixl::aarch64::Register obj,
                                             const vixl::aarch64::MemOperand& src,
                                             bool needs_null_check,
                                             bool use_load_acquire);
  // Fast path implementation of ReadBarrier::Barrier for a heap
  // reference field load when Baker's read barriers are used.
  void GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
                                             Location ref,
                                             vixl::aarch64::Register obj,
                                             uint32_t offset,
                                             Location maybe_temp,
                                             bool needs_null_check,
                                             bool use_load_acquire);
  // Fast path implementation of ReadBarrier::Barrier for a heap
  // reference array load when Baker's read barriers are used.
  void GenerateArrayLoadWithBakerReadBarrier(HArrayGet* instruction,
                                             Location ref,
                                             vixl::aarch64::Register obj,
                                             uint32_t data_offset,
                                             Location index,
                                             bool needs_null_check);

  // Emit code checking the status of the Marking Register, and
  // aborting the program if MR does not match the value stored in the
  // art::Thread object. Code is only emitted in debug mode and if
  // CompilerOptions::EmitRunTimeChecksInDebugMode returns true.
  //
  // Argument `code` is used to identify the different occurrences of
  // MaybeGenerateMarkingRegisterCheck in the code generator, and is
  // passed to the BRK instruction.
  //
  // If `temp_loc` is a valid location, it is expected to be a
  // register and will be used as a temporary to generate code;
  // otherwise, a temporary will be fetched from the core register
  // scratch pool.
  virtual void MaybeGenerateMarkingRegisterCheck(int code,
                                                 Location temp_loc = Location::NoLocation());

  // Generate a read barrier for a heap reference within `instruction`
  // using a slow path.
  //
  // A read barrier for an object reference read from the heap is
  // implemented as a call to the artReadBarrierSlow runtime entry
  // point, which is passed the values in locations `ref`, `obj`, and
  // `offset`:
  //
  //   mirror::Object* artReadBarrierSlow(mirror::Object* ref,
  //                                      mirror::Object* obj,
  //                                      uint32_t offset);
  //
  // The `out` location contains the value returned by
  // artReadBarrierSlow.
  //
  // When `index` is provided (i.e. for array accesses), the offset
  // value passed to artReadBarrierSlow is adjusted to take `index`
  // into account.
  void GenerateReadBarrierSlow(HInstruction* instruction,
                               Location out,
                               Location ref,
                               Location obj,
                               uint32_t offset,
                               Location index = Location::NoLocation());

  // If read barriers are enabled, generate a read barrier for a heap
  // reference using a slow path. If heap poisoning is enabled, also
  // unpoison the reference in `out`.
  void MaybeGenerateReadBarrierSlow(HInstruction* instruction,
                                    Location out,
                                    Location ref,
                                    Location obj,
                                    uint32_t offset,
                                    Location index = Location::NoLocation());

  // Generate a read barrier for a GC root within `instruction` using
  // a slow path.
  //
  // A read barrier for an object reference GC root is implemented as
  // a call to the artReadBarrierForRootSlow runtime entry point,
  // which is passed the value in location `root`:
  //
  //   mirror::Object* artReadBarrierForRootSlow(GcRoot<mirror::Object>* root);
  //
  // The `out` location contains the value returned by
  // artReadBarrierForRootSlow.
  void GenerateReadBarrierForRootSlow(HInstruction* instruction, Location out, Location root);

  void IncreaseFrame(size_t adjustment) override;
  void DecreaseFrame(size_t adjustment) override;

  void GenerateNop() override;

  void GenerateImplicitNullCheck(HNullCheck* instruction) override;
  void GenerateExplicitNullCheck(HNullCheck* instruction) override;

  void MaybeRecordImplicitNullCheck(HInstruction* instr) final {
    // The function must only be called within special scopes
    // (EmissionCheckScope, ExactAssemblyScope) which prevent generation of
    // veneer/literal pools by the VIXL assembler.
    CHECK_EQ(GetVIXLAssembler()->ArePoolsBlocked(), true)
        << "The function must only be called within EmissionCheckScope or ExactAssemblyScope";
    CodeGenerator::MaybeRecordImplicitNullCheck(instr);
  }

  void MaybeGenerateInlineCacheCheck(HInstruction* instruction, vixl::aarch64::Register klass);
  void MaybeIncrementHotness(bool is_frame_entry);

 private:
  // Encoding of thunk type and data for link-time generated thunks for Baker read barriers.

  enum class BakerReadBarrierKind : uint8_t {
    kField,    // Field get or array get with constant offset (i.e. constant index).
    kAcquire,  // Volatile field get.
    kArray,    // Array get with index in register.
    kGcRoot,   // GC root load.
    kLast = kGcRoot
  };

  static constexpr uint32_t kBakerReadBarrierInvalidEncodedReg = /* sp/zr is invalid */ 31u;

  static constexpr size_t kBitsForBakerReadBarrierKind =
      MinimumBitsToStore(static_cast<size_t>(BakerReadBarrierKind::kLast));
  static constexpr size_t kBakerReadBarrierBitsForRegister =
      MinimumBitsToStore(kBakerReadBarrierInvalidEncodedReg);
  using BakerReadBarrierKindField =
      BitField<BakerReadBarrierKind, 0, kBitsForBakerReadBarrierKind>;
  using BakerReadBarrierFirstRegField =
      BitField<uint32_t, kBitsForBakerReadBarrierKind, kBakerReadBarrierBitsForRegister>;
  using BakerReadBarrierSecondRegField =
      BitField<uint32_t,
               kBitsForBakerReadBarrierKind + kBakerReadBarrierBitsForRegister,
               kBakerReadBarrierBitsForRegister>;

  static void CheckValidReg(uint32_t reg) {
    DCHECK(reg < vixl::aarch64::lr.GetCode() &&
           reg != vixl::aarch64::ip0.GetCode() &&
           reg != vixl::aarch64::ip1.GetCode()) << reg;
  }

  static inline uint32_t EncodeBakerReadBarrierFieldData(uint32_t base_reg, uint32_t holder_reg) {
    CheckValidReg(base_reg);
    CheckValidReg(holder_reg);
    return BakerReadBarrierKindField::Encode(BakerReadBarrierKind::kField) |
           BakerReadBarrierFirstRegField::Encode(base_reg) |
           BakerReadBarrierSecondRegField::Encode(holder_reg);
  }
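
  // With 2 bits for the kind and 5 bits per register (per the constants
  // above), a kField barrier with base register x1 and holder register x2
  // encodes as (1 << 2) | (2 << 7) == 0x104, for example.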

  static inline uint32_t EncodeBakerReadBarrierAcquireData(uint32_t base_reg,
                                                           uint32_t holder_reg) {
    CheckValidReg(base_reg);
    CheckValidReg(holder_reg);
    DCHECK_NE(base_reg, holder_reg);
    return BakerReadBarrierKindField::Encode(BakerReadBarrierKind::kAcquire) |
           BakerReadBarrierFirstRegField::Encode(base_reg) |
           BakerReadBarrierSecondRegField::Encode(holder_reg);
  }

  static inline uint32_t EncodeBakerReadBarrierArrayData(uint32_t base_reg) {
    CheckValidReg(base_reg);
    return BakerReadBarrierKindField::Encode(BakerReadBarrierKind::kArray) |
           BakerReadBarrierFirstRegField::Encode(base_reg) |
           BakerReadBarrierSecondRegField::Encode(kBakerReadBarrierInvalidEncodedReg);
  }

  static inline uint32_t EncodeBakerReadBarrierGcRootData(uint32_t root_reg) {
    CheckValidReg(root_reg);
    return BakerReadBarrierKindField::Encode(BakerReadBarrierKind::kGcRoot) |
           BakerReadBarrierFirstRegField::Encode(root_reg) |
           BakerReadBarrierSecondRegField::Encode(kBakerReadBarrierInvalidEncodedReg);
  }

  void CompileBakerReadBarrierThunk(Arm64Assembler& assembler,
                                    uint32_t encoded_data,
                                    /*out*/ std::string* debug_name);

  using Uint64ToLiteralMap = ArenaSafeMap<uint64_t, vixl::aarch64::Literal<uint64_t>*>;
  using Uint32ToLiteralMap = ArenaSafeMap<uint32_t, vixl::aarch64::Literal<uint32_t>*>;
  using StringToLiteralMap = ArenaSafeMap<StringReference,
                                          vixl::aarch64::Literal<uint32_t>*,
                                          StringReferenceValueComparator>;
  using TypeToLiteralMap = ArenaSafeMap<TypeReference,
                                        vixl::aarch64::Literal<uint32_t>*,
                                        TypeReferenceValueComparator>;

  vixl::aarch64::Literal<uint32_t>* DeduplicateUint32Literal(uint32_t value);
  vixl::aarch64::Literal<uint64_t>* DeduplicateUint64Literal(uint64_t value);

  // The PcRelativePatchInfo is used for PC-relative addressing of methods/strings/types,
  // whether through .data.bimg.rel.ro, .bss, or directly in the boot image.
  struct PcRelativePatchInfo : PatchInfo<vixl::aarch64::Label> {
    PcRelativePatchInfo(const DexFile* dex_file, uint32_t off_or_idx)
        : PatchInfo<vixl::aarch64::Label>(dex_file, off_or_idx), pc_insn_label() { }

    vixl::aarch64::Label* pc_insn_label;
  };

  struct BakerReadBarrierPatchInfo {
    explicit BakerReadBarrierPatchInfo(uint32_t data) : label(), custom_data(data) { }

    vixl::aarch64::Label label;
    uint32_t custom_data;
  };

  vixl::aarch64::Label* NewPcRelativePatch(const DexFile* dex_file,
                                           uint32_t offset_or_index,
                                           vixl::aarch64::Label* adrp_label,
                                           ArenaDeque<PcRelativePatchInfo>* patches);

  void EmitJumpTables();

  template <linker::LinkerPatch (*Factory)(size_t, const DexFile*, uint32_t, uint32_t)>
  static void EmitPcRelativeLinkerPatches(const ArenaDeque<PcRelativePatchInfo>& infos,
                                          ArenaVector<linker::LinkerPatch>* linker_patches);

  // Returns whether SVE features are supported and should be used.
  bool ShouldUseSVE() const;

  // Labels for each block that will be compiled.
  // We use a deque so that the `vixl::aarch64::Label` objects do not move in memory.
  ArenaDeque<vixl::aarch64::Label> block_labels_;  // Indexed by block id.
  vixl::aarch64::Label frame_entry_label_;
  ArenaVector<std::unique_ptr<JumpTableARM64>> jump_tables_;

  LocationsBuilderARM64Neon location_builder_neon_;
  InstructionCodeGeneratorARM64Neon instruction_visitor_neon_;
  LocationsBuilderARM64Sve location_builder_sve_;
  InstructionCodeGeneratorARM64Sve instruction_visitor_sve_;

  LocationsBuilderARM64* location_builder_;
  InstructionCodeGeneratorARM64* instruction_visitor_;
  ParallelMoveResolverARM64 move_resolver_;
  Arm64Assembler assembler_;

  // PC-relative method patch info for kBootImageLinkTimePcRelative.
  ArenaDeque<PcRelativePatchInfo> boot_image_method_patches_;
  // PC-relative method patch info for kBssEntry.
  ArenaDeque<PcRelativePatchInfo> method_bss_entry_patches_;
  // PC-relative type patch info for kBootImageLinkTimePcRelative.
  ArenaDeque<PcRelativePatchInfo> boot_image_type_patches_;
  // PC-relative type patch info for kBssEntry.
  ArenaDeque<PcRelativePatchInfo> type_bss_entry_patches_;
  // PC-relative String patch info for kBootImageLinkTimePcRelative.
  ArenaDeque<PcRelativePatchInfo> boot_image_string_patches_;
  // PC-relative String patch info for kBssEntry.
  ArenaDeque<PcRelativePatchInfo> string_bss_entry_patches_;
  // PC-relative patch info for IntrinsicObjects for the boot image,
  // and for method/type/string patches for kBootImageRelRo otherwise.
  ArenaDeque<PcRelativePatchInfo> boot_image_other_patches_;
  // Patch info for calls to entrypoint dispatch thunks. Used for slow paths.
  ArenaDeque<PatchInfo<vixl::aarch64::Label>> call_entrypoint_patches_;
  // Baker read barrier patch info.
  ArenaDeque<BakerReadBarrierPatchInfo> baker_read_barrier_patches_;

  // Deduplication map for 32-bit literals, used for JIT for boot image addresses.
  Uint32ToLiteralMap uint32_literals_;
  // Deduplication map for 64-bit literals, used for JIT for method address or method code.
  Uint64ToLiteralMap uint64_literals_;
  // Patches for string literals in JIT compiled code.
  StringToLiteralMap jit_string_patches_;
  // Patches for class literals in JIT compiled code.
  TypeToLiteralMap jit_class_patches_;

  // Baker read barrier slow paths, mapping custom data (uint32_t) to label.
  // Wrap the label to work around vixl::aarch64::Label being non-copyable
  // and non-moveable and as such unusable in ArenaSafeMap<>.
  struct LabelWrapper {
    LabelWrapper(const LabelWrapper& src)
        : label() {
      DCHECK(!src.label.IsLinked() && !src.label.IsBound());
    }
    LabelWrapper() = default;
    vixl::aarch64::Label label;
  };
  ArenaSafeMap<uint32_t, LabelWrapper> jit_baker_read_barrier_slow_paths_;

  friend class linker::Arm64RelativePatcherTest;
  DISALLOW_COPY_AND_ASSIGN(CodeGeneratorARM64);
};

inline Arm64Assembler* ParallelMoveResolverARM64::GetAssembler() const {
  return codegen_->GetAssembler();
}

}  // namespace arm64
}  // namespace art

#endif  // ART_COMPILER_OPTIMIZING_CODE_GENERATOR_ARM64_H_