/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_OPTIMIZING_CODE_GENERATOR_ARM_VIXL_H_
#define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_ARM_VIXL_H_

#include "base/enums.h"
#include "code_generator.h"
#include "common_arm.h"
#include "dex/string_reference.h"
#include "dex/type_reference.h"
#include "driver/compiler_options.h"
#include "nodes.h"
#include "parallel_move_resolver.h"
#include "utils/arm/assembler_arm_vixl.h"

// TODO(VIXL): make vixl clean wrt -Wshadow.
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wshadow"
#include "aarch32/constants-aarch32.h"
#include "aarch32/instructions-aarch32.h"
#include "aarch32/macro-assembler-aarch32.h"
#pragma GCC diagnostic pop

namespace art {

namespace linker {
class Thumb2RelativePatcherTest;
}  // namespace linker

namespace arm {

// This constant is used as an approximate margin when emission of veneer and literal pools
// must be blocked.
static constexpr int kMaxMacroInstructionSizeInBytes =
    15 * vixl::aarch32::kMaxInstructionSizeInBytes;

static const vixl::aarch32::Register kParameterCoreRegistersVIXL[] = {
    vixl::aarch32::r1,
    vixl::aarch32::r2,
    vixl::aarch32::r3
};
static const size_t kParameterCoreRegistersLengthVIXL = arraysize(kParameterCoreRegistersVIXL);
static const vixl::aarch32::SRegister kParameterFpuRegistersVIXL[] = {
    vixl::aarch32::s0,
    vixl::aarch32::s1,
    vixl::aarch32::s2,
    vixl::aarch32::s3,
    vixl::aarch32::s4,
    vixl::aarch32::s5,
    vixl::aarch32::s6,
    vixl::aarch32::s7,
    vixl::aarch32::s8,
    vixl::aarch32::s9,
    vixl::aarch32::s10,
    vixl::aarch32::s11,
    vixl::aarch32::s12,
    vixl::aarch32::s13,
    vixl::aarch32::s14,
    vixl::aarch32::s15
};
static const size_t kParameterFpuRegistersLengthVIXL = arraysize(kParameterFpuRegistersVIXL);

static const vixl::aarch32::Register kMethodRegister = vixl::aarch32::r0;

// Callee saves core registers r5, r6, r7, r8 (except when emitting Baker
// read barriers, where it is used as Marking Register), r10, r11, and lr.
static const vixl::aarch32::RegisterList kCoreCalleeSaves = vixl::aarch32::RegisterList::Union(
    vixl::aarch32::RegisterList(vixl::aarch32::r5,
                                vixl::aarch32::r6,
                                vixl::aarch32::r7),
    // Do not consider r8 as a callee-save register with Baker read barriers.
    ((kEmitCompilerReadBarrier && kUseBakerReadBarrier)
         ? vixl::aarch32::RegisterList()
         : vixl::aarch32::RegisterList(vixl::aarch32::r8)),
    vixl::aarch32::RegisterList(vixl::aarch32::r10,
                                vixl::aarch32::r11,
                                vixl::aarch32::lr));

// Callee saves FP registers s16 to s31 inclusive.
static const vixl::aarch32::SRegisterList kFpuCalleeSaves =
    vixl::aarch32::SRegisterList(vixl::aarch32::s16, 16);

static const vixl::aarch32::Register kRuntimeParameterCoreRegistersVIXL[] = {
    vixl::aarch32::r0,
    vixl::aarch32::r1,
    vixl::aarch32::r2,
    vixl::aarch32::r3
};
static const size_t kRuntimeParameterCoreRegistersLengthVIXL =
    arraysize(kRuntimeParameterCoreRegistersVIXL);
static const vixl::aarch32::SRegister kRuntimeParameterFpuRegistersVIXL[] = {
    vixl::aarch32::s0,
    vixl::aarch32::s1,
    vixl::aarch32::s2,
    vixl::aarch32::s3
};
static const size_t kRuntimeParameterFpuRegistersLengthVIXL =
    arraysize(kRuntimeParameterFpuRegistersVIXL);

class LoadClassSlowPathARMVIXL;
class CodeGeneratorARMVIXL;

using VIXLInt32Literal = vixl::aarch32::Literal<int32_t>;
using VIXLUInt32Literal = vixl::aarch32::Literal<uint32_t>;

class JumpTableARMVIXL : public DeletableArenaObject<kArenaAllocSwitchTable> {
 public:
  explicit JumpTableARMVIXL(HPackedSwitch* switch_instr)
      : switch_instr_(switch_instr),
        table_start_(),
        bb_addresses_(switch_instr->GetAllocator()->Adapter(kArenaAllocCodeGenerator)) {
    uint32_t num_entries = switch_instr_->GetNumEntries();
    for (uint32_t i = 0; i < num_entries; i++) {
      VIXLInt32Literal *lit = new VIXLInt32Literal(0, vixl32::RawLiteral::kManuallyPlaced);
      bb_addresses_.emplace_back(lit);
    }
  }

  vixl::aarch32::Label* GetTableStartLabel() { return &table_start_; }

  void EmitTable(CodeGeneratorARMVIXL* codegen);
  void FixTable(CodeGeneratorARMVIXL* codegen);

 private:
  HPackedSwitch* const switch_instr_;
  vixl::aarch32::Label table_start_;
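  // One manually placed literal per switch entry; EmitTable() and FixTable()
  // (defined in the .cc file) emit these entries and fix up their values once
  // the target block locations are known.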
  ArenaVector<std::unique_ptr<VIXLInt32Literal>> bb_addresses_;

  DISALLOW_COPY_AND_ASSIGN(JumpTableARMVIXL);
};

class InvokeRuntimeCallingConventionARMVIXL
    : public CallingConvention<vixl::aarch32::Register, vixl::aarch32::SRegister> {
 public:
  InvokeRuntimeCallingConventionARMVIXL()
      : CallingConvention(kRuntimeParameterCoreRegistersVIXL,
                          kRuntimeParameterCoreRegistersLengthVIXL,
                          kRuntimeParameterFpuRegistersVIXL,
                          kRuntimeParameterFpuRegistersLengthVIXL,
                          kArmPointerSize) {}

 private:
  DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConventionARMVIXL);
};

class InvokeDexCallingConventionARMVIXL
    : public CallingConvention<vixl::aarch32::Register, vixl::aarch32::SRegister> {
 public:
  InvokeDexCallingConventionARMVIXL()
      : CallingConvention(kParameterCoreRegistersVIXL,
                          kParameterCoreRegistersLengthVIXL,
                          kParameterFpuRegistersVIXL,
                          kParameterFpuRegistersLengthVIXL,
                          kArmPointerSize) {}

 private:
  DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionARMVIXL);
};

class InvokeDexCallingConventionVisitorARMVIXL : public InvokeDexCallingConventionVisitor {
 public:
  InvokeDexCallingConventionVisitorARMVIXL() {}
  virtual ~InvokeDexCallingConventionVisitorARMVIXL() {}

  Location GetNextLocation(DataType::Type type) override;
  Location GetReturnLocation(DataType::Type type) const override;
  Location GetMethodLocation() const override;

 private:
  InvokeDexCallingConventionARMVIXL calling_convention;
  uint32_t double_index_ = 0;

  DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionVisitorARMVIXL);
};

class CriticalNativeCallingConventionVisitorARMVIXL : public InvokeDexCallingConventionVisitor {
 public:
  explicit CriticalNativeCallingConventionVisitorARMVIXL(bool for_register_allocation)
      : for_register_allocation_(for_register_allocation) {}

  virtual ~CriticalNativeCallingConventionVisitorARMVIXL() {}

  Location GetNextLocation(DataType::Type type) override;
  Location GetReturnLocation(DataType::Type type) const override;
  Location GetMethodLocation() const override;

  size_t GetStackOffset() const { return stack_offset_; }

 private:
  // Register allocator does not support adjusting frame size, so we cannot provide final locations
  // of stack arguments for register allocation. We ask the register allocator for any location and
  // move these arguments to the right place after adjusting the SP when generating the call.
  const bool for_register_allocation_;
  size_t gpr_index_ = 0u;
  size_t stack_offset_ = 0u;

  DISALLOW_COPY_AND_ASSIGN(CriticalNativeCallingConventionVisitorARMVIXL);
};

class FieldAccessCallingConventionARMVIXL : public FieldAccessCallingConvention {
 public:
  FieldAccessCallingConventionARMVIXL() {}

  Location GetObjectLocation() const override {
    return helpers::LocationFrom(vixl::aarch32::r1);
  }
  Location GetFieldIndexLocation() const override {
    return helpers::LocationFrom(vixl::aarch32::r0);
  }
  Location GetReturnLocation(DataType::Type type) const override {
    return DataType::Is64BitType(type)
        ? helpers::LocationFrom(vixl::aarch32::r0, vixl::aarch32::r1)
        : helpers::LocationFrom(vixl::aarch32::r0);
  }
  Location GetSetValueLocation(DataType::Type type, bool is_instance) const override {
    return DataType::Is64BitType(type)
        ? helpers::LocationFrom(vixl::aarch32::r2, vixl::aarch32::r3)
        : (is_instance
            ? helpers::LocationFrom(vixl::aarch32::r2)
            : helpers::LocationFrom(vixl::aarch32::r1));
  }
  Location GetFpuLocation(DataType::Type type) const override {
    return DataType::Is64BitType(type)
        ? helpers::LocationFrom(vixl::aarch32::s0, vixl::aarch32::s1)
        : helpers::LocationFrom(vixl::aarch32::s0);
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(FieldAccessCallingConventionARMVIXL);
};

class SlowPathCodeARMVIXL : public SlowPathCode {
 public:
  explicit SlowPathCodeARMVIXL(HInstruction* instruction)
      : SlowPathCode(instruction), entry_label_(), exit_label_() {}

  vixl::aarch32::Label* GetEntryLabel() { return &entry_label_; }
  vixl::aarch32::Label* GetExitLabel() { return &exit_label_; }

  void SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) override;
  void RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) override;

 private:
  vixl::aarch32::Label entry_label_;
  vixl::aarch32::Label exit_label_;

  DISALLOW_COPY_AND_ASSIGN(SlowPathCodeARMVIXL);
};

class ParallelMoveResolverARMVIXL : public ParallelMoveResolverWithSwap {
 public:
  ParallelMoveResolverARMVIXL(ArenaAllocator* allocator, CodeGeneratorARMVIXL* codegen)
      : ParallelMoveResolverWithSwap(allocator), codegen_(codegen) {}

  void EmitMove(size_t index) override;
  void EmitSwap(size_t index) override;
  void SpillScratch(int reg) override;
  void RestoreScratch(int reg) override;

  ArmVIXLAssembler* GetAssembler() const;

 private:
  void Exchange(vixl32::Register reg, int mem);
  void Exchange(int mem1, int mem2);

  CodeGeneratorARMVIXL* const codegen_;

  DISALLOW_COPY_AND_ASSIGN(ParallelMoveResolverARMVIXL);
};

class LocationsBuilderARMVIXL : public HGraphVisitor {
 public:
  LocationsBuilderARMVIXL(HGraph* graph, CodeGeneratorARMVIXL* codegen)
      : HGraphVisitor(graph), codegen_(codegen) {}

#define DECLARE_VISIT_INSTRUCTION(name, super)     \
  void Visit##name(H##name* instr) override;

  FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
  FOR_EACH_CONCRETE_INSTRUCTION_ARM(DECLARE_VISIT_INSTRUCTION)
  FOR_EACH_CONCRETE_INSTRUCTION_SHARED(DECLARE_VISIT_INSTRUCTION)

#undef DECLARE_VISIT_INSTRUCTION

  void VisitInstruction(HInstruction* instruction) override {
    LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
               << " (id " << instruction->GetId() << ")";
  }

 private:
  void HandleInvoke(HInvoke* invoke);
  void HandleBitwiseOperation(HBinaryOperation* operation, Opcode opcode);
  void HandleCondition(HCondition* condition);
  void HandleIntegerRotate(LocationSummary* locations);
  void HandleLongRotate(LocationSummary* locations);
  void HandleShift(HBinaryOperation* operation);
  void HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info);
  void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);

  Location ArithmeticZeroOrFpuRegister(HInstruction* input);
  Location ArmEncodableConstantOrRegister(HInstruction* constant, Opcode opcode);
  bool CanEncodeConstantAsImmediate(HConstant* input_cst, Opcode opcode);

  CodeGeneratorARMVIXL* const codegen_;
  InvokeDexCallingConventionVisitorARMVIXL parameter_visitor_;

  DISALLOW_COPY_AND_ASSIGN(LocationsBuilderARMVIXL);
};

class InstructionCodeGeneratorARMVIXL : public InstructionCodeGenerator {
 public:
  InstructionCodeGeneratorARMVIXL(HGraph* graph, CodeGeneratorARMVIXL* codegen);

#define DECLARE_VISIT_INSTRUCTION(name, super)     \
  void Visit##name(H##name* instr) override;

  FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
  FOR_EACH_CONCRETE_INSTRUCTION_ARM(DECLARE_VISIT_INSTRUCTION)
  FOR_EACH_CONCRETE_INSTRUCTION_SHARED(DECLARE_VISIT_INSTRUCTION)

#undef DECLARE_VISIT_INSTRUCTION

  void VisitInstruction(HInstruction* instruction) override {
    LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
               << " (id " << instruction->GetId() << ")";
  }

  ArmVIXLAssembler* GetAssembler() const { return assembler_; }
  ArmVIXLMacroAssembler* GetVIXLAssembler() { return GetAssembler()->GetVIXLAssembler(); }

 private:
  // Generate code for the given suspend check. If not null, `successor`
  // is the block to branch to if the suspend check is not needed, and after
  // the suspend call.
  void GenerateSuspendCheck(HSuspendCheck* instruction, HBasicBlock* successor);
  void GenerateClassInitializationCheck(LoadClassSlowPathARMVIXL* slow_path,
                                        vixl32::Register class_reg);
  void GenerateBitstringTypeCheckCompare(HTypeCheckInstruction* check,
                                         vixl::aarch32::Register temp,
                                         vixl::aarch32::FlagsUpdate flags_update);
  void GenerateAndConst(vixl::aarch32::Register out, vixl::aarch32::Register first, uint32_t value);
  void GenerateOrrConst(vixl::aarch32::Register out, vixl::aarch32::Register first, uint32_t value);
  void GenerateEorConst(vixl::aarch32::Register out, vixl::aarch32::Register first, uint32_t value);
  void GenerateAddLongConst(Location out, Location first, uint64_t value);
  void HandleBitwiseOperation(HBinaryOperation* operation);
  void HandleCondition(HCondition* condition);
  void HandleIntegerRotate(HRor* ror);
  void HandleLongRotate(HRor* ror);
  void HandleShift(HBinaryOperation* operation);

  void GenerateWideAtomicStore(vixl::aarch32::Register addr,
                               uint32_t offset,
                               vixl::aarch32::Register value_lo,
                               vixl::aarch32::Register value_hi,
                               vixl::aarch32::Register temp1,
                               vixl::aarch32::Register temp2,
                               HInstruction* instruction);
  void GenerateWideAtomicLoad(vixl::aarch32::Register addr,
                              uint32_t offset,
                              vixl::aarch32::Register out_lo,
                              vixl::aarch32::Register out_hi);

  void HandleFieldSet(HInstruction* instruction,
                      const FieldInfo& field_info,
                      bool value_can_be_null);
  void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);

  void GenerateMinMaxInt(LocationSummary* locations, bool is_min);
  void GenerateMinMaxLong(LocationSummary* locations, bool is_min);
  void GenerateMinMaxFloat(HInstruction* minmax, bool is_min);
  void GenerateMinMaxDouble(HInstruction* minmax, bool is_min);
  void GenerateMinMax(HBinaryOperation* minmax, bool is_min);

  // Generate a heap reference load using one register `out`:
  //
  //   out <- *(out + offset)
  //
  // while honoring heap poisoning and/or read barriers (if any).
  //
  // Location `maybe_temp` is used when generating a read barrier and
  // shall be a register in that case; it may be an invalid location
  // otherwise.
  void GenerateReferenceLoadOneRegister(HInstruction* instruction,
                                        Location out,
                                        uint32_t offset,
                                        Location maybe_temp,
                                        ReadBarrierOption read_barrier_option);
  // Generate a heap reference load using two different registers
  // `out` and `obj`:
  //
  //   out <- *(obj + offset)
  //
  // while honoring heap poisoning and/or read barriers (if any).
  //
  // Location `maybe_temp` is used when generating a Baker's (fast
  // path) read barrier and shall be a register in that case; it may
  // be an invalid location otherwise.
  void GenerateReferenceLoadTwoRegisters(HInstruction* instruction,
                                         Location out,
                                         Location obj,
                                         uint32_t offset,
                                         Location maybe_temp,
                                         ReadBarrierOption read_barrier_option);
  void GenerateTestAndBranch(HInstruction* instruction,
                             size_t condition_input_index,
                             vixl::aarch32::Label* true_target,
                             vixl::aarch32::Label* false_target,
                             bool far_target = true);
  void GenerateCompareTestAndBranch(HCondition* condition,
                                    vixl::aarch32::Label* true_target,
                                    vixl::aarch32::Label* false_target,
                                    bool is_far_target = true);
  void DivRemOneOrMinusOne(HBinaryOperation* instruction);
  void DivRemByPowerOfTwo(HBinaryOperation* instruction);
  void GenerateDivRemWithAnyConstant(HBinaryOperation* instruction);
  void GenerateDivRemConstantIntegral(HBinaryOperation* instruction);
  void HandleGoto(HInstruction* got, HBasicBlock* successor);

  vixl::aarch32::MemOperand VecAddress(
      HVecMemoryOperation* instruction,
      // This function may acquire a scratch register.
      vixl::aarch32::UseScratchRegisterScope* temps_scope,
      /*out*/ vixl32::Register* scratch);
  vixl::aarch32::AlignedMemOperand VecAddressUnaligned(
      HVecMemoryOperation* instruction,
      // This function may acquire a scratch register.
      vixl::aarch32::UseScratchRegisterScope* temps_scope,
      /*out*/ vixl32::Register* scratch);

  ArmVIXLAssembler* const assembler_;
  CodeGeneratorARMVIXL* const codegen_;

  DISALLOW_COPY_AND_ASSIGN(InstructionCodeGeneratorARMVIXL);
};

class CodeGeneratorARMVIXL : public CodeGenerator {
 public:
  CodeGeneratorARMVIXL(HGraph* graph,
                       const CompilerOptions& compiler_options,
                       OptimizingCompilerStats* stats = nullptr);
  virtual ~CodeGeneratorARMVIXL() {}

  void GenerateFrameEntry() override;
  void GenerateFrameExit() override;
  void Bind(HBasicBlock* block) override;
  void MoveConstant(Location destination, int32_t value) override;
  void MoveLocation(Location dst, Location src, DataType::Type dst_type) override;
  void AddLocationAsTemp(Location location, LocationSummary* locations) override;

  size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) override;
  size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) override;
  size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) override;
  size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) override;

  size_t GetWordSize() const override {
    return static_cast<size_t>(kArmPointerSize);
  }

  size_t GetCalleePreservedFPWidth() const override {
    return vixl::aarch32::kSRegSizeInBytes;
  }

  size_t GetSIMDRegisterWidth() const override {
    // ARM 32-bit backend doesn't support Q registers in vectorizer, only D
    // registers (due to register allocator restrictions: overlapping s/d/q
    // registers).
    return vixl::aarch32::kDRegSizeInBytes;
  }

  HGraphVisitor* GetLocationBuilder() override { return &location_builder_; }

  HGraphVisitor* GetInstructionVisitor() override { return &instruction_visitor_; }

  ArmVIXLAssembler* GetAssembler() override { return &assembler_; }

  const ArmVIXLAssembler& GetAssembler() const override { return assembler_; }

  ArmVIXLMacroAssembler* GetVIXLAssembler() { return GetAssembler()->GetVIXLAssembler(); }

  uintptr_t GetAddressOf(HBasicBlock* block) override {
    vixl::aarch32::Label* block_entry_label = GetLabelOf(block);
    DCHECK(block_entry_label->IsBound());
    return block_entry_label->GetLocation();
  }

  void FixJumpTables();
  void SetupBlockedRegisters() const override;

  void DumpCoreRegister(std::ostream& stream, int reg) const override;
  void DumpFloatingPointRegister(std::ostream& stream, int reg) const override;

  ParallelMoveResolver* GetMoveResolver() override { return &move_resolver_; }
  InstructionSet GetInstructionSet() const override { return InstructionSet::kThumb2; }

  const ArmInstructionSetFeatures& GetInstructionSetFeatures() const;

  // Helper method to move a 32-bit value between two locations.
  void Move32(Location destination, Location source);

  void LoadFromShiftedRegOffset(DataType::Type type,
                                Location out_loc,
                                vixl::aarch32::Register base,
                                vixl::aarch32::Register reg_index,
                                vixl::aarch32::Condition cond = vixl::aarch32::al);
  void StoreToShiftedRegOffset(DataType::Type type,
                               Location out_loc,
                               vixl::aarch32::Register base,
                               vixl::aarch32::Register reg_index,
                               vixl::aarch32::Condition cond = vixl::aarch32::al);

  // Generate code to invoke a runtime entry point.
  void InvokeRuntime(QuickEntrypointEnum entrypoint,
                     HInstruction* instruction,
                     uint32_t dex_pc,
                     SlowPathCode* slow_path = nullptr) override;

  // Generate code to invoke a runtime entry point, but do not record
  // PC-related information in a stack map.
  void InvokeRuntimeWithoutRecordingPcInfo(int32_t entry_point_offset,
                                           HInstruction* instruction,
                                           SlowPathCode* slow_path);

  // Emit a write barrier.
  void MarkGCCard(vixl::aarch32::Register temp,
                  vixl::aarch32::Register card,
                  vixl::aarch32::Register object,
                  vixl::aarch32::Register value,
                  bool can_be_null);

  void GenerateMemoryBarrier(MemBarrierKind kind);

  vixl::aarch32::Label* GetLabelOf(HBasicBlock* block) {
    block = FirstNonEmptyBlock(block);
    return &(block_labels_[block->GetBlockId()]);
  }

  vixl32::Label* GetFinalLabel(HInstruction* instruction, vixl32::Label* final_label);

  void Initialize() override {
    block_labels_.resize(GetGraph()->GetBlocks().size());
  }

  void Finalize(CodeAllocator* allocator) override;

  bool NeedsTwoRegisters(DataType::Type type) const override {
    return type == DataType::Type::kFloat64 || type == DataType::Type::kInt64;
  }

  void ComputeSpillMask() override;

  vixl::aarch32::Label* GetFrameEntryLabel() { return &frame_entry_label_; }

  // Check if the desired_string_load_kind is supported. If it is, return it,
  // otherwise return a fall-back kind that should be used instead.
  HLoadString::LoadKind GetSupportedLoadStringKind(
      HLoadString::LoadKind desired_string_load_kind) override;

  // Check if the desired_class_load_kind is supported. If it is, return it,
  // otherwise return a fall-back kind that should be used instead.
  HLoadClass::LoadKind GetSupportedLoadClassKind(
      HLoadClass::LoadKind desired_class_load_kind) override;

  // Check if the desired_dispatch_info is supported. If it is, return it,
  // otherwise return a fall-back info that should be used instead.
  HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
      const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
      ArtMethod* method) override;

  void GenerateStaticOrDirectCall(
      HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
  void GenerateVirtualCall(
      HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;

  void MoveFromReturnRegister(Location trg, DataType::Type type) override;

  // The PcRelativePatchInfo is used for PC-relative addressing of methods/strings/types,
  // whether through .data.bimg.rel.ro, .bss, or directly in the boot image.
  //
  // The PC-relative address is loaded with three instructions,
  // MOVW+MOVT to load the offset to base_reg and then ADD base_reg, PC. The offset
  // is calculated from the ADD's effective PC, i.e. PC+4 on Thumb2. Though we
  // currently emit these 3 instructions together, instruction scheduling could
  // split this sequence apart, so we keep separate labels for each of them.
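  //
  // Sketch of the emitted sequence (see EmitMovwMovtPlaceholder()); the immediates
  // are placeholders that are patched once the actual offset is known:
  //
  //   movw base_reg, #placeholder_lo16   @ bound to movw_label
  //   movt base_reg, #placeholder_hi16   @ bound to movt_label
  //   add  base_reg, pc                  @ bound to add_pc_label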
  struct PcRelativePatchInfo {
    PcRelativePatchInfo(const DexFile* dex_file, uint32_t off_or_idx)
        : target_dex_file(dex_file), offset_or_index(off_or_idx) { }
    PcRelativePatchInfo(PcRelativePatchInfo&& other) = default;

    // Target dex file or null for .data.bimg.rel.ro patches.
    const DexFile* target_dex_file;
    // Either the boot image offset (to write to .data.bimg.rel.ro) or string/type/method index.
    uint32_t offset_or_index;
    vixl::aarch32::Label movw_label;
    vixl::aarch32::Label movt_label;
    vixl::aarch32::Label add_pc_label;
  };

  PcRelativePatchInfo* NewBootImageIntrinsicPatch(uint32_t intrinsic_data);
  PcRelativePatchInfo* NewBootImageRelRoPatch(uint32_t boot_image_offset);
  PcRelativePatchInfo* NewBootImageMethodPatch(MethodReference target_method);
  PcRelativePatchInfo* NewMethodBssEntryPatch(MethodReference target_method);
  PcRelativePatchInfo* NewBootImageTypePatch(const DexFile& dex_file, dex::TypeIndex type_index);
  PcRelativePatchInfo* NewTypeBssEntryPatch(const DexFile& dex_file, dex::TypeIndex type_index);
  PcRelativePatchInfo* NewBootImageStringPatch(const DexFile& dex_file,
                                               dex::StringIndex string_index);
  PcRelativePatchInfo* NewStringBssEntryPatch(const DexFile& dex_file,
                                              dex::StringIndex string_index);

  // Emit the BL instruction for entrypoint thunk call and record the associated patch for AOT.
  void EmitEntrypointThunkCall(ThreadOffset32 entrypoint_offset);

  // Emit the BNE instruction for baker read barrier and record
  // the associated patch for AOT or slow path for JIT.
  void EmitBakerReadBarrierBne(uint32_t custom_data);

  VIXLUInt32Literal* DeduplicateBootImageAddressLiteral(uint32_t address);
  VIXLUInt32Literal* DeduplicateJitStringLiteral(const DexFile& dex_file,
                                                 dex::StringIndex string_index,
                                                 Handle<mirror::String> handle);
  VIXLUInt32Literal* DeduplicateJitClassLiteral(const DexFile& dex_file,
                                                dex::TypeIndex type_index,
                                                Handle<mirror::Class> handle);

  void LoadBootImageAddress(vixl::aarch32::Register reg, uint32_t boot_image_reference);
  void AllocateInstanceForIntrinsic(HInvokeStaticOrDirect* invoke, uint32_t boot_image_offset);

  void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) override;
  bool NeedsThunkCode(const linker::LinkerPatch& patch) const override;
  void EmitThunkCode(const linker::LinkerPatch& patch,
                     /*out*/ ArenaVector<uint8_t>* code,
                     /*out*/ std::string* debug_name) override;

  void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) override;

  // Generate a GC root reference load:
  //
  //   root <- *(obj + offset)
  //
  // while honoring read barriers based on read_barrier_option.
  void GenerateGcRootFieldLoad(HInstruction* instruction,
                               Location root,
                               vixl::aarch32::Register obj,
                               uint32_t offset,
                               ReadBarrierOption read_barrier_option);
  // Generate ADD for UnsafeCASObject to reconstruct the old value from
  // `old_value - expected` and mark it with Baker read barrier.
  void GenerateUnsafeCasOldValueAddWithBakerReadBarrier(vixl::aarch32::Register old_value,
                                                        vixl::aarch32::Register adjusted_old_value,
                                                        vixl::aarch32::Register expected);
  // Fast path implementation of ReadBarrier::Barrier for a heap
  // reference field load when Baker's read barriers are used.
  // Overload suitable for Unsafe.getObject/-Volatile() intrinsic.
  void GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
                                             Location ref,
                                             vixl::aarch32::Register obj,
                                             const vixl::aarch32::MemOperand& src,
                                             bool needs_null_check);
  // Fast path implementation of ReadBarrier::Barrier for a heap
  // reference field load when Baker's read barriers are used.
  void GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
                                             Location ref,
                                             vixl::aarch32::Register obj,
                                             uint32_t offset,
                                             Location temp,
                                             bool needs_null_check);
  // Fast path implementation of ReadBarrier::Barrier for a heap
  // reference array load when Baker's read barriers are used.
  void GenerateArrayLoadWithBakerReadBarrier(Location ref,
                                             vixl::aarch32::Register obj,
                                             uint32_t data_offset,
                                             Location index,
                                             Location temp,
                                             bool needs_null_check);

  // Emit code checking the status of the Marking Register, and
  // aborting the program if MR does not match the value stored in the
  // art::Thread object. Code is only emitted in debug mode and if
  // CompilerOptions::EmitRunTimeChecksInDebugMode returns true.
  //
  // Argument `code` is used to identify the different occurrences of
  // MaybeGenerateMarkingRegisterCheck in the code generator, and is
  // used together with kMarkingRegisterCheckBreakCodeBaseCode to
  // create the value passed to the BKPT instruction. Note that unlike
  // in the ARM64 code generator, where `__LINE__` is passed as `code`
  // argument to
  // CodeGeneratorARM64::MaybeGenerateMarkingRegisterCheck, we cannot
  // realistically do that here, as Encoding T1 for the BKPT
  // instruction only accepts 8-bit immediate values.
  //
  // If `temp_loc` is a valid location, it is expected to be a
  // register and will be used as a temporary to generate code;
  // otherwise, a temporary will be fetched from the core register
  // scratch pool.
  virtual void MaybeGenerateMarkingRegisterCheck(int code,
                                                 Location temp_loc = Location::NoLocation());

  // Generate a read barrier for a heap reference within `instruction`
  // using a slow path.
  //
  // A read barrier for an object reference read from the heap is
  // implemented as a call to the artReadBarrierSlow runtime entry
  // point, which is passed the values in locations `ref`, `obj`, and
  // `offset`:
  //
  //   mirror::Object* artReadBarrierSlow(mirror::Object* ref,
  //                                      mirror::Object* obj,
  //                                      uint32_t offset);
  //
  // The `out` location contains the value returned by
  // artReadBarrierSlow.
  //
  // When `index` is provided (i.e. for array accesses), the offset
  // value passed to artReadBarrierSlow is adjusted to take `index`
  // into account.
  void GenerateReadBarrierSlow(HInstruction* instruction,
                               Location out,
                               Location ref,
                               Location obj,
                               uint32_t offset,
                               Location index = Location::NoLocation());

  // If read barriers are enabled, generate a read barrier for a heap
  // reference using a slow path. If heap poisoning is enabled, also
  // unpoison the reference in `out`.
  void MaybeGenerateReadBarrierSlow(HInstruction* instruction,
                                    Location out,
                                    Location ref,
                                    Location obj,
                                    uint32_t offset,
                                    Location index = Location::NoLocation());

  // Generate a read barrier for a GC root within `instruction` using
  // a slow path.
  //
  // A read barrier for an object reference GC root is implemented as
  // a call to the artReadBarrierForRootSlow runtime entry point,
  // which is passed the value in location `root`:
  //
  //   mirror::Object* artReadBarrierForRootSlow(GcRoot<mirror::Object>* root);
  //
  // The `out` location contains the value returned by
  // artReadBarrierForRootSlow.
  void GenerateReadBarrierForRootSlow(HInstruction* instruction, Location out, Location root);

  void IncreaseFrame(size_t adjustment) override;
  void DecreaseFrame(size_t adjustment) override;

  void GenerateNop() override;

  void GenerateImplicitNullCheck(HNullCheck* instruction) override;
  void GenerateExplicitNullCheck(HNullCheck* instruction) override;

  JumpTableARMVIXL* CreateJumpTable(HPackedSwitch* switch_instr) {
    jump_tables_.emplace_back(new (GetGraph()->GetAllocator()) JumpTableARMVIXL(switch_instr));
    return jump_tables_.back().get();
  }
  void EmitJumpTables();

  void EmitMovwMovtPlaceholder(CodeGeneratorARMVIXL::PcRelativePatchInfo* labels,
                               vixl::aarch32::Register out);

  // `temp` is an extra temporary register that is used for some conditions;
  // callers may not specify it, in which case the method will use a scratch
  // register instead.
  void GenerateConditionWithZero(IfCondition condition,
                                 vixl::aarch32::Register out,
                                 vixl::aarch32::Register in,
                                 vixl::aarch32::Register temp = vixl32::Register());

  void MaybeRecordImplicitNullCheck(HInstruction* instr) final {
    // This function must only be called within special scopes
    // (EmissionCheckScope, ExactAssemblyScope) which prevent generation of
    // veneer/literal pools by the VIXL assembler.
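    //
    // A typical caller (sketch) wraps the instruction that may fault and this call
    // in one such scope, so that no pool can be emitted between them:
    //
    //   EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
    //   <emit the load/store that may fault, e.g. an LDR from `obj`>
    //   MaybeRecordImplicitNullCheck(instruction);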
    CHECK_EQ(GetVIXLAssembler()->ArePoolsBlocked(), true)
        << "The function must only be called within EmissionCheckScope or ExactAssemblyScope";
    CodeGenerator::MaybeRecordImplicitNullCheck(instr);
  }

  void MaybeGenerateInlineCacheCheck(HInstruction* instruction, vixl32::Register klass);
  void MaybeIncrementHotness(bool is_frame_entry);

 private:
  // Encoding of thunk type and data for link-time generated thunks for Baker read barriers.

  enum class BakerReadBarrierKind : uint8_t {
    kField,       // Field get or array get with constant offset (i.e. constant index).
    kArray,       // Array get with index in register.
    kGcRoot,      // GC root load.
    kUnsafeCas,   // UnsafeCASObject intrinsic.
    kLast = kUnsafeCas
  };

  enum class BakerReadBarrierWidth : uint8_t {
    kWide,          // 32-bit LDR (and 32-bit NEG if heap poisoning is enabled).
    kNarrow,        // 16-bit LDR (and 16-bit NEG if heap poisoning is enabled).
    kLast = kNarrow
  };

  static constexpr uint32_t kBakerReadBarrierInvalidEncodedReg = /* pc is invalid */ 15u;

  static constexpr size_t kBitsForBakerReadBarrierKind =
      MinimumBitsToStore(static_cast<size_t>(BakerReadBarrierKind::kLast));
  static constexpr size_t kBakerReadBarrierBitsForRegister =
      MinimumBitsToStore(kBakerReadBarrierInvalidEncodedReg);
  using BakerReadBarrierKindField =
      BitField<BakerReadBarrierKind, 0, kBitsForBakerReadBarrierKind>;
  using BakerReadBarrierFirstRegField =
      BitField<uint32_t, kBitsForBakerReadBarrierKind, kBakerReadBarrierBitsForRegister>;
  using BakerReadBarrierSecondRegField =
      BitField<uint32_t,
               kBitsForBakerReadBarrierKind + kBakerReadBarrierBitsForRegister,
               kBakerReadBarrierBitsForRegister>;
  static constexpr size_t kBitsForBakerReadBarrierWidth =
      MinimumBitsToStore(static_cast<size_t>(BakerReadBarrierWidth::kLast));
  using BakerReadBarrierWidthField =
      BitField<BakerReadBarrierWidth,
               kBitsForBakerReadBarrierKind + 2 * kBakerReadBarrierBitsForRegister,
               kBitsForBakerReadBarrierWidth>;
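  // Resulting layout of the encoded custom data, from least to most significant bits
  // (field widths as defined just above):
  //
  //   [ kind | first register | second register | width ]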

  static void CheckValidReg(uint32_t reg) {
    DCHECK(reg < vixl::aarch32::ip.GetCode() && reg != mr.GetCode()) << reg;
  }

  static uint32_t EncodeBakerReadBarrierFieldData(uint32_t base_reg,
                                                  uint32_t holder_reg,
                                                  bool narrow) {
    CheckValidReg(base_reg);
    CheckValidReg(holder_reg);
    DCHECK(!narrow || base_reg < 8u) << base_reg;
    BakerReadBarrierWidth width =
        narrow ? BakerReadBarrierWidth::kNarrow : BakerReadBarrierWidth::kWide;
    return BakerReadBarrierKindField::Encode(BakerReadBarrierKind::kField) |
           BakerReadBarrierFirstRegField::Encode(base_reg) |
           BakerReadBarrierSecondRegField::Encode(holder_reg) |
           BakerReadBarrierWidthField::Encode(width);
  }

  static uint32_t EncodeBakerReadBarrierArrayData(uint32_t base_reg) {
    CheckValidReg(base_reg);
    return BakerReadBarrierKindField::Encode(BakerReadBarrierKind::kArray) |
           BakerReadBarrierFirstRegField::Encode(base_reg) |
           BakerReadBarrierSecondRegField::Encode(kBakerReadBarrierInvalidEncodedReg) |
           BakerReadBarrierWidthField::Encode(BakerReadBarrierWidth::kWide);
  }

  static uint32_t EncodeBakerReadBarrierGcRootData(uint32_t root_reg, bool narrow) {
    CheckValidReg(root_reg);
    DCHECK(!narrow || root_reg < 8u) << root_reg;
    BakerReadBarrierWidth width =
        narrow ? BakerReadBarrierWidth::kNarrow : BakerReadBarrierWidth::kWide;
    return BakerReadBarrierKindField::Encode(BakerReadBarrierKind::kGcRoot) |
           BakerReadBarrierFirstRegField::Encode(root_reg) |
           BakerReadBarrierSecondRegField::Encode(kBakerReadBarrierInvalidEncodedReg) |
           BakerReadBarrierWidthField::Encode(width);
  }

  static uint32_t EncodeBakerReadBarrierUnsafeCasData(uint32_t root_reg) {
    CheckValidReg(root_reg);
    return BakerReadBarrierKindField::Encode(BakerReadBarrierKind::kUnsafeCas) |
           BakerReadBarrierFirstRegField::Encode(root_reg) |
           BakerReadBarrierSecondRegField::Encode(kBakerReadBarrierInvalidEncodedReg) |
           BakerReadBarrierWidthField::Encode(BakerReadBarrierWidth::kWide);
  }

  void CompileBakerReadBarrierThunk(ArmVIXLAssembler& assembler,
                                    uint32_t encoded_data,
                                    /*out*/ std::string* debug_name);

  using Uint32ToLiteralMap = ArenaSafeMap<uint32_t, VIXLUInt32Literal*>;
  using StringToLiteralMap = ArenaSafeMap<StringReference,
                                          VIXLUInt32Literal*,
                                          StringReferenceValueComparator>;
  using TypeToLiteralMap = ArenaSafeMap<TypeReference,
                                        VIXLUInt32Literal*,
                                        TypeReferenceValueComparator>;

  struct BakerReadBarrierPatchInfo {
    explicit BakerReadBarrierPatchInfo(uint32_t data) : label(), custom_data(data) { }

    vixl::aarch32::Label label;
    uint32_t custom_data;
  };

  VIXLUInt32Literal* DeduplicateUint32Literal(uint32_t value, Uint32ToLiteralMap* map);
  PcRelativePatchInfo* NewPcRelativePatch(const DexFile* dex_file,
                                          uint32_t offset_or_index,
                                          ArenaDeque<PcRelativePatchInfo>* patches);
  template <linker::LinkerPatch (*Factory)(size_t, const DexFile*, uint32_t, uint32_t)>
  static void EmitPcRelativeLinkerPatches(const ArenaDeque<PcRelativePatchInfo>& infos,
                                          ArenaVector<linker::LinkerPatch>* linker_patches);

  // Labels for each block that will be compiled.
  // We use a deque so that the `vixl::aarch32::Label` objects do not move in memory.
  ArenaDeque<vixl::aarch32::Label> block_labels_;  // Indexed by block id.
  vixl::aarch32::Label frame_entry_label_;

  ArenaVector<std::unique_ptr<JumpTableARMVIXL>> jump_tables_;
  LocationsBuilderARMVIXL location_builder_;
  InstructionCodeGeneratorARMVIXL instruction_visitor_;
  ParallelMoveResolverARMVIXL move_resolver_;

  ArmVIXLAssembler assembler_;

  // PC-relative method patch info for kBootImageLinkTimePcRelative.
  ArenaDeque<PcRelativePatchInfo> boot_image_method_patches_;
  // PC-relative method patch info for kBssEntry.
  ArenaDeque<PcRelativePatchInfo> method_bss_entry_patches_;
  // PC-relative type patch info for kBootImageLinkTimePcRelative.
  ArenaDeque<PcRelativePatchInfo> boot_image_type_patches_;
  // PC-relative type patch info for kBssEntry.
  ArenaDeque<PcRelativePatchInfo> type_bss_entry_patches_;
  // PC-relative String patch info for kBootImageLinkTimePcRelative.
  ArenaDeque<PcRelativePatchInfo> boot_image_string_patches_;
  // PC-relative String patch info for kBssEntry.
  ArenaDeque<PcRelativePatchInfo> string_bss_entry_patches_;
  // PC-relative patch info for IntrinsicObjects for the boot image,
  // and for method/type/string patches for kBootImageRelRo otherwise.
  ArenaDeque<PcRelativePatchInfo> boot_image_other_patches_;
  // Patch info for calls to entrypoint dispatch thunks. Used for slow paths.
  ArenaDeque<PatchInfo<vixl::aarch32::Label>> call_entrypoint_patches_;
  // Baker read barrier patch info.
  ArenaDeque<BakerReadBarrierPatchInfo> baker_read_barrier_patches_;

  // Deduplication map for 32-bit literals, used for JIT for boot image addresses.
  Uint32ToLiteralMap uint32_literals_;
  // Patches for string literals in JIT compiled code.
  StringToLiteralMap jit_string_patches_;
  // Patches for class literals in JIT compiled code.
  TypeToLiteralMap jit_class_patches_;

  // Baker read barrier slow paths, mapping custom data (uint32_t) to label.
  // Wrap the label to work around vixl::aarch32::Label being non-copyable
  // and non-moveable and as such unusable in ArenaSafeMap<>.
  struct LabelWrapper {
    LabelWrapper(const LabelWrapper& src)
        : label() {
      DCHECK(!src.label.IsReferenced() && !src.label.IsBound());
    }
    LabelWrapper() = default;
    vixl::aarch32::Label label;
  };
  ArenaSafeMap<uint32_t, LabelWrapper> jit_baker_read_barrier_slow_paths_;

  friend class linker::Thumb2RelativePatcherTest;
  DISALLOW_COPY_AND_ASSIGN(CodeGeneratorARMVIXL);
};

}  // namespace arm
}  // namespace art

#endif  // ART_COMPILER_OPTIMIZING_CODE_GENERATOR_ARM_VIXL_H_
966