/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_OPTIMIZING_REGISTER_ALLOCATOR_LINEAR_SCAN_H_
#define ART_COMPILER_OPTIMIZING_REGISTER_ALLOCATOR_LINEAR_SCAN_H_

#include "arch/instruction_set.h"
#include "base/macros.h"
#include "base/scoped_arena_containers.h"
#include "register_allocator.h"

25 namespace art {
26 
27 class CodeGenerator;
28 class HBasicBlock;
29 class HGraph;
30 class HInstruction;
31 class HParallelMove;
32 class HPhi;
33 class LiveInterval;
34 class Location;
35 class SsaLivenessAnalysis;
36 
37 /**
38  * An implementation of a linear scan register allocator on an `HGraph` with SSA form.
39  */
40 class RegisterAllocatorLinearScan : public RegisterAllocator {
41  public:
42   RegisterAllocatorLinearScan(ScopedArenaAllocator* allocator,
43                               CodeGenerator* codegen,
44                               const SsaLivenessAnalysis& analysis);
45   ~RegisterAllocatorLinearScan() override;
46 
47   void AllocateRegisters() override;
48 
Validate(bool log_fatal_on_failure)49   bool Validate(bool log_fatal_on_failure) override {
50     processing_core_registers_ = true;
51     if (!ValidateInternal(log_fatal_on_failure)) {
52       return false;
53     }
54     processing_core_registers_ = false;
55     return ValidateInternal(log_fatal_on_failure);
56   }
57 
GetNumberOfSpillSlots()58   size_t GetNumberOfSpillSlots() const {
59     return int_spill_slots_.size()
60         + long_spill_slots_.size()
61         + float_spill_slots_.size()
62         + double_spill_slots_.size()
63         + catch_phi_spill_slots_;
64   }
65 
66  private:
67   // Main methods of the allocator.
68   void LinearScan();
69   bool TryAllocateFreeReg(LiveInterval* interval);
70   bool AllocateBlockedReg(LiveInterval* interval);
71 
72   // Add `interval` in the given sorted list.
73   static void AddSorted(ScopedArenaVector<LiveInterval*>* array, LiveInterval* interval);
74 
75   // Returns whether `reg` is blocked by the code generator.
76   bool IsBlocked(int reg) const;
77 
78   // Update the interval for the register in `location` to cover [start, end).
79   void BlockRegister(Location location, size_t start, size_t end);
80   void BlockRegisters(size_t start, size_t end, bool caller_save_only = false);
81 
82   // Allocate a spill slot for the given interval. Should be called in linear
83   // order of interval starting positions.
84   void AllocateSpillSlotFor(LiveInterval* interval);
85 
86   // Allocate a spill slot for the given catch phi. Will allocate the same slot
87   // for phis which share the same vreg. Must be called in reverse linear order
88   // of lifetime positions and ascending vreg numbers for correctness.
89   void AllocateSpillSlotForCatchPhi(HPhi* phi);
90 
91   // Helper methods.
92   void AllocateRegistersInternal();
93   void ProcessInstruction(HInstruction* instruction);
94   bool ValidateInternal(bool log_fatal_on_failure) const;
95   void DumpInterval(std::ostream& stream, LiveInterval* interval) const;
96   void DumpAllIntervals(std::ostream& stream) const;
97   int FindAvailableRegisterPair(size_t* next_use, size_t starting_at) const;
98   int FindAvailableRegister(size_t* next_use, LiveInterval* current) const;
99   bool IsCallerSaveRegister(int reg) const;
100 
101   // Try splitting an active non-pair or unaligned pair interval at the given `position`.
102   // Returns whether it was successful at finding such an interval.
103   bool TrySplitNonPairOrUnalignedPairIntervalAt(size_t position,
104                                                 size_t first_register_use,
105                                                 size_t* next_use);
106 
107   // List of intervals for core registers that must be processed, ordered by start
108   // position. Last entry is the interval that has the lowest start position.
109   // This list is initially populated before doing the linear scan.
110   ScopedArenaVector<LiveInterval*> unhandled_core_intervals_;
111 
112   // List of intervals for floating-point registers. Same comments as above.
113   ScopedArenaVector<LiveInterval*> unhandled_fp_intervals_;
114 
115   // Currently processed list of unhandled intervals. Either `unhandled_core_intervals_`
116   // or `unhandled_fp_intervals_`.
117   ScopedArenaVector<LiveInterval*>* unhandled_;
118 
119   // List of intervals that have been processed.
120   ScopedArenaVector<LiveInterval*> handled_;
121 
122   // List of intervals that are currently active when processing a new live interval.
123   // That is, they have a live range that spans the start of the new interval.
124   ScopedArenaVector<LiveInterval*> active_;
125 
126   // List of intervals that are currently inactive when processing a new live interval.
127   // That is, they have a lifetime hole that spans the start of the new interval.
128   ScopedArenaVector<LiveInterval*> inactive_;
129 
130   // Fixed intervals for physical registers. Such intervals cover the positions
131   // where an instruction requires a specific register.
132   ScopedArenaVector<LiveInterval*> physical_core_register_intervals_;
133   ScopedArenaVector<LiveInterval*> physical_fp_register_intervals_;
134 
135   // Intervals for temporaries. Such intervals cover the positions
136   // where an instruction requires a temporary.
137   ScopedArenaVector<LiveInterval*> temp_intervals_;
138 
139   // The spill slots allocated for live intervals. We ensure spill slots
140   // are typed to avoid (1) doing moves and swaps between two different kinds
141   // of registers, and (2) swapping between a single stack slot and a double
142   // stack slot. This simplifies the parallel move resolver.
143   ScopedArenaVector<size_t> int_spill_slots_;
144   ScopedArenaVector<size_t> long_spill_slots_;
145   ScopedArenaVector<size_t> float_spill_slots_;
146   ScopedArenaVector<size_t> double_spill_slots_;
147 
148   // Spill slots allocated to catch phis. This category is special-cased because
149   // (1) slots are allocated prior to linear scan and in reverse linear order,
150   // (2) equivalent phis need to share slots despite having different types.
151   size_t catch_phi_spill_slots_;
152 
153   // Instructions that need a safepoint.
154   ScopedArenaVector<HInstruction*> safepoints_;
155 
156   // True if processing core registers. False if processing floating
157   // point registers.
158   bool processing_core_registers_;
159 
160   // Number of registers for the current register kind (core or floating point).
161   size_t number_of_registers_;
162 
163   // Temporary array, allocated ahead of time for simplicity.
164   size_t* registers_array_;
165 
166   // Blocked registers, as decided by the code generator.
167   bool* const blocked_core_registers_;
168   bool* const blocked_fp_registers_;
169 
170   // Slots reserved for out arguments.
171   size_t reserved_out_slots_;
172 
173   ART_FRIEND_TEST(RegisterAllocatorTest, FreeUntil);
174   ART_FRIEND_TEST(RegisterAllocatorTest, SpillInactive);
175 
176   DISALLOW_COPY_AND_ASSIGN(RegisterAllocatorLinearScan);
177 };
178 
179 }  // namespace art

#endif  // ART_COMPILER_OPTIMIZING_REGISTER_ALLOCATOR_LINEAR_SCAN_H_