/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "loop_optimization.h"

#include "arch/arm/instruction_set_features_arm.h"
#include "arch/arm64/instruction_set_features_arm64.h"
#include "arch/instruction_set.h"
#include "arch/x86/instruction_set_features_x86.h"
#include "arch/x86_64/instruction_set_features_x86_64.h"
#include "code_generator.h"
#include "driver/compiler_options.h"
#include "linear_order.h"
#include "mirror/array-inl.h"
#include "mirror/string.h"

namespace art {

// Enables vectorization (SIMDization) in the loop optimizer.
static constexpr bool kEnableVectorization = true;

//
// Static helpers.
//

// Base alignment for arrays/strings guaranteed by the Android runtime.
static uint32_t BaseAlignment() {
  return kObjectAlignment;
}

// Hidden offset for arrays/strings guaranteed by the Android runtime.
static uint32_t HiddenOffset(DataType::Type type, bool is_string_char_at) {
  return is_string_char_at
      ? mirror::String::ValueOffset().Uint32Value()
      : mirror::Array::DataOffset(DataType::Size(type)).Uint32Value();
}

// Remove the instruction from the graph. A bit more elaborate than the usual
// instruction removal, since there may be a cycle in the use structure.
static void RemoveFromCycle(HInstruction* instruction) {
  instruction->RemoveAsUserOfAllInputs();
  instruction->RemoveEnvironmentUsers();
  instruction->GetBlock()->RemoveInstructionOrPhi(instruction, /*ensure_safety=*/ false);
  RemoveEnvironmentUses(instruction);
  ResetEnvironmentInputRecords(instruction);
}

// Detect a goto block and set succ to the single successor.
static bool IsGotoBlock(HBasicBlock* block, /*out*/ HBasicBlock** succ) {
  if (block->GetPredecessors().size() == 1 &&
      block->GetSuccessors().size() == 1 &&
      block->IsSingleGoto()) {
    *succ = block->GetSingleSuccessor();
    return true;
  }
  return false;
}

// Detect an early exit loop.
static bool IsEarlyExit(HLoopInformation* loop_info) {
  HBlocksInLoopReversePostOrderIterator it_loop(*loop_info);
  for (it_loop.Advance(); !it_loop.Done(); it_loop.Advance()) {
    for (HBasicBlock* successor : it_loop.Current()->GetSuccessors()) {
      if (!loop_info->Contains(*successor)) {
        return true;
      }
    }
  }
  return false;
}

// Forward declaration.
static bool IsZeroExtensionAndGet(HInstruction* instruction,
                                  DataType::Type type,
                                  /*out*/ HInstruction** operand);

// Detect a sign extension in instruction from the given type.
// Returns the promoted operand on success.
static bool IsSignExtensionAndGet(HInstruction* instruction,
                                  DataType::Type type,
                                  /*out*/ HInstruction** operand) {
  // Accept any already wider constant that would be handled properly by sign
  // extension when represented in the *width* of the given narrower data type
  // (the fact that Uint8/Uint16 normally zero extend does not matter here).
  int64_t value = 0;
  if (IsInt64AndGet(instruction, /*out*/ &value)) {
    switch (type) {
      case DataType::Type::kUint8:
      case DataType::Type::kInt8:
        if (IsInt<8>(value)) {
          *operand = instruction;
          return true;
        }
        return false;
      case DataType::Type::kUint16:
      case DataType::Type::kInt16:
        if (IsInt<16>(value)) {
          *operand = instruction;
          return true;
        }
        return false;
      default:
        return false;
    }
  }
  // An implicit widening conversion of any signed expression sign-extends.
  if (instruction->GetType() == type) {
    switch (type) {
      case DataType::Type::kInt8:
      case DataType::Type::kInt16:
        *operand = instruction;
        return true;
      default:
        return false;
    }
  }
  // An explicit widening conversion of a signed expression sign-extends.
  if (instruction->IsTypeConversion()) {
    HInstruction* conv = instruction->InputAt(0);
    DataType::Type from = conv->GetType();
    switch (instruction->GetType()) {
      case DataType::Type::kInt32:
      case DataType::Type::kInt64:
        if (type == from && (from == DataType::Type::kInt8 ||
                             from == DataType::Type::kInt16 ||
                             from == DataType::Type::kInt32)) {
          *operand = conv;
          return true;
        }
        return false;
      case DataType::Type::kInt16:
        return type == DataType::Type::kUint16 &&
               from == DataType::Type::kUint16 &&
               IsZeroExtensionAndGet(instruction->InputAt(0), type, /*out*/ operand);
      default:
        return false;
    }
  }
  return false;
}

// Detect a zero extension in instruction from the given type.
// Returns the promoted operand on success.
static bool IsZeroExtensionAndGet(HInstruction* instruction,
                                  DataType::Type type,
                                  /*out*/ HInstruction** operand) {
  // Accept any already wider constant that would be handled properly by zero
  // extension when represented in the *width* of the given narrower data type
  // (the fact that Int8/Int16 normally sign extend does not matter here).
  int64_t value = 0;
  if (IsInt64AndGet(instruction, /*out*/ &value)) {
    switch (type) {
      case DataType::Type::kUint8:
      case DataType::Type::kInt8:
        if (IsUint<8>(value)) {
          *operand = instruction;
          return true;
        }
        return false;
      case DataType::Type::kUint16:
      case DataType::Type::kInt16:
        if (IsUint<16>(value)) {
          *operand = instruction;
          return true;
        }
        return false;
      default:
        return false;
    }
  }
  // An implicit widening conversion of any unsigned expression zero-extends.
  if (instruction->GetType() == type) {
    switch (type) {
      case DataType::Type::kUint8:
      case DataType::Type::kUint16:
        *operand = instruction;
        return true;
      default:
        return false;
    }
  }
  // An explicit widening conversion of an unsigned expression zero-extends.
  if (instruction->IsTypeConversion()) {
    HInstruction* conv = instruction->InputAt(0);
    DataType::Type from = conv->GetType();
    switch (instruction->GetType()) {
      case DataType::Type::kInt32:
      case DataType::Type::kInt64:
        if (type == from && from == DataType::Type::kUint16) {
          *operand = conv;
          return true;
        }
        return false;
      case DataType::Type::kUint16:
        return type == DataType::Type::kInt16 &&
               from == DataType::Type::kInt16 &&
               IsSignExtensionAndGet(instruction->InputAt(0), type, /*out*/ operand);
      default:
        return false;
    }
  }
  return false;
}
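
// Illustration of the two helpers above (example values only, not part of the
// original logic): for type kUint8, the constant 255 passes
// IsZeroExtensionAndGet (IsUint<8>(255) holds) but fails IsSignExtensionAndGet
// (IsInt<8>(255) does not hold), while the constant -1 behaves exactly the
// other way around.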

// Detect situations with same-extension narrower operands.
// Returns true on success and sets is_unsigned accordingly.
static bool IsNarrowerOperands(HInstruction* a,
                               HInstruction* b,
                               DataType::Type type,
                               /*out*/ HInstruction** r,
                               /*out*/ HInstruction** s,
                               /*out*/ bool* is_unsigned) {
  DCHECK(a != nullptr && b != nullptr);
  // Look for a matching sign extension.
  DataType::Type stype = HVecOperation::ToSignedType(type);
  if (IsSignExtensionAndGet(a, stype, r) && IsSignExtensionAndGet(b, stype, s)) {
    *is_unsigned = false;
    return true;
  }
  // Look for a matching zero extension.
  DataType::Type utype = HVecOperation::ToUnsignedType(type);
  if (IsZeroExtensionAndGet(a, utype, r) && IsZeroExtensionAndGet(b, utype, s)) {
    *is_unsigned = true;
    return true;
  }
  return false;
}

// As above, single operand.
static bool IsNarrowerOperand(HInstruction* a,
                              DataType::Type type,
                              /*out*/ HInstruction** r,
                              /*out*/ bool* is_unsigned) {
  DCHECK(a != nullptr);
  // Look for a matching sign extension.
  DataType::Type stype = HVecOperation::ToSignedType(type);
  if (IsSignExtensionAndGet(a, stype, r)) {
    *is_unsigned = false;
    return true;
  }
  // Look for a matching zero extension.
  DataType::Type utype = HVecOperation::ToUnsignedType(type);
  if (IsZeroExtensionAndGet(a, utype, r)) {
    *is_unsigned = true;
    return true;
  }
  return false;
}

// Compute relative vector length based on type difference.
static uint32_t GetOtherVL(DataType::Type other_type, DataType::Type vector_type, uint32_t vl) {
  DCHECK(DataType::IsIntegralType(other_type));
  DCHECK(DataType::IsIntegralType(vector_type));
  DCHECK_GE(DataType::SizeShift(other_type), DataType::SizeShift(vector_type));
  return vl >> (DataType::SizeShift(other_type) - DataType::SizeShift(vector_type));
}
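
// For illustration: with vector_type kInt8 (size shift 0) and vl = 16 lanes,
// a kInt32 (size shift 2) operation in the same loop gets 16 >> (2 - 0) = 4
// lanes, since four times fewer 32-bit elements fit in the same register.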

// Detect up to two added operands a and b and an accumulated constant c.
static bool IsAddConst(HInstruction* instruction,
                       /*out*/ HInstruction** a,
                       /*out*/ HInstruction** b,
                       /*out*/ int64_t* c,
                       int32_t depth = 8) {  // don't search too deep
  int64_t value = 0;
  // Enter add/sub while still within reasonable depth.
  if (depth > 0) {
    if (instruction->IsAdd()) {
      return IsAddConst(instruction->InputAt(0), a, b, c, depth - 1) &&
             IsAddConst(instruction->InputAt(1), a, b, c, depth - 1);
    } else if (instruction->IsSub() &&
               IsInt64AndGet(instruction->InputAt(1), &value)) {
      *c -= value;
      return IsAddConst(instruction->InputAt(0), a, b, c, depth - 1);
    }
  }
  // Otherwise, deal with leaf nodes.
  if (IsInt64AndGet(instruction, &value)) {
    *c += value;
    return true;
  } else if (*a == nullptr) {
    *a = instruction;
    return true;
  } else if (*b == nullptr) {
    *b = instruction;
    return true;
  }
  return false;  // too many operands
}
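
// For illustration: on the expression ((x + 3) + y) - 2 the traversal walks
// down the add/sub chain and yields a = x, b = y, and c accumulated to
// 3 - 2 = 1. A third non-constant operand would fail the last leaf case.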

// Detect a + b + c with optional constant c.
static bool IsAddConst2(HGraph* graph,
                        HInstruction* instruction,
                        /*out*/ HInstruction** a,
                        /*out*/ HInstruction** b,
                        /*out*/ int64_t* c) {
  // We want an actual add/sub and not the trivial case where {b: 0, c: 0}.
  if (IsAddOrSub(instruction) && IsAddConst(instruction, a, b, c) && *a != nullptr) {
    if (*b == nullptr) {
      // Constant is usually already present, unless accumulated.
      *b = graph->GetConstant(instruction->GetType(), (*c));
      *c = 0;
    }
    return true;
  }
  return false;
}

// Detect a direct a - b or a hidden a - (-c).
static bool IsSubConst2(HGraph* graph,
                        HInstruction* instruction,
                        /*out*/ HInstruction** a,
                        /*out*/ HInstruction** b) {
  int64_t c = 0;
  if (instruction->IsSub()) {
    *a = instruction->InputAt(0);
    *b = instruction->InputAt(1);
    return true;
  } else if (IsAddConst(instruction, a, b, &c) && *a != nullptr && *b == nullptr) {
    // Constant for the hidden subtraction.
    *b = graph->GetConstant(instruction->GetType(), -c);
    return true;
  }
  return false;
}
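
// For illustration: x + (-1) is not a subtraction in the HIR, yet IsSubConst2
// recognizes it as the hidden form x - 1: IsAddConst leaves b == nullptr with
// c == -1, so b is materialized as the constant -c == 1.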

// Detect reductions of the following forms,
//   x = x_phi + ..
//   x = x_phi - ..
static bool HasReductionFormat(HInstruction* reduction, HInstruction* phi) {
  if (reduction->IsAdd()) {
    return (reduction->InputAt(0) == phi && reduction->InputAt(1) != phi) ||
           (reduction->InputAt(0) != phi && reduction->InputAt(1) == phi);
  } else if (reduction->IsSub()) {
    return (reduction->InputAt(0) == phi && reduction->InputAt(1) != phi);
  }
  return false;
}

// Translates a vector operation to its reduction kind.
static HVecReduce::ReductionKind GetReductionKind(HVecOperation* reduction) {
  if (reduction->IsVecAdd() ||
      reduction->IsVecSub() ||
      reduction->IsVecSADAccumulate() ||
      reduction->IsVecDotProd()) {
    return HVecReduce::kSum;
  }
  LOG(FATAL) << "Unsupported SIMD reduction " << reduction->GetId();
  UNREACHABLE();
}

// Test vector restrictions.
static bool HasVectorRestrictions(uint64_t restrictions, uint64_t tested) {
  return (restrictions & tested) != 0;
}

// Insert an instruction just before the block's last instruction.
static HInstruction* Insert(HBasicBlock* block, HInstruction* instruction) {
  DCHECK(block != nullptr);
  DCHECK(instruction != nullptr);
  block->InsertInstructionBefore(instruction, block->GetLastInstruction());
  return instruction;
}

// Check that instructions from the induction sets are fully removed: have no uses
// and no other instructions use them.
static bool CheckInductionSetFullyRemoved(ScopedArenaSet<HInstruction*>* iset) {
  for (HInstruction* instr : *iset) {
    if (instr->GetBlock() != nullptr ||
        !instr->GetUses().empty() ||
        !instr->GetEnvUses().empty() ||
        HasEnvironmentUsedByOthers(instr)) {
      return false;
    }
  }
  return true;
}

// Tries to statically evaluate the condition of the specified "HIf" for other condition checks.
static void TryToEvaluateIfCondition(HIf* instruction, HGraph* graph) {
  HInstruction* cond = instruction->InputAt(0);

  // If a condition 'cond' is evaluated in an HIf instruction then in the successors of the
  // IF_BLOCK we statically know the value of the condition 'cond' (TRUE in TRUE_SUCC, FALSE in
  // FALSE_SUCC). Using that we can replace another evaluation (use) EVAL of the same 'cond'
  // with TRUE value (FALSE value) if every path from the ENTRY_BLOCK to EVAL_BLOCK contains the
  // edge HIF_BLOCK->TRUE_SUCC (HIF_BLOCK->FALSE_SUCC).
  //     if (cond) {               if (cond) {
  //       if (cond) {}              if (1) {}
  //     } else {        =======>  } else {
  //       if (cond) {}              if (0) {}
  //     }                         }
  if (!cond->IsConstant()) {
    HBasicBlock* true_succ = instruction->IfTrueSuccessor();
    HBasicBlock* false_succ = instruction->IfFalseSuccessor();

    DCHECK_EQ(true_succ->GetPredecessors().size(), 1u);
    DCHECK_EQ(false_succ->GetPredecessors().size(), 1u);

    const HUseList<HInstruction*>& uses = cond->GetUses();
    for (auto it = uses.begin(), end = uses.end(); it != end; /* ++it below */) {
      HInstruction* user = it->GetUser();
      size_t index = it->GetIndex();
      HBasicBlock* user_block = user->GetBlock();
      // Increment `it` now because `*it` may disappear thanks to user->ReplaceInput().
      ++it;
      if (true_succ->Dominates(user_block)) {
        user->ReplaceInput(graph->GetIntConstant(1), index);
      } else if (false_succ->Dominates(user_block)) {
        user->ReplaceInput(graph->GetIntConstant(0), index);
      }
    }
  }
}

// Peel the first 'count' iterations of the loop.
static void PeelByCount(HLoopInformation* loop_info,
                        int count,
                        InductionVarRange* induction_range) {
  for (int i = 0; i < count; i++) {
    // Perform peeling.
    LoopClonerSimpleHelper helper(loop_info, induction_range);
    helper.DoPeeling();
  }
}

// Returns the narrower of the types of instructions a and b, looking through
// type conversions to the original, narrower input type where applicable.
static DataType::Type GetNarrowerType(HInstruction* a, HInstruction* b) {
  DataType::Type type = a->GetType();
  if (DataType::Size(b->GetType()) < DataType::Size(type)) {
    type = b->GetType();
  }
  if (a->IsTypeConversion() &&
      DataType::Size(a->InputAt(0)->GetType()) < DataType::Size(type)) {
    type = a->InputAt(0)->GetType();
  }
  if (b->IsTypeConversion() &&
      DataType::Size(b->InputAt(0)->GetType()) < DataType::Size(type)) {
    type = b->InputAt(0)->GetType();
  }
  return type;
}
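
// For illustration: if a is a conversion from kInt8 to kInt32 and b has type
// kInt16, the result is kInt8: b's type first narrows the answer to kInt16,
// then looking through a's conversion narrows it further to kInt8.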

//
// Public methods.
//

HLoopOptimization::HLoopOptimization(HGraph* graph,
                                     const CodeGenerator& codegen,
                                     HInductionVarAnalysis* induction_analysis,
                                     OptimizingCompilerStats* stats,
                                     const char* name)
    : HOptimization(graph, name, stats),
      compiler_options_(&codegen.GetCompilerOptions()),
      simd_register_size_(codegen.GetSIMDRegisterWidth()),
      induction_range_(induction_analysis),
      loop_allocator_(nullptr),
      global_allocator_(graph_->GetAllocator()),
      top_loop_(nullptr),
      last_loop_(nullptr),
      iset_(nullptr),
      reductions_(nullptr),
      simplified_(false),
      vector_length_(0),
      vector_refs_(nullptr),
      vector_static_peeling_factor_(0),
      vector_dynamic_peeling_candidate_(nullptr),
      vector_runtime_test_a_(nullptr),
      vector_runtime_test_b_(nullptr),
      vector_map_(nullptr),
      vector_permanent_map_(nullptr),
      vector_mode_(kSequential),
      vector_preheader_(nullptr),
      vector_header_(nullptr),
      vector_body_(nullptr),
      vector_index_(nullptr),
      arch_loop_helper_(ArchNoOptsLoopHelper::Create(compiler_options_ != nullptr
                                                          ? compiler_options_->GetInstructionSet()
                                                          : InstructionSet::kNone,
                                                      global_allocator_)) {
}

bool HLoopOptimization::Run() {
  // Skip if there is no loop or the graph has try-catch/irreducible loops.
  // TODO: make this less of a sledgehammer.
  if (!graph_->HasLoops() || graph_->HasTryCatch() || graph_->HasIrreducibleLoops()) {
    return false;
  }

  // Phase-local allocator.
  ScopedArenaAllocator allocator(graph_->GetArenaStack());
  loop_allocator_ = &allocator;

  // Perform loop optimizations.
  bool didLoopOpt = LocalRun();
  if (top_loop_ == nullptr) {
    graph_->SetHasLoops(false);  // no more loops
  }

  // Detach.
  loop_allocator_ = nullptr;
  last_loop_ = top_loop_ = nullptr;

  return didLoopOpt;
}

//
// Loop setup and traversal.
//

bool HLoopOptimization::LocalRun() {
  bool didLoopOpt = false;
  // Build the linear order using the phase-local allocator. This step enables building
  // a loop hierarchy that properly reflects the outer-inner and previous-next relation.
  ScopedArenaVector<HBasicBlock*> linear_order(loop_allocator_->Adapter(kArenaAllocLinearOrder));
  LinearizeGraph(graph_, &linear_order);

  // Build the loop hierarchy.
  for (HBasicBlock* block : linear_order) {
    if (block->IsLoopHeader()) {
      AddLoop(block->GetLoopInformation());
    }
  }

  // Traverse the loop hierarchy inner-to-outer and optimize. Traversal can use
  // temporary data structures allocated with the phase-local allocator. All new HIR
  // should use the global allocator.
  if (top_loop_ != nullptr) {
    ScopedArenaSet<HInstruction*> iset(loop_allocator_->Adapter(kArenaAllocLoopOptimization));
    ScopedArenaSafeMap<HInstruction*, HInstruction*> reds(
        std::less<HInstruction*>(), loop_allocator_->Adapter(kArenaAllocLoopOptimization));
    ScopedArenaSet<ArrayReference> refs(loop_allocator_->Adapter(kArenaAllocLoopOptimization));
    ScopedArenaSafeMap<HInstruction*, HInstruction*> map(
        std::less<HInstruction*>(), loop_allocator_->Adapter(kArenaAllocLoopOptimization));
    ScopedArenaSafeMap<HInstruction*, HInstruction*> perm(
        std::less<HInstruction*>(), loop_allocator_->Adapter(kArenaAllocLoopOptimization));
    // Attach.
    iset_ = &iset;
    reductions_ = &reds;
    vector_refs_ = &refs;
    vector_map_ = &map;
    vector_permanent_map_ = &perm;
    // Traverse.
    didLoopOpt = TraverseLoopsInnerToOuter(top_loop_);
    // Detach.
    iset_ = nullptr;
    reductions_ = nullptr;
    vector_refs_ = nullptr;
    vector_map_ = nullptr;
    vector_permanent_map_ = nullptr;
  }
  return didLoopOpt;
}

void HLoopOptimization::AddLoop(HLoopInformation* loop_info) {
  DCHECK(loop_info != nullptr);
  LoopNode* node = new (loop_allocator_) LoopNode(loop_info);
  if (last_loop_ == nullptr) {
    // First loop.
    DCHECK(top_loop_ == nullptr);
    last_loop_ = top_loop_ = node;
  } else if (loop_info->IsIn(*last_loop_->loop_info)) {
    // Inner loop.
    node->outer = last_loop_;
    DCHECK(last_loop_->inner == nullptr);
    last_loop_ = last_loop_->inner = node;
  } else {
    // Subsequent loop.
    while (last_loop_->outer != nullptr && !loop_info->IsIn(*last_loop_->outer->loop_info)) {
      last_loop_ = last_loop_->outer;
    }
    node->outer = last_loop_->outer;
    node->previous = last_loop_;
    DCHECK(last_loop_->next == nullptr);
    last_loop_ = last_loop_->next = node;
  }
}

void HLoopOptimization::RemoveLoop(LoopNode* node) {
  DCHECK(node != nullptr);
  DCHECK(node->inner == nullptr);
  if (node->previous != nullptr) {
    // Within sequence.
    node->previous->next = node->next;
    if (node->next != nullptr) {
      node->next->previous = node->previous;
    }
  } else {
    // First of sequence.
    if (node->outer != nullptr) {
      node->outer->inner = node->next;
    } else {
      top_loop_ = node->next;
    }
    if (node->next != nullptr) {
      node->next->outer = node->outer;
      node->next->previous = nullptr;
    }
  }
}

bool HLoopOptimization::TraverseLoopsInnerToOuter(LoopNode* node) {
  bool changed = false;
  for ( ; node != nullptr; node = node->next) {
    // Visit inner loops first. Recompute induction information for this
    // loop if the induction of any inner loop has changed.
    if (TraverseLoopsInnerToOuter(node->inner)) {
      induction_range_.ReVisit(node->loop_info);
      changed = true;
    }
    // Repeat simplifications in the loop-body until no more changes occur.
    // Note that since each simplification consists of eliminating code (without
    // introducing new code), this process is always finite.
    do {
      simplified_ = false;
      SimplifyInduction(node);
      SimplifyBlocks(node);
      changed = simplified_ || changed;
    } while (simplified_);
    // Optimize inner loop.
    if (node->inner == nullptr) {
      changed = OptimizeInnerLoop(node) || changed;
    }
  }
  return changed;
}

//
// Optimization.
//

void HLoopOptimization::SimplifyInduction(LoopNode* node) {
  HBasicBlock* header = node->loop_info->GetHeader();
  HBasicBlock* preheader = node->loop_info->GetPreHeader();
  // Scan the phis in the header to find opportunities to simplify an induction
  // cycle that is only used outside the loop. Replace these uses, if any, with
  // the last value and remove the induction cycle.
  // Examples: for (int i = 0; x != null;   i++) { .... no i .... }
  //           for (int i = 0; i < 10; i++, k++) { .... no k .... } return k;
  for (HInstructionIterator it(header->GetPhis()); !it.Done(); it.Advance()) {
    HPhi* phi = it.Current()->AsPhi();
    if (TrySetPhiInduction(phi, /*restrict_uses*/ true) &&
        TryAssignLastValue(node->loop_info, phi, preheader, /*collect_loop_uses*/ false)) {
      // Note that it's ok to have replaced uses after the loop with the last value, without
      // being able to remove the cycle. Environment uses (which are the reason we may not be
      // able to remove the cycle) within the loop will still hold the right value. We must
      // have tried first, however, to replace outside uses.
      if (CanRemoveCycle()) {
        simplified_ = true;
        for (HInstruction* i : *iset_) {
          RemoveFromCycle(i);
        }
        DCHECK(CheckInductionSetFullyRemoved(iset_));
      }
    }
  }
}

void HLoopOptimization::SimplifyBlocks(LoopNode* node) {
  // Iterate over all basic blocks in the loop-body.
  for (HBlocksInLoopIterator it(*node->loop_info); !it.Done(); it.Advance()) {
    HBasicBlock* block = it.Current();
    // Remove dead instructions from the loop-body.
    RemoveDeadInstructions(block->GetPhis());
    RemoveDeadInstructions(block->GetInstructions());
    // Remove trivial control flow blocks from the loop-body.
    if (block->GetPredecessors().size() == 1 &&
        block->GetSuccessors().size() == 1 &&
        block->GetSingleSuccessor()->GetPredecessors().size() == 1) {
      simplified_ = true;
      block->MergeWith(block->GetSingleSuccessor());
    } else if (block->GetSuccessors().size() == 2) {
      // Trivial if block can be bypassed to either branch.
      HBasicBlock* succ0 = block->GetSuccessors()[0];
      HBasicBlock* succ1 = block->GetSuccessors()[1];
      HBasicBlock* meet0 = nullptr;
      HBasicBlock* meet1 = nullptr;
      if (succ0 != succ1 &&
          IsGotoBlock(succ0, &meet0) &&
          IsGotoBlock(succ1, &meet1) &&
          meet0 == meet1 &&  // meets again
          meet0 != block &&  // no self-loop
          meet0->GetPhis().IsEmpty()) {  // not used for merging
        simplified_ = true;
        succ0->DisconnectAndDelete();
        if (block->Dominates(meet0)) {
          block->RemoveDominatedBlock(meet0);
          succ1->AddDominatedBlock(meet0);
          meet0->SetDominator(succ1);
        }
      }
    }
  }
}

bool HLoopOptimization::TryOptimizeInnerLoopFinite(LoopNode* node) {
  HBasicBlock* header = node->loop_info->GetHeader();
  HBasicBlock* preheader = node->loop_info->GetPreHeader();
  // Ensure loop header logic is finite.
  int64_t trip_count = 0;
  if (!induction_range_.IsFinite(node->loop_info, &trip_count)) {
    return false;
  }
  // Ensure there is only a single loop-body (besides the header).
  HBasicBlock* body = nullptr;
  for (HBlocksInLoopIterator it(*node->loop_info); !it.Done(); it.Advance()) {
    if (it.Current() != header) {
      if (body != nullptr) {
        return false;
      }
      body = it.Current();
    }
  }
  CHECK(body != nullptr);
  // Ensure there is only a single exit point.
  if (header->GetSuccessors().size() != 2) {
    return false;
  }
  HBasicBlock* exit = (header->GetSuccessors()[0] == body)
      ? header->GetSuccessors()[1]
      : header->GetSuccessors()[0];
  // Ensure exit can only be reached by exiting loop.
  if (exit->GetPredecessors().size() != 1) {
    return false;
  }
  // Detect either an empty loop (no side effects other than plain iteration) or
  // a trivial loop (just iterating once). Replace subsequent index uses, if any,
  // with the last value and remove the loop, possibly after unrolling its body.
  HPhi* main_phi = nullptr;
  if (TrySetSimpleLoopHeader(header, &main_phi)) {
    bool is_empty = IsEmptyBody(body);
    if (reductions_->empty() &&  // TODO: possible with some effort
        (is_empty || trip_count == 1) &&
        TryAssignLastValue(node->loop_info, main_phi, preheader, /*collect_loop_uses*/ true)) {
      if (!is_empty) {
        // Unroll the loop-body, which sees initial value of the index.
        main_phi->ReplaceWith(main_phi->InputAt(0));
        preheader->MergeInstructionsWith(body);
      }
      body->DisconnectAndDelete();
      exit->RemovePredecessor(header);
      header->RemoveSuccessor(exit);
      header->RemoveDominatedBlock(exit);
      header->DisconnectAndDelete();
      preheader->AddSuccessor(exit);
      preheader->AddInstruction(new (global_allocator_) HGoto());
      preheader->AddDominatedBlock(exit);
      exit->SetDominator(preheader);
      RemoveLoop(node);  // update hierarchy
      return true;
    }
  }
  // Vectorize loop, if possible and valid.
  if (kEnableVectorization &&
      // Disable vectorization for debuggable graphs: this is a workaround for the bug
      // in 'GenerateNewLoop' which caused the SuspendCheck environment to be invalid.
      // TODO: b/138601207, investigate other possible cases with wrong environment values and
      // possibly switch vectorization back on for debuggable graphs.
      !graph_->IsDebuggable() &&
      TrySetSimpleLoopHeader(header, &main_phi) &&
      ShouldVectorize(node, body, trip_count) &&
      TryAssignLastValue(node->loop_info, main_phi, preheader, /*collect_loop_uses*/ true)) {
    Vectorize(node, body, exit, trip_count);
    graph_->SetHasSIMD(true);  // flag SIMD usage
    MaybeRecordStat(stats_, MethodCompilationStat::kLoopVectorized);
    return true;
  }
  return false;
}

bool HLoopOptimization::OptimizeInnerLoop(LoopNode* node) {
  return TryOptimizeInnerLoopFinite(node) || TryPeelingAndUnrolling(node);
}

//
// Scalar loop peeling and unrolling: generic part methods.
//

bool HLoopOptimization::TryUnrollingForBranchPenaltyReduction(LoopAnalysisInfo* analysis_info,
                                                              bool generate_code) {
  if (analysis_info->GetNumberOfExits() > 1) {
    return false;
  }

  uint32_t unrolling_factor = arch_loop_helper_->GetScalarUnrollingFactor(analysis_info);
  if (unrolling_factor == LoopAnalysisInfo::kNoUnrollingFactor) {
    return false;
  }

  if (generate_code) {
    // TODO: support other unrolling factors.
    DCHECK_EQ(unrolling_factor, 2u);

    // Perform unrolling.
    HLoopInformation* loop_info = analysis_info->GetLoopInfo();
    LoopClonerSimpleHelper helper(loop_info, &induction_range_);
    helper.DoUnrolling();

    // Remove the redundant loop check after unrolling.
    HIf* copy_hif =
        helper.GetBasicBlockMap()->Get(loop_info->GetHeader())->GetLastInstruction()->AsIf();
    int32_t constant = loop_info->Contains(*copy_hif->IfTrueSuccessor()) ? 1 : 0;
    copy_hif->ReplaceInput(graph_->GetIntConstant(constant), 0u);
  }
  return true;
}

bool HLoopOptimization::TryPeelingForLoopInvariantExitsElimination(LoopAnalysisInfo* analysis_info,
                                                                   bool generate_code) {
  HLoopInformation* loop_info = analysis_info->GetLoopInfo();
  if (!arch_loop_helper_->IsLoopPeelingEnabled()) {
    return false;
  }

  if (analysis_info->GetNumberOfInvariantExits() == 0) {
    return false;
  }

  if (generate_code) {
    // Perform peeling.
    LoopClonerSimpleHelper helper(loop_info, &induction_range_);
    helper.DoPeeling();

    // Statically evaluate loop check after peeling for loop invariant condition.
    const SuperblockCloner::HInstructionMap* hir_map = helper.GetInstructionMap();
    for (auto entry : *hir_map) {
      HInstruction* copy = entry.second;
      if (copy->IsIf()) {
        TryToEvaluateIfCondition(copy->AsIf(), graph_);
      }
    }
  }

  return true;
}

bool HLoopOptimization::TryFullUnrolling(LoopAnalysisInfo* analysis_info, bool generate_code) {
  // Fully unroll loops with a known and small trip count.
  int64_t trip_count = analysis_info->GetTripCount();
  if (!arch_loop_helper_->IsLoopPeelingEnabled() ||
      trip_count == LoopAnalysisInfo::kUnknownTripCount ||
      !arch_loop_helper_->IsFullUnrollingBeneficial(analysis_info)) {
    return false;
  }

  if (generate_code) {
    // Peeling the first N iterations (where N equals the trip count) will effectively
    // eliminate the loop: after peeling we will have N sequential iterations copied into the loop
    // preheader and the original loop. The trip count of this loop will be 0 as the sequential
    // iterations are executed first and there are exactly N of them. Thus we can statically
    // evaluate the loop exit condition to 'false' and fully eliminate it.
    //
    // Here is an example of full unrolling of a loop with a trip count of 2:
    //
    //                                           loop_cond_1
    //                                           loop_body_1        <- First iteration.
    //                                               |
    //                             \                 v
    //                            ==\            loop_cond_2
    //                            ==/            loop_body_2        <- Second iteration.
    //                             /                 |
    //               <-                              v     <-
    //     loop_cond   \                         loop_cond   \      <- This cond is always false.
    //     loop_body  _/                         loop_body  _/
    //
    HLoopInformation* loop_info = analysis_info->GetLoopInfo();
    PeelByCount(loop_info, trip_count, &induction_range_);
    HIf* loop_hif = loop_info->GetHeader()->GetLastInstruction()->AsIf();
    int32_t constant = loop_info->Contains(*loop_hif->IfTrueSuccessor()) ? 0 : 1;
    loop_hif->ReplaceInput(graph_->GetIntConstant(constant), 0u);
  }

  return true;
}

bool HLoopOptimization::TryPeelingAndUnrolling(LoopNode* node) {
  HLoopInformation* loop_info = node->loop_info;
  int64_t trip_count = LoopAnalysis::GetLoopTripCount(loop_info, &induction_range_);
  LoopAnalysisInfo analysis_info(loop_info);
  LoopAnalysis::CalculateLoopBasicProperties(loop_info, &analysis_info, trip_count);

  if (analysis_info.HasInstructionsPreventingScalarOpts() ||
      arch_loop_helper_->IsLoopNonBeneficialForScalarOpts(&analysis_info)) {
    return false;
  }

  if (!TryFullUnrolling(&analysis_info, /*generate_code*/ false) &&
      !TryPeelingForLoopInvariantExitsElimination(&analysis_info, /*generate_code*/ false) &&
      !TryUnrollingForBranchPenaltyReduction(&analysis_info, /*generate_code*/ false)) {
    return false;
  }

  // Run 'IsLoopClonable' last as it might be time-consuming.
  if (!LoopClonerHelper::IsLoopClonable(loop_info)) {
    return false;
  }

  return TryFullUnrolling(&analysis_info) ||
         TryPeelingForLoopInvariantExitsElimination(&analysis_info) ||
         TryUnrollingForBranchPenaltyReduction(&analysis_info);
}

//
// Loop vectorization. The implementation is based on the book by Aart J.C. Bik:
// "The Software Vectorization Handbook. Applying Multimedia Extensions for Maximum Performance."
// Intel Press, June, 2004 (http://www.aartbik.com/).
//

bool HLoopOptimization::ShouldVectorize(LoopNode* node, HBasicBlock* block, int64_t trip_count) {
  // Reset vector bookkeeping.
  vector_length_ = 0;
  vector_refs_->clear();
  vector_static_peeling_factor_ = 0;
  vector_dynamic_peeling_candidate_ = nullptr;
  vector_runtime_test_a_ =
  vector_runtime_test_b_ = nullptr;

  // Phis in the loop-body prevent vectorization.
  if (!block->GetPhis().IsEmpty()) {
    return false;
  }

  // Scan the loop-body, starting a right-hand-side tree traversal at each left-hand-side
  // occurrence, which allows passing attributes down the use tree.
  for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
    if (!VectorizeDef(node, it.Current(), /*generate_code*/ false)) {
      return false;  // failure to vectorize a left-hand-side
    }
  }

  // Prepare alignment analysis:
  // (1) find desired alignment (SIMD vector size in bytes).
  // (2) initialize static loop peeling votes (peeling factor that will
  //     make one particular reference aligned), never to exceed (1).
  // (3) variable to record how many references share same alignment.
  // (4) variable to record suitable candidate for dynamic loop peeling.
  uint32_t desired_alignment = GetVectorSizeInBytes();
  DCHECK_LE(desired_alignment, 16u);
  uint32_t peeling_votes[16] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
  uint32_t max_num_same_alignment = 0;
  const ArrayReference* peeling_candidate = nullptr;

  // Data dependence analysis. Find each pair of references with same type, where
  // at least one is a write. Each such pair denotes a possible data dependence.
  // This analysis exploits the property that differently typed arrays cannot be
  // aliased, as well as the property that references either point to the same
  // array or to two completely disjoint arrays, i.e., no partial aliasing.
  // Other than a few simple heuristics, no detailed subscript analysis is done.
  // The scan over references also prepares finding a suitable alignment strategy.
  for (auto i = vector_refs_->begin(); i != vector_refs_->end(); ++i) {
    uint32_t num_same_alignment = 0;
    // Scan over all next references.
    for (auto j = i; ++j != vector_refs_->end(); ) {
      if (i->type == j->type && (i->lhs || j->lhs)) {
        // Found same-typed a[i+x] vs. b[i+y], where at least one is a write.
        HInstruction* a = i->base;
        HInstruction* b = j->base;
        HInstruction* x = i->offset;
        HInstruction* y = j->offset;
        if (a == b) {
          // Found a[i+x] vs. a[i+y]. Accept if x == y (loop-independent data dependence).
          // Conservatively assume a loop-carried data dependence otherwise, and reject.
          if (x != y) {
            return false;
          }
          // Count the number of references that have the same alignment (since
          // base and offset are the same) and where at least one is a write, so
          // e.g. a[i] = a[i] + b[i] counts a[i] but not b[i].
          num_same_alignment++;
        } else {
          // Found a[i+x] vs. b[i+y]. Accept if x == y (at worst loop-independent data dependence).
          // Conservatively assume a potential loop-carried data dependence otherwise, avoided by
          // generating an explicit a != b disambiguation runtime test on the two references.
          if (x != y) {
            // To avoid excessive overhead, we only accept one a != b test.
            if (vector_runtime_test_a_ == nullptr) {
              // First test found.
              vector_runtime_test_a_ = a;
              vector_runtime_test_b_ = b;
            } else if ((vector_runtime_test_a_ != a || vector_runtime_test_b_ != b) &&
                       (vector_runtime_test_a_ != b || vector_runtime_test_b_ != a)) {
              return false;  // second test would be needed
            }
          }
        }
      }
    }
    // Update information for finding suitable alignment strategy:
    // (1) update votes for static loop peeling,
    // (2) update suitable candidate for dynamic loop peeling.
    Alignment alignment = ComputeAlignment(i->offset, i->type, i->is_string_char_at);
    if (alignment.Base() >= desired_alignment) {
      // If the array/string object has a known, sufficient alignment, use the
      // initial offset to compute the static loop peeling vote (this always
      // works, since elements have natural alignment).
      uint32_t offset = alignment.Offset() & (desired_alignment - 1u);
      uint32_t vote = (offset == 0)
          ? 0
          : ((desired_alignment - offset) >> DataType::SizeShift(i->type));
      DCHECK_LT(vote, 16u);
      ++peeling_votes[vote];
    } else if (BaseAlignment() >= desired_alignment &&
               num_same_alignment > max_num_same_alignment) {
      // Otherwise, if the array/string object has a known, sufficient alignment
      // for just the base but with an unknown offset, record the candidate with
      // the most occurrences for dynamic loop peeling (again, the peeling always
      // works, since elements have natural alignment).
      max_num_same_alignment = num_same_alignment;
      peeling_candidate = &(*i);
    }
  }  // for i
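
  // Worked example of the static peeling vote above (illustrative values):
  // with a 16-byte SIMD register (desired_alignment = 16) and a kInt32
  // reference (size shift 2) whose offset is 4 bytes past a 16-byte boundary,
  // the vote is (16 - 4) >> 2 = 3, i.e. peeling 3 elements (12 bytes)
  // realigns the reference to the 16-byte boundary.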

  // Find a suitable alignment strategy.
  SetAlignmentStrategy(peeling_votes, peeling_candidate);

  // Does vectorization seem profitable?
  if (!IsVectorizationProfitable(trip_count)) {
    return false;
  }

  // Success!
  return true;
}

void HLoopOptimization::Vectorize(LoopNode* node,
                                  HBasicBlock* block,
                                  HBasicBlock* exit,
                                  int64_t trip_count) {
  HBasicBlock* header = node->loop_info->GetHeader();
  HBasicBlock* preheader = node->loop_info->GetPreHeader();

  // Pick a loop unrolling factor for the vector loop.
  uint32_t unroll = arch_loop_helper_->GetSIMDUnrollingFactor(
      block, trip_count, MaxNumberPeeled(), vector_length_);
  uint32_t chunk = vector_length_ * unroll;

  DCHECK(trip_count == 0 || (trip_count >= MaxNumberPeeled() + chunk));

  // A cleanup loop is needed, at least, for any unknown trip count or
  // for a known trip count with remainder iterations after vectorization.
  bool needs_cleanup = trip_count == 0 ||
      ((trip_count - vector_static_peeling_factor_) % chunk) != 0;

  // Adjust vector bookkeeping.
  HPhi* main_phi = nullptr;
  bool is_simple_loop_header = TrySetSimpleLoopHeader(header, &main_phi);  // refills sets
  DCHECK(is_simple_loop_header);
  vector_header_ = header;
  vector_body_ = block;

  // Loop induction type.
  DataType::Type induc_type = main_phi->GetType();
  DCHECK(induc_type == DataType::Type::kInt32 || induc_type == DataType::Type::kInt64)
      << induc_type;

  // Generate the trip count for static or dynamic loop peeling, if needed:
  // ptc = <peeling factor>;
  HInstruction* ptc = nullptr;
  if (vector_static_peeling_factor_ != 0) {
    // Static loop peeling for SIMD alignment (using the most suitable
    // fixed peeling factor found during prior alignment analysis).
    DCHECK(vector_dynamic_peeling_candidate_ == nullptr);
    ptc = graph_->GetConstant(induc_type, vector_static_peeling_factor_);
  } else if (vector_dynamic_peeling_candidate_ != nullptr) {
    // Dynamic loop peeling for SIMD alignment (using the most suitable
    // candidate found during prior alignment analysis):
    // rem = offset % ALIGN;    // adjusted as #elements
    // ptc = rem == 0 ? 0 : (ALIGN - rem);
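    //
    // For illustration (example values only): with 16-byte vectors and
    // kInt32 elements, ALIGN is 16 >> 2 = 4 elements; if at runtime
    // rem evaluates to 1, then ptc = 4 - 1 = 3 iterations are peeled
    // before the aligned vector loop starts.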
    uint32_t shift = DataType::SizeShift(vector_dynamic_peeling_candidate_->type);
    uint32_t align = GetVectorSizeInBytes() >> shift;
    uint32_t hidden_offset = HiddenOffset(vector_dynamic_peeling_candidate_->type,
                                          vector_dynamic_peeling_candidate_->is_string_char_at);
    HInstruction* adjusted_offset = graph_->GetConstant(induc_type, hidden_offset >> shift);
    HInstruction* offset = Insert(preheader, new (global_allocator_) HAdd(
        induc_type, vector_dynamic_peeling_candidate_->offset, adjusted_offset));
    HInstruction* rem = Insert(preheader, new (global_allocator_) HAnd(
        induc_type, offset, graph_->GetConstant(induc_type, align - 1u)));
    HInstruction* sub = Insert(preheader, new (global_allocator_) HSub(
        induc_type, graph_->GetConstant(induc_type, align), rem));
    HInstruction* cond = Insert(preheader, new (global_allocator_) HEqual(
        rem, graph_->GetConstant(induc_type, 0)));
    ptc = Insert(preheader, new (global_allocator_) HSelect(
        cond, graph_->GetConstant(induc_type, 0), sub, kNoDexPc));
    needs_cleanup = true;  // don't know the exact amount
  }

  // Generate loop control:
  // stc = <trip-count>;
  // ptc = min(stc, ptc);
  // vtc = stc - (stc - ptc) % chunk;
  // i = 0;
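  //
  // Worked example of the formulas above (illustrative values only): for
  // stc = 100, ptc = 3 and a power-of-two chunk of 8, the remainder is
  // (100 - 3) & 7 = 1, so vtc = 100 - 1 = 99; the peeling loop then runs
  // i = 0..2, the vector loop i = 3..98 in steps of 8, and the cleanup
  // loop handles i = 99.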
  HInstruction* stc = induction_range_.GenerateTripCount(node->loop_info, graph_, preheader);
  HInstruction* vtc = stc;
  if (needs_cleanup) {
    DCHECK(IsPowerOfTwo(chunk));
    HInstruction* diff = stc;
    if (ptc != nullptr) {
      if (trip_count == 0) {
        HInstruction* cond = Insert(preheader, new (global_allocator_) HAboveOrEqual(stc, ptc));
        ptc = Insert(preheader, new (global_allocator_) HSelect(cond, ptc, stc, kNoDexPc));
      }
      diff = Insert(preheader, new (global_allocator_) HSub(induc_type, stc, ptc));
    }
    HInstruction* rem = Insert(
        preheader, new (global_allocator_) HAnd(induc_type,
                                                diff,
                                                graph_->GetConstant(induc_type, chunk - 1)));
    vtc = Insert(preheader, new (global_allocator_) HSub(induc_type, stc, rem));
  }
  vector_index_ = graph_->GetConstant(induc_type, 0);

  // Generate runtime disambiguation test:
  // vtc = a != b ? vtc : 0;
  if (vector_runtime_test_a_ != nullptr) {
    HInstruction* rt = Insert(
        preheader,
        new (global_allocator_) HNotEqual(vector_runtime_test_a_, vector_runtime_test_b_));
    vtc = Insert(preheader,
                 new (global_allocator_)
                 HSelect(rt, vtc, graph_->GetConstant(induc_type, 0), kNoDexPc));
    needs_cleanup = true;
  }

  // Generate alignment peeling loop, if needed:
  // for ( ; i < ptc; i += 1)
  //    <loop-body>
  //
  // NOTE: The alignment forced by the peeling loop is preserved even if data is
  //       moved around during suspend checks, since all analysis was based on
  //       nothing more than the Android runtime alignment conventions.
  if (ptc != nullptr) {
    vector_mode_ = kSequential;
    GenerateNewLoop(node,
                    block,
                    graph_->TransformLoopForVectorization(vector_header_, vector_body_, exit),
                    vector_index_,
                    ptc,
                    graph_->GetConstant(induc_type, 1),
                    LoopAnalysisInfo::kNoUnrollingFactor);
  }

  // Generate vector loop, possibly further unrolled:
  // for ( ; i < vtc; i += chunk)
  //    <vectorized-loop-body>
  vector_mode_ = kVector;
  GenerateNewLoop(node,
                  block,
                  graph_->TransformLoopForVectorization(vector_header_, vector_body_, exit),
                  vector_index_,
                  vtc,
                  graph_->GetConstant(induc_type, vector_length_),  // increment per unroll
                  unroll);
  HLoopInformation* vloop = vector_header_->GetLoopInformation();

  // Generate cleanup loop, if needed:
  // for ( ; i < stc; i += 1)
  //    <loop-body>
  if (needs_cleanup) {
    vector_mode_ = kSequential;
    GenerateNewLoop(node,
                    block,
                    graph_->TransformLoopForVectorization(vector_header_, vector_body_, exit),
                    vector_index_,
                    stc,
                    graph_->GetConstant(induc_type, 1),
                    LoopAnalysisInfo::kNoUnrollingFactor);
  }

  // Link reductions to their final uses.
  for (auto i = reductions_->begin(); i != reductions_->end(); ++i) {
    if (i->first->IsPhi()) {
      HInstruction* phi = i->first;
      HInstruction* repl = ReduceAndExtractIfNeeded(i->second);
      // Deal with regular uses.
      for (const HUseListNode<HInstruction*>& use : phi->GetUses()) {
        induction_range_.Replace(use.GetUser(), phi, repl);  // update induction use
      }
      phi->ReplaceWith(repl);
    }
  }

  // Remove the original loop by disconnecting the body block
  // and removing all instructions from the header.
  block->DisconnectAndDelete();
  while (!header->GetFirstInstruction()->IsGoto()) {
    header->RemoveInstruction(header->GetFirstInstruction());
  }

  // Update loop hierarchy: the old header now resides in the same outer loop
  // as the old preheader. Note that we don't bother putting sequential
  // loops back in the hierarchy at this point.
  header->SetLoopInformation(preheader->GetLoopInformation());  // outward
  node->loop_info = vloop;
}
1209 
void HLoopOptimization::GenerateNewLoop(LoopNode* node,
                                        HBasicBlock* block,
                                        HBasicBlock* new_preheader,
                                        HInstruction* lo,
                                        HInstruction* hi,
                                        HInstruction* step,
                                        uint32_t unroll) {
  DCHECK(unroll == 1 || vector_mode_ == kVector);
  DataType::Type induc_type = lo->GetType();
  // Prepare new loop.
  vector_preheader_ = new_preheader;
  vector_header_ = vector_preheader_->GetSingleSuccessor();
  vector_body_ = vector_header_->GetSuccessors()[1];
  HPhi* phi = new (global_allocator_) HPhi(global_allocator_,
                                           kNoRegNumber,
                                           0,
                                           HPhi::ToPhiType(induc_type));
  // Generate header and prepare body.
  // for (i = lo; i < hi; i += step)
  //    <loop-body>
  HInstruction* cond = new (global_allocator_) HAboveOrEqual(phi, hi);
  vector_header_->AddPhi(phi);
  vector_header_->AddInstruction(cond);
  vector_header_->AddInstruction(new (global_allocator_) HIf(cond));
  vector_index_ = phi;
  vector_permanent_map_->clear();  // preserved over unrolling
  for (uint32_t u = 0; u < unroll; u++) {
    // Generate instruction map.
    vector_map_->clear();
    for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
      bool vectorized_def = VectorizeDef(node, it.Current(), /*generate_code*/ true);
      DCHECK(vectorized_def);
    }
    // Generate body from the instruction map, but in original program order.
    HEnvironment* env = vector_header_->GetFirstInstruction()->GetEnvironment();
    for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
      auto i = vector_map_->find(it.Current());
      if (i != vector_map_->end() && !i->second->IsInBlock()) {
        Insert(vector_body_, i->second);
        // Deal with instructions that need an environment, such as the scalar intrinsics.
        if (i->second->NeedsEnvironment()) {
          i->second->CopyEnvironmentFromWithLoopPhiAdjustment(env, vector_header_);
        }
      }
    }
    // Generate the induction.
    vector_index_ = new (global_allocator_) HAdd(induc_type, vector_index_, step);
    Insert(vector_body_, vector_index_);
  }
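  // For illustration, with unroll == 2 the emitted loop body has the shape
  //    <loop-body>        // copy 1, subscripts use phi
  //    i' = phi + step
  //    <loop-body>        // copy 2, subscripts use i'
  //    i'' = i' + step
  // so that the back edge input of the loop phi below is the last increment.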
  // Finalize phi inputs for the reductions (if any).
  for (auto i = reductions_->begin(); i != reductions_->end(); ++i) {
    if (!i->first->IsPhi()) {
      DCHECK(i->second->IsPhi());
      GenerateVecReductionPhiInputs(i->second->AsPhi(), i->first);
    }
  }
  // Finalize phi inputs for the loop index.
  phi->AddInput(lo);
  phi->AddInput(vector_index_);
  vector_index_ = phi;
}

bool HLoopOptimization::VectorizeDef(LoopNode* node,
                                     HInstruction* instruction,
                                     bool generate_code) {
  // Accept a left-hand-side array base[index] for
  // (1) supported vector type,
  // (2) loop-invariant base,
  // (3) unit stride index,
  // (4) vectorizable right-hand-side value.
  uint64_t restrictions = kNone;
  // Don't accept expressions that can throw.
  if (instruction->CanThrow()) {
    return false;
  }
  if (instruction->IsArraySet()) {
    DataType::Type type = instruction->AsArraySet()->GetComponentType();
    HInstruction* base = instruction->InputAt(0);
    HInstruction* index = instruction->InputAt(1);
    HInstruction* value = instruction->InputAt(2);
    HInstruction* offset = nullptr;
    // For narrow types, explicit type conversion may have been
    // optimized away, so set the no-hi-bits restriction here.
    if (DataType::Size(type) <= 2) {
      restrictions |= kNoHiBits;
    }
    if (TrySetVectorType(type, &restrictions) &&
        node->loop_info->IsDefinedOutOfTheLoop(base) &&
        induction_range_.IsUnitStride(instruction, index, graph_, &offset) &&
        VectorizeUse(node, value, generate_code, type, restrictions)) {
      if (generate_code) {
        GenerateVecSub(index, offset);
        GenerateVecMem(instruction, vector_map_->Get(index), vector_map_->Get(value), offset, type);
      } else {
        vector_refs_->insert(ArrayReference(base, offset, type, /*lhs*/ true));
      }
      return true;
    }
    return false;
  }
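  // For illustration, a loop body such as
  //    a[i] = b[i] + 1;
  // is accepted by the array-set case above when a is defined outside the
  // loop and i is the unit stride loop index; the right-hand side is then
  // checked recursively by VectorizeUse().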
  // Accept a left-hand-side reduction for
  // (1) supported vector type,
  // (2) vectorizable right-hand-side value.
  auto redit = reductions_->find(instruction);
  if (redit != reductions_->end()) {
    DataType::Type type = instruction->GetType();
    // Recognize SAD idiom or direct reduction.
    if (VectorizeSADIdiom(node, instruction, generate_code, type, restrictions) ||
        VectorizeDotProdIdiom(node, instruction, generate_code, type, restrictions) ||
        (TrySetVectorType(type, &restrictions) &&
         VectorizeUse(node, instruction, generate_code, type, restrictions))) {
      if (generate_code) {
        HInstruction* new_red = vector_map_->Get(instruction);
        vector_permanent_map_->Put(new_red, vector_map_->Get(redit->second));
        vector_permanent_map_->Overwrite(redit->second, new_red);
      }
      return true;
    }
    return false;
  }
  // Branch back okay.
  if (instruction->IsGoto()) {
    return true;
  }
  // Otherwise accept only expressions with no effects outside the immediate loop-body.
  // Note that actual uses are inspected during right-hand-side tree traversal.
  return !IsUsedOutsideLoop(node->loop_info, instruction)
         && !instruction->DoesAnyWrite();
}

bool HLoopOptimization::VectorizeUse(LoopNode* node,
                                     HInstruction* instruction,
                                     bool generate_code,
                                     DataType::Type type,
                                     uint64_t restrictions) {
  // Accept anything for which code has already been generated.
  if (generate_code) {
    if (vector_map_->find(instruction) != vector_map_->end()) {
      return true;
    }
  }
  // Continue the right-hand-side tree traversal, passing in proper
  // types and vector restrictions along the way. During code generation,
  // all new nodes are drawn from the global allocator.
  if (node->loop_info->IsDefinedOutOfTheLoop(instruction)) {
    // Accept invariant use, using scalar expansion.
    if (generate_code) {
      GenerateVecInv(instruction, type);
    }
    return true;
  } else if (instruction->IsArrayGet()) {
    // Deal with vector restrictions.
    bool is_string_char_at = instruction->AsArrayGet()->IsStringCharAt();
    if (is_string_char_at && HasVectorRestrictions(restrictions, kNoStringCharAt)) {
      return false;
    }
    // Accept a right-hand-side array base[index] for
    // (1) matching vector type (exact match or signed/unsigned integral type of the same size),
    // (2) loop-invariant base,
    // (3) unit stride index,
    // (4) vectorizable right-hand-side value.
    HInstruction* base = instruction->InputAt(0);
    HInstruction* index = instruction->InputAt(1);
    HInstruction* offset = nullptr;
    if (HVecOperation::ToSignedType(type) == HVecOperation::ToSignedType(instruction->GetType()) &&
        node->loop_info->IsDefinedOutOfTheLoop(base) &&
        induction_range_.IsUnitStride(instruction, index, graph_, &offset)) {
      if (generate_code) {
        GenerateVecSub(index, offset);
        GenerateVecMem(instruction, vector_map_->Get(index), nullptr, offset, type);
      } else {
        vector_refs_->insert(ArrayReference(base, offset, type, /*lhs*/ false, is_string_char_at));
      }
      return true;
    }
  } else if (instruction->IsPhi()) {
    // Accept particular phi operations.
    if (reductions_->find(instruction) != reductions_->end()) {
      // Deal with vector restrictions.
      if (HasVectorRestrictions(restrictions, kNoReduction)) {
        return false;
      }
      // Accept a reduction.
      if (generate_code) {
        GenerateVecReductionPhi(instruction->AsPhi());
      }
      return true;
    }
    // TODO: accept right-hand-side induction?
    return false;
  } else if (instruction->IsTypeConversion()) {
    // Accept particular type conversions.
    HTypeConversion* conversion = instruction->AsTypeConversion();
    HInstruction* opa = conversion->InputAt(0);
    DataType::Type from = conversion->GetInputType();
    DataType::Type to = conversion->GetResultType();
    if (DataType::IsIntegralType(from) && DataType::IsIntegralType(to)) {
      uint32_t size_vec = DataType::Size(type);
      uint32_t size_from = DataType::Size(from);
      uint32_t size_to = DataType::Size(to);
      // Accept an integral conversion
      // (1a) narrowing into vector type, "wider" operations cannot bring in higher order bits, or
      // (1b) widening from at least vector type, and
      // (2) vectorizable operand.
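      // For illustration, with a byte vector (size_vec == 1) the conversion in
      //    (byte) (x + y)   // int arithmetic narrowed back to byte
      // matches (1a): the operand is then traversed under kNoHiBits, and the
      // conversion itself becomes a mere pass-through in vector mode.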
      if ((size_to < size_from &&
           size_to == size_vec &&
           VectorizeUse(node, opa, generate_code, type, restrictions | kNoHiBits)) ||
          (size_to >= size_from &&
           size_from >= size_vec &&
           VectorizeUse(node, opa, generate_code, type, restrictions))) {
        if (generate_code) {
          if (vector_mode_ == kVector) {
            vector_map_->Put(instruction, vector_map_->Get(opa));  // operand pass-through
          } else {
            GenerateVecOp(instruction, vector_map_->Get(opa), nullptr, type);
          }
        }
        return true;
      }
    } else if (to == DataType::Type::kFloat32 && from == DataType::Type::kInt32) {
      DCHECK_EQ(to, type);
      // Accept int to float conversion for
      // (1) supported int,
      // (2) vectorizable operand.
      if (TrySetVectorType(from, &restrictions) &&
          VectorizeUse(node, opa, generate_code, from, restrictions)) {
        if (generate_code) {
          GenerateVecOp(instruction, vector_map_->Get(opa), nullptr, type);
        }
        return true;
      }
    }
    return false;
  } else if (instruction->IsNeg() || instruction->IsNot() || instruction->IsBooleanNot()) {
    // Accept unary operator for vectorizable operand.
    HInstruction* opa = instruction->InputAt(0);
    if (VectorizeUse(node, opa, generate_code, type, restrictions)) {
      if (generate_code) {
        GenerateVecOp(instruction, vector_map_->Get(opa), nullptr, type);
      }
      return true;
    }
  } else if (instruction->IsAdd() || instruction->IsSub() ||
             instruction->IsMul() || instruction->IsDiv() ||
             instruction->IsAnd() || instruction->IsOr()  || instruction->IsXor()) {
    // Deal with vector restrictions.
    if ((instruction->IsMul() && HasVectorRestrictions(restrictions, kNoMul)) ||
        (instruction->IsDiv() && HasVectorRestrictions(restrictions, kNoDiv))) {
      return false;
    }
    // Accept binary operator for vectorizable operands.
    HInstruction* opa = instruction->InputAt(0);
    HInstruction* opb = instruction->InputAt(1);
    if (VectorizeUse(node, opa, generate_code, type, restrictions) &&
        VectorizeUse(node, opb, generate_code, type, restrictions)) {
      if (generate_code) {
        GenerateVecOp(instruction, vector_map_->Get(opa), vector_map_->Get(opb), type);
      }
      return true;
    }
  } else if (instruction->IsShl() || instruction->IsShr() || instruction->IsUShr()) {
    // Recognize halving add idiom.
    if (VectorizeHalvingAddIdiom(node, instruction, generate_code, type, restrictions)) {
      return true;
    }
    // Deal with vector restrictions.
    HInstruction* opa = instruction->InputAt(0);
    HInstruction* opb = instruction->InputAt(1);
    HInstruction* r = opa;
    bool is_unsigned = false;
    if ((HasVectorRestrictions(restrictions, kNoShift)) ||
        (instruction->IsShr() && HasVectorRestrictions(restrictions, kNoShr))) {
      return false;  // unsupported instruction
    } else if (HasVectorRestrictions(restrictions, kNoHiBits)) {
      // Shifts right need extra care to account for higher order bits.
      // TODO: shr/unsigned and ushr/signed (less likely) could be handled by flipping signedness.
      if (instruction->IsShr() &&
          (!IsNarrowerOperand(opa, type, &r, &is_unsigned) || is_unsigned)) {
        return false;  // reject, unless all operands are sign-extension narrower
      } else if (instruction->IsUShr() &&
                 (!IsNarrowerOperand(opa, type, &r, &is_unsigned) || !is_unsigned)) {
        return false;  // reject, unless all operands are zero-extension narrower
      }
    }
    // Accept shift operator for vectorizable/invariant operands.
    // TODO: accept symbolic, albeit loop invariant shift factors.
    DCHECK(r != nullptr);
    if (generate_code && vector_mode_ != kVector) {  // de-idiom
      r = opa;
    }
    int64_t distance = 0;
    if (VectorizeUse(node, r, generate_code, type, restrictions) &&
        IsInt64AndGet(opb, /*out*/ &distance)) {
      // Restrict shift distance to packed data type width.
      int64_t max_distance = DataType::Size(type) * 8;
      if (0 <= distance && distance < max_distance) {
        if (generate_code) {
          GenerateVecOp(instruction, vector_map_->Get(r), opb, type);
        }
        return true;
      }
    }
  } else if (instruction->IsAbs()) {
    // Deal with vector restrictions.
    HInstruction* opa = instruction->InputAt(0);
    HInstruction* r = opa;
    bool is_unsigned = false;
    if (HasVectorRestrictions(restrictions, kNoAbs)) {
      return false;
    } else if (HasVectorRestrictions(restrictions, kNoHiBits) &&
               (!IsNarrowerOperand(opa, type, &r, &is_unsigned) || is_unsigned)) {
      return false;  // reject, unless operand is sign-extension narrower
    }
    // Accept ABS(x) for vectorizable operand.
    DCHECK(r != nullptr);
    if (generate_code && vector_mode_ != kVector) {  // de-idiom
      r = opa;
    }
    if (VectorizeUse(node, r, generate_code, type, restrictions)) {
      if (generate_code) {
        GenerateVecOp(instruction,
                      vector_map_->Get(r),
                      nullptr,
                      HVecOperation::ToProperType(type, is_unsigned));
      }
      return true;
    }
  }
  return false;
}

uint32_t HLoopOptimization::GetVectorSizeInBytes() {
  if (kIsDebugBuild) {
    InstructionSet isa = compiler_options_->GetInstructionSet();
    // TODO: Remove this check when there are no implicit assumptions on the SIMD reg size.
    DCHECK_EQ(simd_register_size_, (isa == InstructionSet::kArm || isa == InstructionSet::kThumb2)
                                   ? 8u
                                   : 16u);
  }

  return simd_register_size_;
}

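// Maps the packed data type to a target-specific number of vector lanes and
// records restriction flags for operations the target cannot vectorize well;
// e.g. 128-bit SIMD packs sixteen int8 lanes, while 64-bit SIMD packs eight.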
bool HLoopOptimization::TrySetVectorType(DataType::Type type, uint64_t* restrictions) {
  const InstructionSetFeatures* features = compiler_options_->GetInstructionSetFeatures();
  switch (compiler_options_->GetInstructionSet()) {
    case InstructionSet::kArm:
    case InstructionSet::kThumb2:
      // Allow vectorization for all ARM devices, because Android assumes that
      // ARM 32-bit always supports advanced SIMD (64-bit SIMD).
      switch (type) {
        case DataType::Type::kBool:
        case DataType::Type::kUint8:
        case DataType::Type::kInt8:
          *restrictions |= kNoDiv | kNoReduction | kNoDotProd;
          return TrySetVectorLength(type, 8);
        case DataType::Type::kUint16:
        case DataType::Type::kInt16:
          *restrictions |= kNoDiv | kNoStringCharAt | kNoReduction | kNoDotProd;
          return TrySetVectorLength(type, 4);
        case DataType::Type::kInt32:
          *restrictions |= kNoDiv | kNoWideSAD;
          return TrySetVectorLength(type, 2);
        default:
          break;
      }
      return false;
    case InstructionSet::kArm64:
      // Allow vectorization for all ARM devices, because Android assumes that
      // ARMv8 AArch64 always supports advanced SIMD (128-bit SIMD).
      switch (type) {
        case DataType::Type::kBool:
        case DataType::Type::kUint8:
        case DataType::Type::kInt8:
          *restrictions |= kNoDiv;
          return TrySetVectorLength(type, 16);
        case DataType::Type::kUint16:
        case DataType::Type::kInt16:
          *restrictions |= kNoDiv;
          return TrySetVectorLength(type, 8);
        case DataType::Type::kInt32:
          *restrictions |= kNoDiv;
          return TrySetVectorLength(type, 4);
        case DataType::Type::kInt64:
          *restrictions |= kNoDiv | kNoMul;
          return TrySetVectorLength(type, 2);
        case DataType::Type::kFloat32:
          *restrictions |= kNoReduction;
          return TrySetVectorLength(type, 4);
        case DataType::Type::kFloat64:
          *restrictions |= kNoReduction;
          return TrySetVectorLength(type, 2);
        default:
          return false;
      }
    case InstructionSet::kX86:
    case InstructionSet::kX86_64:
      // Allow vectorization for SSE4.1-enabled X86 devices only (128-bit SIMD).
      if (features->AsX86InstructionSetFeatures()->HasSSE4_1()) {
        switch (type) {
          case DataType::Type::kBool:
          case DataType::Type::kUint8:
          case DataType::Type::kInt8:
            *restrictions |= kNoMul |
                             kNoDiv |
                             kNoShift |
                             kNoAbs |
                             kNoSignedHAdd |
                             kNoUnroundedHAdd |
                             kNoSAD |
                             kNoDotProd;
            return TrySetVectorLength(type, 16);
          case DataType::Type::kUint16:
            *restrictions |= kNoDiv |
                             kNoAbs |
                             kNoSignedHAdd |
                             kNoUnroundedHAdd |
                             kNoSAD |
                             kNoDotProd;
            return TrySetVectorLength(type, 8);
          case DataType::Type::kInt16:
            *restrictions |= kNoDiv |
                             kNoAbs |
                             kNoSignedHAdd |
                             kNoUnroundedHAdd |
                             kNoSAD;
            return TrySetVectorLength(type, 8);
          case DataType::Type::kInt32:
            *restrictions |= kNoDiv | kNoSAD;
            return TrySetVectorLength(type, 4);
          case DataType::Type::kInt64:
            *restrictions |= kNoMul | kNoDiv | kNoShr | kNoAbs | kNoSAD;
            return TrySetVectorLength(type, 2);
          case DataType::Type::kFloat32:
            *restrictions |= kNoReduction;
            return TrySetVectorLength(type, 4);
          case DataType::Type::kFloat64:
            *restrictions |= kNoReduction;
            return TrySetVectorLength(type, 2);
          default:
            break;
        }  // switch type
      }
      return false;
    default:
      return false;
  }  // switch instruction set
}

bool HLoopOptimization::TrySetVectorLengthImpl(uint32_t length) {
  DCHECK(IsPowerOfTwo(length) && length >= 2u);
  // First time set?
  if (vector_length_ == 0) {
    vector_length_ = length;
  }
  // Different types are acceptable within a loop-body, as long as all the corresponding vector
  // lengths match exactly to obtain a uniform traversal through the vector iteration space
  // (idiomatic exceptions to this rule can be handled by further unrolling sub-expressions).
  return vector_length_ == length;
}
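// For illustration of the exact-match rule above: an int32 loop on ARM64 sets
// vector_length_ to 4; if the same loop body later tries to pack int8 at 16
// lanes, the test fails and vectorization of that loop is abandoned.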

void HLoopOptimization::GenerateVecInv(HInstruction* org, DataType::Type type) {
  if (vector_map_->find(org) == vector_map_->end()) {
    // In scalar code, just use a self pass-through for scalar invariants
    // (viz. expression remains itself).
    if (vector_mode_ == kSequential) {
      vector_map_->Put(org, org);
      return;
    }
    // In vector code, explicit scalar expansion is needed.
    HInstruction* vector = nullptr;
    auto it = vector_permanent_map_->find(org);
    if (it != vector_permanent_map_->end()) {
      vector = it->second;  // reuse during unrolling
    } else {
      // Generates ReplicateScalar( (optional_type_conv) org ).
      HInstruction* input = org;
      DataType::Type input_type = input->GetType();
      if (type != input_type && (type == DataType::Type::kInt64 ||
                                 input_type == DataType::Type::kInt64)) {
        input = Insert(vector_preheader_,
                       new (global_allocator_) HTypeConversion(type, input, kNoDexPc));
      }
      vector = new (global_allocator_)
          HVecReplicateScalar(global_allocator_, input, type, vector_length_, kNoDexPc);
      vector_permanent_map_->Put(org, Insert(vector_preheader_, vector));
    }
    vector_map_->Put(org, vector);
  }
}

void HLoopOptimization::GenerateVecSub(HInstruction* org, HInstruction* offset) {
  if (vector_map_->find(org) == vector_map_->end()) {
    HInstruction* subscript = vector_index_;
    int64_t value = 0;
    if (!IsInt64AndGet(offset, &value) || value != 0) {
      subscript = new (global_allocator_) HAdd(DataType::Type::kInt32, subscript, offset);
      if (org->IsPhi()) {
        Insert(vector_body_, subscript);  // lacks layout placeholder
      }
    }
    vector_map_->Put(org, subscript);
  }
}
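// For illustration of GenerateVecSub() above: a subscript of the form i + c
// (nonzero invariant c) yields an HAdd of the loop index and c, whereas a
// plain subscript i simply reuses the loop index as is.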

void HLoopOptimization::GenerateVecMem(HInstruction* org,
                                       HInstruction* opa,
                                       HInstruction* opb,
                                       HInstruction* offset,
                                       DataType::Type type) {
  uint32_t dex_pc = org->GetDexPc();
  HInstruction* vector = nullptr;
  if (vector_mode_ == kVector) {
    // Vector store or load.
    bool is_string_char_at = false;
    HInstruction* base = org->InputAt(0);
    if (opb != nullptr) {
      vector = new (global_allocator_) HVecStore(
          global_allocator_, base, opa, opb, type, org->GetSideEffects(), vector_length_, dex_pc);
    } else {
      is_string_char_at = org->AsArrayGet()->IsStringCharAt();
      vector = new (global_allocator_) HVecLoad(global_allocator_,
                                                base,
                                                opa,
                                                type,
                                                org->GetSideEffects(),
                                                vector_length_,
                                                is_string_char_at,
                                                dex_pc);
    }
    // Known (forced/adjusted/original) alignment?
    if (vector_dynamic_peeling_candidate_ != nullptr) {
      if (vector_dynamic_peeling_candidate_->offset == offset &&  // TODO: diffs too?
          DataType::Size(vector_dynamic_peeling_candidate_->type) == DataType::Size(type) &&
          vector_dynamic_peeling_candidate_->is_string_char_at == is_string_char_at) {
        vector->AsVecMemoryOperation()->SetAlignment(  // forced
            Alignment(GetVectorSizeInBytes(), 0));
      }
    } else {
      vector->AsVecMemoryOperation()->SetAlignment(  // adjusted/original
          ComputeAlignment(offset, type, is_string_char_at, vector_static_peeling_factor_));
    }
  } else {
    // Scalar store or load.
    DCHECK(vector_mode_ == kSequential);
    if (opb != nullptr) {
      DataType::Type component_type = org->AsArraySet()->GetComponentType();
      vector = new (global_allocator_) HArraySet(
          org->InputAt(0), opa, opb, component_type, org->GetSideEffects(), dex_pc);
    } else {
      bool is_string_char_at = org->AsArrayGet()->IsStringCharAt();
      vector = new (global_allocator_) HArrayGet(
          org->InputAt(0), opa, org->GetType(), org->GetSideEffects(), dex_pc, is_string_char_at);
    }
  }
  vector_map_->Put(org, vector);
}

void HLoopOptimization::GenerateVecReductionPhi(HPhi* phi) {
  DCHECK(reductions_->find(phi) != reductions_->end());
  DCHECK(reductions_->Get(phi->InputAt(1)) == phi);
  HInstruction* vector = nullptr;
  if (vector_mode_ == kSequential) {
    HPhi* new_phi = new (global_allocator_) HPhi(
        global_allocator_, kNoRegNumber, 0, phi->GetType());
    vector_header_->AddPhi(new_phi);
    vector = new_phi;
  } else {
    // Link vector reduction back to prior unrolled update, or a first phi.
    auto it = vector_permanent_map_->find(phi);
    if (it != vector_permanent_map_->end()) {
      vector = it->second;
    } else {
      HPhi* new_phi = new (global_allocator_) HPhi(
          global_allocator_, kNoRegNumber, 0, HVecOperation::kSIMDType);
      vector_header_->AddPhi(new_phi);
      vector = new_phi;
    }
  }
  vector_map_->Put(phi, vector);
}

void HLoopOptimization::GenerateVecReductionPhiInputs(HPhi* phi, HInstruction* reduction) {
  HInstruction* new_phi = vector_map_->Get(phi);
  HInstruction* new_init = reductions_->Get(phi);
  HInstruction* new_red = vector_map_->Get(reduction);
  // Link unrolled vector loop back to new phi.
  for (; !new_phi->IsPhi(); new_phi = vector_permanent_map_->Get(new_phi)) {
    DCHECK(new_phi->IsVecOperation());
  }
  // Prepare the new initialization.
  if (vector_mode_ == kVector) {
    // Generate a [initial, 0, .., 0] vector for add or
    // a [initial, initial, .., initial] vector for min/max.
    HVecOperation* red_vector = new_red->AsVecOperation();
    HVecReduce::ReductionKind kind = GetReductionKind(red_vector);
    uint32_t vector_length = red_vector->GetVectorLength();
    DataType::Type type = red_vector->GetPackedType();
    if (kind == HVecReduce::ReductionKind::kSum) {
      new_init = Insert(vector_preheader_,
                        new (global_allocator_) HVecSetScalars(global_allocator_,
                                                               &new_init,
                                                               type,
                                                               vector_length,
                                                               1,
                                                               kNoDexPc));
    } else {
      new_init = Insert(vector_preheader_,
                        new (global_allocator_) HVecReplicateScalar(global_allocator_,
                                                                    new_init,
                                                                    type,
                                                                    vector_length,
                                                                    kNoDexPc));
    }
  } else {
    new_init = ReduceAndExtractIfNeeded(new_init);
  }
  // Set the phi inputs.
  DCHECK(new_phi->IsPhi());
  new_phi->AsPhi()->AddInput(new_init);
  new_phi->AsPhi()->AddInput(new_red);
  // New feed value for next phi (safe mutation in iteration).
  reductions_->find(phi)->second = new_phi;
}

HInstruction* HLoopOptimization::ReduceAndExtractIfNeeded(HInstruction* instruction) {
  if (instruction->IsPhi()) {
    HInstruction* input = instruction->InputAt(1);
    if (HVecOperation::ReturnsSIMDValue(input)) {
      DCHECK(!input->IsPhi());
      HVecOperation* input_vector = input->AsVecOperation();
      uint32_t vector_length = input_vector->GetVectorLength();
      DataType::Type type = input_vector->GetPackedType();
      HVecReduce::ReductionKind kind = GetReductionKind(input_vector);
      HBasicBlock* exit = instruction->GetBlock()->GetSuccessors()[0];
      // Generate a vector reduction and scalar extract
      //    x = REDUCE( [x_1, .., x_n] )
      //    y = x_1
      // along the exit of the defining loop.
      HInstruction* reduce = new (global_allocator_) HVecReduce(
          global_allocator_, instruction, type, vector_length, kind, kNoDexPc);
      exit->InsertInstructionBefore(reduce, exit->GetFirstInstruction());
      instruction = new (global_allocator_) HVecExtractScalar(
          global_allocator_, reduce, type, vector_length, 0, kNoDexPc);
      exit->InsertInstructionAfter(instruction, reduce);
    }
  }
  return instruction;
}

#define GENERATE_VEC(x, y) \
  if (vector_mode_ == kVector) { \
    vector = (x); \
  } else { \
    DCHECK(vector_mode_ == kSequential); \
    vector = (y); \
  } \
  break;
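// Note: the trailing break in GENERATE_VEC exits the switch statement in
// GenerateVecOp() below, so each case reads as a single pair of
// (vector-form, sequential-form) expressions.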

void HLoopOptimization::GenerateVecOp(HInstruction* org,
                                      HInstruction* opa,
                                      HInstruction* opb,
                                      DataType::Type type) {
  uint32_t dex_pc = org->GetDexPc();
  HInstruction* vector = nullptr;
  DataType::Type org_type = org->GetType();
  switch (org->GetKind()) {
    case HInstruction::kNeg:
      DCHECK(opb == nullptr);
      GENERATE_VEC(
        new (global_allocator_) HVecNeg(global_allocator_, opa, type, vector_length_, dex_pc),
        new (global_allocator_) HNeg(org_type, opa, dex_pc));
    case HInstruction::kNot:
      DCHECK(opb == nullptr);
      GENERATE_VEC(
        new (global_allocator_) HVecNot(global_allocator_, opa, type, vector_length_, dex_pc),
        new (global_allocator_) HNot(org_type, opa, dex_pc));
    case HInstruction::kBooleanNot:
      DCHECK(opb == nullptr);
      GENERATE_VEC(
        new (global_allocator_) HVecNot(global_allocator_, opa, type, vector_length_, dex_pc),
        new (global_allocator_) HBooleanNot(opa, dex_pc));
    case HInstruction::kTypeConversion:
      DCHECK(opb == nullptr);
      GENERATE_VEC(
        new (global_allocator_) HVecCnv(global_allocator_, opa, type, vector_length_, dex_pc),
        new (global_allocator_) HTypeConversion(org_type, opa, dex_pc));
    case HInstruction::kAdd:
      GENERATE_VEC(
        new (global_allocator_) HVecAdd(global_allocator_, opa, opb, type, vector_length_, dex_pc),
        new (global_allocator_) HAdd(org_type, opa, opb, dex_pc));
    case HInstruction::kSub:
      GENERATE_VEC(
        new (global_allocator_) HVecSub(global_allocator_, opa, opb, type, vector_length_, dex_pc),
        new (global_allocator_) HSub(org_type, opa, opb, dex_pc));
    case HInstruction::kMul:
      GENERATE_VEC(
        new (global_allocator_) HVecMul(global_allocator_, opa, opb, type, vector_length_, dex_pc),
        new (global_allocator_) HMul(org_type, opa, opb, dex_pc));
    case HInstruction::kDiv:
      GENERATE_VEC(
        new (global_allocator_) HVecDiv(global_allocator_, opa, opb, type, vector_length_, dex_pc),
        new (global_allocator_) HDiv(org_type, opa, opb, dex_pc));
    case HInstruction::kAnd:
      GENERATE_VEC(
        new (global_allocator_) HVecAnd(global_allocator_, opa, opb, type, vector_length_, dex_pc),
        new (global_allocator_) HAnd(org_type, opa, opb, dex_pc));
    case HInstruction::kOr:
      GENERATE_VEC(
        new (global_allocator_) HVecOr(global_allocator_, opa, opb, type, vector_length_, dex_pc),
        new (global_allocator_) HOr(org_type, opa, opb, dex_pc));
    case HInstruction::kXor:
      GENERATE_VEC(
        new (global_allocator_) HVecXor(global_allocator_, opa, opb, type, vector_length_, dex_pc),
        new (global_allocator_) HXor(org_type, opa, opb, dex_pc));
    case HInstruction::kShl:
      GENERATE_VEC(
        new (global_allocator_) HVecShl(global_allocator_, opa, opb, type, vector_length_, dex_pc),
        new (global_allocator_) HShl(org_type, opa, opb, dex_pc));
    case HInstruction::kShr:
      GENERATE_VEC(
        new (global_allocator_) HVecShr(global_allocator_, opa, opb, type, vector_length_, dex_pc),
        new (global_allocator_) HShr(org_type, opa, opb, dex_pc));
    case HInstruction::kUShr:
      GENERATE_VEC(
        new (global_allocator_) HVecUShr(global_allocator_, opa, opb, type, vector_length_, dex_pc),
        new (global_allocator_) HUShr(org_type, opa, opb, dex_pc));
    case HInstruction::kAbs:
      DCHECK(opb == nullptr);
      GENERATE_VEC(
        new (global_allocator_) HVecAbs(global_allocator_, opa, type, vector_length_, dex_pc),
        new (global_allocator_) HAbs(org_type, opa, dex_pc));
    default:
      break;
  }  // switch
  CHECK(vector != nullptr) << "Unsupported SIMD operator";
  vector_map_->Put(org, vector);
}

#undef GENERATE_VEC

//
// Vectorization idioms.
//

// Method recognizes the following idioms:
//   rounding  halving add (a + b + 1) >> 1 for unsigned/signed operands a, b
//   truncated halving add (a + b)     >> 1 for unsigned/signed operands a, b
// Provided that the operands are promoted to a wider form to do the arithmetic and
// then cast back to narrower form, the idioms can be mapped into an efficient SIMD
// implementation that operates directly in narrower form (plus one extra bit).
// TODO: current version recognizes implicit byte/short/char widening only;
//       explicit widening from int to long could be added later.
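//
// For illustration with unsigned bytes: (255 + 255 + 1) >> 1 == 255 still fits
// the byte range, since the intermediate sum needs at most one extra bit,
// which is exactly what the halving-add instruction provides internally.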
bool HLoopOptimization::VectorizeHalvingAddIdiom(LoopNode* node,
                                                 HInstruction* instruction,
                                                 bool generate_code,
                                                 DataType::Type type,
                                                 uint64_t restrictions) {
  // Test for top level arithmetic shift right x >> 1 or logical shift right x >>> 1
  // (note that whether the sign bit in wider precision is shifted in has no effect
  // on the narrow precision computed by the idiom).
  if ((instruction->IsShr() ||
       instruction->IsUShr()) &&
      IsInt64Value(instruction->InputAt(1), 1)) {
    // Test for (a + b + c) >> 1 for optional constant c.
    HInstruction* a = nullptr;
    HInstruction* b = nullptr;
    int64_t       c = 0;
    if (IsAddConst2(graph_, instruction->InputAt(0), /*out*/ &a, /*out*/ &b, /*out*/ &c)) {
      // Accept c == 1 (rounded) or c == 0 (not rounded).
      bool is_rounded = false;
      if (c == 1) {
        is_rounded = true;
      } else if (c != 0) {
        return false;
      }
      // Accept consistent zero or sign extension on operands a and b.
      HInstruction* r = nullptr;
      HInstruction* s = nullptr;
      bool is_unsigned = false;
      if (!IsNarrowerOperands(a, b, type, &r, &s, &is_unsigned)) {
        return false;
      }
      // Deal with vector restrictions.
      if ((!is_unsigned && HasVectorRestrictions(restrictions, kNoSignedHAdd)) ||
          (!is_rounded && HasVectorRestrictions(restrictions, kNoUnroundedHAdd))) {
        return false;
      }
      // Accept recognized halving add for vectorizable operands. Vectorized code uses the
      // shorthand idiomatic operation. Sequential code uses the original scalar expressions.
      DCHECK(r != nullptr && s != nullptr);
      if (generate_code && vector_mode_ != kVector) {  // de-idiom
        r = instruction->InputAt(0);
        s = instruction->InputAt(1);
      }
      if (VectorizeUse(node, r, generate_code, type, restrictions) &&
          VectorizeUse(node, s, generate_code, type, restrictions)) {
        if (generate_code) {
          if (vector_mode_ == kVector) {
            vector_map_->Put(instruction, new (global_allocator_) HVecHalvingAdd(
                global_allocator_,
                vector_map_->Get(r),
                vector_map_->Get(s),
                HVecOperation::ToProperType(type, is_unsigned),
                vector_length_,
                is_rounded,
                kNoDexPc));
            MaybeRecordStat(stats_, MethodCompilationStat::kLoopVectorizedIdiom);
          } else {
            GenerateVecOp(instruction, vector_map_->Get(r), vector_map_->Get(s), type);
          }
        }
        return true;
      }
    }
  }
  return false;
}

// Method recognizes the following idiom:
//   q += ABS(a - b) for signed operands a, b
// Provided that the operands have the same type or are promoted to a wider form.
// Since this may involve a vector length change, the idiom is handled by going directly
// to a sad-accumulate node (rather than relying on combining finer grained nodes later).
// TODO: unsigned SAD too?
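//
// For illustration, the typical source form is
//    for (i = 0; i < n; i++) q += ABS(a[i] - b[i]);
// with byte or short arrays a, b accumulating into an int or long q.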
bool HLoopOptimization::VectorizeSADIdiom(LoopNode* node,
                                          HInstruction* instruction,
                                          bool generate_code,
                                          DataType::Type reduction_type,
                                          uint64_t restrictions) {
  // Filter integral "q += ABS(a - b);" reduction, where ABS and SUB
  // are done in the same precision (either int or long).
  if (!instruction->IsAdd() ||
      (reduction_type != DataType::Type::kInt32 && reduction_type != DataType::Type::kInt64)) {
    return false;
  }
  HInstruction* acc = instruction->InputAt(0);
  HInstruction* abs = instruction->InputAt(1);
  HInstruction* a = nullptr;
  HInstruction* b = nullptr;
  if (abs->IsAbs() &&
      abs->GetType() == reduction_type &&
      IsSubConst2(graph_, abs->InputAt(0), /*out*/ &a, /*out*/ &b)) {
    DCHECK(a != nullptr && b != nullptr);
  } else {
    return false;
  }
  // Accept same-type or consistent sign extension for narrower-type on operands a and b.
  // The same-type or narrower operands are called r (a or lower) and s (b or lower).
  // We inspect the operands carefully to pick the most suited type.
  HInstruction* r = a;
  HInstruction* s = b;
  bool is_unsigned = false;
  DataType::Type sub_type = GetNarrowerType(a, b);
  if (reduction_type != sub_type &&
      (!IsNarrowerOperands(a, b, sub_type, &r, &s, &is_unsigned) || is_unsigned)) {
    return false;
  }
  // Try same/narrower type and deal with vector restrictions.
  if (!TrySetVectorType(sub_type, &restrictions) ||
      HasVectorRestrictions(restrictions, kNoSAD) ||
      (reduction_type != sub_type && HasVectorRestrictions(restrictions, kNoWideSAD))) {
    return false;
  }
  // Accept SAD idiom for vectorizable operands. Vectorized code uses the shorthand
  // idiomatic operation. Sequential code uses the original scalar expressions.
  DCHECK(r != nullptr && s != nullptr);
  if (generate_code && vector_mode_ != kVector) {  // de-idiom
    r = s = abs->InputAt(0);
  }
  if (VectorizeUse(node, acc, generate_code, sub_type, restrictions) &&
      VectorizeUse(node, r, generate_code, sub_type, restrictions) &&
      VectorizeUse(node, s, generate_code, sub_type, restrictions)) {
    if (generate_code) {
      if (vector_mode_ == kVector) {
        vector_map_->Put(instruction, new (global_allocator_) HVecSADAccumulate(
            global_allocator_,
            vector_map_->Get(acc),
            vector_map_->Get(r),
            vector_map_->Get(s),
            HVecOperation::ToProperType(reduction_type, is_unsigned),
            GetOtherVL(reduction_type, sub_type, vector_length_),
            kNoDexPc));
        MaybeRecordStat(stats_, MethodCompilationStat::kLoopVectorizedIdiom);
      } else {
        // "GenerateVecOp()" must not be called more than once for each original loop body
        // instruction. As the SAD idiom processes both "current" instruction ("instruction")
        // and its ABS input in one go, we must check that for the scalar case the ABS instruction
        // has not yet been processed.
        if (vector_map_->find(abs) == vector_map_->end()) {
          GenerateVecOp(abs, vector_map_->Get(r), nullptr, reduction_type);
        }
        GenerateVecOp(instruction, vector_map_->Get(acc), vector_map_->Get(abs), reduction_type);
      }
    }
    return true;
  }
  return false;
}

// Method recognizes the following dot product idiom:
//   q += a * b for operands a, b whose type is narrower than the reduction one.
// Provided that the operands have the same type or are promoted to a wider form.
// Since this may involve a vector length change, the idiom is handled by going directly
// to a dot product node (rather than relying on combining finer grained nodes later).
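//
// For illustration, the typical source form is
//    for (i = 0; i < n; i++) q += a[i] * b[i];
// with byte or short arrays a, b accumulating into an int q.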
bool HLoopOptimization::VectorizeDotProdIdiom(LoopNode* node,
                                              HInstruction* instruction,
                                              bool generate_code,
                                              DataType::Type reduction_type,
                                              uint64_t restrictions) {
  if (!instruction->IsAdd() || reduction_type != DataType::Type::kInt32) {
    return false;
  }

  HInstruction* const acc = instruction->InputAt(0);
  HInstruction* const mul = instruction->InputAt(1);
  if (!mul->IsMul() || mul->GetType() != reduction_type) {
    return false;
  }

  HInstruction* const mul_left = mul->InputAt(0);
  HInstruction* const mul_right = mul->InputAt(1);
  HInstruction* r = mul_left;
  HInstruction* s = mul_right;
  DataType::Type op_type = GetNarrowerType(mul_left, mul_right);
  bool is_unsigned = false;

  if (!IsNarrowerOperands(mul_left, mul_right, op_type, &r, &s, &is_unsigned)) {
    return false;
  }
  op_type = HVecOperation::ToProperType(op_type, is_unsigned);

  if (!TrySetVectorType(op_type, &restrictions) ||
      HasVectorRestrictions(restrictions, kNoDotProd)) {
    return false;
  }

  DCHECK(r != nullptr && s != nullptr);
  // Accept dot product idiom for vectorizable operands. Vectorized code uses the shorthand
  // idiomatic operation. Sequential code uses the original scalar expressions.
  if (generate_code && vector_mode_ != kVector) {  // de-idiom
    r = mul_left;
    s = mul_right;
  }
  if (VectorizeUse(node, acc, generate_code, op_type, restrictions) &&
      VectorizeUse(node, r, generate_code, op_type, restrictions) &&
      VectorizeUse(node, s, generate_code, op_type, restrictions)) {
    if (generate_code) {
      if (vector_mode_ == kVector) {
        vector_map_->Put(instruction, new (global_allocator_) HVecDotProd(
            global_allocator_,
            vector_map_->Get(acc),
            vector_map_->Get(r),
            vector_map_->Get(s),
            reduction_type,
            is_unsigned,
            GetOtherVL(reduction_type, op_type, vector_length_),
            kNoDexPc));
        MaybeRecordStat(stats_, MethodCompilationStat::kLoopVectorizedIdiom);
      } else {
        // "GenerateVecOp()" must not be called more than once for each original loop body
        // instruction. As the DotProd idiom processes both "current" instruction ("instruction")
        // and its MUL input in one go, we must check that for the scalar case the MUL instruction
        // has not yet been processed.
        if (vector_map_->find(mul) == vector_map_->end()) {
          GenerateVecOp(mul, vector_map_->Get(r), vector_map_->Get(s), reduction_type);
        }
        GenerateVecOp(instruction, vector_map_->Get(acc), vector_map_->Get(mul), reduction_type);
      }
    }
    return true;
  }
  return false;
}

//
// Vectorization heuristics.
//

Alignment HLoopOptimization::ComputeAlignment(HInstruction* offset,
                                              DataType::Type type,
                                              bool is_string_char_at,
                                              uint32_t peeling) {
  // Combine the alignment and hidden offset that is guaranteed by
  // the Android runtime with a known starting index, scaled to bytes.
  int64_t value = 0;
  if (IsInt64AndGet(offset, /*out*/ &value)) {
    uint32_t start_offset =
        HiddenOffset(type, is_string_char_at) + (value + peeling) * DataType::Size(type);
    return Alignment(BaseAlignment(), start_offset & (BaseAlignment() - 1u));
  }
  // Otherwise, the Android runtime guarantees at least natural alignment.
  return Alignment(DataType::Size(type), 0);
}
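//
// For illustration of the computation above, assuming an int array with a
// hidden data offset of 12 bytes and an 8-byte base alignment: a known
// offset of 2 with peeling 1 gives
//    start_offset = 12 + (2 + 1) * 4 = 24,
// i.e. Alignment(8, 24 & 7) == Alignment(8, 0), a fully aligned access.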

void HLoopOptimization::SetAlignmentStrategy(uint32_t peeling_votes[],
                                             const ArrayReference* peeling_candidate) {
  // Current heuristic: pick the best static loop peeling factor, if any,
  // or otherwise use dynamic loop peeling on the suggested peeling candidate.
  uint32_t max_vote = 0;
  for (int32_t i = 0; i < 16; i++) {
    if (peeling_votes[i] > max_vote) {
      max_vote = peeling_votes[i];
      vector_static_peeling_factor_ = i;
    }
  }
  if (max_vote == 0) {
    vector_dynamic_peeling_candidate_ = peeling_candidate;
  }
}

uint32_t HLoopOptimization::MaxNumberPeeled() {
  if (vector_dynamic_peeling_candidate_ != nullptr) {
    return vector_length_ - 1u;  // worst-case
  }
  return vector_static_peeling_factor_;  // known exactly
}

bool HLoopOptimization::IsVectorizationProfitable(int64_t trip_count) {
  // Current heuristic: non-empty body with a sufficient number of iterations (if known).
  // TODO: refine by looking at e.g. operation count, alignment, etc.
  // TODO: the trip count is really an unsigned entity, provided the guarding test
  //       is satisfied; deal with this more carefully later.
  uint32_t max_peel = MaxNumberPeeled();
  if (vector_length_ == 0) {
    return false;  // nothing found
  } else if (trip_count < 0) {
    return false;  // guard against non-taken/large
  } else if ((0 < trip_count) && (trip_count < (vector_length_ + max_peel))) {
    return false;  // insufficient iterations
  }
  return true;
}
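// For illustration of the heuristic above: with vector_length_ == 4 and a
// dynamic peeling candidate (max_peel == 3), any known trip count below 7 is
// rejected, since it cannot cover the worst-case peel plus one full vector
// iteration.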

//
// Helpers.
//

bool HLoopOptimization::TrySetPhiInduction(HPhi* phi, bool restrict_uses) {
  // Start with empty phi induction.
  iset_->clear();

  // Special case Phis that have an equivalent in a debuggable setup. Our graph checker isn't
  // smart enough to follow strongly connected components (and it's probably not worth
  // it to make it so). See b/33775412.
  if (graph_->IsDebuggable() && phi->HasEquivalentPhi()) {
    return false;
  }

  // Lookup phi induction cycle.
  ArenaSet<HInstruction*>* set = induction_range_.LookupCycle(phi);
  if (set != nullptr) {
    for (HInstruction* i : *set) {
      // Check that, other than instructions that are no longer in the graph (removed earlier),
      // each instruction is removable and, when restricted uses are requested, that all uses
      // of instructions other than the phi itself are contained within the cycle.
      if (!i->IsInBlock()) {
        continue;
      } else if (!i->IsRemovable()) {
        return false;
      } else if (i != phi && restrict_uses) {
        // Deal with regular uses.
        for (const HUseListNode<HInstruction*>& use : i->GetUses()) {
          if (set->find(use.GetUser()) == set->end()) {
            return false;
          }
        }
      }
      iset_->insert(i);  // copy
    }
    return true;
  }
  return false;
}

bool HLoopOptimization::TrySetPhiReduction(HPhi* phi) {
  DCHECK(iset_->empty());
  // Only unclassified phi cycles are candidates for reductions.
  if (induction_range_.IsClassified(phi)) {
    return false;
  }
  // Accept operations like x = x + .., provided that the phi and the reduction are
  // used exactly once inside the loop, and by each other.
  HInputsRef inputs = phi->GetInputs();
  if (inputs.size() == 2) {
    HInstruction* reduction = inputs[1];
    if (HasReductionFormat(reduction, phi)) {
      HLoopInformation* loop_info = phi->GetBlock()->GetLoopInformation();
      uint32_t use_count = 0;
      bool single_use_inside_loop =
          // Reduction update only used by phi.
          reduction->GetUses().HasExactlyOneElement() &&
          !reduction->HasEnvironmentUses() &&
          // Reduction update is only use of phi inside the loop.
          IsOnlyUsedAfterLoop(loop_info, phi, /*collect_loop_uses*/ true, &use_count) &&
          iset_->size() == 1;
      iset_->clear();  // leave it the way you found it
      if (single_use_inside_loop) {
        // Link reduction back, and start recording feed value.
        reductions_->Put(reduction, phi);
        reductions_->Put(phi, phi->InputAt(0));
        return true;
      }
    }
  }
  return false;
}

bool HLoopOptimization::TrySetSimpleLoopHeader(HBasicBlock* block, /*out*/ HPhi** main_phi) {
  // Start with empty phi induction and reductions.
  iset_->clear();
  reductions_->clear();

  // Scan the phis to find the following (the induction structure has already
  // been optimized, so we don't need to worry about trivial cases):
  // (1) optional reductions in loop,
  // (2) the main induction, used in loop control.
  HPhi* phi = nullptr;
  for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
    if (TrySetPhiReduction(it.Current()->AsPhi())) {
      continue;
    } else if (phi == nullptr) {
      // Found the first candidate for main induction.
      phi = it.Current()->AsPhi();
    } else {
      return false;
    }
  }

  // Then test for a typical loop header:
  //   s:  SuspendCheck
  //   c:  Condition(phi, bound)
  //   i:  If(c)
  if (phi != nullptr && TrySetPhiInduction(phi, /*restrict_uses*/ false)) {
    HInstruction* s = block->GetFirstInstruction();
    if (s != nullptr && s->IsSuspendCheck()) {
      HInstruction* c = s->GetNext();
      if (c != nullptr &&
          c->IsCondition() &&
          c->GetUses().HasExactlyOneElement() &&  // only used for termination
          !c->HasEnvironmentUses()) {  // unlikely, but not impossible
        HInstruction* i = c->GetNext();
        if (i != nullptr && i->IsIf() && i->InputAt(0) == c) {
          iset_->insert(c);
          iset_->insert(s);
          *main_phi = phi;
          return true;
        }
      }
    }
  }
  return false;
}

bool HLoopOptimization::IsEmptyBody(HBasicBlock* block) {
  if (!block->GetPhis().IsEmpty()) {
    return false;
  }
  for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
    HInstruction* instruction = it.Current();
    if (!instruction->IsGoto() && iset_->find(instruction) == iset_->end()) {
      return false;
    }
  }
  return true;
}

bool HLoopOptimization::IsUsedOutsideLoop(HLoopInformation* loop_info,
                                          HInstruction* instruction) {
  // Deal with regular uses.
  for (const HUseListNode<HInstruction*>& use : instruction->GetUses()) {
    if (use.GetUser()->GetBlock()->GetLoopInformation() != loop_info) {
      return true;
    }
  }
  return false;
}

bool HLoopOptimization::IsOnlyUsedAfterLoop(HLoopInformation* loop_info,
                                            HInstruction* instruction,
                                            bool collect_loop_uses,
                                            /*out*/ uint32_t* use_count) {
  // Deal with regular uses.
  for (const HUseListNode<HInstruction*>& use : instruction->GetUses()) {
    HInstruction* user = use.GetUser();
    if (iset_->find(user) == iset_->end()) {  // not excluded?
      HLoopInformation* other_loop_info = user->GetBlock()->GetLoopInformation();
      if (other_loop_info != nullptr && other_loop_info->IsIn(*loop_info)) {
        // If collect_loop_uses is set, simply keep adding those uses to the set.
        // Otherwise, reject uses inside the loop that were not already in the set.
        if (collect_loop_uses) {
          iset_->insert(user);
          continue;
        }
        return false;
      }
      ++*use_count;
    }
  }
  return true;
}

bool HLoopOptimization::TryReplaceWithLastValue(HLoopInformation* loop_info,
                                                HInstruction* instruction,
                                                HBasicBlock* block) {
  // Try to replace outside uses with the last value.
  if (induction_range_.CanGenerateLastValue(instruction)) {
    HInstruction* replacement = induction_range_.GenerateLastValue(instruction, graph_, block);
    // Deal with regular uses.
    const HUseList<HInstruction*>& uses = instruction->GetUses();
    for (auto it = uses.begin(), end = uses.end(); it != end;) {
      HInstruction* user = it->GetUser();
      size_t index = it->GetIndex();
      ++it;  // increment before replacing
      if (iset_->find(user) == iset_->end()) {  // not excluded?
        if (kIsDebugBuild) {
          // We have checked earlier in 'IsOnlyUsedAfterLoop' that the use is after the loop.
          HLoopInformation* other_loop_info = user->GetBlock()->GetLoopInformation();
          CHECK(other_loop_info == nullptr || !other_loop_info->IsIn(*loop_info));
        }
        user->ReplaceInput(replacement, index);
        induction_range_.Replace(user, instruction, replacement);  // update induction
      }
    }
    // Deal with environment uses.
    const HUseList<HEnvironment*>& env_uses = instruction->GetEnvUses();
    for (auto it = env_uses.begin(), end = env_uses.end(); it != end;) {
      HEnvironment* user = it->GetUser();
      size_t index = it->GetIndex();
      ++it;  // increment before replacing
      if (iset_->find(user->GetHolder()) == iset_->end()) {  // not excluded?
        // Only update environment uses after the loop.
        HLoopInformation* other_loop_info = user->GetHolder()->GetBlock()->GetLoopInformation();
        if (other_loop_info == nullptr || !other_loop_info->IsIn(*loop_info)) {
          user->RemoveAsUserOfInput(index);
          user->SetRawEnvAt(index, replacement);
          replacement->AddEnvUseAt(user, index);
        }
      }
    }
    return true;
  }
  return false;
}

bool HLoopOptimization::TryAssignLastValue(HLoopInformation* loop_info,
                                           HInstruction* instruction,
                                           HBasicBlock* block,
                                           bool collect_loop_uses) {
  // Assigning the last value is always successful if there are no uses.
  // Otherwise, it succeeds in a loop with no early exits by generating the
  // proper last value assignment.
  uint32_t use_count = 0;
  return IsOnlyUsedAfterLoop(loop_info, instruction, collect_loop_uses, &use_count) &&
      (use_count == 0 ||
       (!IsEarlyExit(loop_info) && TryReplaceWithLastValue(loop_info, instruction, block)));
}

void HLoopOptimization::RemoveDeadInstructions(const HInstructionList& list) {
  for (HBackwardInstructionIterator i(list); !i.Done(); i.Advance()) {
    HInstruction* instruction = i.Current();
    if (instruction->IsDeadAndRemovable()) {
      simplified_ = true;
      instruction->GetBlock()->RemoveInstructionOrPhi(instruction);
    }
  }
}

bool HLoopOptimization::CanRemoveCycle() {
  for (HInstruction* i : *iset_) {
    // We can never remove instructions that have environment
    // uses when we compile 'debuggable'.
    if (i->HasEnvironmentUses() && graph_->IsDebuggable()) {
      return false;
    }
    // A deoptimization should never have an environment input removed.
    for (const HUseListNode<HEnvironment*>& use : i->GetEnvUses()) {
      if (use.GetUser()->GetHolder()->IsDeoptimize()) {
        return false;
      }
    }
  }
  return true;
}

}  // namespace art