/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <memory>
#include <vector>

#include "arch/instruction_set.h"
#include "base/runtime_debug.h"
#include "cfi_test.h"
#include "driver/compiler_options.h"
#include "gtest/gtest.h"
#include "optimizing/code_generator.h"
#include "optimizing/optimizing_unit_test.h"
#include "read_barrier_config.h"
#include "utils/arm/assembler_arm_vixl.h"
#include "utils/assembler.h"

#include "optimizing/optimizing_cfi_test_expected.inc"

namespace vixl32 = vixl::aarch32;

namespace art {

// Run the tests only on host.
#ifndef ART_TARGET_ANDROID

class OptimizingCFITest : public CFITest, public OptimizingUnitTestHelper {
 public:
  // Enable this flag to generate the expected outputs.
  static constexpr bool kGenerateExpected = false;

  OptimizingCFITest()
      : graph_(nullptr),
        code_gen_(),
        blocks_(GetAllocator()->Adapter()) {}

  void SetUpFrame(InstructionSet isa) {
    OverrideInstructionSetFeatures(isa, "default");

    // Ensure that slow-debug is off, so that there is no unexpected read-barrier check emitted.
    SetRuntimeDebugFlagsEnabled(false);

    // Set up a simple context.
    graph_ = CreateGraph();
    // Generate a simple frame with some spills.
    code_gen_ = CodeGenerator::Create(graph_, *compiler_options_);
    code_gen_->GetAssembler()->cfi().SetEnabled(true);
    code_gen_->InitializeCodeGenerationData();
    const int frame_size = 64;
    int core_reg = 0;
    int fp_reg = 0;
    for (int i = 0; i < 2; i++) {  // Two registers of each kind.
      for (; core_reg < 32; core_reg++) {
        if (code_gen_->IsCoreCalleeSaveRegister(core_reg)) {
          auto location = Location::RegisterLocation(core_reg);
          code_gen_->AddAllocatedRegister(location);
          core_reg++;
          break;
        }
      }
      for (; fp_reg < 32; fp_reg++) {
        if (code_gen_->IsFloatingPointCalleeSaveRegister(fp_reg)) {
          auto location = Location::FpuRegisterLocation(fp_reg);
          code_gen_->AddAllocatedRegister(location);
          fp_reg++;
          break;
        }
      }
    }
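    // With two core and two FP callee-save registers marked as allocated, compute the spill
    // masks and emit the frame entry; its CFI output (together with the frame exit emitted in
    // Finish()) is what Check() compares against the expected CFI.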
    code_gen_->block_order_ = &blocks_;
    code_gen_->ComputeSpillMask();
    code_gen_->SetFrameSize(frame_size);
    code_gen_->GenerateFrameEntry();
  }

  void Finish() {
    code_gen_->GenerateFrameExit();
    code_gen_->Finalize(&code_allocator_);
  }

  void Check(InstructionSet isa,
             const char* isa_str,
             const std::vector<uint8_t>& expected_asm,
             const std::vector<uint8_t>& expected_cfi) {
    // Get the outputs.
    ArrayRef<const uint8_t> actual_asm = code_allocator_.GetMemory();
    Assembler* opt_asm = code_gen_->GetAssembler();
    ArrayRef<const uint8_t> actual_cfi(*(opt_asm->cfi().data()));

    if (kGenerateExpected) {
      GenerateExpected(stdout, isa, isa_str, actual_asm, actual_cfi);
    } else {
      EXPECT_EQ(ArrayRef<const uint8_t>(expected_asm), actual_asm);
      EXPECT_EQ(ArrayRef<const uint8_t>(expected_cfi), actual_cfi);
    }
  }

  void TestImpl(InstructionSet isa,
                const char* isa_str,
                const std::vector<uint8_t>& expected_asm,
                const std::vector<uint8_t>& expected_cfi) {
    SetUpFrame(isa);
    Finish();
    Check(isa, isa_str, expected_asm, expected_cfi);
  }

  CodeGenerator* GetCodeGenerator() {
    return code_gen_.get();
  }

 private:
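  // Minimal CodeAllocator that keeps the finalized code in a std::vector so that Check() can
  // compare it against the expected assembly.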
  class InternalCodeAllocator : public CodeAllocator {
   public:
    InternalCodeAllocator() {}

    uint8_t* Allocate(size_t size) override {
      memory_.resize(size);
      return memory_.data();
    }

    ArrayRef<const uint8_t> GetMemory() const override { return ArrayRef<const uint8_t>(memory_); }

   private:
    std::vector<uint8_t> memory_;

    DISALLOW_COPY_AND_ASSIGN(InternalCodeAllocator);
  };

  HGraph* graph_;
  std::unique_ptr<CodeGenerator> code_gen_;
  ArenaVector<HBasicBlock*> blocks_;
  InternalCodeAllocator code_allocator_;
};

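// Instantiates a test for the given ISA that compares the generated frame entry/exit code and
// its CFI against the expected_asm_<isa> and expected_cfi_<isa> arrays from
// optimizing_cfi_test_expected.inc.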
#define TEST_ISA(isa)                                                 \
  TEST_F(OptimizingCFITest, isa) {                                    \
    std::vector<uint8_t> expected_asm(                                \
        expected_asm_##isa,                                           \
        expected_asm_##isa + arraysize(expected_asm_##isa));          \
    std::vector<uint8_t> expected_cfi(                                \
        expected_cfi_##isa,                                           \
        expected_cfi_##isa + arraysize(expected_cfi_##isa));          \
    TestImpl(InstructionSet::isa, #isa, expected_asm, expected_cfi);  \
  }

#ifdef ART_ENABLE_CODEGEN_arm
TEST_ISA(kThumb2)
#endif

#ifdef ART_ENABLE_CODEGEN_arm64
// Run the tests for ARM64 only with Baker read barriers, as the
// expected generated code saves and restores X21 and X22 (instead of
// X20 and X21), because X20 is used as the Marking Register in the
// Baker read barrier configuration and, as such, is removed from the
// set of callee-save registers in the ARM64 code generator of the
// Optimizing compiler.
#if defined(USE_READ_BARRIER) && defined(USE_BAKER_READ_BARRIER)
TEST_ISA(kArm64)
#endif
#endif

#ifdef ART_ENABLE_CODEGEN_x86
TEST_ISA(kX86)
#endif

#ifdef ART_ENABLE_CODEGEN_x86_64
TEST_ISA(kX86_64)
#endif

#ifdef ART_ENABLE_CODEGEN_arm
TEST_F(OptimizingCFITest, kThumb2Adjust) {
  using vixl32::r0;
  std::vector<uint8_t> expected_asm(
      expected_asm_kThumb2_adjust,
      expected_asm_kThumb2_adjust + arraysize(expected_asm_kThumb2_adjust));
  std::vector<uint8_t> expected_cfi(
      expected_cfi_kThumb2_adjust,
      expected_cfi_kThumb2_adjust + arraysize(expected_cfi_kThumb2_adjust));
  SetUpFrame(InstructionSet::kThumb2);
#define __ down_cast<arm::ArmVIXLAssembler*>(GetCodeGenerator() \
    ->GetAssembler())->GetVIXLAssembler()->
  vixl32::Label target;
  __ CompareAndBranchIfZero(r0, &target);
  // Push the target out of range of CBZ.
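  // CBZ can only branch forward up to 126 bytes; 65 loads (each presumably using the 16-bit
  // LDR encoding, ~130 bytes) should exceed that range, forcing the assembler to rewrite the
  // branch and adjust the emitted CFI accordingly.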
  for (size_t i = 0; i != 65; ++i) {
    __ Ldr(r0, vixl32::MemOperand(r0));
  }
  __ Bind(&target);
#undef __
  Finish();
  Check(InstructionSet::kThumb2, "kThumb2_adjust", expected_asm, expected_cfi);
}
#endif

#endif  // ART_TARGET_ANDROID

}  // namespace art