/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "code_generator.h"

#ifdef ART_ENABLE_CODEGEN_arm
#include "code_generator_arm_vixl.h"
#endif

#ifdef ART_ENABLE_CODEGEN_arm64
#include "code_generator_arm64.h"
#endif

#ifdef ART_ENABLE_CODEGEN_x86
#include "code_generator_x86.h"
#endif

#ifdef ART_ENABLE_CODEGEN_x86_64
#include "code_generator_x86_64.h"
#endif

#include "art_method-inl.h"
#include "base/bit_utils.h"
#include "base/bit_utils_iterator.h"
#include "base/casts.h"
#include "base/leb128.h"
#include "class_linker.h"
#include "compiled_method.h"
#include "dex/bytecode_utils.h"
#include "dex/code_item_accessors-inl.h"
#include "dex/verified_method.h"
#include "graph_visualizer.h"
#include "image.h"
#include "gc/space/image_space.h"
#include "intern_table.h"
#include "intrinsics.h"
#include "mirror/array-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/object_reference.h"
#include "mirror/reference.h"
#include "mirror/string.h"
#include "parallel_move_resolver.h"
#include "scoped_thread_state_change-inl.h"
#include "ssa_liveness_analysis.h"
#include "stack_map.h"
#include "stack_map_stream.h"
#include "string_builder_append.h"
#include "thread-current-inl.h"
#include "utils/assembler.h"

namespace art {

// Return whether a location is consistent with a type.
static bool CheckType(DataType::Type type, Location location) {
  if (location.IsFpuRegister()
      || (location.IsUnallocated() && (location.GetPolicy() == Location::kRequiresFpuRegister))) {
    return (type == DataType::Type::kFloat32) || (type == DataType::Type::kFloat64);
  } else if (location.IsRegister() ||
             (location.IsUnallocated() && (location.GetPolicy() == Location::kRequiresRegister))) {
    return DataType::IsIntegralType(type) || (type == DataType::Type::kReference);
  } else if (location.IsRegisterPair()) {
    return type == DataType::Type::kInt64;
  } else if (location.IsFpuRegisterPair()) {
    return type == DataType::Type::kFloat64;
  } else if (location.IsStackSlot()) {
    return (DataType::IsIntegralType(type) && type != DataType::Type::kInt64)
           || (type == DataType::Type::kFloat32)
           || (type == DataType::Type::kReference);
  } else if (location.IsDoubleStackSlot()) {
    return (type == DataType::Type::kInt64) || (type == DataType::Type::kFloat64);
  } else if (location.IsConstant()) {
    if (location.GetConstant()->IsIntConstant()) {
      return DataType::IsIntegralType(type) && (type != DataType::Type::kInt64);
    } else if (location.GetConstant()->IsNullConstant()) {
      return type == DataType::Type::kReference;
    } else if (location.GetConstant()->IsLongConstant()) {
      return type == DataType::Type::kInt64;
    } else if (location.GetConstant()->IsFloatConstant()) {
      return type == DataType::Type::kFloat32;
    } else {
      return location.GetConstant()->IsDoubleConstant()
             && (type == DataType::Type::kFloat64);
    }
  } else {
    return location.IsInvalid() || (location.GetPolicy() == Location::kAny);
  }
}

// Check that a location summary is consistent with an instruction.
static bool CheckTypeConsistency(HInstruction* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  if (locations == nullptr) {
    return true;
  }

  if (locations->Out().IsUnallocated()
      && (locations->Out().GetPolicy() == Location::kSameAsFirstInput)) {
    DCHECK(CheckType(instruction->GetType(), locations->InAt(0)))
        << instruction->GetType()
        << " " << locations->InAt(0);
  } else {
    DCHECK(CheckType(instruction->GetType(), locations->Out()))
        << instruction->GetType()
        << " " << locations->Out();
  }

  HConstInputsRef inputs = instruction->GetInputs();
  for (size_t i = 0; i < inputs.size(); ++i) {
    DCHECK(CheckType(inputs[i]->GetType(), locations->InAt(i)))
        << inputs[i]->GetType() << " " << locations->InAt(i);
  }

  HEnvironment* environment = instruction->GetEnvironment();
  for (size_t i = 0; i < instruction->EnvironmentSize(); ++i) {
    if (environment->GetInstructionAt(i) != nullptr) {
      DataType::Type type = environment->GetInstructionAt(i)->GetType();
      DCHECK(CheckType(type, environment->GetLocationAt(i)))
          << type << " " << environment->GetLocationAt(i);
    } else {
      DCHECK(environment->GetLocationAt(i).IsInvalid())
          << environment->GetLocationAt(i);
    }
  }
  return true;
}

class CodeGenerator::CodeGenerationData : public DeletableArenaObject<kArenaAllocCodeGenerator> {
 public:
  static std::unique_ptr<CodeGenerationData> Create(ArenaStack* arena_stack,
                                                    InstructionSet instruction_set) {
    ScopedArenaAllocator allocator(arena_stack);
    void* memory = allocator.Alloc<CodeGenerationData>(kArenaAllocCodeGenerator);
    return std::unique_ptr<CodeGenerationData>(
        ::new (memory) CodeGenerationData(std::move(allocator), instruction_set));
  }

  ScopedArenaAllocator* GetScopedAllocator() {
    return &allocator_;
  }

  void AddSlowPath(SlowPathCode* slow_path) {
    slow_paths_.emplace_back(std::unique_ptr<SlowPathCode>(slow_path));
  }

  ArrayRef<const std::unique_ptr<SlowPathCode>> GetSlowPaths() const {
    return ArrayRef<const std::unique_ptr<SlowPathCode>>(slow_paths_);
  }

  StackMapStream* GetStackMapStream() { return &stack_map_stream_; }

  void ReserveJitStringRoot(StringReference string_reference, Handle<mirror::String> string) {
    jit_string_roots_.Overwrite(string_reference,
                                reinterpret_cast64<uint64_t>(string.GetReference()));
  }

  uint64_t GetJitStringRootIndex(StringReference string_reference) const {
    return jit_string_roots_.Get(string_reference);
  }

  size_t GetNumberOfJitStringRoots() const {
    return jit_string_roots_.size();
  }

  void ReserveJitClassRoot(TypeReference type_reference, Handle<mirror::Class> klass) {
    jit_class_roots_.Overwrite(type_reference, reinterpret_cast64<uint64_t>(klass.GetReference()));
  }

  uint64_t GetJitClassRootIndex(TypeReference type_reference) const {
    return jit_class_roots_.Get(type_reference);
  }

  size_t GetNumberOfJitClassRoots() const {
    return jit_class_roots_.size();
  }

  size_t GetNumberOfJitRoots() const {
    return GetNumberOfJitStringRoots() + GetNumberOfJitClassRoots();
  }

  void EmitJitRoots(/*out*/std::vector<Handle<mirror::Object>>* roots)
      REQUIRES_SHARED(Locks::mutator_lock_);

 private:
  CodeGenerationData(ScopedArenaAllocator&& allocator, InstructionSet instruction_set)
      : allocator_(std::move(allocator)),
        stack_map_stream_(&allocator_, instruction_set),
        slow_paths_(allocator_.Adapter(kArenaAllocCodeGenerator)),
        jit_string_roots_(StringReferenceValueComparator(),
                          allocator_.Adapter(kArenaAllocCodeGenerator)),
        jit_class_roots_(TypeReferenceValueComparator(),
                         allocator_.Adapter(kArenaAllocCodeGenerator)) {
    slow_paths_.reserve(kDefaultSlowPathsCapacity);
  }

  static constexpr size_t kDefaultSlowPathsCapacity = 8;

  ScopedArenaAllocator allocator_;
  StackMapStream stack_map_stream_;
  ScopedArenaVector<std::unique_ptr<SlowPathCode>> slow_paths_;

  // Maps a StringReference (dex_file, string_index) to the index in the literal table.
  // Entries are initially added with a pointer in the handle zone, and `EmitJitRoots`
  // will compute all the indices.
  ScopedArenaSafeMap<StringReference, uint64_t, StringReferenceValueComparator> jit_string_roots_;

  // Maps a ClassReference (dex_file, type_index) to the index in the literal table.
  // Entries are initially added with a pointer in the handle zone, and `EmitJitRoots`
  // will compute all the indices.
  ScopedArenaSafeMap<TypeReference, uint64_t, TypeReferenceValueComparator> jit_class_roots_;
};

void CodeGenerator::CodeGenerationData::EmitJitRoots(
    /*out*/std::vector<Handle<mirror::Object>>* roots) {
  DCHECK(roots->empty());
  roots->reserve(GetNumberOfJitRoots());
  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
  size_t index = 0;
  for (auto& entry : jit_string_roots_) {
    // Update the `roots` with the string, and replace the address temporarily
    // stored to the index in the table.
    uint64_t address = entry.second;
    roots->emplace_back(reinterpret_cast<StackReference<mirror::Object>*>(address));
    DCHECK(roots->back() != nullptr);
    DCHECK(roots->back()->IsString());
    entry.second = index;
    // Ensure the string is strongly interned. This is a requirement on how the JIT
    // handles strings. b/32995596
    class_linker->GetInternTable()->InternStrong(roots->back()->AsString());
    ++index;
  }
  for (auto& entry : jit_class_roots_) {
    // Update the `roots` with the class, and replace the address temporarily
    // stored to the index in the table.
    uint64_t address = entry.second;
    roots->emplace_back(reinterpret_cast<StackReference<mirror::Object>*>(address));
    DCHECK(roots->back() != nullptr);
    DCHECK(roots->back()->IsClass());
    entry.second = index;
    ++index;
  }
}

ScopedArenaAllocator* CodeGenerator::GetScopedAllocator() {
  DCHECK(code_generation_data_ != nullptr);
  return code_generation_data_->GetScopedAllocator();
}

StackMapStream* CodeGenerator::GetStackMapStream() {
  DCHECK(code_generation_data_ != nullptr);
  return code_generation_data_->GetStackMapStream();
}

void CodeGenerator::ReserveJitStringRoot(StringReference string_reference,
                                         Handle<mirror::String> string) {
  DCHECK(code_generation_data_ != nullptr);
  code_generation_data_->ReserveJitStringRoot(string_reference, string);
}

uint64_t CodeGenerator::GetJitStringRootIndex(StringReference string_reference) {
  DCHECK(code_generation_data_ != nullptr);
  return code_generation_data_->GetJitStringRootIndex(string_reference);
}

void CodeGenerator::ReserveJitClassRoot(TypeReference type_reference, Handle<mirror::Class> klass) {
  DCHECK(code_generation_data_ != nullptr);
  code_generation_data_->ReserveJitClassRoot(type_reference, klass);
}

uint64_t CodeGenerator::GetJitClassRootIndex(TypeReference type_reference) {
  DCHECK(code_generation_data_ != nullptr);
  return code_generation_data_->GetJitClassRootIndex(type_reference);
}

void CodeGenerator::EmitJitRootPatches(uint8_t* code ATTRIBUTE_UNUSED,
                                       const uint8_t* roots_data ATTRIBUTE_UNUSED) {
  DCHECK(code_generation_data_ != nullptr);
  DCHECK_EQ(code_generation_data_->GetNumberOfJitStringRoots(), 0u);
  DCHECK_EQ(code_generation_data_->GetNumberOfJitClassRoots(), 0u);
}

uint32_t CodeGenerator::GetArrayLengthOffset(HArrayLength* array_length) {
  return array_length->IsStringLength()
      ? mirror::String::CountOffset().Uint32Value()
      : mirror::Array::LengthOffset().Uint32Value();
}

uint32_t CodeGenerator::GetArrayDataOffset(HArrayGet* array_get) {
  DCHECK(array_get->GetType() == DataType::Type::kUint16 || !array_get->IsStringCharAt());
  return array_get->IsStringCharAt()
      ? mirror::String::ValueOffset().Uint32Value()
      : mirror::Array::DataOffset(DataType::Size(array_get->GetType())).Uint32Value();
}

bool CodeGenerator::GoesToNextBlock(HBasicBlock* current, HBasicBlock* next) const {
  DCHECK_EQ((*block_order_)[current_block_index_], current);
  return GetNextBlockToEmit() == FirstNonEmptyBlock(next);
}

HBasicBlock* CodeGenerator::GetNextBlockToEmit() const {
  for (size_t i = current_block_index_ + 1; i < block_order_->size(); ++i) {
    HBasicBlock* block = (*block_order_)[i];
    if (!block->IsSingleJump()) {
      return block;
    }
  }
  return nullptr;
}

HBasicBlock* CodeGenerator::FirstNonEmptyBlock(HBasicBlock* block) const {
  while (block->IsSingleJump()) {
    block = block->GetSuccessors()[0];
  }
  return block;
}

class DisassemblyScope {
 public:
  DisassemblyScope(HInstruction* instruction, const CodeGenerator& codegen)
      : codegen_(codegen), instruction_(instruction), start_offset_(static_cast<size_t>(-1)) {
    if (codegen_.GetDisassemblyInformation() != nullptr) {
      start_offset_ = codegen_.GetAssembler().CodeSize();
    }
  }

  ~DisassemblyScope() {
    // We avoid building this data when we know it will not be used.
    if (codegen_.GetDisassemblyInformation() != nullptr) {
      codegen_.GetDisassemblyInformation()->AddInstructionInterval(
          instruction_, start_offset_, codegen_.GetAssembler().CodeSize());
    }
  }

 private:
  const CodeGenerator& codegen_;
  HInstruction* instruction_;
  size_t start_offset_;
};

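// Emits the code for every recorded slow path after the last normally-executed
// block. Each slow path first records native debug info at its entry so that
// Java line numbers also map correctly inside the out-of-line code.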
void CodeGenerator::GenerateSlowPaths() {
  DCHECK(code_generation_data_ != nullptr);
  size_t code_start = 0;
  for (const std::unique_ptr<SlowPathCode>& slow_path_ptr : code_generation_data_->GetSlowPaths()) {
    SlowPathCode* slow_path = slow_path_ptr.get();
    current_slow_path_ = slow_path;
    if (disasm_info_ != nullptr) {
      code_start = GetAssembler()->CodeSize();
    }
    // Record the dex pc at the start of the slow path (required for Java line number mapping).
    MaybeRecordNativeDebugInfo(slow_path->GetInstruction(), slow_path->GetDexPc(), slow_path);
    slow_path->EmitNativeCode(this);
    if (disasm_info_ != nullptr) {
      disasm_info_->AddSlowPathInterval(slow_path, code_start, GetAssembler()->CodeSize());
    }
  }
  current_slow_path_ = nullptr;
}

void CodeGenerator::InitializeCodeGenerationData() {
  DCHECK(code_generation_data_ == nullptr);
  code_generation_data_ = CodeGenerationData::Create(graph_->GetArenaStack(), GetInstructionSet());
}

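// Drives code generation for the whole graph: emits the frame entry, then each
// non-empty block in `block_order_`, then the slow paths, and finally closes
// the method's stack map stream.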
void CodeGenerator::Compile(CodeAllocator* allocator) {
  InitializeCodeGenerationData();

  // The register allocator already called `InitializeCodeGeneration`,
  // where the frame size has been computed.
  DCHECK(block_order_ != nullptr);
  Initialize();

  HGraphVisitor* instruction_visitor = GetInstructionVisitor();
  DCHECK_EQ(current_block_index_, 0u);

  GetStackMapStream()->BeginMethod(HasEmptyFrame() ? 0 : frame_size_,
                                   core_spill_mask_,
                                   fpu_spill_mask_,
                                   GetGraph()->GetNumberOfVRegs(),
                                   GetGraph()->IsCompilingBaseline());

  size_t frame_start = GetAssembler()->CodeSize();
  GenerateFrameEntry();
  DCHECK_EQ(GetAssembler()->cfi().GetCurrentCFAOffset(), static_cast<int>(frame_size_));
  if (disasm_info_ != nullptr) {
    disasm_info_->SetFrameEntryInterval(frame_start, GetAssembler()->CodeSize());
  }

  for (size_t e = block_order_->size(); current_block_index_ < e; ++current_block_index_) {
    HBasicBlock* block = (*block_order_)[current_block_index_];
    // Don't generate code for an empty block. Its predecessors will branch to its successor
    // directly. Also, the label of that block will not be emitted, so this helps catch
    // errors where we reference that label.
    if (block->IsSingleJump()) continue;
    Bind(block);
    // This ensures that we have correct native line mapping for all native instructions.
    // It is necessary to make stepping over a statement work. Otherwise, any initial
    // instructions (e.g. moves) would be assumed to be the start of the next statement.
    MaybeRecordNativeDebugInfo(/* instruction= */ nullptr, block->GetDexPc());
    for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
      HInstruction* current = it.Current();
      if (current->HasEnvironment()) {
        // Create a stack map for HNativeDebugInfo or any instruction which calls native code.
        // Note that we need a correct mapping for the native PC of the call instruction,
        // so the runtime's stack map is not sufficient since it is at the PC after the call.
        MaybeRecordNativeDebugInfo(current, block->GetDexPc());
      }
      DisassemblyScope disassembly_scope(current, *this);
      DCHECK(CheckTypeConsistency(current));
      current->Accept(instruction_visitor);
    }
  }

  GenerateSlowPaths();

  // Emit catch stack maps at the end of the stack map stream as expected by the
  // runtime exception handler.
  if (graph_->HasTryCatch()) {
    RecordCatchBlockInfo();
  }

  // Finalize instructions in the assembler.
  Finalize(allocator);

  GetStackMapStream()->EndMethod();
}

void CodeGenerator::Finalize(CodeAllocator* allocator) {
  size_t code_size = GetAssembler()->CodeSize();
  uint8_t* buffer = allocator->Allocate(code_size);

  MemoryRegion code(buffer, code_size);
  GetAssembler()->FinalizeInstructions(code);
}

void CodeGenerator::EmitLinkerPatches(
    ArenaVector<linker::LinkerPatch>* linker_patches ATTRIBUTE_UNUSED) {
  // No linker patches by default.
}

bool CodeGenerator::NeedsThunkCode(const linker::LinkerPatch& patch ATTRIBUTE_UNUSED) const {
  // Code generators that create patches requiring thunk compilation should override this function.
  return false;
}

void CodeGenerator::EmitThunkCode(const linker::LinkerPatch& patch ATTRIBUTE_UNUSED,
                                  /*out*/ ArenaVector<uint8_t>* code ATTRIBUTE_UNUSED,
                                  /*out*/ std::string* debug_name ATTRIBUTE_UNUSED) {
  // Code generators that create patches requiring thunk compilation should override this function.
  LOG(FATAL) << "Unexpected call to EmitThunkCode().";
}

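// Computes the frame layout: the offset of the first slow-path register slot
// (past the outgoing arguments and the spill slots) and the total frame size.
// Leaf methods that allocate no spills and do not need the current method get
// an (almost) empty frame: only the return PC if the call instruction pushes it.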
void CodeGenerator::InitializeCodeGeneration(size_t number_of_spill_slots,
                                             size_t maximum_safepoint_spill_size,
                                             size_t number_of_out_slots,
                                             const ArenaVector<HBasicBlock*>& block_order) {
  block_order_ = &block_order;
  DCHECK(!block_order.empty());
  DCHECK(block_order[0] == GetGraph()->GetEntryBlock());
  ComputeSpillMask();
  first_register_slot_in_slow_path_ = RoundUp(
      (number_of_out_slots + number_of_spill_slots) * kVRegSize, GetPreferredSlotsAlignment());

  if (number_of_spill_slots == 0
      && !HasAllocatedCalleeSaveRegisters()
      && IsLeafMethod()
      && !RequiresCurrentMethod()) {
    DCHECK_EQ(maximum_safepoint_spill_size, 0u);
    SetFrameSize(CallPushesPC() ? GetWordSize() : 0);
  } else {
    SetFrameSize(RoundUp(
        first_register_slot_in_slow_path_
        + maximum_safepoint_spill_size
        + (GetGraph()->HasShouldDeoptimizeFlag() ? kShouldDeoptimizeFlagSize : 0)
        + FrameEntrySpillSize(),
        kStackAlignment));
  }
}

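// Pins an invoke's inputs and output to the locations dictated by the calling
// convention visitor, and reserves a temp (or input) for the method pointer
// depending on the method load kind.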
void CodeGenerator::CreateCommonInvokeLocationSummary(
    HInvoke* invoke, InvokeDexCallingConventionVisitor* visitor) {
  ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator();
  LocationSummary* locations = new (allocator) LocationSummary(invoke,
                                                               LocationSummary::kCallOnMainOnly);

  for (size_t i = 0; i < invoke->GetNumberOfArguments(); i++) {
    HInstruction* input = invoke->InputAt(i);
    locations->SetInAt(i, visitor->GetNextLocation(input->GetType()));
  }

  locations->SetOut(visitor->GetReturnLocation(invoke->GetType()));

  if (invoke->IsInvokeStaticOrDirect()) {
    HInvokeStaticOrDirect* call = invoke->AsInvokeStaticOrDirect();
    HInvokeStaticOrDirect::MethodLoadKind method_load_kind = call->GetMethodLoadKind();
    HInvokeStaticOrDirect::CodePtrLocation code_ptr_location = call->GetCodePtrLocation();
    if (code_ptr_location == HInvokeStaticOrDirect::CodePtrLocation::kCallCriticalNative) {
      locations->AddTemp(Location::RequiresRegister());  // For target method.
    }
    if (code_ptr_location == HInvokeStaticOrDirect::CodePtrLocation::kCallCriticalNative ||
        method_load_kind == HInvokeStaticOrDirect::MethodLoadKind::kRecursive) {
      // For `kCallCriticalNative` we need the current method as the hidden argument
      // if we reach the dlsym lookup stub for @CriticalNative.
      locations->SetInAt(call->GetCurrentMethodIndex(), visitor->GetMethodLocation());
    } else {
      locations->AddTemp(visitor->GetMethodLocation());
      if (method_load_kind == HInvokeStaticOrDirect::MethodLoadKind::kRuntimeCall) {
        locations->SetInAt(call->GetCurrentMethodIndex(), Location::RequiresRegister());
      }
    }
  } else if (!invoke->IsInvokePolymorphic()) {
    locations->AddTemp(visitor->GetMethodLocation());
  }
}

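// Collects the moves needed to shuffle arguments from the managed calling
// convention into the native @CriticalNative convention. Only stack arguments
// need to move here; register arguments were already allocated in their final
// locations.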
void CodeGenerator::PrepareCriticalNativeArgumentMoves(
    HInvokeStaticOrDirect* invoke,
    /*inout*/InvokeDexCallingConventionVisitor* visitor,
    /*out*/HParallelMove* parallel_move) {
  LocationSummary* locations = invoke->GetLocations();
  for (size_t i = 0, num = invoke->GetNumberOfArguments(); i != num; ++i) {
    Location in_location = locations->InAt(i);
    DataType::Type type = invoke->InputAt(i)->GetType();
    DCHECK_NE(type, DataType::Type::kReference);
    Location out_location = visitor->GetNextLocation(type);
    if (out_location.IsStackSlot() || out_location.IsDoubleStackSlot()) {
      // Stack arguments will need to be moved after adjusting the SP.
      parallel_move->AddMove(in_location, out_location, type, /*instruction=*/ nullptr);
    } else {
      // Register arguments should have been assigned their final locations for register allocation.
      DCHECK(out_location.Equals(in_location)) << in_location << " -> " << out_location;
    }
  }
}

void CodeGenerator::FinishCriticalNativeFrameSetup(size_t out_frame_size,
                                                   /*inout*/HParallelMove* parallel_move) {
  DCHECK_NE(out_frame_size, 0u);
  IncreaseFrame(out_frame_size);
  // Adjust the source stack offsets by `out_frame_size`, i.e. the additional
  // frame size needed for outgoing stack arguments.
  for (size_t i = 0, num = parallel_move->NumMoves(); i != num; ++i) {
    MoveOperands* operands = parallel_move->MoveOperandsAt(i);
    Location source = operands->GetSource();
    if (operands->GetSource().IsStackSlot()) {
      operands->SetSource(Location::StackSlot(source.GetStackIndex() + out_frame_size));
    } else if (operands->GetSource().IsDoubleStackSlot()) {
      operands->SetSource(Location::DoubleStackSlot(source.GetStackIndex() + out_frame_size));
    }
  }
  // Emit the moves.
  GetMoveResolver()->EmitNativeCode(parallel_move);
}

const char* CodeGenerator::GetCriticalNativeShorty(HInvokeStaticOrDirect* invoke,
                                                   uint32_t* shorty_len) {
  ScopedObjectAccess soa(Thread::Current());
  DCHECK(invoke->GetResolvedMethod()->IsCriticalNative());
  return invoke->GetResolvedMethod()->GetShorty(shorty_len);
}

void CodeGenerator::GenerateInvokeStaticOrDirectRuntimeCall(
    HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path) {
  MoveConstant(temp, invoke->GetDexMethodIndex());

  // The access check is unnecessary but we do not want to introduce
  // extra entrypoints for the codegens that do not support some
  // invoke type and fall back to the runtime call.

  // Initialize to anything to silence compiler warnings.
  QuickEntrypointEnum entrypoint = kQuickInvokeStaticTrampolineWithAccessCheck;
  switch (invoke->GetInvokeType()) {
    case kStatic:
      entrypoint = kQuickInvokeStaticTrampolineWithAccessCheck;
      break;
    case kDirect:
      entrypoint = kQuickInvokeDirectTrampolineWithAccessCheck;
      break;
    case kSuper:
      entrypoint = kQuickInvokeSuperTrampolineWithAccessCheck;
      break;
    case kVirtual:
    case kInterface:
    case kPolymorphic:
    case kCustom:
      LOG(FATAL) << "Unexpected invoke type: " << invoke->GetInvokeType();
      UNREACHABLE();
  }

  InvokeRuntime(entrypoint, invoke, invoke->GetDexPc(), slow_path);
}

void CodeGenerator::GenerateInvokeUnresolvedRuntimeCall(HInvokeUnresolved* invoke) {
  MoveConstant(invoke->GetLocations()->GetTemp(0), invoke->GetDexMethodIndex());

  // Initialize to anything to silence compiler warnings.
  QuickEntrypointEnum entrypoint = kQuickInvokeStaticTrampolineWithAccessCheck;
  switch (invoke->GetInvokeType()) {
    case kStatic:
      entrypoint = kQuickInvokeStaticTrampolineWithAccessCheck;
      break;
    case kDirect:
      entrypoint = kQuickInvokeDirectTrampolineWithAccessCheck;
      break;
    case kVirtual:
      entrypoint = kQuickInvokeVirtualTrampolineWithAccessCheck;
      break;
    case kSuper:
      entrypoint = kQuickInvokeSuperTrampolineWithAccessCheck;
      break;
    case kInterface:
      entrypoint = kQuickInvokeInterfaceTrampolineWithAccessCheck;
      break;
    case kPolymorphic:
    case kCustom:
      LOG(FATAL) << "Unexpected invoke type: " << invoke->GetInvokeType();
      UNREACHABLE();
  }
  InvokeRuntime(entrypoint, invoke, invoke->GetDexPc(), nullptr);
}

void CodeGenerator::GenerateInvokePolymorphicCall(HInvokePolymorphic* invoke) {
  // invoke-polymorphic does not use a temporary to convey any additional information (e.g. a
  // method index) since it requires multiple pieces of information from the instruction
  // (registers A, B, H). Not using the reservation has no effect on the registers used in the
  // runtime call.
  QuickEntrypointEnum entrypoint = kQuickInvokePolymorphic;
  InvokeRuntime(entrypoint, invoke, invoke->GetDexPc(), nullptr);
}

void CodeGenerator::GenerateInvokeCustomCall(HInvokeCustom* invoke) {
  MoveConstant(invoke->GetLocations()->GetTemp(0), invoke->GetCallSiteIndex());
  QuickEntrypointEnum entrypoint = kQuickInvokeCustom;
  InvokeRuntime(entrypoint, invoke, invoke->GetDexPc(), nullptr);
}

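// `GetFormat()` packs one StringBuilderAppend::Argument tag per argument,
// `kBitsPerArg` bits each, lowest bits first. This decodes the tags to assign
// each argument a stack slot in the outgoing-arguments area, directly after
// the ArtMethod* slot.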
void CodeGenerator::CreateStringBuilderAppendLocations(HStringBuilderAppend* instruction,
                                                       Location out) {
  ArenaAllocator* allocator = GetGraph()->GetAllocator();
  LocationSummary* locations =
      new (allocator) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
  locations->SetOut(out);
  locations->SetInAt(instruction->FormatIndex(),
                     Location::ConstantLocation(instruction->GetFormat()));

  uint32_t format = static_cast<uint32_t>(instruction->GetFormat()->GetValue());
  uint32_t f = format;
  PointerSize pointer_size = InstructionSetPointerSize(GetInstructionSet());
  size_t stack_offset = static_cast<size_t>(pointer_size);  // Start after the ArtMethod*.
  for (size_t i = 0, num_args = instruction->GetNumberOfArguments(); i != num_args; ++i) {
    StringBuilderAppend::Argument arg_type =
        static_cast<StringBuilderAppend::Argument>(f & StringBuilderAppend::kArgMask);
    switch (arg_type) {
      case StringBuilderAppend::Argument::kStringBuilder:
      case StringBuilderAppend::Argument::kString:
      case StringBuilderAppend::Argument::kCharArray:
        static_assert(sizeof(StackReference<mirror::Object>) == sizeof(uint32_t), "Size check.");
        FALLTHROUGH_INTENDED;
      case StringBuilderAppend::Argument::kBoolean:
      case StringBuilderAppend::Argument::kChar:
      case StringBuilderAppend::Argument::kInt:
      case StringBuilderAppend::Argument::kFloat:
        locations->SetInAt(i, Location::StackSlot(stack_offset));
        break;
      case StringBuilderAppend::Argument::kLong:
      case StringBuilderAppend::Argument::kDouble:
        stack_offset = RoundUp(stack_offset, sizeof(uint64_t));
        locations->SetInAt(i, Location::DoubleStackSlot(stack_offset));
        // Skip the low word, let the common code skip the high word.
        stack_offset += sizeof(uint32_t);
        break;
      default:
        LOG(FATAL) << "Unexpected arg format: 0x" << std::hex
            << (f & StringBuilderAppend::kArgMask) << " full format: 0x" << format;
        UNREACHABLE();
    }
    f >>= StringBuilderAppend::kBitsPerArg;
    stack_offset += sizeof(uint32_t);
  }
  DCHECK_EQ(f, 0u);

  size_t param_size = stack_offset - static_cast<size_t>(pointer_size);
  DCHECK_ALIGNED(param_size, kVRegSize);
  size_t num_vregs = param_size / kVRegSize;
  graph_->UpdateMaximumNumberOfOutVRegs(num_vregs);
}

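// Unresolved field accesses compile to runtime calls, so inputs, temps and
// outputs are pinned to the field-access calling convention instead of being
// left to the register allocator.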
void CodeGenerator::CreateUnresolvedFieldLocationSummary(
    HInstruction* field_access,
    DataType::Type field_type,
    const FieldAccessCallingConvention& calling_convention) {
  bool is_instance = field_access->IsUnresolvedInstanceFieldGet()
      || field_access->IsUnresolvedInstanceFieldSet();
  bool is_get = field_access->IsUnresolvedInstanceFieldGet()
      || field_access->IsUnresolvedStaticFieldGet();

  ArenaAllocator* allocator = field_access->GetBlock()->GetGraph()->GetAllocator();
  LocationSummary* locations =
      new (allocator) LocationSummary(field_access, LocationSummary::kCallOnMainOnly);

  locations->AddTemp(calling_convention.GetFieldIndexLocation());

  if (is_instance) {
    // Add the `this` object for instance field accesses.
    locations->SetInAt(0, calling_convention.GetObjectLocation());
  }

  // Note that pSetXXStatic/pGetXXStatic always takes/returns an int or int64
  // regardless of the type. Because of that, we are forced to special-case
  // the access to floating point values.
  if (is_get) {
    if (DataType::IsFloatingPointType(field_type)) {
      // The return value will be stored in regular registers while the
      // register allocator expects it in a floating point register.
      // Note: we don't need to request additional temps because the return
      // register(s) are already blocked due to the call and they may overlap
      // with the input or field index.
      // The transfer between the two will be done at codegen level.
      locations->SetOut(calling_convention.GetFpuLocation(field_type));
    } else {
      locations->SetOut(calling_convention.GetReturnLocation(field_type));
    }
  } else {
    size_t set_index = is_instance ? 1 : 0;
    if (DataType::IsFloatingPointType(field_type)) {
      // The set value comes from a float location while the calling convention
      // expects it in a regular register location. Allocate a temp for it and
      // make the transfer at codegen.
      AddLocationAsTemp(calling_convention.GetSetValueLocation(field_type, is_instance), locations);
      locations->SetInAt(set_index, calling_convention.GetFpuLocation(field_type));
    } else {
      locations->SetInAt(set_index,
                         calling_convention.GetSetValueLocation(field_type, is_instance));
    }
  }
}

void CodeGenerator::GenerateUnresolvedFieldAccess(
    HInstruction* field_access,
    DataType::Type field_type,
    uint32_t field_index,
    uint32_t dex_pc,
    const FieldAccessCallingConvention& calling_convention) {
  LocationSummary* locations = field_access->GetLocations();

  MoveConstant(locations->GetTemp(0), field_index);

  bool is_instance = field_access->IsUnresolvedInstanceFieldGet()
      || field_access->IsUnresolvedInstanceFieldSet();
  bool is_get = field_access->IsUnresolvedInstanceFieldGet()
      || field_access->IsUnresolvedStaticFieldGet();

  if (!is_get && DataType::IsFloatingPointType(field_type)) {
    // Copy the float value to be set into the calling convention register.
    // Note that using the temp location directly is problematic as we don't
    // support temp register pairs. To avoid boilerplate conversion code, use
    // the location from the calling convention.
    MoveLocation(calling_convention.GetSetValueLocation(field_type, is_instance),
                 locations->InAt(is_instance ? 1 : 0),
                 (DataType::Is64BitType(field_type) ? DataType::Type::kInt64
                                                    : DataType::Type::kInt32));
  }

  QuickEntrypointEnum entrypoint = kQuickSet8Static;  // Initialize to anything to avoid warnings.
  switch (field_type) {
    case DataType::Type::kBool:
      entrypoint = is_instance
          ? (is_get ? kQuickGetBooleanInstance : kQuickSet8Instance)
          : (is_get ? kQuickGetBooleanStatic : kQuickSet8Static);
      break;
    case DataType::Type::kInt8:
      entrypoint = is_instance
          ? (is_get ? kQuickGetByteInstance : kQuickSet8Instance)
          : (is_get ? kQuickGetByteStatic : kQuickSet8Static);
      break;
    case DataType::Type::kInt16:
      entrypoint = is_instance
          ? (is_get ? kQuickGetShortInstance : kQuickSet16Instance)
          : (is_get ? kQuickGetShortStatic : kQuickSet16Static);
      break;
    case DataType::Type::kUint16:
      entrypoint = is_instance
          ? (is_get ? kQuickGetCharInstance : kQuickSet16Instance)
          : (is_get ? kQuickGetCharStatic : kQuickSet16Static);
      break;
    case DataType::Type::kInt32:
    case DataType::Type::kFloat32:
      entrypoint = is_instance
          ? (is_get ? kQuickGet32Instance : kQuickSet32Instance)
          : (is_get ? kQuickGet32Static : kQuickSet32Static);
      break;
    case DataType::Type::kReference:
      entrypoint = is_instance
          ? (is_get ? kQuickGetObjInstance : kQuickSetObjInstance)
          : (is_get ? kQuickGetObjStatic : kQuickSetObjStatic);
      break;
    case DataType::Type::kInt64:
    case DataType::Type::kFloat64:
      entrypoint = is_instance
          ? (is_get ? kQuickGet64Instance : kQuickSet64Instance)
          : (is_get ? kQuickGet64Static : kQuickSet64Static);
      break;
    default:
      LOG(FATAL) << "Invalid type " << field_type;
  }
  InvokeRuntime(entrypoint, field_access, dex_pc, nullptr);

  if (is_get && DataType::IsFloatingPointType(field_type)) {
    MoveLocation(locations->Out(), calling_convention.GetReturnLocation(field_type), field_type);
  }
}

void CodeGenerator::CreateLoadClassRuntimeCallLocationSummary(HLoadClass* cls,
                                                              Location runtime_type_index_location,
                                                              Location runtime_return_location) {
  DCHECK_EQ(cls->GetLoadKind(), HLoadClass::LoadKind::kRuntimeCall);
  DCHECK_EQ(cls->InputCount(), 1u);
  LocationSummary* locations = new (cls->GetBlock()->GetGraph()->GetAllocator()) LocationSummary(
      cls, LocationSummary::kCallOnMainOnly);
  locations->SetInAt(0, Location::NoLocation());
  locations->AddTemp(runtime_type_index_location);
  locations->SetOut(runtime_return_location);
}

void CodeGenerator::GenerateLoadClassRuntimeCall(HLoadClass* cls) {
  DCHECK_EQ(cls->GetLoadKind(), HLoadClass::LoadKind::kRuntimeCall);
  DCHECK(!cls->MustGenerateClinitCheck());
  LocationSummary* locations = cls->GetLocations();
  MoveConstant(locations->GetTemp(0), cls->GetTypeIndex().index_);
  if (cls->NeedsAccessCheck()) {
    CheckEntrypointTypes<kQuickResolveTypeAndVerifyAccess, void*, uint32_t>();
    InvokeRuntime(kQuickResolveTypeAndVerifyAccess, cls, cls->GetDexPc());
  } else {
    CheckEntrypointTypes<kQuickResolveType, void*, uint32_t>();
    InvokeRuntime(kQuickResolveType, cls, cls->GetDexPc());
  }
}

void CodeGenerator::CreateLoadMethodHandleRuntimeCallLocationSummary(
    HLoadMethodHandle* method_handle,
    Location runtime_proto_index_location,
    Location runtime_return_location) {
  DCHECK_EQ(method_handle->InputCount(), 1u);
  LocationSummary* locations =
      new (method_handle->GetBlock()->GetGraph()->GetAllocator()) LocationSummary(
          method_handle, LocationSummary::kCallOnMainOnly);
  locations->SetInAt(0, Location::NoLocation());
  locations->AddTemp(runtime_proto_index_location);
  locations->SetOut(runtime_return_location);
}

void CodeGenerator::GenerateLoadMethodHandleRuntimeCall(HLoadMethodHandle* method_handle) {
  LocationSummary* locations = method_handle->GetLocations();
  MoveConstant(locations->GetTemp(0), method_handle->GetMethodHandleIndex());
  CheckEntrypointTypes<kQuickResolveMethodHandle, void*, uint32_t>();
  InvokeRuntime(kQuickResolveMethodHandle, method_handle, method_handle->GetDexPc());
}

void CodeGenerator::CreateLoadMethodTypeRuntimeCallLocationSummary(
    HLoadMethodType* method_type,
    Location runtime_proto_index_location,
    Location runtime_return_location) {
  DCHECK_EQ(method_type->InputCount(), 1u);
  LocationSummary* locations =
      new (method_type->GetBlock()->GetGraph()->GetAllocator()) LocationSummary(
          method_type, LocationSummary::kCallOnMainOnly);
  locations->SetInAt(0, Location::NoLocation());
  locations->AddTemp(runtime_proto_index_location);
  locations->SetOut(runtime_return_location);
}

void CodeGenerator::GenerateLoadMethodTypeRuntimeCall(HLoadMethodType* method_type) {
  LocationSummary* locations = method_type->GetLocations();
  MoveConstant(locations->GetTemp(0), method_type->GetProtoIndex().index_);
  CheckEntrypointTypes<kQuickResolveMethodType, void*, uint32_t>();
  InvokeRuntime(kQuickResolveMethodType, method_type, method_type->GetDexPc());
}

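// Returns the offset of `object` relative to the start of the first boot image
// space; with multiple boot image files the spaces are expected to be mapped
// contiguously, so a single offset identifies the object.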
static uint32_t GetBootImageOffsetImpl(const void* object, ImageHeader::ImageSections section) {
  Runtime* runtime = Runtime::Current();
  DCHECK(runtime->IsAotCompiler());
  const std::vector<gc::space::ImageSpace*>& boot_image_spaces =
      runtime->GetHeap()->GetBootImageSpaces();
  // Check that the `object` is in the expected section of one of the boot image files.
  DCHECK(std::any_of(boot_image_spaces.begin(),
                     boot_image_spaces.end(),
                     [object, section](gc::space::ImageSpace* space) {
                       uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
                       uintptr_t offset = reinterpret_cast<uintptr_t>(object) - begin;
                       return space->GetImageHeader().GetImageSection(section).Contains(offset);
                     }));
  uintptr_t begin = reinterpret_cast<uintptr_t>(boot_image_spaces.front()->Begin());
  uintptr_t offset = reinterpret_cast<uintptr_t>(object) - begin;
  return dchecked_integral_cast<uint32_t>(offset);
}

// NO_THREAD_SAFETY_ANALYSIS: Avoid taking the mutator lock, boot image classes are non-moveable.
uint32_t CodeGenerator::GetBootImageOffset(HLoadClass* load_class) NO_THREAD_SAFETY_ANALYSIS {
  DCHECK_EQ(load_class->GetLoadKind(), HLoadClass::LoadKind::kBootImageRelRo);
  ObjPtr<mirror::Class> klass = load_class->GetClass().Get();
  DCHECK(klass != nullptr);
  return GetBootImageOffsetImpl(klass.Ptr(), ImageHeader::kSectionObjects);
}

// NO_THREAD_SAFETY_ANALYSIS: Avoid taking the mutator lock, boot image strings are non-moveable.
uint32_t CodeGenerator::GetBootImageOffset(HLoadString* load_string) NO_THREAD_SAFETY_ANALYSIS {
  DCHECK_EQ(load_string->GetLoadKind(), HLoadString::LoadKind::kBootImageRelRo);
  ObjPtr<mirror::String> string = load_string->GetString().Get();
  DCHECK(string != nullptr);
  return GetBootImageOffsetImpl(string.Ptr(), ImageHeader::kSectionObjects);
}

uint32_t CodeGenerator::GetBootImageOffset(HInvokeStaticOrDirect* invoke) {
  DCHECK_EQ(invoke->GetMethodLoadKind(), HInvokeStaticOrDirect::MethodLoadKind::kBootImageRelRo);
  ArtMethod* method = invoke->GetResolvedMethod();
  DCHECK(method != nullptr);
  return GetBootImageOffsetImpl(method, ImageHeader::kSectionArtMethods);
}

void CodeGenerator::BlockIfInRegister(Location location, bool is_out) const {
  // The DCHECKs below check that a register is not specified twice in
  // the summary. The out location can overlap with an input, so we need
  // to special case it.
  if (location.IsRegister()) {
    DCHECK(is_out || !blocked_core_registers_[location.reg()]);
    blocked_core_registers_[location.reg()] = true;
  } else if (location.IsFpuRegister()) {
    DCHECK(is_out || !blocked_fpu_registers_[location.reg()]);
    blocked_fpu_registers_[location.reg()] = true;
  } else if (location.IsFpuRegisterPair()) {
    DCHECK(is_out || !blocked_fpu_registers_[location.AsFpuRegisterPairLow<int>()]);
    blocked_fpu_registers_[location.AsFpuRegisterPairLow<int>()] = true;
    DCHECK(is_out || !blocked_fpu_registers_[location.AsFpuRegisterPairHigh<int>()]);
    blocked_fpu_registers_[location.AsFpuRegisterPairHigh<int>()] = true;
  } else if (location.IsRegisterPair()) {
    DCHECK(is_out || !blocked_core_registers_[location.AsRegisterPairLow<int>()]);
    blocked_core_registers_[location.AsRegisterPairLow<int>()] = true;
    DCHECK(is_out || !blocked_core_registers_[location.AsRegisterPairHigh<int>()]);
    blocked_core_registers_[location.AsRegisterPairHigh<int>()] = true;
  }
}

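// Runs the location builder on `instruction` (and its environment chain) and
// uses the resulting summary to track whether the method stays a leaf and
// whether it still needs the current ArtMethod*.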
void CodeGenerator::AllocateLocations(HInstruction* instruction) {
  for (HEnvironment* env = instruction->GetEnvironment(); env != nullptr; env = env->GetParent()) {
    env->AllocateLocations();
  }
  instruction->Accept(GetLocationBuilder());
  DCHECK(CheckTypeConsistency(instruction));
  LocationSummary* locations = instruction->GetLocations();
  if (!instruction->IsSuspendCheckEntry()) {
    if (locations != nullptr) {
      if (locations->CanCall()) {
        MarkNotLeaf();
      } else if (locations->Intrinsified() &&
                 instruction->IsInvokeStaticOrDirect() &&
                 !instruction->AsInvokeStaticOrDirect()->HasCurrentMethodInput()) {
        // A static method call that has been fully intrinsified, and cannot call on the slow
        // path or refer to the current method directly, no longer needs the current method.
        return;
      }
    }
    if (instruction->NeedsCurrentMethod()) {
      SetRequiresCurrentMethod();
    }
  }
}

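// Factory for the backend-specific code generator matching the target
// instruction set; returns null if that backend was not compiled in.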
std::unique_ptr<CodeGenerator> CodeGenerator::Create(HGraph* graph,
                                                     const CompilerOptions& compiler_options,
                                                     OptimizingCompilerStats* stats) {
  ArenaAllocator* allocator = graph->GetAllocator();
  switch (compiler_options.GetInstructionSet()) {
#ifdef ART_ENABLE_CODEGEN_arm
    case InstructionSet::kArm:
    case InstructionSet::kThumb2: {
      return std::unique_ptr<CodeGenerator>(
          new (allocator) arm::CodeGeneratorARMVIXL(graph, compiler_options, stats));
    }
#endif
#ifdef ART_ENABLE_CODEGEN_arm64
    case InstructionSet::kArm64: {
      return std::unique_ptr<CodeGenerator>(
          new (allocator) arm64::CodeGeneratorARM64(graph, compiler_options, stats));
    }
#endif
#ifdef ART_ENABLE_CODEGEN_x86
    case InstructionSet::kX86: {
      return std::unique_ptr<CodeGenerator>(
          new (allocator) x86::CodeGeneratorX86(graph, compiler_options, stats));
    }
#endif
#ifdef ART_ENABLE_CODEGEN_x86_64
    case InstructionSet::kX86_64: {
      return std::unique_ptr<CodeGenerator>(
          new (allocator) x86_64::CodeGeneratorX86_64(graph, compiler_options, stats));
    }
#endif
    default:
      return nullptr;
  }
}

CodeGenerator::CodeGenerator(HGraph* graph,
                             size_t number_of_core_registers,
                             size_t number_of_fpu_registers,
                             size_t number_of_register_pairs,
                             uint32_t core_callee_save_mask,
                             uint32_t fpu_callee_save_mask,
                             const CompilerOptions& compiler_options,
                             OptimizingCompilerStats* stats)
    : frame_size_(0),
      core_spill_mask_(0),
      fpu_spill_mask_(0),
      first_register_slot_in_slow_path_(0),
      allocated_registers_(RegisterSet::Empty()),
      blocked_core_registers_(graph->GetAllocator()->AllocArray<bool>(number_of_core_registers,
                                                                      kArenaAllocCodeGenerator)),
      blocked_fpu_registers_(graph->GetAllocator()->AllocArray<bool>(number_of_fpu_registers,
                                                                     kArenaAllocCodeGenerator)),
      number_of_core_registers_(number_of_core_registers),
      number_of_fpu_registers_(number_of_fpu_registers),
      number_of_register_pairs_(number_of_register_pairs),
      core_callee_save_mask_(core_callee_save_mask),
      fpu_callee_save_mask_(fpu_callee_save_mask),
      block_order_(nullptr),
      disasm_info_(nullptr),
      stats_(stats),
      graph_(graph),
      compiler_options_(compiler_options),
      current_slow_path_(nullptr),
      current_block_index_(0),
      is_leaf_(true),
      requires_current_method_(false),
      code_generation_data_() {
  if (GetGraph()->IsCompilingOsr()) {
    // Make OSR methods have all registers spilled; this simplifies the logic of
    // jumping to the compiled code directly.
    for (size_t i = 0; i < number_of_core_registers_; ++i) {
      if (IsCoreCalleeSaveRegister(i)) {
        AddAllocatedRegister(Location::RegisterLocation(i));
      }
    }
    for (size_t i = 0; i < number_of_fpu_registers_; ++i) {
      if (IsFloatingPointCalleeSaveRegister(i)) {
        AddAllocatedRegister(Location::FpuRegisterLocation(i));
      }
    }
  }
}

CodeGenerator::~CodeGenerator() {}

size_t CodeGenerator::GetNumberOfJitRoots() const {
  DCHECK(code_generation_data_ != nullptr);
  return code_generation_data_->GetNumberOfJitRoots();
}

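// Debug helper: for every loop header whose dex_pc equals `dex_pc`, counts a
// covering dex branch, and for OSR compilations also checks that an OSR stack
// map exists at that dex_pc.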
static void CheckCovers(uint32_t dex_pc,
                        const HGraph& graph,
                        const CodeInfo& code_info,
                        const ArenaVector<HSuspendCheck*>& loop_headers,
                        ArenaVector<size_t>* covered) {
  for (size_t i = 0; i < loop_headers.size(); ++i) {
    if (loop_headers[i]->GetDexPc() == dex_pc) {
      if (graph.IsCompilingOsr()) {
        DCHECK(code_info.GetOsrStackMapForDexPc(dex_pc).IsValid());
      }
      ++(*covered)[i];
    }
  }
}

// Debug helper to ensure loop entries in compiled code are matched by
// dex branch instructions.
static void CheckLoopEntriesCanBeUsedForOsr(const HGraph& graph,
                                            const CodeInfo& code_info,
                                            const dex::CodeItem& code_item) {
  if (graph.HasTryCatch()) {
    // One can write loops through try/catch, which we do not support for OSR anyway.
    return;
  }
  ArenaVector<HSuspendCheck*> loop_headers(graph.GetAllocator()->Adapter(kArenaAllocMisc));
  for (HBasicBlock* block : graph.GetReversePostOrder()) {
    if (block->IsLoopHeader()) {
      HSuspendCheck* suspend_check = block->GetLoopInformation()->GetSuspendCheck();
      if (!suspend_check->GetEnvironment()->IsFromInlinedInvoke()) {
        loop_headers.push_back(suspend_check);
      }
    }
  }
  ArenaVector<size_t> covered(
      loop_headers.size(), 0, graph.GetAllocator()->Adapter(kArenaAllocMisc));
  for (const DexInstructionPcPair& pair : CodeItemInstructionAccessor(graph.GetDexFile(),
                                                                      &code_item)) {
    const uint32_t dex_pc = pair.DexPc();
    const Instruction& instruction = pair.Inst();
    if (instruction.IsBranch()) {
      uint32_t target = dex_pc + instruction.GetTargetOffset();
      CheckCovers(target, graph, code_info, loop_headers, &covered);
    } else if (instruction.IsSwitch()) {
      DexSwitchTable table(instruction, dex_pc);
      uint16_t num_entries = table.GetNumEntries();
      size_t offset = table.GetFirstValueIndex();

      // Use a larger loop counter type to avoid overflow issues.
      for (size_t i = 0; i < num_entries; ++i) {
        // The target of the case.
        uint32_t target = dex_pc + table.GetEntryAt(i + offset);
        CheckCovers(target, graph, code_info, loop_headers, &covered);
      }
    }
  }

  for (size_t i = 0; i < covered.size(); ++i) {
    DCHECK_NE(covered[i], 0u) << "Loop in compiled code has no dex branch equivalent";
  }
}

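// Encodes the accumulated stack maps. In debug builds, also cross-checks that
// every compiled loop entry is reachable from a dex-level branch, which OSR
// relies on.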
ScopedArenaVector<uint8_t> CodeGenerator::BuildStackMaps(const dex::CodeItem* code_item) {
  ScopedArenaVector<uint8_t> stack_map = GetStackMapStream()->Encode();
  if (kIsDebugBuild && code_item != nullptr) {
    CheckLoopEntriesCanBeUsedForOsr(*graph_, CodeInfo(stack_map.data()), *code_item);
  }
  return stack_map;
}

// Returns whether stackmap dex register info is needed for the instruction.
//
// The following cases mandate having a dex register map:
//  * Deoptimization
//    when we need to obtain the values to restore actual vregisters for the interpreter.
//  * Debuggability
//    when we want to observe the values / asynchronously deoptimize.
//  * Monitor operations
//    to allow dumping in a stack trace locked dex registers for non-debuggable code.
//  * On-stack-replacement (OSR)
//    when entering compiled-for-OSR code from the interpreter we need to initialize the
//    compiled code values with the values from the vregisters.
//  * Method local catch blocks
//    a catch block must see the environment of the instruction from the same method that can
//    throw to this block.
static bool NeedsVregInfo(HInstruction* instruction, bool osr) {
  HGraph* graph = instruction->GetBlock()->GetGraph();
  return instruction->IsDeoptimize() ||
         graph->IsDebuggable() ||
         graph->HasMonitorOperations() ||
         osr ||
         instruction->CanThrowIntoCatchBlock();
}

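// Convenience overload that records the stack map at the assembler's current
// code position.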
RecordPcInfo(HInstruction * instruction,uint32_t dex_pc,SlowPathCode * slow_path,bool native_debug_info)1161 void CodeGenerator::RecordPcInfo(HInstruction* instruction,
1162 uint32_t dex_pc,
1163 SlowPathCode* slow_path,
1164 bool native_debug_info) {
1165 RecordPcInfo(instruction, dex_pc, GetAssembler()->CodePosition(), slow_path, native_debug_info);
1166 }
1167
void CodeGenerator::RecordPcInfo(HInstruction* instruction,
                                 uint32_t dex_pc,
                                 uint32_t native_pc,
                                 SlowPathCode* slow_path,
                                 bool native_debug_info) {
  if (instruction != nullptr) {
    // The code generated for some type conversions
    // may call the runtime, thus normally requiring a subsequent
    // call to this method. However, the method verifier does not
    // produce PC information for certain instructions, which are
    // considered "atomic" (they cannot join a GC).
    // Therefore we do not currently record PC information for such
    // instructions. As this may change later, we added this special
    // case so that code generators may nevertheless call
    // CodeGenerator::RecordPcInfo without triggering an error in
    // CodeGenerator::BuildNativeGCMap ("Missing ref for dex pc 0x")
    // thereafter.
    if (instruction->IsTypeConversion()) {
      return;
    }
    if (instruction->IsRem()) {
      DataType::Type type = instruction->AsRem()->GetResultType();
      if ((type == DataType::Type::kFloat32) || (type == DataType::Type::kFloat64)) {
        return;
      }
    }
  }

  StackMapStream* stack_map_stream = GetStackMapStream();
  if (instruction == nullptr) {
    // For stack overflow checks and native-debug-info entries without dex register
    // mapping (i.e. start of basic block or start of slow path).
    stack_map_stream->BeginStackMapEntry(dex_pc, native_pc);
    stack_map_stream->EndStackMapEntry();
    return;
  }

  LocationSummary* locations = instruction->GetLocations();
  uint32_t register_mask = locations->GetRegisterMask();
  DCHECK_EQ(register_mask & ~locations->GetLiveRegisters()->GetCoreRegisters(), 0u);
  if (locations->OnlyCallsOnSlowPath()) {
    // In case of slow path, we currently set the location of caller-save registers
    // to register (instead of their stack location when pushed before the slow-path
    // call). Therefore register_mask contains both callee-save and caller-save
    // registers that hold objects. We must remove the spilled caller-save from the
    // mask, since they will be overwritten by the callee.
    uint32_t spills = GetSlowPathSpills(locations, /* core_registers= */ true);
    register_mask &= ~spills;
  } else {
    // The register mask must be a subset of callee-save registers.
    DCHECK_EQ(register_mask & core_callee_save_mask_, register_mask);
  }

  uint32_t outer_dex_pc = dex_pc;
  uint32_t outer_environment_size = 0u;
  uint32_t inlining_depth = 0;
  HEnvironment* const environment = instruction->GetEnvironment();
  if (environment != nullptr) {
    HEnvironment* outer_environment = environment;
    while (outer_environment->GetParent() != nullptr) {
      outer_environment = outer_environment->GetParent();
      ++inlining_depth;
    }
    outer_dex_pc = outer_environment->GetDexPc();
    outer_environment_size = outer_environment->Size();
  }

  HLoopInformation* info = instruction->GetBlock()->GetLoopInformation();
  bool osr =
      instruction->IsSuspendCheck() &&
      (info != nullptr) &&
      graph_->IsCompilingOsr() &&
      (inlining_depth == 0);
  StackMap::Kind kind = native_debug_info
      ? StackMap::Kind::Debug
      : (osr ? StackMap::Kind::OSR : StackMap::Kind::Default);
  bool needs_vreg_info = NeedsVregInfo(instruction, osr);
  stack_map_stream->BeginStackMapEntry(outer_dex_pc,
                                       native_pc,
                                       register_mask,
                                       locations->GetStackMask(),
                                       kind,
                                       needs_vreg_info);

  EmitEnvironment(environment, slow_path, needs_vreg_info);
  stack_map_stream->EndStackMapEntry();

  if (osr) {
    DCHECK_EQ(info->GetSuspendCheck(), instruction);
    DCHECK(info->IsIrreducible());
    DCHECK(environment != nullptr);
    if (kIsDebugBuild) {
      for (size_t i = 0, environment_size = environment->Size(); i < environment_size; ++i) {
        HInstruction* in_environment = environment->GetInstructionAt(i);
        if (in_environment != nullptr) {
          DCHECK(in_environment->IsPhi() || in_environment->IsConstant());
          Location location = environment->GetLocationAt(i);
          DCHECK(location.IsStackSlot() ||
                 location.IsDoubleStackSlot() ||
                 location.IsConstant() ||
                 location.IsInvalid());
          if (location.IsStackSlot() || location.IsDoubleStackSlot()) {
            DCHECK_LT(location.GetStackIndex(), static_cast<int32_t>(GetFrameSize()));
          }
        }
      }
    }
  }
}

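// Returns whether the last emitted stack map sits exactly at the current code offset, in
// which case emitting another one here would make the two native pcs collide.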
bool CodeGenerator::HasStackMapAtCurrentPc() {
  uint32_t pc = GetAssembler()->CodeSize();
  StackMapStream* stack_map_stream = GetStackMapStream();
  size_t count = stack_map_stream->GetNumberOfStackMaps();
  if (count == 0) {
    return false;
  }
  return stack_map_stream->GetStackMapNativePcOffset(count - 1) == pc;
}

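// For native-debuggable compilations, record a Debug-kind stack map so a native debugger
// can map the code generated for `instruction` back to `dex_pc`.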
void CodeGenerator::MaybeRecordNativeDebugInfo(HInstruction* instruction,
                                               uint32_t dex_pc,
                                               SlowPathCode* slow_path) {
  if (GetCompilerOptions().GetNativeDebuggable() && dex_pc != kNoDexPc) {
    if (HasStackMapAtCurrentPc()) {
      // Ensure that we do not collide with the stack map of the previous instruction.
      GenerateNop();
    }
    RecordPcInfo(instruction, dex_pc, slow_path, /* native_debug_info= */ true);
  }
}

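// Emit a Catch-kind stack map for every catch block. The dex register locations come from
// the block's catch phis; vregs without a catch phi are recorded as kNone, and catch phis
// themselves are expected to live in stack slots.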
void CodeGenerator::RecordCatchBlockInfo() {
  StackMapStream* stack_map_stream = GetStackMapStream();

  for (HBasicBlock* block : *block_order_) {
    if (!block->IsCatchBlock()) {
      continue;
    }

    uint32_t dex_pc = block->GetDexPc();
    uint32_t num_vregs = graph_->GetNumberOfVRegs();
    uint32_t native_pc = GetAddressOf(block);

    stack_map_stream->BeginStackMapEntry(dex_pc,
                                         native_pc,
                                         /* register_mask= */ 0,
                                         /* sp_mask= */ nullptr,
                                         StackMap::Kind::Catch);

    HInstruction* current_phi = block->GetFirstPhi();
    for (size_t vreg = 0; vreg < num_vregs; ++vreg) {
      while (current_phi != nullptr && current_phi->AsPhi()->GetRegNumber() < vreg) {
        HInstruction* next_phi = current_phi->GetNext();
        DCHECK(next_phi == nullptr ||
               current_phi->AsPhi()->GetRegNumber() <= next_phi->AsPhi()->GetRegNumber())
            << "Phis need to be sorted by vreg number to keep this a linear-time loop.";
        current_phi = next_phi;
      }

      if (current_phi == nullptr || current_phi->AsPhi()->GetRegNumber() != vreg) {
        stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kNone, 0);
      } else {
        Location location = current_phi->GetLocations()->Out();
        switch (location.GetKind()) {
          case Location::kStackSlot: {
            stack_map_stream->AddDexRegisterEntry(
                DexRegisterLocation::Kind::kInStack, location.GetStackIndex());
            break;
          }
          case Location::kDoubleStackSlot: {
            stack_map_stream->AddDexRegisterEntry(
                DexRegisterLocation::Kind::kInStack, location.GetStackIndex());
            stack_map_stream->AddDexRegisterEntry(
                DexRegisterLocation::Kind::kInStack, location.GetHighStackIndex(kVRegSize));
            ++vreg;
            DCHECK_LT(vreg, num_vregs);
            break;
          }
          default: {
            // All catch phis must be allocated to a stack slot.
            LOG(FATAL) << "Unexpected kind " << location.GetKind();
            UNREACHABLE();
          }
        }
      }
    }

    stack_map_stream->EndStackMapEntry();
  }
}

void CodeGenerator::AddSlowPath(SlowPathCode* slow_path) {
  DCHECK(code_generation_data_ != nullptr);
  code_generation_data_->AddSlowPath(slow_path);
}

void CodeGenerator::EmitVRegInfo(HEnvironment* environment, SlowPathCode* slow_path) {
  StackMapStream* stack_map_stream = GetStackMapStream();
  // Walk over the environment, and record the location of dex registers.
  for (size_t i = 0, environment_size = environment->Size(); i < environment_size; ++i) {
    HInstruction* current = environment->GetInstructionAt(i);
    if (current == nullptr) {
      stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kNone, 0);
      continue;
    }

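    // 64-bit values (kInt64/kFloat64) span two consecutive vregs: both halves are emitted
    // and the loop index `i` is advanced past the second slot.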
    using Kind = DexRegisterLocation::Kind;
    Location location = environment->GetLocationAt(i);
    switch (location.GetKind()) {
      case Location::kConstant: {
        DCHECK_EQ(current, location.GetConstant());
        if (current->IsLongConstant()) {
          int64_t value = current->AsLongConstant()->GetValue();
          stack_map_stream->AddDexRegisterEntry(Kind::kConstant, Low32Bits(value));
          stack_map_stream->AddDexRegisterEntry(Kind::kConstant, High32Bits(value));
          ++i;
          DCHECK_LT(i, environment_size);
        } else if (current->IsDoubleConstant()) {
          int64_t value = bit_cast<int64_t, double>(current->AsDoubleConstant()->GetValue());
          stack_map_stream->AddDexRegisterEntry(Kind::kConstant, Low32Bits(value));
          stack_map_stream->AddDexRegisterEntry(Kind::kConstant, High32Bits(value));
          ++i;
          DCHECK_LT(i, environment_size);
        } else if (current->IsIntConstant()) {
          int32_t value = current->AsIntConstant()->GetValue();
          stack_map_stream->AddDexRegisterEntry(Kind::kConstant, value);
        } else if (current->IsNullConstant()) {
          stack_map_stream->AddDexRegisterEntry(Kind::kConstant, 0);
        } else {
          DCHECK(current->IsFloatConstant()) << current->DebugName();
          int32_t value = bit_cast<int32_t, float>(current->AsFloatConstant()->GetValue());
          stack_map_stream->AddDexRegisterEntry(Kind::kConstant, value);
        }
        break;
      }

      case Location::kStackSlot: {
        stack_map_stream->AddDexRegisterEntry(Kind::kInStack, location.GetStackIndex());
        break;
      }

      case Location::kDoubleStackSlot: {
        stack_map_stream->AddDexRegisterEntry(Kind::kInStack, location.GetStackIndex());
        stack_map_stream->AddDexRegisterEntry(
            Kind::kInStack, location.GetHighStackIndex(kVRegSize));
        ++i;
        DCHECK_LT(i, environment_size);
        break;
      }

      case Location::kRegister: {
        int id = location.reg();
        if (slow_path != nullptr && slow_path->IsCoreRegisterSaved(id)) {
          uint32_t offset = slow_path->GetStackOffsetOfCoreRegister(id);
          stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset);
          if (current->GetType() == DataType::Type::kInt64) {
            stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset + kVRegSize);
            ++i;
            DCHECK_LT(i, environment_size);
          }
        } else {
          stack_map_stream->AddDexRegisterEntry(Kind::kInRegister, id);
          if (current->GetType() == DataType::Type::kInt64) {
            stack_map_stream->AddDexRegisterEntry(Kind::kInRegisterHigh, id);
            ++i;
            DCHECK_LT(i, environment_size);
          }
        }
        break;
      }

      case Location::kFpuRegister: {
        int id = location.reg();
        if (slow_path != nullptr && slow_path->IsFpuRegisterSaved(id)) {
          uint32_t offset = slow_path->GetStackOffsetOfFpuRegister(id);
          stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset);
          if (current->GetType() == DataType::Type::kFloat64) {
            stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset + kVRegSize);
            ++i;
            DCHECK_LT(i, environment_size);
          }
        } else {
          stack_map_stream->AddDexRegisterEntry(Kind::kInFpuRegister, id);
          if (current->GetType() == DataType::Type::kFloat64) {
            stack_map_stream->AddDexRegisterEntry(Kind::kInFpuRegisterHigh, id);
            ++i;
            DCHECK_LT(i, environment_size);
          }
        }
        break;
      }

      case Location::kFpuRegisterPair: {
        int low = location.low();
        int high = location.high();
        if (slow_path != nullptr && slow_path->IsFpuRegisterSaved(low)) {
          uint32_t offset = slow_path->GetStackOffsetOfFpuRegister(low);
          stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset);
        } else {
          stack_map_stream->AddDexRegisterEntry(Kind::kInFpuRegister, low);
        }
        if (slow_path != nullptr && slow_path->IsFpuRegisterSaved(high)) {
          uint32_t offset = slow_path->GetStackOffsetOfFpuRegister(high);
          stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset);
          ++i;
        } else {
          stack_map_stream->AddDexRegisterEntry(Kind::kInFpuRegister, high);
          ++i;
        }
        DCHECK_LT(i, environment_size);
        break;
      }

      case Location::kRegisterPair: {
        int low = location.low();
        int high = location.high();
        if (slow_path != nullptr && slow_path->IsCoreRegisterSaved(low)) {
          uint32_t offset = slow_path->GetStackOffsetOfCoreRegister(low);
          stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset);
        } else {
          stack_map_stream->AddDexRegisterEntry(Kind::kInRegister, low);
        }
        if (slow_path != nullptr && slow_path->IsCoreRegisterSaved(high)) {
          uint32_t offset = slow_path->GetStackOffsetOfCoreRegister(high);
          stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset);
        } else {
          stack_map_stream->AddDexRegisterEntry(Kind::kInRegister, high);
        }
        ++i;
        DCHECK_LT(i, environment_size);
        break;
      }

      case Location::kInvalid: {
        stack_map_stream->AddDexRegisterEntry(Kind::kNone, 0);
        break;
      }

      default:
        LOG(FATAL) << "Unexpected kind " << location.GetKind();
    }
  }
}

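// Emit a (possibly nested) environment. Parent (outer) environments are emitted first, and
// each inlined level is bracketed by an inline info entry in the stack map stream.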
void CodeGenerator::EmitEnvironment(HEnvironment* environment,
                                    SlowPathCode* slow_path,
                                    bool needs_vreg_info) {
  if (environment == nullptr) return;

  StackMapStream* stack_map_stream = GetStackMapStream();
  bool emit_inline_info = environment->GetParent() != nullptr;

  if (emit_inline_info) {
    // We emit the parent environment first.
    EmitEnvironment(environment->GetParent(), slow_path, needs_vreg_info);
    stack_map_stream->BeginInlineInfoEntry(environment->GetMethod(),
                                           environment->GetDexPc(),
                                           needs_vreg_info ? environment->Size() : 0,
                                           &graph_->GetDexFile());
  }

  if (needs_vreg_info) {
    // If a dex register map is not required we just won't emit it.
    EmitVRegInfo(environment, slow_path);
  }

  if (emit_inline_info) {
    stack_map_stream->EndInlineInfoEntry();
  }
}

bool CodeGenerator::CanMoveNullCheckToUser(HNullCheck* null_check) {
  return null_check->IsEmittedAtUseSite();
}

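// If `instr` carries an implicit null check (the memory access itself faults on null),
// record a stack map at the current code position so the fault handler can map the
// faulting native pc back to the null check's dex pc and environment.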
void CodeGenerator::MaybeRecordImplicitNullCheck(HInstruction* instr) {
  HNullCheck* null_check = instr->GetImplicitNullCheck();
  if (null_check != nullptr) {
    RecordPcInfo(null_check, null_check->GetDexPc(), GetAssembler()->CodePosition());
  }
}

LocationSummary* CodeGenerator::CreateThrowingSlowPathLocations(HInstruction* instruction,
                                                                RegisterSet caller_saves) {
  // Note: Using kNoCall allows the method to be treated as leaf (and eliminate the
  // HSuspendCheck from entry block). However, it will still get a valid stack frame
  // because the HNullCheck needs an environment.
  LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
  // When throwing from a try block, we may need to retrieve dalvik registers from
  // physical registers and we also need to set up stack mask for GC. This is
  // implicitly achieved by passing kCallOnSlowPath to the LocationSummary.
  bool can_throw_into_catch_block = instruction->CanThrowIntoCatchBlock();
  if (can_throw_into_catch_block) {
    call_kind = LocationSummary::kCallOnSlowPath;
  }
  LocationSummary* locations =
      new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind);
  if (can_throw_into_catch_block && compiler_options_.GetImplicitNullChecks()) {
    locations->SetCustomSlowPathCallerSaves(caller_saves);  // Default: no caller-save registers.
  }
  DCHECK(!instruction->HasUses());
  return locations;
}

void CodeGenerator::GenerateNullCheck(HNullCheck* instruction) {
  if (compiler_options_.GetImplicitNullChecks()) {
    MaybeRecordStat(stats_, MethodCompilationStat::kImplicitNullCheckGenerated);
    GenerateImplicitNullCheck(instruction);
  } else {
    MaybeRecordStat(stats_, MethodCompilationStat::kExplicitNullCheckGenerated);
    GenerateExplicitNullCheck(instruction);
  }
}

void CodeGenerator::ClearSpillSlotsFromLoopPhisInStackMap(HSuspendCheck* suspend_check,
                                                          HParallelMove* spills) const {
  LocationSummary* locations = suspend_check->GetLocations();
  HBasicBlock* block = suspend_check->GetBlock();
  DCHECK(block->GetLoopInformation()->GetSuspendCheck() == suspend_check);
  DCHECK(block->IsLoopHeader());
  DCHECK(block->GetFirstInstruction() == spills);

  for (size_t i = 0, num_moves = spills->NumMoves(); i != num_moves; ++i) {
    Location dest = spills->MoveOperandsAt(i)->GetDestination();
    // All parallel moves in loop headers are spills.
    DCHECK(dest.IsStackSlot() || dest.IsDoubleStackSlot() || dest.IsSIMDStackSlot()) << dest;
    // Clear the stack bit marking a reference. Do not bother to check if the spill is
    // actually a reference spill, clearing bits that are already zero is harmless.
    locations->ClearStackBit(dest.GetStackIndex() / kVRegSize);
  }
}

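// Emit two moves as one parallel move, so the move resolver can handle any overlap between
// sources and destinations (e.g. with a swap or a temporary).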
void CodeGenerator::EmitParallelMoves(Location from1,
                                      Location to1,
                                      DataType::Type type1,
                                      Location from2,
                                      Location to2,
                                      DataType::Type type2) {
  HParallelMove parallel_move(GetGraph()->GetAllocator());
  parallel_move.AddMove(from1, to1, type1, nullptr);
  parallel_move.AddMove(from2, to2, type2, nullptr);
  GetMoveResolver()->EmitNativeCode(&parallel_move);
}

void CodeGenerator::ValidateInvokeRuntime(QuickEntrypointEnum entrypoint,
                                          HInstruction* instruction,
                                          SlowPathCode* slow_path) {
  // Ensure that the call kind indication given to the register allocator is
  // coherent with the runtime call generated.
  if (slow_path == nullptr) {
    DCHECK(instruction->GetLocations()->WillCall())
        << "instruction->DebugName()=" << instruction->DebugName();
  } else {
    DCHECK(instruction->GetLocations()->CallsOnSlowPath() || slow_path->IsFatal())
        << "instruction->DebugName()=" << instruction->DebugName()
        << " slow_path->GetDescription()=" << slow_path->GetDescription();
  }

  // Check that the GC side effect is set when required.
  // TODO: Reverse EntrypointCanTriggerGC
  if (EntrypointCanTriggerGC(entrypoint)) {
    if (slow_path == nullptr) {
      DCHECK(instruction->GetSideEffects().Includes(SideEffects::CanTriggerGC()))
          << "instruction->DebugName()=" << instruction->DebugName()
          << " instruction->GetSideEffects().ToString()="
          << instruction->GetSideEffects().ToString();
    } else {
      // 'CanTriggerGC' side effect is used to restrict optimization of instructions which depend
      // on GC (e.g. IntermediateAddress) - to ensure they are not alive across GC points. However
      // if execution never returns to the compiled code from a GC point this restriction is
      // unnecessary - in particular for fatal slow paths which might trigger GC.
      DCHECK((slow_path->IsFatal() && !instruction->GetLocations()->WillCall()) ||
             instruction->GetSideEffects().Includes(SideEffects::CanTriggerGC()) ||
             // When (non-Baker) read barriers are enabled, some instructions
             // use a slow path to emit a read barrier, which does not trigger
             // GC.
             (kEmitCompilerReadBarrier &&
              !kUseBakerReadBarrier &&
              (instruction->IsInstanceFieldGet() ||
               instruction->IsStaticFieldGet() ||
               instruction->IsArrayGet() ||
               instruction->IsLoadClass() ||
               instruction->IsLoadString() ||
               instruction->IsInstanceOf() ||
               instruction->IsCheckCast() ||
               (instruction->IsInvokeVirtual() && instruction->GetLocations()->Intrinsified()))))
          << "instruction->DebugName()=" << instruction->DebugName()
          << " instruction->GetSideEffects().ToString()="
          << instruction->GetSideEffects().ToString()
          << " slow_path->GetDescription()=" << slow_path->GetDescription();
    }
  } else {
    // The GC side effect is not required for the instruction. But the instruction might still have
    // it, for example if it calls other entrypoints requiring it.
  }

  // Check the coherency of leaf information.
  DCHECK(instruction->IsSuspendCheck()
         || ((slow_path != nullptr) && slow_path->IsFatal())
         || instruction->GetLocations()->CanCall()
         || !IsLeafMethod())
      << instruction->DebugName() << ((slow_path != nullptr) ? slow_path->GetDescription() : "");
}

void CodeGenerator::ValidateInvokeRuntimeWithoutRecordingPcInfo(HInstruction* instruction,
                                                                SlowPathCode* slow_path) {
  DCHECK(instruction->GetLocations()->OnlyCallsOnSlowPath())
      << "instruction->DebugName()=" << instruction->DebugName()
      << " slow_path->GetDescription()=" << slow_path->GetDescription();
  // Only the Baker read barrier marking slow path used by certain
  // instructions is expected to invoke the runtime without recording
  // PC-related information.
  DCHECK(kUseBakerReadBarrier);
  DCHECK(instruction->IsInstanceFieldGet() ||
         instruction->IsStaticFieldGet() ||
         instruction->IsArrayGet() ||
         instruction->IsArraySet() ||
         instruction->IsLoadClass() ||
         instruction->IsLoadString() ||
         instruction->IsInstanceOf() ||
         instruction->IsCheckCast() ||
         (instruction->IsInvokeVirtual() && instruction->GetLocations()->Intrinsified()) ||
         (instruction->IsInvokeStaticOrDirect() && instruction->GetLocations()->Intrinsified()))
      << "instruction->DebugName()=" << instruction->DebugName()
      << " slow_path->GetDescription()=" << slow_path->GetDescription();
}

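// Spill the live caller-save registers recorded in `locations` into the slow-path spill
// area, remembering each register's stack offset so the dex register map (and the matching
// RestoreLiveRegisters) can find it.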
void SlowPathCode::SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
  size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath();

  const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ true);
  for (uint32_t i : LowToHighBits(core_spills)) {
    // If the register holds an object, update the stack mask.
    if (locations->RegisterContainsObject(i)) {
      locations->SetStackBit(stack_offset / kVRegSize);
    }
    DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
    DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
    saved_core_stack_offsets_[i] = stack_offset;
    stack_offset += codegen->SaveCoreRegister(stack_offset, i);
  }

  const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ false);
  for (uint32_t i : LowToHighBits(fp_spills)) {
    DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
    DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
    saved_fpu_stack_offsets_[i] = stack_offset;
    stack_offset += codegen->SaveFloatingPointRegister(stack_offset, i);
  }
}

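// Reload the registers spilled by SaveLiveRegisters. The iteration order (core registers
// low-to-high, then floating-point registers) must match the save order for the offsets to
// line up.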
void SlowPathCode::RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
  size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath();

  const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ true);
  for (uint32_t i : LowToHighBits(core_spills)) {
    DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
    DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
    stack_offset += codegen->RestoreCoreRegister(stack_offset, i);
  }

  const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ false);
  for (uint32_t i : LowToHighBits(fp_spills)) {
    DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
    DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
    stack_offset += codegen->RestoreFloatingPointRegister(stack_offset, i);
  }
}

void CodeGenerator::CreateSystemArrayCopyLocationSummary(HInvoke* invoke) {
  // Check for known failures that would force a bail-out to the runtime;
  // in that case, just generate the runtime call directly.
  HIntConstant* src_pos = invoke->InputAt(1)->AsIntConstant();
  HIntConstant* dest_pos = invoke->InputAt(3)->AsIntConstant();

  // The positions must be non-negative.
  if ((src_pos != nullptr && src_pos->GetValue() < 0) ||
      (dest_pos != nullptr && dest_pos->GetValue() < 0)) {
    // We will have to fail anyway.
    return;
  }

  // The length must be >= 0.
  HIntConstant* length = invoke->InputAt(4)->AsIntConstant();
  if (length != nullptr) {
    int32_t len = length->GetValue();
    if (len < 0) {
      // Just call as normal.
      return;
    }
  }

  SystemArrayCopyOptimizations optimizations(invoke);

  if (optimizations.GetDestinationIsSource()) {
    if (src_pos != nullptr && dest_pos != nullptr && src_pos->GetValue() < dest_pos->GetValue()) {
      // We only support backward copying if source and destination are the same.
      return;
    }
  }

  if (optimizations.GetDestinationIsPrimitiveArray() || optimizations.GetSourceIsPrimitiveArray()) {
    // We currently don't intrinsify primitive copying.
    return;
  }

  ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator();
  LocationSummary* locations = new (allocator) LocationSummary(invoke,
                                                               LocationSummary::kCallOnSlowPath,
                                                               kIntrinsified);
  // arraycopy(Object src, int src_pos, Object dest, int dest_pos, int length).
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
  locations->SetInAt(2, Location::RequiresRegister());
  locations->SetInAt(3, Location::RegisterOrConstant(invoke->InputAt(3)));
  locations->SetInAt(4, Location::RegisterOrConstant(invoke->InputAt(4)));

  locations->AddTemp(Location::RequiresRegister());
  locations->AddTemp(Location::RequiresRegister());
  locations->AddTemp(Location::RequiresRegister());
}

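// Collect the JIT GC roots used by the compiled method into `roots` and patch the code so
// that its root accesses refer to the root table at `roots_data`.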
void CodeGenerator::EmitJitRoots(uint8_t* code,
                                 const uint8_t* roots_data,
                                 /*out*/std::vector<Handle<mirror::Object>>* roots) {
  code_generation_data_->EmitJitRoots(roots);
  EmitJitRootPatches(code, roots_data);
}

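// Pick the allocation entrypoint matching the array's component size:
// shifts 0/1/2/3 correspond to 1/2/4/8-byte components.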
QuickEntrypointEnum CodeGenerator::GetArrayAllocationEntrypoint(HNewArray* new_array) {
  switch (new_array->GetComponentSizeShift()) {
    case 0: return kQuickAllocArrayResolved8;
    case 1: return kQuickAllocArrayResolved16;
    case 2: return kQuickAllocArrayResolved32;
    case 3: return kQuickAllocArrayResolved64;
  }
  LOG(FATAL) << "Unreachable";
  UNREACHABLE();
}

}  // namespace art