/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "optimizing_compiler.h"

#include <fstream>
#include <memory>
#include <sstream>

#include <stdint.h>

#include "art_method-inl.h"
#include "base/arena_allocator.h"
#include "base/arena_containers.h"
#include "base/dumpable.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "base/scoped_arena_allocator.h"
#include "base/timing_logger.h"
#include "builder.h"
#include "code_generator.h"
#include "compiled_method.h"
#include "compiler.h"
#include "debug/elf_debug_writer.h"
#include "debug/method_debug_info.h"
#include "dex/dex_file_types.h"
#include "dex/verification_results.h"
#include "dex/verified_method.h"
#include "driver/compiled_method_storage.h"
#include "driver/compiler_options.h"
#include "driver/dex_compilation_unit.h"
#include "graph_checker.h"
#include "graph_visualizer.h"
#include "inliner.h"
#include "jit/debugger_interface.h"
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
#include "jit/jit_logger.h"
#include "jni/quick/jni_compiler.h"
#include "linker/linker_patch.h"
#include "nodes.h"
#include "oat_quick_method_header.h"
#include "prepare_for_register_allocation.h"
#include "reference_type_propagation.h"
#include "register_allocator_linear_scan.h"
#include "select_generator.h"
#include "ssa_builder.h"
#include "ssa_liveness_analysis.h"
#include "ssa_phi_elimination.h"
#include "stack_map_stream.h"
#include "utils/assembler.h"
#include "verifier/verifier_compiler_binding.h"

namespace art {

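// Arena usage above this threshold is reported in the logs after compiling a method
// (see the kArenaAllocatorCountAllocations blocks below).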
static constexpr size_t kArenaAllocatorMemoryReportThreshold = 8 * MB;

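// Pass names may carry a suffix after this separator (e.g. "dead_code_elimination$initial")
// so that multiple instances of the same optimization can be told apart in pass timings and
// .cfg dumps.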
static constexpr const char* kPassNameSeparator = "$";

/**
 * Used by the code generator to allocate the code in a vector.
 */
class CodeVectorAllocator final : public CodeAllocator {
 public:
  explicit CodeVectorAllocator(ArenaAllocator* allocator)
      : memory_(allocator->Adapter(kArenaAllocCodeBuffer)) {}

  uint8_t* Allocate(size_t size) override {
    memory_.resize(size);
    return &memory_[0];
  }

  ArrayRef<const uint8_t> GetMemory() const override { return ArrayRef<const uint8_t>(memory_); }
  uint8_t* GetData() { return memory_.data(); }

 private:
  ArenaVector<uint8_t> memory_;

  DISALLOW_COPY_AND_ASSIGN(CodeVectorAllocator);
};

/**
 * Filter to apply to the visualizer. Methods whose name contains the filter will
 * be dumped.
 */
static constexpr const char kStringFilter[] = "";

class PassScope;

class PassObserver : public ValueObject {
 public:
  PassObserver(HGraph* graph,
               CodeGenerator* codegen,
               std::ostream* visualizer_output,
               const CompilerOptions& compiler_options,
               Mutex& dump_mutex)
      : graph_(graph),
        last_seen_graph_size_(0),
        cached_method_name_(),
        timing_logger_enabled_(compiler_options.GetDumpPassTimings()),
        timing_logger_(timing_logger_enabled_ ? GetMethodName() : "", true, true),
        disasm_info_(graph->GetAllocator()),
        visualizer_oss_(),
        visualizer_output_(visualizer_output),
        visualizer_enabled_(!compiler_options.GetDumpCfgFileName().empty()),
        visualizer_(&visualizer_oss_, graph, *codegen),
        codegen_(codegen),
        visualizer_dump_mutex_(dump_mutex),
        graph_in_bad_state_(false) {
    if (timing_logger_enabled_ || visualizer_enabled_) {
      if (!IsVerboseMethod(compiler_options, GetMethodName())) {
        timing_logger_enabled_ = visualizer_enabled_ = false;
      }
      if (visualizer_enabled_) {
        visualizer_.PrintHeader(GetMethodName());
        codegen->SetDisassemblyInformation(&disasm_info_);
      }
    }
  }

  ~PassObserver() {
    if (timing_logger_enabled_) {
      LOG(INFO) << "TIMINGS " << GetMethodName();
      LOG(INFO) << Dumpable<TimingLogger>(timing_logger_);
    }
    if (visualizer_enabled_) {
      FlushVisualizer();
    }
    DCHECK(visualizer_oss_.str().empty());
  }

  void DumpDisassembly() {
    if (visualizer_enabled_) {
      visualizer_.DumpGraphWithDisassembly();
    }
  }

  void SetGraphInBadState() { graph_in_bad_state_ = true; }

  const char* GetMethodName() {
    // PrettyMethod() is expensive, so we delay calling it until we actually have to.
    if (cached_method_name_.empty()) {
      cached_method_name_ = graph_->GetDexFile().PrettyMethod(graph_->GetMethodIdx());
    }
    return cached_method_name_.c_str();
  }

 private:
  void StartPass(const char* pass_name) {
    VLOG(compiler) << "Starting pass: " << pass_name;
    // Dump graph first, then start timer.
    if (visualizer_enabled_) {
      visualizer_.DumpGraph(pass_name, /* is_after_pass= */ false, graph_in_bad_state_);
    }
    if (timing_logger_enabled_) {
      timing_logger_.StartTiming(pass_name);
    }
  }

  void FlushVisualizer() REQUIRES(!visualizer_dump_mutex_) {
    MutexLock mu(Thread::Current(), visualizer_dump_mutex_);
    *visualizer_output_ << visualizer_oss_.str();
    visualizer_output_->flush();
    visualizer_oss_.str("");
    visualizer_oss_.clear();
  }

  void EndPass(const char* pass_name, bool pass_change) {
    // Pause timer first, then dump graph.
    if (timing_logger_enabled_) {
      timing_logger_.EndTiming();
    }
    if (visualizer_enabled_) {
      visualizer_.DumpGraph(pass_name, /* is_after_pass= */ true, graph_in_bad_state_);
    }

    // Validate the HGraph if running in debug mode.
    if (kIsDebugBuild) {
      if (!graph_in_bad_state_) {
        GraphChecker checker(graph_, codegen_);
        last_seen_graph_size_ = checker.Run(pass_change, last_seen_graph_size_);
        if (!checker.IsValid()) {
          LOG(FATAL) << "Error after " << pass_name << ": " << Dumpable<GraphChecker>(checker);
        }
      }
    }
  }

  static bool IsVerboseMethod(const CompilerOptions& compiler_options, const char* method_name) {
    // Test for an exact match against --verbose-methods. If verbose-methods is set, it
    // overrides the empty kStringFilter, which would otherwise match all methods.
    if (compiler_options.HasVerboseMethods()) {
      return compiler_options.IsVerboseMethod(method_name);
    }

    // Otherwise test for the kStringFilter substring. The constexpr helper variable
    // silences an unreachable-code warning when the filter string is empty.
    constexpr bool kStringFilterEmpty = arraysize(kStringFilter) <= 1;
    if (kStringFilterEmpty || strstr(method_name, kStringFilter) != nullptr) {
      return true;
    }

    return false;
  }

  HGraph* const graph_;
  size_t last_seen_graph_size_;

  std::string cached_method_name_;

  bool timing_logger_enabled_;
  TimingLogger timing_logger_;

  DisassemblyInformation disasm_info_;

  std::ostringstream visualizer_oss_;
  std::ostream* visualizer_output_;
  bool visualizer_enabled_;
  HGraphVisualizer visualizer_;
  CodeGenerator* codegen_;
  Mutex& visualizer_dump_mutex_;

  // Flag to be set by the compiler if the pass failed and the graph is not
  // expected to validate.
  bool graph_in_bad_state_;

  friend PassScope;

  DISALLOW_COPY_AND_ASSIGN(PassObserver);
};

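// RAII helper that brackets a single optimization pass: the constructor reports the pass
// start to the PassObserver and the destructor reports its end. A pass is assumed to have
// changed the graph unless SetPassNotChanged() is called before the scope ends.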
class PassScope : public ValueObject {
 public:
  PassScope(const char *pass_name, PassObserver* pass_observer)
      : pass_name_(pass_name),
        pass_change_(true),  // assume change
        pass_observer_(pass_observer) {
    pass_observer_->StartPass(pass_name_);
  }

  void SetPassNotChanged() {
    pass_change_ = false;
  }

  ~PassScope() {
    pass_observer_->EndPass(pass_name_, pass_change_);
  }

 private:
  const char* const pass_name_;
  bool pass_change_;
  PassObserver* const pass_observer_;
};

class OptimizingCompiler final : public Compiler {
 public:
  explicit OptimizingCompiler(const CompilerOptions& compiler_options,
                              CompiledMethodStorage* storage);
  ~OptimizingCompiler() override;

  bool CanCompileMethod(uint32_t method_idx, const DexFile& dex_file) const override;

  CompiledMethod* Compile(const dex::CodeItem* code_item,
                          uint32_t access_flags,
                          InvokeType invoke_type,
                          uint16_t class_def_idx,
                          uint32_t method_idx,
                          Handle<mirror::ClassLoader> class_loader,
                          const DexFile& dex_file,
                          Handle<mirror::DexCache> dex_cache) const override;

  CompiledMethod* JniCompile(uint32_t access_flags,
                             uint32_t method_idx,
                             const DexFile& dex_file,
                             Handle<mirror::DexCache> dex_cache) const override;

  uintptr_t GetEntryPointOf(ArtMethod* method) const override
      REQUIRES_SHARED(Locks::mutator_lock_) {
    return reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCodePtrSize(
        InstructionSetPointerSize(GetCompilerOptions().GetInstructionSet())));
  }

  bool JitCompile(Thread* self,
                  jit::JitCodeCache* code_cache,
                  jit::JitMemoryRegion* region,
                  ArtMethod* method,
                  CompilationKind compilation_kind,
                  jit::JitLogger* jit_logger)
      override
      REQUIRES_SHARED(Locks::mutator_lock_);

 private:
  bool RunOptimizations(HGraph* graph,
                        CodeGenerator* codegen,
                        const DexCompilationUnit& dex_compilation_unit,
                        PassObserver* pass_observer,
                        const OptimizationDef definitions[],
                        size_t length) const {
    // Convert definitions to optimization passes.
    ArenaVector<HOptimization*> optimizations = ConstructOptimizations(
        definitions,
        length,
        graph->GetAllocator(),
        graph,
        compilation_stats_.get(),
        codegen,
        dex_compilation_unit);
    DCHECK_EQ(length, optimizations.size());
    // Run the optimization passes one by one. Any "depends_on" pass refers back to
    // the most recent occurrence of that pass, skipped or executed.
    std::bitset<static_cast<size_t>(OptimizationPass::kLast) + 1u> pass_changes;
    pass_changes[static_cast<size_t>(OptimizationPass::kNone)] = true;
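    // OptimizationPass::kNone is marked as "changed" up front, so passes whose depends_on
    // is kNone (i.e. passes with no real dependency) always run. For example, a definition
    // like
    //   OptDef(OptimizationPass::kConstantFolding,
    //          "constant_folding$after_inlining",
    //          OptimizationPass::kInliner)
    // is executed only if the most recent inliner pass reported a change.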
    bool change = false;
    for (size_t i = 0; i < length; ++i) {
      if (pass_changes[static_cast<size_t>(definitions[i].depends_on)]) {
        // Execute the pass and record whether it changed anything.
        PassScope scope(optimizations[i]->GetPassName(), pass_observer);
        bool pass_change = optimizations[i]->Run();
        pass_changes[static_cast<size_t>(definitions[i].pass)] = pass_change;
        if (pass_change) {
          change = true;
        } else {
          scope.SetPassNotChanged();
        }
      } else {
        // Skip the pass and record that nothing changed.
        pass_changes[static_cast<size_t>(definitions[i].pass)] = false;
      }
    }
    return change;
  }

  template <size_t length> bool RunOptimizations(
      HGraph* graph,
      CodeGenerator* codegen,
      const DexCompilationUnit& dex_compilation_unit,
      PassObserver* pass_observer,
      const OptimizationDef (&definitions)[length]) const {
    return RunOptimizations(
        graph, codegen, dex_compilation_unit, pass_observer, definitions, length);
  }

  void RunOptimizations(HGraph* graph,
                        CodeGenerator* codegen,
                        const DexCompilationUnit& dex_compilation_unit,
                        PassObserver* pass_observer) const;

 private:
  // Create a 'CompiledMethod' for an optimized graph.
  CompiledMethod* Emit(ArenaAllocator* allocator,
                       CodeVectorAllocator* code_allocator,
                       CodeGenerator* codegen,
                       const dex::CodeItem* item) const;

  // Try compiling a method and return the code generator used for
  // compiling it.
  // This method:
  // 1) Builds the graph. Returns null if it failed to build it.
  // 2) Transforms the graph to SSA. Returns null if it failed.
  // 3) Runs optimizations on the graph, including register allocation.
  // 4) Generates code with the `code_allocator` provided.
  CodeGenerator* TryCompile(ArenaAllocator* allocator,
                            ArenaStack* arena_stack,
                            CodeVectorAllocator* code_allocator,
                            const DexCompilationUnit& dex_compilation_unit,
                            ArtMethod* method,
                            CompilationKind compilation_kind,
                            VariableSizedHandleScope* handles) const;

  CodeGenerator* TryCompileIntrinsic(ArenaAllocator* allocator,
                                     ArenaStack* arena_stack,
                                     CodeVectorAllocator* code_allocator,
                                     const DexCompilationUnit& dex_compilation_unit,
                                     ArtMethod* method,
                                     VariableSizedHandleScope* handles) const;

  bool RunArchOptimizations(HGraph* graph,
                            CodeGenerator* codegen,
                            const DexCompilationUnit& dex_compilation_unit,
                            PassObserver* pass_observer) const;

  bool RunBaselineOptimizations(HGraph* graph,
                                CodeGenerator* codegen,
                                const DexCompilationUnit& dex_compilation_unit,
                                PassObserver* pass_observer) const;

  std::vector<uint8_t> GenerateJitDebugInfo(const debug::MethodDebugInfo& method_debug_info);

  // This must be called before any other function that dumps data to the .cfg file.
  void DumpInstructionSetFeaturesToCfg() const;

  std::unique_ptr<OptimizingCompilerStats> compilation_stats_;

  std::unique_ptr<std::ostream> visualizer_output_;

  mutable Mutex dump_mutex_;  // To synchronize visualizer writing.

  DISALLOW_COPY_AND_ASSIGN(OptimizingCompiler);
};

static const int kMaximumCompilationTimeBeforeWarning = 100;  /* ms */

OptimizingCompiler::OptimizingCompiler(const CompilerOptions& compiler_options,
                                       CompiledMethodStorage* storage)
    : Compiler(compiler_options, storage, kMaximumCompilationTimeBeforeWarning),
      dump_mutex_("Visualizer dump lock") {
  // Enable C1visualizer output.
  const std::string& cfg_file_name = compiler_options.GetDumpCfgFileName();
  if (!cfg_file_name.empty()) {
    std::ios_base::openmode cfg_file_mode =
        compiler_options.GetDumpCfgAppend() ? std::ofstream::app : std::ofstream::out;
    visualizer_output_.reset(new std::ofstream(cfg_file_name, cfg_file_mode));
    DumpInstructionSetFeaturesToCfg();
  }
  if (compiler_options.GetDumpStats()) {
    compilation_stats_.reset(new OptimizingCompilerStats());
  }
}

OptimizingCompiler::~OptimizingCompiler() {
  if (compilation_stats_.get() != nullptr) {
    compilation_stats_->Log();
  }
}

void OptimizingCompiler::DumpInstructionSetFeaturesToCfg() const {
  const CompilerOptions& compiler_options = GetCompilerOptions();
  const InstructionSetFeatures* features = compiler_options.GetInstructionSetFeatures();
  std::string isa_string =
      std::string("isa:") + GetInstructionSetString(features->GetInstructionSet());
  std::string features_string = "isa_features:" + features->GetFeatureString();
  // It is assumed that visualizer_output_ is empty when calling this function, hence the fake
  // compilation block containing the ISA features will be printed at the beginning of the .cfg
  // file.
  *visualizer_output_
      << HGraphVisualizer::InsertMetaDataAsCompilationBlock(isa_string + ' ' + features_string);
}

bool OptimizingCompiler::CanCompileMethod(uint32_t method_idx ATTRIBUTE_UNUSED,
                                          const DexFile& dex_file ATTRIBUTE_UNUSED) const {
  return true;
}

static bool IsInstructionSetSupported(InstructionSet instruction_set) {
  return instruction_set == InstructionSet::kArm
      || instruction_set == InstructionSet::kArm64
      || instruction_set == InstructionSet::kThumb2
      || instruction_set == InstructionSet::kX86
      || instruction_set == InstructionSet::kX86_64;
}

bool OptimizingCompiler::RunBaselineOptimizations(HGraph* graph,
                                                  CodeGenerator* codegen,
                                                  const DexCompilationUnit& dex_compilation_unit,
                                                  PassObserver* pass_observer) const {
  switch (codegen->GetCompilerOptions().GetInstructionSet()) {
#ifdef ART_ENABLE_CODEGEN_x86
    case InstructionSet::kX86: {
      OptimizationDef x86_optimizations[] = {
          OptDef(OptimizationPass::kPcRelativeFixupsX86),
      };
      return RunOptimizations(graph,
                              codegen,
                              dex_compilation_unit,
                              pass_observer,
                              x86_optimizations);
    }
#endif
    default:
      UNUSED(graph);
      UNUSED(codegen);
      UNUSED(dex_compilation_unit);
      UNUSED(pass_observer);
      return false;
  }
}

bool OptimizingCompiler::RunArchOptimizations(HGraph* graph,
                                              CodeGenerator* codegen,
                                              const DexCompilationUnit& dex_compilation_unit,
                                              PassObserver* pass_observer) const {
  switch (codegen->GetCompilerOptions().GetInstructionSet()) {
#if defined(ART_ENABLE_CODEGEN_arm)
    case InstructionSet::kThumb2:
    case InstructionSet::kArm: {
      OptimizationDef arm_optimizations[] = {
          OptDef(OptimizationPass::kInstructionSimplifierArm),
          OptDef(OptimizationPass::kSideEffectsAnalysis),
          OptDef(OptimizationPass::kGlobalValueNumbering, "GVN$after_arch"),
          OptDef(OptimizationPass::kScheduling)
      };
      return RunOptimizations(graph,
                              codegen,
                              dex_compilation_unit,
                              pass_observer,
                              arm_optimizations);
    }
#endif
#ifdef ART_ENABLE_CODEGEN_arm64
    case InstructionSet::kArm64: {
      OptimizationDef arm64_optimizations[] = {
          OptDef(OptimizationPass::kInstructionSimplifierArm64),
          OptDef(OptimizationPass::kSideEffectsAnalysis),
          OptDef(OptimizationPass::kGlobalValueNumbering, "GVN$after_arch"),
          OptDef(OptimizationPass::kScheduling)
      };
      return RunOptimizations(graph,
                              codegen,
                              dex_compilation_unit,
                              pass_observer,
                              arm64_optimizations);
    }
#endif
#ifdef ART_ENABLE_CODEGEN_x86
    case InstructionSet::kX86: {
      OptimizationDef x86_optimizations[] = {
          OptDef(OptimizationPass::kInstructionSimplifierX86),
          OptDef(OptimizationPass::kSideEffectsAnalysis),
          OptDef(OptimizationPass::kGlobalValueNumbering, "GVN$after_arch"),
          OptDef(OptimizationPass::kPcRelativeFixupsX86),
          OptDef(OptimizationPass::kX86MemoryOperandGeneration)
      };
      return RunOptimizations(graph,
                              codegen,
                              dex_compilation_unit,
                              pass_observer,
                              x86_optimizations);
    }
#endif
#ifdef ART_ENABLE_CODEGEN_x86_64
    case InstructionSet::kX86_64: {
      OptimizationDef x86_64_optimizations[] = {
          OptDef(OptimizationPass::kInstructionSimplifierX86_64),
          OptDef(OptimizationPass::kSideEffectsAnalysis),
          OptDef(OptimizationPass::kGlobalValueNumbering, "GVN$after_arch"),
          OptDef(OptimizationPass::kX86MemoryOperandGeneration)
      };
      return RunOptimizations(graph,
                              codegen,
                              dex_compilation_unit,
                              pass_observer,
                              x86_64_optimizations);
    }
#endif
    default:
      return false;
  }
}

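// Register allocation proceeds in three steps: preparing the graph for register allocation,
// running SSA liveness analysis, and then running the register allocator selected by
// `strategy`.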
NO_INLINE  // Avoid increasing caller's frame size by large stack-allocated objects.
static void AllocateRegisters(HGraph* graph,
                              CodeGenerator* codegen,
                              PassObserver* pass_observer,
                              RegisterAllocator::Strategy strategy,
                              OptimizingCompilerStats* stats) {
  {
    PassScope scope(PrepareForRegisterAllocation::kPrepareForRegisterAllocationPassName,
                    pass_observer);
    PrepareForRegisterAllocation(graph, codegen->GetCompilerOptions(), stats).Run();
  }
  // Use a local allocator shared by the SSA liveness analysis and the register allocator.
  // (The register allocator creates new objects in the liveness data.)
  ScopedArenaAllocator local_allocator(graph->GetArenaStack());
  SsaLivenessAnalysis liveness(graph, codegen, &local_allocator);
  {
    PassScope scope(SsaLivenessAnalysis::kLivenessPassName, pass_observer);
    liveness.Analyze();
  }
  {
    PassScope scope(RegisterAllocator::kRegisterAllocatorPassName, pass_observer);
    std::unique_ptr<RegisterAllocator> register_allocator =
        RegisterAllocator::Create(&local_allocator, codegen, liveness, strategy);
    register_allocator->AllocateRegisters();
  }
}

// Strip the pass name suffix to get the optimization name.
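// For example, "dead_code_elimination$initial" maps back to "dead_code_elimination".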
static std::string ConvertPassNameToOptimizationName(const std::string& pass_name) {
  size_t pos = pass_name.find(kPassNameSeparator);
  return pos == std::string::npos ? pass_name : pass_name.substr(0, pos);
}

void OptimizingCompiler::RunOptimizations(HGraph* graph,
                                          CodeGenerator* codegen,
                                          const DexCompilationUnit& dex_compilation_unit,
                                          PassObserver* pass_observer) const {
  const std::vector<std::string>* pass_names = GetCompilerOptions().GetPassesToRun();
  if (pass_names != nullptr) {
    // If passes were defined on command-line, build the optimization
    // passes and run these instead of the built-in optimizations.
    // TODO: a way to define depends_on via command-line?
    const size_t length = pass_names->size();
    std::vector<OptimizationDef> optimizations;
    for (const std::string& pass_name : *pass_names) {
      std::string opt_name = ConvertPassNameToOptimizationName(pass_name);
      optimizations.push_back(OptDef(OptimizationPassByName(opt_name), pass_name.c_str()));
    }
    RunOptimizations(graph,
                     codegen,
                     dex_compilation_unit,
                     pass_observer,
                     optimizations.data(),
                     length);
    return;
  }

  OptimizationDef optimizations[] = {
      // Initial optimizations.
      OptDef(OptimizationPass::kConstantFolding),
      OptDef(OptimizationPass::kInstructionSimplifier),
      OptDef(OptimizationPass::kDeadCodeElimination,
             "dead_code_elimination$initial"),
      // Inlining.
      OptDef(OptimizationPass::kInliner),
      // Simplification (only if inlining occurred).
      OptDef(OptimizationPass::kConstantFolding,
             "constant_folding$after_inlining",
             OptimizationPass::kInliner),
      OptDef(OptimizationPass::kInstructionSimplifier,
             "instruction_simplifier$after_inlining",
             OptimizationPass::kInliner),
      OptDef(OptimizationPass::kDeadCodeElimination,
             "dead_code_elimination$after_inlining",
             OptimizationPass::kInliner),
      // GVN.
      OptDef(OptimizationPass::kSideEffectsAnalysis,
             "side_effects$before_gvn"),
      OptDef(OptimizationPass::kGlobalValueNumbering),
      // Simplification (TODO: only if GVN occurred).
      OptDef(OptimizationPass::kSelectGenerator),
      OptDef(OptimizationPass::kConstantFolding,
             "constant_folding$after_gvn"),
      OptDef(OptimizationPass::kInstructionSimplifier,
             "instruction_simplifier$after_gvn"),
      OptDef(OptimizationPass::kDeadCodeElimination,
             "dead_code_elimination$after_gvn"),
      // High-level optimizations.
      OptDef(OptimizationPass::kSideEffectsAnalysis,
             "side_effects$before_licm"),
      OptDef(OptimizationPass::kInvariantCodeMotion),
      OptDef(OptimizationPass::kInductionVarAnalysis),
      OptDef(OptimizationPass::kBoundsCheckElimination),
      OptDef(OptimizationPass::kLoopOptimization),
      // Simplification.
      OptDef(OptimizationPass::kConstantFolding,
             "constant_folding$after_bce"),
      OptDef(OptimizationPass::kAggressiveInstructionSimplifier,
             "instruction_simplifier$after_bce"),
      // Other high-level optimizations.
      OptDef(OptimizationPass::kSideEffectsAnalysis,
             "side_effects$before_lse"),
      OptDef(OptimizationPass::kLoadStoreElimination),
      OptDef(OptimizationPass::kCHAGuardOptimization),
      OptDef(OptimizationPass::kDeadCodeElimination,
             "dead_code_elimination$final"),
      OptDef(OptimizationPass::kCodeSinking),
      // The codegen has a few assumptions that only the instruction simplifier
      // can satisfy. For example, the code generator does not expect to see a
      // HTypeConversion from a type to the same type.
      OptDef(OptimizationPass::kAggressiveInstructionSimplifier,
             "instruction_simplifier$before_codegen"),
      // Eliminate constructor fences after code sinking to avoid
      // complicated sinking logic to split a fence with many inputs.
      OptDef(OptimizationPass::kConstructorFenceRedundancyElimination)
  };
  RunOptimizations(graph,
                   codegen,
                   dex_compilation_unit,
                   pass_observer,
                   optimizations);

  RunArchOptimizations(graph, codegen, dex_compilation_unit, pass_observer);
}

static ArenaVector<linker::LinkerPatch> EmitAndSortLinkerPatches(CodeGenerator* codegen) {
  ArenaVector<linker::LinkerPatch> linker_patches(codegen->GetGraph()->GetAllocator()->Adapter());
  codegen->EmitLinkerPatches(&linker_patches);

  // Sort patches by literal offset. Required for .oat_patches encoding.
  std::sort(linker_patches.begin(), linker_patches.end(),
            [](const linker::LinkerPatch& lhs, const linker::LinkerPatch& rhs) {
              return lhs.LiteralOffset() < rhs.LiteralOffset();
            });

  return linker_patches;
}

CompiledMethod* OptimizingCompiler::Emit(ArenaAllocator* allocator,
                                         CodeVectorAllocator* code_allocator,
                                         CodeGenerator* codegen,
                                         const dex::CodeItem* code_item_for_osr_check) const {
  ArenaVector<linker::LinkerPatch> linker_patches = EmitAndSortLinkerPatches(codegen);
  ScopedArenaVector<uint8_t> stack_map = codegen->BuildStackMaps(code_item_for_osr_check);

  CompiledMethodStorage* storage = GetCompiledMethodStorage();
  CompiledMethod* compiled_method = CompiledMethod::SwapAllocCompiledMethod(
      storage,
      codegen->GetInstructionSet(),
      code_allocator->GetMemory(),
      ArrayRef<const uint8_t>(stack_map),
      ArrayRef<const uint8_t>(*codegen->GetAssembler()->cfi().data()),
      ArrayRef<const linker::LinkerPatch>(linker_patches));

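  // Emit any thunks required by the linker patches. The CompiledMethodStorage keeps one
  // copy of each distinct thunk, so the code is generated only when not already present.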
  for (const linker::LinkerPatch& patch : linker_patches) {
    if (codegen->NeedsThunkCode(patch) && storage->GetThunkCode(patch).empty()) {
      ArenaVector<uint8_t> code(allocator->Adapter());
      std::string debug_name;
      codegen->EmitThunkCode(patch, &code, &debug_name);
      storage->SetThunkCode(patch, ArrayRef<const uint8_t>(code), debug_name);
    }
  }

  return compiled_method;
}

CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
                                              ArenaStack* arena_stack,
                                              CodeVectorAllocator* code_allocator,
                                              const DexCompilationUnit& dex_compilation_unit,
                                              ArtMethod* method,
                                              CompilationKind compilation_kind,
                                              VariableSizedHandleScope* handles) const {
  MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kAttemptBytecodeCompilation);
  const CompilerOptions& compiler_options = GetCompilerOptions();
  InstructionSet instruction_set = compiler_options.GetInstructionSet();
  const DexFile& dex_file = *dex_compilation_unit.GetDexFile();
  uint32_t method_idx = dex_compilation_unit.GetDexMethodIndex();
  const dex::CodeItem* code_item = dex_compilation_unit.GetCodeItem();

  // Always use the Thumb-2 assembler: some runtime functionality
  // (like implicit stack overflow checks) assumes Thumb-2.
  DCHECK_NE(instruction_set, InstructionSet::kArm);

  // Do not attempt to compile on architectures we do not support.
  if (!IsInstructionSetSupported(instruction_set)) {
    MaybeRecordStat(compilation_stats_.get(),
                    MethodCompilationStat::kNotCompiledUnsupportedIsa);
    return nullptr;
  }

  if (Compiler::IsPathologicalCase(*code_item, method_idx, dex_file)) {
    MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kNotCompiledPathological);
    return nullptr;
  }

  // Implementation of the space filter: do not compile a code item whose size in
  // code units is bigger than 128.
  static constexpr size_t kSpaceFilterOptimizingThreshold = 128;
  if ((compiler_options.GetCompilerFilter() == CompilerFilter::kSpace)
      && (CodeItemInstructionAccessor(dex_file, code_item).InsnsSizeInCodeUnits() >
          kSpaceFilterOptimizingThreshold)) {
    MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kNotCompiledSpaceFilter);
    return nullptr;
  }

  CodeItemDebugInfoAccessor code_item_accessor(dex_file, code_item, method_idx);

  bool dead_reference_safe;
  ArrayRef<const uint8_t> interpreter_metadata;
  // For AOT compilation, we may not get a method, for example if its class is erroneous,
  // possibly due to an unavailable superclass. JIT should always have a method.
  DCHECK(Runtime::Current()->IsAotCompiler() || method != nullptr);
  if (method != nullptr) {
    const dex::ClassDef* containing_class;
    {
      ScopedObjectAccess soa(Thread::Current());
      containing_class = &method->GetClassDef();
      interpreter_metadata = method->GetQuickenedInfo();
    }
    // MethodContainsRSensitiveAccess is currently slow, but HasDeadReferenceSafeAnnotation()
    // is rarely true.
    dead_reference_safe =
        annotations::HasDeadReferenceSafeAnnotation(dex_file, *containing_class)
        && !annotations::MethodContainsRSensitiveAccess(dex_file, *containing_class, method_idx);
  } else {
    // If we could not resolve the class, conservatively assume it's dead-reference unsafe.
    dead_reference_safe = false;
  }

  HGraph* graph = new (allocator) HGraph(
      allocator,
      arena_stack,
      handles,
      dex_file,
      method_idx,
      compiler_options.GetInstructionSet(),
      kInvalidInvokeType,
      dead_reference_safe,
      compiler_options.GetDebuggable(),
      compilation_kind);

  if (method != nullptr) {
    graph->SetArtMethod(method);
  }

  std::unique_ptr<CodeGenerator> codegen(
      CodeGenerator::Create(graph,
                            compiler_options,
                            compilation_stats_.get()));
  if (codegen.get() == nullptr) {
    MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kNotCompiledNoCodegen);
    return nullptr;
  }
  codegen->GetAssembler()->cfi().SetEnabled(compiler_options.GenerateAnyDebugInfo());

  PassObserver pass_observer(graph,
                             codegen.get(),
                             visualizer_output_.get(),
                             compiler_options,
                             dump_mutex_);

  {
    VLOG(compiler) << "Building " << pass_observer.GetMethodName();
    PassScope scope(HGraphBuilder::kBuilderPassName, &pass_observer);
    HGraphBuilder builder(graph,
                          code_item_accessor,
                          &dex_compilation_unit,
                          &dex_compilation_unit,
                          codegen.get(),
                          compilation_stats_.get(),
                          interpreter_metadata);
    GraphAnalysisResult result = builder.BuildGraph();
    if (result != kAnalysisSuccess) {
      switch (result) {
        case kAnalysisSkipped: {
          MaybeRecordStat(compilation_stats_.get(),
                          MethodCompilationStat::kNotCompiledSkipped);
          break;
        }
        case kAnalysisInvalidBytecode: {
          MaybeRecordStat(compilation_stats_.get(),
                          MethodCompilationStat::kNotCompiledInvalidBytecode);
          break;
        }
        case kAnalysisFailThrowCatchLoop: {
          MaybeRecordStat(compilation_stats_.get(),
                          MethodCompilationStat::kNotCompiledThrowCatchLoop);
          break;
        }
        case kAnalysisFailAmbiguousArrayOp: {
          MaybeRecordStat(compilation_stats_.get(),
                          MethodCompilationStat::kNotCompiledAmbiguousArrayOp);
          break;
        }
        case kAnalysisFailIrreducibleLoopAndStringInit: {
          MaybeRecordStat(compilation_stats_.get(),
                          MethodCompilationStat::kNotCompiledIrreducibleLoopAndStringInit);
          break;
        }
        case kAnalysisFailPhiEquivalentInOsr: {
          MaybeRecordStat(compilation_stats_.get(),
                          MethodCompilationStat::kNotCompiledPhiEquivalentInOsr);
          break;
        }
        case kAnalysisSuccess:
          UNREACHABLE();
      }
      pass_observer.SetGraphInBadState();
      return nullptr;
    }
  }

  if (compilation_kind == CompilationKind::kBaseline) {
    RunBaselineOptimizations(graph, codegen.get(), dex_compilation_unit, &pass_observer);
  } else {
    RunOptimizations(graph, codegen.get(), dex_compilation_unit, &pass_observer);
  }

  RegisterAllocator::Strategy regalloc_strategy =
      compiler_options.GetRegisterAllocationStrategy();
  AllocateRegisters(graph,
                    codegen.get(),
                    &pass_observer,
                    regalloc_strategy,
                    compilation_stats_.get());

  codegen->Compile(code_allocator);
  pass_observer.DumpDisassembly();

  MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kCompiledBytecode);
  return codegen.release();
}

CodeGenerator* OptimizingCompiler::TryCompileIntrinsic(
    ArenaAllocator* allocator,
    ArenaStack* arena_stack,
    CodeVectorAllocator* code_allocator,
    const DexCompilationUnit& dex_compilation_unit,
    ArtMethod* method,
    VariableSizedHandleScope* handles) const {
  MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kAttemptIntrinsicCompilation);
  const CompilerOptions& compiler_options = GetCompilerOptions();
  InstructionSet instruction_set = compiler_options.GetInstructionSet();
  const DexFile& dex_file = *dex_compilation_unit.GetDexFile();
  uint32_t method_idx = dex_compilation_unit.GetDexMethodIndex();

  // Always use the Thumb-2 assembler: some runtime functionality
  // (like implicit stack overflow checks) assumes Thumb-2.
  DCHECK_NE(instruction_set, InstructionSet::kArm);

  // Do not attempt to compile on architectures we do not support.
  if (!IsInstructionSetSupported(instruction_set)) {
    return nullptr;
  }

  HGraph* graph = new (allocator) HGraph(
      allocator,
      arena_stack,
      handles,
      dex_file,
      method_idx,
      compiler_options.GetInstructionSet(),
      kInvalidInvokeType,
      /* dead_reference_safe= */ true,  // Intrinsics don't affect dead reference safety.
      compiler_options.GetDebuggable(),
      CompilationKind::kOptimized);

  DCHECK(Runtime::Current()->IsAotCompiler());
  DCHECK(method != nullptr);
  graph->SetArtMethod(method);

  std::unique_ptr<CodeGenerator> codegen(
      CodeGenerator::Create(graph,
                            compiler_options,
                            compilation_stats_.get()));
  if (codegen.get() == nullptr) {
    return nullptr;
  }
  codegen->GetAssembler()->cfi().SetEnabled(compiler_options.GenerateAnyDebugInfo());

  PassObserver pass_observer(graph,
                             codegen.get(),
                             visualizer_output_.get(),
                             compiler_options,
                             dump_mutex_);

  {
    VLOG(compiler) << "Building intrinsic graph " << pass_observer.GetMethodName();
    PassScope scope(HGraphBuilder::kBuilderPassName, &pass_observer);
    HGraphBuilder builder(graph,
                          CodeItemDebugInfoAccessor(),  // Null code item.
                          &dex_compilation_unit,
                          &dex_compilation_unit,
                          codegen.get(),
                          compilation_stats_.get(),
                          /* interpreter_metadata= */ ArrayRef<const uint8_t>());
    builder.BuildIntrinsicGraph(method);
  }

  OptimizationDef optimizations[] = {
      // The codegen has a few assumptions that only the instruction simplifier
      // can satisfy.
      OptDef(OptimizationPass::kInstructionSimplifier),
  };
  RunOptimizations(graph,
                   codegen.get(),
                   dex_compilation_unit,
                   &pass_observer,
                   optimizations);

  RunArchOptimizations(graph, codegen.get(), dex_compilation_unit, &pass_observer);

  AllocateRegisters(graph,
                    codegen.get(),
                    &pass_observer,
                    compiler_options.GetRegisterAllocationStrategy(),
                    compilation_stats_.get());
  if (!codegen->IsLeafMethod()) {
    VLOG(compiler) << "Intrinsic method is not leaf: " << method->GetIntrinsic()
                   << " " << graph->PrettyMethod();
    return nullptr;
  }

  codegen->Compile(code_allocator);
  pass_observer.DumpDisassembly();

  VLOG(compiler) << "Compiled intrinsic: " << method->GetIntrinsic()
                 << " " << graph->PrettyMethod();
  MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kCompiledIntrinsic);
  return codegen.release();
}

CompiledMethod* OptimizingCompiler::Compile(const dex::CodeItem* code_item,
                                            uint32_t access_flags,
                                            InvokeType invoke_type,
                                            uint16_t class_def_idx,
                                            uint32_t method_idx,
                                            Handle<mirror::ClassLoader> jclass_loader,
                                            const DexFile& dex_file,
                                            Handle<mirror::DexCache> dex_cache) const {
  const CompilerOptions& compiler_options = GetCompilerOptions();
  DCHECK(compiler_options.IsAotCompiler());
  CompiledMethod* compiled_method = nullptr;
  Runtime* runtime = Runtime::Current();
  DCHECK(runtime->IsAotCompiler());
  const VerifiedMethod* verified_method = compiler_options.GetVerifiedMethod(&dex_file, method_idx);
  DCHECK(!verified_method->HasRuntimeThrow());
  if (compiler_options.IsMethodVerifiedWithoutFailures(method_idx, class_def_idx, dex_file) ||
      verifier::CanCompilerHandleVerificationFailure(
          verified_method->GetEncounteredVerificationFailures())) {
    ArenaAllocator allocator(runtime->GetArenaPool());
    ArenaStack arena_stack(runtime->GetArenaPool());
    CodeVectorAllocator code_allocator(&allocator);
    std::unique_ptr<CodeGenerator> codegen;
    bool compiled_intrinsic = false;
    {
      ScopedObjectAccess soa(Thread::Current());
      ArtMethod* method =
          runtime->GetClassLinker()->ResolveMethod<ClassLinker::ResolveMode::kCheckICCEAndIAE>(
              method_idx, dex_cache, jclass_loader, /*referrer=*/ nullptr, invoke_type);
      DCHECK_EQ(method == nullptr, soa.Self()->IsExceptionPending());
      soa.Self()->ClearException();  // Suppress exception if any.
      VariableSizedHandleScope handles(soa.Self());
      Handle<mirror::Class> compiling_class =
          handles.NewHandle(method != nullptr ? method->GetDeclaringClass() : nullptr);
      DexCompilationUnit dex_compilation_unit(
          jclass_loader,
          runtime->GetClassLinker(),
          dex_file,
          code_item,
          class_def_idx,
          method_idx,
          access_flags,
          /*verified_method=*/ nullptr,  // Not needed by the Optimizing compiler.
          dex_cache,
          compiling_class);
      // Go to native so that we don't block GC during compilation.
      ScopedThreadSuspension sts(soa.Self(), kNative);
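      // Boot-image intrinsics get a chance to compile through the dedicated intrinsic
      // path first; if that fails, fall back to regular bytecode compilation below.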
      if (method != nullptr && UNLIKELY(method->IsIntrinsic())) {
        DCHECK(compiler_options.IsBootImage());
        codegen.reset(
            TryCompileIntrinsic(&allocator,
                                &arena_stack,
                                &code_allocator,
                                dex_compilation_unit,
                                method,
                                &handles));
        if (codegen != nullptr) {
          compiled_intrinsic = true;
        }
      }
      if (codegen == nullptr) {
        codegen.reset(
            TryCompile(&allocator,
                       &arena_stack,
                       &code_allocator,
                       dex_compilation_unit,
                       method,
                       compiler_options.IsBaseline()
                           ? CompilationKind::kBaseline
                           : CompilationKind::kOptimized,
                       &handles));
      }
    }
    if (codegen.get() != nullptr) {
      compiled_method = Emit(&allocator,
                             &code_allocator,
                             codegen.get(),
                             compiled_intrinsic ? nullptr : code_item);
      if (compiled_intrinsic) {
        compiled_method->MarkAsIntrinsic();
      }

      if (kArenaAllocatorCountAllocations) {
        codegen.reset();  // Release codegen's ScopedArenaAllocator for memory accounting.
        size_t total_allocated = allocator.BytesAllocated() + arena_stack.PeakBytesAllocated();
        if (total_allocated > kArenaAllocatorMemoryReportThreshold) {
          MemStats mem_stats(allocator.GetMemStats());
          MemStats peak_stats(arena_stack.GetPeakStats());
          LOG(INFO) << "Used " << total_allocated << " bytes of arena memory for compiling "
                    << dex_file.PrettyMethod(method_idx)
                    << "\n" << Dumpable<MemStats>(mem_stats)
                    << "\n" << Dumpable<MemStats>(peak_stats);
        }
      }
    }
  } else {
    MethodCompilationStat method_stat;
    if (compiler_options.VerifyAtRuntime()) {
      method_stat = MethodCompilationStat::kNotCompiledVerifyAtRuntime;
    } else {
      method_stat = MethodCompilationStat::kNotCompiledVerificationError;
    }
    MaybeRecordStat(compilation_stats_.get(), method_stat);
  }

  if (kIsDebugBuild &&
      compiler_options.CompileArtTest() &&
      IsInstructionSetSupported(compiler_options.GetInstructionSet())) {
    // For testing purposes, we put a special marker on method names
    // that should be compiled with this compiler (when the
    // instruction set is supported). This makes sure we're not
    // regressing.
    std::string method_name = dex_file.PrettyMethod(method_idx);
    bool shouldCompile = method_name.find("$opt$") != std::string::npos;
    DCHECK((compiled_method != nullptr) || !shouldCompile) << "Didn't compile " << method_name;
  }

  return compiled_method;
}

static ScopedArenaVector<uint8_t> CreateJniStackMap(ScopedArenaAllocator* allocator,
                                                    const JniCompiledMethod& jni_compiled_method) {
  // StackMapStream is quite large, so allocate it using the ScopedArenaAllocator
  // to stay clear of the frame size limit.
  std::unique_ptr<StackMapStream> stack_map_stream(
      new (allocator) StackMapStream(allocator, jni_compiled_method.GetInstructionSet()));
  stack_map_stream->BeginMethod(
      jni_compiled_method.GetFrameSize(),
      jni_compiled_method.GetCoreSpillMask(),
      jni_compiled_method.GetFpSpillMask(),
      /* num_dex_registers= */ 0,
      /* baseline= */ false);
  stack_map_stream->EndMethod();
  return stack_map_stream->Encode();
}

CompiledMethod* OptimizingCompiler::JniCompile(uint32_t access_flags,
                                               uint32_t method_idx,
                                               const DexFile& dex_file,
                                               Handle<mirror::DexCache> dex_cache) const {
  Runtime* runtime = Runtime::Current();
  ArenaAllocator allocator(runtime->GetArenaPool());
  ArenaStack arena_stack(runtime->GetArenaPool());

  const CompilerOptions& compiler_options = GetCompilerOptions();
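  // For boot-image intrinsics, try the intrinsic codegen path first; only fall back to
  // the generic JNI stub below if no intrinsic code is produced.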
  if (compiler_options.IsBootImage()) {
    ScopedObjectAccess soa(Thread::Current());
    ArtMethod* method = runtime->GetClassLinker()->LookupResolvedMethod(
        method_idx, dex_cache.Get(), /*class_loader=*/ nullptr);
    if (method != nullptr && UNLIKELY(method->IsIntrinsic())) {
      VariableSizedHandleScope handles(soa.Self());
      ScopedNullHandle<mirror::ClassLoader> class_loader;  // null means boot class path loader.
      Handle<mirror::Class> compiling_class = handles.NewHandle(method->GetDeclaringClass());
      DexCompilationUnit dex_compilation_unit(
          class_loader,
          runtime->GetClassLinker(),
          dex_file,
          /*code_item=*/ nullptr,
          /*class_def_idx=*/ DexFile::kDexNoIndex16,
          method_idx,
          access_flags,
          /*verified_method=*/ nullptr,
          dex_cache,
          compiling_class);
      CodeVectorAllocator code_allocator(&allocator);
      // Go to native so that we don't block GC during compilation.
      ScopedThreadSuspension sts(soa.Self(), kNative);
      std::unique_ptr<CodeGenerator> codegen(
          TryCompileIntrinsic(&allocator,
                              &arena_stack,
                              &code_allocator,
                              dex_compilation_unit,
                              method,
                              &handles));
      if (codegen != nullptr) {
        CompiledMethod* compiled_method = Emit(&allocator,
                                               &code_allocator,
                                               codegen.get(),
                                               /* item= */ nullptr);
        compiled_method->MarkAsIntrinsic();
        return compiled_method;
      }
    }
  }

  JniCompiledMethod jni_compiled_method = ArtQuickJniCompileMethod(
      compiler_options, access_flags, method_idx, dex_file);
  MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kCompiledNativeStub);

  ScopedArenaAllocator stack_map_allocator(&arena_stack);  // Will hold the stack map.
  ScopedArenaVector<uint8_t> stack_map = CreateJniStackMap(&stack_map_allocator,
                                                           jni_compiled_method);
  return CompiledMethod::SwapAllocCompiledMethod(
      GetCompiledMethodStorage(),
      jni_compiled_method.GetInstructionSet(),
      jni_compiled_method.GetCode(),
      ArrayRef<const uint8_t>(stack_map),
      jni_compiled_method.GetCfi(),
      /* patches= */ ArrayRef<const linker::LinkerPatch>());
}

Compiler* CreateOptimizingCompiler(const CompilerOptions& compiler_options,
                                   CompiledMethodStorage* storage) {
  return new OptimizingCompiler(compiler_options, storage);
}

bool EncodeArtMethodInInlineInfo(ArtMethod* method ATTRIBUTE_UNUSED) {
  // Note: the runtime is null only for unit testing.
  return Runtime::Current() == nullptr || !Runtime::Current()->IsAotCompiler();
}

bool OptimizingCompiler::JitCompile(Thread* self,
                                    jit::JitCodeCache* code_cache,
                                    jit::JitMemoryRegion* region,
                                    ArtMethod* method,
                                    CompilationKind compilation_kind,
                                    jit::JitLogger* jit_logger) {
  const CompilerOptions& compiler_options = GetCompilerOptions();
  // If the baseline flag was explicitly passed, change the compilation kind
  // from optimized to baseline.
  if (compiler_options.IsBaseline() && compilation_kind == CompilationKind::kOptimized) {
    compilation_kind = CompilationKind::kBaseline;
  }
  DCHECK(compiler_options.IsJitCompiler());
  DCHECK_EQ(compiler_options.IsJitCompilerForSharedCode(), code_cache->IsSharedRegion(*region));
  StackHandleScope<3> hs(self);
  Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
      method->GetDeclaringClass()->GetClassLoader()));
  Handle<mirror::DexCache> dex_cache(hs.NewHandle(method->GetDexCache()));
  DCHECK(method->IsCompilable());

  const DexFile* dex_file = method->GetDexFile();
  const uint16_t class_def_idx = method->GetClassDefIndex();
  const dex::CodeItem* code_item = dex_file->GetCodeItem(method->GetCodeItemOffset());
  const uint32_t method_idx = method->GetDexMethodIndex();
  const uint32_t access_flags = method->GetAccessFlags();

  Runtime* runtime = Runtime::Current();
  ArenaAllocator allocator(runtime->GetJitArenaPool());

  if (UNLIKELY(method->IsNative())) {
    JniCompiledMethod jni_compiled_method = ArtQuickJniCompileMethod(
        compiler_options, access_flags, method_idx, *dex_file);
    std::vector<Handle<mirror::Object>> roots;
    ArenaSet<ArtMethod*, std::less<ArtMethod*>> cha_single_implementation_list(
        allocator.Adapter(kArenaAllocCHA));
    ArenaStack arena_stack(runtime->GetJitArenaPool());
    // StackMapStream is large and it does not fit into this frame, so we need a helper method.
    ScopedArenaAllocator stack_map_allocator(&arena_stack);  // Will hold the stack map.
    ScopedArenaVector<uint8_t> stack_map = CreateJniStackMap(&stack_map_allocator,
                                                             jni_compiled_method);

    ArrayRef<const uint8_t> reserved_code;
    ArrayRef<const uint8_t> reserved_data;
    if (!code_cache->Reserve(self,
                             region,
                             jni_compiled_method.GetCode().size(),
                             stack_map.size(),
                             /* number_of_roots= */ 0,
                             method,
                             /*out*/ &reserved_code,
                             /*out*/ &reserved_data)) {
      MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kJitOutOfMemoryForCommit);
      return false;
    }
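    // The reserved code region starts with the OatQuickMethodHeader; the executable code
    // begins right after the instruction-aligned header.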
    const uint8_t* code = reserved_code.data() + OatQuickMethodHeader::InstructionAlignedSize();

    // Add debug info after we know the code location but before we update entry-point.
    std::vector<uint8_t> debug_info;
    if (compiler_options.GenerateAnyDebugInfo()) {
      debug::MethodDebugInfo info = {};
      info.custom_name = "art_jni_trampoline";
      info.dex_file = dex_file;
      info.class_def_index = class_def_idx;
      info.dex_method_index = method_idx;
      info.access_flags = access_flags;
      info.code_item = code_item;
      info.isa = jni_compiled_method.GetInstructionSet();
      info.deduped = false;
      info.is_native_debuggable = compiler_options.GetNativeDebuggable();
      info.is_optimized = true;
      info.is_code_address_text_relative = false;
      info.code_address = reinterpret_cast<uintptr_t>(code);
      info.code_size = jni_compiled_method.GetCode().size();
      info.frame_size_in_bytes = jni_compiled_method.GetFrameSize();
      info.code_info = nullptr;
      info.cfi = jni_compiled_method.GetCfi();
      debug_info = GenerateJitDebugInfo(info);
    }

    if (!code_cache->Commit(self,
                            region,
                            method,
                            reserved_code,
                            jni_compiled_method.GetCode(),
                            reserved_data,
                            roots,
                            ArrayRef<const uint8_t>(stack_map),
                            debug_info,
                            /* is_full_debug_info= */ compiler_options.GetGenerateDebugInfo(),
                            compilation_kind,
                            /* has_should_deoptimize_flag= */ false,
                            cha_single_implementation_list)) {
      code_cache->Free(self, region, reserved_code.data(), reserved_data.data());
      return false;
    }

    Runtime::Current()->GetJit()->AddMemoryUsage(method, allocator.BytesUsed());
    if (jit_logger != nullptr) {
      jit_logger->WriteLog(code, jni_compiled_method.GetCode().size(), method);
    }
    return true;
  }

  ArenaStack arena_stack(runtime->GetJitArenaPool());
  CodeVectorAllocator code_allocator(&allocator);
  VariableSizedHandleScope handles(self);

  std::unique_ptr<CodeGenerator> codegen;
  {
    Handle<mirror::Class> compiling_class = handles.NewHandle(method->GetDeclaringClass());
    DexCompilationUnit dex_compilation_unit(
        class_loader,
        runtime->GetClassLinker(),
        *dex_file,
        code_item,
        class_def_idx,
        method_idx,
        access_flags,
        /*verified_method=*/ nullptr,
        dex_cache,
        compiling_class);

    // Go to native so that we don't block GC during compilation.
    ScopedThreadSuspension sts(self, kNative);
    codegen.reset(
        TryCompile(&allocator,
                   &arena_stack,
                   &code_allocator,
                   dex_compilation_unit,
                   method,
                   compilation_kind,
                   &handles));
    if (codegen.get() == nullptr) {
      return false;
    }
  }

  ScopedArenaVector<uint8_t> stack_map = codegen->BuildStackMaps(code_item);

  ArrayRef<const uint8_t> reserved_code;
  ArrayRef<const uint8_t> reserved_data;
  if (!code_cache->Reserve(self,
                           region,
                           code_allocator.GetMemory().size(),
                           stack_map.size(),
                           /*number_of_roots=*/codegen->GetNumberOfJitRoots(),
                           method,
                           /*out*/ &reserved_code,
                           /*out*/ &reserved_data)) {
    MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kJitOutOfMemoryForCommit);
    return false;
  }
  const uint8_t* code = reserved_code.data() + OatQuickMethodHeader::InstructionAlignedSize();
  const uint8_t* roots_data = reserved_data.data();

  std::vector<Handle<mirror::Object>> roots;
  codegen->EmitJitRoots(code_allocator.GetData(), roots_data, &roots);
  // The root Handle<>s filled by the codegen reference entries in the VariableSizedHandleScope.
  DCHECK(std::all_of(roots.begin(),
                     roots.end(),
                     [&handles](Handle<mirror::Object> root){
                       return handles.Contains(root.GetReference());
                     }));

  // Add debug info after we know the code location but before we update entry-point.
  std::vector<uint8_t> debug_info;
  if (compiler_options.GenerateAnyDebugInfo()) {
    debug::MethodDebugInfo info = {};
    DCHECK(info.custom_name.empty());
    info.dex_file = dex_file;
    info.class_def_index = class_def_idx;
    info.dex_method_index = method_idx;
    info.access_flags = access_flags;
    info.code_item = code_item;
    info.isa = codegen->GetInstructionSet();
    info.deduped = false;
    info.is_native_debuggable = compiler_options.GetNativeDebuggable();
    info.is_optimized = true;
    info.is_code_address_text_relative = false;
    info.code_address = reinterpret_cast<uintptr_t>(code);
    info.code_size = code_allocator.GetMemory().size();
    info.frame_size_in_bytes = codegen->GetFrameSize();
    info.code_info = stack_map.size() == 0 ? nullptr : stack_map.data();
    info.cfi = ArrayRef<const uint8_t>(*codegen->GetAssembler()->cfi().data());
    debug_info = GenerateJitDebugInfo(info);
  }

  if (!code_cache->Commit(self,
                          region,
                          method,
                          reserved_code,
                          code_allocator.GetMemory(),
                          reserved_data,
                          roots,
                          ArrayRef<const uint8_t>(stack_map),
                          debug_info,
                          /* is_full_debug_info= */ compiler_options.GetGenerateDebugInfo(),
                          compilation_kind,
                          codegen->GetGraph()->HasShouldDeoptimizeFlag(),
                          codegen->GetGraph()->GetCHASingleImplementationList())) {
    code_cache->Free(self, region, reserved_code.data(), reserved_data.data());
    return false;
  }

  Runtime::Current()->GetJit()->AddMemoryUsage(method, allocator.BytesUsed());
  if (jit_logger != nullptr) {
    jit_logger->WriteLog(code, code_allocator.GetMemory().size(), method);
  }

  if (kArenaAllocatorCountAllocations) {
    codegen.reset();  // Release codegen's ScopedArenaAllocator for memory accounting.
    size_t total_allocated = allocator.BytesAllocated() + arena_stack.PeakBytesAllocated();
    if (total_allocated > kArenaAllocatorMemoryReportThreshold) {
      MemStats mem_stats(allocator.GetMemStats());
      MemStats peak_stats(arena_stack.GetPeakStats());
      LOG(INFO) << "Used " << total_allocated << " bytes of arena memory for compiling "
                << dex_file->PrettyMethod(method_idx)
                << "\n" << Dumpable<MemStats>(mem_stats)
                << "\n" << Dumpable<MemStats>(peak_stats);
    }
  }

  return true;
}

std::vector<uint8_t> OptimizingCompiler::GenerateJitDebugInfo(const debug::MethodDebugInfo& info) {
  const CompilerOptions& compiler_options = GetCompilerOptions();
  if (compiler_options.GenerateAnyDebugInfo()) {
    // If both flags are passed, generate full debug info.
    const bool mini_debug_info = !compiler_options.GetGenerateDebugInfo();
    // Create an entry for the single method that we just compiled.
    InstructionSet isa = compiler_options.GetInstructionSet();
    const InstructionSetFeatures* features = compiler_options.GetInstructionSetFeatures();
    return debug::MakeElfFileForJIT(isa, features, mini_debug_info, info);
  }
  return std::vector<uint8_t>();
}

}  // namespace art