compiler/optimizing/optimizing_compiler.cc
/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "optimizing_compiler.h"

#include <stdint.h>

#include <fstream>

#include "builder.h"
#include "code_generator.h"
#include "compiler.h"
#include "driver/compiler_driver.h"
#include "driver/dex_compilation_unit.h"
#include "graph_visualizer.h"
#include "nodes.h"
#include "register_allocator.h"
#include "ssa_liveness_analysis.h"
#include "ssa_phi_elimination.h"
#include "utils/arena_allocator.h"

namespace art {

/**
 * Used by the code generator to allocate the generated code in a vector.
 */
class CodeVectorAllocator FINAL : public CodeAllocator {
 public:
  // Initialize size_ so that GetSize() is well-defined before Allocate() runs.
  CodeVectorAllocator() : size_(0) {}

  virtual uint8_t* Allocate(size_t size) {
    size_ = size;
    memory_.resize(size);
    return &memory_[0];
  }

  size_t GetSize() const { return size_; }
  const std::vector<uint8_t>& GetMemory() const { return memory_; }

 private:
  std::vector<uint8_t> memory_;
  size_t size_;

  DISALLOW_COPY_AND_ASSIGN(CodeVectorAllocator);
};
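// Usage note: the code generator calls Allocate() with the final code size and
// emits the instructions into the returned buffer; GetMemory() then provides
// the bytes used to build the CompiledMethod at the end of TryCompile() below.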

/**
 * If set to true, generates a file suitable for the c1visualizer tool and IRHydra.
 */
static bool kIsVisualizerEnabled = false;

/**
 * Filter to apply to the visualizer. Only methods whose name contains this
 * filter string will be included in the output file.
 */
static const char* kStringFilter = "";
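// For example, setting kStringFilter to "Fibonacci" would restrict the dump
// to methods whose name contains "Fibonacci"; the empty string matches all.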

class OptimizingCompiler FINAL : public Compiler {
 public:
  explicit OptimizingCompiler(CompilerDriver* driver);

  bool CanCompileMethod(uint32_t method_idx, const DexFile& dex_file, CompilationUnit* cu) const
      OVERRIDE;

  CompiledMethod* Compile(const DexFile::CodeItem* code_item,
                          uint32_t access_flags,
                          InvokeType invoke_type,
                          uint16_t class_def_idx,
                          uint32_t method_idx,
                          jobject class_loader,
                          const DexFile& dex_file) const OVERRIDE;

  CompiledMethod* TryCompile(const DexFile::CodeItem* code_item,
                             uint32_t access_flags,
                             InvokeType invoke_type,
                             uint16_t class_def_idx,
                             uint32_t method_idx,
                             jobject class_loader,
                             const DexFile& dex_file) const;

  // The following methods simply delegate to the fallback compiler.
  CompiledMethod* JniCompile(uint32_t access_flags,
                             uint32_t method_idx,
                             const DexFile& dex_file) const OVERRIDE;

  uintptr_t GetEntryPointOf(mirror::ArtMethod* method) const OVERRIDE
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  bool WriteElf(art::File* file,
                OatWriter* oat_writer,
                const std::vector<const art::DexFile*>& dex_files,
                const std::string& android_root,
                bool is_host) const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  Backend* GetCodeGenerator(CompilationUnit* cu, void* compilation_unit) const OVERRIDE;

  void InitCompilationUnit(CompilationUnit& cu) const OVERRIDE;

  void Init() const OVERRIDE;

  void UnInit() const OVERRIDE;

 private:
  std::unique_ptr<std::ostream> visualizer_output_;

  // Delegate to another compiler in case the optimizing compiler cannot compile a method.
  // Currently the fallback is the quick compiler.
  std::unique_ptr<Compiler> delegate_;

  DISALLOW_COPY_AND_ASSIGN(OptimizingCompiler);
};

OptimizingCompiler::OptimizingCompiler(CompilerDriver* driver)
    : Compiler(driver, 100),
      delegate_(Create(driver, Compiler::Kind::kQuick)) {
  if (kIsVisualizerEnabled) {
    visualizer_output_.reset(new std::ofstream("art.cfg"));
  }
}

void OptimizingCompiler::Init() const {
  delegate_->Init();
}

void OptimizingCompiler::UnInit() const {
  delegate_->UnInit();
}

bool OptimizingCompiler::CanCompileMethod(uint32_t method_idx, const DexFile& dex_file,
                                          CompilationUnit* cu) const {
  return delegate_->CanCompileMethod(method_idx, dex_file, cu);
}

CompiledMethod* OptimizingCompiler::JniCompile(uint32_t access_flags,
                                               uint32_t method_idx,
                                               const DexFile& dex_file) const {
  return delegate_->JniCompile(access_flags, method_idx, dex_file);
}

uintptr_t OptimizingCompiler::GetEntryPointOf(mirror::ArtMethod* method) const {
  return delegate_->GetEntryPointOf(method);
}

bool OptimizingCompiler::WriteElf(art::File* file, OatWriter* oat_writer,
                                  const std::vector<const art::DexFile*>& dex_files,
                                  const std::string& android_root, bool is_host) const {
  return delegate_->WriteElf(file, oat_writer, dex_files, android_root, is_host);
}

Backend* OptimizingCompiler::GetCodeGenerator(CompilationUnit* cu, void* compilation_unit) const {
  return delegate_->GetCodeGenerator(cu, compilation_unit);
}

void OptimizingCompiler::InitCompilationUnit(CompilationUnit& cu) const {
  delegate_->InitCompilationUnit(cu);
}

CompiledMethod* OptimizingCompiler::TryCompile(const DexFile::CodeItem* code_item,
                                               uint32_t access_flags,
                                               InvokeType invoke_type,
                                               uint16_t class_def_idx,
                                               uint32_t method_idx,
                                               jobject class_loader,
                                               const DexFile& dex_file) const {
  InstructionSet instruction_set = GetCompilerDriver()->GetInstructionSet();
  // Always use the thumb2 assembler: some runtime functionality (like implicit stack
  // overflow checks) assumes thumb2.
  if (instruction_set == kArm) {
    instruction_set = kThumb2;
  }

  // Do not attempt to compile on architectures we do not support.
  if (instruction_set != kX86 && instruction_set != kX86_64 && instruction_set != kThumb2) {
    return nullptr;
  }
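  // Unsupported ISAs return nullptr here, which makes OptimizingCompiler::Compile()
  // fall back to the quick compiler through the delegation below.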

  DexCompilationUnit dex_compilation_unit(
    nullptr, class_loader, art::Runtime::Current()->GetClassLinker(), dex_file, code_item,
    class_def_idx, method_idx, access_flags,
    GetCompilerDriver()->GetVerifiedMethod(&dex_file, method_idx));

  // For testing purposes, we put a special marker on method names that should be compiled
  // with this compiler. This makes sure we're not regressing.
  bool shouldCompile = dex_compilation_unit.GetSymbol().find("00024opt_00024") != std::string::npos;
  bool shouldOptimize =
      dex_compilation_unit.GetSymbol().find("00024reg_00024") != std::string::npos;
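  // "_00024" is the JNI name-mangled form of '$' (U+0024), so these markers
  // match test methods with names like $opt$Foo and $reg$Bar.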

  ArenaPool pool;
  ArenaAllocator arena(&pool);
  HGraphBuilder builder(&arena, &dex_compilation_unit, &dex_file, GetCompilerDriver());

  HGraph* graph = builder.BuildGraph(*code_item);
  if (graph == nullptr) {
    if (shouldCompile) {
      LOG(FATAL) << "Could not build graph in optimizing compiler";
    }
    return nullptr;
  }

  CodeGenerator* codegen = CodeGenerator::Create(&arena, graph, instruction_set);
  if (codegen == nullptr) {
    if (shouldCompile) {
      LOG(FATAL) << "Could not find code generator for optimizing compiler";
    }
    return nullptr;
  }

  HGraphVisualizer visualizer(
      visualizer_output_.get(), graph, kStringFilter, *codegen, dex_compilation_unit);
  visualizer.DumpGraph("builder");

  CodeVectorAllocator allocator;

  if (RegisterAllocator::CanAllocateRegistersFor(*graph, instruction_set)) {
    graph->BuildDominatorTree();
    graph->TransformToSSA();
    visualizer.DumpGraph("ssa");
    graph->FindNaturalLoops();

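    // Clean up the phis introduced by the SSA transformation: first remove
    // phis that are equivalent to one of their inputs, then phis that no real
    // (non-phi) instruction uses.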
    SsaRedundantPhiElimination(graph).Run();
    SsaDeadPhiElimination(graph).Run();

    SsaLivenessAnalysis liveness(*graph, codegen);
    liveness.Analyze();
    visualizer.DumpGraph(kLivenessPassName);

    RegisterAllocator register_allocator(graph->GetArena(), codegen, liveness);
    register_allocator.AllocateRegisters();

    visualizer.DumpGraph(kRegisterAllocatorPassName);
    codegen->CompileOptimized(&allocator);
  } else if (shouldOptimize && RegisterAllocator::Supports(instruction_set)) {
    LOG(FATAL) << "Could not allocate registers in optimizing compiler";
  } else {
    codegen->CompileBaseline(&allocator);

    // Run these phases to get some test coverage.
    graph->BuildDominatorTree();
    graph->TransformToSSA();
    visualizer.DumpGraph("ssa");
    graph->FindNaturalLoops();
    SsaLivenessAnalysis liveness(*graph, codegen);
    liveness.Analyze();
    visualizer.DumpGraph(kLivenessPassName);
  }

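  // Build the runtime metadata that accompanies the code: the mapping table
  // relates native PCs to dex PCs, the vmap table records register-mapping
  // information, and the native GC map records which locations hold object
  // references at safepoints.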
  std::vector<uint8_t> mapping_table;
  SrcMap src_mapping_table;
  codegen->BuildMappingTable(&mapping_table,
      GetCompilerDriver()->GetCompilerOptions().GetIncludeDebugSymbols() ?
          &src_mapping_table : nullptr);
  std::vector<uint8_t> vmap_table;
  codegen->BuildVMapTable(&vmap_table);
  std::vector<uint8_t> gc_map;
  codegen->BuildNativeGCMap(&gc_map, dex_compilation_unit);

  return new CompiledMethod(GetCompilerDriver(),
                            instruction_set,
                            allocator.GetMemory(),
                            codegen->GetFrameSize(),
                            codegen->GetCoreSpillMask(),
                            0, /* FPR spill mask, unused */
                            &src_mapping_table,
                            mapping_table,
                            vmap_table,
                            gc_map,
                            nullptr);
}

CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
                                            uint32_t access_flags,
                                            InvokeType invoke_type,
                                            uint16_t class_def_idx,
                                            uint32_t method_idx,
                                            jobject class_loader,
                                            const DexFile& dex_file) const {
  CompiledMethod* method = TryCompile(code_item, access_flags, invoke_type, class_def_idx,
                                      method_idx, class_loader, dex_file);
  if (method != nullptr) {
    return method;
  }

  return delegate_->Compile(code_item, access_flags, invoke_type, class_def_idx, method_idx,
                            class_loader, dex_file);
}

Compiler* CreateOptimizingCompiler(CompilerDriver* driver) {
  return new OptimizingCompiler(driver);
}

}  // namespace art