Diffstat (limited to 'compiler')
-rw-r--r--  compiler/Android.mk                            |   1
-rw-r--r--  compiler/compilers.h                           |   4
-rw-r--r--  compiler/dex/quick/gen_invoke.cc               |  16
-rw-r--r--  compiler/dex/quick/mir_to_lir.cc               |  59
-rw-r--r--  compiler/dex/quick/mir_to_lir.h                |   2
-rw-r--r--  compiler/optimizing/builder.cc                 |   4
-rw-r--r--  compiler/optimizing/code_generator.cc          |   2
-rw-r--r--  compiler/optimizing/graph_visualizer.cc        | 199
-rw-r--r--  compiler/optimizing/graph_visualizer.h         |  63
-rw-r--r--  compiler/optimizing/nodes.cc                   |   8
-rw-r--r--  compiler/optimizing/nodes.h                    |  17
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc     |  23
-rw-r--r--  compiler/optimizing/ssa_builder.cc             |   6
-rw-r--r--  compiler/optimizing/ssa_liveness_analysis.cc   |   8
-rw-r--r--  compiler/optimizing/ssa_test.cc                |   4
15 files changed, 376 insertions, 40 deletions
diff --git a/compiler/Android.mk b/compiler/Android.mk index 1b70d59def..8592aaa4e0 100644 --- a/compiler/Android.mk +++ b/compiler/Android.mk @@ -82,6 +82,7 @@ LIBART_COMPILER_SRC_FILES := \ optimizing/code_generator.cc \ optimizing/code_generator_arm.cc \ optimizing/code_generator_x86.cc \ + optimizing/graph_visualizer.cc \ optimizing/nodes.cc \ optimizing/optimizing_compiler.cc \ optimizing/ssa_builder.cc \ diff --git a/compiler/compilers.h b/compiler/compilers.h index 3ca78c94c6..e523d647ce 100644 --- a/compiler/compilers.h +++ b/compiler/compilers.h @@ -73,7 +73,7 @@ class QuickCompiler : public Compiler { class OptimizingCompiler FINAL : public QuickCompiler { public: - explicit OptimizingCompiler(CompilerDriver* driver) : QuickCompiler(driver) { } + explicit OptimizingCompiler(CompilerDriver* driver); CompiledMethod* Compile(const DexFile::CodeItem* code_item, uint32_t access_flags, @@ -92,6 +92,8 @@ class OptimizingCompiler FINAL : public QuickCompiler { const DexFile& dex_file) const; private: + UniquePtr<std::ostream> visualizer_output_; + DISALLOW_COPY_AND_ASSIGN(OptimizingCompiler); }; diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc index 7aaffcbed2..24ed4a3346 100644 --- a/compiler/dex/quick/gen_invoke.cc +++ b/compiler/dex/quick/gen_invoke.cc @@ -801,8 +801,10 @@ int Mir2Lir::LoadArgRegs(CallInfo* info, int call_state, const MethodReference& target_method, uint32_t vtable_idx, uintptr_t direct_code, uintptr_t direct_method, InvokeType type, bool skip_this) { - int last_arg_reg = TargetReg(kArg3).GetReg(); - int next_reg = TargetReg(kArg1).GetReg(); + int last_arg_reg = 3 - 1; + int arg_regs[3] = {TargetReg(kArg1).GetReg(), TargetReg(kArg2).GetReg(), TargetReg(kArg3).GetReg()}; + + int next_reg = 0; int next_arg = 0; if (skip_this) { next_reg++; @@ -811,8 +813,8 @@ int Mir2Lir::LoadArgRegs(CallInfo* info, int call_state, for (; (next_reg <= last_arg_reg) && (next_arg < info->num_arg_words); next_reg++) { RegLocation rl_arg = info->args[next_arg++]; rl_arg = UpdateRawLoc(rl_arg); - if (rl_arg.wide && (next_reg <= TargetReg(kArg2).GetReg())) { - RegStorage r_tmp(RegStorage::k64BitPair, next_reg, next_reg + 1); + if (rl_arg.wide && (next_reg <= last_arg_reg - 1)) { + RegStorage r_tmp(RegStorage::k64BitPair, arg_regs[next_reg], arg_regs[next_reg + 1]); LoadValueDirectWideFixed(rl_arg, r_tmp); next_reg++; next_arg++; @@ -821,7 +823,7 @@ int Mir2Lir::LoadArgRegs(CallInfo* info, int call_state, rl_arg = NarrowRegLoc(rl_arg); rl_arg.is_const = false; } - LoadValueDirectFixed(rl_arg, RegStorage::Solo32(next_reg)); + LoadValueDirectFixed(rl_arg, RegStorage::Solo32(arg_regs[next_reg])); } call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx, direct_code, direct_method, type); @@ -1571,7 +1573,7 @@ bool Mir2Lir::GenInlinedUnsafeGet(CallInfo* info, RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg); RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); if (is_long) { - if (cu_->instruction_set == kX86) { + if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) { LoadBaseIndexedDisp(rl_object.reg, rl_offset.reg, 0, 0, rl_result.reg, k64); } else { RegStorage rl_temp_offset = AllocTemp(); @@ -1618,7 +1620,7 @@ bool Mir2Lir::GenInlinedUnsafePut(CallInfo* info, bool is_long, RegLocation rl_value; if (is_long) { rl_value = LoadValueWide(rl_src_value, kCoreReg); - if (cu_->instruction_set == kX86) { + if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) { StoreBaseIndexedDisp(rl_object.reg, 
rl_offset.reg, 0, 0, rl_value.reg, k64); } else { RegStorage rl_temp_offset = AllocTemp(); diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc index 77119a4667..2c4ca8885a 100644 --- a/compiler/dex/quick/mir_to_lir.cc +++ b/compiler/dex/quick/mir_to_lir.cc @@ -37,7 +37,7 @@ void Mir2Lir::LockArg(int in_position, bool wide) { } // TODO: needs revisit for 64-bit. -RegStorage Mir2Lir::LoadArg(int in_position, bool wide) { +RegStorage Mir2Lir::LoadArg(int in_position, RegisterClass reg_class, bool wide) { RegStorage reg_arg_low = GetArgMappingToPhysicalReg(in_position); RegStorage reg_arg_high = wide ? GetArgMappingToPhysicalReg(in_position + 1) : RegStorage::InvalidReg(); @@ -56,28 +56,45 @@ RegStorage Mir2Lir::LoadArg(int in_position, bool wide) { if (wide && !reg_arg_high.Valid()) { // If the low part is not in a reg, we allocate a pair. Otherwise, we just load to high reg. if (!reg_arg_low.Valid()) { - RegStorage new_regs = AllocTypedTempWide(false, kAnyReg); - reg_arg_low = new_regs.GetLow(); - reg_arg_high = new_regs.GetHigh(); + RegStorage new_regs = AllocTypedTempWide(false, reg_class); LoadBaseDisp(TargetReg(kSp), offset, new_regs, k64); + return new_regs; // The reg_class is OK, we can return. } else { + // Assume that no ABI allows splitting a wide fp reg between a narrow fp reg and memory, + // i.e. the low part is in a core reg. Load the second part in a core reg as well for now. + DCHECK(!reg_arg_low.IsFloat()); reg_arg_high = AllocTemp(); int offset_high = offset + sizeof(uint32_t); Load32Disp(TargetReg(kSp), offset_high, reg_arg_high); + // Continue below to check the reg_class. } } // If the low part is not in a register yet, we need to load it. if (!reg_arg_low.Valid()) { - reg_arg_low = AllocTemp(); + // Assume that if the low part of a wide arg is passed in memory, so is the high part, + // thus we don't get here for wide args as it's handled above. Big-endian ABIs could + // conceivably break this assumption but Android supports only little-endian architectures. + DCHECK(!wide); + reg_arg_low = AllocTypedTemp(false, reg_class); Load32Disp(TargetReg(kSp), offset, reg_arg_low); + return reg_arg_low; // The reg_class is OK, we can return. } - if (wide) { - return RegStorage::MakeRegPair(reg_arg_low, reg_arg_high); - } else { - return reg_arg_low; + RegStorage reg_arg = wide ? RegStorage::MakeRegPair(reg_arg_low, reg_arg_high) : reg_arg_low; + // Check if we need to copy the arg to a different reg_class. + if (!RegClassMatches(reg_class, reg_arg)) { + if (wide) { + RegStorage new_regs = AllocTypedTempWide(false, reg_class); + OpRegCopyWide(new_regs, reg_arg); + reg_arg = new_regs; + } else { + RegStorage new_reg = AllocTypedTemp(false, reg_class); + OpRegCopy(new_reg, reg_arg); + reg_arg = new_reg; + } } + return reg_arg; } void Mir2Lir::LoadArgDirect(int in_position, RegLocation rl_dest) { @@ -138,16 +155,29 @@ bool Mir2Lir::GenSpecialIGet(MIR* mir, const InlineMethod& special) { // Point of no return - no aborts after this GenPrintLabel(mir); LockArg(data.object_arg); + RegStorage reg_obj = LoadArg(data.object_arg, kCoreReg); RegLocation rl_dest = wide ? GetReturnWide(double_or_float) : GetReturn(double_or_float); - RegStorage reg_obj = LoadArg(data.object_arg); + RegisterClass reg_class = RegClassForFieldLoadStore(size, data.is_volatile); + RegStorage r_result = rl_dest.reg; + if (!RegClassMatches(reg_class, r_result)) { + r_result = wide ? 
AllocTypedTempWide(rl_dest.fp, reg_class) + : AllocTypedTemp(rl_dest.fp, reg_class); + } if (data.is_volatile) { - LoadBaseDispVolatile(reg_obj, data.field_offset, rl_dest.reg, size); + LoadBaseDispVolatile(reg_obj, data.field_offset, r_result, size); // Without context sensitive analysis, we must issue the most conservative barriers. // In this case, either a load or store may follow so we issue both barriers. GenMemBarrier(kLoadLoad); GenMemBarrier(kLoadStore); } else { - LoadBaseDisp(reg_obj, data.field_offset, rl_dest.reg, size); + LoadBaseDisp(reg_obj, data.field_offset, r_result, size); + } + if (r_result != rl_dest.reg) { + if (wide) { + OpRegCopyWide(rl_dest.reg, r_result); + } else { + OpRegCopy(rl_dest.reg, r_result); + } } return true; } @@ -175,8 +205,9 @@ bool Mir2Lir::GenSpecialIPut(MIR* mir, const InlineMethod& special) { GenPrintLabel(mir); LockArg(data.object_arg); LockArg(data.src_arg, wide); - RegStorage reg_obj = LoadArg(data.object_arg); - RegStorage reg_src = LoadArg(data.src_arg, wide); + RegStorage reg_obj = LoadArg(data.object_arg, kCoreReg); + RegisterClass reg_class = RegClassForFieldLoadStore(size, data.is_volatile); + RegStorage reg_src = LoadArg(data.src_arg, reg_class, wide); if (data.is_volatile) { // There might have been a store before this volatile one so insert StoreStore barrier. GenMemBarrier(kStoreStore); diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h index 77e5649716..6a0f3b2a9e 100644 --- a/compiler/dex/quick/mir_to_lir.h +++ b/compiler/dex/quick/mir_to_lir.h @@ -1397,7 +1397,7 @@ class Mir2Lir : public Backend { * @param wide Whether the argument is 64-bit or not. * @return Returns the register (or register pair) for the loaded argument. */ - RegStorage LoadArg(int in_position, bool wide = false); + RegStorage LoadArg(int in_position, RegisterClass reg_class, bool wide = false); /** * @brief Used to load a VR argument directly to a specified register location. 
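
Note: the LoadArg() and GenSpecialIGet()/GenSpecialIPut() hunks above share one pattern: keep the value wherever it already lives and copy it into a register of the requested RegisterClass only when the classes differ. Below is a minimal standalone sketch of that pattern with simplified stand-in types; it is not the real Mir2Lir/RegStorage API, and the helper names only mirror RegClassMatches()/AllocTypedTemp()/OpRegCopy() from mir_to_lir.h.

// Sketch only: simplified stand-ins, not the ART compiler API.
enum class RegClass { kCoreReg, kFPReg };

struct Reg {          // Simplified stand-in for RegStorage: a number plus its class.
  int num;
  RegClass cls;
};

static bool ClassMatches(RegClass wanted, const Reg& reg) { return reg.cls == wanted; }
static Reg AllocTypedTemp(RegClass cls) { static int next = 100; return Reg{next++, cls}; }
static void OpRegCopy(const Reg& dst, const Reg& src) {
  (void)dst; (void)src;                       // Would emit a register-to-register move.
}

// Return the argument in a register of the requested class, copying only when needed.
static Reg LoadArgSketch(const Reg& current, RegClass wanted_class) {
  if (ClassMatches(wanted_class, current)) {
    return current;                           // Already in the right register file: reuse it.
  }
  Reg temp = AllocTypedTemp(wanted_class);    // Otherwise allocate a temp of the right class
  OpRegCopy(temp, current);                   // and copy the value across register files.
  return temp;
}

The real code additionally handles wide (64-bit) values via AllocTypedTempWide()/OpRegCopyWide(), and GenSpecialIGet() applies the same copy in the other direction when a field value loaded into a class-matched temp has to end up in rl_dest.reg.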
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc index b0aa63bb3e..2c2564d2ec 100644 --- a/compiler/optimizing/builder.cc +++ b/compiler/optimizing/builder.cc @@ -311,6 +311,10 @@ bool HGraphBuilder::BuildInvoke(const Instruction& instruction, } } + if (return_type == Primitive::kPrimDouble || return_type == Primitive::kPrimFloat) { + return false; + } + DCHECK_EQ(argument_index, number_of_arguments); current_block_->AddInstruction(invoke); return true; diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc index bbebd3af24..beafbcc386 100644 --- a/compiler/optimizing/code_generator.cc +++ b/compiler/optimizing/code_generator.cc @@ -47,7 +47,7 @@ void CodeGenerator::CompileBlock(HBasicBlock* block) { Bind(GetLabelOf(block)); HGraphVisitor* location_builder = GetLocationBuilder(); HGraphVisitor* instruction_visitor = GetInstructionVisitor(); - for (HInstructionIterator it(*block->GetInstructions()); !it.Done(); it.Advance()) { + for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) { HInstruction* current = it.Current(); current->Accept(location_builder); InitLocations(current); diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc new file mode 100644 index 0000000000..a7604beac4 --- /dev/null +++ b/compiler/optimizing/graph_visualizer.cc @@ -0,0 +1,199 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "graph_visualizer.h" + +#include "driver/dex_compilation_unit.h" +#include "nodes.h" + +namespace art { + +/** + * HGraph visitor to generate a file suitable for the c1visualizer tool and IRHydra. 
+ */ +class HGraphVisualizerPrinter : public HGraphVisitor { + public: + HGraphVisualizerPrinter(HGraph* graph, std::ostream& output) + : HGraphVisitor(graph), output_(output), indent_(0) {} + + void StartTag(const char* name) { + AddIndent(); + output_ << "begin_" << name << std::endl; + indent_++; + } + + void EndTag(const char* name) { + indent_--; + AddIndent(); + output_ << "end_" << name << std::endl; + } + + void PrintProperty(const char* name, const char* property) { + AddIndent(); + output_ << name << " \"" << property << "\"" << std::endl; + } + + void PrintProperty(const char* name, const char* property, int id) { + AddIndent(); + output_ << name << " \"" << property << id << "\"" << std::endl; + } + + void PrintEmptyProperty(const char* name) { + AddIndent(); + output_ << name << std::endl; + } + + void PrintTime(const char* name) { + AddIndent(); + output_ << name << " " << time(NULL) << std::endl; + } + + void PrintInt(const char* name, int value) { + AddIndent(); + output_ << name << " " << value << std::endl; + } + + void AddIndent() { + for (size_t i = 0; i < indent_; ++i) { + output_ << " "; + } + } + + void PrintPredecessors(HBasicBlock* block) { + AddIndent(); + output_ << "predecessors"; + for (size_t i = 0, e = block->GetPredecessors().Size(); i < e; ++i) { + HBasicBlock* predecessor = block->GetPredecessors().Get(i); + output_ << " \"B" << predecessor->GetBlockId() << "\" "; + } + output_<< std::endl; + } + + void PrintSuccessors(HBasicBlock* block) { + AddIndent(); + output_ << "successors"; + for (size_t i = 0, e = block->GetSuccessors().Size(); i < e; ++i) { + HBasicBlock* successor = block->GetSuccessors().Get(i); + output_ << " \"B" << successor->GetBlockId() << "\" "; + } + output_<< std::endl; + } + + + void VisitInstruction(HInstruction* instruction) { + output_ << instruction->DebugName(); + if (instruction->InputCount() > 0) { + output_ << " [ "; + for (HInputIterator inputs(instruction); !inputs.Done(); inputs.Advance()) { + output_ << "v" << inputs.Current()->GetId() << " "; + } + output_ << "]"; + } + } + + void PrintInstructions(const HInstructionList& list) { + const char* kEndInstructionMarker = "<|@"; + for (HInstructionIterator it(list); !it.Done(); it.Advance()) { + HInstruction* instruction = it.Current(); + AddIndent(); + int bci = 0; + output_ << bci << " " << instruction->NumberOfUses() << " v" << instruction->GetId() << " "; + instruction->Accept(this); + output_ << kEndInstructionMarker << std::endl; + } + } + + void Run(const char* pass_name) { + StartTag("cfg"); + PrintProperty("name", pass_name); + VisitInsertionOrder(); + EndTag("cfg"); + } + + void VisitBasicBlock(HBasicBlock* block) { + StartTag("block"); + PrintProperty("name", "B", block->GetBlockId()); + PrintInt("from_bci", -1); + PrintInt("to_bci", -1); + PrintPredecessors(block); + PrintSuccessors(block); + PrintEmptyProperty("xhandlers"); + PrintEmptyProperty("flags"); + if (block->GetDominator() != nullptr) { + PrintProperty("dominator", "B", block->GetDominator()->GetBlockId()); + } + + StartTag("states"); + StartTag("locals"); + PrintInt("size", 0); + PrintProperty("method", "None"); + for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) { + AddIndent(); + HInstruction* instruction = it.Current(); + output_ << instruction->GetId() << " v" << instruction->GetId() << "[ "; + for (HInputIterator inputs(instruction); !inputs.Done(); inputs.Advance()) { + output_ << inputs.Current()->GetId() << " "; + } + output_ << "]" << std::endl; + } + EndTag("locals"); + 
EndTag("states"); + + StartTag("HIR"); + PrintInstructions(block->GetPhis()); + PrintInstructions(block->GetInstructions()); + EndTag("HIR"); + EndTag("block"); + } + + private: + std::ostream& output_; + size_t indent_; + + DISALLOW_COPY_AND_ASSIGN(HGraphVisualizerPrinter); +}; + +HGraphVisualizer::HGraphVisualizer(std::ostream* output, + HGraph* graph, + const char* string_filter, + const DexCompilationUnit& cu) + : output_(output), graph_(graph), is_enabled_(false) { + if (output == nullptr) { + return; + } + std::string pretty_name = PrettyMethod(cu.GetDexMethodIndex(), *cu.GetDexFile()); + if (pretty_name.find(string_filter) == std::string::npos) { + return; + } + + is_enabled_ = true; + HGraphVisualizerPrinter printer(graph, *output_); + printer.StartTag("compilation"); + printer.PrintProperty("name", pretty_name.c_str()); + printer.PrintProperty("method", pretty_name.c_str()); + printer.PrintTime("date"); + printer.EndTag("compilation"); +} + +void HGraphVisualizer::DumpGraph(const char* pass_name) { + if (!is_enabled_) { + return; + } + HGraphVisualizerPrinter printer(graph_, *output_); + printer.Run(pass_name); +} + +} // namespace art diff --git a/compiler/optimizing/graph_visualizer.h b/compiler/optimizing/graph_visualizer.h new file mode 100644 index 0000000000..433d55d421 --- /dev/null +++ b/compiler/optimizing/graph_visualizer.h @@ -0,0 +1,63 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_COMPILER_OPTIMIZING_GRAPH_VISUALIZER_H_ +#define ART_COMPILER_OPTIMIZING_GRAPH_VISUALIZER_H_ + +#include "utils/allocation.h" + +namespace art { + +class DexCompilationUnit; +class HGraph; + +/** + * If enabled, emits compilation information suitable for the c1visualizer tool + * and IRHydra. + * Currently only works if the compiler is single threaded. + */ +class HGraphVisualizer : public ValueObject { + public: + /** + * If output is not null, and the method name of the dex compilation + * unit contains `string_filter`, the compilation information will be + * emitted. + */ + HGraphVisualizer(std::ostream* output, + HGraph* graph, + const char* string_filter, + const DexCompilationUnit& cu); + + /** + * If this visualizer is enabled, emit the compilation information + * in `output_`. + */ + void DumpGraph(const char* pass_name); + + private: + std::ostream* const output_; + HGraph* const graph_; + + // Is true when `output_` is not null, and the compiled method's name + // contains the string_filter given in the constructor. 
+ bool is_enabled_; + + DISALLOW_COPY_AND_ASSIGN(HGraphVisualizer); +}; + +} // namespace art + +#endif // ART_COMPILER_OPTIMIZING_GRAPH_VISUALIZER_H_ diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc index cf2d1eec23..afaedd7f18 100644 --- a/compiler/optimizing/nodes.cc +++ b/compiler/optimizing/nodes.cc @@ -37,10 +37,10 @@ void HGraph::RemoveDeadBlocks(const ArenaBitVector& visited) const { for (size_t j = 0; j < block->GetSuccessors().Size(); ++j) { block->GetSuccessors().Get(j)->RemovePredecessor(block, false); } - for (HInstructionIterator it(*block->GetPhis()); !it.Done(); it.Advance()) { + for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) { block->RemovePhi(it.Current()->AsPhi()); } - for (HInstructionIterator it(*block->GetInstructions()); !it.Done(); it.Advance()) { + for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) { block->RemoveInstruction(it.Current()); } } @@ -420,10 +420,10 @@ void HGraphVisitor::VisitInsertionOrder() { } void HGraphVisitor::VisitBasicBlock(HBasicBlock* block) { - for (HInstructionIterator it(*block->GetPhis()); !it.Done(); it.Advance()) { + for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) { it.Current()->Accept(this); } - for (HInstructionIterator it(*block->GetInstructions()); !it.Done(); it.Advance()) { + for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) { it.Current()->Accept(this); } } diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h index 081c2bd08a..27b87ca0da 100644 --- a/compiler/optimizing/nodes.h +++ b/compiler/optimizing/nodes.h @@ -82,7 +82,7 @@ class HGraph : public ArenaObject { void SimplifyCFG(); // Find all natural loops in this graph. Aborts computation and returns false - // if one loop is not natural, that is the header does not dominated the back + // if one loop is not natural, that is the header does not dominate the back // edge. bool FindNaturalLoops() const; @@ -268,8 +268,8 @@ class HBasicBlock : public ArenaObject { HInstruction* GetFirstInstruction() const { return instructions_.first_instruction_; } HInstruction* GetLastInstruction() const { return instructions_.last_instruction_; } - HInstructionList const* GetInstructions() const { return &instructions_; } - HInstructionList const* GetPhis() const { return &phis_; } + const HInstructionList& GetInstructions() const { return instructions_; } + const HInstructionList& GetPhis() const { return phis_; } void AddSuccessor(HBasicBlock* block) { successors_.Add(block); @@ -444,6 +444,17 @@ class HInstruction : public ArenaObject { bool HasUses() const { return uses_ != nullptr || env_uses_ != nullptr; } + size_t NumberOfUses() const { + // TODO: Optimize this method if it is used outside of the HGraphTracer. + size_t result = 0; + HUseListNode<HInstruction>* current = uses_; + while (current != nullptr) { + current = current->GetTail(); + ++result; + } + return result; + } + int GetId() const { return id_; } void SetId(int id) { id_ = id; } diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc index a5031e0a7c..f435cb0058 100644 --- a/compiler/optimizing/optimizing_compiler.cc +++ b/compiler/optimizing/optimizing_compiler.cc @@ -14,6 +14,7 @@ * limitations under the License. 
*/ +#include <fstream> #include <stdint.h> #include "builder.h" @@ -21,6 +22,7 @@ #include "compilers.h" #include "driver/compiler_driver.h" #include "driver/dex_compilation_unit.h" +#include "graph_visualizer.h" #include "nodes.h" #include "ssa_liveness_analysis.h" #include "utils/arena_allocator.h" @@ -50,6 +52,22 @@ class CodeVectorAllocator FINAL : public CodeAllocator { DISALLOW_COPY_AND_ASSIGN(CodeVectorAllocator); }; +/** + * If set to true, generates a file suitable for the c1visualizer tool and IRHydra. + */ +static bool kIsVisualizerEnabled = false; + +/** + * Filter to apply to the visualizer. Methods whose name contain that filter will + * be in the file. + */ +static const char* kStringFilter = ""; + +OptimizingCompiler::OptimizingCompiler(CompilerDriver* driver) : QuickCompiler(driver) { + if (kIsVisualizerEnabled) { + visualizer_output_.reset(new std::ofstream("art.cfg")); + } +} CompiledMethod* OptimizingCompiler::TryCompile(const DexFile::CodeItem* code_item, uint32_t access_flags, @@ -70,6 +88,7 @@ CompiledMethod* OptimizingCompiler::TryCompile(const DexFile::CodeItem* code_ite ArenaPool pool; ArenaAllocator arena(&pool); HGraphBuilder builder(&arena, &dex_compilation_unit, &dex_file); + HGraph* graph = builder.BuildGraph(*code_item); if (graph == nullptr) { if (shouldCompile) { @@ -77,6 +96,8 @@ CompiledMethod* OptimizingCompiler::TryCompile(const DexFile::CodeItem* code_ite } return nullptr; } + HGraphVisualizer visualizer(visualizer_output_.get(), graph, kStringFilter, dex_compilation_unit); + visualizer.DumpGraph("builder"); InstructionSet instruction_set = GetCompilerDriver()->GetInstructionSet(); // The optimizing compiler currently does not have a Thumb2 assembler. @@ -104,6 +125,8 @@ CompiledMethod* OptimizingCompiler::TryCompile(const DexFile::CodeItem* code_ite // Run these phases to get some test coverage. graph->BuildDominatorTree(); graph->TransformToSSA(); + visualizer.DumpGraph("ssa"); + graph->FindNaturalLoops(); SsaLivenessAnalysis(*graph).Analyze(); diff --git a/compiler/optimizing/ssa_builder.cc b/compiler/optimizing/ssa_builder.cc index 1fc041c8c9..50e3254d7c 100644 --- a/compiler/optimizing/ssa_builder.cc +++ b/compiler/optimizing/ssa_builder.cc @@ -30,7 +30,7 @@ void SsaBuilder::BuildSsa() { // 2) Set inputs of loop phis. for (size_t i = 0; i < loop_headers_.Size(); i++) { HBasicBlock* block = loop_headers_.Get(i); - for (HInstructionIterator it(*block->GetPhis()); !it.Done(); it.Advance()) { + for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) { HPhi* phi = it.Current()->AsPhi(); for (size_t pred = 0; pred < block->GetPredecessors().Size(); pred++) { phi->AddInput(ValueOfLocal(block->GetPredecessors().Get(pred), phi->GetRegNumber())); @@ -40,7 +40,7 @@ void SsaBuilder::BuildSsa() { // 3) Clear locals. // TODO: Move this to a dead code eliminator phase. - for (HInstructionIterator it(*GetGraph()->GetEntryBlock()->GetInstructions()); + for (HInstructionIterator it(GetGraph()->GetEntryBlock()->GetInstructions()); !it.Done(); it.Advance()) { HInstruction* current = it.Current(); @@ -106,7 +106,7 @@ void SsaBuilder::VisitBasicBlock(HBasicBlock* block) { // - HStoreLocal: update current value of the local and remove the instruction. // - Instructions that require an environment: populate their environment // with the current values of the locals. 
- for (HInstructionIterator it(*block->GetInstructions()); !it.Done(); it.Advance()) { + for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) { it.Current()->Accept(this); } } diff --git a/compiler/optimizing/ssa_liveness_analysis.cc b/compiler/optimizing/ssa_liveness_analysis.cc index 0ab77ca2ef..7c2ec3966e 100644 --- a/compiler/optimizing/ssa_liveness_analysis.cc +++ b/compiler/optimizing/ssa_liveness_analysis.cc @@ -29,14 +29,14 @@ void SsaLivenessAnalysis::NumberInstructions() { for (HReversePostOrderIterator it(graph_); !it.Done(); it.Advance()) { HBasicBlock* block = it.Current(); - for (HInstructionIterator it(*block->GetPhis()); !it.Done(); it.Advance()) { + for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) { HInstruction* current = it.Current(); if (current->HasUses()) { current->SetSsaIndex(ssa_index++); } } - for (HInstructionIterator it(*block->GetInstructions()); !it.Done(); it.Advance()) { + for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) { HInstruction* current = it.Current(); if (current->HasUses()) { current->SetSsaIndex(ssa_index++); @@ -73,7 +73,7 @@ void SsaLivenessAnalysis::ComputeInitialSets() { BitVector* kill = GetKillSet(*block); BitVector* live_in = GetLiveInSet(*block); - for (HBackwardInstructionIterator it(*block->GetInstructions()); !it.Done(); it.Advance()) { + for (HBackwardInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) { HInstruction* current = it.Current(); if (current->HasSsaIndex()) { kill->SetBit(current->GetSsaIndex()); @@ -99,7 +99,7 @@ void SsaLivenessAnalysis::ComputeInitialSets() { } } - for (HInstructionIterator it(*block->GetPhis()); !it.Done(); it.Advance()) { + for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) { HInstruction* current = it.Current(); if (current->HasSsaIndex()) { kill->SetBit(current->GetSsaIndex()); diff --git a/compiler/optimizing/ssa_test.cc b/compiler/optimizing/ssa_test.cc index 9be2197ad7..415d14659e 100644 --- a/compiler/optimizing/ssa_test.cc +++ b/compiler/optimizing/ssa_test.cc @@ -66,10 +66,10 @@ static void ReNumberInstructions(HGraph* graph) { int id = 0; for (size_t i = 0, e = graph->GetBlocks().Size(); i < e; ++i) { HBasicBlock* block = graph->GetBlocks().Get(i); - for (HInstructionIterator it(*block->GetPhis()); !it.Done(); it.Advance()) { + for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) { it.Current()->SetId(id++); } - for (HInstructionIterator it(*block->GetInstructions()); !it.Done(); it.Advance()) { + for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) { it.Current()->SetId(id++); } } |
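
Note: when kIsVisualizerEnabled is flipped to true, the new OptimizingCompiler constructor opens art.cfg and every call to HGraphVisualizer::DumpGraph() appends a c1visualizer/IRHydra-readable section to it. The text below is an illustration of the output shape, pieced together from the StartTag()/PrintProperty()/PrintInstructions() calls in this patch; the method name, timestamp, block ids and instructions are invented and only the entry block is shown.

begin_compilation
  name "void Foo.bar()"
  method "void Foo.bar()"
  date 1401234567
end_compilation
begin_cfg
  name "builder"
  begin_block
    name "B0"
    from_bci -1
    to_bci -1
    predecessors
    successors "B1"
    xhandlers
    flags
    begin_states
      begin_locals
        size 0
        method "None"
      end_locals
    end_states
    begin_HIR
      0 1 v2 IntConstant<|@
      0 0 v3 Goto<|@
    end_HIR
  end_block
end_cfg

Each pass that calls DumpGraph() in this patch ("builder" and "ssa") appends another begin_cfg/end_cfg section with that pass name to the same file.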