Opt compiler: Add disassembly to the '.cfg' output.
The disassembly is included automatically when the usual
`--dump-cfg` option is used.
Change-Id: I864bfc3a8299c042e72e451cc7730ad8271e4deb
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 64f2c9a..cd10935 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -25,6 +25,7 @@
#include "dex/verified_method.h"
#include "driver/dex_compilation_unit.h"
#include "gc_map_builder.h"
+#include "graph_visualizer.h"
#include "leb128.h"
#include "mapping_table.h"
#include "mirror/array-inl.h"
@@ -159,12 +160,55 @@
return block;
}
+class DisassemblyScope {
+ public:
+ DisassemblyScope(HInstruction* instruction, const CodeGenerator& codegen)
+ : codegen_(codegen), instruction_(instruction), start_offset_(static_cast<size_t>(-1)) {
+ if (codegen_.GetDisassemblyInformation() != nullptr) {
+ start_offset_ = codegen_.GetAssembler().CodeSize();
+ }
+ }
+
+ ~DisassemblyScope() {
+ // We avoid building this data when we know it will not be used.
+ if (codegen_.GetDisassemblyInformation() != nullptr) {
+ codegen_.GetDisassemblyInformation()->AddInstructionInterval(
+ instruction_, start_offset_, codegen_.GetAssembler().CodeSize());
+ }
+ }
+
+ private:
+ const CodeGenerator& codegen_;
+ HInstruction* instruction_;
+ size_t start_offset_;
+};
+
+
+void CodeGenerator::GenerateSlowPaths() {
+ size_t code_start = 0;
+ for (size_t i = 0, e = slow_paths_.Size(); i < e; ++i) {
+ if (disasm_info_ != nullptr) {
+ code_start = GetAssembler()->CodeSize();
+ }
+ slow_paths_.Get(i)->EmitNativeCode(this);
+ if (disasm_info_ != nullptr) {
+ disasm_info_->AddSlowPathInterval(slow_paths_.Get(i), code_start, GetAssembler()->CodeSize());
+ }
+ }
+}
+
void CodeGenerator::CompileInternal(CodeAllocator* allocator, bool is_baseline) {
is_baseline_ = is_baseline;
HGraphVisitor* instruction_visitor = GetInstructionVisitor();
DCHECK_EQ(current_block_index_, 0u);
+
+ size_t frame_start = GetAssembler()->CodeSize();
GenerateFrameEntry();
DCHECK_EQ(GetAssembler()->cfi().GetCurrentCFAOffset(), static_cast<int>(frame_size_));
+ if (disasm_info_ != nullptr) {
+ disasm_info_->SetFrameEntryInterval(frame_start, GetAssembler()->CodeSize());
+ }
+
for (size_t e = block_order_->Size(); current_block_index_ < e; ++current_block_index_) {
HBasicBlock* block = block_order_->Get(current_block_index_);
// Don't generate code for an empty block. Its predecessors will branch to its successor
@@ -174,6 +218,7 @@
Bind(block);
for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
HInstruction* current = it.Current();
+ DisassemblyScope disassembly_scope(current, *this);
if (is_baseline) {
InitLocationsBaseline(current);
}
@@ -182,10 +227,7 @@
}
}
- // Generate the slow paths.
- for (size_t i = 0, e = slow_paths_.Size(); i < e; ++i) {
- slow_paths_.Get(i)->EmitNativeCode(this);
- }
+ GenerateSlowPaths();
// Finalize instructions in assember;
Finalize(allocator);
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index b1f1674..4cecd61 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -22,6 +22,7 @@
#include "base/bit_field.h"
#include "driver/compiler_options.h"
#include "globals.h"
+#include "graph_visualizer.h"
#include "locations.h"
#include "memory_region.h"
#include "nodes.h"
@@ -162,6 +163,7 @@
virtual void Bind(HBasicBlock* block) = 0;
virtual void Move(HInstruction* instruction, Location location, HInstruction* move_for) = 0;
virtual Assembler* GetAssembler() = 0;
+ virtual const Assembler& GetAssembler() const = 0;
virtual size_t GetWordSize() const = 0;
virtual size_t GetFloatingPointSpillSlotSize() const = 0;
virtual uintptr_t GetAddressOf(HBasicBlock* block) const = 0;
@@ -340,6 +342,9 @@
static void CreateCommonInvokeLocationSummary(
HInvoke* invoke, InvokeDexCallingConventionVisitor* visitor);
+ void SetDisassemblyInformation(DisassemblyInformation* info) { disasm_info_ = info; }
+ DisassemblyInformation* GetDisassemblyInformation() const { return disasm_info_; }
+
protected:
CodeGenerator(HGraph* graph,
size_t number_of_core_registers,
@@ -363,6 +368,7 @@
stack_map_stream_(graph->GetArena()),
block_order_(nullptr),
is_baseline_(false),
+ disasm_info_(nullptr),
graph_(graph),
compiler_options_(compiler_options),
slow_paths_(graph->GetArena(), 8),
@@ -446,9 +452,12 @@
// Whether we are using baseline.
bool is_baseline_;
+ DisassemblyInformation* disasm_info_;
+
private:
void InitLocationsBaseline(HInstruction* instruction);
size_t GetStackOffsetOfSavedRegister(size_t index);
+ void GenerateSlowPaths();
void CompileInternal(CodeAllocator* allocator, bool is_baseline);
void BlockIfInRegister(Location location, bool is_out = false) const;
void EmitEnvironment(HEnvironment* environment, SlowPathCode* slow_path);
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 7169679..bd0bfcd 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -436,6 +436,20 @@
__ AdjustLabelPosition(block_label);
}
}
+ // Adjust pc offsets for the disassembly information.
+ if (disasm_info_ != nullptr) {
+ GeneratedCodeInterval* frame_entry_interval = disasm_info_->GetFrameEntryInterval();
+ frame_entry_interval->start = __ GetAdjustedPosition(frame_entry_interval->start);
+ frame_entry_interval->end = __ GetAdjustedPosition(frame_entry_interval->end);
+ for (auto& it : *disasm_info_->GetInstructionIntervals()) {
+ it.second.start = __ GetAdjustedPosition(it.second.start);
+ it.second.end = __ GetAdjustedPosition(it.second.end);
+ }
+ for (auto& it : *disasm_info_->GetSlowPathIntervals()) {
+ it.code_interval.start = __ GetAdjustedPosition(it.code_interval.start);
+ it.code_interval.end = __ GetAdjustedPosition(it.code_interval.end);
+ }
+ }
CodeGenerator::Finalize(allocator);
}
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index 1599a23..5b4b375 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -254,6 +254,10 @@
return &assembler_;
}
+ const ArmAssembler& GetAssembler() const OVERRIDE {
+ return assembler_;
+ }
+
uintptr_t GetAddressOf(HBasicBlock* block) const OVERRIDE {
return GetLabelOf(block)->Position();
}
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index f96810f..bbe3adc 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -283,6 +283,7 @@
HGraphVisitor* GetLocationBuilder() OVERRIDE { return &location_builder_; }
HGraphVisitor* GetInstructionVisitor() OVERRIDE { return &instruction_visitor_; }
Arm64Assembler* GetAssembler() OVERRIDE { return &assembler_; }
+ const Arm64Assembler& GetAssembler() const OVERRIDE { return assembler_; }
vixl::MacroAssembler* GetVIXLAssembler() { return GetAssembler()->vixl_masm_; }
// Emit a write barrier.
diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h
index 534154f..ec36496 100644
--- a/compiler/optimizing/code_generator_mips64.h
+++ b/compiler/optimizing/code_generator_mips64.h
@@ -228,6 +228,7 @@
HGraphVisitor* GetLocationBuilder() OVERRIDE { return &location_builder_; }
HGraphVisitor* GetInstructionVisitor() OVERRIDE { return &instruction_visitor_; }
Mips64Assembler* GetAssembler() OVERRIDE { return &assembler_; }
+ const Mips64Assembler& GetAssembler() const OVERRIDE { return assembler_; }
void MarkGCCard(GpuRegister object, GpuRegister value);
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 696d8d5..1ad89c9 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -245,6 +245,10 @@
return &assembler_;
}
+ const X86Assembler& GetAssembler() const OVERRIDE {
+ return assembler_;
+ }
+
uintptr_t GetAddressOf(HBasicBlock* block) const OVERRIDE {
return GetLabelOf(block)->Position();
}
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 215754c..a18e89a 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -245,6 +245,10 @@
return &assembler_;
}
+ const X86_64Assembler& GetAssembler() const OVERRIDE {
+ return assembler_;
+ }
+
ParallelMoveResolverX86_64* GetMoveResolver() OVERRIDE {
return &move_resolver_;
}
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index 9fd8d00..2b85c7c 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -16,17 +16,21 @@
#include "graph_visualizer.h"
+#include <dlfcn.h>
+
+#include <cctype>
+#include <sstream>
+
#include "code_generator.h"
#include "dead_code_elimination.h"
+#include "disassembler.h"
#include "licm.h"
#include "nodes.h"
#include "optimization.h"
#include "reference_type_propagation.h"
#include "register_allocator.h"
#include "ssa_liveness_analysis.h"
-
-#include <cctype>
-#include <sstream>
+#include "utils/assembler.h"
namespace art {
@@ -87,6 +91,60 @@
}
}
+typedef Disassembler* create_disasm_prototype(InstructionSet instruction_set,
+ DisassemblerOptions* options);
+class HGraphVisualizerDisassembler {
+ public:
+ HGraphVisualizerDisassembler(InstructionSet instruction_set, const uint8_t* base_address)
+ : instruction_set_(instruction_set) {
+ libart_disassembler_handle_ =
+ dlopen(kIsDebugBuild ? "libartd-disassembler.so" : "libart-disassembler.so", RTLD_NOW);
+ if (libart_disassembler_handle_ == nullptr) {
+ LOG(WARNING) << "Failed to dlopen libart-disassembler: " << dlerror();
+ return;
+ }
+ create_disasm_prototype* create_disassembler = reinterpret_cast<create_disasm_prototype*>(
+ dlsym(libart_disassembler_handle_, "create_disassembler"));
+ if (create_disassembler == nullptr) {
+ LOG(WARNING) << "Could not find create_disassembler entry: " << dlerror();
+ return;
+ }
+ // Reading the disassembly from 0x0 is easier, so we print relative
+ // addresses. We will only disassemble the code once everything has
+ // been generated, so we can read data in literal pools.
+ disassembler_ = std::unique_ptr<Disassembler>((*create_disassembler)(
+ instruction_set,
+ new DisassemblerOptions(/* absolute_addresses */ false,
+ base_address,
+ /* can_read_literals */ true)));
+ }
+
+ ~HGraphVisualizerDisassembler() {
+ // We need to call ~Disassembler() before we close the library.
+ disassembler_.reset();
+ if (libart_disassembler_handle_ != nullptr) {
+ dlclose(libart_disassembler_handle_);
+ }
+ }
+
+ void Disassemble(std::ostream& output, size_t start, size_t end) const {
+ const uint8_t* base = disassembler_->GetDisassemblerOptions()->base_address_;
+ if (instruction_set_ == kThumb2) {
+ // ARM and Thumb-2 use the same disassembler. The bottom bit of the
+ // address is used to distinguish between the two.
+ base += 1;
+ }
+ disassembler_->Dump(output, base + start, base + end);
+ }
+
+ private:
+ InstructionSet instruction_set_;
+ std::unique_ptr<Disassembler> disassembler_;
+
+ void* libart_disassembler_handle_;
+};
+
+
/**
* HGraph visitor to generate a file suitable for the c1visualizer tool and IRHydra.
*/
@@ -96,12 +154,19 @@
std::ostream& output,
const char* pass_name,
bool is_after_pass,
- const CodeGenerator& codegen)
+ const CodeGenerator& codegen,
+ const DisassemblyInformation* disasm_info = nullptr)
: HGraphVisitor(graph),
output_(output),
pass_name_(pass_name),
is_after_pass_(is_after_pass),
codegen_(codegen),
+ disasm_info_(disasm_info),
+ disassembler_(disasm_info_ != nullptr
+ ? new HGraphVisualizerDisassembler(
+ codegen_.GetInstructionSet(),
+ codegen_.GetAssembler().CodeBufferBaseAddress())
+ : nullptr),
indent_(0) {}
void StartTag(const char* name) {
@@ -173,6 +238,9 @@
HBasicBlock* predecessor = block->GetPredecessors().Get(i);
output_ << " \"B" << predecessor->GetBlockId() << "\" ";
}
+ if (block->IsEntryBlock() && (disasm_info_ != nullptr)) {
+ output_ << " \"" << kDisassemblyBlockFrameEntry << "\" ";
+ }
output_<< std::endl;
}
@@ -183,6 +251,11 @@
HBasicBlock* successor = block->GetSuccessors().Get(i);
output_ << " \"B" << successor->GetBlockId() << "\" ";
}
+ if (block->IsExitBlock() &&
+ (disasm_info_ != nullptr) &&
+ !disasm_info_->GetSlowPathIntervals().empty()) {
+ output_ << " \"" << kDisassemblyBlockSlowPaths << "\" ";
+ }
output_<< std::endl;
}
@@ -266,9 +339,9 @@
StartAttributeStream("kind") << barrier->GetBarrierKind();
}
- void VisitLoadClass(HLoadClass* load_cass) OVERRIDE {
+ void VisitLoadClass(HLoadClass* load_class) OVERRIDE {
StartAttributeStream("gen_clinit_check") << std::boolalpha
- << load_cass->MustGenerateClinitCheck() << std::noboolalpha;
+ << load_class->MustGenerateClinitCheck() << std::noboolalpha;
}
void VisitCheckCast(HCheckCast* check_cast) OVERRIDE {
@@ -378,10 +451,20 @@
}
}
}
+ if (disasm_info_ != nullptr) {
+ DCHECK(disassembler_ != nullptr);
+ // If the information is available, disassemble the code generated for
+ // this instruction.
+ auto it = disasm_info_->GetInstructionIntervals().find(instruction);
+ if (it != disasm_info_->GetInstructionIntervals().end()
+ && it->second.start != it->second.end) {
+ output_ << std::endl;
+ disassembler_->Disassemble(output_, it->second.start, it->second.end);
+ }
+ }
}
void PrintInstructions(const HInstructionList& list) {
- const char* kEndInstructionMarker = "<|@";
for (HInstructionIterator it(list); !it.Done(); it.Advance()) {
HInstruction* instruction = it.Current();
int bci = 0;
@@ -399,11 +482,83 @@
}
}
+ void DumpStartOfDisassemblyBlock(const char* block_name,
+ int predecessor_index,
+ int successor_index) {
+ StartTag("block");
+ PrintProperty("name", block_name);
+ PrintInt("from_bci", -1);
+ PrintInt("to_bci", -1);
+ if (predecessor_index != -1) {
+ PrintProperty("predecessors", "B", predecessor_index);
+ } else {
+ PrintEmptyProperty("predecessors");
+ }
+ if (successor_index != -1) {
+ PrintProperty("successors", "B", successor_index);
+ } else {
+ PrintEmptyProperty("successors");
+ }
+ PrintEmptyProperty("xhandlers");
+ PrintEmptyProperty("flags");
+ StartTag("states");
+ StartTag("locals");
+ PrintInt("size", 0);
+ PrintProperty("method", "None");
+ EndTag("locals");
+ EndTag("states");
+ StartTag("HIR");
+ }
+
+ void DumpEndOfDisassemblyBlock() {
+ EndTag("HIR");
+ EndTag("block");
+ }
+
+ void DumpDisassemblyBlockForFrameEntry() {
+ DumpStartOfDisassemblyBlock(kDisassemblyBlockFrameEntry,
+ -1,
+ GetGraph()->GetEntryBlock()->GetBlockId());
+ output_ << " 0 0 disasm " << kDisassemblyBlockFrameEntry << " ";
+ GeneratedCodeInterval frame_entry = disasm_info_->GetFrameEntryInterval();
+ if (frame_entry.start != frame_entry.end) {
+ output_ << std::endl;
+ disassembler_->Disassemble(output_, frame_entry.start, frame_entry.end);
+ }
+ output_ << kEndInstructionMarker << std::endl;
+ DumpEndOfDisassemblyBlock();
+ }
+
+ void DumpDisassemblyBlockForSlowPaths() {
+ if (disasm_info_->GetSlowPathIntervals().empty()) {
+ return;
+ }
+ // If the graph has an exit block we attach the block for the slow paths
+ // after it. Else we just add the block to the graph without linking it to
+ // any other.
+ DumpStartOfDisassemblyBlock(
+ kDisassemblyBlockSlowPaths,
+ GetGraph()->HasExitBlock() ? GetGraph()->GetExitBlock()->GetBlockId() : -1,
+ -1);
+ for (SlowPathCodeInfo info : disasm_info_->GetSlowPathIntervals()) {
+ output_ << " 0 0 disasm " << info.slow_path->GetDescription() << std::endl;
+ disassembler_->Disassemble(output_, info.code_interval.start, info.code_interval.end);
+ output_ << kEndInstructionMarker << std::endl;
+ }
+ DumpEndOfDisassemblyBlock();
+ }
+
void Run() {
StartTag("cfg");
std::string pass_desc = std::string(pass_name_) + (is_after_pass_ ? " (after)" : " (before)");
PrintProperty("name", pass_desc.c_str());
+ if (disasm_info_ != nullptr) {
+ DumpDisassemblyBlockForFrameEntry();
+ }
VisitInsertionOrder();
+ if (disasm_info_ != nullptr) {
+ DumpDisassemblyBlockForSlowPaths();
+ }
EndTag("cfg");
}
@@ -450,11 +605,17 @@
EndTag("block");
}
+ static constexpr const char* const kEndInstructionMarker = "<|@";
+ static constexpr const char* const kDisassemblyBlockFrameEntry = "FrameEntry";
+ static constexpr const char* const kDisassemblyBlockSlowPaths = "SlowPaths";
+
private:
std::ostream& output_;
const char* pass_name_;
const bool is_after_pass_;
const CodeGenerator& codegen_;
+ const DisassemblyInformation* disasm_info_;
+ std::unique_ptr<HGraphVisualizerDisassembler> disassembler_;
size_t indent_;
DISALLOW_COPY_AND_ASSIGN(HGraphVisualizerPrinter);
@@ -483,4 +644,13 @@
}
}
+void HGraphVisualizer::DumpGraphWithDisassembly() const {
+ DCHECK(output_ != nullptr);
+ if (!graph_->GetBlocks().IsEmpty()) {
+ HGraphVisualizerPrinter printer(
+ graph_, *output_, "disassembly", true, codegen_, codegen_.GetDisassemblyInformation());
+ printer.Run();
+ }
+}
+
} // namespace art
diff --git a/compiler/optimizing/graph_visualizer.h b/compiler/optimizing/graph_visualizer.h
index 513bceb..b6b66df 100644
--- a/compiler/optimizing/graph_visualizer.h
+++ b/compiler/optimizing/graph_visualizer.h
@@ -19,6 +19,8 @@
#include <ostream>
+#include "arch/instruction_set.h"
+#include "base/arena_containers.h"
#include "base/value_object.h"
namespace art {
@@ -26,11 +28,75 @@
class CodeGenerator;
class DexCompilationUnit;
class HGraph;
+class HInstruction;
+class SlowPathCode;
/**
* This class outputs the HGraph in the C1visualizer format.
* Note: Currently only works if the compiler is single threaded.
*/
+struct GeneratedCodeInterval {
+ size_t start;
+ size_t end;
+};
+
+struct SlowPathCodeInfo {
+ const SlowPathCode* slow_path;
+ GeneratedCodeInterval code_interval;
+};
+
+// This information is filled by the code generator. It will be used by the
+// graph visualizer to associate disassembly of the generated code with the
+// instructions and slow paths. We assume that the generated code follows the
+// following structure:
+// - frame entry
+// - instructions
+// - slow paths
+class DisassemblyInformation {
+ public:
+ explicit DisassemblyInformation(ArenaAllocator* allocator)
+ : frame_entry_interval_({0, 0}),
+ instruction_intervals_(std::less<const HInstruction*>(), allocator->Adapter()),
+ slow_path_intervals_(allocator->Adapter()) {}
+
+ void SetFrameEntryInterval(size_t start, size_t end) {
+ frame_entry_interval_ = {start, end};
+ }
+
+ void AddInstructionInterval(HInstruction* instr, size_t start, size_t end) {
+ instruction_intervals_.Put(instr, {start, end});
+ }
+
+ void AddSlowPathInterval(SlowPathCode* slow_path, size_t start, size_t end) {
+ slow_path_intervals_.push_back({slow_path, {start, end}});
+ }
+
+ GeneratedCodeInterval GetFrameEntryInterval() const {
+ return frame_entry_interval_;
+ }
+
+ GeneratedCodeInterval* GetFrameEntryInterval() {
+ return &frame_entry_interval_;
+ }
+
+ const ArenaSafeMap<const HInstruction*, GeneratedCodeInterval>& GetInstructionIntervals() const {
+ return instruction_intervals_;
+ }
+
+ ArenaSafeMap<const HInstruction*, GeneratedCodeInterval>* GetInstructionIntervals() {
+ return &instruction_intervals_;
+ }
+
+ const ArenaVector<SlowPathCodeInfo>& GetSlowPathIntervals() const { return slow_path_intervals_; }
+
+ ArenaVector<SlowPathCodeInfo>* GetSlowPathIntervals() { return &slow_path_intervals_; }
+
+ private:
+ GeneratedCodeInterval frame_entry_interval_;
+ ArenaSafeMap<const HInstruction*, GeneratedCodeInterval> instruction_intervals_;
+ ArenaVector<SlowPathCodeInfo> slow_path_intervals_;
+};
+
class HGraphVisualizer : public ValueObject {
public:
HGraphVisualizer(std::ostream* output,
@@ -39,6 +105,7 @@
void PrintHeader(const char* method_name) const;
void DumpGraph(const char* pass_name, bool is_after_pass = true) const;
+ void DumpGraphWithDisassembly() const;
private:
std::ostream* const output_;
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index ad67813..0c7b6f7 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -92,19 +92,21 @@
public:
PassInfoPrinter(HGraph* graph,
const char* method_name,
- const CodeGenerator& codegen,
+ CodeGenerator* codegen,
std::ostream* visualizer_output,
CompilerDriver* compiler_driver)
: method_name_(method_name),
timing_logger_enabled_(compiler_driver->GetDumpPasses()),
timing_logger_(method_name, true, true),
+ disasm_info_(graph->GetArena()),
visualizer_enabled_(!compiler_driver->GetDumpCfgFileName().empty()),
- visualizer_(visualizer_output, graph, codegen) {
+ visualizer_(visualizer_output, graph, *codegen) {
if (strstr(method_name, kStringFilter) == nullptr) {
timing_logger_enabled_ = visualizer_enabled_ = false;
}
if (visualizer_enabled_) {
visualizer_.PrintHeader(method_name_);
+ codegen->SetDisassemblyInformation(&disasm_info_);
}
}
@@ -115,6 +117,12 @@
}
}
+ void DumpDisassembly() const {
+ if (visualizer_enabled_) {
+ visualizer_.DumpGraphWithDisassembly();
+ }
+ }
+
private:
void StartPass(const char* pass_name) {
// Dump graph first, then start timer.
@@ -141,6 +149,8 @@
bool timing_logger_enabled_;
TimingLogger timing_logger_;
+ DisassemblyInformation disasm_info_;
+
bool visualizer_enabled_;
HGraphVisualizer visualizer_;
@@ -224,12 +234,13 @@
CodeGenerator* codegen,
CompilerDriver* driver,
const DexCompilationUnit& dex_compilation_unit,
- PassInfoPrinter* pass_info) const;
+ PassInfoPrinter* pass_info_printer) const;
// Just compile without doing optimizations.
CompiledMethod* CompileBaseline(CodeGenerator* codegen,
CompilerDriver* driver,
- const DexCompilationUnit& dex_compilation_unit) const;
+ const DexCompilationUnit& dex_compilation_unit,
+ PassInfoPrinter* pass_info_printer) const;
std::unique_ptr<OptimizingCompilerStats> compilation_stats_;
@@ -429,7 +440,7 @@
MaybeRecordStat(MethodCompilationStat::kCompiledOptimized);
- return CompiledMethod::SwapAllocCompiledMethod(
+ CompiledMethod* compiled_method = CompiledMethod::SwapAllocCompiledMethod(
compiler_driver,
codegen->GetInstructionSet(),
ArrayRef<const uint8_t>(allocator.GetMemory()),
@@ -445,12 +456,15 @@
ArrayRef<const uint8_t>(), // native_gc_map.
ArrayRef<const uint8_t>(*codegen->GetAssembler()->cfi().data()),
ArrayRef<const LinkerPatch>());
+ pass_info_printer->DumpDisassembly();
+ return compiled_method;
}
CompiledMethod* OptimizingCompiler::CompileBaseline(
CodeGenerator* codegen,
CompilerDriver* compiler_driver,
- const DexCompilationUnit& dex_compilation_unit) const {
+ const DexCompilationUnit& dex_compilation_unit,
+ PassInfoPrinter* pass_info_printer) const {
CodeVectorAllocator allocator;
codegen->CompileBaseline(&allocator);
@@ -466,7 +480,7 @@
codegen->BuildNativeGCMap(&gc_map, dex_compilation_unit);
MaybeRecordStat(MethodCompilationStat::kCompiledBaseline);
- return CompiledMethod::SwapAllocCompiledMethod(
+ CompiledMethod* compiled_method = CompiledMethod::SwapAllocCompiledMethod(
compiler_driver,
codegen->GetInstructionSet(),
ArrayRef<const uint8_t>(allocator.GetMemory()),
@@ -482,6 +496,8 @@
AlignVectorSize(gc_map),
ArrayRef<const uint8_t>(*codegen->GetAssembler()->cfi().data()),
ArrayRef<const LinkerPatch>());
+ pass_info_printer->DumpDisassembly();
+ return compiled_method;
}
CompiledMethod* OptimizingCompiler::TryCompile(const DexFile::CodeItem* code_item,
@@ -557,7 +573,7 @@
PassInfoPrinter pass_info_printer(graph,
method_name.c_str(),
- *codegen.get(),
+ codegen.get(),
visualizer_output_.get(),
compiler_driver);
@@ -617,7 +633,10 @@
MaybeRecordStat(MethodCompilationStat::kNotOptimizedRegisterAllocator);
}
- return CompileBaseline(codegen.get(), compiler_driver, dex_compilation_unit);
+ return CompileBaseline(codegen.get(),
+ compiler_driver,
+ dex_compilation_unit,
+ &pass_info_printer);
} else {
return nullptr;
}