Opt compiler: Add disassembly to the '.cfg' output.

The disassembly is automatically included in the '.cfg' output when the
usual `--dump-cfg` option is used.
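
For reference, an invocation along these lines produces the annotated
'.cfg' file (the file paths and the backend flag are illustrative; only
`--dump-cfg` itself is needed for this feature):

    dex2oat --compiler-backend=Optimizing \
        --dex-file=app.apk --oat-file=app.oat --dump-cfg=app.cfg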

Change-Id: I864bfc3a8299c042e72e451cc7730ad8271e4deb
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 64f2c9a..cd10935 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -25,6 +25,7 @@
 #include "dex/verified_method.h"
 #include "driver/dex_compilation_unit.h"
 #include "gc_map_builder.h"
+#include "graph_visualizer.h"
 #include "leb128.h"
 #include "mapping_table.h"
 #include "mirror/array-inl.h"
@@ -159,12 +160,55 @@
   return block;
 }
 
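+// Scope object recording the range of native code generated for a single
+// HInstruction: it samples the assembler's code size on construction and,
+// on destruction, reports the resulting [start, end) interval to the
+// DisassemblyInformation attached to the code generator.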
+class DisassemblyScope {
+ public:
+  DisassemblyScope(HInstruction* instruction, const CodeGenerator& codegen)
+      : codegen_(codegen), instruction_(instruction), start_offset_(static_cast<size_t>(-1)) {
+    if (codegen_.GetDisassemblyInformation() != nullptr) {
+      start_offset_ = codegen_.GetAssembler().CodeSize();
+    }
+  }
+
+  ~DisassemblyScope() {
+    // We avoid building this data when we know it will not be used.
+    if (codegen_.GetDisassemblyInformation() != nullptr) {
+      codegen_.GetDisassemblyInformation()->AddInstructionInterval(
+          instruction_, start_offset_, codegen_.GetAssembler().CodeSize());
+    }
+  }
+
+ private:
+  const CodeGenerator& codegen_;
+  HInstruction* instruction_;
+  size_t start_offset_;
+};
+
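+// Emit the code for all slow paths, recording the interval of native code
+// generated for each one when disassembly information is requested.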
+void CodeGenerator::GenerateSlowPaths() {
+  size_t code_start = 0;
+  for (size_t i = 0, e = slow_paths_.Size(); i < e; ++i) {
+    if (disasm_info_ != nullptr) {
+      code_start = GetAssembler()->CodeSize();
+    }
+    slow_paths_.Get(i)->EmitNativeCode(this);
+    if (disasm_info_ != nullptr) {
+      disasm_info_->AddSlowPathInterval(slow_paths_.Get(i), code_start, GetAssembler()->CodeSize());
+    }
+  }
+}
+
 void CodeGenerator::CompileInternal(CodeAllocator* allocator, bool is_baseline) {
   is_baseline_ = is_baseline;
   HGraphVisitor* instruction_visitor = GetInstructionVisitor();
   DCHECK_EQ(current_block_index_, 0u);
+
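+  // Remember where the frame entry code starts so that its interval can be
+  // recorded in the disassembly information.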
+  size_t frame_start = GetAssembler()->CodeSize();
   GenerateFrameEntry();
   DCHECK_EQ(GetAssembler()->cfi().GetCurrentCFAOffset(), static_cast<int>(frame_size_));
+  if (disasm_info_ != nullptr) {
+    disasm_info_->SetFrameEntryInterval(frame_start, GetAssembler()->CodeSize());
+  }
+
   for (size_t e = block_order_->Size(); current_block_index_ < e; ++current_block_index_) {
     HBasicBlock* block = block_order_->Get(current_block_index_);
     // Don't generate code for an empty block. Its predecessors will branch to its successor
@@ -174,6 +218,7 @@
     Bind(block);
     for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
       HInstruction* current = it.Current();
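+      // Record the interval of native code generated for this instruction.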
+      DisassemblyScope disassembly_scope(current, *this);
       if (is_baseline) {
         InitLocationsBaseline(current);
       }
@@ -182,10 +227,7 @@
     }
   }
 
-  // Generate the slow paths.
-  for (size_t i = 0, e = slow_paths_.Size(); i < e; ++i) {
-    slow_paths_.Get(i)->EmitNativeCode(this);
-  }
+  GenerateSlowPaths();
 
  // Finalize the instructions in the assembler.
   Finalize(allocator);