Run Java tests with the optimizing compiler.

Also fix a vector::reserve -> vector::resize braino, and build
a GC map that dex2oat expects.
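
For the record, reserve() only grows capacity: the vector's size stays 0, so
the pointer returned by Allocate() points at elements that do not exist and
the copy taken via GetMemory() ends up empty. A minimal sketch of the
difference, using plain std::vector semantics rather than the full
CodeVectorAllocator:

    std::vector<uint8_t> memory;
    memory.reserve(16);      // capacity >= 16, but memory.size() == 0
    // Writing through &memory[0] is out of bounds, and copying the
    // vector yields no bytes.
    memory.resize(16);       // size() == 16: elements exist, zero-filled
    uint8_t* code = &memory[0];  // safe to write generated code here

resize() zero-fills the new elements, which is harmless since the caller
immediately overwrites them with the generated code.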

Change-Id: I6acf2f90a4c32f90b79bf7709bf2e43931b98757
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index 9e5ec6e..60f8796 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -17,6 +17,7 @@
 #include "dex/compiler_internals.h"
 #include "dex_file-inl.h"
 #include "gc_map.h"
+#include "gc_map_builder.h"
 #include "mapping_table.h"
 #include "mir_to_lir-inl.h"
 #include "dex/quick/dex_file_method_inliner.h"
@@ -677,84 +678,6 @@
   }
 }
 
-class NativePcToReferenceMapBuilder {
- public:
-  NativePcToReferenceMapBuilder(std::vector<uint8_t>* table,
-                                size_t entries, uint32_t max_native_offset,
-                                size_t references_width) : entries_(entries),
-                                references_width_(references_width), in_use_(entries),
-                                table_(table) {
-    // Compute width in bytes needed to hold max_native_offset.
-    native_offset_width_ = 0;
-    while (max_native_offset != 0) {
-      native_offset_width_++;
-      max_native_offset >>= 8;
-    }
-    // Resize table and set up header.
-    table->resize((EntryWidth() * entries) + sizeof(uint32_t));
-    CHECK_LT(native_offset_width_, 1U << 3);
-    (*table)[0] = native_offset_width_ & 7;
-    CHECK_LT(references_width_, 1U << 13);
-    (*table)[0] |= (references_width_ << 3) & 0xFF;
-    (*table)[1] = (references_width_ >> 5) & 0xFF;
-    CHECK_LT(entries, 1U << 16);
-    (*table)[2] = entries & 0xFF;
-    (*table)[3] = (entries >> 8) & 0xFF;
-  }
-
-  void AddEntry(uint32_t native_offset, const uint8_t* references) {
-    size_t table_index = TableIndex(native_offset);
-    while (in_use_[table_index]) {
-      table_index = (table_index + 1) % entries_;
-    }
-    in_use_[table_index] = true;
-    SetCodeOffset(table_index, native_offset);
-    DCHECK_EQ(native_offset, GetCodeOffset(table_index));
-    SetReferences(table_index, references);
-  }
-
- private:
-  size_t TableIndex(uint32_t native_offset) {
-    return NativePcOffsetToReferenceMap::Hash(native_offset) % entries_;
-  }
-
-  uint32_t GetCodeOffset(size_t table_index) {
-    uint32_t native_offset = 0;
-    size_t table_offset = (table_index * EntryWidth()) + sizeof(uint32_t);
-    for (size_t i = 0; i < native_offset_width_; i++) {
-      native_offset |= (*table_)[table_offset + i] << (i * 8);
-    }
-    return native_offset;
-  }
-
-  void SetCodeOffset(size_t table_index, uint32_t native_offset) {
-    size_t table_offset = (table_index * EntryWidth()) + sizeof(uint32_t);
-    for (size_t i = 0; i < native_offset_width_; i++) {
-      (*table_)[table_offset + i] = (native_offset >> (i * 8)) & 0xFF;
-    }
-  }
-
-  void SetReferences(size_t table_index, const uint8_t* references) {
-    size_t table_offset = (table_index * EntryWidth()) + sizeof(uint32_t);
-    memcpy(&(*table_)[table_offset + native_offset_width_], references, references_width_);
-  }
-
-  size_t EntryWidth() const {
-    return native_offset_width_ + references_width_;
-  }
-
-  // Number of entries in the table.
-  const size_t entries_;
-  // Number of bytes used to encode the reference bitmap.
-  const size_t references_width_;
-  // Number of bytes used to encode a native offset.
-  size_t native_offset_width_;
-  // Entries that are in use.
-  std::vector<bool> in_use_;
-  // The table we're building.
-  std::vector<uint8_t>* const table_;
-};
-
 void Mir2Lir::CreateNativeGcMap() {
   DCHECK(!encoded_mapping_table_.empty());
   MappingTable mapping_table(&encoded_mapping_table_[0]);
@@ -771,9 +694,9 @@
   verifier::DexPcToReferenceMap dex_gc_map(&(gc_map_raw)[0]);
   DCHECK_EQ(gc_map_raw.size(), dex_gc_map.RawSize());
   // Compute native offset to references size.
-  NativePcToReferenceMapBuilder native_gc_map_builder(&native_gc_map_,
-                                                      mapping_table.PcToDexSize(),
-                                                      max_native_offset, dex_gc_map.RegWidth());
+  GcMapBuilder native_gc_map_builder(&native_gc_map_,
+                                     mapping_table.PcToDexSize(),
+                                     max_native_offset, dex_gc_map.RegWidth());
 
   for (auto it = mapping_table.PcToDexBegin(), end = mapping_table.PcToDexEnd(); it != end; ++it) {
     uint32_t native_offset = it.NativePcOffset();
diff --git a/compiler/gc_map_builder.h b/compiler/gc_map_builder.h
new file mode 100644
index 0000000..5a7a9e0
--- /dev/null
+++ b/compiler/gc_map_builder.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_GC_MAP_BUILDER_H_
+#define ART_COMPILER_GC_MAP_BUILDER_H_
+
+#include <stdint.h>
+#include <string.h>
+
+#include <vector>
+
+#include "base/logging.h"
+#include "gc_map.h"
+
+namespace art {
+
+class GcMapBuilder {
+ public:
+  GcMapBuilder(std::vector<uint8_t>* table,
+               size_t entries, uint32_t max_native_offset,
+               size_t references_width) : entries_(entries),
+               references_width_(references_width), in_use_(entries),
+               table_(table) {
+    // Compute width in bytes needed to hold max_native_offset.
+    native_offset_width_ = 0;
+    while (max_native_offset != 0) {
+      native_offset_width_++;
+      max_native_offset >>= 8;
+    }
+    // Resize table and set up header.
+    table->resize((EntryWidth() * entries) + sizeof(uint32_t));
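+    // Header layout (4 bytes): bits [0,3) of byte 0 hold native_offset_width_,
+    // bits [3,16) across bytes 0 and 1 hold references_width_, and bytes 2 and
+    // 3 hold the entry count; multi-byte fields are little-endian.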
+    CHECK_LT(native_offset_width_, 1U << 3);
+    (*table)[0] = native_offset_width_ & 7;
+    CHECK_LT(references_width_, 1U << 13);
+    (*table)[0] |= (references_width_ << 3) & 0xFF;
+    (*table)[1] = (references_width_ >> 5) & 0xFF;
+    CHECK_LT(entries, 1U << 16);
+    (*table)[2] = entries & 0xFF;
+    (*table)[3] = (entries >> 8) & 0xFF;
+  }
+
+  void AddEntry(uint32_t native_offset, const uint8_t* references) {
+    size_t table_index = TableIndex(native_offset);
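+    // Linear probing: start at the hash slot and advance modulo entries_ until
+    // a free slot is found.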
+    while (in_use_[table_index]) {
+      table_index = (table_index + 1) % entries_;
+    }
+    in_use_[table_index] = true;
+    SetCodeOffset(table_index, native_offset);
+    DCHECK_EQ(native_offset, GetCodeOffset(table_index));
+    SetReferences(table_index, references);
+  }
+
+ private:
+  size_t TableIndex(uint32_t native_offset) {
+    return NativePcOffsetToReferenceMap::Hash(native_offset) % entries_;
+  }
+
+  uint32_t GetCodeOffset(size_t table_index) {
+    uint32_t native_offset = 0;
+    size_t table_offset = (table_index * EntryWidth()) + sizeof(uint32_t);
+    for (size_t i = 0; i < native_offset_width_; i++) {
+      native_offset |= (*table_)[table_offset + i] << (i * 8);
+    }
+    return native_offset;
+  }
+
+  void SetCodeOffset(size_t table_index, uint32_t native_offset) {
+    size_t table_offset = (table_index * EntryWidth()) + sizeof(uint32_t);
+    for (size_t i = 0; i < native_offset_width_; i++) {
+      (*table_)[table_offset + i] = (native_offset >> (i * 8)) & 0xFF;
+    }
+  }
+
+  void SetReferences(size_t table_index, const uint8_t* references) {
+    size_t table_offset = (table_index * EntryWidth()) + sizeof(uint32_t);
+    memcpy(&(*table_)[table_offset + native_offset_width_], references, references_width_);
+  }
+
+  size_t EntryWidth() const {
+    return native_offset_width_ + references_width_;
+  }
+
+  // Number of entries in the table.
+  const size_t entries_;
+  // Number of bytes used to encode the reference bitmap.
+  const size_t references_width_;
+  // Number of bytes used to encode a native offset.
+  size_t native_offset_width_;
+  // Entries that are in use.
+  std::vector<bool> in_use_;
+  // The table we're building.
+  std::vector<uint8_t>* const table_;
+};
+
+}  // namespace art
+
+#endif  // ART_COMPILER_GC_MAP_BUILDER_H_
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index b2a69d8..bb6ac84 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -18,7 +18,11 @@
 
 #include "code_generator_arm.h"
 #include "code_generator_x86.h"
+#include "dex/verified_method.h"
+#include "driver/dex_compilation_unit.h"
+#include "gc_map_builder.h"
 #include "utils/assembler.h"
+#include "verifier/dex_gc_map.h"
 
 namespace art {
 
@@ -110,4 +114,14 @@
   }
 }
 
+void CodeGenerator::BuildNativeGCMap(
+    std::vector<uint8_t>* data, const DexCompilationUnit& dex_compilation_unit) const {
+  const std::vector<uint8_t>& gc_map_raw =
+      dex_compilation_unit.GetVerifiedMethod()->GetDexGcMap();
+  verifier::DexPcToReferenceMap dex_gc_map(&(gc_map_raw)[0]);
+
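+  // Emit an empty but well-formed map (zero entries, zero max offset) so that
+  // dex2oat finds the GC map header it expects.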
+  GcMapBuilder builder(data, 0, 0, dex_gc_map.RegWidth());
+}
+
+
 }  // namespace art
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index e95bb21..63f8cbf 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -25,6 +25,8 @@
 
 namespace art {
 
+class DexCompilationUnit;
+
 class CodeAllocator {
  public:
   CodeAllocator() { }
@@ -135,7 +137,8 @@
 
   void BuildMappingTable(std::vector<uint8_t>* vector) const { }
   void BuildVMapTable(std::vector<uint8_t>* vector) const { }
-  void BuildNativeGCMap(std::vector<uint8_t>* vector) const { }
+  void BuildNativeGCMap(
+      std::vector<uint8_t>* vector, const DexCompilationUnit& dex_compilation_unit) const;
 
  protected:
   explicit CodeGenerator(HGraph* graph)
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index cc36bbe..334b185 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -20,6 +20,7 @@
 #include "code_generator.h"
 #include "compilers.h"
 #include "driver/compiler_driver.h"
+#include "driver/dex_compilation_unit.h"
 #include "nodes.h"
 #include "utils/arena_allocator.h"
 
@@ -34,12 +35,12 @@
 
   virtual uint8_t* Allocate(size_t size) {
     size_ = size;
-    memory_.reserve(size);
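+    // resize(), not reserve(): the elements must exist before &memory_[0] is
+    // handed out and before GetMemory() is copied into the CompiledMethod.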
+    memory_.resize(size);
     return &memory_[0];
   }
 
   size_t GetSize() const { return size_; }
-  std::vector<uint8_t>* GetMemory() { return &memory_; }
+  const std::vector<uint8_t>& GetMemory() const { return memory_; }
 
  private:
   std::vector<uint8_t> memory_;
@@ -57,6 +58,10 @@
                                                uint32_t method_idx,
                                                jobject class_loader,
                                                const DexFile& dex_file) const {
+  DexCompilationUnit dex_compilation_unit(
+    nullptr, class_loader, art::Runtime::Current()->GetClassLinker(), dex_file, code_item,
+    class_def_idx, method_idx, access_flags, driver.GetVerifiedMethod(&dex_file, method_idx));
+
   ArenaPool pool;
   ArenaAllocator arena(&pool);
   HGraphBuilder builder(&arena);
@@ -79,11 +84,11 @@
   std::vector<uint8_t> vmap_table;
   codegen->BuildVMapTable(&vmap_table);
   std::vector<uint8_t> gc_map;
-  codegen->BuildNativeGCMap(&gc_map);
+  codegen->BuildNativeGCMap(&gc_map, dex_compilation_unit);
 
   return new CompiledMethod(driver,
                             instruction_set,
-                            *allocator.GetMemory(),
+                            allocator.GetMemory(),
                             codegen->GetFrameSize(),
                             0, /* GPR spill mask, unused */
                             0, /* FPR spill mask, unused */