Refactor allocation entrypoints.

Adds support for switching entrypoints during runtime. Enables
addition of new allocators without requiring significant
copy-paste. Slight speedup on ritzperf, probably due to more inlining.

TODO: Ensure that the entire allocation path is inlined so
that the switch statement in the allocation code is optimized
out.

RosAlloc measurements (before change):
4583
4453
4439
4434
4751

After change:
4184
4287
4131
4335
4097

Change-Id: I1352a3cbcdf6dae93921582726324d91312df5c9
diff --git a/runtime/mirror/array-inl.h b/runtime/mirror/array-inl.h
index ef73e4d..2955faa 100644
--- a/runtime/mirror/array-inl.h
+++ b/runtime/mirror/array-inl.h
@@ -59,43 +59,44 @@
 }
 
 static inline Array* SetArrayLength(Array* array, size_t length) {
-  if (LIKELY(array != NULL)) {
+  if (LIKELY(array != nullptr)) {
     DCHECK(array->IsArrayInstance());
     array->SetLength(length);
   }
   return array;
 }
 
-template <bool kIsMovable, bool kIsInstrumented>
+template <bool kIsInstrumented>
 inline Array* Array::Alloc(Thread* self, Class* array_class, int32_t component_count,
-                           size_t component_size) {
+                           size_t component_size, gc::AllocatorType allocator_type) {
   size_t size = ComputeArraySize(self, array_class, component_count, component_size);
   if (UNLIKELY(size == 0)) {
-    return NULL;
+    return nullptr;
   }
   gc::Heap* heap = Runtime::Current()->GetHeap();
-  Array* array = nullptr;
-  if (kIsMovable) {
-    if (kIsInstrumented) {
-      array = down_cast<Array*>(heap->AllocMovableObjectInstrumented(self, array_class, size));
-    } else {
-      array = down_cast<Array*>(heap->AllocMovableObjectUninstrumented(self, array_class, size));
-    }
-  } else {
-    if (kIsInstrumented) {
-      array = down_cast<Array*>(heap->AllocNonMovableObjectInstrumented(self, array_class, size));
-    } else {
-      array = down_cast<Array*>(heap->AllocNonMovableObjectUninstrumented(self, array_class, size));
-    }
-  }
+  Array* array = down_cast<Array*>(
+      heap->AllocObjectWithAllocator<kIsInstrumented>(self, array_class, size, allocator_type));
   return SetArrayLength(array, component_count);
 }
 
-template <bool kIsMovable, bool kIsInstrumented>
-inline Array* Array::Alloc(Thread* self, Class* array_class, int32_t component_count) {
+template <bool kIsInstrumented>
+inline Array* Array::Alloc(Thread* self, Class* array_class, int32_t component_count,
+                           gc::AllocatorType allocator_type) {
   DCHECK(array_class->IsArrayClass());
-  return Alloc<kIsMovable, kIsInstrumented>(self, array_class, component_count,
-                                            array_class->GetComponentSize());
+  return Alloc<kIsInstrumented>(self, array_class, component_count, array_class->GetComponentSize(),
+                                allocator_type);
+}
+template <bool kIsInstrumented>
+inline Array* Array::Alloc(Thread* self, Class* array_class, int32_t component_count) {
+  return Alloc<kIsInstrumented>(self, array_class, component_count,
+               Runtime::Current()->GetHeap()->GetCurrentAllocator());
+}
+
+template <bool kIsInstrumented>
+inline Array* Array::Alloc(Thread* self, Class* array_class, int32_t component_count,
+                           size_t component_size) {
+  return Alloc<kIsInstrumented>(self, array_class, component_count, component_size,
+               Runtime::Current()->GetHeap()->GetCurrentAllocator());
 }
 
 }  // namespace mirror
diff --git a/runtime/mirror/array.cc b/runtime/mirror/array.cc
index f8a2832..00b88db 100644
--- a/runtime/mirror/array.cc
+++ b/runtime/mirror/array.cc
@@ -44,8 +44,7 @@
                                         SirtRef<mirror::IntArray>& dimensions)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   int32_t array_length = dimensions->Get(current_dimension);
-  SirtRef<Array> new_array(self, Array::Alloc<kMovingCollector, true>(self, array_class,
-                                                                      array_length));
+  SirtRef<Array> new_array(self, Array::Alloc<true>(self, array_class, array_length));
   if (UNLIKELY(new_array.get() == NULL)) {
     CHECK(self->IsExceptionPending());
     return NULL;
@@ -115,7 +114,7 @@
 template<typename T>
 PrimitiveArray<T>* PrimitiveArray<T>::Alloc(Thread* self, size_t length) {
   DCHECK(array_class_ != NULL);
-  Array* raw_array = Array::Alloc<kMovingCollector, true>(self, array_class_, length, sizeof(T));
+  Array* raw_array = Array::Alloc<true>(self, array_class_, length, sizeof(T));
   return down_cast<PrimitiveArray<T>*>(raw_array);
 }
 
diff --git a/runtime/mirror/array.h b/runtime/mirror/array.h
index 584a4c0..a332f97 100644
--- a/runtime/mirror/array.h
+++ b/runtime/mirror/array.h
@@ -18,6 +18,7 @@
 #define ART_RUNTIME_MIRROR_ARRAY_H_
 
 #include "object.h"
+#include "gc/heap.h"
 
 namespace art {
 namespace mirror {
@@ -26,13 +27,24 @@
  public:
   // A convenience for code that doesn't know the component size, and doesn't want to have to work
   // it out itself.
-  template <bool kIsMovable, bool kIsInstrumented>
+  template <bool kIsInstrumented>
+  static Array* Alloc(Thread* self, Class* array_class, int32_t component_count,
+                      gc::AllocatorType allocator_type)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  template <bool kIsInstrumented>
+  static Array* Alloc(Thread* self, Class* array_class, int32_t component_count,
+                      size_t component_size, gc::AllocatorType allocator_type)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  template <bool kIsInstrumented>
   static Array* Alloc(Thread* self, Class* array_class, int32_t component_count)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  template <bool kIsMovable, bool kIsInstrumented>
+  template <bool kIsInstrumented>
   static Array* Alloc(Thread* self, Class* array_class, int32_t component_count,
-                      size_t component_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+                      size_t component_size)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   static Array* CreateMultiArray(Thread* self, Class* element_class, IntArray* dimensions)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index 406ab1b..4dcce1e 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -357,23 +357,20 @@
   DCHECK_GE(this->object_size_, sizeof(Object));
 }
 
-template <bool kIsMovable, bool kIsInstrumented>
-inline Object* Class::Alloc(Thread* self) {
+template <bool kIsInstrumented>
+inline Object* Class::Alloc(Thread* self, gc::AllocatorType allocator_type) {
   CheckObjectAlloc();
   gc::Heap* heap = Runtime::Current()->GetHeap();
-  if (kIsMovable) {
-    if (kIsInstrumented) {
-      return heap->AllocMovableObjectInstrumented(self, this, this->object_size_);
-    } else {
-      return heap->AllocMovableObjectUninstrumented(self, this, this->object_size_);
-    }
-  } else {
-    if (kIsInstrumented) {
-      return heap->AllocNonMovableObjectInstrumented(self, this, this->object_size_);
-    } else {
-      return heap->AllocNonMovableObjectUninstrumented(self, this, this->object_size_);
-    }
-  }
+  return heap->AllocObjectWithAllocator<kIsInstrumented>(self, this, this->object_size_,
+                                                         allocator_type);
+}
+
+inline Object* Class::AllocObject(Thread* self) {
+  return Alloc<true>(self, Runtime::Current()->GetHeap()->GetCurrentAllocator());
+}
+
+inline Object* Class::AllocNonMovableObject(Thread* self) {
+  return Alloc<true>(self, Runtime::Current()->GetHeap()->GetCurrentNonMovingAllocator());
 }
 
 }  // namespace mirror
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 82077dc..5f64bb4 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -17,6 +17,7 @@
 #ifndef ART_RUNTIME_MIRROR_CLASS_H_
 #define ART_RUNTIME_MIRROR_CLASS_H_
 
+#include "gc/heap.h"
 #include "modifiers.h"
 #include "object.h"
 #include "primitive.h"
@@ -377,13 +378,14 @@
   }
 
   // Creates a raw object instance but does not invoke the default constructor.
-  Object* AllocObject(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    return Alloc<kMovingCollector, true>(self);
-  }
+  template <bool kIsInstrumented>
+  ALWAYS_INLINE Object* Alloc(Thread* self, gc::AllocatorType allocator_type)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  // Creates a raw object instance but does not invoke the default constructor.
-  template <bool kIsMovable, bool kIsInstrumented>
-  Object* Alloc(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  Object* AllocObject(Thread* self)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  Object* AllocNonMovableObject(Thread* self)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   bool IsVariableSize() const {
     // Classes and arrays vary in size, and so the object_size_ field cannot
diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc
index 385ef5f..008a173 100644
--- a/runtime/mirror/object.cc
+++ b/runtime/mirror/object.cc
@@ -75,9 +75,9 @@
   SirtRef<Object> this_object(self, this);
   Object* copy;
   if (heap->IsMovableObject(this)) {
-    copy = heap->AllocObject(self, GetClass(), num_bytes);
+    copy = heap->AllocObject<true>(self, GetClass(), num_bytes);
   } else {
-    copy = heap->AllocNonMovableObject(self, GetClass(), num_bytes);
+    copy = heap->AllocNonMovableObject<true>(self, GetClass(), num_bytes);
   }
   if (LIKELY(copy != nullptr)) {
     return CopyObject(self, copy, this_object.get(), num_bytes);
diff --git a/runtime/mirror/object_array-inl.h b/runtime/mirror/object_array-inl.h
index 478f4ec..be49b42 100644
--- a/runtime/mirror/object_array-inl.h
+++ b/runtime/mirror/object_array-inl.h
@@ -30,16 +30,25 @@
 namespace mirror {
 
 template<class T>
-inline ObjectArray<T>* ObjectArray<T>::Alloc(Thread* self, Class* object_array_class, int32_t length) {
-  Array* array = Array::Alloc<kMovingCollector, true>(self, object_array_class, length, sizeof(Object*));
-  if (UNLIKELY(array == NULL)) {
-    return NULL;
+inline ObjectArray<T>* ObjectArray<T>::Alloc(Thread* self, Class* object_array_class,
+                                             int32_t length, gc::AllocatorType allocator_type) {
+  Array* array = Array::Alloc<true>(self, object_array_class, length, sizeof(Object*),
+                                    allocator_type);
+  if (UNLIKELY(array == nullptr)) {
+    return nullptr;
   } else {
     return array->AsObjectArray<T>();
   }
 }
 
 template<class T>
+inline ObjectArray<T>* ObjectArray<T>::Alloc(Thread* self, Class* object_array_class,
+                                             int32_t length) {
+  return Alloc(self, object_array_class, length,
+               Runtime::Current()->GetHeap()->GetCurrentAllocator());
+}
+
+template<class T>
 inline T* ObjectArray<T>::Get(int32_t i) const {
   if (UNLIKELY(!IsValidIndex(i))) {
     return NULL;
@@ -137,7 +146,10 @@
 inline ObjectArray<T>* ObjectArray<T>::CopyOf(Thread* self, int32_t new_length) {
   // We may get copied by a compacting GC.
   SirtRef<ObjectArray<T> > sirt_this(self, this);
-  ObjectArray<T>* new_array = Alloc(self, GetClass(), new_length);
+  gc::Heap* heap = Runtime::Current()->GetHeap();
+  gc::AllocatorType allocator_type = heap->IsMovableObject(this) ? heap->GetCurrentAllocator() :
+      heap->GetCurrentNonMovingAllocator();
+  ObjectArray<T>* new_array = Alloc(self, GetClass(), new_length, allocator_type);
   if (LIKELY(new_array != nullptr)) {
     Copy(sirt_this.get(), 0, new_array, 0, std::min(sirt_this->GetLength(), new_length));
   }
diff --git a/runtime/mirror/object_array.h b/runtime/mirror/object_array.h
index 09ff519..5da8845 100644
--- a/runtime/mirror/object_array.h
+++ b/runtime/mirror/object_array.h
@@ -18,6 +18,7 @@
 #define ART_RUNTIME_MIRROR_OBJECT_ARRAY_H_
 
 #include "array.h"
+#include "gc/heap.h"
 
 namespace art {
 namespace mirror {
@@ -25,6 +26,10 @@
 template<class T>
 class MANAGED ObjectArray : public Array {
  public:
+  static ObjectArray<T>* Alloc(Thread* self, Class* object_array_class, int32_t length,
+                               gc::AllocatorType allocator_type)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
   static ObjectArray<T>* Alloc(Thread* self, Class* object_array_class, int32_t length)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
diff --git a/runtime/mirror/object_test.cc b/runtime/mirror/object_test.cc
index 8530317..8272ff8 100644
--- a/runtime/mirror/object_test.cc
+++ b/runtime/mirror/object_test.cc
@@ -144,15 +144,15 @@
 TEST_F(ObjectTest, AllocArray) {
   ScopedObjectAccess soa(Thread::Current());
   Class* c = class_linker_->FindSystemClass("[I");
-  SirtRef<Array> a(soa.Self(), Array::Alloc<kMovingCollector, true>(soa.Self(), c, 1));
+  SirtRef<Array> a(soa.Self(), Array::Alloc<true>(soa.Self(), c, 1));
   ASSERT_TRUE(c == a->GetClass());
 
   c = class_linker_->FindSystemClass("[Ljava/lang/Object;");
-  a.reset(Array::Alloc<kMovingCollector, true>(soa.Self(), c, 1));
+  a.reset(Array::Alloc<true>(soa.Self(), c, 1));
   ASSERT_TRUE(c == a->GetClass());
 
   c = class_linker_->FindSystemClass("[[Ljava/lang/Object;");
-  a.reset(Array::Alloc<kMovingCollector, true>(soa.Self(), c, 1));
+  a.reset(Array::Alloc<true>(soa.Self(), c, 1));
   ASSERT_TRUE(c == a->GetClass());
 }
 
@@ -221,7 +221,8 @@
       java_lang_dex_file_->GetIndexForStringId(*string_id));
   ASSERT_TRUE(type_id != NULL);
   uint32_t type_idx = java_lang_dex_file_->GetIndexForTypeId(*type_id);
-  Object* array = CheckAndAllocArrayFromCode(type_idx, sort, 3, Thread::Current(), false);
+  Object* array = CheckAndAllocArrayFromCode(type_idx, sort, 3, Thread::Current(), false,
+                                             Runtime::Current()->GetHeap()->GetCurrentAllocator());
   EXPECT_TRUE(array->IsArrayInstance());
   EXPECT_EQ(3, array->AsArray()->GetLength());
   EXPECT_TRUE(array->GetClass()->IsArrayClass());