Re-enable large object space.

Switch from the continuous to the discontinuous LOS to avoid latent
bugs that were being exposed by kernel changes.
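
For reference, the discontinuous map-based LOS keeps one anonymous mapping
per object and releases it with munmap on free. Below is a minimal sketch of
that shape (illustrative only; ToyMapSpace and its members are invented for
this note and are not ART's LargeObjectMapSpace):

  #include <sys/mman.h>
  #include <cstddef>
  #include <map>

  // Toy stand-in for a map-backed large object space: one anonymous mapping
  // per object, unmapped individually when the object is freed.
  class ToyMapSpace {
   public:
    void* Alloc(size_t num_bytes) {
      void* mem = mmap(NULL, num_bytes, PROT_READ | PROT_WRITE,
                       MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
      if (mem == MAP_FAILED) {
        return NULL;
      }
      sizes_[mem] = num_bytes;  // Remember the mapping size for Free().
      return mem;
    }

    size_t Free(void* obj) {
      std::map<void*, size_t>::iterator it = sizes_.find(obj);
      if (it == sizes_.end()) {
        return 0;  // Not allocated by this space.
      }
      size_t freed = it->second;
      munmap(obj, freed);
      sizes_.erase(it);
      return freed;
    }

   private:
    std::map<void*, size_t> sizes_;  // Plays the role of ART's mem_maps_ SafeMap.
  };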

Change-Id: I150a3d8635f7e8ce0af465e135b328f9a65007c1
diff --git a/src/gc/large_object_space.cc b/src/gc/large_object_space.cc
index a589e67..81f048c 100644
--- a/src/gc/large_object_space.cc
+++ b/src/gc/large_object_space.cc
@@ -201,6 +201,9 @@
   CHECK(!chunk->IsFree());
 
   size_t allocation_size = chunk->GetSize();
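+  // Poison freed memory in debug builds to help catch stale references to freed large objects.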
+  if (kIsDebugBuild) {
+    memset(obj, 0xEB, allocation_size);
+  }
   madvise(obj, allocation_size, MADV_DONTNEED);
   num_objects_allocated_--;
   num_bytes_allocated_ -= allocation_size;
diff --git a/src/gc/large_object_space.h b/src/gc/large_object_space.h
index c34dbcc..1654f9c 100644
--- a/src/gc/large_object_space.h
+++ b/src/gc/large_object_space.h
@@ -26,6 +26,7 @@
 namespace art {
 class SpaceSetMap;
 
+// Abstraction implemented by all large object spaces.
 class LargeObjectSpace : public DiscontinuousSpace, public AllocSpace {
  public:
   virtual bool CanAllocateInto() const {
@@ -87,9 +88,9 @@
   friend class Space;
 };
 
+// A discontinuous large object space implemented by individual mmap/munmap calls.
 class LargeObjectMapSpace : public LargeObjectSpace {
  public:
-
   // Creates a large object space. Allocations into the large object space use memory maps instead
   // of malloc.
   static LargeObjectMapSpace* Create(const std::string& name);
@@ -105,24 +106,25 @@
   virtual ~LargeObjectMapSpace() {}
 
   // Used to ensure mutual exclusion when the allocation space's data structures are being modified.
-  mutable Mutex lock_;
-  std::vector<mirror::Object*> large_objects_;
+  mutable Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+  std::vector<mirror::Object*> large_objects_ GUARDED_BY(lock_);
   typedef SafeMap<mirror::Object*, MemMap*> MemMaps;
-  MemMaps mem_maps_;
+  MemMaps mem_maps_ GUARDED_BY(lock_);
 };
 
+// A continuous large object space with a free-list to handle holes.
 class FreeListSpace : public LargeObjectSpace {
  public:
   virtual ~FreeListSpace();
   static FreeListSpace* Create(const std::string& name, byte* requested_begin, size_t capacity);
 
-  size_t AllocationSize(const mirror::Object* obj);
+  size_t AllocationSize(const mirror::Object* obj) EXCLUSIVE_LOCKS_REQUIRED(lock_);
   mirror::Object* Alloc(Thread* self, size_t num_bytes);
   size_t Free(Thread* self, mirror::Object* obj);
   bool Contains(const mirror::Object* obj) const;
   void Walk(DlMallocSpace::WalkCallback callback, void* arg);
 
-  // Address at which the space begins
+  // Address at which the space begins.
   byte* Begin() const {
     return begin_;
   }
@@ -179,19 +181,19 @@
   };
 
   FreeListSpace(const std::string& name, MemMap* mem_map, byte* begin, byte* end);
-  void AddFreeChunk(void* address, size_t size, Chunk* previous);
-  Chunk* ChunkFromAddr(void* address);
-  void* AddrFromChunk(Chunk* chunk);
-  void RemoveFreeChunk(Chunk* chunk);
+  void AddFreeChunk(void* address, size_t size, Chunk* previous) EXCLUSIVE_LOCKS_REQUIRED(lock_);
+  Chunk* ChunkFromAddr(void* address) EXCLUSIVE_LOCKS_REQUIRED(lock_);
+  void* AddrFromChunk(Chunk* chunk) EXCLUSIVE_LOCKS_REQUIRED(lock_);
+  void RemoveFreeChunk(Chunk* chunk) EXCLUSIVE_LOCKS_REQUIRED(lock_);
   Chunk* GetNextChunk(Chunk* chunk);
 
   typedef std::multiset<Chunk*, Chunk::SortBySize> FreeChunks;
-  byte* begin_;
-  byte* end_;
+  byte* const begin_;
+  byte* const end_;
   UniquePtr<MemMap> mem_map_;
-  Mutex lock_;
-  std::vector<Chunk> chunks_;
-  FreeChunks free_chunks_;
+  Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+  std::vector<Chunk> chunks_ GUARDED_BY(lock_);
+  FreeChunks free_chunks_ GUARDED_BY(lock_);
 };
 
 }
diff --git a/src/heap.cc b/src/heap.cc
index d7432a3..30da10e 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -244,7 +244,12 @@
   }
 
   // Allocate the large object space.
-  large_object_space_.reset(FreeListSpace::Create("large object space", NULL, capacity));
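+  // Default to the discontinuous map-based LOS; FreeListSpace is the continuous alternative.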
+  const bool kUseFreeListSpaceForLOS = false;
+  if (kUseFreeListSpaceForLOS) {
+    large_object_space_.reset(FreeListSpace::Create("large object space", NULL, capacity));
+  } else {
+    large_object_space_.reset(LargeObjectMapSpace::Create("large object space"));
+  }
   live_bitmap_->SetLargeObjects(large_object_space_->GetLiveObjects());
   mark_bitmap_->SetLargeObjects(large_object_space_->GetMarkObjects());
 
@@ -494,11 +499,7 @@
   // Zygote resulting in it being prematurely freed.
   // We can only do this for primitive objects since large objects will not be within the card table
   // range. This also means that we rely on SetClass not dirtying the object's card.
-  //
-  // TODO: note "false && " to temporarily disable large object space
-  // due to issues on occam and mantaray.  We were seeing objects in
-  // the large object space with NULL klass_ fields.
-  if (false && byte_count >= large_object_threshold_ && have_zygote_space_ && c->IsPrimitiveArray()) {
+  if (byte_count >= large_object_threshold_ && have_zygote_space_ && c->IsPrimitiveArray()) {
     size = RoundUp(byte_count, kPageSize);
     obj = Allocate(self, large_object_space_.get(), size);
     // Make sure that our large object didn't get placed anywhere within the space interval or else