Merge "Add mark compact collector."
diff --git a/runtime/Android.mk b/runtime/Android.mk
index c40ae7a..992202a 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -54,6 +54,7 @@
 	gc/collector/concurrent_copying.cc \
 	gc/collector/garbage_collector.cc \
 	gc/collector/immune_region.cc \
+	gc/collector/mark_compact.cc \
 	gc/collector/mark_sweep.cc \
 	gc/collector/partial_mark_sweep.cc \
 	gc/collector/semi_space.cc \
diff --git a/runtime/class_linker-inl.h b/runtime/class_linker-inl.h
index f745088..16e0ec3 100644
--- a/runtime/class_linker-inl.h
+++ b/runtime/class_linker-inl.h
@@ -18,6 +18,7 @@
 #define ART_RUNTIME_CLASS_LINKER_INL_H_
 
 #include "class_linker.h"
+#include "gc/heap-inl.h"
 #include "mirror/art_field.h"
 #include "mirror/class_loader.h"
 #include "mirror/dex_cache-inl.h"
@@ -186,13 +187,19 @@
 
 inline mirror::IfTable* ClassLinker::AllocIfTable(Thread* self, size_t ifcount) {
   return down_cast<mirror::IfTable*>(
-      mirror::IfTable::Alloc(self, GetClassRoot(kObjectArrayClass), ifcount * mirror::IfTable::kMax));
+      mirror::IfTable::Alloc(self, GetClassRoot(kObjectArrayClass),
+                             ifcount * mirror::IfTable::kMax));
 }
 
 inline mirror::ObjectArray<mirror::ArtField>* ClassLinker::AllocArtFieldArray(Thread* self,
                                                                               size_t length) {
+  gc::Heap* const heap = Runtime::Current()->GetHeap();
+  // Mark compact cannot use movable field arrays since we need these arrays to always be valid
+  // so that we can do Object::VisitReferences in the case where the fields don't fit in the
+  // reference offsets word.
   return mirror::ObjectArray<mirror::ArtField>::Alloc(
-      self, GetClassRoot(kJavaLangReflectArtFieldArrayClass), length);
+      self, GetClassRoot(kJavaLangReflectArtFieldArrayClass), length,
+      kMoveFieldArrays ? heap->GetCurrentAllocator() : heap->GetCurrentNonMovingAllocator());
 }
 
 inline mirror::Class* ClassLinker::GetClassRoot(ClassRoot class_root)
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index d684a50..d68aca9 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -1061,6 +1061,42 @@
   VLOG(startup) << "ClassLinker::InitFromImage exiting";
 }
 
+void ClassLinker::VisitClassRoots(RootCallback* callback, void* arg, VisitRootFlags flags) {
+  WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
+  if ((flags & kVisitRootFlagAllRoots) != 0) {
+    for (std::pair<const size_t, mirror::Class*>& it : class_table_) {
+      callback(reinterpret_cast<mirror::Object**>(&it.second), arg, 0, kRootStickyClass);
+    }
+  } else if ((flags & kVisitRootFlagNewRoots) != 0) {
+    for (auto& pair : new_class_roots_) {
+      mirror::Object* old_ref = pair.second;
+      callback(reinterpret_cast<mirror::Object**>(&pair.second), arg, 0, kRootStickyClass);
+      if (UNLIKELY(pair.second != old_ref)) {
+        // Uh oh, the GC moved a root in the log. Need to search the class_table and update the
+        // corresponding object. This is slow, but luckily for us, this may only happen with a
+        // concurrent moving GC.
+        for (auto it = class_table_.lower_bound(pair.first), end = class_table_.end();
+            it != end && it->first == pair.first; ++it) {
+          // If the class stored matches the old class, update it to the new value.
+          if (old_ref == it->second) {
+            it->second = pair.second;
+          }
+        }
+      }
+    }
+  }
+  if ((flags & kVisitRootFlagClearRootLog) != 0) {
+    new_class_roots_.clear();
+  }
+  if ((flags & kVisitRootFlagStartLoggingNewRoots) != 0) {
+    log_new_class_table_roots_ = true;
+  } else if ((flags & kVisitRootFlagStopLoggingNewRoots) != 0) {
+    log_new_class_table_roots_ = false;
+  }
+  // We deliberately ignore the class roots in the image since we
+  // handle image roots by using the MS/CMS rescanning of dirty cards.
+}
+
 // Keep in sync with InitCallback. Anything we visit, we need to
 // reinit references to when reinitializing a ClassLinker from a
 // mapped image.
@@ -1087,41 +1123,7 @@
       log_new_dex_caches_roots_ = false;
     }
   }
-  {
-    WriterMutexLock mu(self, *Locks::classlinker_classes_lock_);
-    if ((flags & kVisitRootFlagAllRoots) != 0) {
-      for (std::pair<const size_t, mirror::Class*>& it : class_table_) {
-        callback(reinterpret_cast<mirror::Object**>(&it.second), arg, 0, kRootStickyClass);
-      }
-    } else if ((flags & kVisitRootFlagNewRoots) != 0) {
-      for (auto& pair : new_class_roots_) {
-        mirror::Object* old_ref = pair.second;
-        callback(reinterpret_cast<mirror::Object**>(&pair.second), arg, 0, kRootStickyClass);
-        if (UNLIKELY(pair.second != old_ref)) {
-          // Uh ohes, GC moved a root in the log. Need to search the class_table and update the
-          // corresponding object. This is slow, but luckily for us, this may only happen with a
-          // concurrent moving GC.
-          for (auto it = class_table_.lower_bound(pair.first), end = class_table_.end();
-              it != end && it->first == pair.first; ++it) {
-            // If the class stored matches the old class, update it to the new value.
-            if (old_ref == it->second) {
-              it->second = pair.second;
-            }
-          }
-        }
-      }
-    }
-    if ((flags & kVisitRootFlagClearRootLog) != 0) {
-      new_class_roots_.clear();
-    }
-    if ((flags & kVisitRootFlagStartLoggingNewRoots) != 0) {
-      log_new_class_table_roots_ = true;
-    } else if ((flags & kVisitRootFlagStopLoggingNewRoots) != 0) {
-      log_new_class_table_roots_ = false;
-    }
-    // We deliberately ignore the class roots in the image since we
-    // handle image roots by using the MS/CMS rescanning of dirty cards.
-  }
+  VisitClassRoots(callback, arg, flags);
   callback(reinterpret_cast<mirror::Object**>(&array_iftable_), arg, 0, kRootVMInternal);
   DCHECK(array_iftable_ != nullptr);
   for (size_t i = 0; i < kFindArrayCacheSize; ++i) {
@@ -1252,7 +1254,7 @@
   DCHECK_GE(class_size, sizeof(mirror::Class));
   gc::Heap* heap = Runtime::Current()->GetHeap();
   InitializeClassVisitor visitor(class_size);
-  mirror::Object* k = (kMovingClasses) ?
+  mirror::Object* k = kMovingClasses ?
       heap->AllocObject<true>(self, java_lang_Class, class_size, visitor) :
       heap->AllocNonMovableObject<true>(self, java_lang_Class, class_size, visitor);
   if (UNLIKELY(k == nullptr)) {
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 6d96aa2..62b5ea8 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -247,8 +247,10 @@
       LOCKS_EXCLUDED(dex_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
+  void VisitClassRoots(RootCallback* callback, void* arg, VisitRootFlags flags)
+      LOCKS_EXCLUDED(Locks::classlinker_classes_lock_);
   void VisitRoots(RootCallback* callback, void* arg, VisitRootFlags flags)
-      LOCKS_EXCLUDED(Locks::classlinker_classes_lock_, dex_lock_);
+      LOCKS_EXCLUDED(dex_lock_);
 
   mirror::DexCache* FindDexCache(const DexFile& dex_file) const
       LOCKS_EXCLUDED(dex_lock_)
diff --git a/runtime/gc/accounting/space_bitmap.cc b/runtime/gc/accounting/space_bitmap.cc
index 224b33e..c0aa43e 100644
--- a/runtime/gc/accounting/space_bitmap.cc
+++ b/runtime/gc/accounting/space_bitmap.cc
@@ -52,7 +52,7 @@
   const size_t bitmap_size = ComputeBitmapSize(heap_capacity);
   std::string error_msg;
   std::unique_ptr<MemMap> mem_map(MemMap::MapAnonymous(name.c_str(), nullptr, bitmap_size,
-                                                 PROT_READ | PROT_WRITE, false, &error_msg));
+                                                       PROT_READ | PROT_WRITE, false, &error_msg));
   if (UNLIKELY(mem_map.get() == nullptr)) {
     LOG(ERROR) << "Failed to allocate bitmap " << name << ": " << error_msg;
     return nullptr;
diff --git a/runtime/gc/accounting/space_bitmap.h b/runtime/gc/accounting/space_bitmap.h
index 0849171..27fb087 100644
--- a/runtime/gc/accounting/space_bitmap.h
+++ b/runtime/gc/accounting/space_bitmap.h
@@ -42,7 +42,6 @@
 class SpaceBitmap {
  public:
   typedef void ScanCallback(mirror::Object* obj, void* finger, void* arg);
-
   typedef void SweepCallback(size_t ptr_count, mirror::Object** ptrs, void* arg);
 
   // Initialize a space bitmap so that it points to a bitmap large enough to cover a heap at
diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc
new file mode 100644
index 0000000..595dc8f
--- /dev/null
+++ b/runtime/gc/collector/mark_compact.cc
@@ -0,0 +1,634 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "mark_compact.h"
+
+#include "base/logging.h"
+#include "base/mutex-inl.h"
+#include "base/timing_logger.h"
+#include "gc/accounting/heap_bitmap-inl.h"
+#include "gc/accounting/mod_union_table.h"
+#include "gc/accounting/remembered_set.h"
+#include "gc/accounting/space_bitmap-inl.h"
+#include "gc/heap.h"
+#include "gc/reference_processor.h"
+#include "gc/space/bump_pointer_space.h"
+#include "gc/space/bump_pointer_space-inl.h"
+#include "gc/space/image_space.h"
+#include "gc/space/large_object_space.h"
+#include "gc/space/space-inl.h"
+#include "indirect_reference_table.h"
+#include "intern_table.h"
+#include "jni_internal.h"
+#include "mark_sweep-inl.h"
+#include "monitor.h"
+#include "mirror/art_field.h"
+#include "mirror/art_field-inl.h"
+#include "mirror/class-inl.h"
+#include "mirror/class_loader.h"
+#include "mirror/dex_cache.h"
+#include "mirror/reference-inl.h"
+#include "mirror/object-inl.h"
+#include "mirror/object_array.h"
+#include "mirror/object_array-inl.h"
+#include "runtime.h"
+#include "stack.h"
+#include "thread-inl.h"
+#include "thread_list.h"
+
+using ::art::mirror::Class;
+using ::art::mirror::Object;
+
+namespace art {
+namespace gc {
+namespace collector {
+
+void MarkCompact::BindBitmaps() {
+  timings_.StartSplit("BindBitmaps");
+  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
+  // Mark all of the spaces we never collect as immune.
+  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
+    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect ||
+        space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect) {
+      CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space;
+    }
+  }
+  timings_.EndSplit();
+}
+
+MarkCompact::MarkCompact(Heap* heap, const std::string& name_prefix)
+    : GarbageCollector(heap, name_prefix + (name_prefix.empty() ? "" : " ") + "mark compact"),
+      space_(nullptr), collector_name_(name_) {
+}
+
+void MarkCompact::RunPhases() {
+  Thread* self = Thread::Current();
+  InitializePhase();
+  CHECK(!Locks::mutator_lock_->IsExclusiveHeld(self));
+  {
+    ScopedPause pause(this);
+    GetHeap()->PreGcVerificationPaused(this);
+    GetHeap()->PrePauseRosAllocVerification(this);
+    MarkingPhase();
+    ReclaimPhase();
+  }
+  GetHeap()->PostGcVerification(this);
+  FinishPhase();
+}
+
+void MarkCompact::ForwardObject(mirror::Object* obj) {
+  const size_t alloc_size = RoundUp(obj->SizeOf(), space::BumpPointerSpace::kAlignment);
+  LockWord lock_word = obj->GetLockWord(false);
+  // If we have a non-empty lock word, store it and restore it later.
+  if (lock_word.GetValue() != LockWord().GetValue()) {
+    // Set the bit in the bitmap so that we know to restore it later.
+    objects_with_lockword_->Set(obj);
+    lock_words_to_restore_.push_back(lock_word);
+  }
+  obj->SetLockWord(LockWord::FromForwardingAddress(reinterpret_cast<size_t>(bump_pointer_)),
+                   false);
+  bump_pointer_ += alloc_size;
+  ++live_objects_in_space_;
+}
+
+class CalculateObjectForwardingAddressVisitor {
+ public:
+  explicit CalculateObjectForwardingAddressVisitor(MarkCompact* collector)
+      : collector_(collector) {}
+  void operator()(mirror::Object* obj) const EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_,
+                                                                      Locks::heap_bitmap_lock_) {
+    DCHECK_ALIGNED(obj, space::BumpPointerSpace::kAlignment);
+    DCHECK(collector_->IsMarked(obj));
+    collector_->ForwardObject(obj);
+  }
+
+ private:
+  MarkCompact* const collector_;
+};
+
+void MarkCompact::CalculateObjectForwardingAddresses() {
+  timings_.NewSplit(__FUNCTION__);
+  // The bump pointer in the space where the next forwarding address will be.
+  bump_pointer_ = reinterpret_cast<byte*>(space_->Begin());
+  // Visit all the marked objects in the bitmap.
+  CalculateObjectForwardingAddressVisitor visitor(this);
+  objects_before_forwarding_->VisitMarkedRange(reinterpret_cast<uintptr_t>(space_->Begin()),
+                                               reinterpret_cast<uintptr_t>(space_->End()),
+                                               visitor);
+}
+
+void MarkCompact::InitializePhase() {
+  TimingLogger::ScopedSplit split("InitializePhase", &timings_);
+  mark_stack_ = heap_->GetMarkStack();
+  DCHECK(mark_stack_ != nullptr);
+  immune_region_.Reset();
+  CHECK(space_->CanMoveObjects()) << "Attempting to compact non-movable space " << *space_;
+  // TODO: I don't think we should need the heap bitmap lock to Get the mark bitmap.
+  ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
+  mark_bitmap_ = heap_->GetMarkBitmap();
+  live_objects_in_space_ = 0;
+}
+
+void MarkCompact::ProcessReferences(Thread* self) {
+  TimingLogger::ScopedSplit split("ProcessReferences", &timings_);
+  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
+  heap_->GetReferenceProcessor()->ProcessReferences(
+      false, &timings_, clear_soft_references_, &HeapReferenceMarkedCallback, &MarkObjectCallback,
+      &ProcessMarkStackCallback, this);
+}
+
+class BitmapSetSlowPathVisitor {
+ public:
+  void operator()(const mirror::Object* obj) const {
+    // Marking a large object, make sure it's aligned as a sanity check.
+    if (!IsAligned<kPageSize>(obj)) {
+      Runtime::Current()->GetHeap()->DumpSpaces(LOG(ERROR));
+      LOG(FATAL) << obj;
+    }
+  }
+};
+
+inline void MarkCompact::MarkObject(mirror::Object* obj) {
+  if (obj == nullptr) {
+    return;
+  }
+  if (kUseBakerOrBrooksReadBarrier) {
+    // Verify all the objects have the correct forward pointer installed.
+    obj->AssertReadBarrierPointer();
+  }
+  if (immune_region_.ContainsObject(obj)) {
+    return;
+  }
+  if (objects_before_forwarding_->HasAddress(obj)) {
+    if (!objects_before_forwarding_->Set(obj)) {
+      MarkStackPush(obj);  // This object was not previously marked.
+    }
+  } else {
+    DCHECK(!space_->HasAddress(obj));
+    BitmapSetSlowPathVisitor visitor;
+    if (!mark_bitmap_->Set(obj, visitor)) {
+      // This object was not previously marked.
+      MarkStackPush(obj);
+    }
+  }
+}
+
+void MarkCompact::MarkingPhase() {
+  Thread* self = Thread::Current();
+  // Bitmap which describes which objects we have to move.
+  objects_before_forwarding_.reset(accounting::ContinuousSpaceBitmap::Create(
+      "objects before forwarding", space_->Begin(), space_->Size()));
+  // Bitmap which describes which lock words we need to restore.
+  objects_with_lockword_.reset(accounting::ContinuousSpaceBitmap::Create(
+      "objects with lock words", space_->Begin(), space_->Size()));
+  CHECK(Locks::mutator_lock_->IsExclusiveHeld(self));
+  TimingLogger::ScopedSplit split("MarkingPhase", &timings_);
+  // Assume the cleared space is already empty.
+  BindBitmaps();
+  // Process dirty cards and add dirty cards to mod-union tables.
+  heap_->ProcessCards(timings_, false);
+  // Clear the whole card table since we cannot get any additional dirty cards during the
+  // paused GC. This saves memory but only works for pause-the-world collectors.
+  timings_.NewSplit("ClearCardTable");
+  heap_->GetCardTable()->ClearCardTable();
+  // Need to do this before the checkpoint since we don't want any threads to add references to
+  // the live stack during the recursive mark.
+  timings_.NewSplit("SwapStacks");
+  if (kUseThreadLocalAllocationStack) {
+    heap_->RevokeAllThreadLocalAllocationStacks(self);
+  }
+  heap_->SwapStacks(self);
+  {
+    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
+    MarkRoots();
+    // Mark roots of immune spaces.
+    UpdateAndMarkModUnion();
+    // Recursively mark remaining objects.
+    MarkReachableObjects();
+  }
+  ProcessReferences(self);
+  {
+    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
+    SweepSystemWeaks();
+  }
+  // Revoke buffers before measuring how many objects were moved, since the TLABs need to be
+  // revoked before their objects can be properly counted.
+  RevokeAllThreadLocalBuffers();
+  timings_.StartSplit("PreSweepingGcVerification");
+  // Disabled due to an issue where we have objects in the bump pointer space which reference dead
+  // objects.
+  // heap_->PreSweepingGcVerification(this);
+  timings_.EndSplit();
+}
+
+void MarkCompact::UpdateAndMarkModUnion() {
+  for (auto& space : heap_->GetContinuousSpaces()) {
+    // If the space is immune then we need to mark the references to other spaces.
+    if (immune_region_.ContainsSpace(space)) {
+      accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
+      if (table != nullptr) {
+        // TODO: Improve naming.
+        TimingLogger::ScopedSplit split(
+            space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
+                                     "UpdateAndMarkImageModUnionTable",
+                                     &timings_);
+        table->UpdateAndMarkReferences(MarkHeapReferenceCallback, this);
+      }
+    }
+  }
+}
+
+void MarkCompact::MarkReachableObjects() {
+  timings_.StartSplit("MarkStackAsLive");
+  accounting::ObjectStack* live_stack = heap_->GetLiveStack();
+  heap_->MarkAllocStackAsLive(live_stack);
+  live_stack->Reset();
+  // Recursively process the mark stack.
+  ProcessMarkStack();
+}
+
+void MarkCompact::ReclaimPhase() {
+  TimingLogger::ScopedSplit split("ReclaimPhase", &timings_);
+  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
+  // Reclaim unmarked objects.
+  Sweep(false);
+  // Swap the live and mark bitmaps for each space which we modified. This is an
+  // optimization that enables us to not clear live bits inside of the sweep. Only swaps unbound
+  // bitmaps.
+  timings_.StartSplit("SwapBitmapsAndUnBindBitmaps");
+  SwapBitmaps();
+  GetHeap()->UnBindBitmaps();  // Unbind the live and mark bitmaps.
+  Compact();
+  timings_.EndSplit();
+}
+
+void MarkCompact::ResizeMarkStack(size_t new_size) {
+  std::vector<Object*> temp(mark_stack_->Begin(), mark_stack_->End());
+  CHECK_LE(mark_stack_->Size(), new_size);
+  mark_stack_->Resize(new_size);
+  for (const auto& obj : temp) {
+    mark_stack_->PushBack(obj);
+  }
+}
+
+inline void MarkCompact::MarkStackPush(Object* obj) {
+  if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
+    ResizeMarkStack(mark_stack_->Capacity() * 2);
+  }
+  // The object must be pushed onto the mark stack.
+  mark_stack_->PushBack(obj);
+}
+
+void MarkCompact::ProcessMarkStackCallback(void* arg) {
+  reinterpret_cast<MarkCompact*>(arg)->ProcessMarkStack();
+}
+
+mirror::Object* MarkCompact::MarkObjectCallback(mirror::Object* root, void* arg) {
+  reinterpret_cast<MarkCompact*>(arg)->MarkObject(root);
+  return root;
+}
+
+void MarkCompact::MarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>* obj_ptr,
+                                          void* arg) {
+  reinterpret_cast<MarkCompact*>(arg)->MarkObject(obj_ptr->AsMirrorPtr());
+}
+
+void MarkCompact::DelayReferenceReferentCallback(mirror::Class* klass, mirror::Reference* ref,
+                                               void* arg) {
+  reinterpret_cast<MarkCompact*>(arg)->DelayReferenceReferent(klass, ref);
+}
+
+void MarkCompact::MarkRootCallback(Object** root, void* arg, uint32_t /*thread_id*/,
+                                   RootType /*root_type*/) {
+  reinterpret_cast<MarkCompact*>(arg)->MarkObject(*root);
+}
+
+void MarkCompact::UpdateRootCallback(Object** root, void* arg, uint32_t /*thread_id*/,
+                                     RootType /*root_type*/) {
+  mirror::Object* obj = *root;
+  mirror::Object* new_obj = reinterpret_cast<MarkCompact*>(arg)->GetMarkedForwardAddress(obj);
+  if (obj != new_obj) {
+    *root = new_obj;
+    DCHECK(new_obj != nullptr);
+  }
+}
+
+class UpdateObjectReferencesVisitor {
+ public:
+  explicit UpdateObjectReferencesVisitor(MarkCompact* collector) : collector_(collector) {
+  }
+  void operator()(mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+          EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
+    collector_->UpdateObjectReferences(obj);
+  }
+
+ private:
+  MarkCompact* const collector_;
+};
+
+void MarkCompact::UpdateReferences() {
+  timings_.NewSplit(__FUNCTION__);
+  Runtime* runtime = Runtime::Current();
+  // Update roots.
+  runtime->VisitRoots(UpdateRootCallback, this);
+  // Update object references in mod union tables and spaces.
+  for (const auto& space : heap_->GetContinuousSpaces()) {
+    // If the space has a mod union table (the immune spaces do), update references through it.
+    accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
+    if (table != nullptr) {
+      // TODO: Improve naming.
+      TimingLogger::ScopedSplit split(
+          space->IsZygoteSpace() ? "UpdateZygoteModUnionTableReferences" :
+                                   "UpdateImageModUnionTableReferences",
+                                   &timings_);
+      table->UpdateAndMarkReferences(&UpdateHeapReferenceCallback, this);
+    } else {
+      // No mod union table, so we need to scan the space using a bitmap visit.
+      accounting::ContinuousSpaceBitmap* bitmap = space->GetLiveBitmap();
+      if (bitmap != nullptr) {
+        UpdateObjectReferencesVisitor visitor(this);
+        bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
+                                 reinterpret_cast<uintptr_t>(space->End()),
+                                 visitor);
+      }
+    }
+  }
+  CHECK(!kMovingClasses)
+      << "Didn't update large object classes since they are assumed to not move.";
+  // Update the system weaks; these should already have been swept.
+  runtime->SweepSystemWeaks(&MarkedForwardingAddressCallback, this);
+  // Update the objects in the bump pointer space last; these objects don't have a live bitmap.
+  UpdateObjectReferencesVisitor visitor(this);
+  objects_before_forwarding_->VisitMarkedRange(reinterpret_cast<uintptr_t>(space_->Begin()),
+                                               reinterpret_cast<uintptr_t>(space_->End()),
+                                               visitor);
+  // Update the reference processor cleared list.
+  heap_->GetReferenceProcessor()->UpdateRoots(&MarkedForwardingAddressCallback, this);
+}
+
+void MarkCompact::Compact() {
+  timings_.NewSplit(__FUNCTION__);
+  CalculateObjectForwardingAddresses();
+  UpdateReferences();
+  MoveObjects();
+  // Record how much space was freed and shrink the space to its new end.
+  int64_t objects_freed = space_->GetObjectsAllocated() - live_objects_in_space_;
+  int64_t bytes_freed = reinterpret_cast<int64_t>(space_->End()) -
+      reinterpret_cast<int64_t>(bump_pointer_);
+  timings_.NewSplit("RecordFree");
+  space_->RecordFree(objects_freed, bytes_freed);
+  RecordFree(objects_freed, bytes_freed);
+  space_->SetEnd(bump_pointer_);
+  // Need to zero out the memory we freed. TODO: Use madvise for pages.
+  memset(bump_pointer_, 0, bytes_freed);
+}
+
+// Marks all objects in the root set.
+void MarkCompact::MarkRoots() {
+  timings_.NewSplit("MarkRoots");
+  Runtime::Current()->VisitRoots(MarkRootCallback, this);
+}
+
+mirror::Object* MarkCompact::MarkedForwardingAddressCallback(mirror::Object* obj, void* arg) {
+  return reinterpret_cast<MarkCompact*>(arg)->GetMarkedForwardAddress(obj);
+}
+
+inline void MarkCompact::UpdateHeapReference(mirror::HeapReference<mirror::Object>* reference) {
+  mirror::Object* obj = reference->AsMirrorPtr();
+  if (obj != nullptr) {
+    mirror::Object* new_obj = GetMarkedForwardAddress(obj);
+    if (obj != new_obj) {
+      DCHECK(new_obj != nullptr);
+      reference->Assign(new_obj);
+    }
+  }
+}
+
+void MarkCompact::UpdateHeapReferenceCallback(mirror::HeapReference<mirror::Object>* reference,
+                                              void* arg) {
+  reinterpret_cast<MarkCompact*>(arg)->UpdateHeapReference(reference);
+}
+
+class UpdateReferenceVisitor {
+ public:
+  explicit UpdateReferenceVisitor(MarkCompact* collector) : collector_(collector) {
+  }
+
+  void operator()(Object* obj, MemberOffset offset, bool /*is_static*/) const
+      ALWAYS_INLINE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+    collector_->UpdateHeapReference(obj->GetFieldObjectReferenceAddr<kVerifyNone>(offset));
+  }
+
+  void operator()(mirror::Class* /*klass*/, mirror::Reference* ref) const
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+    collector_->UpdateHeapReference(
+        ref->GetFieldObjectReferenceAddr<kVerifyNone>(mirror::Reference::ReferentOffset()));
+  }
+
+ private:
+  MarkCompact* const collector_;
+};
+
+void MarkCompact::UpdateObjectReferences(mirror::Object* obj) {
+  UpdateReferenceVisitor visitor(this);
+  obj->VisitReferences<kMovingClasses>(visitor, visitor);
+}
+
+inline mirror::Object* MarkCompact::GetMarkedForwardAddress(mirror::Object* obj) const {
+  DCHECK(obj != nullptr);
+  if (objects_before_forwarding_->HasAddress(obj)) {
+    DCHECK(objects_before_forwarding_->Test(obj));
+    mirror::Object* ret =
+        reinterpret_cast<mirror::Object*>(obj->GetLockWord(false).ForwardingAddress());
+    DCHECK(ret != nullptr);
+    return ret;
+  }
+  DCHECK(!space_->HasAddress(obj));
+  DCHECK(IsMarked(obj));
+  return obj;
+}
+
+inline bool MarkCompact::IsMarked(const Object* object) const {
+  if (immune_region_.ContainsObject(object)) {
+    return true;
+  }
+  if (objects_before_forwarding_->HasAddress(object)) {
+    return objects_before_forwarding_->Test(object);
+  }
+  return mark_bitmap_->Test(object);
+}
+
+mirror::Object* MarkCompact::IsMarkedCallback(mirror::Object* object, void* arg) {
+  return reinterpret_cast<MarkCompact*>(arg)->IsMarked(object) ? object : nullptr;
+}
+
+bool MarkCompact::HeapReferenceMarkedCallback(mirror::HeapReference<mirror::Object>* ref_ptr,
+                                              void* arg) {
+  // Side effect free since we call this before ever moving objects.
+  return reinterpret_cast<MarkCompact*>(arg)->IsMarked(ref_ptr->AsMirrorPtr());
+}
+
+void MarkCompact::SweepSystemWeaks() {
+  timings_.StartSplit("SweepSystemWeaks");
+  Runtime::Current()->SweepSystemWeaks(IsMarkedCallback, this);
+  timings_.EndSplit();
+}
+
+bool MarkCompact::ShouldSweepSpace(space::ContinuousSpace* space) const {
+  return space != space_ && !immune_region_.ContainsSpace(space);
+}
+
+class MoveObjectVisitor {
+ public:
+  explicit MoveObjectVisitor(MarkCompact* collector) : collector_(collector) {
+  }
+  void operator()(mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+          EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
+    collector_->MoveObject(obj, obj->SizeOf());
+  }
+
+ private:
+  MarkCompact* const collector_;
+};
+
+void MarkCompact::MoveObject(mirror::Object* obj, size_t len) {
+  // Look at the forwarding address stored in the lock word to know where to copy.
+  DCHECK(space_->HasAddress(obj)) << obj;
+  uintptr_t dest_addr = obj->GetLockWord(false).ForwardingAddress();
+  mirror::Object* dest_obj = reinterpret_cast<mirror::Object*>(dest_addr);
+  DCHECK(space_->HasAddress(dest_obj)) << dest_obj;
+  // Use memmove since there may be overlap.
+  memmove(reinterpret_cast<void*>(dest_addr), reinterpret_cast<const void*>(obj), len);
+  // Restore the saved lock word if needed.
+  LockWord lock_word;
+  if (UNLIKELY(objects_with_lockword_->Test(obj))) {
+    lock_word = lock_words_to_restore_.front();
+    lock_words_to_restore_.pop_front();
+  }
+  dest_obj->SetLockWord(lock_word, false);
+}
+
+void MarkCompact::MoveObjects() {
+  timings_.NewSplit(__FUNCTION__);
+  // Move the objects in the before forwarding bitmap.
+  MoveObjectVisitor visitor(this);
+  objects_before_forwarding_->VisitMarkedRange(reinterpret_cast<uintptr_t>(space_->Begin()),
+                                               reinterpret_cast<uintptr_t>(space_->End()),
+                                               visitor);
+  CHECK(lock_words_to_restore_.empty());
+}
+
+void MarkCompact::Sweep(bool swap_bitmaps) {
+  DCHECK(mark_stack_->IsEmpty());
+  TimingLogger::ScopedSplit split("Sweep", &timings_);
+  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
+    if (space->IsContinuousMemMapAllocSpace()) {
+      space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
+      if (!ShouldSweepSpace(alloc_space)) {
+        continue;
+      }
+      TimingLogger::ScopedSplit split(
+          alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", &timings_);
+      size_t freed_objects = 0;
+      size_t freed_bytes = 0;
+      alloc_space->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
+      RecordFree(freed_objects, freed_bytes);
+    }
+  }
+  SweepLargeObjects(swap_bitmaps);
+}
+
+void MarkCompact::SweepLargeObjects(bool swap_bitmaps) {
+  TimingLogger::ScopedSplit split("SweepLargeObjects", &timings_);
+  size_t freed_objects = 0;
+  size_t freed_bytes = 0;
+  heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
+  RecordFreeLargeObjects(freed_objects, freed_bytes);
+}
+
+// Process the "referent" field in a java.lang.ref.Reference.  If the referent has not yet been
+// marked, put it on the appropriate list in the heap for later processing.
+void MarkCompact::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) {
+  heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, reference,
+                                                         &HeapReferenceMarkedCallback, this);
+}
+
+class MarkCompactMarkObjectVisitor {
+ public:
+  explicit MarkCompactMarkObjectVisitor(MarkCompact* collector) : collector_(collector) {
+  }
+
+  void operator()(Object* obj, MemberOffset offset, bool /*is_static*/) const ALWAYS_INLINE
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+    // Object was already verified when we scanned it.
+    collector_->MarkObject(obj->GetFieldObject<mirror::Object, kVerifyNone>(offset));
+  }
+
+  void operator()(mirror::Class* klass, mirror::Reference* ref) const
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+    collector_->DelayReferenceReferent(klass, ref);
+  }
+
+ private:
+  MarkCompact* const collector_;
+};
+
+// Visit all of the references of an object and mark them.
+void MarkCompact::ScanObject(Object* obj) {
+  MarkCompactMarkObjectVisitor visitor(this);
+  obj->VisitReferences<kMovingClasses>(visitor, visitor);
+}
+
+// Scan anything that's on the mark stack.
+void MarkCompact::ProcessMarkStack() {
+  timings_.StartSplit("ProcessMarkStack");
+  while (!mark_stack_->IsEmpty()) {
+    Object* obj = mark_stack_->PopBack();
+    DCHECK(obj != nullptr);
+    ScanObject(obj);
+  }
+  timings_.EndSplit();
+}
+
+void MarkCompact::SetSpace(space::BumpPointerSpace* space) {
+  DCHECK(space != nullptr);
+  space_ = space;
+}
+
+void MarkCompact::FinishPhase() {
+  TimingLogger::ScopedSplit split("FinishPhase", &timings_);
+  space_ = nullptr;
+  CHECK(mark_stack_->IsEmpty());
+  mark_stack_->Reset();
+  // Clear all of the spaces' mark bitmaps.
+  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
+  heap_->ClearMarkedObjects();
+  // Release our bitmaps.
+  objects_before_forwarding_.reset(nullptr);
+  objects_with_lockword_.reset(nullptr);
+}
+
+void MarkCompact::RevokeAllThreadLocalBuffers() {
+  timings_.StartSplit("(Paused)RevokeAllThreadLocalBuffers");
+  GetHeap()->RevokeAllThreadLocalBuffers();
+  timings_.EndSplit();
+}
+
+}  // namespace collector
+}  // namespace gc
+}  // namespace art
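
The compaction performed by MarkCompact::Compact() above is a three pass algorithm: compute a
forwarding address for every live object and stash it in the object's lock word, rewrite every
reference (roots, mod-union tables, bitmap-visited spaces, system weaks, and the cleared reference
list) to point at the forwarding addresses, then slide the objects down and restore any saved lock
words. A minimal standalone sketch of that idea follows; the types and helpers are invented for
illustration and are not the ART classes.

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

// Toy stand-in for a heap object: the real collector reuses the object's lock
// word to hold the forwarding address between the passes.
struct ToyObject {
  uintptr_t forwarding_address;
  size_t size;  // Size in bytes, already rounded up to the space alignment.
};

// Pass 1: walk the live objects in address order and hand each the next
// bump-pointer address (cf. MarkCompact::CalculateObjectForwardingAddresses).
uintptr_t CalculateForwardingAddresses(const std::vector<ToyObject*>& live_objects,
                                       uintptr_t space_begin) {
  uintptr_t bump_pointer = space_begin;
  for (ToyObject* obj : live_objects) {
    obj->forwarding_address = bump_pointer;
    bump_pointer += obj->size;
  }
  return bump_pointer;  // Becomes the new end of the space.
}

// Pass 2 (not shown): rewrite every reference so that it points at the
// referent's forwarding_address (cf. MarkCompact::UpdateReferences).

// Pass 3: slide each object down to its forwarding address; memmove is used
// because source and destination may overlap (cf. MarkCompact::MoveObjects).
void MoveObjects(const std::vector<ToyObject*>& live_objects) {
  for (ToyObject* obj : live_objects) {
    std::memmove(reinterpret_cast<void*>(obj->forwarding_address), obj, obj->size);
  }
}
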
diff --git a/runtime/gc/collector/mark_compact.h b/runtime/gc/collector/mark_compact.h
new file mode 100644
index 0000000..25cfe0f
--- /dev/null
+++ b/runtime/gc/collector/mark_compact.h
@@ -0,0 +1,255 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_GC_COLLECTOR_MARK_COMPACT_H_
+#define ART_RUNTIME_GC_COLLECTOR_MARK_COMPACT_H_
+
+#include <deque>
+#include <memory>  // For unique_ptr.
+
+#include "atomic.h"
+#include "base/macros.h"
+#include "base/mutex.h"
+#include "garbage_collector.h"
+#include "gc/accounting/heap_bitmap.h"
+#include "immune_region.h"
+#include "lock_word.h"
+#include "object_callbacks.h"
+#include "offsets.h"
+
+namespace art {
+
+class Thread;
+
+namespace mirror {
+  class Class;
+  class Object;
+}  // namespace mirror
+
+namespace gc {
+
+class Heap;
+
+namespace accounting {
+  template <typename T> class AtomicStack;
+  typedef AtomicStack<mirror::Object*> ObjectStack;
+}  // namespace accounting
+
+namespace space {
+  class ContinuousMemMapAllocSpace;
+  class ContinuousSpace;
+}  // namespace space
+
+namespace collector {
+
+class MarkCompact : public GarbageCollector {
+ public:
+  explicit MarkCompact(Heap* heap, const std::string& name_prefix = "");
+  ~MarkCompact() {}
+
+  virtual void RunPhases() OVERRIDE NO_THREAD_SAFETY_ANALYSIS;
+  void InitializePhase();
+  void MarkingPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
+      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
+  void ReclaimPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
+      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
+  void FinishPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void MarkReachableObjects()
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+  virtual GcType GetGcType() const OVERRIDE {
+    return kGcTypePartial;
+  }
+  virtual CollectorType GetCollectorType() const OVERRIDE {
+    return kCollectorTypeMC;
+  }
+
+  // Sets which space we will be compacting objects in.
+  void SetSpace(space::BumpPointerSpace* space);
+
+  // Initializes internal structures.
+  void Init();
+
+  // Find the default mark bitmap.
+  void FindDefaultMarkBitmap();
+
+  void ScanObject(mirror::Object* obj)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+  // Marks the root set at the start of a garbage collection.
+  void MarkRoots()
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+  // Bind the live bits to the mark bits of bitmaps for spaces that are never collected, i.e.
+  // the image. Mark that portion of the heap as immune.
+  void BindBitmaps() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
+
+  void UnBindBitmaps()
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+  void ProcessReferences(Thread* self) EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  // Sweeps unmarked objects to complete the garbage collection.
+  void Sweep(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+  // Sweeps unmarked objects in the large object space to complete the garbage collection.
+  void SweepLargeObjects(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+  void SweepSystemWeaks()
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+  static void MarkRootCallback(mirror::Object** root, void* arg, uint32_t /*tid*/,
+                               RootType /*root_type*/)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+  static mirror::Object* MarkObjectCallback(mirror::Object* root, void* arg)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+  static void MarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>* obj_ptr, void* arg)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+  static bool HeapReferenceMarkedCallback(mirror::HeapReference<mirror::Object>* ref_ptr,
+                                          void* arg)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+  static void ProcessMarkStackCallback(void* arg)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+
+  static void DelayReferenceReferentCallback(mirror::Class* klass, mirror::Reference* ref,
+                                             void* arg)
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+  // Schedules an unmarked object for reference processing.
+  void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference)
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+ protected:
+  // Returns the forwarding address of a marked object; for objects outside of the space being
+  // compacted this is the object itself.
+  mirror::Object* GetMarkedForwardAddress(mirror::Object* object) const
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+  static mirror::Object* MarkedForwardingAddressCallback(mirror::Object* object, void* arg)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+  // Marks or unmarks a large object based on whether or not set is true. If set is true, then we
+  // mark, otherwise we unmark.
+  bool MarkLargeObject(const mirror::Object* obj)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  // Expand the mark stack to the given new size.
+  void ResizeMarkStack(size_t new_size);
+
+  // Returns true if we should sweep the space.
+  bool ShouldSweepSpace(space::ContinuousSpace* space) const;
+
+  // Push an object onto the mark stack.
+  void MarkStackPush(mirror::Object* obj);
+
+  void UpdateAndMarkModUnion()
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  // Recursively blackens objects on the mark stack.
+  void ProcessMarkStack()
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+
+  // 3 pass mark compact approach: calculate forwarding addresses, update references, then move
+  // the objects.
+  void Compact() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+  // Calculate the forwarding address of objects marked as "live" in the objects_before_forwarding
+  // bitmap.
+  void CalculateObjectForwardingAddresses()
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+  // Update the references of objects by using the forwarding addresses.
+  void UpdateReferences() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+  static void UpdateRootCallback(mirror::Object** root, void* arg, uint32_t /*thread_id*/,
+                                 RootType /*root_type*/)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+  // Move objects and restore lock words.
+  void MoveObjects() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+  // Move a single object to its forward address.
+  void MoveObject(mirror::Object* obj, size_t len) EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+  // Mark a single object.
+  void MarkObject(mirror::Object* obj) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
+                                                                Locks::mutator_lock_);
+  bool IsMarked(const mirror::Object* obj) const
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+  static mirror::Object* IsMarkedCallback(mirror::Object* object, void* arg)
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+  void ForwardObject(mirror::Object* obj) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
+                                                                   Locks::mutator_lock_);
+  // Update a single heap reference.
+  void UpdateHeapReference(mirror::HeapReference<mirror::Object>* reference)
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+  static void UpdateHeapReferenceCallback(mirror::HeapReference<mirror::Object>* reference,
+                                          void* arg)
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+  // Update all of the references of a single object.
+  void UpdateObjectReferences(mirror::Object* obj)
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  // Revoke all the thread-local buffers.
+  void RevokeAllThreadLocalBuffers();
+
+  accounting::ObjectStack* mark_stack_;
+
+  // Immune region; every object inside the immune region is assumed to be marked.
+  ImmuneRegion immune_region_;
+
+  // Bump pointer space which we are collecting.
+  space::BumpPointerSpace* space_;
+  // Cached mark bitmap as an optimization.
+  accounting::HeapBitmap* mark_bitmap_;
+
+  // The name of the collector.
+  std::string collector_name_;
+
+  // The bump pointer in the space where the next forwarding address will be.
+  byte* bump_pointer_;
+  // How many live objects we have in the space.
+  size_t live_objects_in_space_;
+
+  // Bitmap which describes which objects we have to move, need to do / 2 so that we can handle
+  // objects which are only 8 bytes.
+  std::unique_ptr<accounting::ContinuousSpaceBitmap> objects_before_forwarding_;
+  // Bitmap which describes which lock words we need to restore.
+  std::unique_ptr<accounting::ContinuousSpaceBitmap> objects_with_lockword_;
+  // Which lock words we need to restore as we are moving objects.
+  std::deque<LockWord> lock_words_to_restore_;
+
+ private:
+  friend class BitmapSetSlowPathVisitor;
+  friend class CalculateObjectForwardingAddressVisitor;
+  friend class MarkCompactMarkObjectVisitor;
+  friend class MoveObjectVisitor;
+  friend class UpdateObjectReferencesVisitor;
+  friend class UpdateReferenceVisitor;
+  DISALLOW_COPY_AND_ASSIGN(MarkCompact);
+};
+
+}  // namespace collector
+}  // namespace gc
+}  // namespace art
+
+#endif  // ART_RUNTIME_GC_COLLECTOR_MARK_COMPACT_H_
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index c72913a..fbb349e 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -43,10 +43,7 @@
 #include "thread-inl.h"
 #include "thread_list.h"
 
-using ::art::mirror::ArtField;
-using ::art::mirror::Class;
 using ::art::mirror::Object;
-using ::art::mirror::ObjectArray;
 
 namespace art {
 namespace gc {
@@ -1272,9 +1269,7 @@
   timings_.EndSplit();
 }
 
-inline bool MarkSweep::IsMarked(const Object* object) const
-    SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
-  DCHECK(object != nullptr);
+inline bool MarkSweep::IsMarked(const Object* object) const {
   if (immune_region_.ContainsObject(object)) {
     return true;
   }
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index a44d8a1..2780099 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -215,7 +215,8 @@
 
  protected:
   // Returns true if the object has its bit set in the mark bitmap.
-  bool IsMarked(const mirror::Object* object) const;
+  bool IsMarked(const mirror::Object* object) const
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
   static mirror::Object* IsMarkedCallback(mirror::Object* object, void* arg)
       SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index badf8b3..54e77a7 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -41,22 +41,12 @@
 #include "jni_internal.h"
 #include "mark_sweep-inl.h"
 #include "monitor.h"
-#include "mirror/art_field.h"
-#include "mirror/art_field-inl.h"
-#include "mirror/class-inl.h"
-#include "mirror/class_loader.h"
-#include "mirror/dex_cache.h"
 #include "mirror/reference-inl.h"
 #include "mirror/object-inl.h"
-#include "mirror/object_array.h"
-#include "mirror/object_array-inl.h"
 #include "runtime.h"
-#include "stack.h"
 #include "thread-inl.h"
 #include "thread_list.h"
-#include "verifier/method_verifier.h"
 
-using ::art::mirror::Class;
 using ::art::mirror::Object;
 
 namespace art {
@@ -788,7 +778,7 @@
     // Already forwarded, must be marked.
     return obj;
   }
-  return heap_->GetMarkBitmap()->Test(obj) ? obj : nullptr;
+  return mark_bitmap_->Test(obj) ? obj : nullptr;
 }
 
 void SemiSpace::SetToSpace(space::ContinuousMemMapAllocSpace* to_space) {
diff --git a/runtime/gc/collector_type.h b/runtime/gc/collector_type.h
index c0a6b6a..530a3c9 100644
--- a/runtime/gc/collector_type.h
+++ b/runtime/gc/collector_type.h
@@ -34,6 +34,8 @@
   kCollectorTypeSS,
   // A generational variant of kCollectorTypeSS.
   kCollectorTypeGSS,
+  // Mark compact collector.
+  kCollectorTypeMC,
   // Heap trimming collector, doesn't do any actual collecting.
   kCollectorTypeHeapTrim,
   // A (mostly) concurrent copying collector.
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index e6a5380..1c94d6f 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -36,6 +36,7 @@
 #include "gc/accounting/remembered_set.h"
 #include "gc/accounting/space_bitmap-inl.h"
 #include "gc/collector/concurrent_copying.h"
+#include "gc/collector/mark_compact.h"
 #include "gc/collector/mark_sweep-inl.h"
 #include "gc/collector/partial_mark_sweep.h"
 #include "gc/collector/semi_space.h"
@@ -331,9 +332,10 @@
     semi_space_collector_ = new collector::SemiSpace(this, generational,
                                                      generational ? "generational" : "");
     garbage_collectors_.push_back(semi_space_collector_);
-
     concurrent_copying_collector_ = new collector::ConcurrentCopying(this);
     garbage_collectors_.push_back(concurrent_copying_collector_);
+    mark_compact_collector_ = new collector::MarkCompact(this);
+    garbage_collectors_.push_back(mark_compact_collector_);
   }
 
   if (GetImageSpace() != nullptr && main_space_ != nullptr) {
@@ -1341,8 +1343,9 @@
              << " -> " << static_cast<int>(collector_type);
   uint64_t start_time = NanoTime();
   uint32_t before_allocated = num_bytes_allocated_.LoadSequentiallyConsistent();
-  ThreadList* tl = Runtime::Current()->GetThreadList();
-  Thread* self = Thread::Current();
+  Runtime* const runtime = Runtime::Current();
+  ThreadList* const tl = runtime->GetThreadList();
+  Thread* const self = Thread::Current();
   ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
   Locks::mutator_lock_->AssertNotHeld(self);
   const bool copying_transition =
@@ -1371,7 +1374,7 @@
     }
     usleep(1000);
   }
-  if (Runtime::Current()->IsShuttingDown(self)) {
+  if (runtime->IsShuttingDown(self)) {
     // Don't allow heap transitions to happen if the runtime is shutting down since these can
     // cause objects to get finalized.
     FinishGC(self, collector::kGcTypeNone);
@@ -1432,10 +1435,15 @@
 void Heap::ChangeCollector(CollectorType collector_type) {
   // TODO: Only do this with all mutators suspended to avoid races.
   if (collector_type != collector_type_) {
+    if (collector_type == kCollectorTypeMC) {
+      // Don't allow mark compact unless support is compiled in.
+      CHECK(kMarkCompactSupport);
+    }
     collector_type_ = collector_type;
     gc_plan_.clear();
     switch (collector_type_) {
       case kCollectorTypeCC:  // Fall-through.
+      case kCollectorTypeMC:  // Fall-through.
       case kCollectorTypeSS:  // Fall-through.
       case kCollectorTypeGSS: {
         gc_plan_.push_back(collector::kGcTypeFull);
@@ -1722,13 +1730,17 @@
 void Heap::Compact(space::ContinuousMemMapAllocSpace* target_space,
                    space::ContinuousMemMapAllocSpace* source_space) {
   CHECK(kMovingCollector);
-  CHECK_NE(target_space, source_space) << "In-place compaction currently unsupported";
   if (target_space != source_space) {
     // Don't swap spaces since this isn't a typical semi space collection.
     semi_space_collector_->SetSwapSemiSpaces(false);
     semi_space_collector_->SetFromSpace(source_space);
     semi_space_collector_->SetToSpace(target_space);
     semi_space_collector_->Run(kGcCauseCollectorTransition, false);
+  } else {
+    CHECK(target_space->IsBumpPointerSpace())
+        << "In-place compaction is only supported for bump pointer spaces";
+    mark_compact_collector_->SetSpace(target_space->AsBumpPointerSpace());
+    mark_compact_collector_->Run(kGcCauseCollectorTransition, false);
   }
 }
 
@@ -1792,21 +1804,30 @@
   if (compacting_gc) {
     DCHECK(current_allocator_ == kAllocatorTypeBumpPointer ||
            current_allocator_ == kAllocatorTypeTLAB);
-    if (collector_type_ == kCollectorTypeSS || collector_type_ == kCollectorTypeGSS) {
-      gc_type = semi_space_collector_->GetGcType();
-      semi_space_collector_->SetFromSpace(bump_pointer_space_);
-      semi_space_collector_->SetToSpace(temp_space_);
-      collector = semi_space_collector_;
-      semi_space_collector_->SetSwapSemiSpaces(true);
-    } else if (collector_type_ == kCollectorTypeCC) {
-      gc_type = concurrent_copying_collector_->GetGcType();
-      collector = concurrent_copying_collector_;
-    } else {
-      LOG(FATAL) << "Unreachable - invalid collector type " << static_cast<size_t>(collector_type_);
+    switch (collector_type_) {
+      case kCollectorTypeSS:
+        // Fall-through.
+      case kCollectorTypeGSS:
+        semi_space_collector_->SetFromSpace(bump_pointer_space_);
+        semi_space_collector_->SetToSpace(temp_space_);
+        semi_space_collector_->SetSwapSemiSpaces(true);
+        collector = semi_space_collector_;
+        break;
+      case kCollectorTypeCC:
+        collector = concurrent_copying_collector_;
+        break;
+      case kCollectorTypeMC:
+        mark_compact_collector_->SetSpace(bump_pointer_space_);
+        collector = mark_compact_collector_;
+        break;
+      default:
+        LOG(FATAL) << "Invalid collector type " << static_cast<size_t>(collector_type_);
     }
-    temp_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
-    CHECK(temp_space_->IsEmpty());
-    gc_type = collector::kGcTypeFull;
+    if (collector != mark_compact_collector_) {
+      temp_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
+      CHECK(temp_space_->IsEmpty());
+    }
+    gc_type = collector::kGcTypeFull;  // TODO: Don't hard code this in.
   } else if (current_allocator_ == kAllocatorTypeRosAlloc ||
       current_allocator_ == kAllocatorTypeDlMalloc) {
     collector = FindCollectorByGcType(gc_type);
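
For context on the hunks above: when the transition targets the same space (the new in-place branch
of Heap::Compact()), the heap simply points the collector at the bump pointer space and runs it. A
hedged sketch of that call sequence, using the names from the diff; the wrapper function itself is
hypothetical and assumes the usual ART runtime headers:

// Hypothetical wrapper, not part of the change: mirrors the in-place branch of
// Heap::Compact() and the kCollectorTypeMC case of CollectGarbageInternal().
void CompactBumpPointerSpaceInPlace(art::gc::collector::MarkCompact* collector,
                                    art::gc::space::BumpPointerSpace* space) {
  collector->SetSpace(space);  // The space that will be compacted in place.
  // Pauses the world, marks, computes forwarding addresses, updates references
  // and moves the objects (MarkCompact::RunPhases()).
  collector->Run(art::gc::kGcCauseCollectorTransition, /*clear_soft_references=*/false);
}
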
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 9b49373..368a20c 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -66,6 +66,7 @@
 namespace collector {
   class ConcurrentCopying;
   class GarbageCollector;
+  class MarkCompact;
   class MarkSweep;
   class SemiSpace;
 }  // namespace collector
@@ -573,7 +574,7 @@
   }
   static bool IsMovingGc(CollectorType collector_type) {
     return collector_type == kCollectorTypeSS || collector_type == kCollectorTypeGSS ||
-        collector_type == kCollectorTypeCC;
+        collector_type == kCollectorTypeCC || collector_type == kCollectorTypeMC;
   }
   bool ShouldAllocLargeObject(mirror::Class* c, size_t byte_count) const
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -952,12 +953,14 @@
 
   std::vector<collector::GarbageCollector*> garbage_collectors_;
   collector::SemiSpace* semi_space_collector_;
+  collector::MarkCompact* mark_compact_collector_;
   collector::ConcurrentCopying* concurrent_copying_collector_;
 
   const bool running_on_valgrind_;
   const bool use_tlab_;
 
   friend class collector::GarbageCollector;
+  friend class collector::MarkCompact;
   friend class collector::MarkSweep;
   friend class collector::SemiSpace;
   friend class ReferenceQueue;
diff --git a/runtime/gc/reference_processor.cc b/runtime/gc/reference_processor.cc
index 3ff9889..292781e 100644
--- a/runtime/gc/reference_processor.cc
+++ b/runtime/gc/reference_processor.cc
@@ -205,6 +205,10 @@
   }
 }
 
+void ReferenceProcessor::UpdateRoots(IsMarkedCallback* callback, void* arg) {
+  cleared_references_.UpdateRoots(callback, arg);
+}
+
 void ReferenceProcessor::EnqueueClearedReferences(Thread* self) {
   Locks::mutator_lock_->AssertNotHeld(self);
   if (!cleared_references_.IsEmpty()) {
diff --git a/runtime/gc/reference_processor.h b/runtime/gc/reference_processor.h
index ff7da52..2771ea8 100644
--- a/runtime/gc/reference_processor.h
+++ b/runtime/gc/reference_processor.h
@@ -59,6 +59,8 @@
   void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref,
                               IsHeapReferenceMarkedCallback* is_marked_callback, void* arg)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void UpdateRoots(IsMarkedCallback* callback, void* arg)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
 
  private:
   class ProcessReferencesArgs {
diff --git a/runtime/gc/reference_queue.cc b/runtime/gc/reference_queue.cc
index 19476e6..c3931e8 100644
--- a/runtime/gc/reference_queue.cc
+++ b/runtime/gc/reference_queue.cc
@@ -163,5 +163,11 @@
   } while (LIKELY(ref != head));
 }
 
+void ReferenceQueue::UpdateRoots(IsMarkedCallback* callback, void* arg) {
+  if (list_ != nullptr) {
+    list_ = down_cast<mirror::Reference*>(callback(list_, arg));
+  }
+}
+
 }  // namespace gc
 }  // namespace art
diff --git a/runtime/gc/reference_queue.h b/runtime/gc/reference_queue.h
index 8ef0d20..cd814bb 100644
--- a/runtime/gc/reference_queue.h
+++ b/runtime/gc/reference_queue.h
@@ -83,12 +83,16 @@
   mirror::Reference* GetList() {
     return list_;
   }
+  // Visits list_; currently only used by the mark compact GC.
+  void UpdateRoots(IsMarkedCallback* callback, void* arg)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
  private:
   // Lock, used for parallel GC reference enqueuing. It allows for multiple threads simultaneously
   // calling AtomicEnqueueIfNotEnqueued.
   Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
-  // The actual reference list. Not a root since it will be nullptr when the GC is not running.
+  // The actual reference list. Only a root for the mark compact GC since it will be null for other
+  // GC types.
   mirror::Reference* list_;
 };
 
diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h
index 9e61f30..feee34f 100644
--- a/runtime/gc/space/bump_pointer_space.h
+++ b/runtime/gc/space/bump_pointer_space.h
@@ -145,6 +145,12 @@
 
   accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() OVERRIDE;
 
+  // Record objects / bytes freed.
+  void RecordFree(int32_t objects, int32_t bytes) {
+    objects_allocated_.FetchAndSubSequentiallyConsistent(objects);
+    bytes_allocated_.FetchAndSubSequentiallyConsistent(bytes);
+  }
+
   // Object alignment within the space.
   static constexpr size_t kAlignment = 8;
 
diff --git a/runtime/globals.h b/runtime/globals.h
index 58c2118..3a906f1 100644
--- a/runtime/globals.h
+++ b/runtime/globals.h
@@ -74,8 +74,11 @@
 
 // Garbage collector constants.
 static constexpr bool kMovingCollector = true && !kUsePortableCompiler;
+static constexpr bool kMarkCompactSupport = false && kMovingCollector;
+// True if we allow moving field arrays; this can cause complications with mark compact.
+static constexpr bool kMoveFieldArrays = !kMarkCompactSupport;
 // True if we allow moving classes.
-static constexpr bool kMovingClasses = true;
+static constexpr bool kMovingClasses = !kMarkCompactSupport;
 // True if we allow moving fields.
 static constexpr bool kMovingFields = false;
 // True if we allow moving methods.
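
The new compile-time switches above are consumed at allocation sites rather than at runtime;
ClassLinker::AllocArtFieldArray() earlier in this change is the first such site. A minimal sketch
of the pattern, with an invented helper name (Heap::GetCurrentAllocator() and
Heap::GetCurrentNonMovingAllocator() are the calls used in the diff):

// Illustrative only: choose a movable or non-movable allocator based on the
// compile-time policy, as AllocArtFieldArray() does in class_linker-inl.h.
inline gc::AllocatorType ChooseFieldArrayAllocator(gc::Heap* heap) {
  return kMoveFieldArrays ? heap->GetCurrentAllocator()
                          : heap->GetCurrentNonMovingAllocator();
}
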
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index 512a66f..6205f70 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -505,8 +505,10 @@
 
 template <bool kVisitClass, typename Visitor>
 inline void Class::VisitReferences(mirror::Class* klass, const Visitor& visitor) {
-  VisitInstanceFieldsReferences<kVisitClass>(klass, visitor);
+  // Visit the static fields first so that we don't overwrite the SFields / IFields instance
+  // fields.
   VisitStaticFieldsReferences<kVisitClass>(this, visitor);
+  VisitInstanceFieldsReferences<kVisitClass>(klass, visitor);
 }
 
 inline bool Class::IsArtFieldClass() const {
diff --git a/runtime/mirror/iftable-inl.h b/runtime/mirror/iftable-inl.h
index ec3e514..3f20bf4 100644
--- a/runtime/mirror/iftable-inl.h
+++ b/runtime/mirror/iftable-inl.h
@@ -25,8 +25,9 @@
 inline void IfTable::SetInterface(int32_t i, Class* interface) {
   DCHECK(interface != NULL);
   DCHECK(interface->IsInterface());
-  DCHECK(Get((i * kMax) + kInterface) == NULL);
-  Set<false>((i * kMax) + kInterface, interface);
+  const size_t idx = i * kMax + kInterface;
+  DCHECK_EQ(Get(idx), static_cast<Object*>(nullptr));
+  Set<false>(idx, interface);
 }
 
 }  // namespace mirror
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index 567ce3e..15ecd3c 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -26,6 +26,7 @@
 #include "class.h"
 #include "lock_word-inl.h"
 #include "monitor.h"
+#include "object_array-inl.h"
 #include "read_barrier-inl.h"
 #include "runtime.h"
 #include "reference.h"
@@ -667,10 +668,9 @@
         mirror::ArtField* field = kIsStatic ? klass->GetStaticField(i) : klass->GetInstanceField(i);
         MemberOffset field_offset = field->GetOffset();
         // TODO: Do a simpler check?
-        if (!kVisitClass && UNLIKELY(field_offset.Uint32Value() == ClassOffset().Uint32Value())) {
-          continue;
+        if (kVisitClass || field_offset.Uint32Value() != ClassOffset().Uint32Value()) {
+          visitor(this, field_offset, kIsStatic);
         }
-        visitor(this, field_offset, kIsStatic);
       }
     }
   }
@@ -693,18 +693,16 @@
 inline void Object::VisitReferences(const Visitor& visitor,
                                     const JavaLangRefVisitor& ref_visitor) {
   mirror::Class* klass = GetClass<kVerifyFlags>();
-  if (klass->IsVariableSize()) {
-    if (klass->IsClassClass()) {
-      AsClass<kVerifyNone>()->VisitReferences<kVisitClass>(klass, visitor);
-    } else {
-      DCHECK(klass->IsArrayClass<kVerifyFlags>());
-      if (klass->IsObjectArrayClass<kVerifyNone>()) {
-        AsObjectArray<mirror::Object, kVerifyNone>()->VisitReferences<kVisitClass>(visitor);
-      } else if (kVisitClass) {
-        visitor(this, ClassOffset(), false);
-      }
+  if (klass == Class::GetJavaLangClass()) {
+    AsClass<kVerifyNone>()->VisitReferences<kVisitClass>(klass, visitor);
+  } else if (klass->IsArrayClass()) {
+    if (klass->IsObjectArrayClass<kVerifyNone>()) {
+      AsObjectArray<mirror::Object, kVerifyNone>()->VisitReferences<kVisitClass>(visitor);
+    } else if (kVisitClass) {
+      visitor(this, ClassOffset(), false);
     }
   } else {
+    DCHECK(!klass->IsVariableSize());
     VisitInstanceFieldsReferences<kVisitClass>(klass, visitor);
     if (UNLIKELY(klass->IsReferenceClass<kVerifyNone>())) {
       ref_visitor(klass, AsReference());
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index ef20d1c..7cdd8f5 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -113,6 +113,8 @@
     return gc::kCollectorTypeGSS;
   } else if (option == "CC") {
     return gc::kCollectorTypeCC;
+  } else if (option == "MC") {
+    return gc::kCollectorTypeMC;
   } else {
     return gc::kCollectorTypeNone;
   }