Merge "Remove unused DexFile UTF-16-based string lookup"
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index b483e5f..be27869 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -89,43 +89,59 @@
 ART_TEST_HOST_GTEST_MultiDexUncompressed_DEX := $(basename $(ART_TEST_HOST_GTEST_MultiDex_DEX))Uncompressed$(suffix $(ART_TEST_HOST_GTEST_MultiDex_DEX))
 ART_TEST_TARGET_GTEST_MultiDexUncompressed_DEX := $(basename $(ART_TEST_TARGET_GTEST_MultiDex_DEX))Uncompressed$(suffix $(ART_TEST_TARGET_GTEST_MultiDex_DEX))
 
+ifdef ART_TEST_HOST_GTEST_Main_DEX
 $(ART_TEST_HOST_GTEST_MainStripped_DEX): $(ART_TEST_HOST_GTEST_Main_DEX)
 	cp $< $@
 	$(call dexpreopt-remove-classes.dex,$@)
+endif
 
+ifdef ART_TEST_TARGET_GTEST_Main_DEX
 $(ART_TEST_TARGET_GTEST_MainStripped_DEX): $(ART_TEST_TARGET_GTEST_Main_DEX)
 	cp $< $@
 	$(call dexpreopt-remove-classes.dex,$@)
+endif
 
+ifdef ART_TEST_HOST_GTEST_Main_DEX
 $(ART_TEST_HOST_GTEST_MainUncompressed_DEX): $(ART_TEST_HOST_GTEST_Main_DEX) $(ZIPALIGN)
 	cp $< $@
 	$(call uncompress-dexs, $@)
 	$(call align-package, $@)
+endif
 
+ifdef ART_TEST_TARGET_GTEST_Main_DEX
 $(ART_TEST_TARGET_GTEST_MainUncompressed_DEX): $(ART_TEST_TARGET_GTEST_Main_DEX) $(ZIPALIGN)
 	cp $< $@
 	$(call uncompress-dexs, $@)
 	$(call align-package, $@)
+endif
 
+ifdef ART_TEST_HOST_GTEST_Main_DEX
 $(ART_TEST_HOST_GTEST_EmptyUncompressed_DEX): $(ZIPALIGN)
 	touch $(dir $@)classes.dex
 	zip -j -qD -X -0 $@ $(dir $@)classes.dex
 	rm $(dir $@)classes.dex
+endif
 
+ifdef ART_TEST_TARGET_GTEST_Main_DEX
 $(ART_TEST_TARGET_GTEST_EmptyUncompressed_DEX): $(ZIPALIGN)
 	touch $(dir $@)classes.dex
 	zip -j -qD -X -0 $@ $(dir $@)classes.dex
 	rm $(dir $@)classes.dex
+endif
 
+ifdef ART_TEST_HOST_GTEST_MultiDex_DEX
 $(ART_TEST_HOST_GTEST_MultiDexUncompressed_DEX): $(ART_TEST_HOST_GTEST_MultiDex_DEX) $(ZIPALIGN)
 	cp $< $@
 	$(call uncompress-dexs, $@)
 	$(call align-package, $@)
+endif
 
+ifdef ART_TEST_TARGET_GTEST_MultiDex_DEX
 $(ART_TEST_TARGET_GTEST_MultiDexUncompressed_DEX): $(ART_TEST_TARGET_GTEST_MultiDex_DEX) $(ZIPALIGN)
 	cp $< $@
 	$(call uncompress-dexs, $@)
 	$(call align-package, $@)
+endif
 
 ART_TEST_GTEST_VerifierDeps_SRC := $(abspath $(wildcard $(LOCAL_PATH)/VerifierDeps/*.smali))
 ART_TEST_GTEST_VerifierDepsMulti_SRC := $(abspath $(wildcard $(LOCAL_PATH)/VerifierDepsMulti/*.smali))
diff --git a/cmdline/cmdline_types.h b/cmdline/cmdline_types.h
index c8be69d..5a25a6c 100644
--- a/cmdline/cmdline_types.h
+++ b/cmdline/cmdline_types.h
@@ -416,8 +416,6 @@
     return gc::kCollectorTypeGSS;
   } else if (option == "CC") {
     return gc::kCollectorTypeCC;
-  } else if (option == "MC") {
-    return gc::kCollectorTypeMC;
   } else {
     return gc::kCollectorTypeNone;
   }
diff --git a/compiler/optimizing/block_builder.cc b/compiler/optimizing/block_builder.cc
index 95f2e98..d9df23f 100644
--- a/compiler/optimizing/block_builder.cc
+++ b/compiler/optimizing/block_builder.cc
@@ -107,6 +107,7 @@
       number_of_branches_++;
       MaybeCreateBlockAt(dex_pc + instruction.GetTargetOffset());
     } else if (instruction.IsSwitch()) {
+      number_of_branches_++;  // Count as at least one branch (b/77652521).
       DexSwitchTable table(instruction, dex_pc);
       for (DexSwitchTableIterator s_it(table); !s_it.Done(); s_it.Advance()) {
         MaybeCreateBlockAt(dex_pc + s_it.CurrentTargetOffset());
diff --git a/libartbase/base/macros.h b/libartbase/base/macros.h
index 6dd981d..f26cf07 100644
--- a/libartbase/base/macros.h
+++ b/libartbase/base/macros.h
@@ -45,8 +45,6 @@
   private: \
     void* operator new(size_t) = delete  // NOLINT
 
-#define SIZEOF_MEMBER(t, f) sizeof((reinterpret_cast<t*>(4096))->f)  // NOLINT
-
 #define OFFSETOF_MEMBER(t, f) \
   (reinterpret_cast<uintptr_t>(&reinterpret_cast<t*>(16)->f) - static_cast<uintptr_t>(16u))  // NOLINT
 
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index b9a9a69..dc9d990 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -700,6 +700,7 @@
 }
 
 bool PatchOat::WriteImage(File* out) {
+  CHECK(out != nullptr);
   TimingLogger::ScopedTiming t("Writing image File", timings_);
   std::string error_msg;
 
@@ -709,7 +710,6 @@
                                             true /* read_only_mode */, &error_msg);
 
   CHECK(image_ != nullptr);
-  CHECK(out != nullptr);
   size_t expect = image_->Size();
   if (out->WriteFully(reinterpret_cast<char*>(image_->Begin()), expect) &&
       out->SetLength(expect) == 0) {
diff --git a/runtime/Android.bp b/runtime/Android.bp
index 9271a05..00d4a60 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -65,7 +65,6 @@
         "gc/collector/garbage_collector.cc",
         "gc/collector/immune_region.cc",
         "gc/collector/immune_spaces.cc",
-        "gc/collector/mark_compact.cc",
         "gc/collector/mark_sweep.cc",
         "gc/collector/partial_mark_sweep.cc",
         "gc/collector/semi_space.cc",
diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc
deleted file mode 100644
index 34cc129..0000000
--- a/runtime/gc/collector/mark_compact.cc
+++ /dev/null
@@ -1,642 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "mark_compact.h"
-
-#include <android-base/logging.h>
-
-#include "base/macros.h"
-#include "base/mutex-inl.h"
-#include "base/timing_logger.h"
-#include "gc/accounting/heap_bitmap-inl.h"
-#include "gc/accounting/mod_union_table.h"
-#include "gc/accounting/space_bitmap-inl.h"
-#include "gc/heap.h"
-#include "gc/reference_processor.h"
-#include "gc/space/bump_pointer_space-inl.h"
-#include "gc/space/large_object_space.h"
-#include "gc/space/space-inl.h"
-#include "mirror/class-inl.h"
-#include "mirror/object-inl.h"
-#include "mirror/object-refvisitor-inl.h"
-#include "runtime.h"
-#include "stack.h"
-#include "thread-current-inl.h"
-#include "thread_list.h"
-
-namespace art {
-namespace gc {
-namespace collector {
-
-void MarkCompact::BindBitmaps() {
-  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
-  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
-  // Mark all of the spaces we never collect as immune.
-  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
-    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect ||
-        space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect) {
-      immune_spaces_.AddSpace(space);
-    }
-  }
-}
-
-MarkCompact::MarkCompact(Heap* heap, const std::string& name_prefix)
-    : GarbageCollector(heap, name_prefix + (name_prefix.empty() ? "" : " ") + "mark compact"),
-      mark_stack_(nullptr),
-      space_(nullptr),
-      mark_bitmap_(nullptr),
-      collector_name_(name_),
-      bump_pointer_(nullptr),
-      live_objects_in_space_(0),
-      updating_references_(false) {}
-
-void MarkCompact::RunPhases() {
-  Thread* self = Thread::Current();
-  InitializePhase();
-  CHECK(!Locks::mutator_lock_->IsExclusiveHeld(self));
-  {
-    ScopedPause pause(this);
-    GetHeap()->PreGcVerificationPaused(this);
-    GetHeap()->PrePauseRosAllocVerification(this);
-    MarkingPhase();
-    ReclaimPhase();
-  }
-  GetHeap()->PostGcVerification(this);
-  FinishPhase();
-}
-
-void MarkCompact::ForwardObject(mirror::Object* obj) {
-  const size_t alloc_size = RoundUp(obj->SizeOf(), space::BumpPointerSpace::kAlignment);
-  LockWord lock_word = obj->GetLockWord(false);
-  // If we have a non-empty lock word, store it and restore it later.
-  if (!LockWord::IsDefault(lock_word)) {
-    // Set the bit in the bitmap so that we know to restore it later.
-    objects_with_lockword_->Set(obj);
-    lock_words_to_restore_.push_back(lock_word);
-  }
-  obj->SetLockWord(LockWord::FromForwardingAddress(reinterpret_cast<size_t>(bump_pointer_)),
-                   false);
-  bump_pointer_ += alloc_size;
-  ++live_objects_in_space_;
-}
-
-
-void MarkCompact::CalculateObjectForwardingAddresses() {
-  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
-  // The bump pointer in the space where the next forwarding address will be.
-  bump_pointer_ = reinterpret_cast<uint8_t*>(space_->Begin());
-  // Visit all the marked objects in the bitmap.
-  objects_before_forwarding_->VisitMarkedRange(reinterpret_cast<uintptr_t>(space_->Begin()),
-                                               reinterpret_cast<uintptr_t>(space_->End()),
-                                               [this](mirror::Object* obj)
-      REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
-    DCHECK_ALIGNED(obj, space::BumpPointerSpace::kAlignment);
-    DCHECK(IsMarked(obj) != nullptr);
-    ForwardObject(obj);
-  });
-}
-
-void MarkCompact::InitializePhase() {
-  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
-  mark_stack_ = heap_->GetMarkStack();
-  DCHECK(mark_stack_ != nullptr);
-  immune_spaces_.Reset();
-  CHECK(space_->CanMoveObjects()) << "Attempting compact non-movable space from " << *space_;
-  // TODO: I don't think we should need the heap bitmap lock to get the mark bitmap.
-  ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
-  mark_bitmap_ = heap_->GetMarkBitmap();
-  live_objects_in_space_ = 0;
-}
-
-void MarkCompact::ProcessReferences(Thread* self) {
-  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
-  heap_->GetReferenceProcessor()->ProcessReferences(
-      false, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(), this);
-}
-
-inline mirror::Object* MarkCompact::MarkObject(mirror::Object* obj) {
-  if (obj == nullptr) {
-    return nullptr;
-  }
-  if (kUseBakerReadBarrier) {
-    // Verify all the objects have the correct forward state installed.
-    obj->AssertReadBarrierState();
-  }
-  if (!immune_spaces_.IsInImmuneRegion(obj)) {
-    if (objects_before_forwarding_->HasAddress(obj)) {
-      if (!objects_before_forwarding_->Set(obj)) {
-        MarkStackPush(obj);  // This object was not previously marked.
-      }
-    } else {
-      DCHECK(!space_->HasAddress(obj));
-      auto slow_path = [](const mirror::Object* ref)
-          REQUIRES_SHARED(Locks::mutator_lock_) {
-        // Marking a large object, make sure it's aligned as a sanity check.
-        if (!IsAligned<kPageSize>(ref)) {
-          Runtime::Current()->GetHeap()->DumpSpaces(LOG_STREAM(ERROR));
-          LOG(FATAL) << ref;
-        }
-      };
-      if (!mark_bitmap_->Set(obj, slow_path)) {
-        // This object was not previously marked.
-        MarkStackPush(obj);
-      }
-    }
-  }
-  return obj;
-}
-
-void MarkCompact::MarkingPhase() {
-  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
-  Thread* self = Thread::Current();
-  // Bitmap which describes which objects we have to move.
-  objects_before_forwarding_.reset(accounting::ContinuousSpaceBitmap::Create(
-      "objects before forwarding", space_->Begin(), space_->Size()));
-  // Bitmap which describes which lock words we need to restore.
-  objects_with_lockword_.reset(accounting::ContinuousSpaceBitmap::Create(
-      "objects with lock words", space_->Begin(), space_->Size()));
-  CHECK(Locks::mutator_lock_->IsExclusiveHeld(self));
-  // Assume the cleared space is already empty.
-  BindBitmaps();
-  t.NewTiming("ProcessCards");
-  // Process dirty cards and add dirty cards to mod-union tables.
-  heap_->ProcessCards(GetTimings(), false, false, true);
-  // Clear the whole card table since we cannot get any additional dirty cards during the
-  // paused GC. This saves memory but only works for pause-the-world collectors.
-  t.NewTiming("ClearCardTable");
-  heap_->GetCardTable()->ClearCardTable();
-  // Need to do this before the checkpoint since we don't want any threads to add references to
-  // the live stack during the recursive mark.
-  if (kUseThreadLocalAllocationStack) {
-    t.NewTiming("RevokeAllThreadLocalAllocationStacks");
-    heap_->RevokeAllThreadLocalAllocationStacks(self);
-  }
-  t.NewTiming("SwapStacks");
-  heap_->SwapStacks();
-  {
-    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
-    MarkRoots();
-    // Mark roots of immune spaces.
-    UpdateAndMarkModUnion();
-    // Recursively mark remaining objects.
-    MarkReachableObjects();
-  }
-  ProcessReferences(self);
-  {
-    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
-    SweepSystemWeaks();
-  }
-  Runtime::Current()->GetClassLinker()->CleanupClassLoaders();
-  // Revoke buffers before measuring how many objects were moved, since the TLABs need to be
-  // revoked before the moved objects can be counted properly.
-  RevokeAllThreadLocalBuffers();
-  // Disabled due to an issue where we have objects in the bump pointer space which reference dead
-  // objects.
-  // heap_->PreSweepingGcVerification(this);
-}
-
-void MarkCompact::UpdateAndMarkModUnion() {
-  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
-  for (auto& space : heap_->GetContinuousSpaces()) {
-    // If the space is immune then we need to mark the references to other spaces.
-    if (immune_spaces_.ContainsSpace(space)) {
-      accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
-      if (table != nullptr) {
-        // TODO: Improve naming.
-        TimingLogger::ScopedTiming t2(
-            space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
-                                     "UpdateAndMarkImageModUnionTable", GetTimings());
-        table->UpdateAndMarkReferences(this);
-      }
-    }
-  }
-}
-
-void MarkCompact::MarkReachableObjects() {
-  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
-  accounting::ObjectStack* live_stack = heap_->GetLiveStack();
-  {
-    TimingLogger::ScopedTiming t2("MarkAllocStackAsLive", GetTimings());
-    heap_->MarkAllocStackAsLive(live_stack);
-  }
-  live_stack->Reset();
-  // Recursively process the mark stack.
-  ProcessMarkStack();
-}
-
-void MarkCompact::ReclaimPhase() {
-  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
-  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
-  // Reclaim unmarked objects.
-  Sweep(false);
-  // Swap the live and mark bitmaps for each space which we modified. This is an
-  // optimization that enables us to not clear live bits inside of the sweep. Only swaps unbound
-  // bitmaps.
-  SwapBitmaps();
-  GetHeap()->UnBindBitmaps();  // Unbind the live and mark bitmaps.
-  Compact();
-}
-
-void MarkCompact::ResizeMarkStack(size_t new_size) {
-  std::vector<StackReference<mirror::Object>> temp(mark_stack_->Begin(), mark_stack_->End());
-  CHECK_LE(mark_stack_->Size(), new_size);
-  mark_stack_->Resize(new_size);
-  for (auto& obj : temp) {
-    mark_stack_->PushBack(obj.AsMirrorPtr());
-  }
-}
-
-inline void MarkCompact::MarkStackPush(mirror::Object* obj) {
-  if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
-    ResizeMarkStack(mark_stack_->Capacity() * 2);
-  }
-  // The object must be pushed onto the mark stack.
-  mark_stack_->PushBack(obj);
-}
-
-void MarkCompact::MarkHeapReference(mirror::HeapReference<mirror::Object>* obj_ptr,
-                                    bool do_atomic_update ATTRIBUTE_UNUSED) {
-  if (updating_references_) {
-    UpdateHeapReference(obj_ptr);
-  } else {
-    MarkObject(obj_ptr->AsMirrorPtr());
-  }
-}
-
-void MarkCompact::VisitRoots(
-    mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED) {
-  for (size_t i = 0; i < count; ++i) {
-    MarkObject(*roots[i]);
-  }
-}
-
-void MarkCompact::VisitRoots(
-    mirror::CompressedReference<mirror::Object>** roots, size_t count,
-    const RootInfo& info ATTRIBUTE_UNUSED) {
-  for (size_t i = 0; i < count; ++i) {
-    MarkObject(roots[i]->AsMirrorPtr());
-  }
-}
-
-class MarkCompact::UpdateRootVisitor : public RootVisitor {
- public:
-  explicit UpdateRootVisitor(MarkCompact* collector) : collector_(collector) {}
-
-  void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES(Locks::mutator_lock_)
-      REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
-    for (size_t i = 0; i < count; ++i) {
-      mirror::Object* obj = *roots[i];
-      mirror::Object* new_obj = collector_->GetMarkedForwardAddress(obj);
-      if (obj != new_obj) {
-        *roots[i] = new_obj;
-        DCHECK(new_obj != nullptr);
-      }
-    }
-  }
-
-  void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
-                  const RootInfo& info ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES(Locks::mutator_lock_)
-      REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
-    for (size_t i = 0; i < count; ++i) {
-      mirror::Object* obj = roots[i]->AsMirrorPtr();
-      mirror::Object* new_obj = collector_->GetMarkedForwardAddress(obj);
-      if (obj != new_obj) {
-        roots[i]->Assign(new_obj);
-        DCHECK(new_obj != nullptr);
-      }
-    }
-  }
-
- private:
-  MarkCompact* const collector_;
-};
-
-class MarkCompact::UpdateObjectReferencesVisitor {
- public:
-  explicit UpdateObjectReferencesVisitor(MarkCompact* collector) : collector_(collector) {}
-
-  void operator()(mirror::Object* obj) const REQUIRES_SHARED(Locks::heap_bitmap_lock_)
-          REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
-    collector_->UpdateObjectReferences(obj);
-  }
-
- private:
-  MarkCompact* const collector_;
-};
-
-void MarkCompact::UpdateReferences() {
-  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
-  updating_references_ = true;
-  Runtime* runtime = Runtime::Current();
-  // Update roots.
-  UpdateRootVisitor update_root_visitor(this);
-  runtime->VisitRoots(&update_root_visitor);
-  // Update object references in mod union tables and spaces.
-  for (const auto& space : heap_->GetContinuousSpaces()) {
-    // If the space is immune then we need to mark the references to other spaces.
-    accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
-    if (table != nullptr) {
-      // TODO: Improve naming.
-      TimingLogger::ScopedTiming t2(
-          space->IsZygoteSpace() ? "UpdateZygoteModUnionTableReferences" :
-                                   "UpdateImageModUnionTableReferences",
-                                   GetTimings());
-      table->UpdateAndMarkReferences(this);
-    } else {
-      // No mod union table for this space, so we need to scan it
-      // using a live bitmap visit instead.
-      accounting::ContinuousSpaceBitmap* bitmap = space->GetLiveBitmap();
-      if (bitmap != nullptr) {
-        UpdateObjectReferencesVisitor visitor(this);
-        bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
-                                 reinterpret_cast<uintptr_t>(space->End()),
-                                 visitor);
-      }
-    }
-  }
-  CHECK(!kMovingClasses)
-      << "Didn't update large object classes since they are assumed to not move.";
-  // Update the system weaks, these should already have been swept.
-  runtime->SweepSystemWeaks(this);
-  // Update the objects in the bump pointer space last, these objects don't have a bitmap.
-  UpdateObjectReferencesVisitor visitor(this);
-  objects_before_forwarding_->VisitMarkedRange(reinterpret_cast<uintptr_t>(space_->Begin()),
-                                               reinterpret_cast<uintptr_t>(space_->End()),
-                                               visitor);
-  // Update the reference processor cleared list.
-  heap_->GetReferenceProcessor()->UpdateRoots(this);
-  updating_references_ = false;
-}
-
-void MarkCompact::Compact() {
-  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
-  CalculateObjectForwardingAddresses();
-  UpdateReferences();
-  MoveObjects();
-  // Account for the objects and bytes freed by the compaction.
-  int64_t objects_freed = space_->GetObjectsAllocated() - live_objects_in_space_;
-  int64_t bytes_freed = reinterpret_cast<int64_t>(space_->End()) -
-      reinterpret_cast<int64_t>(bump_pointer_);
-  t.NewTiming("RecordFree");
-  space_->RecordFree(objects_freed, bytes_freed);
-  RecordFree(ObjectBytePair(objects_freed, bytes_freed));
-  space_->SetEnd(bump_pointer_);
-  // Need to zero out the memory we freed. TODO: Use madvise for pages.
-  memset(bump_pointer_, 0, bytes_freed);
-}
-
-// Marks all objects in the root set.
-void MarkCompact::MarkRoots() {
-  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
-  Runtime::Current()->VisitRoots(this);
-}
-
-inline void MarkCompact::UpdateHeapReference(mirror::HeapReference<mirror::Object>* reference) {
-  mirror::Object* obj = reference->AsMirrorPtr();
-  if (obj != nullptr) {
-    mirror::Object* new_obj = GetMarkedForwardAddress(obj);
-    if (obj != new_obj) {
-      DCHECK(new_obj != nullptr);
-      reference->Assign(new_obj);
-    }
-  }
-}
-
-class MarkCompact::UpdateReferenceVisitor {
- public:
-  explicit UpdateReferenceVisitor(MarkCompact* collector) : collector_(collector) {}
-
-  void operator()(mirror::Object* obj, MemberOffset offset, bool /*is_static*/) const
-      ALWAYS_INLINE REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
-    collector_->UpdateHeapReference(obj->GetFieldObjectReferenceAddr<kVerifyNone>(offset));
-  }
-
-  void operator()(ObjPtr<mirror::Class> /*klass*/, mirror::Reference* ref) const
-      REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
-    collector_->UpdateHeapReference(
-        ref->GetFieldObjectReferenceAddr<kVerifyNone>(mirror::Reference::ReferentOffset()));
-  }
-
-  // TODO: Remove NO_THREAD_SAFETY_ANALYSIS when clang better understands visitors.
-  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
-      NO_THREAD_SAFETY_ANALYSIS {
-    if (!root->IsNull()) {
-      VisitRoot(root);
-    }
-  }
-
-  void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
-      NO_THREAD_SAFETY_ANALYSIS {
-    root->Assign(collector_->GetMarkedForwardAddress(root->AsMirrorPtr()));
-  }
-
- private:
-  MarkCompact* const collector_;
-};
-
-void MarkCompact::UpdateObjectReferences(mirror::Object* obj) {
-  UpdateReferenceVisitor visitor(this);
-  obj->VisitReferences(visitor, visitor);
-}
-
-inline mirror::Object* MarkCompact::GetMarkedForwardAddress(mirror::Object* obj) {
-  DCHECK(obj != nullptr);
-  if (objects_before_forwarding_->HasAddress(obj)) {
-    DCHECK(objects_before_forwarding_->Test(obj));
-    mirror::Object* ret =
-        reinterpret_cast<mirror::Object*>(obj->GetLockWord(false).ForwardingAddress());
-    DCHECK(ret != nullptr);
-    return ret;
-  }
-  DCHECK(!space_->HasAddress(obj));
-  return obj;
-}
-
-mirror::Object* MarkCompact::IsMarked(mirror::Object* object) {
-  if (immune_spaces_.IsInImmuneRegion(object)) {
-    return object;
-  }
-  if (updating_references_) {
-    return GetMarkedForwardAddress(object);
-  }
-  if (objects_before_forwarding_->HasAddress(object)) {
-    return objects_before_forwarding_->Test(object) ? object : nullptr;
-  }
-  return mark_bitmap_->Test(object) ? object : nullptr;
-}
-
-bool MarkCompact::IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref_ptr,
-                                              // MarkCompact does the GC in a pause. No CAS needed.
-                                              bool do_atomic_update ATTRIBUTE_UNUSED) {
-  // Side effect free since we call this before ever moving objects.
-  mirror::Object* obj = ref_ptr->AsMirrorPtr();
-  if (obj == nullptr) {
-    return true;
-  }
-  return IsMarked(obj) != nullptr;
-}
-
-void MarkCompact::SweepSystemWeaks() {
-  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
-  Runtime::Current()->SweepSystemWeaks(this);
-}
-
-bool MarkCompact::ShouldSweepSpace(space::ContinuousSpace* space) const {
-  return space != space_ && !immune_spaces_.ContainsSpace(space);
-}
-
-void MarkCompact::MoveObject(mirror::Object* obj, size_t len) {
-  // Look at the forwarding address stored in the lock word to know where to copy.
-  DCHECK(space_->HasAddress(obj)) << obj;
-  uintptr_t dest_addr = obj->GetLockWord(false).ForwardingAddress();
-  mirror::Object* dest_obj = reinterpret_cast<mirror::Object*>(dest_addr);
-  DCHECK(space_->HasAddress(dest_obj)) << dest_obj;
-  // Use memmove since there may be overlap.
-  memmove(reinterpret_cast<void*>(dest_addr), reinterpret_cast<const void*>(obj), len);
-  // Restore the saved lock word if needed.
-  LockWord lock_word = LockWord::Default();
-  if (UNLIKELY(objects_with_lockword_->Test(obj))) {
-    lock_word = lock_words_to_restore_.front();
-    lock_words_to_restore_.pop_front();
-  }
-  dest_obj->SetLockWord(lock_word, false);
-}
-
-void MarkCompact::MoveObjects() {
-  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
-  // Move the objects in the before forwarding bitmap.
-  objects_before_forwarding_->VisitMarkedRange(reinterpret_cast<uintptr_t>(space_->Begin()),
-                                               reinterpret_cast<uintptr_t>(space_->End()),
-                                               [this](mirror::Object* obj)
-      REQUIRES_SHARED(Locks::heap_bitmap_lock_)
-      REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
-    MoveObject(obj, obj->SizeOf());
-  });
-  CHECK(lock_words_to_restore_.empty());
-}
-
-void MarkCompact::Sweep(bool swap_bitmaps) {
-  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
-  DCHECK(mark_stack_->IsEmpty());
-  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
-    if (space->IsContinuousMemMapAllocSpace()) {
-      space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
-      if (!ShouldSweepSpace(alloc_space)) {
-        continue;
-      }
-      TimingLogger::ScopedTiming t2(
-          alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", GetTimings());
-      RecordFree(alloc_space->Sweep(swap_bitmaps));
-    }
-  }
-  SweepLargeObjects(swap_bitmaps);
-}
-
-void MarkCompact::SweepLargeObjects(bool swap_bitmaps) {
-  space::LargeObjectSpace* los = heap_->GetLargeObjectsSpace();
-  if (los != nullptr) {
-    TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
-    RecordFreeLOS(los->Sweep(swap_bitmaps));
-  }
-}
-
-// Process the "referent" field in a java.lang.ref.Reference.  If the referent has not yet been
-// marked, put it on the appropriate list in the heap for later processing.
-void MarkCompact::DelayReferenceReferent(ObjPtr<mirror::Class> klass,
-                                         ObjPtr<mirror::Reference> reference) {
-  heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, reference, this);
-}
-
-class MarkCompact::MarkObjectVisitor {
- public:
-  explicit MarkObjectVisitor(MarkCompact* collector) : collector_(collector) {}
-
-  void operator()(ObjPtr<mirror::Object> obj,
-                  MemberOffset offset,
-                  bool /*is_static*/) const ALWAYS_INLINE
-      REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
-    // Object was already verified when we scanned it.
-    collector_->MarkObject(obj->GetFieldObject<mirror::Object, kVerifyNone>(offset));
-  }
-
-  void operator()(ObjPtr<mirror::Class> klass,
-                  ObjPtr<mirror::Reference> ref) const
-      REQUIRES_SHARED(Locks::mutator_lock_)
-      REQUIRES(Locks::heap_bitmap_lock_) {
-    collector_->DelayReferenceReferent(klass, ref);
-  }
-
-  // TODO: Remove NO_THREAD_SAFETY_ANALYSIS when clang better understands visitors.
-  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
-      NO_THREAD_SAFETY_ANALYSIS {
-    if (!root->IsNull()) {
-      VisitRoot(root);
-    }
-  }
-
-  void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
-      NO_THREAD_SAFETY_ANALYSIS {
-    collector_->MarkObject(root->AsMirrorPtr());
-  }
-
- private:
-  MarkCompact* const collector_;
-};
-
-// Visit all of the references of an object and mark them.
-void MarkCompact::ScanObject(mirror::Object* obj) {
-  MarkObjectVisitor visitor(this);
-  obj->VisitReferences(visitor, visitor);
-}
-
-// Scan anything that's on the mark stack.
-void MarkCompact::ProcessMarkStack() {
-  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
-  while (!mark_stack_->IsEmpty()) {
-    mirror::Object* obj = mark_stack_->PopBack();
-    DCHECK(obj != nullptr);
-    ScanObject(obj);
-  }
-}
-
-void MarkCompact::SetSpace(space::BumpPointerSpace* space) {
-  DCHECK(space != nullptr);
-  space_ = space;
-}
-
-void MarkCompact::FinishPhase() {
-  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
-  space_ = nullptr;
-  CHECK(mark_stack_->IsEmpty());
-  mark_stack_->Reset();
-  // Clear all of the spaces' mark bitmaps.
-  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
-  heap_->ClearMarkedObjects();
-  // Release our bitmaps.
-  objects_before_forwarding_.reset(nullptr);
-  objects_with_lockword_.reset(nullptr);
-}
-
-void MarkCompact::RevokeAllThreadLocalBuffers() {
-  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
-  GetHeap()->RevokeAllThreadLocalBuffers();
-}
-
-}  // namespace collector
-}  // namespace gc
-}  // namespace art
diff --git a/runtime/gc/collector/mark_compact.h b/runtime/gc/collector/mark_compact.h
deleted file mode 100644
index e774959..0000000
--- a/runtime/gc/collector/mark_compact.h
+++ /dev/null
@@ -1,238 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_GC_COLLECTOR_MARK_COMPACT_H_
-#define ART_RUNTIME_GC_COLLECTOR_MARK_COMPACT_H_
-
-#include <deque>
-#include <memory>  // For unique_ptr.
-
-#include "base/atomic.h"
-#include "base/macros.h"
-#include "base/mutex.h"
-#include "garbage_collector.h"
-#include "gc/accounting/heap_bitmap.h"
-#include "gc_root.h"
-#include "immune_spaces.h"
-#include "lock_word.h"
-#include "offsets.h"
-
-namespace art {
-
-class Thread;
-
-namespace mirror {
-class Class;
-class Object;
-}  // namespace mirror
-
-namespace gc {
-
-class Heap;
-
-namespace accounting {
-template <typename T> class AtomicStack;
-typedef AtomicStack<mirror::Object> ObjectStack;
-}  // namespace accounting
-
-namespace space {
-class BumpPointerSpace;
-class ContinuousMemMapAllocSpace;
-class ContinuousSpace;
-}  // namespace space
-
-namespace collector {
-
-class MarkCompact : public GarbageCollector {
- public:
-  explicit MarkCompact(Heap* heap, const std::string& name_prefix = "");
-  ~MarkCompact() {}
-
-  virtual void RunPhases() OVERRIDE NO_THREAD_SAFETY_ANALYSIS;
-  void InitializePhase();
-  void MarkingPhase() REQUIRES(Locks::mutator_lock_)
-      REQUIRES(!Locks::heap_bitmap_lock_);
-  void ReclaimPhase() REQUIRES(Locks::mutator_lock_)
-      REQUIRES(!Locks::heap_bitmap_lock_);
-  void FinishPhase() REQUIRES(Locks::mutator_lock_);
-  void MarkReachableObjects()
-      REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
-  virtual GcType GetGcType() const OVERRIDE {
-    return kGcTypePartial;
-  }
-  virtual CollectorType GetCollectorType() const OVERRIDE {
-    return kCollectorTypeMC;
-  }
-
-  // Sets which space we will be copying objects in.
-  void SetSpace(space::BumpPointerSpace* space);
-
-  // Initializes internal structures.
-  void Init();
-
-  // Find the default mark bitmap.
-  void FindDefaultMarkBitmap();
-
-  void ScanObject(mirror::Object* obj)
-      REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
-
-  // Marks the root set at the start of a garbage collection.
-  void MarkRoots()
-      REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
-
-  // Bind the live bits to the mark bits of bitmaps for spaces that are never collected, i.e.
-  // the image. Mark that portion of the heap as immune.
-  void BindBitmaps() REQUIRES_SHARED(Locks::mutator_lock_)
-      REQUIRES(!Locks::heap_bitmap_lock_);
-
-  void UnBindBitmaps()
-      REQUIRES(Locks::heap_bitmap_lock_);
-
-  void ProcessReferences(Thread* self) REQUIRES(Locks::mutator_lock_)
-      REQUIRES(!Locks::heap_bitmap_lock_);
-
-  // Sweeps unmarked objects to complete the garbage collection.
-  void Sweep(bool swap_bitmaps) REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
-
-  // Sweeps unmarked large objects to complete the garbage collection.
-  void SweepLargeObjects(bool swap_bitmaps) REQUIRES(Locks::heap_bitmap_lock_);
-
-  void SweepSystemWeaks()
-      REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
-
-  virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info)
-      OVERRIDE REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
-
-  virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
-                          const RootInfo& info)
-      OVERRIDE REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
-
-  // Schedules an unmarked object for reference processing.
-  void DelayReferenceReferent(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> reference)
-      REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
-
- protected:
-  // Returns null if the object is not marked, otherwise returns the forwarding address (same as
-  // object for non-movable things).
-  mirror::Object* GetMarkedForwardAddress(mirror::Object* object)
-      REQUIRES(Locks::mutator_lock_)
-      REQUIRES_SHARED(Locks::heap_bitmap_lock_);
-
-  // Marks or unmarks a large object based on whether or not set is true. If set is true, then we
-  // mark, otherwise we unmark.
-  bool MarkLargeObject(const mirror::Object* obj)
-      REQUIRES(Locks::heap_bitmap_lock_)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
-  // Resize the mark stack to the given size, preserving its contents.
-  void ResizeMarkStack(size_t new_size) REQUIRES_SHARED(Locks::mutator_lock_);
-
-  // Returns true if we should sweep the space.
-  bool ShouldSweepSpace(space::ContinuousSpace* space) const;
-
-  // Push an object onto the mark stack.
-  void MarkStackPush(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_);
-
-  void UpdateAndMarkModUnion()
-      REQUIRES(Locks::heap_bitmap_lock_)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
-  // Recursively blackens objects on the mark stack.
-  void ProcessMarkStack()
-      REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
-
-  // Three-pass mark compact: compute forwarding addresses, update references, then move objects.
-  void Compact() REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
-  // Calculate the forwarding address of objects marked as "live" in the objects_before_forwarding
-  // bitmap.
-  void CalculateObjectForwardingAddresses()
-      REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
-  // Update the references of objects by using the forwarding addresses.
-  void UpdateReferences() REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
-  // Move objects and restore lock words.
-  void MoveObjects() REQUIRES(Locks::mutator_lock_);
-  // Move a single object to its forward address.
-  void MoveObject(mirror::Object* obj, size_t len) REQUIRES(Locks::mutator_lock_);
-  // Mark a single object.
-  virtual mirror::Object* MarkObject(mirror::Object* obj) OVERRIDE
-      REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
-  virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* obj_ptr,
-                                 bool do_atomic_update) OVERRIDE
-      REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
-  virtual mirror::Object* IsMarked(mirror::Object* obj) OVERRIDE
-      REQUIRES_SHARED(Locks::heap_bitmap_lock_)
-      REQUIRES(Locks::mutator_lock_);
-  virtual bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* obj,
-                                           bool do_atomic_update) OVERRIDE
-      REQUIRES_SHARED(Locks::heap_bitmap_lock_)
-      REQUIRES(Locks::mutator_lock_);
-  void ForwardObject(mirror::Object* obj)
-      REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
-  // Update a single heap reference.
-  void UpdateHeapReference(mirror::HeapReference<mirror::Object>* reference)
-      REQUIRES_SHARED(Locks::heap_bitmap_lock_)
-      REQUIRES(Locks::mutator_lock_);
-  // Update all of the references of a single object.
-  void UpdateObjectReferences(mirror::Object* obj)
-      REQUIRES_SHARED(Locks::heap_bitmap_lock_)
-      REQUIRES(Locks::mutator_lock_);
-
-  // Revoke all the thread-local buffers.
-  void RevokeAllThreadLocalBuffers();
-
-  accounting::ObjectStack* mark_stack_;
-
-  // Every object inside the immune spaces is assumed to be marked.
-  ImmuneSpaces immune_spaces_;
-
-  // Bump pointer space which we are collecting.
-  space::BumpPointerSpace* space_;
-  // Cached mark bitmap as an optimization.
-  accounting::HeapBitmap* mark_bitmap_;
-
-  // The name of the collector.
-  std::string collector_name_;
-
-  // The bump pointer in the space where the next forwarding address will be.
-  uint8_t* bump_pointer_;
-  // How many live objects we have in the space.
-  size_t live_objects_in_space_;
-
-  // Bitmap which describes which objects we have to move; its granularity must be 8 bytes so
-  // that objects which are only 8 bytes long can be handled.
-  std::unique_ptr<accounting::ContinuousSpaceBitmap> objects_before_forwarding_;
-  // Bitmap which describes which lock words we need to restore.
-  std::unique_ptr<accounting::ContinuousSpaceBitmap> objects_with_lockword_;
-  // Which lock words we need to restore as we are moving objects.
-  std::deque<LockWord> lock_words_to_restore_;
-
-  // Whether or not we are currently updating references.
-  bool updating_references_;
-
- private:
-  class MarkObjectVisitor;
-  class UpdateObjectReferencesVisitor;
-  class UpdateReferenceVisitor;
-  class UpdateRootVisitor;
-
-  DISALLOW_IMPLICIT_CONSTRUCTORS(MarkCompact);
-};
-
-}  // namespace collector
-}  // namespace gc
-}  // namespace art
-
-#endif  // ART_RUNTIME_GC_COLLECTOR_MARK_COMPACT_H_
diff --git a/runtime/gc/collector_type.h b/runtime/gc/collector_type.h
index 8979e74..4759fca 100644
--- a/runtime/gc/collector_type.h
+++ b/runtime/gc/collector_type.h
@@ -34,8 +34,6 @@
   kCollectorTypeSS,
   // A generational variant of kCollectorTypeSS.
   kCollectorTypeGSS,
-  // Mark compact collector.
-  kCollectorTypeMC,
   // Heap trimming collector, doesn't do any actual collecting.
   kCollectorTypeHeapTrim,
   // A (mostly) concurrent copying collector.
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index f16138c..6849220 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -48,7 +48,6 @@
 #include "gc/accounting/remembered_set.h"
 #include "gc/accounting/space_bitmap-inl.h"
 #include "gc/collector/concurrent_copying.h"
-#include "gc/collector/mark_compact.h"
 #include "gc/collector/mark_sweep.h"
 #include "gc/collector/partial_mark_sweep.h"
 #include "gc/collector/semi_space.h"
@@ -262,7 +261,6 @@
       verify_object_mode_(kVerifyObjectModeDisabled),
       disable_moving_gc_count_(0),
       semi_space_collector_(nullptr),
-      mark_compact_collector_(nullptr),
       concurrent_copying_collector_(nullptr),
       is_running_on_memory_tool_(Runtime::Current()->IsRunningOnMemoryTool()),
       use_tlab_(use_tlab),
@@ -603,10 +601,6 @@
       concurrent_copying_collector_->SetRegionSpace(region_space_);
       garbage_collectors_.push_back(concurrent_copying_collector_);
     }
-    if (MayUseCollector(kCollectorTypeMC)) {
-      mark_compact_collector_ = new collector::MarkCompact(this);
-      garbage_collectors_.push_back(mark_compact_collector_);
-    }
   }
   if (!GetBootImageSpaces().empty() && non_moving_space_ != nullptr &&
       (is_zygote || separate_non_moving_space || foreground_collector_type_ == kCollectorTypeGSS)) {
@@ -2122,10 +2116,6 @@
 void Heap::ChangeCollector(CollectorType collector_type) {
   // TODO: Only do this with all mutators suspended to avoid races.
   if (collector_type != collector_type_) {
-    if (collector_type == kCollectorTypeMC) {
-      // Don't allow mark compact unless support is compiled in.
-      CHECK(kMarkCompactSupport);
-    }
     collector_type_ = collector_type;
     gc_plan_.clear();
     switch (collector_type_) {
@@ -2138,7 +2128,6 @@
         }
         break;
       }
-      case kCollectorTypeMC:  // Fall-through.
       case kCollectorTypeSS:  // Fall-through.
       case kCollectorTypeGSS: {
         gc_plan_.push_back(collector::kGcTypeFull);
@@ -2487,13 +2476,9 @@
     semi_space_collector_->SetToSpace(target_space);
     semi_space_collector_->Run(gc_cause, false);
     return semi_space_collector_;
-  } else {
-    CHECK(target_space->IsBumpPointerSpace())
-        << "In-place compaction is only supported for bump pointer spaces";
-    mark_compact_collector_->SetSpace(target_space->AsBumpPointerSpace());
-    mark_compact_collector_->Run(kGcCauseCollectorTransition, false);
-    return mark_compact_collector_;
   }
+  LOG(FATAL) << "Unsupported";
+  UNREACHABLE();
 }
 
 void Heap::TraceHeapSize(size_t heap_size) {
@@ -2583,14 +2568,10 @@
       case kCollectorTypeCC:
         collector = concurrent_copying_collector_;
         break;
-      case kCollectorTypeMC:
-        mark_compact_collector_->SetSpace(bump_pointer_space_);
-        collector = mark_compact_collector_;
-        break;
       default:
         LOG(FATAL) << "Invalid collector type " << static_cast<size_t>(collector_type_);
     }
-    if (collector != mark_compact_collector_ && collector != concurrent_copying_collector_) {
+    if (collector != concurrent_copying_collector_) {
       temp_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
       if (kIsDebugBuild) {
         // Try to read each page of the memory map in case mprotect didn't work properly b/19894268.
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index ef1c088..99ebab9 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -84,7 +84,6 @@
 namespace collector {
 class ConcurrentCopying;
 class GarbageCollector;
-class MarkCompact;
 class MarkSweep;
 class SemiSpace;
 }  // namespace collector
@@ -883,7 +882,6 @@
         collector_type == kCollectorTypeGSS ||
         collector_type == kCollectorTypeCC ||
         collector_type == kCollectorTypeCCBackground ||
-        collector_type == kCollectorTypeMC ||
         collector_type == kCollectorTypeHomogeneousSpaceCompact;
   }
   bool ShouldAllocLargeObject(ObjPtr<mirror::Class> c, size_t byte_count) const
@@ -1354,7 +1352,6 @@
 
   std::vector<collector::GarbageCollector*> garbage_collectors_;
   collector::SemiSpace* semi_space_collector_;
-  collector::MarkCompact* mark_compact_collector_;
   collector::ConcurrentCopying* concurrent_copying_collector_;
 
   const bool is_running_on_memory_tool_;
@@ -1442,7 +1439,6 @@
 
   friend class CollectorTransitionTask;
   friend class collector::GarbageCollector;
-  friend class collector::MarkCompact;
   friend class collector::ConcurrentCopying;
   friend class collector::MarkSweep;
   friend class collector::SemiSpace;
diff --git a/runtime/oat_file_assistant_test.cc b/runtime/oat_file_assistant_test.cc
index 676071b..99bc0b2 100644
--- a/runtime/oat_file_assistant_test.cc
+++ b/runtime/oat_file_assistant_test.cc
@@ -1487,7 +1487,7 @@
                                         true,
                                         false);
     int status = oat_file_assistant.MakeUpToDate(false, kSpecialSharedLibraryContext, &error_msg);
-    EXPECT_EQ(OatFileAssistant::kUpdateSucceeded, status) << error_msg;
+    ASSERT_EQ(OatFileAssistant::kUpdateSucceeded, status) << error_msg;
     EXPECT_TRUE(oat_file_assistant.GetBestOatFile()->IsExecutable());
   }
 
@@ -1497,7 +1497,7 @@
                                         true,
                                         true);
     int status = oat_file_assistant.MakeUpToDate(false, kSpecialSharedLibraryContext, &error_msg);
-    EXPECT_EQ(OatFileAssistant::kUpdateSucceeded, status) << error_msg;
+    ASSERT_EQ(OatFileAssistant::kUpdateSucceeded, status) << error_msg;
     EXPECT_FALSE(oat_file_assistant.GetBestOatFile()->IsExecutable());
   }
 
@@ -1510,7 +1510,7 @@
                                         true,
                                         false);
     int status = oat_file_assistant.MakeUpToDate(false, kSpecialSharedLibraryContext, &error_msg);
-    EXPECT_EQ(OatFileAssistant::kUpdateSucceeded, status) << error_msg;
+    ASSERT_EQ(OatFileAssistant::kUpdateSucceeded, status) << error_msg;
     EXPECT_TRUE(oat_file_assistant.GetBestOatFile()->IsExecutable());
   }
 
@@ -1520,7 +1520,7 @@
                                         true,
                                         true);
     int status = oat_file_assistant.MakeUpToDate(false, kSpecialSharedLibraryContext, &error_msg);
-    EXPECT_EQ(OatFileAssistant::kUpdateSucceeded, status) << error_msg;
+    ASSERT_EQ(OatFileAssistant::kUpdateSucceeded, status) << error_msg;
     EXPECT_TRUE(oat_file_assistant.GetBestOatFile()->IsExecutable());
   }
 }
diff --git a/runtime/parsed_options_test.cc b/runtime/parsed_options_test.cc
index 8948c71..9e5d9ab 100644
--- a/runtime/parsed_options_test.cc
+++ b/runtime/parsed_options_test.cc
@@ -112,7 +112,7 @@
 
 TEST_F(ParsedOptionsTest, ParsedOptionsGc) {
   RuntimeOptions options;
-  options.push_back(std::make_pair("-Xgc:MC", nullptr));
+  options.push_back(std::make_pair("-Xgc:SS", nullptr));
 
   RuntimeArgumentMap map;
   bool parsed = ParsedOptions::Parse(options, false, &map);
@@ -124,7 +124,7 @@
   EXPECT_TRUE(map.Exists(Opt::GcOption));
 
   XGcOption xgc = map.GetOrDefault(Opt::GcOption);
-  EXPECT_EQ(gc::kCollectorTypeMC, xgc.collector_type_);
+  EXPECT_EQ(gc::kCollectorTypeSS, xgc.collector_type_);
 }
 
 TEST_F(ParsedOptionsTest, ParsedOptionsInstructionSet) {
diff --git a/test/151-OpenFileLimit/src/Main.java b/test/151-OpenFileLimit/src/Main.java
index de5890c..9b16090 100644
--- a/test/151-OpenFileLimit/src/Main.java
+++ b/test/151-OpenFileLimit/src/Main.java
@@ -38,7 +38,8 @@
             if (e.getMessage().contains("Too many open files")) {
                 System.out.println("Message includes \"Too many open files\"");
             } else {
-                System.out.println(e.getMessage());
+                System.out.println("Unexpected exception:");
+                e.printStackTrace();
             }
         }
 
diff --git a/test/712-varhandle-invocations/build b/test/712-varhandle-invocations/build
index 6d4429f..253765b 100755
--- a/test/712-varhandle-invocations/build
+++ b/test/712-varhandle-invocations/build
@@ -35,8 +35,5 @@
 # Desugar is not happy with our Java 9 bytecode; it shouldn't be necessary here anyway.
 export USE_DESUGAR=false
 
-# See b/65168732
-export USE_D8=false
-
 # Invoke default build with increased heap size for dx
 ./default-build "$@" --experimental var-handles --dx-vm-option -JXmx384m
diff --git a/test/etc/default-build b/test/etc/default-build
index dd55602..8bb898c 100755
--- a/test/etc/default-build
+++ b/test/etc/default-build
@@ -145,7 +145,7 @@
 declare -A DX_EXPERIMENTAL_ARGS
 DX_EXPERIMENTAL_ARGS["method-handles"]="--min-sdk-version=26"
 DX_EXPERIMENTAL_ARGS["parameter-annotations"]="--min-sdk-version=25"
-DX_EXPERIMENTAL_ARGS["var-handles"]="--min-sdk-version=26"
+DX_EXPERIMENTAL_ARGS["var-handles"]="--min-sdk-version=28"
 
 while true; do
   if [ "x$1" = "x--dx-option" ]; then