ART: Compacting ROS/DlMalloc spaces with semispace copy GC

The current semispace copy GC mainly operates on bump pointer spaces.
Although it squeezes out fragmentation most aggressively, an extra copy
is required to re-establish the data in a ROS/DlMalloc space so that CMS
GCs can run afterwards. Since semispace copy GC is stop-the-world, this
introduces not only unnecessary overhead but also a longer response time.
Response time here means the duration between the start of a transition
request and the start of the transition animation, which can hurt the
user experience.

Using semispace copy GC to compact the data from a ROS space into another
ROS space (or from a DlMalloc space into another DlMalloc space) solves this
problem. Although it squeezes out less fragmentation, CMS GCs can run
immediately after the compaction.
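
At its core, the new Heap::PerformHomogeneousSpaceCompact() does the
following (locking, error handling and statistics trimmed; see the diff
below for the full implementation):

    tl->SuspendAll();
    space::MallocSpace* to_space = main_space_backup_;
    space::MallocSpace* from_space = main_space_;
    to_space->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
    // Semispace copy from the current main space into the backup space; no semispace swap.
    Compact(to_space, from_space, kGcCauseHomogeneousSpaceCompact);
    // Keep the old space readable so ROSAlloc verification can still run on it.
    from_space->GetMemMap()->Protect(PROT_READ);
    std::swap(main_space_, main_space_backup_);
    SetSpaceAsDefault(main_space_);  // CMS can run on the new main space right away.
    tl->ResumeAll();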

We apply this algorithm in two cases:
1) Right before throwing an OOM, if -XX:EnableHSpaceCompactForOOM is passed.
2) When an app is switched to the background, if the -XX:BackgroundGC option
has the value HSpaceCompact (see the example invocation below).
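
For illustration only, an invocation enabling both behaviours might look
like this (hypothetical command line, assuming the standard dalvikvm
launcher; the flags are the ones added in parsed_options.cc):

    dalvikvm -XX:EnableHSpaceCompactForOOM -XX:BackgroundGC=HSpaceCompact -cp app.jar Main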

For case 1), OOMs are significantly delayed in the harmony GC stress test,
with a compaction ratio (space size after compaction divided by size before)
of up to 0.87. For case 2), a compaction ratio of around 0.5 is observed in
both the built-in SMS app and the browser. Similar results have been obtained
with other apps as well.

Change-Id: Iad9eabc6d046659fda3535ae20f21bc31f89ded3
Signed-off-by: Wang, Zuo <zuo.wang@intel.com>
Signed-off-by: Chang, Yang <yang.chang@intel.com>
Signed-off-by: Lei Li <lei.l.li@intel.com>
Signed-off-by: Lin Zang <lin.zang@intel.com>
diff --git a/runtime/gc/accounting/card_table-inl.h b/runtime/gc/accounting/card_table-inl.h
index ad0a4f43..46b9363 100644
--- a/runtime/gc/accounting/card_table-inl.h
+++ b/runtime/gc/accounting/card_table-inl.h
@@ -50,8 +50,9 @@
 template <typename Visitor>
 inline size_t CardTable::Scan(ContinuousSpaceBitmap* bitmap, byte* scan_begin, byte* scan_end,
                               const Visitor& visitor, const byte minimum_age) const {
-  DCHECK(bitmap->HasAddress(scan_begin));
-  DCHECK(bitmap->HasAddress(scan_end - 1));  // scan_end is the byte after the last byte we scan.
+  DCHECK_GE(scan_begin, reinterpret_cast<byte*>(bitmap->HeapBegin()));
+  // scan_end is the byte after the last byte we scan.
+  DCHECK_LE(scan_end, reinterpret_cast<byte*>(bitmap->HeapLimit()));
   byte* card_cur = CardFromAddr(scan_begin);
   byte* card_end = CardFromAddr(scan_end);
   CheckCardValid(card_cur);
diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index c66e80d..ad22a2e 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -68,9 +68,9 @@
              << ", capacity=" << std::dec << capacity_
              << ", max_capacity=" << std::dec << max_capacity_;
   for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
-    size_bracket_lock_names[i] =
+    size_bracket_lock_names_[i] =
         StringPrintf("an rosalloc size bracket %d lock", static_cast<int>(i));
-    size_bracket_locks_[i] = new Mutex(size_bracket_lock_names[i].c_str(), kRosAllocBracketLock);
+    size_bracket_locks_[i] = new Mutex(size_bracket_lock_names_[i].c_str(), kRosAllocBracketLock);
     current_runs_[i] = dedicated_full_run_;
   }
   DCHECK_EQ(footprint_, capacity_);
diff --git a/runtime/gc/allocator/rosalloc.h b/runtime/gc/allocator/rosalloc.h
index 85a8225..c0ab151 100644
--- a/runtime/gc/allocator/rosalloc.h
+++ b/runtime/gc/allocator/rosalloc.h
@@ -440,7 +440,7 @@
   // The mutexes, one per size bracket.
   Mutex* size_bracket_locks_[kNumOfSizeBrackets];
   // Bracket lock names (since locks only have char* names).
-  std::string size_bracket_lock_names[kNumOfSizeBrackets];
+  std::string size_bracket_lock_names_[kNumOfSizeBrackets];
   // The types of page map entries.
   enum {
     kPageMapReleased = 0,     // Zero and released back to the OS.
diff --git a/runtime/gc/collector_type.h b/runtime/gc/collector_type.h
index 530a3c9..ef5d56e 100644
--- a/runtime/gc/collector_type.h
+++ b/runtime/gc/collector_type.h
@@ -40,6 +40,9 @@
   kCollectorTypeHeapTrim,
   // A (mostly) concurrent copying collector.
   kCollectorTypeCC,
+  // A homogeneous space compaction collector used in background transition
+  // when both the foreground and background collectors are CMS.
+  kCollectorTypeHomogeneousSpaceCompact,
 };
 std::ostream& operator<<(std::ostream& os, const CollectorType& collector_type);
 
diff --git a/runtime/gc/gc_cause.cc b/runtime/gc/gc_cause.cc
index 9e73f14..f0e1512 100644
--- a/runtime/gc/gc_cause.cc
+++ b/runtime/gc/gc_cause.cc
@@ -31,6 +31,7 @@
     case kGcCauseForNativeAlloc: return "NativeAlloc";
     case kGcCauseCollectorTransition: return "CollectorTransition";
     case kGcCauseDisableMovingGc: return "DisableMovingGc";
+    case kGcCauseHomogeneousSpaceCompact: return "HomogeneousSpaceCompact";
     case kGcCauseTrim: return "HeapTrim";
     default:
       LOG(FATAL) << "Unreachable";
diff --git a/runtime/gc/gc_cause.h b/runtime/gc/gc_cause.h
index 10e6667..1f2643a 100644
--- a/runtime/gc/gc_cause.h
+++ b/runtime/gc/gc_cause.h
@@ -39,6 +39,8 @@
   kGcCauseDisableMovingGc,
   // Not a real GC cause, used when we trim the heap.
   kGcCauseTrim,
+  // GC triggered for background transition when both the foreground and background collectors
+  // are CMS.
+  kGcCauseHomogeneousSpaceCompact,
 };
 
 const char* PrettyCause(GcCause cause);
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 19715e9..4ec9bc2 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -93,6 +93,10 @@
 static constexpr size_t kAllocationStackReserveSize = 1024;
 // Default mark stack size in bytes.
 static const size_t kDefaultMarkStackSize = 64 * KB;
+// Define space name.
+static const char* kDlMallocSpaceName[2] = {"main dlmalloc space", "main dlmalloc space 1"};
+static const char* kRosAllocSpaceName[2] = {"main rosalloc space", "main rosalloc space 1"};
+static const char* kMemMapSpaceName[2] = {"main space", "main space 1"};
 
 Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max_free,
            double target_utilization, double foreground_heap_growth_multiplier, size_t capacity,
@@ -103,7 +107,8 @@
            bool ignore_max_footprint, bool use_tlab,
            bool verify_pre_gc_heap, bool verify_pre_sweeping_heap, bool verify_post_gc_heap,
            bool verify_pre_gc_rosalloc, bool verify_pre_sweeping_rosalloc,
-           bool verify_post_gc_rosalloc)
+           bool verify_post_gc_rosalloc, bool use_homogeneous_space_compaction_for_oom,
+           uint64_t min_interval_homogeneous_space_compaction_by_oom)
     : non_moving_space_(nullptr),
       rosalloc_space_(nullptr),
       dlmalloc_space_(nullptr),
@@ -173,7 +178,11 @@
       verify_object_mode_(kVerifyObjectModeDisabled),
       disable_moving_gc_count_(0),
       running_on_valgrind_(Runtime::Current()->RunningOnValgrind()),
-      use_tlab_(use_tlab) {
+      use_tlab_(use_tlab),
+      main_space_backup_(nullptr),
+      min_interval_homogeneous_space_compaction_by_oom_(min_interval_homogeneous_space_compaction_by_oom),
+      last_time_homogeneous_space_compaction_by_oom_(NanoTime()),
+      use_homogeneous_space_compaction_for_oom_(use_homogeneous_space_compaction_for_oom) {
   if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
     LOG(INFO) << "Heap() entering";
   }
@@ -205,30 +214,90 @@
     CHECK_GT(oat_file_end_addr, image_space->End());
     requested_alloc_space_begin = AlignUp(oat_file_end_addr, kPageSize);
   }
+
+  /*
+  requested_alloc_space_begin ->     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
+                                     +-  nonmoving space (kNonMovingSpaceCapacity) +-
+                                     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
+                                     +-        main alloc space (capacity_)        +-
+                                     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
+                                     +-       main alloc space 1 (capacity_)       +-
+                                     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
+  */
+  bool create_backup_main_space =
+      background_collector_type == gc::kCollectorTypeHomogeneousSpaceCompact ||
+      use_homogeneous_space_compaction_for_oom;
   if (is_zygote) {
     // Reserve the address range before we create the non moving space to make sure bitmaps don't
     // take it.
     std::string error_str;
-    MemMap* mem_map = MemMap::MapAnonymous(
-        "main space", requested_alloc_space_begin + kNonMovingSpaceCapacity, capacity,
+    MemMap* main_space_map = MemMap::MapAnonymous(
+        kMemMapSpaceName[0], requested_alloc_space_begin + kNonMovingSpaceCapacity, capacity_,
         PROT_READ | PROT_WRITE, true, &error_str);
-    CHECK(mem_map != nullptr) << error_str;
+    CHECK(main_space_map != nullptr) << error_str;
+    MemMap* main_space_1_map = nullptr;
+    // Attempt to reserve an extra mem_map for homogeneous space compaction right after the main
+    // space map.
+    if (create_backup_main_space) {
+      main_space_1_map = MemMap::MapAnonymous(kMemMapSpaceName[1], main_space_map->End(), capacity_,
+                                               PROT_READ | PROT_WRITE, true, &error_str);
+      if (main_space_1_map == nullptr) {
+        LOG(WARNING) << "Failed to create map " << kMemMapSpaceName[1] << " with error "
+                     << error_str;
+      }
+    }
     // Non moving space is always dlmalloc since we currently don't have support for multiple
-    // rosalloc spaces.
+    // active rosalloc spaces.
     non_moving_space_ = space::DlMallocSpace::Create(
-        "zygote / non moving space", initial_size, kNonMovingSpaceCapacity, kNonMovingSpaceCapacity,
-        requested_alloc_space_begin, false);
+        "zygote / non moving space", initial_size, kNonMovingSpaceCapacity,
+        kNonMovingSpaceCapacity, requested_alloc_space_begin, false);
     non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
-    CreateMainMallocSpace(mem_map, initial_size, growth_limit, capacity);
+    CreateMainMallocSpace(main_space_map, initial_size, growth_limit_, capacity_);
+    if (main_space_1_map != nullptr) {
+      const char* name = kUseRosAlloc ? kRosAllocSpaceName[1] : kDlMallocSpaceName[1];
+      main_space_backup_ = CreateMallocSpaceFromMemMap(main_space_1_map, initial_size,
+                                                       growth_limit_, capacity_, name, true);
+    }
   } else {
     std::string error_str;
-    MemMap* mem_map = MemMap::MapAnonymous("main/non-moving space", requested_alloc_space_begin,
-                                           capacity, PROT_READ | PROT_WRITE, true, &error_str);
-    CHECK(mem_map != nullptr) << error_str;
+    byte* request_begin = requested_alloc_space_begin;
+    if (request_begin == nullptr) {
+      // Disable homogeneous space compaction since we don't have an image.
+      create_backup_main_space = false;
+    }
+    MemMap* main_space_1_map = nullptr;
+    if (create_backup_main_space) {
+      request_begin += kNonMovingSpaceCapacity;
+      // Attempt to reserve an extra mem_map for homogeneous space compaction right after the
+      // main space map.
+      main_space_1_map = MemMap::MapAnonymous(kMemMapSpaceName[1], request_begin + capacity_,
+                                               capacity_, PROT_READ | PROT_WRITE, true, &error_str);
+      if (main_space_1_map == nullptr) {
+        LOG(WARNING) << "Failed to create map " << kMemMapSpaceName[1] << " with error "
+                     << error_str;
+        request_begin = requested_alloc_space_begin;
+      }
+    }
+    MemMap* main_space_map = MemMap::MapAnonymous(kMemMapSpaceName[0], request_begin, capacity_,
+                                                  PROT_READ | PROT_WRITE, true, &error_str);
+    CHECK(main_space_map != nullptr) << error_str;
+    // Introduce a separate non moving space.
+    if (main_space_1_map != nullptr) {
+      // Do this before creating the main malloc space to prevent bitmaps from being placed here.
+      non_moving_space_ = space::DlMallocSpace::Create(
+          "non moving space", kDefaultInitialSize, kNonMovingSpaceCapacity, kNonMovingSpaceCapacity,
+          requested_alloc_space_begin, false);
+      non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
+    }
     // Create the main free list space, which doubles as the non moving space. We can do this since
     // non zygote means that we won't have any background compaction.
-    CreateMainMallocSpace(mem_map, initial_size, growth_limit, capacity);
-    non_moving_space_ = main_space_;
+    CreateMainMallocSpace(main_space_map, initial_size, growth_limit_, capacity_);
+    if (main_space_1_map != nullptr) {
+      const char* name = kUseRosAlloc ? kRosAllocSpaceName[1] : kDlMallocSpaceName[1];
+      main_space_backup_ = CreateMallocSpaceFromMemMap(main_space_1_map, initial_size,
+                                                       growth_limit_, capacity_, name, true);
+      CHECK(main_space_backup_ != nullptr);
+    } else {
+      non_moving_space_ = main_space_;
+    }
   }
   CHECK(non_moving_space_ != nullptr);
 
@@ -240,7 +309,7 @@
       (IsMovingGc(foreground_collector_type_) || IsMovingGc(background_collector_type_))) {
     // TODO: Place bump-pointer spaces somewhere to minimize size of card table.
     // Divide by 2 for a temporary fix for reducing virtual memory usage.
-    const size_t bump_pointer_space_capacity = capacity / 2;
+    const size_t bump_pointer_space_capacity = capacity_ / 2;
     bump_pointer_space_ = space::BumpPointerSpace::Create("Bump pointer space",
                                                           bump_pointer_space_capacity, nullptr);
     CHECK(bump_pointer_space_ != nullptr) << "Failed to create bump pointer space";
@@ -253,13 +322,25 @@
   if (non_moving_space_ != main_space_) {
     AddSpace(non_moving_space_);
   }
+  if (main_space_backup_ != nullptr) {
+    AddSpace(main_space_backup_);
+  } else {
+    const char* disable_msg = "Disabling homogeneous space compact due to no backup main space";
+    if (background_collector_type_ == gc::kCollectorTypeHomogeneousSpaceCompact) {
+      background_collector_type_ = collector_type_;
+      LOG(WARNING) << disable_msg;
+    } else if (use_homogeneous_space_compaction_for_oom_) {
+      LOG(WARNING) << disable_msg;
+    }
+    use_homogeneous_space_compaction_for_oom_ = false;
+  }
   if (main_space_ != nullptr) {
     AddSpace(main_space_);
   }
 
   // Allocate the large object space.
   if (kUseFreeListSpaceForLOS) {
-    large_object_space_ = space::FreeListSpace::Create("large object space", nullptr, capacity);
+    large_object_space_ = space::FreeListSpace::Create("large object space", nullptr, capacity_);
   } else {
     large_object_space_ = space::LargeObjectMapSpace::Create("large object space");
   }
@@ -328,7 +409,7 @@
   }
   if (kMovingCollector) {
     // TODO: Clean this up.
-    bool generational = foreground_collector_type_ == kCollectorTypeGSS;
+    const bool generational = foreground_collector_type_ == kCollectorTypeGSS;
     semi_space_collector_ = new collector::SemiSpace(this, generational,
                                                      generational ? "generational" : "");
     garbage_collectors_.push_back(semi_space_collector_);
@@ -339,9 +420,8 @@
   }
 
   if (GetImageSpace() != nullptr && main_space_ != nullptr) {
-    // Check that there's no gap between the image space and the main
-    // space so that the immune region won't break (eg. due to a large
-    // object allocated in the gap).
+    // Check that there's no gap between the image space and the main space so that the immune
+    // region won't break (eg. due to a large object allocated in the gap).
     bool no_gap = MemMap::CheckNoGaps(GetImageSpace()->GetMemMap(), main_space_->GetMemMap());
     if (!no_gap) {
       MemMap::DumpMaps(LOG(ERROR));
@@ -358,11 +438,36 @@
   }
 }
 
+space::MallocSpace* Heap::CreateMallocSpaceFromMemMap(MemMap* mem_map, size_t initial_size,
+                                                      size_t growth_limit, size_t capacity,
+                                                      const char* name, bool can_move_objects) {
+  space::MallocSpace* malloc_space = nullptr;
+  if (kUseRosAlloc) {
+    // Create rosalloc space.
+    malloc_space = space::RosAllocSpace::CreateFromMemMap(mem_map, name, kDefaultStartingSize,
+                                                          initial_size, growth_limit, capacity,
+                                                          low_memory_mode_, can_move_objects);
+  } else {
+    malloc_space = space::DlMallocSpace::CreateFromMemMap(mem_map, name, kDefaultStartingSize,
+                                                          initial_size, growth_limit, capacity,
+                                                          can_move_objects);
+  }
+  if (collector::SemiSpace::kUseRememberedSet) {
+    accounting::RememberedSet* rem_set =
+        new accounting::RememberedSet(std::string(name) + " remembered set", this, malloc_space);
+    CHECK(rem_set != nullptr) << "Failed to create main space remembered set";
+    AddRememberedSet(rem_set);
+  }
+  CHECK(malloc_space != nullptr) << "Failed to create " << name;
+  malloc_space->SetFootprintLimit(malloc_space->Capacity());
+  return malloc_space;
+}
+
 void Heap::CreateMainMallocSpace(MemMap* mem_map, size_t initial_size, size_t growth_limit,
                                  size_t capacity) {
   // Is background compaction is enabled?
   bool can_move_objects = IsMovingGc(background_collector_type_) !=
-      IsMovingGc(foreground_collector_type_);
+      IsMovingGc(foreground_collector_type_) || use_homogeneous_space_compaction_for_oom_;
   // If we are the zygote and don't yet have a zygote space, it means that the zygote fork will
   // happen in the future. If this happens and we have kCompactZygote enabled we wish to compact
   // from the main space to the zygote space. If background compaction is enabled, always pass in
@@ -375,26 +480,10 @@
   if (collector::SemiSpace::kUseRememberedSet && main_space_ != nullptr) {
     RemoveRememberedSet(main_space_);
   }
-  if (kUseRosAlloc) {
-    rosalloc_space_ = space::RosAllocSpace::CreateFromMemMap(
-        mem_map, "main rosalloc space", kDefaultStartingSize, initial_size, growth_limit, capacity,
-        low_memory_mode_, can_move_objects);
-    main_space_ = rosalloc_space_;
-    CHECK(main_space_ != nullptr) << "Failed to create rosalloc space";
-  } else {
-    dlmalloc_space_ = space::DlMallocSpace::CreateFromMemMap(
-        mem_map, "main dlmalloc space", kDefaultStartingSize, initial_size, growth_limit, capacity,
-        can_move_objects);
-    main_space_ = dlmalloc_space_;
-    CHECK(main_space_ != nullptr) << "Failed to create dlmalloc space";
-  }
-  main_space_->SetFootprintLimit(main_space_->Capacity());
-  if (collector::SemiSpace::kUseRememberedSet) {
-    accounting::RememberedSet* main_space_rem_set =
-        new accounting::RememberedSet("Main space remembered set", this, main_space_);
-    CHECK(main_space_rem_set != nullptr) << "Failed to create main space remembered set";
-    AddRememberedSet(main_space_rem_set);
-  }
+  const char* name = kUseRosAlloc ? kRosAllocSpaceName[0] : kDlMallocSpaceName[0];
+  main_space_ = CreateMallocSpaceFromMemMap(mem_map, initial_size, growth_limit, capacity, name,
+                                            can_move_objects);
+  SetSpaceAsDefault(main_space_);
   VLOG(heap) << "Created main space " << main_space_;
 }
 
@@ -547,8 +636,11 @@
       RequestCollectorTransition(foreground_collector_type_, 0);
     } else {
       // Don't delay for debug builds since we may want to stress test the GC.
-      RequestCollectorTransition(background_collector_type_, kIsDebugBuild ? 0 :
-          kCollectorTransitionWait);
+      // If background_collector_type_ is kCollectorTypeHomogeneousSpaceCompact then we have
+      // special handling which does a homogeneous space compaction once but then doesn't
+      // transition the collector.
+      RequestCollectorTransition(background_collector_type_,
+                                 kIsDebugBuild ? 0 : kCollectorTransitionWait);
     }
   }
 }
@@ -605,7 +697,7 @@
 }
 
 void Heap::AddSpace(space::Space* space) {
-  DCHECK(space != nullptr);
+  CHECK(space != nullptr);
   WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
   if (space->IsContinuousSpace()) {
     DCHECK(!space->IsDiscontinuousSpace());
@@ -811,7 +903,7 @@
   oss << "Failed to allocate a " << byte_count << " byte allocation with " << total_bytes_free
       << " free bytes";
   // If the allocation failed due to fragmentation, print out the largest continuous allocation.
-  if (allocator_type != kAllocatorTypeLOS && total_bytes_free >= byte_count) {
+  if (total_bytes_free >= byte_count) {
     space::MallocSpace* space = nullptr;
     if (allocator_type == kAllocatorTypeNonMoving) {
       space = non_moving_space_;
@@ -844,6 +936,15 @@
     ScopedThreadStateChange tsc(self, kSleeping);
     usleep(wait_time / 1000);  // Usleep takes microseconds.
   }
+  // Launch homogeneous space compaction if it is desired.
+  if (desired_collector_type == kCollectorTypeHomogeneousSpaceCompact) {
+    if (!CareAboutPauseTimes()) {
+      PerformHomogeneousSpaceCompact();
+    }
+    // No need to Trim(). Homogeneous space compaction may free more virtual and physical memory.
+    desired_collector_type = collector_type_;
+    return;
+  }
   // Transition the collector if the desired collector type is not the same as the current
   // collector type.
   TransitionCollector(desired_collector_type);
@@ -1092,6 +1193,17 @@
   }
 }
 
+space::RosAllocSpace* Heap::GetRosAllocSpace(gc::allocator::RosAlloc* rosalloc) const {
+  for (const auto& space : continuous_spaces_) {
+    if (space->AsContinuousSpace()->IsRosAllocSpace()) {
+      if (space->AsContinuousSpace()->AsRosAllocSpace()->GetRosAlloc() == rosalloc) {
+        return space->AsContinuousSpace()->AsRosAllocSpace();
+      }
+    }
+  }
+  return nullptr;
+}
+
 mirror::Object* Heap::AllocateInternalWithGc(Thread* self, AllocatorType allocator,
                                              size_t alloc_size, size_t* bytes_allocated,
                                              size_t* usable_size,
@@ -1173,6 +1285,44 @@
     return nullptr;
   }
   ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated, usable_size);
+  if (ptr == nullptr && use_homogeneous_space_compaction_for_oom_) {
+    const uint64_t current_time = NanoTime();
+    if ((allocator == kAllocatorTypeRosAlloc || allocator == kAllocatorTypeDlMalloc) &&
+        current_time - last_time_homogeneous_space_compaction_by_oom_ >
+        min_interval_homogeneous_space_compaction_by_oom_) {
+      last_time_homogeneous_space_compaction_by_oom_ = current_time;
+      HomogeneousSpaceCompactResult result = PerformHomogeneousSpaceCompact();
+      switch (result) {
+        case HomogeneousSpaceCompactResult::kSuccess:
+          // If the allocation succeeded, we delayed an oom.
+          ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated, usable_size);
+          if (ptr != nullptr) {
+            count_delayed_oom_++;
+          }
+          break;
+        case HomogeneousSpaceCompactResult::kErrorReject:
+          // Reject due to disabled moving GC.
+          break;
+        case HomogeneousSpaceCompactResult::kErrorVMShuttingDown:
+          // Throw OOM by default.
+          break;
+        default: {
+          LOG(FATAL) << "Unimplemented homogeneous space compaction result " << static_cast<size_t>(result);
+        }
+      }
+      // Always print that we ran homogeneous space compaction since this can cause jank.
+      VLOG(heap) << "Ran heap homogeneous space compaction, "
+                << " requested defragmentation "
+                << count_requested_homogeneous_space_compaction_.LoadSequentiallyConsistent()
+                << " performed defragmentation "
+                << count_performed_homogeneous_space_compaction_.LoadSequentiallyConsistent()
+                << " ignored homogeneous space compaction "
+                << count_ignored_homogeneous_space_compaction_.LoadSequentiallyConsistent()
+                << " delayed count = "
+                << count_delayed_oom_.LoadSequentiallyConsistent();
+    }
+  }
+  // If the allocation hasn't succeeded by this point, throw an OOM error.
   if (ptr == nullptr) {
     ThrowOutOfMemoryError(self, alloc_size, allocator);
   }
@@ -1331,6 +1481,66 @@
   CollectGarbageInternal(gc_plan_.back(), kGcCauseExplicit, clear_soft_references);
 }
 
+HomogeneousSpaceCompactResult Heap::PerformHomogeneousSpaceCompact() {
+  Thread* self = Thread::Current();
+  // Inc requested homogeneous space compaction.
+  count_requested_homogeneous_space_compaction_++;
+  // Store performed homogeneous space compaction at a new request arrival.
+  ThreadList* tl = Runtime::Current()->GetThreadList();
+  ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
+  Locks::mutator_lock_->AssertNotHeld(self);
+  {
+    ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
+    MutexLock mu(self, *gc_complete_lock_);
+    // Ensure there is only one GC at a time.
+    WaitForGcToCompleteLocked(kGcCauseHomogeneousSpaceCompact, self);
+    // Homogeneous space compaction is a copying transition, so we can't run it if the moving GC
+    // disable count is non zero.
+    // If the collector type changed to something which doesn't benefit from homogeneous space
+    // compaction, exit.
+    if (disable_moving_gc_count_ != 0 || IsMovingGc(collector_type_)) {
+      return HomogeneousSpaceCompactResult::kErrorReject;
+    }
+    collector_type_running_ = kCollectorTypeHomogeneousSpaceCompact;
+  }
+  if (Runtime::Current()->IsShuttingDown(self)) {
+    // Don't allow heap transitions to happen if the runtime is shutting down since these can
+    // cause objects to get finalized.
+    FinishGC(self, collector::kGcTypeNone);
+    return HomogeneousSpaceCompactResult::kErrorVMShuttingDown;
+  }
+  // Suspend all threads.
+  tl->SuspendAll();
+  uint64_t start_time = NanoTime();
+  // Launch compaction.
+  space::MallocSpace* to_space = main_space_backup_;
+  space::MallocSpace* from_space = main_space_;
+  to_space->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
+  const uint64_t space_size_before_compaction = from_space->Size();
+  Compact(to_space, from_space, kGcCauseHomogeneousSpaceCompact);
+  // Leave as prot read so that we can still run ROSAlloc verification on this space.
+  from_space->GetMemMap()->Protect(PROT_READ);
+  const uint64_t space_size_after_compaction = to_space->Size();
+  std::swap(main_space_, main_space_backup_);
+  SetSpaceAsDefault(main_space_);  // Set as default to reset the proper dlmalloc space.
+  // Update performed homogeneous space compaction count.
+  count_performed_homogeneous_space_compaction_++;
+  // Print the statistics log and resume all threads.
+  uint64_t duration = NanoTime() - start_time;
+  LOG(INFO) << "Heap homogeneous space compaction took " << PrettyDuration(duration) << " size: "
+            << PrettySize(space_size_before_compaction) << " -> "
+            << PrettySize(space_size_after_compaction) << " compact-ratio: "
+            << std::fixed << static_cast<double>(space_size_after_compaction) /
+            static_cast<double>(space_size_before_compaction);
+  tl->ResumeAll();
+  // Finish GC.
+  reference_processor_.EnqueueClearedReferences(self);
+  GrowForUtilization(semi_space_collector_);
+  FinishGC(self, collector::kGcTypeFull);
+  return HomogeneousSpaceCompactResult::kSuccess;
+}
+
 void Heap::TransitionCollector(CollectorType collector_type) {
   if (collector_type == collector_type_) {
     return;
@@ -1385,7 +1595,7 @@
         // We are transitioning from non moving GC -> moving GC, since we copied from the bump
         // pointer space last transition it will be protected.
         bump_pointer_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
-        Compact(bump_pointer_space_, main_space_);
+        Compact(bump_pointer_space_, main_space_, kGcCauseCollectorTransition);
         // Remove the main space so that we don't try to trim it, this doens't work for debug
         // builds since RosAlloc attempts to read the magic number from a protected page.
         RemoveSpace(main_space_);
@@ -1399,7 +1609,7 @@
         // Compact to the main space from the bump pointer space, don't need to swap semispaces.
         AddSpace(main_space_);
         main_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
-        Compact(main_space_, bump_pointer_space_);
+        Compact(main_space_, bump_pointer_space_, kGcCauseCollectorTransition);
       }
       break;
     }
@@ -1725,14 +1935,15 @@
 }
 
 void Heap::Compact(space::ContinuousMemMapAllocSpace* target_space,
-                   space::ContinuousMemMapAllocSpace* source_space) {
+                   space::ContinuousMemMapAllocSpace* source_space,
+                   GcCause gc_cause) {
   CHECK(kMovingCollector);
   if (target_space != source_space) {
     // Don't swap spaces since this isn't a typical semi space collection.
     semi_space_collector_->SetSwapSemiSpaces(false);
     semi_space_collector_->SetFromSpace(source_space);
     semi_space_collector_->SetToSpace(target_space);
-    semi_space_collector_->Run(kGcCauseCollectorTransition, false);
+    semi_space_collector_->Run(gc_cause, false);
   } else {
     CHECK(target_space->IsBumpPointerSpace())
         << "In-place compaction is only supported for bump pointer spaces";
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 86dab21..b207953 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -72,6 +72,10 @@
   class SemiSpace;
 }  // namespace collector
 
+namespace allocator {
+  class RosAlloc;
+}  // namespace allocator
+
 namespace space {
   class AllocSpace;
   class BumpPointerSpace;
@@ -97,6 +101,15 @@
   }
 };
 
+enum HomogeneousSpaceCompactResult {
+  // Success.
+  kSuccess,
+  // Reject due to disabled moving GC.
+  kErrorReject,
+  // System is shutting down.
+  kErrorVMShuttingDown,
+};
+
 // If true, use rosalloc/RosAllocSpace instead of dlmalloc/DlMallocSpace
 static constexpr bool kUseRosAlloc = true;
 
@@ -151,7 +164,8 @@
                 bool ignore_max_footprint, bool use_tlab,
                 bool verify_pre_gc_heap, bool verify_pre_sweeping_heap, bool verify_post_gc_heap,
                 bool verify_pre_gc_rosalloc, bool verify_pre_sweeping_rosalloc,
-                bool verify_post_gc_rosalloc);
+                bool verify_post_gc_rosalloc, bool use_homogeneous_space_compaction,
+                uint64_t min_interval_homogeneous_space_compaction_by_oom);
 
   ~Heap();
 
@@ -499,6 +513,9 @@
     return rosalloc_space_;
   }
 
+  // Return the corresponding rosalloc space.
+  space::RosAllocSpace* GetRosAllocSpace(gc::allocator::RosAlloc* rosalloc) const;
+
   space::MallocSpace* GetNonMovingSpace() const {
     return non_moving_space_;
   }
@@ -568,12 +585,19 @@
   }
 
  private:
+  // Compact source space to target space.
   void Compact(space::ContinuousMemMapAllocSpace* target_space,
-               space::ContinuousMemMapAllocSpace* source_space)
+               space::ContinuousMemMapAllocSpace* source_space,
+               GcCause gc_cause)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void FinishGC(Thread* self, collector::GcType gc_type) LOCKS_EXCLUDED(gc_complete_lock_);
 
+  bool SupportHSpaceCompaction() const {
+    // Returns true if we can do hspace compaction.
+    return main_space_backup_ != nullptr;
+  }
+
   static ALWAYS_INLINE bool AllocatorHasAllocationStack(AllocatorType allocator_type) {
     return
         allocator_type != kAllocatorTypeBumpPointer &&
@@ -584,7 +608,8 @@
   }
   static bool IsMovingGc(CollectorType collector_type) {
     return collector_type == kCollectorTypeSS || collector_type == kCollectorTypeGSS ||
-        collector_type == kCollectorTypeCC || collector_type == kCollectorTypeMC;
+        collector_type == kCollectorTypeCC || collector_type == kCollectorTypeMC ||
+        collector_type == kCollectorTypeHomogeneousSpaceCompact;
   }
   bool ShouldAllocLargeObject(mirror::Class* c, size_t byte_count) const
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -682,10 +707,18 @@
   // Find a collector based on GC type.
   collector::GarbageCollector* FindCollectorByGcType(collector::GcType gc_type);
 
-  // Create the main free list space, typically either a RosAlloc space or DlMalloc space.
+  // Create a new alloc space and compact the default alloc space into it.
+  HomogeneousSpaceCompactResult PerformHomogeneousSpaceCompact();
+
+  // Create the main free list malloc space, either a RosAlloc space or DlMalloc space.
   void CreateMainMallocSpace(MemMap* mem_map, size_t initial_size, size_t growth_limit,
                              size_t capacity);
 
+  // Create a malloc space based on a mem map. Does not set the space as default.
+  space::MallocSpace* CreateMallocSpaceFromMemMap(MemMap* mem_map, size_t initial_size,
+                                                  size_t growth_limit, size_t capacity,
+                                                  const char* name, bool can_move_objects);
+
   // Given the current contents of the alloc space, increase the allowed heap footprint to match
   // the target utilization ratio.  This should only be called immediately after a full garbage
   // collection.
@@ -972,6 +1005,30 @@
   const bool running_on_valgrind_;
   const bool use_tlab_;
 
+  // Pointer to the space which becomes the new main space when we do homogeneous space compaction.
+  space::MallocSpace* main_space_backup_;
+
+  // Minimal interval allowed between two homogeneous space compactions caused by OOM.
+  uint64_t min_interval_homogeneous_space_compaction_by_oom_;
+
+  // Time of the last homogeneous space compaction caused by OOM.
+  uint64_t last_time_homogeneous_space_compaction_by_oom_;
+
+  // Number of OOMs delayed by homogeneous space compaction.
+  Atomic<size_t> count_delayed_oom_;
+
+  // Count for requested homogeneous space compaction.
+  Atomic<size_t> count_requested_homogeneous_space_compaction_;
+
+  // Count for ignored homogeneous space compaction.
+  Atomic<size_t> count_ignored_homogeneous_space_compaction_;
+
+  // Count for performed homogeneous space compaction.
+  Atomic<size_t> count_performed_homogeneous_space_compaction_;
+
+  // Whether or not we use homogeneous space compaction to avoid OOM errors.
+  bool use_homogeneous_space_compaction_for_oom_;
+
   friend class collector::GarbageCollector;
   friend class collector::MarkCompact;
   friend class collector::MarkSweep;
diff --git a/runtime/gc/space/rosalloc_space.cc b/runtime/gc/space/rosalloc_space.cc
index 5738d47..92c6f53 100644
--- a/runtime/gc/space/rosalloc_space.cc
+++ b/runtime/gc/space/rosalloc_space.cc
@@ -227,7 +227,7 @@
 // Callback from rosalloc when it needs to increase the footprint
 extern "C" void* art_heap_rosalloc_morecore(allocator::RosAlloc* rosalloc, intptr_t increment) {
   Heap* heap = Runtime::Current()->GetHeap();
-  RosAllocSpace* rosalloc_space = heap->GetRosAllocSpace();
+  RosAllocSpace* rosalloc_space = heap->GetRosAllocSpace(rosalloc);
   DCHECK(rosalloc_space != nullptr);
   DCHECK_EQ(rosalloc_space->GetRosAlloc(), rosalloc);
   return rosalloc_space->MoreCore(increment);
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index e1e133f..a016cc5 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -197,13 +197,18 @@
 #else
 #error "ART default GC type must be set"
 #endif
+  // Homogeneous space compaction for OOM is off by default. If HSpaceCompact is selected as
+  // the background GC, a homogeneous space compaction runs when we transition to not jank
+  // perceptible instead of a collector transition.
+  use_homogeneous_space_compaction_for_oom_ = false;
   // If background_collector_type_ is kCollectorTypeNone, it defaults to the collector_type_ after
-  // parsing options.
+  // parsing options. If it is set to kCollectorTypeHomogeneousSpaceCompact then we will do an
+  // hspace compaction when we transition to background instead of a normal collector transition.
   background_collector_type_ = gc::kCollectorTypeSS;
   stack_size_ = 0;  // 0 means default.
   max_spins_before_thin_lock_inflation_ = Monitor::kDefaultMaxSpinsBeforeThinLockInflation;
   low_memory_mode_ = false;
   use_tlab_ = false;
+  min_interval_homogeneous_space_compaction_by_oom_ = MsToNs(100 * 1000);  // 100s.
   verify_pre_gc_heap_ = false;
   // Pre sweeping is the one that usually fails if the GC corrupted the heap.
   verify_pre_sweeping_heap_ = kIsDebugBuild;
@@ -416,6 +421,10 @@
       low_memory_mode_ = true;
     } else if (option == "-XX:UseTLAB") {
       use_tlab_ = true;
+    } else if (option == "-XX:EnableHSpaceCompactForOOM") {
+      use_homogeneous_space_compaction_for_oom_ = true;
+    } else if (option == "-XX:DisableHSpaceCompactForOOM") {
+      use_homogeneous_space_compaction_for_oom_ = false;
     } else if (StartsWith(option, "-D")) {
       properties_.push_back(option.substr(strlen("-D")));
     } else if (StartsWith(option, "-Xjnitrace:")) {
@@ -439,12 +448,17 @@
       if (!ParseStringAfterChar(option, '=', &substring)) {
         return false;
       }
-      gc::CollectorType collector_type = ParseCollectorType(substring);
-      if (collector_type != gc::kCollectorTypeNone) {
-        background_collector_type_ = collector_type;
+      // Special handling for HSpaceCompact since this is only valid as a background GC type.
+      if (substring == "HSpaceCompact") {
+        background_collector_type_ = gc::kCollectorTypeHomogeneousSpaceCompact;
       } else {
-        Usage("Unknown -XX:BackgroundGC option %s\n", substring.c_str());
-        return false;
+        gc::CollectorType collector_type = ParseCollectorType(substring);
+        if (collector_type != gc::kCollectorTypeNone) {
+          background_collector_type_ = collector_type;
+        } else {
+          Usage("Unknown -XX:BackgroundGC option %s\n", substring.c_str());
+          return false;
+        }
       }
     } else if (option == "-XX:+DisableExplicitGC") {
       is_explicit_gc_disabled_ = true;
diff --git a/runtime/parsed_options.h b/runtime/parsed_options.h
index d0f3c12..4c74be6 100644
--- a/runtime/parsed_options.h
+++ b/runtime/parsed_options.h
@@ -88,6 +88,12 @@
   static constexpr uint32_t kExplicitSuspendCheck = 2;
   static constexpr uint32_t kExplicitStackOverflowCheck = 4;
   uint32_t explicit_checks_;
+  // Whether or not we use homogeneous space compaction to avoid OOM errors. If enabled,
+  // the heap will attempt to create an extra space which enables compacting from a malloc space to
+  // another malloc space when we are about to throw OOM.
+  bool use_homogeneous_space_compaction_for_oom_;
+  // Minimal interval allowed between two homogeneous space compactions caused by OOM.
+  uint64_t min_interval_homogeneous_space_compaction_by_oom_;
 
  private:
   ParsedOptions() {}
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 9cbc31b..94d6cdf 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -638,7 +638,9 @@
                        options->verify_post_gc_heap_,
                        options->verify_pre_gc_rosalloc_,
                        options->verify_pre_sweeping_rosalloc_,
-                       options->verify_post_gc_rosalloc_);
+                       options->verify_post_gc_rosalloc_,
+                       options->use_homogeneous_space_compaction_for_oom_,
+                       options->min_interval_homogeneous_space_compaction_by_oom_);
 
   dump_gc_performance_on_shutdown_ = options->dump_gc_performance_on_shutdown_;
 
diff --git a/runtime/thread.h b/runtime/thread.h
index 3f7c21b..bf125f9 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -899,7 +899,7 @@
   // first if possible.
   /***********************************************************************************************/
 
-  struct PACKED(4)  tls_32bit_sized_values {
+  struct PACKED(4) tls_32bit_sized_values {
     // We have no control over the size of 'bool', but want our boolean fields
     // to be 4-byte quantities.
     typedef uint32_t bool32_t;
diff --git a/test/082-inline-execute/src/Main.java b/test/082-inline-execute/src/Main.java
index f412034..739dbf8 100644
--- a/test/082-inline-execute/src/Main.java
+++ b/test/082-inline-execute/src/Main.java
@@ -61,9 +61,6 @@
     test_Memory_pokeShort();
     test_Memory_pokeInt();
     test_Memory_pokeLong();
-    test_AtomicBoolean_compareAndSet();
-    test_AtomicInteger_compareAndSet();
-    test_AtomicLong_compareAndSet();
   }
 
   /*
@@ -96,60 +93,6 @@
     Assert.assertNotNull(Thread.currentThread());
   }
 
-  /**
-   * Will test inlining CAS, by inclusion of AtomicBoolean in core.oat.
-   */
-  public static void test_AtomicBoolean_compareAndSet() {
-    java.util.concurrent.atomic.AtomicBoolean ab = new java.util.concurrent.atomic.AtomicBoolean();
-    Assert.assertEquals(ab.compareAndSet(false, false), true);
-    Assert.assertEquals(ab.compareAndSet(true, false), false);
-    Assert.assertEquals(ab.compareAndSet(true, true), false);
-    Assert.assertEquals(ab.compareAndSet(false, true), true);
-    Assert.assertEquals(ab.compareAndSet(false, true), false);
-    Assert.assertEquals(ab.compareAndSet(false, false), false);
-    Assert.assertEquals(ab.compareAndSet(true, true), true);
-    Assert.assertEquals(ab.compareAndSet(true, false), true);
-    Assert.assertEquals(ab.compareAndSet(true, false), false);
-    Assert.assertEquals(ab.compareAndSet(true, true), false);
-    Assert.assertEquals(ab.compareAndSet(false, false), true);
-  }
-
-  /**
-   * Will test inlining CAS, by inclusion of AtomicInteger in core.oat.
-   */
-  public static void test_AtomicInteger_compareAndSet() {
-    java.util.concurrent.atomic.AtomicInteger ab = new java.util.concurrent.atomic.AtomicInteger();
-    Assert.assertEquals(ab.compareAndSet(0, 0), true);
-    Assert.assertEquals(ab.compareAndSet(0x12345678, 0), false);
-    Assert.assertEquals(ab.compareAndSet(0x12345678, 0x12345678), false);
-    Assert.assertEquals(ab.compareAndSet(0, 0x12345678), true);
-    Assert.assertEquals(ab.compareAndSet(0, 0x12345678), false);
-    Assert.assertEquals(ab.compareAndSet(0, 0), false);
-    Assert.assertEquals(ab.compareAndSet(0x12345678, 0x12345678), true);
-    Assert.assertEquals(ab.compareAndSet(0x12345678, 0), true);
-    Assert.assertEquals(ab.compareAndSet(0x12345678, 0), false);
-    Assert.assertEquals(ab.compareAndSet(0x12345678, 0x12345678), false);
-    Assert.assertEquals(ab.compareAndSet(0, 0), true);
-  }
-
-  /**
-   * Will test inlining CAS, by inclusion of AtomicLong in core.oat.
-   */
-  public static void test_AtomicLong_compareAndSet() {
-    java.util.concurrent.atomic.AtomicLong ab = new java.util.concurrent.atomic.AtomicLong();
-    Assert.assertEquals(ab.compareAndSet(0l, 0l), true);
-    Assert.assertEquals(ab.compareAndSet(0x1234567890l, 0l), false);
-    Assert.assertEquals(ab.compareAndSet(0x1234567890l, 0x1234567890l), false);
-    Assert.assertEquals(ab.compareAndSet(0l, 0x1234567890l), true);
-    Assert.assertEquals(ab.compareAndSet(0l, 0x1234567890l), false);
-    Assert.assertEquals(ab.compareAndSet(0l, 0l), false);
-    Assert.assertEquals(ab.compareAndSet(0x1234567890l, 0x1234567890l), true);
-    Assert.assertEquals(ab.compareAndSet(0x1234567890l, 0l), true);
-    Assert.assertEquals(ab.compareAndSet(0x1234567890l, 0l), false);
-    Assert.assertEquals(ab.compareAndSet(0x1234567890l, 0x1234567890l), false);
-    Assert.assertEquals(ab.compareAndSet(0l, 0l), true);
-  }
-
   public static void test_String_length() {
     String str0 = "";
     String str1 = "x";
@@ -580,6 +523,7 @@
 
   static Object runtime;
   static Method address_of;
+  static Method new_non_movable_array;
   static Method peek_byte;
   static Method peek_short;
   static Method peek_int;
@@ -594,6 +538,7 @@
     Method get_runtime = vm_runtime.getDeclaredMethod("getRuntime");
     runtime = get_runtime.invoke(null);
     address_of = vm_runtime.getDeclaredMethod("addressOf", Object.class);
+    new_non_movable_array = vm_runtime.getDeclaredMethod("newNonMovableArray", Class.class, Integer.TYPE);
 
     Class<?> io_memory = Class.forName("libcore.io.Memory");
     peek_byte = io_memory.getDeclaredMethod("peekByte", Long.TYPE);
@@ -607,7 +552,7 @@
   }
 
   public static void test_Memory_peekByte() throws Exception {
-    byte[] b = new byte [2];
+    byte[] b = (byte[])new_non_movable_array.invoke(runtime, Byte.TYPE, 2);
     b[0] = 0x12;
     b[1] = 0x11;
     long address = (long)address_of.invoke(runtime, b);
@@ -616,7 +561,7 @@
   }
 
   public static void test_Memory_peekShort() throws Exception {
-    byte[] b = new byte [3];
+    byte[] b = (byte[])new_non_movable_array.invoke(runtime, Byte.TYPE, 3);
     b[0] = 0x13;
     b[1] = 0x12;
     b[2] = 0x11;
@@ -626,7 +571,7 @@
   }
 
   public static void test_Memory_peekInt() throws Exception {
-    byte[] b = new byte [5];
+    byte[] b = (byte[])new_non_movable_array.invoke(runtime, Byte.TYPE, 5);
     b[0] = 0x15;
     b[1] = 0x14;
     b[2] = 0x13;
@@ -638,7 +583,7 @@
   }
 
   public static void test_Memory_peekLong() throws Exception {
-    byte[] b = new byte [9];
+    byte[] b = (byte[])new_non_movable_array.invoke(runtime, Byte.TYPE, 9);
     b[0] = 0x19;
     b[1] = 0x18;
     b[2] = 0x17;
@@ -655,7 +600,7 @@
 
   public static void test_Memory_pokeByte() throws Exception {
     byte[] r = {0x11, 0x12};
-    byte[] b = new byte [2];
+    byte[] b = (byte[])new_non_movable_array.invoke(runtime, Byte.TYPE, 2);
     long address = (long)address_of.invoke(runtime, b);
     poke_byte.invoke(null, address, (byte)0x11);
     poke_byte.invoke(null, address + 1, (byte)0x12);
@@ -665,7 +610,7 @@
   public static void test_Memory_pokeShort() throws Exception {
     byte[] ra = {0x12, 0x11, 0x13};
     byte[] ru = {0x12, 0x22, 0x21};
-    byte[] b = new byte [3];
+    byte[] b = (byte[])new_non_movable_array.invoke(runtime, Byte.TYPE, 3);
     long address = (long)address_of.invoke(runtime, b);
 
     // Aligned write
@@ -681,7 +626,7 @@
   public static void test_Memory_pokeInt() throws Exception {
     byte[] ra = {0x14, 0x13, 0x12, 0x11, 0x15};
     byte[] ru = {0x14, 0x24, 0x23, 0x22, 0x21};
-    byte[] b = new byte [5];
+    byte[] b = (byte[])new_non_movable_array.invoke(runtime, Byte.TYPE, 5);
     long address = (long)address_of.invoke(runtime, b);
 
     b[4] = 0x15;
@@ -695,7 +640,7 @@
   public static void test_Memory_pokeLong() throws Exception {
     byte[] ra = {0x18, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x19};
     byte[] ru = {0x18, 0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21};
-    byte[] b = new byte [9];
+    byte[] b = (byte[])new_non_movable_array.invoke(runtime, Byte.TYPE, 9);
     long address = (long)address_of.invoke(runtime, b);
 
     b[8] = 0x19;