Remove mandatory address for non moving space / zygote
This used to be unsafe when the immune region did not correctly
handle gaps between regions.
The immune spaces now handle this correctly, though the GC may be
slightly slower when such a gap occurs.
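
For context, the non-moving / zygote mapping is now only preferred at
kPreferredAllocSpaceBegin rather than required there: if the preferred
address is unavailable, the mapping falls back to an unconstrained one.
A minimal sketch of that fallback, assuming a helper shaped like the
MapAnonymousPreferredAddress call used in heap.cc (its body is not part
of this diff, so the exact implementation here is an assumption):

  // Sketch only: retry the anonymous mapping with no preferred address
  // when the attempt at request_begin fails. The immune spaces now
  // tolerate the gap this can leave between regions.
  static MemMap* MapAnonymousPreferredAddress(const char* name,
                                              uint8_t* request_begin,
                                              size_t capacity,
                                              std::string* out_error_str) {
    while (true) {
      MemMap* map = MemMap::MapAnonymous(name,
                                         request_begin,
                                         capacity,
                                         PROT_READ | PROT_WRITE,
                                         /*low_4gb*/ true,
                                         /*reuse*/ false,
                                         out_error_str);
      if (map != nullptr || request_begin == nullptr) {
        return map;
      }
      // First attempt failed; retry with no specified begin address.
      request_begin = nullptr;
    }
  }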
Test: test-art-host
Bug: 74062530
Change-Id: Ia24db1c0a8c7b3667f3f893d0d9e9ead33d6a248
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index e8f720b..a725ec4 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -149,15 +149,15 @@
#if defined(__LP64__) || !defined(ADDRESS_SANITIZER)
// 300 MB (0x12c00000) - (default non-moving space capacity).
-static uint8_t* const kPreferredAllocSpaceBegin =
- reinterpret_cast<uint8_t*>(300 * MB - Heap::kDefaultNonMovingSpaceCapacity);
+uint8_t* const Heap::kPreferredAllocSpaceBegin =
+ reinterpret_cast<uint8_t*>(300 * MB - kDefaultNonMovingSpaceCapacity);
#else
#ifdef __ANDROID__
// For 32-bit Android, use 0x20000000 because asan reserves 0x04000000 - 0x20000000.
-static uint8_t* const kPreferredAllocSpaceBegin = reinterpret_cast<uint8_t*>(0x20000000);
+uint8_t* const Heap::kPreferredAllocSpaceBegin = reinterpret_cast<uint8_t*>(0x20000000);
#else
// For 32-bit host, use 0x40000000 because asan uses most of the space below this.
-static uint8_t* const kPreferredAllocSpaceBegin = reinterpret_cast<uint8_t*>(0x40000000);
+uint8_t* const Heap::kPreferredAllocSpaceBegin = reinterpret_cast<uint8_t*>(0x40000000);
#endif
#endif
@@ -386,10 +386,10 @@
const char* space_name = is_zygote ? kZygoteSpaceName : kNonMovingSpaceName;
// Reserve the non moving mem map before the other two since it needs to be at a specific
// address.
- non_moving_space_mem_map.reset(
- MemMap::MapAnonymous(space_name, requested_alloc_space_begin,
- non_moving_space_capacity, PROT_READ | PROT_WRITE, true, false,
- &error_str));
+ non_moving_space_mem_map.reset(MapAnonymousPreferredAddress(space_name,
+ requested_alloc_space_begin,
+ non_moving_space_capacity,
+ &error_str));
CHECK(non_moving_space_mem_map != nullptr) << error_str;
// Try to reserve virtual memory at a lower address if we have a separate non moving space.
request_begin = kPreferredAllocSpaceBegin + non_moving_space_capacity;
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index d9eff7b..021fe58 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -144,6 +144,7 @@
static constexpr size_t kDefaultLargeObjectThreshold = kMinLargeObjectThreshold;
// Whether or not parallel GC is enabled. If not, then we never create the thread pool.
static constexpr bool kDefaultEnableParallelGC = false;
+ static uint8_t* const kPreferredAllocSpaceBegin;
// Whether or not we use the free list large object space. Only use it if USE_ART_LOW_4G_ALLOCATOR
// since this means that we have to use the slow msync loop in MemMap::MapAnonymous.
diff --git a/runtime/gc/heap_test.cc b/runtime/gc/heap_test.cc
index 2def524..c6b2120 100644
--- a/runtime/gc/heap_test.cc
+++ b/runtime/gc/heap_test.cc
@@ -27,7 +27,26 @@
namespace art {
namespace gc {
-class HeapTest : public CommonRuntimeTest {};
+class HeapTest : public CommonRuntimeTest {
+ public:
+ void SetUp() OVERRIDE {
+ MemMap::Init();
+ std::string error_msg;
+ // Reserve the preferred address to force the heap to use another one for testing.
+ reserved_.reset(MemMap::MapAnonymous("ReserveMap",
+ gc::Heap::kPreferredAllocSpaceBegin,
+ 16 * KB,
+ PROT_READ,
+ /*low_4gb*/ true,
+ /*reuse*/ false,
+ &error_msg));
+ ASSERT_TRUE(reserved_ != nullptr) << error_msg;
+ CommonRuntimeTest::SetUp();
+ }
+
+ private:
+ std::unique_ptr<MemMap> reserved_;
+};
TEST_F(HeapTest, ClearGrowthLimit) {
Heap* heap = Runtime::Current()->GetHeap();