-rw-r--r--  runtime/gc/heap.cc       16
-rw-r--r--  runtime/gc/heap.h         1
-rw-r--r--  runtime/gc/heap_test.cc  21
3 files changed, 29 insertions, 9 deletions
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 3dc2cb572e..3011c37f3a 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -145,15 +145,15 @@ static constexpr bool kUsePartialTlabs = true;
#if defined(__LP64__) || !defined(ADDRESS_SANITIZER)
// 300 MB (0x12c00000) - (default non-moving space capacity).
-static uint8_t* const kPreferredAllocSpaceBegin =
- reinterpret_cast<uint8_t*>(300 * MB - Heap::kDefaultNonMovingSpaceCapacity);
+uint8_t* const Heap::kPreferredAllocSpaceBegin =
+ reinterpret_cast<uint8_t*>(300 * MB - kDefaultNonMovingSpaceCapacity);
#else
#ifdef __ANDROID__
// For 32-bit Android, use 0x20000000 because asan reserves 0x04000000 - 0x20000000.
-static uint8_t* const kPreferredAllocSpaceBegin = reinterpret_cast<uint8_t*>(0x20000000);
+uint8_t* const Heap::kPreferredAllocSpaceBegin = reinterpret_cast<uint8_t*>(0x20000000);
#else
// For 32-bit host, use 0x40000000 because asan uses most of the space below this.
-static uint8_t* const kPreferredAllocSpaceBegin = reinterpret_cast<uint8_t*>(0x40000000);
+uint8_t* const Heap::kPreferredAllocSpaceBegin = reinterpret_cast<uint8_t*>(0x40000000);
#endif
#endif
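
Taken together, the three branches pin the allocation space at a platform-appropriate address; on 64-bit (or non-ASan) builds it is placed so that the non-moving space ends exactly at 300 MB. A minimal standalone sketch of that arithmetic, assuming kDefaultNonMovingSpaceCapacity is 64 * MB (the real constant is defined in runtime/gc/heap.h):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    constexpr size_t MB = 1024 * 1024;
    // Assumed value for illustration; the real constant lives in heap.h.
    constexpr size_t kDefaultNonMovingSpaceCapacity = 64 * MB;

    int main() {
      // Mirrors the __LP64__ branch above: start the space low enough that
      // it ends exactly at 300 MB (0x12c00000).
      uint8_t* const begin =
          reinterpret_cast<uint8_t*>(300 * MB - kDefaultNonMovingSpaceCapacity);
      printf("%p\n", static_cast<void*>(begin));  // 0xec00000 under the assumption
      return 0;
    }
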
@@ -382,10 +382,10 @@ Heap::Heap(size_t initial_size,
const char* space_name = is_zygote ? kZygoteSpaceName : kNonMovingSpaceName;
// Reserve the non moving mem map before the other two since it needs to be at a specific
// address.
- non_moving_space_mem_map.reset(
- MemMap::MapAnonymous(space_name, requested_alloc_space_begin,
- non_moving_space_capacity, PROT_READ | PROT_WRITE, true, false,
- &error_str));
+ non_moving_space_mem_map.reset(MapAnonymousPreferredAddress(space_name,
+ requested_alloc_space_begin,
+ non_moving_space_capacity,
+ &error_str));
CHECK(non_moving_space_mem_map != nullptr) << error_str;
// Try to reserve virtual memory at a lower address if we have a separate non moving space.
request_begin = kPreferredAllocSpaceBegin + non_moving_space_capacity;
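
The body of MapAnonymousPreferredAddress is not part of these hunks. A plausible sketch of the wrapper, assuming it keeps the old MapAnonymous parameters and simply retries with a kernel-chosen address when the preferred one is taken (which is what the call site must tolerate, given that the test below deliberately occupies that address):

    static MemMap* MapAnonymousPreferredAddress(const char* name,
                                                uint8_t* request_begin,
                                                size_t capacity,
                                                std::string* out_error_str) {
      while (true) {
        MemMap* map = MemMap::MapAnonymous(name, request_begin, capacity,
                                           PROT_READ | PROT_WRITE,
                                           /*low_4gb*/ true, /*reuse*/ false,
                                           out_error_str);
        if (map != nullptr || request_begin == nullptr) {
          return map;
        }
        // The preferred address was unavailable; retry anywhere.
        request_begin = nullptr;
      }
    }
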
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 4de03318a0..5ce01bc9d2 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -144,6 +144,7 @@ class Heap {
static constexpr size_t kDefaultLargeObjectThreshold = kMinLargeObjectThreshold;
// Whether or not parallel GC is enabled. If not, then we never create the thread pool.
static constexpr bool kDefaultEnableParallelGC = false;
+ static uint8_t* const kPreferredAllocSpaceBegin;
// Whether or not we use the free list large object space. Only use it if USE_ART_LOW_4G_ALLOCATOR
// since this means that we have to use the slow msync loop in MemMap::MapAnonymous.
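
Because a pointer-typed static const member cannot be initialized in class (and reinterpret_cast is not allowed in a constexpr initializer), this declaration requires the out-of-line definitions that the heap.cc hunk supplies. A reduced illustration of the pattern, with Heap stripped down to the one member:

    #include <cstdint>

    // heap.h: declaration only. Unlike the integral constexpr members around
    // it, a pointer constant cannot carry an in-class initializer here.
    struct Heap {
      static uint8_t* const kPreferredAllocSpaceBegin;
    };

    // heap.cc (exactly one translation unit): the defining declaration.
    uint8_t* const Heap::kPreferredAllocSpaceBegin =
        reinterpret_cast<uint8_t*>(0x40000000);
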
diff --git a/runtime/gc/heap_test.cc b/runtime/gc/heap_test.cc
index 2def52450b..c6b2120f5b 100644
--- a/runtime/gc/heap_test.cc
+++ b/runtime/gc/heap_test.cc
@@ -27,7 +27,26 @@
namespace art {
namespace gc {
-class HeapTest : public CommonRuntimeTest {};
+class HeapTest : public CommonRuntimeTest {
+ public:
+ void SetUp() OVERRIDE {
+ MemMap::Init();
+ std::string error_msg;
+ // Reserve the preferred address to force the heap to use another one for testing.
+ reserved_.reset(MemMap::MapAnonymous("ReserveMap",
+ gc::Heap::kPreferredAllocSpaceBegin,
+ 16 * KB,
+ PROT_READ,
+ /*low_4gb*/ true,
+ /*reuse*/ false,
+ &error_msg));
+ ASSERT_TRUE(reserved_ != nullptr) << error_msg;
+ CommonRuntimeTest::SetUp();
+ }
+
+ private:
+ std::unique_ptr<MemMap> reserved_;
+};
TEST_F(HeapTest, ClearGrowthLimit) {
Heap* heap = Runtime::Current()->GetHeap();
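
With the preferred range already held by the PROT_READ mapping from SetUp(), the runtime created by CommonRuntimeTest::SetUp() is forced onto a fallback address. A hypothetical companion assertion, not part of this change (the test name is invented, and it assumes Heap::GetNonMovingSpace() exposes the placed space):

    TEST_F(HeapTest, NonMovingSpaceNotAtPreferredAddress) {
      Heap* heap = Runtime::Current()->GetHeap();
      // reserved_ occupies the preferred begin, so the non-moving space
      // must have been mapped elsewhere.
      EXPECT_NE(heap->GetNonMovingSpace()->Begin(),
                Heap::kPreferredAllocSpaceBegin);
    }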