diff options
 runtime/mem_map.cc      | 16 +++++++++++++-
 runtime/mem_map_test.cc | 53 +++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 68 insertions(+), 1 deletion(-)
diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc index 1594338df3..98b0bbf125 100644 --- a/runtime/mem_map.cc +++ b/runtime/mem_map.cc @@ -128,6 +128,20 @@ MemMap* MemMap::MapAnonymous(const char* name, byte* expected, size_t byte_count // We need to store and potentially set an error number for pretty printing of errors int saved_errno = 0; +#ifdef __LP64__ + // When requesting low_4g memory and having an expectation, the requested range should fit into + // 4GB. + if (low_4gb && ( + // Start out of bounds. + (reinterpret_cast<uintptr_t>(expected) >> 32) != 0 || + // End out of bounds. For simplicity, this will fail for the last page of memory. + (reinterpret_cast<uintptr_t>(expected + page_aligned_byte_count) >> 32) != 0)) { + *error_msg = StringPrintf("The requested address space (%p, %p) cannot fit in low_4gb", + expected, expected + page_aligned_byte_count); + return nullptr; + } +#endif + // TODO: // A page allocator would be a useful abstraction here, as // 1) It is doubtful that MAP_32BIT on x86_64 is doing the right job for us @@ -192,7 +206,7 @@ MemMap* MemMap::MapAnonymous(const char* name, byte* expected, size_t byte_count #else #ifdef __x86_64__ - if (low_4gb) { + if (low_4gb && expected == nullptr) { flags |= MAP_32BIT; } #endif diff --git a/runtime/mem_map_test.cc b/runtime/mem_map_test.cc index eea330799c..2b59cd9a4a 100644 --- a/runtime/mem_map_test.cc +++ b/runtime/mem_map_test.cc @@ -163,4 +163,57 @@ TEST_F(MemMapTest, RemapAtEnd32bit) { } #endif +TEST_F(MemMapTest, MapAnonymousExactAddr32bitHighAddr) { + std::string error_msg; + UniquePtr<MemMap> map(MemMap::MapAnonymous("MapAnonymousExactAddr32bitHighAddr", + reinterpret_cast<byte*>(0x71000000), + 0x21000000, + PROT_READ | PROT_WRITE, + true, + &error_msg)); + ASSERT_TRUE(map.get() != nullptr) << error_msg; + ASSERT_TRUE(error_msg.empty()); + ASSERT_EQ(reinterpret_cast<uintptr_t>(BaseBegin(map.get())), 0x71000000U); +} + +TEST_F(MemMapTest, MapAnonymousOverflow) { + std::string error_msg; + 
uintptr_t ptr = 0; + ptr -= kPageSize; // Now it's close to the top. + UniquePtr<MemMap> map(MemMap::MapAnonymous("MapAnonymousOverflow", + reinterpret_cast<byte*>(ptr), + 2 * kPageSize, // brings it over the top. + PROT_READ | PROT_WRITE, + false, + &error_msg)); + ASSERT_EQ(nullptr, map.get()); + ASSERT_FALSE(error_msg.empty()); +} + +#ifdef __LP64__ +TEST_F(MemMapTest, MapAnonymousLow4GBExpectedTooHigh) { + std::string error_msg; + UniquePtr<MemMap> map(MemMap::MapAnonymous("MapAnonymousLow4GBExpectedTooHigh", + reinterpret_cast<byte*>(UINT64_C(0x100000000)), + kPageSize, + PROT_READ | PROT_WRITE, + true, + &error_msg)); + ASSERT_EQ(nullptr, map.get()); + ASSERT_FALSE(error_msg.empty()); +} + +TEST_F(MemMapTest, MapAnonymousLow4GBRangeTooHigh) { + std::string error_msg; + UniquePtr<MemMap> map(MemMap::MapAnonymous("MapAnonymousLow4GBRangeTooHigh", + reinterpret_cast<byte*>(0xF0000000), + 0x20000000, + PROT_READ | PROT_WRITE, + true, + &error_msg)); + ASSERT_EQ(nullptr, map.get()); + ASSERT_FALSE(error_msg.empty()); +} +#endif + } // namespace art