Diffstat (limited to 'runtime/mem_map.cc')
-rw-r--r--  runtime/mem_map.cc  61
1 file changed, 51 insertions(+), 10 deletions(-)
diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc
index d8c1ec1508..2e335dcae5 100644
--- a/runtime/mem_map.cc
+++ b/runtime/mem_map.cc
@@ -240,6 +240,22 @@ static bool CheckMapRequest(uint8_t* expected_ptr, void* actual_ptr, size_t byte
   return false;
 }
 
+#if USE_ART_LOW_4G_ALLOCATOR
+static inline void* TryMemMapLow4GB(void* ptr, size_t page_aligned_byte_count, int prot, int flags,
+                                    int fd) {
+  void* actual = mmap(ptr, page_aligned_byte_count, prot, flags, fd, 0);
+  if (actual != MAP_FAILED) {
+    // Since we didn't use MAP_FIXED the kernel may have mapped it somewhere not in the low
+    // 4GB. If this is the case, unmap and retry.
+    if (reinterpret_cast<uintptr_t>(actual) + page_aligned_byte_count >= 4 * GB) {
+      munmap(actual, page_aligned_byte_count);
+      actual = MAP_FAILED;
+    }
+  }
+  return actual;
+}
+#endif
+
 MemMap* MemMap::MapAnonymous(const char* name, uint8_t* expected_ptr, size_t byte_count, int prot,
                              bool low_4gb, bool reuse, std::string* error_msg) {
 #ifndef __LP64__
@@ -314,7 +330,39 @@ MemMap* MemMap::MapAnonymous(const char* name, uint8_t* expected_ptr, size_t byt
   if (low_4gb && expected_ptr == nullptr) {
     bool first_run = true;
 
+    MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
     for (uintptr_t ptr = next_mem_pos_; ptr < 4 * GB; ptr += kPageSize) {
+      // Use maps_ as an optimization to skip over large maps.
+      // Find the first map which is address > ptr.
+      auto it = maps_->upper_bound(reinterpret_cast<void*>(ptr));
+      if (it != maps_->begin()) {
+        auto before_it = it;
+        --before_it;
+        // Start at the end of the map before the upper bound.
+        ptr = std::max(ptr, reinterpret_cast<uintptr_t>(before_it->second->BaseEnd()));
+        CHECK_ALIGNED(ptr, kPageSize);
+      }
+      while (it != maps_->end()) {
+        // How much space do we have until the next map?
+        size_t delta = reinterpret_cast<uintptr_t>(it->first) - ptr;
+        // If the space may be sufficient, break out of the loop.
+        if (delta >= page_aligned_byte_count) {
+          break;
+        }
+        // Otherwise, skip to the end of the map.
+        ptr = reinterpret_cast<uintptr_t>(it->second->BaseEnd());
+        CHECK_ALIGNED(ptr, kPageSize);
+        ++it;
+      }
+
+      // Try to see if we get lucky with this address since none of the ART maps overlap.
+      actual = TryMemMapLow4GB(reinterpret_cast<void*>(ptr), page_aligned_byte_count, prot, flags,
+                               fd.get());
+      if (actual != MAP_FAILED) {
+        next_mem_pos_ = reinterpret_cast<uintptr_t>(actual) + page_aligned_byte_count;
+        break;
+      }
+
       if (4U * GB - ptr < page_aligned_byte_count) {
         // Not enough memory until 4GB.
         if (first_run) {
@@ -344,17 +392,10 @@ MemMap* MemMap::MapAnonymous(const char* name, uint8_t* expected_ptr, size_t byt
       next_mem_pos_ = tail_ptr;  // update early, as we break out when we found and mapped a region
 
       if (safe == true) {
-        actual = mmap(reinterpret_cast<void*>(ptr), page_aligned_byte_count, prot, flags, fd.get(),
-                      0);
+        actual = TryMemMapLow4GB(reinterpret_cast<void*>(ptr), page_aligned_byte_count, prot, flags,
+                                 fd.get());
         if (actual != MAP_FAILED) {
-          // Since we didn't use MAP_FIXED the kernel may have mapped it somewhere not in the low
-          // 4GB. If this is the case, unmap and retry.
-          if (reinterpret_cast<uintptr_t>(actual) + page_aligned_byte_count < 4 * GB) {
            break;
-          } else {
-            munmap(actual, page_aligned_byte_count);
-            actual = MAP_FAILED;
-          }
         }
       } else {
         // Skip over last page.
@@ -395,7 +436,7 @@ MemMap* MemMap::MapAnonymous(const char* name, uint8_t* expected_ptr, size_t byt
     return nullptr;
   }
   return new MemMap(name, reinterpret_cast<uint8_t*>(actual), byte_count, actual,
-                    page_aligned_byte_count, prot, false);
+                    page_aligned_byte_count, prot, reuse);
 }
 
 MemMap* MemMap::MapFileAtAddress(uint8_t* expected_ptr, size_t byte_count, int prot, int flags,
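
The substantive change is the gap search in the second hunk: rather than probing every page in the low 4GB with mmap, MapAnonymous now consults maps_ (a sorted map from base address to MemMap*, per the it->first / it->second->BaseEnd() accesses above) to jump straight past address ranges ART already occupies. The following is a minimal, self-contained sketch of that search, not ART code: existing mappings are modeled as a std::map from base address to byte size, the names FindFreeGapBelow4GB and k4GB are illustrative, and a 64-bit target is assumed, as in the USE_ART_LOW_4G_ALLOCATOR build.

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <iterator>
    #include <map>

    constexpr uintptr_t k4GB = uintptr_t{4} * 1024 * 1024 * 1024;

    // Returns an address at or after 'start' whose following 'byte_count' bytes
    // lie below 4 GB and overlap no entry in 'maps', or 0 if no such gap exists.
    // 'maps' holds non-overlapping [base, base + size) regions sorted by base.
    uintptr_t FindFreeGapBelow4GB(const std::map<uintptr_t, size_t>& maps,
                                  uintptr_t start, size_t byte_count) {
      uintptr_t ptr = start;
      // Find the first mapping whose base address is strictly above ptr.
      auto it = maps.upper_bound(ptr);
      if (it != maps.begin()) {
        // If ptr falls before the end of the previous mapping, resume the
        // search at that mapping's end (the before_it->second->BaseEnd() step).
        auto before = std::prev(it);
        ptr = std::max(ptr, before->first + before->second);
      }
      while (true) {
        if (ptr >= k4GB || k4GB - ptr < byte_count) {
          return 0;  // Not enough room left below 4 GB.
        }
        // If there is no next mapping, or the hole before it is big enough,
        // ptr is a usable candidate address.
        if (it == maps.end() || it->first - ptr >= byte_count) {
          return ptr;
        }
        ptr = it->first + it->second;  // Skip past the whole mapping.
        ++it;
      }
    }

A caller would hand the returned candidate to mmap as a hint; since a non-MAP_FIXED hint is only advisory, the result still has to be verified, which is exactly the check the new TryMemMapLow4GB helper centralizes (unmap and report MAP_FAILED if the kernel placed the region at or above 4 GB). The final hunk is an independent one-line fix: MapAnonymous now passes the caller's reuse flag through to the MemMap constructor instead of hard-coding false.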