diff options
| -rw-r--r-- | runtime/base/mutex.cc | 5 | ||||
| -rw-r--r-- | runtime/base/mutex.h | 4 | ||||
| -rw-r--r-- | runtime/mem_map.cc | 39 | ||||
| -rw-r--r-- | runtime/mem_map.h | 28 |
4 files changed, 44 insertions, 32 deletions
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc index e05a85a116..6e102be1a1 100644 --- a/runtime/base/mutex.cc +++ b/runtime/base/mutex.cc @@ -49,7 +49,6 @@ Mutex* Locks::intern_table_lock_ = nullptr; Mutex* Locks::jni_function_table_lock_ = nullptr; Mutex* Locks::jni_libraries_lock_ = nullptr; Mutex* Locks::logging_lock_ = nullptr; -Mutex* Locks::mem_maps_lock_ = nullptr; Mutex* Locks::modify_ldt_lock_ = nullptr; MutatorMutex* Locks::mutator_lock_ = nullptr; Mutex* Locks::profiler_lock_ = nullptr; @@ -1116,10 +1115,6 @@ void Locks::Init() { DCHECK(unexpected_signal_lock_ == nullptr); unexpected_signal_lock_ = new Mutex("unexpected signal lock", current_lock_level, true); - UPDATE_CURRENT_LOCK_LEVEL(kMemMapsLock); - DCHECK(mem_maps_lock_ == nullptr); - mem_maps_lock_ = new Mutex("mem maps lock", current_lock_level); - UPDATE_CURRENT_LOCK_LEVEL(kLoggingLock); DCHECK(logging_lock_ == nullptr); logging_lock_ = new Mutex("logging lock", current_lock_level, true); diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h index 21dd437711..ffe18c6a50 100644 --- a/runtime/base/mutex.h +++ b/runtime/base/mutex.h @@ -55,7 +55,6 @@ class Thread; // [1] http://www.drdobbs.com/parallel/use-lock-hierarchies-to-avoid-deadlock/204801163 enum LockLevel { kLoggingLock = 0, - kMemMapsLock, kSwapMutexesLock, kUnexpectedSignalLock, kThreadSuspendCountLock, @@ -712,9 +711,6 @@ class Locks { // One unexpected signal at a time lock. static Mutex* unexpected_signal_lock_ ACQUIRED_AFTER(thread_suspend_count_lock_); - // Guards the maps in mem_map. - static Mutex* mem_maps_lock_ ACQUIRED_AFTER(unexpected_signal_lock_); - // Have an exclusive logging thread. 
static Mutex* logging_lock_ ACQUIRED_AFTER(unexpected_signal_lock_); }; diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc index 19a65bb27e..dce56b3c58 100644 --- a/runtime/mem_map.cc +++ b/runtime/mem_map.cc @@ -70,6 +70,7 @@ std::ostream& operator<<(std::ostream& os, const MemMap::Maps& mem_maps) { return os; } +std::mutex* MemMap::mem_maps_lock_ = nullptr; MemMap::Maps* MemMap::maps_ = nullptr; #if USE_ART_LOW_4G_ALLOCATOR @@ -139,7 +140,7 @@ bool MemMap::ContainedWithinExistingMap(uint8_t* ptr, size_t size, std::string* // There is a suspicion that BacktraceMap::Create is occasionally missing maps. TODO: Investigate // further. { - MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_); + std::lock_guard<std::mutex> mu(*mem_maps_lock_); for (auto& pair : *maps_) { MemMap* const map = pair.second; if (begin >= reinterpret_cast<uintptr_t>(map->Begin()) && @@ -490,7 +491,7 @@ MemMap::~MemMap() { } // Remove it from maps_. - MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_); + std::lock_guard<std::mutex> mu(*mem_maps_lock_); bool found = false; DCHECK(maps_ != nullptr); for (auto it = maps_->lower_bound(base_begin_), end = maps_->end(); @@ -518,7 +519,7 @@ MemMap::MemMap(const std::string& name, uint8_t* begin, size_t size, void* base_ CHECK_NE(base_size_, 0U); // Add it to maps_. 
- MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_); + std::lock_guard<std::mutex> mu(*mem_maps_lock_); DCHECK(maps_ != nullptr); maps_->insert(std::make_pair(base_begin_, this)); } @@ -637,7 +638,7 @@ bool MemMap::Protect(int prot) { } bool MemMap::CheckNoGaps(MemMap* begin_map, MemMap* end_map) { - MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_); + std::lock_guard<std::mutex> mu(*mem_maps_lock_); CHECK(begin_map != nullptr); CHECK(end_map != nullptr); CHECK(HasMemMap(begin_map)); @@ -656,7 +657,7 @@ bool MemMap::CheckNoGaps(MemMap* begin_map, MemMap* end_map) { } void MemMap::DumpMaps(std::ostream& os, bool terse) { - MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_); + std::lock_guard<std::mutex> mu(*mem_maps_lock_); DumpMapsLocked(os, terse); } @@ -747,17 +748,31 @@ MemMap* MemMap::GetLargestMemMapAt(void* address) { } void MemMap::Init() { - MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_); - if (maps_ == nullptr) { + if (mem_maps_lock_ != nullptr) { // dex2oat calls MemMap::Init twice since it's needed before the runtime is created. - maps_ = new Maps; + return; } + mem_maps_lock_ = new std::mutex(); + // Not for thread safety, but for the annotation that maps_ is GUARDED_BY(mem_maps_lock_). + std::lock_guard<std::mutex> mu(*mem_maps_lock_); + DCHECK(maps_ == nullptr); + maps_ = new Maps; } void MemMap::Shutdown() { - MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_); - delete maps_; - maps_ = nullptr; + if (mem_maps_lock_ == nullptr) { + // If MemMap::Shutdown is called more than once, there is no effect. + return; + } + { + // Not for thread safety, but for the annotation that maps_ is GUARDED_BY(mem_maps_lock_). 
+ std::lock_guard<std::mutex> mu(*mem_maps_lock_); + DCHECK(maps_ != nullptr); + delete maps_; + maps_ = nullptr; + } + delete mem_maps_lock_; + mem_maps_lock_ = nullptr; } void MemMap::SetSize(size_t new_size) { @@ -813,7 +828,7 @@ void* MemMap::MapInternal(void* addr, if (low_4gb && addr == nullptr) { bool first_run = true; - MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_); + std::lock_guard<std::mutex> mu(*mem_maps_lock_); for (uintptr_t ptr = next_mem_pos_; ptr < 4 * GB; ptr += kPageSize) { // Use maps_ as an optimization to skip over large maps. // Find the first map which is address > ptr. diff --git a/runtime/mem_map.h b/runtime/mem_map.h index 0fea1a52c9..71db3f7014 100644 --- a/runtime/mem_map.h +++ b/runtime/mem_map.h @@ -21,6 +21,7 @@ #include <string> #include <map> +#include <mutex> #include <stddef.h> #include <sys/mman.h> // For the PROT_* and MAP_* constants. @@ -120,7 +121,7 @@ class MemMap { std::string* error_msg); // Releases the memory mapping. - ~MemMap() REQUIRES(!Locks::mem_maps_lock_); + ~MemMap() REQUIRES(!MemMap::mem_maps_lock_); const std::string& GetName() const { return name_; } @@ -175,14 +176,17 @@ bool use_ashmem = true); static bool CheckNoGaps(MemMap* begin_map, MemMap* end_map) - REQUIRES(!Locks::mem_maps_lock_); + REQUIRES(!MemMap::mem_maps_lock_); static void DumpMaps(std::ostream& os, bool terse = false) - REQUIRES(!Locks::mem_maps_lock_); + REQUIRES(!MemMap::mem_maps_lock_); typedef AllocationTrackingMultiMap<void*, MemMap*, kAllocatorTagMaps> Maps; - static void Init() REQUIRES(!Locks::mem_maps_lock_); - static void Shutdown() REQUIRES(!Locks::mem_maps_lock_); + // Init and Shutdown are NOT thread safe. + // Both may be called multiple times and MemMap objects may be created any + // time after the first call to Init and before the first call to Shutdown. 
+ static void Init() REQUIRES(!MemMap::mem_maps_lock_); + static void Shutdown() REQUIRES(!MemMap::mem_maps_lock_); // If the map is PROT_READ, try to read each page of the map to check it is in fact readable (not // faulting). This is used to diagnose a bug b/19894268 where mprotect doesn't seem to be working @@ -197,16 +201,16 @@ class MemMap { size_t base_size, int prot, bool reuse, - size_t redzone_size = 0) REQUIRES(!Locks::mem_maps_lock_); + size_t redzone_size = 0) REQUIRES(!MemMap::mem_maps_lock_); static void DumpMapsLocked(std::ostream& os, bool terse) - REQUIRES(Locks::mem_maps_lock_); + REQUIRES(MemMap::mem_maps_lock_); static bool HasMemMap(MemMap* map) - REQUIRES(Locks::mem_maps_lock_); + REQUIRES(MemMap::mem_maps_lock_); static MemMap* GetLargestMemMapAt(void* address) - REQUIRES(Locks::mem_maps_lock_); + REQUIRES(MemMap::mem_maps_lock_); static bool ContainedWithinExistingMap(uint8_t* ptr, size_t size, std::string* error_msg) - REQUIRES(!Locks::mem_maps_lock_); + REQUIRES(!MemMap::mem_maps_lock_); // Internal version of mmap that supports low 4gb emulation. static void* MapInternal(void* addr, @@ -236,8 +240,10 @@ class MemMap { static uintptr_t next_mem_pos_; // Next memory location to check for low_4g extent. #endif + static std::mutex* mem_maps_lock_; + // All the non-empty MemMaps. Use a multimap as we do a reserve-and-divide (eg ElfMap::Load()). - static Maps* maps_ GUARDED_BY(Locks::mem_maps_lock_); + static Maps* maps_ GUARDED_BY(MemMap::mem_maps_lock_); friend class MemMapTest; // To allow access to base_begin_ and base_size_. }; |