author     2019-08-20 10:34:02 +0100
committer  2019-09-01 21:08:43 +0000
commit     87fb032ee1e7ae98df26c646c450ef44e23fc805 (patch)
tree       6754b2d2a0e38277885a691d1be6f0d796478cc7
parent     a86a5d162e6b59a32e8ea7991e6c8a157aca5a0a (diff)
Fix JIT data dual mapping for apps.
We don't use dual mapping for apps yet, but it would be nice to make it functional.
Mark pointers into the read-only memory as const, and fix the resulting compile errors.
Test: test.py -b --host --jit
Bug: 119800099
Change-Id: Ic1c45072f3c97f560e843f95fb87b95f754c6e03
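
For context, the dual data mapping this change prepares for keeps two virtual views of the same physical pages: a read-only view whose addresses are handed out (hence the const uint8_t* signatures below), and a writable view used only when the cache itself writes, via JitMemoryRegion::GetWritableDataAddress. The following is a minimal standalone sketch of that scheme, not ART code: CreateDualMapping and GetWritableAddress are illustrative names, and memfd_create makes it Linux-specific.

#include <sys/mman.h>
#include <unistd.h>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>

// Two views of the same physical pages: consumers only ever see `read_only`.
struct DualMapping {
  const uint8_t* read_only;
  uint8_t* writable;
  size_t size;
};

static bool CreateDualMapping(size_t size, DualMapping* out) {
  int fd = memfd_create("jit-data-sketch", 0);  // Anonymous shared pages (Linux).
  if (fd == -1 || ftruncate(fd, size) != 0) {
    return false;
  }
  // Map the file twice: once read-only, once read-write.
  void* ro = mmap(nullptr, size, PROT_READ, MAP_SHARED, fd, 0);
  void* rw = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
  close(fd);  // The two mappings keep the pages alive.
  if (ro == MAP_FAILED || rw == MAP_FAILED) {
    return false;
  }
  *out = DualMapping{static_cast<const uint8_t*>(ro), static_cast<uint8_t*>(rw), size};
  return true;
}

// Rough analogue of JitMemoryRegion::GetWritableDataAddress: translate an
// address in the read-only view to the corresponding writable address.
static uint8_t* GetWritableAddress(const DualMapping& m, const uint8_t* ptr) {
  return m.writable + (ptr - m.read_only);
}

int main() {
  DualMapping m;
  if (!CreateDualMapping(4096, &m)) {
    return 1;
  }
  const uint8_t* data = m.read_only + 128;            // What an allocator would hand out.
  std::memcpy(GetWritableAddress(m, data), "hi", 3);  // Writes go through the alias...
  std::printf("%s\n", reinterpret_cast<const char*>(data));  // ...reads via the RO view.
  return 0;
}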
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc  14
-rw-r--r--  runtime/jit/debugger_interface.cc             4
-rw-r--r--  runtime/jit/jit_code_cache.cc                43
-rw-r--r--  runtime/jit/jit_code_cache.h                 18
-rw-r--r--  runtime/jit/jit_memory_region.cc             17
-rw-r--r--  runtime/jit/jit_memory_region.h              29
6 files changed, 62 insertions, 63 deletions
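
The recurring pattern in the diff below: the data API hands out only const pointers into the read-only view, deletes the overload that would accept a writable pointer, and adds an explicit FreeWritableData for the callers that legitimately hold a writable alias. A toy reduction of that overload trick, using a hypothetical Region class with plain malloc instead of a dual mapping:

#include <cstddef>
#include <cstdint>
#include <cstdlib>

// Toy reduction of the JitMemoryRegion data API after this change.
class Region {
 public:
  // Allocation hands out a read-only address.
  const uint8_t* AllocateData(size_t size) {
    return static_cast<const uint8_t*>(std::malloc(size));
  }
  // Frees take the read-only address...
  void FreeData(const uint8_t* data) {
    FreeWritableData(const_cast<uint8_t*>(data));  // No dual mapping in this toy.
  }
  // ...and passing a writable alias by mistake fails to compile.
  void FreeData(uint8_t* writable_data) = delete;
  // Explicit entry point for callers that really do hold a writable pointer.
  void FreeWritableData(uint8_t* writable_data) { std::free(writable_data); }
};

int main() {
  Region region;
  const uint8_t* data = region.AllocateData(16);
  region.FreeData(data);  // OK: const pointer selects the const overload.
  // uint8_t* w = nullptr; region.FreeData(w);  // Would not compile: deleted overload.
  return 0;
}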
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 8ef1b5516c..c55fc8ad72 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -1290,11 +1290,8 @@ bool OptimizingCompiler::JitCompile(Thread* self,
     ScopedArenaAllocator stack_map_allocator(&arena_stack);  // Will hold the stack map.
     ScopedArenaVector<uint8_t> stack_map = CreateJniStackMap(&stack_map_allocator,
                                                              jni_compiled_method);
-    uint8_t* roots_data = code_cache->ReserveData(self,
-                                                  region,
-                                                  stack_map.size(),
-                                                  /* number_of_roots= */ 0,
-                                                  method);
+    const uint8_t* roots_data = code_cache->ReserveData(
+        self, region, stack_map.size(), /* number_of_roots= */ 0, method);
     if (roots_data == nullptr) {
       MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kJitOutOfMemoryForCommit);
       return false;
@@ -1386,11 +1383,8 @@ bool OptimizingCompiler::JitCompile(Thread* self,
   ScopedArenaVector<uint8_t> stack_map = codegen->BuildStackMaps(code_item);
   size_t number_of_roots = codegen->GetNumberOfJitRoots();

-  uint8_t* roots_data = code_cache->ReserveData(self,
-                                                region,
-                                                stack_map.size(),
-                                                number_of_roots,
-                                                method);
+  const uint8_t* roots_data = code_cache->ReserveData(
+      self, region, stack_map.size(), number_of_roots, method);
   if (roots_data == nullptr) {
     MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kJitOutOfMemoryForCommit);
     return false;
diff --git a/runtime/jit/debugger_interface.cc b/runtime/jit/debugger_interface.cc
index fd1d9a69a0..24ca0fc1e1 100644
--- a/runtime/jit/debugger_interface.cc
+++ b/runtime/jit/debugger_interface.cc
@@ -174,9 +174,7 @@ struct JitNativeInfo {
   static JITDescriptor& Descriptor() { return __jit_debug_descriptor; }
   static void NotifyNativeDebugger() { __jit_debug_register_code_ptr(); }
   static const void* Alloc(size_t size) { return Memory()->AllocateData(size); }
-  static void Free(const void* ptr) {
-    Memory()->FreeData(const_cast<uint8_t*>(reinterpret_cast<const uint8_t*>(ptr)));
-  }
+  static void Free(const void* ptr) { Memory()->FreeData(reinterpret_cast<const uint8_t*>(ptr)); }
   static void Free(void* ptr) = delete;
   template<class T> static T* Writable(const T* v) {
     return const_cast<T*>(Memory()->GetWritableDataAddress(v));
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index dc2bb7c2b4..c0342baa23 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -330,7 +330,7 @@ uint8_t* JitCodeCache::CommitCode(Thread* self,
                                   size_t code_size,
                                   const uint8_t* stack_map,
                                   size_t stack_map_size,
-                                  uint8_t* roots_data,
+                                  const uint8_t* roots_data,
                                   const std::vector<Handle<mirror::Object>>& roots,
                                   bool osr,
                                   bool has_should_deoptimize_flag,
@@ -407,7 +407,7 @@ static void DCheckRootsAreValid(const std::vector<Handle<mirror::Object>>& roots
   }
 }

-static uint8_t* GetRootTable(const void* code_ptr, uint32_t* number_of_roots = nullptr) {
+static const uint8_t* GetRootTable(const void* code_ptr, uint32_t* number_of_roots = nullptr) {
   OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
   uint8_t* data = method_header->GetOptimizedCodeInfoPtr();
   uint32_t roots = GetNumberOfRoots(data);
@@ -454,7 +454,10 @@ void JitCodeCache::SweepRootTables(IsMarkedVisitor* visitor) {
   MutexLock mu(Thread::Current(), *Locks::jit_lock_);
   for (const auto& entry : method_code_map_) {
     uint32_t number_of_roots = 0;
-    uint8_t* roots_data = GetRootTable(entry.first, &number_of_roots);
+    const uint8_t* root_table = GetRootTable(entry.first, &number_of_roots);
+    uint8_t* roots_data = private_region_.IsInDataSpace(root_table)
+        ? private_region_.GetWritableDataAddress(root_table)
+        : shared_region_.GetWritableDataAddress(root_table);
     GcRoot<mirror::Object>* roots = reinterpret_cast<GcRoot<mirror::Object>*>(roots_data);
     for (uint32_t i = 0; i < number_of_roots; ++i) {
       // This does not need a read barrier because this is called by GC.
@@ -581,7 +584,7 @@ void JitCodeCache::RemoveMethodsIn(Thread* self, const LinearAlloc& alloc) {
       ProfilingInfo* info = *it;
       if (alloc.ContainsUnsafe(info->GetMethod())) {
         info->GetMethod()->SetProfilingInfo(nullptr);
-        private_region_.FreeData(reinterpret_cast<uint8_t*>(info));
+        private_region_.FreeWritableData(reinterpret_cast<uint8_t*>(info));
         it = profiling_infos_.erase(it);
       } else {
         ++it;
@@ -672,7 +675,7 @@ uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
                                           size_t code_size,
                                           const uint8_t* stack_map,
                                           size_t stack_map_size,
-                                          uint8_t* roots_data,
+                                          const uint8_t* roots_data,
                                           const std::vector<Handle<mirror::Object>>& roots,
                                           bool osr,
                                           bool has_should_deoptimize_flag,
@@ -687,7 +690,7 @@ uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
   }

   size_t root_table_size = ComputeRootTableSize(roots.size());
-  uint8_t* stack_map_data = roots_data + root_table_size;
+  const uint8_t* stack_map_data = roots_data + root_table_size;

   MutexLock mu(self, *Locks::jit_lock_);
   // We need to make sure that there will be no jit-gcs going on and wait for any ongoing one to
@@ -954,19 +957,19 @@ size_t JitCodeCache::DataCacheSizeLocked() {

 void JitCodeCache::ClearData(Thread* self,
                              JitMemoryRegion* region,
-                             uint8_t* roots_data) {
+                             const uint8_t* roots_data) {
   MutexLock mu(self, *Locks::jit_lock_);
-  region->FreeData(reinterpret_cast<uint8_t*>(roots_data));
+  region->FreeData(roots_data);
 }

-uint8_t* JitCodeCache::ReserveData(Thread* self,
-                                   JitMemoryRegion* region,
-                                   size_t stack_map_size,
-                                   size_t number_of_roots,
-                                   ArtMethod* method) {
+const uint8_t* JitCodeCache::ReserveData(Thread* self,
+                                         JitMemoryRegion* region,
+                                         size_t stack_map_size,
+                                         size_t number_of_roots,
+                                         ArtMethod* method) {
   size_t table_size = ComputeRootTableSize(number_of_roots);
   size_t size = RoundUp(stack_map_size + table_size, sizeof(void*));
-  uint8_t* result = nullptr;
+  const uint8_t* result = nullptr;

   {
     ScopedThreadSuspension sts(self, kSuspended);
@@ -1318,7 +1321,7 @@ void JitCodeCache::DoCollection(Thread* self, bool collect_profiling_info) {
         info->GetMethod()->SetProfilingInfo(info);
       } else if (info->GetMethod()->GetProfilingInfo(kRuntimePointerSize) != info) {
         // No need for this ProfilingInfo object anymore.
-        private_region_.FreeData(reinterpret_cast<uint8_t*>(info));
+        private_region_.FreeWritableData(reinterpret_cast<uint8_t*>(info));
         return true;
       }
       return false;
@@ -1448,11 +1451,12 @@ ProfilingInfo* JitCodeCache::AddProfilingInfoInternal(Thread* self ATTRIBUTE_UNU
     return info;
   }

-  uint8_t* data = private_region_.AllocateData(profile_info_size);
+  const uint8_t* data = private_region_.AllocateData(profile_info_size);
   if (data == nullptr) {
     return nullptr;
   }
-  info = new (data) ProfilingInfo(method, entries);
+  uint8_t* writable_data = private_region_.GetWritableDataAddress(data);
+  info = new (writable_data) ProfilingInfo(method, entries);

   // Make sure other threads see the data in the profiling info object before the
   // store in the ArtMethod's ProfilingInfo pointer.
@@ -1801,7 +1805,8 @@ void ZygoteMap::Initialize(uint32_t number_of_methods) {
   // Allocate for 40-80% capacity. This will offer OK lookup times, and termination
   // cases.
   size_t capacity = RoundUpToPowerOfTwo(number_of_methods * 100 / 80);
-  Entry* data = reinterpret_cast<Entry*>(region_->AllocateData(capacity * sizeof(Entry)));
+  const Entry* data =
+      reinterpret_cast<const Entry*>(region_->AllocateData(capacity * sizeof(Entry)));
   if (data != nullptr) {
     region_->FillData(data, capacity, Entry { nullptr, nullptr });
     map_ = ArrayRef(data, capacity);
@@ -1869,7 +1874,7 @@ void ZygoteMap::Put(const void* code, ArtMethod* method) {
   // be added, we are guaranteed to find a free slot in the array, and
   // therefore for this loop to terminate.
   while (true) {
-    Entry* entry = &map_[index];
+    const Entry* entry = &map_[index];
     if (entry->method == nullptr) {
       // Note that readers can read this memory concurrently, but that's OK as
       // we are writing pointers.
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index 6aa5f317cf..64607b695e 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -116,7 +116,7 @@ class ZygoteMap {
   };

   // The map allocated with `region_`.
-  ArrayRef<Entry> map_;
+  ArrayRef<const Entry> map_;

   // The region in which the map is allocated.
   JitMemoryRegion* const region_;
@@ -183,7 +183,7 @@ class JitCodeCache {
                       size_t code_size,
                       const uint8_t* stack_map,
                       size_t stack_map_size,
-                      uint8_t* roots_data,
+                      const uint8_t* roots_data,
                       const std::vector<Handle<mirror::Object>>& roots,
                       bool osr,
                       bool has_should_deoptimize_flag,
@@ -207,17 +207,17 @@ class JitCodeCache {
   // Allocate a region of data that will contain a stack map of size `stack_map_size` and
   // `number_of_roots` roots accessed by the JIT code.
   // Return a pointer to where roots will be stored.
-  uint8_t* ReserveData(Thread* self,
-                       JitMemoryRegion* region,
-                       size_t stack_map_size,
-                       size_t number_of_roots,
-                       ArtMethod* method)
+  const uint8_t* ReserveData(Thread* self,
+                             JitMemoryRegion* region,
+                             size_t stack_map_size,
+                             size_t number_of_roots,
+                             ArtMethod* method)
       REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::jit_lock_);

   // Clear data from the data portion of the code cache.
   void ClearData(
-      Thread* self, JitMemoryRegion* region, uint8_t* roots_data)
+      Thread* self, JitMemoryRegion* region, const uint8_t* roots_data)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::jit_lock_);

@@ -351,7 +351,7 @@ class JitCodeCache {
                               size_t code_size,
                               const uint8_t* stack_map,
                               size_t stack_map_size,
-                              uint8_t* roots_data,
+                              const uint8_t* roots_data,
                               const std::vector<Handle<mirror::Object>>& roots,
                               bool osr,
                               bool has_should_deoptimize_flag,
diff --git a/runtime/jit/jit_memory_region.cc b/runtime/jit/jit_memory_region.cc
index 39353cacf2..447bbf49ca 100644
--- a/runtime/jit/jit_memory_region.cc
+++ b/runtime/jit/jit_memory_region.cc
@@ -452,11 +452,11 @@ static void FillRootTable(uint8_t* roots_data, const std::vector<Handle<mirror::
   reinterpret_cast<uint32_t*>(roots_data)[length] = length;
 }

-bool JitMemoryRegion::CommitData(uint8_t* roots_data,
+bool JitMemoryRegion::CommitData(const uint8_t* readonly_roots_data,
                                  const std::vector<Handle<mirror::Object>>& roots,
                                  const uint8_t* stack_map,
                                  size_t stack_map_size) {
-  roots_data = GetWritableDataAddress(roots_data);
+  uint8_t* roots_data = GetWritableDataAddress(readonly_roots_data);
   size_t root_table_size = ComputeRootTableSize(roots.size());
   uint8_t* stack_map_data = roots_data + root_table_size;
   FillRootTable(roots_data, roots);
@@ -476,16 +476,19 @@ void JitMemoryRegion::FreeCode(const uint8_t* code) {
   mspace_free(exec_mspace_, const_cast<uint8_t*>(code));
 }

-uint8_t* JitMemoryRegion::AllocateData(size_t data_size) {
+const uint8_t* JitMemoryRegion::AllocateData(size_t data_size) {
   void* result = mspace_malloc(data_mspace_, data_size);
   used_memory_for_data_ += mspace_usable_size(result);
   return reinterpret_cast<uint8_t*>(GetNonWritableDataAddress(result));
 }

-void JitMemoryRegion::FreeData(uint8_t* data) {
-  data = GetWritableDataAddress(data);
-  used_memory_for_data_ -= mspace_usable_size(data);
-  mspace_free(data_mspace_, data);
+void JitMemoryRegion::FreeData(const uint8_t* data) {
+  FreeWritableData(GetWritableDataAddress(data));
+}
+
+void JitMemoryRegion::FreeWritableData(uint8_t* writable_data) REQUIRES(Locks::jit_lock_) {
+  used_memory_for_data_ -= mspace_usable_size(writable_data);
+  mspace_free(data_mspace_, writable_data);
 }

 #if defined(__BIONIC__) && defined(ART_TARGET)
diff --git a/runtime/jit/jit_memory_region.h b/runtime/jit/jit_memory_region.h
index bc05cb6253..2bb69a7984 100644
--- a/runtime/jit/jit_memory_region.h
+++ b/runtime/jit/jit_memory_region.h
@@ -28,8 +28,6 @@

 namespace art {

-struct JitNativeInfo;
-
 namespace mirror {
 class Object;
 }
@@ -89,11 +87,13 @@ class JitMemoryRegion {
                     bool has_should_deoptimize_flag)
       REQUIRES(Locks::jit_lock_);
   void FreeCode(const uint8_t* code) REQUIRES(Locks::jit_lock_);
-  uint8_t* AllocateData(size_t data_size) REQUIRES(Locks::jit_lock_);
-  void FreeData(uint8_t* data) REQUIRES(Locks::jit_lock_);
+  const uint8_t* AllocateData(size_t data_size) REQUIRES(Locks::jit_lock_);
+  void FreeData(const uint8_t* data) REQUIRES(Locks::jit_lock_);
+  void FreeData(uint8_t* writable_data) REQUIRES(Locks::jit_lock_) = delete;
+  void FreeWritableData(uint8_t* writable_data) REQUIRES(Locks::jit_lock_);

   // Emit roots and stack map into the memory pointed by `roots_data`.
-  bool CommitData(uint8_t* roots_data,
+  bool CommitData(const uint8_t* roots_data,
                   const std::vector<Handle<mirror::Object>>& roots,
                   const uint8_t* stack_map,
                   size_t stack_map_size)
@@ -114,14 +114,14 @@ class JitMemoryRegion {
   }

   template <typename T>
-  void FillData(T* address, size_t n, const T& t) REQUIRES(Locks::jit_lock_) {
+  void FillData(const T* address, size_t n, const T& t) REQUIRES(Locks::jit_lock_) {
     std::fill_n(GetWritableDataAddress(address), n, t);
   }

   // Generic helper for writing abritrary data in the data portion of the
   // region.
   template <typename T>
-  void WriteData(T* address, const T& value) {
+  void WriteData(const T* address, const T& value) {
     *GetWritableDataAddress(address) = value;
   }

@@ -179,6 +179,13 @@ class JitMemoryRegion {
     return data_end_;
   }

+  template <typename T> T* GetWritableDataAddress(const T* src_ptr) {
+    if (!HasDualDataMapping()) {
+      return const_cast<T*>(src_ptr);
+    }
+    return const_cast<T*>(TranslateAddress(src_ptr, data_pages_, writable_data_pages_));
+  }
+
  private:
   template <typename T>
   T* TranslateAddress(T* src_ptr, const MemMap& src, const MemMap& dst) {
@@ -212,13 +219,6 @@ class JitMemoryRegion {
     return TranslateAddress(src_ptr, writable_data_pages_, data_pages_);
   }

-  template <typename T> T* GetWritableDataAddress(T* src_ptr) {
-    if (!HasDualDataMapping()) {
-      return src_ptr;
-    }
-    return TranslateAddress(src_ptr, data_pages_, writable_data_pages_);
-  }
-
   template <typename T> T* GetExecutableAddress(T* src_ptr) {
     if (!HasDualCodeMapping()) {
       return src_ptr;
@@ -279,7 +279,6 @@ class JitMemoryRegion {

   friend class ScopedCodeCacheWrite;  // For GetUpdatableCodeMapping
   friend class TestZygoteMemory;
-  friend struct art::JitNativeInfo;  // For GetWritableDataAddress.
 };

 }  // namespace jit