 compiler/optimizing/optimizing_compiler.cc | 58
 runtime/jit/jit_code_cache.cc              | 98
 runtime/jit/jit_code_cache.h               | 41
 runtime/jit/jit_memory_region.cc           | 27
 runtime/jit/jit_memory_region.h            | 21
 5 files changed, 118 insertions(+), 127 deletions(-)
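The patch below moves the JIT's root-table and stack-map handling out of JitCodeCache and into JitMemoryRegion::CommitData: callers now reserve raw data space with ReserveData and hand CommitCode the stack map bytes, which the region copies next to the root table itself. The layout everything depends on is a run of GcRoot slots, then a uint32_t length word, then the stack map, so the length can be read back at stack_map[-1]. Here is a minimal, self-contained sketch of that layout; the names mirror the patch, but RootSlot is an assumed stand-in for ART's GcRoot<mirror::Object> (a 4-byte slot, as implied by the patch's uint32_t indexing arithmetic):

#include <cassert>
#include <cstdint>
#include <cstring>
#include <vector>

// Stand-in for ART's GcRoot<mirror::Object>; the length-word placement below
// relies on each root slot being 4 bytes, as in the real code.
using RootSlot = uint32_t;

// Mirrors ComputeRootTableSize() from the patch: `number_of_roots` slots plus
// a trailing uint32_t holding the table length.
uint32_t ComputeRootTableSize(uint32_t number_of_roots) {
  return sizeof(uint32_t) + number_of_roots * sizeof(RootSlot);
}

// Mirrors the layout written by JitMemoryRegion::CommitData(): root slots
// first, then the length word, then the stack map bytes.
void CommitData(uint8_t* roots_data,
                const std::vector<RootSlot>& roots,
                const uint8_t* stack_map,
                size_t stack_map_size) {
  const uint32_t length = roots.size();
  std::memcpy(roots_data, roots.data(), length * sizeof(RootSlot));
  // Store the length at the end of the table, immediately before the stack
  // map, so it can be recovered from a `stack_map` pointer.
  std::memcpy(roots_data + length * sizeof(RootSlot), &length, sizeof(length));
  std::memcpy(roots_data + ComputeRootTableSize(length), stack_map, stack_map_size);
}

// Mirrors GetNumberOfRoots(): the word just before the stack map is the count.
uint32_t GetNumberOfRoots(const uint8_t* stack_map) {
  uint32_t length;
  std::memcpy(&length, stack_map - sizeof(uint32_t), sizeof(length));
  return length;
}

int main() {
  std::vector<RootSlot> roots = {0x10, 0x20, 0x30};
  uint8_t stack_map[8] = {1, 2, 3, 4, 5, 6, 7, 8};
  uint8_t buffer[64] = {};
  CommitData(buffer, roots, stack_map, sizeof(stack_map));
  const uint8_t* committed_map = buffer + ComputeRootTableSize(roots.size());
  assert(GetNumberOfRoots(committed_map) == 3);
  return 0;
}

Storing the length at the end of the table is what lets GetNumberOfRoots() and GetRootTable() recover the table from a method's stack map pointer without any extra bookkeeping, which is why the patch can drop the caller-visible stack_map_data pointer entirely.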
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 9da282bd2c..2153ddd7b4 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -1281,36 +1281,31 @@ bool OptimizingCompiler::JitCompile(Thread* self,
   ScopedArenaAllocator stack_map_allocator(&arena_stack);  // Will hold the stack map.
   ScopedArenaVector<uint8_t> stack_map = CreateJniStackMap(&stack_map_allocator,
                                                            jni_compiled_method);
-  uint8_t* stack_map_data = nullptr;
-  uint8_t* roots_data = nullptr;
-  uint32_t data_size = code_cache->ReserveData(self,
-                                               region,
-                                               stack_map.size(),
-                                               /* number_of_roots= */ 0,
-                                               method,
-                                               &stack_map_data,
-                                               &roots_data);
-  if (stack_map_data == nullptr || roots_data == nullptr) {
+  uint8_t* roots_data = code_cache->ReserveData(self,
+                                                region,
+                                                stack_map.size(),
+                                                /* number_of_roots= */ 0,
+                                                method);
+  if (roots_data == nullptr) {
     MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kJitOutOfMemoryForCommit);
     return false;
   }
-  memcpy(stack_map_data, stack_map.data(), stack_map.size());
 
   const void* code = code_cache->CommitCode(
       self,
       region,
       method,
-      stack_map_data,
-      roots_data,
       jni_compiled_method.GetCode().data(),
       jni_compiled_method.GetCode().size(),
-      data_size,
-      osr,
+      stack_map.data(),
+      stack_map.size(),
+      roots_data,
       roots,
+      osr,
       /* has_should_deoptimize_flag= */ false,
       cha_single_implementation_list);
   if (code == nullptr) {
-    code_cache->ClearData(self, region, stack_map_data, roots_data);
+    code_cache->ClearData(self, region, roots_data);
     return false;
   }
 
@@ -1381,20 +1376,15 @@ bool OptimizingCompiler::JitCompile(Thread* self,
   ScopedArenaVector<uint8_t> stack_map = codegen->BuildStackMaps(code_item);
   size_t number_of_roots = codegen->GetNumberOfJitRoots();
-  uint8_t* stack_map_data = nullptr;
-  uint8_t* roots_data = nullptr;
-  uint32_t data_size = code_cache->ReserveData(self,
-                                               region,
-                                               stack_map.size(),
-                                               number_of_roots,
-                                               method,
-                                               &stack_map_data,
-                                               &roots_data);
-  if (stack_map_data == nullptr || roots_data == nullptr) {
+  uint8_t* roots_data = code_cache->ReserveData(self,
+                                                region,
+                                                stack_map.size(),
+                                                number_of_roots,
+                                                method);
+  if (roots_data == nullptr) {
     MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kJitOutOfMemoryForCommit);
     return false;
   }
-  memcpy(stack_map_data, stack_map.data(), stack_map.size());
   std::vector<Handle<mirror::Object>> roots;
   codegen->EmitJitRoots(code_allocator.GetData(), roots_data, &roots);
   // The root Handle<>s filled by the codegen reference entries in the VariableSizedHandleScope.
@@ -1408,25 +1398,25 @@ bool OptimizingCompiler::JitCompile(Thread* self,
       self,
       region,
       method,
-      stack_map_data,
-      roots_data,
       code_allocator.GetMemory().data(),
       code_allocator.GetMemory().size(),
-      data_size,
-      osr,
+      stack_map.data(),
+      stack_map.size(),
+      roots_data,
       roots,
+      osr,
       codegen->GetGraph()->HasShouldDeoptimizeFlag(),
       codegen->GetGraph()->GetCHASingleImplementationList());
   if (code == nullptr) {
     MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kJitOutOfMemoryForCommit);
-    code_cache->ClearData(self, region, stack_map_data, roots_data);
+    code_cache->ClearData(self, region, roots_data);
     return false;
   }
 
   const CompilerOptions& compiler_options = GetCompilerOptions();
   if (compiler_options.GenerateAnyDebugInfo()) {
-    const auto* method_header = reinterpret_cast<const OatQuickMethodHeader*>(code);
+    const OatQuickMethodHeader* method_header = reinterpret_cast<const OatQuickMethodHeader*>(code);
     const uintptr_t code_address = reinterpret_cast<uintptr_t>(method_header->GetCode());
     debug::MethodDebugInfo info = {};
     DCHECK(info.custom_name.empty());
@@ -1443,7 +1433,7 @@ bool OptimizingCompiler::JitCompile(Thread* self,
     info.code_address = code_address;
     info.code_size = code_allocator.GetMemory().size();
     info.frame_size_in_bytes = method_header->GetFrameSizeInBytes();
-    info.code_info = stack_map.size() == 0 ? nullptr : stack_map_data;
+    info.code_info = stack_map.size() == 0 ? nullptr : method_header->GetOptimizedCodeInfoPtr();
     info.cfi = ArrayRef<const uint8_t>(*codegen->GetAssembler()->cfi().data());
     GenerateJitDebugInfo(method, info);
   }
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 4e64af6898..6ea2af0ef9 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -323,25 +323,25 @@ const void* JitCodeCache::GetZygoteSavedEntryPoint(ArtMethod* method) {
 uint8_t* JitCodeCache::CommitCode(Thread* self,
                                   JitMemoryRegion* region,
                                   ArtMethod* method,
-                                  uint8_t* stack_map,
-                                  uint8_t* roots_data,
                                   const uint8_t* code,
                                   size_t code_size,
-                                  size_t data_size,
-                                  bool osr,
+                                  const uint8_t* stack_map,
+                                  size_t stack_map_size,
+                                  uint8_t* roots_data,
                                   const std::vector<Handle<mirror::Object>>& roots,
+                                  bool osr,
                                   bool has_should_deoptimize_flag,
                                   const ArenaSet<ArtMethod*>& cha_single_implementation_list) {
   uint8_t* result = CommitCodeInternal(self,
                                        region,
                                        method,
-                                       stack_map,
-                                       roots_data,
                                        code,
                                        code_size,
-                                       data_size,
-                                       osr,
+                                       stack_map,
+                                       stack_map_size,
+                                       roots_data,
                                        roots,
+                                       osr,
                                        has_should_deoptimize_flag,
                                        cha_single_implementation_list);
   if (result == nullptr) {
@@ -350,13 +350,13 @@ uint8_t* JitCodeCache::CommitCode(Thread* self,
     result = CommitCodeInternal(self,
                                 region,
                                 method,
-                                stack_map,
-                                roots_data,
                                 code,
                                 code_size,
-                                data_size,
-                                osr,
+                                stack_map,
+                                stack_map_size,
+                                roots_data,
                                 roots,
+                                osr,
                                 has_should_deoptimize_flag,
                                 cha_single_implementation_list);
   }
@@ -377,26 +377,12 @@ static uintptr_t FromCodeToAllocation(const void* code) {
   return reinterpret_cast<uintptr_t>(code) - RoundUp(sizeof(OatQuickMethodHeader), alignment);
 }
 
-static uint32_t ComputeRootTableSize(uint32_t number_of_roots) {
-  return sizeof(uint32_t) + number_of_roots * sizeof(GcRoot<mirror::Object>);
-}
-
 static uint32_t GetNumberOfRoots(const uint8_t* stack_map) {
   // The length of the table is stored just before the stack map (and therefore at the end of
   // the table itself), in order to be able to fetch it from a `stack_map` pointer.
   return reinterpret_cast<const uint32_t*>(stack_map)[-1];
 }
 
-static void FillRootTableLength(uint8_t* roots_data, uint32_t length) {
-  // Store the length of the table at the end. This will allow fetching it from a `stack_map`
-  // pointer.
-  reinterpret_cast<uint32_t*>(roots_data)[length] = length;
-}
-
-static const uint8_t* FromStackMapToRoots(const uint8_t* stack_map_data) {
-  return stack_map_data - ComputeRootTableSize(GetNumberOfRoots(stack_map_data));
-}
-
 static void DCheckRootsAreValid(const std::vector<Handle<mirror::Object>>& roots)
     REQUIRES(!Locks::intern_table_lock_) REQUIRES_SHARED(Locks::mutator_lock_) {
   if (!kIsDebugBuild) {
@@ -413,17 +399,6 @@ static void DCheckRootsAreValid(const std::vector<Handle<mirror::Object>>& roots)
   }
 }
 
-void JitCodeCache::FillRootTable(uint8_t* roots_data,
-                                 const std::vector<Handle<mirror::Object>>& roots) {
-  GcRoot<mirror::Object>* gc_roots = reinterpret_cast<GcRoot<mirror::Object>*>(roots_data);
-  const uint32_t length = roots.size();
-  // Put all roots in `roots_data`.
-  for (uint32_t i = 0; i < length; ++i) {
-    ObjPtr<mirror::Object> object = roots[i].Get();
-    gc_roots[i] = GcRoot<mirror::Object>(object);
-  }
-}
-
 static uint8_t* GetRootTable(const void* code_ptr, uint32_t* number_of_roots = nullptr) {
   OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
   uint8_t* data = method_header->GetOptimizedCodeInfoPtr();
@@ -674,13 +649,13 @@ void JitCodeCache::WaitForPotentialCollectionToCompleteRunnable(Thread* self) {
 uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
                                           JitMemoryRegion* region,
                                           ArtMethod* method,
-                                          uint8_t* stack_map,
-                                          uint8_t* roots_data,
                                           const uint8_t* code,
                                           size_t code_size,
-                                          size_t data_size,
-                                          bool osr,
+                                          const uint8_t* stack_map,
+                                          size_t stack_map_size,
+                                          uint8_t* roots_data,
                                           const std::vector<Handle<mirror::Object>>& roots,
+                                          bool osr,
                                           bool has_should_deoptimize_flag,
                                           const ArenaSet<ArtMethod*>& cha_single_implementation_list) {
 
@@ -692,17 +667,23 @@ uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
     DCheckRootsAreValid(roots);
   }
 
+  size_t root_table_size = ComputeRootTableSize(roots.size());
+  uint8_t* stack_map_data = roots_data + root_table_size;
+
   MutexLock mu(self, *Locks::jit_lock_);
   // We need to make sure that there will be no jit-gcs going on and wait for any ongoing one to
   // finish.
   WaitForPotentialCollectionToCompleteRunnable(self);
   const uint8_t* code_ptr = region->AllocateCode(
-      code, code_size, stack_map, has_should_deoptimize_flag);
+      code, code_size, stack_map_data, has_should_deoptimize_flag);
   if (code_ptr == nullptr) {
     return nullptr;
   }
   OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
 
+  // Commit roots and stack maps before updating the entry point.
+  region->CommitData(roots_data, roots, stack_map, stack_map_size);
+
   number_of_compilations_++;
 
   // We need to update the entry point in the runnable state for the instrumentation.
@@ -754,14 +735,6 @@ uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
       }
     }
   } else {
-    // Fill the root table before updating the entry point.
-    DCHECK_EQ(FromStackMapToRoots(stack_map), roots_data);
-    DCHECK_LE(roots_data, stack_map);
-    FillRootTable(roots_data, roots);
-    {
-      // Flush data cache, as compiled code references literals in it.
-      FlushDataCache(roots_data, roots_data + data_size);
-    }
     method_code_map_.Put(code_ptr, method);
     if (osr) {
       number_of_osr_compilations_++;
@@ -963,20 +936,16 @@ size_t JitCodeCache::DataCacheSizeLocked() {
 
 void JitCodeCache::ClearData(Thread* self,
                              JitMemoryRegion* region,
-                             uint8_t* stack_map_data,
                              uint8_t* roots_data) {
-  DCHECK_EQ(FromStackMapToRoots(stack_map_data), roots_data);
   MutexLock mu(self, *Locks::jit_lock_);
   region->FreeData(reinterpret_cast<uint8_t*>(roots_data));
 }
 
-size_t JitCodeCache::ReserveData(Thread* self,
-                                 JitMemoryRegion* region,
-                                 size_t stack_map_size,
-                                 size_t number_of_roots,
-                                 ArtMethod* method,
-                                 uint8_t** stack_map_data,
-                                 uint8_t** roots_data) {
+uint8_t* JitCodeCache::ReserveData(Thread* self,
+                                   JitMemoryRegion* region,
+                                   size_t stack_map_size,
+                                   size_t number_of_roots,
+                                   ArtMethod* method) {
   size_t table_size = ComputeRootTableSize(number_of_roots);
   size_t size = RoundUp(stack_map_size + table_size, sizeof(void*));
   uint8_t* result = nullptr;
@@ -1005,16 +974,7 @@ size_t JitCodeCache::ReserveData(Thread* self,
         << " for stack maps of " << ArtMethod::PrettyMethod(method);
   }
 
-  if (result != nullptr) {
-    *roots_data = result;
-    *stack_map_data = result + table_size;
-    FillRootTableLength(*roots_data, number_of_roots);
-    return size;
-  } else {
-    *roots_data = nullptr;
-    *stack_map_data = nullptr;
-    return 0;
-  }
+  return result;
 }
 
 class MarkCodeClosure final : public Closure {
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index 8a9c010f0a..a4e2964499 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -129,13 +129,13 @@ class JitCodeCache {
   uint8_t* CommitCode(Thread* self,
                       JitMemoryRegion* region,
                       ArtMethod* method,
-                      uint8_t* stack_map,
-                      uint8_t* roots_data,
                       const uint8_t* code,
                       size_t code_size,
-                      size_t data_size,
-                      bool osr,
+                      const uint8_t* stack_map,
+                      size_t stack_map_size,
+                      uint8_t* roots_data,
                       const std::vector<Handle<mirror::Object>>& roots,
+                      bool osr,
                       bool has_should_deoptimize_flag,
                       const ArenaSet<ArtMethod*>& cha_single_implementation_list)
       REQUIRES_SHARED(Locks::mutator_lock_)
@@ -154,22 +154,20 @@ class JitCodeCache {
   // Return the code pointer for a JNI-compiled stub if the method is in the cache, null otherwise.
   const void* GetJniStubCode(ArtMethod* method) REQUIRES(!Locks::jit_lock_);
 
-  // Allocate a region of data that contain `size` bytes, and potentially space
-  // for storing `number_of_roots` roots. Returns null if there is no more room.
-  // Return the number of bytes allocated.
-  size_t ReserveData(Thread* self,
-                     JitMemoryRegion* region,
-                     size_t stack_map_size,
-                     size_t number_of_roots,
-                     ArtMethod* method,
-                     uint8_t** stack_map_data,
-                     uint8_t** roots_data)
+  // Allocate a region of data that will contain a stack map of size `stack_map_size` and
+  // `number_of_roots` roots accessed by the JIT code.
+  // Return a pointer to where roots will be stored.
+  uint8_t* ReserveData(Thread* self,
+                       JitMemoryRegion* region,
+                       size_t stack_map_size,
+                       size_t number_of_roots,
+                       ArtMethod* method)
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Locks::jit_lock_);
 
   // Clear data from the data portion of the code cache.
   void ClearData(
-      Thread* self, JitMemoryRegion* region, uint8_t* stack_map_data, uint8_t* roots_data)
+      Thread* self, JitMemoryRegion* region, uint8_t* roots_data)
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Locks::jit_lock_);
 
@@ -288,23 +286,18 @@ class JitCodeCache {
   uint8_t* CommitCodeInternal(Thread* self,
                               JitMemoryRegion* region,
                               ArtMethod* method,
-                              uint8_t* stack_map,
-                              uint8_t* roots_data,
                               const uint8_t* code,
                               size_t code_size,
-                              size_t data_size,
-                              bool osr,
+                              const uint8_t* stack_map,
+                              size_t stack_map_size,
+                              uint8_t* roots_data,
                               const std::vector<Handle<mirror::Object>>& roots,
+                              bool osr,
                               bool has_should_deoptimize_flag,
                               const ArenaSet<ArtMethod*>& cha_single_implementation_list)
       REQUIRES(!Locks::jit_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  // Adds the given roots to the roots_data. Only a member for annotalysis.
-  void FillRootTable(uint8_t* roots_data, const std::vector<Handle<mirror::Object>>& roots)
-      REQUIRES(Locks::jit_lock_)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
   ProfilingInfo* AddProfilingInfoInternal(Thread* self,
                                           ArtMethod* method,
                                           const std::vector<uint32_t>& entries)
diff --git a/runtime/jit/jit_memory_region.cc b/runtime/jit/jit_memory_region.cc
index c250d6b24d..b8b7827cb4 100644
--- a/runtime/jit/jit_memory_region.cc
+++ b/runtime/jit/jit_memory_region.cc
@@ -374,6 +374,33 @@ const uint8_t* JitMemoryRegion::AllocateCode(const uint8_t* code,
   return result;
 }
 
+static void FillRootTable(uint8_t* roots_data, const std::vector<Handle<mirror::Object>>& roots)
+    REQUIRES(Locks::jit_lock_)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  GcRoot<mirror::Object>* gc_roots = reinterpret_cast<GcRoot<mirror::Object>*>(roots_data);
+  const uint32_t length = roots.size();
+  // Put all roots in `roots_data`.
+  for (uint32_t i = 0; i < length; ++i) {
+    ObjPtr<mirror::Object> object = roots[i].Get();
+    gc_roots[i] = GcRoot<mirror::Object>(object);
+  }
+  // Store the length of the table at the end. This will allow fetching it from a stack_map
+  // pointer.
+  reinterpret_cast<uint32_t*>(roots_data)[length] = length;
+}
+
+void JitMemoryRegion::CommitData(uint8_t* roots_data,
+                                 const std::vector<Handle<mirror::Object>>& roots,
+                                 const uint8_t* stack_map,
+                                 size_t stack_map_size) {
+  size_t root_table_size = ComputeRootTableSize(roots.size());
+  uint8_t* stack_map_data = roots_data + root_table_size;
+  FillRootTable(roots_data, roots);
+  memcpy(stack_map_data, stack_map, stack_map_size);
+  // Flush data cache, as compiled code references literals in it.
+  FlushDataCache(roots_data, roots_data + root_table_size + stack_map_size);
+}
+
 void JitMemoryRegion::FreeCode(const uint8_t* code) {
   code = GetNonExecutableAddress(code);
   used_memory_for_code_ -= mspace_usable_size(code);
diff --git a/runtime/jit/jit_memory_region.h b/runtime/jit/jit_memory_region.h
index 8885ee881f..7aa640ca63 100644
--- a/runtime/jit/jit_memory_region.h
+++ b/runtime/jit/jit_memory_region.h
@@ -23,8 +23,15 @@
 #include "base/globals.h"
 #include "base/locks.h"
 #include "base/mem_map.h"
+#include "gc_root-inl.h"
+#include "handle.h"
 
 namespace art {
+
+namespace mirror {
+class Object;
+}
+
 namespace jit {
 
 class TestZygoteMemory;
@@ -44,6 +51,12 @@ size_t inline GetJitCodeAlignment() {
   return GetInstructionSetAlignment(kRuntimeISA);
 }
 
+// Helper to get the size required for emitting `number_of_roots` in the
+// data portion of a JIT memory region.
+uint32_t inline ComputeRootTableSize(uint32_t number_of_roots) {
+  return sizeof(uint32_t) + number_of_roots * sizeof(GcRoot<mirror::Object>);
+}
+
 // Represents a memory region for the JIT, where code and data are stored. This class
 // provides allocation and deallocation primitives.
 class JitMemoryRegion {
@@ -86,6 +99,14 @@ class JitMemoryRegion {
   uint8_t* AllocateData(size_t data_size) REQUIRES(Locks::jit_lock_);
   void FreeData(uint8_t* data) REQUIRES(Locks::jit_lock_);
 
+  // Emit roots and stack map into the memory pointed by `roots_data`.
+  void CommitData(uint8_t* roots_data,
+                  const std::vector<Handle<mirror::Object>>& roots,
+                  const uint8_t* stack_map,
+                  size_t stack_map_size)
+      REQUIRES(Locks::jit_lock_)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
   bool HasDualCodeMapping() const {
     return non_exec_pages_.IsValid();
   }
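For reference, the caller-side contract after this refactoring, as exercised by both JitCompile paths in optimizing_compiler.cc above: reserve the data region, hand CommitCode the raw stack map bytes (the cache copies them via JitMemoryRegion::CommitData before publishing the entry point), and release the reservation with ClearData if the commit fails. Below is a condensed sketch of that flow; the stub types and the trimmed parameter lists are hypothetical stand-ins for the ART classes, not the real API:

#include <cstddef>
#include <cstdint>
#include <vector>

// Hypothetical stand-ins for the ART types; only the call shape and the
// error-handling order are taken from the patch.
struct Thread {};
struct JitMemoryRegion {};
struct ArtMethod {};

// Condensed JitCodeCache surface (roots/osr/CHA parameters omitted). The
// trivial bodies are placeholders so the sketch compiles on its own.
struct JitCodeCache {
  uint8_t* ReserveData(Thread*, JitMemoryRegion*, size_t stack_map_size,
                       size_t number_of_roots, ArtMethod*) {
    reserved_.assign(sizeof(uint32_t) * (number_of_roots + 1) + stack_map_size, 0);
    return reserved_.data();
  }
  const void* CommitCode(Thread*, JitMemoryRegion*, ArtMethod*,
                         const uint8_t* code, size_t /*code_size*/,
                         const uint8_t* /*stack_map*/, size_t /*stack_map_size*/,
                         uint8_t* /*roots_data*/) {
    return code;  // Placeholder: the real cache copies code, roots, and stack map.
  }
  void ClearData(Thread*, JitMemoryRegion*, uint8_t* /*roots_data*/) {
    reserved_.clear();
  }
  std::vector<uint8_t> reserved_;
};

// Post-patch compile-path skeleton: no caller-side memcpy of the stack map,
// and ClearData takes only `roots_data`, since the stack map location is
// derived inside the cache (roots_data + root table size).
const void* Compile(JitCodeCache* cache, Thread* self, JitMemoryRegion* region,
                    ArtMethod* method,
                    const std::vector<uint8_t>& code,
                    const std::vector<uint8_t>& stack_map,
                    size_t number_of_roots) {
  uint8_t* roots_data =
      cache->ReserveData(self, region, stack_map.size(), number_of_roots, method);
  if (roots_data == nullptr) {
    return nullptr;  // kJitOutOfMemoryForCommit in the real code.
  }
  const void* committed = cache->CommitCode(self, region, method,
                                            code.data(), code.size(),
                                            stack_map.data(), stack_map.size(),
                                            roots_data);
  if (committed == nullptr) {
    cache->ClearData(self, region, roots_data);  // Release the reservation.
  }
  return committed;
}

One consequence visible in the debug-info hunk: since the caller no longer knows where the stack map was copied, it reads the location back from the committed method header (GetOptimizedCodeInfoPtr) instead of the old stack_map_data pointer.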