 compiler/optimizing/optimizing_compiler.cc | 66
 runtime/jit/jit_code_cache.cc              |  3
 runtime/oat.h                              |  4
 runtime/oat_quick_method_header.cc         |  8
 runtime/stack.cc                           |  5
 5 files changed, 67 insertions(+), 19 deletions(-)
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 5352f26e46..f4bafcbef0 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -61,6 +61,7 @@
 #include "ssa_builder.h"
 #include "ssa_liveness_analysis.h"
 #include "ssa_phi_elimination.h"
+#include "stack_map_stream.h"
 #include "utils/assembler.h"
 #include "verifier/verifier_compiler_binding.h"
@@ -1106,14 +1107,35 @@ CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
   return compiled_method;
 }
 
+static void CreateJniStackMap(ArenaStack* arena_stack,
+                              const JniCompiledMethod& jni_compiled_method,
+                              /* out */ ArenaVector<uint8_t>* stack_map,
+                              /* out */ ArenaVector<uint8_t>* method_info) {
+  ScopedArenaAllocator allocator(arena_stack);
+  StackMapStream stack_map_stream(&allocator, jni_compiled_method.GetInstructionSet());
+  stack_map_stream.BeginMethod(
+      jni_compiled_method.GetFrameSize(),
+      jni_compiled_method.GetCoreSpillMask(),
+      jni_compiled_method.GetFpSpillMask(),
+      /* num_dex_registers */ 0);
+  stack_map_stream.EndMethod();
+  stack_map->resize(stack_map_stream.PrepareForFillIn());
+  method_info->resize(stack_map_stream.ComputeMethodInfoSize());
+  stack_map_stream.FillInCodeInfo(MemoryRegion(stack_map->data(), stack_map->size()));
+  stack_map_stream.FillInMethodInfo(MemoryRegion(method_info->data(), method_info->size()));
+}
+
 CompiledMethod* OptimizingCompiler::JniCompile(uint32_t access_flags,
                                                uint32_t method_idx,
                                                const DexFile& dex_file,
                                                Handle<mirror::DexCache> dex_cache) const {
+  Runtime* runtime = Runtime::Current();
+  ArenaAllocator allocator(runtime->GetArenaPool());
+  ArenaStack arena_stack(runtime->GetArenaPool());
+
   const CompilerOptions& compiler_options = GetCompilerDriver()->GetCompilerOptions();
   if (compiler_options.IsBootImage()) {
     ScopedObjectAccess soa(Thread::Current());
-    Runtime* runtime = Runtime::Current();
     ArtMethod* method = runtime->GetClassLinker()->LookupResolvedMethod(
         method_idx, dex_cache.Get(), /* class_loader */ nullptr);
     if (method != nullptr && UNLIKELY(method->IsIntrinsic())) {
@@ -1128,8 +1150,6 @@ CompiledMethod* OptimizingCompiler::JniCompile(uint32_t access_flags,
           access_flags,
           /* verified_method */ nullptr,
           dex_cache);
-      ArenaAllocator allocator(runtime->GetArenaPool());
-      ArenaStack arena_stack(runtime->GetArenaPool());
       CodeVectorAllocator code_allocator(&allocator);
       VariableSizedHandleScope handles(soa.Self());
       // Go to native so that we don't block GC during compilation.
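Editor's note: the new CreateJniStackMap helper above is the heart of the patch. It drives StackMapStream through a minimal begin/end cycle so that a JNI method ends up with a well-formed, essentially empty CodeInfo. The same helper is restated below with explanatory comments added; the code itself is as in the hunk above.

// CreateJniStackMap as added by this patch, with comments spelling out
// what each StackMapStream call contributes.
static void CreateJniStackMap(ArenaStack* arena_stack,
                              const JniCompiledMethod& jni_compiled_method,
                              /* out */ ArenaVector<uint8_t>* stack_map,
                              /* out */ ArenaVector<uint8_t>* method_info) {
  // Scoped allocator: StackMapStream's working tables are freed when this
  // function returns; only the serialized bytes in the out-params survive.
  ScopedArenaAllocator allocator(arena_stack);
  StackMapStream stack_map_stream(&allocator, jni_compiled_method.GetInstructionSet());
  // Record the frame geometry produced by the JNI compiler. A JNI stub
  // manages no dex registers, hence num_dex_registers == 0.
  stack_map_stream.BeginMethod(
      jni_compiled_method.GetFrameSize(),
      jni_compiled_method.GetCoreSpillMask(),
      jni_compiled_method.GetFpSpillMask(),
      /* num_dex_registers */ 0);
  // No BeginStackMapEntry() calls in between: the resulting CodeInfo
  // describes the frame but contains no safepoint stack maps.
  stack_map_stream.EndMethod();
  // PrepareForFillIn()/ComputeMethodInfoSize() return the encoded sizes;
  // the FillIn* calls then serialize into the buffers sized from them.
  stack_map->resize(stack_map_stream.PrepareForFillIn());
  method_info->resize(stack_map_stream.ComputeMethodInfoSize());
  stack_map_stream.FillInCodeInfo(MemoryRegion(stack_map->data(), stack_map->size()));
  stack_map_stream.FillInMethodInfo(MemoryRegion(method_info->data(), method_info->size()));
}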
@@ -1155,6 +1175,10 @@ CompiledMethod* OptimizingCompiler::JniCompile(uint32_t access_flags,
   JniCompiledMethod jni_compiled_method = ArtQuickJniCompileMethod(
       compiler_options, access_flags, method_idx, dex_file);
   MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kCompiledNativeStub);
+
+  ArenaVector<uint8_t> stack_map(allocator.Adapter(kArenaAllocStackMaps));
+  ArenaVector<uint8_t> method_info(allocator.Adapter(kArenaAllocStackMaps));
+  CreateJniStackMap(&arena_stack, jni_compiled_method, &stack_map, &method_info);
   return CompiledMethod::SwapAllocCompiledMethod(
       GetCompilerDriver(),
       jni_compiled_method.GetInstructionSet(),
@@ -1162,8 +1186,8 @@ CompiledMethod* OptimizingCompiler::JniCompile(uint32_t access_flags,
       jni_compiled_method.GetFrameSize(),
       jni_compiled_method.GetCoreSpillMask(),
       jni_compiled_method.GetFpSpillMask(),
-      /* method_info */ ArrayRef<const uint8_t>(),
-      /* vmap_table */ ArrayRef<const uint8_t>(),
+      ArrayRef<const uint8_t>(method_info),
+      ArrayRef<const uint8_t>(stack_map),
       jni_compiled_method.GetCfi(),
       /* patches */ ArrayRef<const linker::LinkerPatch>());
 }
@@ -1223,18 +1247,42 @@ bool OptimizingCompiler::JitCompile(Thread* self,
     ScopedNullHandle<mirror::ObjectArray<mirror::Object>> roots;
     ArenaSet<ArtMethod*, std::less<ArtMethod*>> cha_single_implementation_list(
         allocator.Adapter(kArenaAllocCHA));
+    ArenaVector<uint8_t> stack_map(allocator.Adapter(kArenaAllocStackMaps));
+    ArenaVector<uint8_t> method_info(allocator.Adapter(kArenaAllocStackMaps));
+    ArenaStack arena_stack(runtime->GetJitArenaPool());
+    // StackMapStream is large and it does not fit into this frame, so we need helper method.
+    // TODO: Try to avoid the extra memory copy that results from this.
+    CreateJniStackMap(&arena_stack, jni_compiled_method, &stack_map, &method_info);
+    uint8_t* stack_map_data = nullptr;
+    uint8_t* method_info_data = nullptr;
+    uint8_t* roots_data = nullptr;
+    uint32_t data_size = code_cache->ReserveData(self,
+                                                 stack_map.size(),
+                                                 method_info.size(),
+                                                 /* number_of_roots */ 0,
+                                                 method,
+                                                 &stack_map_data,
+                                                 &method_info_data,
+                                                 &roots_data);
+    if (stack_map_data == nullptr || roots_data == nullptr) {
+      MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kJitOutOfMemoryForCommit);
+      return false;
+    }
+    memcpy(stack_map_data, stack_map.data(), stack_map.size());
+    memcpy(method_info_data, method_info.data(), method_info.size());
+
     const void* code = code_cache->CommitCode(
         self,
         method,
-        /* stack_map_data */ nullptr,
-        /* method_info_data */ nullptr,
-        /* roots_data */ nullptr,
+        stack_map_data,
+        method_info_data,
+        roots_data,
         jni_compiled_method.GetFrameSize(),
         jni_compiled_method.GetCoreSpillMask(),
         jni_compiled_method.GetFpSpillMask(),
         jni_compiled_method.GetCode().data(),
         jni_compiled_method.GetCode().size(),
-        /* data_size */ 0u,
+        data_size,
         osr,
         roots,
         /* has_should_deoptimize_flag */ false,
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 3d7fe89cd0..fe154a92f5 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -734,7 +734,6 @@ uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
     bool has_should_deoptimize_flag,
     const ArenaSet<ArtMethod*>& cha_single_implementation_list) {
-  DCHECK_NE(stack_map != nullptr, method->IsNative());
   DCHECK(!method->IsNative() || !osr);
   size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
   // Ensure the header ends up at expected instruction alignment.
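Editor's note: the JitCompile hunk and the first jit_code_cache.cc hunk move in lockstep. The JIT path now builds and installs a stack map for JNI stubs, so the cache drops its assertion that native methods arrive without one. The publish sequence is reserve/copy/commit; a condensed restatement follows, in which the wrapper function PublishJniStub is illustrative, the ReserveData()/CommitCode() signatures are as they appear above, and the trailing cha_single_implementation_list argument follows the declaration visible in the JitCompile context (stats recording is omitted here).

// Sketch: how the JIT path publishes a JNI stub together with its new
// stack map. The wrapper is hypothetical; all calls are from this diff.
const void* PublishJniStub(Thread* self,
                           jit::JitCodeCache* code_cache,
                           ArtMethod* method,
                           const JniCompiledMethod& jni_compiled_method,
                           const ArenaVector<uint8_t>& stack_map,
                           const ArenaVector<uint8_t>& method_info,
                           bool osr,
                           Handle<mirror::ObjectArray<mirror::Object>> roots,
                           const ArenaSet<ArtMethod*>& cha_single_implementation_list) {
  // 1. Reserve room in the cache's data region. A JNI stub references no
  //    GC roots, so number_of_roots is zero.
  uint8_t* stack_map_data = nullptr;
  uint8_t* method_info_data = nullptr;
  uint8_t* roots_data = nullptr;
  uint32_t data_size = code_cache->ReserveData(self,
                                               stack_map.size(),
                                               method_info.size(),
                                               /* number_of_roots */ 0,
                                               method,
                                               &stack_map_data,
                                               &method_info_data,
                                               &roots_data);
  if (stack_map_data == nullptr || roots_data == nullptr) {
    return nullptr;  // Out of memory in the code cache.
  }
  // 2. Copy the arena-built buffers into the cache-owned region; the TODO
  //    in the hunk above refers to eliminating this extra copy.
  memcpy(stack_map_data, stack_map.data(), stack_map.size());
  memcpy(method_info_data, method_info.data(), method_info.size());
  // 3. Commit the machine code. Unlike before this patch, the metadata
  //    pointers and data_size are now real rather than nullptr/0u.
  return code_cache->CommitCode(self,
                                method,
                                stack_map_data,
                                method_info_data,
                                roots_data,
                                jni_compiled_method.GetFrameSize(),
                                jni_compiled_method.GetCoreSpillMask(),
                                jni_compiled_method.GetFpSpillMask(),
                                jni_compiled_method.GetCode().data(),
                                jni_compiled_method.GetCode().size(),
                                data_size,
                                osr,
                                roots,
                                /* has_should_deoptimize_flag */ false,
                                cha_single_implementation_list);
}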
@@ -816,8 +815,6 @@ uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
     // but below we still make the compiled code valid for the method.
     MutexLock mu(self, lock_);
     if (UNLIKELY(method->IsNative())) {
-      DCHECK(stack_map == nullptr);
-      DCHECK(roots_data == nullptr);
       auto it = jni_stubs_map_.find(JniStubKey(method));
       DCHECK(it != jni_stubs_map_.end())
           << "Entry inserted in NotifyCompilationOf() should be alive.";
diff --git a/runtime/oat.h b/runtime/oat.h
index 6c3cc20032..01ef424b92 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -32,8 +32,8 @@ class InstructionSetFeatures;
 class PACKED(4) OatHeader {
  public:
   static constexpr uint8_t kOatMagic[] = { 'o', 'a', 't', '\n' };
-  // Last oat version changed reason: Remove explicit size from CodeInfo.
-  static constexpr uint8_t kOatVersion[] = { '1', '5', '1', '\0' };
+  // Last oat version changed reason: Add CodeInfo for JNI methods.
+  static constexpr uint8_t kOatVersion[] = { '1', '5', '2', '\0' };
 
   static constexpr const char* kImageLocationKey = "image-location";
   static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline";
diff --git a/runtime/oat_quick_method_header.cc b/runtime/oat_quick_method_header.cc
index 52714f916b..0b239c1919 100644
--- a/runtime/oat_quick_method_header.cc
+++ b/runtime/oat_quick_method_header.cc
@@ -40,15 +40,15 @@ uint32_t OatQuickMethodHeader::ToDexPc(ArtMethod* method,
                                        bool abort_on_failure) const {
   const void* entry_point = GetEntryPoint();
   uint32_t sought_offset = pc - reinterpret_cast<uintptr_t>(entry_point);
-  if (IsOptimized()) {
+  if (method->IsNative()) {
+    return dex::kDexNoIndex;
+  } else {
+    DCHECK(IsOptimized());
     CodeInfo code_info(this);
     StackMap stack_map = code_info.GetStackMapForNativePcOffset(sought_offset);
     if (stack_map.IsValid()) {
       return stack_map.GetDexPc();
     }
-  } else {
-    DCHECK(method->IsNative());
-    return dex::kDexNoIndex;
   }
   if (abort_on_failure) {
     ScopedObjectAccess soa(Thread::Current());
diff --git a/runtime/stack.cc b/runtime/stack.cc
index 2fb8c413e9..e99cb1b9fa 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -814,7 +814,10 @@ void StackVisitor::WalkStack(bool include_transitions) {
       if ((walk_kind_ == StackWalkKind::kIncludeInlinedFrames)
           && (cur_oat_quick_method_header_ != nullptr)
-          && cur_oat_quick_method_header_->IsOptimized()) {
+          && cur_oat_quick_method_header_->IsOptimized()
+          // JNI methods cannot have any inlined frames.
+          && !method->IsNative()) {
+        DCHECK_NE(cur_quick_frame_pc_, 0u);
         CodeInfo code_info(cur_oat_quick_method_header_);
         uint32_t native_pc_offset =
             cur_oat_quick_method_header_->NativeQuickPcOffset(cur_quick_frame_pc_);
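Editor's note: the oat_quick_method_header.cc and stack.cc hunks encode the same new rule. A JNI method now owns a real CodeInfo, but one with no safepoint entries and no inlined frames, so dex-PC mapping and inline-frame walking must still bypass it (and the oat version bump to 152 reflects the format change). A small illustration of what a reader of the new header can and cannot expect; the function DescribeJniFrame is hypothetical, while the accessors are those used in the hunks above.

// Illustrative only: what the new JNI CodeInfo does and does not contain.
uint32_t DescribeJniFrame(ArtMethod* method,
                          const OatQuickMethodHeader* header,
                          uint32_t native_pc_offset) {
  DCHECK(method->IsNative());
  // Decoding now succeeds (before this patch a JNI method had no CodeInfo),
  // so frame size and spill masks can be read the same way as for
  // optimized managed code...
  CodeInfo code_info(header);
  // ...but CreateJniStackMap() records no stack map entries, so any
  // native-PC query comes back invalid...
  StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
  DCHECK(!stack_map.IsValid());
  // ...and callers such as ToDexPc() must keep answering kDexNoIndex.
  return dex::kDexNoIndex;
}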