author     2022-07-13 07:17:56 +0000
committer  2022-07-13 09:25:08 +0000
commit     26aef1213dbdd7ab03688d898cf802c8c8d7e610 (patch)
tree       62c107594123219d845a6730b4781706682ed7b0
parent     4ec05bb85ba1107c8295a295eec7e70bace0d047 (diff)
Revert "Introduce a flag to check if JITed code has instrumentation support"
This reverts commit fc067a360d14db5f84fd4b58e0dee6cb04ee759b.
Reason for revert: test failures on jit-on-first-use: https://android-build.googleplex.com/builds/submitted/8821659/art-jit-on-first-use/latest/view/logs/build_error.log
Change-Id: Ie9bc243baac777ecc4f47cc961494ca6ab3ef4c6
-rw-r--r--  compiler/exception_test.cc                                  7
-rw-r--r--  compiler/jni/quick/jni_compiler.cc                         16
-rw-r--r--  compiler/optimizing/code_generator.cc                       3
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc                 29
-rw-r--r--  compiler/optimizing/stack_map_stream.cc                     5
-rw-r--r--  compiler/optimizing/stack_map_stream.h                      4
-rw-r--r--  compiler/optimizing/stack_map_test.cc                      56
-rw-r--r--  dex2oat/linker/code_info_table_deduper_test.cc              7
-rw-r--r--  libartbase/base/bit_memory_region.h                        33
-rw-r--r--  openjdkjvmti/events.cc                                      2
-rw-r--r--  runtime/check_reference_map_visitor.h                       2
-rw-r--r--  runtime/entrypoints/quick/quick_trampoline_entrypoints.cc   2
-rw-r--r--  runtime/instrumentation.cc                                 98
-rw-r--r--  runtime/quick_exception_handler.cc                          2
-rw-r--r--  runtime/runtime.cc                                         13
-rw-r--r--  runtime/runtime.h                                           3
-rw-r--r--  runtime/stack_map.h                                        11
-rw-r--r--  runtime/thread.cc                                           2
-rw-r--r--  test/543-env-long-ref/env_long_ref.cc                       3
-rw-r--r--  test/common/stack_inspect.cc                                3
20 files changed, 94 insertions, 207 deletions
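
The behavioral core of the revert is in runtime/runtime.cc: IsAsyncDeoptimizeable() loses its ArtMethod* parameter and goes back to deciding from the return PC alone, instead of reading a per-method "debuggable" bit out of the JITed code's CodeInfo header. Below is a minimal self-contained sketch of the restored check; RuntimeSketch, CodeRange and their fields are hypothetical stand-ins, while the real code uses OatQuickMethodHeader::NterpMethodHeader, Runtime::IsJavaDebuggable() and the JIT code cache's private region.

```cpp
// Illustrative sketch only (hypothetical types, not ART classes). It mirrors
// the restored logic: nterp code is always async-deoptimizeable; otherwise
// only JIT code in the private code-cache region of a debuggable runtime is.
#include <cstdint>

struct CodeRange {
  uintptr_t begin = 0;
  uintptr_t end = 0;
  bool Contains(uintptr_t pc) const { return begin <= pc && pc < end; }
};

struct RuntimeSketch {
  CodeRange nterp_code;          // machine code of the nterp interpreter
  CodeRange jit_private_region;  // JIT code cache, private (debuggable) region
  bool is_java_debuggable = false;
  bool has_jit = false;

  bool IsAsyncDeoptimizeable(uintptr_t pc) const {
    if (nterp_code.Contains(pc)) {
      return true;  // nterp frames can always be deoptimized asynchronously
    }
    return is_java_debuggable && has_jit && jit_private_region.Contains(pc);
  }
};
```

The stack_map.h, stack_map_stream.* and compiler hunks in the diff below are the plumbing the reverted commit had added to record that per-method bit; this change removes it again.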
diff --git a/compiler/exception_test.cc b/compiler/exception_test.cc index a49c6c630e..4471b93f17 100644 --- a/compiler/exception_test.cc +++ b/compiler/exception_test.cc @@ -78,12 +78,7 @@ class ExceptionTest : public CommonRuntimeTest { ArenaStack arena_stack(&pool); ScopedArenaAllocator allocator(&arena_stack); StackMapStream stack_maps(&allocator, kRuntimeISA); - stack_maps.BeginMethod(/* frame_size_in_bytes= */ 4 * sizeof(void*), - /* core_spill_mask= */ 0u, - /* fp_spill_mask= */ 0u, - /* num_dex_registers= */ 0u, - /* baseline= */ false, - /* debuggable= */ false); + stack_maps.BeginMethod(4 * sizeof(void*), 0u, 0u, 0u); stack_maps.BeginStackMapEntry(kDexPc, native_pc_offset); stack_maps.EndStackMapEntry(); stack_maps.EndMethod(code_size); diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc index 42072eb6e0..d672500126 100644 --- a/compiler/jni/quick/jni_compiler.cc +++ b/compiler/jni/quick/jni_compiler.cc @@ -103,19 +103,19 @@ static JniCompiledMethod ArtJniCompileMethodInternal(const CompilerOptions& comp // i.e. if the method was annotated with @CriticalNative const bool is_critical_native = (access_flags & kAccCriticalNative) != 0u; - bool needs_entry_exit_hooks = - compiler_options.GetDebuggable() && compiler_options.IsJitCompiler(); - // We don't support JITing stubs for critical native methods in debuggable runtimes yet. - // TODO(mythria): Add support required for calling method entry / exit hooks from critical native - // methods. - DCHECK_IMPLIES(needs_entry_exit_hooks, !is_critical_native); - // When walking the stack the top frame doesn't have a pc associated with it. We then depend on // the invariant that we don't have JITed code when AOT code is available. In debuggable runtimes // this invariant doesn't hold. So we tag the SP for JITed code to indentify if we are executing // JITed code or AOT code. Since tagging involves additional instructions we tag only in // debuggable runtimes. - bool should_tag_sp = needs_entry_exit_hooks; + bool should_tag_sp = compiler_options.GetDebuggable() && compiler_options.IsJitCompiler(); + + // We don't JIT stubs for critical native methods in debuggable runtimes. + // TODO(mythria): Add support required for calling method entry / exit hooks from critical native + // methods. 
+ bool needs_entry_exit_hooks = compiler_options.GetDebuggable() && + compiler_options.IsJitCompiler() && + !is_critical_native; VLOG(jni) << "JniCompile: Method :: " << dex_file.PrettyMethod(method_idx, /* with signature */ true) diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc index d8fc3ba690..8bd4406332 100644 --- a/compiler/optimizing/code_generator.cc +++ b/compiler/optimizing/code_generator.cc @@ -389,8 +389,7 @@ void CodeGenerator::Compile(CodeAllocator* allocator) { core_spill_mask_, fpu_spill_mask_, GetGraph()->GetNumberOfVRegs(), - GetGraph()->IsCompilingBaseline(), - GetGraph()->IsDebuggable()); + GetGraph()->IsCompilingBaseline()); size_t frame_start = GetAssembler()->CodeSize(); GenerateFrameEntry(); diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc index b0fa251f55..a499c55757 100644 --- a/compiler/optimizing/optimizing_compiler.cc +++ b/compiler/optimizing/optimizing_compiler.cc @@ -1115,18 +1115,17 @@ CompiledMethod* OptimizingCompiler::Compile(const dex::CodeItem* code_item, static ScopedArenaVector<uint8_t> CreateJniStackMap(ScopedArenaAllocator* allocator, const JniCompiledMethod& jni_compiled_method, - size_t code_size, - bool debuggable) { + size_t code_size) { // StackMapStream is quite large, so allocate it using the ScopedArenaAllocator // to stay clear of the frame size limit. std::unique_ptr<StackMapStream> stack_map_stream( new (allocator) StackMapStream(allocator, jni_compiled_method.GetInstructionSet())); - stack_map_stream->BeginMethod(jni_compiled_method.GetFrameSize(), - jni_compiled_method.GetCoreSpillMask(), - jni_compiled_method.GetFpSpillMask(), - /* num_dex_registers= */ 0, - /* baseline= */ false, - debuggable); + stack_map_stream->BeginMethod( + jni_compiled_method.GetFrameSize(), + jni_compiled_method.GetCoreSpillMask(), + jni_compiled_method.GetFpSpillMask(), + /* num_dex_registers= */ 0, + /* baseline= */ false); stack_map_stream->EndMethod(code_size); return stack_map_stream->Encode(); } @@ -1188,11 +1187,8 @@ CompiledMethod* OptimizingCompiler::JniCompile(uint32_t access_flags, MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kCompiledNativeStub); ScopedArenaAllocator stack_map_allocator(&arena_stack); // Will hold the stack map. - ScopedArenaVector<uint8_t> stack_map = - CreateJniStackMap(&stack_map_allocator, - jni_compiled_method, - jni_compiled_method.GetCode().size(), - compiler_options.GetDebuggable() && compiler_options.IsJitCompiler()); + ScopedArenaVector<uint8_t> stack_map = CreateJniStackMap( + &stack_map_allocator, jni_compiled_method, jni_compiled_method.GetCode().size()); return CompiledMethod::SwapAllocCompiledMethod( GetCompiledMethodStorage(), jni_compiled_method.GetInstructionSet(), @@ -1253,11 +1249,8 @@ bool OptimizingCompiler::JitCompile(Thread* self, ArenaStack arena_stack(runtime->GetJitArenaPool()); // StackMapStream is large and it does not fit into this frame, so we need helper method. ScopedArenaAllocator stack_map_allocator(&arena_stack); // Will hold the stack map. 
- ScopedArenaVector<uint8_t> stack_map = - CreateJniStackMap(&stack_map_allocator, - jni_compiled_method, - jni_compiled_method.GetCode().size(), - compiler_options.GetDebuggable() && compiler_options.IsJitCompiler()); + ScopedArenaVector<uint8_t> stack_map = CreateJniStackMap( + &stack_map_allocator, jni_compiled_method, jni_compiled_method.GetCode().size()); ArrayRef<const uint8_t> reserved_code; ArrayRef<const uint8_t> reserved_data; diff --git a/compiler/optimizing/stack_map_stream.cc b/compiler/optimizing/stack_map_stream.cc index c13a35567b..f55bbee1c8 100644 --- a/compiler/optimizing/stack_map_stream.cc +++ b/compiler/optimizing/stack_map_stream.cc @@ -49,8 +49,7 @@ void StackMapStream::BeginMethod(size_t frame_size_in_bytes, size_t core_spill_mask, size_t fp_spill_mask, uint32_t num_dex_registers, - bool baseline, - bool debuggable) { + bool baseline) { DCHECK(!in_method_) << "Mismatched Begin/End calls"; in_method_ = true; DCHECK_EQ(packed_frame_size_, 0u) << "BeginMethod was already called"; @@ -61,7 +60,6 @@ void StackMapStream::BeginMethod(size_t frame_size_in_bytes, fp_spill_mask_ = fp_spill_mask; num_dex_registers_ = num_dex_registers; baseline_ = baseline; - debuggable_ = debuggable; if (kVerifyStackMaps) { dchecks_.emplace_back([=](const CodeInfo& code_info) { @@ -369,7 +367,6 @@ ScopedArenaVector<uint8_t> StackMapStream::Encode() { uint32_t flags = (inline_infos_.size() > 0) ? CodeInfo::kHasInlineInfo : 0; flags |= baseline_ ? CodeInfo::kIsBaseline : 0; - flags |= debuggable_ ? CodeInfo::kIsDebuggable : 0; DCHECK_LE(flags, kVarintMax); // Ensure flags can be read directly as byte. uint32_t bit_table_flags = 0; ForEachBitTable([&bit_table_flags](size_t i, auto bit_table) { diff --git a/compiler/optimizing/stack_map_stream.h b/compiler/optimizing/stack_map_stream.h index 1aaa6aee9e..27145a174c 100644 --- a/compiler/optimizing/stack_map_stream.h +++ b/compiler/optimizing/stack_map_stream.h @@ -64,8 +64,7 @@ class StackMapStream : public DeletableArenaObject<kArenaAllocStackMapStream> { size_t core_spill_mask, size_t fp_spill_mask, uint32_t num_dex_registers, - bool baseline, - bool debuggable); + bool baseline = false); void EndMethod(size_t code_size); void BeginStackMapEntry(uint32_t dex_pc, @@ -126,7 +125,6 @@ class StackMapStream : public DeletableArenaObject<kArenaAllocStackMapStream> { uint32_t fp_spill_mask_ = 0; uint32_t num_dex_registers_ = 0; bool baseline_; - bool debuggable_; BitTableBuilder<StackMap> stack_maps_; BitTableBuilder<RegisterMask> register_masks_; BitmapTableBuilder stack_masks_; diff --git a/compiler/optimizing/stack_map_test.cc b/compiler/optimizing/stack_map_test.cc index 23af1f7fa1..f6a739e15a 100644 --- a/compiler/optimizing/stack_map_test.cc +++ b/compiler/optimizing/stack_map_test.cc @@ -52,12 +52,7 @@ TEST(StackMapTest, Test1) { ArenaStack arena_stack(&pool); ScopedArenaAllocator allocator(&arena_stack); StackMapStream stream(&allocator, kRuntimeISA); - stream.BeginMethod(/* frame_size_in_bytes= */ 32, - /* core_spill_mask= */ 0, - /* fp_spill_mask= */ 0, - /* num_dex_registers= */ 2, - /* baseline= */ false, - /* debuggable= */ false); + stream.BeginMethod(32, 0, 0, 2); ArenaBitVector sp_mask(&allocator, 0, false); size_t number_of_dex_registers = 2; @@ -111,12 +106,7 @@ TEST(StackMapTest, Test2) { ArenaStack arena_stack(&pool); ScopedArenaAllocator allocator(&arena_stack); StackMapStream stream(&allocator, kRuntimeISA); - stream.BeginMethod(/* frame_size_in_bytes= */ 32, - /* core_spill_mask= */ 0, - /* fp_spill_mask= */ 0, - /* 
num_dex_registers= */ 2, - /* baseline= */ false, - /* debuggable= */ false); + stream.BeginMethod(32, 0, 0, 2); ArtMethod art_method; ArenaBitVector sp_mask1(&allocator, 0, true); @@ -310,12 +300,7 @@ TEST(StackMapTest, TestDeduplicateInlineInfoDexRegisterMap) { ArenaStack arena_stack(&pool); ScopedArenaAllocator allocator(&arena_stack); StackMapStream stream(&allocator, kRuntimeISA); - stream.BeginMethod(/* frame_size_in_bytes= */ 32, - /* core_spill_mask= */ 0, - /* fp_spill_mask= */ 0, - /* num_dex_registers= */ 2, - /* baseline= */ false, - /* debuggable= */ false); + stream.BeginMethod(32, 0, 0, 2); ArtMethod art_method; ArenaBitVector sp_mask1(&allocator, 0, true); @@ -378,12 +363,7 @@ TEST(StackMapTest, TestNonLiveDexRegisters) { ArenaStack arena_stack(&pool); ScopedArenaAllocator allocator(&arena_stack); StackMapStream stream(&allocator, kRuntimeISA); - stream.BeginMethod(/* frame_size_in_bytes= */ 32, - /* core_spill_mask= */ 0, - /* fp_spill_mask= */ 0, - /* num_dex_registers= */ 2, - /* baseline= */ false, - /* debuggable= */ false); + stream.BeginMethod(32, 0, 0, 2); ArenaBitVector sp_mask(&allocator, 0, false); uint32_t number_of_dex_registers = 2; @@ -431,12 +411,7 @@ TEST(StackMapTest, TestShareDexRegisterMap) { ArenaStack arena_stack(&pool); ScopedArenaAllocator allocator(&arena_stack); StackMapStream stream(&allocator, kRuntimeISA); - stream.BeginMethod(/* frame_size_in_bytes= */ 32, - /* core_spill_mask= */ 0, - /* fp_spill_mask= */ 0, - /* num_dex_registers= */ 2, - /* baseline= */ false, - /* debuggable= */ false); + stream.BeginMethod(32, 0, 0, 2); ArenaBitVector sp_mask(&allocator, 0, false); uint32_t number_of_dex_registers = 2; @@ -492,12 +467,7 @@ TEST(StackMapTest, TestNoDexRegisterMap) { ArenaStack arena_stack(&pool); ScopedArenaAllocator allocator(&arena_stack); StackMapStream stream(&allocator, kRuntimeISA); - stream.BeginMethod(/* frame_size_in_bytes= */ 32, - /* core_spill_mask= */ 0, - /* fp_spill_mask= */ 0, - /* num_dex_registers= */ 1, - /* baseline= */ false, - /* debuggable= */ false); + stream.BeginMethod(32, 0, 0, 1); ArenaBitVector sp_mask(&allocator, 0, false); stream.BeginStackMapEntry(0, 64 * kPcAlign, 0x3, &sp_mask); @@ -542,12 +512,7 @@ TEST(StackMapTest, InlineTest) { ArenaStack arena_stack(&pool); ScopedArenaAllocator allocator(&arena_stack); StackMapStream stream(&allocator, kRuntimeISA); - stream.BeginMethod(/* frame_size_in_bytes= */ 32, - /* core_spill_mask= */ 0, - /* fp_spill_mask= */ 0, - /* num_dex_registers= */ 2, - /* baseline= */ false, - /* debuggable= */ false); + stream.BeginMethod(32, 0, 0, 2); ArtMethod art_method; ArenaBitVector sp_mask1(&allocator, 0, true); @@ -737,12 +702,7 @@ TEST(StackMapTest, TestDeduplicateStackMask) { ArenaStack arena_stack(&pool); ScopedArenaAllocator allocator(&arena_stack); StackMapStream stream(&allocator, kRuntimeISA); - stream.BeginMethod(/* frame_size_in_bytes= */ 32, - /* core_spill_mask= */ 0, - /* fp_spill_mask= */ 0, - /* num_dex_registers= */ 0, - /* baseline= */ false, - /* debuggable= */ false); + stream.BeginMethod(32, 0, 0, 0); ArenaBitVector sp_mask(&allocator, 0, true); sp_mask.SetBit(1); diff --git a/dex2oat/linker/code_info_table_deduper_test.cc b/dex2oat/linker/code_info_table_deduper_test.cc index 54b7dd5940..8913b07a51 100644 --- a/dex2oat/linker/code_info_table_deduper_test.cc +++ b/dex2oat/linker/code_info_table_deduper_test.cc @@ -35,12 +35,7 @@ TEST(StackMapTest, TestDedupeBitTables) { ArenaStack arena_stack(&pool); ScopedArenaAllocator allocator(&arena_stack); 
StackMapStream stream(&allocator, kRuntimeISA); - stream.BeginMethod(/* frame_size_in_bytes= */ 32, - /* core_spill_mask= */ 0, - /* fp_spill_mask= */ 0, - /* num_dex_registers= */ 2, - /* baseline= */ false, - /* debuggable= */ false); + stream.BeginMethod(32, 0, 0, 2); stream.BeginStackMapEntry(0, 64 * kPcAlign); stream.AddDexRegisterEntry(Kind::kInStack, 0); diff --git a/libartbase/base/bit_memory_region.h b/libartbase/base/bit_memory_region.h index baac2f50ea..c5224a5421 100644 --- a/libartbase/base/bit_memory_region.h +++ b/libartbase/base/bit_memory_region.h @@ -324,37 +324,8 @@ class BitMemoryRegion final : public ValueObject { size_t bit_size_ = 0; }; -// Minimum number of bits used for varint. A varint represents either a value stored "inline" or -// the number of bytes that are required to encode the value. -constexpr uint32_t kVarintBits = 4; -// Maximum value which is stored "inline". We use the rest of the values to encode the number of -// bytes required to encode the value when the value is greater than kVarintMax. -// We encode any value less than or equal to 11 inline. We use 12, 13, 14 and 15 -// to represent that the value is encoded in 1, 2, 3 and 4 bytes respectively. -// -// For example if we want to encode 1, 15, 16, 7, 11, 256: -// -// Low numbers (1, 7, 11) are encoded inline. 15 and 12 are set with 12 to show -// we need to load one byte for each to have their real values (15 and 12), and -// 256 is set with 13 to show we need to load two bytes. This is done to -// compress the values in the bit array and keep the size down. Where the actual value -// is read from depends on the use case. -// -// Values greater than kVarintMax could be encoded as a separate list referred -// to as InterleavedVarints (see ReadInterleavedVarints / WriteInterleavedVarints). -// This is used when there are fixed number of fields like CodeInfo headers. -// In our example the interleaved encoding looks like below: -// -// Meaning: 1--- 15-- 12-- 7--- 11-- 256- 15------- 12------- 256---------------- -// Bits: 0001 1100 1100 0111 1011 1101 0000 1111 0000 1100 0000 0001 0000 0000 -// -// In other cases the value is recorded just following the size encoding. This is -// referred as consecutive encoding (See ReadVarint / WriteVarint). In our -// example the consecutively encoded varints looks like below: -// -// Meaning: 1--- 15-- 15------- 12-- 12------- 7--- 11-- 256- 256---------------- -// Bits: 0001 1100 0000 1100 1100 0000 1100 0111 1011 1101 0000 0001 0000 0000 -constexpr uint32_t kVarintMax = 11; +constexpr uint32_t kVarintBits = 4; // Minimum number of bits used for varint. +constexpr uint32_t kVarintMax = 11; // Maximum value which is stored "inline". 
class BitMemoryReader { public: diff --git a/openjdkjvmti/events.cc b/openjdkjvmti/events.cc index e23beecc99..58601aa3fc 100644 --- a/openjdkjvmti/events.cc +++ b/openjdkjvmti/events.cc @@ -1258,7 +1258,7 @@ void EventHandler::HandleLocalAccessCapabilityAdded() { if (m.IsNative() || m.IsProxyMethod()) { continue; } else if (!runtime_->GetClassLinker()->IsQuickToInterpreterBridge(code) && - !runtime_->IsAsyncDeoptimizeable(&m, reinterpret_cast<uintptr_t>(code))) { + !runtime_->IsAsyncDeoptimizeable(reinterpret_cast<uintptr_t>(code))) { runtime_->GetInstrumentation()->InitializeMethodsCode(&m, /*aot_code=*/ nullptr); } } diff --git a/runtime/check_reference_map_visitor.h b/runtime/check_reference_map_visitor.h index d0cb199aa2..a7c3e45ae7 100644 --- a/runtime/check_reference_map_visitor.h +++ b/runtime/check_reference_map_visitor.h @@ -91,7 +91,7 @@ class CheckReferenceMapVisitor : public StackVisitor { CodeItemDataAccessor accessor(m->DexInstructionData()); uint16_t number_of_dex_registers = accessor.RegistersSize(); - if (!Runtime::Current()->IsAsyncDeoptimizeable(m, GetCurrentQuickFramePc())) { + if (!Runtime::Current()->IsAsyncDeoptimizeable(GetCurrentQuickFramePc())) { // We can only guarantee dex register info presence for debuggable methods. return; } diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc index fc7711027f..7e3fdee3e1 100644 --- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc @@ -714,7 +714,7 @@ extern "C" uint64_t artQuickToInterpreterBridge(ArtMethod* method, Thread* self, if (UNLIKELY(instr->ShouldDeoptimizeCaller(self, sp))) { ArtMethod* caller = QuickArgumentVisitor::GetOuterMethod(sp); uintptr_t caller_pc = QuickArgumentVisitor::GetCallingPc(sp); - DCHECK(Runtime::Current()->IsAsyncDeoptimizeable(caller, caller_pc)); + DCHECK(Runtime::Current()->IsAsyncDeoptimizeable(caller_pc)); DCHECK(caller != nullptr); VLOG(deopt) << "Forcing deoptimization on return from method " << method->PrettyMethod() << " to " << caller->PrettyMethod() << (force_frame_pop ? " for frame-pop" : ""); diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc index 08e7a7e079..61b0e526b0 100644 --- a/runtime/instrumentation.cc +++ b/runtime/instrumentation.cc @@ -213,7 +213,7 @@ static bool IsProxyInit(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_) // Returns true if we need entry exit stub to call entry hooks. JITed code // directly call entry / exit hooks and don't need the stub. -static bool CodeNeedsEntryExitStub(const void* entry_point, ArtMethod* method) +static bool CodeNeedsEntryExitStub(const void* code, ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_) { // Proxy.init should never have entry/exit stubs. if (IsProxyInit(method)) { @@ -222,31 +222,32 @@ static bool CodeNeedsEntryExitStub(const void* entry_point, ArtMethod* method) // In some tests runtime isn't setup fully and hence the entry points could // be nullptr. - if (entry_point == nullptr) { + if (code == nullptr) { return true; } // Code running in the interpreter doesn't need entry/exit stubs. - if (Runtime::Current()->GetClassLinker()->IsQuickToInterpreterBridge(entry_point)) { + if (Runtime::Current()->GetClassLinker()->IsQuickToInterpreterBridge(code)) { return false; } - // When jiting code for debuggable runtimes / instrumentation is active we generate the code to - // call method entry / exit hooks when required. 
Hence it is not required to update to - // instrumentation entry point for JITed code in debuggable mode. - jit::Jit* jit = Runtime::Current()->GetJit(); - if (jit != nullptr && jit->GetCodeCache()->ContainsPc(entry_point)) { - // If JITed code was compiled with instrumentation support we don't need entry / exit stub. - OatQuickMethodHeader* header = OatQuickMethodHeader::FromEntryPoint(entry_point); - return !CodeInfo::IsDebuggable(header->GetOptimizedCodeInfoPtr()); + if (!Runtime::Current()->IsJavaDebuggable()) { + return true; } - // GenericJni trampoline can handle entry / exit hooks in debuggable runtimes. - if (Runtime::Current()->GetClassLinker()->IsQuickGenericJniStub(entry_point) && - Runtime::Current()->IsJavaDebuggable()) { + // Native methods don't need method entry / exit hooks in debuggable runtimes. + // GenericJni trampoline and JITed JNI stubs handle entry / exit hooks + if (method->IsNative()) { return false; } + // When jiting code for debuggable apps we generate the code to call method + // entry / exit hooks when required. Hence it is not required to update + // to instrumentation entry point for JITed code in debuggable mode. + jit::Jit* jit = Runtime::Current()->GetJit(); + if (jit != nullptr && jit->GetCodeCache()->ContainsPc(code)) { + return false; + } return true; } @@ -1668,7 +1669,7 @@ bool Instrumentation::ShouldDeoptimizeCaller(Thread* self, ArtMethod** sp) { } if (NeedsSlowInterpreterForMethod(self, caller)) { - if (!Runtime::Current()->IsAsyncDeoptimizeable(caller, caller_pc)) { + if (!Runtime::Current()->IsAsyncDeoptimizeable(caller_pc)) { LOG(WARNING) << "Got a deoptimization request on un-deoptimizable method " << caller->PrettyMethod(); return false; @@ -1693,10 +1694,10 @@ bool Instrumentation::ShouldDeoptimizeCaller(Thread* self, ArtMethod** sp) { } } - if (should_deoptimize_frame && !Runtime::Current()->IsAsyncDeoptimizeable(caller, caller_pc)) { - LOG(WARNING) << "Got a deoptimization request on un-deoptimizable method " - << caller->PrettyMethod(); - return false; + if (should_deoptimize_frame && !Runtime::Current()->IsAsyncDeoptimizeable(caller_pc)) { + LOG(WARNING) << "Got a deoptimization request on un-deoptimizable method " + << caller->PrettyMethod(); + return false; } return should_deoptimize_frame; } @@ -1775,39 +1776,36 @@ TwoWordReturn Instrumentation::PopInstrumentationStackFrame(Thread* self, // Restore the return value if it's a reference since it might have moved. 
*reinterpret_cast<mirror::Object**>(gpr_result) = res.Get(); } - - if (deoptimize) { - if (Runtime::Current()->IsAsyncDeoptimizeable(visitor.caller, *return_pc_addr)) { - if (kVerboseInstrumentation) { - LOG(INFO) << "Deoptimizing " - << visitor.caller->PrettyMethod() - << " by returning from " - << method->PrettyMethod() - << " with result " - << std::hex << return_value.GetJ() << std::dec - << " in " - << *self; - } - DeoptimizationMethodType deopt_method_type = GetDeoptimizationMethodType(method); - self->PushDeoptimizationContext(return_value, - is_ref, - /* exception= */ nullptr, - /* from_code= */ false, - deopt_method_type); - return GetTwoWordSuccessValue( - *return_pc_addr, reinterpret_cast<uintptr_t>(GetQuickDeoptimizationEntryPoint())); - } else { - VLOG(deopt) << "Got a deoptimization request on un-deoptimizable " - << visitor.caller->PrettyMethod() << " at PC " - << reinterpret_cast<void*>(*return_pc_addr); + if (deoptimize && Runtime::Current()->IsAsyncDeoptimizeable(*return_pc_addr)) { + if (kVerboseInstrumentation) { + LOG(INFO) << "Deoptimizing " + << visitor.caller->PrettyMethod() + << " by returning from " + << method->PrettyMethod() + << " with result " + << std::hex << return_value.GetJ() << std::dec + << " in " + << *self; } + DeoptimizationMethodType deopt_method_type = GetDeoptimizationMethodType(method); + self->PushDeoptimizationContext(return_value, + is_ref, + /* exception= */ nullptr, + /* from_code= */ false, + deopt_method_type); + return GetTwoWordSuccessValue(*return_pc_addr, + reinterpret_cast<uintptr_t>(GetQuickDeoptimizationEntryPoint())); + } else { + if (deoptimize && !Runtime::Current()->IsAsyncDeoptimizeable(*return_pc_addr)) { + VLOG(deopt) << "Got a deoptimization request on un-deoptimizable " << method->PrettyMethod() + << " at PC " << reinterpret_cast<void*>(*return_pc_addr); + } + if (kVerboseInstrumentation) { + LOG(INFO) << "Returning from " << method->PrettyMethod() + << " to PC " << reinterpret_cast<void*>(*return_pc_addr); + } + return GetTwoWordSuccessValue(0, *return_pc_addr); } - - if (kVerboseInstrumentation) { - LOG(INFO) << "Returning from " << method->PrettyMethod() << " to PC " - << reinterpret_cast<void*>(*return_pc_addr); - } - return GetTwoWordSuccessValue(0, *return_pc_addr); } uintptr_t Instrumentation::PopInstrumentationStackUntil(Thread* self, uintptr_t pop_until) const { diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc index 1b43717860..40a1c16905 100644 --- a/runtime/quick_exception_handler.cc +++ b/runtime/quick_exception_handler.cc @@ -406,7 +406,7 @@ class DeoptimizeStackVisitor final : public StackVisitor { callee_method_ = method; return true; } else if (!single_frame_deopt_ && - !Runtime::Current()->IsAsyncDeoptimizeable(method, GetCurrentQuickFramePc())) { + !Runtime::Current()->IsAsyncDeoptimizeable(GetCurrentQuickFramePc())) { // We hit some code that's not deoptimizeable. However, Single-frame deoptimization triggered // from compiled code is always allowed since HDeoptimize always saves the full environment. 
LOG(WARNING) << "Got request to deoptimize un-deoptimizable method " diff --git a/runtime/runtime.cc b/runtime/runtime.cc index 3645695bce..72fa118f5b 100644 --- a/runtime/runtime.cc +++ b/runtime/runtime.cc @@ -3068,13 +3068,12 @@ bool Runtime::IsVerificationSoftFail() const { return verify_ == verifier::VerifyMode::kSoftFail; } -bool Runtime::IsAsyncDeoptimizeable(ArtMethod* method, uintptr_t code) const { +bool Runtime::IsAsyncDeoptimizeable(uintptr_t code) const { if (OatQuickMethodHeader::NterpMethodHeader != nullptr) { if (OatQuickMethodHeader::NterpMethodHeader->Contains(code)) { return true; } } - // We only support async deopt (ie the compiled code is not explicitly asking for // deopt, but something else like the debugger) in debuggable JIT code. // We could look at the oat file where `code` is being defined, @@ -3082,14 +3081,8 @@ bool Runtime::IsAsyncDeoptimizeable(ArtMethod* method, uintptr_t code) const { // only rely on the JIT for debuggable apps. // The JIT-zygote is not debuggable so we need to be sure to exclude code from the non-private // region as well. - if (GetJit() != nullptr && - GetJit()->GetCodeCache()->PrivateRegionContainsPc(reinterpret_cast<const void*>(code))) { - // If the code is JITed code then check if it was compiled as debuggable. - const OatQuickMethodHeader* header = method->GetOatQuickMethodHeader(code); - return CodeInfo::IsDebuggable(header->GetOptimizedCodeInfoPtr()); - } - - return false; + return IsJavaDebuggable() && GetJit() != nullptr && + GetJit()->GetCodeCache()->PrivateRegionContainsPc(reinterpret_cast<const void*>(code)); } LinearAlloc* Runtime::CreateLinearAlloc() { diff --git a/runtime/runtime.h b/runtime/runtime.h index 38741f1706..91f164465b 100644 --- a/runtime/runtime.h +++ b/runtime/runtime.h @@ -901,8 +901,7 @@ class Runtime { // Returns if the code can be deoptimized asynchronously. Code may be compiled with some // optimization that makes it impossible to deoptimize. - bool IsAsyncDeoptimizeable(ArtMethod* method, uintptr_t code) const - REQUIRES_SHARED(Locks::mutator_lock_); + bool IsAsyncDeoptimizeable(uintptr_t code) const REQUIRES_SHARED(Locks::mutator_lock_); // Returns a saved copy of the environment (getenv/setenv values). // Used by Fork to protect against overwriting LD_LIBRARY_PATH, etc. diff --git a/runtime/stack_map.h b/runtime/stack_map.h index 7876a67381..7a13dbd3ac 100644 --- a/runtime/stack_map.h +++ b/runtime/stack_map.h @@ -449,10 +449,6 @@ class CodeInfo { return (*code_info_data & kIsBaseline) != 0; } - ALWAYS_INLINE static bool IsDebuggable(const uint8_t* code_info_data) { - return (*code_info_data & kIsDebuggable) != 0; - } - private: // Scan backward to determine dex register locations at given stack map. void DecodeDexRegisterMap(uint32_t stack_map_index, @@ -499,16 +495,11 @@ class CodeInfo { enum Flags { kHasInlineInfo = 1 << 0, kIsBaseline = 1 << 1, - kIsDebuggable = 1 << 2, }; // The CodeInfo starts with sequence of variable-length bit-encoded integers. - // (Please see kVarintMax for more details about encoding). static constexpr size_t kNumHeaders = 7; - // Note that the space for flags is limited to three bits. We use a custom encoding where we - // encode the value inline if it is less than kVarintMax. We want to access flags without - // decoding the entire CodeInfo so the value of flags cannot be more than kVarintMax. - uint32_t flags_ = 0; + uint32_t flags_ = 0; // Note that the space is limited to three bits. uint32_t code_size_ = 0; // The size of native PC range in bytes. 
uint32_t packed_frame_size_ = 0; // Frame size in kStackAlignment units. uint32_t core_spill_mask_ = 0; diff --git a/runtime/thread.cc b/runtime/thread.cc index 73c309cba3..0c146d5bf1 100644 --- a/runtime/thread.cc +++ b/runtime/thread.cc @@ -3772,7 +3772,7 @@ void Thread::QuickDeliverException(bool is_method_exit_exception) { if (Dbg::IsForcedInterpreterNeededForException(this) || force_deopt || IsForceInterpreter()) { NthCallerVisitor visitor(this, 0, false); visitor.WalkStack(); - if (Runtime::Current()->IsAsyncDeoptimizeable(visitor.caller, visitor.caller_pc)) { + if (Runtime::Current()->IsAsyncDeoptimizeable(visitor.caller_pc)) { // method_type shouldn't matter due to exception handling. const DeoptimizationMethodType method_type = DeoptimizationMethodType::kDefault; // Save the exception into the deoptimization context so it can be restored diff --git a/test/543-env-long-ref/env_long_ref.cc b/test/543-env-long-ref/env_long_ref.cc index bfaf1f15bd..1c30d469f8 100644 --- a/test/543-env-long-ref/env_long_ref.cc +++ b/test/543-env-long-ref/env_long_ref.cc @@ -36,8 +36,7 @@ extern "C" JNIEXPORT void JNICALL Java_Main_lookForMyRegisters(JNIEnv*, jclass, found = true; // For optimized non-debuggable code do not expect dex register info to be present. if (stack_visitor->GetCurrentShadowFrame() == nullptr && - !Runtime::Current()->IsAsyncDeoptimizeable(m, - stack_visitor->GetCurrentQuickFramePc())) { + !Runtime::Current()->IsAsyncDeoptimizeable(stack_visitor->GetCurrentQuickFramePc())) { return true; } uint32_t stack_value = 0; diff --git a/test/common/stack_inspect.cc b/test/common/stack_inspect.cc index 7537705d21..79c7a36a8e 100644 --- a/test/common/stack_inspect.cc +++ b/test/common/stack_inspect.cc @@ -195,8 +195,7 @@ extern "C" JNIEXPORT jobject JNICALL Java_Main_getThisOfCaller( if (stack_visitor->GetMethod() == nullptr || stack_visitor->GetMethod()->IsNative() || (stack_visitor->GetCurrentShadowFrame() == nullptr && - !Runtime::Current()->IsAsyncDeoptimizeable(stack_visitor->GetMethod(), - stack_visitor->GetCurrentQuickFramePc()))) { + !Runtime::Current()->IsAsyncDeoptimizeable(stack_visitor->GetCurrentQuickFramePc()))) { return true; } result = soa.AddLocalReference<jobject>(stack_visitor->GetThisObject()); |
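
The libartbase/base/bit_memory_region.h hunk above trims a long comment describing how the stack-map headers encode small integers. For reference, the scheme that comment described works roughly as in the sketch below: a 4-bit prefix either holds the value itself (0 through kVarintMax, i.e. 11) or, for prefixes 12 through 15, says how many whole bytes follow with the real value. This sketches only the "consecutive" variant under an assumed bit ordering; BitReaderSketch is a hypothetical type, not ART's BitMemoryReader, and the interleaved variant used for CodeInfo headers additionally groups all prefixes before the trailing bytes.

```cpp
// Hypothetical sketch, not ART's BitMemoryReader. Assumes an LSB-first bit
// stream; only the prefix / inline-value / trailing-bytes split matters here.
#include <cstddef>
#include <cstdint>

constexpr uint32_t kVarintBits = 4;   // size of the prefix
constexpr uint32_t kVarintMax = 11;   // largest value stored inline

struct BitReaderSketch {
  const uint8_t* data = nullptr;
  size_t bit_offset = 0;

  uint32_t ReadBits(size_t count) {
    uint32_t result = 0;
    for (size_t i = 0; i < count; ++i, ++bit_offset) {
      uint32_t bit = (data[bit_offset / 8] >> (bit_offset % 8)) & 1u;
      result |= bit << i;
    }
    return result;
  }

  uint32_t ReadVarint() {
    uint32_t prefix = ReadBits(kVarintBits);
    if (prefix <= kVarintMax) {
      return prefix;                           // value stored inline
    }
    uint32_t num_bytes = prefix - kVarintMax;  // prefixes 12..15 -> 1..4 bytes
    return ReadBits(num_bytes * 8);            // value follows the prefix
  }
};
```

Keeping the flags header value at or below kVarintMax is what lets CodeInfo::IsBaseline() (and, before this revert, IsDebuggable()) test the first byte of the encoded header directly, which is why the stack_map_stream.cc hunk keeps the DCHECK_LE(flags, kVarintMax).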