From fc067a360d14db5f84fd4b58e0dee6cb04ee759b Mon Sep 17 00:00:00 2001
From: Mythri Alle
Date: Wed, 23 Mar 2022 12:49:30 +0000
Subject: Introduce a flag to check if JITed code has instrumentation support

Introduce a new flag to identify whether JITed code was compiled with
instrumentation support. We used to check whether the runtime is java
debuggable to determine instrumentation support of JITed code. The
java-debuggable flag was only set at runtime init and never changed
afterwards, so this check was sufficient: we always JIT code with
instrumentation support in debuggable runtimes.

We want to be able to change the runtime to debuggable after the
runtime has started. As a first step, introduce a new flag to
explicitly record whether JITed code was compiled with instrumentation
support. Use this flag to check whether code needs entry / exit stubs
and whether code is async deoptimizeable.

Bug: 222479430
Test: art/test.py
Change-Id: Ibcaeab869aa8ce153920a801dcc60988411c775b
---
 compiler/optimizing/code_generator.cc      |  3 +-
 compiler/optimizing/optimizing_compiler.cc | 29 ++++++++++------
 compiler/optimizing/stack_map_stream.cc    |  5 ++-
 compiler/optimizing/stack_map_stream.h     |  4 ++-
 compiler/optimizing/stack_map_test.cc      | 56 +++++++++++++++++++++++++-----
 5 files changed, 75 insertions(+), 22 deletions(-)

diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 8bd4406332..d8fc3ba690 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -389,7 +389,8 @@ void CodeGenerator::Compile(CodeAllocator* allocator) {
       core_spill_mask_,
       fpu_spill_mask_,
       GetGraph()->GetNumberOfVRegs(),
-      GetGraph()->IsCompilingBaseline());
+      GetGraph()->IsCompilingBaseline(),
+      GetGraph()->IsDebuggable());
 
   size_t frame_start = GetAssembler()->CodeSize();
   GenerateFrameEntry();
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index a499c55757..b0fa251f55 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -1115,17 +1115,18 @@ CompiledMethod* OptimizingCompiler::Compile(const dex::CodeItem* code_item,
 
 static ScopedArenaVector<uint8_t> CreateJniStackMap(ScopedArenaAllocator* allocator,
                                                     const JniCompiledMethod& jni_compiled_method,
-                                                    size_t code_size) {
+                                                    size_t code_size,
+                                                    bool debuggable) {
   // StackMapStream is quite large, so allocate it using the ScopedArenaAllocator
   // to stay clear of the frame size limit.
   std::unique_ptr<StackMapStream> stack_map_stream(
       new (allocator) StackMapStream(allocator, jni_compiled_method.GetInstructionSet()));
-  stack_map_stream->BeginMethod(
-      jni_compiled_method.GetFrameSize(),
-      jni_compiled_method.GetCoreSpillMask(),
-      jni_compiled_method.GetFpSpillMask(),
-      /* num_dex_registers= */ 0,
-      /* baseline= */ false);
+  stack_map_stream->BeginMethod(jni_compiled_method.GetFrameSize(),
+                                jni_compiled_method.GetCoreSpillMask(),
+                                jni_compiled_method.GetFpSpillMask(),
+                                /* num_dex_registers= */ 0,
+                                /* baseline= */ false,
+                                debuggable);
   stack_map_stream->EndMethod(code_size);
   return stack_map_stream->Encode();
 }
@@ -1187,8 +1188,11 @@ CompiledMethod* OptimizingCompiler::JniCompile(uint32_t access_flags,
     MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kCompiledNativeStub);
 
     ScopedArenaAllocator stack_map_allocator(&arena_stack);  // Will hold the stack map.
-    ScopedArenaVector<uint8_t> stack_map = CreateJniStackMap(
-        &stack_map_allocator, jni_compiled_method, jni_compiled_method.GetCode().size());
+    ScopedArenaVector<uint8_t> stack_map =
+        CreateJniStackMap(&stack_map_allocator,
+                          jni_compiled_method,
+                          jni_compiled_method.GetCode().size(),
+                          compiler_options.GetDebuggable() && compiler_options.IsJitCompiler());
     return CompiledMethod::SwapAllocCompiledMethod(
         GetCompiledMethodStorage(),
         jni_compiled_method.GetInstructionSet(),
@@ -1249,8 +1253,11 @@ bool OptimizingCompiler::JitCompile(Thread* self,
     ArenaStack arena_stack(runtime->GetJitArenaPool());
     // StackMapStream is large and it does not fit into this frame, so we need helper method.
     ScopedArenaAllocator stack_map_allocator(&arena_stack);  // Will hold the stack map.
-    ScopedArenaVector<uint8_t> stack_map = CreateJniStackMap(
-        &stack_map_allocator, jni_compiled_method, jni_compiled_method.GetCode().size());
+    ScopedArenaVector<uint8_t> stack_map =
+        CreateJniStackMap(&stack_map_allocator,
+                          jni_compiled_method,
+                          jni_compiled_method.GetCode().size(),
+                          compiler_options.GetDebuggable() && compiler_options.IsJitCompiler());
 
     ArrayRef<const uint8_t> reserved_code;
     ArrayRef<const uint8_t> reserved_data;
diff --git a/compiler/optimizing/stack_map_stream.cc b/compiler/optimizing/stack_map_stream.cc
index f55bbee1c8..c13a35567b 100644
--- a/compiler/optimizing/stack_map_stream.cc
+++ b/compiler/optimizing/stack_map_stream.cc
@@ -49,7 +49,8 @@ void StackMapStream::BeginMethod(size_t frame_size_in_bytes,
                                  size_t core_spill_mask,
                                  size_t fp_spill_mask,
                                  uint32_t num_dex_registers,
-                                 bool baseline) {
+                                 bool baseline,
+                                 bool debuggable) {
   DCHECK(!in_method_) << "Mismatched Begin/End calls";
   in_method_ = true;
   DCHECK_EQ(packed_frame_size_, 0u) << "BeginMethod was already called";
@@ -60,6 +61,7 @@ void StackMapStream::BeginMethod(size_t frame_size_in_bytes,
   fp_spill_mask_ = fp_spill_mask;
   num_dex_registers_ = num_dex_registers;
   baseline_ = baseline;
+  debuggable_ = debuggable;
 
   if (kVerifyStackMaps) {
     dchecks_.emplace_back([=](const CodeInfo& code_info) {
@@ -367,6 +369,7 @@ ScopedArenaVector<uint8_t> StackMapStream::Encode() {
 
   uint32_t flags = (inline_infos_.size() > 0) ? CodeInfo::kHasInlineInfo : 0;
   flags |= baseline_ ? CodeInfo::kIsBaseline : 0;
+  flags |= debuggable_ ? CodeInfo::kIsDebuggable : 0;
   DCHECK_LE(flags, kVarintMax);  // Ensure flags can be read directly as byte.
   uint32_t bit_table_flags = 0;
   ForEachBitTable([&bit_table_flags](size_t i, auto bit_table) {
diff --git a/compiler/optimizing/stack_map_stream.h b/compiler/optimizing/stack_map_stream.h
index 27145a174c..1aaa6aee9e 100644
--- a/compiler/optimizing/stack_map_stream.h
+++ b/compiler/optimizing/stack_map_stream.h
@@ -64,7 +64,8 @@ class StackMapStream : public DeletableArenaObject<kArenaAllocStackMapStream> {
                    size_t core_spill_mask,
                    size_t fp_spill_mask,
                    uint32_t num_dex_registers,
-                   bool baseline = false);
+                   bool baseline,
+                   bool debuggable);
   void EndMethod(size_t code_size);
 
   void BeginStackMapEntry(uint32_t dex_pc,
@@ -125,6 +126,7 @@ class StackMapStream : public DeletableArenaObject<kArenaAllocStackMapStream> {
   uint32_t fp_spill_mask_ = 0;
   uint32_t num_dex_registers_ = 0;
   bool baseline_;
+  bool debuggable_;
   BitTableBuilder<StackMap> stack_maps_;
   BitTableBuilder<RegisterMask> register_masks_;
   BitmapTableBuilder stack_masks_;
diff --git a/compiler/optimizing/stack_map_test.cc b/compiler/optimizing/stack_map_test.cc
index f6a739e15a..23af1f7fa1 100644
--- a/compiler/optimizing/stack_map_test.cc
+++ b/compiler/optimizing/stack_map_test.cc
@@ -52,7 +52,12 @@ TEST(StackMapTest, Test1) {
   ArenaStack arena_stack(&pool);
   ScopedArenaAllocator allocator(&arena_stack);
   StackMapStream stream(&allocator, kRuntimeISA);
-  stream.BeginMethod(32, 0, 0, 2);
+  stream.BeginMethod(/* frame_size_in_bytes= */ 32,
+                     /* core_spill_mask= */ 0,
+                     /* fp_spill_mask= */ 0,
+                     /* num_dex_registers= */ 2,
+                     /* baseline= */ false,
+                     /* debuggable= */ false);
 
   ArenaBitVector sp_mask(&allocator, 0, false);
   size_t number_of_dex_registers = 2;
@@ -106,7 +111,12 @@ TEST(StackMapTest, Test2) {
   ArenaStack arena_stack(&pool);
   ScopedArenaAllocator allocator(&arena_stack);
   StackMapStream stream(&allocator, kRuntimeISA);
-  stream.BeginMethod(32, 0, 0, 2);
+  stream.BeginMethod(/* frame_size_in_bytes= */ 32,
+                     /* core_spill_mask= */ 0,
+                     /* fp_spill_mask= */ 0,
+                     /* num_dex_registers= */ 2,
+                     /* baseline= */ false,
+                     /* debuggable= */ false);
   ArtMethod art_method;
 
   ArenaBitVector sp_mask1(&allocator, 0, true);
@@ -300,7 +310,12 @@ TEST(StackMapTest, TestDeduplicateInlineInfoDexRegisterMap) {
   ArenaStack arena_stack(&pool);
   ScopedArenaAllocator allocator(&arena_stack);
   StackMapStream stream(&allocator, kRuntimeISA);
-  stream.BeginMethod(32, 0, 0, 2);
+  stream.BeginMethod(/* frame_size_in_bytes= */ 32,
+                     /* core_spill_mask= */ 0,
+                     /* fp_spill_mask= */ 0,
+                     /* num_dex_registers= */ 2,
+                     /* baseline= */ false,
+                     /* debuggable= */ false);
   ArtMethod art_method;
 
   ArenaBitVector sp_mask1(&allocator, 0, true);
@@ -363,7 +378,12 @@ TEST(StackMapTest, TestNonLiveDexRegisters) {
   ArenaStack arena_stack(&pool);
   ScopedArenaAllocator allocator(&arena_stack);
   StackMapStream stream(&allocator, kRuntimeISA);
-  stream.BeginMethod(32, 0, 0, 2);
+  stream.BeginMethod(/* frame_size_in_bytes= */ 32,
+                     /* core_spill_mask= */ 0,
+                     /* fp_spill_mask= */ 0,
+                     /* num_dex_registers= */ 2,
+                     /* baseline= */ false,
+                     /* debuggable= */ false);
 
   ArenaBitVector sp_mask(&allocator, 0, false);
   uint32_t number_of_dex_registers = 2;
@@ -411,7 +431,12 @@ TEST(StackMapTest, TestShareDexRegisterMap) {
   ArenaStack arena_stack(&pool);
   ScopedArenaAllocator allocator(&arena_stack);
   StackMapStream stream(&allocator, kRuntimeISA);
-  stream.BeginMethod(32, 0, 0, 2);
+  stream.BeginMethod(/* frame_size_in_bytes= */ 32,
+                     /* core_spill_mask= */ 0,
+                     /* fp_spill_mask= */ 0,
+                     /* num_dex_registers= */ 2,
+                     /* baseline= */ false,
+                     /* debuggable= */ false);
 
   ArenaBitVector sp_mask(&allocator, 0, false);
   uint32_t number_of_dex_registers = 2;
@@ -467,7 +492,12 @@ TEST(StackMapTest, TestNoDexRegisterMap) {
   ArenaStack arena_stack(&pool);
   ScopedArenaAllocator allocator(&arena_stack);
   StackMapStream stream(&allocator, kRuntimeISA);
-  stream.BeginMethod(32, 0, 0, 1);
+  stream.BeginMethod(/* frame_size_in_bytes= */ 32,
+                     /* core_spill_mask= */ 0,
+                     /* fp_spill_mask= */ 0,
+                     /* num_dex_registers= */ 1,
+                     /* baseline= */ false,
+                     /* debuggable= */ false);
 
   ArenaBitVector sp_mask(&allocator, 0, false);
   stream.BeginStackMapEntry(0, 64 * kPcAlign, 0x3, &sp_mask);
@@ -512,7 +542,12 @@ TEST(StackMapTest, InlineTest) {
   ArenaStack arena_stack(&pool);
   ScopedArenaAllocator allocator(&arena_stack);
   StackMapStream stream(&allocator, kRuntimeISA);
-  stream.BeginMethod(32, 0, 0, 2);
+  stream.BeginMethod(/* frame_size_in_bytes= */ 32,
+                     /* core_spill_mask= */ 0,
+                     /* fp_spill_mask= */ 0,
+                     /* num_dex_registers= */ 2,
+                     /* baseline= */ false,
+                     /* debuggable= */ false);
   ArtMethod art_method;
 
   ArenaBitVector sp_mask1(&allocator, 0, true);
@@ -702,7 +737,12 @@ TEST(StackMapTest, TestDeduplicateStackMask) {
   ArenaStack arena_stack(&pool);
   ScopedArenaAllocator allocator(&arena_stack);
   StackMapStream stream(&allocator, kRuntimeISA);
-  stream.BeginMethod(32, 0, 0, 0);
+  stream.BeginMethod(/* frame_size_in_bytes= */ 32,
+                     /* core_spill_mask= */ 0,
+                     /* fp_spill_mask= */ 0,
+                     /* num_dex_registers= */ 0,
+                     /* baseline= */ false,
+                     /* debuggable= */ false);
 
   ArenaBitVector sp_mask(&allocator, 0, true);
   sp_mask.SetBit(1);
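
For context, here is a minimal standalone model of the CodeInfo flags byte
that StackMapStream::Encode() writes above. This is a sketch, not ART code:
the enumerator values are assumptions for illustration (the real constants
live in art::CodeInfo and are not spelled out in this patch); only the bit-OR
layout and the one-varint-byte constraint mirror the change.

    // Standalone model of the flags byte written by StackMapStream::Encode().
    #include <cassert>
    #include <cstdint>

    enum CodeInfoFlags : uint32_t {
      kHasInlineInfo = 1u << 0,  // Assumed value.
      kIsBaseline    = 1u << 1,  // Assumed value.
      kIsDebuggable  = 1u << 2,  // Assumed value; the bit this patch introduces.
    };

    constexpr uint32_t kVarintMax = 0x7f;  // Assumed: max value that fits in one varint byte.

    uint32_t EncodeFlags(bool has_inline_info, bool baseline, bool debuggable) {
      uint32_t flags = has_inline_info ? kHasInlineInfo : 0u;
      flags |= baseline ? kIsBaseline : 0u;
      flags |= debuggable ? kIsDebuggable : 0u;  // Mirrors the line this patch adds.
      assert(flags <= kVarintMax);  // Flags must stay readable as a single byte.
      return flags;
    }

Note the two call-site policies in the patch: regular compiled methods take
the flag from the graph (GetGraph()->IsDebuggable()), while the JNI stub
paths mark the stack map only when JIT-compiling in a debuggable runtime
(compiler_options.GetDebuggable() && compiler_options.IsJitCompiler()).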
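
On the consumer side, the commit message routes two runtime decisions through
the new bit. The following sketch uses the same assumed bit layout as above;
DecodeFlags, NeedsEntryExitStubs, and IsAsyncDeoptimizeable are illustrative
names for this model, not exact ART signatures.

    // Consumer-side model: the runtime asks the compiled code itself whether
    // it was built with instrumentation support, instead of consulting the
    // global java-debuggable state that previously never changed after init.
    #include <cstdint>

    constexpr uint32_t kIsDebuggableBit = 1u << 2;  // Assumed bit position, as above.

    // The DCHECK_LE(flags, kVarintMax) in Encode() guarantees the flags fit
    // in one byte, so this model reads them directly from the encoded data.
    inline uint32_t DecodeFlags(const uint8_t* code_info_data) {
      return code_info_data[0];
    }

    // The two decisions the commit message mentions: whether method entry /
    // exit stubs are needed, and whether the code is async deoptimizeable.
    inline bool NeedsEntryExitStubs(const uint8_t* code_info_data) {
      return (DecodeFlags(code_info_data) & kIsDebuggableBit) != 0u;
    }

    inline bool IsAsyncDeoptimizeable(const uint8_t* code_info_data) {
      return (DecodeFlags(code_info_data) & kIsDebuggableBit) != 0u;
    }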