Diffstat (limited to 'compiler')
-rw-r--r--  compiler/exception_test.cc                  |  7
-rw-r--r--  compiler/jni/quick/jni_compiler.cc          | 16
-rw-r--r--  compiler/optimizing/code_generator.cc       |  3
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc  | 29
-rw-r--r--  compiler/optimizing/stack_map_stream.cc     |  5
-rw-r--r--  compiler/optimizing/stack_map_stream.h      |  4
-rw-r--r--  compiler/optimizing/stack_map_test.cc       | 56
7 files changed, 31 insertions, 89 deletions
diff --git a/compiler/exception_test.cc b/compiler/exception_test.cc
index a49c6c630e..4471b93f17 100644
--- a/compiler/exception_test.cc
+++ b/compiler/exception_test.cc
@@ -78,12 +78,7 @@ class ExceptionTest : public CommonRuntimeTest {
     ArenaStack arena_stack(&pool);
     ScopedArenaAllocator allocator(&arena_stack);
     StackMapStream stack_maps(&allocator, kRuntimeISA);
-    stack_maps.BeginMethod(/* frame_size_in_bytes= */ 4 * sizeof(void*),
-                           /* core_spill_mask= */ 0u,
-                           /* fp_spill_mask= */ 0u,
-                           /* num_dex_registers= */ 0u,
-                           /* baseline= */ false,
-                           /* debuggable= */ false);
+    stack_maps.BeginMethod(4 * sizeof(void*), 0u, 0u, 0u);
     stack_maps.BeginStackMapEntry(kDexPc, native_pc_offset);
     stack_maps.EndStackMapEntry();
     stack_maps.EndMethod(code_size);
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index 42072eb6e0..d672500126 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -103,19 +103,19 @@ static JniCompiledMethod ArtJniCompileMethodInternal(const CompilerOptions& comp
   // i.e. if the method was annotated with @CriticalNative
   const bool is_critical_native = (access_flags & kAccCriticalNative) != 0u;
 
-  bool needs_entry_exit_hooks =
-      compiler_options.GetDebuggable() && compiler_options.IsJitCompiler();
-  // We don't support JITing stubs for critical native methods in debuggable runtimes yet.
-  // TODO(mythria): Add support required for calling method entry / exit hooks from critical native
-  // methods.
-  DCHECK_IMPLIES(needs_entry_exit_hooks, !is_critical_native);
-
   // When walking the stack the top frame doesn't have a pc associated with it. We then depend on
   // the invariant that we don't have JITed code when AOT code is available. In debuggable runtimes
   // this invariant doesn't hold. So we tag the SP for JITed code to indentify if we are executing
   // JITed code or AOT code. Since tagging involves additional instructions we tag only in
   // debuggable runtimes.
-  bool should_tag_sp = needs_entry_exit_hooks;
+  bool should_tag_sp = compiler_options.GetDebuggable() && compiler_options.IsJitCompiler();
+
+  // We don't JIT stubs for critical native methods in debuggable runtimes.
+  // TODO(mythria): Add support required for calling method entry / exit hooks from critical native
+  // methods.
+  bool needs_entry_exit_hooks = compiler_options.GetDebuggable() &&
+                                compiler_options.IsJitCompiler() &&
+                                !is_critical_native;
 
   VLOG(jni) << "JniCompile: Method :: "
             << dex_file.PrettyMethod(method_idx, /* with signature */ true)
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index d8fc3ba690..8bd4406332 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -389,8 +389,7 @@ void CodeGenerator::Compile(CodeAllocator* allocator) {
                                    core_spill_mask_,
                                    fpu_spill_mask_,
                                    GetGraph()->GetNumberOfVRegs(),
-                                   GetGraph()->IsCompilingBaseline(),
-                                   GetGraph()->IsDebuggable());
+                                   GetGraph()->IsCompilingBaseline());
 
   size_t frame_start = GetAssembler()->CodeSize();
   GenerateFrameEntry();
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index b0fa251f55..a499c55757 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -1115,18 +1115,17 @@ CompiledMethod* OptimizingCompiler::Compile(const dex::CodeItem* code_item,
 
 static ScopedArenaVector<uint8_t> CreateJniStackMap(ScopedArenaAllocator* allocator,
                                                     const JniCompiledMethod& jni_compiled_method,
-                                                    size_t code_size,
-                                                    bool debuggable) {
+                                                    size_t code_size) {
   // StackMapStream is quite large, so allocate it using the ScopedArenaAllocator
   // to stay clear of the frame size limit.
   std::unique_ptr<StackMapStream> stack_map_stream(
       new (allocator) StackMapStream(allocator, jni_compiled_method.GetInstructionSet()));
-  stack_map_stream->BeginMethod(jni_compiled_method.GetFrameSize(),
-                                jni_compiled_method.GetCoreSpillMask(),
-                                jni_compiled_method.GetFpSpillMask(),
-                                /* num_dex_registers= */ 0,
-                                /* baseline= */ false,
-                                debuggable);
+  stack_map_stream->BeginMethod(
+      jni_compiled_method.GetFrameSize(),
+      jni_compiled_method.GetCoreSpillMask(),
+      jni_compiled_method.GetFpSpillMask(),
+      /* num_dex_registers= */ 0,
+      /* baseline= */ false);
   stack_map_stream->EndMethod(code_size);
   return stack_map_stream->Encode();
 }
@@ -1188,11 +1187,8 @@ CompiledMethod* OptimizingCompiler::JniCompile(uint32_t access_flags,
     MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kCompiledNativeStub);
 
     ScopedArenaAllocator stack_map_allocator(&arena_stack);  // Will hold the stack map.
-    ScopedArenaVector<uint8_t> stack_map =
-        CreateJniStackMap(&stack_map_allocator,
-                          jni_compiled_method,
-                          jni_compiled_method.GetCode().size(),
-                          compiler_options.GetDebuggable() && compiler_options.IsJitCompiler());
+    ScopedArenaVector<uint8_t> stack_map = CreateJniStackMap(
+        &stack_map_allocator, jni_compiled_method, jni_compiled_method.GetCode().size());
     return CompiledMethod::SwapAllocCompiledMethod(
         GetCompiledMethodStorage(),
         jni_compiled_method.GetInstructionSet(),
@@ -1253,11 +1249,8 @@ bool OptimizingCompiler::JitCompile(Thread* self,
     ArenaStack arena_stack(runtime->GetJitArenaPool());
     // StackMapStream is large and it does not fit into this frame, so we need helper method.
     ScopedArenaAllocator stack_map_allocator(&arena_stack);  // Will hold the stack map.
-    ScopedArenaVector<uint8_t> stack_map =
-        CreateJniStackMap(&stack_map_allocator,
-                          jni_compiled_method,
-                          jni_compiled_method.GetCode().size(),
-                          compiler_options.GetDebuggable() && compiler_options.IsJitCompiler());
+    ScopedArenaVector<uint8_t> stack_map = CreateJniStackMap(
+        &stack_map_allocator, jni_compiled_method, jni_compiled_method.GetCode().size());
 
     ArrayRef<const uint8_t> reserved_code;
     ArrayRef<const uint8_t> reserved_data;
diff --git a/compiler/optimizing/stack_map_stream.cc b/compiler/optimizing/stack_map_stream.cc
index c13a35567b..f55bbee1c8 100644
--- a/compiler/optimizing/stack_map_stream.cc
+++ b/compiler/optimizing/stack_map_stream.cc
@@ -49,8 +49,7 @@ void StackMapStream::BeginMethod(size_t frame_size_in_bytes,
                                  size_t core_spill_mask,
                                  size_t fp_spill_mask,
                                  uint32_t num_dex_registers,
-                                 bool baseline,
-                                 bool debuggable) {
+                                 bool baseline) {
   DCHECK(!in_method_) << "Mismatched Begin/End calls";
   in_method_ = true;
   DCHECK_EQ(packed_frame_size_, 0u) << "BeginMethod was already called";
@@ -61,7 +60,6 @@ void StackMapStream::BeginMethod(size_t frame_size_in_bytes,
   fp_spill_mask_ = fp_spill_mask;
   num_dex_registers_ = num_dex_registers;
   baseline_ = baseline;
-  debuggable_ = debuggable;
 
   if (kVerifyStackMaps) {
     dchecks_.emplace_back([=](const CodeInfo& code_info) {
@@ -369,7 +367,6 @@ ScopedArenaVector<uint8_t> StackMapStream::Encode() {
 
   uint32_t flags = (inline_infos_.size() > 0) ? CodeInfo::kHasInlineInfo : 0;
   flags |= baseline_ ? CodeInfo::kIsBaseline : 0;
-  flags |= debuggable_ ? CodeInfo::kIsDebuggable : 0;
   DCHECK_LE(flags, kVarintMax);  // Ensure flags can be read directly as byte.
   uint32_t bit_table_flags = 0;
   ForEachBitTable([&bit_table_flags](size_t i, auto bit_table) {
diff --git a/compiler/optimizing/stack_map_stream.h b/compiler/optimizing/stack_map_stream.h
index 1aaa6aee9e..27145a174c 100644
--- a/compiler/optimizing/stack_map_stream.h
+++ b/compiler/optimizing/stack_map_stream.h
@@ -64,8 +64,7 @@ class StackMapStream : public DeletableArenaObject<kArenaAllocStackMapStream> {
                    size_t core_spill_mask,
                    size_t fp_spill_mask,
                    uint32_t num_dex_registers,
-                   bool baseline,
-                   bool debuggable);
+                   bool baseline = false);
   void EndMethod(size_t code_size);
 
   void BeginStackMapEntry(uint32_t dex_pc,
@@ -126,7 +125,6 @@ class StackMapStream : public DeletableArenaObject<kArenaAllocStackMapStream> {
   uint32_t fp_spill_mask_ = 0;
   uint32_t num_dex_registers_ = 0;
   bool baseline_;
-  bool debuggable_;
   BitTableBuilder<StackMap> stack_maps_;
   BitTableBuilder<RegisterMask> register_masks_;
   BitmapTableBuilder stack_masks_;
diff --git a/compiler/optimizing/stack_map_test.cc b/compiler/optimizing/stack_map_test.cc
index 23af1f7fa1..f6a739e15a 100644
--- a/compiler/optimizing/stack_map_test.cc
+++ b/compiler/optimizing/stack_map_test.cc
@@ -52,12 +52,7 @@ TEST(StackMapTest, Test1) {
   ArenaStack arena_stack(&pool);
   ScopedArenaAllocator allocator(&arena_stack);
   StackMapStream stream(&allocator, kRuntimeISA);
-  stream.BeginMethod(/* frame_size_in_bytes= */ 32,
-                     /* core_spill_mask= */ 0,
-                     /* fp_spill_mask= */ 0,
-                     /* num_dex_registers= */ 2,
-                     /* baseline= */ false,
-                     /* debuggable= */ false);
+  stream.BeginMethod(32, 0, 0, 2);
 
   ArenaBitVector sp_mask(&allocator, 0, false);
   size_t number_of_dex_registers = 2;
@@ -111,12 +106,7 @@ TEST(StackMapTest, Test2) {
   ArenaStack arena_stack(&pool);
   ScopedArenaAllocator allocator(&arena_stack);
   StackMapStream stream(&allocator, kRuntimeISA);
-  stream.BeginMethod(/* frame_size_in_bytes= */ 32,
-                     /* core_spill_mask= */ 0,
-                     /* fp_spill_mask= */ 0,
-                     /* num_dex_registers= */ 2,
-                     /* baseline= */ false,
-                     /* debuggable= */ false);
+  stream.BeginMethod(32, 0, 0, 2);
   ArtMethod art_method;
 
   ArenaBitVector sp_mask1(&allocator, 0, true);
@@ -310,12 +300,7 @@ TEST(StackMapTest, TestDeduplicateInlineInfoDexRegisterMap) {
   ArenaStack arena_stack(&pool);
   ScopedArenaAllocator allocator(&arena_stack);
   StackMapStream stream(&allocator, kRuntimeISA);
-  stream.BeginMethod(/* frame_size_in_bytes= */ 32,
-                     /* core_spill_mask= */ 0,
-                     /* fp_spill_mask= */ 0,
-                     /* num_dex_registers= */ 2,
-                     /* baseline= */ false,
-                     /* debuggable= */ false);
+  stream.BeginMethod(32, 0, 0, 2);
   ArtMethod art_method;
 
   ArenaBitVector sp_mask1(&allocator, 0, true);
@@ -378,12 +363,7 @@ TEST(StackMapTest, TestNonLiveDexRegisters) {
   ArenaStack arena_stack(&pool);
   ScopedArenaAllocator allocator(&arena_stack);
   StackMapStream stream(&allocator, kRuntimeISA);
-  stream.BeginMethod(/* frame_size_in_bytes= */ 32,
-                     /* core_spill_mask= */ 0,
-                     /* fp_spill_mask= */ 0,
-                     /* num_dex_registers= */ 2,
-                     /* baseline= */ false,
-                     /* debuggable= */ false);
+  stream.BeginMethod(32, 0, 0, 2);
 
   ArenaBitVector sp_mask(&allocator, 0, false);
   uint32_t number_of_dex_registers = 2;
@@ -431,12 +411,7 @@ TEST(StackMapTest, TestShareDexRegisterMap) {
   ArenaStack arena_stack(&pool);
   ScopedArenaAllocator allocator(&arena_stack);
   StackMapStream stream(&allocator, kRuntimeISA);
-  stream.BeginMethod(/* frame_size_in_bytes= */ 32,
-                     /* core_spill_mask= */ 0,
-                     /* fp_spill_mask= */ 0,
-                     /* num_dex_registers= */ 2,
-                     /* baseline= */ false,
-                     /* debuggable= */ false);
+  stream.BeginMethod(32, 0, 0, 2);
 
   ArenaBitVector sp_mask(&allocator, 0, false);
   uint32_t number_of_dex_registers = 2;
@@ -492,12 +467,7 @@ TEST(StackMapTest, TestNoDexRegisterMap) {
   ArenaStack arena_stack(&pool);
   ScopedArenaAllocator allocator(&arena_stack);
   StackMapStream stream(&allocator, kRuntimeISA);
-  stream.BeginMethod(/* frame_size_in_bytes= */ 32,
-                     /* core_spill_mask= */ 0,
-                     /* fp_spill_mask= */ 0,
-                     /* num_dex_registers= */ 1,
-                     /* baseline= */ false,
-                     /* debuggable= */ false);
+  stream.BeginMethod(32, 0, 0, 1);
 
   ArenaBitVector sp_mask(&allocator, 0, false);
   stream.BeginStackMapEntry(0, 64 * kPcAlign, 0x3, &sp_mask);
@@ -542,12 +512,7 @@ TEST(StackMapTest, InlineTest) {
   ArenaStack arena_stack(&pool);
   ScopedArenaAllocator allocator(&arena_stack);
   StackMapStream stream(&allocator, kRuntimeISA);
-  stream.BeginMethod(/* frame_size_in_bytes= */ 32,
-                     /* core_spill_mask= */ 0,
-                     /* fp_spill_mask= */ 0,
-                     /* num_dex_registers= */ 2,
-                     /* baseline= */ false,
-                     /* debuggable= */ false);
+  stream.BeginMethod(32, 0, 0, 2);
   ArtMethod art_method;
 
   ArenaBitVector sp_mask1(&allocator, 0, true);
@@ -737,12 +702,7 @@ TEST(StackMapTest, TestDeduplicateStackMask) {
   ArenaStack arena_stack(&pool);
   ScopedArenaAllocator allocator(&arena_stack);
   StackMapStream stream(&allocator, kRuntimeISA);
-  stream.BeginMethod(/* frame_size_in_bytes= */ 32,
-                     /* core_spill_mask= */ 0,
-                     /* fp_spill_mask= */ 0,
-                     /* num_dex_registers= */ 0,
-                     /* baseline= */ false,
-                     /* debuggable= */ false);
+  stream.BeginMethod(32, 0, 0, 0);
 
   ArenaBitVector sp_mask(&allocator, 0, true);
   sp_mask.SetBit(1);