author Mythri Alle <mythria@google.com> 2022-03-23 12:49:30 +0000
committer Mythri Alle <mythria@google.com> 2022-07-12 13:03:15 +0000
commit fc067a360d14db5f84fd4b58e0dee6cb04ee759b (patch)
tree a9217edb3a03e3937411407f704ad26e5887fb9a /compiler
parent 0ae89052f7213701b8b3a782266e84b3d3600dbf (diff)
Introduce a flag to check if JITed code has instrumentation support
Introduce a new flag to identify whether JITed code was compiled with instrumentation support. We used to check if the runtime is java-debuggable to determine whether JITed code has instrumentation support. The java-debuggable flag was only set at runtime init and never changed afterwards, so that check was sufficient: we always JIT code with instrumentation support in debuggable runtimes.

We want to be able to change the runtime to debuggable after it has started. As a first step, introduce a new flag that explicitly records whether JITed code was compiled with instrumentation support. Use this flag to check if code needs entry / exit stubs and to check if code is async deoptimizeable.

Bug: 222479430
Test: art/test.py
Change-Id: Ibcaeab869aa8ce153920a801dcc60988411c775b
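Conceptually, the change moves the decision from a runtime-wide property to a per-method property recorded at compile time. A minimal sketch of the two models, using hypothetical stand-in types (RuntimeState, JitCodeInfo) rather than the real ART classes:

```cpp
// Illustrative stand-ins only; these are not the real ART classes.
struct RuntimeState {
  bool is_java_debuggable = false;  // old model: fixed at runtime init
};

struct JitCodeInfo {
  bool compiled_with_instrumentation = false;  // new model: recorded per method
};

// Old check: a debuggable runtime implies the JITed code has entry/exit hooks.
bool HasInstrumentationSupportOld(const RuntimeState& runtime) {
  return runtime.is_java_debuggable;
}

// New check: ask the code itself, so the runtime can become debuggable later
// without assuming anything about code compiled before the switch.
bool HasInstrumentationSupportNew(const JitCodeInfo& code) {
  return code.compiled_with_instrumentation;
}
```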
Diffstat (limited to 'compiler')
-rw-r--r--  compiler/exception_test.cc                   7
-rw-r--r--  compiler/jni/quick/jni_compiler.cc          16
-rw-r--r--  compiler/optimizing/code_generator.cc        3
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc  29
-rw-r--r--  compiler/optimizing/stack_map_stream.cc      5
-rw-r--r--  compiler/optimizing/stack_map_stream.h       4
-rw-r--r--  compiler/optimizing/stack_map_test.cc       56
7 files changed, 89 insertions, 31 deletions
diff --git a/compiler/exception_test.cc b/compiler/exception_test.cc
index 4471b93f17..a49c6c630e 100644
--- a/compiler/exception_test.cc
+++ b/compiler/exception_test.cc
@@ -78,7 +78,12 @@ class ExceptionTest : public CommonRuntimeTest {
ArenaStack arena_stack(&pool);
ScopedArenaAllocator allocator(&arena_stack);
StackMapStream stack_maps(&allocator, kRuntimeISA);
- stack_maps.BeginMethod(4 * sizeof(void*), 0u, 0u, 0u);
+ stack_maps.BeginMethod(/* frame_size_in_bytes= */ 4 * sizeof(void*),
+ /* core_spill_mask= */ 0u,
+ /* fp_spill_mask= */ 0u,
+ /* num_dex_registers= */ 0u,
+ /* baseline= */ false,
+ /* debuggable= */ false);
stack_maps.BeginStackMapEntry(kDexPc, native_pc_offset);
stack_maps.EndStackMapEntry();
stack_maps.EndMethod(code_size);
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index d672500126..42072eb6e0 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -103,19 +103,19 @@ static JniCompiledMethod ArtJniCompileMethodInternal(const CompilerOptions& comp
// i.e. if the method was annotated with @CriticalNative
const bool is_critical_native = (access_flags & kAccCriticalNative) != 0u;
+ bool needs_entry_exit_hooks =
+ compiler_options.GetDebuggable() && compiler_options.IsJitCompiler();
+ // We don't support JITing stubs for critical native methods in debuggable runtimes yet.
+ // TODO(mythria): Add support required for calling method entry / exit hooks from critical native
+ // methods.
+ DCHECK_IMPLIES(needs_entry_exit_hooks, !is_critical_native);
+
// When walking the stack the top frame doesn't have a pc associated with it. We then depend on
// the invariant that we don't have JITed code when AOT code is available. In debuggable runtimes
// this invariant doesn't hold. So we tag the SP for JITed code to identify if we are executing
// JITed code or AOT code. Since tagging involves additional instructions we tag only in
// debuggable runtimes.
- bool should_tag_sp = compiler_options.GetDebuggable() && compiler_options.IsJitCompiler();
-
- // We don't JIT stubs for critical native methods in debuggable runtimes.
- // TODO(mythria): Add support required for calling method entry / exit hooks from critical native
- // methods.
- bool needs_entry_exit_hooks = compiler_options.GetDebuggable() &&
- compiler_options.IsJitCompiler() &&
- !is_critical_native;
+ bool should_tag_sp = needs_entry_exit_hooks;
VLOG(jni) << "JniCompile: Method :: "
<< dex_file.PrettyMethod(method_idx, /* with signature */ true)
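The net effect of the hunk above is that both booleans now derive from one condition. A condensed sketch of that logic, with a hypothetical OptionsSketch type standing in for CompilerOptions:

```cpp
#include <cassert>

// Hypothetical stand-in for the CompilerOptions queries used in this hunk.
struct OptionsSketch {
  bool debuggable = false;
  bool jit_compiler = false;
};

// JITed code in a debuggable runtime gets entry/exit hooks, and the same code
// tags SP so a stack walk can tell a JITed frame from an AOT frame.
void ComputeJniStubFlags(const OptionsSketch& opts,
                         bool is_critical_native,
                         bool* needs_entry_exit_hooks,
                         bool* should_tag_sp) {
  *needs_entry_exit_hooks = opts.debuggable && opts.jit_compiler;
  // Critical native stubs are not JITed in debuggable runtimes yet.
  assert(!(*needs_entry_exit_hooks && is_critical_native));
  *should_tag_sp = *needs_entry_exit_hooks;
}
```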
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 8bd4406332..d8fc3ba690 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -389,7 +389,8 @@ void CodeGenerator::Compile(CodeAllocator* allocator) {
core_spill_mask_,
fpu_spill_mask_,
GetGraph()->GetNumberOfVRegs(),
- GetGraph()->IsCompilingBaseline());
+ GetGraph()->IsCompilingBaseline(),
+ GetGraph()->IsDebuggable());
size_t frame_start = GetAssembler()->CodeSize();
GenerateFrameEntry();
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index a499c55757..b0fa251f55 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -1115,17 +1115,18 @@ CompiledMethod* OptimizingCompiler::Compile(const dex::CodeItem* code_item,
static ScopedArenaVector<uint8_t> CreateJniStackMap(ScopedArenaAllocator* allocator,
const JniCompiledMethod& jni_compiled_method,
- size_t code_size) {
+ size_t code_size,
+ bool debuggable) {
// StackMapStream is quite large, so allocate it using the ScopedArenaAllocator
// to stay clear of the frame size limit.
std::unique_ptr<StackMapStream> stack_map_stream(
new (allocator) StackMapStream(allocator, jni_compiled_method.GetInstructionSet()));
- stack_map_stream->BeginMethod(
- jni_compiled_method.GetFrameSize(),
- jni_compiled_method.GetCoreSpillMask(),
- jni_compiled_method.GetFpSpillMask(),
- /* num_dex_registers= */ 0,
- /* baseline= */ false);
+ stack_map_stream->BeginMethod(jni_compiled_method.GetFrameSize(),
+ jni_compiled_method.GetCoreSpillMask(),
+ jni_compiled_method.GetFpSpillMask(),
+ /* num_dex_registers= */ 0,
+ /* baseline= */ false,
+ debuggable);
stack_map_stream->EndMethod(code_size);
return stack_map_stream->Encode();
}
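The comment in this hunk about the frame-size limit is why the stream is placement-new'd into the arena instead of living on the stack. A self-contained sketch of that pattern, using a hypothetical BumpArena in place of ART's ScopedArenaAllocator:

```cpp
#include <cstdint>
#include <new>
#include <vector>

// Hypothetical bump allocator standing in for ScopedArenaAllocator.
class BumpArena {
 public:
  explicit BumpArena(size_t capacity) : storage_(capacity), offset_(0) {}
  void* Alloc(size_t bytes, size_t align) {
    size_t aligned = (offset_ + align - 1) & ~(align - 1);
    if (aligned + bytes > storage_.size()) return nullptr;
    offset_ = aligned + bytes;
    return storage_.data() + aligned;
  }
 private:
  std::vector<uint8_t> storage_;
  size_t offset_;
};

// A deliberately large object: keeping it off the stack avoids blowing the
// frame-size limit, which is the rationale given in the diff's comment.
struct BigBuilder {
  uint8_t scratch[64 * 1024];
};

BigBuilder* NewInArena(BumpArena* arena) {
  void* mem = arena->Alloc(sizeof(BigBuilder), alignof(BigBuilder));
  return mem != nullptr ? new (mem) BigBuilder() : nullptr;  // placement new
}
```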
@@ -1187,8 +1188,11 @@ CompiledMethod* OptimizingCompiler::JniCompile(uint32_t access_flags,
MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kCompiledNativeStub);
ScopedArenaAllocator stack_map_allocator(&arena_stack); // Will hold the stack map.
- ScopedArenaVector<uint8_t> stack_map = CreateJniStackMap(
- &stack_map_allocator, jni_compiled_method, jni_compiled_method.GetCode().size());
+ ScopedArenaVector<uint8_t> stack_map =
+ CreateJniStackMap(&stack_map_allocator,
+ jni_compiled_method,
+ jni_compiled_method.GetCode().size(),
+ compiler_options.GetDebuggable() && compiler_options.IsJitCompiler());
return CompiledMethod::SwapAllocCompiledMethod(
GetCompiledMethodStorage(),
jni_compiled_method.GetInstructionSet(),
@@ -1249,8 +1253,11 @@ bool OptimizingCompiler::JitCompile(Thread* self,
ArenaStack arena_stack(runtime->GetJitArenaPool());
// StackMapStream is large and it does not fit into this frame, so we need a helper method.
ScopedArenaAllocator stack_map_allocator(&arena_stack); // Will hold the stack map.
- ScopedArenaVector<uint8_t> stack_map = CreateJniStackMap(
- &stack_map_allocator, jni_compiled_method, jni_compiled_method.GetCode().size());
+ ScopedArenaVector<uint8_t> stack_map =
+ CreateJniStackMap(&stack_map_allocator,
+ jni_compiled_method,
+ jni_compiled_method.GetCode().size(),
+ compiler_options.GetDebuggable() && compiler_options.IsJitCompiler());
ArrayRef<const uint8_t> reserved_code;
ArrayRef<const uint8_t> reserved_data;
diff --git a/compiler/optimizing/stack_map_stream.cc b/compiler/optimizing/stack_map_stream.cc
index f55bbee1c8..c13a35567b 100644
--- a/compiler/optimizing/stack_map_stream.cc
+++ b/compiler/optimizing/stack_map_stream.cc
@@ -49,7 +49,8 @@ void StackMapStream::BeginMethod(size_t frame_size_in_bytes,
size_t core_spill_mask,
size_t fp_spill_mask,
uint32_t num_dex_registers,
- bool baseline) {
+ bool baseline,
+ bool debuggable) {
DCHECK(!in_method_) << "Mismatched Begin/End calls";
in_method_ = true;
DCHECK_EQ(packed_frame_size_, 0u) << "BeginMethod was already called";
@@ -60,6 +61,7 @@ void StackMapStream::BeginMethod(size_t frame_size_in_bytes,
fp_spill_mask_ = fp_spill_mask;
num_dex_registers_ = num_dex_registers;
baseline_ = baseline;
+ debuggable_ = debuggable;
if (kVerifyStackMaps) {
dchecks_.emplace_back([=](const CodeInfo& code_info) {
@@ -367,6 +369,7 @@ ScopedArenaVector<uint8_t> StackMapStream::Encode() {
uint32_t flags = (inline_infos_.size() > 0) ? CodeInfo::kHasInlineInfo : 0;
flags |= baseline_ ? CodeInfo::kIsBaseline : 0;
+ flags |= debuggable_ ? CodeInfo::kIsDebuggable : 0;
DCHECK_LE(flags, kVarintMax); // Ensure flags can be read directly as byte.
uint32_t bit_table_flags = 0;
ForEachBitTable([&bit_table_flags](size_t i, auto bit_table) {
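For context, these flags are packed into one small integer so they can be read directly as a byte. A standalone sketch of how such a bitmask composes and is queried; only the flag names and the kVarintMax check come from the diff, the bit positions and the kVarintMaxSketch value are assumptions for illustration:

```cpp
#include <cassert>
#include <cstdint>

// Flag names from the diff; bit positions assumed for this sketch.
enum CodeInfoFlagsSketch : uint32_t {
  kHasInlineInfo = 1u << 0,
  kIsBaseline    = 1u << 1,
  kIsDebuggable  = 1u << 2,
};

constexpr uint32_t kVarintMaxSketch = 11;  // assumed value, for illustration

uint32_t EncodeFlags(bool has_inline_info, bool baseline, bool debuggable) {
  uint32_t flags = has_inline_info ? kHasInlineInfo : 0u;
  flags |= baseline ? kIsBaseline : 0u;
  flags |= debuggable ? kIsDebuggable : 0u;
  assert(flags <= kVarintMaxSketch);  // mirrors DCHECK_LE(flags, kVarintMax)
  return flags;
}

bool IsDebuggable(uint32_t flags) { return (flags & kIsDebuggable) != 0u; }
```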
diff --git a/compiler/optimizing/stack_map_stream.h b/compiler/optimizing/stack_map_stream.h
index 27145a174c..1aaa6aee9e 100644
--- a/compiler/optimizing/stack_map_stream.h
+++ b/compiler/optimizing/stack_map_stream.h
@@ -64,7 +64,8 @@ class StackMapStream : public DeletableArenaObject<kArenaAllocStackMapStream> {
size_t core_spill_mask,
size_t fp_spill_mask,
uint32_t num_dex_registers,
- bool baseline = false);
+ bool baseline,
+ bool debuggable);
void EndMethod(size_t code_size);
void BeginStackMapEntry(uint32_t dex_pc,
@@ -125,6 +126,7 @@ class StackMapStream : public DeletableArenaObject<kArenaAllocStackMapStream> {
uint32_t fp_spill_mask_ = 0;
uint32_t num_dex_registers_ = 0;
bool baseline_;
+ bool debuggable_;
BitTableBuilder<StackMap> stack_maps_;
BitTableBuilder<RegisterMask> register_masks_;
BitmapTableBuilder stack_masks_;
diff --git a/compiler/optimizing/stack_map_test.cc b/compiler/optimizing/stack_map_test.cc
index f6a739e15a..23af1f7fa1 100644
--- a/compiler/optimizing/stack_map_test.cc
+++ b/compiler/optimizing/stack_map_test.cc
@@ -52,7 +52,12 @@ TEST(StackMapTest, Test1) {
ArenaStack arena_stack(&pool);
ScopedArenaAllocator allocator(&arena_stack);
StackMapStream stream(&allocator, kRuntimeISA);
- stream.BeginMethod(32, 0, 0, 2);
+ stream.BeginMethod(/* frame_size_in_bytes= */ 32,
+ /* core_spill_mask= */ 0,
+ /* fp_spill_mask= */ 0,
+ /* num_dex_registers= */ 2,
+ /* baseline= */ false,
+ /* debuggable= */ false);
ArenaBitVector sp_mask(&allocator, 0, false);
size_t number_of_dex_registers = 2;
@@ -106,7 +111,12 @@ TEST(StackMapTest, Test2) {
ArenaStack arena_stack(&pool);
ScopedArenaAllocator allocator(&arena_stack);
StackMapStream stream(&allocator, kRuntimeISA);
- stream.BeginMethod(32, 0, 0, 2);
+ stream.BeginMethod(/* frame_size_in_bytes= */ 32,
+ /* core_spill_mask= */ 0,
+ /* fp_spill_mask= */ 0,
+ /* num_dex_registers= */ 2,
+ /* baseline= */ false,
+ /* debuggable= */ false);
ArtMethod art_method;
ArenaBitVector sp_mask1(&allocator, 0, true);
@@ -300,7 +310,12 @@ TEST(StackMapTest, TestDeduplicateInlineInfoDexRegisterMap) {
ArenaStack arena_stack(&pool);
ScopedArenaAllocator allocator(&arena_stack);
StackMapStream stream(&allocator, kRuntimeISA);
- stream.BeginMethod(32, 0, 0, 2);
+ stream.BeginMethod(/* frame_size_in_bytes= */ 32,
+ /* core_spill_mask= */ 0,
+ /* fp_spill_mask= */ 0,
+ /* num_dex_registers= */ 2,
+ /* baseline= */ false,
+ /* debuggable= */ false);
ArtMethod art_method;
ArenaBitVector sp_mask1(&allocator, 0, true);
@@ -363,7 +378,12 @@ TEST(StackMapTest, TestNonLiveDexRegisters) {
ArenaStack arena_stack(&pool);
ScopedArenaAllocator allocator(&arena_stack);
StackMapStream stream(&allocator, kRuntimeISA);
- stream.BeginMethod(32, 0, 0, 2);
+ stream.BeginMethod(/* frame_size_in_bytes= */ 32,
+ /* core_spill_mask= */ 0,
+ /* fp_spill_mask= */ 0,
+ /* num_dex_registers= */ 2,
+ /* baseline= */ false,
+ /* debuggable= */ false);
ArenaBitVector sp_mask(&allocator, 0, false);
uint32_t number_of_dex_registers = 2;
@@ -411,7 +431,12 @@ TEST(StackMapTest, TestShareDexRegisterMap) {
ArenaStack arena_stack(&pool);
ScopedArenaAllocator allocator(&arena_stack);
StackMapStream stream(&allocator, kRuntimeISA);
- stream.BeginMethod(32, 0, 0, 2);
+ stream.BeginMethod(/* frame_size_in_bytes= */ 32,
+ /* core_spill_mask= */ 0,
+ /* fp_spill_mask= */ 0,
+ /* num_dex_registers= */ 2,
+ /* baseline= */ false,
+ /* debuggable= */ false);
ArenaBitVector sp_mask(&allocator, 0, false);
uint32_t number_of_dex_registers = 2;
@@ -467,7 +492,12 @@ TEST(StackMapTest, TestNoDexRegisterMap) {
ArenaStack arena_stack(&pool);
ScopedArenaAllocator allocator(&arena_stack);
StackMapStream stream(&allocator, kRuntimeISA);
- stream.BeginMethod(32, 0, 0, 1);
+ stream.BeginMethod(/* frame_size_in_bytes= */ 32,
+ /* core_spill_mask= */ 0,
+ /* fp_spill_mask= */ 0,
+ /* num_dex_registers= */ 1,
+ /* baseline= */ false,
+ /* debuggable= */ false);
ArenaBitVector sp_mask(&allocator, 0, false);
stream.BeginStackMapEntry(0, 64 * kPcAlign, 0x3, &sp_mask);
@@ -512,7 +542,12 @@ TEST(StackMapTest, InlineTest) {
ArenaStack arena_stack(&pool);
ScopedArenaAllocator allocator(&arena_stack);
StackMapStream stream(&allocator, kRuntimeISA);
- stream.BeginMethod(32, 0, 0, 2);
+ stream.BeginMethod(/* frame_size_in_bytes= */ 32,
+ /* core_spill_mask= */ 0,
+ /* fp_spill_mask= */ 0,
+ /* num_dex_registers= */ 2,
+ /* baseline= */ false,
+ /* debuggable= */ false);
ArtMethod art_method;
ArenaBitVector sp_mask1(&allocator, 0, true);
@@ -702,7 +737,12 @@ TEST(StackMapTest, TestDeduplicateStackMask) {
ArenaStack arena_stack(&pool);
ScopedArenaAllocator allocator(&arena_stack);
StackMapStream stream(&allocator, kRuntimeISA);
- stream.BeginMethod(32, 0, 0, 0);
+ stream.BeginMethod(/* frame_size_in_bytes= */ 32,
+ /* core_spill_mask= */ 0,
+ /* fp_spill_mask= */ 0,
+ /* num_dex_registers= */ 0,
+ /* baseline= */ false,
+ /* debuggable= */ false);
ArenaBitVector sp_mask(&allocator, 0, true);
sp_mask.SetBit(1);