Reland "Introduce a flag to check if JITed code has instrumentation support"

This reverts commit 26aef1213dbdd7ab03688d898cf802c8c8d7e610.

Reason for revert: Relanding after a fix. When checking if the caller
is deoptimizable, we should consider the outer caller and not the
inlined method that we could currently be executing.
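
For context, a minimal self-contained sketch (illustrative only, not the
actual ART declarations) of the mechanism this change introduces: a
kIsDebuggable bit is packed next to the existing kHasInlineInfo and
kIsBaseline bits in the CodeInfo flags byte, so a stack walker can check
whether a JITed method was compiled with instrumentation (method entry /
exit hook) support. Only the flag names come from the diff below; the
values and helpers are made up here.

  #include <cassert>
  #include <cstdint>

  // Illustrative flag bits; the real values are an ART implementation detail.
  enum CodeInfoFlags : uint32_t {
    kHasInlineInfo = 1u << 0,
    kIsBaseline    = 1u << 1,
    kIsDebuggable  = 1u << 2,  // added by this change
  };

  // Mirrors the flag packing done in StackMapStream::EndMethod (see
  // compiler/optimizing/stack_map_stream.cc below).
  uint32_t EncodeFlags(bool has_inline_info, bool baseline, bool debuggable) {
    uint32_t flags = has_inline_info ? kHasInlineInfo : 0u;
    flags |= baseline ? kIsBaseline : 0u;
    flags |= debuggable ? kIsDebuggable : 0u;
    return flags;
  }

  // A stack walker can then cheaply test whether a JITed frame supports
  // entry / exit hooks instead of assuming all JITed code does.
  bool HasInstrumentationSupport(uint32_t flags) {
    return (flags & kIsDebuggable) != 0u;
  }

  int main() {
    assert(HasInstrumentationSupport(EncodeFlags(/* has_inline_info= */ false,
                                                 /* baseline= */ false,
                                                 /* debuggable= */ true)));
    return 0;
  }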

Bug: 222479430
Change-Id: I37cbc8f1b34113a36a92c3801db72b16d2b9c81a
diff --git a/compiler/exception_test.cc b/compiler/exception_test.cc
index 4471b93..a49c6c6 100644
--- a/compiler/exception_test.cc
+++ b/compiler/exception_test.cc
@@ -78,7 +78,12 @@
     ArenaStack arena_stack(&pool);
     ScopedArenaAllocator allocator(&arena_stack);
     StackMapStream stack_maps(&allocator, kRuntimeISA);
-    stack_maps.BeginMethod(4 * sizeof(void*), 0u, 0u, 0u);
+    stack_maps.BeginMethod(/* frame_size_in_bytes= */ 4 * sizeof(void*),
+                           /* core_spill_mask= */ 0u,
+                           /* fp_spill_mask= */ 0u,
+                           /* num_dex_registers= */ 0u,
+                           /* baseline= */ false,
+                           /* debuggable= */ false);
     stack_maps.BeginStackMapEntry(kDexPc, native_pc_offset);
     stack_maps.EndStackMapEntry();
     stack_maps.EndMethod(code_size);
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index d672500..42072eb 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -103,19 +103,19 @@
   // i.e. if the method was annotated with @CriticalNative
   const bool is_critical_native = (access_flags & kAccCriticalNative) != 0u;
 
+  bool needs_entry_exit_hooks =
+      compiler_options.GetDebuggable() && compiler_options.IsJitCompiler();
+  // We don't support JITing stubs for critical native methods in debuggable runtimes yet.
+  // TODO(mythria): Add support required for calling method entry / exit hooks from critical native
+  // methods.
+  DCHECK_IMPLIES(needs_entry_exit_hooks, !is_critical_native);
+
 // When walking the stack the top frame doesn't have a pc associated with it. We then depend on
 // the invariant that we don't have JITed code when AOT code is available. In debuggable runtimes
 // this invariant doesn't hold. So we tag the SP for JITed code to identify if we are executing
 // JITed code or AOT code. Since tagging involves additional instructions we tag only in
 // debuggable runtimes (a standalone sketch of this tagging idea follows this hunk).
-  bool should_tag_sp = compiler_options.GetDebuggable() && compiler_options.IsJitCompiler();
-
-  // We don't JIT stubs for critical native methods in debuggable runtimes.
-  // TODO(mythria): Add support required for calling method entry / exit hooks from critical native
-  // methods.
-  bool needs_entry_exit_hooks = compiler_options.GetDebuggable() &&
-                                compiler_options.IsJitCompiler() &&
-                                !is_critical_native;
+  bool should_tag_sp = needs_entry_exit_hooks;
 
   VLOG(jni) << "JniCompile: Method :: "
               << dex_file.PrettyMethod(method_idx, /* with signature */ true)
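
As an aside to the SP-tagging comment in the hunk above, here is a minimal
sketch of the idea under an assumed tag value. ART's actual tag bit and
helper names differ; this only illustrates how a tagged stack pointer lets
a walker distinguish JITed frames from AOT frames.

  #include <cstdint>

  // Hypothetical tag bit; the real value is an ART implementation detail.
  constexpr uintptr_t kJitSpTag = 0x1;

  uintptr_t TagSpForJitedCode(uintptr_t sp) { return sp | kJitSpTag; }
  bool IsJitedFrame(uintptr_t saved_sp) { return (saved_sp & kJitSpTag) != 0u; }
  uintptr_t StripSpTag(uintptr_t saved_sp) { return saved_sp & ~kJitSpTag; }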
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 8bd4406..d8fc3ba 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -389,7 +389,8 @@
                                    core_spill_mask_,
                                    fpu_spill_mask_,
                                    GetGraph()->GetNumberOfVRegs(),
-                                   GetGraph()->IsCompilingBaseline());
+                                   GetGraph()->IsCompilingBaseline(),
+                                   GetGraph()->IsDebuggable());
 
   size_t frame_start = GetAssembler()->CodeSize();
   GenerateFrameEntry();
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index a499c55..b0fa251 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -1115,17 +1115,18 @@
 
 static ScopedArenaVector<uint8_t> CreateJniStackMap(ScopedArenaAllocator* allocator,
                                                     const JniCompiledMethod& jni_compiled_method,
-                                                    size_t code_size) {
+                                                    size_t code_size,
+                                                    bool debuggable) {
   // StackMapStream is quite large, so allocate it using the ScopedArenaAllocator
   // to stay clear of the frame size limit.
   std::unique_ptr<StackMapStream> stack_map_stream(
       new (allocator) StackMapStream(allocator, jni_compiled_method.GetInstructionSet()));
-  stack_map_stream->BeginMethod(
-      jni_compiled_method.GetFrameSize(),
-      jni_compiled_method.GetCoreSpillMask(),
-      jni_compiled_method.GetFpSpillMask(),
-      /* num_dex_registers= */ 0,
-      /* baseline= */ false);
+  stack_map_stream->BeginMethod(jni_compiled_method.GetFrameSize(),
+                                jni_compiled_method.GetCoreSpillMask(),
+                                jni_compiled_method.GetFpSpillMask(),
+                                /* num_dex_registers= */ 0,
+                                /* baseline= */ false,
+                                debuggable);
   stack_map_stream->EndMethod(code_size);
   return stack_map_stream->Encode();
 }
@@ -1187,8 +1188,11 @@
   MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kCompiledNativeStub);
 
   ScopedArenaAllocator stack_map_allocator(&arena_stack);  // Will hold the stack map.
-  ScopedArenaVector<uint8_t> stack_map = CreateJniStackMap(
-      &stack_map_allocator, jni_compiled_method, jni_compiled_method.GetCode().size());
+  ScopedArenaVector<uint8_t> stack_map =
+      CreateJniStackMap(&stack_map_allocator,
+                        jni_compiled_method,
+                        jni_compiled_method.GetCode().size(),
+                        compiler_options.GetDebuggable() && compiler_options.IsJitCompiler());
   return CompiledMethod::SwapAllocCompiledMethod(
       GetCompiledMethodStorage(),
       jni_compiled_method.GetInstructionSet(),
@@ -1249,8 +1253,11 @@
     ArenaStack arena_stack(runtime->GetJitArenaPool());
     // StackMapStream is large and it does not fit into this frame, so we need a helper method.
     ScopedArenaAllocator stack_map_allocator(&arena_stack);  // Will hold the stack map.
-    ScopedArenaVector<uint8_t> stack_map = CreateJniStackMap(
-        &stack_map_allocator, jni_compiled_method, jni_compiled_method.GetCode().size());
+    ScopedArenaVector<uint8_t> stack_map =
+        CreateJniStackMap(&stack_map_allocator,
+                          jni_compiled_method,
+                          jni_compiled_method.GetCode().size(),
+                          compiler_options.GetDebuggable() && compiler_options.IsJitCompiler());
 
     ArrayRef<const uint8_t> reserved_code;
     ArrayRef<const uint8_t> reserved_data;
diff --git a/compiler/optimizing/stack_map_stream.cc b/compiler/optimizing/stack_map_stream.cc
index f55bbee..c13a355 100644
--- a/compiler/optimizing/stack_map_stream.cc
+++ b/compiler/optimizing/stack_map_stream.cc
@@ -49,7 +49,8 @@
                                  size_t core_spill_mask,
                                  size_t fp_spill_mask,
                                  uint32_t num_dex_registers,
-                                 bool baseline) {
+                                 bool baseline,
+                                 bool debuggable) {
   DCHECK(!in_method_) << "Mismatched Begin/End calls";
   in_method_ = true;
   DCHECK_EQ(packed_frame_size_, 0u) << "BeginMethod was already called";
@@ -60,6 +61,7 @@
   fp_spill_mask_ = fp_spill_mask;
   num_dex_registers_ = num_dex_registers;
   baseline_ = baseline;
+  debuggable_ = debuggable;
 
   if (kVerifyStackMaps) {
     dchecks_.emplace_back([=](const CodeInfo& code_info) {
@@ -367,6 +369,7 @@
 
   uint32_t flags = (inline_infos_.size() > 0) ? CodeInfo::kHasInlineInfo : 0;
   flags |= baseline_ ? CodeInfo::kIsBaseline : 0;
+  flags |= debuggable_ ? CodeInfo::kIsDebuggable : 0;
   DCHECK_LE(flags, kVarintMax);  // Ensure flags can be read directly as a byte.
   uint32_t bit_table_flags = 0;
   ForEachBitTable([&bit_table_flags](size_t i, auto bit_table) {
diff --git a/compiler/optimizing/stack_map_stream.h b/compiler/optimizing/stack_map_stream.h
index 27145a1..1aaa6ae 100644
--- a/compiler/optimizing/stack_map_stream.h
+++ b/compiler/optimizing/stack_map_stream.h
@@ -64,7 +64,8 @@
                    size_t core_spill_mask,
                    size_t fp_spill_mask,
                    uint32_t num_dex_registers,
-                   bool baseline = false);
+                   bool baseline,
+                   bool debuggable);
   void EndMethod(size_t code_size);
 
   void BeginStackMapEntry(uint32_t dex_pc,
@@ -125,6 +126,7 @@
   uint32_t fp_spill_mask_ = 0;
   uint32_t num_dex_registers_ = 0;
   bool baseline_;
+  bool debuggable_;
   BitTableBuilder<StackMap> stack_maps_;
   BitTableBuilder<RegisterMask> register_masks_;
   BitmapTableBuilder stack_masks_;
diff --git a/compiler/optimizing/stack_map_test.cc b/compiler/optimizing/stack_map_test.cc
index f6a739e..23af1f7 100644
--- a/compiler/optimizing/stack_map_test.cc
+++ b/compiler/optimizing/stack_map_test.cc
@@ -52,7 +52,12 @@
   ArenaStack arena_stack(&pool);
   ScopedArenaAllocator allocator(&arena_stack);
   StackMapStream stream(&allocator, kRuntimeISA);
-  stream.BeginMethod(32, 0, 0, 2);
+  stream.BeginMethod(/* frame_size_in_bytes= */ 32,
+                     /* core_spill_mask= */ 0,
+                     /* fp_spill_mask= */ 0,
+                     /* num_dex_registers= */ 2,
+                     /* baseline= */ false,
+                     /* debuggable= */ false);
 
   ArenaBitVector sp_mask(&allocator, 0, false);
   size_t number_of_dex_registers = 2;
@@ -106,7 +111,12 @@
   ArenaStack arena_stack(&pool);
   ScopedArenaAllocator allocator(&arena_stack);
   StackMapStream stream(&allocator, kRuntimeISA);
-  stream.BeginMethod(32, 0, 0, 2);
+  stream.BeginMethod(/* frame_size_in_bytes= */ 32,
+                     /* core_spill_mask= */ 0,
+                     /* fp_spill_mask= */ 0,
+                     /* num_dex_registers= */ 2,
+                     /* baseline= */ false,
+                     /* debuggable= */ false);
   ArtMethod art_method;
 
   ArenaBitVector sp_mask1(&allocator, 0, true);
@@ -300,7 +310,12 @@
   ArenaStack arena_stack(&pool);
   ScopedArenaAllocator allocator(&arena_stack);
   StackMapStream stream(&allocator, kRuntimeISA);
-  stream.BeginMethod(32, 0, 0, 2);
+  stream.BeginMethod(/* frame_size_in_bytes= */ 32,
+                     /* core_spill_mask= */ 0,
+                     /* fp_spill_mask= */ 0,
+                     /* num_dex_registers= */ 2,
+                     /* baseline= */ false,
+                     /* debuggable= */ false);
   ArtMethod art_method;
 
   ArenaBitVector sp_mask1(&allocator, 0, true);
@@ -363,7 +378,12 @@
   ArenaStack arena_stack(&pool);
   ScopedArenaAllocator allocator(&arena_stack);
   StackMapStream stream(&allocator, kRuntimeISA);
-  stream.BeginMethod(32, 0, 0, 2);
+  stream.BeginMethod(/* frame_size_in_bytes= */ 32,
+                     /* core_spill_mask= */ 0,
+                     /* fp_spill_mask= */ 0,
+                     /* num_dex_registers= */ 2,
+                     /* baseline= */ false,
+                     /* debuggable= */ false);
 
   ArenaBitVector sp_mask(&allocator, 0, false);
   uint32_t number_of_dex_registers = 2;
@@ -411,7 +431,12 @@
   ArenaStack arena_stack(&pool);
   ScopedArenaAllocator allocator(&arena_stack);
   StackMapStream stream(&allocator, kRuntimeISA);
-  stream.BeginMethod(32, 0, 0, 2);
+  stream.BeginMethod(/* frame_size_in_bytes= */ 32,
+                     /* core_spill_mask= */ 0,
+                     /* fp_spill_mask= */ 0,
+                     /* num_dex_registers= */ 2,
+                     /* baseline= */ false,
+                     /* debuggable= */ false);
 
   ArenaBitVector sp_mask(&allocator, 0, false);
   uint32_t number_of_dex_registers = 2;
@@ -467,7 +492,12 @@
   ArenaStack arena_stack(&pool);
   ScopedArenaAllocator allocator(&arena_stack);
   StackMapStream stream(&allocator, kRuntimeISA);
-  stream.BeginMethod(32, 0, 0, 1);
+  stream.BeginMethod(/* frame_size_in_bytes= */ 32,
+                     /* core_spill_mask= */ 0,
+                     /* fp_spill_mask= */ 0,
+                     /* num_dex_registers= */ 1,
+                     /* baseline= */ false,
+                     /* debuggable= */ false);
 
   ArenaBitVector sp_mask(&allocator, 0, false);
   stream.BeginStackMapEntry(0, 64 * kPcAlign, 0x3, &sp_mask);
@@ -512,7 +542,12 @@
   ArenaStack arena_stack(&pool);
   ScopedArenaAllocator allocator(&arena_stack);
   StackMapStream stream(&allocator, kRuntimeISA);
-  stream.BeginMethod(32, 0, 0, 2);
+  stream.BeginMethod(/* frame_size_in_bytes= */ 32,
+                     /* core_spill_mask= */ 0,
+                     /* fp_spill_mask= */ 0,
+                     /* num_dex_registers= */ 2,
+                     /* baseline= */ false,
+                     /* debuggable= */ false);
   ArtMethod art_method;
 
   ArenaBitVector sp_mask1(&allocator, 0, true);
@@ -702,7 +737,12 @@
   ArenaStack arena_stack(&pool);
   ScopedArenaAllocator allocator(&arena_stack);
   StackMapStream stream(&allocator, kRuntimeISA);
-  stream.BeginMethod(32, 0, 0, 0);
+  stream.BeginMethod(/* frame_size_in_bytes= */ 32,
+                     /* core_spill_mask= */ 0,
+                     /* fp_spill_mask= */ 0,
+                     /* num_dex_registers= */ 0,
+                     /* baseline= */ false,
+                     /* debuggable= */ false);
 
   ArenaBitVector sp_mask(&allocator, 0, true);
   sp_mask.SetBit(1);