 compiler/Android.bp                             |  1
 compiler/optimizing/code_generator_arm64.cc     | 35
 compiler/optimizing/code_generator_arm_vixl.cc  | 40
 compiler/optimizing/code_generator_riscv64.cc   | 50
 compiler/optimizing/code_generator_x86.cc       | 46
 compiler/optimizing/code_generator_x86_64.cc    | 34
 compiler/optimizing/optimizing_compiler.cc      | 17
 compiler/optimizing/profiling_info_builder.cc   | 89
 compiler/optimizing/profiling_info_builder.h    | 60
 runtime/jit/jit_code_cache.cc                   | 12
 runtime/jit/profiling_info.cc                   | 16
 runtime/jit/profiling_info.h                    |  4
 test/570-checker-osr/osr.cc                     | 15
 test/570-checker-osr/src/Main.java              |  5
 test/595-profile-saving/expected-stdout.txt     |  1
 test/595-profile-saving/profile-saving.cc       | 11
 test/595-profile-saving/run.py                  |  2
 test/595-profile-saving/src/Main.java           | 15
 test/common/runtime_state.cc                    |  1
 19 files changed, 300 insertions(+), 154 deletions(-)
diff --git a/compiler/Android.bp b/compiler/Android.bp
index 4427cd0f22..e0c1744ce3 100644
--- a/compiler/Android.bp
+++ b/compiler/Android.bp
@@ -177,6 +177,7 @@ art_cc_defaults {
"optimizing/optimizing_compiler.cc",
"optimizing/parallel_move_resolver.cc",
"optimizing/prepare_for_register_allocation.cc",
+ "optimizing/profiling_info_builder.cc",
"optimizing/reference_type_propagation.cc",
"optimizing/register_allocation_resolver.cc",
"optimizing/register_allocator.cc",
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 4f5db1f1c2..4be0542236 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -45,6 +45,7 @@
#include "offsets.h"
#include "optimizing/common_arm64.h"
#include "optimizing/nodes.h"
+#include "profiling_info_builder.h"
#include "thread.h"
#include "trace.h"
#include "utils/arm64/assembler_arm64.h"
@@ -4593,24 +4594,26 @@ void LocationsBuilderARM64::VisitInvokeInterface(HInvokeInterface* invoke) {
void CodeGeneratorARM64::MaybeGenerateInlineCacheCheck(HInstruction* instruction,
Register klass) {
DCHECK_EQ(klass.GetCode(), 0u);
- // We know the destination of an intrinsic, so no need to record inline
- // caches.
- if (!instruction->GetLocations()->Intrinsified() &&
- GetGraph()->IsCompilingBaseline() &&
- !Runtime::Current()->IsAotCompiler()) {
- DCHECK(!instruction->GetEnvironment()->IsFromInlinedInvoke());
+ if (ProfilingInfoBuilder::IsInlineCacheUseful(instruction->AsInvoke())) {
ProfilingInfo* info = GetGraph()->GetProfilingInfo();
DCHECK(info != nullptr);
- InlineCache* cache = info->GetInlineCache(instruction->GetDexPc());
- uint64_t address = reinterpret_cast64<uint64_t>(cache);
- vixl::aarch64::Label done;
- __ Mov(x8, address);
- __ Ldr(w9, MemOperand(x8, InlineCache::ClassesOffset().Int32Value()));
- // Fast path for a monomorphic cache.
- __ Cmp(klass.W(), w9);
- __ B(eq, &done);
- InvokeRuntime(kQuickUpdateInlineCache, instruction, instruction->GetDexPc());
- __ Bind(&done);
+ InlineCache* cache = ProfilingInfoBuilder::GetInlineCache(info, instruction->AsInvoke());
+ if (cache != nullptr) {
+ uint64_t address = reinterpret_cast64<uint64_t>(cache);
+ vixl::aarch64::Label done;
+ __ Mov(x8, address);
+ __ Ldr(w9, MemOperand(x8, InlineCache::ClassesOffset().Int32Value()));
+ // Fast path for a monomorphic cache.
+ __ Cmp(klass.W(), w9);
+ __ B(eq, &done);
+ InvokeRuntime(kQuickUpdateInlineCache, instruction, instruction->GetDexPc());
+ __ Bind(&done);
+ } else {
+ // This is unexpected, but we don't guarantee stable compilation across
+ // JIT runs so just warn about it.
+ ScopedObjectAccess soa(Thread::Current());
+ LOG(WARNING) << "Missing inline cache for " << GetGraph()->GetArtMethod()->PrettyMethod();
+ }
}
}
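
For reference, the check emitted above (and mirrored by the arm, riscv64, x86, and x86_64 backends below) implements the following runtime logic. This is a standalone sketch with simplified stand-in types, not ART's actual declarations:

  #include <cstddef>

  struct Class;
  struct InlineCache {
    static constexpr std::size_t kIndividualCacheSize = 5;  // assumed cache width
    Class* classes_[kIndividualCacheSize];  // first slot is the fast-path entry
  };

  // Runtime stub reached on the slow path (kQuickUpdateInlineCache above).
  void art_quick_update_inline_cache(InlineCache* cache, Class* klass);

  void MaybeUpdateInlineCache(InlineCache* cache, Class* klass) {
    if (cache->classes_[0] == klass) {
      return;  // Monomorphic hit: the receiver class is already recorded.
    }
    art_quick_update_inline_cache(cache, klass);  // Record a new receiver class.
  }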
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index dacf034327..627c9bcf0b 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -39,6 +39,7 @@
#include "mirror/array-inl.h"
#include "mirror/class-inl.h"
#include "mirror/var_handle.h"
+#include "profiling_info_builder.h"
#include "scoped_thread_state_change-inl.h"
#include "thread.h"
#include "trace.h"
@@ -3684,26 +3685,27 @@ void LocationsBuilderARMVIXL::VisitInvokeInterface(HInvokeInterface* invoke) {
void CodeGeneratorARMVIXL::MaybeGenerateInlineCacheCheck(HInstruction* instruction,
vixl32::Register klass) {
DCHECK_EQ(r0.GetCode(), klass.GetCode());
- // We know the destination of an intrinsic, so no need to record inline
- // caches.
- if (!instruction->GetLocations()->Intrinsified() &&
- GetGraph()->IsCompilingBaseline() &&
- !Runtime::Current()->IsAotCompiler()) {
- DCHECK(!instruction->GetEnvironment()->IsFromInlinedInvoke());
+ if (ProfilingInfoBuilder::IsInlineCacheUseful(instruction->AsInvoke())) {
ProfilingInfo* info = GetGraph()->GetProfilingInfo();
- DCHECK(info != nullptr);
- InlineCache* cache = info->GetInlineCache(instruction->GetDexPc());
- uint32_t address = reinterpret_cast32<uint32_t>(cache);
- vixl32::Label done;
- UseScratchRegisterScope temps(GetVIXLAssembler());
- temps.Exclude(ip);
- __ Mov(r4, address);
- __ Ldr(ip, MemOperand(r4, InlineCache::ClassesOffset().Int32Value()));
- // Fast path for a monomorphic cache.
- __ Cmp(klass, ip);
- __ B(eq, &done, /* is_far_target= */ false);
- InvokeRuntime(kQuickUpdateInlineCache, instruction, instruction->GetDexPc());
- __ Bind(&done);
+ InlineCache* cache = ProfilingInfoBuilder::GetInlineCache(info, instruction->AsInvoke());
+ if (cache != nullptr) {
+ uint32_t address = reinterpret_cast32<uint32_t>(cache);
+ vixl32::Label done;
+ UseScratchRegisterScope temps(GetVIXLAssembler());
+ temps.Exclude(ip);
+ __ Mov(r4, address);
+ __ Ldr(ip, MemOperand(r4, InlineCache::ClassesOffset().Int32Value()));
+ // Fast path for a monomorphic cache.
+ __ Cmp(klass, ip);
+ __ B(eq, &done, /* is_far_target= */ false);
+ InvokeRuntime(kQuickUpdateInlineCache, instruction, instruction->GetDexPc());
+ __ Bind(&done);
+ } else {
+ // This is unexpected, but we don't guarantee stable compilation across
+ // JIT runs so just warn about it.
+ ScopedObjectAccess soa(Thread::Current());
+ LOG(WARNING) << "Missing inline cache for " << GetGraph()->GetArtMethod()->PrettyMethod();
+ }
}
}
diff --git a/compiler/optimizing/code_generator_riscv64.cc b/compiler/optimizing/code_generator_riscv64.cc
index 1e1dd4ab3b..f5b16d43d1 100644
--- a/compiler/optimizing/code_generator_riscv64.cc
+++ b/compiler/optimizing/code_generator_riscv64.cc
@@ -34,6 +34,7 @@
#include "linker/linker_patch.h"
#include "mirror/class-inl.h"
#include "optimizing/nodes.h"
+#include "optimizing/profiling_info_builder.h"
#include "runtime.h"
#include "scoped_thread_state_change-inl.h"
#include "stack_map_stream.h"
@@ -6720,32 +6721,35 @@ void CodeGeneratorRISCV64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* inv
void CodeGeneratorRISCV64::MaybeGenerateInlineCacheCheck(HInstruction* instruction,
XRegister klass) {
- // We know the destination of an intrinsic, so no need to record inline caches.
- if (!instruction->GetLocations()->Intrinsified() &&
- GetGraph()->IsCompilingBaseline() &&
- !Runtime::Current()->IsAotCompiler()) {
- DCHECK(!instruction->GetEnvironment()->IsFromInlinedInvoke());
+ if (ProfilingInfoBuilder::IsInlineCacheUseful(instruction->AsInvoke())) {
ProfilingInfo* info = GetGraph()->GetProfilingInfo();
DCHECK(info != nullptr);
- InlineCache* cache = info->GetInlineCache(instruction->GetDexPc());
- uint64_t address = reinterpret_cast64<uint64_t>(cache);
- Riscv64Label done;
- // The `art_quick_update_inline_cache` expects the inline cache in T5.
- XRegister ic_reg = T5;
- ScratchRegisterScope srs(GetAssembler());
- DCHECK_EQ(srs.AvailableXRegisters(), 2u);
- srs.ExcludeXRegister(ic_reg);
- DCHECK_EQ(srs.AvailableXRegisters(), 1u);
- __ LoadConst64(ic_reg, address);
- {
- ScratchRegisterScope srs2(GetAssembler());
- XRegister tmp = srs2.AllocateXRegister();
- __ Loadd(tmp, ic_reg, InlineCache::ClassesOffset().Int32Value());
- // Fast path for a monomorphic cache.
- __ Beq(klass, tmp, &done);
+ InlineCache* cache = ProfilingInfoBuilder::GetInlineCache(info, instruction->AsInvoke());
+ if (cache != nullptr) {
+ uint64_t address = reinterpret_cast64<uint64_t>(cache);
+ Riscv64Label done;
+ // The `art_quick_update_inline_cache` expects the inline cache in T5.
+ XRegister ic_reg = T5;
+ ScratchRegisterScope srs(GetAssembler());
+ DCHECK_EQ(srs.AvailableXRegisters(), 2u);
+ srs.ExcludeXRegister(ic_reg);
+ DCHECK_EQ(srs.AvailableXRegisters(), 1u);
+ __ LoadConst64(ic_reg, address);
+ {
+ ScratchRegisterScope srs2(GetAssembler());
+ XRegister tmp = srs2.AllocateXRegister();
+ __ Loadd(tmp, ic_reg, InlineCache::ClassesOffset().Int32Value());
+ // Fast path for a monomorphic cache.
+ __ Beq(klass, tmp, &done);
+ }
+ InvokeRuntime(kQuickUpdateInlineCache, instruction, instruction->GetDexPc());
+ __ Bind(&done);
+ } else {
+ // This is unexpected, but we don't guarantee stable compilation across
+ // JIT runs so just warn about it.
+ ScopedObjectAccess soa(Thread::Current());
+ LOG(WARNING) << "Missing inline cache for " << GetGraph()->GetArtMethod()->PrettyMethod();
}
- InvokeRuntime(kQuickUpdateInlineCache, instruction, instruction->GetDexPc());
- __ Bind(&done);
}
}
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index a923e578c0..f07860031d 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -37,6 +37,7 @@
#include "mirror/class-inl.h"
#include "mirror/var_handle.h"
#include "optimizing/nodes.h"
+#include "profiling_info_builder.h"
#include "scoped_thread_state_change-inl.h"
#include "thread.h"
#include "trace.h"
@@ -2798,7 +2799,7 @@ void LocationsBuilderX86::VisitInvokeVirtual(HInvokeVirtual* invoke) {
HandleInvoke(invoke);
- if (GetGraph()->IsCompilingBaseline() && !Runtime::Current()->IsAotCompiler()) {
+ if (ProfilingInfoBuilder::IsInlineCacheUseful(invoke)) {
// Add one temporary for inline cache update.
invoke->GetLocations()->AddTemp(Location::RegisterLocation(EBP));
}
@@ -2826,7 +2827,7 @@ void LocationsBuilderX86::VisitInvokeInterface(HInvokeInterface* invoke) {
// Add the hidden argument.
invoke->GetLocations()->AddTemp(Location::FpuRegisterLocation(XMM7));
- if (GetGraph()->IsCompilingBaseline() && !Runtime::Current()->IsAotCompiler()) {
+ if (ProfilingInfoBuilder::IsInlineCacheUseful(invoke)) {
// Add one temporary for inline cache update.
invoke->GetLocations()->AddTemp(Location::RegisterLocation(EBP));
}
@@ -2844,29 +2845,30 @@ void LocationsBuilderX86::VisitInvokeInterface(HInvokeInterface* invoke) {
void CodeGeneratorX86::MaybeGenerateInlineCacheCheck(HInstruction* instruction, Register klass) {
DCHECK_EQ(EAX, klass);
- // We know the destination of an intrinsic, so no need to record inline
- // caches (also the intrinsic location builder doesn't request an additional
- // temporary).
- if (!instruction->GetLocations()->Intrinsified() &&
- GetGraph()->IsCompilingBaseline() &&
- !Runtime::Current()->IsAotCompiler()) {
- DCHECK(!instruction->GetEnvironment()->IsFromInlinedInvoke());
+ if (ProfilingInfoBuilder::IsInlineCacheUseful(instruction->AsInvoke())) {
ProfilingInfo* info = GetGraph()->GetProfilingInfo();
DCHECK(info != nullptr);
- InlineCache* cache = info->GetInlineCache(instruction->GetDexPc());
- uint32_t address = reinterpret_cast32<uint32_t>(cache);
- if (kIsDebugBuild) {
- uint32_t temp_index = instruction->GetLocations()->GetTempCount() - 1u;
- CHECK_EQ(EBP, instruction->GetLocations()->GetTemp(temp_index).AsRegister<Register>());
+ InlineCache* cache = ProfilingInfoBuilder::GetInlineCache(info, instruction->AsInvoke());
+ if (cache != nullptr) {
+ uint32_t address = reinterpret_cast32<uint32_t>(cache);
+ if (kIsDebugBuild) {
+ uint32_t temp_index = instruction->GetLocations()->GetTempCount() - 1u;
+ CHECK_EQ(EBP, instruction->GetLocations()->GetTemp(temp_index).AsRegister<Register>());
+ }
+ Register temp = EBP;
+ NearLabel done;
+ __ movl(temp, Immediate(address));
+ // Fast path for a monomorphic cache.
+ __ cmpl(klass, Address(temp, InlineCache::ClassesOffset().Int32Value()));
+ __ j(kEqual, &done);
+ GenerateInvokeRuntime(GetThreadOffset<kX86PointerSize>(kQuickUpdateInlineCache).Int32Value());
+ __ Bind(&done);
+ } else {
+ // This is unexpected, but we don't guarantee stable compilation across
+ // JIT runs so just warn about it.
+ ScopedObjectAccess soa(Thread::Current());
+ LOG(WARNING) << "Missing inline cache for " << GetGraph()->GetArtMethod()->PrettyMethod();
}
- Register temp = EBP;
- NearLabel done;
- __ movl(temp, Immediate(address));
- // Fast path for a monomorphic cache.
- __ cmpl(klass, Address(temp, InlineCache::ClassesOffset().Int32Value()));
- __ j(kEqual, &done);
- GenerateInvokeRuntime(GetThreadOffset<kX86PointerSize>(kQuickUpdateInlineCache).Int32Value());
- __ Bind(&done);
}
}
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 24cb0c30b7..c777258201 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -38,6 +38,7 @@
#include "mirror/object_reference.h"
#include "mirror/var_handle.h"
#include "optimizing/nodes.h"
+#include "profiling_info_builder.h"
#include "scoped_thread_state_change-inl.h"
#include "thread.h"
#include "trace.h"
@@ -3094,23 +3095,26 @@ void LocationsBuilderX86_64::VisitInvokeInterface(HInvokeInterface* invoke) {
void CodeGeneratorX86_64::MaybeGenerateInlineCacheCheck(HInstruction* instruction,
CpuRegister klass) {
DCHECK_EQ(RDI, klass.AsRegister());
- // We know the destination of an intrinsic, so no need to record inline
- // caches.
- if (!instruction->GetLocations()->Intrinsified() &&
- GetGraph()->IsCompilingBaseline() &&
- !Runtime::Current()->IsAotCompiler()) {
+ if (ProfilingInfoBuilder::IsInlineCacheUseful(instruction->AsInvoke())) {
ProfilingInfo* info = GetGraph()->GetProfilingInfo();
DCHECK(info != nullptr);
- InlineCache* cache = info->GetInlineCache(instruction->GetDexPc());
- uint64_t address = reinterpret_cast64<uint64_t>(cache);
- NearLabel done;
- __ movq(CpuRegister(TMP), Immediate(address));
- // Fast path for a monomorphic cache.
- __ cmpl(Address(CpuRegister(TMP), InlineCache::ClassesOffset().Int32Value()), klass);
- __ j(kEqual, &done);
- GenerateInvokeRuntime(
- GetThreadOffset<kX86_64PointerSize>(kQuickUpdateInlineCache).Int32Value());
- __ Bind(&done);
+ InlineCache* cache = ProfilingInfoBuilder::GetInlineCache(info, instruction->AsInvoke());
+ if (cache != nullptr) {
+ uint64_t address = reinterpret_cast64<uint64_t>(cache);
+ NearLabel done;
+ __ movq(CpuRegister(TMP), Immediate(address));
+ // Fast path for a monomorphic cache.
+ __ cmpl(Address(CpuRegister(TMP), InlineCache::ClassesOffset().Int32Value()), klass);
+ __ j(kEqual, &done);
+ GenerateInvokeRuntime(
+ GetThreadOffset<kX86_64PointerSize>(kQuickUpdateInlineCache).Int32Value());
+ __ Bind(&done);
+ } else {
+ // This is unexpected, but we don't guarantee stable compilation across
+ // JIT runs so just warn about it.
+ ScopedObjectAccess soa(Thread::Current());
+ LOG(WARNING) << "Missing inline cache for " << GetGraph()->GetArtMethod()->PrettyMethod();
+ }
}
}
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 0069a20a26..2886e731b5 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -53,6 +53,7 @@
#include "oat_quick_method_header.h"
#include "optimizing/write_barrier_elimination.h"
#include "prepare_for_register_allocation.h"
+#include "profiling_info_builder.h"
#include "reference_type_propagation.h"
#include "register_allocator_linear_scan.h"
#include "select_generator.h"
@@ -835,8 +836,6 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
jit::Jit* jit = Runtime::Current()->GetJit();
if (jit != nullptr) {
ProfilingInfo* info = jit->GetCodeCache()->GetProfilingInfo(method, Thread::Current());
- DCHECK_IMPLIES(compilation_kind == CompilationKind::kBaseline, info != nullptr)
- << "Compiling a method baseline should always have a ProfilingInfo";
graph->SetProfilingInfo(info);
}
@@ -920,6 +919,20 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
&pass_observer,
regalloc_strategy,
compilation_stats_.get());
+ // If we are compiling baseline and we haven't created a profiling info for
+ // this method already, do it now.
+ if (jit != nullptr &&
+ compilation_kind == CompilationKind::kBaseline &&
+ graph->GetProfilingInfo() == nullptr) {
+ ProfilingInfoBuilder(graph, codegen->GetCompilerOptions(), compilation_stats_.get()).Run();
+ // We expect a profiling info to be created and attached to the graph.
+ // However, we may have run out of memory trying to create it, so in this
+ // case just abort the compilation.
+ if (graph->GetProfilingInfo() == nullptr) {
+ MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kJitOutOfMemoryForCommit);
+ return nullptr;
+ }
+ }
codegen->Compile();
pass_observer.DumpDisassembly();
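
Taken with the jit_code_cache.cc and profiling_info.cc changes below, this moves profiling-info creation from JIT notification time into the compiler itself. A comment sketch of the contract change (a summary, not verbatim code):

  // Before: JitCodeCache::NotifyCompilationOf eagerly created a ProfilingInfo
  //         with one InlineCache per INVOKE_VIRTUAL/INVOKE_INTERFACE dex pc.
  // After:  ProfilingInfoBuilder runs here, on the optimized graph, so invokes
  //         that were intrinsified, proven exact, or resolved to a final
  //         method/class no longer get an InlineCache slot, and a cache lookup
  //         by dex pc can now legitimately come back empty.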
diff --git a/compiler/optimizing/profiling_info_builder.cc b/compiler/optimizing/profiling_info_builder.cc
new file mode 100644
index 0000000000..7e8cdb1454
--- /dev/null
+++ b/compiler/optimizing/profiling_info_builder.cc
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "profiling_info_builder.h"
+
+#include "art_method-inl.h"
+#include "driver/compiler_options.h"
+#include "dex/code_item_accessors-inl.h"
+#include "jit/profiling_info.h"
+#include "optimizing_compiler_stats.h"
+#include "scoped_thread_state_change-inl.h"
+
+namespace art HIDDEN {
+
+void ProfilingInfoBuilder::Run() {
+ DCHECK_EQ(GetGraph()->GetProfilingInfo(), nullptr);
+ // Order does not matter.
+ for (HBasicBlock* block : GetGraph()->GetReversePostOrder()) {
+ // No need to visit the phis.
+ for (HInstructionIteratorHandleChanges inst_it(block->GetInstructions()); !inst_it.Done();
+ inst_it.Advance()) {
+ inst_it.Current()->Accept(this);
+ }
+ }
+
+ ScopedObjectAccess soa(Thread::Current());
+ GetGraph()->SetProfilingInfo(
+ ProfilingInfo::Create(soa.Self(), GetGraph()->GetArtMethod(), inline_caches_));
+}
+
+void ProfilingInfoBuilder::HandleInvoke(HInvoke* invoke) {
+ DCHECK(!invoke->GetEnvironment()->IsFromInlinedInvoke());
+ if (IsInlineCacheUseful(invoke)) {
+ inline_caches_.push_back(invoke->GetDexPc());
+ }
+}
+
+void ProfilingInfoBuilder::VisitInvokeInterface(HInvokeInterface* invoke) {
+ HandleInvoke(invoke);
+}
+
+void ProfilingInfoBuilder::VisitInvokeVirtual(HInvokeVirtual* invoke) {
+ HandleInvoke(invoke);
+}
+
+bool ProfilingInfoBuilder::IsInlineCacheUseful(HInvoke* invoke) {
+ DCHECK(invoke->IsInvokeVirtual() || invoke->IsInvokeInterface());
+ if (invoke->IsIntrinsic()) {
+ return false;
+ }
+ if (!invoke->GetBlock()->GetGraph()->IsCompilingBaseline()) {
+ return false;
+ }
+ if (Runtime::Current()->IsAotCompiler()) {
+ return false;
+ }
+ if (invoke->InputAt(0)->GetReferenceTypeInfo().IsExact()) {
+ return false;
+ }
+ if (invoke->GetResolvedMethod() != nullptr) {
+ ScopedObjectAccess soa(Thread::Current());
+ if (invoke->GetResolvedMethod()->IsFinal() ||
+ invoke->GetResolvedMethod()->GetDeclaringClass()->IsFinal()) {
+ return false;
+ }
+ }
+ return true;
+}
+
+InlineCache* ProfilingInfoBuilder::GetInlineCache(ProfilingInfo* info, HInvoke* instruction) {
+ DCHECK(!instruction->GetEnvironment()->IsFromInlinedInvoke());
+ ScopedObjectAccess soa(Thread::Current());
+ return info->GetInlineCache(instruction->GetDexPc());
+}
+
+} // namespace art
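
An illustration of what IsInlineCacheUseful keeps and drops, using hypothetical call sites; the kept/dropped outcomes follow the checks above:

  // o.toString()           -> kept: virtual call, receiver type not exact.
  // iface.run()            -> kept: interface call, several targets possible.
  // "abc".length()         -> dropped: receiver type is exact, single target.
  // Math.abs(x)            -> dropped if recognized as an intrinsic.
  // onFinalClassOrMethod() -> dropped: resolved method or its class is final.
  //
  // AOT compilations and non-baseline JIT compilations never allocate caches.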
diff --git a/compiler/optimizing/profiling_info_builder.h b/compiler/optimizing/profiling_info_builder.h
new file mode 100644
index 0000000000..315b7de418
--- /dev/null
+++ b/compiler/optimizing/profiling_info_builder.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_PROFILING_INFO_BUILDER_H_
+#define ART_COMPILER_OPTIMIZING_PROFILING_INFO_BUILDER_H_
+
+#include "base/macros.h"
+#include "nodes.h"
+
+namespace art HIDDEN {
+
+class CompilerOptions;
+class InlineCache;
+class ProfilingInfo;
+
+class ProfilingInfoBuilder : public HGraphDelegateVisitor {
+ public:
+ ProfilingInfoBuilder(HGraph* graph,
+ const CompilerOptions& compiler_options,
+ OptimizingCompilerStats* stats = nullptr)
+ : HGraphDelegateVisitor(graph, stats),
+ compiler_options_(compiler_options) {}
+
+ void Run();
+
+ static constexpr const char* kProfilingInfoBuilderPassName =
+ "profiling_info_builder";
+
+ static InlineCache* GetInlineCache(ProfilingInfo* info, HInvoke* invoke);
+ static bool IsInlineCacheUseful(HInvoke* invoke);
+
+ private:
+ void VisitInvokeVirtual(HInvokeVirtual* invoke) override;
+ void VisitInvokeInterface(HInvokeInterface* invoke) override;
+
+ void HandleInvoke(HInvoke* invoke);
+
+ [[maybe_unused]] const CompilerOptions& compiler_options_;
+ std::vector<uint32_t> inline_caches_;
+
+ DISALLOW_COPY_AND_ASSIGN(ProfilingInfoBuilder);
+};
+
+} // namespace art
+
+
+#endif // ART_COMPILER_OPTIMIZING_PROFILING_INFO_BUILDER_H_
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 6a7820b207..d05baf2c57 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -1599,18 +1599,6 @@ bool JitCodeCache::NotifyCompilationOf(ArtMethod* method,
} else {
if (compilation_kind == CompilationKind::kBaseline) {
DCHECK(CanAllocateProfilingInfo());
- bool has_profiling_info = false;
- {
- MutexLock mu(self, *Locks::jit_lock_);
- has_profiling_info = (profiling_infos_.find(method) != profiling_infos_.end());
- }
- if (!has_profiling_info) {
- if (ProfilingInfo::Create(self, method) == nullptr) {
- VLOG(jit) << method->PrettyMethod() << " needs a ProfilingInfo to be compiled baseline";
- ClearMethodCounter(method, /*was_warm=*/ false);
- return false;
- }
- }
}
}
return true;
diff --git a/runtime/jit/profiling_info.cc b/runtime/jit/profiling_info.cc
index 31bee69896..2e94a41093 100644
--- a/runtime/jit/profiling_info.cc
+++ b/runtime/jit/profiling_info.cc
@@ -50,22 +50,16 @@ uint16_t ProfilingInfo::GetOptimizeThreshold() {
return Runtime::Current()->GetJITOptions()->GetOptimizeThreshold();
}
-ProfilingInfo* ProfilingInfo::Create(Thread* self, ArtMethod* method) {
+ProfilingInfo* ProfilingInfo::Create(Thread* self,
+ ArtMethod* method,
+ const std::vector<uint32_t>& inline_cache_entries) {
// Walk over the dex instructions of the method and keep track of
// instructions we are interested in profiling.
DCHECK(!method->IsNative());
- std::vector<uint32_t> inline_cache_entries;
std::vector<uint32_t> branch_cache_entries;
for (const DexInstructionPcPair& inst : method->DexInstructions()) {
switch (inst->Opcode()) {
- case Instruction::INVOKE_VIRTUAL:
- case Instruction::INVOKE_VIRTUAL_RANGE:
- case Instruction::INVOKE_INTERFACE:
- case Instruction::INVOKE_INTERFACE_RANGE:
- inline_cache_entries.push_back(inst.DexPc());
- break;
-
case Instruction::IF_EQ:
case Instruction::IF_EQZ:
case Instruction::IF_NE:
@@ -102,9 +96,7 @@ InlineCache* ProfilingInfo::GetInlineCache(uint32_t dex_pc) {
return &caches[i];
}
}
- ScopedObjectAccess soa(Thread::Current());
- LOG(FATAL) << "No inline cache found for " << ArtMethod::PrettyMethod(method_) << "@" << dex_pc;
- UNREACHABLE();
+ return nullptr;
}
BranchCache* ProfilingInfo::GetBranchCache(uint32_t dex_pc) {
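
GetInlineCache is now a lookup that may fail rather than a hard assertion, so every caller needs a null check. A sketch of the new caller contract, as the code generators above apply it:

  InlineCache* cache = info->GetInlineCache(dex_pc);
  if (cache == nullptr) {
    // No entry was allocated for this dex pc, e.g. because ProfilingInfoBuilder
    // filtered the invoke out in an earlier JIT run; warn instead of crashing.
  }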
diff --git a/runtime/jit/profiling_info.h b/runtime/jit/profiling_info.h
index 219699421f..62b431d7a2 100644
--- a/runtime/jit/profiling_info.h
+++ b/runtime/jit/profiling_info.h
@@ -99,7 +99,9 @@ class BranchCache {
class ProfilingInfo {
public:
// Create a ProfilingInfo for 'method'.
- static ProfilingInfo* Create(Thread* self, ArtMethod* method)
+ static ProfilingInfo* Create(Thread* self,
+ ArtMethod* method,
+ const std::vector<uint32_t>& inline_cache_entries)
REQUIRES_SHARED(Locks::mutator_lock_);
// Add information from an executed INVOKE instruction to the profile.
diff --git a/test/570-checker-osr/osr.cc b/test/570-checker-osr/osr.cc
index d1caf3fc8c..506887a8ea 100644
--- a/test/570-checker-osr/osr.cc
+++ b/test/570-checker-osr/osr.cc
@@ -98,21 +98,6 @@ extern "C" JNIEXPORT jboolean JNICALL Java_Main_isInInterpreter(JNIEnv* env,
return in_interpreter;
}
-extern "C" JNIEXPORT void JNICALL Java_Main_ensureHasProfilingInfo(JNIEnv* env,
- jclass,
- jstring method_name) {
- if (!Runtime::Current()->UseJitCompilation()) {
- return;
- }
- ProcessMethodWithName(
- env,
- method_name,
- [&](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
- ArtMethod* m = stack_visitor->GetMethod();
- ProfilingInfo::Create(Thread::Current(), m);
- });
-}
-
extern "C" JNIEXPORT void JNICALL Java_Main_ensureHasOsrCode(JNIEnv* env,
jclass,
jstring method_name) {
diff --git a/test/570-checker-osr/src/Main.java b/test/570-checker-osr/src/Main.java
index 78f3c32d2f..752d338bfa 100644
--- a/test/570-checker-osr/src/Main.java
+++ b/test/570-checker-osr/src/Main.java
@@ -297,8 +297,11 @@ public class Main {
public static native boolean isInOsrCode(String methodName);
public static native boolean isInInterpreter(String methodName);
- public static native void ensureHasProfilingInfo(String methodName);
+ public static void ensureHasProfilingInfo(String methodName) {
+ ensureJitBaselineCompiled(Main.class, methodName);
+ }
public static native void ensureHasOsrCode(String methodName);
+ public static native void ensureJitBaselineCompiled(Class<?> cls, String methodName);
}
class SubMain extends Main {
diff --git a/test/595-profile-saving/expected-stdout.txt b/test/595-profile-saving/expected-stdout.txt
index 9e28e07261..6a5618ebc6 100644
--- a/test/595-profile-saving/expected-stdout.txt
+++ b/test/595-profile-saving/expected-stdout.txt
@@ -1,2 +1 @@
JNI_OnLoad called
-IsForBootImage: true
diff --git a/test/595-profile-saving/profile-saving.cc b/test/595-profile-saving/profile-saving.cc
index bec4ae923f..912762c33d 100644
--- a/test/595-profile-saving/profile-saving.cc
+++ b/test/595-profile-saving/profile-saving.cc
@@ -32,17 +32,6 @@
namespace art {
namespace {
-extern "C" JNIEXPORT void JNICALL Java_Main_ensureProfilingInfo(JNIEnv* env,
- jclass,
- jobject method) {
- CHECK(method != nullptr);
- ScopedObjectAccess soa(env);
- ObjPtr<mirror::Executable> exec = soa.Decode<mirror::Executable>(method);
- ArtMethod* art_method = exec->GetArtMethod();
- if (ProfilingInfo::Create(soa.Self(), art_method) == nullptr) {
- LOG(ERROR) << "Failed to create profiling info for method " << art_method->PrettyMethod();
- }
-}
extern "C" JNIEXPORT void JNICALL Java_Main_ensureProfileProcessing(JNIEnv*, jclass) {
ProfileSaver::ForceProcessProfiles();
diff --git a/test/595-profile-saving/run.py b/test/595-profile-saving/run.py
index 599f76a6fc..3375b0ebb4 100644
--- a/test/595-profile-saving/run.py
+++ b/test/595-profile-saving/run.py
@@ -20,7 +20,6 @@ def run(ctx, args):
# --compiler-filter=verify to make sure that the test is not compiled AOT
# and to make sure the test is not compiled when loaded (by PathClassLoader)
# -Xjitsaveprofilinginfo to enable profile saving
- # -Xusejit:false to disable jit and only test profiles.
# -Xjitinitialsize:32M to prevent profiling info creation failure.
ctx.default_run(
args,
@@ -29,6 +28,5 @@ def run(ctx, args):
"-Xcompiler-option --compiler-filter=verify",
"-Xjitinitialsize:32M",
"-Xjitsaveprofilinginfo",
- "-Xusejit:false",
"-Xps-profile-boot-class-path",
])
diff --git a/test/595-profile-saving/src/Main.java b/test/595-profile-saving/src/Main.java
index 3229b53d7d..8fdc4e1ede 100644
--- a/test/595-profile-saving/src/Main.java
+++ b/test/595-profile-saving/src/Main.java
@@ -23,6 +23,11 @@ public class Main {
public static void main(String[] args) throws Exception {
System.loadLibrary(args[0]);
+ if (!hasJit()) {
+ // Test requires JIT for creating profiling infos.
+ return;
+ }
+
File file = null;
File file2 = null;
File file3 = null;
@@ -88,7 +93,9 @@ public class Main {
testProfileNotExist(file2);
- System.out.println("IsForBootImage: " + isForBootImage(file.getPath()));
+ if (!isForBootImage(file.getPath())) {
+ throw new Error("Expected profile to be for boot image");
+ }
} finally {
if (file != null) {
file.delete();
@@ -129,13 +136,17 @@ public class Main {
}
// Ensure a method has a profiling info.
- public static native void ensureProfilingInfo(Method method);
+ public static void ensureProfilingInfo(Method method) {
+ ensureJitBaselineCompiled(method.getDeclaringClass(), method.getName());
+ }
+ public static native void ensureJitBaselineCompiled(Class<?> cls, String methodName);
// Ensures the profile saver does its usual processing.
public static native void ensureProfileProcessing();
// Checks if the profiles saver knows about the method.
public static native boolean presentInProfile(String profile, Method method);
// Returns true if the profile is for the boot image.
public static native boolean isForBootImage(String profile);
+ public static native boolean hasJit();
private static final String TEMP_FILE_NAME_PREFIX = "temp";
private static final String TEMP_FILE_NAME_SUFFIX = "-file";
diff --git a/test/common/runtime_state.cc b/test/common/runtime_state.cc
index c1c8936e9e..a530bd4794 100644
--- a/test/common/runtime_state.cc
+++ b/test/common/runtime_state.cc
@@ -51,6 +51,7 @@ static jit::Jit* GetJitIfEnabled() {
bool can_jit =
runtime != nullptr
&& runtime->GetJit() != nullptr
+ && runtime->UseJitCompilation()
&& runtime->GetInstrumentation()->GetCurrentInstrumentationLevel() !=
instrumentation::Instrumentation::InstrumentationLevel::kInstrumentWithInterpreter;
return can_jit ? runtime->GetJit() : nullptr;
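
The hasJit() native used by test 595 presumably bridges to this helper; a sketch of the assumed JNI glue (the actual binding lives elsewhere in runtime_state.cc):

  extern "C" JNIEXPORT jboolean JNICALL Java_Main_hasJit(JNIEnv*, jclass) {
    return (GetJitIfEnabled() != nullptr) ? JNI_TRUE : JNI_FALSE;
  }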