Diffstat (limited to 'compiler')
 compiler/optimizing/code_generator_arm64.cc    |  1 +
 compiler/optimizing/code_generator_arm_vixl.cc |  1 +
 compiler/optimizing/code_generator_x86.cc      |  1 +
 compiler/optimizing/code_generator_x86_64.cc   |  1 +
 compiler/optimizing/nodes.h                    | 13 ++++++++++++-
 5 files changed, 16 insertions(+), 1 deletion(-)
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index e1a4718140..92b43fb927 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -4636,6 +4636,7 @@ void CodeGeneratorARM64::GenerateStaticOrDirectCall(
   switch (invoke->GetCodePtrLocation()) {
     case CodePtrLocation::kCallSelf:
       {
+        DCHECK(!GetGraph()->HasShouldDeoptimizeFlag());
         // Use a scope to help guarantee that `RecordPcInfo()` records the correct pc.
         ExactAssemblyScope eas(GetVIXLAssembler(),
                                kInstructionSize,
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index bca093606d..b24dca9cb2 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -9253,6 +9253,7 @@ void CodeGeneratorARMVIXL::GenerateStaticOrDirectCall(
   switch (invoke->GetCodePtrLocation()) {
     case CodePtrLocation::kCallSelf:
       {
+        DCHECK(!GetGraph()->HasShouldDeoptimizeFlag());
         // Use a scope to help guarantee that `RecordPcInfo()` records the correct pc.
         ExactAssemblyScope aas(GetVIXLAssembler(),
                                vixl32::k32BitT32InstructionSizeInBytes,
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 4fc29fcb0c..099d84d9a6 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -5280,6 +5280,7 @@ void CodeGeneratorX86::GenerateStaticOrDirectCall(
   switch (invoke->GetCodePtrLocation()) {
     case CodePtrLocation::kCallSelf:
+      DCHECK(!GetGraph()->HasShouldDeoptimizeFlag());
       __ call(GetFrameEntryLabel());
       RecordPcInfo(invoke, invoke->GetDexPc(), slow_path);
       break;
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index d54484c065..9541933a2d 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -1077,6 +1077,7 @@ void CodeGeneratorX86_64::GenerateStaticOrDirectCall(
   switch (invoke->GetCodePtrLocation()) {
     case CodePtrLocation::kCallSelf:
+      DCHECK(!GetGraph()->HasShouldDeoptimizeFlag());
       __ call(&frame_entry_label_);
       RecordPcInfo(invoke, invoke->GetDexPc(), slow_path);
       break;
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 939c49f9a6..5a62580fac 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -4932,7 +4932,18 @@ class HInvokeStaticOrDirect final : public HInvoke {
   }
   MethodLoadKind GetMethodLoadKind() const { return dispatch_info_.method_load_kind; }
-  CodePtrLocation GetCodePtrLocation() const { return dispatch_info_.code_ptr_location; }
+  CodePtrLocation GetCodePtrLocation() const {
+    // We do CHA analysis after sharpening. When a method has CHA inlining, it
+    // cannot call itself, because if the CHA optimization is invalidated we
+    // want to make sure the method is never executed again. So, while sharpening
+    // can return kCallSelf, we bypass it here if there is a CHA optimization.
+    if (dispatch_info_.code_ptr_location == CodePtrLocation::kCallSelf &&
+        GetBlock()->GetGraph()->HasShouldDeoptimizeFlag()) {
+      return CodePtrLocation::kCallArtMethod;
+    } else {
+      return dispatch_info_.code_ptr_location;
+    }
+  }
   bool IsRecursive() const { return GetMethodLoadKind() == MethodLoadKind::kRecursive; }
   bool IsStringInit() const { return GetMethodLoadKind() == MethodLoadKind::kStringInit; }
   bool HasMethodAddress() const { return GetMethodLoadKind() == MethodLoadKind::kJitDirectAddress; }
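
Illustration (not part of the commit): a minimal standalone sketch of the dispatch decision the nodes.h hunk above introduces. Graph and Invoke are hypothetical stand-ins for ART's HGraph and HInvokeStaticOrDirect; the point is only that a sharpened kCallSelf is downgraded to an ArtMethod call when the graph carries the should-deoptimize flag set by CHA-guarded inlining, which is the condition the new DCHECKs in the code generators rely on.

// Hypothetical stand-ins; real ART types are HGraph and HInvokeStaticOrDirect.
#include <cassert>

enum class CodePtrLocation { kCallSelf, kCallArtMethod };

struct Graph {
  bool has_should_deoptimize_flag;  // set when inlining relied on CHA guards
};

struct Invoke {
  CodePtrLocation sharpened_location;  // what sharpening chose
  const Graph* graph;

  // Mirrors the new HInvokeStaticOrDirect::GetCodePtrLocation() logic:
  // bypass kCallSelf when the method carries a CHA deoptimization guard, so
  // an invalidated method is re-resolved instead of re-entering its own
  // (now stale) compiled code.
  CodePtrLocation GetCodePtrLocation() const {
    if (sharpened_location == CodePtrLocation::kCallSelf &&
        graph->has_should_deoptimize_flag) {
      return CodePtrLocation::kCallArtMethod;
    }
    return sharpened_location;
  }
};

int main() {
  Graph plain{/*has_should_deoptimize_flag=*/false};
  Graph cha_guarded{/*has_should_deoptimize_flag=*/true};

  // Without a CHA guard the recursive self-call is kept.
  assert(Invoke{CodePtrLocation::kCallSelf, &plain}.GetCodePtrLocation() ==
         CodePtrLocation::kCallSelf);
  // With a CHA guard the self-call goes through the ArtMethod entry point.
  assert(Invoke{CodePtrLocation::kCallSelf, &cha_guarded}.GetCodePtrLocation() ==
         CodePtrLocation::kCallArtMethod);
  return 0;
}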