Don't do a recursive call when there are CHA guards.
Otherwise we would continue executing the method with invalid inlining
optimizations (see the sketch after the change trailers).
Test: 832-cha-recursive
Bug: 19381779
Change-Id: I57d73828d2a9c30f429cf32906f94244346c1310
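
For context (not part of the patch): a minimal standalone sketch of the dispatch decision introduced below. The enum values and the "should deoptimize" check mirror the names in the diff, but the surrounding types are simplified stand-ins, not ART code.

#include <cassert>

// Simplified stand-ins for the real ART types (illustrative only).
enum class CodePtrLocation { kCallSelf, kCallArtMethod };

struct Graph {
  // True when the method holds CHA guards and therefore owns a
  // "should deoptimize" flag in its frame.
  bool has_should_deoptimize_flag = false;
};

struct InvokeStaticOrDirect {
  CodePtrLocation sharpened_location = CodePtrLocation::kCallSelf;
  const Graph* graph = nullptr;

  // Mirrors the intent of HInvokeStaticOrDirect::GetCodePtrLocation() in the
  // diff: sharpening may pick kCallSelf, but a CHA-guarded method must never
  // branch directly back into its own compiled code, so fall back to calling
  // through the ArtMethod.
  CodePtrLocation GetCodePtrLocation() const {
    if (sharpened_location == CodePtrLocation::kCallSelf &&
        graph->has_should_deoptimize_flag) {
      return CodePtrLocation::kCallArtMethod;
    }
    return sharpened_location;
  }
};

int main() {
  Graph graph_with_cha{/*has_should_deoptimize_flag=*/ true};
  InvokeStaticOrDirect invoke{CodePtrLocation::kCallSelf, &graph_with_cha};
  // With CHA guards present, the recursive fast path is bypassed.
  assert(invoke.GetCodePtrLocation() == CodePtrLocation::kCallArtMethod);

  Graph plain_graph{/*has_should_deoptimize_flag=*/ false};
  InvokeStaticOrDirect plain_invoke{CodePtrLocation::kCallSelf, &plain_graph};
  // Without CHA guards, the sharpened kCallSelf is kept.
  assert(plain_invoke.GetCodePtrLocation() == CodePtrLocation::kCallSelf);
  return 0;
}
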
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 4830d65..2549546 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -4639,6 +4639,7 @@
switch (invoke->GetCodePtrLocation()) {
case CodePtrLocation::kCallSelf:
{
+ DCHECK(!GetGraph()->HasShouldDeoptimizeFlag());
// Use a scope to help guarantee that `RecordPcInfo()` records the correct pc.
ExactAssemblyScope eas(GetVIXLAssembler(),
kInstructionSize,
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 06bf8b5..22f7f17 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -9255,6 +9255,7 @@
switch (invoke->GetCodePtrLocation()) {
case CodePtrLocation::kCallSelf:
{
+ DCHECK(!GetGraph()->HasShouldDeoptimizeFlag());
// Use a scope to help guarantee that `RecordPcInfo()` records the correct pc.
ExactAssemblyScope aas(GetVIXLAssembler(),
vixl32::k32BitT32InstructionSizeInBytes,
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index a37f809..2bea51f 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -5295,6 +5295,7 @@
switch (invoke->GetCodePtrLocation()) {
case CodePtrLocation::kCallSelf:
+ DCHECK(!GetGraph()->HasShouldDeoptimizeFlag());
__ call(GetFrameEntryLabel());
RecordPcInfo(invoke, invoke->GetDexPc(), slow_path);
break;
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index deabc87..0f76bd3 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -1088,6 +1088,7 @@
switch (invoke->GetCodePtrLocation()) {
case CodePtrLocation::kCallSelf:
+ DCHECK(!GetGraph()->HasShouldDeoptimizeFlag());
__ call(&frame_entry_label_);
RecordPcInfo(invoke, invoke->GetDexPc(), slow_path);
break;
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index d6b3726..94eae4d 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -4939,7 +4939,18 @@
}
MethodLoadKind GetMethodLoadKind() const { return dispatch_info_.method_load_kind; }
- CodePtrLocation GetCodePtrLocation() const { return dispatch_info_.code_ptr_location; }
+ CodePtrLocation GetCodePtrLocation() const {
+ // We do CHA analysis after sharpening. When a method has CHA inlining, it
+ // cannot call itself directly, because if the CHA optimization is invalidated
+ // we want to make sure the method is never executed again. So, while
+ // sharpening can return kCallSelf, we bypass it here if there is a CHA guard.
+ if (dispatch_info_.code_ptr_location == CodePtrLocation::kCallSelf &&
+ GetBlock()->GetGraph()->HasShouldDeoptimizeFlag()) {
+ return CodePtrLocation::kCallArtMethod;
+ } else {
+ return dispatch_info_.code_ptr_location;
+ }
+ }
bool IsRecursive() const { return GetMethodLoadKind() == MethodLoadKind::kRecursive; }
bool IsStringInit() const { return GetMethodLoadKind() == MethodLoadKind::kStringInit; }
bool HasMethodAddress() const { return GetMethodLoadKind() == MethodLoadKind::kJitDirectAddress; }
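
For context (not part of the patch): a rough model of why falling back to kCallArtMethod is safe, assuming the usual ART behaviour that invalidating a CHA assumption repoints the method's quick-code entry point. All names below are hypothetical stand-ins. A call through the method reloads the entry point each time and therefore picks up the repointed target; a direct self-branch keeps re-entering the invalidated compilation, which is the situation the commit message warns about.

#include <cstdio>

// Hypothetical, heavily simplified model of an ArtMethod's entry point.
using EntryPoint = void (*)();

void CompiledCodeWithCHAInlining() { std::puts("stale CHA-inlined code"); }
void ResolutionOrInterpreterBridge() { std::puts("safe bridge after invalidation"); }

struct Method {
  EntryPoint entry_point = &CompiledCodeWithCHAInlining;
};

// kCallArtMethod: the target is loaded from the method at every call, so a
// CHA invalidation that rewrites entry_point takes effect on the next call.
void CallThroughArtMethod(const Method& m) { m.entry_point(); }

// kCallSelf: the branch target is fixed at compile time; rewriting the
// method's entry point has no effect on this call.
void CallSelf() { CompiledCodeWithCHAInlining(); }

int main() {
  Method m;
  CallThroughArtMethod(m);  // runs the CHA-inlined code
  CallSelf();               // same

  // CHA invalidation: the runtime repoints the entry point.
  m.entry_point = &ResolutionOrInterpreterBridge;

  CallThroughArtMethod(m);  // now goes through the safe bridge
  CallSelf();               // still re-enters the stale code: the case this change avoids
  return 0;
}
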