MIPS: Improve InstructionCodeGeneratorMIPS*::GenerateSuspendCheck().
Relax the only back-edge restriction. Implement optimization for
MIPS32/MIPS64 which has already been done for the ARM & x86
architectures in
https://android-review.googlesource.com/#/c/platform/art/+/149370/.
Test: Boot & run tests on 32- & 64-bit versions of QEMU.
Test: test/testrunner/testrunner.py --target --optimizing
Test: test-art-host-gtest
Test: test-art-target-gtest
Change-Id: Ie0a4c19ee50ad532fe53933d5808f9d7a4f89b8e
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 2f65e8c..cc14809 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -461,6 +461,10 @@
const char* GetDescription() const OVERRIDE { return "SuspendCheckSlowPathMIPS"; }
+ HBasicBlock* GetSuccessor() const {
+ return successor_;
+ }
+
private:
// If not null, the block to branch to after the suspend check.
HBasicBlock* const successor_;
@@ -1994,8 +1998,19 @@
void InstructionCodeGeneratorMIPS::GenerateSuspendCheck(HSuspendCheck* instruction,
HBasicBlock* successor) {
SuspendCheckSlowPathMIPS* slow_path =
- new (codegen_->GetScopedAllocator()) SuspendCheckSlowPathMIPS(instruction, successor);
- codegen_->AddSlowPath(slow_path);
+ down_cast<SuspendCheckSlowPathMIPS*>(instruction->GetSlowPath());
+
+ if (slow_path == nullptr) {
+ slow_path =
+ new (codegen_->GetScopedAllocator()) SuspendCheckSlowPathMIPS(instruction, successor);
+ instruction->SetSlowPath(slow_path);
+ codegen_->AddSlowPath(slow_path);
+ if (successor != nullptr) {
+ DCHECK(successor->IsLoopHeader());
+ }
+ } else {
+ DCHECK_EQ(slow_path->GetSuccessor(), successor);
+ }
__ LoadFromOffset(kLoadUnsignedHalfword,
TMP,