Optimize stack overflow handling.
We now subtract the frame size from the stack pointer for methods whose
frame is smaller than a certain size. Also change the code to use slow
paths instead of launchpads.
Delete kStackOverflow launchpad since it is no longer needed.
ARM optimizations:
One less move per stack overflow check (without a fault handler for
stack overflows). Use ldr pc instead of ldr r12 followed by a branch
through r12.
Code size (boot.oat):
Before: 58405348 bytes
After: 57803236 bytes
TODO: X86 doesn't handle the large-frame case. This could cause an
incoming signal to run past the end of the stack (unlikely, however).
Change-Id: Ie3a5635cd6fb09de27960e1f8cee45bfae38fb33
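
For reference, a minimal C++ sketch (not part of the patch) of the check the
generated prologue now performs. WouldOverflow, stack_end, and the pointer
arithmetic are illustrative assumptions only; the real check is emitted as
machine code by the backends changed in this diff, with the slow path
restoring SP and calling pThrowStackOverflow.

#include <cstddef>
#include <cstdint>

// Illustrative only: mirrors the unsigned compare the compiled prologue emits.
// If this would return true, the generated code branches to the stack overflow
// slow path instead of committing the new stack pointer.
bool WouldOverflow(uintptr_t sp, uintptr_t stack_end, size_t frame_size) {
  uintptr_t new_sp = sp - frame_size;  // subtract the full frame size up front
  return new_sp < stack_end;           // unsigned compare against the guard limit
}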
diff --git a/compiler/dex/quick/mips/call_mips.cc b/compiler/dex/quick/mips/call_mips.cc
index 234299e..95fd6e7 100644
--- a/compiler/dex/quick/mips/call_mips.cc
+++ b/compiler/dex/quick/mips/call_mips.cc
@@ -317,12 +317,36 @@
SpillCoreRegs();
/* NOTE: promotion of FP regs currently unsupported, thus no FP spill */
DCHECK_EQ(num_fp_spills_, 0);
+ const int frame_sub = frame_size_ - spill_count * 4;
if (!skip_overflow_check) {
- OpRegRegImm(kOpSub, new_sp, rMIPS_SP, frame_size_ - (spill_count * 4));
- GenRegRegCheck(kCondUlt, new_sp, check_reg, kThrowStackOverflow);
+ class StackOverflowSlowPath : public LIRSlowPath {
+ public:
+ StackOverflowSlowPath(Mir2Lir* m2l, LIR* branch, size_t sp_displace)
+ : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch, nullptr), sp_displace_(sp_displace) {
+ }
+ void Compile() OVERRIDE {
+ m2l_->ResetRegPool();
+ m2l_->ResetDefTracking();
+ GenerateTargetLabel();
+ // LR is offset 0 since we push in reverse order.
+ m2l_->LoadWordDisp(kMipsRegSP, 0, kMipsRegLR);
+ m2l_->OpRegImm(kOpAdd, kMipsRegSP, sp_displace_);
+ m2l_->ClobberCallerSave();
+ ThreadOffset func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowStackOverflow);
+ int r_tgt = m2l_->CallHelperSetup(func_offset); // Doesn't clobber LR.
+ m2l_->CallHelper(r_tgt, func_offset, false /* MarkSafepointPC */, false /* UseLink */);
+ }
+
+ private:
+ const size_t sp_displace_;
+ };
+ OpRegRegImm(kOpSub, new_sp, rMIPS_SP, frame_sub);
+ LIR* branch = OpCmpBranch(kCondUlt, new_sp, check_reg, nullptr);
+ AddSlowPath(new(arena_)StackOverflowSlowPath(this, branch, spill_count * 4));
+ // TODO: avoid copy for small frame sizes.
OpRegCopy(rMIPS_SP, new_sp); // Establish stack
} else {
- OpRegImm(kOpSub, rMIPS_SP, frame_size_ - (spill_count * 4));
+ OpRegImm(kOpSub, rMIPS_SP, frame_sub);
}
FlushIns(ArgLocs, rl_method);