Diffstat (limited to 'compiler/optimizing')
-rw-r--r--  compiler/optimizing/code_generator.cc           | 25
-rw-r--r--  compiler/optimizing/code_generator.h            |  3
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc     |  9
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.cc  |  9
-rw-r--r--  compiler/optimizing/code_generator_mips.cc      |  8
-rw-r--r--  compiler/optimizing/code_generator_mips64.cc    |  8
-rw-r--r--  compiler/optimizing/code_generator_x86.cc       |  8
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc    |  8
-rw-r--r--  compiler/optimizing/register_allocator.cc       | 15
-rw-r--r--  compiler/optimizing/register_allocator.h        |  2
10 files changed, 72 insertions(+), 23 deletions(-)
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index dd8e3d240f..84f01828b2 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -935,7 +935,7 @@ void CodeGenerator::RecordCatchBlockInfo() {
       if (current_phi == nullptr || current_phi->AsPhi()->GetRegNumber() != vreg) {
         stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kNone, 0);
       } else {
-        Location location = current_phi->GetLiveInterval()->ToLocation();
+        Location location = current_phi->GetLocations()->Out();
         switch (location.GetKind()) {
           case Location::kStackSlot: {
             stack_map_stream_.AddDexRegisterEntry(
@@ -1202,22 +1202,21 @@ void CodeGenerator::GenerateNullCheck(HNullCheck* instruction) {
   }
 }
 
-void CodeGenerator::ClearSpillSlotsFromLoopPhisInStackMap(HSuspendCheck* suspend_check) const {
+void CodeGenerator::ClearSpillSlotsFromLoopPhisInStackMap(HSuspendCheck* suspend_check,
+                                                          HParallelMove* spills) const {
   LocationSummary* locations = suspend_check->GetLocations();
   HBasicBlock* block = suspend_check->GetBlock();
   DCHECK(block->GetLoopInformation()->GetSuspendCheck() == suspend_check);
   DCHECK(block->IsLoopHeader());
-
-  for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
-    HInstruction* current = it.Current();
-    LiveInterval* interval = current->GetLiveInterval();
-    // We only need to clear bits of loop phis containing objects and allocated in register.
-    // Loop phis allocated on stack already have the object in the stack.
-    if (current->GetType() == DataType::Type::kReference
-        && interval->HasRegister()
-        && interval->HasSpillSlot()) {
-      locations->ClearStackBit(interval->GetSpillSlot() / kVRegSize);
-    }
+  DCHECK(block->GetFirstInstruction() == spills);
+
+  for (size_t i = 0, num_moves = spills->NumMoves(); i != num_moves; ++i) {
+    Location dest = spills->MoveOperandsAt(i)->GetDestination();
+    // All parallel moves in loop headers are spills.
+    DCHECK(dest.IsStackSlot() || dest.IsDoubleStackSlot() || dest.IsSIMDStackSlot()) << dest;
+    // Clear the stack bit marking a reference. Do not bother to check whether the spill is
+    // actually a reference spill; clearing bits that are already zero is harmless.
+    locations->ClearStackBit(dest.GetStackIndex() / kVRegSize);
   }
 }
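
[Editor's note] The replacement loop above derives the slots to clear directly from the
spill moves: each destination is a byte offset into the frame, and dividing by the vreg
size gives the index of the stack-mask bit to clear. A minimal standalone model of that
arithmetic, not ART code: the function name is hypothetical, a fixed-width std::bitset
stands in for LocationSummary's stack mask, and kVRegSize = 4 matches ART's 4-byte
virtual-register slots.

    #include <bitset>
    #include <cstddef>
    #include <vector>

    constexpr std::size_t kVRegSize = 4;  // 4-byte virtual-register slots

    // Clears the stack-mask bit for each spill destination, given as a byte
    // offset into the frame. Mirrors the new loop's
    // locations->ClearStackBit(dest.GetStackIndex() / kVRegSize).
    void ClearSpillBits(std::bitset<64>* stack_mask,
                        const std::vector<std::size_t>& spill_byte_offsets) {
      for (std::size_t offset : spill_byte_offsets) {
        // Resetting an already-zero bit (a non-reference spill) is a no-op,
        // which is why no reference-type check is needed.
        stack_mask->reset(offset / kVRegSize);
      }
    }

Clearing unconditionally is simpler than the old per-phi reference/register/spill-slot
test and is safe for exactly the reason the comment gives: zero bits stay zero.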
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 2c3cf262b1..2904b71991 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -380,7 +380,8 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
   // for the suspend check at the back edge (instead of where the suspend check
   // is, which is the loop entry). At this point, the spill slots for the phis
   // have not been written to.
-  void ClearSpillSlotsFromLoopPhisInStackMap(HSuspendCheck* suspend_check) const;
+  void ClearSpillSlotsFromLoopPhisInStackMap(HSuspendCheck* suspend_check,
+                                             HParallelMove* spills) const;
 
   bool* GetBlockedCoreRegisters() const { return blocked_core_registers_; }
   bool* GetBlockedFloatingPointRegisters() const { return blocked_fpu_registers_; }
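
[Editor's note] The doc comment above states the invariant that motivates the clearing:
the loop header's stack map is emitted for the back-edge suspend check, where the phi
spills have already executed, but at the loop entry those slots are still uninitialized.
A sketch of why a stale stack-mask bit would be unsafe, using stub types rather than
ART's real stack map or stack-walk machinery, and assuming 4-byte slots:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Stub only: models a frame plus its stack map's reference mask.
    struct StackWalkSketch {
      std::vector<bool> stack_mask;  // one bit per vreg-sized stack slot
      const uint32_t* frame;         // raw frame memory, 4-byte slots assumed

      // Visits every slot whose bit is set. If a bit were still set for a
      // phi spill slot that has not been written yet, this would hand
      // uninitialized memory to the visitor (e.g. the GC) as a reference.
      template <typename Visitor>
      void VisitReferenceSlots(Visitor&& visit) const {
        for (std::size_t slot = 0; slot != stack_mask.size(); ++slot) {
          if (stack_mask[slot]) {
            visit(frame[slot]);
          }
        }
      }
    };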
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 9be9117967..e6e69846e4 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -2209,7 +2209,6 @@ void InstructionCodeGeneratorARM64::GenerateSuspendCheck(HSuspendCheck* instruct
     codegen_->AddSlowPath(slow_path);
     if (successor != nullptr) {
       DCHECK(successor->IsLoopHeader());
-      codegen_->ClearSpillSlotsFromLoopPhisInStackMap(instruction);
     }
   } else {
     DCHECK_EQ(slow_path->GetSuccessor(), successor);
@@ -3560,7 +3559,6 @@ void InstructionCodeGeneratorARM64::HandleGoto(HInstruction* got, HBasicBlock* s
   HLoopInformation* info = block->GetLoopInformation();
   if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
-    codegen_->ClearSpillSlotsFromLoopPhisInStackMap(info->GetSuspendCheck());
     GenerateSuspendCheck(info->GetSuspendCheck(), successor);
     return;
   }
@@ -5420,6 +5418,13 @@ void LocationsBuilderARM64::VisitParallelMove(HParallelMove* instruction ATTRIBU
 }
 
 void InstructionCodeGeneratorARM64::VisitParallelMove(HParallelMove* instruction) {
+  if (instruction->GetNext()->IsSuspendCheck() &&
+      instruction->GetBlock()->GetLoopInformation() != nullptr) {
+    HSuspendCheck* suspend_check = instruction->GetNext()->AsSuspendCheck();
+    // The back edge will generate the suspend check.
+    codegen_->ClearSpillSlotsFromLoopPhisInStackMap(suspend_check, instruction);
+  }
+
   codegen_->GetMoveResolver()->EmitNativeCode(instruction);
 }
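
[Editor's note] Every backend's VisitParallelMove gains this same guard before emitting
the moves; the arm_vixl, mips, mips64, x86, and x86_64 hunks below are identical in
substance. Distilled into a free function over stub types, purely for illustration (the
struct and field names here are invented, not ART's):

    // Stub types standing in for HInstruction / HParallelMove / HLoopInformation.
    struct LoopInformation {};
    struct Instruction {
      Instruction* next = nullptr;        // linked list within the block
      bool is_suspend_check = false;
      LoopInformation* loop_info = nullptr;  // non-null inside a loop
    };

    // True when the parallel move is the loop-header spill block: it sits
    // inside a loop and is immediately followed by the header's suspend check.
    bool IsLoopPhiSpillMove(const Instruction& move) {
      return move.next != nullptr &&
             move.next->is_suspend_check &&
             move.loop_info != nullptr;
    }

The call moves here, rather than staying in GenerateSuspendCheck/HandleGoto, because the
spill locations are now read off the HParallelMove itself instead of live intervals.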
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index d7137a3b28..251f390ce3 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -2858,7 +2858,6 @@ void InstructionCodeGeneratorARMVIXL::HandleGoto(HInstruction* got, HBasicBlock*
   HLoopInformation* info = block->GetLoopInformation();
   if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
-    codegen_->ClearSpillSlotsFromLoopPhisInStackMap(info->GetSuspendCheck());
     GenerateSuspendCheck(info->GetSuspendCheck(), successor);
     return;
   }
@@ -6741,6 +6740,13 @@ void LocationsBuilderARMVIXL::VisitParallelMove(HParallelMove* instruction ATTRI
 }
 
 void InstructionCodeGeneratorARMVIXL::VisitParallelMove(HParallelMove* instruction) {
+  if (instruction->GetNext()->IsSuspendCheck() &&
+      instruction->GetBlock()->GetLoopInformation() != nullptr) {
+    HSuspendCheck* suspend_check = instruction->GetNext()->AsSuspendCheck();
+    // The back edge will generate the suspend check.
+    codegen_->ClearSpillSlotsFromLoopPhisInStackMap(suspend_check, instruction);
+  }
+
   codegen_->GetMoveResolver()->EmitNativeCode(instruction);
 }
@@ -6776,7 +6782,6 @@ void InstructionCodeGeneratorARMVIXL::GenerateSuspendCheck(HSuspendCheck* instru
     codegen_->AddSlowPath(slow_path);
     if (successor != nullptr) {
       DCHECK(successor->IsLoopHeader());
-      codegen_->ClearSpillSlotsFromLoopPhisInStackMap(instruction);
     }
   } else {
     DCHECK_EQ(slow_path->GetSuccessor(), successor);
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 7ea7b9cee2..e58f43e1bb 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -3967,7 +3967,6 @@ void InstructionCodeGeneratorMIPS::HandleGoto(HInstruction* got, HBasicBlock* su
   HLoopInformation* info = block->GetLoopInformation();
   if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
-    codegen_->ClearSpillSlotsFromLoopPhisInStackMap(info->GetSuspendCheck());
     GenerateSuspendCheck(info->GetSuspendCheck(), successor);
     return;
   }
@@ -8359,6 +8358,13 @@ void LocationsBuilderMIPS::VisitParallelMove(HParallelMove* instruction ATTRIBUT
 }
 
 void InstructionCodeGeneratorMIPS::VisitParallelMove(HParallelMove* instruction) {
+  if (instruction->GetNext()->IsSuspendCheck() &&
+      instruction->GetBlock()->GetLoopInformation() != nullptr) {
+    HSuspendCheck* suspend_check = instruction->GetNext()->AsSuspendCheck();
+    // The back edge will generate the suspend check.
+    codegen_->ClearSpillSlotsFromLoopPhisInStackMap(suspend_check, instruction);
+  }
+
   codegen_->GetMoveResolver()->EmitNativeCode(instruction);
 }
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index fad0fe74e5..11120cf07a 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -3488,7 +3488,6 @@ void InstructionCodeGeneratorMIPS64::HandleGoto(HInstruction* got, HBasicBlock*
   HLoopInformation* info = block->GetLoopInformation();
   if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
-    codegen_->ClearSpillSlotsFromLoopPhisInStackMap(info->GetSuspendCheck());
     GenerateSuspendCheck(info->GetSuspendCheck(), successor);
     return;
   }
@@ -6490,6 +6489,13 @@ void LocationsBuilderMIPS64::VisitParallelMove(HParallelMove* instruction ATTRIB
 }
 
 void InstructionCodeGeneratorMIPS64::VisitParallelMove(HParallelMove* instruction) {
+  if (instruction->GetNext()->IsSuspendCheck() &&
+      instruction->GetBlock()->GetLoopInformation() != nullptr) {
+    HSuspendCheck* suspend_check = instruction->GetNext()->AsSuspendCheck();
+    // The back edge will generate the suspend check.
+    codegen_->ClearSpillSlotsFromLoopPhisInStackMap(suspend_check, instruction);
+  }
+
   codegen_->GetMoveResolver()->EmitNativeCode(instruction);
 }
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index d8a47fa1ea..39a07b82d1 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -5680,6 +5680,13 @@ void LocationsBuilderX86::VisitParallelMove(HParallelMove* instruction ATTRIBUTE
 }
 
 void InstructionCodeGeneratorX86::VisitParallelMove(HParallelMove* instruction) {
+  if (instruction->GetNext()->IsSuspendCheck() &&
+      instruction->GetBlock()->GetLoopInformation() != nullptr) {
+    HSuspendCheck* suspend_check = instruction->GetNext()->AsSuspendCheck();
+    // The back edge will generate the suspend check.
+    codegen_->ClearSpillSlotsFromLoopPhisInStackMap(suspend_check, instruction);
+  }
+
   codegen_->GetMoveResolver()->EmitNativeCode(instruction);
 }
@@ -5717,7 +5724,6 @@ void InstructionCodeGeneratorX86::GenerateSuspendCheck(HSuspendCheck* instructio
     codegen_->AddSlowPath(slow_path);
     if (successor != nullptr) {
       DCHECK(successor->IsLoopHeader());
-      codegen_->ClearSpillSlotsFromLoopPhisInStackMap(instruction);
     }
   } else {
     DCHECK_EQ(slow_path->GetSuccessor(), successor);
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index b6aa110f2d..c8032c25df 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -5125,6 +5125,13 @@ void LocationsBuilderX86_64::VisitParallelMove(HParallelMove* instruction ATTRIB
 }
 
 void InstructionCodeGeneratorX86_64::VisitParallelMove(HParallelMove* instruction) {
+  if (instruction->GetNext()->IsSuspendCheck() &&
+      instruction->GetBlock()->GetLoopInformation() != nullptr) {
+    HSuspendCheck* suspend_check = instruction->GetNext()->AsSuspendCheck();
+    // The back edge will generate the suspend check.
+    codegen_->ClearSpillSlotsFromLoopPhisInStackMap(suspend_check, instruction);
+  }
+
   codegen_->GetMoveResolver()->EmitNativeCode(instruction);
 }
@@ -5162,7 +5169,6 @@ void InstructionCodeGeneratorX86_64::GenerateSuspendCheck(HSuspendCheck* instruc
     codegen_->AddSlowPath(slow_path);
    if (successor != nullptr) {
       DCHECK(successor->IsLoopHeader());
-      codegen_->ClearSpillSlotsFromLoopPhisInStackMap(instruction);
     }
   } else {
     DCHECK_EQ(slow_path->GetSuccessor(), successor);
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index ece9904426..86e971353f 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -53,6 +53,21 @@ std::unique_ptr<RegisterAllocator> RegisterAllocator::Create(ScopedArenaAllocato
   }
 }
 
+RegisterAllocator::~RegisterAllocator() {
+  if (kIsDebugBuild) {
+    // Poison live interval pointers with "Error: BAD 71ve1nt3rval."
+    LiveInterval* bad_live_interval = reinterpret_cast<LiveInterval*>(0xebad7113u);
+    for (HBasicBlock* block : codegen_->GetGraph()->GetLinearOrder()) {
+      for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
+        it.Current()->SetLiveInterval(bad_live_interval);
+      }
+      for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
+        it.Current()->SetLiveInterval(bad_live_interval);
+      }
+    }
+  }
+}
+
 bool RegisterAllocator::CanAllocateRegistersFor(const HGraph& graph ATTRIBUTE_UNUSED,
                                                 InstructionSet instruction_set) {
   return instruction_set == kArm
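
[Editor's note] With the codegen changes above reading locations from LocationSummary
instead of live intervals, the new destructor poisons every instruction's live-interval
pointer in debug builds, so any stale use after register allocation fails fast at a
recognizable address. A self-contained illustration of the idiom, with a stub type
rather than ART's LiveInterval; 0xebad7113 is the sentinel from this diff (leetspeak
for "bad LiveInterval", per the comment above):

    #include <cstdint>

    struct LiveIntervalStub {};  // stand-in for ART's LiveInterval

    int main() {
      // A distinctive bad address: dereferencing it crashes at 0xebad7113,
      // which spells out the bug in a backtrace instead of silently reading
      // stale interval data.
      auto* poisoned = reinterpret_cast<LiveIntervalStub*>(
          static_cast<std::uintptr_t>(0xebad7113u));
      (void)poisoned;  // storing the poisoned pointer is fine; any use is the bug
      return 0;
    }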
diff --git a/compiler/optimizing/register_allocator.h b/compiler/optimizing/register_allocator.h
index eaeec3b261..e284498563 100644
--- a/compiler/optimizing/register_allocator.h
+++ b/compiler/optimizing/register_allocator.h
@@ -50,7 +50,7 @@ class RegisterAllocator : public DeletableArenaObject<kArenaAllocRegisterAllocat
                             const SsaLivenessAnalysis& analysis,
                             Strategy strategy = kRegisterAllocatorDefault);
 
-  virtual ~RegisterAllocator() = default;
+  virtual ~RegisterAllocator();
 
   // Main entry point for the register allocator. Given the liveness analysis,
   // allocates registers to live intervals.