Diffstat (limited to 'compiler/optimizing')
-rw-r--r--  compiler/optimizing/load_store_elimination.cc   6
-rw-r--r--  compiler/optimizing/register_allocator.cc      16
2 files changed, 11 insertions, 11 deletions
diff --git a/compiler/optimizing/load_store_elimination.cc b/compiler/optimizing/load_store_elimination.cc
index 9a97f54d54..8eaac0bbd3 100644
--- a/compiler/optimizing/load_store_elimination.cc
+++ b/compiler/optimizing/load_store_elimination.cc
@@ -61,7 +61,7 @@ class ReferenceInfo : public ArenaObject<kArenaAllocMisc> {
(use->IsStaticFieldSet() && (reference_ == use->InputAt(1))) ||
(use->IsUnresolvedStaticFieldSet() && (reference_ == use->InputAt(0))) ||
(use->IsArraySet() && (reference_ == use->InputAt(2)))) {
- // reference_ is merged to a phi/HSelect, passed to a callee, or stored to heap.
+ // reference_ is merged to HPhi/HSelect, passed to a callee, or stored to heap.
// reference_ isn't the only name that can refer to its value anymore.
is_singleton_ = false;
is_singleton_and_not_returned_ = false;
@@ -458,6 +458,10 @@ class HeapLocationCollector : public HGraphVisitor {
CreateReferenceInfoForReferenceType(instruction);
}

+ void VisitSelect(HSelect* instruction) OVERRIDE {
+ CreateReferenceInfoForReferenceType(instruction);
+ }
+
void VisitDeoptimize(HDeoptimize* instruction ATTRIBUTE_UNUSED) OVERRIDE {
may_deoptimize_ = true;
}
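
For context: the VisitSelect override added above makes the heap location collector create ReferenceInfo for an HSelect's output, matching the existing handlers for other reference-producing instructions, and the comment fix in the first hunk records the same fact, that an HSelect use, like an HPhi use, disqualifies a reference from being a singleton. The following toy sketch of that escape rule uses hypothetical stand-in types, not ART's real HInstruction hierarchy, and glosses over the real code's check that the reference is the value being stored rather than the destination:

#include <vector>

// Toy model: a reference stops being a singleton once any use merges it
// through a select/phi, passes it to a callee, or stores it to the heap,
// because another name can then refer to the same object.
struct Use {
  enum Kind { kSelect, kPhi, kInvoke, kFieldSet, kArraySet, kOther };
  Kind kind;
};

bool IsSingleton(const std::vector<Use>& uses) {
  for (const Use& use : uses) {
    switch (use.kind) {
      case Use::kSelect:    // The HSelect case noted in the updated comment.
      case Use::kPhi:
      case Use::kInvoke:
      case Use::kFieldSet:
      case Use::kArraySet:
        return false;       // Aliasing is now possible.
      default:
        break;
    }
  }
  return true;
}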
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index 5cd30adb45..b8d76b912e 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -994,10 +994,6 @@ bool RegisterAllocator::AllocateBlockedReg(LiveInterval* current) {
return false;
}

- // We use the first use to compare with other intervals. If this interval
- // is used after any active intervals, we will spill this interval.
- size_t first_use = current->FirstUseAfter(current->GetStart());
-
// First set all registers as not being used.
size_t* next_use = registers_array_;
for (size_t i = 0; i < number_of_registers_; ++i) {
@@ -1011,7 +1007,7 @@ bool RegisterAllocator::AllocateBlockedReg(LiveInterval* current) {
if (active->IsFixed()) {
next_use[active->GetRegister()] = current->GetStart();
} else {
- size_t use = active->FirstUseAfter(current->GetStart());
+ size_t use = active->FirstRegisterUseAfter(current->GetStart());
if (use != kNoLifetime) {
next_use[active->GetRegister()] = use;
}
@@ -1052,16 +1048,16 @@ bool RegisterAllocator::AllocateBlockedReg(LiveInterval* current) {
DCHECK(current->IsHighInterval());
reg = current->GetRegister();
// When allocating the low part, we made sure the high register was available.
- DCHECK_LT(first_use, next_use[reg]);
+ DCHECK_LT(first_register_use, next_use[reg]);
} else if (current->IsLowInterval()) {
- reg = FindAvailableRegisterPair(next_use, first_use);
+ reg = FindAvailableRegisterPair(next_use, first_register_use);
// We should spill if both registers are not available.
- should_spill = (first_use >= next_use[reg])
- || (first_use >= next_use[GetHighForLowRegister(reg)]);
+ should_spill = (first_register_use >= next_use[reg])
+ || (first_register_use >= next_use[GetHighForLowRegister(reg)]);
} else {
DCHECK(!current->IsHighInterval());
reg = FindAvailableRegister(next_use, current);
- should_spill = (first_use >= next_use[reg]);
+ should_spill = (first_register_use >= next_use[reg]);
}
DCHECK_NE(reg, kNoRegister);
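
The register_allocator.cc half of the change swaps first_use (the interval's first use of any kind) for first_register_use when deciding whether to spill: a use that can read its operand from a stack slot should not count against keeping a register, which is also why the active intervals are now scanned with FirstRegisterUseAfter. A toy version of the resulting spill test, with hypothetical names and none of ART's real LiveInterval machinery (it also ignores the paired-register case handled above):

#include <cstddef>

// Toy spill test: next_use[reg] is the next position at which reg is needed
// by another interval; first_register_use is the current interval's first
// use that actually requires a register.
bool ShouldSpill(std::size_t first_register_use, const std::size_t* next_use,
                 std::size_t num_regs, std::size_t* out_reg) {
  // Pick the register whose next use by other intervals is farthest away.
  std::size_t best = 0;
  for (std::size_t reg = 1; reg < num_regs; ++reg) {
    if (next_use[reg] > next_use[best]) {
      best = reg;
    }
  }
  *out_reg = best;
  // Spill if even that register is needed before the current interval
  // actually requires one.
  return first_register_use >= next_use[best];
}

Under this rule an interval whose upcoming uses are all register-indifferent becomes a cheaper spill candidate than it was when plain FirstUseAfter drove the comparison.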