diff options
Diffstat (limited to 'compiler')
| -rw-r--r-- | compiler/optimizing/code_generator.cc | 6 | ||||
| -rw-r--r-- | compiler/optimizing/code_generator_arm.cc | 25 | ||||
| -rw-r--r-- | compiler/optimizing/code_generator_x86.cc | 2 | ||||
| -rw-r--r-- | compiler/optimizing/intrinsics_x86_64.cc | 2 | ||||
| -rw-r--r-- | compiler/optimizing/locations.h | 12 | ||||
| -rw-r--r-- | compiler/optimizing/nodes.h | 50 | ||||
| -rw-r--r-- | compiler/optimizing/parallel_move_resolver.cc | 3 | ||||
| -rw-r--r-- | compiler/optimizing/parallel_move_test.cc | 94 | ||||
| -rw-r--r-- | compiler/optimizing/register_allocator.cc | 10 |
9 files changed, 122 insertions, 82 deletions
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc index 9e8907078b..85724fa7c8 100644 --- a/compiler/optimizing/code_generator.cc +++ b/compiler/optimizing/code_generator.cc @@ -696,11 +696,9 @@ void CodeGenerator::ClearSpillSlotsFromLoopPhisInStackMap(HSuspendCheck* suspend } void CodeGenerator::EmitParallelMoves(Location from1, Location to1, Location from2, Location to2) { - MoveOperands move1(from1, to1, nullptr); - MoveOperands move2(from2, to2, nullptr); HParallelMove parallel_move(GetGraph()->GetArena()); - parallel_move.AddMove(&move1); - parallel_move.AddMove(&move2); + parallel_move.AddMove(from1, to1, nullptr); + parallel_move.AddMove(from2, to2, nullptr); GetMoveResolver()->EmitNativeCode(&parallel_move); } diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc index c4ba0fd3e5..5b2db130a5 100644 --- a/compiler/optimizing/code_generator_arm.cc +++ b/compiler/optimizing/code_generator_arm.cc @@ -3302,15 +3302,6 @@ void ParallelMoveResolverARM::EmitMove(size_t index) { DCHECK(destination.IsStackSlot()); __ StoreSToOffset(source.AsFpuRegister<SRegister>(), SP, destination.GetStackIndex()); } - } else if (source.IsFpuRegisterPair()) { - if (destination.IsFpuRegisterPair()) { - __ vmovd(FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()), - FromLowSToD(source.AsFpuRegisterPairLow<SRegister>())); - } else { - DCHECK(destination.IsDoubleStackSlot()) << destination; - __ StoreDToOffset(FromLowSToD(source.AsFpuRegisterPairLow<SRegister>()), - SP, destination.GetStackIndex()); - } } else if (source.IsDoubleStackSlot()) { if (destination.IsFpuRegisterPair()) { __ LoadDFromOffset(FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()), @@ -3396,22 +3387,6 @@ void ParallelMoveResolverARM::EmitSwap(size_t index) { __ vmovs(STMP, reg); __ LoadSFromOffset(reg, SP, mem); __ StoreSToOffset(STMP, SP, mem); - } else if (source.IsFpuRegisterPair() && destination.IsFpuRegisterPair()) { 
- __ vmovd(DTMP, FromLowSToD(source.AsFpuRegisterPairLow<SRegister>())); - __ vmovd(FromLowSToD(source.AsFpuRegisterPairLow<SRegister>()), - FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>())); - __ vmovd(FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()), DTMP); - } else if (source.IsFpuRegisterPair() || destination.IsFpuRegisterPair()) { - DRegister reg = source.IsFpuRegisterPair() - ? FromLowSToD(source.AsFpuRegisterPairLow<SRegister>()) - : FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()); - int mem = source.IsFpuRegisterPair() - ? destination.GetStackIndex() - : source.GetStackIndex(); - - __ vmovd(DTMP, reg); - __ LoadDFromOffset(reg, SP, mem); - __ StoreDToOffset(DTMP, SP, mem); } else if (source.IsDoubleStackSlot() && destination.IsDoubleStackSlot()) { // TODO: We could use DTMP and ask for a pair scratch register (float or core). // This would save four instructions if two scratch registers are available, and diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc index 1a0df44ea6..bdd0979cae 100644 --- a/compiler/optimizing/code_generator_x86.cc +++ b/compiler/optimizing/code_generator_x86.cc @@ -3363,7 +3363,7 @@ void ParallelMoveResolverX86::EmitMove(size_t index) { __ movl(Address(ESP, destination.GetStackIndex()), imm); } } else { - LOG(FATAL) << "Unimplemented"; + LOG(FATAL) << "Unimplemented move: " << destination << " <- " << source; } } diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc index c1f4c94b7f..2c239458f1 100644 --- a/compiler/optimizing/intrinsics_x86_64.cc +++ b/compiler/optimizing/intrinsics_x86_64.cc @@ -116,7 +116,7 @@ static void MoveArguments(HInvoke* invoke, ArenaAllocator* arena, CodeGeneratorX Location cc_loc = calling_convention_visitor.GetNextLocation(input->GetType()); Location actual_loc = locations->InAt(i); - parallel_move.AddMove(new (arena) MoveOperands(actual_loc, cc_loc, nullptr)); + 
parallel_move.AddMove(actual_loc, cc_loc, nullptr); } codegen->GetMoveResolver()->EmitNativeCode(&parallel_move); diff --git a/compiler/optimizing/locations.h b/compiler/optimizing/locations.h index d41b3aecfd..68d605957f 100644 --- a/compiler/optimizing/locations.h +++ b/compiler/optimizing/locations.h @@ -290,18 +290,6 @@ class Location : public ValueObject { return value_ == other.value_; } - // Returns whether this location contains `other`. - bool Contains(Location other) const { - if (Equals(other)) return true; - if (IsRegisterPair() && other.IsRegister()) { - return low() == other.reg() || high() == other.reg(); - } - if (IsFpuRegisterPair() && other.IsFpuRegister()) { - return low() == other.reg() || high() == other.reg(); - } - return false; - } - const char* DebugString() const { switch (GetKind()) { case kInvalid: return "I"; diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h index f2aa043849..fa51f27f0a 100644 --- a/compiler/optimizing/nodes.h +++ b/compiler/optimizing/nodes.h @@ -2755,7 +2755,7 @@ class MoveOperands : public ArenaObject<kArenaAllocMisc> { // True if this blocks a move from the given location. bool Blocks(Location loc) const { - return !IsEliminated() && source_.Contains(loc); + return !IsEliminated() && source_.Equals(loc); } // A move is redundant if it's been eliminated, if its source and @@ -2784,8 +2784,6 @@ class MoveOperands : public ArenaObject<kArenaAllocMisc> { // This is only used in debug mode, to ensure we do not connect interval siblings // in the same parallel move. 
HInstruction* instruction_; - - DISALLOW_COPY_AND_ASSIGN(MoveOperands); }; static constexpr size_t kDefaultNumberOfMoves = 4; @@ -2795,24 +2793,46 @@ class HParallelMove : public HTemplateInstruction<0> { explicit HParallelMove(ArenaAllocator* arena) : HTemplateInstruction(SideEffects::None()), moves_(arena, kDefaultNumberOfMoves) {} - void AddMove(MoveOperands* move) { - if (kIsDebugBuild) { - if (move->GetInstruction() != nullptr) { + void AddMove(Location source, Location destination, HInstruction* instruction) { + DCHECK(source.IsValid()); + DCHECK(destination.IsValid()); + // The parallel move resolver does not handle pairs. So we decompose the + // pair locations into two moves. + if (source.IsPair() && destination.IsPair()) { + AddMove(source.ToLow(), destination.ToLow(), instruction); + AddMove(source.ToHigh(), destination.ToHigh(), nullptr); + } else if (source.IsPair()) { + DCHECK(destination.IsDoubleStackSlot()); + AddMove(source.ToLow(), Location::StackSlot(destination.GetStackIndex()), instruction); + AddMove(source.ToHigh(), Location::StackSlot(destination.GetHighStackIndex(4)), nullptr); + } else if (destination.IsPair()) { + DCHECK(source.IsDoubleStackSlot()); + AddMove(Location::StackSlot(source.GetStackIndex()), destination.ToLow(), instruction); + // TODO: rewrite GetHighStackIndex to not require a word size. It's supposed to + // always be 4. 
+ static constexpr int kHighOffset = 4; + AddMove(Location::StackSlot(source.GetHighStackIndex(kHighOffset)), + destination.ToHigh(), + nullptr); + } else { + if (kIsDebugBuild) { + if (instruction != nullptr) { + for (size_t i = 0, e = moves_.Size(); i < e; ++i) { + DCHECK_NE(moves_.Get(i).GetInstruction(), instruction) + << "Doing parallel moves for the same instruction."; + } + } for (size_t i = 0, e = moves_.Size(); i < e; ++i) { - DCHECK_NE(moves_.Get(i)->GetInstruction(), move->GetInstruction()) - << "Doing parallel moves for the same instruction."; + DCHECK(!destination.Equals(moves_.Get(i).GetDestination())) + << "Same destination for two moves in a parallel move."; } } - for (size_t i = 0, e = moves_.Size(); i < e; ++i) { - DCHECK(!move->GetDestination().Contains(moves_.Get(i)->GetDestination())) - << "Same destination for two moves in a parallel move."; - } + moves_.Add(MoveOperands(source, destination, instruction)); } - moves_.Add(move); } MoveOperands* MoveOperandsAt(size_t index) const { - return moves_.Get(index); + return moves_.GetRawStorage() + index; } size_t NumMoves() const { return moves_.Size(); } @@ -2820,7 +2840,7 @@ class HParallelMove : public HTemplateInstruction<0> { DECLARE_INSTRUCTION(ParallelMove); private: - GrowableArray<MoveOperands*> moves_; + GrowableArray<MoveOperands> moves_; DISALLOW_COPY_AND_ASSIGN(HParallelMove); }; diff --git a/compiler/optimizing/parallel_move_resolver.cc b/compiler/optimizing/parallel_move_resolver.cc index b8f5070999..debe466560 100644 --- a/compiler/optimizing/parallel_move_resolver.cc +++ b/compiler/optimizing/parallel_move_resolver.cc @@ -57,6 +57,9 @@ void ParallelMoveResolver::BuildInitialMoveList(HParallelMove* parallel_move) { // unallocated, or the move was already eliminated). for (size_t i = 0; i < parallel_move->NumMoves(); ++i) { MoveOperands* move = parallel_move->MoveOperandsAt(i); + // The parallel move resolver algorithm does not work with register pairs. 
+ DCHECK(!move->GetSource().IsPair()); + DCHECK(!move->GetDestination().IsPair()); if (!move->IsRedundant()) { moves_.Add(move); } diff --git a/compiler/optimizing/parallel_move_test.cc b/compiler/optimizing/parallel_move_test.cc index 7ab41b6ddc..28b5697bbd 100644 --- a/compiler/optimizing/parallel_move_test.cc +++ b/compiler/optimizing/parallel_move_test.cc @@ -26,20 +26,26 @@ class TestParallelMoveResolver : public ParallelMoveResolver { public: explicit TestParallelMoveResolver(ArenaAllocator* allocator) : ParallelMoveResolver(allocator) {} + void Dump(Location location) { + if (location.IsConstant()) { + message_ << "C"; + } else if (location.IsPair()) { + message_ << location.low() << "," << location.high(); + } else { + message_ << location.reg(); + } + } + virtual void EmitMove(size_t index) { MoveOperands* move = moves_.Get(index); if (!message_.str().empty()) { message_ << " "; } message_ << "("; - if (move->GetSource().IsConstant()) { - message_ << "C"; - } else { - message_ << move->GetSource().reg(); - } - message_ << " -> " - << move->GetDestination().reg() - << ")"; + Dump(move->GetSource()); + message_ << " -> "; + Dump(move->GetDestination()); + message_ << ")"; } virtual void EmitSwap(size_t index) { @@ -47,11 +53,11 @@ class TestParallelMoveResolver : public ParallelMoveResolver { if (!message_.str().empty()) { message_ << " "; } - message_ << "(" - << move->GetSource().reg() - << " <-> " - << move->GetDestination().reg() - << ")"; + message_ << "("; + Dump(move->GetSource()); + message_ << " <-> "; + Dump(move->GetDestination()); + message_ << ")"; } virtual void SpillScratch(int reg ATTRIBUTE_UNUSED) {} @@ -73,10 +79,10 @@ static HParallelMove* BuildParallelMove(ArenaAllocator* allocator, size_t number_of_moves) { HParallelMove* moves = new (allocator) HParallelMove(allocator); for (size_t i = 0; i < number_of_moves; ++i) { - moves->AddMove(new (allocator) MoveOperands( + moves->AddMove( Location::RegisterLocation(operands[i][0]), 
Location::RegisterLocation(operands[i][1]), - nullptr)); + nullptr); } return moves; } @@ -131,16 +137,66 @@ TEST(ParallelMoveTest, ConstantLast) { ArenaAllocator allocator(&pool); TestParallelMoveResolver resolver(&allocator); HParallelMove* moves = new (&allocator) HParallelMove(&allocator); - moves->AddMove(new (&allocator) MoveOperands( + moves->AddMove( Location::ConstantLocation(new (&allocator) HIntConstant(0)), Location::RegisterLocation(0), - nullptr)); - moves->AddMove(new (&allocator) MoveOperands( + nullptr); + moves->AddMove( Location::RegisterLocation(1), Location::RegisterLocation(2), - nullptr)); + nullptr); resolver.EmitNativeCode(moves); ASSERT_STREQ("(1 -> 2) (C -> 0)", resolver.GetMessage().c_str()); } +TEST(ParallelMoveTest, Pairs) { + ArenaPool pool; + ArenaAllocator allocator(&pool); + + { + TestParallelMoveResolver resolver(&allocator); + HParallelMove* moves = new (&allocator) HParallelMove(&allocator); + moves->AddMove( + Location::RegisterLocation(2), + Location::RegisterLocation(4), + nullptr); + moves->AddMove( + Location::RegisterPairLocation(0, 1), + Location::RegisterPairLocation(2, 3), + nullptr); + resolver.EmitNativeCode(moves); + ASSERT_STREQ("(2 -> 4) (0 -> 2) (1 -> 3)", resolver.GetMessage().c_str()); + } + + { + TestParallelMoveResolver resolver(&allocator); + HParallelMove* moves = new (&allocator) HParallelMove(&allocator); + moves->AddMove( + Location::RegisterPairLocation(0, 1), + Location::RegisterPairLocation(2, 3), + nullptr); + moves->AddMove( + Location::RegisterLocation(2), + Location::RegisterLocation(4), + nullptr); + resolver.EmitNativeCode(moves); + ASSERT_STREQ("(2 -> 4) (0 -> 2) (1 -> 3)", resolver.GetMessage().c_str()); + } + + { + TestParallelMoveResolver resolver(&allocator); + HParallelMove* moves = new (&allocator) HParallelMove(&allocator); + moves->AddMove( + Location::RegisterPairLocation(0, 1), + Location::RegisterPairLocation(2, 3), + nullptr); + moves->AddMove( + Location::RegisterLocation(2), + 
Location::RegisterLocation(0), + nullptr); + resolver.EmitNativeCode(moves); + ASSERT_STREQ("(2 <-> 0) (1 -> 3)", resolver.GetMessage().c_str()); + } +} + } // namespace art diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc index 93ed44ee44..1b42e94960 100644 --- a/compiler/optimizing/register_allocator.cc +++ b/compiler/optimizing/register_allocator.cc @@ -1051,7 +1051,7 @@ void RegisterAllocator::AddInputMoveFor(HInstruction* user, move = previous->AsParallelMove(); } DCHECK_EQ(move->GetLifetimePosition(), user->GetLifetimePosition()); - move->AddMove(new (allocator_) MoveOperands(source, destination, nullptr)); + move->AddMove(source, destination, nullptr); } static bool IsInstructionStart(size_t position) { @@ -1123,7 +1123,7 @@ void RegisterAllocator::InsertParallelMoveAt(size_t position, } } DCHECK_EQ(move->GetLifetimePosition(), position); - move->AddMove(new (allocator_) MoveOperands(source, destination, instruction)); + move->AddMove(source, destination, instruction); } void RegisterAllocator::InsertParallelMoveAtExitOf(HBasicBlock* block, @@ -1153,7 +1153,7 @@ void RegisterAllocator::InsertParallelMoveAtExitOf(HBasicBlock* block, } else { move = previous->AsParallelMove(); } - move->AddMove(new (allocator_) MoveOperands(source, destination, instruction)); + move->AddMove(source, destination, instruction); } void RegisterAllocator::InsertParallelMoveAtEntryOf(HBasicBlock* block, @@ -1172,7 +1172,7 @@ void RegisterAllocator::InsertParallelMoveAtEntryOf(HBasicBlock* block, move->SetLifetimePosition(block->GetLifetimeStart()); block->InsertInstructionBefore(move, first); } - move->AddMove(new (allocator_) MoveOperands(source, destination, instruction)); + move->AddMove(source, destination, instruction); } void RegisterAllocator::InsertMoveAfter(HInstruction* instruction, @@ -1196,7 +1196,7 @@ void RegisterAllocator::InsertMoveAfter(HInstruction* instruction, move->SetLifetimePosition(position); 
instruction->GetBlock()->InsertInstructionBefore(move, instruction->GetNext()); } - move->AddMove(new (allocator_) MoveOperands(source, destination, instruction)); + move->AddMove(source, destination, instruction); } void RegisterAllocator::ConnectSiblings(LiveInterval* interval) { |