/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <iostream>

#include "parallel_move_resolver.h"
#include "nodes.h"

namespace art {

void ParallelMoveResolver::BuildInitialMoveList(HParallelMove* parallel_move) {
  // Perform a linear sweep of the moves to add them to the initial list of
  // moves to perform, ignoring any move that is redundant (the source is
  // the same as the destination, the destination is ignored and
  // unallocated, or the move was already eliminated).
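  // For example, a move such as (R0 -> R0) is redundant and never enters
  // the worklist.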
  for (size_t i = 0; i < parallel_move->NumMoves(); ++i) {
    MoveOperands* move = parallel_move->MoveOperandsAt(i);
    if (!move->IsRedundant()) {
      moves_.Add(move);
    }
  }
}

void ParallelMoveResolverWithSwap::EmitNativeCode(HParallelMove* parallel_move) {
  DCHECK(moves_.IsEmpty());
  // Build up a worklist of moves.
  BuildInitialMoveList(parallel_move);

  // Perform stack-slot-to-stack-slot moves first, to take advantage of a
  // free register on register-constrained machines.
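  // (A stack-to-stack move generally needs a temporary register. While no
  // register-destination moves have been performed yet, any register that is
  // only written by the remaining moves still qualifies as a scratch
  // location, so a temporary can often be found without spilling.)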
  for (size_t i = 0; i < moves_.Size(); ++i) {
    const MoveOperands& move = *moves_.Get(i);
    // Ignore constants and moves already eliminated.
    if (move.IsEliminated() || move.GetSource().IsConstant()) {
      continue;
    }

    if ((move.GetSource().IsStackSlot() || move.GetSource().IsDoubleStackSlot()) &&
        (move.GetDestination().IsStackSlot() || move.GetDestination().IsDoubleStackSlot())) {
      PerformMove(i);
    }
  }

  for (size_t i = 0; i < moves_.Size(); ++i) {
    const MoveOperands& move = *moves_.Get(i);
    // Skip constants to perform them last. They don't block other moves
    // and skipping such moves with register destinations keeps those
    // registers free for the whole algorithm.
    if (!move.IsEliminated() && !move.GetSource().IsConstant()) {
      PerformMove(i);
    }
  }

  // Perform the moves with constant sources.
  for (size_t i = 0; i < moves_.Size(); ++i) {
    MoveOperands* move = moves_.Get(i);
    if (!move->IsEliminated()) {
      DCHECK(move->GetSource().IsConstant());
      EmitMove(i);
      // Eliminate the move, in case following moves need a scratch register.
      move->Eliminate();
    }
  }

  moves_.Reset();
}

Location LowOf(Location location) {
  if (location.IsRegisterPair()) {
    return Location::RegisterLocation(location.low());
  } else if (location.IsFpuRegisterPair()) {
    return Location::FpuRegisterLocation(location.low());
  } else if (location.IsDoubleStackSlot()) {
    return Location::StackSlot(location.GetStackIndex());
  } else {
    return Location::NoLocation();
  }
}

Location HighOf(Location location) {
  if (location.IsRegisterPair()) {
    return Location::RegisterLocation(location.high());
  } else if (location.IsFpuRegisterPair()) {
    return Location::FpuRegisterLocation(location.high());
  } else if (location.IsDoubleStackSlot()) {
    return Location::StackSlot(location.GetHighStackIndex(4));
  } else {
    return Location::NoLocation();
  }
}

// Update the source of `move`, knowing that `updated_location` has been swapped
// with `new_source`. Note that `updated_location` can be a pair, therefore if
// `move` is non-pair, we need to extract which register to use.
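// For example, if the register pair (R0, R1) was swapped with the pair
// (R2, R3) and `move`'s source is the single register R1, the source is
// updated to R3.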
static void UpdateSourceOf(MoveOperands* move, Location updated_location, Location new_source) {
  Location source = move->GetSource();
  if (LowOf(updated_location).Equals(source)) {
    move->SetSource(LowOf(new_source));
  } else if (HighOf(updated_location).Equals(source)) {
    move->SetSource(HighOf(new_source));
  } else {
    DCHECK(updated_location.Equals(source)) << updated_location << " " << source;
    move->SetSource(new_source);
  }
}

MoveOperands* ParallelMoveResolverWithSwap::PerformMove(size_t index) {
  // Each call to this function performs a move and deletes it from the move
  // graph. We first recursively perform any move blocking this one. We
  // mark a move as "pending" on entry to PerformMove in order to detect
  // cycles in the move graph. We use operand swaps to resolve cycles,
  // which means that a call to PerformMove could change any source operand
  // in the move graph.
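  // For example, the cycle (R0 -> R1) (R1 -> R0) is resolved with a single
  // swap of R0 and R1 instead of a move through a temporary location.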

  MoveOperands* move = moves_.Get(index);
  DCHECK(!move->IsPending());
  if (move->IsRedundant()) {
    // Because we swap register pairs first, moves that are not yet pending
    // may have become redundant.
    move->Eliminate();
    return nullptr;
  }

  // Clear this move's destination to indicate a pending move. The actual
  // destination is saved in a stack-allocated local. Recursion may allow
  // multiple moves to be pending.
  DCHECK(!move->GetSource().IsInvalid());
  Location destination = move->MarkPending();

  // Perform a depth-first traversal of the move graph to resolve
  // dependencies. Any unperformed, unpending move with a source the same
  // as this one's destination blocks this one so recursively perform all
  // such moves.
  MoveOperands* required_swap = nullptr;
  for (size_t i = 0; i < moves_.Size(); ++i) {
    const MoveOperands& other_move = *moves_.Get(i);
    if (other_move.Blocks(destination) && !other_move.IsPending()) {
      // Though PerformMove can change any source operand in the move graph,
      // calling `PerformMove` cannot create a blocking move via a swap
      // (this loop does not miss any).
      // For example, assume there is a non-blocking move with source A
      // and this move is blocked on source B and there is a swap of A and
      // B. Then A and B must be involved in the same cycle (or they would
      // not be swapped). Since this move's destination is B and there is
      // only a single incoming edge to an operand, this move must also be
      // involved in the same cycle. In that case, the blocking move will
      // be created but will be "pending" when we return from PerformMove.
      required_swap = PerformMove(i);

      if (required_swap == move) {
        // If this move is required to swap, we do so without looking
        // at the next moves. Swapping is not blocked by anything, it just
        // updates the sources of the other moves.
        break;
      } else if (required_swap == moves_.Get(i)) {
        // If `other_move` was swapped, we iterate again to find a new
        // potential cycle.
        required_swap = nullptr;
        i = 0;
      } else if (required_swap != nullptr) {
        // A move is required to swap. We walk back the cycle to find that
        // move by simply returning from this `PerformMove`.
        moves_.Get(index)->ClearPending(destination);
        return required_swap;
      }
    }
  }

  // We are about to resolve this move and don't need it marked as
  // pending, so restore its destination.
  move->ClearPending(destination);

  // This move's source may have changed due to swaps to resolve cycles and
  // so it may now be the last move in the cycle. If so remove it.
  if (move->GetSource().Equals(destination)) {
    move->Eliminate();
    DCHECK(required_swap == nullptr);
    return nullptr;
  }

  // The move may be blocked on at most one pending move, in which case we
  // have a cycle. Search for such a blocking move and perform a swap to
  // resolve it.
  bool do_swap = false;
  if (required_swap != nullptr) {
    DCHECK_EQ(required_swap, move);
    do_swap = true;
  } else {
    for (size_t i = 0; i < moves_.Size(); ++i) {
      const MoveOperands& other_move = *moves_.Get(i);
      if (other_move.Blocks(destination)) {
        DCHECK(other_move.IsPending());
        if (!move->Is64BitMove() && other_move.Is64BitMove()) {
          // We swap 64-bit moves before swapping 32-bit moves. Walk back
          // the cycle by returning the move that must be swapped.
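          // (Swapping the 64-bit move first allows UpdateSourceOf to split
          // the swapped pair into its low/high halves for the remaining
          // 32-bit moves of the cycle.)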
          return moves_.Get(i);
        }
        do_swap = true;
        break;
      }
    }
  }

  if (do_swap) {
    EmitSwap(index);
    // Any unperformed (including pending) move with a source of either
    // this move's source or destination needs to have its source
    // changed to reflect the state of affairs after the swap.
    Location source = move->GetSource();
    Location swap_destination = move->GetDestination();
    move->Eliminate();
    for (size_t i = 0; i < moves_.Size(); ++i) {
      const MoveOperands& other_move = *moves_.Get(i);
      if (other_move.Blocks(source)) {
        UpdateSourceOf(moves_.Get(i), source, swap_destination);
      } else if (other_move.Blocks(swap_destination)) {
        UpdateSourceOf(moves_.Get(i), swap_destination, source);
      }
    }
    // If the swap was required because of a 64-bit move in the middle of a
    // cycle, we return the swapped move, so that the caller knows it needs
    // to re-iterate its dependency loop.
    return required_swap;
  } else {
    // This move is not blocked.
    EmitMove(index);
    move->Eliminate();
    DCHECK(required_swap == nullptr);
    return nullptr;
  }
}

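// A location is a scratch location if no remaining move reads it (nothing
// blocks on it) while some remaining move still writes it, so its current
// value is dead and it may be clobbered freely. For example, with the single
// move (R0 -> R1), R1 is a scratch location.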
bool ParallelMoveResolverWithSwap::IsScratchLocation(Location loc) {
  for (size_t i = 0; i < moves_.Size(); ++i) {
    if (moves_.Get(i)->Blocks(loc)) {
      return false;
    }
  }

  for (size_t i = 0; i < moves_.Size(); ++i) {
    if (moves_.Get(i)->GetDestination().Equals(loc)) {
      return true;
    }
  }

  return false;
}

int ParallelMoveResolverWithSwap::AllocateScratchRegister(int blocked,
                                                          int register_count,
                                                          int if_scratch,
                                                          bool* spilled) {
  DCHECK_NE(blocked, if_scratch);
  int scratch = -1;
  for (int reg = 0; reg < register_count; ++reg) {
    if ((blocked != reg) && IsScratchLocation(Location::RegisterLocation(reg))) {
      scratch = reg;
      break;
    }
  }

  if (scratch == -1) {
    *spilled = true;
    scratch = if_scratch;
  } else {
    *spilled = false;
  }

  return scratch;
}

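// A minimal usage sketch from a backend's EmitSwap, with hypothetical
// register numbers; the GetRegister()/IsSpilled() accessors are assumed to
// be declared in parallel_move_resolver.h:
//
//   ScratchRegisterScope ensure_scratch(
//       this, /*blocked=*/ 0, /*if_scratch=*/ 1, /*number_of_registers=*/ 16);
//   int tmp = ensure_scratch.GetRegister();
//   // ... use `tmp` for a memory-to-memory exchange; if `tmp` had to be
//   // spilled, the scope restores it when it goes out of scope.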
ParallelMoveResolverWithSwap::ScratchRegisterScope::ScratchRegisterScope(
    ParallelMoveResolverWithSwap* resolver, int blocked, int if_scratch, int number_of_registers)
    : resolver_(resolver),
      reg_(kNoRegister),
      spilled_(false) {
  reg_ = resolver_->AllocateScratchRegister(blocked, number_of_registers, if_scratch, &spilled_);

  if (spilled_) {
    resolver->SpillScratch(reg_);
  }
}


ParallelMoveResolverWithSwap::ScratchRegisterScope::~ScratchRegisterScope() {
  if (spilled_) {
    resolver_->RestoreScratch(reg_);
  }
}

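// Unlike the resolver above, ParallelMoveResolverNoSwap breaks cycles with
// scratch locations rather than swaps: one move of the cycle is emitted into
// a scratch location right away, and a pending move out of that scratch
// location is emitted later, once its destination is no longer blocked.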
void ParallelMoveResolverNoSwap::EmitNativeCode(HParallelMove* parallel_move) {
  DCHECK_EQ(GetNumberOfPendingMoves(), 0u);
  DCHECK(moves_.IsEmpty());
  DCHECK(scratches_.IsEmpty());

  // Backend dependent initialization.
  PrepareForEmitNativeCode();

  // Build up a worklist of moves.
  BuildInitialMoveList(parallel_move);

  for (size_t i = 0; i < moves_.Size(); ++i) {
    const MoveOperands& move = *moves_.Get(i);
    // Skip constants to perform them last. They don't block other moves and
    // skipping such moves with register destinations keeps those registers
    // free for the whole algorithm.
    if (!move.IsEliminated() && !move.GetSource().IsConstant()) {
      PerformMove(i);
    }
  }

  // Perform the moves with constant sources and register destinations with
  // UpdateMoveSource() to reduce the number of literal loads. Stack
  // destinations are skipped since we won't benefit from changing the
  // constant sources to stack locations.
  for (size_t i = 0; i < moves_.Size(); ++i) {
    MoveOperands* move = moves_.Get(i);
    Location destination = move->GetDestination();
    if (!move->IsEliminated() && !destination.IsStackSlot() && !destination.IsDoubleStackSlot()) {
      Location source = move->GetSource();
      EmitMove(i);
      move->Eliminate();
      // This may introduce an additional instruction dependency, but it
      // reduces the number of moves and possible literal loads. For example,
      // Original moves:
      //   1234.5678 -> D0
      //   1234.5678 -> D1
      // Updated moves:
      //   1234.5678 -> D0
      //   D0 -> D1
      UpdateMoveSource(source, destination);
    }
  }

  // Perform the rest of the moves.
  for (size_t i = 0; i < moves_.Size(); ++i) {
    MoveOperands* move = moves_.Get(i);
    if (!move->IsEliminated()) {
      EmitMove(i);
      move->Eliminate();
    }
  }

  // All pending moves that were added to resolve cycles should have been
  // performed by now.
  DCHECK_EQ(GetNumberOfPendingMoves(), 0u);

  // Backend dependent cleanup.
  FinishEmitNativeCode();

  moves_.Reset();
  scratches_.Reset();
}

Location ParallelMoveResolverNoSwap::GetScratchLocation(Location::Kind kind) {
  for (size_t i = 0; i < scratches_.Size(); ++i) {
    Location loc = scratches_.Get(i);
    if (loc.GetKind() == kind && !IsBlockedByMoves(loc)) {
      return loc;
    }
  }
  for (size_t i = 0; i < moves_.Size(); ++i) {
    Location loc = moves_.Get(i)->GetDestination();
    if (loc.GetKind() == kind && !IsBlockedByMoves(loc)) {
      return loc;
    }
  }
  return Location::NoLocation();
}

void ParallelMoveResolverNoSwap::AddScratchLocation(Location loc) {
  if (kIsDebugBuild) {
    for (size_t i = 0; i < scratches_.Size(); ++i) {
      DCHECK(!loc.Equals(scratches_.Get(i)));
    }
  }
  scratches_.Add(loc);
}

void ParallelMoveResolverNoSwap::RemoveScratchLocation(Location loc) {
  DCHECK(!IsBlockedByMoves(loc));
  for (size_t i = 0; i < scratches_.Size(); ++i) {
    if (loc.Equals(scratches_.Get(i))) {
      scratches_.DeleteAt(i);
      break;
    }
  }
}

void ParallelMoveResolverNoSwap::PerformMove(size_t index) {
  // Each call to this function performs a move and deletes it from the move
  // graph. We first recursively perform any move blocking this one. We mark
  // a move as "pending" on entry to PerformMove in order to detect cycles
  // in the move graph. We use scratch locations to resolve cycles, which
  // means additional pending moves might be added. After a move has been
  // performed, we update the source operands of the remaining moves to
  // reduce dependencies in the graph.

  MoveOperands* move = moves_.Get(index);
  DCHECK(!move->IsPending());
  DCHECK(!move->IsEliminated());
  if (move->IsRedundant()) {
    // Previous operations on the list of moves have caused this particular move
    // to become a no-op, so we can safely eliminate it. Consider for example
    // (0 -> 1) (1 -> 0) (1 -> 2). There is a cycle (0 -> 1) (1 -> 0), that we will
    // resolve as (1 -> scratch) (0 -> 1) (scratch -> 0). If, by chance, '2' is
    // used as the scratch location, the move (1 -> 2) will occur while resolving
    // the cycle. When that move is emitted, the code will update moves with a '1'
    // as their source to use '2' instead (see `UpdateMoveSource()`). In our example
    // the initial move (1 -> 2) would then become the no-op (2 -> 2) that can be
    // eliminated here.
    move->Eliminate();
    return;
  }

  // Clear this move's destination to indicate a pending move. The actual
  // destination is saved in a stack-allocated local. Recursion may allow
  // multiple moves to be pending.
  DCHECK(!move->GetSource().IsInvalid());
  Location destination = move->MarkPending();

  // Perform a depth-first traversal of the move graph to resolve
  // dependencies. Any unperformed, unpending move with a source the same
  // as this one's destination blocks this one so recursively perform all
  // such moves.
  for (size_t i = 0; i < moves_.Size(); ++i) {
    const MoveOperands& other_move = *moves_.Get(i);
    if (other_move.Blocks(destination) && !other_move.IsPending()) {
      PerformMove(i);
    }
  }

  // We are about to resolve this move and don't need it marked as
  // pending, so restore its destination.
  move->ClearPending(destination);

  // No one else should write to the move destination while it is pending.
  DCHECK(!move->IsRedundant());

  Location source = move->GetSource();
  // The move may be blocked on several pending moves, in which case we have
  // a cycle.
  if (IsBlockedByMoves(destination)) {
    // For a cycle like (A -> B) (B -> C) (C -> A), we change it to the
    // following sequence:
    //   (C -> scratch)      # Emit right now.
    //   (A -> B) (B -> C)   # Unblocked.
    //   (scratch -> A)      # Add to pending_moves_, blocked by (A -> B).
    Location::Kind kind = source.GetKind();
    DCHECK_NE(kind, Location::kConstant);
    Location scratch = AllocateScratchLocationFor(kind);
    // We only care about the move size.
    Primitive::Type type = move->Is64BitMove() ? Primitive::kPrimLong : Primitive::kPrimInt;
    // Perform (C -> scratch).
    move->SetDestination(scratch);
    EmitMove(index);
    move->Eliminate();
    UpdateMoveSource(source, scratch);
    // Add (scratch -> A).
    AddPendingMove(scratch, destination, type);
  } else {
    // This move is not blocked.
    EmitMove(index);
    move->Eliminate();
    UpdateMoveSource(source, destination);
  }

  // Moves in the pending list should not block any other moves. But performing
  // unblocked moves in the pending list can free scratch registers, so we do
  // this as early as possible.
  MoveOperands* pending_move;
  while ((pending_move = GetUnblockedPendingMove(source)) != nullptr) {
    Location pending_source = pending_move->GetSource();
    Location pending_destination = pending_move->GetDestination();
    // We do not depend on the pending move index, so we just delete the move
    // instead of eliminating it, to keep the pending list clean.
    DeletePendingMove(pending_move);
    move->SetSource(pending_source);
    move->SetDestination(pending_destination);
    EmitMove(index);
    move->Eliminate();
    UpdateMoveSource(pending_source, pending_destination);
    // Free any unblocked locations in the scratch location list.
    for (size_t i = 0; i < scratches_.Size(); ++i) {
      Location scratch = scratches_.Get(i);
      // Only a scratch location that overlaps with the performed move's
      // source can become unblocked.
      if (scratch.OverlapsWith(pending_source) && !IsBlockedByMoves(scratch)) {
        FreeScratchLocation(pending_source);
      }
    }
  }
}

void ParallelMoveResolverNoSwap::UpdateMoveSource(Location from, Location to) {
  // This function is used to reduce the dependencies in the graph after
  // (from -> to) has been performed. Since we ensure there is no move with the
  // same destination, (to -> X) cannot be blocked while (from -> X) might still
  // be blocked. Consider for example the moves (0 -> 1) (1 -> 2) (1 -> 3). After
  // (1 -> 2) has been performed, the moves left are (0 -> 1) and (1 -> 3). There
  // is a dependency between the two. If we update the source location from 1 to
  // 2, we will get (0 -> 1) and (2 -> 3). There is no dependency between the two.
  //
  // This is not something we must do, but we can use fewer scratch locations with
  // this trick. For example, we can avoid using additional scratch locations for
  // moves (0 -> 1), (1 -> 2), (1 -> 0).
  for (size_t i = 0; i < moves_.Size(); ++i) {
    MoveOperands* move = moves_.Get(i);
    if (move->GetSource().Equals(from)) {
      move->SetSource(to);
    }
  }
}

void ParallelMoveResolverNoSwap::AddPendingMove(Location source,
                                                Location destination,
                                                Primitive::Type type) {
  pending_moves_.Add(new (allocator_) MoveOperands(source, destination, type, nullptr));
}

void ParallelMoveResolverNoSwap::DeletePendingMove(MoveOperands* move) {
  pending_moves_.Delete(move);
}

MoveOperands* ParallelMoveResolverNoSwap::GetUnblockedPendingMove(Location loc) {
  for (size_t i = 0; i < pending_moves_.Size(); ++i) {
    MoveOperands* move = pending_moves_.Get(i);
    Location destination = move->GetDestination();
    // Only moves whose destination overlaps with `loc` can be unblocked.
    if (destination.OverlapsWith(loc) && !IsBlockedByMoves(destination)) {
      return move;
    }
  }
  return nullptr;
}

bool ParallelMoveResolverNoSwap::IsBlockedByMoves(Location loc) {
  for (size_t i = 0; i < pending_moves_.Size(); ++i) {
    if (pending_moves_.Get(i)->Blocks(loc)) {
      return true;
    }
  }
  for (size_t i = 0; i < moves_.Size(); ++i) {
    if (moves_.Get(i)->Blocks(loc)) {
      return true;
    }
  }
  return false;
}

// So far it is only used for debugging purposes to make sure all pending moves
// have been performed.
size_t ParallelMoveResolverNoSwap::GetNumberOfPendingMoves() {
  return pending_moves_.Size();
}

}  // namespace art