Optimizing: Tag arena allocations in code generators.

And completely remove the deprecated GrowableArray.

Replace GrowableArray with ArenaVector in code generators
and related classes and tag arena allocations.

Label arrays use direct allocations from ArenaAllocator
because Label is non-copyable and non-movable and as such
cannot really be held in a container. The GrowableArray
never actually constructed them, relying instead on the
zero-initialized storage from the arena allocator being
valid. We now construct the labels explicitly.
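
For illustration only, a minimal self-contained sketch of that
construction pattern, with a hypothetical FakeLabel standing in for
the real Label and a zeroed local buffer standing in for arena
storage:

    #include <cstddef>
    #include <new>

    // Non-copyable, non-movable stand-in for an assembler Label.
    class FakeLabel {
     public:
      FakeLabel() : position_(-1) {}
      FakeLabel(const FakeLabel&) = delete;
      FakeLabel& operator=(const FakeLabel&) = delete;
      bool IsBound() const { return position_ >= 0; }
     private:
      int position_;
    };

    int main() {
      constexpr std::size_t kNumBlocks = 8;
      // Zero-initialized raw storage, standing in for arena memory.
      alignas(FakeLabel) unsigned char storage[kNumBlocks * sizeof(FakeLabel)] = {};
      // Zeroed bytes are not constructed objects; placement-new each
      // element so every label starts its lifetime in a defined state.
      FakeLabel* labels = reinterpret_cast<FakeLabel*>(storage);
      for (std::size_t i = 0; i != kNumBlocks; ++i) {
        new (labels + i) FakeLabel();
      }
      return labels[0].IsBound() ? 1 : 0;
    }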

Also avoid passing null references to
StackMapStream::ComputeDexRegisterMapSize(), even though
they were unused.

Change-Id: I26a46fdd406b23a3969300a67739d55528df8bf4
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 6b0ccf8..b19726d 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -35,7 +35,6 @@
#include "offsets.h"
#include "primitive.h"
#include "utils/arena_bit_vector.h"
-#include "utils/growable_array.h"
namespace art {
@@ -5054,7 +5053,10 @@
class HParallelMove : public HTemplateInstruction<0> {
public:
explicit HParallelMove(ArenaAllocator* arena, uint32_t dex_pc = kNoDexPc)
- : HTemplateInstruction(SideEffects::None(), dex_pc), moves_(arena, kDefaultNumberOfMoves) {}
+ : HTemplateInstruction(SideEffects::None(), dex_pc),
+ moves_(arena->Adapter(kArenaAllocMoveOperands)) {
+ moves_.reserve(kDefaultNumberOfMoves);
+ }
void AddMove(Location source,
Location destination,
@@ -5064,15 +5066,15 @@
DCHECK(destination.IsValid());
if (kIsDebugBuild) {
if (instruction != nullptr) {
- for (size_t i = 0, e = moves_.Size(); i < e; ++i) {
- if (moves_.Get(i).GetInstruction() == instruction) {
+ for (const MoveOperands& move : moves_) {
+ if (move.GetInstruction() == instruction) {
// Special case the situation where the move is for the spill slot
// of the instruction.
if ((GetPrevious() == instruction)
|| ((GetPrevious() == nullptr)
&& instruction->IsPhi()
&& instruction->GetBlock() == GetBlock())) {
- DCHECK_NE(destination.GetKind(), moves_.Get(i).GetDestination().GetKind())
+ DCHECK_NE(destination.GetKind(), move.GetDestination().GetKind())
<< "Doing parallel moves for the same instruction.";
} else {
DCHECK(false) << "Doing parallel moves for the same instruction.";
@@ -5080,26 +5082,27 @@
}
}
}
- for (size_t i = 0, e = moves_.Size(); i < e; ++i) {
- DCHECK(!destination.OverlapsWith(moves_.Get(i).GetDestination()))
+ for (const MoveOperands& move : moves_) {
+ DCHECK(!destination.OverlapsWith(move.GetDestination()))
<< "Overlapped destination for two moves in a parallel move: "
- << moves_.Get(i).GetSource() << " ==> " << moves_.Get(i).GetDestination() << " and "
+ << move.GetSource() << " ==> " << move.GetDestination() << " and "
<< source << " ==> " << destination;
}
}
- moves_.Add(MoveOperands(source, destination, type, instruction));
+ moves_.emplace_back(source, destination, type, instruction);
}
- MoveOperands* MoveOperandsAt(size_t index) const {
- return moves_.GetRawStorage() + index;
+ MoveOperands* MoveOperandsAt(size_t index) {
+ DCHECK_LT(index, moves_.size());
+ return &moves_[index];
}
- size_t NumMoves() const { return moves_.Size(); }
+ size_t NumMoves() const { return moves_.size(); }
DECLARE_INSTRUCTION(ParallelMove);
private:
- GrowableArray<MoveOperands> moves_;
+ ArenaVector<MoveOperands> moves_;
DISALLOW_COPY_AND_ASSIGN(HParallelMove);
};
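
For readers less familiar with arena-backed containers, the moves_
change above (an ArenaVector drawing from the arena via Adapter(),
plus reserve() and emplace_back()) has a close standard-library
analogue. A minimal C++17 sketch using std::pmr, with a hypothetical
MoveOp standing in for MoveOperands:

    #include <cstdio>
    #include <memory_resource>
    #include <vector>

    // Hypothetical stand-in for MoveOperands.
    struct MoveOp {
      int source;
      int destination;
    };

    int main() {
      // The buffer resource plays the role of the arena: allocations
      // are bump-allocated and released together when it is destroyed.
      std::pmr::monotonic_buffer_resource arena;
      std::pmr::vector<MoveOp> moves(&arena);  // ~ ArenaVector<MoveOperands>
      moves.reserve(4);                        // ~ reserve(kDefaultNumberOfMoves)
      moves.emplace_back(MoveOp{0, 1});        // ~ emplace_back(...)
      moves.emplace_back(MoveOp{2, 3});
      for (const MoveOp& move : moves) {
        std::printf("%d -> %d\n", move.source, move.destination);
      }
      return 0;
    }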