ART: Use ScopedArenaAllocator for pass-local data.

Passes that create a local ArenaAllocator were hiding their
memory usage from the arena allocation counting, making it
difficult to track down where memory was used. Using
ScopedArenaAllocator makes this memory usage visible.

This changes the HGraph constructor, which requires a lot of
changes in tests. Refactor these tests to limit the amount of
work needed the next time we change that constructor.

Test: m test-art-host-gtest
Test: testrunner.py --host
Test: Build with kArenaAllocatorCountAllocations = true.
Bug: 64312607
Change-Id: I34939e4086b500d6e827ff3ef2211d1a421ac91a
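
For reference, a minimal sketch of the before/after pattern this
change applies (identifiers are the ones used in the diff below;
this is an illustration, not the full pass):

    // Before: a pass-local ArenaAllocator drew memory straight from
    // the arena pool, so its allocations were invisible to the
    // per-kind allocation counters.
    ArenaAllocator allocator(graph_->GetArena()->GetArenaPool());
    ArenaVector<HInstruction*> worklist(allocator.Adapter(kArenaAllocMisc));

    // After: a ScopedArenaAllocator stacked on the graph's ArenaStack
    // is counted, and its memory is reclaimed when the allocator goes
    // out of scope at the end of the pass.
    ScopedArenaAllocator allocator(graph_->GetArenaStack());
    ScopedArenaVector<HInstruction*> worklist(allocator.Adapter(kArenaAllocMisc));

Containers and bit vectors built on the scoped allocator must not
outlive it, so this pattern only fits pass-local data.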
diff --git a/compiler/optimizing/code_sinking.cc b/compiler/optimizing/code_sinking.cc
index b558eb1..d8ebac9 100644
--- a/compiler/optimizing/code_sinking.cc
+++ b/compiler/optimizing/code_sinking.cc
@@ -16,6 +16,10 @@
 
 #include "code_sinking.h"
 
+#include "base/arena_bit_vector.h"
+#include "base/bit_vector-inl.h"
+#include "base/scoped_arena_allocator.h"
+#include "base/scoped_arena_containers.h"
 #include "common_dominator.h"
 #include "nodes.h"
 
@@ -115,7 +119,7 @@
 static void AddInstruction(HInstruction* instruction,
                            const ArenaBitVector& processed_instructions,
                            const ArenaBitVector& discard_blocks,
-                           ArenaVector<HInstruction*>* worklist) {
+                           ScopedArenaVector<HInstruction*>* worklist) {
   // Add to the work list if the instruction is not in the list of blocks
   // to discard, hasn't been already processed and is of interest.
   if (!discard_blocks.IsBitSet(instruction->GetBlock()->GetBlockId()) &&
@@ -128,7 +132,7 @@
 static void AddInputs(HInstruction* instruction,
                       const ArenaBitVector& processed_instructions,
                       const ArenaBitVector& discard_blocks,
-                      ArenaVector<HInstruction*>* worklist) {
+                      ScopedArenaVector<HInstruction*>* worklist) {
   for (HInstruction* input : instruction->GetInputs()) {
     AddInstruction(input, processed_instructions, discard_blocks, worklist);
   }
@@ -137,7 +141,7 @@
 static void AddInputs(HBasicBlock* block,
                       const ArenaBitVector& processed_instructions,
                       const ArenaBitVector& discard_blocks,
-                      ArenaVector<HInstruction*>* worklist) {
+                      ScopedArenaVector<HInstruction*>* worklist) {
   for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
     AddInputs(it.Current(), processed_instructions, discard_blocks, worklist);
   }
@@ -242,17 +246,19 @@
 
 
 void CodeSinking::SinkCodeToUncommonBranch(HBasicBlock* end_block) {
-  // Local allocator to discard data structures created below at the end of
-  // this optimization.
-  ArenaAllocator allocator(graph_->GetArena()->GetArenaPool());
+  // Local allocator to discard data structures created below at the end of this optimization.
+  ScopedArenaAllocator allocator(graph_->GetArenaStack());
 
   size_t number_of_instructions = graph_->GetCurrentInstructionId();
-  ArenaVector<HInstruction*> worklist(allocator.Adapter(kArenaAllocMisc));
+  ScopedArenaVector<HInstruction*> worklist(allocator.Adapter(kArenaAllocMisc));
   ArenaBitVector processed_instructions(&allocator, number_of_instructions, /* expandable */ false);
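+  // The scoped arena does not return zero-initialized memory the way the
+  // pool-backed ArenaAllocator did, so clear the bit vectors explicitly.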
+  processed_instructions.ClearAllBits();
   ArenaBitVector post_dominated(&allocator, graph_->GetBlocks().size(), /* expandable */ false);
+  post_dominated.ClearAllBits();
   ArenaBitVector instructions_that_can_move(
       &allocator, number_of_instructions, /* expandable */ false);
-  ArenaVector<HInstruction*> move_in_order(allocator.Adapter(kArenaAllocMisc));
+  instructions_that_can_move.ClearAllBits();
+  ScopedArenaVector<HInstruction*> move_in_order(allocator.Adapter(kArenaAllocMisc));
 
   // Step (1): Visit post order to get a subset of blocks post dominated by `end_block`.
   // TODO(ngeoffray): Getting the full set of post-dominated should be done by