Remove -Wno-unused-parameter and -Wno-sign-promo from base cflags.

Fix associated errors about unused parameters and implicit sign conversions.
The sign-conversion fixes were largely in the area of enums, so add ostream
operators for the affected enums and fix tools/generate-operator-out.py.
Tidy the arena allocation code and arena-allocated data types (giving them a
common ArenaObject base) rather than fixing each class's new and delete
operators by hand.
Remove dead code.
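
As a sketch of the two recurring patterns (FooKind and FooBar are hypothetical
names used for illustration only; ArenaObject and kArenaAllocMisc come from the
arena allocator headers touched below):

  // Hypothetical enum; the real ones are e.g. BlockAddressMode and Opcode.
  enum FooKind {
    kFooA = 0,
    kFooB = 1,
  };
  // Stream the raw value explicitly so "os << kind" no longer relies on an
  // implicit promotion of the enum, which the newly enabled warnings flag.
  inline std::ostream& operator<<(std::ostream& os, const FooKind& rhs) {
    os << static_cast<int>(rhs);
    return os;
  }

  // Arena-allocated types now inherit their placement new/delete from
  // ArenaObject instead of declaring the operators in every class.
  class FooBar : public ArenaObject<kArenaAllocMisc> {
   public:
    explicit FooBar(FooKind kind) : kind_(kind) {}
   private:
    const FooKind kind_;
  };
  // Usage: FooBar* f = new (arena) FooBar(kFooA);  // delete is a no-op.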

Change-Id: I5b433e722d2f75baacfacae4d32aef4a828bfe1b
diff --git a/compiler/utils/arena_allocator.h b/compiler/utils/arena_allocator.h
index b2f5ca9..6d21399 100644
--- a/compiler/utils/arena_allocator.h
+++ b/compiler/utils/arena_allocator.h
@@ -82,7 +82,7 @@
   ArenaAllocatorStatsImpl& operator = (const ArenaAllocatorStatsImpl& other) = delete;
 
   void Copy(const ArenaAllocatorStatsImpl& other) { UNUSED(other); }
-  void RecordAlloc(size_t bytes, ArenaAllocKind kind) { UNUSED(bytes); UNUSED(kind); }
+  void RecordAlloc(size_t bytes, ArenaAllocKind kind) { UNUSED(bytes, kind); }
   size_t NumAllocations() const { return 0u; }
   size_t BytesAllocated() const { return 0u; }
   void Dump(std::ostream& os, const Arena* first, ssize_t lost_bytes_adjustment) const {
diff --git a/compiler/utils/arena_bit_vector.cc b/compiler/utils/arena_bit_vector.cc
index de35f3d..f17e5a9 100644
--- a/compiler/utils/arena_bit_vector.cc
+++ b/compiler/utils/arena_bit_vector.cc
@@ -16,12 +16,12 @@
 
 #include "arena_allocator.h"
 #include "arena_bit_vector.h"
-#include "base/allocator.h"
 
 namespace art {
 
 template <typename ArenaAlloc>
-class ArenaBitVectorAllocator FINAL : public Allocator {
+class ArenaBitVectorAllocator FINAL : public Allocator,
+    public ArenaObject<kArenaAllocGrowableBitMap> {
  public:
   explicit ArenaBitVectorAllocator(ArenaAlloc* arena) : arena_(arena) {}
   ~ArenaBitVectorAllocator() {}
@@ -32,11 +32,6 @@
 
   virtual void Free(void*) {}  // Nop.
 
-  static void* operator new(size_t size, ArenaAlloc* arena) {
-    return arena->Alloc(sizeof(ArenaBitVectorAllocator), kArenaAllocGrowableBitMap);
-  }
-  static void operator delete(void* p) {}  // Nop.
-
  private:
   ArenaAlloc* const arena_;
   DISALLOW_COPY_AND_ASSIGN(ArenaBitVectorAllocator);
diff --git a/compiler/utils/arena_bit_vector.h b/compiler/utils/arena_bit_vector.h
index c92658f..34f1ca9 100644
--- a/compiler/utils/arena_bit_vector.h
+++ b/compiler/utils/arena_bit_vector.h
@@ -17,12 +17,14 @@
 #ifndef ART_COMPILER_UTILS_ARENA_BIT_VECTOR_H_
 #define ART_COMPILER_UTILS_ARENA_BIT_VECTOR_H_
 
+#include "arena_object.h"
 #include "base/bit_vector.h"
-#include "utils/arena_allocator.h"
-#include "utils/scoped_arena_allocator.h"
 
 namespace art {
 
+class ArenaAllocator;
+class ScopedArenaAllocator;
+
 // Type of growable bitmap for memory tuning.
 enum OatBitMapKind {
   kBitMapMisc = 0,
@@ -50,7 +52,7 @@
 /*
  * A BitVector implementation that uses Arena allocation.
  */
-class ArenaBitVector : public BitVector {
+class ArenaBitVector : public BitVector, public ArenaObject<kArenaAllocGrowableBitMap> {
  public:
   ArenaBitVector(ArenaAllocator* arena, uint32_t start_bits, bool expandable,
                  OatBitMapKind kind = kBitMapMisc);
@@ -58,16 +60,10 @@
                  OatBitMapKind kind = kBitMapMisc);
   ~ArenaBitVector() {}
 
-  static void* operator new(size_t size, ArenaAllocator* arena) {
-    return arena->Alloc(sizeof(ArenaBitVector), kArenaAllocGrowableBitMap);
-  }
-  static void* operator new(size_t size, ScopedArenaAllocator* arena) {
-    return arena->Alloc(sizeof(ArenaBitVector), kArenaAllocGrowableBitMap);
-  }
-  static void operator delete(void* p) {}  // Nop.
-
  private:
   const OatBitMapKind kind_;      // for memory use tuning. TODO: currently unused.
+
+  DISALLOW_COPY_AND_ASSIGN(ArenaBitVector);
 };
 
 
diff --git a/compiler/utils/arena_containers.h b/compiler/utils/arena_containers.h
index c48b0c8..8252591 100644
--- a/compiler/utils/arena_containers.h
+++ b/compiler/utils/arena_containers.h
@@ -66,7 +66,7 @@
 class ArenaAllocatorAdapterKindImpl<false> {
  public:
   // Not tracking allocations, ignore the supplied kind and arbitrarily provide kArenaAllocSTL.
-  explicit ArenaAllocatorAdapterKindImpl(ArenaAllocKind kind) { }
+  explicit ArenaAllocatorAdapterKindImpl(ArenaAllocKind kind) { UNUSED(kind); }
   ArenaAllocatorAdapterKindImpl& operator=(const ArenaAllocatorAdapterKindImpl& other) = default;
   ArenaAllocKind Kind() { return kArenaAllocSTL; }
 };
@@ -159,11 +159,13 @@
   const_pointer address(const_reference x) const { return &x; }
 
   pointer allocate(size_type n, ArenaAllocatorAdapter<void>::pointer hint = nullptr) {
+    UNUSED(hint);
     DCHECK_LE(n, max_size());
     return reinterpret_cast<T*>(arena_allocator_->Alloc(n * sizeof(T),
                                                         ArenaAllocatorAdapterKind::Kind()));
   }
   void deallocate(pointer p, size_type n) {
+    UNUSED(p, n);
   }
 
   void construct(pointer p, const_reference val) {
diff --git a/compiler/utils/arena_object.h b/compiler/utils/arena_object.h
index 8f6965e..d64c419 100644
--- a/compiler/utils/arena_object.h
+++ b/compiler/utils/arena_object.h
@@ -19,14 +19,21 @@
 
 #include "arena_allocator.h"
 #include "base/logging.h"
+#include "scoped_arena_allocator.h"
 
 namespace art {
 
+// Parent for arena allocated objects giving appropriate new and delete operators.
+template<enum ArenaAllocKind kAllocKind>
 class ArenaObject {
  public:
   // Allocate a new ArenaObject of 'size' bytes in the Arena.
   void* operator new(size_t size, ArenaAllocator* allocator) {
-    return allocator->Alloc(size, kArenaAllocMisc);
+    return allocator->Alloc(size, kAllocKind);
+  }
+
+  static void* operator new(size_t size, ScopedArenaAllocator* arena) {
+    return arena->Alloc(size, kAllocKind);
   }
 
   void operator delete(void*, size_t) {
@@ -35,6 +42,26 @@
   }
 };
 
+
+// Parent for arena allocated objects that get deleted, gives appropriate new and delete operators.
+// Currently this is used by the quick compiler for debug reference counting arena allocations.
+template<enum ArenaAllocKind kAllocKind>
+class DeletableArenaObject {
+ public:
+  // Allocate a new ArenaObject of 'size' bytes in the Arena.
+  void* operator new(size_t size, ArenaAllocator* allocator) {
+    return allocator->Alloc(size, kAllocKind);
+  }
+
+  static void* operator new(size_t size, ScopedArenaAllocator* arena) {
+    return arena->Alloc(size, kAllocKind);
+  }
+
+  void operator delete(void*, size_t) {
+    // Nop.
+  }
+};
+
 }  // namespace art
 
 #endif  // ART_COMPILER_UTILS_ARENA_OBJECT_H_
diff --git a/compiler/utils/arm/assembler_arm.h b/compiler/utils/arm/assembler_arm.h
index c1ed6a2..dca2ab7 100644
--- a/compiler/utils/arm/assembler_arm.h
+++ b/compiler/utils/arm/assembler_arm.h
@@ -20,6 +20,7 @@
 #include <vector>
 
 #include "base/logging.h"
+#include "base/value_object.h"
 #include "constants_arm.h"
 #include "utils/arm/managed_register_arm.h"
 #include "utils/assembler.h"
@@ -179,8 +180,12 @@
   DB_W         = (8|0|1) << 21,  // decrement before with writeback to base
   IB_W         = (8|4|1) << 21   // increment before with writeback to base
 };
+inline std::ostream& operator<<(std::ostream& os, const BlockAddressMode& rhs) {
+  os << static_cast<int>(rhs);
+  return os;
+}
 
-class Address {
+class Address : public ValueObject {
  public:
   // Memory operand addressing mode (in ARM encoding form.  For others we need
   // to adjust)
@@ -260,13 +265,17 @@
   }
 
  private:
-  Register rn_;
-  Register rm_;
-  int32_t offset_;      // Used as shift amount for register offset.
-  Mode am_;
-  bool is_immed_offset_;
-  Shift shift_;
+  const Register rn_;
+  const Register rm_;
+  const int32_t offset_;      // Used as shift amount for register offset.
+  const Mode am_;
+  const bool is_immed_offset_;
+  const Shift shift_;
 };
+inline std::ostream& operator<<(std::ostream& os, const Address::Mode& rhs) {
+  os << static_cast<int>(rhs);
+  return os;
+}
 
 // Instruction encoding bits.
 enum {
@@ -344,10 +353,6 @@
 
 extern const char* kRegisterNames[];
 extern const char* kConditionNames[];
-extern std::ostream& operator<<(std::ostream& os, const Register& rhs);
-extern std::ostream& operator<<(std::ostream& os, const SRegister& rhs);
-extern std::ostream& operator<<(std::ostream& os, const DRegister& rhs);
-extern std::ostream& operator<<(std::ostream& os, const Condition& rhs);
 
 // This is an abstract ARM assembler.  Subclasses provide assemblers for the individual
 // instruction sets (ARM32, Thumb2, etc.)
@@ -448,8 +453,10 @@
   virtual void bkpt(uint16_t imm16) = 0;
   virtual void svc(uint32_t imm24) = 0;
 
-  virtual void it(Condition firstcond, ItState i1 = kItOmitted,
-                  ItState i2 = kItOmitted, ItState i3 = kItOmitted) {
+  virtual void it(Condition firstcond ATTRIBUTE_UNUSED,
+                  ItState i1 ATTRIBUTE_UNUSED = kItOmitted,
+                  ItState i2 ATTRIBUTE_UNUSED = kItOmitted,
+                  ItState i3 ATTRIBUTE_UNUSED = kItOmitted) {
     // Ignored if not supported.
   }
 
@@ -537,14 +544,9 @@
                            Condition cond = AL) = 0;
   virtual void AddConstantSetFlags(Register rd, Register rn, int32_t value,
                                    Condition cond = AL) = 0;
-  virtual void AddConstantWithCarry(Register rd, Register rn, int32_t value,
-                                    Condition cond = AL) = 0;
 
   // Load and Store. May clobber IP.
   virtual void LoadImmediate(Register rd, int32_t value, Condition cond = AL) = 0;
-  virtual void LoadSImmediate(SRegister sd, float value, Condition cond = AL) = 0;
-  virtual void LoadDImmediate(DRegister dd, double value,
-                              Register scratch, Condition cond = AL) = 0;
   virtual void MarkExceptionHandler(Label* label) = 0;
   virtual void LoadFromOffset(LoadOperandType type,
                               Register reg,
diff --git a/compiler/utils/arm/assembler_arm32.cc b/compiler/utils/arm/assembler_arm32.cc
index d262b6a..c8a57b1 100644
--- a/compiler/utils/arm/assembler_arm32.cc
+++ b/compiler/utils/arm/assembler_arm32.cc
@@ -1303,7 +1303,6 @@
   }
 }
 
-
 void Arm32Assembler::LoadImmediate(Register rd, int32_t value, Condition cond) {
   ShifterOperand shifter_op;
   if (ShifterOperand::CanHoldArm(value, &shifter_op)) {
@@ -1483,12 +1482,12 @@
 }
 
 
-void Arm32Assembler::cbz(Register rn, Label* target) {
+void Arm32Assembler::cbz(Register rn ATTRIBUTE_UNUSED, Label* target ATTRIBUTE_UNUSED) {
   LOG(FATAL) << "cbz is not supported on ARM32";
 }
 
 
-void Arm32Assembler::cbnz(Register rn, Label* target) {
+void Arm32Assembler::cbnz(Register rn ATTRIBUTE_UNUSED, Label* target ATTRIBUTE_UNUSED) {
   LOG(FATAL) << "cbnz is not supported on ARM32";
 }
 
diff --git a/compiler/utils/arm/assembler_arm32.h b/compiler/utils/arm/assembler_arm32.h
index cfc300b..dbabb99 100644
--- a/compiler/utils/arm/assembler_arm32.h
+++ b/compiler/utils/arm/assembler_arm32.h
@@ -238,14 +238,9 @@
                    Condition cond = AL) OVERRIDE;
   void AddConstantSetFlags(Register rd, Register rn, int32_t value,
                            Condition cond = AL) OVERRIDE;
-  void AddConstantWithCarry(Register rd, Register rn, int32_t value,
-                            Condition cond = AL) {}
 
   // Load and Store. May clobber IP.
   void LoadImmediate(Register rd, int32_t value, Condition cond = AL) OVERRIDE;
-  void LoadSImmediate(SRegister sd, float value, Condition cond = AL) {}
-  void LoadDImmediate(DRegister dd, double value,
-                      Register scratch, Condition cond = AL) {}
   void MarkExceptionHandler(Label* label) OVERRIDE;
   void LoadFromOffset(LoadOperandType type,
                       Register reg,
diff --git a/compiler/utils/arm/assembler_thumb2.cc b/compiler/utils/arm/assembler_thumb2.cc
index 633f55b..fd2613a 100644
--- a/compiler/utils/arm/assembler_thumb2.cc
+++ b/compiler/utils/arm/assembler_thumb2.cc
@@ -152,6 +152,8 @@
 
 
 void Thumb2Assembler::mul(Register rd, Register rn, Register rm, Condition cond) {
+  CheckCondition(cond);
+
   if (rd == rm && !IsHighRegister(rd) && !IsHighRegister(rn) && !force_32bit_) {
     // 16 bit.
     int16_t encoding = B14 | B9 | B8 | B6 |
@@ -176,6 +178,8 @@
 
 void Thumb2Assembler::mla(Register rd, Register rn, Register rm, Register ra,
                           Condition cond) {
+  CheckCondition(cond);
+
   uint32_t op1 = 0U /* 0b000 */;
   uint32_t op2 = 0U /* 0b00 */;
   int32_t encoding = B31 | B30 | B29 | B28 | B27 | B25 | B24 |
@@ -192,6 +196,8 @@
 
 void Thumb2Assembler::mls(Register rd, Register rn, Register rm, Register ra,
                           Condition cond) {
+  CheckCondition(cond);
+
   uint32_t op1 = 0U /* 0b000 */;
   uint32_t op2 = 01 /* 0b01 */;
   int32_t encoding = B31 | B30 | B29 | B28 | B27 | B25 | B24 |
@@ -208,6 +214,8 @@
 
 void Thumb2Assembler::umull(Register rd_lo, Register rd_hi, Register rn,
                             Register rm, Condition cond) {
+  CheckCondition(cond);
+
   uint32_t op1 = 2U /* 0b010; */;
   uint32_t op2 = 0U /* 0b0000 */;
   int32_t encoding = B31 | B30 | B29 | B28 | B27 | B25 | B24 | B23 |
@@ -223,6 +231,8 @@
 
 
 void Thumb2Assembler::sdiv(Register rd, Register rn, Register rm, Condition cond) {
+  CheckCondition(cond);
+
   uint32_t op1 = 1U  /* 0b001 */;
   uint32_t op2 = 15U /* 0b1111 */;
   int32_t encoding = B31 | B30 | B29 | B28 | B27 | B25 | B24 | B23 | B20 |
@@ -238,6 +248,8 @@
 
 
 void Thumb2Assembler::udiv(Register rd, Register rn, Register rm, Condition cond) {
+  CheckCondition(cond);
+
   uint32_t op1 = 1U  /* 0b001 */;
   uint32_t op2 = 15U /* 0b1111 */;
   int32_t encoding = B31 | B30 | B29 | B28 | B27 | B25 | B24 | B23 | B21 | B20 |
@@ -293,6 +305,7 @@
 
 
 void Thumb2Assembler::ldrd(Register rd, const Address& ad, Condition cond) {
+  CheckCondition(cond);
   CHECK_EQ(rd % 2, 0);
   // This is different from other loads.  The encoding is like ARM.
   int32_t encoding = B31 | B30 | B29 | B27 | B22 | B20 |
@@ -304,6 +317,7 @@
 
 
 void Thumb2Assembler::strd(Register rd, const Address& ad, Condition cond) {
+  CheckCondition(cond);
   CHECK_EQ(rd % 2, 0);
   // This is different from other loads.  The encoding is like ARM.
   int32_t encoding = B31 | B30 | B29 | B27 | B22 |
@@ -609,9 +623,9 @@
 }
 
 
-bool Thumb2Assembler::Is32BitDataProcessing(Condition cond,
+bool Thumb2Assembler::Is32BitDataProcessing(Condition cond ATTRIBUTE_UNUSED,
                                             Opcode opcode,
-                                            int set_cc,
+                                            bool set_cc ATTRIBUTE_UNUSED,
                                             Register rn,
                                             Register rd,
                                             const ShifterOperand& so) {
@@ -727,9 +741,9 @@
 }
 
 
-void Thumb2Assembler::Emit32BitDataProcessing(Condition cond,
+void Thumb2Assembler::Emit32BitDataProcessing(Condition cond ATTRIBUTE_UNUSED,
                                               Opcode opcode,
-                                              int set_cc,
+                                              bool set_cc,
                                               Register rn,
                                               Register rd,
                                               const ShifterOperand& so) {
@@ -789,7 +803,7 @@
       }
       encoding = B31 | B30 | B29 | B28 |
           thumb_opcode << 21 |
-          set_cc << 20 |
+          (set_cc ? 1 : 0) << 20 |
           rn << 16 |
           rd << 8 |
           imm;
@@ -798,7 +812,7 @@
      // Register (possibly shifted)
      encoding = B31 | B30 | B29 | B27 | B25 |
          thumb_opcode << 21 |
-         set_cc << 20 |
+         (set_cc ? 1 : 0) << 20 |
          rn << 16 |
          rd << 8 |
          so.encodingThumb();
@@ -809,7 +823,7 @@
 
 void Thumb2Assembler::Emit16BitDataProcessing(Condition cond,
                                               Opcode opcode,
-                                              int set_cc,
+                                              bool set_cc,
                                               Register rn,
                                               Register rd,
                                               const ShifterOperand& so) {
@@ -936,9 +950,9 @@
 
 
 // ADD and SUB are complex enough to warrant their own emitter.
-void Thumb2Assembler::Emit16BitAddSub(Condition cond,
+void Thumb2Assembler::Emit16BitAddSub(Condition cond ATTRIBUTE_UNUSED,
                                       Opcode opcode,
-                                      int set_cc,
+                                      bool set_cc ATTRIBUTE_UNUSED,
                                       Register rn,
                                       Register rd,
                                       const ShifterOperand& so) {
@@ -1075,7 +1089,7 @@
 
 void Thumb2Assembler::EmitDataProcessing(Condition cond,
                                          Opcode opcode,
-                                         int set_cc,
+                                         bool set_cc,
                                          Register rn,
                                          Register rd,
                                          const ShifterOperand& so) {
@@ -1405,7 +1419,7 @@
 
 
 void Thumb2Assembler::EmitMultiMemOp(Condition cond,
-                                     BlockAddressMode am,
+                                     BlockAddressMode bam,
                                      bool load,
                                      Register base,
                                      RegList regs) {
@@ -1417,7 +1431,7 @@
     must_be_32bit = true;
   }
 
-  uint32_t w_bit = am == IA_W || am == DB_W || am == DA_W || am == IB_W;
+  bool w_bit = bam == IA_W || bam == DB_W || bam == DA_W || bam == IB_W;
   // 16 bit always uses writeback.
   if (!w_bit) {
     must_be_32bit = true;
@@ -1425,7 +1439,7 @@
 
   if (must_be_32bit) {
     uint32_t op = 0;
-    switch (am) {
+    switch (bam) {
       case IA:
       case IA_W:
         op = 1U /* 0b01 */;
@@ -1438,7 +1452,7 @@
       case IB:
       case DA_W:
       case IB_W:
-        LOG(FATAL) << "LDM/STM mode not supported on thumb: " << am;
+        LOG(FATAL) << "LDM/STM mode not supported on thumb: " << bam;
     }
     if (load) {
       // Cannot have SP in the list.
@@ -2354,7 +2368,6 @@
   }
 }
 
-
 void Thumb2Assembler::LoadImmediate(Register rd, int32_t value, Condition cond) {
   ShifterOperand shifter_op;
   if (ShifterOperand::CanHoldThumb(rd, R0, MOV, value, &shifter_op)) {
diff --git a/compiler/utils/arm/assembler_thumb2.h b/compiler/utils/arm/assembler_thumb2.h
index b26173f..9ccdef7 100644
--- a/compiler/utils/arm/assembler_thumb2.h
+++ b/compiler/utils/arm/assembler_thumb2.h
@@ -269,14 +269,9 @@
                    Condition cond = AL) OVERRIDE;
   void AddConstantSetFlags(Register rd, Register rn, int32_t value,
                            Condition cond = AL) OVERRIDE;
-  void AddConstantWithCarry(Register rd, Register rn, int32_t value,
-                            Condition cond = AL) {}
 
   // Load and Store. May clobber IP.
   void LoadImmediate(Register rd, int32_t value, Condition cond = AL) OVERRIDE;
-  void LoadSImmediate(SRegister sd, float value, Condition cond = AL) {}
-  void LoadDImmediate(DRegister dd, double value,
-                      Register scratch, Condition cond = AL) {}
   void MarkExceptionHandler(Label* label) OVERRIDE;
   void LoadFromOffset(LoadOperandType type,
                       Register reg,
@@ -324,40 +319,40 @@
  private:
   // Emit a single 32 or 16 bit data processing instruction.
   void EmitDataProcessing(Condition cond,
-                  Opcode opcode,
-                  int set_cc,
-                  Register rn,
-                  Register rd,
-                  const ShifterOperand& so);
+                          Opcode opcode,
+                          bool set_cc,
+                          Register rn,
+                          Register rd,
+                          const ShifterOperand& so);
 
   // Must the instruction be 32 bits or can it possibly be encoded
   // in 16 bits?
   bool Is32BitDataProcessing(Condition cond,
-                  Opcode opcode,
-                  int set_cc,
-                  Register rn,
-                  Register rd,
-                  const ShifterOperand& so);
+                             Opcode opcode,
+                             bool set_cc,
+                             Register rn,
+                             Register rd,
+                             const ShifterOperand& so);
 
   // Emit a 32 bit data processing instruction.
   void Emit32BitDataProcessing(Condition cond,
-                  Opcode opcode,
-                  int set_cc,
-                  Register rn,
-                  Register rd,
-                  const ShifterOperand& so);
+                               Opcode opcode,
+                               bool set_cc,
+                               Register rn,
+                               Register rd,
+                               const ShifterOperand& so);
 
   // Emit a 16 bit data processing instruction.
   void Emit16BitDataProcessing(Condition cond,
-                  Opcode opcode,
-                  int set_cc,
-                  Register rn,
-                  Register rd,
-                  const ShifterOperand& so);
+                               Opcode opcode,
+                               bool set_cc,
+                               Register rn,
+                               Register rd,
+                               const ShifterOperand& so);
 
   void Emit16BitAddSub(Condition cond,
                        Opcode opcode,
-                       int set_cc,
+                       bool set_cc,
                        Register rn,
                        Register rd,
                        const ShifterOperand& so);
@@ -365,12 +360,12 @@
   uint16_t EmitCompareAndBranch(Register rn, uint16_t prev, bool n);
 
   void EmitLoadStore(Condition cond,
-                 bool load,
-                 bool byte,
-                 bool half,
-                 bool is_signed,
-                 Register rd,
-                 const Address& ad);
+                     bool load,
+                     bool byte,
+                     bool half,
+                     bool is_signed,
+                     Register rd,
+                     const Address& ad);
 
   void EmitMemOpAddressMode3(Condition cond,
                              int32_t mode,
diff --git a/compiler/utils/arm/constants_arm.h b/compiler/utils/arm/constants_arm.h
index 702e03a..1513296 100644
--- a/compiler/utils/arm/constants_arm.h
+++ b/compiler/utils/arm/constants_arm.h
@@ -38,15 +38,6 @@
 // Constants for specific fields are defined in their respective named enums.
 // General constants are in an anonymous enum in class Instr.
 
-
-// We support both VFPv3-D16 and VFPv3-D32 profiles, but currently only one at
-// a time, so that compile time optimizations can be applied.
-// Warning: VFPv3-D32 is untested.
-#define VFPv3_D16
-#if defined(VFPv3_D16) == defined(VFPv3_D32)
-#error "Exactly one of VFPv3_D16 or VFPv3_D32 can be defined at a time."
-#endif
-
 // 4 bits option for the dmb instruction.
 // Order and values follows those of the ARM Architecture Reference Manual.
 enum DmbOptions {
@@ -66,26 +57,23 @@
 };
 
 // Values for double-precision floating point registers.
-enum DRegister {
-  D0  =  0,
-  D1  =  1,
-  D2  =  2,
-  D3  =  3,
-  D4  =  4,
-  D5  =  5,
-  D6  =  6,
-  D7  =  7,
-  D8  =  8,
-  D9  =  9,
+enum DRegister {  // private marker to avoid generate-operator-out.py from processing.
+  D0  = 0,
+  D1  = 1,
+  D2  = 2,
+  D3  = 3,
+  D4  = 4,
+  D5  = 5,
+  D6  = 6,
+  D7  = 7,
+  D8  = 8,
+  D9  = 9,
   D10 = 10,
   D11 = 11,
   D12 = 12,
   D13 = 13,
   D14 = 14,
   D15 = 15,
-#ifdef VFPv3_D16
-  kNumberOfDRegisters = 16,
-#else
   D16 = 16,
   D17 = 17,
   D18 = 18,
@@ -103,7 +91,6 @@
   D30 = 30,
   D31 = 31,
   kNumberOfDRegisters = 32,
-#endif
   kNumberOfOverlappingDRegisters = 16,
   kNoDRegister = -1,
 };
@@ -111,18 +98,18 @@
 
 
 // Values for the condition field as defined in section A3.2.
-enum Condition {
+enum Condition {  // private marker to avoid generate-operator-out.py from processing.
   kNoCondition = -1,
-  EQ =  0,  // equal
-  NE =  1,  // not equal
-  CS =  2,  // carry set/unsigned higher or same
-  CC =  3,  // carry clear/unsigned lower
-  MI =  4,  // minus/negative
-  PL =  5,  // plus/positive or zero
-  VS =  6,  // overflow
-  VC =  7,  // no overflow
-  HI =  8,  // unsigned higher
-  LS =  9,  // unsigned lower or same
+  EQ = 0,   // equal
+  NE = 1,   // not equal
+  CS = 2,   // carry set/unsigned higher or same
+  CC = 3,   // carry clear/unsigned lower
+  MI = 4,   // minus/negative
+  PL = 5,   // plus/positive or zero
+  VS = 6,   // overflow
+  VC = 7,   // no overflow
+  HI = 8,   // unsigned higher
+  LS = 9,   // unsigned lower or same
   GE = 10,  // signed greater than or equal
   LT = 11,  // signed less than
   GT = 12,  // signed greater than
@@ -138,16 +125,16 @@
 // as defined in section A3.4
 enum Opcode {
   kNoOperand = -1,
-  AND =  0,  // Logical AND
-  EOR =  1,  // Logical Exclusive OR
-  SUB =  2,  // Subtract
-  RSB =  3,  // Reverse Subtract
-  ADD =  4,  // Add
-  ADC =  5,  // Add with Carry
-  SBC =  6,  // Subtract with Carry
-  RSC =  7,  // Reverse Subtract with Carry
-  TST =  8,  // Test
-  TEQ =  9,  // Test Equivalence
+  AND = 0,   // Logical AND
+  EOR = 1,   // Logical Exclusive OR
+  SUB = 2,   // Subtract
+  RSB = 3,   // Reverse Subtract
+  ADD = 4,   // Add
+  ADC = 5,   // Add with Carry
+  SBC = 6,   // Subtract with Carry
+  RSC = 7,   // Reverse Subtract with Carry
+  TST = 8,   // Test
+  TEQ = 9,   // Test Equivalence
   CMP = 10,  // Compare
   CMN = 11,  // Compare Negated
   ORR = 12,  // Logical (inclusive) OR
@@ -156,7 +143,7 @@
   MVN = 15,  // Move Not
   kMaxOperand = 16
 };
-
+std::ostream& operator<<(std::ostream& os, const Opcode& rhs);
 
 // Shifter types for Data-processing operands as defined in section A5.1.2.
 enum Shift {
@@ -168,11 +155,11 @@
   RRX = 4,  // Rotate right with extend.
   kMaxShift
 };
-
+std::ostream& operator<<(std::ostream& os, const Shift& rhs);
 
 // Constants used for the decoding or encoding of the individual fields of
 // instructions. Based on the "Figure 3-1 ARM instruction set summary".
-enum InstructionFields {
+enum InstructionFields {  // private marker to avoid generate-operator-out.py from processing.
   kConditionShift = 28,
   kConditionBits = 4,
   kTypeShift = 25,
diff --git a/compiler/utils/arm64/assembler_arm64.cc b/compiler/utils/arm64/assembler_arm64.cc
index 1af7374..02011b8 100644
--- a/compiler/utils/arm64/assembler_arm64.cc
+++ b/compiler/utils/arm64/assembler_arm64.cc
@@ -474,7 +474,7 @@
   UNIMPLEMENTED(FATAL) << "Unimplemented Copy() variant";
 }
 
-void Arm64Assembler::MemoryBarrier(ManagedRegister m_scratch) {
+void Arm64Assembler::MemoryBarrier(ManagedRegister m_scratch ATTRIBUTE_UNUSED) {
   // TODO: Should we check that m_scratch is IP? - see arm.
 #if ANDROID_SMP != 0
   ___ Dmb(vixl::InnerShareable, vixl::BarrierAll);
diff --git a/compiler/utils/array_ref.h b/compiler/utils/array_ref.h
index e6b4a6a..c137e46 100644
--- a/compiler/utils/array_ref.h
+++ b/compiler/utils/array_ref.h
@@ -68,7 +68,8 @@
 
   template <typename U, size_t size>
   constexpr ArrayRef(U (&array)[size],
-                     typename std::enable_if<std::is_same<T, const U>::value, tag>::type t = tag())
+                     typename std::enable_if<std::is_same<T, const U>::value, tag>::type
+                         t ATTRIBUTE_UNUSED = tag())
     : array_(array), size_(size) {
   }
 
@@ -76,12 +77,6 @@
       : array_(array), size_(size) {
   }
 
-  template <typename U>
-  constexpr ArrayRef(U* array, size_t size,
-                     typename std::enable_if<std::is_same<T, const U>::value, tag>::type t = tag())
-      : array_(array), size_(size) {
-  }
-
   template <typename Alloc>
   explicit ArrayRef(std::vector<T, Alloc>& v)
       : array_(v.data()), size_(v.size()) {
@@ -89,7 +84,8 @@
 
   template <typename U, typename Alloc>
   ArrayRef(const std::vector<U, Alloc>& v,
-           typename std::enable_if<std::is_same<T, const U>::value, tag>::tag t = tag())
+           typename std::enable_if<std::is_same<T, const U>::value, tag>::tag
+               t ATTRIBUTE_UNUSED = tag())
       : array_(v.data()), size_(v.size()) {
   }
 
diff --git a/compiler/utils/assembler.cc b/compiler/utils/assembler.cc
index 8a1289d..6834512 100644
--- a/compiler/utils/assembler.cc
+++ b/compiler/utils/assembler.cc
@@ -125,77 +125,91 @@
   }
 }
 
-void Assembler::StoreImmediateToThread32(ThreadOffset<4> dest, uint32_t imm,
-                                         ManagedRegister scratch) {
+void Assembler::StoreImmediateToThread32(ThreadOffset<4> dest ATTRIBUTE_UNUSED,
+                                         uint32_t imm ATTRIBUTE_UNUSED,
+                                         ManagedRegister scratch ATTRIBUTE_UNUSED) {
   UNIMPLEMENTED(FATAL);
 }
 
-void Assembler::StoreImmediateToThread64(ThreadOffset<8> dest, uint32_t imm,
-                                         ManagedRegister scratch) {
+void Assembler::StoreImmediateToThread64(ThreadOffset<8> dest ATTRIBUTE_UNUSED,
+                                         uint32_t imm ATTRIBUTE_UNUSED,
+                                         ManagedRegister scratch ATTRIBUTE_UNUSED) {
   UNIMPLEMENTED(FATAL);
 }
 
-void Assembler::StoreStackOffsetToThread32(ThreadOffset<4> thr_offs,
-                                           FrameOffset fr_offs,
-                                           ManagedRegister scratch) {
+void Assembler::StoreStackOffsetToThread32(ThreadOffset<4> thr_offs ATTRIBUTE_UNUSED,
+                                           FrameOffset fr_offs ATTRIBUTE_UNUSED,
+                                           ManagedRegister scratch ATTRIBUTE_UNUSED) {
   UNIMPLEMENTED(FATAL);
 }
 
-void Assembler::StoreStackOffsetToThread64(ThreadOffset<8> thr_offs,
-                                           FrameOffset fr_offs,
-                                           ManagedRegister scratch) {
+void Assembler::StoreStackOffsetToThread64(ThreadOffset<8> thr_offs ATTRIBUTE_UNUSED,
+                                           FrameOffset fr_offs ATTRIBUTE_UNUSED,
+                                           ManagedRegister scratch ATTRIBUTE_UNUSED) {
   UNIMPLEMENTED(FATAL);
 }
 
-void Assembler::StoreStackPointerToThread32(ThreadOffset<4> thr_offs) {
+void Assembler::StoreStackPointerToThread32(ThreadOffset<4> thr_offs ATTRIBUTE_UNUSED) {
   UNIMPLEMENTED(FATAL);
 }
 
-void Assembler::StoreStackPointerToThread64(ThreadOffset<8> thr_offs) {
+void Assembler::StoreStackPointerToThread64(ThreadOffset<8> thr_offs ATTRIBUTE_UNUSED) {
   UNIMPLEMENTED(FATAL);
 }
 
-void Assembler::LoadFromThread32(ManagedRegister dest, ThreadOffset<4> src, size_t size) {
+void Assembler::LoadFromThread32(ManagedRegister dest ATTRIBUTE_UNUSED,
+                                 ThreadOffset<4> src ATTRIBUTE_UNUSED,
+                                 size_t size ATTRIBUTE_UNUSED) {
   UNIMPLEMENTED(FATAL);
 }
 
-void Assembler::LoadFromThread64(ManagedRegister dest, ThreadOffset<8> src, size_t size) {
+void Assembler::LoadFromThread64(ManagedRegister dest ATTRIBUTE_UNUSED,
+                                 ThreadOffset<8> src ATTRIBUTE_UNUSED,
+                                 size_t size ATTRIBUTE_UNUSED) {
   UNIMPLEMENTED(FATAL);
 }
 
-void Assembler::LoadRawPtrFromThread32(ManagedRegister dest, ThreadOffset<4> offs) {
+void Assembler::LoadRawPtrFromThread32(ManagedRegister dest ATTRIBUTE_UNUSED,
+                                       ThreadOffset<4> offs ATTRIBUTE_UNUSED) {
   UNIMPLEMENTED(FATAL);
 }
 
-void Assembler::LoadRawPtrFromThread64(ManagedRegister dest, ThreadOffset<8> offs) {
+void Assembler::LoadRawPtrFromThread64(ManagedRegister dest ATTRIBUTE_UNUSED,
+                                       ThreadOffset<8> offs ATTRIBUTE_UNUSED) {
   UNIMPLEMENTED(FATAL);
 }
 
-void Assembler::CopyRawPtrFromThread32(FrameOffset fr_offs, ThreadOffset<4> thr_offs,
-                                       ManagedRegister scratch) {
+void Assembler::CopyRawPtrFromThread32(FrameOffset fr_offs ATTRIBUTE_UNUSED,
+                                       ThreadOffset<4> thr_offs ATTRIBUTE_UNUSED,
+                                       ManagedRegister scratch ATTRIBUTE_UNUSED) {
   UNIMPLEMENTED(FATAL);
 }
 
-void Assembler::CopyRawPtrFromThread64(FrameOffset fr_offs, ThreadOffset<8> thr_offs,
-                                       ManagedRegister scratch) {
+void Assembler::CopyRawPtrFromThread64(FrameOffset fr_offs ATTRIBUTE_UNUSED,
+                                       ThreadOffset<8> thr_offs ATTRIBUTE_UNUSED,
+                                       ManagedRegister scratch ATTRIBUTE_UNUSED) {
   UNIMPLEMENTED(FATAL);
 }
 
-void Assembler::CopyRawPtrToThread32(ThreadOffset<4> thr_offs, FrameOffset fr_offs,
-                                     ManagedRegister scratch) {
+void Assembler::CopyRawPtrToThread32(ThreadOffset<4> thr_offs ATTRIBUTE_UNUSED,
+                                     FrameOffset fr_offs ATTRIBUTE_UNUSED,
+                                     ManagedRegister scratch ATTRIBUTE_UNUSED) {
   UNIMPLEMENTED(FATAL);
 }
 
-void Assembler::CopyRawPtrToThread64(ThreadOffset<8> thr_offs, FrameOffset fr_offs,
-                                     ManagedRegister scratch) {
+void Assembler::CopyRawPtrToThread64(ThreadOffset<8> thr_offs ATTRIBUTE_UNUSED,
+                                     FrameOffset fr_offs ATTRIBUTE_UNUSED,
+                                     ManagedRegister scratch ATTRIBUTE_UNUSED) {
   UNIMPLEMENTED(FATAL);
 }
 
-void Assembler::CallFromThread32(ThreadOffset<4> offset, ManagedRegister scratch) {
+void Assembler::CallFromThread32(ThreadOffset<4> offset ATTRIBUTE_UNUSED,
+                                 ManagedRegister scratch ATTRIBUTE_UNUSED) {
   UNIMPLEMENTED(FATAL);
 }
 
-void Assembler::CallFromThread64(ThreadOffset<8> offset, ManagedRegister scratch) {
+void Assembler::CallFromThread64(ThreadOffset<8> offset ATTRIBUTE_UNUSED,
+                                 ManagedRegister scratch ATTRIBUTE_UNUSED) {
   UNIMPLEMENTED(FATAL);
 }
 
diff --git a/compiler/utils/assembler.h b/compiler/utils/assembler.h
index 2b0c94c..e1b6d7c 100644
--- a/compiler/utils/assembler.h
+++ b/compiler/utils/assembler.h
@@ -366,7 +366,7 @@
   }
 
   // TODO: Implement with disassembler.
-  virtual void Comment(const char* format, ...) { }
+  virtual void Comment(const char* format, ...) { UNUSED(format); }
 
   // Emit code that will create an activation on the stack
   virtual void BuildFrame(size_t frame_size, ManagedRegister method_reg,
diff --git a/compiler/utils/growable_array.h b/compiler/utils/growable_array.h
index 61e420c..fde65e7 100644
--- a/compiler/utils/growable_array.h
+++ b/compiler/utils/growable_array.h
@@ -19,26 +19,20 @@
 
 #include <stdint.h>
 #include <stddef.h>
-#include "arena_allocator.h"
+
+#include "arena_object.h"
 
 namespace art {
 
-// Type of growable list for memory tuning.
-enum OatListKind {
-  kGrowableArrayMisc = 0,
-  kGNumListKinds
-};
-
 // Deprecated
 // TODO: Replace all uses with ArenaVector<T>.
 template<typename T>
-class GrowableArray {
+class GrowableArray : public ArenaObject<kArenaAllocGrowableArray> {
   public:
-    GrowableArray(ArenaAllocator* arena, size_t init_length, OatListKind kind = kGrowableArrayMisc)
+    GrowableArray(ArenaAllocator* arena, size_t init_length)
       : arena_(arena),
         num_allocated_(init_length),
-        num_used_(0),
-        kind_(kind) {
+        num_used_(0) {
       elem_list_ = static_cast<T*>(arena_->Alloc(sizeof(T) * init_length,
                                                  kArenaAllocGrowableArray));
     }
@@ -152,16 +146,10 @@
 
     T* GetRawStorage() const { return elem_list_; }
 
-    static void* operator new(size_t size, ArenaAllocator* arena) {
-      return arena->Alloc(sizeof(GrowableArray<T>), kArenaAllocGrowableArray);
-    }
-    static void operator delete(void* p) {}  // Nop.
-
   private:
     ArenaAllocator* const arena_;
     size_t num_allocated_;
     size_t num_used_;
-    OatListKind kind_;
     T* elem_list_;
 };
 
diff --git a/compiler/utils/scoped_arena_containers.h b/compiler/utils/scoped_arena_containers.h
index 0de7403..df93b27 100644
--- a/compiler/utils/scoped_arena_containers.h
+++ b/compiler/utils/scoped_arena_containers.h
@@ -140,12 +140,15 @@
   const_pointer address(const_reference x) const { return &x; }
 
   pointer allocate(size_type n, ScopedArenaAllocatorAdapter<void>::pointer hint = nullptr) {
+    UNUSED(hint);
     DCHECK_LE(n, max_size());
     DebugStackIndirectTopRef::CheckTop();
     return reinterpret_cast<T*>(arena_stack_->Alloc(n * sizeof(T),
                                                     ArenaAllocatorAdapterKind::Kind()));
   }
   void deallocate(pointer p, size_type n) {
+    UNUSED(p);
+    UNUSED(n);
     DebugStackIndirectTopRef::CheckTop();
   }
 
diff --git a/compiler/utils/stack_checks.h b/compiler/utils/stack_checks.h
index ce01077..e762f7d 100644
--- a/compiler/utils/stack_checks.h
+++ b/compiler/utils/stack_checks.h
@@ -35,6 +35,7 @@
 //
 // A frame is considered large when it's above kLargeFrameSize.
 static inline bool FrameNeedsStackCheck(size_t size, InstructionSet isa) {
+  UNUSED(isa);
   return size >= kLargeFrameSize;
 }