Remove -Wno-unused-parameter and -Wno-sign-promo from base cflags.

Fix the associated errors about unused parameters and implicit sign conversions.
For sign conversion this was largely in the area of enums, so add ostream
operators for the affected enums and fix tools/generate-operator-out.py.
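
A minimal sketch of the operator shape this adds (illustrative only:
the real case labels are emitted by tools/generate-operator-out.py;
the enum is the AllocatorType from allocator_type.h in the diff, and
<ostream> is assumed to be in scope):

  std::ostream& operator<<(std::ostream& os, const AllocatorType& rhs) {
    switch (rhs) {
      case kAllocatorTypeNonMoving: os << "AllocatorTypeNonMoving"; break;
      case kAllocatorTypeLOS: os << "AllocatorTypeLOS"; break;
      default: os << "AllocatorType[" << static_cast<int>(rhs) << "]"; break;
    }
    return os;
  }
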
Tidy the arena allocation code and arena-allocated data types rather than
fixing new and delete operators.
Remove dead code.
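
The unused-parameter fixes follow one of two patterns, sketched here with
hypothetical free functions modeled on the callbacks in the diff (UNUSED
and ATTRIBUTE_UNUSED are assumed to come from base/macros.h):

  // Annotate the unused parameter on the declaration...
  void BytesAllocated(void* start ATTRIBUTE_UNUSED, size_t used_bytes,
                      size_t* out) {
    *out += used_bytes;
  }
  // ...or consume it with UNUSED() in the body.
  void ObjectsAllocated(void* start, size_t used_bytes, size_t* out) {
    UNUSED(start);
    *out += used_bytes;
  }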

Change-Id: I5b433e722d2f75baacfacae4d32aef4a828bfe1b
diff --git a/runtime/gc/accounting/mod_union_table.cc b/runtime/gc/accounting/mod_union_table.cc
index 753b42d..0a15e9e 100644
--- a/runtime/gc/accounting/mod_union_table.cc
+++ b/runtime/gc/accounting/mod_union_table.cc
@@ -46,6 +46,7 @@
   }
 
   inline void operator()(uint8_t* card, uint8_t expected_value, uint8_t new_value) const {
+    UNUSED(new_value);
     if (expected_value == CardTable::kCardDirty) {
       cleared_cards_->insert(card);
     }
@@ -62,6 +63,7 @@
   }
 
   void operator()(uint8_t* card, uint8_t expected_card, uint8_t new_card) const {
+    UNUSED(new_card);
     if (expected_card == CardTable::kCardDirty) {
       cleared_cards_->push_back(card);
     }
diff --git a/runtime/gc/accounting/remembered_set.cc b/runtime/gc/accounting/remembered_set.cc
index d43dc0a..b16a146 100644
--- a/runtime/gc/accounting/remembered_set.cc
+++ b/runtime/gc/accounting/remembered_set.cc
@@ -43,6 +43,7 @@
       : dirty_cards_(dirty_cards) {}
 
   void operator()(uint8_t* card, uint8_t expected_value, uint8_t new_value) const {
+    UNUSED(new_value);
     if (expected_value == CardTable::kCardDirty) {
       dirty_cards_->insert(card);
     }
diff --git a/runtime/gc/accounting/space_bitmap_test.cc b/runtime/gc/accounting/space_bitmap_test.cc
index 40856fc..850325a 100644
--- a/runtime/gc/accounting/space_bitmap_test.cc
+++ b/runtime/gc/accounting/space_bitmap_test.cc
@@ -91,7 +91,7 @@
  public:
   explicit SimpleCounter(size_t* counter) : count_(counter) {}
 
-  void operator()(mirror::Object* obj) const {
+  void operator()(mirror::Object* obj ATTRIBUTE_UNUSED) const {
     (*count_)++;
   }
 
diff --git a/runtime/gc/allocator/dlmalloc.cc b/runtime/gc/allocator/dlmalloc.cc
index fbeba7f..acff52d 100644
--- a/runtime/gc/allocator/dlmalloc.cc
+++ b/runtime/gc/allocator/dlmalloc.cc
@@ -43,7 +43,8 @@
 }
 
 static void art_heap_usage_error(const char* function, void* p) {
-  LOG(::art::FATAL) << "Incorrect use of function '" << function << "' argument " << p << " not expected";
+  LOG(::art::FATAL) << "Incorrect use of function '" << function << "' argument " << p
+      << " not expected";
 }
 
 #include "globals.h"
@@ -70,7 +71,9 @@
   }
 }
 
-extern "C" void DlmallocBytesAllocatedCallback(void* start, void* end, size_t used_bytes, void* arg) {
+extern "C" void DlmallocBytesAllocatedCallback(void* start ATTRIBUTE_UNUSED,
+                                               void* end ATTRIBUTE_UNUSED, size_t used_bytes,
+                                               void* arg) {
   if (used_bytes == 0) {
     return;
   }
@@ -78,7 +81,10 @@
   *bytes_allocated += used_bytes + sizeof(size_t);
 }
 
-extern "C" void DlmallocObjectsAllocatedCallback(void* start, void* end, size_t used_bytes, void* arg) {
+extern "C" void DlmallocObjectsAllocatedCallback(void* start, void* end, size_t used_bytes,
+                                                 void* arg) {
+  UNUSED(start);
+  UNUSED(end);
   if (used_bytes == 0) {
     return;
   }
diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index fa531a7..f5e2fed 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -1851,7 +1851,8 @@
   dedicated_full_run_->SetIsThreadLocal(true);
 }
 
-void RosAlloc::BytesAllocatedCallback(void* start, void* end, size_t used_bytes, void* arg) {
+void RosAlloc::BytesAllocatedCallback(void* start ATTRIBUTE_UNUSED, void* end ATTRIBUTE_UNUSED,
+                                      size_t used_bytes, void* arg) {
   if (used_bytes == 0) {
     return;
   }
@@ -1859,7 +1860,8 @@
   *bytes_allocated += used_bytes;
 }
 
-void RosAlloc::ObjectsAllocatedCallback(void* start, void* end, size_t used_bytes, void* arg) {
+void RosAlloc::ObjectsAllocatedCallback(void* start ATTRIBUTE_UNUSED, void* end ATTRIBUTE_UNUSED,
+                                        size_t used_bytes, void* arg) {
   if (used_bytes == 0) {
     return;
   }
diff --git a/runtime/gc/allocator/rosalloc.h b/runtime/gc/allocator/rosalloc.h
index ad7f901..a2f8342 100644
--- a/runtime/gc/allocator/rosalloc.h
+++ b/runtime/gc/allocator/rosalloc.h
@@ -105,6 +105,9 @@
         rosalloc->ReleasePageRange(start, start + byte_size);
       }
     }
+
+   private:
+    DISALLOW_COPY_AND_ASSIGN(FreePageRun);
   };
 
   // Represents a run of memory slots of the same size.
@@ -256,6 +259,8 @@
     size_t MarkFreeBitMapShared(void* ptr, uint32_t* free_bit_map_base, const char* caller_name);
     // Turns the bit map into a string for debugging.
     static std::string BitMapToStr(uint32_t* bit_map_base, size_t num_vec);
+
+    // TODO: DISALLOW_COPY_AND_ASSIGN(Run);
   };
 
   // The magic number for a run.
@@ -446,7 +451,7 @@
   // Bracket lock names (since locks only have char* names).
   std::string size_bracket_lock_names_[kNumOfSizeBrackets];
   // The types of page map entries.
-  enum {
+  enum PageMapKind {
     kPageMapReleased = 0,     // Zero and released back to the OS.
     kPageMapEmpty,            // Zero but probably dirty.
     kPageMapRun,              // The beginning of a run.
@@ -526,11 +531,15 @@
   // Release a range of pages.
   size_t ReleasePageRange(uint8_t* start, uint8_t* end) EXCLUSIVE_LOCKS_REQUIRED(lock_);
 
+  // Dumps the page map for debugging.
+  std::string DumpPageMap() EXCLUSIVE_LOCKS_REQUIRED(lock_);
+
  public:
   RosAlloc(void* base, size_t capacity, size_t max_capacity,
            PageReleaseMode page_release_mode,
            size_t page_release_size_threshold = kDefaultPageReleaseSizeThreshold);
   ~RosAlloc();
+
   // If kThreadUnsafe is true then the allocator may avoid acquiring some locks as an optimization.
   // If used, this may cause race conditions if multiple threads are allocating at the same time.
   template<bool kThreadSafe = true>
@@ -540,6 +549,7 @@
       LOCKS_EXCLUDED(bulk_free_lock_);
   size_t BulkFree(Thread* self, void** ptrs, size_t num_ptrs)
       LOCKS_EXCLUDED(bulk_free_lock_);
+
   // Returns the size of the allocated slot for a given allocated memory chunk.
   size_t UsableSize(void* ptr);
   // Returns the size of the allocated slot for a given size.
@@ -557,6 +567,7 @@
   void InspectAll(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg),
                   void* arg)
       LOCKS_EXCLUDED(lock_);
+
   // Release empty pages.
   size_t ReleasePages() LOCKS_EXCLUDED(lock_);
   // Returns the current footprint.
@@ -565,6 +576,7 @@
   size_t FootprintLimit() LOCKS_EXCLUDED(lock_);
   // Update the current capacity.
   void SetFootprintLimit(size_t bytes) LOCKS_EXCLUDED(lock_);
+
   // Releases the thread-local runs assigned to the given thread back to the common set of runs.
   void RevokeThreadLocalRuns(Thread* thread);
   // Releases the thread-local runs assigned to all the threads back to the common set of runs.
@@ -573,8 +585,7 @@
   void AssertThreadLocalRunsAreRevoked(Thread* thread);
   // Assert all the thread local runs are revoked.
   void AssertAllThreadLocalRunsAreRevoked() LOCKS_EXCLUDED(Locks::thread_list_lock_);
-  // Dumps the page map for debugging.
-  std::string DumpPageMap() EXCLUSIVE_LOCKS_REQUIRED(lock_);
+
   static Run* GetDedicatedFullRun() {
     return dedicated_full_run_;
   }
@@ -597,7 +608,13 @@
   void Verify() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes);
+
+ private:
+  friend std::ostream& operator<<(std::ostream& os, const RosAlloc::PageMapKind& rhs);
+
+  DISALLOW_COPY_AND_ASSIGN(RosAlloc);
 };
+std::ostream& operator<<(std::ostream& os, const RosAlloc::PageMapKind& rhs);
 
 }  // namespace allocator
 }  // namespace gc
diff --git a/runtime/gc/allocator_type.h b/runtime/gc/allocator_type.h
index 938b0f1..c6ebc73 100644
--- a/runtime/gc/allocator_type.h
+++ b/runtime/gc/allocator_type.h
@@ -17,6 +17,8 @@
 #ifndef ART_RUNTIME_GC_ALLOCATOR_TYPE_H_
 #define ART_RUNTIME_GC_ALLOCATOR_TYPE_H_
 
+#include <ostream>
+
 namespace art {
 namespace gc {
 
@@ -29,6 +31,7 @@
   kAllocatorTypeNonMoving,  // Special allocator for non moving objects, doesn't have entrypoints.
   kAllocatorTypeLOS,  // Large object space, also doesn't have entrypoints.
 };
+std::ostream& operator<<(std::ostream& os, const AllocatorType& rhs);
 
 }  // namespace gc
 }  // namespace art
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index ce7c75a..ee5a785 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -29,7 +29,9 @@
                              const std::string& name_prefix = "")
       : GarbageCollector(heap,
                          name_prefix + (name_prefix.empty() ? "" : " ") +
-                         "concurrent copying + mark sweep") {}
+                         "concurrent copying + mark sweep") {
+    UNUSED(generational);
+  }
 
   ~ConcurrentCopying() {}
 
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index ad3bb11..e3966e3 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -658,6 +658,7 @@
   // Scans all of the objects
   virtual void Run(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+    UNUSED(self);
     ScanObjectParallelVisitor visitor(this);
     // TODO: Tune this.
     static const size_t kFifoSize = 4;
diff --git a/runtime/gc/collector/sticky_mark_sweep.cc b/runtime/gc/collector/sticky_mark_sweep.cc
index 4ed6abc..5be3db7 100644
--- a/runtime/gc/collector/sticky_mark_sweep.cc
+++ b/runtime/gc/collector/sticky_mark_sweep.cc
@@ -58,6 +58,7 @@
 }
 
 void StickyMarkSweep::Sweep(bool swap_bitmaps) {
+  UNUSED(swap_bitmaps);
   SweepArray(GetHeap()->GetLiveStack(), false);
 }
 
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 8e080d1..9fd9a2b 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -265,14 +265,13 @@
   }
   // Attempt to create 2 mem maps at or after the requested begin.
   main_mem_map_1.reset(MapAnonymousPreferredAddress(kMemMapSpaceName[0], request_begin, capacity_,
-                                                    PROT_READ | PROT_WRITE, &error_str));
+                                                    &error_str));
   CHECK(main_mem_map_1.get() != nullptr) << error_str;
   if (support_homogeneous_space_compaction ||
       background_collector_type_ == kCollectorTypeSS ||
       foreground_collector_type_ == kCollectorTypeSS) {
     main_mem_map_2.reset(MapAnonymousPreferredAddress(kMemMapSpaceName[1], main_mem_map_1->End(),
-                                                      capacity_, PROT_READ | PROT_WRITE,
-                                                      &error_str));
+                                                      capacity_, &error_str));
     CHECK(main_mem_map_2.get() != nullptr) << error_str;
   }
   // Create the non moving space first so that bitmaps don't take up the address range.
@@ -435,8 +434,8 @@
   }
 }
 
-MemMap* Heap::MapAnonymousPreferredAddress(const char* name, uint8_t* request_begin, size_t capacity,
-                                           int prot_flags, std::string* out_error_str) {
+MemMap* Heap::MapAnonymousPreferredAddress(const char* name, uint8_t* request_begin,
+                                           size_t capacity, std::string* out_error_str) {
   while (true) {
     MemMap* map = MemMap::MapAnonymous(name, request_begin, capacity,
                                        PROT_READ | PROT_WRITE, true, out_error_str);
@@ -887,7 +886,7 @@
   if (result != NULL) {
     return result;
   }
-  return FindDiscontinuousSpaceFromObject(obj, true);
+  return FindDiscontinuousSpaceFromObject(obj, fail_ok);
 }
 
 space::ImageSpace* Heap::GetImageSpace() const {
@@ -1832,6 +1831,7 @@
   virtual bool ShouldSweepSpace(space::ContinuousSpace* space) const {
     // Don't sweep any spaces since we probably blasted the internal accounting of the free list
     // allocator.
+    UNUSED(space);
     return false;
   }
 
@@ -2239,6 +2239,7 @@
 
   void operator()(mirror::Class* klass, mirror::Reference* ref) const
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    UNUSED(klass);
     if (verify_referent_) {
       VerifyReference(ref, ref->GetReferent(), mirror::Reference::ReferentOffset());
     }
@@ -2583,6 +2584,7 @@
 }
 
 void Heap::SwapStacks(Thread* self) {
+  UNUSED(self);
   if (kUseThreadLocalAllocationStack) {
     live_stack_->AssertAllZero();
   }
@@ -2711,6 +2713,7 @@
 }
 
 void Heap::PrePauseRosAllocVerification(collector::GarbageCollector* gc) {
+  UNUSED(gc);
   // TODO: Add a new runtime option for this?
   if (verify_pre_gc_rosalloc_) {
     RosAllocVerification(current_gc_iteration_.GetTimings(), "PreGcRosAllocVerification");
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 7b891a6..cf7352e 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -622,8 +622,7 @@
 
   // Create a mem map with a preferred base address.
   static MemMap* MapAnonymousPreferredAddress(const char* name, uint8_t* request_begin,
-                                              size_t capacity, int prot_flags,
-                                              std::string* out_error_str);
+                                              size_t capacity, std::string* out_error_str);
 
   bool SupportHSpaceCompaction() const {
     // Returns true if we can do hspace compaction
diff --git a/runtime/gc/space/dlmalloc_space.cc b/runtime/gc/space/dlmalloc_space.cc
index d2d95b4..445c720 100644
--- a/runtime/gc/space/dlmalloc_space.cc
+++ b/runtime/gc/space/dlmalloc_space.cc
@@ -314,6 +314,7 @@
 }
 
 void DlMallocSpace::LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) {
+  UNUSED(failed_alloc_bytes);
   Thread* self = Thread::Current();
   size_t max_contiguous_allocation = 0;
   // To allow the Walk/InspectAll() to exclusively-lock the mutator
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index 9434bfe..c0c6444 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -159,7 +159,11 @@
   MutexLock mu(Thread::Current(), lock_);
   auto found = mem_maps_.find(obj);
   CHECK(found != mem_maps_.end()) << "Attempted to get size of a large object which is not live";
-  return found->second->BaseSize();
+  size_t alloc_size = found->second->BaseSize();
+  if (usable_size != nullptr) {
+    *usable_size = alloc_size;
+  }
+  return alloc_size;
 }
 
 size_t LargeObjectSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
diff --git a/runtime/gc/space/valgrind_malloc_space.h b/runtime/gc/space/valgrind_malloc_space.h
index eb6fe9c..bc870a6 100644
--- a/runtime/gc/space/valgrind_malloc_space.h
+++ b/runtime/gc/space/valgrind_malloc_space.h
@@ -44,6 +44,7 @@
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void RegisterRecentFree(mirror::Object* ptr) OVERRIDE {
+    UNUSED(ptr);
   }
 
   ValgrindMallocSpace(const std::string& name, MemMap* mem_map, AllocatorType allocator,
diff --git a/runtime/gc/space/zygote_space.cc b/runtime/gc/space/zygote_space.cc
index 9de0548..a868e68 100644
--- a/runtime/gc/space/zygote_space.cc
+++ b/runtime/gc/space/zygote_space.cc
@@ -32,6 +32,7 @@
       : objects_allocated_(objects_allocated) {}
 
   void operator()(mirror::Object* obj) const {
+    UNUSED(obj);
     ++*objects_allocated_;
   }
 
@@ -76,30 +77,29 @@
       << ",name=\"" << GetName() << "\"]";
 }
 
-mirror::Object* ZygoteSpace::Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
-                                   size_t* usable_size) {
+mirror::Object* ZygoteSpace::Alloc(Thread*, size_t, size_t*, size_t*) {
   UNIMPLEMENTED(FATAL);
-  return nullptr;
+  UNREACHABLE();
 }
 
-size_t ZygoteSpace::AllocationSize(mirror::Object* obj, size_t* usable_size) {
+size_t ZygoteSpace::AllocationSize(mirror::Object*, size_t*) {
   UNIMPLEMENTED(FATAL);
-  return 0;
+  UNREACHABLE();
 }
 
-size_t ZygoteSpace::Free(Thread* self, mirror::Object* ptr) {
+size_t ZygoteSpace::Free(Thread*, mirror::Object*) {
   UNIMPLEMENTED(FATAL);
-  return 0;
+  UNREACHABLE();
 }
 
-size_t ZygoteSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
+size_t ZygoteSpace::FreeList(Thread*, size_t, mirror::Object**) {
   UNIMPLEMENTED(FATAL);
-  return 0;
+  UNREACHABLE();
 }
 
-void ZygoteSpace::LogFragmentationAllocFailure(std::ostream& /*os*/,
-                                               size_t /*failed_alloc_bytes*/) {
+void ZygoteSpace::LogFragmentationAllocFailure(std::ostream&, size_t) {
   UNIMPLEMENTED(FATAL);
+  UNREACHABLE();
 }
 
 void ZygoteSpace::SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg) {