stdint types all the way!

Replace the ART-local byte and uword types with the standard uint8_t and
uintptr_t throughout rosalloc (rosalloc.h, rosalloc.cc, rosalloc-inl.h),
and promote the rosalloc magic-number and size-bracket constants from
static const to static constexpr.

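For reference, the replaced names are ART-local typedefs along the lines of
the sketch below (approximate, shown only to make the mapping explicit;
historically they lived in runtime/globals.h):

    #include <cstdint>
    // Legacy aliases this change spells out directly:
    typedef uint8_t byte;     // now written as uint8_t
    typedef uintptr_t uword;  // now written as uintptr_t

The substitution is therefore purely textual: sizes, signedness, and
alignment are unchanged.
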
Change-Id: I4e4ef3a2002fc59ebd9097087f150eaf3f2a7e08
diff --git a/runtime/gc/allocator/rosalloc-inl.h b/runtime/gc/allocator/rosalloc-inl.h
index c69ca48..dd419a4 100644
--- a/runtime/gc/allocator/rosalloc-inl.h
+++ b/runtime/gc/allocator/rosalloc-inl.h
@@ -36,7 +36,7 @@
   }
   // Check if the returned memory is really all zero.
   if (kCheckZeroMemory && m != nullptr) {
-    byte* bytes = reinterpret_cast<byte*>(m);
+    uint8_t* bytes = reinterpret_cast<uint8_t*>(m);
     for (size_t i = 0; i < size; ++i) {
       DCHECK_EQ(bytes[i], 0);
     }
diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index a7e5e74..a3408cf 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -49,7 +49,7 @@
 
 RosAlloc::RosAlloc(void* base, size_t capacity, size_t max_capacity,
                    PageReleaseMode page_release_mode, size_t page_release_size_threshold)
-    : base_(reinterpret_cast<byte*>(base)), footprint_(capacity),
+    : base_(reinterpret_cast<uint8_t*>(base)), footprint_(capacity),
       capacity_(capacity), max_capacity_(max_capacity),
       lock_("rosalloc global lock", kRosAllocGlobalLock),
       bulk_free_lock_("rosalloc bulk free lock", kRosAllocBulkFreeLock),
@@ -107,7 +107,7 @@
   }
 }
 
-void* RosAlloc::AllocPages(Thread* self, size_t num_pages, byte page_map_type) {
+void* RosAlloc::AllocPages(Thread* self, size_t num_pages, uint8_t page_map_type) {
   lock_.AssertHeld(self);
   DCHECK(page_map_type == kPageMapRun || page_map_type == kPageMapLargeObject);
   FreePageRun* res = NULL;
@@ -128,7 +128,7 @@
       }
       if (req_byte_size < fpr_byte_size) {
         // Split.
-        FreePageRun* remainder = reinterpret_cast<FreePageRun*>(reinterpret_cast<byte*>(fpr) + req_byte_size);
+        FreePageRun* remainder = reinterpret_cast<FreePageRun*>(reinterpret_cast<uint8_t*>(fpr) + req_byte_size);
         if (kIsDebugBuild) {
           remainder->magic_num_ = kMagicNumFree;
         }
@@ -226,7 +226,7 @@
       }
       if (req_byte_size < fpr_byte_size) {
         // Split if there's a remainder.
-        FreePageRun* remainder = reinterpret_cast<FreePageRun*>(reinterpret_cast<byte*>(fpr) + req_byte_size);
+        FreePageRun* remainder = reinterpret_cast<FreePageRun*>(reinterpret_cast<uint8_t*>(fpr) + req_byte_size);
         if (kIsDebugBuild) {
           remainder->magic_num_ = kMagicNumFree;
         }
@@ -290,9 +290,9 @@
   lock_.AssertHeld(self);
   size_t pm_idx = ToPageMapIndex(ptr);
   DCHECK_LT(pm_idx, page_map_size_);
-  byte pm_type = page_map_[pm_idx];
+  uint8_t pm_type = page_map_[pm_idx];
   DCHECK(pm_type == kPageMapRun || pm_type == kPageMapLargeObject);
-  byte pm_part_type;
+  uint8_t pm_part_type;
   switch (pm_type) {
   case kPageMapRun:
     pm_part_type = kPageMapRunPart;
@@ -319,8 +319,8 @@
   const size_t byte_size = num_pages * kPageSize;
   if (already_zero) {
     if (kCheckZeroMemory) {
-      const uword* word_ptr = reinterpret_cast<uword*>(ptr);
-      for (size_t i = 0; i < byte_size / sizeof(uword); ++i) {
+      const uintptr_t* word_ptr = reinterpret_cast<uintptr_t*>(ptr);
+      for (size_t i = 0; i < byte_size / sizeof(uintptr_t); ++i) {
         CHECK_EQ(word_ptr[i], 0U) << "words don't match at index " << i;
       }
     }
@@ -473,9 +473,9 @@
   }
   // Check if the returned memory is really all zero.
   if (kCheckZeroMemory) {
-    CHECK_EQ(total_bytes % sizeof(uword), 0U);
-    const uword* words = reinterpret_cast<uword*>(r);
-    for (size_t i = 0; i < total_bytes / sizeof(uword); ++i) {
+    CHECK_EQ(total_bytes % sizeof(uintptr_t), 0U);
+    const uintptr_t* words = reinterpret_cast<uintptr_t*>(r);
+    for (size_t i = 0; i < total_bytes / sizeof(uintptr_t); ++i) {
       CHECK_EQ(words[i], 0U);
     }
   }
@@ -490,7 +490,7 @@
   {
     MutexLock mu(self, lock_);
     DCHECK_LT(pm_idx, page_map_size_);
-    byte page_map_entry = page_map_[pm_idx];
+    uint8_t page_map_entry = page_map_[pm_idx];
     if (kTraceRosAlloc) {
       LOG(INFO) << "RosAlloc::FreeInternal() : " << std::hex << ptr << ", pm_idx=" << std::dec << pm_idx
                 << ", page_map_entry=" << static_cast<int>(page_map_entry);
@@ -557,7 +557,7 @@
         const size_t num_of_slots = numOfSlots[idx];
         const size_t bracket_size = bracketSizes[idx];
         const size_t num_of_bytes = num_of_slots * bracket_size;
-        byte* begin = reinterpret_cast<byte*>(new_run) + headerSizes[idx];
+        uint8_t* begin = reinterpret_cast<uint8_t*>(new_run) + headerSizes[idx];
         for (size_t i = 0; i < num_of_bytes; i += kPrefetchStride) {
           __builtin_prefetch(begin + i);
         }
@@ -869,7 +869,7 @@
       DCHECK_EQ(*alloc_bitmap_ptr & mask, 0U);
       *alloc_bitmap_ptr |= mask;
       DCHECK_NE(*alloc_bitmap_ptr & mask, 0U);
-      byte* slot_addr = reinterpret_cast<byte*>(this) + headerSizes[idx] + slot_idx * bracketSizes[idx];
+      uint8_t* slot_addr = reinterpret_cast<uint8_t*>(this) + headerSizes[idx] + slot_idx * bracketSizes[idx];
       if (kTraceRosAlloc) {
         LOG(INFO) << "RosAlloc::Run::AllocSlot() : 0x" << std::hex << reinterpret_cast<intptr_t>(slot_addr)
                   << ", bracket_size=" << std::dec << bracketSizes[idx] << ", slot_idx=" << slot_idx;
@@ -889,10 +889,10 @@
 
 void RosAlloc::Run::FreeSlot(void* ptr) {
   DCHECK(!IsThreadLocal());
-  const byte idx = size_bracket_idx_;
+  const uint8_t idx = size_bracket_idx_;
   const size_t bracket_size = bracketSizes[idx];
-  const size_t offset_from_slot_base = reinterpret_cast<byte*>(ptr)
-      - (reinterpret_cast<byte*>(this) + headerSizes[idx]);
+  const size_t offset_from_slot_base = reinterpret_cast<uint8_t*>(ptr)
+      - (reinterpret_cast<uint8_t*>(this) + headerSizes[idx]);
   DCHECK_EQ(offset_from_slot_base % bracket_size, static_cast<size_t>(0));
   size_t slot_idx = offset_from_slot_base / bracket_size;
   DCHECK_LT(slot_idx, numOfSlots[idx]);
@@ -1001,9 +1001,9 @@
 
 inline size_t RosAlloc::Run::MarkFreeBitMapShared(void* ptr, uint32_t* free_bit_map_base,
                                                   const char* caller_name) {
-  const byte idx = size_bracket_idx_;
-  const size_t offset_from_slot_base = reinterpret_cast<byte*>(ptr)
-      - (reinterpret_cast<byte*>(this) + headerSizes[idx]);
+  const uint8_t idx = size_bracket_idx_;
+  const size_t offset_from_slot_base = reinterpret_cast<uint8_t*>(ptr)
+      - (reinterpret_cast<uint8_t*>(this) + headerSizes[idx]);
   const size_t bracket_size = bracketSizes[idx];
   memset(ptr, 0, bracket_size);
   DCHECK_EQ(offset_from_slot_base % bracket_size, static_cast<size_t>(0));
@@ -1037,7 +1037,7 @@
 }
 
 inline bool RosAlloc::Run::IsAllFree() {
-  const byte idx = size_bracket_idx_;
+  const uint8_t idx = size_bracket_idx_;
   const size_t num_slots = numOfSlots[idx];
   const size_t num_vec = NumberOfBitmapVectors();
   DCHECK_NE(num_vec, 0U);
@@ -1095,13 +1095,13 @@
 }
 
 inline void RosAlloc::Run::ZeroHeader() {
-  const byte idx = size_bracket_idx_;
+  const uint8_t idx = size_bracket_idx_;
   memset(this, 0, headerSizes[idx]);
 }
 
 inline void RosAlloc::Run::ZeroData() {
-  const byte idx = size_bracket_idx_;
-  byte* slot_begin = reinterpret_cast<byte*>(this) + headerSizes[idx];
+  const uint8_t idx = size_bracket_idx_;
+  uint8_t* slot_begin = reinterpret_cast<uint8_t*>(this) + headerSizes[idx];
   memset(slot_begin, 0, numOfSlots[idx] * bracketSizes[idx]);
 }
 
@@ -1114,10 +1114,10 @@
 void RosAlloc::Run::InspectAllSlots(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg),
                                     void* arg) {
   size_t idx = size_bracket_idx_;
-  byte* slot_base = reinterpret_cast<byte*>(this) + headerSizes[idx];
+  uint8_t* slot_base = reinterpret_cast<uint8_t*>(this) + headerSizes[idx];
   size_t num_slots = numOfSlots[idx];
   size_t bracket_size = IndexToBracketSize(idx);
-  DCHECK_EQ(slot_base + num_slots * bracket_size, reinterpret_cast<byte*>(this) + numOfPages[idx] * kPageSize);
+  DCHECK_EQ(slot_base + num_slots * bracket_size, reinterpret_cast<uint8_t*>(this) + numOfPages[idx] * kPageSize);
   size_t num_vec = RoundUp(num_slots, 32) / 32;
   size_t slots = 0;
   for (size_t v = 0; v < num_vec; v++, slots += 32) {
@@ -1126,7 +1126,7 @@
     size_t end = std::min(num_slots - slots, static_cast<size_t>(32));
     for (size_t i = 0; i < end; ++i) {
       bool is_allocated = ((vec >> i) & 0x1) != 0;
-      byte* slot_addr = slot_base + (slots + i) * bracket_size;
+      uint8_t* slot_addr = slot_base + (slots + i) * bracket_size;
       if (is_allocated) {
         handler(slot_addr, slot_addr + bracket_size, bracket_size, arg);
       } else {
@@ -1169,7 +1169,7 @@
     Run* run = nullptr;
     if (kReadPageMapEntryWithoutLockInBulkFree) {
       // Read the page map entries without locking the lock.
-      byte page_map_entry = page_map_[pm_idx];
+      uint8_t page_map_entry = page_map_[pm_idx];
       if (kTraceRosAlloc) {
         LOG(INFO) << "RosAlloc::BulkFree() : " << std::hex << ptr << ", pm_idx="
                   << std::dec << pm_idx
@@ -1196,7 +1196,7 @@
       // Read the page map entries with a lock.
       MutexLock mu(self, lock_);
       DCHECK_LT(pm_idx, page_map_size_);
-      byte page_map_entry = page_map_[pm_idx];
+      uint8_t page_map_entry = page_map_[pm_idx];
       if (kTraceRosAlloc) {
         LOG(INFO) << "RosAlloc::BulkFree() : " << std::hex << ptr << ", pm_idx="
                   << std::dec << pm_idx
@@ -1354,7 +1354,7 @@
   size_t remaining_curr_fpr_size = 0;
   size_t num_running_empty_pages = 0;
   for (size_t i = 0; i < end; ++i) {
-    byte pm = page_map_[i];
+    uint8_t pm = page_map_[i];
     switch (pm) {
       case kPageMapReleased:
         // Fall-through.
@@ -1472,8 +1472,8 @@
       Run* run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
       DCHECK_EQ(run->magic_num_, kMagicNum);
       size_t idx = run->size_bracket_idx_;
-      size_t offset_from_slot_base = reinterpret_cast<byte*>(ptr)
-          - (reinterpret_cast<byte*>(run) + headerSizes[idx]);
+      size_t offset_from_slot_base = reinterpret_cast<uint8_t*>(ptr)
+          - (reinterpret_cast<uint8_t*>(run) + headerSizes[idx]);
       DCHECK_EQ(offset_from_slot_base % bracketSizes[idx], static_cast<size_t>(0));
       return IndexToBracketSize(idx);
     }
@@ -1503,8 +1503,8 @@
     size_t new_num_of_pages = new_footprint / kPageSize;
     DCHECK_GE(page_map_size_, new_num_of_pages);
     // Zero out the tail of the page map.
-    byte* zero_begin = const_cast<byte*>(page_map_) + new_num_of_pages;
-    byte* madvise_begin = AlignUp(zero_begin, kPageSize);
+    uint8_t* zero_begin = const_cast<uint8_t*>(page_map_) + new_num_of_pages;
+    uint8_t* madvise_begin = AlignUp(zero_begin, kPageSize);
     DCHECK_LE(madvise_begin, page_map_mem_map_->End());
     size_t madvise_size = page_map_mem_map_->End() - madvise_begin;
     if (madvise_size > 0) {
@@ -1544,7 +1544,7 @@
   size_t pm_end = page_map_size_;
   size_t i = 0;
   while (i < pm_end) {
-    byte pm = page_map_[i];
+    uint8_t pm = page_map_[i];
     switch (pm) {
       case kPageMapReleased:
         // Fall-through.
@@ -1558,9 +1558,9 @@
         if (kIsDebugBuild) {
           // In the debug build, the first page of a free page run
           // contains a magic number for debugging. Exclude it.
-          start = reinterpret_cast<byte*>(fpr) + kPageSize;
+          start = reinterpret_cast<uint8_t*>(fpr) + kPageSize;
         }
-        void* end = reinterpret_cast<byte*>(fpr) + fpr_size;
+        void* end = reinterpret_cast<uint8_t*>(fpr) + fpr_size;
         handler(start, end, 0, arg);
         size_t num_pages = fpr_size / kPageSize;
         if (kIsDebugBuild) {
@@ -1879,7 +1879,7 @@
     size_t pm_end = page_map_size_;
     size_t i = 0;
     while (i < pm_end) {
-      byte pm = page_map_[i];
+      uint8_t pm = page_map_[i];
       switch (pm) {
         case kPageMapReleased:
           // Fall-through.
@@ -1994,13 +1994,13 @@
   DCHECK_EQ(magic_num_, kMagicNum) << "Bad magic number : " << Dump();
   const size_t idx = size_bracket_idx_;
   CHECK_LT(idx, kNumOfSizeBrackets) << "Out of range size bracket index : " << Dump();
-  byte* slot_base = reinterpret_cast<byte*>(this) + headerSizes[idx];
+  uint8_t* slot_base = reinterpret_cast<uint8_t*>(this) + headerSizes[idx];
   const size_t num_slots = numOfSlots[idx];
   const size_t num_vec = RoundUp(num_slots, 32) / 32;
   CHECK_GT(num_vec, 0U);
   size_t bracket_size = IndexToBracketSize(idx);
   CHECK_EQ(slot_base + num_slots * bracket_size,
-           reinterpret_cast<byte*>(this) + numOfPages[idx] * kPageSize)
+           reinterpret_cast<uint8_t*>(this) + numOfPages[idx] * kPageSize)
       << "Mismatch in the end address of the run " << Dump();
   // Check that the bulk free bitmap is clean. It's only used during BulkFree().
   CHECK(IsBulkFreeBitmapClean()) << "The bulk free bit map isn't clean " << Dump();
@@ -2084,7 +2084,7 @@
       // thread local free bitmap.
       bool is_thread_local_freed = IsThreadLocal() && ((thread_local_free_vec >> i) & 0x1) != 0;
       if (is_allocated && !is_thread_local_freed) {
-        byte* slot_addr = slot_base + (slots + i) * bracket_size;
+        uint8_t* slot_addr = slot_base + (slots + i) * bracket_size;
         mirror::Object* obj = reinterpret_cast<mirror::Object*>(slot_addr);
         size_t obj_size = obj->SizeOf();
         CHECK_LE(obj_size, kLargeSizeThreshold)
@@ -2108,7 +2108,7 @@
   while (i < page_map_size_) {
     // Reading the page map without a lock is racy but the race is benign since it should only
     // result in occasionally not releasing pages which we could release.
-    byte pm = page_map_[i];
+    uint8_t pm = page_map_[i];
     switch (pm) {
       case kPageMapReleased:
         // Fall through.
@@ -2129,7 +2129,7 @@
           if (free_page_runs_.find(fpr) != free_page_runs_.end()) {
             size_t fpr_size = fpr->ByteSize(this);
             DCHECK(IsAligned<kPageSize>(fpr_size));
-            byte* start = reinterpret_cast<byte*>(fpr);
+            uint8_t* start = reinterpret_cast<uint8_t*>(fpr);
             reclaimed_bytes += ReleasePageRange(start, start + fpr_size);
             size_t pages = fpr_size / kPageSize;
             CHECK_GT(pages, 0U) << "Infinite loop probable";
@@ -2154,7 +2154,7 @@
   return reclaimed_bytes;
 }
 
-size_t RosAlloc::ReleasePageRange(byte* start, byte* end) {
+size_t RosAlloc::ReleasePageRange(uint8_t* start, uint8_t* end) {
   DCHECK_ALIGNED(start, kPageSize);
   DCHECK_ALIGNED(end, kPageSize);
   DCHECK_LT(start, end);
diff --git a/runtime/gc/allocator/rosalloc.h b/runtime/gc/allocator/rosalloc.h
index 2fbd97a..8374ff7 100644
--- a/runtime/gc/allocator/rosalloc.h
+++ b/runtime/gc/allocator/rosalloc.h
@@ -44,13 +44,13 @@
   // Represents a run of free pages.
   class FreePageRun {
    public:
-    byte magic_num_;  // The magic number used for debugging only.
+    uint8_t magic_num_;  // The magic number used for debugging only.
 
     bool IsFree() const {
       return !kIsDebugBuild || magic_num_ == kMagicNumFree;
     }
     size_t ByteSize(RosAlloc* rosalloc) const EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) {
-      const byte* fpr_base = reinterpret_cast<const byte*>(this);
+      const uint8_t* fpr_base = reinterpret_cast<const uint8_t*>(this);
       size_t pm_idx = rosalloc->ToPageMapIndex(fpr_base);
       size_t byte_size = rosalloc->free_page_run_size_map_[pm_idx];
       DCHECK_GE(byte_size, static_cast<size_t>(0));
@@ -60,7 +60,7 @@
     void SetByteSize(RosAlloc* rosalloc, size_t byte_size)
         EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) {
       DCHECK_EQ(byte_size % kPageSize, static_cast<size_t>(0));
-      byte* fpr_base = reinterpret_cast<byte*>(this);
+      uint8_t* fpr_base = reinterpret_cast<uint8_t*>(this);
       size_t pm_idx = rosalloc->ToPageMapIndex(fpr_base);
       rosalloc->free_page_run_size_map_[pm_idx] = byte_size;
     }
@@ -68,8 +68,8 @@
       return reinterpret_cast<void*>(this);
     }
     void* End(RosAlloc* rosalloc) EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) {
-      byte* fpr_base = reinterpret_cast<byte*>(this);
-      byte* end = fpr_base + ByteSize(rosalloc);
+      uint8_t* fpr_base = reinterpret_cast<uint8_t*>(this);
+      uint8_t* end = fpr_base + ByteSize(rosalloc);
       return end;
     }
     bool IsLargerThanPageReleaseThreshold(RosAlloc* rosalloc)
@@ -78,7 +78,7 @@
     }
     bool IsAtEndOfSpace(RosAlloc* rosalloc)
         EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) {
-      return reinterpret_cast<byte*>(this) + ByteSize(rosalloc) == rosalloc->base_ + rosalloc->footprint_;
+      return reinterpret_cast<uint8_t*>(this) + ByteSize(rosalloc) == rosalloc->base_ + rosalloc->footprint_;
     }
     bool ShouldReleasePages(RosAlloc* rosalloc) EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) {
       switch (rosalloc->page_release_mode_) {
@@ -98,7 +98,7 @@
       }
     }
     void ReleasePages(RosAlloc* rosalloc) EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) {
-      byte* start = reinterpret_cast<byte*>(this);
+      uint8_t* start = reinterpret_cast<uint8_t*>(this);
       size_t byte_size = ByteSize(rosalloc);
       DCHECK_EQ(byte_size % kPageSize, static_cast<size_t>(0));
       if (ShouldReleasePages(rosalloc)) {
@@ -151,10 +151,10 @@
   //
   class Run {
    public:
-    byte magic_num_;                 // The magic number used for debugging.
-    byte size_bracket_idx_;          // The index of the size bracket of this run.
-    byte is_thread_local_;           // True if this run is used as a thread-local run.
-    byte to_be_bulk_freed_;          // Used within BulkFree() to flag a run that's involved with a bulk free.
+    uint8_t magic_num_;                 // The magic number used for debugging.
+    uint8_t size_bracket_idx_;          // The index of the size bracket of this run.
+    uint8_t is_thread_local_;           // True if this run is used as a thread-local run.
+    uint8_t to_be_bulk_freed_;          // Used within BulkFree() to flag a run that's involved with a bulk free.
     uint32_t first_search_vec_idx_;  // The index of the first bitmap vector which may contain an available slot.
     uint32_t alloc_bit_map_[0];      // The bit map that allocates if each slot is in use.
 
@@ -175,20 +175,20 @@
     // Returns the byte size of the header except for the bit maps.
     static size_t fixed_header_size() {
       Run temp;
-      size_t size = reinterpret_cast<byte*>(&temp.alloc_bit_map_) - reinterpret_cast<byte*>(&temp);
+      size_t size = reinterpret_cast<uint8_t*>(&temp.alloc_bit_map_) - reinterpret_cast<uint8_t*>(&temp);
       DCHECK_EQ(size, static_cast<size_t>(8));
       return size;
     }
     // Returns the base address of the free bit map.
     uint32_t* BulkFreeBitMap() {
-      return reinterpret_cast<uint32_t*>(reinterpret_cast<byte*>(this) + bulkFreeBitMapOffsets[size_bracket_idx_]);
+      return reinterpret_cast<uint32_t*>(reinterpret_cast<uint8_t*>(this) + bulkFreeBitMapOffsets[size_bracket_idx_]);
     }
     // Returns the base address of the thread local free bit map.
     uint32_t* ThreadLocalFreeBitMap() {
-      return reinterpret_cast<uint32_t*>(reinterpret_cast<byte*>(this) + threadLocalFreeBitMapOffsets[size_bracket_idx_]);
+      return reinterpret_cast<uint32_t*>(reinterpret_cast<uint8_t*>(this) + threadLocalFreeBitMapOffsets[size_bracket_idx_]);
     }
     void* End() {
-      return reinterpret_cast<byte*>(this) + kPageSize * numOfPages[size_bracket_idx_];
+      return reinterpret_cast<uint8_t*>(this) + kPageSize * numOfPages[size_bracket_idx_];
     }
     // Returns the number of bitmap words per run.
     size_t NumberOfBitmapVectors() const {
@@ -259,13 +259,13 @@
   };
 
   // The magic number for a run.
-  static const byte kMagicNum = 42;
+  static constexpr uint8_t kMagicNum = 42;
   // The magic number for free pages.
-  static const byte kMagicNumFree = 43;
+  static constexpr uint8_t kMagicNumFree = 43;
   // The number of size brackets. Sync this with the length of Thread::rosalloc_runs_.
-  static const size_t kNumOfSizeBrackets = kNumRosAllocThreadLocalSizeBrackets;
+  static constexpr size_t kNumOfSizeBrackets = kNumRosAllocThreadLocalSizeBrackets;
   // The number of smaller size brackets that are 16 bytes apart.
-  static const size_t kNumOfQuantumSizeBrackets = 32;
+  static constexpr size_t kNumOfQuantumSizeBrackets = 32;
   // The sizes (the slot sizes, in bytes) of the size brackets.
   static size_t bracketSizes[kNumOfSizeBrackets];
   // The numbers of pages that are used for runs for each size bracket.
@@ -356,13 +356,13 @@
   // address is page size aligned.
   size_t ToPageMapIndex(const void* addr) const {
     DCHECK(base_ <= addr && addr < base_ + capacity_);
-    size_t byte_offset = reinterpret_cast<const byte*>(addr) - base_;
+    size_t byte_offset = reinterpret_cast<const uint8_t*>(addr) - base_;
     DCHECK_EQ(byte_offset % static_cast<size_t>(kPageSize), static_cast<size_t>(0));
     return byte_offset / kPageSize;
   }
   // Returns the page map index from an address with rounding.
   size_t RoundDownToPageMapIndex(void* addr) const {
-    DCHECK(base_ <= addr && addr < reinterpret_cast<byte*>(base_) + capacity_);
+    DCHECK(base_ <= addr && addr < reinterpret_cast<uint8_t*>(base_) + capacity_);
     return (reinterpret_cast<uintptr_t>(addr) - reinterpret_cast<uintptr_t>(base_)) / kPageSize;
   }
 
@@ -409,7 +409,7 @@
 
  private:
   // The base address of the memory region that's managed by this allocator.
-  byte* base_;
+  uint8_t* base_;
 
   // The footprint in bytes of the currently allocated portion of the
   // memory region.
@@ -455,7 +455,7 @@
     kPageMapLargeObjectPart,  // The non-beginning part of a large object.
   };
   // The table that indicates what pages are currently used for.
-  volatile byte* page_map_;  // No GUARDED_BY(lock_) for kReadPageMapEntryWithoutLockInBulkFree.
+  volatile uint8_t* page_map_;  // No GUARDED_BY(lock_) for kReadPageMapEntryWithoutLockInBulkFree.
   size_t page_map_size_;
   size_t max_page_map_size_;
   std::unique_ptr<MemMap> page_map_mem_map_;
@@ -481,12 +481,12 @@
   const size_t page_release_size_threshold_;
 
   // The base address of the memory region that's managed by this allocator.
-  byte* Begin() { return base_; }
+  uint8_t* Begin() { return base_; }
   // The end address of the memory region that's managed by this allocator.
-  byte* End() { return base_ + capacity_; }
+  uint8_t* End() { return base_ + capacity_; }
 
   // Page-granularity alloc/free
-  void* AllocPages(Thread* self, size_t num_pages, byte page_map_type)
+  void* AllocPages(Thread* self, size_t num_pages, uint8_t page_map_type)
       EXCLUSIVE_LOCKS_REQUIRED(lock_);
   // Returns how many bytes were freed.
   size_t FreePages(Thread* self, void* ptr, bool already_zero) EXCLUSIVE_LOCKS_REQUIRED(lock_);
@@ -524,7 +524,7 @@
   void RevokeThreadUnsafeCurrentRuns();
 
   // Release a range of pages.
-  size_t ReleasePageRange(byte* start, byte* end) EXCLUSIVE_LOCKS_REQUIRED(lock_);
+  size_t ReleasePageRange(uint8_t* start, uint8_t* end) EXCLUSIVE_LOCKS_REQUIRED(lock_);
 
  public:
   RosAlloc(void* base, size_t capacity, size_t max_capacity,
@@ -580,7 +580,7 @@
   }
   bool IsFreePage(size_t idx) const {
     DCHECK_LT(idx, capacity_ / kPageSize);
-    byte pm_type = page_map_[idx];
+    uint8_t pm_type = page_map_[idx];
     return pm_type == kPageMapReleased || pm_type == kPageMapEmpty;
   }