Modernize typedefs with `using`.

Replace many occurrences of `typedef` with `using`. For now,
do not update typedefs for function types and aligned types
and do not touch some parts such as jvmti or dmtracedump.

Test: m
Change-Id: Ie97ecbc5abf7e7109ef4b01f208752e2dc26c36d
diff --git a/compiler/debug/elf_debug_line_writer.h b/compiler/debug/elf_debug_line_writer.h
index e7b2a1b..8d62747 100644
--- a/compiler/debug/elf_debug_line_writer.h
+++ b/compiler/debug/elf_debug_line_writer.h
@@ -32,7 +32,7 @@
 namespace art {
 namespace debug {
 
-typedef std::vector<DexFile::PositionInfo> PositionInfos;
+using PositionInfos = std::vector<DexFile::PositionInfo>;
 
 template<typename ElfTypes>
 class ElfDebugLineWriter {
diff --git a/compiler/optimizing/locations.h b/compiler/optimizing/locations.h
index 2a09921..7bb754c 100644
--- a/compiler/optimizing/locations.h
+++ b/compiler/optimizing/locations.h
@@ -425,11 +425,11 @@
     return PayloadField::Decode(value_);
   }
 
-  typedef BitField<Kind, 0, kBitsForKind> KindField;
-  typedef BitField<uintptr_t, kBitsForKind, kBitsForPayload> PayloadField;
+  using KindField = BitField<Kind, 0, kBitsForKind>;
+  using PayloadField = BitField<uintptr_t, kBitsForKind, kBitsForPayload>;
 
   // Layout for kUnallocated locations payload.
-  typedef BitField<Policy, 0, 3> PolicyField;
+  using PolicyField = BitField<Policy, 0, 3>;
 
   // Layout for stack slots.
   static const intptr_t kStackIndexBias =
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 939c49f..69ca520 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -194,7 +194,7 @@
 
 class ReferenceTypeInfo : ValueObject {
  public:
-  typedef Handle<mirror::Class> TypeHandle;
+  using TypeHandle = Handle<mirror::Class>;
 
   static ReferenceTypeInfo Create(TypeHandle type_handle, bool is_exact);
 
@@ -5705,7 +5705,7 @@
 
   template <typename T>
   static T Compute(T value, int32_t distance, int32_t max_shift_distance) {
-    typedef typename std::make_unsigned<T>::type V;
+    using V = std::make_unsigned_t<T>;
     V ux = static_cast<V>(value);
     return static_cast<T>(ux >> (distance & max_shift_distance));
   }
@@ -5862,7 +5862,7 @@
 
   template <typename T>
   static T Compute(T value, int32_t distance, int32_t max_shift_value) {
-    typedef typename std::make_unsigned<T>::type V;
+    using V = std::make_unsigned_t<T>;
     V ux = static_cast<V>(value);
     if ((distance & max_shift_value) == 0) {
       return static_cast<T>(ux);
diff --git a/compiler/utils/arm/constants_arm.h b/compiler/utils/arm/constants_arm.h
index 688c093..f42fd97 100644
--- a/compiler/utils/arm/constants_arm.h
+++ b/compiler/utils/arm/constants_arm.h
@@ -119,8 +119,7 @@
 const int kRegisterSize = 4;
 
 // List of registers used in load/store multiple.
-typedef uint16_t RegList;
-
+using RegList = uint16_t;
 
 }  // namespace arm
 }  // namespace art
diff --git a/compiler/utils/swap_space.h b/compiler/utils/swap_space.h
index 76df527..a472943 100644
--- a/compiler/utils/swap_space.h
+++ b/compiler/utils/swap_space.h
@@ -66,7 +66,7 @@
     }
   };
 
-  typedef std::set<SpaceChunk, SortChunkByPtr> FreeByStartSet;
+  using FreeByStartSet = std::set<SpaceChunk, SortChunkByPtr>;
 
   // Map size to an iterator to free_by_start_'s entry.
   struct FreeBySizeEntry {
@@ -87,7 +87,7 @@
       }
     }
   };
-  typedef std::set<FreeBySizeEntry, FreeBySizeComparator> FreeBySizeSet;
+  using FreeBySizeSet = std::set<FreeBySizeEntry, FreeBySizeComparator>;
 
   SpaceChunk NewFileChunk(size_t min_size) REQUIRES(lock_);
 
@@ -113,13 +113,13 @@
 template <>
 class SwapAllocator<void> {
  public:
-  typedef void value_type;
-  typedef void* pointer;
-  typedef const void* const_pointer;
+  using value_type    = void;
+  using pointer       = void*;
+  using const_pointer = const void*;
 
   template <typename U>
   struct rebind {
-    typedef SwapAllocator<U> other;
+    using other = SwapAllocator<U>;
   };
 
   explicit SwapAllocator(SwapSpace* swap_space) : swap_space_(swap_space) {}
@@ -145,17 +145,17 @@
 template <typename T>
 class SwapAllocator {
  public:
-  typedef T value_type;
-  typedef T* pointer;
-  typedef T& reference;
-  typedef const T* const_pointer;
-  typedef const T& const_reference;
-  typedef size_t size_type;
-  typedef ptrdiff_t difference_type;
+  using value_type      = T;
+  using pointer         = T*;
+  using reference       = T&;
+  using const_pointer   = const T*;
+  using const_reference = const T&;
+  using size_type       = size_t;
+  using difference_type = ptrdiff_t;
 
   template <typename U>
   struct rebind {
-    typedef SwapAllocator<U> other;
+    using other = SwapAllocator<U>;
   };
 
   explicit SwapAllocator(SwapSpace* swap_space) : swap_space_(swap_space) {}
diff --git a/dex2oat/driver/compiler_driver.h b/dex2oat/driver/compiler_driver.h
index bdabb2e..18be20d 100644
--- a/dex2oat/driver/compiler_driver.h
+++ b/dex2oat/driver/compiler_driver.h
@@ -304,7 +304,7 @@
   // All class references that are in the classpath. Indexed by class defs.
   ClassStateTable classpath_classes_;
 
-  typedef AtomicDexRefMap<MethodReference, CompiledMethod*> MethodTable;
+  using MethodTable = AtomicDexRefMap<MethodReference, CompiledMethod*>;
 
   // All method references that this compiler has compiled.
   MethodTable compiled_methods_;
diff --git a/dex2oat/linker/image_writer.h b/dex2oat/linker/image_writer.h
index 1a0d4c8..2a9896a 100644
--- a/dex2oat/linker/image_writer.h
+++ b/dex2oat/linker/image_writer.h
@@ -53,7 +53,7 @@
 namespace gc {
 namespace accounting {
 template <size_t kAlignment> class SpaceBitmap;
-typedef SpaceBitmap<kObjectAlignment> ContinuousSpaceBitmap;
+using ContinuousSpaceBitmap = SpaceBitmap<kObjectAlignment>;
 }  // namespace accounting
 namespace space {
 class ImageSpace;
diff --git a/libartbase/arch/instruction_set.h b/libartbase/arch/instruction_set.h
index dbaacda..faf881d 100644
--- a/libartbase/arch/instruction_set.h
+++ b/libartbase/arch/instruction_set.h
@@ -258,7 +258,7 @@
 //            are held when using!
 
 #if defined(__i386__) || defined(__arm__)
-typedef uint64_t TwoWordReturn;
+using TwoWordReturn = uint64_t;
 
 // Encodes method_ptr==nullptr and code_ptr==nullptr
 static inline constexpr TwoWordReturn GetTwoWordFailureValue() {
diff --git a/libartbase/base/allocator.h b/libartbase/base/allocator.h
index 2f85286..81f3a60 100644
--- a/libartbase/base/allocator.h
+++ b/libartbase/base/allocator.h
@@ -105,13 +105,13 @@
 template<class T, AllocatorTag kTag>
 class TrackingAllocatorImpl : public std::allocator<T> {
  public:
-  typedef typename std::allocator<T>::value_type value_type;
-  typedef typename std::allocator<T>::size_type size_type;
-  typedef typename std::allocator<T>::difference_type difference_type;
-  typedef typename std::allocator<T>::pointer pointer;
-  typedef typename std::allocator<T>::const_pointer const_pointer;
-  typedef typename std::allocator<T>::reference reference;
-  typedef typename std::allocator<T>::const_reference const_reference;
+  using value_type      = typename std::allocator<T>::value_type;
+  using size_type       = typename std::allocator<T>::size_type;
+  using difference_type = typename std::allocator<T>::difference_type;
+  using pointer         = typename std::allocator<T>::pointer;
+  using const_pointer   = typename std::allocator<T>::const_pointer;
+  using reference       = typename std::allocator<T>::reference;
+  using const_reference = typename std::allocator<T>::const_reference;
 
   // Used internally by STL data structures.
   template <class U>
@@ -127,7 +127,7 @@
   // Used internally by STL data structures.
   template <class U>
   struct rebind {
-    typedef TrackingAllocatorImpl<U, kTag> other;
+    using other = TrackingAllocatorImpl<U, kTag>;
   };
 
   pointer allocate(size_type n, const_pointer hint ATTRIBUTE_UNUSED = 0) {
@@ -149,11 +149,9 @@
 };
 
 template<class T, AllocatorTag kTag>
-// C++ doesn't allow template typedefs. This is a workaround template typedef which is
-// TrackingAllocatorImpl<T> if kEnableTrackingAllocator is true, std::allocator<T> otherwise.
-using TrackingAllocator = typename std::conditional<kEnableTrackingAllocator,
-                                                    TrackingAllocatorImpl<T, kTag>,
-                                                    std::allocator<T>>::type;
+using TrackingAllocator = std::conditional_t<kEnableTrackingAllocator,
+                                             TrackingAllocatorImpl<T, kTag>,
+                                             std::allocator<T>>;
 
 }  // namespace art
 
diff --git a/libartbase/base/arena_allocator.h b/libartbase/base/arena_allocator.h
index 8ef6565..12a44d5 100644
--- a/libartbase/base/arena_allocator.h
+++ b/libartbase/base/arena_allocator.h
@@ -148,7 +148,7 @@
   static const char* const kAllocNames[];
 };
 
-typedef ArenaAllocatorStatsImpl<kArenaAllocatorCountAllocations> ArenaAllocatorStats;
+using ArenaAllocatorStats = ArenaAllocatorStatsImpl<kArenaAllocatorCountAllocations>;
 
 class ArenaAllocatorMemoryTool {
  public:
diff --git a/libartbase/base/arena_containers.h b/libartbase/base/arena_containers.h
index 9f64919..f205bc4 100644
--- a/libartbase/base/arena_containers.h
+++ b/libartbase/base/arena_containers.h
@@ -127,18 +127,18 @@
   ArenaAllocKind kind_;
 };
 
-typedef ArenaAllocatorAdapterKindImpl<kArenaAllocatorCountAllocations> ArenaAllocatorAdapterKind;
+using ArenaAllocatorAdapterKind = ArenaAllocatorAdapterKindImpl<kArenaAllocatorCountAllocations>;
 
 template <>
 class ArenaAllocatorAdapter<void> : private ArenaAllocatorAdapterKind {
  public:
-  typedef void value_type;
-  typedef void* pointer;
-  typedef const void* const_pointer;
+  using value_type    = void;
+  using pointer       = void*;
+  using const_pointer = const void*;
 
   template <typename U>
   struct rebind {
-    typedef ArenaAllocatorAdapter<U> other;
+    using other = ArenaAllocatorAdapter<U>;
   };
 
   explicit ArenaAllocatorAdapter(ArenaAllocator* allocator,
@@ -165,17 +165,17 @@
 template <typename T>
 class ArenaAllocatorAdapter : private ArenaAllocatorAdapterKind {
  public:
-  typedef T value_type;
-  typedef T* pointer;
-  typedef T& reference;
-  typedef const T* const_pointer;
-  typedef const T& const_reference;
-  typedef size_t size_type;
-  typedef ptrdiff_t difference_type;
+  using value_type      = T;
+  using pointer         = T*;
+  using reference       = T&;
+  using const_pointer   = const T*;
+  using const_reference = const T&;
+  using size_type       = size_t;
+  using difference_type = ptrdiff_t;
 
   template <typename U>
   struct rebind {
-    typedef ArenaAllocatorAdapter<U> other;
+    using other = ArenaAllocatorAdapter<U>;
   };
 
   ArenaAllocatorAdapter(ArenaAllocator* allocator, ArenaAllocKind kind)
diff --git a/libartbase/base/atomic.h b/libartbase/base/atomic.h
index 9de84cd..226a088 100644
--- a/libartbase/base/atomic.h
+++ b/libartbase/base/atomic.h
@@ -125,7 +125,7 @@
   }
 };
 
-typedef Atomic<int32_t> AtomicInteger;
+using AtomicInteger = Atomic<int32_t>;
 
 static_assert(sizeof(AtomicInteger) == sizeof(int32_t), "Weird AtomicInteger size");
 static_assert(alignof(AtomicInteger) == alignof(int32_t),
diff --git a/libartbase/base/bit_field.h b/libartbase/base/bit_field.h
index 9971735..f57c414 100644
--- a/libartbase/base/bit_field.h
+++ b/libartbase/base/bit_field.h
@@ -30,7 +30,7 @@
 template<typename T, size_t kPosition, size_t kSize>
 class BitField {
  public:
-  typedef T value_type;
+  using value_type = T;
   static constexpr size_t position = kPosition;
   static constexpr size_t size = kSize;
 
diff --git a/libartbase/base/bit_table.h b/libartbase/base/bit_table.h
index 0c1b04e..0993648 100644
--- a/libartbase/base/bit_table.h
+++ b/libartbase/base/bit_table.h
@@ -226,7 +226,7 @@
 template<typename Accessor>
 class BitTableRange : public IterationRange<typename BitTable<Accessor>::const_iterator> {
  public:
-  typedef typename BitTable<Accessor>::const_iterator const_iterator;
+  using const_iterator = typename BitTable<Accessor>::const_iterator;
 
   using IterationRange<const_iterator>::IterationRange;
   BitTableRange() : IterationRange<const_iterator>(const_iterator(), const_iterator()) { }
diff --git a/libartbase/base/debug_stack.h b/libartbase/base/debug_stack.h
index f2d93d4..4743786 100644
--- a/libartbase/base/debug_stack.h
+++ b/libartbase/base/debug_stack.h
@@ -37,9 +37,9 @@
 template <bool kIsDebug>
 class DebugStackIndirectTopRefImpl;
 
-typedef DebugStackRefCounterImpl<kIsDebugBuild> DebugStackRefCounter;
-typedef DebugStackReferenceImpl<kIsDebugBuild> DebugStackReference;
-typedef DebugStackIndirectTopRefImpl<kIsDebugBuild> DebugStackIndirectTopRef;
+using DebugStackRefCounter = DebugStackRefCounterImpl<kIsDebugBuild>;
+using DebugStackReference = DebugStackReferenceImpl<kIsDebugBuild>;
+using DebugStackIndirectTopRef = DebugStackIndirectTopRefImpl<kIsDebugBuild>;
 
 // Non-debug mode specializations. This should be optimized away.
 
diff --git a/libartbase/base/intrusive_forward_list.h b/libartbase/base/intrusive_forward_list.h
index 984ae9c..47e4b4d 100644
--- a/libartbase/base/intrusive_forward_list.h
+++ b/libartbase/base/intrusive_forward_list.h
@@ -134,14 +134,14 @@
 template <typename T, typename HookTraits>
 class IntrusiveForwardList {
  public:
-  typedef HookTraits hook_traits;
-  typedef       T  value_type;
-  typedef       T& reference;
-  typedef const T& const_reference;
-  typedef       T* pointer;
-  typedef const T* const_pointer;
-  typedef IntrusiveForwardListIterator<      T, hook_traits> iterator;
-  typedef IntrusiveForwardListIterator<const T, hook_traits> const_iterator;
+  using hook_traits     = HookTraits;
+  using value_type      = T;
+  using reference       = T&;
+  using const_reference = const T&;
+  using pointer         = T*;
+  using const_pointer   = const T*;
+  using iterator        = IntrusiveForwardListIterator<T, hook_traits>;
+  using const_iterator  = IntrusiveForwardListIterator<const T, hook_traits>;
 
   // Construct/copy/destroy.
   IntrusiveForwardList() = default;
diff --git a/libartbase/base/iteration_range.h b/libartbase/base/iteration_range.h
index c916250..0685d59 100644
--- a/libartbase/base/iteration_range.h
+++ b/libartbase/base/iteration_range.h
@@ -27,11 +27,11 @@
 template <typename Iter>
 class IterationRange {
  public:
-  typedef Iter iterator;
-  typedef typename std::iterator_traits<Iter>::difference_type difference_type;
-  typedef typename std::iterator_traits<Iter>::value_type value_type;
-  typedef typename std::iterator_traits<Iter>::pointer pointer;
-  typedef typename std::iterator_traits<Iter>::reference reference;
+  using iterator        = Iter;
+  using difference_type = typename std::iterator_traits<Iter>::difference_type;
+  using value_type      = typename std::iterator_traits<Iter>::value_type;
+  using pointer         = typename std::iterator_traits<Iter>::pointer;
+  using reference       = typename std::iterator_traits<Iter>::reference;
 
   IterationRange(iterator first, iterator last) : first_(first), last_(last) { }
 
@@ -64,7 +64,7 @@
 
 template <typename Container>
 inline auto ReverseRange(Container&& c) {
-  typedef typename std::reverse_iterator<decltype(c.begin())> riter;
+  using riter = typename std::reverse_iterator<decltype(c.begin())>;
   return MakeIterationRange(riter(c.end()), riter(c.begin()));
 }
 
diff --git a/libartbase/base/memory_region.h b/libartbase/base/memory_region.h
index 9c9ff92..8db7018 100644
--- a/libartbase/base/memory_region.h
+++ b/libartbase/base/memory_region.h
@@ -80,7 +80,7 @@
   template<typename T>
   ALWAYS_INLINE T LoadUnaligned(uintptr_t offset) const {
     // Equivalent unsigned integer type corresponding to T.
-    typedef typename std::make_unsigned<T>::type U;
+    using U = std::make_unsigned_t<T>;
     U equivalent_unsigned_integer_value = 0;
     // Read the value byte by byte in a little-endian fashion.
     for (size_t i = 0; i < sizeof(U); ++i) {
@@ -95,7 +95,7 @@
   template<typename T>
   ALWAYS_INLINE void StoreUnaligned(uintptr_t offset, T value) const {
     // Equivalent unsigned integer type corresponding to T.
-    typedef typename std::make_unsigned<T>::type U;
+    using U = std::make_unsigned_t<T>;
     U equivalent_unsigned_integer_value = bit_cast<U, T>(value);
     // Write the value byte by byte in a little-endian fashion.
     for (size_t i = 0; i < sizeof(U); ++i) {
diff --git a/libartbase/base/os.h b/libartbase/base/os.h
index 4062d90..cb71d21 100644
--- a/libartbase/base/os.h
+++ b/libartbase/base/os.h
@@ -25,7 +25,7 @@
 
 namespace art {
 
-typedef ::unix_file::FdFile File;
+using File = ::unix_file::FdFile;
 
 // Interface to the underlying OS platform.
 
diff --git a/libartbase/base/safe_map.h b/libartbase/base/safe_map.h
index 058f0d3..371b257 100644
--- a/libartbase/base/safe_map.h
+++ b/libartbase/base/safe_map.h
@@ -31,19 +31,20 @@
           typename Allocator = std::allocator<std::pair<const K, V>>>
 class SafeMap {
  private:
-  typedef SafeMap<K, V, Comparator, Allocator> Self;
+  using Self = SafeMap<K, V, Comparator, Allocator>;
+  using Impl = std::map<K, V, Comparator, Allocator>;
 
  public:
-  typedef typename ::std::map<K, V, Comparator, Allocator>::key_compare key_compare;
-  typedef typename ::std::map<K, V, Comparator, Allocator>::value_compare value_compare;
-  typedef typename ::std::map<K, V, Comparator, Allocator>::allocator_type allocator_type;
-  typedef typename ::std::map<K, V, Comparator, Allocator>::iterator iterator;
-  typedef typename ::std::map<K, V, Comparator, Allocator>::const_iterator const_iterator;
-  typedef typename ::std::map<K, V, Comparator, Allocator>::size_type size_type;
-  typedef typename ::std::map<K, V, Comparator, Allocator>::key_type key_type;
-  typedef typename ::std::map<K, V, Comparator, Allocator>::value_type value_type;
-  typedef typename ::std::map<K, V, Comparator, Allocator>::node_type node_type;
-  typedef typename ::std::map<K, V, Comparator, Allocator>::insert_return_type insert_return_type;
+  using key_compare        = typename Impl::key_compare;
+  using value_compare      = typename Impl::value_compare;
+  using allocator_type     = typename Impl::allocator_type;
+  using iterator           = typename Impl::iterator;
+  using const_iterator     = typename Impl::const_iterator;
+  using size_type          = typename Impl::size_type;
+  using key_type           = typename Impl::key_type;
+  using value_type         = typename Impl::value_type;
+  using node_type          = typename Impl::node_type;
+  using insert_return_type = typename Impl::insert_return_type;
 
   SafeMap() = default;
   SafeMap(const SafeMap&) = default;
@@ -177,7 +178,7 @@
   }
 
  private:
-  ::std::map<K, V, Comparator, Allocator> map_;
+  Impl map_;
 };
 
 template <typename K, typename V, typename Comparator, typename Allocator>
diff --git a/libartbase/base/scoped_arena_containers.h b/libartbase/base/scoped_arena_containers.h
index ae3023b..882fdcc 100644
--- a/libartbase/base/scoped_arena_containers.h
+++ b/libartbase/base/scoped_arena_containers.h
@@ -107,13 +107,13 @@
     : private DebugStackReference, private DebugStackIndirectTopRef,
       private ArenaAllocatorAdapterKind {
  public:
-  typedef void value_type;
-  typedef void* pointer;
-  typedef const void* const_pointer;
+  using value_type    = void;
+  using pointer       = void*;
+  using const_pointer = const void*;
 
   template <typename U>
   struct rebind {
-    typedef ScopedArenaAllocatorAdapter<U> other;
+    using other = ScopedArenaAllocatorAdapter<U>;
   };
 
   explicit ScopedArenaAllocatorAdapter(ScopedArenaAllocator* allocator,
@@ -146,17 +146,17 @@
     : private DebugStackReference, private DebugStackIndirectTopRef,
       private ArenaAllocatorAdapterKind {
  public:
-  typedef T value_type;
-  typedef T* pointer;
-  typedef T& reference;
-  typedef const T* const_pointer;
-  typedef const T& const_reference;
-  typedef size_t size_type;
-  typedef ptrdiff_t difference_type;
+  using value_type      = T;
+  using pointer         = T*;
+  using reference       = T&;
+  using const_pointer   = const T*;
+  using const_reference = const T&;
+  using size_type       = size_t;
+  using difference_type = ptrdiff_t;
 
   template <typename U>
   struct rebind {
-    typedef ScopedArenaAllocatorAdapter<U> other;
+    using other = ScopedArenaAllocatorAdapter<U>;
   };
 
   explicit ScopedArenaAllocatorAdapter(ScopedArenaAllocator* allocator,
diff --git a/libartbase/base/scoped_flock.h b/libartbase/base/scoped_flock.h
index 39b36b4..525332a 100644
--- a/libartbase/base/scoped_flock.h
+++ b/libartbase/base/scoped_flock.h
@@ -32,7 +32,7 @@
 class LockedFileCloseNoFlush;
 
 // A scoped File object that calls Close without flushing.
-typedef std::unique_ptr<LockedFile, LockedFileCloseNoFlush> ScopedFlock;
+using ScopedFlock = std::unique_ptr<LockedFile, LockedFileCloseNoFlush>;
 
 class LockedFile : public unix_file::FdFile {
  public:
diff --git a/libartbase/base/utils.h b/libartbase/base/utils.h
index 7160302..0e8231a 100644
--- a/libartbase/base/utils.h
+++ b/libartbase/base/utils.h
@@ -101,7 +101,7 @@
 
 #if defined(__BIONIC__)
 struct Arc4RandomGenerator {
-  typedef uint32_t result_type;
+  using result_type = uint32_t;
   static constexpr uint32_t min() { return std::numeric_limits<uint32_t>::min(); }
   static constexpr uint32_t max() { return std::numeric_limits<uint32_t>::max(); }
   uint32_t operator() () { return arc4random(); }
diff --git a/libartbase/base/zip_archive.h b/libartbase/base/zip_archive.h
index 29c5ea1..084bfd0 100644
--- a/libartbase/base/zip_archive.h
+++ b/libartbase/base/zip_archive.h
@@ -32,7 +32,7 @@
 // system/core/zip_archive definitions.
 struct ZipArchive;
 struct ZipEntry;
-typedef ZipArchive* ZipArchiveHandle;
+using ZipArchiveHandle = ZipArchive*;
 
 namespace art {
 
diff --git a/libelffile/elf/elf_utils.h b/libelffile/elf/elf_utils.h
index 181dd10..9c4f0d81 100644
--- a/libelffile/elf/elf_utils.h
+++ b/libelffile/elf/elf_utils.h
@@ -26,35 +26,35 @@
 namespace art {
 
 struct ElfTypes32 {
-  typedef Elf32_Addr Addr;
-  typedef Elf32_Off Off;
-  typedef Elf32_Half Half;
-  typedef Elf32_Word Word;
-  typedef Elf32_Sword Sword;
-  typedef Elf32_Ehdr Ehdr;
-  typedef Elf32_Shdr Shdr;
-  typedef Elf32_Sym Sym;
-  typedef Elf32_Rel Rel;
-  typedef Elf32_Rela Rela;
-  typedef Elf32_Phdr Phdr;
-  typedef Elf32_Dyn Dyn;
+  using Addr = Elf32_Addr;
+  using Off = Elf32_Off;
+  using Half = Elf32_Half;
+  using Word = Elf32_Word;
+  using Sword = Elf32_Sword;
+  using Ehdr = Elf32_Ehdr;
+  using Shdr = Elf32_Shdr;
+  using Sym = Elf32_Sym;
+  using Rel = Elf32_Rel;
+  using Rela = Elf32_Rela;
+  using Phdr = Elf32_Phdr;
+  using Dyn = Elf32_Dyn;
 };
 
 struct ElfTypes64 {
-  typedef Elf64_Addr Addr;
-  typedef Elf64_Off Off;
-  typedef Elf64_Half Half;
-  typedef Elf64_Word Word;
-  typedef Elf64_Sword Sword;
-  typedef Elf64_Xword Xword;
-  typedef Elf64_Sxword Sxword;
-  typedef Elf64_Ehdr Ehdr;
-  typedef Elf64_Shdr Shdr;
-  typedef Elf64_Sym Sym;
-  typedef Elf64_Rel Rel;
-  typedef Elf64_Rela Rela;
-  typedef Elf64_Phdr Phdr;
-  typedef Elf64_Dyn Dyn;
+  using Addr = Elf64_Addr;
+  using Off = Elf64_Off;
+  using Half = Elf64_Half;
+  using Word = Elf64_Word;
+  using Sword = Elf64_Sword;
+  using Xword = Elf64_Xword;
+  using Sxword = Elf64_Sxword;
+  using Ehdr = Elf64_Ehdr;
+  using Shdr = Elf64_Shdr;
+  using Sym = Elf64_Sym;
+  using Rel = Elf64_Rel;
+  using Rela = Elf64_Rela;
+  using Phdr = Elf64_Phdr;
+  using Dyn = Elf64_Dyn;
 };
 
 #define ELF_ST_BIND(x) ((x) >> 4)
diff --git a/runtime/cha.h b/runtime/cha.h
index a07ee91..14af43e 100644
--- a/runtime/cha.h
+++ b/runtime/cha.h
@@ -85,8 +85,8 @@
   // as the entrypoint, we update the entrypoint to the interpreter bridge.
   // We will also deoptimize frames that are currently executing the code of
   // the method header.
-  typedef std::pair<ArtMethod*, OatQuickMethodHeader*> MethodAndMethodHeaderPair;
-  typedef std::vector<MethodAndMethodHeaderPair> ListOfDependentPairs;
+  using MethodAndMethodHeaderPair = std::pair<ArtMethod*, OatQuickMethodHeader*>;
+  using ListOfDependentPairs = std::vector<MethodAndMethodHeaderPair>;
 
   ClassHierarchyAnalysis() {}
 
diff --git a/runtime/class_table.h b/runtime/class_table.h
index 0ea6245..3bb212e 100644
--- a/runtime/class_table.h
+++ b/runtime/class_table.h
@@ -134,11 +134,11 @@
 
   // Hash set that hashes class descriptor, and compares descriptors and class loaders. Results
   // should be compared for a matching class descriptor and class loader.
-  typedef HashSet<TableSlot,
-                  TableSlotEmptyFn,
-                  ClassDescriptorHash,
-                  ClassDescriptorEquals,
-                  TrackingAllocator<TableSlot, kAllocatorTagClassTable>> ClassSet;
+  using ClassSet = HashSet<TableSlot,
+                           TableSlotEmptyFn,
+                           ClassDescriptorHash,
+                           ClassDescriptorEquals,
+                           TrackingAllocator<TableSlot, kAllocatorTagClassTable>>;
 
   ClassTable();
 
diff --git a/runtime/common_runtime_test.h b/runtime/common_runtime_test.h
index cc347a1..d3995f0 100644
--- a/runtime/common_runtime_test.h
+++ b/runtime/common_runtime_test.h
@@ -66,7 +66,7 @@
 class DexFile;
 class JavaVMExt;
 class Runtime;
-typedef std::vector<std::pair<std::string, const void*>> RuntimeOptions;
+using RuntimeOptions = std::vector<std::pair<std::string, const void*>>;
 class Thread;
 class VariableSizedHandleScope;
 
diff --git a/runtime/elf_file.h b/runtime/elf_file.h
index 0e9dd19..8516b51 100644
--- a/runtime/elf_file.h
+++ b/runtime/elf_file.h
@@ -32,8 +32,8 @@
 class ElfFileImpl;
 
 // Explicitly instantiated in elf_file.cc
-typedef ElfFileImpl<ElfTypes32> ElfFileImpl32;
-typedef ElfFileImpl<ElfTypes64> ElfFileImpl64;
+using ElfFileImpl32 = ElfFileImpl<ElfTypes32>;
+using ElfFileImpl64 = ElfFileImpl<ElfTypes64>;
 
 // Used for compile time and runtime for ElfFile access. Because of
 // the need for use at runtime, cannot directly use LLVM classes such as
diff --git a/runtime/elf_file_impl.h b/runtime/elf_file_impl.h
index 8fd8044..26d960e 100644
--- a/runtime/elf_file_impl.h
+++ b/runtime/elf_file_impl.h
@@ -150,7 +150,7 @@
   Elf_Word GetHashBucket(size_t i, bool* ok) const;
   Elf_Word GetHashChain(size_t i, bool* ok) const;
 
-  typedef std::map<std::string, Elf_Sym*> SymbolTable;
+  using SymbolTable = std::map<std::string, Elf_Sym*>;
   SymbolTable** GetSymbolTable(Elf_Word section_type);
 
   bool ValidPointer(const uint8_t* start) const;
diff --git a/runtime/gc/accounting/atomic_stack.h b/runtime/gc/accounting/atomic_stack.h
index 4486798..5e6bd88 100644
--- a/runtime/gc/accounting/atomic_stack.h
+++ b/runtime/gc/accounting/atomic_stack.h
@@ -285,7 +285,7 @@
   DISALLOW_COPY_AND_ASSIGN(AtomicStack);
 };
 
-typedef AtomicStack<mirror::Object> ObjectStack;
+using ObjectStack = AtomicStack<mirror::Object>;
 
 }  // namespace accounting
 }  // namespace gc
diff --git a/runtime/gc/accounting/mod_union_table.h b/runtime/gc/accounting/mod_union_table.h
index 5f6fd3e..1fa602f 100644
--- a/runtime/gc/accounting/mod_union_table.h
+++ b/runtime/gc/accounting/mod_union_table.h
@@ -52,9 +52,10 @@
   // A callback for visiting an object in the heap.
   using ObjectCallback = void (*)(mirror::Object*, void*);
 
-  typedef std::set<uint8_t*, std::less<uint8_t*>,
-                   TrackingAllocator<uint8_t*, kAllocatorTagModUnionCardSet>> CardSet;
-  typedef MemoryRangeBitmap<CardTable::kCardSize> CardBitmap;
+  using CardSet = std::set<uint8_t*,
+                           std::less<uint8_t*>,
+                           TrackingAllocator<uint8_t*, kAllocatorTagModUnionCardSet>>;
+  using CardBitmap = MemoryRangeBitmap<CardTable::kCardSize>;
 
   explicit ModUnionTable(const std::string& name, Heap* heap, space::ContinuousSpace* space)
       : name_(name),
diff --git a/runtime/gc/accounting/remembered_set.h b/runtime/gc/accounting/remembered_set.h
index 3525667..8b390df 100644
--- a/runtime/gc/accounting/remembered_set.h
+++ b/runtime/gc/accounting/remembered_set.h
@@ -44,8 +44,9 @@
 // from the free list spaces to the bump pointer spaces.
 class RememberedSet {
  public:
-  typedef std::set<uint8_t*, std::less<uint8_t*>,
-                   TrackingAllocator<uint8_t*, kAllocatorTagRememberedSet>> CardSet;
+  using CardSet = std::set<uint8_t*,
+                           std::less<uint8_t*>,
+                           TrackingAllocator<uint8_t*, kAllocatorTagRememberedSet>>;
 
   explicit RememberedSet(const std::string& name, Heap* heap, space::ContinuousSpace* space)
       : name_(name), heap_(heap), space_(space) {}
diff --git a/runtime/gc/accounting/space_bitmap.h b/runtime/gc/accounting/space_bitmap.h
index fe98741..b26d6da 100644
--- a/runtime/gc/accounting/space_bitmap.h
+++ b/runtime/gc/accounting/space_bitmap.h
@@ -261,8 +261,8 @@
   std::string name_;
 };
 
-typedef SpaceBitmap<kObjectAlignment> ContinuousSpaceBitmap;
-typedef SpaceBitmap<kLargeObjectAlignment> LargeObjectBitmap;
+using ContinuousSpaceBitmap = SpaceBitmap<kObjectAlignment>;
+using LargeObjectBitmap = SpaceBitmap<kLargeObjectAlignment>;
 
 template<size_t kAlignment>
 std::ostream& operator << (std::ostream& stream, const SpaceBitmap<kAlignment>& bitmap);
diff --git a/runtime/gc/allocation_record.h b/runtime/gc/allocation_record.h
index 405d060..1d9a6ce 100644
--- a/runtime/gc/allocation_record.h
+++ b/runtime/gc/allocation_record.h
@@ -211,7 +211,7 @@
   // recent allocation tracking, but GcRoot<mirror::Object> pointers in these pairs can become null.
   // Both types of pointers need read barriers, do not directly access them.
   using EntryPair = std::pair<GcRoot<mirror::Object>, AllocRecord>;
-  typedef std::list<EntryPair> EntryList;
+  using EntryList = std::list<EntryPair>;
 
   // Caller needs to check that it is enabled before calling since we read the stack trace before
   // checking the enabled boolean.
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index 72d460d..c274fed 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -42,9 +42,9 @@
 
 namespace accounting {
 template<typename T> class AtomicStack;
-typedef AtomicStack<mirror::Object> ObjectStack;
+using ObjectStack = AtomicStack<mirror::Object>;
 template <size_t kAlignment> class SpaceBitmap;
-typedef SpaceBitmap<kObjectAlignment> ContinuousSpaceBitmap;
+using ContinuousSpaceBitmap = SpaceBitmap<kObjectAlignment>;
 class HeapBitmap;
 class ReadBarrierTable;
 }  // namespace accounting
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index 75cfdba..6af7c54 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -46,7 +46,7 @@
 
 namespace accounting {
 template<typename T> class AtomicStack;
-typedef AtomicStack<mirror::Object> ObjectStack;
+using ObjectStack = AtomicStack<mirror::Object>;
 }  // namespace accounting
 
 namespace collector {
diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h
index 9f2939f..245ea10 100644
--- a/runtime/gc/collector/semi_space.h
+++ b/runtime/gc/collector/semi_space.h
@@ -44,7 +44,7 @@
 
 namespace accounting {
 template <typename T> class AtomicStack;
-typedef AtomicStack<mirror::Object> ObjectStack;
+using ObjectStack = AtomicStack<mirror::Object>;
 }  // namespace accounting
 
 namespace space {
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index f9dd83d..345109d 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -76,7 +76,7 @@
 
 namespace accounting {
 template <typename T> class AtomicStack;
-typedef AtomicStack<mirror::Object> ObjectStack;
+using ObjectStack = AtomicStack<mirror::Object>;
 class CardTable;
 class HeapBitmap;
 class ModUnionTable;
diff --git a/runtime/gc/space/large_object_space.h b/runtime/gc/space/large_object_space.h
index 8b3115c..d94f467 100644
--- a/runtime/gc/space/large_object_space.h
+++ b/runtime/gc/space/large_object_space.h
@@ -226,8 +226,9 @@
    public:
     bool operator()(const AllocationInfo* a, const AllocationInfo* b) const;
   };
-  typedef std::set<AllocationInfo*, SortByPrevFree,
-                   TrackingAllocator<AllocationInfo*, kAllocatorTagLOSFreeList>> FreeBlocks;
+  using FreeBlocks = std::set<AllocationInfo*,
+                              SortByPrevFree,
+                              TrackingAllocator<AllocationInfo*, kAllocatorTagLOSFreeList>>;
 
   // There is not footer for any allocations at the end of the space, so we keep track of how much
   // free space there is at the end manually.
diff --git a/runtime/image.h b/runtime/image.h
index c5773ec..8f045e9 100644
--- a/runtime/image.h
+++ b/runtime/image.h
@@ -511,7 +511,7 @@
  * The first element indicates the location of a managed object with a field that needs fixing up.
  * The second element of the pair is an object-relative offset to the field in question.
  */
-typedef std::pair<uint32_t, uint32_t> AppImageReferenceOffsetInfo;
+using AppImageReferenceOffsetInfo = std::pair<uint32_t, uint32_t>;
 
 std::ostream& operator<<(std::ostream& os, ImageHeader::ImageMethod method);
 std::ostream& operator<<(std::ostream& os, ImageHeader::ImageRoot root);
diff --git a/runtime/indirect_reference_table.h b/runtime/indirect_reference_table.h
index 65e384d..157c28e 100644
--- a/runtime/indirect_reference_table.h
+++ b/runtime/indirect_reference_table.h
@@ -85,7 +85,7 @@
 // more memory and additional memory accesses on add/get, but is moving-GC safe. It will catch
 // additional problems, e.g.: create iref1 for obj, delete iref1, create iref2 for same obj,
 // lookup iref1. A pattern based on object bits will miss this.
-typedef void* IndirectRef;
+using IndirectRef = void*;
 
 // Indirect reference kind, used as the two low bits of IndirectRef.
 //
diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h
index 6c08333..2a66946 100644
--- a/runtime/instrumentation.h
+++ b/runtime/instrumentation.h
@@ -710,7 +710,7 @@
 
   // Contains the instrumentation level required by each client of the instrumentation identified
   // by a string key.
-  typedef SafeMap<const char*, InstrumentationLevel> InstrumentationLevelTable;
+  using InstrumentationLevelTable = SafeMap<const char*, InstrumentationLevel>;
   InstrumentationLevelTable requested_instrumentation_levels_ GUARDED_BY(Locks::mutator_lock_);
 
   // The event listeners, written to with the mutator_lock_ exclusively held.
diff --git a/runtime/interpreter/safe_math.h b/runtime/interpreter/safe_math.h
index 06f046a..25a9353 100644
--- a/runtime/interpreter/safe_math.h
+++ b/runtime/interpreter/safe_math.h
@@ -26,16 +26,18 @@
 // Declares a type which is the larger in bit size of the two template parameters.
 template <typename T1, typename T2>
 struct select_bigger {
-  typedef typename std::conditional<sizeof(T1) >= sizeof(T2), T1, T2>::type type;
+  using type = std::conditional_t<sizeof(T1) >= sizeof(T2), T1, T2>;
 };
+template <typename T1, typename T2>
+using select_bigger_t = typename select_bigger<T1, T2>::type;
 
 // Perform signed arithmetic Op on 'a' and 'b' with defined wrapping behavior.
 template<template <typename OpT> class Op, typename T1, typename T2>
-static inline typename select_bigger<T1, T2>::type SafeMath(T1 a, T2 b) {
-  typedef typename select_bigger<T1, T2>::type biggest_T;
-  typedef typename std::make_unsigned<biggest_T>::type unsigned_biggest_T;
-  static_assert(std::is_signed<T1>::value, "Expected T1 to be signed");
-  static_assert(std::is_signed<T2>::value, "Expected T2 to be signed");
+static inline select_bigger_t<T1, T2> SafeMath(T1 a, T2 b) {
+  using biggest_T = select_bigger_t<T1, T2>;
+  using unsigned_biggest_T = std::make_unsigned_t<biggest_T>;
+  static_assert(std::is_signed_v<T1>, "Expected T1 to be signed");
+  static_assert(std::is_signed_v<T2>, "Expected T2 to be signed");
   unsigned_biggest_T val1 = static_cast<unsigned_biggest_T>(static_cast<biggest_T>(a));
   unsigned_biggest_T val2 = static_cast<unsigned_biggest_T>(b);
   return static_cast<biggest_T>(Op<unsigned_biggest_T>()(val1, val2));
@@ -43,19 +45,19 @@
 
 // Perform a signed add on 'a' and 'b' with defined wrapping behavior.
 template<typename T1, typename T2>
-static inline typename select_bigger<T1, T2>::type SafeAdd(T1 a, T2 b) {
+static inline select_bigger_t<T1, T2> SafeAdd(T1 a, T2 b) {
   return SafeMath<std::plus>(a, b);
 }
 
 // Perform a signed substract on 'a' and 'b' with defined wrapping behavior.
 template<typename T1, typename T2>
-static inline typename select_bigger<T1, T2>::type SafeSub(T1 a, T2 b) {
+static inline select_bigger_t<T1, T2> SafeSub(T1 a, T2 b) {
   return SafeMath<std::minus>(a, b);
 }
 
 // Perform a signed multiply on 'a' and 'b' with defined wrapping behavior.
 template<typename T1, typename T2>
-static inline typename select_bigger<T1, T2>::type SafeMul(T1 a, T2 b) {
+static inline select_bigger_t<T1, T2> SafeMul(T1 a, T2 b) {
   return SafeMath<std::multiplies>(a, b);
 }
 
diff --git a/runtime/mirror/array.h b/runtime/mirror/array.h
index 717d1de..4bf9dee 100644
--- a/runtime/mirror/array.h
+++ b/runtime/mirror/array.h
@@ -158,7 +158,7 @@
   MIRROR_CLASS("[F");
   MIRROR_CLASS("[D");
 
-  typedef T ElementType;
+  using ElementType = T;
 
   static ObjPtr<PrimitiveArray<T>> Alloc(Thread* self, size_t length)
       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index a3fc552..7b110fc 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -49,14 +49,14 @@
 class FinalizerReference;
 template<class T> class ObjectArray;
 template<class T> class PrimitiveArray;
-typedef PrimitiveArray<uint8_t> BooleanArray;
-typedef PrimitiveArray<int8_t> ByteArray;
-typedef PrimitiveArray<uint16_t> CharArray;
-typedef PrimitiveArray<double> DoubleArray;
-typedef PrimitiveArray<float> FloatArray;
-typedef PrimitiveArray<int32_t> IntArray;
-typedef PrimitiveArray<int64_t> LongArray;
-typedef PrimitiveArray<int16_t> ShortArray;
+using BooleanArray = PrimitiveArray<uint8_t>;
+using ByteArray = PrimitiveArray<int8_t>;
+using CharArray = PrimitiveArray<uint16_t>;
+using DoubleArray = PrimitiveArray<double>;
+using FloatArray = PrimitiveArray<float>;
+using IntArray = PrimitiveArray<int32_t>;
+using LongArray = PrimitiveArray<int64_t>;
+using ShortArray = PrimitiveArray<int16_t>;
 class Reference;
 class String;
 class Throwable;
diff --git a/runtime/monitor.h b/runtime/monitor.h
index 99e071e..dd4c21c 100644
--- a/runtime/monitor.h
+++ b/runtime/monitor.h
@@ -44,7 +44,7 @@
 template<class T> class Handle;
 class StackVisitor;
 class Thread;
-typedef uint32_t MonitorId;
+using MonitorId = uint32_t;
 
 namespace mirror {
 class Object;
@@ -452,7 +452,7 @@
   size_t DeflateMonitors() REQUIRES(!monitor_list_lock_) REQUIRES(Locks::mutator_lock_);
   size_t Size() REQUIRES(!monitor_list_lock_);
 
-  typedef std::list<Monitor*, TrackingAllocator<Monitor*, kAllocatorTagMonitorList>> Monitors;
+  using Monitors = std::list<Monitor*, TrackingAllocator<Monitor*, kAllocatorTagMonitorList>>;
 
  private:
   // During sweeping we may free an object and on a separate thread have an object created using
diff --git a/runtime/monitor_pool.h b/runtime/monitor_pool.h
index 4521a22..76ce59f 100644
--- a/runtime/monitor_pool.h
+++ b/runtime/monitor_pool.h
@@ -236,7 +236,7 @@
   // ChunkListCapacity(current_chunk_list_index_).
   size_t current_chunk_list_capacity_ GUARDED_BY(Locks::allocated_monitor_ids_lock_);
 
-  typedef TrackingAllocator<uint8_t, kAllocatorTagMonitorPool> Allocator;
+  using Allocator = TrackingAllocator<uint8_t, kAllocatorTagMonitorPool>;
   Allocator allocator_;
 
   // Start of free list of monitors.
diff --git a/runtime/parsed_options.h b/runtime/parsed_options.h
index 608805b..00e0a97 100644
--- a/runtime/parsed_options.h
+++ b/runtime/parsed_options.h
@@ -36,7 +36,7 @@
 class DexFile;
 struct RuntimeArgumentMap;
 
-typedef std::vector<std::pair<std::string, const void*>> RuntimeOptions;
+using RuntimeOptions = std::vector<std::pair<std::string, const void*>>;
 
 template <typename TVariantMap,
           template <typename TKeyValue> class TVariantMapKey>
diff --git a/runtime/reference_table.h b/runtime/reference_table.h
index 6388944..2ffd866 100644
--- a/runtime/reference_table.h
+++ b/runtime/reference_table.h
@@ -55,8 +55,8 @@
       REQUIRES_SHARED(Locks::mutator_lock_);
 
  private:
-  typedef std::vector<GcRoot<mirror::Object>,
-                      TrackingAllocator<GcRoot<mirror::Object>, kAllocatorTagReferenceTable>> Table;
+  using Table = std::vector<GcRoot<mirror::Object>,
+                            TrackingAllocator<GcRoot<mirror::Object>, kAllocatorTagReferenceTable>>;
   static void Dump(std::ostream& os, Table& entries)
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Locks::alloc_tracker_lock_);
diff --git a/runtime/runtime.h b/runtime/runtime.h
index d30dd87..eb313e0 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -74,7 +74,7 @@
 class DexCache;
 template<class T> class ObjectArray;
 template<class T> class PrimitiveArray;
-typedef PrimitiveArray<int8_t> ByteArray;
+using ByteArray = PrimitiveArray<int8_t>;
 class String;
 class Throwable;
 }  // namespace mirror
@@ -115,7 +115,7 @@
 struct TraceConfig;
 class Transaction;
 
-typedef std::vector<std::pair<std::string, const void*>> RuntimeOptions;
+using RuntimeOptions = std::vector<std::pair<std::string, const void*>>;
 
 class Runtime {
  public:
diff --git a/runtime/thread.h b/runtime/thread.h
index 5ffaa9f..7ea7d42 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -70,7 +70,7 @@
 class Object;
 template<class T> class ObjectArray;
 template<class T> class PrimitiveArray;
-typedef PrimitiveArray<int32_t> IntArray;
+using IntArray = PrimitiveArray<int32_t>;
 class StackTraceElement;
 class String;
 class Throwable;
@@ -1538,7 +1538,7 @@
   struct PACKED(4) tls_32bit_sized_values {
     // We have no control over the size of 'bool', but want our boolean fields
     // to be 4-byte quantities.
-    typedef uint32_t bool32_t;
+    using bool32_t = uint32_t;
 
     explicit tls_32bit_sized_values(bool is_daemon)
         : suspend_count(0),