Fixes to build against the new VIXL interface.

- Fix namespace usage and replace calls to deprecated functions with
  their renamed equivalents (illustrated below).
- Link all dependants to the new libvixl-arm64 target for now.
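
Illustrative sketch of the migration (not part of the patch itself; it
only restates the renames applied in this change, using the relocated
header path from assembler_arm64.h):

    #include "a64/macro-assembler-a64.h"

    using namespace vixl::aarch64;  // previously: using namespace vixl;

    void MigrationExample(MacroAssembler* masm) {
      // Types such as UseScratchRegisterScope and Register now live in
      // vixl::aarch64 rather than directly in vixl.
      UseScratchRegisterScope temps(masm);
      Register temp = temps.AcquireX();
      // Accessors gained a "Get" prefix in the new interface.
      const auto capacity = masm->GetBufferCapacity();  // was BufferCapacity()
      const auto code = temp.GetCode();                 // was code()
      (void)capacity;
      (void)code;
    }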

Change-Id: Iee6f299784fd663fc2a759f3ee816fdbc511e509
diff --git a/compiler/utils/arm64/assembler_arm64.cc b/compiler/utils/arm64/assembler_arm64.cc
index 54ed62b..9f2027f 100644
--- a/compiler/utils/arm64/assembler_arm64.cc
+++ b/compiler/utils/arm64/assembler_arm64.cc
@@ -20,7 +20,7 @@
 #include "offsets.h"
 #include "thread.h"
 
-using namespace vixl;  // NOLINT(build/namespaces)
+using namespace vixl::aarch64;  // NOLINT(build/namespaces)
 
 namespace art {
 namespace arm64 {
@@ -39,7 +39,7 @@
 }
 
 size_t Arm64Assembler::CodeSize() const {
-  return vixl_masm_->BufferCapacity() - vixl_masm_->RemainingBufferSpace();
+  return vixl_masm_->GetBufferCapacity() - vixl_masm_->GetRemainingBufferSpace();
 }
 
 const uint8_t* Arm64Assembler::CodeBufferBaseAddress() const {
@@ -86,9 +86,9 @@
   } else {
     // temp = rd + value
     // rd = cond ? temp : rn
-    vixl::UseScratchRegisterScope temps(vixl_masm_);
+    UseScratchRegisterScope temps(vixl_masm_);
     temps.Exclude(reg_x(rd), reg_x(rn));
-    vixl::Register temp = temps.AcquireX();
+    Register temp = temps.AcquireX();
     ___ Add(temp, reg_x(rn), value);
     ___ Csel(reg_x(rd), temp, reg_x(rd), cond);
   }
@@ -182,8 +182,8 @@
 }
 
 void Arm64Assembler::StoreStackPointerToThread64(ThreadOffset<8> tr_offs) {
-  vixl::UseScratchRegisterScope temps(vixl_masm_);
-  vixl::Register temp = temps.AcquireX();
+  UseScratchRegisterScope temps(vixl_masm_);
+  Register temp = temps.AcquireX();
   ___ Mov(temp, reg_x(SP));
   ___ Str(temp, MEM_OP(reg_x(TR), tr_offs.Int32Value()));
 }
@@ -206,9 +206,9 @@
     // temp = value
     // rd = cond ? temp : rd
     if (value != 0) {
-      vixl::UseScratchRegisterScope temps(vixl_masm_);
+      UseScratchRegisterScope temps(vixl_masm_);
       temps.Exclude(reg_x(dest));
-      vixl::Register temp = temps.AcquireX();
+      Register temp = temps.AcquireX();
       ___ Mov(temp, value);
       ___ Csel(reg_x(dest), temp, reg_x(dest), cond);
     } else {
@@ -313,7 +313,7 @@
   Arm64ManagedRegister base = m_base.AsArm64();
   CHECK(dst.IsXRegister() && base.IsXRegister());
   // Remove dst and base form the temp list - higher level API uses IP1, IP0.
-  vixl::UseScratchRegisterScope temps(vixl_masm_);
+  UseScratchRegisterScope temps(vixl_masm_);
   temps.Exclude(reg_x(dst.AsXRegister()), reg_x(base.AsXRegister()));
   ___ Ldr(reg_x(dst.AsXRegister()), MEM_OP(reg_x(base.AsXRegister()), offs.Int32Value()));
 }
@@ -479,7 +479,7 @@
 
 void Arm64Assembler::MemoryBarrier(ManagedRegister m_scratch ATTRIBUTE_UNUSED) {
   // TODO: Should we check that m_scratch is IP? - see arm.
-  ___ Dmb(vixl::InnerShareable, vixl::BarrierAll);
+  ___ Dmb(InnerShareable, BarrierAll);
 }
 
 void Arm64Assembler::SignExtend(ManagedRegister mreg, size_t size) {
@@ -527,7 +527,7 @@
   CHECK(base.IsXRegister()) << base;
   CHECK(scratch.IsXRegister()) << scratch;
   // Remove base and scratch form the temp list - higher level API uses IP1, IP0.
-  vixl::UseScratchRegisterScope temps(vixl_masm_);
+  UseScratchRegisterScope temps(vixl_masm_);
   temps.Exclude(reg_x(base.AsXRegister()), reg_x(scratch.AsXRegister()));
   ___ Ldr(reg_x(scratch.AsXRegister()), MEM_OP(reg_x(base.AsXRegister()), offs.Int32Value()));
   ___ Br(reg_x(scratch.AsXRegister()));
@@ -598,7 +598,7 @@
   Arm64ManagedRegister in_reg = m_in_reg.AsArm64();
   CHECK(out_reg.IsXRegister()) << out_reg;
   CHECK(in_reg.IsXRegister()) << in_reg;
-  vixl::Label exit;
+  vixl::aarch64::Label exit;
   if (!out_reg.Equals(in_reg)) {
     // FIXME: Who sets the flags here?
     LoadImmediate(out_reg.AsXRegister(), 0, eq);
@@ -617,9 +617,9 @@
 }
 
 void Arm64Assembler::EmitExceptionPoll(Arm64Exception *exception) {
-  vixl::UseScratchRegisterScope temps(vixl_masm_);
+  UseScratchRegisterScope temps(vixl_masm_);
   temps.Exclude(reg_x(exception->scratch_.AsXRegister()));
-  vixl::Register temp = temps.AcquireX();
+  Register temp = temps.AcquireX();
 
   // Bind exception poll entry.
   ___ Bind(exception->Entry());
@@ -638,26 +638,26 @@
 
 static inline dwarf::Reg DWARFReg(CPURegister reg) {
   if (reg.IsFPRegister()) {
-    return dwarf::Reg::Arm64Fp(reg.code());
+    return dwarf::Reg::Arm64Fp(reg.GetCode());
   } else {
-    DCHECK_LT(reg.code(), 31u);  // X0 - X30.
-    return dwarf::Reg::Arm64Core(reg.code());
+    DCHECK_LT(reg.GetCode(), 31u);  // X0 - X30.
+    return dwarf::Reg::Arm64Core(reg.GetCode());
   }
 }
 
-void Arm64Assembler::SpillRegisters(vixl::CPURegList registers, int offset) {
-  int size = registers.RegisterSizeInBytes();
+void Arm64Assembler::SpillRegisters(CPURegList registers, int offset) {
+  int size = registers.GetRegisterSizeInBytes();
   const Register sp = vixl_masm_->StackPointer();
   // Since we are operating on register pairs, we would like to align on
   // double the standard size; on the other hand, we don't want to insert
   // an extra store, which will happen if the number of registers is even.
-  if (!IsAlignedParam(offset, 2 * size) && registers.Count() % 2 != 0) {
+  if (!IsAlignedParam(offset, 2 * size) && registers.GetCount() % 2 != 0) {
     const CPURegister& dst0 = registers.PopLowestIndex();
     ___ Str(dst0, MemOperand(sp, offset));
     cfi_.RelOffset(DWARFReg(dst0), offset);
     offset += size;
   }
-  while (registers.Count() >= 2) {
+  while (registers.GetCount() >= 2) {
     const CPURegister& dst0 = registers.PopLowestIndex();
     const CPURegister& dst1 = registers.PopLowestIndex();
     ___ Stp(dst0, dst1, MemOperand(sp, offset));
@@ -673,17 +673,17 @@
   DCHECK(registers.IsEmpty());
 }
 
-void Arm64Assembler::UnspillRegisters(vixl::CPURegList registers, int offset) {
-  int size = registers.RegisterSizeInBytes();
+void Arm64Assembler::UnspillRegisters(CPURegList registers, int offset) {
+  int size = registers.GetRegisterSizeInBytes();
   const Register sp = vixl_masm_->StackPointer();
   // Be consistent with the logic for spilling registers.
-  if (!IsAlignedParam(offset, 2 * size) && registers.Count() % 2 != 0) {
+  if (!IsAlignedParam(offset, 2 * size) && registers.GetCount() % 2 != 0) {
     const CPURegister& dst0 = registers.PopLowestIndex();
     ___ Ldr(dst0, MemOperand(sp, offset));
     cfi_.Restore(DWARFReg(dst0));
     offset += size;
   }
-  while (registers.Count() >= 2) {
+  while (registers.GetCount() >= 2) {
     const CPURegister& dst0 = registers.PopLowestIndex();
     const CPURegister& dst1 = registers.PopLowestIndex();
     ___ Ldp(dst0, dst1, MemOperand(sp, offset));
@@ -709,14 +709,14 @@
   for (auto r : callee_save_regs) {
     Arm64ManagedRegister reg = r.AsArm64();
     if (reg.IsXRegister()) {
-      core_reg_list.Combine(reg_x(reg.AsXRegister()).code());
+      core_reg_list.Combine(reg_x(reg.AsXRegister()).GetCode());
     } else {
       DCHECK(reg.IsDRegister());
-      fp_reg_list.Combine(reg_d(reg.AsDRegister()).code());
+      fp_reg_list.Combine(reg_d(reg.AsDRegister()).GetCode());
     }
   }
-  size_t core_reg_size = core_reg_list.TotalSizeInBytes();
-  size_t fp_reg_size = fp_reg_list.TotalSizeInBytes();
+  size_t core_reg_size = core_reg_list.GetTotalSizeInBytes();
+  size_t fp_reg_size = fp_reg_list.GetTotalSizeInBytes();
 
   // Increase frame to required size.
   DCHECK_ALIGNED(frame_size, kStackAlignment);
@@ -765,14 +765,14 @@
   for (auto r : callee_save_regs) {
     Arm64ManagedRegister reg = r.AsArm64();
     if (reg.IsXRegister()) {
-      core_reg_list.Combine(reg_x(reg.AsXRegister()).code());
+      core_reg_list.Combine(reg_x(reg.AsXRegister()).GetCode());
     } else {
       DCHECK(reg.IsDRegister());
-      fp_reg_list.Combine(reg_d(reg.AsDRegister()).code());
+      fp_reg_list.Combine(reg_d(reg.AsDRegister()).GetCode());
     }
   }
-  size_t core_reg_size = core_reg_list.TotalSizeInBytes();
-  size_t fp_reg_size = fp_reg_list.TotalSizeInBytes();
+  size_t core_reg_size = core_reg_list.GetTotalSizeInBytes();
+  size_t fp_reg_size = fp_reg_list.GetTotalSizeInBytes();
 
   // For now we only check that the size of the frame is large enough to hold spills and method
   // reference.
@@ -798,19 +798,19 @@
   cfi_.DefCFAOffset(frame_size);
 }
 
-void Arm64Assembler::PoisonHeapReference(vixl::Register reg) {
+void Arm64Assembler::PoisonHeapReference(Register reg) {
   DCHECK(reg.IsW());
   // reg = -reg.
-  ___ Neg(reg, vixl::Operand(reg));
+  ___ Neg(reg, Operand(reg));
 }
 
-void Arm64Assembler::UnpoisonHeapReference(vixl::Register reg) {
+void Arm64Assembler::UnpoisonHeapReference(Register reg) {
   DCHECK(reg.IsW());
   // reg = -reg.
-  ___ Neg(reg, vixl::Operand(reg));
+  ___ Neg(reg, Operand(reg));
 }
 
-void Arm64Assembler::MaybeUnpoisonHeapReference(vixl::Register reg) {
+void Arm64Assembler::MaybeUnpoisonHeapReference(Register reg) {
   if (kPoisonHeapReferences) {
     UnpoisonHeapReference(reg);
   }
diff --git a/compiler/utils/arm64/assembler_arm64.h b/compiler/utils/arm64/assembler_arm64.h
index 91171a8..a481544 100644
--- a/compiler/utils/arm64/assembler_arm64.h
+++ b/compiler/utils/arm64/assembler_arm64.h
@@ -28,19 +28,19 @@
 #include "utils/assembler.h"
 #include "offsets.h"
 
-// TODO: make vixl clean wrt -Wshadow.
+// TODO: make vixl clean wrt -Wshadow, -Wunknown-pragmas, -Wmissing-noreturn
 #pragma GCC diagnostic push
 #pragma GCC diagnostic ignored "-Wunknown-pragmas"
 #pragma GCC diagnostic ignored "-Wshadow"
 #pragma GCC diagnostic ignored "-Wmissing-noreturn"
-#include "vixl/a64/macro-assembler-a64.h"
-#include "vixl/a64/disasm-a64.h"
+#include "a64/disasm-a64.h"
+#include "a64/macro-assembler-a64.h"
 #pragma GCC diagnostic pop
 
 namespace art {
 namespace arm64 {
 
-#define MEM_OP(...)      vixl::MemOperand(__VA_ARGS__)
+#define MEM_OP(...)      vixl::aarch64::MemOperand(__VA_ARGS__)
 
 enum LoadOperandType {
   kLoadSignedByte,
@@ -68,7 +68,7 @@
       : scratch_(scratch), stack_adjust_(stack_adjust) {
     }
 
-  vixl::Label* Entry() { return &exception_entry_; }
+  vixl::aarch64::Label* Entry() { return &exception_entry_; }
 
   // Register used for passing Thread::Current()->exception_ .
   const Arm64ManagedRegister scratch_;
@@ -76,7 +76,7 @@
   // Stack adjust for ExceptionPool.
   const size_t stack_adjust_;
 
-  vixl::Label exception_entry_;
+  vixl::aarch64::Label exception_entry_;
 
   friend class Arm64Assembler;
   DISALLOW_COPY_AND_ASSIGN(Arm64Exception);
@@ -89,7 +89,7 @@
   explicit Arm64Assembler(ArenaAllocator* arena)
       : Assembler(arena),
         exception_blocks_(arena->Adapter(kArenaAllocAssembler)),
-        vixl_masm_(new vixl::MacroAssembler(kArm64BaseBufferSize)) {}
+        vixl_masm_(new vixl::aarch64::MacroAssembler(kArm64BaseBufferSize)) {}
 
   virtual ~Arm64Assembler() {
     delete vixl_masm_;
@@ -105,8 +105,8 @@
   // Copy instructions out of assembly buffer into the given region of memory.
   void FinalizeInstructions(const MemoryRegion& region);
 
-  void SpillRegisters(vixl::CPURegList registers, int offset);
-  void UnspillRegisters(vixl::CPURegList registers, int offset);
+  void SpillRegisters(vixl::aarch64::CPURegList registers, int offset);
+  void UnspillRegisters(vixl::aarch64::CPURegList registers, int offset);
 
   // Emit code that will create an activation on the stack.
   void BuildFrame(size_t frame_size,
@@ -177,13 +177,17 @@
   // value is null and null_allowed. in_reg holds a possibly stale reference
   // that can be used to avoid loading the handle scope entry to see if the value is
   // null.
-  void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
-                       ManagedRegister in_reg, bool null_allowed) OVERRIDE;
+  void CreateHandleScopeEntry(ManagedRegister out_reg,
+                              FrameOffset handlescope_offset,
+                              ManagedRegister in_reg,
+                              bool null_allowed) OVERRIDE;
 
   // Set up out_off to hold a Object** into the handle scope, or to be null if the
   // value is null and null_allowed.
-  void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset,
-                       ManagedRegister scratch, bool null_allowed) OVERRIDE;
+  void CreateHandleScopeEntry(FrameOffset out_off,
+                              FrameOffset handlescope_offset,
+                              ManagedRegister scratch,
+                              bool null_allowed) OVERRIDE;
 
   // src holds a handle scope entry (Object**) load this into dst.
   void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
@@ -210,11 +214,11 @@
   //
 
   // Poison a heap reference contained in `reg`.
-  void PoisonHeapReference(vixl::Register reg);
+  void PoisonHeapReference(vixl::aarch64::Register reg);
   // Unpoison a heap reference contained in `reg`.
-  void UnpoisonHeapReference(vixl::Register reg);
+  void UnpoisonHeapReference(vixl::aarch64::Register reg);
   // Unpoison a heap reference contained in `reg` if heap poisoning is enabled.
-  void MaybeUnpoisonHeapReference(vixl::Register reg);
+  void MaybeUnpoisonHeapReference(vixl::aarch64::Register reg);
 
   void Bind(Label* label ATTRIBUTE_UNUSED) OVERRIDE {
     UNIMPLEMENTED(FATAL) << "Do not use Bind for ARM64";
@@ -224,32 +228,32 @@
   }
 
  private:
-  static vixl::Register reg_x(int code) {
+  static vixl::aarch64::Register reg_x(int code) {
     CHECK(code < kNumberOfXRegisters) << code;
     if (code == SP) {
-      return vixl::sp;
+      return vixl::aarch64::sp;
     } else if (code == XZR) {
-      return vixl::xzr;
+      return vixl::aarch64::xzr;
     }
-    return vixl::Register::XRegFromCode(code);
+    return vixl::aarch64::Register::GetXRegFromCode(code);
   }
 
-  static vixl::Register reg_w(int code) {
+  static vixl::aarch64::Register reg_w(int code) {
     CHECK(code < kNumberOfWRegisters) << code;
     if (code == WSP) {
-      return vixl::wsp;
+      return vixl::aarch64::wsp;
     } else if (code == WZR) {
-      return vixl::wzr;
+      return vixl::aarch64::wzr;
     }
-    return vixl::Register::WRegFromCode(code);
+    return vixl::aarch64::Register::GetWRegFromCode(code);
   }
 
-  static vixl::FPRegister reg_d(int code) {
-    return vixl::FPRegister::DRegFromCode(code);
+  static vixl::aarch64::FPRegister reg_d(int code) {
+    return vixl::aarch64::FPRegister::GetDRegFromCode(code);
   }
 
-  static vixl::FPRegister reg_s(int code) {
-    return vixl::FPRegister::SRegFromCode(code);
+  static vixl::aarch64::FPRegister reg_s(int code) {
+    return vixl::aarch64::FPRegister::GetSRegFromCode(code);
   }
 
   // Emits Exception block.
@@ -261,22 +265,31 @@
   void StoreSToOffset(SRegister source, XRegister base, int32_t offset);
   void StoreDToOffset(DRegister source, XRegister base, int32_t offset);
 
-  void LoadImmediate(XRegister dest, int32_t value, vixl::Condition cond = vixl::al);
+  void LoadImmediate(XRegister dest,
+                     int32_t value,
+                     vixl::aarch64::Condition cond = vixl::aarch64::al);
   void Load(Arm64ManagedRegister dst, XRegister src, int32_t src_offset, size_t size);
-  void LoadWFromOffset(LoadOperandType type, WRegister dest,
-                      XRegister base, int32_t offset);
+  void LoadWFromOffset(LoadOperandType type,
+                       WRegister dest,
+                       XRegister base,
+                       int32_t offset);
   void LoadFromOffset(XRegister dest, XRegister base, int32_t offset);
   void LoadSFromOffset(SRegister dest, XRegister base, int32_t offset);
   void LoadDFromOffset(DRegister dest, XRegister base, int32_t offset);
-  void AddConstant(XRegister rd, int32_t value, vixl::Condition cond = vixl::al);
-  void AddConstant(XRegister rd, XRegister rn, int32_t value, vixl::Condition cond = vixl::al);
+  void AddConstant(XRegister rd,
+                   int32_t value,
+                   vixl::aarch64::Condition cond = vixl::aarch64::al);
+  void AddConstant(XRegister rd,
+                   XRegister rn,
+                   int32_t value,
+                   vixl::aarch64::Condition cond = vixl::aarch64::al);
 
   // List of exception blocks to generate at the end of the code cache.
   ArenaVector<std::unique_ptr<Arm64Exception>> exception_blocks_;
 
  public:
   // Vixl assembler.
-  vixl::MacroAssembler* const vixl_masm_;
+  vixl::aarch64::MacroAssembler* const vixl_masm_;
 
   // Used for testing.
   friend class Arm64ManagedRegister_VixlRegisters_Test;
diff --git a/compiler/utils/arm64/managed_register_arm64_test.cc b/compiler/utils/arm64/managed_register_arm64_test.cc
index e27115d..79076b8 100644
--- a/compiler/utils/arm64/managed_register_arm64_test.cc
+++ b/compiler/utils/arm64/managed_register_arm64_test.cc
@@ -591,149 +591,149 @@
 
 TEST(Arm64ManagedRegister, VixlRegisters) {
   // X Registers.
-  EXPECT_TRUE(vixl::x0.Is(Arm64Assembler::reg_x(X0)));
-  EXPECT_TRUE(vixl::x1.Is(Arm64Assembler::reg_x(X1)));
-  EXPECT_TRUE(vixl::x2.Is(Arm64Assembler::reg_x(X2)));
-  EXPECT_TRUE(vixl::x3.Is(Arm64Assembler::reg_x(X3)));
-  EXPECT_TRUE(vixl::x4.Is(Arm64Assembler::reg_x(X4)));
-  EXPECT_TRUE(vixl::x5.Is(Arm64Assembler::reg_x(X5)));
-  EXPECT_TRUE(vixl::x6.Is(Arm64Assembler::reg_x(X6)));
-  EXPECT_TRUE(vixl::x7.Is(Arm64Assembler::reg_x(X7)));
-  EXPECT_TRUE(vixl::x8.Is(Arm64Assembler::reg_x(X8)));
-  EXPECT_TRUE(vixl::x9.Is(Arm64Assembler::reg_x(X9)));
-  EXPECT_TRUE(vixl::x10.Is(Arm64Assembler::reg_x(X10)));
-  EXPECT_TRUE(vixl::x11.Is(Arm64Assembler::reg_x(X11)));
-  EXPECT_TRUE(vixl::x12.Is(Arm64Assembler::reg_x(X12)));
-  EXPECT_TRUE(vixl::x13.Is(Arm64Assembler::reg_x(X13)));
-  EXPECT_TRUE(vixl::x14.Is(Arm64Assembler::reg_x(X14)));
-  EXPECT_TRUE(vixl::x15.Is(Arm64Assembler::reg_x(X15)));
-  EXPECT_TRUE(vixl::x16.Is(Arm64Assembler::reg_x(X16)));
-  EXPECT_TRUE(vixl::x17.Is(Arm64Assembler::reg_x(X17)));
-  EXPECT_TRUE(vixl::x18.Is(Arm64Assembler::reg_x(X18)));
-  EXPECT_TRUE(vixl::x19.Is(Arm64Assembler::reg_x(X19)));
-  EXPECT_TRUE(vixl::x20.Is(Arm64Assembler::reg_x(X20)));
-  EXPECT_TRUE(vixl::x21.Is(Arm64Assembler::reg_x(X21)));
-  EXPECT_TRUE(vixl::x22.Is(Arm64Assembler::reg_x(X22)));
-  EXPECT_TRUE(vixl::x23.Is(Arm64Assembler::reg_x(X23)));
-  EXPECT_TRUE(vixl::x24.Is(Arm64Assembler::reg_x(X24)));
-  EXPECT_TRUE(vixl::x25.Is(Arm64Assembler::reg_x(X25)));
-  EXPECT_TRUE(vixl::x26.Is(Arm64Assembler::reg_x(X26)));
-  EXPECT_TRUE(vixl::x27.Is(Arm64Assembler::reg_x(X27)));
-  EXPECT_TRUE(vixl::x28.Is(Arm64Assembler::reg_x(X28)));
-  EXPECT_TRUE(vixl::x29.Is(Arm64Assembler::reg_x(X29)));
-  EXPECT_TRUE(vixl::x30.Is(Arm64Assembler::reg_x(X30)));
+  EXPECT_TRUE(vixl::aarch64::x0.Is(Arm64Assembler::reg_x(X0)));
+  EXPECT_TRUE(vixl::aarch64::x1.Is(Arm64Assembler::reg_x(X1)));
+  EXPECT_TRUE(vixl::aarch64::x2.Is(Arm64Assembler::reg_x(X2)));
+  EXPECT_TRUE(vixl::aarch64::x3.Is(Arm64Assembler::reg_x(X3)));
+  EXPECT_TRUE(vixl::aarch64::x4.Is(Arm64Assembler::reg_x(X4)));
+  EXPECT_TRUE(vixl::aarch64::x5.Is(Arm64Assembler::reg_x(X5)));
+  EXPECT_TRUE(vixl::aarch64::x6.Is(Arm64Assembler::reg_x(X6)));
+  EXPECT_TRUE(vixl::aarch64::x7.Is(Arm64Assembler::reg_x(X7)));
+  EXPECT_TRUE(vixl::aarch64::x8.Is(Arm64Assembler::reg_x(X8)));
+  EXPECT_TRUE(vixl::aarch64::x9.Is(Arm64Assembler::reg_x(X9)));
+  EXPECT_TRUE(vixl::aarch64::x10.Is(Arm64Assembler::reg_x(X10)));
+  EXPECT_TRUE(vixl::aarch64::x11.Is(Arm64Assembler::reg_x(X11)));
+  EXPECT_TRUE(vixl::aarch64::x12.Is(Arm64Assembler::reg_x(X12)));
+  EXPECT_TRUE(vixl::aarch64::x13.Is(Arm64Assembler::reg_x(X13)));
+  EXPECT_TRUE(vixl::aarch64::x14.Is(Arm64Assembler::reg_x(X14)));
+  EXPECT_TRUE(vixl::aarch64::x15.Is(Arm64Assembler::reg_x(X15)));
+  EXPECT_TRUE(vixl::aarch64::x16.Is(Arm64Assembler::reg_x(X16)));
+  EXPECT_TRUE(vixl::aarch64::x17.Is(Arm64Assembler::reg_x(X17)));
+  EXPECT_TRUE(vixl::aarch64::x18.Is(Arm64Assembler::reg_x(X18)));
+  EXPECT_TRUE(vixl::aarch64::x19.Is(Arm64Assembler::reg_x(X19)));
+  EXPECT_TRUE(vixl::aarch64::x20.Is(Arm64Assembler::reg_x(X20)));
+  EXPECT_TRUE(vixl::aarch64::x21.Is(Arm64Assembler::reg_x(X21)));
+  EXPECT_TRUE(vixl::aarch64::x22.Is(Arm64Assembler::reg_x(X22)));
+  EXPECT_TRUE(vixl::aarch64::x23.Is(Arm64Assembler::reg_x(X23)));
+  EXPECT_TRUE(vixl::aarch64::x24.Is(Arm64Assembler::reg_x(X24)));
+  EXPECT_TRUE(vixl::aarch64::x25.Is(Arm64Assembler::reg_x(X25)));
+  EXPECT_TRUE(vixl::aarch64::x26.Is(Arm64Assembler::reg_x(X26)));
+  EXPECT_TRUE(vixl::aarch64::x27.Is(Arm64Assembler::reg_x(X27)));
+  EXPECT_TRUE(vixl::aarch64::x28.Is(Arm64Assembler::reg_x(X28)));
+  EXPECT_TRUE(vixl::aarch64::x29.Is(Arm64Assembler::reg_x(X29)));
+  EXPECT_TRUE(vixl::aarch64::x30.Is(Arm64Assembler::reg_x(X30)));
 
-  EXPECT_TRUE(vixl::x19.Is(Arm64Assembler::reg_x(TR)));
-  EXPECT_TRUE(vixl::ip0.Is(Arm64Assembler::reg_x(IP0)));
-  EXPECT_TRUE(vixl::ip1.Is(Arm64Assembler::reg_x(IP1)));
-  EXPECT_TRUE(vixl::x29.Is(Arm64Assembler::reg_x(FP)));
-  EXPECT_TRUE(vixl::lr.Is(Arm64Assembler::reg_x(LR)));
-  EXPECT_TRUE(vixl::sp.Is(Arm64Assembler::reg_x(SP)));
-  EXPECT_TRUE(vixl::xzr.Is(Arm64Assembler::reg_x(XZR)));
+  EXPECT_TRUE(vixl::aarch64::x19.Is(Arm64Assembler::reg_x(TR)));
+  EXPECT_TRUE(vixl::aarch64::ip0.Is(Arm64Assembler::reg_x(IP0)));
+  EXPECT_TRUE(vixl::aarch64::ip1.Is(Arm64Assembler::reg_x(IP1)));
+  EXPECT_TRUE(vixl::aarch64::x29.Is(Arm64Assembler::reg_x(FP)));
+  EXPECT_TRUE(vixl::aarch64::lr.Is(Arm64Assembler::reg_x(LR)));
+  EXPECT_TRUE(vixl::aarch64::sp.Is(Arm64Assembler::reg_x(SP)));
+  EXPECT_TRUE(vixl::aarch64::xzr.Is(Arm64Assembler::reg_x(XZR)));
 
   // W Registers.
-  EXPECT_TRUE(vixl::w0.Is(Arm64Assembler::reg_w(W0)));
-  EXPECT_TRUE(vixl::w1.Is(Arm64Assembler::reg_w(W1)));
-  EXPECT_TRUE(vixl::w2.Is(Arm64Assembler::reg_w(W2)));
-  EXPECT_TRUE(vixl::w3.Is(Arm64Assembler::reg_w(W3)));
-  EXPECT_TRUE(vixl::w4.Is(Arm64Assembler::reg_w(W4)));
-  EXPECT_TRUE(vixl::w5.Is(Arm64Assembler::reg_w(W5)));
-  EXPECT_TRUE(vixl::w6.Is(Arm64Assembler::reg_w(W6)));
-  EXPECT_TRUE(vixl::w7.Is(Arm64Assembler::reg_w(W7)));
-  EXPECT_TRUE(vixl::w8.Is(Arm64Assembler::reg_w(W8)));
-  EXPECT_TRUE(vixl::w9.Is(Arm64Assembler::reg_w(W9)));
-  EXPECT_TRUE(vixl::w10.Is(Arm64Assembler::reg_w(W10)));
-  EXPECT_TRUE(vixl::w11.Is(Arm64Assembler::reg_w(W11)));
-  EXPECT_TRUE(vixl::w12.Is(Arm64Assembler::reg_w(W12)));
-  EXPECT_TRUE(vixl::w13.Is(Arm64Assembler::reg_w(W13)));
-  EXPECT_TRUE(vixl::w14.Is(Arm64Assembler::reg_w(W14)));
-  EXPECT_TRUE(vixl::w15.Is(Arm64Assembler::reg_w(W15)));
-  EXPECT_TRUE(vixl::w16.Is(Arm64Assembler::reg_w(W16)));
-  EXPECT_TRUE(vixl::w17.Is(Arm64Assembler::reg_w(W17)));
-  EXPECT_TRUE(vixl::w18.Is(Arm64Assembler::reg_w(W18)));
-  EXPECT_TRUE(vixl::w19.Is(Arm64Assembler::reg_w(W19)));
-  EXPECT_TRUE(vixl::w20.Is(Arm64Assembler::reg_w(W20)));
-  EXPECT_TRUE(vixl::w21.Is(Arm64Assembler::reg_w(W21)));
-  EXPECT_TRUE(vixl::w22.Is(Arm64Assembler::reg_w(W22)));
-  EXPECT_TRUE(vixl::w23.Is(Arm64Assembler::reg_w(W23)));
-  EXPECT_TRUE(vixl::w24.Is(Arm64Assembler::reg_w(W24)));
-  EXPECT_TRUE(vixl::w25.Is(Arm64Assembler::reg_w(W25)));
-  EXPECT_TRUE(vixl::w26.Is(Arm64Assembler::reg_w(W26)));
-  EXPECT_TRUE(vixl::w27.Is(Arm64Assembler::reg_w(W27)));
-  EXPECT_TRUE(vixl::w28.Is(Arm64Assembler::reg_w(W28)));
-  EXPECT_TRUE(vixl::w29.Is(Arm64Assembler::reg_w(W29)));
-  EXPECT_TRUE(vixl::w30.Is(Arm64Assembler::reg_w(W30)));
-  EXPECT_TRUE(vixl::w31.Is(Arm64Assembler::reg_w(WZR)));
-  EXPECT_TRUE(vixl::wzr.Is(Arm64Assembler::reg_w(WZR)));
-  EXPECT_TRUE(vixl::wsp.Is(Arm64Assembler::reg_w(WSP)));
+  EXPECT_TRUE(vixl::aarch64::w0.Is(Arm64Assembler::reg_w(W0)));
+  EXPECT_TRUE(vixl::aarch64::w1.Is(Arm64Assembler::reg_w(W1)));
+  EXPECT_TRUE(vixl::aarch64::w2.Is(Arm64Assembler::reg_w(W2)));
+  EXPECT_TRUE(vixl::aarch64::w3.Is(Arm64Assembler::reg_w(W3)));
+  EXPECT_TRUE(vixl::aarch64::w4.Is(Arm64Assembler::reg_w(W4)));
+  EXPECT_TRUE(vixl::aarch64::w5.Is(Arm64Assembler::reg_w(W5)));
+  EXPECT_TRUE(vixl::aarch64::w6.Is(Arm64Assembler::reg_w(W6)));
+  EXPECT_TRUE(vixl::aarch64::w7.Is(Arm64Assembler::reg_w(W7)));
+  EXPECT_TRUE(vixl::aarch64::w8.Is(Arm64Assembler::reg_w(W8)));
+  EXPECT_TRUE(vixl::aarch64::w9.Is(Arm64Assembler::reg_w(W9)));
+  EXPECT_TRUE(vixl::aarch64::w10.Is(Arm64Assembler::reg_w(W10)));
+  EXPECT_TRUE(vixl::aarch64::w11.Is(Arm64Assembler::reg_w(W11)));
+  EXPECT_TRUE(vixl::aarch64::w12.Is(Arm64Assembler::reg_w(W12)));
+  EXPECT_TRUE(vixl::aarch64::w13.Is(Arm64Assembler::reg_w(W13)));
+  EXPECT_TRUE(vixl::aarch64::w14.Is(Arm64Assembler::reg_w(W14)));
+  EXPECT_TRUE(vixl::aarch64::w15.Is(Arm64Assembler::reg_w(W15)));
+  EXPECT_TRUE(vixl::aarch64::w16.Is(Arm64Assembler::reg_w(W16)));
+  EXPECT_TRUE(vixl::aarch64::w17.Is(Arm64Assembler::reg_w(W17)));
+  EXPECT_TRUE(vixl::aarch64::w18.Is(Arm64Assembler::reg_w(W18)));
+  EXPECT_TRUE(vixl::aarch64::w19.Is(Arm64Assembler::reg_w(W19)));
+  EXPECT_TRUE(vixl::aarch64::w20.Is(Arm64Assembler::reg_w(W20)));
+  EXPECT_TRUE(vixl::aarch64::w21.Is(Arm64Assembler::reg_w(W21)));
+  EXPECT_TRUE(vixl::aarch64::w22.Is(Arm64Assembler::reg_w(W22)));
+  EXPECT_TRUE(vixl::aarch64::w23.Is(Arm64Assembler::reg_w(W23)));
+  EXPECT_TRUE(vixl::aarch64::w24.Is(Arm64Assembler::reg_w(W24)));
+  EXPECT_TRUE(vixl::aarch64::w25.Is(Arm64Assembler::reg_w(W25)));
+  EXPECT_TRUE(vixl::aarch64::w26.Is(Arm64Assembler::reg_w(W26)));
+  EXPECT_TRUE(vixl::aarch64::w27.Is(Arm64Assembler::reg_w(W27)));
+  EXPECT_TRUE(vixl::aarch64::w28.Is(Arm64Assembler::reg_w(W28)));
+  EXPECT_TRUE(vixl::aarch64::w29.Is(Arm64Assembler::reg_w(W29)));
+  EXPECT_TRUE(vixl::aarch64::w30.Is(Arm64Assembler::reg_w(W30)));
+  EXPECT_TRUE(vixl::aarch64::w31.Is(Arm64Assembler::reg_w(WZR)));
+  EXPECT_TRUE(vixl::aarch64::wzr.Is(Arm64Assembler::reg_w(WZR)));
+  EXPECT_TRUE(vixl::aarch64::wsp.Is(Arm64Assembler::reg_w(WSP)));
 
   // D Registers.
-  EXPECT_TRUE(vixl::d0.Is(Arm64Assembler::reg_d(D0)));
-  EXPECT_TRUE(vixl::d1.Is(Arm64Assembler::reg_d(D1)));
-  EXPECT_TRUE(vixl::d2.Is(Arm64Assembler::reg_d(D2)));
-  EXPECT_TRUE(vixl::d3.Is(Arm64Assembler::reg_d(D3)));
-  EXPECT_TRUE(vixl::d4.Is(Arm64Assembler::reg_d(D4)));
-  EXPECT_TRUE(vixl::d5.Is(Arm64Assembler::reg_d(D5)));
-  EXPECT_TRUE(vixl::d6.Is(Arm64Assembler::reg_d(D6)));
-  EXPECT_TRUE(vixl::d7.Is(Arm64Assembler::reg_d(D7)));
-  EXPECT_TRUE(vixl::d8.Is(Arm64Assembler::reg_d(D8)));
-  EXPECT_TRUE(vixl::d9.Is(Arm64Assembler::reg_d(D9)));
-  EXPECT_TRUE(vixl::d10.Is(Arm64Assembler::reg_d(D10)));
-  EXPECT_TRUE(vixl::d11.Is(Arm64Assembler::reg_d(D11)));
-  EXPECT_TRUE(vixl::d12.Is(Arm64Assembler::reg_d(D12)));
-  EXPECT_TRUE(vixl::d13.Is(Arm64Assembler::reg_d(D13)));
-  EXPECT_TRUE(vixl::d14.Is(Arm64Assembler::reg_d(D14)));
-  EXPECT_TRUE(vixl::d15.Is(Arm64Assembler::reg_d(D15)));
-  EXPECT_TRUE(vixl::d16.Is(Arm64Assembler::reg_d(D16)));
-  EXPECT_TRUE(vixl::d17.Is(Arm64Assembler::reg_d(D17)));
-  EXPECT_TRUE(vixl::d18.Is(Arm64Assembler::reg_d(D18)));
-  EXPECT_TRUE(vixl::d19.Is(Arm64Assembler::reg_d(D19)));
-  EXPECT_TRUE(vixl::d20.Is(Arm64Assembler::reg_d(D20)));
-  EXPECT_TRUE(vixl::d21.Is(Arm64Assembler::reg_d(D21)));
-  EXPECT_TRUE(vixl::d22.Is(Arm64Assembler::reg_d(D22)));
-  EXPECT_TRUE(vixl::d23.Is(Arm64Assembler::reg_d(D23)));
-  EXPECT_TRUE(vixl::d24.Is(Arm64Assembler::reg_d(D24)));
-  EXPECT_TRUE(vixl::d25.Is(Arm64Assembler::reg_d(D25)));
-  EXPECT_TRUE(vixl::d26.Is(Arm64Assembler::reg_d(D26)));
-  EXPECT_TRUE(vixl::d27.Is(Arm64Assembler::reg_d(D27)));
-  EXPECT_TRUE(vixl::d28.Is(Arm64Assembler::reg_d(D28)));
-  EXPECT_TRUE(vixl::d29.Is(Arm64Assembler::reg_d(D29)));
-  EXPECT_TRUE(vixl::d30.Is(Arm64Assembler::reg_d(D30)));
-  EXPECT_TRUE(vixl::d31.Is(Arm64Assembler::reg_d(D31)));
+  EXPECT_TRUE(vixl::aarch64::d0.Is(Arm64Assembler::reg_d(D0)));
+  EXPECT_TRUE(vixl::aarch64::d1.Is(Arm64Assembler::reg_d(D1)));
+  EXPECT_TRUE(vixl::aarch64::d2.Is(Arm64Assembler::reg_d(D2)));
+  EXPECT_TRUE(vixl::aarch64::d3.Is(Arm64Assembler::reg_d(D3)));
+  EXPECT_TRUE(vixl::aarch64::d4.Is(Arm64Assembler::reg_d(D4)));
+  EXPECT_TRUE(vixl::aarch64::d5.Is(Arm64Assembler::reg_d(D5)));
+  EXPECT_TRUE(vixl::aarch64::d6.Is(Arm64Assembler::reg_d(D6)));
+  EXPECT_TRUE(vixl::aarch64::d7.Is(Arm64Assembler::reg_d(D7)));
+  EXPECT_TRUE(vixl::aarch64::d8.Is(Arm64Assembler::reg_d(D8)));
+  EXPECT_TRUE(vixl::aarch64::d9.Is(Arm64Assembler::reg_d(D9)));
+  EXPECT_TRUE(vixl::aarch64::d10.Is(Arm64Assembler::reg_d(D10)));
+  EXPECT_TRUE(vixl::aarch64::d11.Is(Arm64Assembler::reg_d(D11)));
+  EXPECT_TRUE(vixl::aarch64::d12.Is(Arm64Assembler::reg_d(D12)));
+  EXPECT_TRUE(vixl::aarch64::d13.Is(Arm64Assembler::reg_d(D13)));
+  EXPECT_TRUE(vixl::aarch64::d14.Is(Arm64Assembler::reg_d(D14)));
+  EXPECT_TRUE(vixl::aarch64::d15.Is(Arm64Assembler::reg_d(D15)));
+  EXPECT_TRUE(vixl::aarch64::d16.Is(Arm64Assembler::reg_d(D16)));
+  EXPECT_TRUE(vixl::aarch64::d17.Is(Arm64Assembler::reg_d(D17)));
+  EXPECT_TRUE(vixl::aarch64::d18.Is(Arm64Assembler::reg_d(D18)));
+  EXPECT_TRUE(vixl::aarch64::d19.Is(Arm64Assembler::reg_d(D19)));
+  EXPECT_TRUE(vixl::aarch64::d20.Is(Arm64Assembler::reg_d(D20)));
+  EXPECT_TRUE(vixl::aarch64::d21.Is(Arm64Assembler::reg_d(D21)));
+  EXPECT_TRUE(vixl::aarch64::d22.Is(Arm64Assembler::reg_d(D22)));
+  EXPECT_TRUE(vixl::aarch64::d23.Is(Arm64Assembler::reg_d(D23)));
+  EXPECT_TRUE(vixl::aarch64::d24.Is(Arm64Assembler::reg_d(D24)));
+  EXPECT_TRUE(vixl::aarch64::d25.Is(Arm64Assembler::reg_d(D25)));
+  EXPECT_TRUE(vixl::aarch64::d26.Is(Arm64Assembler::reg_d(D26)));
+  EXPECT_TRUE(vixl::aarch64::d27.Is(Arm64Assembler::reg_d(D27)));
+  EXPECT_TRUE(vixl::aarch64::d28.Is(Arm64Assembler::reg_d(D28)));
+  EXPECT_TRUE(vixl::aarch64::d29.Is(Arm64Assembler::reg_d(D29)));
+  EXPECT_TRUE(vixl::aarch64::d30.Is(Arm64Assembler::reg_d(D30)));
+  EXPECT_TRUE(vixl::aarch64::d31.Is(Arm64Assembler::reg_d(D31)));
 
   // S Registers.
-  EXPECT_TRUE(vixl::s0.Is(Arm64Assembler::reg_s(S0)));
-  EXPECT_TRUE(vixl::s1.Is(Arm64Assembler::reg_s(S1)));
-  EXPECT_TRUE(vixl::s2.Is(Arm64Assembler::reg_s(S2)));
-  EXPECT_TRUE(vixl::s3.Is(Arm64Assembler::reg_s(S3)));
-  EXPECT_TRUE(vixl::s4.Is(Arm64Assembler::reg_s(S4)));
-  EXPECT_TRUE(vixl::s5.Is(Arm64Assembler::reg_s(S5)));
-  EXPECT_TRUE(vixl::s6.Is(Arm64Assembler::reg_s(S6)));
-  EXPECT_TRUE(vixl::s7.Is(Arm64Assembler::reg_s(S7)));
-  EXPECT_TRUE(vixl::s8.Is(Arm64Assembler::reg_s(S8)));
-  EXPECT_TRUE(vixl::s9.Is(Arm64Assembler::reg_s(S9)));
-  EXPECT_TRUE(vixl::s10.Is(Arm64Assembler::reg_s(S10)));
-  EXPECT_TRUE(vixl::s11.Is(Arm64Assembler::reg_s(S11)));
-  EXPECT_TRUE(vixl::s12.Is(Arm64Assembler::reg_s(S12)));
-  EXPECT_TRUE(vixl::s13.Is(Arm64Assembler::reg_s(S13)));
-  EXPECT_TRUE(vixl::s14.Is(Arm64Assembler::reg_s(S14)));
-  EXPECT_TRUE(vixl::s15.Is(Arm64Assembler::reg_s(S15)));
-  EXPECT_TRUE(vixl::s16.Is(Arm64Assembler::reg_s(S16)));
-  EXPECT_TRUE(vixl::s17.Is(Arm64Assembler::reg_s(S17)));
-  EXPECT_TRUE(vixl::s18.Is(Arm64Assembler::reg_s(S18)));
-  EXPECT_TRUE(vixl::s19.Is(Arm64Assembler::reg_s(S19)));
-  EXPECT_TRUE(vixl::s20.Is(Arm64Assembler::reg_s(S20)));
-  EXPECT_TRUE(vixl::s21.Is(Arm64Assembler::reg_s(S21)));
-  EXPECT_TRUE(vixl::s22.Is(Arm64Assembler::reg_s(S22)));
-  EXPECT_TRUE(vixl::s23.Is(Arm64Assembler::reg_s(S23)));
-  EXPECT_TRUE(vixl::s24.Is(Arm64Assembler::reg_s(S24)));
-  EXPECT_TRUE(vixl::s25.Is(Arm64Assembler::reg_s(S25)));
-  EXPECT_TRUE(vixl::s26.Is(Arm64Assembler::reg_s(S26)));
-  EXPECT_TRUE(vixl::s27.Is(Arm64Assembler::reg_s(S27)));
-  EXPECT_TRUE(vixl::s28.Is(Arm64Assembler::reg_s(S28)));
-  EXPECT_TRUE(vixl::s29.Is(Arm64Assembler::reg_s(S29)));
-  EXPECT_TRUE(vixl::s30.Is(Arm64Assembler::reg_s(S30)));
-  EXPECT_TRUE(vixl::s31.Is(Arm64Assembler::reg_s(S31)));
+  EXPECT_TRUE(vixl::aarch64::s0.Is(Arm64Assembler::reg_s(S0)));
+  EXPECT_TRUE(vixl::aarch64::s1.Is(Arm64Assembler::reg_s(S1)));
+  EXPECT_TRUE(vixl::aarch64::s2.Is(Arm64Assembler::reg_s(S2)));
+  EXPECT_TRUE(vixl::aarch64::s3.Is(Arm64Assembler::reg_s(S3)));
+  EXPECT_TRUE(vixl::aarch64::s4.Is(Arm64Assembler::reg_s(S4)));
+  EXPECT_TRUE(vixl::aarch64::s5.Is(Arm64Assembler::reg_s(S5)));
+  EXPECT_TRUE(vixl::aarch64::s6.Is(Arm64Assembler::reg_s(S6)));
+  EXPECT_TRUE(vixl::aarch64::s7.Is(Arm64Assembler::reg_s(S7)));
+  EXPECT_TRUE(vixl::aarch64::s8.Is(Arm64Assembler::reg_s(S8)));
+  EXPECT_TRUE(vixl::aarch64::s9.Is(Arm64Assembler::reg_s(S9)));
+  EXPECT_TRUE(vixl::aarch64::s10.Is(Arm64Assembler::reg_s(S10)));
+  EXPECT_TRUE(vixl::aarch64::s11.Is(Arm64Assembler::reg_s(S11)));
+  EXPECT_TRUE(vixl::aarch64::s12.Is(Arm64Assembler::reg_s(S12)));
+  EXPECT_TRUE(vixl::aarch64::s13.Is(Arm64Assembler::reg_s(S13)));
+  EXPECT_TRUE(vixl::aarch64::s14.Is(Arm64Assembler::reg_s(S14)));
+  EXPECT_TRUE(vixl::aarch64::s15.Is(Arm64Assembler::reg_s(S15)));
+  EXPECT_TRUE(vixl::aarch64::s16.Is(Arm64Assembler::reg_s(S16)));
+  EXPECT_TRUE(vixl::aarch64::s17.Is(Arm64Assembler::reg_s(S17)));
+  EXPECT_TRUE(vixl::aarch64::s18.Is(Arm64Assembler::reg_s(S18)));
+  EXPECT_TRUE(vixl::aarch64::s19.Is(Arm64Assembler::reg_s(S19)));
+  EXPECT_TRUE(vixl::aarch64::s20.Is(Arm64Assembler::reg_s(S20)));
+  EXPECT_TRUE(vixl::aarch64::s21.Is(Arm64Assembler::reg_s(S21)));
+  EXPECT_TRUE(vixl::aarch64::s22.Is(Arm64Assembler::reg_s(S22)));
+  EXPECT_TRUE(vixl::aarch64::s23.Is(Arm64Assembler::reg_s(S23)));
+  EXPECT_TRUE(vixl::aarch64::s24.Is(Arm64Assembler::reg_s(S24)));
+  EXPECT_TRUE(vixl::aarch64::s25.Is(Arm64Assembler::reg_s(S25)));
+  EXPECT_TRUE(vixl::aarch64::s26.Is(Arm64Assembler::reg_s(S26)));
+  EXPECT_TRUE(vixl::aarch64::s27.Is(Arm64Assembler::reg_s(S27)));
+  EXPECT_TRUE(vixl::aarch64::s28.Is(Arm64Assembler::reg_s(S28)));
+  EXPECT_TRUE(vixl::aarch64::s29.Is(Arm64Assembler::reg_s(S29)));
+  EXPECT_TRUE(vixl::aarch64::s30.Is(Arm64Assembler::reg_s(S30)));
+  EXPECT_TRUE(vixl::aarch64::s31.Is(Arm64Assembler::reg_s(S31)));
 }
 
 }  // namespace arm64