Fixes to build against new VIXL interface.

- Fix namespace usage and replace calls to deprecated functions. VIXL's
  AArch64 code now lives in the vixl::aarch64 namespace, its headers are
  no longer nested under a vixl/ directory, and accessors are renamed
  with a Get prefix (e.g. code() -> GetCode(), location() -> GetLocation()).
- Link all dependents to the new libvixl-arm64 target for now.
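
A minimal before/after sketch of the rename pattern (illustrative only;
CodeOf is a hypothetical helper, not code from this change):

  // Before: headers under vixl/, types in vixl::, plain accessors.
  //   #include "vixl/a64/macro-assembler-a64.h"
  //   static unsigned CodeOf() { return vixl::x0.code(); }
  // After: top-level headers, vixl::aarch64:: types, Get-prefixed accessors.
  #include "a64/macro-assembler-a64.h"
  static unsigned CodeOf() { return vixl::aarch64::x0.GetCode(); }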

Change-Id: Iee6f299784fd663fc2a759f3ee816fdbc511e509
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index d4bf695..63f8951 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -27,8 +27,13 @@
 #include "utils/arm64/assembler_arm64.h"
 #include "utils/string_reference.h"
 #include "utils/type_reference.h"
-#include "vixl/a64/disasm-a64.h"
-#include "vixl/a64/macro-assembler-a64.h"
+
+// TODO: make vixl clean wrt -Wshadow.
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wshadow"
+#include "a64/disasm-a64.h"
+#include "a64/macro-assembler-a64.h"
+#pragma GCC diagnostic pop
 
 namespace art {
 namespace arm64 {
@@ -38,32 +43,47 @@
 // Use a local definition to prevent copying mistakes.
 static constexpr size_t kArm64WordSize = kArm64PointerSize;
 
-static const vixl::Register kParameterCoreRegisters[] = {
-  vixl::x1, vixl::x2, vixl::x3, vixl::x4, vixl::x5, vixl::x6, vixl::x7
+static const vixl::aarch64::Register kParameterCoreRegisters[] = {
+  vixl::aarch64::x1,
+  vixl::aarch64::x2,
+  vixl::aarch64::x3,
+  vixl::aarch64::x4,
+  vixl::aarch64::x5,
+  vixl::aarch64::x6,
+  vixl::aarch64::x7
 };
 static constexpr size_t kParameterCoreRegistersLength = arraysize(kParameterCoreRegisters);
-static const vixl::FPRegister kParameterFPRegisters[] = {
-  vixl::d0, vixl::d1, vixl::d2, vixl::d3, vixl::d4, vixl::d5, vixl::d6, vixl::d7
+static const vixl::aarch64::FPRegister kParameterFPRegisters[] = {
+  vixl::aarch64::d0,
+  vixl::aarch64::d1,
+  vixl::aarch64::d2,
+  vixl::aarch64::d3,
+  vixl::aarch64::d4,
+  vixl::aarch64::d5,
+  vixl::aarch64::d6,
+  vixl::aarch64::d7
 };
 static constexpr size_t kParameterFPRegistersLength = arraysize(kParameterFPRegisters);
 
-const vixl::Register tr = vixl::x19;                        // Thread Register
-static const vixl::Register kArtMethodRegister = vixl::x0;  // Method register on invoke.
+// Thread Register
+const vixl::aarch64::Register tr = vixl::aarch64::x19;
+// Method register on invoke.
+static const vixl::aarch64::Register kArtMethodRegister = vixl::aarch64::x0;
+const vixl::aarch64::CPURegList vixl_reserved_core_registers(vixl::aarch64::ip0,
+                                                             vixl::aarch64::ip1);
+const vixl::aarch64::CPURegList vixl_reserved_fp_registers(vixl::aarch64::d31);
 
-const vixl::CPURegList vixl_reserved_core_registers(vixl::ip0, vixl::ip1);
-const vixl::CPURegList vixl_reserved_fp_registers(vixl::d31);
-
-const vixl::CPURegList runtime_reserved_core_registers(tr, vixl::lr);
+const vixl::aarch64::CPURegList runtime_reserved_core_registers(tr, vixl::aarch64::lr);
 
 // Callee-saved registers AAPCS64 (without x19 - Thread Register)
-const vixl::CPURegList callee_saved_core_registers(vixl::CPURegister::kRegister,
-                                                   vixl::kXRegSize,
-                                                   vixl::x20.code(),
-                                                   vixl::x30.code());
-const vixl::CPURegList callee_saved_fp_registers(vixl::CPURegister::kFPRegister,
-                                                 vixl::kDRegSize,
-                                                 vixl::d8.code(),
-                                                 vixl::d15.code());
+const vixl::aarch64::CPURegList callee_saved_core_registers(vixl::aarch64::CPURegister::kRegister,
+                                                            vixl::aarch64::kXRegSize,
+                                                            vixl::aarch64::x20.GetCode(),
+                                                            vixl::aarch64::x30.GetCode());
+const vixl::aarch64::CPURegList callee_saved_fp_registers(vixl::aarch64::CPURegister::kFPRegister,
+                                                          vixl::aarch64::kDRegSize,
+                                                          vixl::aarch64::d8.GetCode(),
+                                                          vixl::aarch64::d15.GetCode());
 Location ARM64ReturnLocation(Primitive::Type return_type);
 
 class SlowPathCodeARM64 : public SlowPathCode {
@@ -71,15 +91,15 @@
   explicit SlowPathCodeARM64(HInstruction* instruction)
       : SlowPathCode(instruction), entry_label_(), exit_label_() {}
 
-  vixl::Label* GetEntryLabel() { return &entry_label_; }
-  vixl::Label* GetExitLabel() { return &exit_label_; }
+  vixl::aarch64::Label* GetEntryLabel() { return &entry_label_; }
+  vixl::aarch64::Label* GetExitLabel() { return &exit_label_; }
 
   void SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) OVERRIDE;
   void RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) OVERRIDE;
 
  private:
-  vixl::Label entry_label_;
-  vixl::Label exit_label_;
+  vixl::aarch64::Label entry_label_;
+  vixl::aarch64::Label exit_label_;
 
   DISALLOW_COPY_AND_ASSIGN(SlowPathCodeARM64);
 };
@@ -89,27 +109,42 @@
   explicit JumpTableARM64(HPackedSwitch* switch_instr)
     : switch_instr_(switch_instr), table_start_() {}
 
-  vixl::Label* GetTableStartLabel() { return &table_start_; }
+  vixl::aarch64::Label* GetTableStartLabel() { return &table_start_; }
 
   void EmitTable(CodeGeneratorARM64* codegen);
 
  private:
   HPackedSwitch* const switch_instr_;
-  vixl::Label table_start_;
+  vixl::aarch64::Label table_start_;
 
   DISALLOW_COPY_AND_ASSIGN(JumpTableARM64);
 };
 
-static const vixl::Register kRuntimeParameterCoreRegisters[] =
-    { vixl::x0, vixl::x1, vixl::x2, vixl::x3, vixl::x4, vixl::x5, vixl::x6, vixl::x7 };
+static const vixl::aarch64::Register kRuntimeParameterCoreRegisters[] =
+    { vixl::aarch64::x0,
+      vixl::aarch64::x1,
+      vixl::aarch64::x2,
+      vixl::aarch64::x3,
+      vixl::aarch64::x4,
+      vixl::aarch64::x5,
+      vixl::aarch64::x6,
+      vixl::aarch64::x7 };
 static constexpr size_t kRuntimeParameterCoreRegistersLength =
     arraysize(kRuntimeParameterCoreRegisters);
-static const vixl::FPRegister kRuntimeParameterFpuRegisters[] =
-    { vixl::d0, vixl::d1, vixl::d2, vixl::d3, vixl::d4, vixl::d5, vixl::d6, vixl::d7 };
+static const vixl::aarch64::FPRegister kRuntimeParameterFpuRegisters[] =
+    { vixl::aarch64::d0,
+      vixl::aarch64::d1,
+      vixl::aarch64::d2,
+      vixl::aarch64::d3,
+      vixl::aarch64::d4,
+      vixl::aarch64::d5,
+      vixl::aarch64::d6,
+      vixl::aarch64::d7 };
 static constexpr size_t kRuntimeParameterFpuRegistersLength =
     arraysize(kRuntimeParameterFpuRegisters);
 
-class InvokeRuntimeCallingConvention : public CallingConvention<vixl::Register, vixl::FPRegister> {
+class InvokeRuntimeCallingConvention : public CallingConvention<vixl::aarch64::Register,
+                                                                vixl::aarch64::FPRegister> {
  public:
   static constexpr size_t kParameterCoreRegistersLength = arraysize(kParameterCoreRegisters);
 
@@ -126,7 +161,8 @@
   DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConvention);
 };
 
-class InvokeDexCallingConvention : public CallingConvention<vixl::Register, vixl::FPRegister> {
+class InvokeDexCallingConvention : public CallingConvention<vixl::aarch64::Register,
+                                                            vixl::aarch64::FPRegister> {
  public:
   InvokeDexCallingConvention()
       : CallingConvention(kParameterCoreRegisters,
@@ -166,23 +202,23 @@
   FieldAccessCallingConventionARM64() {}
 
   Location GetObjectLocation() const OVERRIDE {
-    return helpers::LocationFrom(vixl::x1);
+    return helpers::LocationFrom(vixl::aarch64::x1);
   }
   Location GetFieldIndexLocation() const OVERRIDE {
-    return helpers::LocationFrom(vixl::x0);
+    return helpers::LocationFrom(vixl::aarch64::x0);
   }
   Location GetReturnLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
-    return helpers::LocationFrom(vixl::x0);
+    return helpers::LocationFrom(vixl::aarch64::x0);
   }
   Location GetSetValueLocation(Primitive::Type type, bool is_instance) const OVERRIDE {
     return Primitive::Is64BitType(type)
-        ? helpers::LocationFrom(vixl::x2)
+        ? helpers::LocationFrom(vixl::aarch64::x2)
         : (is_instance
-            ? helpers::LocationFrom(vixl::x2)
-            : helpers::LocationFrom(vixl::x1));
+            ? helpers::LocationFrom(vixl::aarch64::x2)
+            : helpers::LocationFrom(vixl::aarch64::x1));
   }
   Location GetFpuLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
-    return helpers::LocationFrom(vixl::d0);
+    return helpers::LocationFrom(vixl::aarch64::d0);
   }
 
  private:
@@ -208,10 +244,11 @@
   }
 
   Arm64Assembler* GetAssembler() const { return assembler_; }
-  vixl::MacroAssembler* GetVIXLAssembler() { return GetAssembler()->vixl_masm_; }
+  vixl::aarch64::MacroAssembler* GetVIXLAssembler() { return GetAssembler()->vixl_masm_; }
 
  private:
-  void GenerateClassInitializationCheck(SlowPathCodeARM64* slow_path, vixl::Register class_reg);
+  void GenerateClassInitializationCheck(SlowPathCodeARM64* slow_path,
+                                        vixl::aarch64::Register class_reg);
   void GenerateSuspendCheck(HSuspendCheck* instruction, HBasicBlock* successor);
   void HandleBinaryOp(HBinaryOperation* instr);
 
@@ -256,9 +293,9 @@
   // while honoring read barriers (if any).
   void GenerateGcRootFieldLoad(HInstruction* instruction,
                                Location root,
-                               vixl::Register obj,
+                               vixl::aarch64::Register obj,
                                uint32_t offset,
-                               vixl::Label* fixup_label = nullptr);
+                               vixl::aarch64::Label* fixup_label = nullptr);
 
   // Generate a floating-point comparison.
   void GenerateFcmp(HInstruction* instruction);
@@ -266,8 +303,8 @@
   void HandleShift(HBinaryOperation* instr);
   void GenerateTestAndBranch(HInstruction* instruction,
                              size_t condition_input_index,
-                             vixl::Label* true_target,
-                             vixl::Label* false_target);
+                             vixl::aarch64::Label* true_target,
+                             vixl::aarch64::Label* false_target);
   void DivRemOneOrMinusOne(HBinaryOperation* instruction);
   void DivRemByPowerOfTwo(HBinaryOperation* instruction);
   void GenerateDivRemWithAnyConstant(HBinaryOperation* instruction);
@@ -327,12 +364,12 @@
 
  private:
   Arm64Assembler* GetAssembler() const;
-  vixl::MacroAssembler* GetVIXLAssembler() const {
+  vixl::aarch64::MacroAssembler* GetVIXLAssembler() const {
     return GetAssembler()->vixl_masm_;
   }
 
   CodeGeneratorARM64* const codegen_;
-  vixl::UseScratchRegisterScope vixl_temps_;
+  vixl::aarch64::UseScratchRegisterScope vixl_temps_;
 
   DISALLOW_COPY_AND_ASSIGN(ParallelMoveResolverARM64);
 };
@@ -348,12 +385,12 @@
   void GenerateFrameEntry() OVERRIDE;
   void GenerateFrameExit() OVERRIDE;
 
-  vixl::CPURegList GetFramePreservedCoreRegisters() const;
-  vixl::CPURegList GetFramePreservedFPRegisters() const;
+  vixl::aarch64::CPURegList GetFramePreservedCoreRegisters() const;
+  vixl::aarch64::CPURegList GetFramePreservedFPRegisters() const;
 
   void Bind(HBasicBlock* block) OVERRIDE;
 
-  vixl::Label* GetLabelOf(HBasicBlock* block) {
+  vixl::aarch64::Label* GetLabelOf(HBasicBlock* block) {
     block = FirstNonEmptyBlock(block);
     return &(block_labels_[block->GetBlockId()]);
   }
@@ -368,19 +405,21 @@
   }
 
   uintptr_t GetAddressOf(HBasicBlock* block) OVERRIDE {
-    vixl::Label* block_entry_label = GetLabelOf(block);
+    vixl::aarch64::Label* block_entry_label = GetLabelOf(block);
     DCHECK(block_entry_label->IsBound());
-    return block_entry_label->location();
+    return block_entry_label->GetLocation();
   }
 
   HGraphVisitor* GetLocationBuilder() OVERRIDE { return &location_builder_; }
   HGraphVisitor* GetInstructionVisitor() OVERRIDE { return &instruction_visitor_; }
   Arm64Assembler* GetAssembler() OVERRIDE { return &assembler_; }
   const Arm64Assembler& GetAssembler() const OVERRIDE { return assembler_; }
-  vixl::MacroAssembler* GetVIXLAssembler() { return GetAssembler()->vixl_masm_; }
+  vixl::aarch64::MacroAssembler* GetVIXLAssembler() { return GetAssembler()->vixl_masm_; }
 
   // Emit a write barrier.
-  void MarkGCCard(vixl::Register object, vixl::Register value, bool value_can_be_null);
+  void MarkGCCard(vixl::aarch64::Register object,
+                  vixl::aarch64::Register value,
+                  bool value_can_be_null);
 
   void GenerateMemoryBarrier(MemBarrierKind kind);
 
@@ -399,8 +438,8 @@
   // (xzr, wzr), or make for poor allocatable registers (sp alignment
   // requirements, etc.). This also facilitates our task as all other registers
   // can easily be mapped to or from their type and index or code.
-  static const int kNumberOfAllocatableRegisters = vixl::kNumberOfRegisters - 1;
-  static const int kNumberOfAllocatableFPRegisters = vixl::kNumberOfFPRegisters;
+  static const int kNumberOfAllocatableRegisters = vixl::aarch64::kNumberOfRegisters - 1;
+  static const int kNumberOfAllocatableFPRegisters = vixl::aarch64::kNumberOfFPRegisters;
   static constexpr int kNumberOfAllocatableRegisterPairs = 0;
 
   void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
@@ -426,18 +465,24 @@
   void Finalize(CodeAllocator* allocator) OVERRIDE;
 
   // Code generation helpers.
-  void MoveConstant(vixl::CPURegister destination, HConstant* constant);
+  void MoveConstant(vixl::aarch64::CPURegister destination, HConstant* constant);
   void MoveConstant(Location destination, int32_t value) OVERRIDE;
   void MoveLocation(Location dst, Location src, Primitive::Type dst_type) OVERRIDE;
   void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE;
 
-  void Load(Primitive::Type type, vixl::CPURegister dst, const vixl::MemOperand& src);
-  void Store(Primitive::Type type, vixl::CPURegister src, const vixl::MemOperand& dst);
+  void Load(Primitive::Type type,
+            vixl::aarch64::CPURegister dst,
+            const vixl::aarch64::MemOperand& src);
+  void Store(Primitive::Type type,
+             vixl::aarch64::CPURegister src,
+             const vixl::aarch64::MemOperand& dst);
   void LoadAcquire(HInstruction* instruction,
-                   vixl::CPURegister dst,
-                   const vixl::MemOperand& src,
+                   vixl::aarch64::CPURegister dst,
+                   const vixl::aarch64::MemOperand& src,
                    bool needs_null_check);
-  void StoreRelease(Primitive::Type type, vixl::CPURegister src, const vixl::MemOperand& dst);
+  void StoreRelease(Primitive::Type type,
+                    vixl::aarch64::CPURegister src,
+                    const vixl::aarch64::MemOperand& dst);
 
   // Generate code to invoke a runtime entry point.
   void InvokeRuntime(QuickEntrypointEnum entrypoint,
@@ -484,32 +529,33 @@
   // to be bound before the instruction. The instruction will be either the
   // ADRP (pass `adrp_label = null`) or the ADD (pass `adrp_label` pointing
   // to the associated ADRP patch label).
-  vixl::Label* NewPcRelativeStringPatch(const DexFile& dex_file,
-                                        uint32_t string_index,
-                                        vixl::Label* adrp_label = nullptr);
+  vixl::aarch64::Label* NewPcRelativeStringPatch(const DexFile& dex_file,
+                                                 uint32_t string_index,
+                                                 vixl::aarch64::Label* adrp_label = nullptr);
 
   // Add a new PC-relative type patch for an instruction and return the label
   // to be bound before the instruction. The instruction will be either the
   // ADRP (pass `adrp_label = null`) or the ADD (pass `adrp_label` pointing
   // to the associated ADRP patch label).
-  vixl::Label* NewPcRelativeTypePatch(const DexFile& dex_file,
-                                      uint32_t type_index,
-                                      vixl::Label* adrp_label = nullptr);
+  vixl::aarch64::Label* NewPcRelativeTypePatch(const DexFile& dex_file,
+                                               uint32_t type_index,
+                                               vixl::aarch64::Label* adrp_label = nullptr);
 
   // Add a new PC-relative dex cache array patch for an instruction and return
   // the label to be bound before the instruction. The instruction will be
   // either the ADRP (pass `adrp_label = null`) or the LDR (pass `adrp_label`
   // pointing to the associated ADRP patch label).
-  vixl::Label* NewPcRelativeDexCacheArrayPatch(const DexFile& dex_file,
-                                               uint32_t element_offset,
-                                               vixl::Label* adrp_label = nullptr);
+  vixl::aarch64::Label* NewPcRelativeDexCacheArrayPatch(
+      const DexFile& dex_file,
+      uint32_t element_offset,
+      vixl::aarch64::Label* adrp_label = nullptr);
 
-  vixl::Literal<uint32_t>* DeduplicateBootImageStringLiteral(const DexFile& dex_file,
-                                                             uint32_t string_index);
-  vixl::Literal<uint32_t>* DeduplicateBootImageTypeLiteral(const DexFile& dex_file,
-                                                           uint32_t type_index);
-  vixl::Literal<uint32_t>* DeduplicateBootImageAddressLiteral(uint64_t address);
-  vixl::Literal<uint64_t>* DeduplicateDexCacheAddressLiteral(uint64_t address);
+  vixl::aarch64::Literal<uint32_t>* DeduplicateBootImageStringLiteral(const DexFile& dex_file,
+                                                                      uint32_t string_index);
+  vixl::aarch64::Literal<uint32_t>* DeduplicateBootImageTypeLiteral(const DexFile& dex_file,
+                                                                    uint32_t type_index);
+  vixl::aarch64::Literal<uint32_t>* DeduplicateBootImageAddressLiteral(uint64_t address);
+  vixl::aarch64::Literal<uint64_t>* DeduplicateDexCacheAddressLiteral(uint64_t address);
 
   void EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) OVERRIDE;
 
@@ -517,29 +563,29 @@
   // reference field load when Baker's read barriers are used.
   void GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
                                              Location ref,
-                                             vixl::Register obj,
+                                             vixl::aarch64::Register obj,
                                              uint32_t offset,
-                                             vixl::Register temp,
+                                             vixl::aarch64::Register temp,
                                              bool needs_null_check,
                                              bool use_load_acquire);
   // Fast path implementation of ReadBarrier::Barrier for a heap
   // reference array load when Baker's read barriers are used.
   void GenerateArrayLoadWithBakerReadBarrier(HInstruction* instruction,
                                              Location ref,
-                                             vixl::Register obj,
+                                             vixl::aarch64::Register obj,
                                              uint32_t data_offset,
                                              Location index,
-                                             vixl::Register temp,
+                                             vixl::aarch64::Register temp,
                                              bool needs_null_check);
   // Factored implementation used by GenerateFieldLoadWithBakerReadBarrier
   // and GenerateArrayLoadWithBakerReadBarrier.
   void GenerateReferenceLoadWithBakerReadBarrier(HInstruction* instruction,
                                                  Location ref,
-                                                 vixl::Register obj,
+                                                 vixl::aarch64::Register obj,
                                                  uint32_t offset,
                                                  Location index,
                                                  size_t scale_factor,
-                                                 vixl::Register temp,
+                                                 vixl::aarch64::Register temp,
                                                  bool needs_null_check,
                                                  bool use_load_acquire);
 
@@ -597,24 +643,25 @@
   void GenerateExplicitNullCheck(HNullCheck* instruction);
 
  private:
-  using Uint64ToLiteralMap = ArenaSafeMap<uint64_t, vixl::Literal<uint64_t>*>;
-  using Uint32ToLiteralMap = ArenaSafeMap<uint32_t, vixl::Literal<uint32_t>*>;
+  using Uint64ToLiteralMap = ArenaSafeMap<uint64_t, vixl::aarch64::Literal<uint64_t>*>;
+  using Uint32ToLiteralMap = ArenaSafeMap<uint32_t, vixl::aarch64::Literal<uint32_t>*>;
   using MethodToLiteralMap = ArenaSafeMap<MethodReference,
-                                          vixl::Literal<uint64_t>*,
+                                          vixl::aarch64::Literal<uint64_t>*,
                                           MethodReferenceComparator>;
   using BootStringToLiteralMap = ArenaSafeMap<StringReference,
-                                              vixl::Literal<uint32_t>*,
+                                              vixl::aarch64::Literal<uint32_t>*,
                                               StringReferenceValueComparator>;
   using BootTypeToLiteralMap = ArenaSafeMap<TypeReference,
-                                            vixl::Literal<uint32_t>*,
+                                            vixl::aarch64::Literal<uint32_t>*,
                                             TypeReferenceValueComparator>;
 
-  vixl::Literal<uint32_t>* DeduplicateUint32Literal(uint32_t value, Uint32ToLiteralMap* map);
-  vixl::Literal<uint64_t>* DeduplicateUint64Literal(uint64_t value);
-  vixl::Literal<uint64_t>* DeduplicateMethodLiteral(MethodReference target_method,
-                                                    MethodToLiteralMap* map);
-  vixl::Literal<uint64_t>* DeduplicateMethodAddressLiteral(MethodReference target_method);
-  vixl::Literal<uint64_t>* DeduplicateMethodCodeLiteral(MethodReference target_method);
+  vixl::aarch64::Literal<uint32_t>* DeduplicateUint32Literal(uint32_t value,
+                                                             Uint32ToLiteralMap* map);
+  vixl::aarch64::Literal<uint64_t>* DeduplicateUint64Literal(uint64_t value);
+  vixl::aarch64::Literal<uint64_t>* DeduplicateMethodLiteral(MethodReference target_method,
+                                                             MethodToLiteralMap* map);
+  vixl::aarch64::Literal<uint64_t>* DeduplicateMethodAddressLiteral(MethodReference target_method);
+  vixl::aarch64::Literal<uint64_t>* DeduplicateMethodCodeLiteral(MethodReference target_method);
 
   // The PcRelativePatchInfo is used for PC-relative addressing of dex cache arrays
   // and boot image strings/types. The only difference is the interpretation of the
@@ -626,21 +673,21 @@
     const DexFile& target_dex_file;
     // Either the dex cache array element offset or the string/type index.
     uint32_t offset_or_index;
-    vixl::Label label;
-    vixl::Label* pc_insn_label;
+    vixl::aarch64::Label label;
+    vixl::aarch64::Label* pc_insn_label;
   };
 
-  vixl::Label* NewPcRelativePatch(const DexFile& dex_file,
-                                  uint32_t offset_or_index,
-                                  vixl::Label* adrp_label,
-                                  ArenaDeque<PcRelativePatchInfo>* patches);
+  vixl::aarch64::Label* NewPcRelativePatch(const DexFile& dex_file,
+                                           uint32_t offset_or_index,
+                                           vixl::aarch64::Label* adrp_label,
+                                           ArenaDeque<PcRelativePatchInfo>* patches);
 
   void EmitJumpTables();
 
   // Labels for each block that will be compiled.
-  // We use a deque so that the `vixl::Label` objects do not move in memory.
-  ArenaDeque<vixl::Label> block_labels_;  // Indexed by block id.
-  vixl::Label frame_entry_label_;
+  // We use a deque so that the `vixl::aarch64::Label` objects do not move in memory.
+  ArenaDeque<vixl::aarch64::Label> block_labels_;  // Indexed by block id.
+  vixl::aarch64::Label frame_entry_label_;
   ArenaVector<std::unique_ptr<JumpTableARM64>> jump_tables_;
 
   LocationsBuilderARM64 location_builder_;
@@ -659,7 +706,7 @@
   MethodToLiteralMap call_patches_;
   // Relative call patch info.
   // Using ArenaDeque<> which retains element addresses on push/emplace_back().
-  ArenaDeque<MethodPatchInfo<vixl::Label>> relative_call_patches_;
+  ArenaDeque<MethodPatchInfo<vixl::aarch64::Label>> relative_call_patches_;
   // PC-relative DexCache access info.
   ArenaDeque<PcRelativePatchInfo> pc_relative_dex_cache_patches_;
   // Deduplication map for boot string literals for kBootImageLinkTimeAddress.