Fixes to build against new VIXL interface.
- Fix namespace usage and replace uses of deprecated functions.
- Link all dependents to the new libvixl-arm64 target for now.
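
For reference, the renames reduce to roughly the sketch below (assumed usage,
not part of the patch); the accessor names are taken from the substitutions in
this diff and assume the new a64/ headers are on the include path:

    #include "a64/macro-assembler-a64.h"

    using namespace vixl::aarch64;                    // was: using namespace vixl;

    void Example(MacroAssembler* masm) {
      // Register-list accessors gained a Get prefix.
      CPURegList list(CPURegister::kRegister, kXRegSize,
                      x20.GetCode(), x30.GetCode());  // was: .code()
      int64_t bytes = list.GetTotalSizeInBytes();     // was: TotalSizeInBytes()
      int count = list.GetCount();                    // was: Count()

      // Labels report their position via GetLocation().
      Label label;                                    // was: vixl::Label
      masm->Bind(&label);
      ptrdiff_t pos = label.GetLocation();            // was: label.location()

      (void)bytes; (void)count; (void)pos;
    }
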
Change-Id: Iee6f299784fd663fc2a759f3ee816fdbc511e509
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 74c3033..cc96cf0 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -636,7 +636,7 @@
ifeq ($$(art_target_or_host),target)
$$(eval $$(call set-target-local-clang-vars))
$$(eval $$(call set-target-local-cflags-vars,debug))
- LOCAL_SHARED_LIBRARIES += libdl libicuuc libicui18n libnativehelper libz libcutils libvixl
+ LOCAL_SHARED_LIBRARIES += libdl libicuuc libicui18n libnativehelper libz libcutils libvixl-arm64
LOCAL_MODULE_PATH_32 := $$(ART_TARGET_NATIVETEST_OUT)/$$(ART_TARGET_ARCH_32)
LOCAL_MODULE_PATH_64 := $$(ART_TARGET_NATIVETEST_OUT)/$$(ART_TARGET_ARCH_64)
LOCAL_MULTILIB := both
@@ -680,7 +680,7 @@
LOCAL_CLANG := $$(ART_HOST_CLANG)
LOCAL_CFLAGS += $$(ART_HOST_CFLAGS) $$(ART_HOST_DEBUG_CFLAGS)
LOCAL_ASFLAGS += $$(ART_HOST_ASFLAGS) $$(ART_HOST_DEBUG_ASFLAGS)
- LOCAL_SHARED_LIBRARIES += libicuuc-host libicui18n-host libnativehelper libziparchive-host libz-host libvixl
+ LOCAL_SHARED_LIBRARIES += libicuuc-host libicui18n-host libnativehelper libziparchive-host libz-host libvixl-arm64
LOCAL_LDLIBS := $(ART_HOST_LDLIBS) -lpthread -ldl
LOCAL_IS_HOST_MODULE := true
LOCAL_MULTILIB := both
diff --git a/compiler/Android.mk b/compiler/Android.mk
index 02c176c..b712add 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -280,15 +280,15 @@
# Vixl assembly support for ARM64 targets.
ifeq ($$(art_ndebug_or_debug),debug)
ifeq ($$(art_static_or_shared), static)
- LOCAL_WHOLE_STATIC_LIBRARIES += libvixl
+ LOCAL_WHOLE_STATIC_LIBRARIES += libvixl-arm64
else
- LOCAL_SHARED_LIBRARIES += libvixl
+ LOCAL_SHARED_LIBRARIES += libvixl-arm64
endif
else
ifeq ($$(art_static_or_shared), static)
- LOCAL_WHOLE_STATIC_LIBRARIES += libvixl
+ LOCAL_WHOLE_STATIC_LIBRARIES += libvixl-arm64
else
- LOCAL_SHARED_LIBRARIES += libvixl
+ LOCAL_SHARED_LIBRARIES += libvixl-arm64
endif
endif
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 7cdcea2..54b009a 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -33,8 +33,7 @@
#include "utils/assembler.h"
#include "utils/stack_checks.h"
-
-using namespace vixl; // NOLINT(build/namespaces)
+using namespace vixl::aarch64; // NOLINT(build/namespaces)
#ifdef __
#error "ARM64 Codegen VIXL macro-assembler macro already defined."
@@ -147,20 +146,20 @@
codegen->GetNumberOfFloatingPointRegisters()));
CPURegList core_list = CPURegList(CPURegister::kRegister, kXRegSize,
- register_set->GetCoreRegisters() & (~callee_saved_core_registers.list()));
+ register_set->GetCoreRegisters() & (~callee_saved_core_registers.GetList()));
CPURegList fp_list = CPURegList(CPURegister::kFPRegister, kDRegSize,
- register_set->GetFloatingPointRegisters() & (~callee_saved_fp_registers.list()));
+ register_set->GetFloatingPointRegisters() & (~callee_saved_fp_registers.GetList()));
MacroAssembler* masm = down_cast<CodeGeneratorARM64*>(codegen)->GetVIXLAssembler();
UseScratchRegisterScope temps(masm);
Register base = masm->StackPointer();
- int64_t core_spill_size = core_list.TotalSizeInBytes();
- int64_t fp_spill_size = fp_list.TotalSizeInBytes();
+ int64_t core_spill_size = core_list.GetTotalSizeInBytes();
+ int64_t fp_spill_size = fp_list.GetTotalSizeInBytes();
int64_t reg_size = kXRegSizeInBytes;
int64_t max_ls_pair_offset = spill_offset + core_spill_size + fp_spill_size - 2 * reg_size;
uint32_t ls_access_size = WhichPowerOf2(reg_size);
- if (((core_list.Count() > 1) || (fp_list.Count() > 1)) &&
+ if (((core_list.GetCount() > 1) || (fp_list.GetCount() > 1)) &&
!masm->IsImmLSPair(max_ls_pair_offset, ls_access_size)) {
// If the offset does not fit in the instruction's immediate field, use an alternate register
// to compute the base address(float point registers spill base address).
@@ -411,7 +410,7 @@
}
}
- vixl::Label* GetReturnLabel() {
+ vixl::aarch64::Label* GetReturnLabel() {
DCHECK(successor_ == nullptr);
return &return_label_;
}
@@ -427,7 +426,7 @@
HBasicBlock* const successor_;
// If `successor_` is null, the label to branch to after the suspend check.
- vixl::Label return_label_;
+ vixl::aarch64::Label return_label_;
DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathARM64);
};
@@ -567,9 +566,9 @@
__ Bind(&table_start_);
const ArenaVector<HBasicBlock*>& successors = switch_instr_->GetBlock()->GetSuccessors();
for (uint32_t i = 0; i < num_entries; i++) {
- vixl::Label* target_label = codegen->GetLabelOf(successors[i]);
+ vixl::aarch64::Label* target_label = codegen->GetLabelOf(successors[i]);
DCHECK(target_label->IsBound());
- ptrdiff_t jump_offset = target_label->location() - table_start_.location();
+ ptrdiff_t jump_offset = target_label->GetLocation() - table_start_.GetLocation();
DCHECK_GT(jump_offset, std::numeric_limits<int32_t>::min());
DCHECK_LE(jump_offset, std::numeric_limits<int32_t>::max());
Literal<int32_t> literal(jump_offset);
@@ -790,8 +789,8 @@
private:
Register FindAvailableCallerSaveRegister(CodeGenerator* codegen) {
- size_t ref = static_cast<int>(XRegisterFrom(ref_).code());
- size_t obj = static_cast<int>(XRegisterFrom(obj_).code());
+ size_t ref = static_cast<int>(XRegisterFrom(ref_).GetCode());
+ size_t obj = static_cast<int>(XRegisterFrom(obj_).GetCode());
for (size_t i = 0, e = codegen->GetNumberOfCoreRegisters(); i < e; ++i) {
if (i != ref && i != obj && !codegen->IsCoreCalleeSaveRegister(i)) {
return Register(VIXLRegCodeFromART(i), kXRegSize);
@@ -909,8 +908,8 @@
kNumberOfAllocatableRegisters,
kNumberOfAllocatableFPRegisters,
kNumberOfAllocatableRegisterPairs,
- callee_saved_core_registers.list(),
- callee_saved_fp_registers.list(),
+ callee_saved_core_registers.GetList(),
+ callee_saved_fp_registers.GetList(),
compiler_options,
stats),
block_labels_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
@@ -1060,17 +1059,17 @@
GetAssembler()->cfi().DefCFAOffset(GetFrameSize());
}
-vixl::CPURegList CodeGeneratorARM64::GetFramePreservedCoreRegisters() const {
+CPURegList CodeGeneratorARM64::GetFramePreservedCoreRegisters() const {
DCHECK(ArtVixlRegCodeCoherentForRegSet(core_spill_mask_, GetNumberOfCoreRegisters(), 0, 0));
- return vixl::CPURegList(vixl::CPURegister::kRegister, vixl::kXRegSize,
- core_spill_mask_);
+ return CPURegList(CPURegister::kRegister, kXRegSize,
+ core_spill_mask_);
}
-vixl::CPURegList CodeGeneratorARM64::GetFramePreservedFPRegisters() const {
+CPURegList CodeGeneratorARM64::GetFramePreservedFPRegisters() const {
DCHECK(ArtVixlRegCodeCoherentForRegSet(0, 0, fpu_spill_mask_,
GetNumberOfFloatingPointRegisters()));
- return vixl::CPURegList(vixl::CPURegister::kFPRegister, vixl::kDRegSize,
- fpu_spill_mask_);
+ return CPURegList(CPURegister::kFPRegister, kDRegSize,
+ fpu_spill_mask_);
}
void CodeGeneratorARM64::Bind(HBasicBlock* block) {
@@ -1094,7 +1093,7 @@
UseScratchRegisterScope temps(GetVIXLAssembler());
Register card = temps.AcquireX();
Register temp = temps.AcquireW(); // Index within the CardTable - 32bit.
- vixl::Label done;
+ vixl::aarch64::Label done;
if (value_can_be_null) {
__ Cbz(value, &done);
}
@@ -1119,12 +1118,12 @@
CPURegList reserved_core_registers = vixl_reserved_core_registers;
reserved_core_registers.Combine(runtime_reserved_core_registers);
while (!reserved_core_registers.IsEmpty()) {
- blocked_core_registers_[reserved_core_registers.PopLowestIndex().code()] = true;
+ blocked_core_registers_[reserved_core_registers.PopLowestIndex().GetCode()] = true;
}
CPURegList reserved_fp_registers = vixl_reserved_fp_registers;
while (!reserved_fp_registers.IsEmpty()) {
- blocked_fpu_registers_[reserved_fp_registers.PopLowestIndex().code()] = true;
+ blocked_fpu_registers_[reserved_fp_registers.PopLowestIndex().GetCode()] = true;
}
if (GetGraph()->IsDebuggable()) {
@@ -1133,7 +1132,7 @@
// now, just block them.
CPURegList reserved_fp_registers_debuggable = callee_saved_fp_registers;
while (!reserved_fp_registers_debuggable.IsEmpty()) {
- blocked_fpu_registers_[reserved_fp_registers_debuggable.PopLowestIndex().code()] = true;
+ blocked_fpu_registers_[reserved_fp_registers_debuggable.PopLowestIndex().GetCode()] = true;
}
}
}
@@ -1344,7 +1343,7 @@
DCHECK(!src.IsPostIndex());
// TODO(vixl): Let the MacroAssembler handle MemOperand.
- __ Add(temp_base, src.base(), OperandFromMemOperand(src));
+ __ Add(temp_base, src.GetBaseRegister(), OperandFromMemOperand(src));
MemOperand base = MemOperand(temp_base);
switch (type) {
case Primitive::kPrimBoolean:
@@ -1436,7 +1435,7 @@
// TODO(vixl): Let the MacroAssembler handle this.
Operand op = OperandFromMemOperand(dst);
- __ Add(temp_base, dst.base(), op);
+ __ Add(temp_base, dst.GetBaseRegister(), op);
MemOperand base = MemOperand(temp_base);
switch (type) {
case Primitive::kPrimBoolean:
@@ -1490,7 +1489,7 @@
}
void InstructionCodeGeneratorARM64::GenerateClassInitializationCheck(SlowPathCodeARM64* slow_path,
- vixl::Register class_reg) {
+ Register class_reg) {
UseScratchRegisterScope temps(GetVIXLAssembler());
Register temp = temps.AcquireW();
size_t status_offset = mirror::Class::StatusOffset().SizeValue();
@@ -1755,7 +1754,7 @@
__ Sub(dst, lhs, rhs);
} else if (instr->IsRor()) {
if (rhs.IsImmediate()) {
- uint32_t shift = rhs.immediate() & (lhs.SizeInBits() - 1);
+ uint32_t shift = rhs.GetImmediate() & (lhs.GetSizeInBits() - 1);
__ Ror(dst, lhs, shift);
} else {
// Ensure shift distance is in the same size register as the result. If
@@ -1818,7 +1817,7 @@
Register lhs = InputRegisterAt(instr, 0);
Operand rhs = InputOperandAt(instr, 1);
if (rhs.IsImmediate()) {
- uint32_t shift_value = rhs.immediate() &
+ uint32_t shift_value = rhs.GetImmediate() &
(type == Primitive::kPrimInt ? kMaxIntShiftDistance : kMaxLongShiftDistance);
if (instr->IsShl()) {
__ Lsl(dst, lhs, shift_value);
@@ -1828,7 +1827,7 @@
__ Lsr(dst, lhs, shift_value);
}
} else {
- Register rhs_reg = dst.IsX() ? rhs.reg().X() : rhs.reg().W();
+ Register rhs_reg = dst.IsX() ? rhs.GetRegister().X() : rhs.GetRegister().W();
if (instr->IsShl()) {
__ Lsl(dst, lhs, rhs_reg);
@@ -2014,13 +2013,14 @@
if (instr->GetType() == Primitive::kPrimLong &&
codegen_->GetInstructionSetFeatures().NeedFixCortexA53_835769()) {
MacroAssembler* masm = down_cast<CodeGeneratorARM64*>(codegen_)->GetVIXLAssembler();
- vixl::Instruction* prev = masm->GetCursorAddress<vixl::Instruction*>() - vixl::kInstructionSize;
+ vixl::aarch64::Instruction* prev =
+ masm->GetCursorAddress<vixl::aarch64::Instruction*>() - kInstructionSize;
if (prev->IsLoadOrStore()) {
// Make sure we emit only exactly one nop.
- vixl::CodeBufferCheckScope scope(masm,
- vixl::kInstructionSize,
- vixl::CodeBufferCheckScope::kCheck,
- vixl::CodeBufferCheckScope::kExactSize);
+ vixl::aarch64::CodeBufferCheckScope scope(masm,
+ kInstructionSize,
+ vixl::aarch64::CodeBufferCheckScope::kCheck,
+ vixl::aarch64::CodeBufferCheckScope::kExactSize);
__ nop();
}
}
@@ -2210,7 +2210,7 @@
} else {
DCHECK(needs_write_barrier);
DCHECK(!instruction->GetArray()->IsArm64IntermediateAddress());
- vixl::Label done;
+ vixl::aarch64::Label done;
SlowPathCodeARM64* slow_path = nullptr;
{
// We use a block to end the scratch scope before the write barrier, thus
@@ -2235,7 +2235,7 @@
slow_path = new (GetGraph()->GetArena()) ArraySetSlowPathARM64(instruction);
codegen_->AddSlowPath(slow_path);
if (instruction->GetValueCanBeNull()) {
- vixl::Label non_zero;
+ vixl::aarch64::Label non_zero;
__ Cbnz(Register(value), &non_zero);
if (!index.IsConstant()) {
__ Add(temp, array, offset);
@@ -2289,7 +2289,7 @@
__ Cmp(temp, temp2);
if (instruction->StaticTypeOfArrayIsObjectArray()) {
- vixl::Label do_put;
+ vixl::aarch64::Label do_put;
__ B(eq, &do_put);
// If heap poisoning is enabled, the `temp` reference has
// not been unpoisoned yet; unpoison it now.
@@ -2822,11 +2822,11 @@
void InstructionCodeGeneratorARM64::GenerateTestAndBranch(HInstruction* instruction,
size_t condition_input_index,
- vixl::Label* true_target,
- vixl::Label* false_target) {
+ vixl::aarch64::Label* true_target,
+ vixl::aarch64::Label* false_target) {
// FP branching requires both targets to be explicit. If either of the targets
// is nullptr (fallthrough) use and bind `fallthrough_target` instead.
- vixl::Label fallthrough_target;
+ vixl::aarch64::Label fallthrough_target;
HInstruction* cond = instruction->InputAt(condition_input_index);
if (true_target == nullptr && false_target == nullptr) {
@@ -2884,7 +2884,7 @@
Operand rhs = InputOperandAt(condition, 1);
Condition arm64_cond;
- vixl::Label* non_fallthrough_target;
+ vixl::aarch64::Label* non_fallthrough_target;
if (true_target == nullptr) {
arm64_cond = ARM64Condition(condition->GetOppositeCondition());
non_fallthrough_target = false_target;
@@ -2894,7 +2894,7 @@
}
if ((arm64_cond == eq || arm64_cond == ne || arm64_cond == lt || arm64_cond == ge) &&
- rhs.IsImmediate() && (rhs.immediate() == 0)) {
+ rhs.IsImmediate() && (rhs.GetImmediate() == 0)) {
switch (arm64_cond) {
case eq:
__ Cbz(lhs, non_fallthrough_target);
@@ -2943,10 +2943,14 @@
void InstructionCodeGeneratorARM64::VisitIf(HIf* if_instr) {
HBasicBlock* true_successor = if_instr->IfTrueSuccessor();
HBasicBlock* false_successor = if_instr->IfFalseSuccessor();
- vixl::Label* true_target = codegen_->GoesToNextBlock(if_instr->GetBlock(), true_successor) ?
- nullptr : codegen_->GetLabelOf(true_successor);
- vixl::Label* false_target = codegen_->GoesToNextBlock(if_instr->GetBlock(), false_successor) ?
- nullptr : codegen_->GetLabelOf(false_successor);
+ vixl::aarch64::Label* true_target = codegen_->GetLabelOf(true_successor);
+ if (codegen_->GoesToNextBlock(if_instr->GetBlock(), true_successor)) {
+ true_target = nullptr;
+ }
+ vixl::aarch64::Label* false_target = codegen_->GetLabelOf(false_successor);
+ if (codegen_->GoesToNextBlock(if_instr->GetBlock(), false_successor)) {
+ false_target = nullptr;
+ }
GenerateTestAndBranch(if_instr, /* condition_input_index */ 0, true_target, false_target);
}
@@ -3130,7 +3134,7 @@
uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value();
- vixl::Label done, zero;
+ vixl::aarch64::Label done, zero;
SlowPathCodeARM64* slow_path = nullptr;
// Return 0 if `obj` is null.
@@ -3155,7 +3159,7 @@
case TypeCheckKind::kAbstractClassCheck: {
// If the class is abstract, we eagerly fetch the super class of the
// object to avoid doing a comparison we know will fail.
- vixl::Label loop, success;
+ vixl::aarch64::Label loop, success;
__ Bind(&loop);
// /* HeapReference<Class> */ out = out->super_class_
GenerateReferenceLoadOneRegister(instruction, out_loc, super_offset, maybe_temp_loc);
@@ -3172,7 +3176,7 @@
case TypeCheckKind::kClassHierarchyCheck: {
// Walk over the class hierarchy to find a match.
- vixl::Label loop, success;
+ vixl::aarch64::Label loop, success;
__ Bind(&loop);
__ Cmp(out, cls);
__ B(eq, &success);
@@ -3191,7 +3195,7 @@
case TypeCheckKind::kArrayObjectCheck: {
// Do an exact check.
- vixl::Label exact_check;
+ vixl::aarch64::Label exact_check;
__ Cmp(out, cls);
__ B(eq, &exact_check);
// Otherwise, we need to check that the object's class is a non-primitive array.
@@ -3328,7 +3332,7 @@
is_type_check_slow_path_fatal);
codegen_->AddSlowPath(type_check_slow_path);
- vixl::Label done;
+ vixl::aarch64::Label done;
// Avoid null check if we know obj is not null.
if (instruction->MustDoNullCheck()) {
__ Cbz(obj, &done);
@@ -3350,7 +3354,7 @@
case TypeCheckKind::kAbstractClassCheck: {
// If the class is abstract, we eagerly fetch the super class of the
// object to avoid doing a comparison we know will fail.
- vixl::Label loop, compare_classes;
+ vixl::aarch64::Label loop, compare_classes;
__ Bind(&loop);
// /* HeapReference<Class> */ temp = temp->super_class_
GenerateReferenceLoadOneRegister(instruction, temp_loc, super_offset, maybe_temp2_loc);
@@ -3377,7 +3381,7 @@
case TypeCheckKind::kClassHierarchyCheck: {
// Walk over the class hierarchy to find a match.
- vixl::Label loop;
+ vixl::aarch64::Label loop;
__ Bind(&loop);
__ Cmp(temp, cls);
__ B(eq, &done);
@@ -3402,7 +3406,7 @@
case TypeCheckKind::kArrayObjectCheck: {
// Do an exact check.
- vixl::Label check_non_primitive_component_type;
+ vixl::aarch64::Label check_non_primitive_component_type;
__ Cmp(temp, cls);
__ B(eq, &done);
@@ -3628,17 +3632,17 @@
// Add ADRP with its PC-relative DexCache access patch.
const DexFile& dex_file = *invoke->GetTargetMethod().dex_file;
uint32_t element_offset = invoke->GetDexCacheArrayOffset();
- vixl::Label* adrp_label = NewPcRelativeDexCacheArrayPatch(dex_file, element_offset);
+ vixl::aarch64::Label* adrp_label = NewPcRelativeDexCacheArrayPatch(dex_file, element_offset);
{
- vixl::SingleEmissionCheckScope guard(GetVIXLAssembler());
+ SingleEmissionCheckScope guard(GetVIXLAssembler());
__ Bind(adrp_label);
__ adrp(XRegisterFrom(temp), /* offset placeholder */ 0);
}
// Add LDR with its PC-relative DexCache access patch.
- vixl::Label* ldr_label =
+ vixl::aarch64::Label* ldr_label =
NewPcRelativeDexCacheArrayPatch(dex_file, element_offset, adrp_label);
{
- vixl::SingleEmissionCheckScope guard(GetVIXLAssembler());
+ SingleEmissionCheckScope guard(GetVIXLAssembler());
__ Bind(ldr_label);
__ ldr(XRegisterFrom(temp), MemOperand(XRegisterFrom(temp), /* offset placeholder */ 0));
}
@@ -3675,8 +3679,8 @@
break;
case HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative: {
relative_call_patches_.emplace_back(invoke->GetTargetMethod());
- vixl::Label* label = &relative_call_patches_.back().label;
- vixl::SingleEmissionCheckScope guard(GetVIXLAssembler());
+ vixl::aarch64::Label* label = &relative_call_patches_.back().label;
+ SingleEmissionCheckScope guard(GetVIXLAssembler());
__ Bind(label);
__ bl(0); // Branch and link to itself. This will be overridden at link time.
break;
@@ -3735,58 +3739,64 @@
__ Blr(lr);
}
-vixl::Label* CodeGeneratorARM64::NewPcRelativeStringPatch(const DexFile& dex_file,
- uint32_t string_index,
- vixl::Label* adrp_label) {
+vixl::aarch64::Label* CodeGeneratorARM64::NewPcRelativeStringPatch(
+ const DexFile& dex_file,
+ uint32_t string_index,
+ vixl::aarch64::Label* adrp_label) {
return NewPcRelativePatch(dex_file, string_index, adrp_label, &pc_relative_string_patches_);
}
-vixl::Label* CodeGeneratorARM64::NewPcRelativeTypePatch(const DexFile& dex_file,
- uint32_t type_index,
- vixl::Label* adrp_label) {
+vixl::aarch64::Label* CodeGeneratorARM64::NewPcRelativeTypePatch(
+ const DexFile& dex_file,
+ uint32_t type_index,
+ vixl::aarch64::Label* adrp_label) {
return NewPcRelativePatch(dex_file, type_index, adrp_label, &pc_relative_type_patches_);
}
-vixl::Label* CodeGeneratorARM64::NewPcRelativeDexCacheArrayPatch(const DexFile& dex_file,
- uint32_t element_offset,
- vixl::Label* adrp_label) {
+vixl::aarch64::Label* CodeGeneratorARM64::NewPcRelativeDexCacheArrayPatch(
+ const DexFile& dex_file,
+ uint32_t element_offset,
+ vixl::aarch64::Label* adrp_label) {
return NewPcRelativePatch(dex_file, element_offset, adrp_label, &pc_relative_dex_cache_patches_);
}
-vixl::Label* CodeGeneratorARM64::NewPcRelativePatch(const DexFile& dex_file,
- uint32_t offset_or_index,
- vixl::Label* adrp_label,
- ArenaDeque<PcRelativePatchInfo>* patches) {
+vixl::aarch64::Label* CodeGeneratorARM64::NewPcRelativePatch(
+ const DexFile& dex_file,
+ uint32_t offset_or_index,
+ vixl::aarch64::Label* adrp_label,
+ ArenaDeque<PcRelativePatchInfo>* patches) {
// Add a patch entry and return the label.
patches->emplace_back(dex_file, offset_or_index);
PcRelativePatchInfo* info = &patches->back();
- vixl::Label* label = &info->label;
+ vixl::aarch64::Label* label = &info->label;
// If adrp_label is null, this is the ADRP patch and needs to point to its own label.
info->pc_insn_label = (adrp_label != nullptr) ? adrp_label : label;
return label;
}
-vixl::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateBootImageStringLiteral(
+vixl::aarch64::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateBootImageStringLiteral(
const DexFile& dex_file, uint32_t string_index) {
return boot_image_string_patches_.GetOrCreate(
StringReference(&dex_file, string_index),
[this]() { return __ CreateLiteralDestroyedWithPool<uint32_t>(/* placeholder */ 0u); });
}
-vixl::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateBootImageTypeLiteral(
+vixl::aarch64::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateBootImageTypeLiteral(
const DexFile& dex_file, uint32_t type_index) {
return boot_image_type_patches_.GetOrCreate(
TypeReference(&dex_file, type_index),
[this]() { return __ CreateLiteralDestroyedWithPool<uint32_t>(/* placeholder */ 0u); });
}
-vixl::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateBootImageAddressLiteral(uint64_t address) {
+vixl::aarch64::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateBootImageAddressLiteral(
+ uint64_t address) {
bool needs_patch = GetCompilerOptions().GetIncludePatchInformation();
Uint32ToLiteralMap* map = needs_patch ? &boot_image_address_patches_ : &uint32_literals_;
return DeduplicateUint32Literal(dchecked_integral_cast<uint32_t>(address), map);
}
-vixl::Literal<uint64_t>* CodeGeneratorARM64::DeduplicateDexCacheAddressLiteral(uint64_t address) {
+vixl::aarch64::Literal<uint64_t>* CodeGeneratorARM64::DeduplicateDexCacheAddressLiteral(
+ uint64_t address) {
return DeduplicateUint64Literal(address);
}
@@ -3805,76 +3815,76 @@
linker_patches->reserve(size);
for (const auto& entry : method_patches_) {
const MethodReference& target_method = entry.first;
- vixl::Literal<uint64_t>* literal = entry.second;
- linker_patches->push_back(LinkerPatch::MethodPatch(literal->offset(),
+ vixl::aarch64::Literal<uint64_t>* literal = entry.second;
+ linker_patches->push_back(LinkerPatch::MethodPatch(literal->GetOffset(),
target_method.dex_file,
target_method.dex_method_index));
}
for (const auto& entry : call_patches_) {
const MethodReference& target_method = entry.first;
- vixl::Literal<uint64_t>* literal = entry.second;
- linker_patches->push_back(LinkerPatch::CodePatch(literal->offset(),
+ vixl::aarch64::Literal<uint64_t>* literal = entry.second;
+ linker_patches->push_back(LinkerPatch::CodePatch(literal->GetOffset(),
target_method.dex_file,
target_method.dex_method_index));
}
- for (const MethodPatchInfo<vixl::Label>& info : relative_call_patches_) {
- linker_patches->push_back(LinkerPatch::RelativeCodePatch(info.label.location(),
+ for (const MethodPatchInfo<vixl::aarch64::Label>& info : relative_call_patches_) {
+ linker_patches->push_back(LinkerPatch::RelativeCodePatch(info.label.GetLocation(),
info.target_method.dex_file,
info.target_method.dex_method_index));
}
for (const PcRelativePatchInfo& info : pc_relative_dex_cache_patches_) {
- linker_patches->push_back(LinkerPatch::DexCacheArrayPatch(info.label.location(),
+ linker_patches->push_back(LinkerPatch::DexCacheArrayPatch(info.label.GetLocation(),
&info.target_dex_file,
- info.pc_insn_label->location(),
+ info.pc_insn_label->GetLocation(),
info.offset_or_index));
}
for (const auto& entry : boot_image_string_patches_) {
const StringReference& target_string = entry.first;
- vixl::Literal<uint32_t>* literal = entry.second;
- linker_patches->push_back(LinkerPatch::StringPatch(literal->offset(),
+ vixl::aarch64::Literal<uint32_t>* literal = entry.second;
+ linker_patches->push_back(LinkerPatch::StringPatch(literal->GetOffset(),
target_string.dex_file,
target_string.string_index));
}
for (const PcRelativePatchInfo& info : pc_relative_string_patches_) {
- linker_patches->push_back(LinkerPatch::RelativeStringPatch(info.label.location(),
+ linker_patches->push_back(LinkerPatch::RelativeStringPatch(info.label.GetLocation(),
&info.target_dex_file,
- info.pc_insn_label->location(),
+ info.pc_insn_label->GetLocation(),
info.offset_or_index));
}
for (const auto& entry : boot_image_type_patches_) {
const TypeReference& target_type = entry.first;
- vixl::Literal<uint32_t>* literal = entry.second;
- linker_patches->push_back(LinkerPatch::TypePatch(literal->offset(),
+ vixl::aarch64::Literal<uint32_t>* literal = entry.second;
+ linker_patches->push_back(LinkerPatch::TypePatch(literal->GetOffset(),
target_type.dex_file,
target_type.type_index));
}
for (const PcRelativePatchInfo& info : pc_relative_type_patches_) {
- linker_patches->push_back(LinkerPatch::RelativeTypePatch(info.label.location(),
+ linker_patches->push_back(LinkerPatch::RelativeTypePatch(info.label.GetLocation(),
&info.target_dex_file,
- info.pc_insn_label->location(),
+ info.pc_insn_label->GetLocation(),
info.offset_or_index));
}
for (const auto& entry : boot_image_address_patches_) {
DCHECK(GetCompilerOptions().GetIncludePatchInformation());
- vixl::Literal<uint32_t>* literal = entry.second;
- linker_patches->push_back(LinkerPatch::RecordPosition(literal->offset()));
+ vixl::aarch64::Literal<uint32_t>* literal = entry.second;
+ linker_patches->push_back(LinkerPatch::RecordPosition(literal->GetOffset()));
}
}
-vixl::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateUint32Literal(uint32_t value,
+vixl::aarch64::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateUint32Literal(uint32_t value,
Uint32ToLiteralMap* map) {
return map->GetOrCreate(
value,
[this, value]() { return __ CreateLiteralDestroyedWithPool<uint32_t>(value); });
}
-vixl::Literal<uint64_t>* CodeGeneratorARM64::DeduplicateUint64Literal(uint64_t value) {
+vixl::aarch64::Literal<uint64_t>* CodeGeneratorARM64::DeduplicateUint64Literal(uint64_t value) {
return uint64_literals_.GetOrCreate(
value,
[this, value]() { return __ CreateLiteralDestroyedWithPool<uint64_t>(value); });
}
-vixl::Literal<uint64_t>* CodeGeneratorARM64::DeduplicateMethodLiteral(
+vixl::aarch64::Literal<uint64_t>* CodeGeneratorARM64::DeduplicateMethodLiteral(
MethodReference target_method,
MethodToLiteralMap* map) {
return map->GetOrCreate(
@@ -3882,12 +3892,12 @@
[this]() { return __ CreateLiteralDestroyedWithPool<uint64_t>(/* placeholder */ 0u); });
}
-vixl::Literal<uint64_t>* CodeGeneratorARM64::DeduplicateMethodAddressLiteral(
+vixl::aarch64::Literal<uint64_t>* CodeGeneratorARM64::DeduplicateMethodAddressLiteral(
MethodReference target_method) {
return DeduplicateMethodLiteral(target_method, &method_patches_);
}
-vixl::Literal<uint64_t>* CodeGeneratorARM64::DeduplicateMethodCodeLiteral(
+vixl::aarch64::Literal<uint64_t>* CodeGeneratorARM64::DeduplicateMethodCodeLiteral(
MethodReference target_method) {
return DeduplicateMethodLiteral(target_method, &call_patches_);
}
@@ -3961,7 +3971,7 @@
CodeGenerator::CreateLoadClassLocationSummary(
cls,
LocationFrom(calling_convention.GetRegisterAt(0)),
- LocationFrom(vixl::x0),
+ LocationFrom(vixl::aarch64::x0),
/* code_generator_supports_read_barrier */ true);
return;
}
@@ -4013,16 +4023,17 @@
// Add ADRP with its PC-relative type patch.
const DexFile& dex_file = cls->GetDexFile();
uint32_t type_index = cls->GetTypeIndex();
- vixl::Label* adrp_label = codegen_->NewPcRelativeTypePatch(dex_file, type_index);
+ vixl::aarch64::Label* adrp_label = codegen_->NewPcRelativeTypePatch(dex_file, type_index);
{
- vixl::SingleEmissionCheckScope guard(GetVIXLAssembler());
+ SingleEmissionCheckScope guard(GetVIXLAssembler());
__ Bind(adrp_label);
__ adrp(out.X(), /* offset placeholder */ 0);
}
// Add ADD with its PC-relative type patch.
- vixl::Label* add_label = codegen_->NewPcRelativeTypePatch(dex_file, type_index, adrp_label);
+ vixl::aarch64::Label* add_label =
+ codegen_->NewPcRelativeTypePatch(dex_file, type_index, adrp_label);
{
- vixl::SingleEmissionCheckScope guard(GetVIXLAssembler());
+ SingleEmissionCheckScope guard(GetVIXLAssembler());
__ Bind(add_label);
__ add(out.X(), out.X(), Operand(/* offset placeholder */ 0));
}
@@ -4055,14 +4066,15 @@
// Add ADRP with its PC-relative DexCache access patch.
const DexFile& dex_file = cls->GetDexFile();
uint32_t element_offset = cls->GetDexCacheElementOffset();
- vixl::Label* adrp_label = codegen_->NewPcRelativeDexCacheArrayPatch(dex_file, element_offset);
+ vixl::aarch64::Label* adrp_label =
+ codegen_->NewPcRelativeDexCacheArrayPatch(dex_file, element_offset);
{
- vixl::SingleEmissionCheckScope guard(GetVIXLAssembler());
+ SingleEmissionCheckScope guard(GetVIXLAssembler());
__ Bind(adrp_label);
__ adrp(out.X(), /* offset placeholder */ 0);
}
// Add LDR with its PC-relative DexCache access patch.
- vixl::Label* ldr_label =
+ vixl::aarch64::Label* ldr_label =
codegen_->NewPcRelativeDexCacheArrayPatch(dex_file, element_offset, adrp_label);
// /* GcRoot<mirror::Class> */ out = *(base_address + offset) /* PC-relative */
GenerateGcRootFieldLoad(cls, out_loc, out.X(), /* offset placeholder */ 0, ldr_label);
@@ -4182,17 +4194,17 @@
// Add ADRP with its PC-relative String patch.
const DexFile& dex_file = load->GetDexFile();
uint32_t string_index = load->GetStringIndex();
- vixl::Label* adrp_label = codegen_->NewPcRelativeStringPatch(dex_file, string_index);
+ vixl::aarch64::Label* adrp_label = codegen_->NewPcRelativeStringPatch(dex_file, string_index);
{
- vixl::SingleEmissionCheckScope guard(GetVIXLAssembler());
+ SingleEmissionCheckScope guard(GetVIXLAssembler());
__ Bind(adrp_label);
__ adrp(out.X(), /* offset placeholder */ 0);
}
// Add ADD with its PC-relative String patch.
- vixl::Label* add_label =
+ vixl::aarch64::Label* add_label =
codegen_->NewPcRelativeStringPatch(dex_file, string_index, adrp_label);
{
- vixl::SingleEmissionCheckScope guard(GetVIXLAssembler());
+ SingleEmissionCheckScope guard(GetVIXLAssembler());
__ Bind(add_label);
__ add(out.X(), out.X(), Operand(/* offset placeholder */ 0));
}
@@ -4224,14 +4236,15 @@
// Add ADRP with its PC-relative DexCache access patch.
const DexFile& dex_file = load->GetDexFile();
uint32_t element_offset = load->GetDexCacheElementOffset();
- vixl::Label* adrp_label = codegen_->NewPcRelativeDexCacheArrayPatch(dex_file, element_offset);
+ vixl::aarch64::Label* adrp_label =
+ codegen_->NewPcRelativeDexCacheArrayPatch(dex_file, element_offset);
{
- vixl::SingleEmissionCheckScope guard(GetVIXLAssembler());
+ SingleEmissionCheckScope guard(GetVIXLAssembler());
__ Bind(adrp_label);
__ adrp(out.X(), /* offset placeholder */ 0);
}
// Add LDR with its PC-relative DexCache access patch.
- vixl::Label* ldr_label =
+ vixl::aarch64::Label* ldr_label =
codegen_->NewPcRelativeDexCacheArrayPatch(dex_file, element_offset, adrp_label);
// /* GcRoot<mirror::String> */ out = *(base_address + offset) /* PC-relative */
GenerateGcRootFieldLoad(load, out_loc, out.X(), /* offset placeholder */ 0, ldr_label);
@@ -4452,7 +4465,7 @@
}
void InstructionCodeGeneratorARM64::VisitBooleanNot(HBooleanNot* instruction) {
- __ Eor(OutputRegister(instruction), InputRegisterAt(instruction, 0), vixl::Operand(1));
+ __ Eor(OutputRegister(instruction), InputRegisterAt(instruction, 0), vixl::aarch64::Operand(1));
}
void LocationsBuilderARM64::VisitNullCheck(HNullCheck* instruction) {
@@ -4885,7 +4898,7 @@
HBasicBlock* default_block = switch_instr->GetDefaultBlock();
// Roughly set 16 as max average assemblies generated per HIR in a graph.
- static constexpr int32_t kMaxExpectedSizePerHInstruction = 16 * vixl::kInstructionSize;
+ static constexpr int32_t kMaxExpectedSizePerHInstruction = 16 * kInstructionSize;
// ADR has a limited range(+/-1MB), so we set a threshold for the number of HIRs in the graph to
// make sure we don't emit it if the target may run out of range.
// TODO: Instead of emitting all jump tables at the end of the code, we could keep track of ADR
@@ -5030,9 +5043,9 @@
void InstructionCodeGeneratorARM64::GenerateGcRootFieldLoad(HInstruction* instruction,
Location root,
- vixl::Register obj,
+ Register obj,
uint32_t offset,
- vixl::Label* fixup_label) {
+ vixl::aarch64::Label* fixup_label) {
Register root_reg = RegisterFrom(root, Primitive::kPrimNot);
if (kEmitCompilerReadBarrier) {
if (kUseBakerReadBarrier) {
@@ -5048,7 +5061,7 @@
if (fixup_label == nullptr) {
__ Ldr(root_reg, MemOperand(obj, offset));
} else {
- vixl::SingleEmissionCheckScope guard(GetVIXLAssembler());
+ SingleEmissionCheckScope guard(GetVIXLAssembler());
__ Bind(fixup_label);
__ ldr(root_reg, MemOperand(obj, offset));
}
@@ -5079,7 +5092,7 @@
if (fixup_label == nullptr) {
__ Add(root_reg.X(), obj.X(), offset);
} else {
- vixl::SingleEmissionCheckScope guard(GetVIXLAssembler());
+ SingleEmissionCheckScope guard(GetVIXLAssembler());
__ Bind(fixup_label);
__ add(root_reg.X(), obj.X(), offset);
}
@@ -5092,7 +5105,7 @@
if (fixup_label == nullptr) {
__ Ldr(root_reg, MemOperand(obj, offset));
} else {
- vixl::SingleEmissionCheckScope guard(GetVIXLAssembler());
+ SingleEmissionCheckScope guard(GetVIXLAssembler());
__ Bind(fixup_label);
__ ldr(root_reg, MemOperand(obj, offset));
}
@@ -5103,7 +5116,7 @@
void CodeGeneratorARM64::GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
Location ref,
- vixl::Register obj,
+ Register obj,
uint32_t offset,
Register temp,
bool needs_null_check,
@@ -5127,7 +5140,7 @@
void CodeGeneratorARM64::GenerateArrayLoadWithBakerReadBarrier(HInstruction* instruction,
Location ref,
- vixl::Register obj,
+ Register obj,
uint32_t data_offset,
Location index,
Register temp,
@@ -5158,7 +5171,7 @@
void CodeGeneratorARM64::GenerateReferenceLoadWithBakerReadBarrier(HInstruction* instruction,
Location ref,
- vixl::Register obj,
+ Register obj,
uint32_t offset,
Location index,
size_t scale_factor,
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index d4bf695..63f8951 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -27,8 +27,13 @@
#include "utils/arm64/assembler_arm64.h"
#include "utils/string_reference.h"
#include "utils/type_reference.h"
-#include "vixl/a64/disasm-a64.h"
-#include "vixl/a64/macro-assembler-a64.h"
+
+// TODO: make vixl clean wrt -Wshadow.
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wshadow"
+#include "a64/disasm-a64.h"
+#include "a64/macro-assembler-a64.h"
+#pragma GCC diagnostic pop
namespace art {
namespace arm64 {
@@ -38,32 +43,47 @@
// Use a local definition to prevent copying mistakes.
static constexpr size_t kArm64WordSize = kArm64PointerSize;
-static const vixl::Register kParameterCoreRegisters[] = {
- vixl::x1, vixl::x2, vixl::x3, vixl::x4, vixl::x5, vixl::x6, vixl::x7
+static const vixl::aarch64::Register kParameterCoreRegisters[] = {
+ vixl::aarch64::x1,
+ vixl::aarch64::x2,
+ vixl::aarch64::x3,
+ vixl::aarch64::x4,
+ vixl::aarch64::x5,
+ vixl::aarch64::x6,
+ vixl::aarch64::x7
};
static constexpr size_t kParameterCoreRegistersLength = arraysize(kParameterCoreRegisters);
-static const vixl::FPRegister kParameterFPRegisters[] = {
- vixl::d0, vixl::d1, vixl::d2, vixl::d3, vixl::d4, vixl::d5, vixl::d6, vixl::d7
+static const vixl::aarch64::FPRegister kParameterFPRegisters[] = {
+ vixl::aarch64::d0,
+ vixl::aarch64::d1,
+ vixl::aarch64::d2,
+ vixl::aarch64::d3,
+ vixl::aarch64::d4,
+ vixl::aarch64::d5,
+ vixl::aarch64::d6,
+ vixl::aarch64::d7
};
static constexpr size_t kParameterFPRegistersLength = arraysize(kParameterFPRegisters);
-const vixl::Register tr = vixl::x19; // Thread Register
-static const vixl::Register kArtMethodRegister = vixl::x0; // Method register on invoke.
+// Thread Register
+const vixl::aarch64::Register tr = vixl::aarch64::x19;
+// Method register on invoke.
+static const vixl::aarch64::Register kArtMethodRegister = vixl::aarch64::x0;
+const vixl::aarch64::CPURegList vixl_reserved_core_registers(vixl::aarch64::ip0,
+ vixl::aarch64::ip1);
+const vixl::aarch64::CPURegList vixl_reserved_fp_registers(vixl::aarch64::d31);
-const vixl::CPURegList vixl_reserved_core_registers(vixl::ip0, vixl::ip1);
-const vixl::CPURegList vixl_reserved_fp_registers(vixl::d31);
-
-const vixl::CPURegList runtime_reserved_core_registers(tr, vixl::lr);
+const vixl::aarch64::CPURegList runtime_reserved_core_registers(tr, vixl::aarch64::lr);
// Callee-saved registers AAPCS64 (without x19 - Thread Register)
-const vixl::CPURegList callee_saved_core_registers(vixl::CPURegister::kRegister,
- vixl::kXRegSize,
- vixl::x20.code(),
- vixl::x30.code());
-const vixl::CPURegList callee_saved_fp_registers(vixl::CPURegister::kFPRegister,
- vixl::kDRegSize,
- vixl::d8.code(),
- vixl::d15.code());
+const vixl::aarch64::CPURegList callee_saved_core_registers(vixl::aarch64::CPURegister::kRegister,
+ vixl::aarch64::kXRegSize,
+ vixl::aarch64::x20.GetCode(),
+ vixl::aarch64::x30.GetCode());
+const vixl::aarch64::CPURegList callee_saved_fp_registers(vixl::aarch64::CPURegister::kFPRegister,
+ vixl::aarch64::kDRegSize,
+ vixl::aarch64::d8.GetCode(),
+ vixl::aarch64::d15.GetCode());
Location ARM64ReturnLocation(Primitive::Type return_type);
class SlowPathCodeARM64 : public SlowPathCode {
@@ -71,15 +91,15 @@
explicit SlowPathCodeARM64(HInstruction* instruction)
: SlowPathCode(instruction), entry_label_(), exit_label_() {}
- vixl::Label* GetEntryLabel() { return &entry_label_; }
- vixl::Label* GetExitLabel() { return &exit_label_; }
+ vixl::aarch64::Label* GetEntryLabel() { return &entry_label_; }
+ vixl::aarch64::Label* GetExitLabel() { return &exit_label_; }
void SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) OVERRIDE;
void RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) OVERRIDE;
private:
- vixl::Label entry_label_;
- vixl::Label exit_label_;
+ vixl::aarch64::Label entry_label_;
+ vixl::aarch64::Label exit_label_;
DISALLOW_COPY_AND_ASSIGN(SlowPathCodeARM64);
};
@@ -89,27 +109,42 @@
explicit JumpTableARM64(HPackedSwitch* switch_instr)
: switch_instr_(switch_instr), table_start_() {}
- vixl::Label* GetTableStartLabel() { return &table_start_; }
+ vixl::aarch64::Label* GetTableStartLabel() { return &table_start_; }
void EmitTable(CodeGeneratorARM64* codegen);
private:
HPackedSwitch* const switch_instr_;
- vixl::Label table_start_;
+ vixl::aarch64::Label table_start_;
DISALLOW_COPY_AND_ASSIGN(JumpTableARM64);
};
-static const vixl::Register kRuntimeParameterCoreRegisters[] =
- { vixl::x0, vixl::x1, vixl::x2, vixl::x3, vixl::x4, vixl::x5, vixl::x6, vixl::x7 };
+static const vixl::aarch64::Register kRuntimeParameterCoreRegisters[] =
+ { vixl::aarch64::x0,
+ vixl::aarch64::x1,
+ vixl::aarch64::x2,
+ vixl::aarch64::x3,
+ vixl::aarch64::x4,
+ vixl::aarch64::x5,
+ vixl::aarch64::x6,
+ vixl::aarch64::x7 };
static constexpr size_t kRuntimeParameterCoreRegistersLength =
arraysize(kRuntimeParameterCoreRegisters);
-static const vixl::FPRegister kRuntimeParameterFpuRegisters[] =
- { vixl::d0, vixl::d1, vixl::d2, vixl::d3, vixl::d4, vixl::d5, vixl::d6, vixl::d7 };
+static const vixl::aarch64::FPRegister kRuntimeParameterFpuRegisters[] =
+ { vixl::aarch64::d0,
+ vixl::aarch64::d1,
+ vixl::aarch64::d2,
+ vixl::aarch64::d3,
+ vixl::aarch64::d4,
+ vixl::aarch64::d5,
+ vixl::aarch64::d6,
+ vixl::aarch64::d7 };
static constexpr size_t kRuntimeParameterFpuRegistersLength =
arraysize(kRuntimeParameterCoreRegisters);
-class InvokeRuntimeCallingConvention : public CallingConvention<vixl::Register, vixl::FPRegister> {
+class InvokeRuntimeCallingConvention : public CallingConvention<vixl::aarch64::Register,
+ vixl::aarch64::FPRegister> {
public:
static constexpr size_t kParameterCoreRegistersLength = arraysize(kParameterCoreRegisters);
@@ -126,7 +161,8 @@
DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConvention);
};
-class InvokeDexCallingConvention : public CallingConvention<vixl::Register, vixl::FPRegister> {
+class InvokeDexCallingConvention : public CallingConvention<vixl::aarch64::Register,
+ vixl::aarch64::FPRegister> {
public:
InvokeDexCallingConvention()
: CallingConvention(kParameterCoreRegisters,
@@ -166,23 +202,23 @@
FieldAccessCallingConventionARM64() {}
Location GetObjectLocation() const OVERRIDE {
- return helpers::LocationFrom(vixl::x1);
+ return helpers::LocationFrom(vixl::aarch64::x1);
}
Location GetFieldIndexLocation() const OVERRIDE {
- return helpers::LocationFrom(vixl::x0);
+ return helpers::LocationFrom(vixl::aarch64::x0);
}
Location GetReturnLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
- return helpers::LocationFrom(vixl::x0);
+ return helpers::LocationFrom(vixl::aarch64::x0);
}
Location GetSetValueLocation(Primitive::Type type, bool is_instance) const OVERRIDE {
return Primitive::Is64BitType(type)
- ? helpers::LocationFrom(vixl::x2)
+ ? helpers::LocationFrom(vixl::aarch64::x2)
: (is_instance
- ? helpers::LocationFrom(vixl::x2)
- : helpers::LocationFrom(vixl::x1));
+ ? helpers::LocationFrom(vixl::aarch64::x2)
+ : helpers::LocationFrom(vixl::aarch64::x1));
}
Location GetFpuLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
- return helpers::LocationFrom(vixl::d0);
+ return helpers::LocationFrom(vixl::aarch64::d0);
}
private:
@@ -208,10 +244,11 @@
}
Arm64Assembler* GetAssembler() const { return assembler_; }
- vixl::MacroAssembler* GetVIXLAssembler() { return GetAssembler()->vixl_masm_; }
+ vixl::aarch64::MacroAssembler* GetVIXLAssembler() { return GetAssembler()->vixl_masm_; }
private:
- void GenerateClassInitializationCheck(SlowPathCodeARM64* slow_path, vixl::Register class_reg);
+ void GenerateClassInitializationCheck(SlowPathCodeARM64* slow_path,
+ vixl::aarch64::Register class_reg);
void GenerateSuspendCheck(HSuspendCheck* instruction, HBasicBlock* successor);
void HandleBinaryOp(HBinaryOperation* instr);
@@ -256,9 +293,9 @@
// while honoring read barriers (if any).
void GenerateGcRootFieldLoad(HInstruction* instruction,
Location root,
- vixl::Register obj,
+ vixl::aarch64::Register obj,
uint32_t offset,
- vixl::Label* fixup_label = nullptr);
+ vixl::aarch64::Label* fixup_label = nullptr);
// Generate a floating-point comparison.
void GenerateFcmp(HInstruction* instruction);
@@ -266,8 +303,8 @@
void HandleShift(HBinaryOperation* instr);
void GenerateTestAndBranch(HInstruction* instruction,
size_t condition_input_index,
- vixl::Label* true_target,
- vixl::Label* false_target);
+ vixl::aarch64::Label* true_target,
+ vixl::aarch64::Label* false_target);
void DivRemOneOrMinusOne(HBinaryOperation* instruction);
void DivRemByPowerOfTwo(HBinaryOperation* instruction);
void GenerateDivRemWithAnyConstant(HBinaryOperation* instruction);
@@ -327,12 +364,12 @@
private:
Arm64Assembler* GetAssembler() const;
- vixl::MacroAssembler* GetVIXLAssembler() const {
+ vixl::aarch64::MacroAssembler* GetVIXLAssembler() const {
return GetAssembler()->vixl_masm_;
}
CodeGeneratorARM64* const codegen_;
- vixl::UseScratchRegisterScope vixl_temps_;
+ vixl::aarch64::UseScratchRegisterScope vixl_temps_;
DISALLOW_COPY_AND_ASSIGN(ParallelMoveResolverARM64);
};
@@ -348,12 +385,12 @@
void GenerateFrameEntry() OVERRIDE;
void GenerateFrameExit() OVERRIDE;
- vixl::CPURegList GetFramePreservedCoreRegisters() const;
- vixl::CPURegList GetFramePreservedFPRegisters() const;
+ vixl::aarch64::CPURegList GetFramePreservedCoreRegisters() const;
+ vixl::aarch64::CPURegList GetFramePreservedFPRegisters() const;
void Bind(HBasicBlock* block) OVERRIDE;
- vixl::Label* GetLabelOf(HBasicBlock* block) {
+ vixl::aarch64::Label* GetLabelOf(HBasicBlock* block) {
block = FirstNonEmptyBlock(block);
return &(block_labels_[block->GetBlockId()]);
}
@@ -368,19 +405,21 @@
}
uintptr_t GetAddressOf(HBasicBlock* block) OVERRIDE {
- vixl::Label* block_entry_label = GetLabelOf(block);
+ vixl::aarch64::Label* block_entry_label = GetLabelOf(block);
DCHECK(block_entry_label->IsBound());
- return block_entry_label->location();
+ return block_entry_label->GetLocation();
}
HGraphVisitor* GetLocationBuilder() OVERRIDE { return &location_builder_; }
HGraphVisitor* GetInstructionVisitor() OVERRIDE { return &instruction_visitor_; }
Arm64Assembler* GetAssembler() OVERRIDE { return &assembler_; }
const Arm64Assembler& GetAssembler() const OVERRIDE { return assembler_; }
- vixl::MacroAssembler* GetVIXLAssembler() { return GetAssembler()->vixl_masm_; }
+ vixl::aarch64::MacroAssembler* GetVIXLAssembler() { return GetAssembler()->vixl_masm_; }
// Emit a write barrier.
- void MarkGCCard(vixl::Register object, vixl::Register value, bool value_can_be_null);
+ void MarkGCCard(vixl::aarch64::Register object,
+ vixl::aarch64::Register value,
+ bool value_can_be_null);
void GenerateMemoryBarrier(MemBarrierKind kind);
@@ -399,8 +438,8 @@
// (xzr, wzr), or make for poor allocatable registers (sp alignment
// requirements, etc.). This also facilitates our task as all other registers
// can easily be mapped via to or from their type and index or code.
- static const int kNumberOfAllocatableRegisters = vixl::kNumberOfRegisters - 1;
- static const int kNumberOfAllocatableFPRegisters = vixl::kNumberOfFPRegisters;
+ static const int kNumberOfAllocatableRegisters = vixl::aarch64::kNumberOfRegisters - 1;
+ static const int kNumberOfAllocatableFPRegisters = vixl::aarch64::kNumberOfFPRegisters;
static constexpr int kNumberOfAllocatableRegisterPairs = 0;
void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
@@ -426,18 +465,24 @@
void Finalize(CodeAllocator* allocator) OVERRIDE;
// Code generation helpers.
- void MoveConstant(vixl::CPURegister destination, HConstant* constant);
+ void MoveConstant(vixl::aarch64::CPURegister destination, HConstant* constant);
void MoveConstant(Location destination, int32_t value) OVERRIDE;
void MoveLocation(Location dst, Location src, Primitive::Type dst_type) OVERRIDE;
void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE;
- void Load(Primitive::Type type, vixl::CPURegister dst, const vixl::MemOperand& src);
- void Store(Primitive::Type type, vixl::CPURegister src, const vixl::MemOperand& dst);
+ void Load(Primitive::Type type,
+ vixl::aarch64::CPURegister dst,
+ const vixl::aarch64::MemOperand& src);
+ void Store(Primitive::Type type,
+ vixl::aarch64::CPURegister src,
+ const vixl::aarch64::MemOperand& dst);
void LoadAcquire(HInstruction* instruction,
- vixl::CPURegister dst,
- const vixl::MemOperand& src,
+ vixl::aarch64::CPURegister dst,
+ const vixl::aarch64::MemOperand& src,
bool needs_null_check);
- void StoreRelease(Primitive::Type type, vixl::CPURegister src, const vixl::MemOperand& dst);
+ void StoreRelease(Primitive::Type type,
+ vixl::aarch64::CPURegister src,
+ const vixl::aarch64::MemOperand& dst);
// Generate code to invoke a runtime entry point.
void InvokeRuntime(QuickEntrypointEnum entrypoint,
@@ -484,32 +529,33 @@
// to be bound before the instruction. The instruction will be either the
// ADRP (pass `adrp_label = null`) or the ADD (pass `adrp_label` pointing
// to the associated ADRP patch label).
- vixl::Label* NewPcRelativeStringPatch(const DexFile& dex_file,
- uint32_t string_index,
- vixl::Label* adrp_label = nullptr);
+ vixl::aarch64::Label* NewPcRelativeStringPatch(const DexFile& dex_file,
+ uint32_t string_index,
+ vixl::aarch64::Label* adrp_label = nullptr);
// Add a new PC-relative type patch for an instruction and return the label
// to be bound before the instruction. The instruction will be either the
// ADRP (pass `adrp_label = null`) or the ADD (pass `adrp_label` pointing
// to the associated ADRP patch label).
- vixl::Label* NewPcRelativeTypePatch(const DexFile& dex_file,
- uint32_t type_index,
- vixl::Label* adrp_label = nullptr);
+ vixl::aarch64::Label* NewPcRelativeTypePatch(const DexFile& dex_file,
+ uint32_t type_index,
+ vixl::aarch64::Label* adrp_label = nullptr);
// Add a new PC-relative dex cache array patch for an instruction and return
// the label to be bound before the instruction. The instruction will be
// either the ADRP (pass `adrp_label = null`) or the LDR (pass `adrp_label`
// pointing to the associated ADRP patch label).
- vixl::Label* NewPcRelativeDexCacheArrayPatch(const DexFile& dex_file,
- uint32_t element_offset,
- vixl::Label* adrp_label = nullptr);
+ vixl::aarch64::Label* NewPcRelativeDexCacheArrayPatch(
+ const DexFile& dex_file,
+ uint32_t element_offset,
+ vixl::aarch64::Label* adrp_label = nullptr);
- vixl::Literal<uint32_t>* DeduplicateBootImageStringLiteral(const DexFile& dex_file,
- uint32_t string_index);
- vixl::Literal<uint32_t>* DeduplicateBootImageTypeLiteral(const DexFile& dex_file,
- uint32_t type_index);
- vixl::Literal<uint32_t>* DeduplicateBootImageAddressLiteral(uint64_t address);
- vixl::Literal<uint64_t>* DeduplicateDexCacheAddressLiteral(uint64_t address);
+ vixl::aarch64::Literal<uint32_t>* DeduplicateBootImageStringLiteral(const DexFile& dex_file,
+ uint32_t string_index);
+ vixl::aarch64::Literal<uint32_t>* DeduplicateBootImageTypeLiteral(const DexFile& dex_file,
+ uint32_t type_index);
+ vixl::aarch64::Literal<uint32_t>* DeduplicateBootImageAddressLiteral(uint64_t address);
+ vixl::aarch64::Literal<uint64_t>* DeduplicateDexCacheAddressLiteral(uint64_t address);
void EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) OVERRIDE;
@@ -517,29 +563,29 @@
// reference field load when Baker's read barriers are used.
void GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
Location ref,
- vixl::Register obj,
+ vixl::aarch64::Register obj,
uint32_t offset,
- vixl::Register temp,
+ vixl::aarch64::Register temp,
bool needs_null_check,
bool use_load_acquire);
// Fast path implementation of ReadBarrier::Barrier for a heap
// reference array load when Baker's read barriers are used.
void GenerateArrayLoadWithBakerReadBarrier(HInstruction* instruction,
Location ref,
- vixl::Register obj,
+ vixl::aarch64::Register obj,
uint32_t data_offset,
Location index,
- vixl::Register temp,
+ vixl::aarch64::Register temp,
bool needs_null_check);
// Factored implementation used by GenerateFieldLoadWithBakerReadBarrier
// and GenerateArrayLoadWithBakerReadBarrier.
void GenerateReferenceLoadWithBakerReadBarrier(HInstruction* instruction,
Location ref,
- vixl::Register obj,
+ vixl::aarch64::Register obj,
uint32_t offset,
Location index,
size_t scale_factor,
- vixl::Register temp,
+ vixl::aarch64::Register temp,
bool needs_null_check,
bool use_load_acquire);
@@ -597,24 +643,25 @@
void GenerateExplicitNullCheck(HNullCheck* instruction);
private:
- using Uint64ToLiteralMap = ArenaSafeMap<uint64_t, vixl::Literal<uint64_t>*>;
- using Uint32ToLiteralMap = ArenaSafeMap<uint32_t, vixl::Literal<uint32_t>*>;
+ using Uint64ToLiteralMap = ArenaSafeMap<uint64_t, vixl::aarch64::Literal<uint64_t>*>;
+ using Uint32ToLiteralMap = ArenaSafeMap<uint32_t, vixl::aarch64::Literal<uint32_t>*>;
using MethodToLiteralMap = ArenaSafeMap<MethodReference,
- vixl::Literal<uint64_t>*,
+ vixl::aarch64::Literal<uint64_t>*,
MethodReferenceComparator>;
using BootStringToLiteralMap = ArenaSafeMap<StringReference,
- vixl::Literal<uint32_t>*,
+ vixl::aarch64::Literal<uint32_t>*,
StringReferenceValueComparator>;
using BootTypeToLiteralMap = ArenaSafeMap<TypeReference,
- vixl::Literal<uint32_t>*,
+ vixl::aarch64::Literal<uint32_t>*,
TypeReferenceValueComparator>;
- vixl::Literal<uint32_t>* DeduplicateUint32Literal(uint32_t value, Uint32ToLiteralMap* map);
- vixl::Literal<uint64_t>* DeduplicateUint64Literal(uint64_t value);
- vixl::Literal<uint64_t>* DeduplicateMethodLiteral(MethodReference target_method,
- MethodToLiteralMap* map);
- vixl::Literal<uint64_t>* DeduplicateMethodAddressLiteral(MethodReference target_method);
- vixl::Literal<uint64_t>* DeduplicateMethodCodeLiteral(MethodReference target_method);
+ vixl::aarch64::Literal<uint32_t>* DeduplicateUint32Literal(uint32_t value,
+ Uint32ToLiteralMap* map);
+ vixl::aarch64::Literal<uint64_t>* DeduplicateUint64Literal(uint64_t value);
+ vixl::aarch64::Literal<uint64_t>* DeduplicateMethodLiteral(MethodReference target_method,
+ MethodToLiteralMap* map);
+ vixl::aarch64::Literal<uint64_t>* DeduplicateMethodAddressLiteral(MethodReference target_method);
+ vixl::aarch64::Literal<uint64_t>* DeduplicateMethodCodeLiteral(MethodReference target_method);
// The PcRelativePatchInfo is used for PC-relative addressing of dex cache arrays
// and boot image strings/types. The only difference is the interpretation of the
@@ -626,21 +673,21 @@
const DexFile& target_dex_file;
// Either the dex cache array element offset or the string/type index.
uint32_t offset_or_index;
- vixl::Label label;
- vixl::Label* pc_insn_label;
+ vixl::aarch64::Label label;
+ vixl::aarch64::Label* pc_insn_label;
};
- vixl::Label* NewPcRelativePatch(const DexFile& dex_file,
- uint32_t offset_or_index,
- vixl::Label* adrp_label,
- ArenaDeque<PcRelativePatchInfo>* patches);
+ vixl::aarch64::Label* NewPcRelativePatch(const DexFile& dex_file,
+ uint32_t offset_or_index,
+ vixl::aarch64::Label* adrp_label,
+ ArenaDeque<PcRelativePatchInfo>* patches);
void EmitJumpTables();
// Labels for each block that will be compiled.
- // We use a deque so that the `vixl::Label` objects do not move in memory.
- ArenaDeque<vixl::Label> block_labels_; // Indexed by block id.
- vixl::Label frame_entry_label_;
+ // We use a deque so that the `vixl::aarch64::Label` objects do not move in memory.
+ ArenaDeque<vixl::aarch64::Label> block_labels_; // Indexed by block id.
+ vixl::aarch64::Label frame_entry_label_;
ArenaVector<std::unique_ptr<JumpTableARM64>> jump_tables_;
LocationsBuilderARM64 location_builder_;
@@ -659,7 +706,7 @@
MethodToLiteralMap call_patches_;
// Relative call patch info.
// Using ArenaDeque<> which retains element addresses on push/emplace_back().
- ArenaDeque<MethodPatchInfo<vixl::Label>> relative_call_patches_;
+ ArenaDeque<MethodPatchInfo<vixl::aarch64::Label>> relative_call_patches_;
// PC-relative DexCache access info.
ArenaDeque<PcRelativePatchInfo> pc_relative_dex_cache_patches_;
// Deduplication map for boot string literals for kBootImageLinkTimeAddress.
diff --git a/compiler/optimizing/common_arm64.h b/compiler/optimizing/common_arm64.h
index a849448..d2afa5b 100644
--- a/compiler/optimizing/common_arm64.h
+++ b/compiler/optimizing/common_arm64.h
@@ -21,8 +21,9 @@
#include "locations.h"
#include "nodes.h"
#include "utils/arm64/assembler_arm64.h"
-#include "vixl/a64/disasm-a64.h"
-#include "vixl/a64/macro-assembler-a64.h"
+
+#include "a64/disasm-a64.h"
+#include "a64/macro-assembler-a64.h"
namespace art {
namespace arm64 {
@@ -34,87 +35,88 @@
static inline int VIXLRegCodeFromART(int code) {
if (code == SP) {
- return vixl::kSPRegInternalCode;
+ return vixl::aarch64::kSPRegInternalCode;
}
if (code == XZR) {
- return vixl::kZeroRegCode;
+ return vixl::aarch64::kZeroRegCode;
}
return code;
}
static inline int ARTRegCodeFromVIXL(int code) {
- if (code == vixl::kSPRegInternalCode) {
+ if (code == vixl::aarch64::kSPRegInternalCode) {
return SP;
}
- if (code == vixl::kZeroRegCode) {
+ if (code == vixl::aarch64::kZeroRegCode) {
return XZR;
}
return code;
}
-static inline vixl::Register XRegisterFrom(Location location) {
+static inline vixl::aarch64::Register XRegisterFrom(Location location) {
DCHECK(location.IsRegister()) << location;
- return vixl::Register::XRegFromCode(VIXLRegCodeFromART(location.reg()));
+ return vixl::aarch64::Register::GetXRegFromCode(VIXLRegCodeFromART(location.reg()));
}
-static inline vixl::Register WRegisterFrom(Location location) {
+static inline vixl::aarch64::Register WRegisterFrom(Location location) {
DCHECK(location.IsRegister()) << location;
- return vixl::Register::WRegFromCode(VIXLRegCodeFromART(location.reg()));
+ return vixl::aarch64::Register::GetWRegFromCode(VIXLRegCodeFromART(location.reg()));
}
-static inline vixl::Register RegisterFrom(Location location, Primitive::Type type) {
+static inline vixl::aarch64::Register RegisterFrom(Location location, Primitive::Type type) {
DCHECK(type != Primitive::kPrimVoid && !Primitive::IsFloatingPointType(type)) << type;
return type == Primitive::kPrimLong ? XRegisterFrom(location) : WRegisterFrom(location);
}
-static inline vixl::Register OutputRegister(HInstruction* instr) {
+static inline vixl::aarch64::Register OutputRegister(HInstruction* instr) {
return RegisterFrom(instr->GetLocations()->Out(), instr->GetType());
}
-static inline vixl::Register InputRegisterAt(HInstruction* instr, int input_index) {
+static inline vixl::aarch64::Register InputRegisterAt(HInstruction* instr, int input_index) {
return RegisterFrom(instr->GetLocations()->InAt(input_index),
instr->InputAt(input_index)->GetType());
}
-static inline vixl::FPRegister DRegisterFrom(Location location) {
+static inline vixl::aarch64::FPRegister DRegisterFrom(Location location) {
DCHECK(location.IsFpuRegister()) << location;
- return vixl::FPRegister::DRegFromCode(location.reg());
+ return vixl::aarch64::FPRegister::GetDRegFromCode(location.reg());
}
-static inline vixl::FPRegister SRegisterFrom(Location location) {
+static inline vixl::aarch64::FPRegister SRegisterFrom(Location location) {
DCHECK(location.IsFpuRegister()) << location;
- return vixl::FPRegister::SRegFromCode(location.reg());
+ return vixl::aarch64::FPRegister::GetSRegFromCode(location.reg());
}
-static inline vixl::FPRegister FPRegisterFrom(Location location, Primitive::Type type) {
+static inline vixl::aarch64::FPRegister FPRegisterFrom(Location location, Primitive::Type type) {
DCHECK(Primitive::IsFloatingPointType(type)) << type;
return type == Primitive::kPrimDouble ? DRegisterFrom(location) : SRegisterFrom(location);
}
-static inline vixl::FPRegister OutputFPRegister(HInstruction* instr) {
+static inline vixl::aarch64::FPRegister OutputFPRegister(HInstruction* instr) {
return FPRegisterFrom(instr->GetLocations()->Out(), instr->GetType());
}
-static inline vixl::FPRegister InputFPRegisterAt(HInstruction* instr, int input_index) {
+static inline vixl::aarch64::FPRegister InputFPRegisterAt(HInstruction* instr, int input_index) {
return FPRegisterFrom(instr->GetLocations()->InAt(input_index),
instr->InputAt(input_index)->GetType());
}
-static inline vixl::CPURegister CPURegisterFrom(Location location, Primitive::Type type) {
- return Primitive::IsFloatingPointType(type) ? vixl::CPURegister(FPRegisterFrom(location, type))
- : vixl::CPURegister(RegisterFrom(location, type));
+static inline vixl::aarch64::CPURegister CPURegisterFrom(Location location, Primitive::Type type) {
+ return Primitive::IsFloatingPointType(type)
+ ? vixl::aarch64::CPURegister(FPRegisterFrom(location, type))
+ : vixl::aarch64::CPURegister(RegisterFrom(location, type));
}
-static inline vixl::CPURegister OutputCPURegister(HInstruction* instr) {
+static inline vixl::aarch64::CPURegister OutputCPURegister(HInstruction* instr) {
return Primitive::IsFloatingPointType(instr->GetType())
- ? static_cast<vixl::CPURegister>(OutputFPRegister(instr))
- : static_cast<vixl::CPURegister>(OutputRegister(instr));
+ ? static_cast<vixl::aarch64::CPURegister>(OutputFPRegister(instr))
+ : static_cast<vixl::aarch64::CPURegister>(OutputRegister(instr));
}
-static inline vixl::CPURegister InputCPURegisterAt(HInstruction* instr, int index) {
+static inline vixl::aarch64::CPURegister InputCPURegisterAt(HInstruction* instr, int index) {
return Primitive::IsFloatingPointType(instr->InputAt(index)->GetType())
- ? static_cast<vixl::CPURegister>(InputFPRegisterAt(instr, index))
- : static_cast<vixl::CPURegister>(InputRegisterAt(instr, index));
+ ? static_cast<vixl::aarch64::CPURegister>(InputFPRegisterAt(instr, index))
+ : static_cast<vixl::aarch64::CPURegister>(InputRegisterAt(instr, index));
}
static inline int64_t Int64ConstantFrom(Location location) {
@@ -129,63 +131,70 @@
}
}
-static inline vixl::Operand OperandFrom(Location location, Primitive::Type type) {
+static inline vixl::aarch64::Operand OperandFrom(Location location, Primitive::Type type) {
if (location.IsRegister()) {
- return vixl::Operand(RegisterFrom(location, type));
+ return vixl::aarch64::Operand(RegisterFrom(location, type));
} else {
- return vixl::Operand(Int64ConstantFrom(location));
+ return vixl::aarch64::Operand(Int64ConstantFrom(location));
}
}
-static inline vixl::Operand InputOperandAt(HInstruction* instr, int input_index) {
+static inline vixl::aarch64::Operand InputOperandAt(HInstruction* instr, int input_index) {
return OperandFrom(instr->GetLocations()->InAt(input_index),
instr->InputAt(input_index)->GetType());
}
-static inline vixl::MemOperand StackOperandFrom(Location location) {
- return vixl::MemOperand(vixl::sp, location.GetStackIndex());
+static inline vixl::aarch64::MemOperand StackOperandFrom(Location location) {
+ return vixl::aarch64::MemOperand(vixl::aarch64::sp, location.GetStackIndex());
}
-static inline vixl::MemOperand HeapOperand(const vixl::Register& base, size_t offset = 0) {
+static inline vixl::aarch64::MemOperand HeapOperand(const vixl::aarch64::Register& base,
+ size_t offset = 0) {
  // A heap reference must be 32bit, so it fits in a W register.
DCHECK(base.IsW());
- return vixl::MemOperand(base.X(), offset);
+ return vixl::aarch64::MemOperand(base.X(), offset);
}
-static inline vixl::MemOperand HeapOperand(const vixl::Register& base,
- const vixl::Register& regoffset,
- vixl::Shift shift = vixl::LSL,
- unsigned shift_amount = 0) {
+static inline vixl::aarch64::MemOperand HeapOperand(const vixl::aarch64::Register& base,
+ const vixl::aarch64::Register& regoffset,
+ vixl::aarch64::Shift shift = vixl::aarch64::LSL,
+ unsigned shift_amount = 0) {
  // A heap reference must be 32bit, so it fits in a W register.
DCHECK(base.IsW());
- return vixl::MemOperand(base.X(), regoffset, shift, shift_amount);
+ return vixl::aarch64::MemOperand(base.X(), regoffset, shift, shift_amount);
}
-static inline vixl::MemOperand HeapOperand(const vixl::Register& base, Offset offset) {
+static inline vixl::aarch64::MemOperand HeapOperand(const vixl::aarch64::Register& base,
+ Offset offset) {
return HeapOperand(base, offset.SizeValue());
}
-static inline vixl::MemOperand HeapOperandFrom(Location location, Offset offset) {
+static inline vixl::aarch64::MemOperand HeapOperandFrom(Location location, Offset offset) {
return HeapOperand(RegisterFrom(location, Primitive::kPrimNot), offset);
}
-static inline Location LocationFrom(const vixl::Register& reg) {
- return Location::RegisterLocation(ARTRegCodeFromVIXL(reg.code()));
+static inline Location LocationFrom(const vixl::aarch64::Register& reg) {
+ return Location::RegisterLocation(ARTRegCodeFromVIXL(reg.GetCode()));
}
-static inline Location LocationFrom(const vixl::FPRegister& fpreg) {
- return Location::FpuRegisterLocation(fpreg.code());
+static inline Location LocationFrom(const vixl::aarch64::FPRegister& fpreg) {
+ return Location::FpuRegisterLocation(fpreg.GetCode());
}
-static inline vixl::Operand OperandFromMemOperand(const vixl::MemOperand& mem_op) {
+static inline vixl::aarch64::Operand OperandFromMemOperand(
+ const vixl::aarch64::MemOperand& mem_op) {
if (mem_op.IsImmediateOffset()) {
- return vixl::Operand(mem_op.offset());
+ return vixl::aarch64::Operand(mem_op.GetOffset());
} else {
DCHECK(mem_op.IsRegisterOffset());
- if (mem_op.extend() != vixl::NO_EXTEND) {
- return vixl::Operand(mem_op.regoffset(), mem_op.extend(), mem_op.shift_amount());
- } else if (mem_op.shift() != vixl::NO_SHIFT) {
- return vixl::Operand(mem_op.regoffset(), mem_op.shift(), mem_op.shift_amount());
+ if (mem_op.GetExtend() != vixl::aarch64::NO_EXTEND) {
+ return vixl::aarch64::Operand(mem_op.GetRegisterOffset(),
+ mem_op.GetExtend(),
+ mem_op.GetShiftAmount());
+ } else if (mem_op.GetShift() != vixl::aarch64::NO_SHIFT) {
+ return vixl::aarch64::Operand(mem_op.GetRegisterOffset(),
+ mem_op.GetShift(),
+ mem_op.GetShiftAmount());
} else {
LOG(FATAL) << "Should not reach here";
UNREACHABLE();
@@ -212,10 +221,10 @@
if (instr->IsAnd() || instr->IsOr() || instr->IsXor()) {
// Uses logical operations.
- return vixl::Assembler::IsImmLogical(value, vixl::kXRegSize);
+ return vixl::aarch64::Assembler::IsImmLogical(value, vixl::aarch64::kXRegSize);
} else if (instr->IsNeg()) {
// Uses mov -immediate.
- return vixl::Assembler::IsImmMovn(value, vixl::kXRegSize);
+ return vixl::aarch64::Assembler::IsImmMovn(value, vixl::aarch64::kXRegSize);
} else {
DCHECK(instr->IsAdd() ||
instr->IsArm64IntermediateAddress() ||
@@ -227,7 +236,8 @@
// Uses aliases of ADD/SUB instructions.
// If `value` does not fit but `-value` does, VIXL will automatically use
// the 'opposite' instruction.
- return vixl::Assembler::IsImmAddSub(value) || vixl::Assembler::IsImmAddSub(-value);
+ return vixl::aarch64::Assembler::IsImmAddSub(value)
+ || vixl::aarch64::Assembler::IsImmAddSub(-value);
}
}
@@ -263,30 +273,30 @@
return true;
}
-static inline vixl::Shift ShiftFromOpKind(HArm64DataProcWithShifterOp::OpKind op_kind) {
+static inline vixl::aarch64::Shift ShiftFromOpKind(HArm64DataProcWithShifterOp::OpKind op_kind) {
switch (op_kind) {
- case HArm64DataProcWithShifterOp::kASR: return vixl::ASR;
- case HArm64DataProcWithShifterOp::kLSL: return vixl::LSL;
- case HArm64DataProcWithShifterOp::kLSR: return vixl::LSR;
+ case HArm64DataProcWithShifterOp::kASR: return vixl::aarch64::ASR;
+ case HArm64DataProcWithShifterOp::kLSL: return vixl::aarch64::LSL;
+ case HArm64DataProcWithShifterOp::kLSR: return vixl::aarch64::LSR;
default:
LOG(FATAL) << "Unexpected op kind " << op_kind;
UNREACHABLE();
- return vixl::NO_SHIFT;
+ return vixl::aarch64::NO_SHIFT;
}
}
-static inline vixl::Extend ExtendFromOpKind(HArm64DataProcWithShifterOp::OpKind op_kind) {
+static inline vixl::aarch64::Extend ExtendFromOpKind(HArm64DataProcWithShifterOp::OpKind op_kind) {
switch (op_kind) {
- case HArm64DataProcWithShifterOp::kUXTB: return vixl::UXTB;
- case HArm64DataProcWithShifterOp::kUXTH: return vixl::UXTH;
- case HArm64DataProcWithShifterOp::kUXTW: return vixl::UXTW;
- case HArm64DataProcWithShifterOp::kSXTB: return vixl::SXTB;
- case HArm64DataProcWithShifterOp::kSXTH: return vixl::SXTH;
- case HArm64DataProcWithShifterOp::kSXTW: return vixl::SXTW;
+ case HArm64DataProcWithShifterOp::kUXTB: return vixl::aarch64::UXTB;
+ case HArm64DataProcWithShifterOp::kUXTH: return vixl::aarch64::UXTH;
+ case HArm64DataProcWithShifterOp::kUXTW: return vixl::aarch64::UXTW;
+ case HArm64DataProcWithShifterOp::kSXTB: return vixl::aarch64::SXTB;
+ case HArm64DataProcWithShifterOp::kSXTH: return vixl::aarch64::SXTH;
+ case HArm64DataProcWithShifterOp::kSXTW: return vixl::aarch64::SXTW;
default:
LOG(FATAL) << "Unexpected op kind " << op_kind;
UNREACHABLE();
- return vixl::NO_EXTEND;
+ return vixl::aarch64::NO_EXTEND;
}
}
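The common_arm64.h hunks above are mechanical renames: every helper now spells out vixl::aarch64 and calls the Get-prefixed accessors. The following is a minimal sketch of that mapping, for orientation only; the wrapper names are placeholders invented here, and it assumes the new VIXL headers are reachable as "a64/...", matching the includes above.

#include "a64/macro-assembler-a64.h"

// Hypothetical helpers, for illustration only; they mirror the renames above.
inline vixl::aarch64::Register ExampleXRegForCode(int code) {
  // Old API: vixl::Register::XRegFromCode(code)
  return vixl::aarch64::Register::GetXRegFromCode(code);
}

inline int ExampleCodeOf(const vixl::aarch64::Register& reg) {
  // Old API: reg.code()
  return reg.GetCode();
}

inline vixl::aarch64::MemOperand ExampleStackSlot(int32_t stack_index) {
  // Old API: vixl::MemOperand(vixl::sp, stack_index)
  return vixl::aarch64::MemOperand(vixl::aarch64::sp, stack_index);
}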
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 16438a7..987d3f8 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -28,10 +28,14 @@
#include "utils/arm64/assembler_arm64.h"
#include "utils/arm64/constants_arm64.h"
-#include "vixl/a64/disasm-a64.h"
-#include "vixl/a64/macro-assembler-a64.h"
+using namespace vixl::aarch64; // NOLINT(build/namespaces)
-using namespace vixl; // NOLINT(build/namespaces)
+// TODO: make vixl clean wrt -Wshadow.
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wshadow"
+#include "a64/disasm-a64.h"
+#include "a64/macro-assembler-a64.h"
+#pragma GCC diagnostic pop
namespace art {
@@ -57,7 +61,7 @@
} // namespace
-vixl::MacroAssembler* IntrinsicCodeGeneratorARM64::GetVIXLAssembler() {
+MacroAssembler* IntrinsicCodeGeneratorARM64::GetVIXLAssembler() {
return codegen_->GetAssembler()->vixl_masm_;
}
@@ -170,14 +174,14 @@
locations->SetOut(Location::RequiresFpuRegister());
}
-static void MoveFPToInt(LocationSummary* locations, bool is64bit, vixl::MacroAssembler* masm) {
+static void MoveFPToInt(LocationSummary* locations, bool is64bit, MacroAssembler* masm) {
Location input = locations->InAt(0);
Location output = locations->Out();
__ Fmov(is64bit ? XRegisterFrom(output) : WRegisterFrom(output),
is64bit ? DRegisterFrom(input) : SRegisterFrom(input));
}
-static void MoveIntToFP(LocationSummary* locations, bool is64bit, vixl::MacroAssembler* masm) {
+static void MoveIntToFP(LocationSummary* locations, bool is64bit, MacroAssembler* masm) {
Location input = locations->InAt(0);
Location output = locations->Out();
__ Fmov(is64bit ? DRegisterFrom(output) : SRegisterFrom(output),
@@ -222,7 +226,7 @@
static void GenReverseBytes(LocationSummary* locations,
Primitive::Type type,
- vixl::MacroAssembler* masm) {
+ MacroAssembler* masm) {
Location in = locations->InAt(0);
Location out = locations->Out();
@@ -276,7 +280,7 @@
static void GenNumberOfLeadingZeros(LocationSummary* locations,
Primitive::Type type,
- vixl::MacroAssembler* masm) {
+ MacroAssembler* masm) {
DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong);
Location in = locations->InAt(0);
@@ -303,7 +307,7 @@
static void GenNumberOfTrailingZeros(LocationSummary* locations,
Primitive::Type type,
- vixl::MacroAssembler* masm) {
+ MacroAssembler* masm) {
DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong);
Location in = locations->InAt(0);
@@ -331,7 +335,7 @@
static void GenReverse(LocationSummary* locations,
Primitive::Type type,
- vixl::MacroAssembler* masm) {
+ MacroAssembler* masm) {
DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong);
Location in = locations->InAt(0);
@@ -356,7 +360,7 @@
GenReverse(invoke->GetLocations(), Primitive::kPrimLong, GetVIXLAssembler());
}
-static void GenBitCount(HInvoke* instr, Primitive::Type type, vixl::MacroAssembler* masm) {
+static void GenBitCount(HInvoke* instr, Primitive::Type type, MacroAssembler* masm) {
DCHECK(Primitive::IsIntOrLongType(type)) << type;
DCHECK_EQ(instr->GetType(), Primitive::kPrimInt);
DCHECK_EQ(Primitive::PrimitiveKind(instr->InputAt(0)->GetType()), type);
@@ -397,7 +401,7 @@
locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
}
-static void MathAbsFP(LocationSummary* locations, bool is64bit, vixl::MacroAssembler* masm) {
+static void MathAbsFP(LocationSummary* locations, bool is64bit, MacroAssembler* masm) {
Location in = locations->InAt(0);
Location out = locations->Out();
@@ -433,7 +437,7 @@
static void GenAbsInteger(LocationSummary* locations,
bool is64bit,
- vixl::MacroAssembler* masm) {
+ MacroAssembler* masm) {
Location in = locations->InAt(0);
Location output = locations->Out();
@@ -463,7 +467,7 @@
static void GenMinMaxFP(LocationSummary* locations,
bool is_min,
bool is_double,
- vixl::MacroAssembler* masm) {
+ MacroAssembler* masm) {
Location op1 = locations->InAt(0);
Location op2 = locations->InAt(1);
Location out = locations->Out();
@@ -523,7 +527,7 @@
static void GenMinMax(LocationSummary* locations,
bool is_min,
bool is_long,
- vixl::MacroAssembler* masm) {
+ MacroAssembler* masm) {
Location op1 = locations->InAt(0);
Location op2 = locations->InAt(1);
Location out = locations->Out();
@@ -574,7 +578,7 @@
void IntrinsicCodeGeneratorARM64::VisitMathSqrt(HInvoke* invoke) {
LocationSummary* locations = invoke->GetLocations();
- vixl::MacroAssembler* masm = GetVIXLAssembler();
+ MacroAssembler* masm = GetVIXLAssembler();
__ Fsqrt(DRegisterFrom(locations->Out()), DRegisterFrom(locations->InAt(0)));
}
@@ -584,7 +588,7 @@
void IntrinsicCodeGeneratorARM64::VisitMathCeil(HInvoke* invoke) {
LocationSummary* locations = invoke->GetLocations();
- vixl::MacroAssembler* masm = GetVIXLAssembler();
+ MacroAssembler* masm = GetVIXLAssembler();
__ Frintp(DRegisterFrom(locations->Out()), DRegisterFrom(locations->InAt(0)));
}
@@ -594,7 +598,7 @@
void IntrinsicCodeGeneratorARM64::VisitMathFloor(HInvoke* invoke) {
LocationSummary* locations = invoke->GetLocations();
- vixl::MacroAssembler* masm = GetVIXLAssembler();
+ MacroAssembler* masm = GetVIXLAssembler();
__ Frintm(DRegisterFrom(locations->Out()), DRegisterFrom(locations->InAt(0)));
}
@@ -604,7 +608,7 @@
void IntrinsicCodeGeneratorARM64::VisitMathRint(HInvoke* invoke) {
LocationSummary* locations = invoke->GetLocations();
- vixl::MacroAssembler* masm = GetVIXLAssembler();
+ MacroAssembler* masm = GetVIXLAssembler();
__ Frintn(DRegisterFrom(locations->Out()), DRegisterFrom(locations->InAt(0)));
}
@@ -617,7 +621,7 @@
locations->AddTemp(Location::RequiresFpuRegister());
}
-static void GenMathRound(HInvoke* invoke, bool is_double, vixl::MacroAssembler* masm) {
+static void GenMathRound(HInvoke* invoke, bool is_double, vixl::aarch64::MacroAssembler* masm) {
// Java 8 API definition for Math.round():
// Return the closest long or int to the argument, with ties rounding to positive infinity.
//
@@ -635,13 +639,13 @@
FPRegister in_reg = is_double ? DRegisterFrom(l->InAt(0)) : SRegisterFrom(l->InAt(0));
FPRegister tmp_fp = is_double ? DRegisterFrom(l->GetTemp(0)) : SRegisterFrom(l->GetTemp(0));
Register out_reg = is_double ? XRegisterFrom(l->Out()) : WRegisterFrom(l->Out());
- vixl::Label done;
+ vixl::aarch64::Label done;
// Round to nearest integer, ties away from zero.
__ Fcvtas(out_reg, in_reg);
// For positive values, zero or NaN inputs, rounding is done.
- __ Tbz(out_reg, out_reg.size() - 1, &done);
+ __ Tbz(out_reg, out_reg.GetSizeInBits() - 1, &done);
// Handle input < 0 cases.
// If input is negative but not a tie, previous result (round to nearest) is valid.
@@ -675,7 +679,7 @@
}
void IntrinsicCodeGeneratorARM64::VisitMemoryPeekByte(HInvoke* invoke) {
- vixl::MacroAssembler* masm = GetVIXLAssembler();
+ MacroAssembler* masm = GetVIXLAssembler();
__ Ldrsb(WRegisterFrom(invoke->GetLocations()->Out()),
AbsoluteHeapOperandFrom(invoke->GetLocations()->InAt(0), 0));
}
@@ -685,7 +689,7 @@
}
void IntrinsicCodeGeneratorARM64::VisitMemoryPeekIntNative(HInvoke* invoke) {
- vixl::MacroAssembler* masm = GetVIXLAssembler();
+ MacroAssembler* masm = GetVIXLAssembler();
__ Ldr(WRegisterFrom(invoke->GetLocations()->Out()),
AbsoluteHeapOperandFrom(invoke->GetLocations()->InAt(0), 0));
}
@@ -695,7 +699,7 @@
}
void IntrinsicCodeGeneratorARM64::VisitMemoryPeekLongNative(HInvoke* invoke) {
- vixl::MacroAssembler* masm = GetVIXLAssembler();
+ MacroAssembler* masm = GetVIXLAssembler();
__ Ldr(XRegisterFrom(invoke->GetLocations()->Out()),
AbsoluteHeapOperandFrom(invoke->GetLocations()->InAt(0), 0));
}
@@ -705,7 +709,7 @@
}
void IntrinsicCodeGeneratorARM64::VisitMemoryPeekShortNative(HInvoke* invoke) {
- vixl::MacroAssembler* masm = GetVIXLAssembler();
+ MacroAssembler* masm = GetVIXLAssembler();
__ Ldrsh(WRegisterFrom(invoke->GetLocations()->Out()),
AbsoluteHeapOperandFrom(invoke->GetLocations()->InAt(0), 0));
}
@@ -723,7 +727,7 @@
}
void IntrinsicCodeGeneratorARM64::VisitMemoryPokeByte(HInvoke* invoke) {
- vixl::MacroAssembler* masm = GetVIXLAssembler();
+ MacroAssembler* masm = GetVIXLAssembler();
__ Strb(WRegisterFrom(invoke->GetLocations()->InAt(1)),
AbsoluteHeapOperandFrom(invoke->GetLocations()->InAt(0), 0));
}
@@ -733,7 +737,7 @@
}
void IntrinsicCodeGeneratorARM64::VisitMemoryPokeIntNative(HInvoke* invoke) {
- vixl::MacroAssembler* masm = GetVIXLAssembler();
+ MacroAssembler* masm = GetVIXLAssembler();
__ Str(WRegisterFrom(invoke->GetLocations()->InAt(1)),
AbsoluteHeapOperandFrom(invoke->GetLocations()->InAt(0), 0));
}
@@ -743,7 +747,7 @@
}
void IntrinsicCodeGeneratorARM64::VisitMemoryPokeLongNative(HInvoke* invoke) {
- vixl::MacroAssembler* masm = GetVIXLAssembler();
+ MacroAssembler* masm = GetVIXLAssembler();
__ Str(XRegisterFrom(invoke->GetLocations()->InAt(1)),
AbsoluteHeapOperandFrom(invoke->GetLocations()->InAt(0), 0));
}
@@ -753,7 +757,7 @@
}
void IntrinsicCodeGeneratorARM64::VisitMemoryPokeShortNative(HInvoke* invoke) {
- vixl::MacroAssembler* masm = GetVIXLAssembler();
+ MacroAssembler* masm = GetVIXLAssembler();
__ Strh(WRegisterFrom(invoke->GetLocations()->InAt(1)),
AbsoluteHeapOperandFrom(invoke->GetLocations()->InAt(0), 0));
}
@@ -778,7 +782,7 @@
DCHECK((type == Primitive::kPrimInt) ||
(type == Primitive::kPrimLong) ||
(type == Primitive::kPrimNot));
- vixl::MacroAssembler* masm = codegen->GetAssembler()->vixl_masm_;
+ MacroAssembler* masm = codegen->GetAssembler()->vixl_masm_;
Location base_loc = locations->InAt(1);
Register base = WRegisterFrom(base_loc); // Object pointer.
Location offset_loc = locations->InAt(2);
@@ -912,7 +916,7 @@
bool is_volatile,
bool is_ordered,
CodeGeneratorARM64* codegen) {
- vixl::MacroAssembler* masm = codegen->GetAssembler()->vixl_masm_;
+ MacroAssembler* masm = codegen->GetAssembler()->vixl_masm_;
Register base = WRegisterFrom(locations->InAt(1)); // Object pointer.
Register offset = XRegisterFrom(locations->InAt(2)); // Long offset.
@@ -1031,7 +1035,7 @@
}
static void GenCas(LocationSummary* locations, Primitive::Type type, CodeGeneratorARM64* codegen) {
- vixl::MacroAssembler* masm = codegen->GetAssembler()->vixl_masm_;
+ MacroAssembler* masm = codegen->GetAssembler()->vixl_masm_;
Register out = WRegisterFrom(locations->Out()); // Boolean result.
@@ -1070,7 +1074,7 @@
// } while (tmp_value == 0 && failure([tmp_ptr] <- r_new_value));
// result = tmp_value != 0;
- vixl::Label loop_head, exit_loop;
+ vixl::aarch64::Label loop_head, exit_loop;
__ Bind(&loop_head);
// TODO: When `type == Primitive::kPrimNot`, add a read barrier for
// the reference stored in the object before attempting the CAS,
@@ -1154,7 +1158,7 @@
}
void IntrinsicCodeGeneratorARM64::VisitStringCompareTo(HInvoke* invoke) {
- vixl::MacroAssembler* masm = GetVIXLAssembler();
+ MacroAssembler* masm = GetVIXLAssembler();
LocationSummary* locations = invoke->GetLocations();
Register str = XRegisterFrom(locations->InAt(0));
@@ -1165,9 +1169,9 @@
Register temp1 = WRegisterFrom(locations->GetTemp(1));
Register temp2 = WRegisterFrom(locations->GetTemp(2));
- vixl::Label loop;
- vixl::Label find_char_diff;
- vixl::Label end;
+ vixl::aarch64::Label loop;
+ vixl::aarch64::Label find_char_diff;
+ vixl::aarch64::Label end;
// Get offsets of count and value fields within a string object.
const int32_t count_offset = mirror::String::CountOffset().Int32Value();
@@ -1269,7 +1273,7 @@
}
void IntrinsicCodeGeneratorARM64::VisitStringEquals(HInvoke* invoke) {
- vixl::MacroAssembler* masm = GetVIXLAssembler();
+ MacroAssembler* masm = GetVIXLAssembler();
LocationSummary* locations = invoke->GetLocations();
Register str = WRegisterFrom(locations->InAt(0));
@@ -1281,10 +1285,10 @@
Register temp1 = WRegisterFrom(locations->GetTemp(0));
Register temp2 = WRegisterFrom(locations->GetTemp(1));
- vixl::Label loop;
- vixl::Label end;
- vixl::Label return_true;
- vixl::Label return_false;
+ vixl::aarch64::Label loop;
+ vixl::aarch64::Label end;
+ vixl::aarch64::Label return_true;
+ vixl::aarch64::Label return_false;
// Get offsets of count, value, and class fields within a string object.
const int32_t count_offset = mirror::String::CountOffset().Int32Value();
@@ -1357,7 +1361,7 @@
}
static void GenerateVisitStringIndexOf(HInvoke* invoke,
- vixl::MacroAssembler* masm,
+ MacroAssembler* masm,
CodeGeneratorARM64* codegen,
ArenaAllocator* allocator,
bool start_at_zero) {
@@ -1454,7 +1458,7 @@
}
void IntrinsicCodeGeneratorARM64::VisitStringNewStringFromBytes(HInvoke* invoke) {
- vixl::MacroAssembler* masm = GetVIXLAssembler();
+ MacroAssembler* masm = GetVIXLAssembler();
LocationSummary* locations = invoke->GetLocations();
Register byte_array = WRegisterFrom(locations->InAt(0));
@@ -1483,7 +1487,7 @@
}
void IntrinsicCodeGeneratorARM64::VisitStringNewStringFromChars(HInvoke* invoke) {
- vixl::MacroAssembler* masm = GetVIXLAssembler();
+ MacroAssembler* masm = GetVIXLAssembler();
// No need to emit code checking whether `locations->InAt(2)` is a null
// pointer, as callers of the native method
@@ -1508,7 +1512,7 @@
}
void IntrinsicCodeGeneratorARM64::VisitStringNewStringFromString(HInvoke* invoke) {
- vixl::MacroAssembler* masm = GetVIXLAssembler();
+ MacroAssembler* masm = GetVIXLAssembler();
LocationSummary* locations = invoke->GetLocations();
Register string_to_copy = WRegisterFrom(locations->InAt(0));
@@ -1556,7 +1560,7 @@
}
static void GenFPToFPCall(HInvoke* invoke,
- vixl::MacroAssembler* masm,
+ MacroAssembler* masm,
CodeGeneratorARM64* codegen,
QuickEntrypointEnum entry) {
__ Ldr(lr, MemOperand(tr, GetThreadOffset<kArm64WordSize>(entry).Int32Value()));
@@ -1716,7 +1720,7 @@
}
void IntrinsicCodeGeneratorARM64::VisitStringGetCharsNoCheck(HInvoke* invoke) {
- vixl::MacroAssembler* masm = GetVIXLAssembler();
+ MacroAssembler* masm = GetVIXLAssembler();
LocationSummary* locations = invoke->GetLocations();
// Check assumption that sizeof(Char) is 2 (used in scaling below).
@@ -1756,9 +1760,9 @@
__ Sub(num_chr, srcEnd, srcBegin);
// Do the copy.
- vixl::Label loop;
- vixl::Label done;
- vixl::Label remainder;
+ vixl::aarch64::Label loop;
+ vixl::aarch64::Label done;
+ vixl::aarch64::Label remainder;
// Early out for valid zero-length retrievals.
__ Cbz(num_chr, &done);
@@ -1773,9 +1777,9 @@
  // The main loop, used for longer fetches, loads and stores 8x16-bit characters at a time.
// (Unaligned addresses are acceptable here and not worth inlining extra code to rectify.)
__ Bind(&loop);
- __ Ldp(tmp1, tmp2, MemOperand(src_ptr, char_size * 8, vixl::PostIndex));
+ __ Ldp(tmp1, tmp2, MemOperand(src_ptr, char_size * 8, PostIndex));
__ Subs(num_chr, num_chr, 8);
- __ Stp(tmp1, tmp2, MemOperand(dst_ptr, char_size * 8, vixl::PostIndex));
+ __ Stp(tmp1, tmp2, MemOperand(dst_ptr, char_size * 8, PostIndex));
__ B(ge, &loop);
__ Adds(num_chr, num_chr, 8);
@@ -1784,9 +1788,9 @@
// Main loop for < 8 character case and remainder handling. Loads and stores one
// 16-bit Java character at a time.
__ Bind(&remainder);
- __ Ldrh(tmp1, MemOperand(src_ptr, char_size, vixl::PostIndex));
+ __ Ldrh(tmp1, MemOperand(src_ptr, char_size, PostIndex));
__ Subs(num_chr, num_chr, 1);
- __ Strh(tmp1, MemOperand(dst_ptr, char_size, vixl::PostIndex));
+ __ Strh(tmp1, MemOperand(dst_ptr, char_size, PostIndex));
__ B(gt, &remainder);
__ Bind(&done);
@@ -1800,7 +1804,7 @@
uint32_t at,
HInstruction* input) {
HIntConstant* const_input = input->AsIntConstant();
- if (const_input != nullptr && !vixl::Assembler::IsImmAddSub(const_input->GetValue())) {
+ if (const_input != nullptr && !vixl::aarch64::Assembler::IsImmAddSub(const_input->GetValue())) {
locations->SetInAt(at, Location::RequiresRegister());
} else {
locations->SetInAt(at, Location::RegisterOrConstant(input));
@@ -1847,7 +1851,7 @@
locations->AddTemp(Location::RequiresRegister());
}
-static void CheckSystemArrayCopyPosition(vixl::MacroAssembler* masm,
+static void CheckSystemArrayCopyPosition(MacroAssembler* masm,
const Location& pos,
const Register& input,
const Location& length,
@@ -1880,7 +1884,7 @@
} else {
// Check that pos >= 0.
Register pos_reg = WRegisterFrom(pos);
- __ Tbnz(pos_reg, pos_reg.size() - 1, slow_path->GetEntryLabel());
+ __ Tbnz(pos_reg, pos_reg.GetSizeInBits() - 1, slow_path->GetEntryLabel());
// Check that pos <= length(input) && (length(input) - pos) >= length.
__ Ldr(temp, MemOperand(input, length_offset));
@@ -1893,7 +1897,7 @@
// Compute base source address, base destination address, and end source address
// for System.arraycopy* intrinsics.
-static void GenSystemArrayCopyAddresses(vixl::MacroAssembler* masm,
+static void GenSystemArrayCopyAddresses(MacroAssembler* masm,
Primitive::Type type,
const Register& src,
const Location& src_pos,
@@ -1934,7 +1938,7 @@
}
void IntrinsicCodeGeneratorARM64::VisitSystemArrayCopyChar(HInvoke* invoke) {
- vixl::MacroAssembler* masm = GetVIXLAssembler();
+ MacroAssembler* masm = GetVIXLAssembler();
LocationSummary* locations = invoke->GetLocations();
Register src = XRegisterFrom(locations->InAt(0));
Location src_pos = locations->InAt(1);
@@ -2007,12 +2011,12 @@
const int32_t char_size = Primitive::ComponentSize(Primitive::kPrimChar);
UseScratchRegisterScope temps(masm);
Register tmp = temps.AcquireW();
- vixl::Label loop, done;
+ vixl::aarch64::Label loop, done;
__ Bind(&loop);
__ Cmp(src_curr_addr, src_stop_addr);
__ B(&done, eq);
- __ Ldrh(tmp, MemOperand(src_curr_addr, char_size, vixl::PostIndex));
- __ Strh(tmp, MemOperand(dst_curr_addr, char_size, vixl::PostIndex));
+ __ Ldrh(tmp, MemOperand(src_curr_addr, char_size, PostIndex));
+ __ Strh(tmp, MemOperand(dst_curr_addr, char_size, PostIndex));
__ B(&loop);
__ Bind(&done);
@@ -2088,7 +2092,7 @@
// intrinsic and re-enable it (b/29516905).
DCHECK(!kEmitCompilerReadBarrier);
- vixl::MacroAssembler* masm = GetVIXLAssembler();
+ MacroAssembler* masm = GetVIXLAssembler();
LocationSummary* locations = invoke->GetLocations();
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
@@ -2107,7 +2111,7 @@
SlowPathCodeARM64* slow_path = new (GetAllocator()) IntrinsicSlowPathARM64(invoke);
codegen_->AddSlowPath(slow_path);
- vixl::Label conditions_on_positions_validated;
+ vixl::aarch64::Label conditions_on_positions_validated;
SystemArrayCopyOptimizations optimizations(invoke);
// If source and destination are the same, we go to slow path if we need to do
@@ -2230,7 +2234,7 @@
__ Cmp(temp1, temp2);
if (optimizations.GetDestinationIsTypedObjectArray()) {
- vixl::Label do_copy;
+ vixl::aarch64::Label do_copy;
__ B(&do_copy, eq);
if (!did_unpoison) {
codegen_->GetAssembler()->MaybeUnpoisonHeapReference(temp1);
@@ -2278,15 +2282,15 @@
// Iterate over the arrays and do a raw copy of the objects. We don't need to
// poison/unpoison.
- vixl::Label loop, done;
+ vixl::aarch64::Label loop, done;
const int32_t element_size = Primitive::ComponentSize(Primitive::kPrimNot);
__ Bind(&loop);
__ Cmp(src_curr_addr, src_stop_addr);
__ B(&done, eq);
{
Register tmp = temps.AcquireW();
- __ Ldr(tmp, MemOperand(src_curr_addr, element_size, vixl::PostIndex));
- __ Str(tmp, MemOperand(dst_curr_addr, element_size, vixl::PostIndex));
+ __ Ldr(tmp, MemOperand(src_curr_addr, element_size, PostIndex));
+ __ Str(tmp, MemOperand(dst_curr_addr, element_size, PostIndex));
}
__ B(&loop);
__ Bind(&done);
@@ -2299,7 +2303,7 @@
static void GenIsInfinite(LocationSummary* locations,
bool is64bit,
- vixl::MacroAssembler* masm) {
+ MacroAssembler* masm) {
Operand infinity;
Register out;
@@ -2311,7 +2315,7 @@
out = WRegisterFrom(locations->Out());
}
- const Register zero = vixl::Assembler::AppropriateZeroRegFor(out);
+ const Register zero = vixl::aarch64::Assembler::AppropriateZeroRegFor(out);
MoveFPToInt(locations, is64bit, masm);
__ Eor(out, out, infinity);
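For reference, this is the shape the copy loops above take against the renamed API, with Label, PostIndex and the condition codes now coming from vixl::aarch64. It is a standalone sketch rather than code from the patch; the function name and the char_size constant are placeholders chosen for the example.

#include "a64/macro-assembler-a64.h"

void ExampleCharCopyLoop(vixl::aarch64::MacroAssembler* masm,
                         vixl::aarch64::Register src_ptr,
                         vixl::aarch64::Register dst_ptr,
                         vixl::aarch64::Register num_chr,
                         vixl::aarch64::Register tmp) {
  vixl::aarch64::Label loop, done;
  const int char_size = 2;  // 16-bit Java char.
  // Early out for zero-length copies.
  masm->Cbz(num_chr, &done);
  masm->Bind(&loop);
  // Post-index addressing: vixl::PostIndex became vixl::aarch64::PostIndex.
  masm->Ldrh(tmp, vixl::aarch64::MemOperand(src_ptr, char_size,
                                            vixl::aarch64::PostIndex));
  masm->Subs(num_chr, num_chr, 1);
  masm->Strh(tmp, vixl::aarch64::MemOperand(dst_ptr, char_size,
                                            vixl::aarch64::PostIndex));
  masm->B(vixl::aarch64::gt, &loop);
  masm->Bind(&done);
}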
diff --git a/compiler/optimizing/intrinsics_arm64.h b/compiler/optimizing/intrinsics_arm64.h
index d47448a..5251536 100644
--- a/compiler/optimizing/intrinsics_arm64.h
+++ b/compiler/optimizing/intrinsics_arm64.h
@@ -20,10 +20,11 @@
#include "intrinsics.h"
namespace vixl {
+namespace aarch64 {
class MacroAssembler;
-} // namespace vixl
+}} // namespace vixl::aarch64
namespace art {
@@ -73,7 +74,7 @@
#undef OPTIMIZING_INTRINSICS
private:
- vixl::MacroAssembler* GetVIXLAssembler();
+ vixl::aarch64::MacroAssembler* GetVIXLAssembler();
ArenaAllocator* GetAllocator();
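The header keeps the full VIXL headers out of its include graph by forward-declaring the assembler; only the enclosing namespace changes. A small self-contained illustration of the pattern, with placeholder class and member names:

namespace vixl {
namespace aarch64 {
class MacroAssembler;  // Forward declaration: no VIXL header needed here.
}  // namespace aarch64
}  // namespace vixl

class ExampleUser {
 private:
  // A pointer to an incomplete type is fine; the full definition is only
  // required in the .cc file that dereferences it.
  vixl::aarch64::MacroAssembler* masm_ = nullptr;
};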
diff --git a/compiler/utils/arm64/assembler_arm64.cc b/compiler/utils/arm64/assembler_arm64.cc
index 54ed62b..9f2027f 100644
--- a/compiler/utils/arm64/assembler_arm64.cc
+++ b/compiler/utils/arm64/assembler_arm64.cc
@@ -20,7 +20,7 @@
#include "offsets.h"
#include "thread.h"
-using namespace vixl; // NOLINT(build/namespaces)
+using namespace vixl::aarch64; // NOLINT(build/namespaces)
namespace art {
namespace arm64 {
@@ -39,7 +39,7 @@
}
size_t Arm64Assembler::CodeSize() const {
- return vixl_masm_->BufferCapacity() - vixl_masm_->RemainingBufferSpace();
+ return vixl_masm_->GetBufferCapacity() - vixl_masm_->GetRemainingBufferSpace();
}
const uint8_t* Arm64Assembler::CodeBufferBaseAddress() const {
@@ -86,9 +86,9 @@
} else {
// temp = rd + value
// rd = cond ? temp : rn
- vixl::UseScratchRegisterScope temps(vixl_masm_);
+ UseScratchRegisterScope temps(vixl_masm_);
temps.Exclude(reg_x(rd), reg_x(rn));
- vixl::Register temp = temps.AcquireX();
+ Register temp = temps.AcquireX();
___ Add(temp, reg_x(rn), value);
___ Csel(reg_x(rd), temp, reg_x(rd), cond);
}
@@ -182,8 +182,8 @@
}
void Arm64Assembler::StoreStackPointerToThread64(ThreadOffset<8> tr_offs) {
- vixl::UseScratchRegisterScope temps(vixl_masm_);
- vixl::Register temp = temps.AcquireX();
+ UseScratchRegisterScope temps(vixl_masm_);
+ Register temp = temps.AcquireX();
___ Mov(temp, reg_x(SP));
___ Str(temp, MEM_OP(reg_x(TR), tr_offs.Int32Value()));
}
@@ -206,9 +206,9 @@
// temp = value
// rd = cond ? temp : rd
if (value != 0) {
- vixl::UseScratchRegisterScope temps(vixl_masm_);
+ UseScratchRegisterScope temps(vixl_masm_);
temps.Exclude(reg_x(dest));
- vixl::Register temp = temps.AcquireX();
+ Register temp = temps.AcquireX();
___ Mov(temp, value);
___ Csel(reg_x(dest), temp, reg_x(dest), cond);
} else {
@@ -313,7 +313,7 @@
Arm64ManagedRegister base = m_base.AsArm64();
CHECK(dst.IsXRegister() && base.IsXRegister());
  // Remove dst and base from the temp list - higher level API uses IP1, IP0.
- vixl::UseScratchRegisterScope temps(vixl_masm_);
+ UseScratchRegisterScope temps(vixl_masm_);
temps.Exclude(reg_x(dst.AsXRegister()), reg_x(base.AsXRegister()));
___ Ldr(reg_x(dst.AsXRegister()), MEM_OP(reg_x(base.AsXRegister()), offs.Int32Value()));
}
@@ -479,7 +479,7 @@
void Arm64Assembler::MemoryBarrier(ManagedRegister m_scratch ATTRIBUTE_UNUSED) {
// TODO: Should we check that m_scratch is IP? - see arm.
- ___ Dmb(vixl::InnerShareable, vixl::BarrierAll);
+ ___ Dmb(InnerShareable, BarrierAll);
}
void Arm64Assembler::SignExtend(ManagedRegister mreg, size_t size) {
@@ -527,7 +527,7 @@
CHECK(base.IsXRegister()) << base;
CHECK(scratch.IsXRegister()) << scratch;
  // Remove base and scratch from the temp list - higher level API uses IP1, IP0.
- vixl::UseScratchRegisterScope temps(vixl_masm_);
+ UseScratchRegisterScope temps(vixl_masm_);
temps.Exclude(reg_x(base.AsXRegister()), reg_x(scratch.AsXRegister()));
___ Ldr(reg_x(scratch.AsXRegister()), MEM_OP(reg_x(base.AsXRegister()), offs.Int32Value()));
___ Br(reg_x(scratch.AsXRegister()));
@@ -598,7 +598,7 @@
Arm64ManagedRegister in_reg = m_in_reg.AsArm64();
CHECK(out_reg.IsXRegister()) << out_reg;
CHECK(in_reg.IsXRegister()) << in_reg;
- vixl::Label exit;
+ vixl::aarch64::Label exit;
if (!out_reg.Equals(in_reg)) {
// FIXME: Who sets the flags here?
LoadImmediate(out_reg.AsXRegister(), 0, eq);
@@ -617,9 +617,9 @@
}
void Arm64Assembler::EmitExceptionPoll(Arm64Exception *exception) {
- vixl::UseScratchRegisterScope temps(vixl_masm_);
+ UseScratchRegisterScope temps(vixl_masm_);
temps.Exclude(reg_x(exception->scratch_.AsXRegister()));
- vixl::Register temp = temps.AcquireX();
+ Register temp = temps.AcquireX();
// Bind exception poll entry.
___ Bind(exception->Entry());
@@ -638,26 +638,26 @@
static inline dwarf::Reg DWARFReg(CPURegister reg) {
if (reg.IsFPRegister()) {
- return dwarf::Reg::Arm64Fp(reg.code());
+ return dwarf::Reg::Arm64Fp(reg.GetCode());
} else {
- DCHECK_LT(reg.code(), 31u); // X0 - X30.
- return dwarf::Reg::Arm64Core(reg.code());
+ DCHECK_LT(reg.GetCode(), 31u); // X0 - X30.
+ return dwarf::Reg::Arm64Core(reg.GetCode());
}
}
-void Arm64Assembler::SpillRegisters(vixl::CPURegList registers, int offset) {
- int size = registers.RegisterSizeInBytes();
+void Arm64Assembler::SpillRegisters(CPURegList registers, int offset) {
+ int size = registers.GetRegisterSizeInBytes();
const Register sp = vixl_masm_->StackPointer();
// Since we are operating on register pairs, we would like to align on
// double the standard size; on the other hand, we don't want to insert
// an extra store, which will happen if the number of registers is even.
- if (!IsAlignedParam(offset, 2 * size) && registers.Count() % 2 != 0) {
+ if (!IsAlignedParam(offset, 2 * size) && registers.GetCount() % 2 != 0) {
const CPURegister& dst0 = registers.PopLowestIndex();
___ Str(dst0, MemOperand(sp, offset));
cfi_.RelOffset(DWARFReg(dst0), offset);
offset += size;
}
- while (registers.Count() >= 2) {
+ while (registers.GetCount() >= 2) {
const CPURegister& dst0 = registers.PopLowestIndex();
const CPURegister& dst1 = registers.PopLowestIndex();
___ Stp(dst0, dst1, MemOperand(sp, offset));
@@ -673,17 +673,17 @@
DCHECK(registers.IsEmpty());
}
-void Arm64Assembler::UnspillRegisters(vixl::CPURegList registers, int offset) {
- int size = registers.RegisterSizeInBytes();
+void Arm64Assembler::UnspillRegisters(CPURegList registers, int offset) {
+ int size = registers.GetRegisterSizeInBytes();
const Register sp = vixl_masm_->StackPointer();
// Be consistent with the logic for spilling registers.
- if (!IsAlignedParam(offset, 2 * size) && registers.Count() % 2 != 0) {
+ if (!IsAlignedParam(offset, 2 * size) && registers.GetCount() % 2 != 0) {
const CPURegister& dst0 = registers.PopLowestIndex();
___ Ldr(dst0, MemOperand(sp, offset));
cfi_.Restore(DWARFReg(dst0));
offset += size;
}
- while (registers.Count() >= 2) {
+ while (registers.GetCount() >= 2) {
const CPURegister& dst0 = registers.PopLowestIndex();
const CPURegister& dst1 = registers.PopLowestIndex();
___ Ldp(dst0, dst1, MemOperand(sp, offset));
@@ -709,14 +709,14 @@
for (auto r : callee_save_regs) {
Arm64ManagedRegister reg = r.AsArm64();
if (reg.IsXRegister()) {
- core_reg_list.Combine(reg_x(reg.AsXRegister()).code());
+ core_reg_list.Combine(reg_x(reg.AsXRegister()).GetCode());
} else {
DCHECK(reg.IsDRegister());
- fp_reg_list.Combine(reg_d(reg.AsDRegister()).code());
+ fp_reg_list.Combine(reg_d(reg.AsDRegister()).GetCode());
}
}
- size_t core_reg_size = core_reg_list.TotalSizeInBytes();
- size_t fp_reg_size = fp_reg_list.TotalSizeInBytes();
+ size_t core_reg_size = core_reg_list.GetTotalSizeInBytes();
+ size_t fp_reg_size = fp_reg_list.GetTotalSizeInBytes();
// Increase frame to required size.
DCHECK_ALIGNED(frame_size, kStackAlignment);
@@ -765,14 +765,14 @@
for (auto r : callee_save_regs) {
Arm64ManagedRegister reg = r.AsArm64();
if (reg.IsXRegister()) {
- core_reg_list.Combine(reg_x(reg.AsXRegister()).code());
+ core_reg_list.Combine(reg_x(reg.AsXRegister()).GetCode());
} else {
DCHECK(reg.IsDRegister());
- fp_reg_list.Combine(reg_d(reg.AsDRegister()).code());
+ fp_reg_list.Combine(reg_d(reg.AsDRegister()).GetCode());
}
}
- size_t core_reg_size = core_reg_list.TotalSizeInBytes();
- size_t fp_reg_size = fp_reg_list.TotalSizeInBytes();
+ size_t core_reg_size = core_reg_list.GetTotalSizeInBytes();
+ size_t fp_reg_size = fp_reg_list.GetTotalSizeInBytes();
// For now we only check that the size of the frame is large enough to hold spills and method
// reference.
@@ -798,19 +798,19 @@
cfi_.DefCFAOffset(frame_size);
}
-void Arm64Assembler::PoisonHeapReference(vixl::Register reg) {
+void Arm64Assembler::PoisonHeapReference(Register reg) {
DCHECK(reg.IsW());
// reg = -reg.
- ___ Neg(reg, vixl::Operand(reg));
+ ___ Neg(reg, Operand(reg));
}
-void Arm64Assembler::UnpoisonHeapReference(vixl::Register reg) {
+void Arm64Assembler::UnpoisonHeapReference(Register reg) {
DCHECK(reg.IsW());
// reg = -reg.
- ___ Neg(reg, vixl::Operand(reg));
+ ___ Neg(reg, Operand(reg));
}
-void Arm64Assembler::MaybeUnpoisonHeapReference(vixl::Register reg) {
+void Arm64Assembler::MaybeUnpoisonHeapReference(Register reg) {
if (kPoisonHeapReferences) {
UnpoisonHeapReference(reg);
}
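The SpillRegisters/UnspillRegisters hunks above keep the same pairing logic and only swap in the Get-prefixed CPURegList queries. Below is a simplified sketch of that loop shape, for orientation only: the IsAlignedParam special case from the real code is omitted and the function name is a placeholder.

#include "a64/macro-assembler-a64.h"

void ExampleSpillPairs(vixl::aarch64::MacroAssembler* masm,
                       vixl::aarch64::CPURegList registers,
                       int offset) {
  const int size = registers.GetRegisterSizeInBytes();
  const vixl::aarch64::Register sp = masm->StackPointer();
  // If the count is odd, store one register singly first so the rest can be
  // stored as pairs.
  if (registers.GetCount() % 2 != 0) {
    const vixl::aarch64::CPURegister& dst0 = registers.PopLowestIndex();
    masm->Str(dst0, vixl::aarch64::MemOperand(sp, offset));
    offset += size;
  }
  while (registers.GetCount() >= 2) {
    const vixl::aarch64::CPURegister& dst0 = registers.PopLowestIndex();
    const vixl::aarch64::CPURegister& dst1 = registers.PopLowestIndex();
    masm->Stp(dst0, dst1, vixl::aarch64::MemOperand(sp, offset));
    offset += 2 * size;
  }
}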
diff --git a/compiler/utils/arm64/assembler_arm64.h b/compiler/utils/arm64/assembler_arm64.h
index 91171a8..a481544 100644
--- a/compiler/utils/arm64/assembler_arm64.h
+++ b/compiler/utils/arm64/assembler_arm64.h
@@ -28,19 +28,19 @@
#include "utils/assembler.h"
#include "offsets.h"
-// TODO: make vixl clean wrt -Wshadow.
+// TODO: make vixl clean wrt -Wshadow, -Wunknown-pragmas, -Wmissing-noreturn
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunknown-pragmas"
#pragma GCC diagnostic ignored "-Wshadow"
#pragma GCC diagnostic ignored "-Wmissing-noreturn"
-#include "vixl/a64/macro-assembler-a64.h"
-#include "vixl/a64/disasm-a64.h"
+#include "a64/disasm-a64.h"
+#include "a64/macro-assembler-a64.h"
#pragma GCC diagnostic pop
namespace art {
namespace arm64 {
-#define MEM_OP(...) vixl::MemOperand(__VA_ARGS__)
+#define MEM_OP(...) vixl::aarch64::MemOperand(__VA_ARGS__)
enum LoadOperandType {
kLoadSignedByte,
@@ -68,7 +68,7 @@
: scratch_(scratch), stack_adjust_(stack_adjust) {
}
- vixl::Label* Entry() { return &exception_entry_; }
+ vixl::aarch64::Label* Entry() { return &exception_entry_; }
// Register used for passing Thread::Current()->exception_ .
const Arm64ManagedRegister scratch_;
@@ -76,7 +76,7 @@
// Stack adjust for ExceptionPool.
const size_t stack_adjust_;
- vixl::Label exception_entry_;
+ vixl::aarch64::Label exception_entry_;
friend class Arm64Assembler;
DISALLOW_COPY_AND_ASSIGN(Arm64Exception);
@@ -89,7 +89,7 @@
explicit Arm64Assembler(ArenaAllocator* arena)
: Assembler(arena),
exception_blocks_(arena->Adapter(kArenaAllocAssembler)),
- vixl_masm_(new vixl::MacroAssembler(kArm64BaseBufferSize)) {}
+ vixl_masm_(new vixl::aarch64::MacroAssembler(kArm64BaseBufferSize)) {}
virtual ~Arm64Assembler() {
delete vixl_masm_;
@@ -105,8 +105,8 @@
// Copy instructions out of assembly buffer into the given region of memory.
void FinalizeInstructions(const MemoryRegion& region);
- void SpillRegisters(vixl::CPURegList registers, int offset);
- void UnspillRegisters(vixl::CPURegList registers, int offset);
+ void SpillRegisters(vixl::aarch64::CPURegList registers, int offset);
+ void UnspillRegisters(vixl::aarch64::CPURegList registers, int offset);
// Emit code that will create an activation on the stack.
void BuildFrame(size_t frame_size,
@@ -177,13 +177,17 @@
// value is null and null_allowed. in_reg holds a possibly stale reference
// that can be used to avoid loading the handle scope entry to see if the value is
// null.
- void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
- ManagedRegister in_reg, bool null_allowed) OVERRIDE;
+ void CreateHandleScopeEntry(ManagedRegister out_reg,
+ FrameOffset handlescope_offset,
+ ManagedRegister in_reg,
+ bool null_allowed) OVERRIDE;
// Set up out_off to hold a Object** into the handle scope, or to be null if the
// value is null and null_allowed.
- void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset,
- ManagedRegister scratch, bool null_allowed) OVERRIDE;
+ void CreateHandleScopeEntry(FrameOffset out_off,
+ FrameOffset handlescope_offset,
+ ManagedRegister scratch,
+ bool null_allowed) OVERRIDE;
// src holds a handle scope entry (Object**) load this into dst.
void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
@@ -210,11 +214,11 @@
//
// Poison a heap reference contained in `reg`.
- void PoisonHeapReference(vixl::Register reg);
+ void PoisonHeapReference(vixl::aarch64::Register reg);
// Unpoison a heap reference contained in `reg`.
- void UnpoisonHeapReference(vixl::Register reg);
+ void UnpoisonHeapReference(vixl::aarch64::Register reg);
// Unpoison a heap reference contained in `reg` if heap poisoning is enabled.
- void MaybeUnpoisonHeapReference(vixl::Register reg);
+ void MaybeUnpoisonHeapReference(vixl::aarch64::Register reg);
void Bind(Label* label ATTRIBUTE_UNUSED) OVERRIDE {
UNIMPLEMENTED(FATAL) << "Do not use Bind for ARM64";
@@ -224,32 +228,32 @@
}
private:
- static vixl::Register reg_x(int code) {
+ static vixl::aarch64::Register reg_x(int code) {
CHECK(code < kNumberOfXRegisters) << code;
if (code == SP) {
- return vixl::sp;
+ return vixl::aarch64::sp;
} else if (code == XZR) {
- return vixl::xzr;
+ return vixl::aarch64::xzr;
}
- return vixl::Register::XRegFromCode(code);
+ return vixl::aarch64::Register::GetXRegFromCode(code);
}
- static vixl::Register reg_w(int code) {
+ static vixl::aarch64::Register reg_w(int code) {
CHECK(code < kNumberOfWRegisters) << code;
if (code == WSP) {
- return vixl::wsp;
+ return vixl::aarch64::wsp;
} else if (code == WZR) {
- return vixl::wzr;
+ return vixl::aarch64::wzr;
}
- return vixl::Register::WRegFromCode(code);
+ return vixl::aarch64::Register::GetWRegFromCode(code);
}
- static vixl::FPRegister reg_d(int code) {
- return vixl::FPRegister::DRegFromCode(code);
+ static vixl::aarch64::FPRegister reg_d(int code) {
+ return vixl::aarch64::FPRegister::GetDRegFromCode(code);
}
- static vixl::FPRegister reg_s(int code) {
- return vixl::FPRegister::SRegFromCode(code);
+ static vixl::aarch64::FPRegister reg_s(int code) {
+ return vixl::aarch64::FPRegister::GetSRegFromCode(code);
}
// Emits Exception block.
@@ -261,22 +265,31 @@
void StoreSToOffset(SRegister source, XRegister base, int32_t offset);
void StoreDToOffset(DRegister source, XRegister base, int32_t offset);
- void LoadImmediate(XRegister dest, int32_t value, vixl::Condition cond = vixl::al);
+ void LoadImmediate(XRegister dest,
+ int32_t value,
+ vixl::aarch64::Condition cond = vixl::aarch64::al);
void Load(Arm64ManagedRegister dst, XRegister src, int32_t src_offset, size_t size);
- void LoadWFromOffset(LoadOperandType type, WRegister dest,
- XRegister base, int32_t offset);
+ void LoadWFromOffset(LoadOperandType type,
+ WRegister dest,
+ XRegister base,
+ int32_t offset);
void LoadFromOffset(XRegister dest, XRegister base, int32_t offset);
void LoadSFromOffset(SRegister dest, XRegister base, int32_t offset);
void LoadDFromOffset(DRegister dest, XRegister base, int32_t offset);
- void AddConstant(XRegister rd, int32_t value, vixl::Condition cond = vixl::al);
- void AddConstant(XRegister rd, XRegister rn, int32_t value, vixl::Condition cond = vixl::al);
+ void AddConstant(XRegister rd,
+ int32_t value,
+ vixl::aarch64::Condition cond = vixl::aarch64::al);
+ void AddConstant(XRegister rd,
+ XRegister rn,
+ int32_t value,
+ vixl::aarch64::Condition cond = vixl::aarch64::al);
// List of exception blocks to generate at the end of the code cache.
ArenaVector<std::unique_ptr<Arm64Exception>> exception_blocks_;
public:
// Vixl assembler.
- vixl::MacroAssembler* const vixl_masm_;
+ vixl::aarch64::MacroAssembler* const vixl_masm_;
// Used for testing.
friend class Arm64ManagedRegister_VixlRegisters_Test;
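To round out the assembler_arm64.h changes, here is a sketch of the ownership and scratch-register pattern after the rename, again assuming the "a64/..." include path; the class name and buffer size below are arbitrary choices for the example, not values from the patch.

#include "a64/macro-assembler-a64.h"

class ExampleAssembler {  // Hypothetical class, for illustration only.
 public:
  ExampleAssembler() : masm_(new vixl::aarch64::MacroAssembler(4 * 1024)) {}
  ~ExampleAssembler() { delete masm_; }

  size_t CodeSize() const {
    // Old API: BufferCapacity() - RemainingBufferSpace()
    return masm_->GetBufferCapacity() - masm_->GetRemainingBufferSpace();
  }

  void AddImmediate(vixl::aarch64::Register rd, int64_t value) {
    // Scratch registers now come from vixl::aarch64::UseScratchRegisterScope.
    vixl::aarch64::UseScratchRegisterScope temps(masm_);
    vixl::aarch64::Register temp = temps.AcquireX();
    masm_->Mov(temp, value);
    masm_->Add(rd, rd, temp);
  }

 private:
  vixl::aarch64::MacroAssembler* const masm_;
};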
diff --git a/compiler/utils/arm64/managed_register_arm64_test.cc b/compiler/utils/arm64/managed_register_arm64_test.cc
index e27115d..79076b8 100644
--- a/compiler/utils/arm64/managed_register_arm64_test.cc
+++ b/compiler/utils/arm64/managed_register_arm64_test.cc
@@ -591,149 +591,149 @@
TEST(Arm64ManagedRegister, VixlRegisters) {
// X Registers.
- EXPECT_TRUE(vixl::x0.Is(Arm64Assembler::reg_x(X0)));
- EXPECT_TRUE(vixl::x1.Is(Arm64Assembler::reg_x(X1)));
- EXPECT_TRUE(vixl::x2.Is(Arm64Assembler::reg_x(X2)));
- EXPECT_TRUE(vixl::x3.Is(Arm64Assembler::reg_x(X3)));
- EXPECT_TRUE(vixl::x4.Is(Arm64Assembler::reg_x(X4)));
- EXPECT_TRUE(vixl::x5.Is(Arm64Assembler::reg_x(X5)));
- EXPECT_TRUE(vixl::x6.Is(Arm64Assembler::reg_x(X6)));
- EXPECT_TRUE(vixl::x7.Is(Arm64Assembler::reg_x(X7)));
- EXPECT_TRUE(vixl::x8.Is(Arm64Assembler::reg_x(X8)));
- EXPECT_TRUE(vixl::x9.Is(Arm64Assembler::reg_x(X9)));
- EXPECT_TRUE(vixl::x10.Is(Arm64Assembler::reg_x(X10)));
- EXPECT_TRUE(vixl::x11.Is(Arm64Assembler::reg_x(X11)));
- EXPECT_TRUE(vixl::x12.Is(Arm64Assembler::reg_x(X12)));
- EXPECT_TRUE(vixl::x13.Is(Arm64Assembler::reg_x(X13)));
- EXPECT_TRUE(vixl::x14.Is(Arm64Assembler::reg_x(X14)));
- EXPECT_TRUE(vixl::x15.Is(Arm64Assembler::reg_x(X15)));
- EXPECT_TRUE(vixl::x16.Is(Arm64Assembler::reg_x(X16)));
- EXPECT_TRUE(vixl::x17.Is(Arm64Assembler::reg_x(X17)));
- EXPECT_TRUE(vixl::x18.Is(Arm64Assembler::reg_x(X18)));
- EXPECT_TRUE(vixl::x19.Is(Arm64Assembler::reg_x(X19)));
- EXPECT_TRUE(vixl::x20.Is(Arm64Assembler::reg_x(X20)));
- EXPECT_TRUE(vixl::x21.Is(Arm64Assembler::reg_x(X21)));
- EXPECT_TRUE(vixl::x22.Is(Arm64Assembler::reg_x(X22)));
- EXPECT_TRUE(vixl::x23.Is(Arm64Assembler::reg_x(X23)));
- EXPECT_TRUE(vixl::x24.Is(Arm64Assembler::reg_x(X24)));
- EXPECT_TRUE(vixl::x25.Is(Arm64Assembler::reg_x(X25)));
- EXPECT_TRUE(vixl::x26.Is(Arm64Assembler::reg_x(X26)));
- EXPECT_TRUE(vixl::x27.Is(Arm64Assembler::reg_x(X27)));
- EXPECT_TRUE(vixl::x28.Is(Arm64Assembler::reg_x(X28)));
- EXPECT_TRUE(vixl::x29.Is(Arm64Assembler::reg_x(X29)));
- EXPECT_TRUE(vixl::x30.Is(Arm64Assembler::reg_x(X30)));
+ EXPECT_TRUE(vixl::aarch64::x0.Is(Arm64Assembler::reg_x(X0)));
+ EXPECT_TRUE(vixl::aarch64::x1.Is(Arm64Assembler::reg_x(X1)));
+ EXPECT_TRUE(vixl::aarch64::x2.Is(Arm64Assembler::reg_x(X2)));
+ EXPECT_TRUE(vixl::aarch64::x3.Is(Arm64Assembler::reg_x(X3)));
+ EXPECT_TRUE(vixl::aarch64::x4.Is(Arm64Assembler::reg_x(X4)));
+ EXPECT_TRUE(vixl::aarch64::x5.Is(Arm64Assembler::reg_x(X5)));
+ EXPECT_TRUE(vixl::aarch64::x6.Is(Arm64Assembler::reg_x(X6)));
+ EXPECT_TRUE(vixl::aarch64::x7.Is(Arm64Assembler::reg_x(X7)));
+ EXPECT_TRUE(vixl::aarch64::x8.Is(Arm64Assembler::reg_x(X8)));
+ EXPECT_TRUE(vixl::aarch64::x9.Is(Arm64Assembler::reg_x(X9)));
+ EXPECT_TRUE(vixl::aarch64::x10.Is(Arm64Assembler::reg_x(X10)));
+ EXPECT_TRUE(vixl::aarch64::x11.Is(Arm64Assembler::reg_x(X11)));
+ EXPECT_TRUE(vixl::aarch64::x12.Is(Arm64Assembler::reg_x(X12)));
+ EXPECT_TRUE(vixl::aarch64::x13.Is(Arm64Assembler::reg_x(X13)));
+ EXPECT_TRUE(vixl::aarch64::x14.Is(Arm64Assembler::reg_x(X14)));
+ EXPECT_TRUE(vixl::aarch64::x15.Is(Arm64Assembler::reg_x(X15)));
+ EXPECT_TRUE(vixl::aarch64::x16.Is(Arm64Assembler::reg_x(X16)));
+ EXPECT_TRUE(vixl::aarch64::x17.Is(Arm64Assembler::reg_x(X17)));
+ EXPECT_TRUE(vixl::aarch64::x18.Is(Arm64Assembler::reg_x(X18)));
+ EXPECT_TRUE(vixl::aarch64::x19.Is(Arm64Assembler::reg_x(X19)));
+ EXPECT_TRUE(vixl::aarch64::x20.Is(Arm64Assembler::reg_x(X20)));
+ EXPECT_TRUE(vixl::aarch64::x21.Is(Arm64Assembler::reg_x(X21)));
+ EXPECT_TRUE(vixl::aarch64::x22.Is(Arm64Assembler::reg_x(X22)));
+ EXPECT_TRUE(vixl::aarch64::x23.Is(Arm64Assembler::reg_x(X23)));
+ EXPECT_TRUE(vixl::aarch64::x24.Is(Arm64Assembler::reg_x(X24)));
+ EXPECT_TRUE(vixl::aarch64::x25.Is(Arm64Assembler::reg_x(X25)));
+ EXPECT_TRUE(vixl::aarch64::x26.Is(Arm64Assembler::reg_x(X26)));
+ EXPECT_TRUE(vixl::aarch64::x27.Is(Arm64Assembler::reg_x(X27)));
+ EXPECT_TRUE(vixl::aarch64::x28.Is(Arm64Assembler::reg_x(X28)));
+ EXPECT_TRUE(vixl::aarch64::x29.Is(Arm64Assembler::reg_x(X29)));
+ EXPECT_TRUE(vixl::aarch64::x30.Is(Arm64Assembler::reg_x(X30)));
- EXPECT_TRUE(vixl::x19.Is(Arm64Assembler::reg_x(TR)));
- EXPECT_TRUE(vixl::ip0.Is(Arm64Assembler::reg_x(IP0)));
- EXPECT_TRUE(vixl::ip1.Is(Arm64Assembler::reg_x(IP1)));
- EXPECT_TRUE(vixl::x29.Is(Arm64Assembler::reg_x(FP)));
- EXPECT_TRUE(vixl::lr.Is(Arm64Assembler::reg_x(LR)));
- EXPECT_TRUE(vixl::sp.Is(Arm64Assembler::reg_x(SP)));
- EXPECT_TRUE(vixl::xzr.Is(Arm64Assembler::reg_x(XZR)));
+ EXPECT_TRUE(vixl::aarch64::x19.Is(Arm64Assembler::reg_x(TR)));
+ EXPECT_TRUE(vixl::aarch64::ip0.Is(Arm64Assembler::reg_x(IP0)));
+ EXPECT_TRUE(vixl::aarch64::ip1.Is(Arm64Assembler::reg_x(IP1)));
+ EXPECT_TRUE(vixl::aarch64::x29.Is(Arm64Assembler::reg_x(FP)));
+ EXPECT_TRUE(vixl::aarch64::lr.Is(Arm64Assembler::reg_x(LR)));
+ EXPECT_TRUE(vixl::aarch64::sp.Is(Arm64Assembler::reg_x(SP)));
+ EXPECT_TRUE(vixl::aarch64::xzr.Is(Arm64Assembler::reg_x(XZR)));
// W Registers.
- EXPECT_TRUE(vixl::w0.Is(Arm64Assembler::reg_w(W0)));
- EXPECT_TRUE(vixl::w1.Is(Arm64Assembler::reg_w(W1)));
- EXPECT_TRUE(vixl::w2.Is(Arm64Assembler::reg_w(W2)));
- EXPECT_TRUE(vixl::w3.Is(Arm64Assembler::reg_w(W3)));
- EXPECT_TRUE(vixl::w4.Is(Arm64Assembler::reg_w(W4)));
- EXPECT_TRUE(vixl::w5.Is(Arm64Assembler::reg_w(W5)));
- EXPECT_TRUE(vixl::w6.Is(Arm64Assembler::reg_w(W6)));
- EXPECT_TRUE(vixl::w7.Is(Arm64Assembler::reg_w(W7)));
- EXPECT_TRUE(vixl::w8.Is(Arm64Assembler::reg_w(W8)));
- EXPECT_TRUE(vixl::w9.Is(Arm64Assembler::reg_w(W9)));
- EXPECT_TRUE(vixl::w10.Is(Arm64Assembler::reg_w(W10)));
- EXPECT_TRUE(vixl::w11.Is(Arm64Assembler::reg_w(W11)));
- EXPECT_TRUE(vixl::w12.Is(Arm64Assembler::reg_w(W12)));
- EXPECT_TRUE(vixl::w13.Is(Arm64Assembler::reg_w(W13)));
- EXPECT_TRUE(vixl::w14.Is(Arm64Assembler::reg_w(W14)));
- EXPECT_TRUE(vixl::w15.Is(Arm64Assembler::reg_w(W15)));
- EXPECT_TRUE(vixl::w16.Is(Arm64Assembler::reg_w(W16)));
- EXPECT_TRUE(vixl::w17.Is(Arm64Assembler::reg_w(W17)));
- EXPECT_TRUE(vixl::w18.Is(Arm64Assembler::reg_w(W18)));
- EXPECT_TRUE(vixl::w19.Is(Arm64Assembler::reg_w(W19)));
- EXPECT_TRUE(vixl::w20.Is(Arm64Assembler::reg_w(W20)));
- EXPECT_TRUE(vixl::w21.Is(Arm64Assembler::reg_w(W21)));
- EXPECT_TRUE(vixl::w22.Is(Arm64Assembler::reg_w(W22)));
- EXPECT_TRUE(vixl::w23.Is(Arm64Assembler::reg_w(W23)));
- EXPECT_TRUE(vixl::w24.Is(Arm64Assembler::reg_w(W24)));
- EXPECT_TRUE(vixl::w25.Is(Arm64Assembler::reg_w(W25)));
- EXPECT_TRUE(vixl::w26.Is(Arm64Assembler::reg_w(W26)));
- EXPECT_TRUE(vixl::w27.Is(Arm64Assembler::reg_w(W27)));
- EXPECT_TRUE(vixl::w28.Is(Arm64Assembler::reg_w(W28)));
- EXPECT_TRUE(vixl::w29.Is(Arm64Assembler::reg_w(W29)));
- EXPECT_TRUE(vixl::w30.Is(Arm64Assembler::reg_w(W30)));
- EXPECT_TRUE(vixl::w31.Is(Arm64Assembler::reg_w(WZR)));
- EXPECT_TRUE(vixl::wzr.Is(Arm64Assembler::reg_w(WZR)));
- EXPECT_TRUE(vixl::wsp.Is(Arm64Assembler::reg_w(WSP)));
+ EXPECT_TRUE(vixl::aarch64::w0.Is(Arm64Assembler::reg_w(W0)));
+ EXPECT_TRUE(vixl::aarch64::w1.Is(Arm64Assembler::reg_w(W1)));
+ EXPECT_TRUE(vixl::aarch64::w2.Is(Arm64Assembler::reg_w(W2)));
+ EXPECT_TRUE(vixl::aarch64::w3.Is(Arm64Assembler::reg_w(W3)));
+ EXPECT_TRUE(vixl::aarch64::w4.Is(Arm64Assembler::reg_w(W4)));
+ EXPECT_TRUE(vixl::aarch64::w5.Is(Arm64Assembler::reg_w(W5)));
+ EXPECT_TRUE(vixl::aarch64::w6.Is(Arm64Assembler::reg_w(W6)));
+ EXPECT_TRUE(vixl::aarch64::w7.Is(Arm64Assembler::reg_w(W7)));
+ EXPECT_TRUE(vixl::aarch64::w8.Is(Arm64Assembler::reg_w(W8)));
+ EXPECT_TRUE(vixl::aarch64::w9.Is(Arm64Assembler::reg_w(W9)));
+ EXPECT_TRUE(vixl::aarch64::w10.Is(Arm64Assembler::reg_w(W10)));
+ EXPECT_TRUE(vixl::aarch64::w11.Is(Arm64Assembler::reg_w(W11)));
+ EXPECT_TRUE(vixl::aarch64::w12.Is(Arm64Assembler::reg_w(W12)));
+ EXPECT_TRUE(vixl::aarch64::w13.Is(Arm64Assembler::reg_w(W13)));
+ EXPECT_TRUE(vixl::aarch64::w14.Is(Arm64Assembler::reg_w(W14)));
+ EXPECT_TRUE(vixl::aarch64::w15.Is(Arm64Assembler::reg_w(W15)));
+ EXPECT_TRUE(vixl::aarch64::w16.Is(Arm64Assembler::reg_w(W16)));
+ EXPECT_TRUE(vixl::aarch64::w17.Is(Arm64Assembler::reg_w(W17)));
+ EXPECT_TRUE(vixl::aarch64::w18.Is(Arm64Assembler::reg_w(W18)));
+ EXPECT_TRUE(vixl::aarch64::w19.Is(Arm64Assembler::reg_w(W19)));
+ EXPECT_TRUE(vixl::aarch64::w20.Is(Arm64Assembler::reg_w(W20)));
+ EXPECT_TRUE(vixl::aarch64::w21.Is(Arm64Assembler::reg_w(W21)));
+ EXPECT_TRUE(vixl::aarch64::w22.Is(Arm64Assembler::reg_w(W22)));
+ EXPECT_TRUE(vixl::aarch64::w23.Is(Arm64Assembler::reg_w(W23)));
+ EXPECT_TRUE(vixl::aarch64::w24.Is(Arm64Assembler::reg_w(W24)));
+ EXPECT_TRUE(vixl::aarch64::w25.Is(Arm64Assembler::reg_w(W25)));
+ EXPECT_TRUE(vixl::aarch64::w26.Is(Arm64Assembler::reg_w(W26)));
+ EXPECT_TRUE(vixl::aarch64::w27.Is(Arm64Assembler::reg_w(W27)));
+ EXPECT_TRUE(vixl::aarch64::w28.Is(Arm64Assembler::reg_w(W28)));
+ EXPECT_TRUE(vixl::aarch64::w29.Is(Arm64Assembler::reg_w(W29)));
+ EXPECT_TRUE(vixl::aarch64::w30.Is(Arm64Assembler::reg_w(W30)));
+ EXPECT_TRUE(vixl::aarch64::w31.Is(Arm64Assembler::reg_w(WZR)));
+ EXPECT_TRUE(vixl::aarch64::wzr.Is(Arm64Assembler::reg_w(WZR)));
+ EXPECT_TRUE(vixl::aarch64::wsp.Is(Arm64Assembler::reg_w(WSP)));
// D Registers.
- EXPECT_TRUE(vixl::d0.Is(Arm64Assembler::reg_d(D0)));
- EXPECT_TRUE(vixl::d1.Is(Arm64Assembler::reg_d(D1)));
- EXPECT_TRUE(vixl::d2.Is(Arm64Assembler::reg_d(D2)));
- EXPECT_TRUE(vixl::d3.Is(Arm64Assembler::reg_d(D3)));
- EXPECT_TRUE(vixl::d4.Is(Arm64Assembler::reg_d(D4)));
- EXPECT_TRUE(vixl::d5.Is(Arm64Assembler::reg_d(D5)));
- EXPECT_TRUE(vixl::d6.Is(Arm64Assembler::reg_d(D6)));
- EXPECT_TRUE(vixl::d7.Is(Arm64Assembler::reg_d(D7)));
- EXPECT_TRUE(vixl::d8.Is(Arm64Assembler::reg_d(D8)));
- EXPECT_TRUE(vixl::d9.Is(Arm64Assembler::reg_d(D9)));
- EXPECT_TRUE(vixl::d10.Is(Arm64Assembler::reg_d(D10)));
- EXPECT_TRUE(vixl::d11.Is(Arm64Assembler::reg_d(D11)));
- EXPECT_TRUE(vixl::d12.Is(Arm64Assembler::reg_d(D12)));
- EXPECT_TRUE(vixl::d13.Is(Arm64Assembler::reg_d(D13)));
- EXPECT_TRUE(vixl::d14.Is(Arm64Assembler::reg_d(D14)));
- EXPECT_TRUE(vixl::d15.Is(Arm64Assembler::reg_d(D15)));
- EXPECT_TRUE(vixl::d16.Is(Arm64Assembler::reg_d(D16)));
- EXPECT_TRUE(vixl::d17.Is(Arm64Assembler::reg_d(D17)));
- EXPECT_TRUE(vixl::d18.Is(Arm64Assembler::reg_d(D18)));
- EXPECT_TRUE(vixl::d19.Is(Arm64Assembler::reg_d(D19)));
- EXPECT_TRUE(vixl::d20.Is(Arm64Assembler::reg_d(D20)));
- EXPECT_TRUE(vixl::d21.Is(Arm64Assembler::reg_d(D21)));
- EXPECT_TRUE(vixl::d22.Is(Arm64Assembler::reg_d(D22)));
- EXPECT_TRUE(vixl::d23.Is(Arm64Assembler::reg_d(D23)));
- EXPECT_TRUE(vixl::d24.Is(Arm64Assembler::reg_d(D24)));
- EXPECT_TRUE(vixl::d25.Is(Arm64Assembler::reg_d(D25)));
- EXPECT_TRUE(vixl::d26.Is(Arm64Assembler::reg_d(D26)));
- EXPECT_TRUE(vixl::d27.Is(Arm64Assembler::reg_d(D27)));
- EXPECT_TRUE(vixl::d28.Is(Arm64Assembler::reg_d(D28)));
- EXPECT_TRUE(vixl::d29.Is(Arm64Assembler::reg_d(D29)));
- EXPECT_TRUE(vixl::d30.Is(Arm64Assembler::reg_d(D30)));
- EXPECT_TRUE(vixl::d31.Is(Arm64Assembler::reg_d(D31)));
+ EXPECT_TRUE(vixl::aarch64::d0.Is(Arm64Assembler::reg_d(D0)));
+ EXPECT_TRUE(vixl::aarch64::d1.Is(Arm64Assembler::reg_d(D1)));
+ EXPECT_TRUE(vixl::aarch64::d2.Is(Arm64Assembler::reg_d(D2)));
+ EXPECT_TRUE(vixl::aarch64::d3.Is(Arm64Assembler::reg_d(D3)));
+ EXPECT_TRUE(vixl::aarch64::d4.Is(Arm64Assembler::reg_d(D4)));
+ EXPECT_TRUE(vixl::aarch64::d5.Is(Arm64Assembler::reg_d(D5)));
+ EXPECT_TRUE(vixl::aarch64::d6.Is(Arm64Assembler::reg_d(D6)));
+ EXPECT_TRUE(vixl::aarch64::d7.Is(Arm64Assembler::reg_d(D7)));
+ EXPECT_TRUE(vixl::aarch64::d8.Is(Arm64Assembler::reg_d(D8)));
+ EXPECT_TRUE(vixl::aarch64::d9.Is(Arm64Assembler::reg_d(D9)));
+ EXPECT_TRUE(vixl::aarch64::d10.Is(Arm64Assembler::reg_d(D10)));
+ EXPECT_TRUE(vixl::aarch64::d11.Is(Arm64Assembler::reg_d(D11)));
+ EXPECT_TRUE(vixl::aarch64::d12.Is(Arm64Assembler::reg_d(D12)));
+ EXPECT_TRUE(vixl::aarch64::d13.Is(Arm64Assembler::reg_d(D13)));
+ EXPECT_TRUE(vixl::aarch64::d14.Is(Arm64Assembler::reg_d(D14)));
+ EXPECT_TRUE(vixl::aarch64::d15.Is(Arm64Assembler::reg_d(D15)));
+ EXPECT_TRUE(vixl::aarch64::d16.Is(Arm64Assembler::reg_d(D16)));
+ EXPECT_TRUE(vixl::aarch64::d17.Is(Arm64Assembler::reg_d(D17)));
+ EXPECT_TRUE(vixl::aarch64::d18.Is(Arm64Assembler::reg_d(D18)));
+ EXPECT_TRUE(vixl::aarch64::d19.Is(Arm64Assembler::reg_d(D19)));
+ EXPECT_TRUE(vixl::aarch64::d20.Is(Arm64Assembler::reg_d(D20)));
+ EXPECT_TRUE(vixl::aarch64::d21.Is(Arm64Assembler::reg_d(D21)));
+ EXPECT_TRUE(vixl::aarch64::d22.Is(Arm64Assembler::reg_d(D22)));
+ EXPECT_TRUE(vixl::aarch64::d23.Is(Arm64Assembler::reg_d(D23)));
+ EXPECT_TRUE(vixl::aarch64::d24.Is(Arm64Assembler::reg_d(D24)));
+ EXPECT_TRUE(vixl::aarch64::d25.Is(Arm64Assembler::reg_d(D25)));
+ EXPECT_TRUE(vixl::aarch64::d26.Is(Arm64Assembler::reg_d(D26)));
+ EXPECT_TRUE(vixl::aarch64::d27.Is(Arm64Assembler::reg_d(D27)));
+ EXPECT_TRUE(vixl::aarch64::d28.Is(Arm64Assembler::reg_d(D28)));
+ EXPECT_TRUE(vixl::aarch64::d29.Is(Arm64Assembler::reg_d(D29)));
+ EXPECT_TRUE(vixl::aarch64::d30.Is(Arm64Assembler::reg_d(D30)));
+ EXPECT_TRUE(vixl::aarch64::d31.Is(Arm64Assembler::reg_d(D31)));
// S Registers.
- EXPECT_TRUE(vixl::s0.Is(Arm64Assembler::reg_s(S0)));
- EXPECT_TRUE(vixl::s1.Is(Arm64Assembler::reg_s(S1)));
- EXPECT_TRUE(vixl::s2.Is(Arm64Assembler::reg_s(S2)));
- EXPECT_TRUE(vixl::s3.Is(Arm64Assembler::reg_s(S3)));
- EXPECT_TRUE(vixl::s4.Is(Arm64Assembler::reg_s(S4)));
- EXPECT_TRUE(vixl::s5.Is(Arm64Assembler::reg_s(S5)));
- EXPECT_TRUE(vixl::s6.Is(Arm64Assembler::reg_s(S6)));
- EXPECT_TRUE(vixl::s7.Is(Arm64Assembler::reg_s(S7)));
- EXPECT_TRUE(vixl::s8.Is(Arm64Assembler::reg_s(S8)));
- EXPECT_TRUE(vixl::s9.Is(Arm64Assembler::reg_s(S9)));
- EXPECT_TRUE(vixl::s10.Is(Arm64Assembler::reg_s(S10)));
- EXPECT_TRUE(vixl::s11.Is(Arm64Assembler::reg_s(S11)));
- EXPECT_TRUE(vixl::s12.Is(Arm64Assembler::reg_s(S12)));
- EXPECT_TRUE(vixl::s13.Is(Arm64Assembler::reg_s(S13)));
- EXPECT_TRUE(vixl::s14.Is(Arm64Assembler::reg_s(S14)));
- EXPECT_TRUE(vixl::s15.Is(Arm64Assembler::reg_s(S15)));
- EXPECT_TRUE(vixl::s16.Is(Arm64Assembler::reg_s(S16)));
- EXPECT_TRUE(vixl::s17.Is(Arm64Assembler::reg_s(S17)));
- EXPECT_TRUE(vixl::s18.Is(Arm64Assembler::reg_s(S18)));
- EXPECT_TRUE(vixl::s19.Is(Arm64Assembler::reg_s(S19)));
- EXPECT_TRUE(vixl::s20.Is(Arm64Assembler::reg_s(S20)));
- EXPECT_TRUE(vixl::s21.Is(Arm64Assembler::reg_s(S21)));
- EXPECT_TRUE(vixl::s22.Is(Arm64Assembler::reg_s(S22)));
- EXPECT_TRUE(vixl::s23.Is(Arm64Assembler::reg_s(S23)));
- EXPECT_TRUE(vixl::s24.Is(Arm64Assembler::reg_s(S24)));
- EXPECT_TRUE(vixl::s25.Is(Arm64Assembler::reg_s(S25)));
- EXPECT_TRUE(vixl::s26.Is(Arm64Assembler::reg_s(S26)));
- EXPECT_TRUE(vixl::s27.Is(Arm64Assembler::reg_s(S27)));
- EXPECT_TRUE(vixl::s28.Is(Arm64Assembler::reg_s(S28)));
- EXPECT_TRUE(vixl::s29.Is(Arm64Assembler::reg_s(S29)));
- EXPECT_TRUE(vixl::s30.Is(Arm64Assembler::reg_s(S30)));
- EXPECT_TRUE(vixl::s31.Is(Arm64Assembler::reg_s(S31)));
+ EXPECT_TRUE(vixl::aarch64::s0.Is(Arm64Assembler::reg_s(S0)));
+ EXPECT_TRUE(vixl::aarch64::s1.Is(Arm64Assembler::reg_s(S1)));
+ EXPECT_TRUE(vixl::aarch64::s2.Is(Arm64Assembler::reg_s(S2)));
+ EXPECT_TRUE(vixl::aarch64::s3.Is(Arm64Assembler::reg_s(S3)));
+ EXPECT_TRUE(vixl::aarch64::s4.Is(Arm64Assembler::reg_s(S4)));
+ EXPECT_TRUE(vixl::aarch64::s5.Is(Arm64Assembler::reg_s(S5)));
+ EXPECT_TRUE(vixl::aarch64::s6.Is(Arm64Assembler::reg_s(S6)));
+ EXPECT_TRUE(vixl::aarch64::s7.Is(Arm64Assembler::reg_s(S7)));
+ EXPECT_TRUE(vixl::aarch64::s8.Is(Arm64Assembler::reg_s(S8)));
+ EXPECT_TRUE(vixl::aarch64::s9.Is(Arm64Assembler::reg_s(S9)));
+ EXPECT_TRUE(vixl::aarch64::s10.Is(Arm64Assembler::reg_s(S10)));
+ EXPECT_TRUE(vixl::aarch64::s11.Is(Arm64Assembler::reg_s(S11)));
+ EXPECT_TRUE(vixl::aarch64::s12.Is(Arm64Assembler::reg_s(S12)));
+ EXPECT_TRUE(vixl::aarch64::s13.Is(Arm64Assembler::reg_s(S13)));
+ EXPECT_TRUE(vixl::aarch64::s14.Is(Arm64Assembler::reg_s(S14)));
+ EXPECT_TRUE(vixl::aarch64::s15.Is(Arm64Assembler::reg_s(S15)));
+ EXPECT_TRUE(vixl::aarch64::s16.Is(Arm64Assembler::reg_s(S16)));
+ EXPECT_TRUE(vixl::aarch64::s17.Is(Arm64Assembler::reg_s(S17)));
+ EXPECT_TRUE(vixl::aarch64::s18.Is(Arm64Assembler::reg_s(S18)));
+ EXPECT_TRUE(vixl::aarch64::s19.Is(Arm64Assembler::reg_s(S19)));
+ EXPECT_TRUE(vixl::aarch64::s20.Is(Arm64Assembler::reg_s(S20)));
+ EXPECT_TRUE(vixl::aarch64::s21.Is(Arm64Assembler::reg_s(S21)));
+ EXPECT_TRUE(vixl::aarch64::s22.Is(Arm64Assembler::reg_s(S22)));
+ EXPECT_TRUE(vixl::aarch64::s23.Is(Arm64Assembler::reg_s(S23)));
+ EXPECT_TRUE(vixl::aarch64::s24.Is(Arm64Assembler::reg_s(S24)));
+ EXPECT_TRUE(vixl::aarch64::s25.Is(Arm64Assembler::reg_s(S25)));
+ EXPECT_TRUE(vixl::aarch64::s26.Is(Arm64Assembler::reg_s(S26)));
+ EXPECT_TRUE(vixl::aarch64::s27.Is(Arm64Assembler::reg_s(S27)));
+ EXPECT_TRUE(vixl::aarch64::s28.Is(Arm64Assembler::reg_s(S28)));
+ EXPECT_TRUE(vixl::aarch64::s29.Is(Arm64Assembler::reg_s(S29)));
+ EXPECT_TRUE(vixl::aarch64::s30.Is(Arm64Assembler::reg_s(S30)));
+ EXPECT_TRUE(vixl::aarch64::s31.Is(Arm64Assembler::reg_s(S31)));
}
} // namespace arm64
diff --git a/dex2oat/Android.mk b/dex2oat/Android.mk
index dfc379f..f5f02cd 100644
--- a/dex2oat/Android.mk
+++ b/dex2oat/Android.mk
@@ -62,7 +62,7 @@
libnativebridge \
libnativeloader \
libsigchain_dummy \
- libvixl \
+ libvixl-arm64 \
liblog \
libz \
libbacktrace \
diff --git a/disassembler/Android.mk b/disassembler/Android.mk
index d76bbb8..6905f88 100644
--- a/disassembler/Android.mk
+++ b/disassembler/Android.mk
@@ -91,9 +91,9 @@
LOCAL_NATIVE_COVERAGE := $(ART_COVERAGE)
# For disassembler_arm64.
ifeq ($$(art_ndebug_or_debug),debug)
- LOCAL_SHARED_LIBRARIES += libvixl
+ LOCAL_SHARED_LIBRARIES += libvixl-arm64
else
- LOCAL_SHARED_LIBRARIES += libvixl
+ LOCAL_SHARED_LIBRARIES += libvixl-arm64
endif
ifeq ($$(art_target_or_host),target)
include $(BUILD_SHARED_LIBRARY)
diff --git a/disassembler/disassembler_arm64.cc b/disassembler/disassembler_arm64.cc
index 6a9afe5..a93f7d5 100644
--- a/disassembler/disassembler_arm64.cc
+++ b/disassembler/disassembler_arm64.cc
@@ -24,6 +24,8 @@
#include "base/stringprintf.h"
#include "thread.h"
+using namespace vixl::aarch64; // NOLINT(build/namespaces)
+
namespace art {
namespace arm64 {
@@ -38,15 +40,14 @@
LR = 30
};
-void CustomDisassembler::AppendRegisterNameToOutput(
- const vixl::Instruction* instr,
- const vixl::CPURegister& reg) {
+void CustomDisassembler::AppendRegisterNameToOutput(const Instruction* instr,
+ const CPURegister& reg) {
USE(instr);
if (reg.IsRegister() && reg.Is64Bits()) {
- if (reg.code() == TR) {
+ if (reg.GetCode() == TR) {
AppendToOutput("tr");
return;
- } else if (reg.code() == LR) {
+ } else if (reg.GetCode() == LR) {
AppendToOutput("lr");
return;
}
@@ -56,7 +57,7 @@
Disassembler::AppendRegisterNameToOutput(instr, reg);
}
-void CustomDisassembler::VisitLoadLiteral(const vixl::Instruction* instr) {
+void CustomDisassembler::VisitLoadLiteral(const Instruction* instr) {
Disassembler::VisitLoadLiteral(instr);
if (!read_literals_) {
@@ -66,27 +67,27 @@
// Get address of literal. Bail if not within expected buffer range to
// avoid trying to fetch invalid literals (we can encounter this when
// interpreting raw data as instructions).
- void* data_address = instr->LiteralAddress<void*>();
+ void* data_address = instr->GetLiteralAddress<void*>();
if (data_address < base_address_ || data_address >= end_address_) {
AppendToOutput(" (?)");
return;
}
// Output information on literal.
- vixl::Instr op = instr->Mask(vixl::LoadLiteralMask);
+ Instr op = instr->Mask(LoadLiteralMask);
switch (op) {
- case vixl::LDR_w_lit:
- case vixl::LDR_x_lit:
- case vixl::LDRSW_x_lit: {
- int64_t data = op == vixl::LDR_x_lit ? *reinterpret_cast<int64_t*>(data_address)
- : *reinterpret_cast<int32_t*>(data_address);
+ case LDR_w_lit:
+ case LDR_x_lit:
+ case LDRSW_x_lit: {
+ int64_t data = op == LDR_x_lit ? *reinterpret_cast<int64_t*>(data_address)
+ : *reinterpret_cast<int32_t*>(data_address);
AppendToOutput(" (0x%" PRIx64 " / %" PRId64 ")", data, data);
break;
}
- case vixl::LDR_s_lit:
- case vixl::LDR_d_lit: {
- double data = (op == vixl::LDR_s_lit) ? *reinterpret_cast<float*>(data_address)
- : *reinterpret_cast<double*>(data_address);
+ case LDR_s_lit:
+ case LDR_d_lit: {
+ double data = (op == LDR_s_lit) ? *reinterpret_cast<float*>(data_address)
+ : *reinterpret_cast<double*>(data_address);
AppendToOutput(" (%g)", data);
break;
}
@@ -95,11 +96,11 @@
}
}
-void CustomDisassembler::VisitLoadStoreUnsignedOffset(const vixl::Instruction* instr) {
+void CustomDisassembler::VisitLoadStoreUnsignedOffset(const Instruction* instr) {
Disassembler::VisitLoadStoreUnsignedOffset(instr);
- if (instr->Rn() == TR) {
- int64_t offset = instr->ImmLSUnsigned() << instr->SizeLS();
+ if (instr->GetRn() == TR) {
+ int64_t offset = instr->GetImmLSUnsigned() << instr->GetSizeLS();
std::ostringstream tmp_stream;
Thread::DumpThreadOffset<8>(tmp_stream, static_cast<uint32_t>(offset));
AppendToOutput(" ; %s", tmp_stream.str().c_str());
@@ -107,15 +108,15 @@
}
size_t DisassemblerArm64::Dump(std::ostream& os, const uint8_t* begin) {
- const vixl::Instruction* instr = reinterpret_cast<const vixl::Instruction*>(begin);
+ const Instruction* instr = reinterpret_cast<const Instruction*>(begin);
decoder.Decode(instr);
os << FormatInstructionPointer(begin)
- << StringPrintf(": %08x\t%s\n", instr->InstructionBits(), disasm.GetOutput());
- return vixl::kInstructionSize;
+ << StringPrintf(": %08x\t%s\n", instr->GetInstructionBits(), disasm.GetOutput());
+ return kInstructionSize;
}
void DisassemblerArm64::Dump(std::ostream& os, const uint8_t* begin, const uint8_t* end) {
- for (const uint8_t* cur = begin; cur < end; cur += vixl::kInstructionSize) {
+ for (const uint8_t* cur = begin; cur < end; cur += kInstructionSize) {
Dump(os, cur);
}
}
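For reference, a minimal standalone sketch (not part of the patch) of the renamed accessors exercised above: Decoder, Disassembler, Instruction and kInstructionSize now live in vixl::aarch64, the headers move under "a64/", and InstructionBits() becomes GetInstructionBits(). The DumpBuffer helper and its buffer/size parameters are made up for illustration.

#include <cstddef>
#include <cstdint>
#include <cstdio>

#include "a64/decoder-a64.h"
#include "a64/disasm-a64.h"

void DumpBuffer(const uint8_t* buffer, size_t size) {
  using namespace vixl::aarch64;  // NOLINT(build/namespaces)
  Decoder decoder;
  Disassembler disasm;
  // Register the disassembler so decoded instructions are formatted.
  decoder.AppendVisitor(&disasm);
  for (const uint8_t* cur = buffer; cur < buffer + size; cur += kInstructionSize) {
    const Instruction* instr = reinterpret_cast<const Instruction*>(cur);
    decoder.Decode(instr);
    // GetInstructionBits() replaces the old InstructionBits() accessor.
    printf("%08x\t%s\n", instr->GetInstructionBits(), disasm.GetOutput());
  }
}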
diff --git a/disassembler/disassembler_arm64.h b/disassembler/disassembler_arm64.h
index a4e5ee8..c64d8ea 100644
--- a/disassembler/disassembler_arm64.h
+++ b/disassembler/disassembler_arm64.h
@@ -21,34 +21,35 @@
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wshadow"
-#include "vixl/a64/decoder-a64.h"
-#include "vixl/a64/disasm-a64.h"
+#include "a64/decoder-a64.h"
+#include "a64/disasm-a64.h"
#pragma GCC diagnostic pop
namespace art {
namespace arm64 {
-class CustomDisassembler FINAL : public vixl::Disassembler {
+class CustomDisassembler FINAL : public vixl::aarch64::Disassembler {
public:
explicit CustomDisassembler(DisassemblerOptions* options)
- : vixl::Disassembler(),
+ : vixl::aarch64::Disassembler(),
read_literals_(options->can_read_literals_),
base_address_(options->base_address_),
end_address_(options->end_address_) {
if (!options->absolute_addresses_) {
- MapCodeAddress(0, reinterpret_cast<const vixl::Instruction*>(options->base_address_));
+ MapCodeAddress(0,
+ reinterpret_cast<const vixl::aarch64::Instruction*>(options->base_address_));
}
}
// Use register aliases in the disassembly.
- void AppendRegisterNameToOutput(const vixl::Instruction* instr,
- const vixl::CPURegister& reg) OVERRIDE;
+ void AppendRegisterNameToOutput(const vixl::aarch64::Instruction* instr,
+ const vixl::aarch64::CPURegister& reg) OVERRIDE;
// Improve the disassembly of literal load instructions.
- void VisitLoadLiteral(const vixl::Instruction* instr) OVERRIDE;
+ void VisitLoadLiteral(const vixl::aarch64::Instruction* instr) OVERRIDE;
// Improve the disassembly of thread offset.
- void VisitLoadStoreUnsignedOffset(const vixl::Instruction* instr) OVERRIDE;
+ void VisitLoadStoreUnsignedOffset(const vixl::aarch64::Instruction* instr) OVERRIDE;
private:
// Indicate if the disassembler should read data loaded from literal pools.
@@ -75,7 +76,7 @@
void Dump(std::ostream& os, const uint8_t* begin, const uint8_t* end) OVERRIDE;
private:
- vixl::Decoder decoder;
+ vixl::aarch64::Decoder decoder;
CustomDisassembler disasm;
DISALLOW_COPY_AND_ASSIGN(DisassemblerArm64);
diff --git a/runtime/simulator/Android.mk b/runtime/simulator/Android.mk
index ad91cde..953a377 100644
--- a/runtime/simulator/Android.mk
+++ b/runtime/simulator/Android.mk
@@ -88,9 +88,9 @@
LOCAL_NATIVE_COVERAGE := $(ART_COVERAGE)
# For simulator_arm64.
ifeq ($$(art_ndebug_or_debug),debug)
- LOCAL_SHARED_LIBRARIES += libvixl
+ LOCAL_SHARED_LIBRARIES += libvixl-arm64
else
- LOCAL_SHARED_LIBRARIES += libvixl
+ LOCAL_SHARED_LIBRARIES += libvixl-arm64
endif
ifeq ($$(art_target_or_host),target)
include $(BUILD_SHARED_LIBRARY)
diff --git a/runtime/simulator/code_simulator_arm64.cc b/runtime/simulator/code_simulator_arm64.cc
index 39dfa6d..897d4f5 100644
--- a/runtime/simulator/code_simulator_arm64.cc
+++ b/runtime/simulator/code_simulator_arm64.cc
@@ -16,13 +16,15 @@
#include "simulator/code_simulator_arm64.h"
+using namespace vixl::aarch64; // NOLINT(build/namespaces)
+
namespace art {
namespace arm64 {
-// VIXL has not been tested on 32bit architectures, so vixl::Simulator is not always
+// VIXL has not been tested on 32-bit architectures, so Simulator is not always
// available. To avoid linker error on these architectures, we check if we can simulate
// in the beginning of following methods, with compile time constant `kCanSimulate`.
-// TODO: when vixl::Simulator is always available, remove the these checks.
+// TODO: when Simulator is always available, remove these checks.
CodeSimulatorArm64* CodeSimulatorArm64::CreateCodeSimulatorArm64() {
if (kCanSimulate) {
@@ -35,8 +37,8 @@
CodeSimulatorArm64::CodeSimulatorArm64()
: CodeSimulator(), decoder_(nullptr), simulator_(nullptr) {
DCHECK(kCanSimulate);
- decoder_ = new vixl::Decoder();
- simulator_ = new vixl::Simulator(decoder_);
+ decoder_ = new Decoder();
+ simulator_ = new Simulator(decoder_);
}
CodeSimulatorArm64::~CodeSimulatorArm64() {
@@ -47,22 +49,22 @@
void CodeSimulatorArm64::RunFrom(intptr_t code_buffer) {
DCHECK(kCanSimulate);
- simulator_->RunFrom(reinterpret_cast<const vixl::Instruction*>(code_buffer));
+ simulator_->RunFrom(reinterpret_cast<const Instruction*>(code_buffer));
}
bool CodeSimulatorArm64::GetCReturnBool() const {
DCHECK(kCanSimulate);
- return simulator_->wreg(0);
+ return simulator_->ReadWRegister(0);
}
int32_t CodeSimulatorArm64::GetCReturnInt32() const {
DCHECK(kCanSimulate);
- return simulator_->wreg(0);
+ return simulator_->ReadWRegister(0);
}
int64_t CodeSimulatorArm64::GetCReturnInt64() const {
DCHECK(kCanSimulate);
- return simulator_->xreg(0);
+ return simulator_->ReadXRegister(0);
}
} // namespace arm64
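For reference, a minimal standalone sketch (not part of the patch) of the updated Simulator interface used above: Decoder and Simulator now live in vixl::aarch64, and the wreg()/xreg() reads become ReadWRegister()/ReadXRegister(). The RunAndReadX0 helper and its code_buffer parameter are made up for illustration; code_buffer is assumed to point at valid AArch64 code.

#include <cstdint>

#include "a64/simulator-a64.h"

int64_t RunAndReadX0(intptr_t code_buffer) {
  using namespace vixl::aarch64;  // NOLINT(build/namespaces)
  Decoder decoder;
  // Construct the simulator over a decoder, as CodeSimulatorArm64 does above.
  Simulator simulator(&decoder);
  simulator.RunFrom(reinterpret_cast<const Instruction*>(code_buffer));
  // ReadXRegister(0) replaces the old xreg(0) accessor for the x0 return value.
  return simulator.ReadXRegister(0);
}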
diff --git a/runtime/simulator/code_simulator_arm64.h b/runtime/simulator/code_simulator_arm64.h
index 10fceb9..69388b1 100644
--- a/runtime/simulator/code_simulator_arm64.h
+++ b/runtime/simulator/code_simulator_arm64.h
@@ -19,10 +19,11 @@
#include "memory"
#include "simulator/code_simulator.h"
+
// TODO: make vixl clean wrt -Wshadow.
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wshadow"
-#include "vixl/a64/simulator-a64.h"
+#include "a64/simulator-a64.h"
#pragma GCC diagnostic pop
namespace art {
@@ -42,10 +43,10 @@
private:
CodeSimulatorArm64();
- vixl::Decoder* decoder_;
- vixl::Simulator* simulator_;
+ vixl::aarch64::Decoder* decoder_;
+ vixl::aarch64::Simulator* simulator_;
- // TODO: Enable CodeSimulatorArm64 for more host ISAs once vixl::Simulator supports them.
+ // TODO: Enable CodeSimulatorArm64 for more host ISAs once Simulator supports them.
static constexpr bool kCanSimulate = (kRuntimeISA == kX86_64);
DISALLOW_COPY_AND_ASSIGN(CodeSimulatorArm64);