Vixl: Update the VIXL interface to VIXL 1.7 and enable VIXL debug.

This patch updates the ART interface to VIXL 1.7 and links against the
debug version of VIXL (libvixld) when ART is built in debug mode.
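
With VIXL 1.7 the literal pool has to be emitted explicitly before the
generated code is copied out of the assembler, and calls such as
sxtb/sxth/uxtb/uxth move to the macro-assembler forms
Sxtb/Sxth/Uxtb/Uxth. A rough sketch of the new finalization pattern,
mirroring the oat_writer.cc and trampoline_compiler.cc hunks below
(assembler construction and instruction emission are elided, and the
local names are illustrative only):

  // Assumes an arm64::Arm64Assembler `assembler` whose instructions
  // have already been emitted.
  assembler.EmitSlowPaths();  // ensure the literal pool is emitted
  std::vector<uint8_t> code(assembler.CodeSize());
  MemoryRegion region(code.data(), code.size());
  assembler.FinalizeInstructions(region);  // copy out the finished code

The optimizing back end follows the same pattern through the new
virtual CodeGenerator::Finalize() hook, which CodeGeneratorARM64
overrides to call the VIXL macro assembler's FinalizeCode() before the
base-class copy.
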
Change-Id: I443fb941bec3cffefba7038f93bb972e6b7d8db5
Signed-off-by: Serban Constantinescu <serban.constantinescu@arm.com>
diff --git a/compiler/Android.mk b/compiler/Android.mk
index 84176a1..70c7e52 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -279,7 +279,11 @@
LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common_build.mk
LOCAL_ADDITIONAL_DEPENDENCIES += $(LOCAL_PATH)/Android.mk
# Vixl assembly support for ARM64 targets.
- LOCAL_SHARED_LIBRARIES += libvixl
+ ifeq ($$(art_ndebug_or_debug),debug)
+ LOCAL_SHARED_LIBRARIES += libvixld
+ else
+ LOCAL_SHARED_LIBRARIES += libvixl
+ endif
ifeq ($$(art_target_or_host),target)
# For atrace.
LOCAL_SHARED_LIBRARIES += libcutils
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index a57f892..8a7abb4 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -366,6 +366,8 @@
Offset offset(mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
kArm64PointerSize).Int32Value());
assembler.JumpTo(ManagedRegister(arm64::X0), offset, ManagedRegister(arm64::IP0));
+ // Ensure we emit the literal pool.
+ assembler.EmitSlowPaths();
std::vector<uint8_t> thunk_code(assembler.CodeSize());
MemoryRegion code(thunk_code.data(), thunk_code.size());
assembler.FinalizeInstructions(code);
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 6cacd4f..e581af2 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -71,11 +71,7 @@
}
}
GenerateSlowPaths();
-
- size_t code_size = GetAssembler()->CodeSize();
- uint8_t* buffer = allocator->Allocate(code_size);
- MemoryRegion code(buffer, code_size);
- GetAssembler()->FinalizeInstructions(code);
+ Finalize(allocator);
}
void CodeGenerator::CompileOptimized(CodeAllocator* allocator) {
@@ -97,9 +93,13 @@
}
}
GenerateSlowPaths();
+ Finalize(allocator);
+}
+void CodeGenerator::Finalize(CodeAllocator* allocator) {
size_t code_size = GetAssembler()->CodeSize();
uint8_t* buffer = allocator->Allocate(code_size);
+
MemoryRegion code(buffer, code_size);
GetAssembler()->FinalizeInstructions(code);
}
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 321a31f..4c0d3ea 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -90,6 +90,7 @@
}
virtual void Initialize() = 0;
+ virtual void Finalize(CodeAllocator* allocator);
virtual void GenerateFrameEntry() = 0;
virtual void GenerateFrameExit() = 0;
virtual void Bind(HBasicBlock* block) = 0;
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 0fc4307..8d43a5d 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -479,6 +479,12 @@
#undef __
#define __ GetVIXLAssembler()->
+void CodeGeneratorARM64::Finalize(CodeAllocator* allocator) {
+ // Ensure we emit the literal pool.
+ __ FinalizeCode();
+ CodeGenerator::Finalize(allocator);
+}
+
void CodeGeneratorARM64::GenerateFrameEntry() {
// TODO: Add proper support for the stack overflow check.
UseScratchRegisterScope temps(GetVIXLAssembler());
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index a40f27f..236a04d 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -232,6 +232,8 @@
}
}
+ void Finalize(CodeAllocator* allocator) OVERRIDE;
+
// Code generation helpers.
void MoveConstant(vixl::CPURegister destination, HConstant* constant);
void MoveHelper(Location destination, Location source, Primitive::Type type);
diff --git a/compiler/trampolines/trampoline_compiler.cc b/compiler/trampolines/trampoline_compiler.cc
index 733b58f..cb07ffa 100644
--- a/compiler/trampolines/trampoline_compiler.cc
+++ b/compiler/trampolines/trampoline_compiler.cc
@@ -83,6 +83,7 @@
break;
}
+ assembler->EmitSlowPaths();
size_t cs = assembler->CodeSize();
std::unique_ptr<std::vector<uint8_t>> entry_stub(new std::vector<uint8_t>(cs));
MemoryRegion code(&(*entry_stub)[0], entry_stub->size());
diff --git a/compiler/utils/arm64/assembler_arm64.cc b/compiler/utils/arm64/assembler_arm64.cc
index 390f2ea..21014c8 100644
--- a/compiler/utils/arm64/assembler_arm64.cc
+++ b/compiler/utils/arm64/assembler_arm64.cc
@@ -329,12 +329,12 @@
if (dst.IsXRegister()) {
if (size == 4) {
CHECK(src.IsWRegister());
- ___ Mov(reg_x(dst.AsXRegister()), reg_w(src.AsWRegister()));
+ ___ Mov(reg_w(dst.AsOverlappingWRegister()), reg_w(src.AsWRegister()));
} else {
if (src.IsXRegister()) {
___ Mov(reg_x(dst.AsXRegister()), reg_x(src.AsXRegister()));
} else {
- ___ Mov(reg_x(dst.AsXRegister()), reg_w(src.AsWRegister()));
+ ___ Mov(reg_x(dst.AsXRegister()), reg_x(src.AsOverlappingXRegister()));
}
}
} else if (dst.IsWRegister()) {
@@ -484,9 +484,9 @@
CHECK(size == 1 || size == 2) << size;
CHECK(reg.IsWRegister()) << reg;
if (size == 1) {
- ___ sxtb(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
+ ___ Sxtb(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
} else {
- ___ sxth(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
+ ___ Sxth(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
}
}
@@ -495,9 +495,9 @@
CHECK(size == 1 || size == 2) << size;
CHECK(reg.IsWRegister()) << reg;
if (size == 1) {
- ___ uxtb(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
+ ___ Uxtb(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
} else {
- ___ uxth(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
+ ___ Uxth(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
}
}