ARM: VIXL32: Implement VIXL-based assembler.
This patch introduces a new ARM (Thumb2) assembler based on VIXL, along
with an ARM VIXL JNI macro assembler. Both are turned off by default
(the JNI macro assembler will be turned on in a follow-up patch).
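As a rough usage sketch (not part of this patch; it mirrors the new
tests in assembler_thumb_test.cc and assumes the existing ArenaPool /
MemoryRegion utilities, with names as seen from inside namespace art),
the low-level assembler is expected to be driven like this:

    ArenaPool pool;
    ArenaAllocator arena(&pool);
    arm::ArmVIXLAssembler assembler(&arena);  // Switches VIXL to T32 (Thumb2).
    // Word-sized load/store helpers; offsets that do not fit the Thumb2
    // encodings are split internally (see AdjustLoadStoreOffset()).
    assembler.LoadFromOffset(arm::kLoadWord, vixl::aarch32::r2, vixl::aarch32::r4, 12);
    assembler.StoreToOffset(arm::kStoreWord, vixl::aarch32::r2, vixl::aarch32::r4, 0x1000);
    assembler.FinalizeCode();
    std::vector<uint8_t> code(assembler.CodeSize());
    MemoryRegion region(&code[0], code.size());
    assembler.FinalizeInstructions(region);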
Change-Id: I5f7eb35da5318d7170b3c7e8553364ebe29cc991
diff --git a/compiler/Android.mk b/compiler/Android.mk
index 2426eb9..99a09f2 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -96,8 +96,10 @@
optimizing/code_generator_arm.cc \
optimizing/intrinsics_arm.cc \
utils/arm/assembler_arm.cc \
+ utils/arm/assembler_arm_vixl.cc \
utils/arm/assembler_thumb2.cc \
utils/arm/jni_macro_assembler_arm.cc \
+ utils/arm/jni_macro_assembler_arm_vixl.cc \
utils/arm/managed_register_arm.cc \
# TODO We should really separate out those files that are actually needed for both variants of an
@@ -286,15 +288,15 @@
# VIXL assembly support for ARM64 targets.
ifeq ($$(art_ndebug_or_debug),debug)
ifeq ($$(art_static_or_shared), static)
- LOCAL_WHOLE_STATIC_LIBRARIES += libvixld-arm64
+ LOCAL_WHOLE_STATIC_LIBRARIES += libvixld-arm libvixld-arm64
else
- LOCAL_SHARED_LIBRARIES += libvixld-arm64
+ LOCAL_SHARED_LIBRARIES += libvixld-arm libvixld-arm64
endif
else
ifeq ($$(art_static_or_shared), static)
- LOCAL_WHOLE_STATIC_LIBRARIES += libvixl-arm64
+ LOCAL_WHOLE_STATIC_LIBRARIES += libvixl-arm libvixl-arm64
else
- LOCAL_SHARED_LIBRARIES += libvixl-arm64
+ LOCAL_SHARED_LIBRARIES += libvixl-arm libvixl-arm64
endif
endif
diff --git a/compiler/utils/arm/assembler_arm.h b/compiler/utils/arm/assembler_arm.h
index 8ba6fb4..33f2fec 100644
--- a/compiler/utils/arm/assembler_arm.h
+++ b/compiler/utils/arm/assembler_arm.h
@@ -28,6 +28,7 @@
#include "base/stl_util.h"
#include "base/value_object.h"
#include "constants_arm.h"
+#include "utils/arm/assembler_arm_shared.h"
#include "utils/arm/managed_register_arm.h"
#include "utils/assembler.h"
#include "utils/jni_macro_assembler.h"
@@ -214,29 +215,6 @@
#endif
};
-
-enum LoadOperandType {
- kLoadSignedByte,
- kLoadUnsignedByte,
- kLoadSignedHalfword,
- kLoadUnsignedHalfword,
- kLoadWord,
- kLoadWordPair,
- kLoadSWord,
- kLoadDWord
-};
-
-
-enum StoreOperandType {
- kStoreByte,
- kStoreHalfword,
- kStoreWord,
- kStoreWordPair,
- kStoreSWord,
- kStoreDWord
-};
-
-
// Load/store multiple addressing mode.
enum BlockAddressMode {
// bit encoding P U W
@@ -417,13 +395,6 @@
kItE = kItElse
};
-// Set condition codes request.
-enum SetCc {
- kCcDontCare, // Allows prioritizing 16-bit instructions on Thumb2 whether they set CCs or not.
- kCcSet,
- kCcKeep,
-};
-
constexpr uint32_t kNoItCondition = 3;
constexpr uint32_t kInvalidModifiedImmediate = -1;
diff --git a/compiler/utils/arm/assembler_arm_shared.h b/compiler/utils/arm/assembler_arm_shared.h
new file mode 100644
index 0000000..21f13ee
--- /dev/null
+++ b/compiler/utils/arm/assembler_arm_shared.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_UTILS_ARM_ASSEMBLER_ARM_SHARED_H_
+#define ART_COMPILER_UTILS_ARM_ASSEMBLER_ARM_SHARED_H_
+
+namespace art {
+namespace arm {
+
+enum LoadOperandType {
+ kLoadSignedByte,
+ kLoadUnsignedByte,
+ kLoadSignedHalfword,
+ kLoadUnsignedHalfword,
+ kLoadWord,
+ kLoadWordPair,
+ kLoadSWord,
+ kLoadDWord
+};
+
+enum StoreOperandType {
+ kStoreByte,
+ kStoreHalfword,
+ kStoreWord,
+ kStoreWordPair,
+ kStoreSWord,
+ kStoreDWord
+};
+
+// Set condition codes request.
+enum SetCc {
+ kCcDontCare, // Allows prioritizing 16-bit instructions on Thumb2 whether they set CCs or not.
+ kCcSet,
+ kCcKeep,
+};
+
+} // namespace arm
+} // namespace art
+
+#endif // ART_COMPILER_UTILS_ARM_ASSEMBLER_ARM_SHARED_H_
diff --git a/compiler/utils/arm/assembler_arm_vixl.cc b/compiler/utils/arm/assembler_arm_vixl.cc
new file mode 100644
index 0000000..3c5973e
--- /dev/null
+++ b/compiler/utils/arm/assembler_arm_vixl.cc
@@ -0,0 +1,382 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <iostream>
+#include <type_traits>
+
+#include "assembler_arm_vixl.h"
+#include "entrypoints/quick/quick_entrypoints.h"
+#include "thread.h"
+
+using namespace vixl::aarch32; // NOLINT(build/namespaces)
+
+namespace art {
+namespace arm {
+
+#ifdef ___
+#error "ARM Assembler macro already defined."
+#else
+#define ___ vixl_masm_.
+#endif
+
+extern const vixl32::Register tr(TR);
+
+void ArmVIXLAssembler::FinalizeCode() {
+ vixl_masm_.FinalizeCode();
+}
+
+size_t ArmVIXLAssembler::CodeSize() const {
+ return vixl_masm_.GetSizeOfCodeGenerated();
+}
+
+const uint8_t* ArmVIXLAssembler::CodeBufferBaseAddress() const {
+ return vixl_masm_.GetStartAddress<uint8_t*>();
+}
+
+void ArmVIXLAssembler::FinalizeInstructions(const MemoryRegion& region) {
+ // Copy the instructions from the buffer.
+ MemoryRegion from(vixl_masm_.GetStartAddress<void*>(), CodeSize());
+ region.CopyFrom(0, from);
+}
+
+void ArmVIXLAssembler::PoisonHeapReference(vixl::aarch32::Register reg) {
+ // reg = -reg.
+ ___ Rsb(reg, reg, 0);
+}
+
+void ArmVIXLAssembler::UnpoisonHeapReference(vixl::aarch32::Register reg) {
+ // reg = -reg.
+ ___ Rsb(reg, reg, 0);
+}
+
+void ArmVIXLAssembler::MaybeUnpoisonHeapReference(vixl32::Register reg) {
+ if (kPoisonHeapReferences) {
+ UnpoisonHeapReference(reg);
+ }
+}
+
+void ArmVIXLAssembler::LoadImmediate(vixl32::Register rd, int32_t value) {
+ // TODO(VIXL): Implement this optimization in VIXL.
+ if (!ShifterOperandCanAlwaysHold(value) && ShifterOperandCanAlwaysHold(~value)) {
+ ___ Mvn(rd, ~value);
+ } else {
+ ___ Mov(rd, value);
+ }
+}
+
+bool ArmVIXLAssembler::ShifterOperandCanAlwaysHold(uint32_t immediate) {
+ return vixl_masm_.IsModifiedImmediate(immediate);
+}
+
+bool ArmVIXLAssembler::ShifterOperandCanHold(Opcode opcode, uint32_t immediate, SetCc set_cc) {
+ switch (opcode) {
+ case ADD:
+ case SUB:
+ // Less than (or equal to) 12 bits can be done if we don't need to set condition codes.
+ if (IsUint<12>(immediate) && set_cc != kCcSet) {
+ return true;
+ }
+ return ShifterOperandCanAlwaysHold(immediate);
+
+ case MOV:
+ // TODO: Support less than or equal to 12 bits.
+ return ShifterOperandCanAlwaysHold(immediate);
+
+ case MVN:
+ default:
+ return ShifterOperandCanAlwaysHold(immediate);
+ }
+}
+
+bool ArmVIXLAssembler::CanSplitLoadStoreOffset(int32_t allowed_offset_bits,
+ int32_t offset,
+ /*out*/ int32_t* add_to_base,
+ /*out*/ int32_t* offset_for_load_store) {
+ int32_t other_bits = offset & ~allowed_offset_bits;
+ if (ShifterOperandCanAlwaysHold(other_bits) || ShifterOperandCanAlwaysHold(-other_bits)) {
+ *add_to_base = offset & ~allowed_offset_bits;
+ *offset_for_load_store = offset & allowed_offset_bits;
+ return true;
+ }
+ return false;
+}
+
+int32_t ArmVIXLAssembler::AdjustLoadStoreOffset(int32_t allowed_offset_bits,
+ vixl32::Register temp,
+ vixl32::Register base,
+ int32_t offset) {
+ DCHECK_NE(offset & ~allowed_offset_bits, 0);
+ int32_t add_to_base, offset_for_load;
+ if (CanSplitLoadStoreOffset(allowed_offset_bits, offset, &add_to_base, &offset_for_load)) {
+ ___ Add(temp, base, add_to_base);
+ return offset_for_load;
+ } else {
+ ___ Mov(temp, offset);
+ ___ Add(temp, temp, base);
+ return 0;
+ }
+}
+
+// TODO(VIXL): Implement this in VIXL.
+int32_t ArmVIXLAssembler::GetAllowedLoadOffsetBits(LoadOperandType type) {
+ switch (type) {
+ case kLoadSignedByte:
+ case kLoadSignedHalfword:
+ case kLoadUnsignedHalfword:
+ case kLoadUnsignedByte:
+ case kLoadWord:
+ // We can encode imm12 offset.
+ return 0xfff;
+ case kLoadSWord:
+ case kLoadDWord:
+ case kLoadWordPair:
+ // We can encode imm8:'00' offset.
+ return 0xff << 2;
+ default:
+ LOG(FATAL) << "UNREACHABLE";
+ UNREACHABLE();
+ }
+}
+
+// TODO(VIXL): Implement this in VIXL.
+int32_t ArmVIXLAssembler::GetAllowedStoreOffsetBits(StoreOperandType type) {
+ switch (type) {
+ case kStoreHalfword:
+ case kStoreByte:
+ case kStoreWord:
+ // We can encode imm12 offset.
+ return 0xfff;
+ case kStoreSWord:
+ case kStoreDWord:
+ case kStoreWordPair:
+ // We can encode imm8:'00' offset.
+ return 0xff << 2;
+ default:
+ LOG(FATAL) << "UNREACHABLE";
+ UNREACHABLE();
+ }
+}
+
+// TODO(VIXL): Implement this in VIXL.
+static bool CanHoldLoadOffsetThumb(LoadOperandType type, int offset) {
+ switch (type) {
+ case kLoadSignedByte:
+ case kLoadSignedHalfword:
+ case kLoadUnsignedHalfword:
+ case kLoadUnsignedByte:
+ case kLoadWord:
+ return IsAbsoluteUint<12>(offset);
+ case kLoadSWord:
+ case kLoadDWord:
+ return IsAbsoluteUint<10>(offset) && IsAligned<4>(offset); // VFP addressing mode.
+ case kLoadWordPair:
+ return IsAbsoluteUint<10>(offset) && IsAligned<4>(offset);
+ default:
+ LOG(FATAL) << "UNREACHABLE";
+ UNREACHABLE();
+ }
+}
+
+// TODO(VIXL): Implement this in VIXL.
+static bool CanHoldStoreOffsetThumb(StoreOperandType type, int offset) {
+ switch (type) {
+ case kStoreHalfword:
+ case kStoreByte:
+ case kStoreWord:
+ return IsAbsoluteUint<12>(offset);
+ case kStoreSWord:
+ case kStoreDWord:
+ return IsAbsoluteUint<10>(offset) && IsAligned<4>(offset); // VFP addressing mode.
+ case kStoreWordPair:
+ return IsAbsoluteUint<10>(offset) && IsAligned<4>(offset);
+ default:
+ LOG(FATAL) << "UNREACHABLE";
+ UNREACHABLE();
+ }
+}
+
+// Implementation note: this method must emit at most one instruction when
+// Address::CanHoldStoreOffsetThumb.
+// TODO(VIXL): Implement AdjustLoadStoreOffset logic in VIXL.
+void ArmVIXLAssembler::StoreToOffset(StoreOperandType type,
+ vixl32::Register reg,
+ vixl32::Register base,
+ int32_t offset) {
+ vixl32::Register tmp_reg;
+ UseScratchRegisterScope temps(&vixl_masm_);
+
+ if (!CanHoldStoreOffsetThumb(type, offset)) {
+ CHECK_NE(base.GetCode(), kIpCode);
+ if ((reg.GetCode() != kIpCode) &&
+ ((type != kStoreWordPair) || (reg.GetCode() + 1 != kIpCode))) {
+ tmp_reg = temps.Acquire();
+ } else {
+ // Be careful not to use ip twice (for `reg` (or `reg` + 1 in
+ // the case of a word-pair store) and `base`) to build the
+ // Address object used by the store instruction(s) below.
+ // Instead, save R5 on the stack (or R6 if R5 is already used by
+ // `base`), use it as secondary temporary register, and restore
+ // it after the store instruction has been emitted.
+ tmp_reg = (base.GetCode() != 5) ? r5 : r6;
+ ___ Push(tmp_reg);
+ if (base.GetCode() == kSpCode) {
+ offset += kRegisterSize;
+ }
+ }
+ // TODO: Implement indexed store (not available for STRD), inline AdjustLoadStoreOffset()
+ // and in the "unsplittable" path get rid of the "add" by using the store indexed instead.
+ offset = AdjustLoadStoreOffset(GetAllowedStoreOffsetBits(type), tmp_reg, base, offset);
+ base = tmp_reg;
+ }
+ DCHECK(CanHoldStoreOffsetThumb(type, offset));
+ switch (type) {
+ case kStoreByte:
+ ___ Strb(reg, MemOperand(base, offset));
+ break;
+ case kStoreHalfword:
+ ___ Strh(reg, MemOperand(base, offset));
+ break;
+ case kStoreWord:
+ ___ Str(reg, MemOperand(base, offset));
+ break;
+ case kStoreWordPair:
+ ___ Strd(reg, vixl32::Register(reg.GetCode() + 1), MemOperand(base, offset));
+ break;
+ default:
+ LOG(FATAL) << "UNREACHABLE";
+ UNREACHABLE();
+ }
+ if ((tmp_reg.IsValid()) && (tmp_reg.GetCode() != kIpCode)) {
+ CHECK(tmp_reg.Is(r5) || tmp_reg.Is(r6)) << tmp_reg;
+ ___ Pop(tmp_reg);
+ }
+}
+
+// Implementation note: this method must emit at most one instruction when
+// Address::CanHoldLoadOffsetThumb.
+// TODO(VIXL): Implement AdjustLoadStoreOffset logic in VIXL.
+void ArmVIXLAssembler::LoadFromOffset(LoadOperandType type,
+ vixl32::Register dest,
+ vixl32::Register base,
+ int32_t offset) {
+ if (!CanHoldLoadOffsetThumb(type, offset)) {
+ CHECK(!base.Is(ip));
+ // Inlined AdjustLoadStoreOffset() allows us to pull a few more tricks.
+ int32_t allowed_offset_bits = GetAllowedLoadOffsetBits(type);
+ DCHECK_NE(offset & ~allowed_offset_bits, 0);
+ int32_t add_to_base, offset_for_load;
+ if (CanSplitLoadStoreOffset(allowed_offset_bits, offset, &add_to_base, &offset_for_load)) {
+ // Use reg for the adjusted base. If it's low reg, we may end up using 16-bit load.
+ AddConstant(dest, base, add_to_base);
+ base = dest;
+ offset = offset_for_load;
+ } else {
+ UseScratchRegisterScope temps(&vixl_masm_);
+ vixl32::Register temp = (dest.Is(base)) ? temps.Acquire() : dest;
+ LoadImmediate(temp, offset);
+ // TODO: Implement indexed load (not available for LDRD) and use it here to avoid the ADD.
+ // Use reg for the adjusted base. If it's low reg, we may end up using 16-bit load.
+ ___ Add(dest, dest, (dest.Is(base)) ? temp : base);
+ base = dest;
+ offset = 0;
+ }
+ }
+
+ DCHECK(CanHoldLoadOffsetThumb(type, offset));
+ switch (type) {
+ case kLoadSignedByte:
+ ___ Ldrsb(dest, MemOperand(base, offset));
+ break;
+ case kLoadUnsignedByte:
+ ___ Ldrb(dest, MemOperand(base, offset));
+ break;
+ case kLoadSignedHalfword:
+ ___ Ldrsh(dest, MemOperand(base, offset));
+ break;
+ case kLoadUnsignedHalfword:
+ ___ Ldrh(dest, MemOperand(base, offset));
+ break;
+ case kLoadWord:
+ CHECK(!dest.IsSP());
+ ___ Ldr(dest, MemOperand(base, offset));
+ break;
+ case kLoadWordPair:
+ ___ Ldrd(dest, vixl32::Register(dest.GetCode() + 1), MemOperand(base, offset));
+ break;
+ default:
+ LOG(FATAL) << "UNREACHABLE";
+ UNREACHABLE();
+ }
+}
+
+void ArmVIXLAssembler::StoreSToOffset(vixl32::SRegister source,
+ vixl32::Register base,
+ int32_t offset) {
+ ___ Vstr(source, MemOperand(base, offset));
+}
+
+void ArmVIXLAssembler::StoreDToOffset(vixl32::DRegister source,
+ vixl32::Register base,
+ int32_t offset) {
+ ___ Vstr(source, MemOperand(base, offset));
+}
+
+void ArmVIXLAssembler::LoadSFromOffset(vixl32::SRegister reg,
+ vixl32::Register base,
+ int32_t offset) {
+ ___ Vldr(reg, MemOperand(base, offset));
+}
+
+void ArmVIXLAssembler::LoadDFromOffset(vixl32::DRegister reg,
+ vixl32::Register base,
+ int32_t offset) {
+ ___ Vldr(reg, MemOperand(base, offset));
+}
+
+void ArmVIXLAssembler::AddConstant(vixl32::Register rd, int32_t value) {
+ AddConstant(rd, rd, value);
+}
+
+// TODO(VIXL): think about using adds which updates flags where possible.
+void ArmVIXLAssembler::AddConstant(vixl32::Register rd,
+ vixl32::Register rn,
+ int32_t value) {
+ DCHECK(vixl_masm_.OutsideITBlock());
+ // TODO(VIXL): implement this optimization in VIXL.
+ if (value == 0) {
+ if (!rd.Is(rn)) {
+ ___ Mov(rd, rn);
+ }
+ return;
+ }
+ ___ Add(rd, rn, value);
+}
+
+// Inside an IT block we must use the assembler; MacroAssembler instructions are not permitted.
+void ArmVIXLAssembler::AddConstantInIt(vixl32::Register rd,
+ vixl32::Register rn,
+ int32_t value,
+ vixl32::Condition cond) {
+ DCHECK(vixl_masm_.InITBlock());
+ if (value == 0) {
+ ___ mov(cond, rd, rn);
+ } else {
+ ___ add(cond, rd, rn, value);
+ }
+}
+
+} // namespace arm
+} // namespace art
diff --git a/compiler/utils/arm/assembler_arm_vixl.h b/compiler/utils/arm/assembler_arm_vixl.h
new file mode 100644
index 0000000..c8f3a9b
--- /dev/null
+++ b/compiler/utils/arm/assembler_arm_vixl.h
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_UTILS_ARM_ASSEMBLER_ARM_VIXL_H_
+#define ART_COMPILER_UTILS_ARM_ASSEMBLER_ARM_VIXL_H_
+
+#include "base/arena_containers.h"
+#include "base/logging.h"
+#include "constants_arm.h"
+#include "offsets.h"
+#include "utils/arm/assembler_arm_shared.h"
+#include "utils/arm/managed_register_arm.h"
+#include "utils/assembler.h"
+#include "utils/jni_macro_assembler.h"
+
+// TODO(VIXL): Make VIXL compile with -Wshadow and remove pragmas.
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wshadow"
+#include "aarch32/macro-assembler-aarch32.h"
+#pragma GCC diagnostic pop
+
+namespace vixl32 = vixl::aarch32;
+
+namespace art {
+namespace arm {
+
+class ArmVIXLAssembler FINAL : public Assembler {
+ private:
+ class ArmException;
+ public:
+ explicit ArmVIXLAssembler(ArenaAllocator* arena)
+ : Assembler(arena) {
+ // Use Thumb2 instruction set.
+ vixl_masm_.UseT32();
+ }
+
+ virtual ~ArmVIXLAssembler() {}
+ vixl32::MacroAssembler* GetVIXLAssembler() { return &vixl_masm_; }
+ void FinalizeCode() OVERRIDE;
+
+ // Size of generated code.
+ size_t CodeSize() const OVERRIDE;
+ const uint8_t* CodeBufferBaseAddress() const OVERRIDE;
+
+ // Copy instructions out of assembly buffer into the given region of memory.
+ void FinalizeInstructions(const MemoryRegion& region) OVERRIDE;
+
+ void Bind(Label* label ATTRIBUTE_UNUSED) OVERRIDE {
+ UNIMPLEMENTED(FATAL) << "Do not use Bind for ARM";
+ }
+ void Jump(Label* label ATTRIBUTE_UNUSED) OVERRIDE {
+ UNIMPLEMENTED(FATAL) << "Do not use Jump for ARM";
+ }
+
+ //
+ // Heap poisoning.
+ //
+ // Poison a heap reference contained in `reg`.
+ void PoisonHeapReference(vixl32::Register reg);
+ // Unpoison a heap reference contained in `reg`.
+ void UnpoisonHeapReference(vixl32::Register reg);
+ // Unpoison a heap reference contained in `reg` if heap poisoning is enabled.
+ void MaybeUnpoisonHeapReference(vixl32::Register reg);
+
+ void StoreToOffset(StoreOperandType type,
+ vixl32::Register reg,
+ vixl32::Register base,
+ int32_t offset);
+ void StoreSToOffset(vixl32::SRegister source, vixl32::Register base, int32_t offset);
+ void StoreDToOffset(vixl32::DRegister source, vixl32::Register base, int32_t offset);
+
+ void LoadImmediate(vixl32::Register dest, int32_t value);
+ void LoadFromOffset(LoadOperandType type,
+ vixl32::Register reg,
+ vixl32::Register base,
+ int32_t offset);
+ void LoadSFromOffset(vixl32::SRegister reg, vixl32::Register base, int32_t offset);
+ void LoadDFromOffset(vixl32::DRegister reg, vixl32::Register base, int32_t offset);
+
+ bool ShifterOperandCanAlwaysHold(uint32_t immediate);
+ bool ShifterOperandCanHold(Opcode opcode, uint32_t immediate, SetCc set_cc);
+ bool CanSplitLoadStoreOffset(int32_t allowed_offset_bits,
+ int32_t offset,
+ /*out*/ int32_t* add_to_base,
+ /*out*/ int32_t* offset_for_load_store);
+ int32_t AdjustLoadStoreOffset(int32_t allowed_offset_bits,
+ vixl32::Register temp,
+ vixl32::Register base,
+ int32_t offset);
+ int32_t GetAllowedLoadOffsetBits(LoadOperandType type);
+ int32_t GetAllowedStoreOffsetBits(StoreOperandType type);
+
+ void AddConstant(vixl32::Register rd, int32_t value);
+ void AddConstant(vixl32::Register rd, vixl32::Register rn, int32_t value);
+ void AddConstantInIt(vixl32::Register rd,
+ vixl32::Register rn,
+ int32_t value,
+ vixl32::Condition cond = vixl32::al);
+
+ private:
+ // VIXL assembler.
+ vixl32::MacroAssembler vixl_masm_;
+};
+
+// Thread register declaration.
+extern const vixl32::Register tr;
+
+} // namespace arm
+} // namespace art
+
+#endif // ART_COMPILER_UTILS_ARM_ASSEMBLER_ARM_VIXL_H_
diff --git a/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc b/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
new file mode 100644
index 0000000..719fe7f
--- /dev/null
+++ b/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
@@ -0,0 +1,599 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <iostream>
+#include <type_traits>
+
+#include "jni_macro_assembler_arm_vixl.h"
+#include "entrypoints/quick/quick_entrypoints.h"
+#include "thread.h"
+
+using namespace vixl::aarch32; // NOLINT(build/namespaces)
+namespace vixl32 = vixl::aarch32;
+
+namespace art {
+namespace arm {
+
+#ifdef ___
+#error "ARM Assembler macro already defined."
+#else
+#define ___ asm_.GetVIXLAssembler()->
+#endif
+
+void ArmVIXLJNIMacroAssembler::FinalizeCode() {
+ for (const std::unique_ptr<
+ ArmVIXLJNIMacroAssembler::ArmException>& exception : exception_blocks_) {
+ EmitExceptionPoll(exception.get());
+ }
+ asm_.FinalizeCode();
+}
+
+static dwarf::Reg DWARFReg(vixl32::Register reg) {
+ return dwarf::Reg::ArmCore(static_cast<int>(reg.GetCode()));
+}
+
+static dwarf::Reg DWARFReg(vixl32::SRegister reg) {
+ return dwarf::Reg::ArmFp(static_cast<int>(reg.GetCode()));
+}
+
+static constexpr size_t kFramePointerSize = static_cast<size_t>(kArmPointerSize);
+
+void ArmVIXLJNIMacroAssembler::BuildFrame(size_t frame_size,
+ ManagedRegister method_reg,
+ ArrayRef<const ManagedRegister> callee_save_regs,
+ const ManagedRegisterEntrySpills& entry_spills) {
+ CHECK_ALIGNED(frame_size, kStackAlignment);
+ CHECK(r0.Is(method_reg.AsArm().AsVIXLRegister()));
+
+ // Push callee saves and link register.
+ RegList core_spill_mask = 1 << LR;
+ uint32_t fp_spill_mask = 0;
+ for (const ManagedRegister& reg : callee_save_regs) {
+ if (reg.AsArm().IsCoreRegister()) {
+ core_spill_mask |= 1 << reg.AsArm().AsCoreRegister();
+ } else {
+ fp_spill_mask |= 1 << reg.AsArm().AsSRegister();
+ }
+ }
+ ___ Push(RegisterList(core_spill_mask));
+ cfi().AdjustCFAOffset(POPCOUNT(core_spill_mask) * kFramePointerSize);
+ cfi().RelOffsetForMany(DWARFReg(r0), 0, core_spill_mask, kFramePointerSize);
+ if (fp_spill_mask != 0) {
+ uint32_t first = CTZ(fp_spill_mask);
+ uint32_t last = first + POPCOUNT(fp_spill_mask) - 1;
+
+ // Check that list is contiguous.
+ DCHECK_EQ(fp_spill_mask >> CTZ(fp_spill_mask), ~0u >> (32 - POPCOUNT(fp_spill_mask)));
+
+ ___ Vpush(SRegisterList(vixl32::SRegister(first), vixl32::SRegister(last)));
+ cfi().AdjustCFAOffset(POPCOUNT(fp_spill_mask) * kFramePointerSize);
+ cfi().RelOffsetForMany(DWARFReg(s0), 0, fp_spill_mask, kFramePointerSize);
+ }
+
+ // Increase frame to required size.
+ int pushed_values = POPCOUNT(core_spill_mask) + POPCOUNT(fp_spill_mask);
+ // Must at least have space for Method*.
+ CHECK_GT(frame_size, pushed_values * kFramePointerSize);
+ IncreaseFrameSize(frame_size - pushed_values * kFramePointerSize); // handles CFI as well.
+
+ // Write out Method*.
+ asm_.StoreToOffset(kStoreWord, r0, sp, 0);
+
+ // Write out entry spills.
+ int32_t offset = frame_size + kFramePointerSize;
+ for (size_t i = 0; i < entry_spills.size(); ++i) {
+ ArmManagedRegister reg = entry_spills.at(i).AsArm();
+ if (reg.IsNoRegister()) {
+ // only increment stack offset.
+ ManagedRegisterSpill spill = entry_spills.at(i);
+ offset += spill.getSize();
+ } else if (reg.IsCoreRegister()) {
+ asm_.StoreToOffset(kStoreWord, reg.AsVIXLRegister(), sp, offset);
+ offset += 4;
+ } else if (reg.IsSRegister()) {
+ asm_.StoreSToOffset(reg.AsVIXLSRegister(), sp, offset);
+ offset += 4;
+ } else if (reg.IsDRegister()) {
+ asm_.StoreDToOffset(reg.AsVIXLDRegister(), sp, offset);
+ offset += 8;
+ }
+ }
+}
+
+void ArmVIXLJNIMacroAssembler::RemoveFrame(size_t frame_size,
+ ArrayRef<const ManagedRegister> callee_save_regs) {
+ CHECK_ALIGNED(frame_size, kStackAlignment);
+ cfi().RememberState();
+
+ // Compute callee saves to pop and PC.
+ RegList core_spill_mask = 1 << PC;
+ uint32_t fp_spill_mask = 0;
+ for (const ManagedRegister& reg : callee_save_regs) {
+ if (reg.AsArm().IsCoreRegister()) {
+ core_spill_mask |= 1 << reg.AsArm().AsCoreRegister();
+ } else {
+ fp_spill_mask |= 1 << reg.AsArm().AsSRegister();
+ }
+ }
+
+ // Decrease frame to start of callee saves.
+ int pop_values = POPCOUNT(core_spill_mask) + POPCOUNT(fp_spill_mask);
+ CHECK_GT(frame_size, pop_values * kFramePointerSize);
+ DecreaseFrameSize(frame_size - (pop_values * kFramePointerSize)); // handles CFI as well.
+
+ if (fp_spill_mask != 0) {
+ uint32_t first = CTZ(fp_spill_mask);
+ uint32_t last = first + POPCOUNT(fp_spill_mask) - 1;
+ // Check that list is contiguous.
+ DCHECK_EQ(fp_spill_mask >> CTZ(fp_spill_mask), ~0u >> (32 - POPCOUNT(fp_spill_mask)));
+
+ ___ Vpop(SRegisterList(vixl32::SRegister(first), vixl32::SRegister(last)));
+ cfi().AdjustCFAOffset(-kFramePointerSize * POPCOUNT(fp_spill_mask));
+ cfi().RestoreMany(DWARFReg(s0), fp_spill_mask);
+ }
+
+ // Pop callee saves and PC.
+ ___ Pop(RegisterList(core_spill_mask));
+
+ // The CFI should be restored for any code that follows the exit block.
+ cfi().RestoreState();
+ cfi().DefCFAOffset(frame_size);
+}
+
+
+void ArmVIXLJNIMacroAssembler::IncreaseFrameSize(size_t adjust) {
+ asm_.AddConstant(sp, -adjust);
+ cfi().AdjustCFAOffset(adjust);
+}
+
+void ArmVIXLJNIMacroAssembler::DecreaseFrameSize(size_t adjust) {
+ asm_.AddConstant(sp, adjust);
+ cfi().AdjustCFAOffset(-adjust);
+}
+
+void ArmVIXLJNIMacroAssembler::Store(FrameOffset dest, ManagedRegister m_src, size_t size) {
+ ArmManagedRegister src = m_src.AsArm();
+ if (src.IsNoRegister()) {
+ CHECK_EQ(0u, size);
+ } else if (src.IsCoreRegister()) {
+ CHECK_EQ(4u, size);
+ asm_.StoreToOffset(kStoreWord, src.AsVIXLRegister(), sp, dest.Int32Value());
+ } else if (src.IsRegisterPair()) {
+ CHECK_EQ(8u, size);
+ asm_.StoreToOffset(kStoreWord, src.AsVIXLRegisterPairLow(), sp, dest.Int32Value());
+ asm_.StoreToOffset(kStoreWord, src.AsVIXLRegisterPairHigh(), sp, dest.Int32Value() + 4);
+ } else if (src.IsSRegister()) {
+ CHECK_EQ(4u, size);
+ asm_.StoreSToOffset(src.AsVIXLSRegister(), sp, dest.Int32Value());
+ } else {
+ CHECK_EQ(8u, size);
+ CHECK(src.IsDRegister()) << src;
+ asm_.StoreDToOffset(src.AsVIXLDRegister(), sp, dest.Int32Value());
+ }
+}
+
+void ArmVIXLJNIMacroAssembler::StoreRef(FrameOffset dest, ManagedRegister msrc) {
+ ArmManagedRegister src = msrc.AsArm();
+ CHECK(src.IsCoreRegister()) << src;
+ asm_.StoreToOffset(kStoreWord, src.AsVIXLRegister(), sp, dest.Int32Value());
+}
+
+void ArmVIXLJNIMacroAssembler::StoreRawPtr(FrameOffset dest, ManagedRegister msrc) {
+ ArmManagedRegister src = msrc.AsArm();
+ CHECK(src.IsCoreRegister()) << src;
+ asm_.StoreToOffset(kStoreWord, src.AsVIXLRegister(), sp, dest.Int32Value());
+}
+
+void ArmVIXLJNIMacroAssembler::StoreSpanning(FrameOffset dest,
+ ManagedRegister msrc,
+ FrameOffset in_off,
+ ManagedRegister mscratch) {
+ ArmManagedRegister src = msrc.AsArm();
+ ArmManagedRegister scratch = mscratch.AsArm();
+ asm_.StoreToOffset(kStoreWord, src.AsVIXLRegister(), sp, dest.Int32Value());
+ asm_.LoadFromOffset(kLoadWord, scratch.AsVIXLRegister(), sp, in_off.Int32Value());
+ asm_.StoreToOffset(kStoreWord, scratch.AsVIXLRegister(), sp, dest.Int32Value() + 4);
+}
+
+void ArmVIXLJNIMacroAssembler::CopyRef(FrameOffset dest,
+ FrameOffset src,
+ ManagedRegister mscratch) {
+ ArmManagedRegister scratch = mscratch.AsArm();
+ asm_.LoadFromOffset(kLoadWord, scratch.AsVIXLRegister(), sp, src.Int32Value());
+ asm_.StoreToOffset(kStoreWord, scratch.AsVIXLRegister(), sp, dest.Int32Value());
+}
+
+void ArmVIXLJNIMacroAssembler::LoadRef(ManagedRegister dest,
+ ManagedRegister base,
+ MemberOffset offs,
+ bool unpoison_reference) {
+ ArmManagedRegister dst = dest.AsArm();
+ CHECK(dst.IsCoreRegister()) << dst;
+ asm_.LoadFromOffset(kLoadWord,
+ dst.AsVIXLRegister(),
+ base.AsArm().AsVIXLRegister(),
+ offs.Int32Value());
+
+ if (unpoison_reference) {
+ asm_.MaybeUnpoisonHeapReference(dst.AsVIXLRegister());
+ }
+}
+
+void ArmVIXLJNIMacroAssembler::LoadRef(ManagedRegister dest ATTRIBUTE_UNUSED,
+ FrameOffset src ATTRIBUTE_UNUSED) {
+ UNIMPLEMENTED(FATAL);
+}
+
+void ArmVIXLJNIMacroAssembler::LoadRawPtr(ManagedRegister dest ATTRIBUTE_UNUSED,
+ ManagedRegister base ATTRIBUTE_UNUSED,
+ Offset offs ATTRIBUTE_UNUSED) {
+ UNIMPLEMENTED(FATAL);
+}
+
+void ArmVIXLJNIMacroAssembler::StoreImmediateToFrame(FrameOffset dest,
+ uint32_t imm,
+ ManagedRegister scratch) {
+ ArmManagedRegister mscratch = scratch.AsArm();
+ CHECK(mscratch.IsCoreRegister()) << mscratch;
+ asm_.LoadImmediate(mscratch.AsVIXLRegister(), imm);
+ asm_.StoreToOffset(kStoreWord, mscratch.AsVIXLRegister(), sp, dest.Int32Value());
+}
+
+void ArmVIXLJNIMacroAssembler::Load(ManagedRegister m_dst, FrameOffset src, size_t size) {
+ return Load(m_dst.AsArm(), sp, src.Int32Value(), size);
+}
+
+void ArmVIXLJNIMacroAssembler::LoadFromThread(ManagedRegister m_dst ATTRIBUTE_UNUSED,
+ ThreadOffset32 src ATTRIBUTE_UNUSED,
+ size_t size ATTRIBUTE_UNUSED) {
+ UNIMPLEMENTED(FATAL);
+}
+
+void ArmVIXLJNIMacroAssembler::LoadRawPtrFromThread(ManagedRegister m_dst, ThreadOffset32 offs) {
+ ArmManagedRegister dst = m_dst.AsArm();
+ CHECK(dst.IsCoreRegister()) << dst;
+ asm_.LoadFromOffset(kLoadWord, dst.AsVIXLRegister(), tr, offs.Int32Value());
+}
+
+void ArmVIXLJNIMacroAssembler::CopyRawPtrFromThread(FrameOffset fr_offs,
+ ThreadOffset32 thr_offs,
+ ManagedRegister mscratch) {
+ ArmManagedRegister scratch = mscratch.AsArm();
+ CHECK(scratch.IsCoreRegister()) << scratch;
+ asm_.LoadFromOffset(kLoadWord, scratch.AsVIXLRegister(), tr, thr_offs.Int32Value());
+ asm_.StoreToOffset(kStoreWord, scratch.AsVIXLRegister(), sp, fr_offs.Int32Value());
+}
+
+void ArmVIXLJNIMacroAssembler::CopyRawPtrToThread(ThreadOffset32 thr_offs ATTRIBUTE_UNUSED,
+ FrameOffset fr_offs ATTRIBUTE_UNUSED,
+ ManagedRegister mscratch ATTRIBUTE_UNUSED) {
+ UNIMPLEMENTED(FATAL);
+}
+
+void ArmVIXLJNIMacroAssembler::StoreStackOffsetToThread(ThreadOffset32 thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister mscratch) {
+ ArmManagedRegister scratch = mscratch.AsArm();
+ CHECK(scratch.IsCoreRegister()) << scratch;
+ asm_.AddConstant(scratch.AsVIXLRegister(), sp, fr_offs.Int32Value());
+ asm_.StoreToOffset(kStoreWord, scratch.AsVIXLRegister(), tr, thr_offs.Int32Value());
+}
+
+void ArmVIXLJNIMacroAssembler::StoreStackPointerToThread(ThreadOffset32 thr_offs) {
+ asm_.StoreToOffset(kStoreWord, sp, tr, thr_offs.Int32Value());
+}
+
+void ArmVIXLJNIMacroAssembler::SignExtend(ManagedRegister mreg ATTRIBUTE_UNUSED,
+ size_t size ATTRIBUTE_UNUSED) {
+ UNIMPLEMENTED(FATAL) << "no sign extension necessary for arm";
+}
+
+void ArmVIXLJNIMacroAssembler::ZeroExtend(ManagedRegister mreg ATTRIBUTE_UNUSED,
+ size_t size ATTRIBUTE_UNUSED) {
+ UNIMPLEMENTED(FATAL) << "no zero extension necessary for arm";
+}
+
+void ArmVIXLJNIMacroAssembler::Move(ManagedRegister m_dst,
+ ManagedRegister m_src,
+ size_t size ATTRIBUTE_UNUSED) {
+ ArmManagedRegister dst = m_dst.AsArm();
+ ArmManagedRegister src = m_src.AsArm();
+ if (!dst.Equals(src)) {
+ if (dst.IsCoreRegister()) {
+ CHECK(src.IsCoreRegister()) << src;
+ ___ Mov(dst.AsVIXLRegister(), src.AsVIXLRegister());
+ } else if (dst.IsDRegister()) {
+ CHECK(src.IsDRegister()) << src;
+ ___ Vmov(F64, dst.AsVIXLDRegister(), src.AsVIXLDRegister());
+ } else if (dst.IsSRegister()) {
+ CHECK(src.IsSRegister()) << src;
+ ___ Vmov(F32, dst.AsVIXLSRegister(), src.AsVIXLSRegister());
+ } else {
+ CHECK(dst.IsRegisterPair()) << dst;
+ CHECK(src.IsRegisterPair()) << src;
+ // Ensure that the first move doesn't clobber the input of the second.
+ if (src.AsRegisterPairHigh() != dst.AsRegisterPairLow()) {
+ ___ Mov(dst.AsVIXLRegisterPairLow(), src.AsVIXLRegisterPairLow());
+ ___ Mov(dst.AsVIXLRegisterPairHigh(), src.AsVIXLRegisterPairHigh());
+ } else {
+ ___ Mov(dst.AsVIXLRegisterPairHigh(), src.AsVIXLRegisterPairHigh());
+ ___ Mov(dst.AsVIXLRegisterPairLow(), src.AsVIXLRegisterPairLow());
+ }
+ }
+ }
+}
+
+void ArmVIXLJNIMacroAssembler::Copy(FrameOffset dest,
+ FrameOffset src,
+ ManagedRegister scratch,
+ size_t size) {
+ ArmManagedRegister temp = scratch.AsArm();
+ CHECK(temp.IsCoreRegister()) << temp;
+ CHECK(size == 4 || size == 8) << size;
+ if (size == 4) {
+ asm_.LoadFromOffset(kLoadWord, temp.AsVIXLRegister(), sp, src.Int32Value());
+ asm_.StoreToOffset(kStoreWord, temp.AsVIXLRegister(), sp, dest.Int32Value());
+ } else if (size == 8) {
+ asm_.LoadFromOffset(kLoadWord, temp.AsVIXLRegister(), sp, src.Int32Value());
+ asm_.StoreToOffset(kStoreWord, temp.AsVIXLRegister(), sp, dest.Int32Value());
+ asm_.LoadFromOffset(kLoadWord, temp.AsVIXLRegister(), sp, src.Int32Value() + 4);
+ asm_.StoreToOffset(kStoreWord, temp.AsVIXLRegister(), sp, dest.Int32Value() + 4);
+ }
+}
+
+void ArmVIXLJNIMacroAssembler::Copy(FrameOffset dest ATTRIBUTE_UNUSED,
+ ManagedRegister src_base ATTRIBUTE_UNUSED,
+ Offset src_offset ATTRIBUTE_UNUSED,
+ ManagedRegister mscratch ATTRIBUTE_UNUSED,
+ size_t size ATTRIBUTE_UNUSED) {
+ UNIMPLEMENTED(FATAL);
+}
+
+void ArmVIXLJNIMacroAssembler::Copy(ManagedRegister dest_base ATTRIBUTE_UNUSED,
+ Offset dest_offset ATTRIBUTE_UNUSED,
+ FrameOffset src ATTRIBUTE_UNUSED,
+ ManagedRegister mscratch ATTRIBUTE_UNUSED,
+ size_t size ATTRIBUTE_UNUSED) {
+ UNIMPLEMENTED(FATAL);
+}
+
+void ArmVIXLJNIMacroAssembler::Copy(FrameOffset dst ATTRIBUTE_UNUSED,
+ FrameOffset src_base ATTRIBUTE_UNUSED,
+ Offset src_offset ATTRIBUTE_UNUSED,
+ ManagedRegister mscratch ATTRIBUTE_UNUSED,
+ size_t size ATTRIBUTE_UNUSED) {
+ UNIMPLEMENTED(FATAL);
+}
+
+void ArmVIXLJNIMacroAssembler::Copy(ManagedRegister dest ATTRIBUTE_UNUSED,
+ Offset dest_offset ATTRIBUTE_UNUSED,
+ ManagedRegister src ATTRIBUTE_UNUSED,
+ Offset src_offset ATTRIBUTE_UNUSED,
+ ManagedRegister mscratch ATTRIBUTE_UNUSED,
+ size_t size ATTRIBUTE_UNUSED) {
+ UNIMPLEMENTED(FATAL);
+}
+
+void ArmVIXLJNIMacroAssembler::Copy(FrameOffset dst ATTRIBUTE_UNUSED,
+ Offset dest_offset ATTRIBUTE_UNUSED,
+ FrameOffset src ATTRIBUTE_UNUSED,
+ Offset src_offset ATTRIBUTE_UNUSED,
+ ManagedRegister scratch ATTRIBUTE_UNUSED,
+ size_t size ATTRIBUTE_UNUSED) {
+ UNIMPLEMENTED(FATAL);
+}
+
+static constexpr uint32_t kArmInstrMaxSizeInBytes = 4;
+
+void ArmVIXLJNIMacroAssembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
+ FrameOffset handle_scope_offset,
+ ManagedRegister min_reg,
+ bool null_allowed) {
+ ArmManagedRegister out_reg = mout_reg.AsArm();
+ ArmManagedRegister in_reg = min_reg.AsArm();
+ CHECK(in_reg.IsNoRegister() || in_reg.IsCoreRegister()) << in_reg;
+ CHECK(out_reg.IsCoreRegister()) << out_reg;
+ if (null_allowed) {
+ // Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is
+ // the address in the handle scope holding the reference.
+ // e.g. out_reg = (handle == 0) ? 0 : (SP+handle_offset)
+ if (in_reg.IsNoRegister()) {
+ asm_.LoadFromOffset(kLoadWord,
+ out_reg.AsVIXLRegister(),
+ sp,
+ handle_scope_offset.Int32Value());
+ in_reg = out_reg;
+ }
+ ___ Cmp(in_reg.AsVIXLRegister(), 0);
+
+ if (asm_.ShifterOperandCanHold(ADD, handle_scope_offset.Int32Value(), kCcDontCare)) {
+ if (!out_reg.Equals(in_reg)) {
+ AssemblerAccurateScope guard(asm_.GetVIXLAssembler(),
+ 3 * kArmInstrMaxSizeInBytes,
+ CodeBufferCheckScope::kMaximumSize);
+ ___ it(eq, 0xc);
+ ___ mov(eq, out_reg.AsVIXLRegister(), 0);
+ asm_.AddConstantInIt(out_reg.AsVIXLRegister(), sp, handle_scope_offset.Int32Value(), ne);
+ } else {
+ AssemblerAccurateScope guard(asm_.GetVIXLAssembler(),
+ 2 * kArmInstrMaxSizeInBytes,
+ CodeBufferCheckScope::kMaximumSize);
+ ___ it(ne, 0x8);
+ asm_.AddConstantInIt(out_reg.AsVIXLRegister(), sp, handle_scope_offset.Int32Value(), ne);
+ }
+ } else {
+ // TODO: Implement this (old arm assembler would have crashed here).
+ UNIMPLEMENTED(FATAL);
+ }
+ } else {
+ asm_.AddConstant(out_reg.AsVIXLRegister(), sp, handle_scope_offset.Int32Value());
+ }
+}
+
+void ArmVIXLJNIMacroAssembler::CreateHandleScopeEntry(FrameOffset out_off,
+ FrameOffset handle_scope_offset,
+ ManagedRegister mscratch,
+ bool null_allowed) {
+ ArmManagedRegister scratch = mscratch.AsArm();
+ CHECK(scratch.IsCoreRegister()) << scratch;
+ if (null_allowed) {
+ asm_.LoadFromOffset(kLoadWord, scratch.AsVIXLRegister(), sp, handle_scope_offset.Int32Value());
+ // Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is
+ // the address in the handle scope holding the reference.
+ // e.g. scratch = (scratch == 0) ? 0 : (SP+handle_scope_offset)
+ ___ Cmp(scratch.AsVIXLRegister(), 0);
+
+ if (asm_.ShifterOperandCanHold(ADD, handle_scope_offset.Int32Value(), kCcDontCare)) {
+ AssemblerAccurateScope guard(asm_.GetVIXLAssembler(),
+ 2 * kArmInstrMaxSizeInBytes,
+ CodeBufferCheckScope::kMaximumSize);
+ ___ it(ne, 0x8);
+ asm_.AddConstantInIt(scratch.AsVIXLRegister(), sp, handle_scope_offset.Int32Value(), ne);
+ } else {
+ // TODO: Implement this (old arm assembler would have crashed here).
+ UNIMPLEMENTED(FATAL);
+ }
+ } else {
+ asm_.AddConstant(scratch.AsVIXLRegister(), sp, handle_scope_offset.Int32Value());
+ }
+ asm_.StoreToOffset(kStoreWord, scratch.AsVIXLRegister(), sp, out_off.Int32Value());
+}
+
+void ArmVIXLJNIMacroAssembler::LoadReferenceFromHandleScope(
+ ManagedRegister mout_reg ATTRIBUTE_UNUSED,
+ ManagedRegister min_reg ATTRIBUTE_UNUSED) {
+ UNIMPLEMENTED(FATAL);
+}
+
+void ArmVIXLJNIMacroAssembler::VerifyObject(ManagedRegister src ATTRIBUTE_UNUSED,
+ bool could_be_null ATTRIBUTE_UNUSED) {
+ // TODO: not validating references.
+}
+
+void ArmVIXLJNIMacroAssembler::VerifyObject(FrameOffset src ATTRIBUTE_UNUSED,
+ bool could_be_null ATTRIBUTE_UNUSED) {
+ // TODO: not validating references.
+}
+
+void ArmVIXLJNIMacroAssembler::Call(ManagedRegister mbase,
+ Offset offset,
+ ManagedRegister mscratch) {
+ ArmManagedRegister base = mbase.AsArm();
+ ArmManagedRegister scratch = mscratch.AsArm();
+ CHECK(base.IsCoreRegister()) << base;
+ CHECK(scratch.IsCoreRegister()) << scratch;
+ asm_.LoadFromOffset(kLoadWord,
+ scratch.AsVIXLRegister(),
+ base.AsVIXLRegister(),
+ offset.Int32Value());
+ ___ Blx(scratch.AsVIXLRegister());
+ // TODO: place reference map on call.
+}
+
+void ArmVIXLJNIMacroAssembler::Call(FrameOffset base, Offset offset, ManagedRegister mscratch) {
+ ArmManagedRegister scratch = mscratch.AsArm();
+ CHECK(scratch.IsCoreRegister()) << scratch;
+ // Call *(*(SP + base) + offset)
+ asm_.LoadFromOffset(kLoadWord, scratch.AsVIXLRegister(), sp, base.Int32Value());
+ asm_.LoadFromOffset(kLoadWord,
+ scratch.AsVIXLRegister(),
+ scratch.AsVIXLRegister(),
+ offset.Int32Value());
+ ___ Blx(scratch.AsVIXLRegister());
+ // TODO: place reference map on call
+}
+
+void ArmVIXLJNIMacroAssembler::CallFromThread(ThreadOffset32 offset ATTRIBUTE_UNUSED,
+ ManagedRegister scratch ATTRIBUTE_UNUSED) {
+ UNIMPLEMENTED(FATAL);
+}
+
+void ArmVIXLJNIMacroAssembler::GetCurrentThread(ManagedRegister mtr) {
+ ___ Mov(mtr.AsArm().AsVIXLRegister(), tr);
+}
+
+void ArmVIXLJNIMacroAssembler::GetCurrentThread(FrameOffset dest_offset,
+ ManagedRegister scratch ATTRIBUTE_UNUSED) {
+ asm_.StoreToOffset(kStoreWord, tr, sp, dest_offset.Int32Value());
+}
+
+void ArmVIXLJNIMacroAssembler::ExceptionPoll(ManagedRegister m_scratch, size_t stack_adjust) {
+ CHECK_ALIGNED(stack_adjust, kStackAlignment);
+ ArmManagedRegister scratch = m_scratch.AsArm();
+ exception_blocks_.emplace_back(
+ new ArmVIXLJNIMacroAssembler::ArmException(scratch, stack_adjust));
+ asm_.LoadFromOffset(kLoadWord,
+ scratch.AsVIXLRegister(),
+ tr,
+ Thread::ExceptionOffset<kArmPointerSize>().Int32Value());
+
+ ___ Cmp(scratch.AsVIXLRegister(), 0);
+ {
+ AssemblerAccurateScope guard(asm_.GetVIXLAssembler(),
+ kArmInstrMaxSizeInBytes,
+ CodeBufferCheckScope::kMaximumSize);
+ ___ b(ne, Narrow, exception_blocks_.back()->Entry());
+ }
+ // TODO: think about using CBNZ here.
+}
+
+void ArmVIXLJNIMacroAssembler::EmitExceptionPoll(
+ ArmVIXLJNIMacroAssembler::ArmException* exception) {
+ ___ Bind(exception->Entry());
+ if (exception->stack_adjust_ != 0) { // Fix up the frame.
+ DecreaseFrameSize(exception->stack_adjust_);
+ }
+ // Pass exception object as argument.
+ // Don't care about preserving r0 as this won't return.
+ ___ Mov(r0, exception->scratch_.AsVIXLRegister());
+ // TODO: check that exception->scratch_ is dead by this point.
+ UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+ vixl32::Register temp = temps.Acquire();
+ ___ Ldr(temp,
+ MemOperand(tr,
+ QUICK_ENTRYPOINT_OFFSET(kArmPointerSize, pDeliverException).Int32Value()));
+ ___ Blx(temp);
+}
+
+void ArmVIXLJNIMacroAssembler::MemoryBarrier(ManagedRegister scratch ATTRIBUTE_UNUSED) {
+ UNIMPLEMENTED(FATAL);
+}
+
+void ArmVIXLJNIMacroAssembler::Load(ArmManagedRegister dest,
+ vixl32::Register base,
+ int32_t offset,
+ size_t size) {
+ if (dest.IsNoRegister()) {
+ CHECK_EQ(0u, size) << dest;
+ } else if (dest.IsCoreRegister()) {
+ CHECK_EQ(4u, size) << dest;
+ CHECK(!dest.AsVIXLRegister().Is(sp)) << dest;
+ ___ Ldr(dest.AsVIXLRegister(), MemOperand(base, offset));
+ } else if (dest.IsRegisterPair()) {
+ CHECK_EQ(8u, size) << dest;
+ ___ Ldr(dest.AsVIXLRegisterPairLow(), MemOperand(base, offset));
+ ___ Ldr(dest.AsVIXLRegisterPairHigh(), MemOperand(base, offset + 4));
+ } else if (dest.IsSRegister()) {
+ ___ Vldr(dest.AsVIXLSRegister(), MemOperand(base, offset));
+ } else {
+ CHECK(dest.IsDRegister()) << dest;
+ ___ Vldr(dest.AsVIXLDRegister(), MemOperand(base, offset));
+ }
+}
+
+} // namespace arm
+} // namespace art
diff --git a/compiler/utils/arm/jni_macro_assembler_arm_vixl.h b/compiler/utils/arm/jni_macro_assembler_arm_vixl.h
new file mode 100644
index 0000000..dfc35b7
--- /dev/null
+++ b/compiler/utils/arm/jni_macro_assembler_arm_vixl.h
@@ -0,0 +1,225 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_UTILS_ARM_JNI_MACRO_ASSEMBLER_ARM_VIXL_H_
+#define ART_COMPILER_UTILS_ARM_JNI_MACRO_ASSEMBLER_ARM_VIXL_H_
+
+#include "base/arena_containers.h"
+#include "base/logging.h"
+#include "constants_arm.h"
+#include "offsets.h"
+#include "utils/arm/assembler_arm_shared.h"
+#include "utils/arm/assembler_arm_vixl.h"
+#include "utils/arm/managed_register_arm.h"
+#include "utils/assembler.h"
+#include "utils/jni_macro_assembler.h"
+
+namespace art {
+namespace arm {
+
+class ArmVIXLJNIMacroAssembler FINAL
+ : public JNIMacroAssemblerFwd<ArmVIXLAssembler, PointerSize::k32> {
+ private:
+ class ArmException;
+ public:
+ explicit ArmVIXLJNIMacroAssembler(ArenaAllocator* arena)
+ : JNIMacroAssemblerFwd(arena),
+ exception_blocks_(arena->Adapter(kArenaAllocAssembler)) {}
+
+ virtual ~ArmVIXLJNIMacroAssembler() {}
+ void FinalizeCode() OVERRIDE;
+
+ //
+ // Overridden common assembler high-level functionality
+ //
+
+ // Emit code that will create an activation on the stack.
+ void BuildFrame(size_t frame_size,
+ ManagedRegister method_reg,
+ ArrayRef<const ManagedRegister> callee_save_regs,
+ const ManagedRegisterEntrySpills& entry_spills) OVERRIDE;
+
+ // Emit code that will remove an activation from the stack.
+ void RemoveFrame(size_t frame_size,
+ ArrayRef<const ManagedRegister> callee_save_regs) OVERRIDE;
+
+ void IncreaseFrameSize(size_t adjust) OVERRIDE;
+ void DecreaseFrameSize(size_t adjust) OVERRIDE;
+
+ // Store routines.
+ void Store(FrameOffset offs, ManagedRegister src, size_t size) OVERRIDE;
+ void StoreRef(FrameOffset dest, ManagedRegister src) OVERRIDE;
+ void StoreRawPtr(FrameOffset dest, ManagedRegister src) OVERRIDE;
+
+ void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE;
+
+ void StoreStackOffsetToThread(ThreadOffset32 thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister scratch) OVERRIDE;
+
+ void StoreStackPointerToThread(ThreadOffset32 thr_offs) OVERRIDE;
+
+ void StoreSpanning(FrameOffset dest,
+ ManagedRegister src,
+ FrameOffset in_off,
+ ManagedRegister scratch) OVERRIDE;
+
+ // Load routines.
+ void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE;
+
+ void LoadFromThread(ManagedRegister dest,
+ ThreadOffset32 src,
+ size_t size) OVERRIDE;
+
+ void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
+
+ void LoadRef(ManagedRegister dest,
+ ManagedRegister base,
+ MemberOffset offs,
+ bool unpoison_reference) OVERRIDE;
+
+ void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;
+
+ void LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset32 offs) OVERRIDE;
+
+ // Copying routines.
+ void Move(ManagedRegister dest, ManagedRegister src, size_t size) OVERRIDE;
+
+ void CopyRawPtrFromThread(FrameOffset fr_offs,
+ ThreadOffset32 thr_offs,
+ ManagedRegister scratch) OVERRIDE;
+
+ void CopyRawPtrToThread(ThreadOffset32 thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister scratch) OVERRIDE;
+
+ void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) OVERRIDE;
+
+ void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) OVERRIDE;
+
+ void Copy(FrameOffset dest,
+ ManagedRegister src_base,
+ Offset src_offset,
+ ManagedRegister scratch,
+ size_t size) OVERRIDE;
+
+ void Copy(ManagedRegister dest_base,
+ Offset dest_offset,
+ FrameOffset src,
+ ManagedRegister scratch,
+ size_t size) OVERRIDE;
+
+ void Copy(FrameOffset dest,
+ FrameOffset src_base,
+ Offset src_offset,
+ ManagedRegister scratch,
+ size_t size) OVERRIDE;
+
+ void Copy(ManagedRegister dest,
+ Offset dest_offset,
+ ManagedRegister src,
+ Offset src_offset,
+ ManagedRegister scratch,
+ size_t size) OVERRIDE;
+
+ void Copy(FrameOffset dest,
+ Offset dest_offset,
+ FrameOffset src,
+ Offset src_offset,
+ ManagedRegister scratch,
+ size_t size) OVERRIDE;
+
+ // Sign extension.
+ void SignExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+
+ // Zero extension.
+ void ZeroExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+
+ // Exploit fast access in managed code to Thread::Current().
+ void GetCurrentThread(ManagedRegister mtr) OVERRIDE;
+ void GetCurrentThread(FrameOffset dest_offset,
+ ManagedRegister scratch) OVERRIDE;
+
+ // Set up out_reg to hold an Object** into the handle scope, or to be null if the
+ // value is null and null_allowed. in_reg holds a possibly stale reference
+ // that can be used to avoid loading the handle scope entry to see if the value is
+ // null.
+ void CreateHandleScopeEntry(ManagedRegister out_reg,
+ FrameOffset handlescope_offset,
+ ManagedRegister in_reg,
+ bool null_allowed) OVERRIDE;
+
+ // Set up out_off to hold an Object** into the handle scope, or to be null if the
+ // value is null and null_allowed.
+ void CreateHandleScopeEntry(FrameOffset out_off,
+ FrameOffset handlescope_offset,
+ ManagedRegister scratch,
+ bool null_allowed) OVERRIDE;
+
+ // src holds a handle scope entry (Object**); load this into dst.
+ void LoadReferenceFromHandleScope(ManagedRegister dst,
+ ManagedRegister src) OVERRIDE;
+
+ // Heap::VerifyObject on src. In some cases (such as a reference to this) we
+ // know that src may not be null.
+ void VerifyObject(ManagedRegister src, bool could_be_null) OVERRIDE;
+ void VerifyObject(FrameOffset src, bool could_be_null) OVERRIDE;
+
+ // Call to address held at [base+offset].
+ void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) OVERRIDE;
+ void Call(FrameOffset base, Offset offset, ManagedRegister scratch) OVERRIDE;
+ void CallFromThread(ThreadOffset32 offset, ManagedRegister scratch) OVERRIDE;
+
+ // Generate code to check if Thread::Current()->exception_ is non-null
+ // and branch to an ExceptionSlowPath if it is.
+ void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust);
+
+ void MemoryBarrier(ManagedRegister scratch) OVERRIDE;
+
+ void EmitExceptionPoll(ArmVIXLJNIMacroAssembler::ArmException *exception);
+ void Load(ArmManagedRegister dest, vixl32::Register base, int32_t offset, size_t size);
+
+ private:
+ class ArmException {
+ private:
+ ArmException(ArmManagedRegister scratch, size_t stack_adjust)
+ : scratch_(scratch), stack_adjust_(stack_adjust) {}
+
+ vixl32::Label* Entry() { return &exception_entry_; }
+
+ // Register used for passing Thread::Current()->exception_.
+ const ArmManagedRegister scratch_;
+
+ // Stack adjust for ExceptionPoll.
+ const size_t stack_adjust_;
+
+ vixl32::Label exception_entry_;
+
+ friend class ArmVIXLJNIMacroAssembler;
+ DISALLOW_COPY_AND_ASSIGN(ArmException);
+ };
+
+ // List of exception blocks to generate at the end of the code cache.
+ ArenaVector<std::unique_ptr<ArmVIXLJNIMacroAssembler::ArmException>> exception_blocks_;
+ // Used for testing.
+ friend class ArmVIXAssemblerTest_VixlLoadFromOffset_Test;
+ friend class ArmVIXAssemblerTest_VixlStoreToOffset_Test;
+};
+
+} // namespace arm
+} // namespace art
+
+#endif // ART_COMPILER_UTILS_ARM_JNI_MACRO_ASSEMBLER_ARM_VIXL_H_
diff --git a/compiler/utils/arm/managed_register_arm.h b/compiler/utils/arm/managed_register_arm.h
index 276db44..2be2d56 100644
--- a/compiler/utils/arm/managed_register_arm.h
+++ b/compiler/utils/arm/managed_register_arm.h
@@ -22,6 +22,12 @@
#include "debug/dwarf/register.h"
#include "utils/managed_register.h"
+// TODO(VIXL): Make VIXL compile with -Wshadow.
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wshadow"
+#include "aarch32/macro-assembler-aarch32.h"
+#pragma GCC diagnostic pop
+
namespace art {
namespace arm {
@@ -90,16 +96,31 @@
return static_cast<Register>(id_);
}
+ vixl::aarch32::Register AsVIXLRegister() const {
+ CHECK(IsCoreRegister());
+ return vixl::aarch32::Register(id_);
+ }
+
constexpr SRegister AsSRegister() const {
CHECK(IsSRegister());
return static_cast<SRegister>(id_ - kNumberOfCoreRegIds);
}
+ vixl::aarch32::SRegister AsVIXLSRegister() const {
+ CHECK(IsSRegister());
+ return vixl::aarch32::SRegister(id_ - kNumberOfCoreRegIds);
+ }
+
constexpr DRegister AsDRegister() const {
CHECK(IsDRegister());
return static_cast<DRegister>(id_ - kNumberOfCoreRegIds - kNumberOfSRegIds);
}
+ vixl::aarch32::DRegister AsVIXLDRegister() const {
+ CHECK(IsDRegister());
+ return vixl::aarch32::DRegister(id_ - kNumberOfCoreRegIds - kNumberOfSRegIds);
+ }
+
constexpr SRegister AsOverlappingDRegisterLow() const {
CHECK(IsOverlappingDRegister());
DRegister d_reg = AsDRegister();
@@ -128,12 +149,20 @@
return FromRegId(AllocIdLow()).AsCoreRegister();
}
+ vixl::aarch32::Register AsVIXLRegisterPairLow() const {
+ return vixl::aarch32::Register(AsRegisterPairLow());
+ }
+
constexpr Register AsRegisterPairHigh() const {
CHECK(IsRegisterPair());
// Appropriate mapping of register ids allows to use AllocIdHigh().
return FromRegId(AllocIdHigh()).AsCoreRegister();
}
+ vixl::aarch32::Register AsVIXLRegisterPairHigh() const {
+ return vixl::aarch32::Register(AsRegisterPairHigh());
+ }
+
constexpr bool IsCoreRegister() const {
CHECK(IsValidManagedRegister());
return (0 <= id_) && (id_ < kNumberOfCoreRegIds);
diff --git a/compiler/utils/assembler_thumb_test.cc b/compiler/utils/assembler_thumb_test.cc
index 9c9271d..41cb04b 100644
--- a/compiler/utils/assembler_thumb_test.cc
+++ b/compiler/utils/assembler_thumb_test.cc
@@ -23,6 +23,10 @@
#include "gtest/gtest.h"
#include "utils/arm/assembler_thumb2.h"
+
+#include "jni/quick/calling_convention.h"
+#include "utils/arm/jni_macro_assembler_arm_vixl.h"
+
#include "base/hex_dump.h"
#include "common_runtime_test.h"
@@ -1608,6 +1612,196 @@
EmitAndCheck(&assembler, "CmpConstant");
}
+#define ENABLE_VIXL_TEST
+
+#ifdef ENABLE_VIXL_TEST
+
+#define ARM_VIXL
+
+#ifdef ARM_VIXL
+typedef arm::ArmVIXLJNIMacroAssembler JniAssemblerType;
+#else
+typedef arm::Thumb2Assembler AssemblerType;
+#endif
+
+class ArmVIXAssemblerTest : public ::testing::Test {
+ public:
+ ArmVIXAssemblerTest() : pool(), arena(&pool), assembler(&arena) { }
+
+ ArenaPool pool;
+ ArenaAllocator arena;
+ JniAssemblerType assembler;
+};
+
#undef __
+#define __ assembler->
+
+void EmitAndCheck(JniAssemblerType* assembler, const char* testname,
+ const char* const* results) {
+ __ FinalizeCode();
+ size_t cs = __ CodeSize();
+ std::vector<uint8_t> managed_code(cs);
+ MemoryRegion code(&managed_code[0], managed_code.size());
+ __ FinalizeInstructions(code);
+
+ DumpAndCheck(managed_code, testname, results);
+}
+
+void EmitAndCheck(JniAssemblerType* assembler, const char* testname) {
+ InitResults();
+ std::map<std::string, const char* const*>::iterator results = test_results.find(testname);
+ ASSERT_NE(results, test_results.end());
+
+ EmitAndCheck(assembler, testname, results->second);
+}
+
+#undef __
+#define __ assembler.
+
+TEST_F(ArmVIXAssemblerTest, VixlJniHelpers) {
+ const bool is_static = true;
+ const bool is_synchronized = false;
+ const char* shorty = "IIFII";
+
+ ArenaPool pool;
+ ArenaAllocator arena(&pool);
+
+ std::unique_ptr<JniCallingConvention> jni_conv(
+ JniCallingConvention::Create(&arena, is_static, is_synchronized, shorty, kThumb2));
+ std::unique_ptr<ManagedRuntimeCallingConvention> mr_conv(
+ ManagedRuntimeCallingConvention::Create(&arena, is_static, is_synchronized, shorty, kThumb2));
+ const int frame_size(jni_conv->FrameSize());
+ ArrayRef<const ManagedRegister> callee_save_regs = jni_conv->CalleeSaveRegisters();
+
+ const ManagedRegister method_register = ArmManagedRegister::FromCoreRegister(R0);
+ const ManagedRegister scratch_register = ArmManagedRegister::FromCoreRegister(R12);
+
+ __ BuildFrame(frame_size, mr_conv->MethodRegister(), callee_save_regs, mr_conv->EntrySpills());
+ __ IncreaseFrameSize(32);
+
+ // Loads
+ __ IncreaseFrameSize(4096);
+ __ Load(method_register, FrameOffset(32), 4);
+ __ Load(method_register, FrameOffset(124), 4);
+ __ Load(method_register, FrameOffset(132), 4);
+ __ Load(method_register, FrameOffset(1020), 4);
+ __ Load(method_register, FrameOffset(1024), 4);
+ __ Load(scratch_register, FrameOffset(4092), 4);
+ __ Load(scratch_register, FrameOffset(4096), 4);
+ __ LoadRawPtrFromThread(scratch_register, ThreadOffset32(512));
+ __ LoadRef(method_register, scratch_register, MemberOffset(128), true);
+
+ // Stores
+ __ Store(FrameOffset(32), method_register, 4);
+ __ Store(FrameOffset(124), method_register, 4);
+ __ Store(FrameOffset(132), method_register, 4);
+ __ Store(FrameOffset(1020), method_register, 4);
+ __ Store(FrameOffset(1024), method_register, 4);
+ __ Store(FrameOffset(4092), scratch_register, 4);
+ __ Store(FrameOffset(4096), scratch_register, 4);
+ __ StoreImmediateToFrame(FrameOffset(48), 0xFF, scratch_register);
+ __ StoreImmediateToFrame(FrameOffset(48), 0xFFFFFF, scratch_register);
+ __ StoreRawPtr(FrameOffset(48), scratch_register);
+ __ StoreRef(FrameOffset(48), scratch_register);
+ __ StoreSpanning(FrameOffset(48), method_register, FrameOffset(48), scratch_register);
+ __ StoreStackOffsetToThread(ThreadOffset32(512), FrameOffset(4096), scratch_register);
+ __ StoreStackPointerToThread(ThreadOffset32(512));
+
+ // Other
+ __ Call(method_register, FrameOffset(48), scratch_register);
+ __ Copy(FrameOffset(48), FrameOffset(44), scratch_register, 4);
+ __ CopyRawPtrFromThread(FrameOffset(44), ThreadOffset32(512), scratch_register);
+ __ CopyRef(FrameOffset(48), FrameOffset(44), scratch_register);
+ __ GetCurrentThread(method_register);
+ __ GetCurrentThread(FrameOffset(48), scratch_register);
+ __ Move(scratch_register, method_register, 4);
+ __ VerifyObject(scratch_register, false);
+
+ __ CreateHandleScopeEntry(scratch_register, FrameOffset(48), scratch_register, true);
+ __ CreateHandleScopeEntry(scratch_register, FrameOffset(48), scratch_register, false);
+ __ CreateHandleScopeEntry(method_register, FrameOffset(48), scratch_register, true);
+ __ CreateHandleScopeEntry(FrameOffset(48), FrameOffset(64), scratch_register, true);
+ __ CreateHandleScopeEntry(method_register, FrameOffset(0), scratch_register, true);
+ __ CreateHandleScopeEntry(method_register, FrameOffset(1025), scratch_register, true);
+ __ CreateHandleScopeEntry(scratch_register, FrameOffset(1025), scratch_register, true);
+
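+  // Checks the pending-exception slot via the thread register (r9) and branches to a
+  // slow path (the trailing blx in the expected disassembly) when it is non-null.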
+ __ ExceptionPoll(scratch_register, 0);
+
+ __ DecreaseFrameSize(4096);
+ __ DecreaseFrameSize(32);
+ __ RemoveFrame(frame_size, callee_save_regs);
+
+ EmitAndCheck(&assembler, "VixlJniHelpers");
+}
+
+#ifdef ARM_VIXL
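+// Remap the register aliases to their vixl::aarch32 counterparts and route __ through
+// the JNI macro assembler's underlying assembler (asm_) for the encoding tests below.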
+#define R0 vixl::aarch32::r0
+#define R2 vixl::aarch32::r2
+#define R4 vixl::aarch32::r4
+#define R12 vixl::aarch32::r12
+#undef __
+#define __ assembler.asm_.
+#endif
+
+TEST_F(ArmVIXAssemblerTest, VixlLoadFromOffset) {
+ __ LoadFromOffset(kLoadWord, R2, R4, 12);
+ __ LoadFromOffset(kLoadWord, R2, R4, 0xfff);
+ __ LoadFromOffset(kLoadWord, R2, R4, 0x1000);
+ __ LoadFromOffset(kLoadWord, R2, R4, 0x1000a4);
+ __ LoadFromOffset(kLoadWord, R2, R4, 0x101000);
+ __ LoadFromOffset(kLoadWord, R4, R4, 0x101000);
+ __ LoadFromOffset(kLoadUnsignedHalfword, R2, R4, 12);
+ __ LoadFromOffset(kLoadUnsignedHalfword, R2, R4, 0xfff);
+ __ LoadFromOffset(kLoadUnsignedHalfword, R2, R4, 0x1000);
+ __ LoadFromOffset(kLoadUnsignedHalfword, R2, R4, 0x1000a4);
+ __ LoadFromOffset(kLoadUnsignedHalfword, R2, R4, 0x101000);
+ __ LoadFromOffset(kLoadUnsignedHalfword, R4, R4, 0x101000);
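+  // LDRD offsets are limited to a scaled 8-bit immediate (max 1020), hence 0x3fc/0x400.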
+ __ LoadFromOffset(kLoadWordPair, R2, R4, 12);
+ __ LoadFromOffset(kLoadWordPair, R2, R4, 0x3fc);
+ __ LoadFromOffset(kLoadWordPair, R2, R4, 0x400);
+ __ LoadFromOffset(kLoadWordPair, R2, R4, 0x400a4);
+ __ LoadFromOffset(kLoadWordPair, R2, R4, 0x40400);
+ __ LoadFromOffset(kLoadWordPair, R4, R4, 0x40400);
+
+ __ LoadFromOffset(kLoadWord, R0, R12, 12); // 32-bit because of R12.
+  __ LoadFromOffset(kLoadWord, R2, R4, 0xa4 - 0x100000); // Negative offset.
+
+ __ LoadFromOffset(kLoadSignedByte, R2, R4, 12);
+ __ LoadFromOffset(kLoadUnsignedByte, R2, R4, 12);
+ __ LoadFromOffset(kLoadSignedHalfword, R2, R4, 12);
+
+ EmitAndCheck(&assembler, "VixlLoadFromOffset");
+}
+
+TEST_F(ArmVIXAssemblerTest, VixlStoreToOffset) {
+ __ StoreToOffset(kStoreWord, R2, R4, 12);
+ __ StoreToOffset(kStoreWord, R2, R4, 0xfff);
+ __ StoreToOffset(kStoreWord, R2, R4, 0x1000);
+ __ StoreToOffset(kStoreWord, R2, R4, 0x1000a4);
+ __ StoreToOffset(kStoreWord, R2, R4, 0x101000);
+ __ StoreToOffset(kStoreWord, R4, R4, 0x101000);
+ __ StoreToOffset(kStoreHalfword, R2, R4, 12);
+ __ StoreToOffset(kStoreHalfword, R2, R4, 0xfff);
+ __ StoreToOffset(kStoreHalfword, R2, R4, 0x1000);
+ __ StoreToOffset(kStoreHalfword, R2, R4, 0x1000a4);
+ __ StoreToOffset(kStoreHalfword, R2, R4, 0x101000);
+ __ StoreToOffset(kStoreHalfword, R4, R4, 0x101000);
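+  // STRD shares the scaled 8-bit immediate limit, so 0x3fc/0x400 is again the boundary.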
+ __ StoreToOffset(kStoreWordPair, R2, R4, 12);
+ __ StoreToOffset(kStoreWordPair, R2, R4, 0x3fc);
+ __ StoreToOffset(kStoreWordPair, R2, R4, 0x400);
+ __ StoreToOffset(kStoreWordPair, R2, R4, 0x400a4);
+ __ StoreToOffset(kStoreWordPair, R2, R4, 0x40400);
+ __ StoreToOffset(kStoreWordPair, R4, R4, 0x40400);
+
+ __ StoreToOffset(kStoreWord, R0, R12, 12); // 32-bit because of R12.
+  __ StoreToOffset(kStoreWord, R2, R4, 0xa4 - 0x100000); // Negative offset.
+
+ __ StoreToOffset(kStoreByte, R2, R4, 12);
+
+ EmitAndCheck(&assembler, "VixlStoreToOffset");
+}
+
+#undef __
+#endif // ENABLE_VIXL_TEST
} // namespace arm
} // namespace art
diff --git a/compiler/utils/assembler_thumb_test_expected.cc.inc b/compiler/utils/assembler_thumb_test_expected.cc.inc
index 6736015..81c6ec5 100644
--- a/compiler/utils/assembler_thumb_test_expected.cc.inc
+++ b/compiler/utils/assembler_thumb_test_expected.cc.inc
@@ -5468,6 +5468,199 @@
nullptr
};
+const char* const VixlJniHelpersResults[] = {
+ " 0: e92d 4de0 stmdb sp!, {r5, r6, r7, r8, sl, fp, lr}\n",
+ " 4: ed2d 8a10 vpush {s16-s31}\n",
+ " 8: b089 sub sp, #36 ; 0x24\n",
+ " a: 9000 str r0, [sp, #0]\n",
+ " c: 9121 str r1, [sp, #132] ; 0x84\n",
+ " e: ed8d 0a22 vstr s0, [sp, #136] ; 0x88\n",
+ " 12: 9223 str r2, [sp, #140] ; 0x8c\n",
+ " 14: 9324 str r3, [sp, #144] ; 0x90\n",
+ " 16: b088 sub sp, #32\n",
+ " 18: f5ad 5d80 sub.w sp, sp, #4096 ; 0x1000\n",
+ " 1c: 9808 ldr r0, [sp, #32]\n",
+ " 1e: 981f ldr r0, [sp, #124] ; 0x7c\n",
+ " 20: 9821 ldr r0, [sp, #132] ; 0x84\n",
+ " 22: 98ff ldr r0, [sp, #1020] ; 0x3fc\n",
+ " 24: f8dd 0400 ldr.w r0, [sp, #1024] ; 0x400\n",
+ " 28: f8dd cffc ldr.w ip, [sp, #4092] ; 0xffc\n",
+ " 2c: f50d 5c80 add.w ip, sp, #4096 ; 0x1000\n",
+ " 30: f8dc c000 ldr.w ip, [ip]\n",
+ " 34: f8d9 c200 ldr.w ip, [r9, #512] ; 0x200\n",
+ " 38: f8dc 0080 ldr.w r0, [ip, #128] ; 0x80\n",
+ " 3c: 9008 str r0, [sp, #32]\n",
+ " 3e: 901f str r0, [sp, #124] ; 0x7c\n",
+ " 40: 9021 str r0, [sp, #132] ; 0x84\n",
+ " 42: 90ff str r0, [sp, #1020] ; 0x3fc\n",
+ " 44: f8cd 0400 str.w r0, [sp, #1024] ; 0x400\n",
+ " 48: f8cd cffc str.w ip, [sp, #4092] ; 0xffc\n",
+ " 4c: f84d 5d04 str.w r5, [sp, #-4]!\n",
+ " 50: f50d 5580 add.w r5, sp, #4096 ; 0x1000\n",
+ " 54: f8c5 c004 str.w ip, [r5, #4]\n",
+ " 58: f85d 5b04 ldr.w r5, [sp], #4\n",
+ " 5c: f04f 0cff mov.w ip, #255 ; 0xff\n",
+ " 60: f8cd c030 str.w ip, [sp, #48] ; 0x30\n",
+ " 64: f06f 4c7f mvn.w ip, #4278190080 ; 0xff000000\n",
+ " 68: f8cd c030 str.w ip, [sp, #48] ; 0x30\n",
+ " 6c: f8cd c030 str.w ip, [sp, #48] ; 0x30\n",
+ " 70: f8cd c030 str.w ip, [sp, #48] ; 0x30\n",
+ " 74: 900c str r0, [sp, #48] ; 0x30\n",
+ " 76: f8dd c030 ldr.w ip, [sp, #48] ; 0x30\n",
+ " 7a: f8cd c034 str.w ip, [sp, #52] ; 0x34\n",
+ " 7e: f50d 5c80 add.w ip, sp, #4096 ; 0x1000\n",
+ " 82: f8c9 c200 str.w ip, [r9, #512] ; 0x200\n",
+ " 86: f8c9 d200 str.w sp, [r9, #512] ; 0x200\n",
+ " 8a: f8d0 c030 ldr.w ip, [r0, #48] ; 0x30\n",
+ " 8e: 47e0 blx ip\n",
+ " 90: f8dd c02c ldr.w ip, [sp, #44] ; 0x2c\n",
+ " 94: f8cd c030 str.w ip, [sp, #48] ; 0x30\n",
+ " 98: f8d9 c200 ldr.w ip, [r9, #512] ; 0x200\n",
+ " 9c: f8cd c02c str.w ip, [sp, #44] ; 0x2c\n",
+ " a0: f8dd c02c ldr.w ip, [sp, #44] ; 0x2c\n",
+ " a4: f8cd c030 str.w ip, [sp, #48] ; 0x30\n",
+ " a8: 4648 mov r0, r9\n",
+ " aa: f8cd 9030 str.w r9, [sp, #48] ; 0x30\n",
+ " ae: 4684 mov ip, r0\n",
+ " b0: f1bc 0f00 cmp.w ip, #0\n",
+ " b4: bf18 it ne\n",
+ " b6: f10d 0c30 addne.w ip, sp, #48 ; 0x30\n",
+ " ba: f10d 0c30 add.w ip, sp, #48 ; 0x30\n",
+ " be: f1bc 0f00 cmp.w ip, #0\n",
+ " c2: bf0c ite eq\n",
+ " c4: 2000 moveq r0, #0\n",
+ " c6: a80c addne r0, sp, #48 ; 0x30\n",
+ " c8: f8dd c040 ldr.w ip, [sp, #64] ; 0x40\n",
+ " cc: f1bc 0f00 cmp.w ip, #0\n",
+ " d0: bf18 it ne\n",
+ " d2: f10d 0c40 addne.w ip, sp, #64 ; 0x40\n",
+ " d6: f8cd c030 str.w ip, [sp, #48] ; 0x30\n",
+ " da: f1bc 0f00 cmp.w ip, #0\n",
+ " de: bf0c ite eq\n",
+ " e0: 2000 moveq r0, #0\n",
+ " e2: 4668 movne r0, sp\n",
+ " e4: f1bc 0f00 cmp.w ip, #0\n",
+ " e8: bf0c ite eq\n",
+ " ea: 2000 moveq r0, #0\n",
+ " ec: f20d 4001 addwne r0, sp, #1025 ; 0x401\n",
+ " f0: f1bc 0f00 cmp.w ip, #0\n",
+ " f4: bf18 it ne\n",
+ " f6: f20d 4c01 addwne ip, sp, #1025 ; 0x401\n",
+ " fa: f8d9 c084 ldr.w ip, [r9, #132] ; 0x84\n",
+ " fe: f1bc 0f00 cmp.w ip, #0\n",
+ " 102: d107 bne.n 114 <VixlJniHelpers+0x114>\n",
+ " 104: f50d 5d80 add.w sp, sp, #4096 ; 0x1000\n",
+ " 108: b008 add sp, #32\n",
+ " 10a: b009 add sp, #36 ; 0x24\n",
+ " 10c: ecbd 8a10 vpop {s16-s31}\n",
+ " 110: e8bd 8de0 ldmia.w sp!, {r5, r6, r7, r8, sl, fp, pc}\n",
+ " 114: 4660 mov r0, ip\n",
+ " 116: f8d9 c2ac ldr.w ip, [r9, #684] ; 0x2ac\n",
+ " 11a: 47e0 blx ip\n",
+ nullptr
+};
+
+const char* const VixlLoadFromOffsetResults[] = {
+ " 0: 68e2 ldr r2, [r4, #12]\n",
+ " 2: f8d4 2fff ldr.w r2, [r4, #4095] ; 0xfff\n",
+ " 6: f504 5280 add.w r2, r4, #4096 ; 0x1000\n",
+ " a: 6812 ldr r2, [r2, #0]\n",
+ " c: f504 1280 add.w r2, r4, #1048576 ; 0x100000\n",
+ " 10: f8d2 20a4 ldr.w r2, [r2, #164] ; 0xa4\n",
+ " 14: f44f 5280 mov.w r2, #4096 ; 0x1000\n",
+ " 18: f2c0 0210 movt r2, #16\n",
+ " 1c: 4422 add r2, r4\n",
+ " 1e: 6812 ldr r2, [r2, #0]\n",
+ " 20: f44f 5c80 mov.w ip, #4096 ; 0x1000\n",
+ " 24: f2c0 0c10 movt ip, #16\n",
+ " 28: 4464 add r4, ip\n",
+ " 2a: 6824 ldr r4, [r4, #0]\n",
+ " 2c: 89a2 ldrh r2, [r4, #12]\n",
+ " 2e: f8b4 2fff ldrh.w r2, [r4, #4095] ; 0xfff\n",
+ " 32: f504 5280 add.w r2, r4, #4096 ; 0x1000\n",
+ " 36: 8812 ldrh r2, [r2, #0]\n",
+ " 38: f504 1280 add.w r2, r4, #1048576 ; 0x100000\n",
+ " 3c: f8b2 20a4 ldrh.w r2, [r2, #164] ; 0xa4\n",
+ " 40: f44f 5280 mov.w r2, #4096 ; 0x1000\n",
+ " 44: f2c0 0210 movt r2, #16\n",
+ " 48: 4422 add r2, r4\n",
+ " 4a: 8812 ldrh r2, [r2, #0]\n",
+ " 4c: f44f 5c80 mov.w ip, #4096 ; 0x1000\n",
+ " 50: f2c0 0c10 movt ip, #16\n",
+ " 54: 4464 add r4, ip\n",
+ " 56: 8824 ldrh r4, [r4, #0]\n",
+ " 58: e9d4 2303 ldrd r2, r3, [r4, #12]\n",
+ " 5c: e9d4 23ff ldrd r2, r3, [r4, #1020] ; 0x3fc\n",
+ " 60: f504 6280 add.w r2, r4, #1024 ; 0x400\n",
+ " 64: e9d2 2300 ldrd r2, r3, [r2]\n",
+ " 68: f504 2280 add.w r2, r4, #262144 ; 0x40000\n",
+ " 6c: e9d2 2329 ldrd r2, r3, [r2, #164] ; 0xa4\n",
+ " 70: f44f 6280 mov.w r2, #1024 ; 0x400\n",
+ " 74: f2c0 0204 movt r2, #4\n",
+ " 78: 4422 add r2, r4\n",
+ " 7a: e9d2 2300 ldrd r2, r3, [r2]\n",
+ " 7e: f44f 6c80 mov.w ip, #1024 ; 0x400\n",
+ " 82: f2c0 0c04 movt ip, #4\n",
+ " 86: 4464 add r4, ip\n",
+ " 88: e9d4 4500 ldrd r4, r5, [r4]\n",
+ " 8c: f8dc 000c ldr.w r0, [ip, #12]\n",
+ " 90: f5a4 1280 sub.w r2, r4, #1048576 ; 0x100000\n",
+ " 94: f8d2 20a4 ldr.w r2, [r2, #164] ; 0xa4\n",
+ " 98: f994 200c ldrsb.w r2, [r4, #12]\n",
+ " 9c: 7b22 ldrb r2, [r4, #12]\n",
+ " 9e: f9b4 200c ldrsh.w r2, [r4, #12]\n",
+ nullptr
+};
+
+const char* const VixlStoreToOffsetResults[] = {
+ " 0: 60e2 str r2, [r4, #12]\n",
+ " 2: f8c4 2fff str.w r2, [r4, #4095] ; 0xfff\n",
+ " 6: f504 5c80 add.w ip, r4, #4096 ; 0x1000\n",
+ " a: f8cc 2000 str.w r2, [ip]\n",
+ " e: f504 1c80 add.w ip, r4, #1048576 ; 0x100000\n",
+ " 12: f8cc 20a4 str.w r2, [ip, #164] ; 0xa4\n",
+ " 16: f44f 5c80 mov.w ip, #4096 ; 0x1000\n",
+ " 1a: f2c0 0c10 movt ip, #16\n",
+ " 1e: 44a4 add ip, r4\n",
+ " 20: f8cc 2000 str.w r2, [ip]\n",
+ " 24: f44f 5c80 mov.w ip, #4096 ; 0x1000\n",
+ " 28: f2c0 0c10 movt ip, #16\n",
+ " 2c: 44a4 add ip, r4\n",
+ " 2e: f8cc 4000 str.w r4, [ip]\n",
+ " 32: 81a2 strh r2, [r4, #12]\n",
+ " 34: f8a4 2fff strh.w r2, [r4, #4095] ; 0xfff\n",
+ " 38: f504 5c80 add.w ip, r4, #4096 ; 0x1000\n",
+ " 3c: f8ac 2000 strh.w r2, [ip]\n",
+ " 40: f504 1c80 add.w ip, r4, #1048576 ; 0x100000\n",
+ " 44: f8ac 20a4 strh.w r2, [ip, #164] ; 0xa4\n",
+ " 48: f44f 5c80 mov.w ip, #4096 ; 0x1000\n",
+ " 4c: f2c0 0c10 movt ip, #16\n",
+ " 50: 44a4 add ip, r4\n",
+ " 52: f8ac 2000 strh.w r2, [ip]\n",
+ " 56: f44f 5c80 mov.w ip, #4096 ; 0x1000\n",
+ " 5a: f2c0 0c10 movt ip, #16\n",
+ " 5e: 44a4 add ip, r4\n",
+ " 60: f8ac 4000 strh.w r4, [ip]\n",
+ " 64: e9c4 2303 strd r2, r3, [r4, #12]\n",
+ " 68: e9c4 23ff strd r2, r3, [r4, #1020] ; 0x3fc\n",
+ " 6c: f504 6c80 add.w ip, r4, #1024 ; 0x400\n",
+ " 70: e9cc 2300 strd r2, r3, [ip]\n",
+ " 74: f504 2c80 add.w ip, r4, #262144 ; 0x40000\n",
+ " 78: e9cc 2329 strd r2, r3, [ip, #164] ; 0xa4\n",
+ " 7c: f44f 6c80 mov.w ip, #1024 ; 0x400\n",
+ " 80: f2c0 0c04 movt ip, #4\n",
+ " 84: 44a4 add ip, r4\n",
+ " 86: e9cc 2300 strd r2, r3, [ip]\n",
+ " 8a: f44f 6c80 mov.w ip, #1024 ; 0x400\n",
+ " 8e: f2c0 0c04 movt ip, #4\n",
+ " 92: 44a4 add ip, r4\n",
+ " 94: e9cc 4500 strd r4, r5, [ip]\n",
+ " 98: f8cc 000c str.w r0, [ip, #12]\n",
+ " 9c: f5a4 1c80 sub.w ip, r4, #1048576 ; 0x100000\n",
+ " a0: f8cc 20a4 str.w r2, [ip, #164] ; 0xa4\n",
+ " a4: 7322 strb r2, [r4, #12]\n",
+ nullptr
+};
+
std::map<std::string, const char* const*> test_results;
void setup_results() {
test_results["SimpleMov"] = SimpleMovResults;
@@ -5520,4 +5713,7 @@
test_results["CompareAndBranch"] = CompareAndBranchResults;
test_results["AddConstant"] = AddConstantResults;
test_results["CmpConstant"] = CmpConstantResults;
+ test_results["VixlJniHelpers"] = VixlJniHelpersResults;
+ test_results["VixlStoreToOffset"] = VixlStoreToOffsetResults;
+ test_results["VixlLoadFromOffset"] = VixlLoadFromOffsetResults;
}