ARM: VIXL32: Use LoadLiteral for double constants.
Use LoadLiteral for generating double constants if the constant
doesn't fit the VMOV immediate encoding:
With a literal pool entry:
  0x00000004 ed9f0b07  vldr d0, 0x00000024
  + entry in literal pool
vs. materializing the constant through a core register:
  0x00000008 f64f0ca1  mov ip, #63649
  0x0000000c f2ce6c31  movt ip, #58929
  0x00000010 ee80cb10  vdup.32 d0, ip
  0x00000014 f6414cd6  mov ip, #7382
  0x00000018 f2c40cc8  movt ip, #16584
  0x0000001c ee20cb10  vmov.32 d0[1], ip
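
For reference, a rough sketch of the check that decides between the two
forms. The new Vmov() overload relies on vixl::VFP::IsImmFP64(); the
FitsVmovImmFP64() helper below is a made-up name that only illustrates my
reading of the VFP modified-immediate format (ARM ARM VFPExpandImm), not
VIXL's actual implementation:

  #include <cstdint>
  #include <cstring>

  // Illustration only: a double fits the vmov.f64 immediate form when its
  // bit pattern is  sign : NOT(b) : b x8 : cd : efgh : 48 zero bits,
  // i.e. the low 48 fraction bits are zero and exponent bits 61..54 are
  // all copies of the complement of bit 62.
  bool FitsVmovImmFP64(double value) {
    uint64_t bits;
    std::memcpy(&bits, &value, sizeof(bits));
    if ((bits & 0x0000ffffffffffffULL) != 0) {
      return false;  // Fraction bits that the 8-bit immediate cannot hold.
    }
    uint64_t not_b = (bits >> 62) & 1;
    uint64_t rep = (bits >> 54) & 0xff;  // Must replicate b = !not_b.
    return rep == (not_b ? 0x00u : 0xffu);
  }

Under this reading, values such as 1.0, 0.5 or 31.0 keep using a single
vmov.f64, while arbitrary constants like the one in the listing above fall
back to vldr plus a literal pool entry. The `using MacroAssembler::Vmov;`
declaration added alongside the overload keeps the other Vmov() overloads
of the VIXL MacroAssembler visible, which the new Vmov(DRegister, double)
would otherwise hide.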
Test: ART_USE_VIXL_ARM_BACKEND=true m test-art-host
Test: ART_USE_VIXL_ARM_BACKEND=true m test-art-target
Change-Id: Ia0343bd6b9473870e364df95f2ccfae9750050e0
diff --git a/compiler/utils/arm/assembler_arm_vixl.h b/compiler/utils/arm/assembler_arm_vixl.h
index 322f6c4..e81e767 100644
--- a/compiler/utils/arm/assembler_arm_vixl.h
+++ b/compiler/utils/arm/assembler_arm_vixl.h
@@ -135,6 +135,16 @@
   // jumping within 2KB range. For B(cond, label), because the supported branch range is 256
   // bytes; we use the far_target hint to try to use 16-bit T1 encoding for short range jumps.
   void B(vixl32::Condition cond, vixl32::Label* label, bool is_far_target = true);
+
+  // Use literal for generating double constant if it doesn't fit VMOV encoding.
+  void Vmov(vixl32::DRegister rd, double imm) {
+    if (vixl::VFP::IsImmFP64(imm)) {
+      MacroAssembler::Vmov(rd, imm);
+    } else {
+      MacroAssembler::Vldr(rd, imm);
+    }
+  }
+  using MacroAssembler::Vmov;
 };
 
 class ArmVIXLAssembler FINAL : public Assembler {