Diffstat (limited to 'compiler/utils/x86')
 -rw-r--r--  compiler/utils/x86/assembler_x86.cc          | 1205
 -rw-r--r--  compiler/utils/x86/assembler_x86.h           |  239
 -rw-r--r--  compiler/utils/x86/assembler_x86_test.cc     |  406
 -rw-r--r--  compiler/utils/x86/constants_x86.h           |    2
 -rw-r--r--  compiler/utils/x86/jni_macro_assembler_x86.cc|  587
 -rw-r--r--  compiler/utils/x86/jni_macro_assembler_x86.h |  183
 -rw-r--r--  compiler/utils/x86/managed_register_x86.h    |   40
 7 files changed, 1961 insertions(+), 701 deletions(-)
diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc
index 2203646e77..1736618363 100644
--- a/compiler/utils/x86/assembler_x86.cc
+++ b/compiler/utils/x86/assembler_x86.cc
@@ -350,6 +350,38 @@ void X86Assembler::movaps(XmmRegister dst, XmmRegister src) {
}
+void X86Assembler::movaps(XmmRegister dst, const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0F);
+ EmitUint8(0x28);
+ EmitOperand(dst, src);
+}
+
+
+void X86Assembler::movups(XmmRegister dst, const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0F);
+ EmitUint8(0x10);
+ EmitOperand(dst, src);
+}
+
+
+void X86Assembler::movaps(const Address& dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0F);
+ EmitUint8(0x29);
+ EmitOperand(src, dst);
+}
+
+
+void X86Assembler::movups(const Address& dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0F);
+ EmitUint8(0x11);
+ EmitOperand(src, dst);
+}
+
+
void X86Assembler::movss(XmmRegister dst, const Address& src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0xF3);
@@ -467,6 +499,83 @@ void X86Assembler::divss(XmmRegister dst, const Address& src) {
}
+void X86Assembler::addps(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0F);
+ EmitUint8(0x58);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::subps(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0F);
+ EmitUint8(0x5C);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::mulps(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0F);
+ EmitUint8(0x59);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::divps(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0F);
+ EmitUint8(0x5E);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::movapd(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x28);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::movapd(XmmRegister dst, const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x28);
+ EmitOperand(dst, src);
+}
+
+
+void X86Assembler::movupd(XmmRegister dst, const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x10);
+ EmitOperand(dst, src);
+}
+
+
+void X86Assembler::movapd(const Address& dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x29);
+ EmitOperand(src, dst);
+}
+
+
+void X86Assembler::movupd(const Address& dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x11);
+ EmitOperand(src, dst);
+}
+
+
void X86Assembler::flds(const Address& src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0xD9);
@@ -533,39 +642,6 @@ void X86Assembler::movhpd(const Address& dst, XmmRegister src) {
}
-void X86Assembler::psrldq(XmmRegister reg, const Immediate& shift_count) {
- DCHECK(shift_count.is_uint8());
-
- AssemblerBuffer::EnsureCapacity ensured(&buffer_);
- EmitUint8(0x66);
- EmitUint8(0x0F);
- EmitUint8(0x73);
- EmitXmmRegisterOperand(3, reg);
- EmitUint8(shift_count.value());
-}
-
-
-void X86Assembler::psrlq(XmmRegister reg, const Immediate& shift_count) {
- DCHECK(shift_count.is_uint8());
-
- AssemblerBuffer::EnsureCapacity ensured(&buffer_);
- EmitUint8(0x66);
- EmitUint8(0x0F);
- EmitUint8(0x73);
- EmitXmmRegisterOperand(2, reg);
- EmitUint8(shift_count.value());
-}
-
-
-void X86Assembler::punpckldq(XmmRegister dst, XmmRegister src) {
- AssemblerBuffer::EnsureCapacity ensured(&buffer_);
- EmitUint8(0x66);
- EmitUint8(0x0F);
- EmitUint8(0x62);
- EmitXmmRegisterOperand(dst, src);
-}
-
-
void X86Assembler::addsd(XmmRegister dst, XmmRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0xF2);
@@ -638,6 +714,178 @@ void X86Assembler::divsd(XmmRegister dst, const Address& src) {
}
+void X86Assembler::addpd(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x58);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::subpd(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x5C);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::mulpd(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x59);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::divpd(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x5E);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::movdqa(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x6F);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::movdqa(XmmRegister dst, const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x6F);
+ EmitOperand(dst, src);
+}
+
+
+void X86Assembler::movdqu(XmmRegister dst, const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF3);
+ EmitUint8(0x0F);
+ EmitUint8(0x6F);
+ EmitOperand(dst, src);
+}
+
+
+void X86Assembler::movdqa(const Address& dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x7F);
+ EmitOperand(src, dst);
+}
+
+
+void X86Assembler::movdqu(const Address& dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF3);
+ EmitUint8(0x0F);
+ EmitUint8(0x7F);
+ EmitOperand(src, dst);
+}
+
+
+void X86Assembler::paddb(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0xFC);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::psubb(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0xF8);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::paddw(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0xFD);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::psubw(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0xF9);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::pmullw(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0xD5);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::paddd(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0xFE);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::psubd(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0xFA);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::pmulld(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x38);
+ EmitUint8(0x40);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::paddq(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0xD4);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::psubq(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0xFB);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
void X86Assembler::cvtsi2ss(XmmRegister dst, Register src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0xF3);
@@ -710,6 +958,14 @@ void X86Assembler::cvtsd2ss(XmmRegister dst, XmmRegister src) {
}
+void X86Assembler::cvtdq2ps(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0F);
+ EmitUint8(0x5B);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
void X86Assembler::cvtdq2pd(XmmRegister dst, XmmRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0xF3);
@@ -727,6 +983,14 @@ void X86Assembler::comiss(XmmRegister a, XmmRegister b) {
}
+void X86Assembler::comiss(XmmRegister a, const Address& b) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0F);
+ EmitUint8(0x2F);
+ EmitOperand(a, b);
+}
+
+
void X86Assembler::comisd(XmmRegister a, XmmRegister b) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0x66);
@@ -736,6 +1000,15 @@ void X86Assembler::comisd(XmmRegister a, XmmRegister b) {
}
+void X86Assembler::comisd(XmmRegister a, const Address& b) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x2F);
+ EmitOperand(a, b);
+}
+
+
void X86Assembler::ucomiss(XmmRegister a, XmmRegister b) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0x0F);
@@ -828,10 +1101,27 @@ void X86Assembler::xorpd(XmmRegister dst, XmmRegister src) {
}
-void X86Assembler::andps(XmmRegister dst, XmmRegister src) {
+void X86Assembler::xorps(XmmRegister dst, const Address& src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0x0F);
- EmitUint8(0x54);
+ EmitUint8(0x57);
+ EmitOperand(dst, src);
+}
+
+
+void X86Assembler::xorps(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0F);
+ EmitUint8(0x57);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::pxor(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0xEF);
EmitXmmRegisterOperand(dst, src);
}
@@ -845,23 +1135,75 @@ void X86Assembler::andpd(XmmRegister dst, XmmRegister src) {
}
-void X86Assembler::orpd(XmmRegister dst, XmmRegister src) {
+void X86Assembler::andpd(XmmRegister dst, const Address& src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0x66);
EmitUint8(0x0F);
- EmitUint8(0x56);
+ EmitUint8(0x54);
+ EmitOperand(dst, src);
+}
+
+
+void X86Assembler::andps(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0F);
+ EmitUint8(0x54);
EmitXmmRegisterOperand(dst, src);
}
-void X86Assembler::xorps(XmmRegister dst, const Address& src) {
+void X86Assembler::andps(XmmRegister dst, const Address& src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0x0F);
- EmitUint8(0x57);
+ EmitUint8(0x54);
EmitOperand(dst, src);
}
+void X86Assembler::pand(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0xDB);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::andnpd(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x55);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::andnps(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0F);
+ EmitUint8(0x55);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::pandn(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0xDF);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::orpd(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x56);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
void X86Assembler::orps(XmmRegister dst, XmmRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0x0F);
@@ -870,28 +1212,268 @@ void X86Assembler::orps(XmmRegister dst, XmmRegister src) {
}
-void X86Assembler::xorps(XmmRegister dst, XmmRegister src) {
+void X86Assembler::por(XmmRegister dst, XmmRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
EmitUint8(0x0F);
- EmitUint8(0x57);
+ EmitUint8(0xEB);
EmitXmmRegisterOperand(dst, src);
}
-void X86Assembler::andps(XmmRegister dst, const Address& src) {
+void X86Assembler::pavgb(XmmRegister dst, XmmRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
EmitUint8(0x0F);
- EmitUint8(0x54);
- EmitOperand(dst, src);
+ EmitUint8(0xE0);
+ EmitXmmRegisterOperand(dst, src);
}
-void X86Assembler::andpd(XmmRegister dst, const Address& src) {
+void X86Assembler::pavgw(XmmRegister dst, XmmRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0x66);
EmitUint8(0x0F);
- EmitUint8(0x54);
- EmitOperand(dst, src);
+ EmitUint8(0xE3);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::pcmpeqb(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x74);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::pcmpeqw(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x75);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::pcmpeqd(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x76);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::pcmpeqq(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x38);
+ EmitUint8(0x29);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::pcmpgtb(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x64);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::pcmpgtw(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x65);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::pcmpgtd(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x66);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::pcmpgtq(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x38);
+ EmitUint8(0x37);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::shufpd(XmmRegister dst, XmmRegister src, const Immediate& imm) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0xC6);
+ EmitXmmRegisterOperand(dst, src);
+ EmitUint8(imm.value());
+}
+
+
+void X86Assembler::shufps(XmmRegister dst, XmmRegister src, const Immediate& imm) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0F);
+ EmitUint8(0xC6);
+ EmitXmmRegisterOperand(dst, src);
+ EmitUint8(imm.value());
+}
+
+
+void X86Assembler::pshufd(XmmRegister dst, XmmRegister src, const Immediate& imm) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x70);
+ EmitXmmRegisterOperand(dst, src);
+ EmitUint8(imm.value());
+}
+
+
+void X86Assembler::punpcklbw(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x60);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::punpcklwd(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x61);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::punpckldq(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x62);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::punpcklqdq(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x6C);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::psllw(XmmRegister reg, const Immediate& shift_count) {
+ DCHECK(shift_count.is_uint8());
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x71);
+ EmitXmmRegisterOperand(6, reg);
+ EmitUint8(shift_count.value());
+}
+
+
+void X86Assembler::pslld(XmmRegister reg, const Immediate& shift_count) {
+ DCHECK(shift_count.is_uint8());
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x72);
+ EmitXmmRegisterOperand(6, reg);
+ EmitUint8(shift_count.value());
+}
+
+
+void X86Assembler::psllq(XmmRegister reg, const Immediate& shift_count) {
+ DCHECK(shift_count.is_uint8());
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x73);
+ EmitXmmRegisterOperand(6, reg);
+ EmitUint8(shift_count.value());
+}
+
+
+void X86Assembler::psraw(XmmRegister reg, const Immediate& shift_count) {
+ DCHECK(shift_count.is_uint8());
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x71);
+ EmitXmmRegisterOperand(4, reg);
+ EmitUint8(shift_count.value());
+}
+
+
+void X86Assembler::psrad(XmmRegister reg, const Immediate& shift_count) {
+ DCHECK(shift_count.is_uint8());
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x72);
+ EmitXmmRegisterOperand(4, reg);
+ EmitUint8(shift_count.value());
+}
+
+
+void X86Assembler::psrlw(XmmRegister reg, const Immediate& shift_count) {
+ DCHECK(shift_count.is_uint8());
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x71);
+ EmitXmmRegisterOperand(2, reg);
+ EmitUint8(shift_count.value());
+}
+
+
+void X86Assembler::psrld(XmmRegister reg, const Immediate& shift_count) {
+ DCHECK(shift_count.is_uint8());
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x72);
+ EmitXmmRegisterOperand(2, reg);
+ EmitUint8(shift_count.value());
+}
+
+
+void X86Assembler::psrlq(XmmRegister reg, const Immediate& shift_count) {
+ DCHECK(shift_count.is_uint8());
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x73);
+ EmitXmmRegisterOperand(2, reg);
+ EmitUint8(shift_count.value());
+}
+
+
+void X86Assembler::psrldq(XmmRegister reg, const Immediate& shift_count) {
+ DCHECK(shift_count.is_uint8());
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x73);
+ EmitXmmRegisterOperand(3, reg);
+ EmitUint8(shift_count.value());
}
@@ -1030,6 +1612,14 @@ void X86Assembler::xchgl(Register reg, const Address& address) {
}
+void X86Assembler::cmpb(const Address& address, const Immediate& imm) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x80);
+ EmitOperand(7, address);
+ EmitUint8(imm.value() & 0xFF);
+}
+
+
void X86Assembler::cmpw(const Address& address, const Immediate& imm) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0x66);
@@ -1123,6 +1713,23 @@ void X86Assembler::testl(Register reg, const Immediate& immediate) {
}
+void X86Assembler::testb(const Address& dst, const Immediate& imm) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF6);
+ EmitOperand(EAX, dst);
+ CHECK(imm.is_int8());
+ EmitUint8(imm.value() & 0xFF);
+}
+
+
+void X86Assembler::testl(const Address& dst, const Immediate& imm) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF7);
+ EmitOperand(0, dst);
+ EmitImmediate(imm);
+}
+
+
void X86Assembler::andl(Register dst, Register src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0x23);
@@ -1666,6 +2273,13 @@ void X86Assembler::jmp(NearLabel* label) {
}
+void X86Assembler::repne_scasb() {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF2);
+ EmitUint8(0xAE);
+}
+
+
void X86Assembler::repne_scasw() {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0x66);
@@ -1674,6 +2288,13 @@ void X86Assembler::repne_scasw() {
}
+void X86Assembler::repe_cmpsb() {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF2);
+ EmitUint8(0xA6);
+}
+
+
void X86Assembler::repe_cmpsw() {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0x66);
@@ -1689,6 +2310,13 @@ void X86Assembler::repe_cmpsl() {
}
+void X86Assembler::rep_movsb() {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF3);
+ EmitUint8(0xA4);
+}
+
+
void X86Assembler::rep_movsw() {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0x66);
@@ -1918,489 +2546,6 @@ void X86Assembler::EmitGenericShift(int reg_or_opcode,
EmitOperand(reg_or_opcode, operand);
}
-static dwarf::Reg DWARFReg(Register reg) {
- return dwarf::Reg::X86Core(static_cast<int>(reg));
-}
-
-constexpr size_t kFramePointerSize = 4;
-
-void X86Assembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
- const std::vector<ManagedRegister>& spill_regs,
- const ManagedRegisterEntrySpills& entry_spills) {
- DCHECK_EQ(buffer_.Size(), 0U); // Nothing emitted yet.
- cfi_.SetCurrentCFAOffset(4); // Return address on stack.
- CHECK_ALIGNED(frame_size, kStackAlignment);
- int gpr_count = 0;
- for (int i = spill_regs.size() - 1; i >= 0; --i) {
- Register spill = spill_regs.at(i).AsX86().AsCpuRegister();
- pushl(spill);
- gpr_count++;
- cfi_.AdjustCFAOffset(kFramePointerSize);
- cfi_.RelOffset(DWARFReg(spill), 0);
- }
-
- // return address then method on stack.
- int32_t adjust = frame_size - gpr_count * kFramePointerSize -
- kFramePointerSize /*method*/ -
- kFramePointerSize /*return address*/;
- addl(ESP, Immediate(-adjust));
- cfi_.AdjustCFAOffset(adjust);
- pushl(method_reg.AsX86().AsCpuRegister());
- cfi_.AdjustCFAOffset(kFramePointerSize);
- DCHECK_EQ(static_cast<size_t>(cfi_.GetCurrentCFAOffset()), frame_size);
-
- for (size_t i = 0; i < entry_spills.size(); ++i) {
- ManagedRegisterSpill spill = entry_spills.at(i);
- if (spill.AsX86().IsCpuRegister()) {
- int offset = frame_size + spill.getSpillOffset();
- movl(Address(ESP, offset), spill.AsX86().AsCpuRegister());
- } else {
- DCHECK(spill.AsX86().IsXmmRegister());
- if (spill.getSize() == 8) {
- movsd(Address(ESP, frame_size + spill.getSpillOffset()), spill.AsX86().AsXmmRegister());
- } else {
- CHECK_EQ(spill.getSize(), 4);
- movss(Address(ESP, frame_size + spill.getSpillOffset()), spill.AsX86().AsXmmRegister());
- }
- }
- }
-}
-
-void X86Assembler::RemoveFrame(size_t frame_size, const std::vector<ManagedRegister>& spill_regs) {
- CHECK_ALIGNED(frame_size, kStackAlignment);
- cfi_.RememberState();
- // -kFramePointerSize for ArtMethod*.
- int adjust = frame_size - spill_regs.size() * kFramePointerSize - kFramePointerSize;
- addl(ESP, Immediate(adjust));
- cfi_.AdjustCFAOffset(-adjust);
- for (size_t i = 0; i < spill_regs.size(); ++i) {
- Register spill = spill_regs.at(i).AsX86().AsCpuRegister();
- popl(spill);
- cfi_.AdjustCFAOffset(-static_cast<int>(kFramePointerSize));
- cfi_.Restore(DWARFReg(spill));
- }
- ret();
- // The CFI should be restored for any code that follows the exit block.
- cfi_.RestoreState();
- cfi_.DefCFAOffset(frame_size);
-}
-
-void X86Assembler::IncreaseFrameSize(size_t adjust) {
- CHECK_ALIGNED(adjust, kStackAlignment);
- addl(ESP, Immediate(-adjust));
- cfi_.AdjustCFAOffset(adjust);
-}
-
-void X86Assembler::DecreaseFrameSize(size_t adjust) {
- CHECK_ALIGNED(adjust, kStackAlignment);
- addl(ESP, Immediate(adjust));
- cfi_.AdjustCFAOffset(-adjust);
-}
-
-void X86Assembler::Store(FrameOffset offs, ManagedRegister msrc, size_t size) {
- X86ManagedRegister src = msrc.AsX86();
- if (src.IsNoRegister()) {
- CHECK_EQ(0u, size);
- } else if (src.IsCpuRegister()) {
- CHECK_EQ(4u, size);
- movl(Address(ESP, offs), src.AsCpuRegister());
- } else if (src.IsRegisterPair()) {
- CHECK_EQ(8u, size);
- movl(Address(ESP, offs), src.AsRegisterPairLow());
- movl(Address(ESP, FrameOffset(offs.Int32Value()+4)),
- src.AsRegisterPairHigh());
- } else if (src.IsX87Register()) {
- if (size == 4) {
- fstps(Address(ESP, offs));
- } else {
- fstpl(Address(ESP, offs));
- }
- } else {
- CHECK(src.IsXmmRegister());
- if (size == 4) {
- movss(Address(ESP, offs), src.AsXmmRegister());
- } else {
- movsd(Address(ESP, offs), src.AsXmmRegister());
- }
- }
-}
-
-void X86Assembler::StoreRef(FrameOffset dest, ManagedRegister msrc) {
- X86ManagedRegister src = msrc.AsX86();
- CHECK(src.IsCpuRegister());
- movl(Address(ESP, dest), src.AsCpuRegister());
-}
-
-void X86Assembler::StoreRawPtr(FrameOffset dest, ManagedRegister msrc) {
- X86ManagedRegister src = msrc.AsX86();
- CHECK(src.IsCpuRegister());
- movl(Address(ESP, dest), src.AsCpuRegister());
-}
-
-void X86Assembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
- ManagedRegister) {
- movl(Address(ESP, dest), Immediate(imm));
-}
-
-void X86Assembler::StoreImmediateToThread32(ThreadOffset<4> dest, uint32_t imm,
- ManagedRegister) {
- fs()->movl(Address::Absolute(dest), Immediate(imm));
-}
-
-void X86Assembler::StoreStackOffsetToThread32(ThreadOffset<4> thr_offs,
- FrameOffset fr_offs,
- ManagedRegister mscratch) {
- X86ManagedRegister scratch = mscratch.AsX86();
- CHECK(scratch.IsCpuRegister());
- leal(scratch.AsCpuRegister(), Address(ESP, fr_offs));
- fs()->movl(Address::Absolute(thr_offs), scratch.AsCpuRegister());
-}
-
-void X86Assembler::StoreStackPointerToThread32(ThreadOffset<4> thr_offs) {
- fs()->movl(Address::Absolute(thr_offs), ESP);
-}
-
-void X86Assembler::StoreSpanning(FrameOffset /*dst*/, ManagedRegister /*src*/,
- FrameOffset /*in_off*/, ManagedRegister /*scratch*/) {
- UNIMPLEMENTED(FATAL); // this case only currently exists for ARM
-}
-
-void X86Assembler::Load(ManagedRegister mdest, FrameOffset src, size_t size) {
- X86ManagedRegister dest = mdest.AsX86();
- if (dest.IsNoRegister()) {
- CHECK_EQ(0u, size);
- } else if (dest.IsCpuRegister()) {
- CHECK_EQ(4u, size);
- movl(dest.AsCpuRegister(), Address(ESP, src));
- } else if (dest.IsRegisterPair()) {
- CHECK_EQ(8u, size);
- movl(dest.AsRegisterPairLow(), Address(ESP, src));
- movl(dest.AsRegisterPairHigh(), Address(ESP, FrameOffset(src.Int32Value()+4)));
- } else if (dest.IsX87Register()) {
- if (size == 4) {
- flds(Address(ESP, src));
- } else {
- fldl(Address(ESP, src));
- }
- } else {
- CHECK(dest.IsXmmRegister());
- if (size == 4) {
- movss(dest.AsXmmRegister(), Address(ESP, src));
- } else {
- movsd(dest.AsXmmRegister(), Address(ESP, src));
- }
- }
-}
-
-void X86Assembler::LoadFromThread32(ManagedRegister mdest, ThreadOffset<4> src, size_t size) {
- X86ManagedRegister dest = mdest.AsX86();
- if (dest.IsNoRegister()) {
- CHECK_EQ(0u, size);
- } else if (dest.IsCpuRegister()) {
- CHECK_EQ(4u, size);
- fs()->movl(dest.AsCpuRegister(), Address::Absolute(src));
- } else if (dest.IsRegisterPair()) {
- CHECK_EQ(8u, size);
- fs()->movl(dest.AsRegisterPairLow(), Address::Absolute(src));
- fs()->movl(dest.AsRegisterPairHigh(), Address::Absolute(ThreadOffset<4>(src.Int32Value()+4)));
- } else if (dest.IsX87Register()) {
- if (size == 4) {
- fs()->flds(Address::Absolute(src));
- } else {
- fs()->fldl(Address::Absolute(src));
- }
- } else {
- CHECK(dest.IsXmmRegister());
- if (size == 4) {
- fs()->movss(dest.AsXmmRegister(), Address::Absolute(src));
- } else {
- fs()->movsd(dest.AsXmmRegister(), Address::Absolute(src));
- }
- }
-}
-
-void X86Assembler::LoadRef(ManagedRegister mdest, FrameOffset src) {
- X86ManagedRegister dest = mdest.AsX86();
- CHECK(dest.IsCpuRegister());
- movl(dest.AsCpuRegister(), Address(ESP, src));
-}
-
-void X86Assembler::LoadRef(ManagedRegister mdest, ManagedRegister base, MemberOffset offs,
- bool unpoison_reference) {
- X86ManagedRegister dest = mdest.AsX86();
- CHECK(dest.IsCpuRegister() && dest.IsCpuRegister());
- movl(dest.AsCpuRegister(), Address(base.AsX86().AsCpuRegister(), offs));
- if (unpoison_reference) {
- MaybeUnpoisonHeapReference(dest.AsCpuRegister());
- }
-}
-
-void X86Assembler::LoadRawPtr(ManagedRegister mdest, ManagedRegister base,
- Offset offs) {
- X86ManagedRegister dest = mdest.AsX86();
- CHECK(dest.IsCpuRegister() && dest.IsCpuRegister());
- movl(dest.AsCpuRegister(), Address(base.AsX86().AsCpuRegister(), offs));
-}
-
-void X86Assembler::LoadRawPtrFromThread32(ManagedRegister mdest,
- ThreadOffset<4> offs) {
- X86ManagedRegister dest = mdest.AsX86();
- CHECK(dest.IsCpuRegister());
- fs()->movl(dest.AsCpuRegister(), Address::Absolute(offs));
-}
-
-void X86Assembler::SignExtend(ManagedRegister mreg, size_t size) {
- X86ManagedRegister reg = mreg.AsX86();
- CHECK(size == 1 || size == 2) << size;
- CHECK(reg.IsCpuRegister()) << reg;
- if (size == 1) {
- movsxb(reg.AsCpuRegister(), reg.AsByteRegister());
- } else {
- movsxw(reg.AsCpuRegister(), reg.AsCpuRegister());
- }
-}
-
-void X86Assembler::ZeroExtend(ManagedRegister mreg, size_t size) {
- X86ManagedRegister reg = mreg.AsX86();
- CHECK(size == 1 || size == 2) << size;
- CHECK(reg.IsCpuRegister()) << reg;
- if (size == 1) {
- movzxb(reg.AsCpuRegister(), reg.AsByteRegister());
- } else {
- movzxw(reg.AsCpuRegister(), reg.AsCpuRegister());
- }
-}
-
-void X86Assembler::Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) {
- X86ManagedRegister dest = mdest.AsX86();
- X86ManagedRegister src = msrc.AsX86();
- if (!dest.Equals(src)) {
- if (dest.IsCpuRegister() && src.IsCpuRegister()) {
- movl(dest.AsCpuRegister(), src.AsCpuRegister());
- } else if (src.IsX87Register() && dest.IsXmmRegister()) {
- // Pass via stack and pop X87 register
- subl(ESP, Immediate(16));
- if (size == 4) {
- CHECK_EQ(src.AsX87Register(), ST0);
- fstps(Address(ESP, 0));
- movss(dest.AsXmmRegister(), Address(ESP, 0));
- } else {
- CHECK_EQ(src.AsX87Register(), ST0);
- fstpl(Address(ESP, 0));
- movsd(dest.AsXmmRegister(), Address(ESP, 0));
- }
- addl(ESP, Immediate(16));
- } else {
- // TODO: x87, SSE
- UNIMPLEMENTED(FATAL) << ": Move " << dest << ", " << src;
- }
- }
-}
-
-void X86Assembler::CopyRef(FrameOffset dest, FrameOffset src,
- ManagedRegister mscratch) {
- X86ManagedRegister scratch = mscratch.AsX86();
- CHECK(scratch.IsCpuRegister());
- movl(scratch.AsCpuRegister(), Address(ESP, src));
- movl(Address(ESP, dest), scratch.AsCpuRegister());
-}
-
-void X86Assembler::CopyRawPtrFromThread32(FrameOffset fr_offs,
- ThreadOffset<4> thr_offs,
- ManagedRegister mscratch) {
- X86ManagedRegister scratch = mscratch.AsX86();
- CHECK(scratch.IsCpuRegister());
- fs()->movl(scratch.AsCpuRegister(), Address::Absolute(thr_offs));
- Store(fr_offs, scratch, 4);
-}
-
-void X86Assembler::CopyRawPtrToThread32(ThreadOffset<4> thr_offs,
- FrameOffset fr_offs,
- ManagedRegister mscratch) {
- X86ManagedRegister scratch = mscratch.AsX86();
- CHECK(scratch.IsCpuRegister());
- Load(scratch, fr_offs, 4);
- fs()->movl(Address::Absolute(thr_offs), scratch.AsCpuRegister());
-}
-
-void X86Assembler::Copy(FrameOffset dest, FrameOffset src,
- ManagedRegister mscratch,
- size_t size) {
- X86ManagedRegister scratch = mscratch.AsX86();
- if (scratch.IsCpuRegister() && size == 8) {
- Load(scratch, src, 4);
- Store(dest, scratch, 4);
- Load(scratch, FrameOffset(src.Int32Value() + 4), 4);
- Store(FrameOffset(dest.Int32Value() + 4), scratch, 4);
- } else {
- Load(scratch, src, size);
- Store(dest, scratch, size);
- }
-}
-
-void X86Assembler::Copy(FrameOffset /*dst*/, ManagedRegister /*src_base*/, Offset /*src_offset*/,
- ManagedRegister /*scratch*/, size_t /*size*/) {
- UNIMPLEMENTED(FATAL);
-}
-
-void X86Assembler::Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src,
- ManagedRegister scratch, size_t size) {
- CHECK(scratch.IsNoRegister());
- CHECK_EQ(size, 4u);
- pushl(Address(ESP, src));
- popl(Address(dest_base.AsX86().AsCpuRegister(), dest_offset));
-}
-
-void X86Assembler::Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset,
- ManagedRegister mscratch, size_t size) {
- Register scratch = mscratch.AsX86().AsCpuRegister();
- CHECK_EQ(size, 4u);
- movl(scratch, Address(ESP, src_base));
- movl(scratch, Address(scratch, src_offset));
- movl(Address(ESP, dest), scratch);
-}
-
-void X86Assembler::Copy(ManagedRegister dest, Offset dest_offset,
- ManagedRegister src, Offset src_offset,
- ManagedRegister scratch, size_t size) {
- CHECK_EQ(size, 4u);
- CHECK(scratch.IsNoRegister());
- pushl(Address(src.AsX86().AsCpuRegister(), src_offset));
- popl(Address(dest.AsX86().AsCpuRegister(), dest_offset));
-}
-
-void X86Assembler::Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset,
- ManagedRegister mscratch, size_t size) {
- Register scratch = mscratch.AsX86().AsCpuRegister();
- CHECK_EQ(size, 4u);
- CHECK_EQ(dest.Int32Value(), src.Int32Value());
- movl(scratch, Address(ESP, src));
- pushl(Address(scratch, src_offset));
- popl(Address(scratch, dest_offset));
-}
-
-void X86Assembler::MemoryBarrier(ManagedRegister) {
- mfence();
-}
-
-void X86Assembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
- FrameOffset handle_scope_offset,
- ManagedRegister min_reg, bool null_allowed) {
- X86ManagedRegister out_reg = mout_reg.AsX86();
- X86ManagedRegister in_reg = min_reg.AsX86();
- CHECK(in_reg.IsCpuRegister());
- CHECK(out_reg.IsCpuRegister());
- VerifyObject(in_reg, null_allowed);
- if (null_allowed) {
- Label null_arg;
- if (!out_reg.Equals(in_reg)) {
- xorl(out_reg.AsCpuRegister(), out_reg.AsCpuRegister());
- }
- testl(in_reg.AsCpuRegister(), in_reg.AsCpuRegister());
- j(kZero, &null_arg);
- leal(out_reg.AsCpuRegister(), Address(ESP, handle_scope_offset));
- Bind(&null_arg);
- } else {
- leal(out_reg.AsCpuRegister(), Address(ESP, handle_scope_offset));
- }
-}
-
-void X86Assembler::CreateHandleScopeEntry(FrameOffset out_off,
- FrameOffset handle_scope_offset,
- ManagedRegister mscratch,
- bool null_allowed) {
- X86ManagedRegister scratch = mscratch.AsX86();
- CHECK(scratch.IsCpuRegister());
- if (null_allowed) {
- Label null_arg;
- movl(scratch.AsCpuRegister(), Address(ESP, handle_scope_offset));
- testl(scratch.AsCpuRegister(), scratch.AsCpuRegister());
- j(kZero, &null_arg);
- leal(scratch.AsCpuRegister(), Address(ESP, handle_scope_offset));
- Bind(&null_arg);
- } else {
- leal(scratch.AsCpuRegister(), Address(ESP, handle_scope_offset));
- }
- Store(out_off, scratch, 4);
-}
-
-// Given a handle scope entry, load the associated reference.
-void X86Assembler::LoadReferenceFromHandleScope(ManagedRegister mout_reg,
- ManagedRegister min_reg) {
- X86ManagedRegister out_reg = mout_reg.AsX86();
- X86ManagedRegister in_reg = min_reg.AsX86();
- CHECK(out_reg.IsCpuRegister());
- CHECK(in_reg.IsCpuRegister());
- Label null_arg;
- if (!out_reg.Equals(in_reg)) {
- xorl(out_reg.AsCpuRegister(), out_reg.AsCpuRegister());
- }
- testl(in_reg.AsCpuRegister(), in_reg.AsCpuRegister());
- j(kZero, &null_arg);
- movl(out_reg.AsCpuRegister(), Address(in_reg.AsCpuRegister(), 0));
- Bind(&null_arg);
-}
-
-void X86Assembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) {
- // TODO: not validating references
-}
-
-void X86Assembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) {
- // TODO: not validating references
-}
-
-void X86Assembler::Call(ManagedRegister mbase, Offset offset, ManagedRegister) {
- X86ManagedRegister base = mbase.AsX86();
- CHECK(base.IsCpuRegister());
- call(Address(base.AsCpuRegister(), offset.Int32Value()));
- // TODO: place reference map on call
-}
-
-void X86Assembler::Call(FrameOffset base, Offset offset, ManagedRegister mscratch) {
- Register scratch = mscratch.AsX86().AsCpuRegister();
- movl(scratch, Address(ESP, base));
- call(Address(scratch, offset));
-}
-
-void X86Assembler::CallFromThread32(ThreadOffset<4> offset, ManagedRegister /*mscratch*/) {
- fs()->call(Address::Absolute(offset));
-}
-
-void X86Assembler::GetCurrentThread(ManagedRegister tr) {
- fs()->movl(tr.AsX86().AsCpuRegister(),
- Address::Absolute(Thread::SelfOffset<4>()));
-}
-
-void X86Assembler::GetCurrentThread(FrameOffset offset,
- ManagedRegister mscratch) {
- X86ManagedRegister scratch = mscratch.AsX86();
- fs()->movl(scratch.AsCpuRegister(), Address::Absolute(Thread::SelfOffset<4>()));
- movl(Address(ESP, offset), scratch.AsCpuRegister());
-}
-
-void X86Assembler::ExceptionPoll(ManagedRegister /*scratch*/, size_t stack_adjust) {
- X86ExceptionSlowPath* slow = new (GetArena()) X86ExceptionSlowPath(stack_adjust);
- buffer_.EnqueueSlowPath(slow);
- fs()->cmpl(Address::Absolute(Thread::ExceptionOffset<4>()), Immediate(0));
- j(kNotEqual, slow->Entry());
-}
-
-void X86ExceptionSlowPath::Emit(Assembler *sasm) {
- X86Assembler* sp_asm = down_cast<X86Assembler*>(sasm);
-#define __ sp_asm->
- __ Bind(&entry_);
- // Note: the return value is dead
- if (stack_adjust_ != 0) { // Fix up the frame.
- __ DecreaseFrameSize(stack_adjust_);
- }
- // Pass exception as argument in EAX
- __ fs()->movl(EAX, Address::Absolute(Thread::ExceptionOffset<4>()));
- __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(4, pDeliverException)));
- // this call should never return
- __ int3();
-#undef __
-}
-
void X86Assembler::AddConstantArea() {
ArrayRef<const int32_t> area = constant_area_.GetBuffer();
// Generate the data for the literal area.
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index 8567ad2a17..a747cda7bd 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -20,13 +20,14 @@
#include <vector>
#include "base/arena_containers.h"
+#include "base/array_ref.h"
#include "base/bit_utils.h"
+#include "base/enums.h"
#include "base/macros.h"
#include "constants_x86.h"
#include "globals.h"
#include "managed_register_x86.h"
#include "offsets.h"
-#include "utils/array_ref.h"
#include "utils/assembler.h"
namespace art {
@@ -195,7 +196,7 @@ class Address : public Operand {
return result;
}
- static Address Absolute(ThreadOffset<4> addr) {
+ static Address Absolute(ThreadOffset32 addr) {
return Absolute(addr.Int32Value());
}
@@ -370,7 +371,12 @@ class X86Assembler FINAL : public Assembler {
void setb(Condition condition, Register dst);
- void movaps(XmmRegister dst, XmmRegister src);
+ void movaps(XmmRegister dst, XmmRegister src); // move
+ void movaps(XmmRegister dst, const Address& src); // load aligned
+ void movups(XmmRegister dst, const Address& src); // load unaligned
+ void movaps(const Address& dst, XmmRegister src); // store aligned
+ void movups(const Address& dst, XmmRegister src); // store unaligned
+
void movss(XmmRegister dst, const Address& src);
void movss(const Address& dst, XmmRegister src);
void movss(XmmRegister dst, XmmRegister src);
@@ -387,18 +393,24 @@ class X86Assembler FINAL : public Assembler {
void divss(XmmRegister dst, XmmRegister src);
void divss(XmmRegister dst, const Address& src);
+ void addps(XmmRegister dst, XmmRegister src); // no addr variant (for now)
+ void subps(XmmRegister dst, XmmRegister src);
+ void mulps(XmmRegister dst, XmmRegister src);
+ void divps(XmmRegister dst, XmmRegister src);
+
+ void movapd(XmmRegister dst, XmmRegister src); // move
+ void movapd(XmmRegister dst, const Address& src); // load aligned
+ void movupd(XmmRegister dst, const Address& src); // load unaligned
+ void movapd(const Address& dst, XmmRegister src); // store aligned
+ void movupd(const Address& dst, XmmRegister src); // store unaligned
+
void movsd(XmmRegister dst, const Address& src);
void movsd(const Address& dst, XmmRegister src);
void movsd(XmmRegister dst, XmmRegister src);
- void psrlq(XmmRegister reg, const Immediate& shift_count);
- void punpckldq(XmmRegister dst, XmmRegister src);
-
void movhpd(XmmRegister dst, const Address& src);
void movhpd(const Address& dst, XmmRegister src);
- void psrldq(XmmRegister reg, const Immediate& shift_count);
-
void addsd(XmmRegister dst, XmmRegister src);
void addsd(XmmRegister dst, const Address& src);
void subsd(XmmRegister dst, XmmRegister src);
@@ -408,6 +420,31 @@ class X86Assembler FINAL : public Assembler {
void divsd(XmmRegister dst, XmmRegister src);
void divsd(XmmRegister dst, const Address& src);
+ void addpd(XmmRegister dst, XmmRegister src); // no addr variant (for now)
+ void subpd(XmmRegister dst, XmmRegister src);
+ void mulpd(XmmRegister dst, XmmRegister src);
+ void divpd(XmmRegister dst, XmmRegister src);
+
+ void movdqa(XmmRegister dst, XmmRegister src); // move
+ void movdqa(XmmRegister dst, const Address& src); // load aligned
+ void movdqu(XmmRegister dst, const Address& src); // load unaligned
+ void movdqa(const Address& dst, XmmRegister src); // store aligned
+ void movdqu(const Address& dst, XmmRegister src); // store unaligned
+
+ void paddb(XmmRegister dst, XmmRegister src); // no addr variant (for now)
+ void psubb(XmmRegister dst, XmmRegister src);
+
+ void paddw(XmmRegister dst, XmmRegister src);
+ void psubw(XmmRegister dst, XmmRegister src);
+ void pmullw(XmmRegister dst, XmmRegister src);
+
+ void paddd(XmmRegister dst, XmmRegister src);
+ void psubd(XmmRegister dst, XmmRegister src);
+ void pmulld(XmmRegister dst, XmmRegister src);
+
+ void paddq(XmmRegister dst, XmmRegister src);
+ void psubq(XmmRegister dst, XmmRegister src);
+
void cvtsi2ss(XmmRegister dst, Register src);
void cvtsi2sd(XmmRegister dst, Register src);
@@ -420,10 +457,13 @@ class X86Assembler FINAL : public Assembler {
void cvttss2si(Register dst, XmmRegister src);
void cvttsd2si(Register dst, XmmRegister src);
+ void cvtdq2ps(XmmRegister dst, XmmRegister src);
void cvtdq2pd(XmmRegister dst, XmmRegister src);
void comiss(XmmRegister a, XmmRegister b);
+ void comiss(XmmRegister a, const Address& b);
void comisd(XmmRegister a, XmmRegister b);
+ void comisd(XmmRegister a, const Address& b);
void ucomiss(XmmRegister a, XmmRegister b);
void ucomiss(XmmRegister a, const Address& b);
void ucomisd(XmmRegister a, XmmRegister b);
@@ -439,14 +479,56 @@ class X86Assembler FINAL : public Assembler {
void xorpd(XmmRegister dst, XmmRegister src);
void xorps(XmmRegister dst, const Address& src);
void xorps(XmmRegister dst, XmmRegister src);
+ void pxor(XmmRegister dst, XmmRegister src); // no addr variant (for now)
void andpd(XmmRegister dst, XmmRegister src);
void andpd(XmmRegister dst, const Address& src);
void andps(XmmRegister dst, XmmRegister src);
void andps(XmmRegister dst, const Address& src);
+ void pand(XmmRegister dst, XmmRegister src); // no addr variant (for now)
- void orpd(XmmRegister dst, XmmRegister src);
+ void andnpd(XmmRegister dst, XmmRegister src); // no addr variant (for now)
+ void andnps(XmmRegister dst, XmmRegister src);
+ void pandn(XmmRegister dst, XmmRegister src);
+
+ void orpd(XmmRegister dst, XmmRegister src); // no addr variant (for now)
void orps(XmmRegister dst, XmmRegister src);
+ void por(XmmRegister dst, XmmRegister src);
+
+ void pavgb(XmmRegister dst, XmmRegister src); // no addr variant (for now)
+ void pavgw(XmmRegister dst, XmmRegister src);
+
+ void pcmpeqb(XmmRegister dst, XmmRegister src);
+ void pcmpeqw(XmmRegister dst, XmmRegister src);
+ void pcmpeqd(XmmRegister dst, XmmRegister src);
+ void pcmpeqq(XmmRegister dst, XmmRegister src);
+
+ void pcmpgtb(XmmRegister dst, XmmRegister src);
+ void pcmpgtw(XmmRegister dst, XmmRegister src);
+ void pcmpgtd(XmmRegister dst, XmmRegister src);
+ void pcmpgtq(XmmRegister dst, XmmRegister src); // SSE4.2
+
+ void shufpd(XmmRegister dst, XmmRegister src, const Immediate& imm);
+ void shufps(XmmRegister dst, XmmRegister src, const Immediate& imm);
+ void pshufd(XmmRegister dst, XmmRegister src, const Immediate& imm);
+
+ void punpcklbw(XmmRegister dst, XmmRegister src);
+ void punpcklwd(XmmRegister dst, XmmRegister src);
+ void punpckldq(XmmRegister dst, XmmRegister src);
+ void punpcklqdq(XmmRegister dst, XmmRegister src);
+
+ void psllw(XmmRegister reg, const Immediate& shift_count);
+ void pslld(XmmRegister reg, const Immediate& shift_count);
+ void psllq(XmmRegister reg, const Immediate& shift_count);
+
+ void psraw(XmmRegister reg, const Immediate& shift_count);
+ void psrad(XmmRegister reg, const Immediate& shift_count);
+ // no psraq
+
+ void psrlw(XmmRegister reg, const Immediate& shift_count);
+ void psrld(XmmRegister reg, const Immediate& shift_count);
+ void psrlq(XmmRegister reg, const Immediate& shift_count);
+ void psrldq(XmmRegister reg, const Immediate& shift_count);
void flds(const Address& src);
void fstps(const Address& dst);
@@ -479,6 +561,7 @@ class X86Assembler FINAL : public Assembler {
void xchgl(Register dst, Register src);
void xchgl(Register reg, const Address& address);
+ void cmpb(const Address& address, const Immediate& imm);
void cmpw(const Address& address, const Immediate& imm);
void cmpl(Register reg, const Immediate& imm);
@@ -492,6 +575,9 @@ class X86Assembler FINAL : public Assembler {
void testl(Register reg, const Immediate& imm);
void testl(Register reg1, const Address& address);
+ void testb(const Address& dst, const Immediate& imm);
+ void testl(const Address& dst, const Immediate& imm);
+
void andl(Register dst, const Immediate& imm);
void andl(Register dst, Register src);
void andl(Register dst, const Address& address);
@@ -585,9 +671,12 @@ class X86Assembler FINAL : public Assembler {
void jmp(Label* label);
void jmp(NearLabel* label);
+ void repne_scasb();
void repne_scasw();
+ void repe_cmpsb();
void repe_cmpsw();
void repe_cmpsl();
+ void rep_movsb();
void rep_movsw();
X86Assembler* lock();
@@ -628,123 +717,6 @@ class X86Assembler FINAL : public Assembler {
void Bind(NearLabel* label);
//
- // Overridden common assembler high-level functionality
- //
-
- // Emit code that will create an activation on the stack
- void BuildFrame(size_t frame_size, ManagedRegister method_reg,
- const std::vector<ManagedRegister>& callee_save_regs,
- const ManagedRegisterEntrySpills& entry_spills) OVERRIDE;
-
- // Emit code that will remove an activation from the stack
- void RemoveFrame(size_t frame_size, const std::vector<ManagedRegister>& callee_save_regs)
- OVERRIDE;
-
- void IncreaseFrameSize(size_t adjust) OVERRIDE;
- void DecreaseFrameSize(size_t adjust) OVERRIDE;
-
- // Store routines
- void Store(FrameOffset offs, ManagedRegister src, size_t size) OVERRIDE;
- void StoreRef(FrameOffset dest, ManagedRegister src) OVERRIDE;
- void StoreRawPtr(FrameOffset dest, ManagedRegister src) OVERRIDE;
-
- void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE;
-
- void StoreImmediateToThread32(ThreadOffset<4> dest, uint32_t imm, ManagedRegister scratch)
- OVERRIDE;
-
- void StoreStackOffsetToThread32(ThreadOffset<4> thr_offs, FrameOffset fr_offs,
- ManagedRegister scratch) OVERRIDE;
-
- void StoreStackPointerToThread32(ThreadOffset<4> thr_offs) OVERRIDE;
-
- void StoreSpanning(FrameOffset dest, ManagedRegister src, FrameOffset in_off,
- ManagedRegister scratch) OVERRIDE;
-
- // Load routines
- void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE;
-
- void LoadFromThread32(ManagedRegister dest, ThreadOffset<4> src, size_t size) OVERRIDE;
-
- void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
-
- void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs,
- bool unpoison_reference) OVERRIDE;
-
- void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;
-
- void LoadRawPtrFromThread32(ManagedRegister dest, ThreadOffset<4> offs) OVERRIDE;
-
- // Copying routines
- void Move(ManagedRegister dest, ManagedRegister src, size_t size) OVERRIDE;
-
- void CopyRawPtrFromThread32(FrameOffset fr_offs, ThreadOffset<4> thr_offs,
- ManagedRegister scratch) OVERRIDE;
-
- void CopyRawPtrToThread32(ThreadOffset<4> thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
- OVERRIDE;
-
- void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) OVERRIDE;
-
- void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) OVERRIDE;
-
- void Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset, ManagedRegister scratch,
- size_t size) OVERRIDE;
-
- void Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src, ManagedRegister scratch,
- size_t size) OVERRIDE;
-
- void Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset, ManagedRegister scratch,
- size_t size) OVERRIDE;
-
- void Copy(ManagedRegister dest, Offset dest_offset, ManagedRegister src, Offset src_offset,
- ManagedRegister scratch, size_t size) OVERRIDE;
-
- void Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset,
- ManagedRegister scratch, size_t size) OVERRIDE;
-
- void MemoryBarrier(ManagedRegister) OVERRIDE;
-
- // Sign extension
- void SignExtend(ManagedRegister mreg, size_t size) OVERRIDE;
-
- // Zero extension
- void ZeroExtend(ManagedRegister mreg, size_t size) OVERRIDE;
-
- // Exploit fast access in managed code to Thread::Current()
- void GetCurrentThread(ManagedRegister tr) OVERRIDE;
- void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
-
- // Set up out_reg to hold a Object** into the handle scope, or to be null if the
- // value is null and null_allowed. in_reg holds a possibly stale reference
- // that can be used to avoid loading the handle scope entry to see if the value is
- // null.
- void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
- ManagedRegister in_reg, bool null_allowed) OVERRIDE;
-
- // Set up out_off to hold a Object** into the handle scope, or to be null if the
- // value is null and null_allowed.
- void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset,
- ManagedRegister scratch, bool null_allowed) OVERRIDE;
-
- // src holds a handle scope entry (Object**) load this into dst
- void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
-
- // Heap::VerifyObject on src. In some cases (such as a reference to this) we
- // know that src may not be null.
- void VerifyObject(ManagedRegister src, bool could_be_null) OVERRIDE;
- void VerifyObject(FrameOffset src, bool could_be_null) OVERRIDE;
-
- // Call to address held at [base+offset]
- void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) OVERRIDE;
- void Call(FrameOffset base, Offset offset, ManagedRegister scratch) OVERRIDE;
- void CallFromThread32(ThreadOffset<4> offset, ManagedRegister scratch) OVERRIDE;
-
- // Generate code to check if Thread::Current()->exception_ is non-null
- // and branch to a ExceptionSlowPath if it is.
- void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) OVERRIDE;
-
- //
// Heap poisoning.
//
@@ -752,6 +724,12 @@ class X86Assembler FINAL : public Assembler {
void PoisonHeapReference(Register reg) { negl(reg); }
// Unpoison a heap reference contained in `reg`.
void UnpoisonHeapReference(Register reg) { negl(reg); }
+ // Poison a heap reference contained in `reg` if heap poisoning is enabled.
+ void MaybePoisonHeapReference(Register reg) {
+ if (kPoisonHeapReferences) {
+ PoisonHeapReference(reg);
+ }
+ }
// Unpoison a heap reference contained in `reg` if heap poisoning is enabled.
void MaybeUnpoisonHeapReference(Register reg) {
if (kPoisonHeapReferences) {
@@ -841,15 +819,6 @@ inline void X86Assembler::EmitOperandSizeOverride() {
EmitUint8(0x66);
}
-// Slowpath entered when Thread::Current()->_exception is non-null
-class X86ExceptionSlowPath FINAL : public SlowPath {
- public:
- explicit X86ExceptionSlowPath(size_t stack_adjust) : stack_adjust_(stack_adjust) {}
- virtual void Emit(Assembler *sp_asm) OVERRIDE;
- private:
- const size_t stack_adjust_;
-};
-
} // namespace x86
} // namespace art
diff --git a/compiler/utils/x86/assembler_x86_test.cc b/compiler/utils/x86/assembler_x86_test.cc
index 1d1df6e447..f75f972265 100644
--- a/compiler/utils/x86/assembler_x86_test.cc
+++ b/compiler/utils/x86/assembler_x86_test.cc
@@ -122,18 +122,6 @@ TEST_F(AssemblerX86Test, Movntl) {
DriverStr(expected, "movntl");
}
-TEST_F(AssemblerX86Test, psrlq) {
- GetAssembler()->psrlq(x86::XMM0, CreateImmediate(32));
- const char* expected = "psrlq $0x20, %xmm0\n";
- DriverStr(expected, "psrlq");
-}
-
-TEST_F(AssemblerX86Test, punpckldq) {
- GetAssembler()->punpckldq(x86::XMM0, x86::XMM1);
- const char* expected = "punpckldq %xmm1, %xmm0\n";
- DriverStr(expected, "punpckldq");
-}
-
TEST_F(AssemblerX86Test, LoadLongConstant) {
GetAssembler()->LoadLongConstant(x86::XMM0, 51);
const char* expected =
@@ -207,12 +195,24 @@ TEST_F(AssemblerX86Test, FPUIntegerStore) {
DriverStr(expected, "FPUIntegerStore");
}
+TEST_F(AssemblerX86Test, Repnescasb) {
+ GetAssembler()->repne_scasb();
+ const char* expected = "repne scasb\n";
+ DriverStr(expected, "Repnescasb");
+}
+
TEST_F(AssemblerX86Test, Repnescasw) {
GetAssembler()->repne_scasw();
const char* expected = "repne scasw\n";
DriverStr(expected, "Repnescasw");
}
+TEST_F(AssemblerX86Test, Repecmpsb) {
+ GetAssembler()->repe_cmpsb();
+ const char* expected = "repe cmpsb\n";
+ DriverStr(expected, "Repecmpsb");
+}
+
TEST_F(AssemblerX86Test, Repecmpsw) {
GetAssembler()->repe_cmpsw();
const char* expected = "repe cmpsw\n";
@@ -225,10 +225,10 @@ TEST_F(AssemblerX86Test, Repecmpsl) {
DriverStr(expected, "Repecmpsl");
}
-TEST_F(AssemblerX86Test, RepneScasw) {
- GetAssembler()->repne_scasw();
- const char* expected = "repne scasw\n";
- DriverStr(expected, "repne_scasw");
+TEST_F(AssemblerX86Test, RepMovsb) {
+ GetAssembler()->rep_movsb();
+ const char* expected = "rep movsb\n";
+ DriverStr(expected, "rep_movsb");
}
TEST_F(AssemblerX86Test, RepMovsw) {
@@ -322,18 +322,51 @@ TEST_F(AssemblerX86Test, RollImm) {
DriverStr(RepeatRI(&x86::X86Assembler::roll, 1U, "roll ${imm}, %{reg}"), "rolli");
}
+TEST_F(AssemblerX86Test, Cvtdq2ps) {
+ DriverStr(RepeatFF(&x86::X86Assembler::cvtdq2ps, "cvtdq2ps %{reg2}, %{reg1}"), "cvtdq2ps");
+}
+
+TEST_F(AssemblerX86Test, Cvtdq2pd) {
+ DriverStr(RepeatFF(&x86::X86Assembler::cvtdq2pd, "cvtdq2pd %{reg2}, %{reg1}"), "cvtdq2pd");
+}
+
+TEST_F(AssemblerX86Test, ComissAddr) {
+ GetAssembler()->comiss(x86::XmmRegister(x86::XMM0), x86::Address(x86::EAX, 0));
+ const char* expected = "comiss 0(%EAX), %xmm0\n";
+ DriverStr(expected, "comiss");
+}
+
TEST_F(AssemblerX86Test, UComissAddr) {
GetAssembler()->ucomiss(x86::XmmRegister(x86::XMM0), x86::Address(x86::EAX, 0));
const char* expected = "ucomiss 0(%EAX), %xmm0\n";
DriverStr(expected, "ucomiss");
}
+TEST_F(AssemblerX86Test, ComisdAddr) {
+ GetAssembler()->comisd(x86::XmmRegister(x86::XMM0), x86::Address(x86::EAX, 0));
+ const char* expected = "comisd 0(%EAX), %xmm0\n";
+ DriverStr(expected, "comisd");
+}
+
TEST_F(AssemblerX86Test, UComisdAddr) {
GetAssembler()->ucomisd(x86::XmmRegister(x86::XMM0), x86::Address(x86::EAX, 0));
const char* expected = "ucomisd 0(%EAX), %xmm0\n";
DriverStr(expected, "ucomisd");
}
+TEST_F(AssemblerX86Test, RoundSS) {
+ GetAssembler()->roundss(
+ x86::XmmRegister(x86::XMM0), x86::XmmRegister(x86::XMM1), x86::Immediate(1));
+ const char* expected = "roundss $1, %xmm1, %xmm0\n";
+ DriverStr(expected, "roundss");
+}
+
+TEST_F(AssemblerX86Test, RoundSD) {
+ GetAssembler()->roundsd(
+ x86::XmmRegister(x86::XMM0), x86::XmmRegister(x86::XMM1), x86::Immediate(1));
+ const char* expected = "roundsd $1, %xmm1, %xmm0\n";
+ DriverStr(expected, "roundsd");
+}
TEST_F(AssemblerX86Test, CmovlAddress) {
GetAssembler()->cmovl(x86::kEqual, x86::Register(x86::EAX), x86::Address(
@@ -350,6 +383,341 @@ TEST_F(AssemblerX86Test, CmovlAddress) {
DriverStr(expected, "cmovl_address");
}
+TEST_F(AssemblerX86Test, TestbAddressImmediate) {
+ GetAssembler()->testb(
+ x86::Address(x86::Register(x86::EDI), x86::Register(x86::EBX), x86::TIMES_4, 12),
+ x86::Immediate(1));
+ GetAssembler()->testb(
+ x86::Address(x86::Register(x86::ESP), FrameOffset(7)),
+ x86::Immediate(-128));
+ GetAssembler()->testb(
+ x86::Address(x86::Register(x86::EBX), MemberOffset(130)),
+ x86::Immediate(127));
+ const char* expected =
+ "testb $1, 0xc(%EDI,%EBX,4)\n"
+ "testb $-128, 0x7(%ESP)\n"
+ "testb $127, 0x82(%EBX)\n";
+
+ DriverStr(expected, "TestbAddressImmediate");
+}
+
+TEST_F(AssemblerX86Test, TestlAddressImmediate) {
+ GetAssembler()->testl(
+ x86::Address(x86::Register(x86::EDI), x86::Register(x86::EBX), x86::TIMES_4, 12),
+ x86::Immediate(1));
+ GetAssembler()->testl(
+ x86::Address(x86::Register(x86::ESP), FrameOffset(7)),
+ x86::Immediate(-100000));
+ GetAssembler()->testl(
+ x86::Address(x86::Register(x86::EBX), MemberOffset(130)),
+ x86::Immediate(77777777));
+ const char* expected =
+ "testl $1, 0xc(%EDI,%EBX,4)\n"
+ "testl $-100000, 0x7(%ESP)\n"
+ "testl $77777777, 0x82(%EBX)\n";
+
+ DriverStr(expected, "TestlAddressImmediate");
+}
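
Both tests above pair each testb/testl call with an AT&T-syntax expectation, so the immediate (source) operand prints first and memory operands print as offset(base,index,scale); the immediates are also chosen at the width limits, since testb encodes a signed 8-bit immediate (hence -128 and 127) while testl carries a full 32-bit one. The correspondence for the first testb case, as annotation only:

// testb(Address(EDI, EBX, TIMES_4, 12), Immediate(1))
//   is expected to disassemble as
//     testb $1, 0xc(%EDI,%EBX,4)
//   i.e. test immediate 1 against the byte at EDI + EBX*4 + 12 (= 0xc).
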
+
+TEST_F(AssemblerX86Test, Movaps) {
+ DriverStr(RepeatFF(&x86::X86Assembler::movaps, "movaps %{reg2}, %{reg1}"), "movaps");
+}
+
+TEST_F(AssemblerX86Test, MovapsAddr) {
+ GetAssembler()->movaps(x86::XmmRegister(x86::XMM0), x86::Address(x86::Register(x86::ESP), 4));
+ GetAssembler()->movaps(x86::Address(x86::Register(x86::ESP), 2), x86::XmmRegister(x86::XMM1));
+ const char* expected =
+ "movaps 0x4(%ESP), %xmm0\n"
+ "movaps %xmm1, 0x2(%ESP)\n";
+ DriverStr(expected, "movaps_address");
+}
+
+TEST_F(AssemblerX86Test, MovupsAddr) {
+ GetAssembler()->movups(x86::XmmRegister(x86::XMM0), x86::Address(x86::Register(x86::ESP), 4));
+ GetAssembler()->movups(x86::Address(x86::Register(x86::ESP), 2), x86::XmmRegister(x86::XMM1));
+ const char* expected =
+ "movups 0x4(%ESP), %xmm0\n"
+ "movups %xmm1, 0x2(%ESP)\n";
+ DriverStr(expected, "movups_address");
+}
+
+TEST_F(AssemblerX86Test, Movapd) {
+ DriverStr(RepeatFF(&x86::X86Assembler::movapd, "movapd %{reg2}, %{reg1}"), "movapd");
+}
+
+TEST_F(AssemblerX86Test, MovapdAddr) {
+ GetAssembler()->movapd(x86::XmmRegister(x86::XMM0), x86::Address(x86::Register(x86::ESP), 4));
+ GetAssembler()->movapd(x86::Address(x86::Register(x86::ESP), 2), x86::XmmRegister(x86::XMM1));
+ const char* expected =
+ "movapd 0x4(%ESP), %xmm0\n"
+ "movapd %xmm1, 0x2(%ESP)\n";
+ DriverStr(expected, "movapd_address");
+}
+
+TEST_F(AssemblerX86Test, MovupdAddr) {
+ GetAssembler()->movupd(x86::XmmRegister(x86::XMM0), x86::Address(x86::Register(x86::ESP), 4));
+ GetAssembler()->movupd(x86::Address(x86::Register(x86::ESP), 2), x86::XmmRegister(x86::XMM1));
+ const char* expected =
+ "movupd 0x4(%ESP), %xmm0\n"
+ "movupd %xmm1, 0x2(%ESP)\n";
+ DriverStr(expected, "movupd_address");
+}
+
+TEST_F(AssemblerX86Test, Movdqa) {
+ DriverStr(RepeatFF(&x86::X86Assembler::movdqa, "movdqa %{reg2}, %{reg1}"), "movdqa");
+}
+
+TEST_F(AssemblerX86Test, MovdqaAddr) {
+ GetAssembler()->movdqa(x86::XmmRegister(x86::XMM0), x86::Address(x86::Register(x86::ESP), 4));
+ GetAssembler()->movdqa(x86::Address(x86::Register(x86::ESP), 2), x86::XmmRegister(x86::XMM1));
+ const char* expected =
+ "movdqa 0x4(%ESP), %xmm0\n"
+ "movdqa %xmm1, 0x2(%ESP)\n";
+ DriverStr(expected, "movdqa_address");
+}
+
+TEST_F(AssemblerX86Test, MovdquAddr) {
+ GetAssembler()->movdqu(x86::XmmRegister(x86::XMM0), x86::Address(x86::Register(x86::ESP), 4));
+ GetAssembler()->movdqu(x86::Address(x86::Register(x86::ESP), 2), x86::XmmRegister(x86::XMM1));
+ const char* expected =
+ "movdqu 0x4(%ESP), %xmm0\n"
+ "movdqu %xmm1, 0x2(%ESP)\n";
+ DriverStr(expected, "movdqu_address");
+}
+
+TEST_F(AssemblerX86Test, AddPS) {
+ DriverStr(RepeatFF(&x86::X86Assembler::addps, "addps %{reg2}, %{reg1}"), "addps");
+}
+
+TEST_F(AssemblerX86Test, AddPD) {
+ DriverStr(RepeatFF(&x86::X86Assembler::addpd, "addpd %{reg2}, %{reg1}"), "addpd");
+}
+
+TEST_F(AssemblerX86Test, SubPS) {
+ DriverStr(RepeatFF(&x86::X86Assembler::subps, "subps %{reg2}, %{reg1}"), "subps");
+}
+
+TEST_F(AssemblerX86Test, SubPD) {
+ DriverStr(RepeatFF(&x86::X86Assembler::subpd, "subpd %{reg2}, %{reg1}"), "subpd");
+}
+
+TEST_F(AssemblerX86Test, MulPS) {
+ DriverStr(RepeatFF(&x86::X86Assembler::mulps, "mulps %{reg2}, %{reg1}"), "mulps");
+}
+
+TEST_F(AssemblerX86Test, MulPD) {
+ DriverStr(RepeatFF(&x86::X86Assembler::mulpd, "mulpd %{reg2}, %{reg1}"), "mulpd");
+}
+
+TEST_F(AssemblerX86Test, DivPS) {
+ DriverStr(RepeatFF(&x86::X86Assembler::divps, "divps %{reg2}, %{reg1}"), "divps");
+}
+
+TEST_F(AssemblerX86Test, DivPD) {
+ DriverStr(RepeatFF(&x86::X86Assembler::divpd, "divpd %{reg2}, %{reg1}"), "divpd");
+}
+
+TEST_F(AssemblerX86Test, PAddB) {
+ DriverStr(RepeatFF(&x86::X86Assembler::paddb, "paddb %{reg2}, %{reg1}"), "paddb");
+}
+
+TEST_F(AssemblerX86Test, PSubB) {
+ DriverStr(RepeatFF(&x86::X86Assembler::psubb, "psubb %{reg2}, %{reg1}"), "psubb");
+}
+
+TEST_F(AssemblerX86Test, PAddW) {
+ DriverStr(RepeatFF(&x86::X86Assembler::paddw, "paddw %{reg2}, %{reg1}"), "paddw");
+}
+
+TEST_F(AssemblerX86Test, PSubW) {
+ DriverStr(RepeatFF(&x86::X86Assembler::psubw, "psubw %{reg2}, %{reg1}"), "psubw");
+}
+
+TEST_F(AssemblerX86Test, PMullW) {
+ DriverStr(RepeatFF(&x86::X86Assembler::pmullw, "pmullw %{reg2}, %{reg1}"), "pmullw");
+}
+
+TEST_F(AssemblerX86Test, PAddD) {
+ DriverStr(RepeatFF(&x86::X86Assembler::paddd, "paddd %{reg2}, %{reg1}"), "paddd");
+}
+
+TEST_F(AssemblerX86Test, PSubD) {
+ DriverStr(RepeatFF(&x86::X86Assembler::psubd, "psubd %{reg2}, %{reg1}"), "psubd");
+}
+
+TEST_F(AssemblerX86Test, PMullD) {
+ DriverStr(RepeatFF(&x86::X86Assembler::pmulld, "pmulld %{reg2}, %{reg1}"), "pmulld");
+}
+
+TEST_F(AssemblerX86Test, PAddQ) {
+ DriverStr(RepeatFF(&x86::X86Assembler::paddq, "paddq %{reg2}, %{reg1}"), "paddq");
+}
+
+TEST_F(AssemblerX86Test, PSubQ) {
+ DriverStr(RepeatFF(&x86::X86Assembler::psubq, "psubq %{reg2}, %{reg1}"), "psubq");
+}
+
+TEST_F(AssemblerX86Test, XorPD) {
+ DriverStr(RepeatFF(&x86::X86Assembler::xorpd, "xorpd %{reg2}, %{reg1}"), "xorpd");
+}
+
+TEST_F(AssemblerX86Test, XorPS) {
+ DriverStr(RepeatFF(&x86::X86Assembler::xorps, "xorps %{reg2}, %{reg1}"), "xorps");
+}
+
+TEST_F(AssemblerX86Test, PXor) {
+ DriverStr(RepeatFF(&x86::X86Assembler::pxor, "pxor %{reg2}, %{reg1}"), "pxor");
+}
+
+TEST_F(AssemblerX86Test, AndPD) {
+ DriverStr(RepeatFF(&x86::X86Assembler::andpd, "andpd %{reg2}, %{reg1}"), "andpd");
+}
+
+TEST_F(AssemblerX86Test, AndPS) {
+ DriverStr(RepeatFF(&x86::X86Assembler::andps, "andps %{reg2}, %{reg1}"), "andps");
+}
+
+TEST_F(AssemblerX86Test, PAnd) {
+ DriverStr(RepeatFF(&x86::X86Assembler::pand, "pand %{reg2}, %{reg1}"), "pand");
+}
+
+TEST_F(AssemblerX86Test, AndnPD) {
+ DriverStr(RepeatFF(&x86::X86Assembler::andnpd, "andnpd %{reg2}, %{reg1}"), "andnpd");
+}
+
+TEST_F(AssemblerX86Test, AndnPS) {
+ DriverStr(RepeatFF(&x86::X86Assembler::andnps, "andnps %{reg2}, %{reg1}"), "andnps");
+}
+
+TEST_F(AssemblerX86Test, PAndn) {
+ DriverStr(RepeatFF(&x86::X86Assembler::pandn, "pandn %{reg2}, %{reg1}"), "pandn");
+}
+
+TEST_F(AssemblerX86Test, OrPD) {
+ DriverStr(RepeatFF(&x86::X86Assembler::orpd, "orpd %{reg2}, %{reg1}"), "orpd");
+}
+
+TEST_F(AssemblerX86Test, OrPS) {
+ DriverStr(RepeatFF(&x86::X86Assembler::orps, "orps %{reg2}, %{reg1}"), "orps");
+}
+
+TEST_F(AssemblerX86Test, POr) {
+ DriverStr(RepeatFF(&x86::X86Assembler::por, "por %{reg2}, %{reg1}"), "por");
+}
+
+TEST_F(AssemblerX86Test, PAvgB) {
+ DriverStr(RepeatFF(&x86::X86Assembler::pavgb, "pavgb %{reg2}, %{reg1}"), "pavgb");
+}
+
+TEST_F(AssemblerX86Test, PAvgW) {
+ DriverStr(RepeatFF(&x86::X86Assembler::pavgw, "pavgw %{reg2}, %{reg1}"), "pavgw");
+}
+
+TEST_F(AssemblerX86Test, PCmpeqB) {
+ DriverStr(RepeatFF(&x86::X86Assembler::pcmpeqb, "pcmpeqb %{reg2}, %{reg1}"), "cmpeqb");
+}
+
+TEST_F(AssemblerX86Test, PCmpeqW) {
+ DriverStr(RepeatFF(&x86::X86Assembler::pcmpeqw, "pcmpeqw %{reg2}, %{reg1}"), "cmpeqw");
+}
+
+TEST_F(AssemblerX86Test, PCmpeqD) {
+ DriverStr(RepeatFF(&x86::X86Assembler::pcmpeqd, "pcmpeqd %{reg2}, %{reg1}"), "cmpeqd");
+}
+
+TEST_F(AssemblerX86Test, PCmpeqQ) {
+ DriverStr(RepeatFF(&x86::X86Assembler::pcmpeqq, "pcmpeqq %{reg2}, %{reg1}"), "cmpeqq");
+}
+
+TEST_F(AssemblerX86Test, PCmpgtB) {
+ DriverStr(RepeatFF(&x86::X86Assembler::pcmpgtb, "pcmpgtb %{reg2}, %{reg1}"), "cmpgtb");
+}
+
+TEST_F(AssemblerX86Test, PCmpgtW) {
+ DriverStr(RepeatFF(&x86::X86Assembler::pcmpgtw, "pcmpgtw %{reg2}, %{reg1}"), "cmpgtw");
+}
+
+TEST_F(AssemblerX86Test, PCmpgtD) {
+ DriverStr(RepeatFF(&x86::X86Assembler::pcmpgtd, "pcmpgtd %{reg2}, %{reg1}"), "cmpgtd");
+}
+
+TEST_F(AssemblerX86Test, PCmpgtQ) {
+ DriverStr(RepeatFF(&x86::X86Assembler::pcmpgtq, "pcmpgtq %{reg2}, %{reg1}"), "cmpgtq");
+}
+
+TEST_F(AssemblerX86Test, ShufPS) {
+ DriverStr(RepeatFFI(&x86::X86Assembler::shufps, 1, "shufps ${imm}, %{reg2}, %{reg1}"), "shufps");
+}
+
+TEST_F(AssemblerX86Test, ShufPD) {
+ DriverStr(RepeatFFI(&x86::X86Assembler::shufpd, 1, "shufpd ${imm}, %{reg2}, %{reg1}"), "shufpd");
+}
+
+TEST_F(AssemblerX86Test, PShufD) {
+ DriverStr(RepeatFFI(&x86::X86Assembler::pshufd, 1, "pshufd ${imm}, %{reg2}, %{reg1}"), "pshufd");
+}
+
+TEST_F(AssemblerX86Test, Punpcklbw) {
+ DriverStr(RepeatFF(&x86::X86Assembler::punpcklbw, "punpcklbw %{reg2}, %{reg1}"), "punpcklbw");
+}
+
+TEST_F(AssemblerX86Test, Punpcklwd) {
+ DriverStr(RepeatFF(&x86::X86Assembler::punpcklwd, "punpcklwd %{reg2}, %{reg1}"), "punpcklwd");
+}
+
+TEST_F(AssemblerX86Test, Punpckldq) {
+ DriverStr(RepeatFF(&x86::X86Assembler::punpckldq, "punpckldq %{reg2}, %{reg1}"), "punpckldq");
+}
+
+TEST_F(AssemblerX86Test, Punpcklqdq) {
+ DriverStr(RepeatFF(&x86::X86Assembler::punpcklqdq, "punpcklqdq %{reg2}, %{reg1}"), "punpcklqdq");
+}
+
+TEST_F(AssemblerX86Test, psllw) {
+ GetAssembler()->psllw(x86::XMM0, CreateImmediate(16));
+ DriverStr("psllw $0x10, %xmm0\n", "psllwi");
+}
+
+TEST_F(AssemblerX86Test, pslld) {
+ GetAssembler()->pslld(x86::XMM0, CreateImmediate(16));
+ DriverStr("pslld $0x10, %xmm0\n", "pslldi");
+}
+
+TEST_F(AssemblerX86Test, psllq) {
+ GetAssembler()->psllq(x86::XMM0, CreateImmediate(16));
+ DriverStr("psllq $0x10, %xmm0\n", "psllqi");
+}
+
+TEST_F(AssemblerX86Test, psraw) {
+ GetAssembler()->psraw(x86::XMM0, CreateImmediate(16));
+ DriverStr("psraw $0x10, %xmm0\n", "psrawi");
+}
+
+TEST_F(AssemblerX86Test, psrad) {
+ GetAssembler()->psrad(x86::XMM0, CreateImmediate(16));
+ DriverStr("psrad $0x10, %xmm0\n", "psradi");
+}
+
+TEST_F(AssemblerX86Test, psrlw) {
+ GetAssembler()->psrlw(x86::XMM0, CreateImmediate(16));
+ DriverStr("psrlw $0x10, %xmm0\n", "psrlwi");
+}
+
+TEST_F(AssemblerX86Test, psrld) {
+ GetAssembler()->psrld(x86::XMM0, CreateImmediate(16));
+ DriverStr("psrld $0x10, %xmm0\n", "psrldi");
+}
+
+TEST_F(AssemblerX86Test, psrlq) {
+ GetAssembler()->psrlq(x86::XMM0, CreateImmediate(16));
+ DriverStr("psrlq $0x10, %xmm0\n", "psrlqi");
+}
+
+TEST_F(AssemblerX86Test, psrldq) {
+ GetAssembler()->psrldq(x86::XMM0, CreateImmediate(16));
+ DriverStr("psrldq $0x10, %xmm0\n", "psrldqi");
+}
+
/////////////////
// Near labels //
/////////////////
@@ -389,4 +757,10 @@ TEST_F(AssemblerX86Test, NearLabel) {
DriverStr(expected, "near_label");
}
+TEST_F(AssemblerX86Test, Cmpb) {
+ GetAssembler()->cmpb(x86::Address(x86::EDI, 128), x86::Immediate(0));
+ const char* expected = "cmpb $0, 128(%EDI)\n";
+ DriverStr(expected, "cmpb");
+}
+
} // namespace art
diff --git a/compiler/utils/x86/constants_x86.h b/compiler/utils/x86/constants_x86.h
index 2dfb65c479..0bc1560ed7 100644
--- a/compiler/utils/x86/constants_x86.h
+++ b/compiler/utils/x86/constants_x86.h
@@ -97,6 +97,8 @@ enum Condition {
kNotZero = kNotEqual,
kNegative = kSign,
kPositive = kNotSign,
+ kCarrySet = kBelow,
+ kCarryClear = kAboveEqual,
kUnordered = kParityEven
};
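
The two new aliases simply name the carry flag in terms of the existing unsigned-comparison conditions (CF=1 encodes as jb, CF=0 as jae), so a code generator can branch on a carry without remembering that mapping. A minimal, hypothetical usage sketch with the assembler's j()/Bind() API; the register choice and label are illustrative, not taken from this change:

// Minimal sketch, assumed to live inside namespace art::x86 with an
// X86Assembler* named `assembler`: branch when a 32-bit add carries,
// e.g. while lowering the low half of a 64-bit addition.
Label carry;
assembler->addl(EAX, EBX);        // CF is set on unsigned overflow
assembler->j(kCarrySet, &carry);  // same encoding as kBelow / jb
// ... no-carry path ...
assembler->Bind(&carry);          // carry path continues here
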
diff --git a/compiler/utils/x86/jni_macro_assembler_x86.cc b/compiler/utils/x86/jni_macro_assembler_x86.cc
new file mode 100644
index 0000000000..cfdf80ba50
--- /dev/null
+++ b/compiler/utils/x86/jni_macro_assembler_x86.cc
@@ -0,0 +1,587 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "jni_macro_assembler_x86.h"
+
+#include "utils/assembler.h"
+#include "base/casts.h"
+#include "entrypoints/quick/quick_entrypoints.h"
+#include "thread.h"
+
+namespace art {
+namespace x86 {
+
+// Slow path entered when Thread::Current()->exception_ is non-null.
+class X86ExceptionSlowPath FINAL : public SlowPath {
+ public:
+ explicit X86ExceptionSlowPath(size_t stack_adjust) : stack_adjust_(stack_adjust) {}
+ virtual void Emit(Assembler *sp_asm) OVERRIDE;
+ private:
+ const size_t stack_adjust_;
+};
+
+static dwarf::Reg DWARFReg(Register reg) {
+ return dwarf::Reg::X86Core(static_cast<int>(reg));
+}
+
+constexpr size_t kFramePointerSize = 4;
+
+#define __ asm_.
+
+void X86JNIMacroAssembler::BuildFrame(size_t frame_size,
+ ManagedRegister method_reg,
+ ArrayRef<const ManagedRegister> spill_regs,
+ const ManagedRegisterEntrySpills& entry_spills) {
+ DCHECK_EQ(CodeSize(), 0U); // Nothing emitted yet.
+ cfi().SetCurrentCFAOffset(4); // Return address on stack.
+ CHECK_ALIGNED(frame_size, kStackAlignment);
+ int gpr_count = 0;
+ for (int i = spill_regs.size() - 1; i >= 0; --i) {
+ Register spill = spill_regs[i].AsX86().AsCpuRegister();
+ __ pushl(spill);
+ gpr_count++;
+ cfi().AdjustCFAOffset(kFramePointerSize);
+ cfi().RelOffset(DWARFReg(spill), 0);
+ }
+
+ // return address then method on stack.
+ int32_t adjust = frame_size - gpr_count * kFramePointerSize -
+ kFramePointerSize /*method*/ -
+ kFramePointerSize /*return address*/;
+ __ addl(ESP, Immediate(-adjust));
+ cfi().AdjustCFAOffset(adjust);
+ __ pushl(method_reg.AsX86().AsCpuRegister());
+ cfi().AdjustCFAOffset(kFramePointerSize);
+ DCHECK_EQ(static_cast<size_t>(cfi().GetCurrentCFAOffset()), frame_size);
+
+ for (size_t i = 0; i < entry_spills.size(); ++i) {
+ ManagedRegisterSpill spill = entry_spills.at(i);
+ if (spill.AsX86().IsCpuRegister()) {
+ int offset = frame_size + spill.getSpillOffset();
+ __ movl(Address(ESP, offset), spill.AsX86().AsCpuRegister());
+ } else {
+ DCHECK(spill.AsX86().IsXmmRegister());
+ if (spill.getSize() == 8) {
+ __ movsd(Address(ESP, frame_size + spill.getSpillOffset()), spill.AsX86().AsXmmRegister());
+ } else {
+ CHECK_EQ(spill.getSize(), 4);
+ __ movss(Address(ESP, frame_size + spill.getSpillOffset()), spill.AsX86().AsXmmRegister());
+ }
+ }
+ }
+}
+
+void X86JNIMacroAssembler::RemoveFrame(size_t frame_size,
+ ArrayRef<const ManagedRegister> spill_regs) {
+ CHECK_ALIGNED(frame_size, kStackAlignment);
+ cfi().RememberState();
+ // -kFramePointerSize for ArtMethod*.
+ int adjust = frame_size - spill_regs.size() * kFramePointerSize - kFramePointerSize;
+ __ addl(ESP, Immediate(adjust));
+ cfi().AdjustCFAOffset(-adjust);
+ for (size_t i = 0; i < spill_regs.size(); ++i) {
+ Register spill = spill_regs[i].AsX86().AsCpuRegister();
+ __ popl(spill);
+ cfi().AdjustCFAOffset(-static_cast<int>(kFramePointerSize));
+ cfi().Restore(DWARFReg(spill));
+ }
+ __ ret();
+ // The CFI should be restored for any code that follows the exit block.
+ cfi().RestoreState();
+ cfi().DefCFAOffset(frame_size);
+}
+
+void X86JNIMacroAssembler::IncreaseFrameSize(size_t adjust) {
+ CHECK_ALIGNED(adjust, kStackAlignment);
+ __ addl(ESP, Immediate(-adjust));
+ cfi().AdjustCFAOffset(adjust);
+}
+
+static void DecreaseFrameSizeImpl(X86Assembler* assembler, size_t adjust) {
+ CHECK_ALIGNED(adjust, kStackAlignment);
+ assembler->addl(ESP, Immediate(adjust));
+ assembler->cfi().AdjustCFAOffset(-adjust);
+}
+
+void X86JNIMacroAssembler::DecreaseFrameSize(size_t adjust) {
+ DecreaseFrameSizeImpl(&asm_, adjust);
+}
+
+void X86JNIMacroAssembler::Store(FrameOffset offs, ManagedRegister msrc, size_t size) {
+ X86ManagedRegister src = msrc.AsX86();
+ if (src.IsNoRegister()) {
+ CHECK_EQ(0u, size);
+ } else if (src.IsCpuRegister()) {
+ CHECK_EQ(4u, size);
+ __ movl(Address(ESP, offs), src.AsCpuRegister());
+ } else if (src.IsRegisterPair()) {
+ CHECK_EQ(8u, size);
+ __ movl(Address(ESP, offs), src.AsRegisterPairLow());
+ __ movl(Address(ESP, FrameOffset(offs.Int32Value()+4)), src.AsRegisterPairHigh());
+ } else if (src.IsX87Register()) {
+ if (size == 4) {
+ __ fstps(Address(ESP, offs));
+ } else {
+ __ fstpl(Address(ESP, offs));
+ }
+ } else {
+ CHECK(src.IsXmmRegister());
+ if (size == 4) {
+ __ movss(Address(ESP, offs), src.AsXmmRegister());
+ } else {
+ __ movsd(Address(ESP, offs), src.AsXmmRegister());
+ }
+ }
+}
+
+void X86JNIMacroAssembler::StoreRef(FrameOffset dest, ManagedRegister msrc) {
+ X86ManagedRegister src = msrc.AsX86();
+ CHECK(src.IsCpuRegister());
+ __ movl(Address(ESP, dest), src.AsCpuRegister());
+}
+
+void X86JNIMacroAssembler::StoreRawPtr(FrameOffset dest, ManagedRegister msrc) {
+ X86ManagedRegister src = msrc.AsX86();
+ CHECK(src.IsCpuRegister());
+ __ movl(Address(ESP, dest), src.AsCpuRegister());
+}
+
+void X86JNIMacroAssembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister) {
+ __ movl(Address(ESP, dest), Immediate(imm));
+}
+
+void X86JNIMacroAssembler::StoreStackOffsetToThread(ThreadOffset32 thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister mscratch) {
+ X86ManagedRegister scratch = mscratch.AsX86();
+ CHECK(scratch.IsCpuRegister());
+ __ leal(scratch.AsCpuRegister(), Address(ESP, fr_offs));
+ __ fs()->movl(Address::Absolute(thr_offs), scratch.AsCpuRegister());
+}
+
+void X86JNIMacroAssembler::StoreStackPointerToThread(ThreadOffset32 thr_offs) {
+ __ fs()->movl(Address::Absolute(thr_offs), ESP);
+}
+
+void X86JNIMacroAssembler::StoreSpanning(FrameOffset /*dst*/,
+ ManagedRegister /*src*/,
+ FrameOffset /*in_off*/,
+ ManagedRegister /*scratch*/) {
+ UNIMPLEMENTED(FATAL); // this case only currently exists for ARM
+}
+
+void X86JNIMacroAssembler::Load(ManagedRegister mdest, FrameOffset src, size_t size) {
+ X86ManagedRegister dest = mdest.AsX86();
+ if (dest.IsNoRegister()) {
+ CHECK_EQ(0u, size);
+ } else if (dest.IsCpuRegister()) {
+ CHECK_EQ(4u, size);
+ __ movl(dest.AsCpuRegister(), Address(ESP, src));
+ } else if (dest.IsRegisterPair()) {
+ CHECK_EQ(8u, size);
+ __ movl(dest.AsRegisterPairLow(), Address(ESP, src));
+ __ movl(dest.AsRegisterPairHigh(), Address(ESP, FrameOffset(src.Int32Value()+4)));
+ } else if (dest.IsX87Register()) {
+ if (size == 4) {
+ __ flds(Address(ESP, src));
+ } else {
+ __ fldl(Address(ESP, src));
+ }
+ } else {
+ CHECK(dest.IsXmmRegister());
+ if (size == 4) {
+ __ movss(dest.AsXmmRegister(), Address(ESP, src));
+ } else {
+ __ movsd(dest.AsXmmRegister(), Address(ESP, src));
+ }
+ }
+}
+
+void X86JNIMacroAssembler::LoadFromThread(ManagedRegister mdest, ThreadOffset32 src, size_t size) {
+ X86ManagedRegister dest = mdest.AsX86();
+ if (dest.IsNoRegister()) {
+ CHECK_EQ(0u, size);
+ } else if (dest.IsCpuRegister()) {
+ if (size == 1u) {
+ __ fs()->movzxb(dest.AsCpuRegister(), Address::Absolute(src));
+ } else {
+ CHECK_EQ(4u, size);
+ __ fs()->movl(dest.AsCpuRegister(), Address::Absolute(src));
+ }
+ } else if (dest.IsRegisterPair()) {
+ CHECK_EQ(8u, size);
+ __ fs()->movl(dest.AsRegisterPairLow(), Address::Absolute(src));
+ __ fs()->movl(dest.AsRegisterPairHigh(), Address::Absolute(ThreadOffset32(src.Int32Value()+4)));
+ } else if (dest.IsX87Register()) {
+ if (size == 4) {
+ __ fs()->flds(Address::Absolute(src));
+ } else {
+ __ fs()->fldl(Address::Absolute(src));
+ }
+ } else {
+ CHECK(dest.IsXmmRegister());
+ if (size == 4) {
+ __ fs()->movss(dest.AsXmmRegister(), Address::Absolute(src));
+ } else {
+ __ fs()->movsd(dest.AsXmmRegister(), Address::Absolute(src));
+ }
+ }
+}
+
+void X86JNIMacroAssembler::LoadRef(ManagedRegister mdest, FrameOffset src) {
+ X86ManagedRegister dest = mdest.AsX86();
+ CHECK(dest.IsCpuRegister());
+ __ movl(dest.AsCpuRegister(), Address(ESP, src));
+}
+
+void X86JNIMacroAssembler::LoadRef(ManagedRegister mdest, ManagedRegister base, MemberOffset offs,
+ bool unpoison_reference) {
+ X86ManagedRegister dest = mdest.AsX86();
+ CHECK(dest.IsCpuRegister() && base.AsX86().IsCpuRegister());
+ __ movl(dest.AsCpuRegister(), Address(base.AsX86().AsCpuRegister(), offs));
+ if (unpoison_reference) {
+ __ MaybeUnpoisonHeapReference(dest.AsCpuRegister());
+ }
+}
+
+void X86JNIMacroAssembler::LoadRawPtr(ManagedRegister mdest,
+ ManagedRegister base,
+ Offset offs) {
+ X86ManagedRegister dest = mdest.AsX86();
+ CHECK(dest.IsCpuRegister() && base.AsX86().IsCpuRegister());
+ __ movl(dest.AsCpuRegister(), Address(base.AsX86().AsCpuRegister(), offs));
+}
+
+void X86JNIMacroAssembler::LoadRawPtrFromThread(ManagedRegister mdest, ThreadOffset32 offs) {
+ X86ManagedRegister dest = mdest.AsX86();
+ CHECK(dest.IsCpuRegister());
+ __ fs()->movl(dest.AsCpuRegister(), Address::Absolute(offs));
+}
+
+void X86JNIMacroAssembler::SignExtend(ManagedRegister mreg, size_t size) {
+ X86ManagedRegister reg = mreg.AsX86();
+ CHECK(size == 1 || size == 2) << size;
+ CHECK(reg.IsCpuRegister()) << reg;
+ if (size == 1) {
+ __ movsxb(reg.AsCpuRegister(), reg.AsByteRegister());
+ } else {
+ __ movsxw(reg.AsCpuRegister(), reg.AsCpuRegister());
+ }
+}
+
+void X86JNIMacroAssembler::ZeroExtend(ManagedRegister mreg, size_t size) {
+ X86ManagedRegister reg = mreg.AsX86();
+ CHECK(size == 1 || size == 2) << size;
+ CHECK(reg.IsCpuRegister()) << reg;
+ if (size == 1) {
+ __ movzxb(reg.AsCpuRegister(), reg.AsByteRegister());
+ } else {
+ __ movzxw(reg.AsCpuRegister(), reg.AsCpuRegister());
+ }
+}
+
+void X86JNIMacroAssembler::Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) {
+ X86ManagedRegister dest = mdest.AsX86();
+ X86ManagedRegister src = msrc.AsX86();
+ if (!dest.Equals(src)) {
+ if (dest.IsCpuRegister() && src.IsCpuRegister()) {
+ __ movl(dest.AsCpuRegister(), src.AsCpuRegister());
+ } else if (src.IsX87Register() && dest.IsXmmRegister()) {
+ // Pass via stack and pop X87 register
+ __ subl(ESP, Immediate(16));
+ if (size == 4) {
+ CHECK_EQ(src.AsX87Register(), ST0);
+ __ fstps(Address(ESP, 0));
+ __ movss(dest.AsXmmRegister(), Address(ESP, 0));
+ } else {
+ CHECK_EQ(src.AsX87Register(), ST0);
+ __ fstpl(Address(ESP, 0));
+ __ movsd(dest.AsXmmRegister(), Address(ESP, 0));
+ }
+ __ addl(ESP, Immediate(16));
+ } else {
+ // TODO: x87, SSE
+ UNIMPLEMENTED(FATAL) << ": Move " << dest << ", " << src;
+ }
+ }
+}
+
+void X86JNIMacroAssembler::CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister mscratch) {
+ X86ManagedRegister scratch = mscratch.AsX86();
+ CHECK(scratch.IsCpuRegister());
+ __ movl(scratch.AsCpuRegister(), Address(ESP, src));
+ __ movl(Address(ESP, dest), scratch.AsCpuRegister());
+}
+
+void X86JNIMacroAssembler::CopyRawPtrFromThread(FrameOffset fr_offs,
+ ThreadOffset32 thr_offs,
+ ManagedRegister mscratch) {
+ X86ManagedRegister scratch = mscratch.AsX86();
+ CHECK(scratch.IsCpuRegister());
+ __ fs()->movl(scratch.AsCpuRegister(), Address::Absolute(thr_offs));
+ Store(fr_offs, scratch, 4);
+}
+
+void X86JNIMacroAssembler::CopyRawPtrToThread(ThreadOffset32 thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister mscratch) {
+ X86ManagedRegister scratch = mscratch.AsX86();
+ CHECK(scratch.IsCpuRegister());
+ Load(scratch, fr_offs, 4);
+ __ fs()->movl(Address::Absolute(thr_offs), scratch.AsCpuRegister());
+}
+
+void X86JNIMacroAssembler::Copy(FrameOffset dest, FrameOffset src,
+ ManagedRegister mscratch,
+ size_t size) {
+ X86ManagedRegister scratch = mscratch.AsX86();
+ if (scratch.IsCpuRegister() && size == 8) {
+ Load(scratch, src, 4);
+ Store(dest, scratch, 4);
+ Load(scratch, FrameOffset(src.Int32Value() + 4), 4);
+ Store(FrameOffset(dest.Int32Value() + 4), scratch, 4);
+ } else {
+ Load(scratch, src, size);
+ Store(dest, scratch, size);
+ }
+}
+
+void X86JNIMacroAssembler::Copy(FrameOffset /*dst*/,
+ ManagedRegister /*src_base*/,
+ Offset /*src_offset*/,
+ ManagedRegister /*scratch*/,
+ size_t /*size*/) {
+ UNIMPLEMENTED(FATAL);
+}
+
+void X86JNIMacroAssembler::Copy(ManagedRegister dest_base,
+ Offset dest_offset,
+ FrameOffset src,
+ ManagedRegister scratch,
+ size_t size) {
+ CHECK(scratch.IsNoRegister());
+ CHECK_EQ(size, 4u);
+ __ pushl(Address(ESP, src));
+ __ popl(Address(dest_base.AsX86().AsCpuRegister(), dest_offset));
+}
+
+void X86JNIMacroAssembler::Copy(FrameOffset dest,
+ FrameOffset src_base,
+ Offset src_offset,
+ ManagedRegister mscratch,
+ size_t size) {
+ Register scratch = mscratch.AsX86().AsCpuRegister();
+ CHECK_EQ(size, 4u);
+ __ movl(scratch, Address(ESP, src_base));
+ __ movl(scratch, Address(scratch, src_offset));
+ __ movl(Address(ESP, dest), scratch);
+}
+
+void X86JNIMacroAssembler::Copy(ManagedRegister dest,
+ Offset dest_offset,
+ ManagedRegister src,
+ Offset src_offset,
+ ManagedRegister scratch,
+ size_t size) {
+ CHECK_EQ(size, 4u);
+ CHECK(scratch.IsNoRegister());
+ __ pushl(Address(src.AsX86().AsCpuRegister(), src_offset));
+ __ popl(Address(dest.AsX86().AsCpuRegister(), dest_offset));
+}
+
+void X86JNIMacroAssembler::Copy(FrameOffset dest,
+ Offset dest_offset,
+ FrameOffset src,
+ Offset src_offset,
+ ManagedRegister mscratch,
+ size_t size) {
+ Register scratch = mscratch.AsX86().AsCpuRegister();
+ CHECK_EQ(size, 4u);
+ CHECK_EQ(dest.Int32Value(), src.Int32Value());
+ __ movl(scratch, Address(ESP, src));
+ __ pushl(Address(scratch, src_offset));
+ __ popl(Address(scratch, dest_offset));
+}
+
+void X86JNIMacroAssembler::MemoryBarrier(ManagedRegister) {
+ __ mfence();
+}
+
+void X86JNIMacroAssembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
+ FrameOffset handle_scope_offset,
+ ManagedRegister min_reg,
+ bool null_allowed) {
+ X86ManagedRegister out_reg = mout_reg.AsX86();
+ X86ManagedRegister in_reg = min_reg.AsX86();
+ CHECK(in_reg.IsCpuRegister());
+ CHECK(out_reg.IsCpuRegister());
+ VerifyObject(in_reg, null_allowed);
+ if (null_allowed) {
+ Label null_arg;
+ if (!out_reg.Equals(in_reg)) {
+ __ xorl(out_reg.AsCpuRegister(), out_reg.AsCpuRegister());
+ }
+ __ testl(in_reg.AsCpuRegister(), in_reg.AsCpuRegister());
+ __ j(kZero, &null_arg);
+ __ leal(out_reg.AsCpuRegister(), Address(ESP, handle_scope_offset));
+ __ Bind(&null_arg);
+ } else {
+ __ leal(out_reg.AsCpuRegister(), Address(ESP, handle_scope_offset));
+ }
+}
+
+void X86JNIMacroAssembler::CreateHandleScopeEntry(FrameOffset out_off,
+ FrameOffset handle_scope_offset,
+ ManagedRegister mscratch,
+ bool null_allowed) {
+ X86ManagedRegister scratch = mscratch.AsX86();
+ CHECK(scratch.IsCpuRegister());
+ if (null_allowed) {
+ Label null_arg;
+ __ movl(scratch.AsCpuRegister(), Address(ESP, handle_scope_offset));
+ __ testl(scratch.AsCpuRegister(), scratch.AsCpuRegister());
+ __ j(kZero, &null_arg);
+ __ leal(scratch.AsCpuRegister(), Address(ESP, handle_scope_offset));
+ __ Bind(&null_arg);
+ } else {
+ __ leal(scratch.AsCpuRegister(), Address(ESP, handle_scope_offset));
+ }
+ Store(out_off, scratch, 4);
+}
+
+// Given a handle scope entry, load the associated reference.
+void X86JNIMacroAssembler::LoadReferenceFromHandleScope(ManagedRegister mout_reg,
+ ManagedRegister min_reg) {
+ X86ManagedRegister out_reg = mout_reg.AsX86();
+ X86ManagedRegister in_reg = min_reg.AsX86();
+ CHECK(out_reg.IsCpuRegister());
+ CHECK(in_reg.IsCpuRegister());
+ Label null_arg;
+ if (!out_reg.Equals(in_reg)) {
+ __ xorl(out_reg.AsCpuRegister(), out_reg.AsCpuRegister());
+ }
+ __ testl(in_reg.AsCpuRegister(), in_reg.AsCpuRegister());
+ __ j(kZero, &null_arg);
+ __ movl(out_reg.AsCpuRegister(), Address(in_reg.AsCpuRegister(), 0));
+ __ Bind(&null_arg);
+}
+
+void X86JNIMacroAssembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) {
+ // TODO: not validating references
+}
+
+void X86JNIMacroAssembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) {
+ // TODO: not validating references
+}
+
+void X86JNIMacroAssembler::Call(ManagedRegister mbase, Offset offset, ManagedRegister) {
+ X86ManagedRegister base = mbase.AsX86();
+ CHECK(base.IsCpuRegister());
+ __ call(Address(base.AsCpuRegister(), offset.Int32Value()));
+ // TODO: place reference map on call
+}
+
+void X86JNIMacroAssembler::Call(FrameOffset base, Offset offset, ManagedRegister mscratch) {
+ Register scratch = mscratch.AsX86().AsCpuRegister();
+ __ movl(scratch, Address(ESP, base));
+ __ call(Address(scratch, offset));
+}
+
+void X86JNIMacroAssembler::CallFromThread(ThreadOffset32 offset, ManagedRegister /*mscratch*/) {
+ __ fs()->call(Address::Absolute(offset));
+}
+
+void X86JNIMacroAssembler::GetCurrentThread(ManagedRegister tr) {
+ __ fs()->movl(tr.AsX86().AsCpuRegister(),
+ Address::Absolute(Thread::SelfOffset<kX86PointerSize>()));
+}
+
+void X86JNIMacroAssembler::GetCurrentThread(FrameOffset offset,
+ ManagedRegister mscratch) {
+ X86ManagedRegister scratch = mscratch.AsX86();
+ __ fs()->movl(scratch.AsCpuRegister(), Address::Absolute(Thread::SelfOffset<kX86PointerSize>()));
+ __ movl(Address(ESP, offset), scratch.AsCpuRegister());
+}
+
+void X86JNIMacroAssembler::ExceptionPoll(ManagedRegister /*scratch*/, size_t stack_adjust) {
+ X86ExceptionSlowPath* slow = new (__ GetArena()) X86ExceptionSlowPath(stack_adjust);
+ __ GetBuffer()->EnqueueSlowPath(slow);
+ __ fs()->cmpl(Address::Absolute(Thread::ExceptionOffset<kX86PointerSize>()), Immediate(0));
+ __ j(kNotEqual, slow->Entry());
+}
+
+std::unique_ptr<JNIMacroLabel> X86JNIMacroAssembler::CreateLabel() {
+ return std::unique_ptr<JNIMacroLabel>(new X86JNIMacroLabel());
+}
+
+void X86JNIMacroAssembler::Jump(JNIMacroLabel* label) {
+ CHECK(label != nullptr);
+ __ jmp(X86JNIMacroLabel::Cast(label)->AsX86());
+}
+
+void X86JNIMacroAssembler::Jump(JNIMacroLabel* label,
+ JNIMacroUnaryCondition condition,
+ ManagedRegister test) {
+ CHECK(label != nullptr);
+
+ art::x86::Condition x86_cond;
+ switch (condition) {
+ case JNIMacroUnaryCondition::kZero:
+ x86_cond = art::x86::kZero;
+ break;
+ case JNIMacroUnaryCondition::kNotZero:
+ x86_cond = art::x86::kNotZero;
+ break;
+ default:
+ LOG(FATAL) << "Not implemented condition: " << static_cast<int>(condition);
+ UNREACHABLE();
+ }
+
+ // TEST reg, reg
+ // Jcc <Offset>
+ __ testl(test.AsX86().AsCpuRegister(), test.AsX86().AsCpuRegister());
+ __ j(x86_cond, X86JNIMacroLabel::Cast(label)->AsX86());
+
+
+ // X86 also has JCXZ/JECXZ, but they are not worth implementing here
+ // because we are unlikely to codegen an ECX + kZero check.
+}
+
+void X86JNIMacroAssembler::Bind(JNIMacroLabel* label) {
+ CHECK(label != nullptr);
+ __ Bind(X86JNIMacroLabel::Cast(label)->AsX86());
+}
+
+#undef __
+
+void X86ExceptionSlowPath::Emit(Assembler *sasm) {
+ X86Assembler* sp_asm = down_cast<X86Assembler*>(sasm);
+#define __ sp_asm->
+ __ Bind(&entry_);
+ // Note: the return value is dead
+ if (stack_adjust_ != 0) { // Fix up the frame.
+ DecreaseFrameSizeImpl(sp_asm, stack_adjust_);
+ }
+ // Pass exception as argument in EAX
+ __ fs()->movl(EAX, Address::Absolute(Thread::ExceptionOffset<kX86PointerSize>()));
+ __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86PointerSize, pDeliverException)));
+ // this call should never return
+ __ int3();
+#undef __
+}
+
+} // namespace x86
+} // namespace art
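
The file above only implements the architecture-neutral JNIMacroAssembler contract; JNI stub generation is expected to drive it through that interface rather than through x86 conditions directly. A caller-side sketch of the new label API (the `jni_asm` pointer and `ref_reg` register names are illustrative assumptions, not part of this change):

// Sketch: skip a block when ref_reg is null, via the portable label API.
// On x86 the Jump() below expands to the testl + jz pair emitted above.
std::unique_ptr<JNIMacroLabel> skip = jni_asm->CreateLabel();
jni_asm->Jump(skip.get(), JNIMacroUnaryCondition::kZero, ref_reg);
// ... code that must only run when ref_reg is non-null ...
jni_asm->Bind(skip.get());
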
diff --git a/compiler/utils/x86/jni_macro_assembler_x86.h b/compiler/utils/x86/jni_macro_assembler_x86.h
new file mode 100644
index 0000000000..8ffda6425e
--- /dev/null
+++ b/compiler/utils/x86/jni_macro_assembler_x86.h
@@ -0,0 +1,183 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_UTILS_X86_JNI_MACRO_ASSEMBLER_X86_H_
+#define ART_COMPILER_UTILS_X86_JNI_MACRO_ASSEMBLER_X86_H_
+
+#include <vector>
+
+#include "assembler_x86.h"
+#include "base/arena_containers.h"
+#include "base/array_ref.h"
+#include "base/enums.h"
+#include "base/macros.h"
+#include "offsets.h"
+#include "utils/jni_macro_assembler.h"
+
+namespace art {
+namespace x86 {
+
+class X86JNIMacroLabel;
+
+class X86JNIMacroAssembler FINAL : public JNIMacroAssemblerFwd<X86Assembler, PointerSize::k32> {
+ public:
+ explicit X86JNIMacroAssembler(ArenaAllocator* arena) : JNIMacroAssemblerFwd(arena) {}
+ virtual ~X86JNIMacroAssembler() {}
+
+ //
+ // Overridden common assembler high-level functionality
+ //
+
+ // Emit code that will create an activation on the stack
+ void BuildFrame(size_t frame_size,
+ ManagedRegister method_reg,
+ ArrayRef<const ManagedRegister> callee_save_regs,
+ const ManagedRegisterEntrySpills& entry_spills) OVERRIDE;
+
+ // Emit code that will remove an activation from the stack
+ void RemoveFrame(size_t frame_size, ArrayRef<const ManagedRegister> callee_save_regs)
+ OVERRIDE;
+
+ void IncreaseFrameSize(size_t adjust) OVERRIDE;
+ void DecreaseFrameSize(size_t adjust) OVERRIDE;
+
+ // Store routines
+ void Store(FrameOffset offs, ManagedRegister src, size_t size) OVERRIDE;
+ void StoreRef(FrameOffset dest, ManagedRegister src) OVERRIDE;
+ void StoreRawPtr(FrameOffset dest, ManagedRegister src) OVERRIDE;
+
+ void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE;
+
+ void StoreStackOffsetToThread(ThreadOffset32 thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister scratch) OVERRIDE;
+
+ void StoreStackPointerToThread(ThreadOffset32 thr_offs) OVERRIDE;
+
+ void StoreSpanning(FrameOffset dest, ManagedRegister src, FrameOffset in_off,
+ ManagedRegister scratch) OVERRIDE;
+
+ // Load routines
+ void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE;
+
+ void LoadFromThread(ManagedRegister dest, ThreadOffset32 src, size_t size) OVERRIDE;
+
+ void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
+
+ void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs,
+ bool unpoison_reference) OVERRIDE;
+
+ void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;
+
+ void LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset32 offs) OVERRIDE;
+
+ // Copying routines
+ void Move(ManagedRegister dest, ManagedRegister src, size_t size) OVERRIDE;
+
+ void CopyRawPtrFromThread(FrameOffset fr_offs,
+ ThreadOffset32 thr_offs,
+ ManagedRegister scratch) OVERRIDE;
+
+ void CopyRawPtrToThread(ThreadOffset32 thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
+ OVERRIDE;
+
+ void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) OVERRIDE;
+
+ void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) OVERRIDE;
+
+ void Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset, ManagedRegister scratch,
+ size_t size) OVERRIDE;
+
+ void Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src, ManagedRegister scratch,
+ size_t size) OVERRIDE;
+
+ void Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset, ManagedRegister scratch,
+ size_t size) OVERRIDE;
+
+ void Copy(ManagedRegister dest, Offset dest_offset, ManagedRegister src, Offset src_offset,
+ ManagedRegister scratch, size_t size) OVERRIDE;
+
+ void Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset,
+ ManagedRegister scratch, size_t size) OVERRIDE;
+
+ void MemoryBarrier(ManagedRegister) OVERRIDE;
+
+ // Sign extension
+ void SignExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+
+ // Zero extension
+ void ZeroExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+
+ // Exploit fast access in managed code to Thread::Current()
+ void GetCurrentThread(ManagedRegister tr) OVERRIDE;
+ void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
+
+ // Set up out_reg to hold an Object** into the handle scope, or to be null if the
+ // value is null and null_allowed. in_reg holds a possibly stale reference
+ // that can be used to avoid loading the handle scope entry to see if the value is
+ // null.
+ void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
+ ManagedRegister in_reg, bool null_allowed) OVERRIDE;
+
+ // Set up out_off to hold an Object** into the handle scope, or to be null if the
+ // value is null and null_allowed.
+ void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset,
+ ManagedRegister scratch, bool null_allowed) OVERRIDE;
+
+ // src holds a handle scope entry (Object**); load the referenced object into dst.
+ void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
+
+ // Heap::VerifyObject on src. In some cases (such as a reference to this) we
+ // know that src may not be null.
+ void VerifyObject(ManagedRegister src, bool could_be_null) OVERRIDE;
+ void VerifyObject(FrameOffset src, bool could_be_null) OVERRIDE;
+
+ // Call to address held at [base+offset]
+ void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) OVERRIDE;
+ void Call(FrameOffset base, Offset offset, ManagedRegister scratch) OVERRIDE;
+ void CallFromThread(ThreadOffset32 offset, ManagedRegister scratch) OVERRIDE;
+
+ // Generate code to check if Thread::Current()->exception_ is non-null
+ // and branch to an ExceptionSlowPath if it is.
+ void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) OVERRIDE;
+
+ // Create a new label that can be used with Jump/Bind calls.
+ std::unique_ptr<JNIMacroLabel> CreateLabel() OVERRIDE;
+ // Emit an unconditional jump to the label.
+ void Jump(JNIMacroLabel* label) OVERRIDE;
+ // Emit a conditional jump to the label by applying a unary condition test to the register.
+ void Jump(JNIMacroLabel* label, JNIMacroUnaryCondition cond, ManagedRegister test) OVERRIDE;
+ // Code at this offset will serve as the target for the Jump call.
+ void Bind(JNIMacroLabel* label) OVERRIDE;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(X86JNIMacroAssembler);
+};
+
+class X86JNIMacroLabel FINAL
+ : public JNIMacroLabelCommon<X86JNIMacroLabel,
+ art::Label,
+ kX86> {
+ public:
+ art::Label* AsX86() {
+ return AsPlatformLabel();
+ }
+};
+
+} // namespace x86
+} // namespace art
+
+#endif // ART_COMPILER_UTILS_X86_JNI_MACRO_ASSEMBLER_X86_H_
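
For orientation, a minimal construction-and-use sketch of the interface declared above; the ArenaAllocator name and the offsets are assumptions made for illustration, not values from this change:

// Sketch only: assumes an ArenaAllocator `allocator` is in scope.
x86::X86JNIMacroAssembler jni_asm(&allocator);
ManagedRegister eax = x86::X86ManagedRegister::FromCpuRegister(x86::EAX);
jni_asm.Store(FrameOffset(16), eax, /*size=*/ 4u);      // movl %eax, 16(%esp)
jni_asm.LoadRawPtrFromThread(eax, ThreadOffset32(8));   // fs-relative load (offset illustrative)
jni_asm.StoreStackPointerToThread(ThreadOffset32(12));  // movl %esp to fs:12 (offset illustrative)
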
diff --git a/compiler/utils/x86/managed_register_x86.h b/compiler/utils/x86/managed_register_x86.h
index fc20d7e208..c0c2b650e9 100644
--- a/compiler/utils/x86/managed_register_x86.h
+++ b/compiler/utils/x86/managed_register_x86.h
@@ -89,64 +89,64 @@ const int kNumberOfAllocIds = kNumberOfCpuAllocIds + kNumberOfXmmAllocIds +
// There is a one-to-one mapping between ManagedRegister and register id.
class X86ManagedRegister : public ManagedRegister {
public:
- ByteRegister AsByteRegister() const {
+ constexpr ByteRegister AsByteRegister() const {
CHECK(IsCpuRegister());
CHECK_LT(AsCpuRegister(), ESP); // ESP, EBP, ESI and EDI cannot be encoded as byte registers.
return static_cast<ByteRegister>(id_);
}
- Register AsCpuRegister() const {
+ constexpr Register AsCpuRegister() const {
CHECK(IsCpuRegister());
return static_cast<Register>(id_);
}
- XmmRegister AsXmmRegister() const {
+ constexpr XmmRegister AsXmmRegister() const {
CHECK(IsXmmRegister());
return static_cast<XmmRegister>(id_ - kNumberOfCpuRegIds);
}
- X87Register AsX87Register() const {
+ constexpr X87Register AsX87Register() const {
CHECK(IsX87Register());
return static_cast<X87Register>(id_ -
(kNumberOfCpuRegIds + kNumberOfXmmRegIds));
}
- Register AsRegisterPairLow() const {
+ constexpr Register AsRegisterPairLow() const {
CHECK(IsRegisterPair());
// Appropriate mapping of register ids allows to use AllocIdLow().
return FromRegId(AllocIdLow()).AsCpuRegister();
}
- Register AsRegisterPairHigh() const {
+ constexpr Register AsRegisterPairHigh() const {
CHECK(IsRegisterPair());
// Appropriate mapping of register ids allows to use AllocIdHigh().
return FromRegId(AllocIdHigh()).AsCpuRegister();
}
- RegisterPair AsRegisterPair() const {
+ constexpr RegisterPair AsRegisterPair() const {
CHECK(IsRegisterPair());
return static_cast<RegisterPair>(id_ -
(kNumberOfCpuRegIds + kNumberOfXmmRegIds + kNumberOfX87RegIds));
}
- bool IsCpuRegister() const {
+ constexpr bool IsCpuRegister() const {
CHECK(IsValidManagedRegister());
return (0 <= id_) && (id_ < kNumberOfCpuRegIds);
}
- bool IsXmmRegister() const {
+ constexpr bool IsXmmRegister() const {
CHECK(IsValidManagedRegister());
const int test = id_ - kNumberOfCpuRegIds;
return (0 <= test) && (test < kNumberOfXmmRegIds);
}
- bool IsX87Register() const {
+ constexpr bool IsX87Register() const {
CHECK(IsValidManagedRegister());
const int test = id_ - (kNumberOfCpuRegIds + kNumberOfXmmRegIds);
return (0 <= test) && (test < kNumberOfX87RegIds);
}
- bool IsRegisterPair() const {
+ constexpr bool IsRegisterPair() const {
CHECK(IsValidManagedRegister());
const int test = id_ -
(kNumberOfCpuRegIds + kNumberOfXmmRegIds + kNumberOfX87RegIds);
@@ -160,33 +160,33 @@ class X86ManagedRegister : public ManagedRegister {
// then false is returned.
bool Overlaps(const X86ManagedRegister& other) const;
- static X86ManagedRegister FromCpuRegister(Register r) {
+ static constexpr X86ManagedRegister FromCpuRegister(Register r) {
CHECK_NE(r, kNoRegister);
return FromRegId(r);
}
- static X86ManagedRegister FromXmmRegister(XmmRegister r) {
+ static constexpr X86ManagedRegister FromXmmRegister(XmmRegister r) {
CHECK_NE(r, kNoXmmRegister);
return FromRegId(r + kNumberOfCpuRegIds);
}
- static X86ManagedRegister FromX87Register(X87Register r) {
+ static constexpr X86ManagedRegister FromX87Register(X87Register r) {
CHECK_NE(r, kNoX87Register);
return FromRegId(r + kNumberOfCpuRegIds + kNumberOfXmmRegIds);
}
- static X86ManagedRegister FromRegisterPair(RegisterPair r) {
+ static constexpr X86ManagedRegister FromRegisterPair(RegisterPair r) {
CHECK_NE(r, kNoRegisterPair);
return FromRegId(r + (kNumberOfCpuRegIds + kNumberOfXmmRegIds +
kNumberOfX87RegIds));
}
private:
- bool IsValidManagedRegister() const {
+ constexpr bool IsValidManagedRegister() const {
return (0 <= id_) && (id_ < kNumberOfRegIds);
}
- int RegId() const {
+ constexpr int RegId() const {
CHECK(!IsNoRegister());
return id_;
}
@@ -202,9 +202,9 @@ class X86ManagedRegister : public ManagedRegister {
friend class ManagedRegister;
- explicit X86ManagedRegister(int reg_id) : ManagedRegister(reg_id) {}
+ explicit constexpr X86ManagedRegister(int reg_id) : ManagedRegister(reg_id) {}
- static X86ManagedRegister FromRegId(int reg_id) {
+ static constexpr X86ManagedRegister FromRegId(int reg_id) {
X86ManagedRegister reg(reg_id);
CHECK(reg.IsValidManagedRegister());
return reg;
@@ -215,7 +215,7 @@ std::ostream& operator<<(std::ostream& os, const X86ManagedRegister& reg);
} // namespace x86
-inline x86::X86ManagedRegister ManagedRegister::AsX86() const {
+constexpr inline x86::X86ManagedRegister ManagedRegister::AsX86() const {
x86::X86ManagedRegister reg(id_);
CHECK(reg.IsNoRegister() || reg.IsValidManagedRegister());
return reg;