Index: src/x64/assembler-x64.cc
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index 157f134b90746a932113c4387a1dc75ea700e4a4..8538bba928eaadc8ced6ef4ba7a090e1a4c7b9c9 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -113,20 +113,20 @@ Operand::Operand(Register base,
 }
-// Safe default is no features.
-// TODO(X64): Safe defaults include SSE2 for X64.
-uint64_t CpuFeatures::supported_ = 0;
+// The required user mode extensions in X64 are (from AMD64 ABI Table A.1):
+// fpu, tsc, cx8, cmov, mmx, sse, sse2, fxsr, syscall
+uint64_t CpuFeatures::supported_ = kDefaultCpuFeatures;
 uint64_t CpuFeatures::enabled_ = 0;
 void CpuFeatures::Probe() {
   ASSERT(Heap::HasBeenSetup());
-  ASSERT(supported_ == 0);
+  ASSERT(supported_ == kDefaultCpuFeatures);
   if (Serializer::enabled()) return;  // No features if we might serialize.
   Assembler assm(NULL, 0);
   Label cpuid, done;
 #define __ assm.
-  // Save old esp, since we are going to modify the stack.
+  // Save old rsp, since we are going to modify the stack.
   __ push(rbp);
   __ pushfq();
   __ push(rcx);
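The kDefaultCpuFeatures constant referenced above is declared in assembler-x64.h and is not part of this file. A minimal sketch of the kind of declaration the code above relies on, assuming it sits inside class CpuFeatures and uses the same feature bit positions as this file:

  // Sketch only; the real declaration lives in assembler-x64.h, inside
  // class CpuFeatures. SSE2 and CMOV are architecturally guaranteed on X64,
  // so they can be assumed without probing.
  static const uint64_t kDefaultCpuFeatures = (1 << SSE2) | (1 << CMOV);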
@@ -154,11 +154,11 @@ void CpuFeatures::Probe() {
   // safe here.
   __ bind(&cpuid);
   __ movq(rax, Immediate(1));
-  supported_ = (1 << CPUID);
+  supported_ = kDefaultCpuFeatures | (1 << CPUID);
   { Scope fscope(CPUID);
     __ cpuid();
   }
-  supported_ = 0;
+  supported_ = kDefaultCpuFeatures;
   // Move the result from ecx:edx to rax and make sure to mark the
   // CPUID feature as supported.
@@ -187,6 +187,10 @@ void CpuFeatures::Probe() {
   typedef uint64_t (*F0)();
   F0 probe = FUNCTION_CAST<F0>(Code::cast(code)->entry());
   supported_ = probe();
+  // SSE2 and CMOV must be available on an X64 CPU.
+  ASSERT(IsSupported(CPUID));
+  ASSERT(IsSupported(SSE2));
+  ASSERT(IsSupported(CMOV));
 }
 // -----------------------------------------------------------------------------
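The new asserts rely on CPUID leaf 1 reporting SSE2 and CMOV. As a rough standalone illustration of the same check (not V8 code; it assumes GCC's <cpuid.h> and the standard leaf-1 EDX bit layout):

  #include <cpuid.h>
  #include <cstdio>

  int main() {
    unsigned int eax, ebx, ecx, edx;
    if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx)) return 1;
    bool has_cmov = (edx & (1u << 15)) != 0;  // CPUID.1:EDX bit 15
    bool has_sse2 = (edx & (1u << 26)) != 0;  // CPUID.1:EDX bit 26
    std::printf("cmov=%d sse2=%d\n", has_cmov, has_sse2);
    return 0;
  }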
@@ -610,6 +614,57 @@ void Assembler::call(const Operand& op) {
 }
+void Assembler::cmovq(Condition cc, Register dst, Register src) {
+  // No need to check CpuFeatures for CMOV support; it is a required part of
+  // the 64-bit architecture.
+  ASSERT(cc >= 0);  // Use mov for unconditional moves.
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  // Opcode: REX.W 0f 40 + cc /r
+  emit_rex_64(dst, src);
+  emit(0x0f);
+  emit(0x40 + cc);
+  emit_modrm(dst, src);
+}
+
+
+void Assembler::cmovq(Condition cc, Register dst, const Operand& src) {
+  ASSERT(cc >= 0);
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  // Opcode: REX.W 0f 40 + cc /r
+  emit_rex_64(dst, src);
+  emit(0x0f);
+  emit(0x40 + cc);
+  emit_operand(dst, src);
+}
+
+
+void Assembler::cmovl(Condition cc, Register dst, Register src) {
+  ASSERT(cc >= 0);
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  // Opcode: 0f 40 + cc /r
+  emit_optional_rex_32(dst, src);
+  emit(0x0f);
+  emit(0x40 + cc);
+  emit_modrm(dst, src);
+}
+
+
+void Assembler::cmovl(Condition cc, Register dst, const Operand& src) {
+  ASSERT(cc >= 0);
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  // Opcode: 0f 40 + cc /r
+  emit_optional_rex_32(dst, src);
+  emit(0x0f);
+  emit(0x40 + cc);
+  emit_operand(dst, src);
+}
+
+
+
 void Assembler::cpuid() {
   ASSERT(CpuFeatures::IsEnabled(CpuFeatures::CPUID));
   EnsureSpace ensure_space(this);
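As a sanity check on the new cmov encodings, a small standalone illustration (not part of the patch) of the bytes the register form should produce, assuming V8's register codes rax = 0 and rbx = 3 and the condition code 4 for equal:

  #include <cstdio>

  int main() {
    int dst = 0, src = 3, cc = 4;                   // rax, rbx, equal (assumed codes)
    unsigned char modrm = 0xC0 | (dst << 3) | src;  // register-direct ModR/M byte
    // cmovq(equal, rax, rbx): REX.W 0f (40 + cc) /r
    std::printf("cmovq(equal, rax, rbx): 48 0F %02X %02X\n", 0x40 + cc, modrm);
    return 0;
  }

The expected output is 48 0F 44 C3, i.e. cmove rax, rbx with a REX.W prefix.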
@@ -1752,6 +1807,15 @@ void Assembler::fnclex() {
 }
+void Assembler::sahf() {
+  // TODO(X64): Test for presence. Not all 64-bit Intel CPUs have sahf
+  // in 64-bit mode. Test CPUID.
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0x9E);
+}
+
+
 void Assembler::emit_farith(int b1, int b2, int i) {
   ASSERT(is_uint8(b1) && is_uint8(b2));  // wrong opcode
   ASSERT(is_uint3(i));  // illegal stack offset
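The TODO above can be resolved with CPUID extended function 0x80000001: ECX bit 0 reports whether LAHF/SAHF are usable in 64-bit mode. A standalone illustration (not V8 code, again assuming GCC's <cpuid.h>):

  #include <cpuid.h>
  #include <cstdio>

  int main() {
    unsigned int eax, ebx, ecx, edx;
    if (!__get_cpuid(0x80000001, &eax, &ebx, &ecx, &edx)) return 1;
    // CPUID.80000001H:ECX bit 0 = LAHF/SAHF available in 64-bit mode.
    std::printf("sahf usable in 64-bit mode: %s\n", (ecx & 1u) ? "yes" : "no");
    return 0;
  }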
@@ -1759,6 +1823,166 @@ void Assembler::emit_farith(int b1, int b2, int i) {
   emit(b2 + i);
 }
+// SSE 2 operations
+
+void Assembler::movsd(const Operand& dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xF2);  // double
+  emit_optional_rex_32(src, dst);
+  emit(0x0F);
+  emit(0x11);  // store
+  emit_sse_operand(src, dst);
+}
+
+
+void Assembler::movsd(Register dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xF2);  // double
+  emit_optional_rex_32(src, dst);
+  emit(0x0F);
+  emit(0x11);  // store
+  emit_sse_operand(src, dst);
+}
+
+
+void Assembler::movsd(XMMRegister dst, Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xF2);  // double
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x10);  // load
+  emit_sse_operand(dst, src);
+}
+
+
+void Assembler::movsd(XMMRegister dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xF2);  // double
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x10);  // load
+  emit_sse_operand(dst, src);
+}
+
+
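For orientation, a rough usage sketch of the new scalar-double moves (illustration only; it assumes this port's Operand(Register base, int32_t disp) constructor and the xmm register declarations, neither of which is part of this hunk):

  Assembler assm(NULL, 0);
  assm.movsd(xmm0, Operand(rbp, -8));  // load a double from the stack frame
  assm.movsd(Operand(rsp, 0), xmm0);   // store it to the top of the stack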
+void Assembler::cvttss2si(Register dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  // Opcode: F3 0f 2c /r  (truncating convert: scalar single to int32)
+  emit(0xF3);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x2C);
+  emit_operand(dst, src);
+}
+
+
+void Assembler::cvttsd2si(Register dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  // Opcode: F2 0f 2c /r  (truncating convert: scalar double to int32)
+  emit(0xF2);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x2C);
+  emit_operand(dst, src);
+}
+
+
+void Assembler::cvtlsi2sd(XMMRegister dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  // Opcode: F2 0f 2a /r  (convert int32 operand to scalar double)
+  emit(0xF2);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x2A);
+  emit_sse_operand(dst, src);
+}
+
+
+void Assembler::cvtlsi2sd(XMMRegister dst, Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  // Opcode: F2 0f 2a /r  (convert int32 register to scalar double)
+  emit(0xF2);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x2A);
+  emit_sse_operand(dst, src);
+}
+
+
+void Assembler::cvtqsi2sd(XMMRegister dst, Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  // Opcode: F2 REX.W 0f 2a /r  (convert int64 register to scalar double)
+  emit(0xF2);
+  emit_rex_64(dst, src);
+  emit(0x0F);
+  emit(0x2A);
+  emit_sse_operand(dst, src);
+}
+
+
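To make the difference between the two integer-to-double conversions concrete, a small standalone illustration (not V8 code) of the bytes the register forms should produce, assuming register codes rax = 0 and xmm0 = 0; the only difference is the mandatory REX.W prefix on the 64-bit form:

  #include <cstdio>

  int main() {
    unsigned char modrm = 0xC0 | (0 << 3) | 0;  // reg = xmm0, rm = rax (both code 0)
    std::printf("cvtlsi2sd(xmm0, rax): F2 0F 2A %02X\n", modrm);     // 32-bit source
    std::printf("cvtqsi2sd(xmm0, rax): F2 48 0F 2A %02X\n", modrm);  // 64-bit source
    return 0;
  }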
+void Assembler::addsd(XMMRegister dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  // Opcode: F2 0f 58 /r
+  emit(0xF2);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x58);
+  emit_sse_operand(dst, src);
+}
+
+
+void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  // Opcode: F2 0f 59 /r
+  emit(0xF2);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x59);
+  emit_sse_operand(dst, src);
+}
+
+
+void Assembler::subsd(XMMRegister dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  // Opcode: F2 0f 5c /r
+  emit(0xF2);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x5C);
+  emit_sse_operand(dst, src);
+}
+
+
+void Assembler::divsd(XMMRegister dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  // Opcode: F2 0f 5e /r
+  emit(0xF2);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x5E);
+  emit_sse_operand(dst, src);
+}
+
+
+
+void Assembler::emit_sse_operand(XMMRegister reg, const Operand& adr) {
+  // Encode the XMM register code in the reg field of a normal memory operand.
+  Register ireg = { reg.code() };
+  emit_operand(ireg, adr);
+}
+
+
+void Assembler::emit_sse_operand(XMMRegister dst, XMMRegister src) {
+  // Register-direct ModR/M byte: mod = 11, reg = dst, rm = src.
+  emit(0xC0 | (dst.code() << 3) | src.code());
+}
+
+void Assembler::emit_sse_operand(XMMRegister dst, Register src) {
+  // Register-direct ModR/M byte: mod = 11, reg = dst, rm = src.
+  emit(0xC0 | (dst.code() << 3) | src.code());
+}
+
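The register-register forms of emit_sse_operand build the ModR/M byte directly: mod = 11 (register-direct), reg = dst, rm = src. A quick standalone check (illustration only, assuming register codes xmm0 = 0 and xmm1 = 1) of what addsd(xmm0, xmm1) should therefore emit:

  #include <cstdio>

  int main() {
    int dst = 0, src = 1;                           // xmm0, xmm1 (assumed codes)
    unsigned char modrm = 0xC0 | (dst << 3) | src;  // register-direct ModR/M byte
    std::printf("addsd(xmm0, xmm1): F2 0F 58 %02X\n", modrm);  // expect F2 0F 58 C1
    return 0;
  }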
// Relocation information implementations |