Chromium Code Reviews

Unified Diff: src/x64/code-stubs-x64.cc

Issue 6883159: X64: Adding macro to load double from memory, and use SSE3 instruction if present. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 9 years, 8 months ago
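
The hunks below mechanically replace each movsd load of a HeapNumber value with the new LoadDbl macro. For orientation, here is a rough sketch of what such a MacroAssembler helper could look like, assuming the CpuFeatures::IsSupported / CpuFeatures::Scope API of this V8 vintage and a movddup emitter in assembler-x64; these details are illustrative and not taken from the patch itself:

// Illustrative sketch only: load a 64-bit double from memory, preferring an
// SSE3 instruction when the CPU supports it, with the SSE2 movsd as fallback.
void MacroAssembler::LoadDbl(XMMRegister dst, const Operand& src) {
  if (CpuFeatures::IsSupported(SSE3)) {
    CpuFeatures::Scope scope(SSE3);  // Assumed feature-scope guard.
    movddup(dst, src);  // SSE3: loads 8 bytes and duplicates them into both lanes.
  } else {
    movsd(dst, src);    // Baseline SSE2: loads the low 8 bytes only.
  }
}

With a helper along these lines, call sites such as __ LoadDbl(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset)) pick the best available load without repeating the feature check at every site.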
Index: src/x64/code-stubs-x64.cc
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index 76fcc88cbfad2b30df277c6ae50859fce69f8921..fdcfa14649859542c581f2470e96ffb8ccdbb40a 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -1032,7 +1032,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ fstp(0); // Clear FPU stack.
__ ret(kPointerSize);
} else { // UNTAGGED.
- __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
+ __ LoadDbl(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
__ Ret();
}
@@ -1052,7 +1052,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
if (tagged) {
__ ret(kPointerSize);
} else { // UNTAGGED.
- __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
+ __ LoadDbl(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
__ Ret();
// Skip cache and return answer directly, only in untagged case.
@@ -1062,7 +1062,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ fld_d(Operand(rsp, 0));
GenerateOperation(masm);
__ fstp_d(Operand(rsp, 0));
- __ movsd(xmm1, Operand(rsp, 0));
+ __ LoadDbl(xmm1, Operand(rsp, 0));
__ addq(rsp, Immediate(kDoubleSize));
// We return the value in xmm1 without adding it to the cache, but
// we cause a scavenging GC so that future allocations will succeed.
@@ -1090,7 +1090,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ push(rax);
__ CallRuntime(RuntimeFunction(), 1);
__ LeaveInternalFrame();
- __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
+ __ LoadDbl(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
__ Ret();
}
}
@@ -1359,11 +1359,11 @@ void FloatingPointHelper::LoadSSE2NumberOperands(MacroAssembler* masm) {
Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, done;
// Load operand in rdx into xmm0.
__ JumpIfSmi(rdx, &load_smi_rdx);
- __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
+ __ LoadDbl(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
// Load operand in rax into xmm1.
__ JumpIfSmi(rax, &load_smi_rax);
__ bind(&load_nonsmi_rax);
- __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
+ __ LoadDbl(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
__ jmp(&done);
__ bind(&load_smi_rdx);
@@ -1387,14 +1387,14 @@ void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm,
__ JumpIfSmi(rdx, &load_smi_rdx);
__ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), rcx);
__ j(not_equal, not_numbers); // Argument in rdx is not a number.
- __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
+ __ LoadDbl(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
// Load operand in rax into xmm1, or branch to not_numbers.
__ JumpIfSmi(rax, &load_smi_rax);
__ bind(&load_nonsmi_rax);
__ cmpq(FieldOperand(rax, HeapObject::kMapOffset), rcx);
__ j(not_equal, not_numbers);
- __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
+ __ LoadDbl(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
__ jmp(&done);
__ bind(&load_smi_rdx);
@@ -1428,7 +1428,7 @@ void FloatingPointHelper::NumbersToSmis(MacroAssembler* masm,
__ cmpq(FieldOperand(first, HeapObject::kMapOffset), heap_number_map);
__ j(not_equal, on_not_smis);
// Convert HeapNumber to smi if possible.
- __ movsd(xmm0, FieldOperand(first, HeapNumber::kValueOffset));
+ __ LoadDbl(xmm0, FieldOperand(first, HeapNumber::kValueOffset));
__ movq(scratch2, xmm0);
__ cvttsd2siq(smi_result, xmm0);
// Check if conversion was successful by converting back and
@@ -1449,7 +1449,7 @@ void FloatingPointHelper::NumbersToSmis(MacroAssembler* masm,
__ cmpq(FieldOperand(second, HeapObject::kMapOffset), heap_number_map);
__ j(not_equal, on_not_smis);
// Convert second to smi, if possible.
- __ movsd(xmm0, FieldOperand(second, HeapNumber::kValueOffset));
+ __ LoadDbl(xmm0, FieldOperand(second, HeapNumber::kValueOffset));
__ movq(scratch2, xmm0);
__ cvttsd2siq(smi_result, xmm0);
__ cvtlsi2sd(xmm1, smi_result);
@@ -1583,7 +1583,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
Heap::kHeapNumberMapRootIndex);
__ j(not_equal, &call_runtime);
- __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
+ __ LoadDbl(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
// Optimized version of pow if exponent is a smi.
// xmm0 contains the base.
@@ -1633,7 +1633,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex);
__ j(not_equal, &call_runtime);
- __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
+ __ LoadDbl(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
// Test if exponent is nan.
__ ucomisd(xmm1, xmm1);
__ j(parity_even, &call_runtime);
@@ -1654,7 +1654,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ cmpl(rcx, Immediate(HeapNumber::kExponentMask));
// base is NaN or +/-Infinity
__ j(greater_equal, &call_runtime);
- __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
+ __ LoadDbl(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
// base is in xmm0 and exponent is in xmm1.
__ bind(&handle_special_cases);
@@ -2381,8 +2381,8 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
times_1,
FixedArray::kHeaderSize));
__ JumpIfSmi(probe, not_found);
- __ movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
- __ movsd(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
+ __ LoadDbl(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
+ __ LoadDbl(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
__ ucomisd(xmm0, xmm1);
__ j(parity_even, not_found); // Bail out if NaN is involved.
__ j(not_equal, not_found); // The cache did not contain this value.
@@ -2524,7 +2524,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
// greater-equal. Return -1 for them, so the comparison yields
// false for all conditions except not-equal.
__ Set(rax, EQUAL);
- __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
+ __ LoadDbl(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
__ ucomisd(xmm0, xmm0);
__ setcc(parity_even, rax);
// rax is 0 for equal non-NaN heapnumbers, 1 for NaNs.
@@ -4467,8 +4467,8 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
__ j(not_equal, &miss);
// Load left and right operand
- __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
- __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
+ __ LoadDbl(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
+ __ LoadDbl(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
// Compare operands
__ ucomisd(xmm0, xmm1);
