Chromium Code Reviews

Unified Diff: src/x64/code-stubs-x64.cc

Issue 23890030: Rollback trunk to 3.21.15. (Closed) Base URL: https://v8.googlecode.com/svn/trunk
Patch Set: Created 7 years, 3 months ago
Index: src/x64/code-stubs-x64.cc
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index d1130aded0148c620860d490e9e36c3bce911d5f..51e1a5395cf7bb9b3fa9953a7264cbcaee3c0968 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -1009,7 +1009,7 @@ static void BinaryOpStub_CheckSmiInput(MacroAssembler* masm,
__ movsd(xmm0, FieldOperand(input, HeapNumber::kValueOffset));
// Convert, convert back, and compare the two doubles' bits.
__ cvttsd2siq(scratch2, xmm0);
- __ Cvtlsi2sd(xmm1, scratch2);
+ __ cvtlsi2sd(xmm1, scratch2);
__ movq(scratch1, xmm0);
__ movq(scratch2, xmm1);
__ cmpq(scratch1, scratch2);
@@ -1145,7 +1145,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
// Then load the bits of the double into rbx.
__ SmiToInteger32(rax, rax);
__ subq(rsp, Immediate(kDoubleSize));
- __ Cvtlsi2sd(xmm1, rax);
+ __ cvtlsi2sd(xmm1, rax);
__ movsd(Operand(rsp, 0), xmm1);
__ movq(rbx, xmm1);
__ movq(rdx, xmm1);
@@ -1477,9 +1477,9 @@ void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
void FloatingPointHelper::LoadSSE2SmiOperands(MacroAssembler* masm) {
__ SmiToInteger32(kScratchRegister, rdx);
- __ Cvtlsi2sd(xmm0, kScratchRegister);
+ __ cvtlsi2sd(xmm0, kScratchRegister);
__ SmiToInteger32(kScratchRegister, rax);
- __ Cvtlsi2sd(xmm1, kScratchRegister);
+ __ cvtlsi2sd(xmm1, kScratchRegister);
}
@@ -1503,12 +1503,12 @@ void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm,
__ bind(&load_smi_rdx);
__ SmiToInteger32(kScratchRegister, rdx);
- __ Cvtlsi2sd(xmm0, kScratchRegister);
+ __ cvtlsi2sd(xmm0, kScratchRegister);
__ JumpIfNotSmi(rax, &load_nonsmi_rax);
__ bind(&load_smi_rax);
__ SmiToInteger32(kScratchRegister, rax);
- __ Cvtlsi2sd(xmm1, kScratchRegister);
+ __ cvtlsi2sd(xmm1, kScratchRegister);
__ bind(&done);
}
@@ -1541,7 +1541,7 @@ void FloatingPointHelper::NumbersToSmis(MacroAssembler* masm,
__ cvttsd2siq(smi_result, xmm0);
// Check if conversion was successful by converting back and
// comparing to the original double's bits.
- __ Cvtlsi2sd(xmm1, smi_result);
+ __ cvtlsi2sd(xmm1, smi_result);
__ movq(kScratchRegister, xmm1);
__ cmpq(scratch2, kScratchRegister);
__ j(not_equal, on_not_smis);
@@ -1560,7 +1560,7 @@ void FloatingPointHelper::NumbersToSmis(MacroAssembler* masm,
__ movsd(xmm0, FieldOperand(second, HeapNumber::kValueOffset));
__ movq(scratch2, xmm0);
__ cvttsd2siq(smi_result, xmm0);
- __ Cvtlsi2sd(xmm1, smi_result);
+ __ cvtlsi2sd(xmm1, smi_result);
__ movq(kScratchRegister, xmm1);
__ cmpq(scratch2, kScratchRegister);
__ j(not_equal, on_not_smis);
@@ -1603,7 +1603,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// Save 1 in double_result - we need this several times later on.
__ movq(scratch, Immediate(1));
- __ Cvtlsi2sd(double_result, scratch);
+ __ cvtlsi2sd(double_result, scratch);
if (exponent_type_ == ON_STACK) {
Label base_is_smi, unpack_exponent;
@@ -1623,7 +1623,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ bind(&base_is_smi);
__ SmiToInteger32(base, base);
- __ Cvtlsi2sd(double_base, base);
+ __ cvtlsi2sd(double_base, base);
__ bind(&unpack_exponent);
__ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
@@ -1812,7 +1812,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// and may not have contained the exponent value in the first place when the
// input was a smi. We reset it with exponent value before bailing out.
__ j(not_equal, &done);
- __ Cvtlsi2sd(double_exponent, exponent);
+ __ cvtlsi2sd(double_exponent, exponent);
// Returning or bailing out.
Counters* counters = masm->isolate()->counters();
@@ -1902,7 +1902,8 @@ void StringLengthStub::Generate(MacroAssembler* masm) {
receiver = rax;
}
- StubCompiler::GenerateLoadStringLength(masm, receiver, r8, r9, &miss);
+ StubCompiler::GenerateLoadStringLength(masm, receiver, r8, r9, &miss,
+ support_wrapper_);
__ bind(&miss);
StubCompiler::TailCallBuiltin(
masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
@@ -3618,7 +3619,6 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// PerformGC. No need to use PrepareCallCFunction/CallCFunction here as the
// stack is known to be aligned. This function takes one argument which is
// passed in register.
- __ movq(arg_reg_2, ExternalReference::isolate_address(masm->isolate()));
__ movq(arg_reg_1, rax);
__ movq(kScratchRegister,
ExternalReference::perform_gc_function(masm->isolate()));
@@ -5376,7 +5376,7 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
__ jmp(&left, Label::kNear);
__ bind(&right_smi);
__ SmiToInteger32(rcx, rax); // Can't clobber rax yet.
- __ Cvtlsi2sd(xmm1, rcx);
+ __ cvtlsi2sd(xmm1, rcx);
__ bind(&left);
__ JumpIfSmi(rdx, &left_smi, Label::kNear);
@@ -5386,7 +5386,7 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
__ jmp(&done);
__ bind(&left_smi);
__ SmiToInteger32(rcx, rdx); // Can't clobber rdx yet.
- __ Cvtlsi2sd(xmm0, rcx);
+ __ cvtlsi2sd(xmm0, rcx);
__ bind(&done);
// Compare operands
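
Aside on the pattern this rollback touches: the Cvtlsi2sd/cvtlsi2sd call sites above all implement the same round-trip check — truncate the double to an integer (cvttsd2siq), convert back (cvtlsi2sd), and compare the raw bits of the two doubles; a mismatch means the value was not an exact (smi-representable) integer. A minimal C++ sketch of the same idea, illustrative only and not V8 code (the helper name DoubleIsExactInt32 is made up):

#include <cstdint>
#include <cstring>

// Mirrors the stub sequence: cvttsd2siq (truncate to int64), cvtlsi2sd
// (convert the low 32 bits back to a double), then movq/cmpq on raw bits.
// Note: in portable C++ an out-of-range or NaN cast to integer is
// undefined behavior; the hardware instruction instead yields the
// "integer indefinite" value 0x8000000000000000, which still fails the
// bit comparison below.
static bool DoubleIsExactInt32(double value, int32_t* out) {
  int64_t truncated = static_cast<int64_t>(value);       // cvttsd2siq
  int32_t low = static_cast<int32_t>(truncated);         // low 32 bits
  double round_trip = static_cast<double>(low);          // cvtlsi2sd
  uint64_t bits_in, bits_out;
  std::memcpy(&bits_in, &value, sizeof bits_in);         // movq scratch1, xmm0
  std::memcpy(&bits_out, &round_trip, sizeof bits_out);  // movq scratch2, xmm1
  if (bits_in != bits_out) return false;                 // cmpq; j(not_equal)
  *out = low;
  return true;
}

Comparing bit patterns rather than using == also rejects -0.0, which truncates to 0 but round-trips to +0.0 with a different bit pattern — matching the assembly's behavior, since -0.0 cannot be represented as a smi.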