Chromium Code Reviews

Unified Diff: src/x64/code-stubs-x64.cc

Issue 23890030: Rollback trunk to 3.21.15. (Closed) Base URL: https://v8.googlecode.com/svn/trunk
Patch Set: Created 7 years, 3 months ago
 // Copyright 2013 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 991 matching lines...)
   Register heap_number_map = r8;
   Register scratch1 = r9;
   Register scratch2 = r10;
   // HeapNumbers containing 32bit integer values are also allowed.
   __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
   __ cmpq(FieldOperand(input, HeapObject::kMapOffset), heap_number_map);
   __ j(not_equal, fail);
   __ movsd(xmm0, FieldOperand(input, HeapNumber::kValueOffset));
   // Convert, convert back, and compare the two doubles' bits.
   __ cvttsd2siq(scratch2, xmm0);
-  __ Cvtlsi2sd(xmm1, scratch2);
+  __ cvtlsi2sd(xmm1, scratch2);
   __ movq(scratch1, xmm0);
   __ movq(scratch2, xmm1);
   __ cmpq(scratch1, scratch2);
   __ j(not_equal, fail);
   __ bind(&ok);
 }
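The convert/convert-back sequence in this hunk is a standard trick for testing whether a double holds an exact 32-bit integer: truncate to an integer register, convert the low 32 bits back to a double, and compare raw bit patterns. A minimal C++ sketch of the same check, assuming in-range inputs (the C++ casts are undefined where cvttsd2siq would instead produce the 0x8000000000000000 "indefinite" value, so this mirrors only the well-behaved paths):

    #include <cstdint>
    #include <cstring>

    bool IsExactInt32(double value) {
      int64_t truncated = static_cast<int64_t>(value);  // cvttsd2siq scratch2, xmm0
      double round_trip =                               // cvtlsi2sd xmm1, scratch2
          static_cast<double>(static_cast<int32_t>(truncated));  // 32-bit source
      uint64_t original_bits, round_trip_bits;
      std::memcpy(&original_bits, &value, sizeof(value));              // movq scratch1, xmm0
      std::memcpy(&round_trip_bits, &round_trip, sizeof(round_trip));  // movq scratch2, xmm1
      return original_bits == round_trip_bits;  // cmpq scratch1, scratch2
    }

Because the comparison is on bit patterns, -0.0 fails (its round trip yields +0.0) and anything outside int32 range fails (the 32-bit convert-back sees only the low word), which is exactly what the fail label wants.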
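The Cvtlsi2sd → cvtlsi2sd hunks repeated throughout this file are the bulk of the rollback in this function and the ones below: trunk had replaced the raw SSE2 instruction with a MacroAssembler helper, and reverting to 3.21.15 restores the direct calls. A hedged sketch of the helper being rolled back (its body is an assumption here; it does not appear in this diff):

    // Assumed shape of the trunk-side MacroAssembler helper.
    void MacroAssembler::Cvtlsi2sd(XMMRegister dst, Register src) {
      xorps(dst, dst);      // clear dst to break the false dependence on its old value
      cvtlsi2sd(dst, src);  // int32 -> double conversion into the low bits of dst
    }

cvtsi2sd only writes the low half of its destination and leaves the upper bits intact, so without a clearing xorps the conversion carries a dependence on whatever last wrote the register.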
 void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) {
   Label gc_required, not_number;
(...skipping 115 matching lines...)
   Label input_not_smi, loaded;

   // Test that rax is a number.
   StackArgumentsAccessor args(rsp, 1, ARGUMENTS_DONT_CONTAIN_RECEIVER);
   __ movq(rax, args.GetArgumentOperand(0));
   __ JumpIfNotSmi(rax, &input_not_smi, Label::kNear);
   // Input is a smi. Untag and load it onto the FPU stack.
   // Then load the bits of the double into rbx.
   __ SmiToInteger32(rax, rax);
   __ subq(rsp, Immediate(kDoubleSize));
-  __ Cvtlsi2sd(xmm1, rax);
+  __ cvtlsi2sd(xmm1, rax);
   __ movsd(Operand(rsp, 0), xmm1);
   __ movq(rbx, xmm1);
   __ movq(rdx, xmm1);
   __ fld_d(Operand(rsp, 0));
   __ addq(rsp, Immediate(kDoubleSize));
   __ jmp(&loaded, Label::kNear);
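SmiToInteger32 above strips V8's smi (small integer) tagging before the value can be fed to cvtlsi2sd. On x64 of this vintage the 32-bit payload lives in the upper half of the tagged 64-bit word, with tag bit 0 clear (heap pointers have it set); a minimal sketch, assuming that scheme:

    #include <cstdint>

    int64_t Integer32ToSmi(int32_t value) {
      return static_cast<int64_t>(value) << 32;  // payload moves to the high half
    }

    int32_t SmiToInteger32(int64_t smi) {
      return static_cast<int32_t>(smi >> 32);    // arithmetic shift restores the value
    }

    bool IsSmi(int64_t word) {
      return (word & 1) == 0;                    // heap pointers carry tag bit 0 = 1
    }

The store/fld_d pair in the hunk exists because the x87 unit cannot read an XMM register directly: the double is spilled to the stack and reloaded onto the FPU stack from memory.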
   __ bind(&input_not_smi);
   // Check if input is a HeapNumber.
   __ LoadRoot(rbx, Heap::kHeapNumberMapRootIndex);
(...skipping 311 matching lines...)
   // Get the untagged integer version of the rax heap number in rcx.
   __ TruncateHeapNumberToI(rcx, rax);

   __ bind(&done);
   __ movl(rax, r8);
 }


 void FloatingPointHelper::LoadSSE2SmiOperands(MacroAssembler* masm) {
   __ SmiToInteger32(kScratchRegister, rdx);
-  __ Cvtlsi2sd(xmm0, kScratchRegister);
+  __ cvtlsi2sd(xmm0, kScratchRegister);
   __ SmiToInteger32(kScratchRegister, rax);
-  __ Cvtlsi2sd(xmm1, kScratchRegister);
+  __ cvtlsi2sd(xmm1, kScratchRegister);
 }

 void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm,
                                                   Label* not_numbers) {
   Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, load_float_rax, done;
   // Load operand in rdx into xmm0, or branch to not_numbers.
   __ LoadRoot(rcx, Heap::kHeapNumberMapRootIndex);
   __ JumpIfSmi(rdx, &load_smi_rdx);
   __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), rcx);
   __ j(not_equal, not_numbers);  // Argument in rdx is not a number.
   __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
   // Load operand in rax into xmm1, or branch to not_numbers.
   __ JumpIfSmi(rax, &load_smi_rax);

   __ bind(&load_nonsmi_rax);
   __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), rcx);
   __ j(not_equal, not_numbers);
   __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
   __ jmp(&done);

   __ bind(&load_smi_rdx);
   __ SmiToInteger32(kScratchRegister, rdx);
-  __ Cvtlsi2sd(xmm0, kScratchRegister);
+  __ cvtlsi2sd(xmm0, kScratchRegister);
   __ JumpIfNotSmi(rax, &load_nonsmi_rax);

   __ bind(&load_smi_rax);
   __ SmiToInteger32(kScratchRegister, rax);
-  __ Cvtlsi2sd(xmm1, kScratchRegister);
+  __ cvtlsi2sd(xmm1, kScratchRegister);
   __ bind(&done);
 }

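The FieldOperand accesses used throughout these loads fold V8's heap-object tagging into the addressing mode: object pointers carry a low tag bit, so field offsets are adjusted down by the tag when forming the effective address. A sketch of the equivalent computation, assuming kHeapObjectTag == 1 as in V8:

    #include <cstdint>

    // FieldOperand(obj, offset) ~ memory at obj + offset - kHeapObjectTag.
    inline uint8_t* FieldAddress(uintptr_t tagged_object, int byte_offset) {
      const uintptr_t kHeapObjectTag = 1;  // low bit set marks a heap pointer
      return reinterpret_cast<uint8_t*>(tagged_object + byte_offset - kHeapObjectTag);
    }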
 void FloatingPointHelper::NumbersToSmis(MacroAssembler* masm,
                                         Register first,
                                         Register second,
                                         Register scratch1,
                                         Register scratch2,
                                         Register scratch3,
(...skipping 12 matching lines...)
   __ j(not_equal,
        (convert_undefined == CONVERT_UNDEFINED_TO_ZERO)
        ? &maybe_undefined_first
        : on_not_smis);
   // Convert HeapNumber to smi if possible.
   __ movsd(xmm0, FieldOperand(first, HeapNumber::kValueOffset));
   __ movq(scratch2, xmm0);
   __ cvttsd2siq(smi_result, xmm0);
   // Check if conversion was successful by converting back and
   // comparing to the original double's bits.
-  __ Cvtlsi2sd(xmm1, smi_result);
+  __ cvtlsi2sd(xmm1, smi_result);
   __ movq(kScratchRegister, xmm1);
   __ cmpq(scratch2, kScratchRegister);
   __ j(not_equal, on_not_smis);
   __ Integer32ToSmi(first, smi_result);

   __ bind(&first_done);
   __ JumpIfSmi(second, (on_success != NULL) ? on_success : &done);
   __ bind(&first_smi);
   __ AssertNotSmi(second);
   __ cmpq(FieldOperand(second, HeapObject::kMapOffset), heap_number_map);
   __ j(not_equal,
        (convert_undefined == CONVERT_UNDEFINED_TO_ZERO)
        ? &maybe_undefined_second
        : on_not_smis);
   // Convert second to smi, if possible.
   __ movsd(xmm0, FieldOperand(second, HeapNumber::kValueOffset));
   __ movq(scratch2, xmm0);
   __ cvttsd2siq(smi_result, xmm0);
-  __ Cvtlsi2sd(xmm1, smi_result);
+  __ cvtlsi2sd(xmm1, smi_result);
   __ movq(kScratchRegister, xmm1);
   __ cmpq(scratch2, kScratchRegister);
   __ j(not_equal, on_not_smis);
   __ Integer32ToSmi(second, smi_result);
   if (on_success != NULL) {
     __ jmp(on_success);
   } else {
     __ jmp(&done);
   }

(...skipping 22 matching lines...)
   const Register scratch = rcx;
   const XMMRegister double_result = xmm3;
   const XMMRegister double_base = xmm2;
   const XMMRegister double_exponent = xmm1;
   const XMMRegister double_scratch = xmm4;

   Label call_runtime, done, exponent_not_smi, int_exponent;

   // Save 1 in double_result - we need this several times later on.
   __ movq(scratch, Immediate(1));
-  __ Cvtlsi2sd(double_result, scratch);
+  __ cvtlsi2sd(double_result, scratch);

   if (exponent_type_ == ON_STACK) {
     Label base_is_smi, unpack_exponent;
     // The exponent and base are supplied as arguments on the stack.
     // This can only happen if the stub is called from non-optimized code.
     // Load input parameters from stack.
     StackArgumentsAccessor args(rsp, 2, ARGUMENTS_DONT_CONTAIN_RECEIVER);
     __ movq(base, args.GetArgumentOperand(0));
     __ movq(exponent, args.GetArgumentOperand(1));
     __ JumpIfSmi(base, &base_is_smi, Label::kNear);
     __ CompareRoot(FieldOperand(base, HeapObject::kMapOffset),
                    Heap::kHeapNumberMapRootIndex);
     __ j(not_equal, &call_runtime);

     __ movsd(double_base, FieldOperand(base, HeapNumber::kValueOffset));
     __ jmp(&unpack_exponent, Label::kNear);

     __ bind(&base_is_smi);
     __ SmiToInteger32(base, base);
-    __ Cvtlsi2sd(double_base, base);
+    __ cvtlsi2sd(double_base, base);
     __ bind(&unpack_exponent);

     __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
     __ SmiToInteger32(exponent, exponent);
     __ jmp(&int_exponent);

     __ bind(&exponent_not_smi);
     __ CompareRoot(FieldOperand(exponent, HeapObject::kMapOffset),
                    Heap::kHeapNumberMapRootIndex);
     __ j(not_equal, &call_runtime);
(...skipping 168 matching lines...)
   __ divsd(double_scratch2, double_result);
   __ movsd(double_result, double_scratch2);
   // Test whether result is zero. Bail out to check for subnormal result.
   // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
   __ xorps(double_scratch2, double_scratch2);
   __ ucomisd(double_scratch2, double_result);
   // double_exponent aliased as double_scratch2 has already been overwritten
   // and may not have contained the exponent value in the first place when the
   // input was a smi. We reset it with exponent value before bailing out.
   __ j(not_equal, &done);
-  __ Cvtlsi2sd(double_exponent, exponent);
+  __ cvtlsi2sd(double_exponent, exponent);

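The subnormal caveat in the comment above is worth spelling out: the code forms the final result through a reciprocal (the divsd), and when the intermediate power overflows to infinity the reciprocal collapses to zero even though the true result is a representable nonzero subnormal. A zero result therefore forces the bailout to the runtime. A small demonstration in plain C++:

    #include <cmath>
    #include <cstdio>

    int main() {
      double x = 1e160;
      double via_reciprocal = 1.0 / (x * x);  // x*x overflows to +inf, so this is 0.0
      double direct = std::pow(x, -2.0);      // ~1e-320, a nonzero subnormal
      std::printf("%g vs %g\n", via_reciprocal, direct);
      return 0;
    }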
   // Returning or bailing out.
   Counters* counters = masm->isolate()->counters();
   if (exponent_type_ == ON_STACK) {
     // The arguments are still on the stack.
     __ bind(&call_runtime);
     __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);

     // The stub is called from non-optimized code, which expects the result
     // as heap number in rax.
(...skipping 69 matching lines...)
   } else {
     ASSERT(kind() == Code::LOAD_IC);
     // ----------- S t a t e -------------
     //  -- rax    : receiver
     //  -- rcx    : name
     //  -- rsp[0] : return address
     // -----------------------------------
     receiver = rax;
   }

-  StubCompiler::GenerateLoadStringLength(masm, receiver, r8, r9, &miss);
+  StubCompiler::GenerateLoadStringLength(masm, receiver, r8, r9, &miss,
+                                         support_wrapper_);
   __ bind(&miss);
   StubCompiler::TailCallBuiltin(
       masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
 }

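The restored support_wrapper_ argument re-parameterizes the string-length fast path: the flag decides whether the stub also accepts String wrapper objects (the result of new String(...)) and reads the length from the wrapped primitive. A hedged sketch of the distinction, using hypothetical types rather than V8's real API:

    #include <cstdint>
    #include <string>

    // Illustrative only: stand-ins for V8's tagged receiver checks.
    struct Receiver {
      bool is_string;
      bool is_string_wrapper;  // e.g. the result of `new String("...")`
      std::string value;       // the underlying primitive string
    };

    int32_t LoadStringLength(const Receiver& r, bool support_wrapper) {
      if (r.is_string)
        return static_cast<int32_t>(r.value.size());    // plain string: read length
      if (support_wrapper && r.is_string_wrapper)
        return static_cast<int32_t>(r.value.size());    // unwrap, then read length
      return -1;  // stands in for jumping to the &miss handler
    }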
 void StoreArrayLengthStub::Generate(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- rax    : value
   //  -- rcx    : key
(...skipping 1695 matching lines...)
   // Check stack alignment.
   if (FLAG_debug_code) {
     __ CheckStackAlignment();
   }

   if (do_gc) {
     // Pass failure code returned from last attempt as first argument to
     // PerformGC. No need to use PrepareCallCFunction/CallCFunction here as the
     // stack is known to be aligned. This function takes one argument which is
     // passed in register.
-    __ movq(arg_reg_2, ExternalReference::isolate_address(masm->isolate()));
     __ movq(arg_reg_1, rax);
     __ movq(kScratchRegister,
             ExternalReference::perform_gc_function(masm->isolate()));
     __ call(kScratchRegister);
   }

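The dropped movq into arg_reg_2 matches the comment's claim that the GC entry point takes a single argument again: the rollback reverts a trunk change that had started passing the isolate alongside the failure code. Hedged, illustrative declarations (assumptions, not quoted from the V8 sources):

    class Object;
    class Isolate;

    // Rolled-back (3.21.15) form: one argument, passed in arg_reg_1.
    void PerformGC(Object* failure);
    // Reverted trunk form also received the isolate in arg_reg_2:
    // void PerformGC(Object* failure, Isolate* isolate);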
   ExternalReference scope_depth =
       ExternalReference::heap_always_allocate_scope_depth(masm->isolate());
   if (always_allocate_scope) {
     Operand scope_depth_operand = masm->ExternalOperand(scope_depth);
(...skipping 1737 matching lines...)
   // Load left and right operand.
   Label done, left, left_smi, right_smi;
   __ JumpIfSmi(rax, &right_smi, Label::kNear);
   __ CompareMap(rax, masm->isolate()->factory()->heap_number_map(), NULL);
   __ j(not_equal, &maybe_undefined1, Label::kNear);
   __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
   __ jmp(&left, Label::kNear);
   __ bind(&right_smi);
   __ SmiToInteger32(rcx, rax);  // Can't clobber rax yet.
-  __ Cvtlsi2sd(xmm1, rcx);
+  __ cvtlsi2sd(xmm1, rcx);

   __ bind(&left);
   __ JumpIfSmi(rdx, &left_smi, Label::kNear);
   __ CompareMap(rdx, masm->isolate()->factory()->heap_number_map(), NULL);
   __ j(not_equal, &maybe_undefined2, Label::kNear);
   __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
   __ jmp(&done);
   __ bind(&left_smi);
   __ SmiToInteger32(rcx, rdx);  // Can't clobber rdx yet.
-  __ Cvtlsi2sd(xmm0, rcx);
+  __ cvtlsi2sd(xmm0, rcx);

   __ bind(&done);
   // Compare operands
   __ ucomisd(xmm0, xmm1);

   // Don't base result on EFLAGS when a NaN is involved.
   __ j(parity_even, &unordered, Label::kNear);

   // Return a result of -1, 0, or 1, based on EFLAGS.
   // Performing mov, because xor would destroy the flag register.
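The parity check encodes a subtlety of ucomisd: an unordered comparison (either operand NaN) sets the parity flag, and the signed condition codes are then meaningless, so the stub branches to &unordered before trusting them. The scalar logic, as a plain C++ sketch:

    #include <cmath>

    // Mirrors the ucomisd + parity_even guard: handle NaN before
    // trusting the ordered comparison results.
    int CompareDoubles(double lhs, double rhs, int unordered_result) {
      if (std::isnan(lhs) || std::isnan(rhs)) return unordered_result;  // parity_even path
      if (lhs < rhs) return -1;
      if (lhs > rhs) return 1;
      return 0;  // equal
    }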
(...skipping 1241 matching lines...)
   __ bind(&fast_elements_case);
   GenerateCase(masm, FAST_ELEMENTS);
 }


 #undef __

 } }  // namespace v8::internal

 #endif  // V8_TARGET_ARCH_X64
