OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 548 matching lines...)
559 // Test if operands are numbers (smi or HeapNumber objects), and load | 559 // Test if operands are numbers (smi or HeapNumber objects), and load |
560 // them into xmm0 and xmm1 if they are. Jump to label not_numbers if | 560 // them into xmm0 and xmm1 if they are. Jump to label not_numbers if |
561 // either operand is not a number. Operands are in edx and eax. | 561 // either operand is not a number. Operands are in edx and eax. |
562 // Leaves operands unchanged. | 562 // Leaves operands unchanged. |
563 static void LoadSSE2Operands(MacroAssembler* masm, Label* not_numbers); | 563 static void LoadSSE2Operands(MacroAssembler* masm, Label* not_numbers); |
564 | 564 |
565 // Similar to LoadSSE2Operands but assumes that both operands are smis. | 565 // Similar to LoadSSE2Operands but assumes that both operands are smis. |
566 // Expects operands in edx, eax. | 566 // Expects operands in edx, eax. |
567 static void LoadSSE2Smis(MacroAssembler* masm, Register scratch); | 567 static void LoadSSE2Smis(MacroAssembler* masm, Register scratch); |
568 | 568 |
569 // Checks that the two floating point numbers loaded into xmm0 and xmm1 | |
570 // have int32 values. | |
571 static void CheckSSE2OperandsAreInt32(MacroAssembler* masm, | |
572 Label* non_int32, | |
573 Register scratch); | |
574 | |
575 // Checks that |operand| has an int32 value. If |int32_result| is different | 569 // Checks that |operand| has an int32 value. If |int32_result| is different |
576 // from |scratch|, it will contain that int32 value. | 570 // from |scratch|, it will contain that int32 value. |
577 static void CheckSSE2OperandIsInt32(MacroAssembler* masm, | 571 static void CheckSSE2OperandIsInt32(MacroAssembler* masm, |
578 Label* non_int32, | 572 Label* non_int32, |
579 XMMRegister operand, | 573 XMMRegister operand, |
580 Register int32_result, | 574 Register int32_result, |
581 Register scratch, | 575 Register scratch, |
582 XMMRegister xmm_scratch); | 576 XMMRegister xmm_scratch); |
583 }; | 577 }; |
584 | 578 |
(...skipping 878 matching lines...)
1463 __ mov(eax, ebx); | 1457 __ mov(eax, ebx); |
1464 break; | 1458 break; |
1465 | 1459 |
1466 default: | 1460 default: |
1467 break; | 1461 break; |
1468 } | 1462 } |
1469 } | 1463 } |
1470 | 1464 |
1471 | 1465 |
1472 void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) { | 1466 void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) { |
1473 Label call_runtime; | 1467 Label right_arg_changed, call_runtime; |
1474 | 1468 |
1475 switch (op_) { | 1469 switch (op_) { |
1476 case Token::ADD: | 1470 case Token::ADD: |
1477 case Token::SUB: | 1471 case Token::SUB: |
1478 case Token::MUL: | 1472 case Token::MUL: |
1479 case Token::DIV: | 1473 case Token::DIV: |
1480 break; | 1474 break; |
1481 case Token::MOD: | 1475 case Token::MOD: |
1482 case Token::BIT_OR: | 1476 case Token::BIT_OR: |
1483 case Token::BIT_AND: | 1477 case Token::BIT_AND: |
1484 case Token::BIT_XOR: | 1478 case Token::BIT_XOR: |
1485 case Token::SAR: | 1479 case Token::SAR: |
1486 case Token::SHL: | 1480 case Token::SHL: |
1487 case Token::SHR: | 1481 case Token::SHR: |
1488 GenerateRegisterArgsPush(masm); | 1482 GenerateRegisterArgsPush(masm); |
1489 break; | 1483 break; |
1490 default: | 1484 default: |
1491 UNREACHABLE(); | 1485 UNREACHABLE(); |
1492 } | 1486 } |
1493 | 1487 |
| 1488 if (op_ == Token::MOD && has_fixed_right_arg_) { |
| 1489 // It is guaranteed that the value will fit into a Smi, because if it |
| 1490 // didn't, we wouldn't be here, see BinaryOp_Patch. |
| 1491 __ cmp(eax, Immediate(Smi::FromInt(fixed_right_arg_value()))); |
| 1492 __ j(not_equal, &right_arg_changed); |
| 1493 } |
| 1494 |
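A minimal host-side sketch of why guarding a fixed right operand for MOD can pay off, under the assumption (not shown in this diff) that the pinned value is a power of two; the helper name ModPowerOfTwo is hypothetical, not part of this patch. A truncating x % m with m == 1 << k collapses to a mask plus a sign fixup for negative left operands:

    #include <cstdint>

    // Hypothetical illustration only. JS % truncates toward zero, so a
    // negative left operand needs its sign restored after masking.
    int32_t ModPowerOfTwo(int32_t x, int32_t m) {  // assumes m == 1 << k, m > 0
      int32_t masked = x & (m - 1);
      return (x < 0 && masked != 0) ? masked - m : masked;
    }

As long as the guard above sees the same right operand, the stub stays valid; once it changes, control falls through to &right_arg_changed and the IC is patched.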
1494 if (result_type_ == BinaryOpIC::UNINITIALIZED || | 1495 if (result_type_ == BinaryOpIC::UNINITIALIZED || |
1495 result_type_ == BinaryOpIC::SMI) { | 1496 result_type_ == BinaryOpIC::SMI) { |
1496 BinaryOpStub_GenerateSmiCode( | 1497 BinaryOpStub_GenerateSmiCode( |
1497 masm, &call_runtime, NO_HEAPNUMBER_RESULTS, op_); | 1498 masm, &call_runtime, NO_HEAPNUMBER_RESULTS, op_); |
1498 } else { | 1499 } else { |
1499 BinaryOpStub_GenerateSmiCode( | 1500 BinaryOpStub_GenerateSmiCode( |
1500 masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_); | 1501 masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_); |
1501 } | 1502 } |
1502 | 1503 |
1503 // Code falls through if the result is not returned as either a smi or heap | 1504 // Code falls through if the result is not returned as either a smi or heap |
1504 // number. | 1505 // number. |
| 1506 __ bind(&right_arg_changed); |
1505 switch (op_) { | 1507 switch (op_) { |
1506 case Token::ADD: | 1508 case Token::ADD: |
1507 case Token::SUB: | 1509 case Token::SUB: |
1508 case Token::MUL: | 1510 case Token::MUL: |
1509 case Token::DIV: | 1511 case Token::DIV: |
1510 GenerateTypeTransition(masm); | 1512 GenerateTypeTransition(masm); |
1511 break; | 1513 break; |
1512 case Token::MOD: | 1514 case Token::MOD: |
1513 case Token::BIT_OR: | 1515 case Token::BIT_OR: |
1514 case Token::BIT_AND: | 1516 case Token::BIT_AND: |
(...skipping 82 matching lines...)
1597 Label call_runtime; | 1599 Label call_runtime; |
1598 ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32); | 1600 ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32); |
1599 | 1601 |
1600 // Floating point case. | 1602 // Floating point case. |
1601 switch (op_) { | 1603 switch (op_) { |
1602 case Token::ADD: | 1604 case Token::ADD: |
1603 case Token::SUB: | 1605 case Token::SUB: |
1604 case Token::MUL: | 1606 case Token::MUL: |
1605 case Token::DIV: | 1607 case Token::DIV: |
1606 case Token::MOD: { | 1608 case Token::MOD: { |
1607 Label not_floats; | 1609 Label not_floats, not_int32, right_arg_changed; |
1608 Label not_int32; | |
1609 if (CpuFeatures::IsSupported(SSE2)) { | 1610 if (CpuFeatures::IsSupported(SSE2)) { |
1610 CpuFeatureScope use_sse2(masm, SSE2); | 1611 CpuFeatureScope use_sse2(masm, SSE2); |
1611 // It could be that only SMIs have been seen at either the left | 1612 // It could be that only SMIs have been seen at either the left |
1612 // or the right operand. For precise type feedback, patch the IC | 1613 // or the right operand. For precise type feedback, patch the IC |
1613 // again if this changes. | 1614 // again if this changes. |
1614 // In theory, we would need the same check in the non-SSE2 case, | 1615 // In theory, we would need the same check in the non-SSE2 case, |
1615 // but since we don't support Crankshaft on such hardware we can | 1616 // but since we don't support Crankshaft on such hardware we can |
1616 // afford not to care about precise type feedback. | 1617 // afford not to care about precise type feedback. |
1617 if (left_type_ == BinaryOpIC::SMI) { | 1618 if (left_type_ == BinaryOpIC::SMI) { |
1618 __ JumpIfNotSmi(edx, ¬_int32); | 1619 __ JumpIfNotSmi(edx, ¬_int32); |
1619 } | 1620 } |
1620 if (right_type_ == BinaryOpIC::SMI) { | 1621 if (right_type_ == BinaryOpIC::SMI) { |
1621 __ JumpIfNotSmi(eax, ¬_int32); | 1622 __ JumpIfNotSmi(eax, ¬_int32); |
1622 } | 1623 } |
1623 FloatingPointHelper::LoadSSE2Operands(masm, ¬_floats); | 1624 FloatingPointHelper::LoadSSE2Operands(masm, ¬_floats); |
1624 FloatingPointHelper::CheckSSE2OperandsAreInt32(masm, ¬_int32, ecx); | 1625 FloatingPointHelper::CheckSSE2OperandIsInt32( |
| 1626 masm, ¬_int32, xmm0, ebx, ecx, xmm2); |
| 1627 FloatingPointHelper::CheckSSE2OperandIsInt32( |
| 1628 masm, ¬_int32, xmm1, edi, ecx, xmm2); |
1625 if (op_ == Token::MOD) { | 1629 if (op_ == Token::MOD) { |
| 1630 if (has_fixed_right_arg_) { |
| 1631 __ cmp(edi, Immediate(fixed_right_arg_value())); |
| 1632 __ j(not_equal, &right_arg_changed); |
| 1633 } |
1626 GenerateRegisterArgsPush(masm); | 1634 GenerateRegisterArgsPush(masm); |
1627 __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION); | 1635 __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION); |
1628 } else { | 1636 } else { |
1629 switch (op_) { | 1637 switch (op_) { |
1630 case Token::ADD: __ addsd(xmm0, xmm1); break; | 1638 case Token::ADD: __ addsd(xmm0, xmm1); break; |
1631 case Token::SUB: __ subsd(xmm0, xmm1); break; | 1639 case Token::SUB: __ subsd(xmm0, xmm1); break; |
1632 case Token::MUL: __ mulsd(xmm0, xmm1); break; | 1640 case Token::MUL: __ mulsd(xmm0, xmm1); break; |
1633 case Token::DIV: __ divsd(xmm0, xmm1); break; | 1641 case Token::DIV: __ divsd(xmm0, xmm1); break; |
1634 default: UNREACHABLE(); | 1642 default: UNREACHABLE(); |
1635 } | 1643 } |
(...skipping 32 matching lines...)
1668 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); | 1676 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); |
1669 __ ret(0); | 1677 __ ret(0); |
1670 __ bind(&after_alloc_failure); | 1678 __ bind(&after_alloc_failure); |
1671 __ fstp(0); // Pop FPU stack before calling runtime. | 1679 __ fstp(0); // Pop FPU stack before calling runtime. |
1672 __ jmp(&call_runtime); | 1680 __ jmp(&call_runtime); |
1673 } | 1681 } |
1674 } | 1682 } |
1675 | 1683 |
1676 __ bind(¬_floats); | 1684 __ bind(¬_floats); |
1677 __ bind(¬_int32); | 1685 __ bind(¬_int32); |
| 1686 __ bind(&right_arg_changed); |
1678 GenerateTypeTransition(masm); | 1687 GenerateTypeTransition(masm); |
1679 break; | 1688 break; |
1680 } | 1689 } |
1681 | 1690 |
1682 case Token::BIT_OR: | 1691 case Token::BIT_OR: |
1683 case Token::BIT_AND: | 1692 case Token::BIT_AND: |
1684 case Token::BIT_XOR: | 1693 case Token::BIT_XOR: |
1685 case Token::SAR: | 1694 case Token::SAR: |
1686 case Token::SHL: | 1695 case Token::SHL: |
1687 case Token::SHR: { | 1696 case Token::SHR: { |
(...skipping 1071 matching lines...)
2759 ASSERT(!scratch.is(right)); // We're about to clobber scratch. | 2768 ASSERT(!scratch.is(right)); // We're about to clobber scratch. |
2760 __ SmiUntag(scratch); | 2769 __ SmiUntag(scratch); |
2761 __ cvtsi2sd(xmm0, scratch); | 2770 __ cvtsi2sd(xmm0, scratch); |
2762 | 2771 |
2763 __ mov(scratch, right); | 2772 __ mov(scratch, right); |
2764 __ SmiUntag(scratch); | 2773 __ SmiUntag(scratch); |
2765 __ cvtsi2sd(xmm1, scratch); | 2774 __ cvtsi2sd(xmm1, scratch); |
2766 } | 2775 } |
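For context on LoadSSE2Smis above, a hedged C++ analogue of the tagging scheme, assuming ia32's one-bit smi tag (tag bit 0 == 0); SmiToDouble is a hypothetical name for illustration, not V8 API:

    #include <cstdint>

    // A smi on ia32 stores a 31-bit integer shifted left by one bit.
    double SmiToDouble(int32_t tagged_smi) {
      int32_t untagged = tagged_smi >> 1;    // SmiUntag: arithmetic shift right
      return static_cast<double>(untagged);  // cvtsi2sd: widen to double
    }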
2767 | 2776 |
2768 | 2777 |
2769 void FloatingPointHelper::CheckSSE2OperandsAreInt32(MacroAssembler* masm, | |
2770 Label* non_int32, | |
2771 Register scratch) { | |
2772 CheckSSE2OperandIsInt32(masm, non_int32, xmm0, scratch, scratch, xmm2); | |
2773 CheckSSE2OperandIsInt32(masm, non_int32, xmm1, scratch, scratch, xmm2); | |
2774 } | |
2775 | |
2776 | |
2777 void FloatingPointHelper::CheckSSE2OperandIsInt32(MacroAssembler* masm, | 2778 void FloatingPointHelper::CheckSSE2OperandIsInt32(MacroAssembler* masm, |
2778 Label* non_int32, | 2779 Label* non_int32, |
2779 XMMRegister operand, | 2780 XMMRegister operand, |
2780 Register int32_result, | 2781 Register int32_result, |
2781 Register scratch, | 2782 Register scratch, |
2782 XMMRegister xmm_scratch) { | 2783 XMMRegister xmm_scratch) { |
2783 __ cvttsd2si(int32_result, Operand(operand)); | 2784 __ cvttsd2si(int32_result, Operand(operand)); |
2784 __ cvtsi2sd(xmm_scratch, int32_result); | 2785 __ cvtsi2sd(xmm_scratch, int32_result); |
2785 __ pcmpeqd(xmm_scratch, operand); | 2786 __ pcmpeqd(xmm_scratch, operand); |
2786 __ movmskps(scratch, xmm_scratch); | 2787 __ movmskps(scratch, xmm_scratch); |
(...skipping 5130 matching lines...)
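A host-side analogue of the CheckSSE2OperandIsInt32 sequence above (cvttsd2si, cvtsi2sd, pcmpeqd, movmskps): truncate the double to int32, convert back, and require a bit-exact round trip. HasExactInt32Value is a hypothetical name for illustration; note the bitwise compare also rejects -0.0, which round-trips to +0.0 with a different bit pattern:

    #include <cstdint>
    #include <cstring>

    bool HasExactInt32Value(double operand, int32_t* int32_result) {
      // NB: the C++ cast is UB for out-of-range inputs, where cvttsd2si
      // instead yields 0x80000000; this is only an analogue of the asm.
      int32_t truncated = static_cast<int32_t>(operand);   // cvttsd2si
      double roundtrip = static_cast<double>(truncated);   // cvtsi2sd
      uint64_t a, b;                                       // pcmpeqd compares raw bits
      std::memcpy(&a, &operand, sizeof a);
      std::memcpy(&b, &roundtrip, sizeof b);
      if (a != b) return false;  // NaN, -0.0, fractions rejected
      *int32_result = truncated;
      return true;
    }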
7917 __ jmp(generic_construct_stub, RelocInfo::CODE_TARGET); | 7918 __ jmp(generic_construct_stub, RelocInfo::CODE_TARGET); |
7918 } | 7919 } |
7919 } | 7920 } |
7920 | 7921 |
7921 | 7922 |
7922 #undef __ | 7923 #undef __ |
7923 | 7924 |
7924 } } // namespace v8::internal | 7925 } } // namespace v8::internal |
7925 | 7926 |
7926 #endif // V8_TARGET_ARCH_IA32 | 7927 #endif // V8_TARGET_ARCH_IA32 |