OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 549 matching lines...)
560 | kFCSRInvalidOpFlagMask); | 560 | kFCSRInvalidOpFlagMask); |
561 // If we had no exceptions we are done. | 561 // If we had no exceptions we are done. |
562 __ Branch(&done, eq, scratch, Operand(zero_reg)); | 562 __ Branch(&done, eq, scratch, Operand(zero_reg)); |
563 } | 563 } |
564 | 564 |
565 // Load the double value and perform a manual truncation. | 565 // Load the double value and perform a manual truncation. |
566 Register input_high = scratch2; | 566 Register input_high = scratch2; |
567 Register input_low = scratch3; | 567 Register input_low = scratch3; |
568 __ Move(input_low, input_high, double_input); | 568 __ Move(input_low, input_high, double_input); |
569 | 569 |
570 __ EmitOutOfInt32RangeTruncate(result_reg, | 570 Label normal_exponent, restore_sign; |
571 input_high, | 571 // Extract the biased exponent in result. |
572 input_low, | 572 __ Ext(result_reg, |
573 scratch); | 573 input_high, |
| 574 HeapNumber::kExponentShift, |
| 575 HeapNumber::kExponentBits); |
| 576 |
| 577 // Check for Infinity and NaNs, which should return 0. |
| 578 __ Subu(scratch, result_reg, HeapNumber::kExponentMask); |
| 579 __ Movz(result_reg, zero_reg, scratch); |
| 580 __ Branch(&done, eq, scratch, Operand(zero_reg)); |
| 581 |
| 582 // Express exponent as delta to (number of mantissa bits + 31). |
| 583 __ Subu(result_reg, |
| 584 result_reg, |
| 585 Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31)); |
| 586 |
| 587 // If the delta is strictly positive, all bits would be shifted away, |
| 588 // which means that we can return 0. |
| 589 __ Branch(&normal_exponent, le, result_reg, Operand(zero_reg)); |
| 590 __ mov(result_reg, zero_reg); |
| 591 __ Branch(&done); |
| 592 |
| 593 __ bind(&normal_exponent); |
| 594 const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1; |
| 595 // Calculate shift. |
| 596 __ Addu(scratch, result_reg, Operand(kShiftBase + HeapNumber::kMantissaBits)); |
| 597 |
| 598 // Save the sign. |
| 599 Register sign = result_reg; |
| 600 result_reg = no_reg; |
| 601 __ And(sign, input_high, Operand(HeapNumber::kSignMask)); |
| 602 |
| 603 // On ARM shifts > 31 bits are valid and will result in zero. On MIPS we need |
| 604 // to check for this specific case. |
| 605 Label high_shift_needed, high_shift_done; |
| 606 __ Branch(&high_shift_needed, lt, scratch, Operand(32)); |
| 607 __ mov(input_high, zero_reg); |
| 608 __ Branch(&high_shift_done); |
| 609 __ bind(&high_shift_needed); |
| 610 |
| 611 // Set the implicit 1 before the mantissa part in input_high. |
| 612 __ Or(input_high, |
| 613 input_high, |
| 614 Operand(1 << HeapNumber::kMantissaBitsInTopWord)); |
| 615 // Shift the mantissa bits to the correct position. |
| 616 // We don't need to clear non-mantissa bits as they will be shifted away. |
| 617 // If they weren't, it would mean that the answer is in the 32bit range. |
| 618 __ sllv(input_high, input_high, scratch); |
| 619 |
| 620 __ bind(&high_shift_done); |
| 621 |
| 622 // Replace the shifted bits with bits from the lower mantissa word. |
| 623 Label pos_shift, shift_done; |
| 624 __ li(at, 32); |
| 625 __ subu(scratch, at, scratch); |
| 626 __ Branch(&pos_shift, ge, scratch, Operand(zero_reg)); |
| 627 |
| 628 // Negate scratch. |
| 629 __ Subu(scratch, zero_reg, scratch); |
| 630 __ sllv(input_low, input_low, scratch); |
| 631 __ Branch(&shift_done); |
| 632 |
| 633 __ bind(&pos_shift); |
| 634 __ srlv(input_low, input_low, scratch); |
| 635 |
| 636 __ bind(&shift_done); |
| 637 __ Or(input_high, input_high, Operand(input_low)); |
| 638 // Restore sign if necessary. |
| 639 __ mov(scratch, sign); |
| 640 result_reg = sign; |
| 641 sign = no_reg; |
| 642 __ Subu(result_reg, zero_reg, input_high); |
| 643 __ Movz(result_reg, input_high, scratch); |
574 | 644 |
575 __ bind(&done); | 645 __ bind(&done); |
576 | 646 |
577 __ Pop(scratch, scratch2, scratch3); | 647 __ Pop(scratch, scratch2, scratch3); |
578 __ Ret(); | 648 __ Ret(); |
579 } | 649 } |
580 | 650 |
581 | 651 |
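Note (not part of the patch): the hunk above inlines the out-of-range truncation that EmitOutOfInt32RangeTruncate used to perform. As a rough, illustrative C++ sketch of what the generated MIPS code computes -- assuming the usual IEEE-754 double layout behind HeapNumber's constants, with every name and constant below invented for the example rather than taken from V8 -- the slow path boils down to:

    // Illustrative sketch only; mirrors the generated code, not V8 source.
    #include <cstdint>
    #include <cstring>

    static int32_t TruncateDoubleToInt32(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));
      uint32_t input_high = static_cast<uint32_t>(bits >> 32);
      uint32_t input_low  = static_cast<uint32_t>(bits);

      // Biased exponent: 11 bits starting at bit 20 of the high word.
      int32_t exponent = (input_high >> 20) & 0x7FF;
      if (exponent == 0x7FF) return 0;              // Infinity or NaN -> 0.
      if (exponent < 1023 + 31) {
        // In-range values never reach the manual path; trunc_w_d handled them
        // on the fast path guarded by the FCSR exception-flag check above.
        return static_cast<int32_t>(value);
      }

      // Delta to (mantissa bits + 31); strictly positive means every
      // mantissa bit lands above bit 31, so the low 32 bits are 0.
      int32_t delta = exponent - (1023 + 52 + 31);
      if (delta > 0) return 0;

      // Left shift for the high mantissa word (implicit 1 restored);
      // equals delta + (kNonMantissaBitsInTopWord - 1) + kMantissaBits.
      int32_t shift = delta + 11 + 52;
      uint32_t high = (shift < 32) ? ((input_high | (1u << 20)) << shift) : 0;

      // Bring in the bits from the low mantissa word.
      int32_t low_shift = 32 - shift;
      uint32_t low = (low_shift >= 0) ? (input_low >> low_shift)
                                      : (input_low << -low_shift);

      uint32_t magnitude = high | low;
      // Restore the sign (modular negation, as ToInt32 requires).
      uint32_t result = (input_high & 0x80000000u) ? 0u - magnitude : magnitude;
      return static_cast<int32_t>(result);  // two's complement assumed
    }

The explicit shift >= 32 special cases in the assembly exist because MIPS sllv/srlv only use the low five bits of the shift amount, unlike ARM, where shifts greater than 31 simply produce zero.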
582 bool WriteInt32ToHeapNumberStub::IsPregenerated() { | 652 bool WriteInt32ToHeapNumberStub::IsPregenerated() { |
583 // These variants are compiled ahead of time. See next method. | 653 // These variants are compiled ahead of time. See next method. |
(...skipping 895 matching lines...)
1479 bool smi_operands, | 1549 bool smi_operands, |
1480 Label* not_numbers, | 1550 Label* not_numbers, |
1481 Label* gc_required, | 1551 Label* gc_required, |
1482 Label* miss, | 1552 Label* miss, |
1483 Token::Value op, | 1553 Token::Value op, |
1484 OverwriteMode mode) { | 1554 OverwriteMode mode) { |
1485 Register left = a1; | 1555 Register left = a1; |
1486 Register right = a0; | 1556 Register right = a0; |
1487 Register scratch1 = t3; | 1557 Register scratch1 = t3; |
1488 Register scratch2 = t5; | 1558 Register scratch2 = t5; |
1489 Register scratch3 = t0; | |
1490 | 1559 |
1491 ASSERT(smi_operands || (not_numbers != NULL)); | 1560 ASSERT(smi_operands || (not_numbers != NULL)); |
1492 if (smi_operands) { | 1561 if (smi_operands) { |
1493 __ AssertSmi(left); | 1562 __ AssertSmi(left); |
1494 __ AssertSmi(right); | 1563 __ AssertSmi(right); |
1495 } | 1564 } |
1496 if (left_type == BinaryOpIC::SMI) { | 1565 if (left_type == BinaryOpIC::SMI) { |
1497 __ JumpIfNotSmi(left, miss); | 1566 __ JumpIfNotSmi(left, miss); |
1498 } | 1567 } |
1499 if (right_type == BinaryOpIC::SMI) { | 1568 if (right_type == BinaryOpIC::SMI) { |
(...skipping 83 matching lines...)
1583 case Token::BIT_XOR: | 1652 case Token::BIT_XOR: |
1584 case Token::BIT_AND: | 1653 case Token::BIT_AND: |
1585 case Token::SAR: | 1654 case Token::SAR: |
1586 case Token::SHR: | 1655 case Token::SHR: |
1587 case Token::SHL: { | 1656 case Token::SHL: { |
1588 if (smi_operands) { | 1657 if (smi_operands) { |
1589 __ SmiUntag(a3, left); | 1658 __ SmiUntag(a3, left); |
1590 __ SmiUntag(a2, right); | 1659 __ SmiUntag(a2, right); |
1591 } else { | 1660 } else { |
1592 // Convert operands to 32-bit integers. Right in a2 and left in a3. | 1661 // Convert operands to 32-bit integers. Right in a2 and left in a3. |
1593 __ TruncateNumberToI( | 1662 __ TruncateNumberToI(left, a3, heap_number_map, scratch1, not_numbers); |
1594 left, a3, heap_number_map, | 1663 __ TruncateNumberToI(right, a2, heap_number_map, scratch1, not_numbers); |
1595 scratch1, scratch2, scratch3, not_numbers); | |
1596 __ TruncateNumberToI( | |
1597 right, a2, heap_number_map, | |
1598 scratch1, scratch2, scratch3, not_numbers); | |
1599 } | 1664 } |
1600 Label result_not_a_smi; | 1665 Label result_not_a_smi; |
1601 switch (op) { | 1666 switch (op) { |
1602 case Token::BIT_OR: | 1667 case Token::BIT_OR: |
1603 __ Or(a2, a3, Operand(a2)); | 1668 __ Or(a2, a3, Operand(a2)); |
1604 break; | 1669 break; |
1605 case Token::BIT_XOR: | 1670 case Token::BIT_XOR: |
1606 __ Xor(a2, a3, Operand(a2)); | 1671 __ Xor(a2, a3, Operand(a2)); |
1607 break; | 1672 break; |
1608 case Token::BIT_AND: | 1673 case Token::BIT_AND: |
(...skipping 5581 matching lines...)
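Note (not part of the patch): the TruncateNumberToI calls in the preceding hunk implement ECMAScript's ToInt32 on the operands before the bitwise op, which is why the conversion truncates modulo 2^32 rather than clamping. A minimal C++ sketch of that conversion, for orientation only (plain standard-library calls, not V8 helpers):

    #include <cmath>
    #include <cstdint>

    // Sketch of ES5 ToInt32 (section 9.5); not the V8 implementation.
    static int32_t ToInt32(double value) {
      if (std::isnan(value) || std::isinf(value)) return 0;
      double truncated = std::trunc(value);                // round toward zero
      double modulo = std::fmod(truncated, 4294967296.0);  // reduce mod 2^32
      if (modulo < 0) modulo += 4294967296.0;
      uint32_t bits = static_cast<uint32_t>(modulo);
      return static_cast<int32_t>(bits);  // reinterpret as signed (two's complement)
    }

For example, ToInt32(4294967298.0) is 2 and ToInt32(-2147483649.0) is 2147483647, which is also what the truncation stub earlier in this diff produces for those inputs.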
7190 __ bind(&fast_elements_case); | 7255 __ bind(&fast_elements_case); |
7191 GenerateCase(masm, FAST_ELEMENTS); | 7256 GenerateCase(masm, FAST_ELEMENTS); |
7192 } | 7257 } |
7193 | 7258 |
7194 | 7259 |
7195 #undef __ | 7260 #undef __ |
7196 | 7261 |
7197 } } // namespace v8::internal | 7262 } } // namespace v8::internal |
7198 | 7263 |
7199 #endif // V8_TARGET_ARCH_MIPS | 7264 #endif // V8_TARGET_ARCH_MIPS |