OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 541 matching lines...)
552 (result2_.code() << 4) + | 552 (result2_.code() << 4) + |
553 (source_.code() << 8) + | 553 (source_.code() << 8) + |
554 (zeros_.code() << 12); | 554 (zeros_.code() << 12); |
555 } | 555 } |
556 | 556 |
557 void Generate(MacroAssembler* masm); | 557 void Generate(MacroAssembler* masm); |
558 }; | 558 }; |
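
A side note on the fragment above: the stub's minor key packs each register's code into its own 4-bit field, so the four registers together occupy the low 16 bits of the key. A minimal sketch of the same packing, with hypothetical names (MakeKey is not V8's API) and assuming every register code fits in 4 bits as the shifts imply (the field for result1_ is cut off above and inferred here):

    #include <cstdint>

    // Each register code lands in its own nibble: result1 in bits 0-3,
    // result2 in bits 4-7, source in bits 8-11, zeros in bits 12-15.
    uint32_t MakeKey(uint32_t result1, uint32_t result2,
                     uint32_t source, uint32_t zeros) {
      return result1 | (result2 << 4) | (source << 8) | (zeros << 12);
    }
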
559 | 559 |
560 | 560 |
561 void ConvertToDoubleStub::Generate(MacroAssembler* masm) { | 561 void ConvertToDoubleStub::Generate(MacroAssembler* masm) { |
562 #ifndef BIG_ENDIAN_FLOATING_POINT | 562 Register exponent, mantissa; |
563 Register exponent = result1_; | 563 if (kArchEndian == kLittle) { |
564 Register mantissa = result2_; | 564 exponent = result1_; |
565 #else | 565 mantissa = result2_; |
566 Register exponent = result2_; | 566 } else { |
567 Register mantissa = result1_; | 567 exponent = result2_; |
568 #endif | 568 mantissa = result1_; |
| 569 } |
569 Label not_special; | 570 Label not_special; |
570 // Convert from Smi to integer. | 571 // Convert from Smi to integer. |
571 __ sra(source_, source_, kSmiTagSize); | 572 __ sra(source_, source_, kSmiTagSize); |
572 // Move sign bit from source to destination. This works because the sign bit | 573 // Move sign bit from source to destination. This works because the sign bit |
573 // in the exponent word of the double has the same position and polarity as | 574 // in the exponent word of the double has the same position and polarity as |
574 // the 2's complement sign bit in a Smi. | 575 // the 2's complement sign bit in a Smi. |
575 STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u); | 576 STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u); |
576 __ And(exponent, source_, Operand(HeapNumber::kSignMask)); | 577 __ And(exponent, source_, Operand(HeapNumber::kSignMask)); |
577 // Subtract from 0 if source was negative. | 578 // Subtract from 0 if source was negative. |
578 __ subu(at, zero_reg, source_); | 579 __ subu(at, zero_reg, source_); |
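
The comment above is the key to this sequence: once the Smi tag is shifted out, bit 31 of the integer is its 2's complement sign, and bit 31 of the double's exponent word is the IEEE-754 sign, so the bit can be copied across with a single And; the subu then builds |value| for the negative case. A small sketch of the same idea in portable C++ (names are illustrative; Smis are 31-bit, so the negation cannot overflow):

    #include <cstdint>

    const uint32_t kSignMask = 0x80000000u;  // mirrors HeapNumber::kSignMask

    void SplitSignAndMagnitude(int32_t value,
                               uint32_t* sign_bit, uint32_t* magnitude) {
      // The sign bit goes straight into the exponent word of the double.
      *sign_bit = static_cast<uint32_t>(value) & kSignMask;
      // Subtract from 0 if the source was negative, giving |value|;
      // safe because a 31-bit Smi can never be INT32_MIN.
      *magnitude = (*sign_bit != 0) ? static_cast<uint32_t>(-value)
                                    : static_cast<uint32_t>(value);
    }
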
(...skipping 85 matching lines...)
664 __ Branch(&error, ne, scratch, Operand(zero_reg)); | 665 __ Branch(&error, ne, scratch, Operand(zero_reg)); |
665 __ Move(result_reg, scratch3); | 666 __ Move(result_reg, scratch3); |
666 __ Branch(&done); | 667 __ Branch(&done); |
667 __ bind(&error); | 668 __ bind(&error); |
668 } | 669 } |
669 | 670 |
670 // Load the double value and perform a manual truncation. | 671 // Load the double value and perform a manual truncation. |
671 Register input_high = scratch2; | 672 Register input_high = scratch2; |
672 Register input_low = scratch3; | 673 Register input_low = scratch3; |
673 | 674 |
674 __ lw(input_low, MemOperand(input_reg, double_offset)); | 675 __ lw(input_low, |
675 __ lw(input_high, MemOperand(input_reg, double_offset + kIntSize)); | 676 MemOperand(input_reg, double_offset + Register::kMantissaOffset)); |
| 677 __ lw(input_high, |
| 678 MemOperand(input_reg, double_offset + Register::kExponentOffset)); |
676 | 679 |
677 Label normal_exponent, restore_sign; | 680 Label normal_exponent, restore_sign; |
678 // Extract the biased exponent in result. | 681 // Extract the biased exponent in result. |
679 __ Ext(result_reg, | 682 __ Ext(result_reg, |
680 input_high, | 683 input_high, |
681 HeapNumber::kExponentShift, | 684 HeapNumber::kExponentShift, |
682 HeapNumber::kExponentBits); | 685 HeapNumber::kExponentBits); |
683 | 686 |
684 // Check for Infinity and NaNs, which should return 0. | 687 // Check for Infinity and NaNs, which should return 0. |
685 __ Subu(scratch, result_reg, HeapNumber::kExponentMask); | 688 __ Subu(scratch, result_reg, HeapNumber::kExponentMask); |
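
For context on this hunk: the stub reads the double as two 32-bit words, whose offsets now come from Register::kMantissaOffset and Register::kExponentOffset so the same code works on both endiannesses, then truncates by hand when hardware truncation cannot be used, keeping the low 32 bits of the integer result (Infinity and NaN fall out as 0). A portable sketch of that manual truncation, sidestepping the endianness question with memcpy (the function name is illustrative; the constants mirror the IEEE-754 double layout):

    #include <cstdint>
    #include <cstring>

    int32_t TruncateDoubleToInt32(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof(bits));
      uint32_t exponent_word = static_cast<uint32_t>(bits >> 32);
      int biased = (exponent_word >> 20) & 0x7FF;  // 11 exponent bits
      if (biased == 0x7FF) return 0;               // Infinity or NaN -> 0
      int exponent = biased - 1023;                // remove the bias
      if (exponent < 0) return 0;                  // |d| < 1 truncates to 0
      if (exponent > 83) return 0;                 // low 32 bits all zero
      // 52 fraction bits plus the implicit leading 1.
      uint64_t mantissa = (bits & 0xFFFFFFFFFFFFFull) | (1ull << 52);
      uint64_t shifted = (exponent <= 52) ? (mantissa >> (52 - exponent))
                                          : (mantissa << (exponent - 52));
      uint32_t low = static_cast<uint32_t>(shifted);
      if (exponent_word & 0x80000000u) low = 0u - low;  // apply the sign
      return static_cast<int32_t>(low);
    }
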
(...skipping 2839 matching lines...)
3525 | 3528 |
3526 __ And(scratch4, src, Operand(kReadAlignmentMask)); | 3529 __ And(scratch4, src, Operand(kReadAlignmentMask)); |
3527 __ Branch(&simple_loop, eq, scratch4, Operand(zero_reg)); | 3530 __ Branch(&simple_loop, eq, scratch4, Operand(zero_reg)); |
3528 | 3531 |
3529 // Loop for src/dst that are not aligned the same way. | 3532 // Loop for src/dst that are not aligned the same way. |
3530 // This loop uses lwl and lwr instructions. These instructions | 3533 // This loop uses lwl and lwr instructions. These instructions |
3531 // depend on the endianness, and the implementation assumes little-endian. | 3534 // depend on the endianness, and the implementation assumes little-endian. |
3532 { | 3535 { |
3533 Label loop; | 3536 Label loop; |
3534 __ bind(&loop); | 3537 __ bind(&loop); |
3535 __ lwr(scratch1, MemOperand(src)); | 3538 if (kArchEndian == kBig) { |
3536 __ Addu(src, src, Operand(kReadAlignment)); | 3539 __ lwl(scratch1, MemOperand(src)); |
3537 __ lwl(scratch1, MemOperand(src, -1)); | 3540 __ Addu(src, src, Operand(kReadAlignment)); |
| 3541 __ lwr(scratch1, MemOperand(src, -1)); |
| 3542 } else { |
| 3543 __ lwr(scratch1, MemOperand(src)); |
| 3544 __ Addu(src, src, Operand(kReadAlignment)); |
| 3545 __ lwl(scratch1, MemOperand(src, -1)); |
| 3546 } |
3538 __ sw(scratch1, MemOperand(dest)); | 3547 __ sw(scratch1, MemOperand(dest)); |
3539 __ Addu(dest, dest, Operand(kReadAlignment)); | 3548 __ Addu(dest, dest, Operand(kReadAlignment)); |
3540 __ Subu(scratch2, limit, dest); | 3549 __ Subu(scratch2, limit, dest); |
3541 __ Branch(&loop, ge, scratch2, Operand(kReadAlignment)); | 3550 __ Branch(&loop, ge, scratch2, Operand(kReadAlignment)); |
3542 } | 3551 } |
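
For readers unfamiliar with these instructions: lwr fills the low-order bytes of the destination register from an unaligned address and lwl fills the remaining high-order bytes, so the pair assembles one word from an unaligned source; on big-endian MIPS the two roles are swapped, which is exactly what the new branch handles. A portable equivalent of one such unaligned word read (the compiler emits lwl/lwr, or whatever the target offers, on its own):

    #include <cstdint>
    #include <cstring>

    uint32_t LoadUnalignedWord(const uint8_t* src) {
      uint32_t word;
      std::memcpy(&word, src, sizeof(word));  // legal at any alignment
      return word;
    }
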
3543 | 3552 |
3544 __ Branch(&byte_loop); | 3553 __ Branch(&byte_loop); |
3545 | 3554 |
3546 // Simple loop. | 3555 // Simple loop. |
3547 // Copy words from src to dest, until less than four bytes left. | 3556 // Copy words from src to dest, until less than four bytes left. |
(...skipping 2096 matching lines...)
5644 MemOperand(fp, 6 * kPointerSize), | 5653 MemOperand(fp, 6 * kPointerSize), |
5645 NULL); | 5654 NULL); |
5646 } | 5655 } |
5647 | 5656 |
5648 | 5657 |
5649 #undef __ | 5658 #undef __ |
5650 | 5659 |
5651 } } // namespace v8::internal | 5660 } } // namespace v8::internal |
5652 | 5661 |
5653 #endif // V8_TARGET_ARCH_MIPS | 5662 #endif // V8_TARGET_ARCH_MIPS |