| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 778 matching lines...) |
| 789 str(src2, dst2, cond); | 789 str(src2, dst2, cond); |
| 790 } | 790 } |
| 791 } | 791 } |
| 792 } | 792 } |
| 793 | 793 |
| 794 | 794 |
| 795 void MacroAssembler::VFPEnsureFPSCRState(Register scratch) { | 795 void MacroAssembler::VFPEnsureFPSCRState(Register scratch) { |
| 796 // If needed, restore wanted bits of FPSCR. | 796 // If needed, restore wanted bits of FPSCR. |
| 797 Label fpscr_done; | 797 Label fpscr_done; |
| 798 vmrs(scratch); | 798 vmrs(scratch); |
| 799 if (emit_debug_code()) { | |
| 800 tst(scratch, Operand(kVFPRoundingModeMask)); | |
| 801 Assert(eq, kDefaultRoundingModeNotSet); | |
| 802 } | |
| 803 tst(scratch, Operand(kVFPDefaultNaNModeControlBit)); | 799 tst(scratch, Operand(kVFPDefaultNaNModeControlBit)); |
| 804 b(ne, &fpscr_done); | 800 b(ne, &fpscr_done); |
| 805 orr(scratch, scratch, Operand(kVFPDefaultNaNModeControlBit)); | 801 orr(scratch, scratch, Operand(kVFPDefaultNaNModeControlBit)); |
| 806 vmsr(scratch); | 802 vmsr(scratch); |
| 807 bind(&fpscr_done); | 803 bind(&fpscr_done); |
| 808 } | 804 } |
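The NEW side only writes FPSCR when the DefaultNaN control bit is clear; the debug-only rounding-mode assertion from the OLD side is gone. A minimal host-side C++ sketch of that read-test-set sequence (a hypothetical helper, not emitted code; the constant mirrors ARM's FPSCR.DN bit, bit 25, which is what kVFPDefaultNaNModeControlBit names):

#include <cstdint>

constexpr uint32_t kDefaultNaNBit = 1u << 25;  // FPSCR.DN, per kVFPDefaultNaNModeControlBit.

// Models the vmrs / tst / orr / vmsr sequence above.
uint32_t EnsureDefaultNaNMode(uint32_t fpscr) {
  if (fpscr & kDefaultNaNBit) return fpscr;  // Bit already set: skip the vmsr.
  return fpscr | kDefaultNaNBit;             // Set DN so NaN results become the default NaN.
}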
| 809 | 805 |
| 810 | 806 |
| 811 void MacroAssembler::VFPCanonicalizeNaN(const DwVfpRegister dst, | 807 void MacroAssembler::VFPCanonicalizeNaN(const DwVfpRegister dst, |
| 812 const DwVfpRegister src, | 808 const DwVfpRegister src, |
| (...skipping 2984 matching lines...) |
| 3797 | 3793 |
| 3798 | 3794 |
| 3799 void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) { | 3795 void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) { |
| 3800 Usat(output_reg, 8, Operand(input_reg)); | 3796 Usat(output_reg, 8, Operand(input_reg)); |
| 3801 } | 3797 } |
| 3802 | 3798 |
| 3803 | 3799 |
| 3804 void MacroAssembler::ClampDoubleToUint8(Register result_reg, | 3800 void MacroAssembler::ClampDoubleToUint8(Register result_reg, |
| 3805 DwVfpRegister input_reg, | 3801 DwVfpRegister input_reg, |
| 3806 LowDwVfpRegister double_scratch) { | 3802 LowDwVfpRegister double_scratch) { |
| 3803 Label above_zero; |
| 3807 Label done; | 3804 Label done; |
| 3805 Label in_bounds; |
| 3808 | 3806 |
| 3809 // Handle inputs >= 255 (including +infinity). | 3807 VFPCompareAndSetFlags(input_reg, 0.0); |
| 3808 b(gt, &above_zero); |
| 3809 |
| 3810 // Double value is less than or equal to zero, or NaN: return 0. |
| 3811 mov(result_reg, Operand::Zero()); |
| 3812 b(al, &done); |
| 3813 |
| 3814 // Double value is greater than zero; values above 255 (including +Infinity) return 255. |
| 3815 bind(&above_zero); |
| 3810 Vmov(double_scratch, 255.0, result_reg); | 3816 Vmov(double_scratch, 255.0, result_reg); |
| 3817 VFPCompareAndSetFlags(input_reg, double_scratch); |
| 3818 b(le, &in_bounds); |
| 3811 mov(result_reg, Operand(255)); | 3819 mov(result_reg, Operand(255)); |
| 3812 VFPCompareAndSetFlags(input_reg, double_scratch); | 3820 b(al, &done); |
| 3813 b(ge, &done); | |
| 3814 | 3821 |
| 3815 // For inputs < 255 (including negative) vcvt_u32_f64 with round-to-nearest | 3822 // In [0, 255] range: convert with round-to-nearest. |
| 3816 // rounding mode will provide the correct result. | 3823 bind(&in_bounds); |
| 3817 vcvt_u32_f64(double_scratch.low(), input_reg, kFPSCRRounding); | 3824 // Save FPSCR. |
| 3825 vmrs(ip); |
| 3826 // Set rounding mode to round to the nearest integer by clearing bits[23:22]. |
| 3827 bic(result_reg, ip, Operand(kVFPRoundingModeMask)); |
| 3828 vmsr(result_reg); |
| 3829 vcvt_s32_f64(double_scratch.low(), input_reg, kFPSCRRounding); |
| 3818 vmov(result_reg, double_scratch.low()); | 3830 vmov(result_reg, double_scratch.low()); |
| 3819 | 3831 // Restore FPSCR. |
| 3832 vmsr(ip); |
| 3820 bind(&done); | 3833 bind(&done); |
| 3821 } | 3834 } |
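Taken together, the new control flow gives ClampDoubleToUint8 these semantics: NaN and values less than or equal to zero clamp to 0, values above 255 (including +Infinity) clamp to 255, and in-range values are converted with round-to-nearest under the temporarily reset FPSCR rounding mode. A hypothetical C++ model of that behavior (a sketch of the intended semantics, not the generated code; std::lrint under the default FP environment rounds to nearest-even, matching FPSCR bits[23:22] == 0):

#include <cmath>
#include <cstdint>

uint8_t ClampDoubleToUint8Model(double value) {
  if (!(value > 0.0)) return 0;                    // NaN and values <= 0.
  if (value > 255.0) return 255;                   // Includes +Infinity.
  return static_cast<uint8_t>(std::lrint(value));  // Round to nearest (ties to even).
}

The vmrs/bic/vmsr pair around vcvt_s32_f64 is what guarantees round-to-nearest regardless of the rounding mode the caller left in FPSCR.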
| 3822 | 3835 |
| 3823 | 3836 |
| 3824 void MacroAssembler::Throw(BailoutReason reason) { | 3837 void MacroAssembler::Throw(BailoutReason reason) { |
| 3825 Label throw_start; | 3838 Label throw_start; |
| 3826 bind(&throw_start); | 3839 bind(&throw_start); |
| 3827 #ifdef DEBUG | 3840 #ifdef DEBUG |
| 3828 const char* msg = GetBailoutReason(reason); | 3841 const char* msg = GetBailoutReason(reason); |
| 3829 if (msg != NULL) { | 3842 if (msg != NULL) { |
| (...skipping 255 matching lines...) |
| 4085 sub(result, result, Operand(dividend)); | 4098 sub(result, result, Operand(dividend)); |
| 4086 } | 4099 } |
| 4087 if (ms.shift() > 0) mov(result, Operand(result, ASR, ms.shift())); | 4100 if (ms.shift() > 0) mov(result, Operand(result, ASR, ms.shift())); |
| 4088 add(result, result, Operand(dividend, LSR, 31)); | 4101 add(result, result, Operand(dividend, LSR, 31)); |
| 4089 } | 4102 } |
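The visible tail of this truncating-division-by-constant helper corrects the high half of the multiply, applies the arithmetic shift, and adds the dividend's sign bit so the quotient truncates toward zero. A hedged C++ sketch of the underlying technique (Hacker's Delight style magic-number division); magic, shift, and divisor are hypothetical stand-ins for ms.multiplier(), ms.shift(), and the constant divisor, and the two correction branches are the ones the elided lines choose between:

#include <cstdint>

int32_t TruncatingDivByConstant(int32_t dividend, int32_t divisor,
                                int32_t magic, int shift) {
  // High 32 bits of the 64-bit product, i.e. what smull leaves in the high register.
  int32_t q = static_cast<int32_t>((static_cast<int64_t>(magic) * dividend) >> 32);
  if (divisor > 0 && magic < 0) q += dividend;  // add(result, result, Operand(dividend))
  if (divisor < 0 && magic > 0) q -= dividend;  // sub(result, result, Operand(dividend))
  if (shift > 0) q >>= shift;                   // mov(result, Operand(result, ASR, ms.shift()))
  q += static_cast<uint32_t>(dividend) >> 31;   // add(result, result, Operand(dividend, LSR, 31))
  return q;
}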
| 4090 | 4103 |
| 4091 | 4104 |
| 4092 } } // namespace v8::internal | 4105 } } // namespace v8::internal |
| 4093 | 4106 |
| 4094 #endif // V8_TARGET_ARCH_ARM | 4107 #endif // V8_TARGET_ARCH_ARM |