| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 778 matching lines...) |
| 789 str(src2, dst2, cond); | 789 str(src2, dst2, cond); |
| 790 } | 790 } |
| 791 } | 791 } |
| 792 } | 792 } |
| 793 | 793 |
| 794 | 794 |
| 795 void MacroAssembler::VFPEnsureFPSCRState(Register scratch) { | 795 void MacroAssembler::VFPEnsureFPSCRState(Register scratch) { |
| 796 // If needed, restore wanted bits of FPSCR. | 796 // If needed, restore wanted bits of FPSCR. |
| 797 Label fpscr_done; | 797 Label fpscr_done; |
| 798 vmrs(scratch); | 798 vmrs(scratch); |
| 799 if (emit_debug_code()) { |
| 800 Label rounding_mode_correct; |
| 801 tst(scratch, Operand(kVFPRoundingModeMask)); |
| 802 b(eq, &rounding_mode_correct); |
| 803 // Can't call Assert here, as it will recurse back here. |
| 804 stop("Default rounding mode not set"); |
| 805 bind(&rounding_mode_correct); |
| 806 } |
| 799 tst(scratch, Operand(kVFPDefaultNaNModeControlBit)); | 807 tst(scratch, Operand(kVFPDefaultNaNModeControlBit)); |
| 800 b(ne, &fpscr_done); | 808 b(ne, &fpscr_done); |
| 801 orr(scratch, scratch, Operand(kVFPDefaultNaNModeControlBit)); | 809 orr(scratch, scratch, Operand(kVFPDefaultNaNModeControlBit)); |
| 802 vmsr(scratch); | 810 vmsr(scratch); |
| 803 bind(&fpscr_done); | 811 bind(&fpscr_done); |
| 804 } | 812 } |
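The added emit_debug_code() block asserts that the FPSCR rounding-mode field is still the default round-to-nearest before the DefaultNaN bit is established; it uses stop() rather than Assert() because Assert() would call back into this helper. As a minimal C++ sketch (not V8 code) of what the routine guarantees, operating on a plain uint32_t FPSCR image and assuming the usual ARM FPSCR layout (RMode in bits [23:22], DN in bit 25, matching kVFPRoundingModeMask and kVFPDefaultNaNModeControlBit):

    #include <cassert>
    #include <cstdint>

    constexpr uint32_t kRoundingModeMask = 3u << 22;  // assumed equal to kVFPRoundingModeMask
    constexpr uint32_t kDefaultNaNBit = 1u << 25;     // assumed equal to kVFPDefaultNaNModeControlBit

    uint32_t EnsureFPSCRState(uint32_t fpscr, bool debug_code) {
      if (debug_code) {
        // Rounding mode must still be the default round-to-nearest (RMode == 0).
        assert((fpscr & kRoundingModeMask) == 0 && "Default rounding mode not set");
      }
      if ((fpscr & kDefaultNaNBit) == 0) {
        fpscr |= kDefaultNaNBit;  // only write FPSCR back when the bit was clear
      }
      return fpscr;
    }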
| 805 | 813 |
| 806 | 814 |
| 807 void MacroAssembler::VFPCanonicalizeNaN(const DwVfpRegister dst, | 815 void MacroAssembler::VFPCanonicalizeNaN(const DwVfpRegister dst, |
| 808 const DwVfpRegister src, | 816 const DwVfpRegister src, |
| (...skipping 2984 matching lines...) |
| 3793 | 3801 |
| 3794 | 3802 |
| 3795 void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) { | 3803 void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) { |
| 3796 Usat(output_reg, 8, Operand(input_reg)); | 3804 Usat(output_reg, 8, Operand(input_reg)); |
| 3797 } | 3805 } |
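Usat(output_reg, 8, Operand(input_reg)) emits ARM USAT #8, which saturates a signed 32-bit value into [0, 255] in a single instruction. A scalar C++ equivalent, as a sketch:

    #include <cstdint>

    // Scalar equivalent of USAT #8: clamp a signed 32-bit value to [0, 255].
    uint8_t ClampUint8(int32_t value) {
      if (value < 0) return 0;
      if (value > 255) return 255;
      return static_cast<uint8_t>(value);
    }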
| 3798 | 3806 |
| 3799 | 3807 |
| 3800 void MacroAssembler::ClampDoubleToUint8(Register result_reg, | 3808 void MacroAssembler::ClampDoubleToUint8(Register result_reg, |
| 3801 DwVfpRegister input_reg, | 3809 DwVfpRegister input_reg, |
| 3802 LowDwVfpRegister double_scratch) { | 3810 LowDwVfpRegister double_scratch) { |
| 3803 Label above_zero; | |
| 3804 Label done; | 3811 Label done; |
| 3805 Label in_bounds; | |
| 3806 | 3812 |
| 3807 VFPCompareAndSetFlags(input_reg, 0.0); | 3813 // Handle inputs >= 255 (including +infinity). |
| 3808 b(gt, &above_zero); | 3814 Vmov(double_scratch, 255.0, result_reg); |
| 3815 mov(result_reg, Operand(255)); |
| 3816 VFPCompareAndSetFlags(input_reg, double_scratch); |
| 3817 b(ge, &done); |
| 3809 | 3818 |
| 3810 // Double value is less than zero, NaN or Inf, return 0. | 3819 // For inputs < 255 (including negative) vcvt_u32_f64 with round-to-nearest |
| 3811 mov(result_reg, Operand::Zero()); | 3820 // rounding mode will provide the correct result. |
| 3812 b(al, &done); | 3821 vcvt_u32_f64(double_scratch.low(), input_reg, kFPSCRRounding); |
| 3822 vmov(result_reg, double_scratch.low()); |
| 3813 | 3823 |
| 3814 // Double value is >= 255, return 255. | |
| 3815 bind(&above_zero); | |
| 3816 Vmov(double_scratch, 255.0, result_reg); | |
| 3817 VFPCompareAndSetFlags(input_reg, double_scratch); | |
| 3818 b(le, &in_bounds); | |
| 3819 mov(result_reg, Operand(255)); | |
| 3820 b(al, &done); | |
| 3821 | |
| 3822 // In 0-255 range, round and truncate. | |
| 3823 bind(&in_bounds); | |
| 3824 // Save FPSCR. | |
| 3825 vmrs(ip); | |
| 3826 // Set rounding mode to round to the nearest integer by clearing bits[23:22]. | |
| 3827 bic(result_reg, ip, Operand(kVFPRoundingModeMask)); | |
| 3828 vmsr(result_reg); | |
| 3829 vcvt_s32_f64(double_scratch.low(), input_reg, kFPSCRRounding); | |
| 3830 vmov(result_reg, double_scratch.low()); | |
| 3831 // Restore FPSCR. | |
| 3832 vmsr(ip); | |
| 3833 bind(&done); | 3824 bind(&done); |
| 3834 } | 3825 } |
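The rewritten ClampDoubleToUint8 drops the old save/modify/restore of FPSCR: VFPEnsureFPSCRState already guarantees round-to-nearest, inputs >= 255 (including +infinity) are handled explicitly before the conversion, and vcvt_u32_f64 itself saturates negative inputs to 0 and converts NaN to 0. A C++ sketch (not V8 code) of the value mapping the new sequence implements, using std::nearbyint as a stand-in for the FPSCR round-to-nearest conversion:

    #include <cmath>
    #include <cstdint>

    uint8_t ClampDoubleToUint8(double value) {
      if (value >= 255.0) return 255;  // also catches +infinity
      if (!(value > 0.0)) return 0;    // NaN, -infinity, negatives and 0.0 all map to 0
      return static_cast<uint8_t>(std::nearbyint(value));  // round to nearest, like vcvt_u32_f64
    }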
| 3835 | 3826 |
| 3836 | 3827 |
| 3837 void MacroAssembler::Throw(BailoutReason reason) { | 3828 void MacroAssembler::Throw(BailoutReason reason) { |
| 3838 Label throw_start; | 3829 Label throw_start; |
| 3839 bind(&throw_start); | 3830 bind(&throw_start); |
| 3840 #ifdef DEBUG | 3831 #ifdef DEBUG |
| 3841 const char* msg = GetBailoutReason(reason); | 3832 const char* msg = GetBailoutReason(reason); |
| 3842 if (msg != NULL) { | 3833 if (msg != NULL) { |
| (...skipping 255 matching lines...) |
| 4098 sub(result, result, Operand(dividend)); | 4089 sub(result, result, Operand(dividend)); |
| 4099 } | 4090 } |
| 4100 if (ms.shift() > 0) mov(result, Operand(result, ASR, ms.shift())); | 4091 if (ms.shift() > 0) mov(result, Operand(result, ASR, ms.shift())); |
| 4101 add(result, result, Operand(dividend, LSR, 31)); | 4092 add(result, result, Operand(dividend, LSR, 31)); |
| 4102 } | 4093 } |
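The unchanged tail shown here is the standard correction sequence for signed division by a constant through a precomputed multiplier/shift pair: the high half of the multiply is optionally adjusted by the dividend (the sub above, or an add for the opposite multiplier sign), arithmetically shifted by ms.shift(), and then dividend LSR #31 adds the sign bit so the quotient truncates toward zero. A hedged C++ sketch of that shape, treating the (multiplier, shift) pair as precomputed elsewhere (names here are illustrative, not V8's):

    #include <cstdint>

    int32_t TruncatingDivByConstant(int32_t dividend, int32_t divisor,
                                    int32_t multiplier, int shift) {
      // High 32 bits of the 64-bit signed product (what smull leaves behind).
      int32_t result = static_cast<int32_t>(
          (static_cast<int64_t>(dividend) * multiplier) >> 32);
      if (divisor > 0 && multiplier < 0) result += dividend;  // add correction
      if (divisor < 0 && multiplier > 0) result -= dividend;  // sub correction (as in the diff)
      if (shift > 0) result >>= shift;                        // ASR by the precomputed shift
      result += static_cast<uint32_t>(dividend) >> 31;        // add the dividend's sign bit
      return result;
    }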
| 4103 | 4094 |
| 4104 | 4095 |
| 4105 } } // namespace v8::internal | 4096 } } // namespace v8::internal |
| 4106 | 4097 |
| 4107 #endif // V8_TARGET_ARCH_ARM | 4098 #endif // V8_TARGET_ARCH_ARM |