Index: src/arm/code-stubs-arm.cc
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index 98a835fd1a5adb0b3b461a92edd452a49566020a..3dbdb2fc1510d7a3bae024971045f56127c4c320 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -634,6 +634,111 @@ void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
 }
 
 
+void DoubleToIStub::Generate(MacroAssembler* masm) {
+  Label out_of_range, only_low, negate, done;
+  Register input_reg = source();
+  Register result_reg = destination();
+
+  int double_offset = offset();
+  // Account for saved regs if input is sp.
+  if (input_reg.is(sp)) double_offset += 2 * kPointerSize;
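+  // (The Push of scratch_high and scratch_low below moves sp down by two
+  // words, hence the 2 * kPointerSize adjustment for sp-relative offsets.)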
+
+  // Immediate values for this stub fit in instructions, so it's safe to use ip.
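+  // (ip is normally reserved by the macro assembler for materializing
+  // constants that do not fit in a single instruction; none are needed here.)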
+  Register scratch = ip;
+  Register scratch_low =
+      GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch);
+  Register scratch_high =
+      GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch_low);
+  LowDwVfpRegister double_scratch = kScratchDoubleReg;
+
+  __ Push(scratch_high, scratch_low);
+
+  if (!skip_fastpath()) {
+    // Load double input.
+    __ vldr(double_scratch, MemOperand(input_reg, double_offset));
+    __ vmov(scratch_low, scratch_high, double_scratch);
+
+    // Do fast-path convert from double to int.
+    __ vcvt_s32_f64(double_scratch.low(), double_scratch);
+    __ vmov(result_reg, double_scratch.low());
+
+    // If result is not saturated (0x7fffffff or 0x80000000), we are done.
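+    // Subtracting 1 maps the saturated values 0x7fffffff and 0x80000000 to
+    // 0x7ffffffe and 0x7fffffff; every other result maps to something
+    // signed-less-than 0x7ffffffe, so one compare catches both cases.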
+    __ sub(scratch, result_reg, Operand(1));
+    __ cmp(scratch, Operand(0x7ffffffe));
+    __ b(lt, &done);
+  } else {
+    // We've already done MacroAssembler::TryFastTruncatedDoubleToILoad, so we
+    // know the exponent is > 31 and can skip the vcvt_s32_f64, which would
+    // saturate.
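+    // The low mantissa word of the double is kept at the lower address here,
+    // so it goes to scratch_low and the sign/exponent word to scratch_high.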
+    if (double_offset == 0) {
+      __ ldm(ia, input_reg, scratch_low.bit() | scratch_high.bit());
+    } else {
+      __ ldr(scratch_low, MemOperand(input_reg, double_offset));
+      __ ldr(scratch_high, MemOperand(input_reg, double_offset + kIntSize));
+    }
+  }
+
+  __ Ubfx(scratch, scratch_high,
+          HeapNumber::kExponentShift, HeapNumber::kExponentBits);
+  // Load scratch with exponent - 1. This is faster than loading
+  // with exponent because Bias + 1 = 1024 which is an *ARM* immediate value.
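+  // (1023 cannot be encoded as an ARM rotated 8-bit immediate; 1024 can.)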
+  STATIC_ASSERT(HeapNumber::kExponentBias + 1 == 1024);
+  __ sub(scratch, scratch, Operand(HeapNumber::kExponentBias + 1));
+  // If the exponent is greater than or equal to 84, the value is a multiple
+  // of 2^32 (the 53-bit mantissa is shifted left by at least 32), so its 32
+  // least significant bits are 0s and the result is 0.
+  // Compare exponent with 84 (compare exponent - 1 with 83).
+  __ cmp(scratch, Operand(83));
+  __ b(ge, &out_of_range);
+
+  // If we reach this code, 31 <= exponent <= 83.
+  // So, we don't have to handle cases where 0 <= exponent <= 20 for
+  // which we would need to shift right the high part of the mantissa.
+  // Scratch contains exponent - 1.
+  // Load scratch with 52 - exponent (load with 51 - (exponent - 1)).
+  __ rsb(scratch, scratch, Operand(51), SetCC);
+  __ b(ls, &only_low);
+  // 21 <= exponent <= 51, shift scratch_low and scratch_high
+  // to generate the result.
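+  // For example, with exponent 40, scratch is 12: the low mantissa word
+  // shifted right by 12 supplies result bits 0..19, and the 21-bit value
+  // formed by the implicit 1 and the top 20 mantissa bits, shifted left by
+  // exponent - 20 = 20, supplies the rest; bits above bit 31 drop out,
+  // which is exactly the modulo-2^32 truncation we want.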
+  __ mov(scratch_low, Operand(scratch_low, LSR, scratch));
+  // Scratch contains: 52 - exponent.
+  // We need: exponent - 20.
+  // So we use: 32 - scratch = 32 - 52 + exponent = exponent - 20.
+  __ rsb(scratch, scratch, Operand(32));
+  __ Ubfx(result_reg, scratch_high,
+          0, HeapNumber::kMantissaBitsInTopWord);
+  // Set the implicit 1 above the mantissa bits extracted from scratch_high.
+  __ orr(result_reg, result_reg,
+         Operand(1 << HeapNumber::kMantissaBitsInTopWord));
+  __ orr(result_reg, scratch_low, Operand(result_reg, LSL, scratch));
+  __ b(&negate);
+
+  __ bind(&out_of_range);
+  __ mov(result_reg, Operand::Zero());
+  __ b(&done);
+
+  __ bind(&only_low);
+  // 52 <= exponent <= 83, shift only scratch_low.
+  // On entry, scratch contains: 52 - exponent.
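+  // After the negation below, scratch holds exponent - 52. The high mantissa
+  // bits would land at bit exponent - 20 >= 32, so only the low word, shifted
+  // left by exponent - 52 (e.g. by 8 for exponent 60), reaches the 32-bit
+  // result.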
+  __ rsb(scratch, scratch, Operand::Zero());
+  __ mov(result_reg, Operand(scratch_low, LSL, scratch));
+
+  __ bind(&negate);
+  // If the input was positive, scratch_high ASR 31 equals 0 and
+  // scratch_high LSR 31 equals 0.
+  // New result = (result eor 0) + 0 = result.
+  // If the input was negative, we have to negate the result.
+  // scratch_high ASR 31 equals 0xffffffff and scratch_high LSR 31 equals 1.
+  // New result = (result eor 0xffffffff) + 1 = 0 - result.
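+  // This is branch-free two's-complement negation gated by the sign bit:
+  // the eor flips the bits when the sign is set, the add then adds 1.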
+  __ eor(result_reg, result_reg, Operand(scratch_high, ASR, 31));
+  __ add(result_reg, result_reg, Operand(scratch_high, LSR, 31));
+
+  __ bind(&done);
+
+  __ Pop(scratch_high, scratch_low);
+  __ Ret();
+}
+
+
 bool WriteInt32ToHeapNumberStub::IsPregenerated() {
   // These variants are compiled ahead of time. See next method.
   if (the_int_.is(r1) && the_heap_number_.is(r0) && scratch_.is(r2)) {
@@ -1591,7 +1696,6 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
   Register right = r0;
   Register scratch1 = r6;
   Register scratch2 = r7;
-  Register scratch3 = r4;
 
   ASSERT(smi_operands || (not_numbers != NULL));
   if (smi_operands) {
@@ -1689,12 +1793,8 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
         __ SmiUntag(r2, right);
       } else {
         // Convert operands to 32-bit integers. Right in r2 and left in r3.
-        __ ConvertNumberToInt32(
-            left, r3, heap_number_map,
-            scratch1, scratch2, scratch3, d0, d1, not_numbers);
-        __ ConvertNumberToInt32(
-            right, r2, heap_number_map,
-            scratch1, scratch2, scratch3, d0, d1, not_numbers);
+        __ TruncateNumberToI(left, r3, heap_number_map, scratch1, not_numbers);
+        __ TruncateNumberToI(right, r2, heap_number_map, scratch1, not_numbers);
       }
 
       Label result_not_a_smi;