Index: src/a64/code-stubs-a64.cc
diff --git a/src/a64/code-stubs-a64.cc b/src/a64/code-stubs-a64.cc
index 269b97f41c17fe8d4107bceb1fbaf832977aabeb..9d41f1ef75a3f64f3a03870e0c18d2c0f723e235 100644
--- a/src/a64/code-stubs-a64.cc
+++ b/src/a64/code-stubs-a64.cc
@@ -536,6 +536,84 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
 }
+void DoubleToIStub::Generate(MacroAssembler* masm) {
jbramley
2014/02/13 14:20:14
It seems that this stub doesn't always truncate on
rmcilroy
2014/02/14 12:45:07
Done (also on arm). Looks like the is_truncating()
jbramley
2014/02/14 12:57:38
If we do, we should probably rename the stub too,

+  Label done;
+  Register input = source();
+  Register result = destination();
+
+  ASSERT(result.Is64Bits());
+  ASSERT(jssp.Is(masm->StackPointer()));
+
+  int double_offset = offset();
+  // Account for saved regs if input is sp.
+  if (input.is(jssp)) double_offset += 2 * kPointerSize;
jbramley
2014/02/13 14:20:14
Do this near Push(scratch1, scratch2) so it's clea
rmcilroy
2014/02/14 12:45:07
Done.

+
+  DoubleRegister double_scratch = d0;  // only used if !skip_fastpath()
jbramley
2014/02/13 14:20:14
In that case, put it in the !skip_fastpath() scope
rmcilroy
2014/02/14 12:45:07
I can't - I need to Pop it at the end of the funct

+  Register scratch1 = GetRegisterThatIsNotOneOf(input, result);
+  Register scratch2 =
+      GetRegisterThatIsNotOneOf(input, result, scratch1);
+
+  __ Push(scratch1, scratch2);
jbramley
2014/02/13 14:20:14
This is probably the only stub that preserves its
rmcilroy
2014/02/14 12:45:07
I'm not sure what you mean here? What would you me
jbramley
2014/02/14 12:57:38
At the moment, MarkAsCall preserves every live val
rmcilroy
2014/02/14 13:44:18
Ok, done. Just FYI - I expect we will be adding m

+
+  if (!skip_fastpath()) {
+    // Try to convert with an FPU convert instruction. This handles all
+    // non-saturating cases.
+    __ Push(double_scratch);
jbramley
2014/02/13 14:20:14
If you could combine this with the previous Push,
rmcilroy
2014/02/14 12:45:07
I can't - Push() barfs that the registers are of d
jbramley
2014/02/14 12:57:38
Ah, yes, sorry.

+    if (input.is(jssp)) double_offset += 1 * kDoubleSize;
+    __ Ldr(double_scratch, MemOperand(input, double_offset));
+    __ TryInlineTruncateDoubleToI(result, double_scratch, &done);
+    __ Fmov(result, double_scratch);
+  } else {
+    __ Ldr(result, MemOperand(input, double_offset));
+  }
+
+  // If we reach here we need to manually convert the input to an int32.
+
+  // Extract the exponent.
+  Register exponent = scratch1;
+  __ Ubfx(exponent, result, HeapNumber::kMantissaBits,
+          HeapNumber::kExponentBits);
+
+  // If the exponent is >= 84 (kMantissaBits + 32), the result is always 0
+  // since the mantissa gets shifted completely out of the int32_t result.
+  __ Cmp(exponent, HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 32);
+  __ CzeroX(result, ge);
+  __ B(ge, &done);
+
+  // The Fcvtzs sequence handles all cases except where the conversion causes
+  // signed overflow in the int64_t target. Since we've already handled
+  // exponents >= 84, we can guarantee that 63 <= exponent < 84.
+
+  if (masm->emit_debug_code()) {
+    __ Cmp(exponent, HeapNumber::kExponentBias + 63);
+    // Exponents less than this should have been handled by the Fcvtzs case.
+    __ Check(ge, kUnexpectedValue);
+  }
+
+  // Isolate the mantissa bits, and set the implicit '1'.
+  Register mantissa = scratch2;
+  __ Ubfx(mantissa, result, 0, HeapNumber::kMantissaBits);
+  __ Orr(mantissa, mantissa, 1ULL << HeapNumber::kMantissaBits);
+
+  // Negate the mantissa if necessary.
+  __ Tst(result, kXSignMask);
+  __ Cneg(mantissa, mantissa, ne);
+
+  // Shift the mantissa bits into the correct place. We know that we have to
+  // shift it left here, because exponent >= 63 >= kMantissaBits.
+  __ Sub(exponent, exponent,
+         HeapNumber::kExponentBias + HeapNumber::kMantissaBits);
+  __ Lsl(result, mantissa, exponent);
+
+  __ Bind(&done);
+  if (!skip_fastpath()) {
+    __ Pop(double_scratch);
+  }
+  __ Pop(scratch2, scratch1);
+  __ Ret();
+}
+
+
 // See call site for description.
 static void EmitIdenticalObjectComparison(MacroAssembler* masm,
                                           Register left,
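
For reference, the manual conversion path in DoubleToIStub::Generate above can be modelled as a small standalone sketch in plain C++ (not part of the patch). The function name SlowPathDoubleToInt32 and the local constants are illustrative stand-ins for HeapNumber::kMantissaBits and HeapNumber::kExponentBias; like the stub's debug Check, it assumes IEEE 754 doubles and that the FPU fast path has already dealt with unbiased exponents below 63.

#include <cstdint>
#include <cstring>

// Illustrative sketch of the stub's slow path; plain C++, not V8 code.
int32_t SlowPathDoubleToInt32(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));  // The stub gets the raw bits via Ldr/Fmov.

  const int kMantissaBits = 52;    // Stand-in for HeapNumber::kMantissaBits.
  const int kExponentBias = 1023;  // Stand-in for HeapNumber::kExponentBias.

  // Ubfx: extract the 11-bit biased exponent sitting above the mantissa.
  const int exponent = static_cast<int>((bits >> kMantissaBits) & 0x7FF);

  // Cmp/CzeroX/B: for exponents >= bias + 84 every mantissa bit ends up above
  // bit 31, so the truncated int32 result is 0.
  if (exponent >= kExponentBias + kMantissaBits + 32) return 0;

  // Ubfx/Orr: isolate the 52 mantissa bits and set the implicit leading '1'.
  uint64_t mantissa = (bits & ((UINT64_C(1) << kMantissaBits) - 1)) |
                      (UINT64_C(1) << kMantissaBits);

  // Tst/Cneg: negate the mantissa if the input's sign bit is set.
  if (bits >> 63) mantissa = 0 - mantissa;

  // Sub/Lsl: shift left by (exponent - bias - 52); given 63 <= unbiased
  // exponent < 84, the shift amount is in [11, 31], so it is always a left shift.
  const uint64_t result = mantissa << (exponent - kExponentBias - kMantissaBits);

  // Callers of the stub read the low 32 bits of the result register
  // (two's-complement narrowing, as on the target).
  return static_cast<int32_t>(result & 0xFFFFFFFF);
}

As a worked example under these assumptions, the double 2^63 + 2^31 has unbiased exponent 63; after the shift the low word holds 0x80000000, i.e. INT32_MIN, which is what ECMA-262 ToInt32's modulo-2^32 truncation requires.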