Index: src/arm/code-stubs-arm.cc |
=================================================================== |
--- src/arm/code-stubs-arm.cc (revision 6953) |
+++ src/arm/code-stubs-arm.cc (working copy) |
@@ -398,8 +398,11 @@ |
Label* not_number); |
// Loads the number from object into dst as a 32-bit integer if possible. If |
- // the object is not a 32-bit integer control continues at the label |
- // not_int32. If VFP is supported double_scratch is used but not scratch2. |
+ // the object cannot be converted to a 32-bit integer, control continues |
+ // at the label not_int32. If VFP is supported, double_scratch is used |
+ // but not scratch2. |
+ // Floating point values in the 32-bit integer range are rounded |
+ // to an integer. |
static void LoadNumberAsInteger(MacroAssembler* masm, |
Register object, |
Register dst, |
@@ -409,6 +412,76 @@ |
DwVfpRegister double_scratch, |
Label* not_int32); |
+ // Loads the number from object into double_dst in the double format. |
+ // Control jumps to not_int32 if the value cannot be exactly represented |
+ // by a 32-bit integer. |
+ // Floating point values in the 32-bit integer range that are not exact |
+ // integers won't be loaded. |
+ static void LoadNumberAsInt32Double(MacroAssembler* masm, |
+ Register object, |
+ Destination destination, |
+ DwVfpRegister double_dst, |
+ Register dst1, |
+ Register dst2, |
+ Register heap_number_map, |
+ Register scratch1, |
+ Register scratch2, |
+ SwVfpRegister single_scratch, |
+ Label* not_int32); |
+ |
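For reference, the contract described above amounts to the following plain C++ sketch (illustrative only, not part of the patch; LoadAsInt32Double is a hypothetical name, and the generated code jumps to not_int32 where the sketch returns nothing):

  #include <cmath>
  #include <cstdint>
  #include <optional>

  // Yields the value as a double only when it is exactly representable
  // as a signed 32-bit integer.
  std::optional<double> LoadAsInt32Double(double value) {
    if (std::trunc(value) != value) return std::nullopt;  // not an exact integer
    if (value < INT32_MIN || value > INT32_MAX) return std::nullopt;  // out of range
    return value;
  }
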
+ // Loads the number from object into dst as a 32-bit integer. |
+ // Control jumps to not_int32 if the object's value cannot be exactly |
+ // represented by a 32-bit integer. |
+ // Floating point values in the 32-bit integer range that are not exact |
+ // integers won't be converted. |
+ // scratch3 is not used when VFP3 is supported. |
+ static void LoadNumberAsInt32(MacroAssembler* masm, |
+ Register object, |
+ Register dst, |
+ Register heap_number_map, |
+ Register scratch1, |
+ Register scratch2, |
+ Register scratch3, |
+ DwVfpRegister double_scratch, |
+ Label* not_int32); |
+ |
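LoadNumberAsInt32 starts from the smi fast path, so as background: a hedged sketch (not part of the patch) of the 31-bit smi encoding used on 32-bit ARM and of the 0x40000000 range check the stub performs before tagging a result:

  #include <cstdint>

  // 32-bit V8 smis: the integer lives in the upper 31 bits, the tag bit is 0.
  inline int32_t SmiUntag(int32_t tagged) { return tagged >> 1; }
  inline int32_t SmiTag(int32_t value) {
    return static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
  }

  // The stub's range check: "add scratch, value, #0x40000000, SetCC" followed
  // by "b mi" takes the slow path exactly when value lies outside
  // [-2^30, 2^30 - 1], the smi range.
  inline bool FitsInSmi(int32_t value) {
    return value >= -0x40000000 && value <= 0x3fffffff;
  }
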
+ // Generates non-VFP3 code to check whether a double can be exactly |
+ // represented by a 32-bit integer. This does not check for 0 or -0, |
+ // which need to be checked for separately. |
+ // Control jumps to not_int32 if the value is not a 32-bit integer, and |
+ // falls through otherwise. |
+ // src1 and src2 will be clobbered. |
+ // |
+ // Expected input: |
+ // - src1: higher (exponent) part of the double value. |
+ // - src2: lower (mantissa) part of the double value. |
+ // Output status: |
+ // - dst: 32 higher bits of the mantissa. (mantissa[51:20]) |
+ // - src2: contains 1. |
+ // - other registers are clobbered. |
+ static void DoubleIs32BitInteger(MacroAssembler* masm, |
+ Register src1, |
+ Register src2, |
+ Register dst, |
+ Register scratch, |
+ Label* not_int32); |
+ |
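The words src1/src2 are the raw IEEE 754 halves of the double, so the test amounts to the following plain C++ (illustrative sketch only; the 0/-0 case is assumed to be handled by the caller, as noted above):

  #include <cstdint>

  bool DoubleWordsAre32BitInteger(uint32_t hi, uint32_t lo) {
    int exponent = static_cast<int>((hi >> 20) & 0x7FF) - 1023;  // remove the bias
    bool negative = (hi & 0x80000000u) != 0;
    uint64_t mantissa = (static_cast<uint64_t>(hi & 0xFFFFFu) << 32) | lo;  // 52 bits

    if (exponent < 0) return false;   // |value| < 1 and non-zero: not an integer
    if (exponent > 31) return false;  // magnitude is at least 2^32
    if (exponent == 31) {
      // Only -2^31 fits; it is -1.0 * 2^31, i.e. sign set and an all-zero mantissa.
      return negative && mantissa == 0;
    }
    // The value is (1.mantissa) * 2^exponent, so the low (52 - exponent)
    // mantissa bits are the fractional part and must all be zero.
    return (mantissa & ((uint64_t{1} << (52 - exponent)) - 1)) == 0;
  }
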
+ // Generates code to call a C function to do a double operation using core |
+ // registers. (Used when VFP3 is not supported.) |
+ // This code never falls through, but returns with a heap number containing |
+ // the result in r0. |
+ // Register heap_number_result must hold a heap number in which the |
+ // result of the operation will be stored. |
+ // Requires the following layout on entry: |
+ // r0: Left value (least significant part of mantissa). |
+ // r1: Left value (sign, exponent, top of mantissa). |
+ // r2: Right value (least significant part of mantissa). |
+ // r3: Right value (sign, exponent, top of mantissa). |
+ static void CallCCodeForDoubleOperation(MacroAssembler* masm, |
+ Token::Value op, |
+ Register heap_number_result, |
+ Register scratch); |
+ |
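The r0..r3 layout listed above follows the soft-float convention of passing each double as two 32-bit words, low (mantissa) word first. A hedged illustration (hypothetical helper, little-endian word order assumed):

  #include <cstdint>
  #include <cstring>

  void SplitDoubleForCall(double value, uint32_t* lo, uint32_t* hi) {
    uint64_t bits;
    std::memcpy(&bits, &value, sizeof(bits));
    *lo = static_cast<uint32_t>(bits);        // r0 for the left value, r2 for the right
    *hi = static_cast<uint32_t>(bits >> 32);  // r1 for the left value, r3 for the right
  }
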
private: |
static void LoadNumber(MacroAssembler* masm, |
FloatingPointHelper::Destination destination, |
@@ -560,7 +633,314 @@ |
} |
+void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm, |
+ Register object, |
+ Destination destination, |
+ DwVfpRegister double_dst, |
+ Register dst1, |
+ Register dst2, |
+ Register heap_number_map, |
+ Register scratch1, |
+ Register scratch2, |
+ SwVfpRegister single_scratch, |
+ Label* not_int32) { |
+ ASSERT(!scratch1.is(object) && !scratch2.is(object)); |
+ ASSERT(!scratch1.is(scratch2)); |
+ ASSERT(!heap_number_map.is(object) && |
+ !heap_number_map.is(scratch1) && |
+ !heap_number_map.is(scratch2)); |
+ Label done, obj_is_heap_number; |
+ |
+ __ JumpIfNotSmi(object, &obj_is_heap_number); |
+ __ SmiUntag(scratch1, object); |
+ if (CpuFeatures::IsSupported(VFP3)) { |
+ CpuFeatures::Scope scope(VFP3); |
+ __ vmov(single_scratch, scratch1); |
+ __ vcvt_f64_s32(double_dst, single_scratch); |
+ if (destination == kCoreRegisters) { |
+ __ vmov(dst1, dst2, double_dst); |
+ } |
+ } else { |
+ Label fewer_than_20_useful_bits; |
+ // Expected output: |
+ // | dst1 | dst2 | |
+ // | s | exp | mantissa | |
+ |
+ // Check for zero. |
+ __ cmp(scratch1, Operand(0)); |
+ __ mov(dst1, scratch1); |
+ __ mov(dst2, scratch1); |
+ __ b(eq, &done); |
+ |
+ // Preload the sign of the value. |
+ __ and_(dst1, scratch1, Operand(HeapNumber::kSignMask), SetCC); |
+ // Get the absolute value of the object (as an unsigned integer). |
+ __ rsb(scratch1, scratch1, Operand(0), SetCC, mi); |
+ |
+ // Get mantissa[51:20]. |
+ |
+ // Get the position of the first set bit. |
+ __ CountLeadingZeros(dst2, scratch1, scratch2); |
+ __ rsb(dst2, dst2, Operand(31)); |
+ |
+ // Set the exponent. |
+ __ add(scratch2, dst2, Operand(HeapNumber::kExponentBias)); |
+ __ Bfi(dst1, scratch2, scratch2, |
+ HeapNumber::kExponentShift, HeapNumber::kExponentBits); |
+ |
+ // Clear the leading set bit (it becomes the implicit mantissa bit). |
+ __ mov(scratch2, Operand(1)); |
+ __ bic(scratch1, scratch1, Operand(scratch2, LSL, dst2)); |
+ |
+ __ cmp(dst2, Operand(HeapNumber::kMantissaBitsInTopWord)); |
+ // Get the number of bits to set in the lower part of the mantissa. |
+ __ sub(scratch2, dst2, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC); |
+ __ b(mi, &fewer_than_20_useful_bits); |
+ // Set the higher 20 bits of the mantissa. |
+ __ orr(dst1, dst1, Operand(scratch1, LSR, scratch2)); |
+ __ rsb(scratch2, scratch2, Operand(32)); |
+ __ mov(dst2, Operand(scratch1, LSL, scratch2)); |
+ __ b(&done); |
+ |
+ __ bind(&fewer_than_20_useful_bits); |
+ __ rsb(scratch2, dst2, Operand(HeapNumber::kMantissaBitsInTopWord)); |
+ __ mov(scratch2, Operand(scratch1, LSL, scratch2)); |
+ __ orr(dst1, dst1, scratch2); |
+ // Set dst2 to 0. |
+ __ mov(dst2, Operand(0)); |
+ } |
+ |
+ __ b(&done); |
+ |
+ __ bind(&obj_is_heap_number); |
Søren Thygesen Gjesse, 2011/02/28 09:54:32:
  Please change this label to obj_is_not_smi.
Søren Thygesen Gjesse, 2011/03/02 09:33:08:
  Done.
+ if (FLAG_debug_code) { |
+ __ AbortIfNotRootValue(heap_number_map, |
+ Heap::kHeapNumberMapRootIndex, |
+ "HeapNumberMap register clobbered."); |
+ } |
+ __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32); |
+ |
+ // Load the number. |
+ if (CpuFeatures::IsSupported(VFP3)) { |
+ CpuFeatures::Scope scope(VFP3); |
+ // Load the double value. |
+ __ sub(scratch1, object, Operand(kHeapObjectTag)); |
+ __ vldr(double_dst, scratch1, HeapNumber::kValueOffset); |
+ |
+ __ EmitVFPTruncate(kRoundToZero, |
+ single_scratch, |
+ double_dst, |
+ scratch1, |
+ scratch2, |
+ kCheckForInexactConversion); |
+ |
+ // Jump to not_int32 if the operation did not succeed. |
+ __ b(ne, not_int32); |
+ |
+ if (destination == kCoreRegisters) { |
+ __ vmov(dst1, dst2, double_dst); |
+ } |
+ |
+ } else { |
+ ASSERT(!scratch1.is(object) && !scratch2.is(object)); |
+ // Load the double value into the destination registers. |
+ __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset)); |
+ |
+ // Check for 0 and -0. |
+ __ bic(scratch1, dst1, Operand(HeapNumber::kSignMask)); |
+ __ orr(scratch1, scratch1, Operand(dst2)); |
+ __ cmp(scratch1, Operand(0)); |
+ __ b(eq, &done); |
+ |
+ // Check that the value can be exactly represented by a 32bit integer. |
+ // Jump to not_int32 if that's not the case. |
+ DoubleIs32BitInteger(masm, dst1, dst2, scratch1, scratch2, not_int32); |
+ |
+ // dst1 and dst2 were trashed. Reload the double value. |
+ __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset)); |
+ } |
+ |
+ __ bind(&done); |
+} |
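
The non-VFP smi path above builds the IEEE 754 words by hand: sign first, then the exponent from the position of the leading one bit, then the remaining bits shifted into the mantissa. A C++ model of the same construction (illustrative only; __builtin_clz assumes GCC/Clang and plays the role of CountLeadingZeros):

  #include <cstdint>

  void Int32ToDoubleWords(int32_t value, uint32_t* hi, uint32_t* lo) {
    if (value == 0) { *hi = 0; *lo = 0; return; }  // +0.0 (the stub checks zero first)
    uint32_t sign = value < 0 ? 0x80000000u : 0u;
    uint32_t abs = value < 0 ? 0u - static_cast<uint32_t>(value)
                             : static_cast<uint32_t>(value);
    int exponent = 31 - __builtin_clz(abs);  // position of the leading 1 bit
    abs &= ~(1u << exponent);                // clear it: it becomes the implicit bit
    uint64_t mantissa = static_cast<uint64_t>(abs) << (52 - exponent);
    *hi = sign | (static_cast<uint32_t>(exponent + 1023) << 20)
               | static_cast<uint32_t>(mantissa >> 32);
    *lo = static_cast<uint32_t>(mantissa);
  }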
+ |
+ |
+void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm, |
+ Register object, |
+ Register dst, |
+ Register heap_number_map, |
+ Register scratch1, |
+ Register scratch2, |
+ Register scratch3, |
+ DwVfpRegister double_scratch, |
+ Label* not_int32) { |
+ ASSERT(!dst.is(object)); |
+ ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object)); |
+ ASSERT(!scratch1.is(scratch2) && |
+ !scratch1.is(scratch3) && |
+ !scratch2.is(scratch3)); |
+ |
+ Label done; |
+ |
+ // Untag the object in the destination register. |
Søren Thygesen Gjesse, 2011/02/28 09:54:32:
  in -> into
Søren Thygesen Gjesse, 2011/03/02 09:33:08:
  Done.
+ __ SmiUntag(dst, object); |
+ // Just return if the object is a smi. |
+ __ JumpIfSmi(object, &done); |
+ |
+ if (FLAG_debug_code) { |
+ __ AbortIfNotRootValue(heap_number_map, |
+ Heap::kHeapNumberMapRootIndex, |
+ "HeapNumberMap register clobbered."); |
+ } |
+ __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32); |
+ |
+ // Object is a heap number. |
+ // Convert the floating point value to a 32bit integer. |
+ if (CpuFeatures::IsSupported(VFP3)) { |
+ CpuFeatures::Scope scope(VFP3); |
+ SwVfpRegister single_scratch = double_scratch.low(); |
+ // Load the double value. |
+ __ sub(scratch1, object, Operand(kHeapObjectTag)); |
+ __ vldr(double_scratch, scratch1, HeapNumber::kValueOffset); |
+ |
+ __ EmitVFPTruncate(kRoundToZero, |
+ single_scratch, |
+ double_scratch, |
+ scratch1, |
+ scratch2, |
+ kCheckForInexactConversion); |
+ |
+ // Jump to not_int32 if the operation did not succeed. |
+ __ b(ne, not_int32); |
+ // Get the result in the destination register. |
+ __ vmov(dst, single_scratch); |
+ |
+ } else { |
+ // Load the double value in the destination registers.. |
Søren Thygesen Gjesse, 2011/02/28 09:54:32:
  .. -> .
Søren Thygesen Gjesse, 2011/03/02 09:33:08:
  Done.
+ __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset)); |
+ __ ldr(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset)); |
+ |
+ // Check for 0 and -0. |
+ __ bic(dst, scratch1, Operand(HeapNumber::kSignMask)); |
+ __ orr(dst, scratch2, Operand(dst)); |
+ __ cmp(dst, Operand(0)); |
+ __ b(eq, &done); |
+ |
+ DoubleIs32BitInteger(masm, scratch1, scratch2, dst, scratch3, not_int32); |
+ |
+ // Register state after DoubleIs32BitInteger: |
+ // dst: mantissa[51:20]. |
+ // scratch2: 1. |
+ // scratch3: 32 - exponent. |
+ |
+ // Shift back the higher bits of the mantissa. |
+ __ mov(dst, Operand(dst, LSR, scratch3)); |
+ // Set the implicit first bit. |
+ __ rsb(scratch3, scratch3, Operand(32)); |
+ __ orr(dst, dst, Operand(scratch2, LSL, scratch3)); |
+ // Set the sign. |
+ __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset)); |
+ __ tst(scratch1, Operand(HeapNumber::kSignMask)); |
+ __ rsb(dst, dst, Operand(0), LeaveCC, mi); |
+ } |
+ |
+ __ bind(&done); |
+} |
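
Conversely, once DoubleIs32BitInteger has passed, the non-VFP path above rebuilds the integer from the raw words. A C++ model (illustrative only, assumes two's complement):

  #include <cstdint>

  int32_t DoubleWordsToInt32(uint32_t hi, uint32_t lo) {
    int exponent = static_cast<int>((hi >> 20) & 0x7FF) - 1023;
    // Top 32 mantissa bits (mantissa[51:20]), as the generated code keeps in dst.
    uint32_t mantissa_top = (lo >> 20) | ((hi & 0xFFFFFu) << 12);
    // Drop the fractional bits and restore the implicit leading 1.
    uint32_t magnitude = (exponent == 0 ? 0u : mantissa_top >> (32 - exponent))
                         | (1u << exponent);
    uint32_t result = (hi & 0x80000000u) ? 0u - magnitude : magnitude;
    return static_cast<int32_t>(result);
  }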
+ |
+ |
+void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm, |
+ Register src1, |
+ Register src2, |
+ Register dst, |
+ Register scratch, |
+ Label* not_int32) { |
+ // Get exponent alone in scratch. |
+ __ Ubfx(scratch, |
+ src1, |
+ HeapNumber::kExponentShift, |
+ HeapNumber::kExponentBits); |
+ |
+ // Subtract the bias from the exponent. |
+ __ sub(scratch, scratch, Operand(HeapNumber::kExponentBias), SetCC); |
+ |
+ // src1: higher (exponent) part of the double value. |
+ // src2: lower (mantissa) part of the double value. |
+ // scratch: unbiased exponent. |
+ |
+ // Fast cases. Check for obvious non 32bit integer values. |
+ // Negative exponent cannot yield 32bit integers. |
+ __ b(mi, not_int32); |
+ // Exponent greater than 31 cannot yield 32bit integers. |
+ // Also, a positive value with an exponent equal to 31 is outside of the |
+ // signed 32bit integer range. |
+ __ tst(src1, Operand(HeapNumber::kSignMask)); |
Søren Thygesen Gjesse, 2011/02/28 09:54:32:
  Maybe adding a bit more commenting here would be nice.
Søren Thygesen Gjesse, 2011/03/02 09:33:08:
  Done.
+ __ cmp(scratch, Operand(30), eq); |
+ __ cmp(scratch, Operand(31), ne); |
+ __ b(gt, not_int32); |
+ // - Bits [21:0] in the mantissa are not null. |
+ __ tst(src2, Operand(0x3fffff)); |
+ __ b(ne, not_int32); |
+ |
+ // Otherwise the exponent needs to be big enough to shift all the |
+ // non-zero bits left. So we need the (30 - exponent) last bits of the |
+ // 31 higher bits of the mantissa to be null. |
+ // Because bits [21:20] are null, we can check instead that the |
Søren Thygesen Gjesse, 2011/02/28 09:54:32:
  20 -> 0?
Søren Thygesen Gjesse, 2011/03/02 09:33:08:
  Done.
+ // (32 - exponent) last bits of the 32 higher bits of the mantissa are null. |
+ |
+ // Get the 32 higher bits of the mantissa in dst. |
+ __ Ubfx(dst, src2, HeapNumber::kMantissaBitsInTopWord, 12); |
Søren Thygesen Gjesse, 2011/02/28 09:54:32:
  12 -> 32 - HeapNumber::kMantissaBitsInTopWord
Søren Thygesen Gjesse, 2011/03/02 09:33:08:
  Done.
+ __ orr(dst, |
+ dst, |
+ Operand(src1, LSL, HeapNumber::kNonMantissaBitsInTopWord)); |
+ |
+ // Create the mask and test the lower bits (of the higher bits). |
+ __ rsb(scratch, scratch, Operand(32)); |
+ __ mov(src2, Operand(1)); |
+ __ mov(src1, Operand(src2, LSL, scratch)); |
+ __ sub(src1, src1, Operand(1)); |
+ __ tst(dst, src1); |
+ __ b(ne, not_int32); |
+} |
+ |
+ |
+void FloatingPointHelper::CallCCodeForDoubleOperation( |
+ MacroAssembler* masm, |
+ Token::Value op, |
+ Register heap_number_result, |
+ Register scratch) { |
+ // Using core registers: |
+ // r0: Left value (least significant part of mantissa). |
+ // r1: Left value (sign, exponent, top of mantissa). |
+ // r2: Right value (least significant part of mantissa). |
+ // r3: Right value (sign, exponent, top of mantissa). |
+ |
+ // Assert that heap_number_result is callee-saved. |
+ // We currently always use r5 to pass it. |
+ ASSERT(heap_number_result.is(r5)); |
+ |
+ // Push the current return address before the C call. Return will be |
+ // through pop(pc) below. |
+ __ push(lr); |
+ __ PrepareCallCFunction(4, scratch); // Two doubles are 4 arguments. |
+ // Call C routine that may not cause GC or other trouble. |
+ __ CallCFunction(ExternalReference::double_fp_operation(op), 4); |
+ // Store answer in the overwritable heap number. |
+#if !defined(USE_ARM_EABI) |
+ // Double returned in fp coprocessor registers 0 and 1, encoded as |
+ // register cr8. Offsets must be divisible by 4 for the coprocessor, so |
+ // we need to subtract the tag from heap_number_result. |
+ __ sub(scratch, heap_number_result, Operand(kHeapObjectTag)); |
+ __ stc(p1, cr8, MemOperand(scratch, HeapNumber::kValueOffset)); |
+#else |
+ // Double returned in registers 0 and 1. |
+ __ Strd(r0, r1, FieldMemOperand(heap_number_result, |
+ HeapNumber::kValueOffset)); |
+#endif |
+ // Place heap_number_result in r0 and return to the pushed return address. |
+ __ mov(r0, Operand(heap_number_result)); |
+ __ pop(pc); |
+} |
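
Conceptually, the routine reached through ExternalReference::double_fp_operation computes one binary operation on the two doubles and returns the result per the soft-float convention. A hypothetical C++ stand-in (the real runtime entry point and its dispatch differ):

  #include <cmath>

  enum class BinaryOp { kAdd, kSub, kMul, kDiv, kMod };  // illustrative only

  double DoubleFpOperation(BinaryOp op, double left, double right) {
    switch (op) {
      case BinaryOp::kAdd: return left + right;
      case BinaryOp::kSub: return left - right;
      case BinaryOp::kMul: return left * right;
      case BinaryOp::kDiv: return left / right;
      case BinaryOp::kMod: return std::fmod(left, right);
    }
    return 0.0;  // not reached
  }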
+ |
+ |
// See comment for class. |
void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) { |
Label max_negative_int; |
@@ -2704,33 +3084,11 @@ |
__ add(r0, r0, Operand(kHeapObjectTag)); |
__ Ret(); |
} else { |
- // Using core registers: |
- // r0: Left value (least significant part of mantissa). |
- // r1: Left value (sign, exponent, top of mantissa). |
- // r2: Right value (least significant part of mantissa). |
- // r3: Right value (sign, exponent, top of mantissa). |
- |
- // Push the current return address before the C call. Return will be |
- // through pop(pc) below. |
- __ push(lr); |
- __ PrepareCallCFunction(4, scratch1); // Two doubles are 4 arguments. |
- // Call C routine that may not cause GC or other trouble. r5 is callee |
- // save. |
- __ CallCFunction(ExternalReference::double_fp_operation(op_), 4); |
- // Store answer in the overwritable heap number. |
-#if !defined(USE_ARM_EABI) |
- // Double returned in fp coprocessor register 0 and 1, encoded as |
- // register cr8. Offsets must be divisible by 4 for coprocessor so we |
- // need to substract the tag from r5. |
- __ sub(scratch1, result, Operand(kHeapObjectTag)); |
- __ stc(p1, cr8, MemOperand(scratch1, HeapNumber::kValueOffset)); |
-#else |
- // Double returned in registers 0 and 1. |
- __ Strd(r0, r1, FieldMemOperand(result, HeapNumber::kValueOffset)); |
-#endif |
- // Plase result in r0 and return to the pushed return address. |
- __ mov(r0, Operand(result)); |
- __ pop(pc); |
+ // Call the C function to handle the double operation. |
+ FloatingPointHelper::CallCCodeForDoubleOperation(masm, |
+ op_, |
+ result, |
+ scratch1); |
} |
break; |
} |
@@ -2776,7 +3134,6 @@ |
break; |
case Token::SAR: |
// Use only the 5 least significant bits of the shift count. |
- __ and_(r2, r2, Operand(0x1f)); |
__ GetLeastBitsFromInt32(r2, r2, 5); |
__ mov(r2, Operand(r3, ASR, r2)); |
break; |
@@ -2921,7 +3278,287 @@ |
void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { |
ASSERT(operands_type_ == TRBinaryOpIC::INT32); |
+ Register left = r1; |
+ Register right = r0; |
+ Register scratch1 = r7; |
+ Register scratch2 = r9; |
+ DwVfpRegister double_scratch = d0; |
+ SwVfpRegister single_scratch = s3; |
+ |
+ Register heap_number_result = no_reg; |
+ Register heap_number_map = r6; |
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); |
+ |
+ Label call_runtime, restore_left_and_call_runtime; |
+ // Labels for type transition, used for wrong input or output types. |
+ // Both labels are currently bound to the same position. We use two |
+ // different labels to distinguish the cause of the type transition. |
+ Label input_transition, output_transition; |
+ |
+ // Smi-smi fast case. |
+ Label skip; |
+ __ orr(scratch1, left, right); |
+ __ JumpIfNotSmi(scratch1, &skip); |
+ GenerateSmiSmiOperation(masm); |
+ // Fall through if the result is not a smi. |
+ __ bind(&skip); |
+ |
+ switch (op_) { |
+ case Token::ADD: |
+ case Token::SUB: |
+ case Token::MUL: |
+ case Token::DIV: |
+ case Token::MOD: { |
+ // Load both operands and check that they are 32-bit integers. |
+ // Jump to type transition if they are not. |
+ FloatingPointHelper::Destination destination = |
+ CpuFeatures::IsSupported(VFP3) && op_ != Token::MOD ? |
+ FloatingPointHelper::kVFPRegisters : |
+ FloatingPointHelper::kCoreRegisters; |
+ |
Søren Thygesen Gjesse, 2011/02/28 09:54:32:
  Maybe add a comment here that r0 and r1 are preserved.
Søren Thygesen Gjesse, 2011/03/02 09:33:08:
  Done.
+ FloatingPointHelper::LoadNumberAsInt32Double(masm, |
+ right, |
+ destination, |
+ d7, |
+ r2, |
+ r3, |
+ heap_number_map, |
+ scratch1, |
+ scratch2, |
+ s0, |
+ &input_transition); |
+ FloatingPointHelper::LoadNumberAsInt32Double(masm, |
+ left, |
+ destination, |
+ d6, |
+ r4, |
+ r5, |
+ heap_number_map, |
+ scratch1, |
+ scratch2, |
+ s0, |
+ &input_transition); |
+ |
+ if (destination == FloatingPointHelper::kVFPRegisters) { |
+ CpuFeatures::Scope scope(VFP3); |
+ Label return_heap_number; |
+ switch (op_) { |
+ case Token::ADD: |
+ __ vadd(d5, d6, d7); |
+ break; |
+ case Token::SUB: |
+ __ vsub(d5, d6, d7); |
+ break; |
+ case Token::MUL: |
+ __ vmul(d5, d6, d7); |
+ break; |
+ case Token::DIV: |
+ __ vdiv(d5, d6, d7); |
+ break; |
+ default: |
+ UNREACHABLE(); |
+ } |
+ |
+ if (op_ != Token::DIV) { |
+ // These operations produce an integer result. |
+ // Try to return a smi if we can. |
+ // Otherwise return a heap number if allowed, or jump to type |
+ // transition. |
+ |
+ __ EmitVFPTruncate(kRoundToZero, |
+ single_scratch, |
+ d5, |
+ scratch1, |
+ scratch2); |
+ |
+ if (result_type_ <= TRBinaryOpIC::INT32) { |
+ // If the ne condition is set, result does |
+ // not fit in a 32bit integer. |
+ __ b(ne, &output_transition); |
+ } |
+ |
+ // Check if the result fits in a smi. |
+ __ vmov(scratch1, single_scratch); |
+ __ add(scratch2, scratch1, Operand(0x40000000), SetCC); |
+ // If not try to return a heap number. |
+ __ b(mi, &return_heap_number); |
+ // Tag the result and return. |
+ __ SmiTag(r0, scratch1); |
+ __ Ret(); |
+ } |
+ |
+ if (result_type_ >= ((op_ == Token::DIV) ? TRBinaryOpIC::HEAP_NUMBER |
+ : TRBinaryOpIC::INT32)) { |
+ __ bind(&return_heap_number); |
+ // We are using vfp registers so r5 is available. |
+ heap_number_result = r5; |
+ GenerateHeapResultAllocation(masm, |
+ heap_number_result, |
+ heap_number_map, |
+ scratch1, |
+ scratch2, |
+ &call_runtime); |
+ __ sub(r0, heap_number_result, Operand(kHeapObjectTag)); |
+ __ vstr(d5, r0, HeapNumber::kValueOffset); |
+ __ mov(r0, heap_number_result); |
+ __ Ret(); |
+ } |
+ |
+ // A DIV operation expecting an integer result falls through |
+ // to type transition. |
+ |
+ } else { |
+ // We preserved r0 and r1 to be able to call runtime. |
+ // Save the left value on the stack. |
+ __ Push(r5, r4); |
+ |
+ // Allocate a heap number to store the result. |
+ heap_number_result = r5; |
+ GenerateHeapResultAllocation(masm, |
+ heap_number_result, |
+ heap_number_map, |
+ scratch1, |
+ scratch2, |
+ &call_runtime); |
+ |
+ // Load the left value from the value saved on the stack. |
+ __ Pop(r1, r0); |
+ |
+ // Call the C function to handle the double operation. |
+ FloatingPointHelper::CallCCodeForDoubleOperation( |
+ masm, op_, heap_number_result, scratch1); |
+ } |
+ |
+ break; |
+ } |
+ |
+ case Token::BIT_OR: |
+ case Token::BIT_XOR: |
+ case Token::BIT_AND: |
+ case Token::SAR: |
+ case Token::SHR: |
+ case Token::SHL: { |
+ Label return_heap_number; |
+ Register scratch3 = r5; |
+ // Convert operands to 32-bit integers. Right in r2 and left in r3. |
Søren Thygesen Gjesse, 2011/02/28 09:54:32:
  Add "Preserve r0 and r1 for the runtime call to th
Søren Thygesen Gjesse, 2011/03/02 09:33:08:
  Done.
+ FloatingPointHelper::LoadNumberAsInt32(masm, |
+ left, |
+ r3, |
+ heap_number_map, |
+ scratch1, |
+ scratch2, |
+ scratch3, |
+ d0, |
+ &input_transition); |
+ FloatingPointHelper::LoadNumberAsInt32(masm, |
+ right, |
+ r2, |
+ heap_number_map, |
+ scratch1, |
+ scratch2, |
+ scratch3, |
+ d0, |
+ &input_transition); |
+ |
+ // The ECMA-262 standard specifies that, for shift operations, only the |
+ // 5 least significant bits of the shift value should be used. |
+ switch (op_) { |
+ case Token::BIT_OR: |
+ __ orr(r2, r3, Operand(r2)); |
+ break; |
+ case Token::BIT_XOR: |
+ __ eor(r2, r3, Operand(r2)); |
+ break; |
+ case Token::BIT_AND: |
+ __ and_(r2, r3, Operand(r2)); |
+ break; |
+ case Token::SAR: |
+ __ and_(r2, r2, Operand(0x1f)); |
+ __ mov(r2, Operand(r3, ASR, r2)); |
+ break; |
+ case Token::SHR: |
+ __ and_(r2, r2, Operand(0x1f)); |
+ __ mov(r2, Operand(r3, LSR, r2), SetCC); |
+ // SHR is special because it is required to produce a positive answer. |
+ // We only get a negative result if the shift value (r2) is 0. |
+ // Such a result cannot be represented as a signed 32-bit integer, so |
+ // try to return a heap number if we can. |
+ // The non-VFP3 code does not support this special case, so jump to |
Søren Thygesen Gjesse, 2011/02/28 09:54:32:
  Can we fix WriteInt32ToHeapNumberStub to avoid a r
Søren Thygesen Gjesse, 2011/03/02 09:33:08:
  Postponed.
+ // runtime if we don't support it. |
+ if (CpuFeatures::IsSupported(VFP3)) { |
Søren Thygesen Gjesse, 2011/02/28 09:54:32:
  No need to actually enter VFP3 scope here, as the
Søren Thygesen Gjesse, 2011/03/02 09:33:08:
  Done.
+ CpuFeatures::Scope scope(VFP3); |
+ __ b(mi, |
+ (result_type_ <= TRBinaryOpIC::INT32) ? &output_transition |
+ : &return_heap_number); |
+ } else { |
+ __ b(mi, (result_type_ <= TRBinaryOpIC::INT32) ? &output_transition |
+ : &call_runtime); |
+ } |
+ break; |
+ case Token::SHL: |
+ __ and_(r2, r2, Operand(0x1f)); |
+ __ mov(r2, Operand(r3, LSL, r2)); |
+ break; |
+ default: |
+ UNREACHABLE(); |
+ } |
+ |
+ // Check if the result fits in a smi. |
+ __ add(scratch1, r2, Operand(0x40000000), SetCC); |
+ // If not try to return a heap number. (We know the result is an int32.) |
+ __ b(mi, &return_heap_number); |
+ // Tag the result and return. |
+ __ SmiTag(r0, r2); |
+ __ Ret(); |
+ |
+ __ bind(&return_heap_number); |
+ if (CpuFeatures::IsSupported(VFP3)) { |
+ CpuFeatures::Scope scope(VFP3); |
+ heap_number_result = r5; |
+ GenerateHeapResultAllocation(masm, |
+ heap_number_result, |
+ heap_number_map, |
+ scratch1, |
+ scratch2, |
+ &call_runtime); |
+ |
+ if (op_ != Token::SHR) { |
+ // Convert the result to a floating point value. |
+ __ vmov(double_scratch.low(), r2); |
+ __ vcvt_f64_s32(double_scratch, double_scratch.low()); |
+ } else { |
+ // The result must be interpreted as an unsigned 32bit integer. |
+ __ vmov(double_scratch.low(), r2); |
+ __ vcvt_f64_u32(double_scratch, double_scratch.low()); |
+ } |
+ |
+ // Store the result. |
+ __ sub(r0, heap_number_result, Operand(kHeapObjectTag)); |
+ __ vstr(double_scratch, r0, HeapNumber::kValueOffset); |
+ __ mov(r0, heap_number_result); |
+ __ Ret(); |
+ } else { |
+ // Tail call that writes the int32 in r2 to the heap number in r0, using |
+ // r3 as scratch. r0 is preserved and returned. |
+ WriteInt32ToHeapNumberStub stub(r2, r0, r3); |
+ __ TailCallStub(&stub); |
+ } |
+ |
+ break; |
+ } |
+ |
+ default: |
+ UNREACHABLE(); |
+ } |
+ |
+ __ bind(&input_transition); |
+ __ bind(&output_transition); |
GenerateTypeTransition(masm); |
+ |
Søren Thygesen Gjesse, 2011/02/28 09:54:32:
  I see no jump to this label - dead code?
Søren Thygesen Gjesse, 2011/03/02 09:33:08:
  Removed.
+ __ bind(&restore_left_and_call_runtime); |
+ __ Pop(r1, r0); |
+ __ bind(&call_runtime); |
+ GenerateCallRuntime(masm); |
} |
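
One more note on the SHR case above: JavaScript's >>> yields an unsigned 32-bit result, so a zero shift count applied to a negative input produces a value above INT32_MAX that neither a smi nor a signed int32 can hold, which is why that path allocates a heap number or transitions. Sketch (illustrative only):

  #include <cstdint>

  // Result of x >>> y as JavaScript defines it: an unsigned 32-bit value.
  double JsShr(int32_t value, uint32_t count) {
    uint32_t result = static_cast<uint32_t>(value) >> (count & 0x1f);
    return static_cast<double>(result);  // e.g. (-1 >>> 0) === 4294967295
  }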