Chromium Code Reviews| Index: src/arm/code-stubs-arm.cc |
| diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc |
| index e9b7cf0ca09c5baabec58f3243e2c0cd47e16576..57ec077c1961d2e3a05f4433832ef77ab859497c 100644 |
| --- a/src/arm/code-stubs-arm.cc |
| +++ b/src/arm/code-stubs-arm.cc |
| @@ -599,8 +599,8 @@ void FloatingPointHelper::LoadSmis(MacroAssembler* masm, |
| FloatingPointHelper::Destination destination, |
| Register scratch1, |
| Register scratch2) { |
| - if (CpuFeatures::IsSupported(VFP3)) { |
| - CpuFeatures::Scope scope(VFP3); |
| + if (CpuFeatures::IsSupported(VFP2)) { |
| + CpuFeatures::Scope scope(VFP2); |
| __ mov(scratch1, Operand(r0, ASR, kSmiTagSize)); |
| __ vmov(d7.high(), scratch1); |
| __ vcvt_f64_s32(d7, d7.high()); |
| @@ -669,9 +669,9 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm, |
| __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number); |
| // Handle loading a double from a heap number. |
| - if (CpuFeatures::IsSupported(VFP3) && |
| + if (CpuFeatures::IsSupported(VFP2) && |
| destination == kVFPRegisters) { |
| - CpuFeatures::Scope scope(VFP3); |
| + CpuFeatures::Scope scope(VFP2); |
| // Load the double from tagged HeapNumber to double register. |
| __ sub(scratch1, object, Operand(kHeapObjectTag)); |
| __ vldr(dst, scratch1, HeapNumber::kValueOffset); |
| @@ -684,8 +684,8 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm, |
| // Handle loading a double from a smi. |
| __ bind(&is_smi); |
| - if (CpuFeatures::IsSupported(VFP3)) { |
| - CpuFeatures::Scope scope(VFP3); |
| + if (CpuFeatures::IsSupported(VFP2)) { |
| + CpuFeatures::Scope scope(VFP2); |
| // Convert smi to double using VFP instructions. |
| __ vmov(dst.high(), scratch1); |
| __ vcvt_f64_s32(dst, dst.high()); |
| @@ -762,8 +762,8 @@ void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm, |
| Label done; |
| - if (CpuFeatures::IsSupported(VFP3)) { |
| - CpuFeatures::Scope scope(VFP3); |
| + if (CpuFeatures::IsSupported(VFP2)) { |
| + CpuFeatures::Scope scope(VFP2); |
| __ vmov(single_scratch, int_scratch); |
| __ vcvt_f64_s32(double_dst, single_scratch); |
| if (destination == kCoreRegisters) { |
| @@ -856,8 +856,8 @@ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm, |
| __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32); |
| // Load the number. |
| - if (CpuFeatures::IsSupported(VFP3)) { |
| - CpuFeatures::Scope scope(VFP3); |
| + if (CpuFeatures::IsSupported(VFP2)) { |
| + CpuFeatures::Scope scope(VFP2); |
| // Load the double value. |
| __ sub(scratch1, object, Operand(kHeapObjectTag)); |
| __ vldr(double_dst, scratch1, HeapNumber::kValueOffset); |
| @@ -927,8 +927,8 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm, |
| // Object is a heap number. |
| // Convert the floating point value to a 32-bit integer. |
| - if (CpuFeatures::IsSupported(VFP3)) { |
| - CpuFeatures::Scope scope(VFP3); |
| + if (CpuFeatures::IsSupported(VFP2)) { |
| + CpuFeatures::Scope scope(VFP2); |
| SwVfpRegister single_scratch = double_scratch.low(); |
| // Load the double value. |
| __ sub(scratch1, object, Operand(kHeapObjectTag)); |
| @@ -1058,7 +1058,7 @@ void FloatingPointHelper::CallCCodeForDoubleOperation( |
| __ push(lr); |
| __ PrepareCallCFunction(0, 2, scratch); |
| if (masm->use_eabi_hardfloat()) { |
| - CpuFeatures::Scope scope(VFP3); |
| + CpuFeatures::Scope scope(VFP2); |
| __ vmov(d0, r0, r1); |
| __ vmov(d1, r2, r3); |
| } |
| @@ -1070,7 +1070,7 @@ void FloatingPointHelper::CallCCodeForDoubleOperation( |
| // Store answer in the overwritable heap number. Double returned in |
| // registers r0 and r1 or in d0. |
| if (masm->use_eabi_hardfloat()) { |
| - CpuFeatures::Scope scope(VFP3); |
| + CpuFeatures::Scope scope(VFP2); |
| __ vstr(d0, |
| FieldMemOperand(heap_number_result, HeapNumber::kValueOffset)); |
| } else { |
| @@ -1289,9 +1289,9 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm, |
| } |
| // Lhs is a smi, rhs is a number. |
| - if (CpuFeatures::IsSupported(VFP3)) { |
| + if (CpuFeatures::IsSupported(VFP2)) { |
| // Convert lhs to a double in d7. |
| - CpuFeatures::Scope scope(VFP3); |
| + CpuFeatures::Scope scope(VFP2); |
| __ SmiToDoubleVFPRegister(lhs, d7, r7, s15); |
| // Load the double from rhs, tagged HeapNumber r0, to d6. |
| __ sub(r7, rhs, Operand(kHeapObjectTag)); |
| @@ -1329,8 +1329,8 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm, |
| } |
| // Rhs is a smi, lhs is a heap number. |
| - if (CpuFeatures::IsSupported(VFP3)) { |
| - CpuFeatures::Scope scope(VFP3); |
| + if (CpuFeatures::IsSupported(VFP2)) { |
| + CpuFeatures::Scope scope(VFP2); |
| // Load the double from lhs, tagged HeapNumber r1, to d7. |
| __ sub(r7, lhs, Operand(kHeapObjectTag)); |
| __ vldr(d7, r7, HeapNumber::kValueOffset); |
| @@ -1442,7 +1442,7 @@ static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, |
| __ push(lr); |
| __ PrepareCallCFunction(0, 2, r5); |
| if (masm->use_eabi_hardfloat()) { |
| - CpuFeatures::Scope scope(VFP3); |
| + CpuFeatures::Scope scope(VFP2); |
| __ vmov(d0, r0, r1); |
| __ vmov(d1, r2, r3); |
| } |
| @@ -1517,8 +1517,8 @@ static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm, |
| // Both are heap numbers. Load them up then jump to the code we have |
| // for that. |
| - if (CpuFeatures::IsSupported(VFP3)) { |
| - CpuFeatures::Scope scope(VFP3); |
| + if (CpuFeatures::IsSupported(VFP2)) { |
| + CpuFeatures::Scope scope(VFP2); |
| __ sub(r7, rhs, Operand(kHeapObjectTag)); |
| __ vldr(d6, r7, HeapNumber::kValueOffset); |
| __ sub(r7, lhs, Operand(kHeapObjectTag)); |
| @@ -1607,8 +1607,8 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm, |
| Label load_result_from_cache; |
| if (!object_is_smi) { |
| __ JumpIfSmi(object, &is_smi); |
| - if (CpuFeatures::IsSupported(VFP3)) { |
| - CpuFeatures::Scope scope(VFP3); |
| + if (CpuFeatures::IsSupported(VFP2)) { |
| + CpuFeatures::Scope scope(VFP2); |
| __ CheckMap(object, |
| scratch1, |
| Heap::kHeapNumberMapRootIndex, |
| @@ -1739,9 +1739,9 @@ void CompareStub::Generate(MacroAssembler* masm) { |
| // The arguments have been converted to doubles and stored in d6 and d7, if |
| // VFP2 is supported, or in r0, r1, r2, and r3. |
| Isolate* isolate = masm->isolate(); |
| - if (CpuFeatures::IsSupported(VFP3)) { |
| + if (CpuFeatures::IsSupported(VFP2)) { |
| __ bind(&lhs_not_nan); |
| - CpuFeatures::Scope scope(VFP3); |
| + CpuFeatures::Scope scope(VFP2); |
| Label no_nan; |
| // VFP2 instructions to implement double precision comparison. |
| __ VFPCompareAndSetFlags(d7, d6); |
| @@ -1860,7 +1860,7 @@ void ToBooleanStub::Generate(MacroAssembler* masm) { |
| // This stub overrides SometimesSetsUpAFrame() to return false. That means |
| // we cannot call anything that could cause a GC from this stub. |
| // This stub uses VFP2 instructions. |
| - CpuFeatures::Scope scope(VFP3); |
| + CpuFeatures::Scope scope(VFP2); |
| Label patch; |
| const Register map = r9.is(tos_) ? r7 : r9; |
| @@ -1972,7 +1972,7 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) { |
| // restore them. |
| __ stm(db_w, sp, kCallerSaved | lr.bit()); |
| if (save_doubles_ == kSaveFPRegs) { |
| - CpuFeatures::Scope scope(VFP3); |
| + CpuFeatures::Scope scope(VFP2); |
| __ sub(sp, sp, Operand(kDoubleSize * DwVfpRegister::kNumRegisters)); |
| for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) { |
| DwVfpRegister reg = DwVfpRegister::from_code(i); |
| @@ -1990,7 +1990,7 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) { |
| ExternalReference::store_buffer_overflow_function(masm->isolate()), |
| argument_count); |
| if (save_doubles_ == kSaveFPRegs) { |
| - CpuFeatures::Scope scope(VFP3); |
| + CpuFeatures::Scope scope(VFP2); |
| for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) { |
| DwVfpRegister reg = DwVfpRegister::from_code(i); |
| __ vldr(reg, MemOperand(sp, i * kDoubleSize)); |
| @@ -2220,9 +2220,9 @@ void UnaryOpStub::GenerateHeapNumberCodeBitNot( |
| __ mov(r0, r2); // Move newly allocated heap number to r0. |
| } |
| - if (CpuFeatures::IsSupported(VFP3)) { |
| + if (CpuFeatures::IsSupported(VFP2)) { |
| // Convert the int32 in r1 to the heap number in r0. r2 is corrupted. |
| - CpuFeatures::Scope scope(VFP3); |
| + CpuFeatures::Scope scope(VFP2); |
| __ vmov(s0, r1); |
| __ vcvt_f64_s32(d0, s0); |
| __ sub(r2, r0, Operand(kHeapObjectTag)); |
| @@ -2522,7 +2522,7 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm, |
| // Load left and right operands into d6 and d7 or r0/r1 and r2/r3 |
| // depending on whether VFP2 is available or not. |
| FloatingPointHelper::Destination destination = |
| - CpuFeatures::IsSupported(VFP3) && |
| + CpuFeatures::IsSupported(VFP2) && |
| op_ != Token::MOD ? |
| FloatingPointHelper::kVFPRegisters : |
| FloatingPointHelper::kCoreRegisters; |
| @@ -2549,7 +2549,7 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm, |
| // Using VFP registers: |
| // d6: Left value |
| // d7: Right value |
| - CpuFeatures::Scope scope(VFP3); |
| + CpuFeatures::Scope scope(VFP2); |
| switch (op_) { |
| case Token::ADD: |
| __ vadd(d5, d6, d7); |
| @@ -2638,7 +2638,7 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm, |
| // The code below for writing into heap numbers isn't capable of |
| // writing the register as an unsigned int so we go to slow case if we |
| // hit this case. |
| - if (CpuFeatures::IsSupported(VFP3)) { |
| + if (CpuFeatures::IsSupported(VFP2)) { |
| __ b(mi, &result_not_a_smi); |
| } else { |
| __ b(mi, not_numbers); |
| @@ -2677,10 +2677,10 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm, |
| // result. |
| __ mov(r0, Operand(r5)); |
| - if (CpuFeatures::IsSupported(VFP3)) { |
| + if (CpuFeatures::IsSupported(VFP2)) { |
| // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. As |
| // mentioned above SHR needs to always produce a positive result. |
| - CpuFeatures::Scope scope(VFP3); |
| + CpuFeatures::Scope scope(VFP2); |
| __ vmov(s0, r2); |
| if (op_ == Token::SHR) { |
| __ vcvt_f64_u32(d0, s0); |
| @@ -2839,7 +2839,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { |
| // Jump to type transition if they are not. The registers r0 and r1 (right |
| // and left) are preserved for the runtime call. |
| FloatingPointHelper::Destination destination = |
| - (CpuFeatures::IsSupported(VFP3) && op_ != Token::MOD) |
| + (CpuFeatures::IsSupported(VFP2) && op_ != Token::MOD) |
| ? FloatingPointHelper::kVFPRegisters |
| : FloatingPointHelper::kCoreRegisters; |
| @@ -2867,7 +2867,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { |
| &transition); |
| if (destination == FloatingPointHelper::kVFPRegisters) { |
| - CpuFeatures::Scope scope(VFP3); |
| + CpuFeatures::Scope scope(VFP2); |
| Label return_heap_number; |
| switch (op_) { |
| case Token::ADD: |
| @@ -3034,9 +3034,9 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { |
| // We only get a negative result if the shift value (r2) is 0. |
| // This result cannot be respresented as a signed 32-bit integer, try |
| // to return a heap number if we can. |
| - // The non vfp3 code does not support this special case, so jump to |
| + // The non vfp2 code does not support this special case, so jump to |
| // runtime if we don't support it. |
| - if (CpuFeatures::IsSupported(VFP3)) { |
| + if (CpuFeatures::IsSupported(VFP2)) { |
| __ b(mi, (result_type_ <= BinaryOpIC::INT32) |
| ? &transition |
| : &return_heap_number); |
| @@ -3071,8 +3071,8 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { |
| scratch2, |
| &call_runtime); |
| - if (CpuFeatures::IsSupported(VFP3)) { |
| - CpuFeatures::Scope scope(VFP3); |
| + if (CpuFeatures::IsSupported(VFP2)) { |
| + CpuFeatures::Scope scope(VFP2); |
| if (op_ != Token::SHR) { |
| // Convert the result to a floating point value. |
| __ vmov(double_scratch.low(), r2); |
| @@ -3301,8 +3301,8 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { |
| const Register cache_entry = r0; |
| const bool tagged = (argument_type_ == TAGGED); |
| - if (CpuFeatures::IsSupported(VFP3)) { |
| - CpuFeatures::Scope scope(VFP3); |
| + if (CpuFeatures::IsSupported(VFP2)) { |
| + CpuFeatures::Scope scope(VFP2); |
| if (tagged) { |
| // Argument is a number and is on stack and in r0. |
| // Load argument and check if it is a smi. |
| @@ -3403,8 +3403,8 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { |
| ExternalReference(RuntimeFunction(), masm->isolate()); |
| __ TailCallExternalReference(runtime_function, 1, 1); |
| } else { |
| - ASSERT(CpuFeatures::IsSupported(VFP3)); |
| - CpuFeatures::Scope scope(VFP3); |
| + ASSERT(CpuFeatures::IsSupported(VFP2)); |
| + CpuFeatures::Scope scope(VFP2); |
| Label no_update; |
| Label skip_cache; |
| @@ -3465,6 +3465,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { |
| void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm, |
| Register scratch) { |
| + ASSERT(CpuFeatures::IsEnabled(VFP2)); |
| Isolate* isolate = masm->isolate(); |
| __ push(lr); |
| @@ -3525,7 +3526,7 @@ void InterruptStub::Generate(MacroAssembler* masm) { |
| void MathPowStub::Generate(MacroAssembler* masm) { |
| - CpuFeatures::Scope vfp3_scope(VFP3); |
| + CpuFeatures::Scope vfp2_scope(VFP2); |
| const Register base = r1; |
| const Register exponent = r2; |
| const Register heapnumbermap = r5; |
| @@ -3624,7 +3625,7 @@ void MathPowStub::Generate(MacroAssembler* masm) { |
| // Add +0 to convert -0 to +0. |
| __ vadd(double_scratch, double_base, kDoubleRegZero); |
| - __ vmov(double_result, 1); |
| + __ vmov(double_result, 1.0); |
| __ vsqrt(double_scratch, double_scratch); |
| __ vdiv(double_result, double_result, double_scratch); |
| __ jmp(&done); |
| @@ -3981,12 +3982,12 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { |
| // Save callee-saved registers (incl. cp and fp), sp, and lr |
| __ stm(db_w, sp, kCalleeSaved | lr.bit()); |
| - if (CpuFeatures::IsSupported(VFP3)) { |
| - CpuFeatures::Scope scope(VFP3); |
| + if (CpuFeatures::IsSupported(VFP2)) { |
| + CpuFeatures::Scope scope(VFP2); |
| // Save callee-saved vfp registers. |
| __ vstm(db_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg); |
| // Set up the reserved register for 0.0. |
| - __ vmov(kDoubleRegZero, 0.0); |
| + __ Vmov(kDoubleRegZero, 0.0); |
|
Rodolph Perfetta
2012/07/25 10:59:54
The Vmov macro will use kDoubleRegZero if one wants to load 0.0 — so it presumably cannot be used here to initialize kDoubleRegZero itself [comment truncated in review UI; reconstructed — verify against the original review thread]
|
| } |
| // Get address of argv, see stm above. |
| @@ -3997,7 +3998,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { |
| // Set up argv in r4. |
| int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize; |
| - if (CpuFeatures::IsSupported(VFP3)) { |
| + if (CpuFeatures::IsSupported(VFP2)) { |
| offset_to_argv += kNumDoubleCalleeSaved * kDoubleSize; |
| } |
| __ ldr(r4, MemOperand(sp, offset_to_argv)); |
| @@ -4135,8 +4136,8 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { |
| } |
| #endif |
| - if (CpuFeatures::IsSupported(VFP3)) { |
| - CpuFeatures::Scope scope(VFP3); |
| + if (CpuFeatures::IsSupported(VFP2)) { |
| + CpuFeatures::Scope scope(VFP2); |
| // Restore callee-saved vfp registers. |
| __ vldm(ia_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg); |
| } |
| @@ -6663,8 +6664,8 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) { |
| // Inlining the double comparison and falling back to the general compare |
| // stub if NaN is involved or VFP2 is unsupported. |
| - if (CpuFeatures::IsSupported(VFP3)) { |
| - CpuFeatures::Scope scope(VFP3); |
| + if (CpuFeatures::IsSupported(VFP2)) { |
| + CpuFeatures::Scope scope(VFP2); |
| // Load left and right operand |
| __ sub(r2, r1, Operand(kHeapObjectTag)); |