Chromium Code Reviews
| Index: src/ia32/code-stubs-ia32.cc |
| diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc |
| index 174ebbbfaf7ea5e0086ab0de0e1867caba9c1867..1c3faac9ea4d18a8bd58ad84bf507bfd34380d42 100644 |
| --- a/src/ia32/code-stubs-ia32.cc |
| +++ b/src/ia32/code-stubs-ia32.cc |
| @@ -456,7 +456,6 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) { |
| // restore them. |
| __ pushad(); |
| if (save_doubles_ == kSaveFPRegs) { |
| - CpuFeatureScope scope(masm, SSE2); |
| __ sub(esp, Immediate(kDoubleSize * XMMRegister::kNumRegisters)); |
| for (int i = 0; i < XMMRegister::kNumRegisters; i++) { |
| XMMRegister reg = XMMRegister::from_code(i); |
| @@ -473,7 +472,6 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) { |
| ExternalReference::store_buffer_overflow_function(isolate()), |
| argument_count); |
| if (save_doubles_ == kSaveFPRegs) { |
| - CpuFeatureScope scope(masm, SSE2); |
| for (int i = 0; i < XMMRegister::kNumRegisters; i++) { |
| XMMRegister reg = XMMRegister::from_code(i); |
| __ movsd(reg, Operand(esp, i * kDoubleSize)); |
| @@ -726,7 +724,6 @@ void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm, |
| void MathPowStub::Generate(MacroAssembler* masm) { |
| - CpuFeatureScope use_sse2(masm, SSE2); |
| Factory* factory = isolate()->factory(); |
| const Register exponent = eax; |
| const Register base = edx; |
| @@ -2041,15 +2038,14 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) { |
| Label non_number_comparison; |
| Label unordered; |
| __ bind(&generic_heap_number_comparison); |
| - if (CpuFeatures::IsSupported(SSE2)) { |
| - CpuFeatureScope use_sse2(masm, SSE2); |
| - CpuFeatureScope use_cmov(masm, CMOV); |
| - FloatingPointHelper::LoadSSE2Operands(masm, &non_number_comparison); |
| - __ ucomisd(xmm0, xmm1); |
| + FloatingPointHelper::LoadSSE2Operands(masm, &non_number_comparison); |
| + __ ucomisd(xmm0, xmm1); |
| + // Don't base result on EFLAGS when a NaN is involved. |
| + __ j(parity_even, &unordered, Label::kNear); |
| - // Don't base result on EFLAGS when a NaN is involved. |
| - __ j(parity_even, &unordered, Label::kNear); |
| + if (CpuFeatures::IsSupported(CMOV)) { |
| + CpuFeatureScope use_cmov(masm, CMOV); |
| // Return a result of -1, 0, or 1, based on EFLAGS. |
| __ mov(eax, 0); // equal |
| __ mov(ecx, Immediate(Smi::FromInt(1))); |
| @@ -2058,15 +2054,6 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) { |
| __ cmov(below, eax, ecx); |
| __ ret(0); |
| } else { |
| - FloatingPointHelper::CheckFloatOperands( |
| - masm, &non_number_comparison, ebx); |
| - FloatingPointHelper::LoadFloatOperand(masm, eax); |
| - FloatingPointHelper::LoadFloatOperand(masm, edx); |
| - __ FCmp(); |
| - |
| - // Don't base result on EFLAGS when a NaN is involved. |
| - __ j(parity_even, &unordered, Label::kNear); |
| - |
| Label below_label, above_label; |
| // Return a result of -1, 0, or 1, based on EFLAGS. |
| __ j(below, &below_label, Label::kNear); |
| @@ -2604,28 +2591,20 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) { |
| // It is important that the store buffer overflow stubs are generated first. |
| ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate); |
| CreateAllocationSiteStub::GenerateAheadOfTime(isolate); |
| - if (Serializer::enabled(isolate)) { |
| - PlatformFeatureScope sse2(isolate, SSE2); |
| - BinaryOpICStub::GenerateAheadOfTime(isolate); |
| - BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate); |
| - } else { |
| - BinaryOpICStub::GenerateAheadOfTime(isolate); |
| - BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate); |
| - } |
| + BinaryOpICStub::GenerateAheadOfTime(isolate); |
| + BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate); |
| } |
| void CodeStub::GenerateFPStubs(Isolate* isolate) { |
| - if (CpuFeatures::IsSupported(SSE2)) { |
| - CEntryStub save_doubles(isolate, 1, kSaveFPRegs); |
| - // Stubs might already be in the snapshot, detect that and don't regenerate, |
| - // which would lead to code stub initialization state being messed up. |
| - Code* save_doubles_code; |
| - if (!save_doubles.FindCodeInCache(&save_doubles_code)) { |
| - save_doubles_code = *(save_doubles.GetCode()); |
| - } |
| - isolate->set_fp_stubs_generated(true); |
| + CEntryStub save_doubles(isolate, 1, kSaveFPRegs); |
| + // Stubs might already be in the snapshot, detect that and don't regenerate, |
| + // which would lead to code stub initialization state being messed up. |
| + Code* save_doubles_code; |
| + if (!save_doubles.FindCodeInCache(&save_doubles_code)) { |
| + save_doubles_code = *(save_doubles.GetCode()); |
| } |
| + isolate->set_fp_stubs_generated(true); |
| } |
| @@ -3775,8 +3754,7 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) { |
| // Inlining the double comparison and falling back to the general compare |
| // stub if NaN is involved or SSE2 or CMOV is unsupported. |
| - if (CpuFeatures::IsSupported(SSE2) && CpuFeatures::IsSupported(CMOV)) { |
| - CpuFeatureScope scope1(masm, SSE2); |
| + if (CpuFeatures::IsSupported(CMOV)) { |
| CpuFeatureScope scope2(masm, CMOV); |
| // Load left and right operand. |
| @@ -4322,15 +4300,13 @@ void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime( |
| Isolate* isolate) { |
| StoreBufferOverflowStub stub(isolate, kDontSaveFPRegs); |
| stub.GetCode(); |
| - if (CpuFeatures::IsSafeForSnapshot(isolate, SSE2)) { |
| - StoreBufferOverflowStub stub2(isolate, kSaveFPRegs); |
| - stub2.GetCode(); |
| - } |
| + StoreBufferOverflowStub stub2(isolate, kSaveFPRegs); |
| + stub2.GetCode(); |
| } |
| bool CodeStub::CanUseFPRegisters() { |
|
Sven Panne
2014/05/07 13:46:03
We can nuke CanUseFPRegisters, it is always true on ia32 now that SSE2 support is mandatory. [comment truncated in extraction — wording after "true o…" reconstructed from the diff context]
|
| - return CpuFeatures::IsSupported(SSE2); |
| + return true; // SSE2 support is mandatory. |
| } |
| @@ -4606,8 +4582,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) { |
| ecx, |
| edi, |
| xmm0, |
| - &slow_elements_from_double, |
| - false); |
| + &slow_elements_from_double); |
| __ pop(edx); |
| __ ret(0); |
| } |