Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "v8.h" | 5 #include "v8.h" |
| 6 | 6 |
| 7 #if V8_TARGET_ARCH_IA32 | 7 #if V8_TARGET_ARCH_IA32 |
| 8 | 8 |
| 9 #include "bootstrapper.h" | 9 #include "bootstrapper.h" |
| 10 #include "code-stubs.h" | 10 #include "code-stubs.h" |
| (...skipping 438 matching lines...) | |
| 449 __ ret(0); | 449 __ ret(0); |
| 450 } | 450 } |
| 451 | 451 |
| 452 | 452 |
| 453 void StoreBufferOverflowStub::Generate(MacroAssembler* masm) { | 453 void StoreBufferOverflowStub::Generate(MacroAssembler* masm) { |
| 454 // We don't allow a GC during a store buffer overflow so there is no need to | 454 // We don't allow a GC during a store buffer overflow so there is no need to |
| 455 // store the registers in any particular way, but we do have to store and | 455 // store the registers in any particular way, but we do have to store and |
| 456 // restore them. | 456 // restore them. |
| 457 __ pushad(); | 457 __ pushad(); |
| 458 if (save_doubles_ == kSaveFPRegs) { | 458 if (save_doubles_ == kSaveFPRegs) { |
| 459 CpuFeatureScope scope(masm, SSE2); | |
| 460 __ sub(esp, Immediate(kDoubleSize * XMMRegister::kNumRegisters)); | 459 __ sub(esp, Immediate(kDoubleSize * XMMRegister::kNumRegisters)); |
| 461 for (int i = 0; i < XMMRegister::kNumRegisters; i++) { | 460 for (int i = 0; i < XMMRegister::kNumRegisters; i++) { |
| 462 XMMRegister reg = XMMRegister::from_code(i); | 461 XMMRegister reg = XMMRegister::from_code(i); |
| 463 __ movsd(Operand(esp, i * kDoubleSize), reg); | 462 __ movsd(Operand(esp, i * kDoubleSize), reg); |
| 464 } | 463 } |
| 465 } | 464 } |
| 466 const int argument_count = 1; | 465 const int argument_count = 1; |
| 467 | 466 |
| 468 AllowExternalCallThatCantCauseGC scope(masm); | 467 AllowExternalCallThatCantCauseGC scope(masm); |
| 469 __ PrepareCallCFunction(argument_count, ecx); | 468 __ PrepareCallCFunction(argument_count, ecx); |
| 470 __ mov(Operand(esp, 0 * kPointerSize), | 469 __ mov(Operand(esp, 0 * kPointerSize), |
| 471 Immediate(ExternalReference::isolate_address(isolate()))); | 470 Immediate(ExternalReference::isolate_address(isolate()))); |
| 472 __ CallCFunction( | 471 __ CallCFunction( |
| 473 ExternalReference::store_buffer_overflow_function(isolate()), | 472 ExternalReference::store_buffer_overflow_function(isolate()), |
| 474 argument_count); | 473 argument_count); |
| 475 if (save_doubles_ == kSaveFPRegs) { | 474 if (save_doubles_ == kSaveFPRegs) { |
| 476 CpuFeatureScope scope(masm, SSE2); | |
| 477 for (int i = 0; i < XMMRegister::kNumRegisters; i++) { | 475 for (int i = 0; i < XMMRegister::kNumRegisters; i++) { |
| 478 XMMRegister reg = XMMRegister::from_code(i); | 476 XMMRegister reg = XMMRegister::from_code(i); |
| 479 __ movsd(reg, Operand(esp, i * kDoubleSize)); | 477 __ movsd(reg, Operand(esp, i * kDoubleSize)); |
| 480 } | 478 } |
| 481 __ add(esp, Immediate(kDoubleSize * XMMRegister::kNumRegisters)); | 479 __ add(esp, Immediate(kDoubleSize * XMMRegister::kNumRegisters)); |
| 482 } | 480 } |
| 483 __ popad(); | 481 __ popad(); |
| 484 __ ret(0); | 482 __ ret(0); |
| 485 } | 483 } |
| 486 | 484 |
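
The stub above spills every XMM register into a block it reserves below the `pushad()` area, then restores them at the same offsets. A minimal sketch (plain C++, not V8 code) of the layout it computes; the constants mirror the ia32 assumptions above (8 XMM registers, 8 bytes per double):

```cpp
// Plain C++, not V8 code: the spill-area layout StoreBufferOverflowStub
// builds when save_doubles_ == kSaveFPRegs.
#include <cstdio>

int main() {
  const int kDoubleSize = 8;       // sizeof(double)
  const int kNumXMMRegisters = 8;  // xmm0..xmm7 on ia32

  // sub(esp, Immediate(kDoubleSize * kNumRegisters)) reserves one block:
  const int reserved = kDoubleSize * kNumXMMRegisters;
  printf("reserved %d bytes below the pushad() area\n", reserved);

  // movsd stores register i at [esp + i * kDoubleSize]; the restore loop
  // walks the same offsets before add(esp, reserved).
  for (int i = 0; i < kNumXMMRegisters; i++) {
    printf("xmm%d <-> [esp + %d]\n", i, i * kDoubleSize);
  }
  return 0;
}
```

Because the external call cannot trigger a GC, the spilled values only need to survive the call; the GC never scans them, which is why no particular layout is required.
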
| (...skipping 232 matching lines...) | |
| 719 __ mov(scratch, FieldOperand(eax, HeapObject::kMapOffset)); | 717 __ mov(scratch, FieldOperand(eax, HeapObject::kMapOffset)); |
| 720 __ cmp(scratch, factory->heap_number_map()); | 718 __ cmp(scratch, factory->heap_number_map()); |
| 721 __ j(not_equal, non_float); // argument in eax is not a number -> NaN | 719 __ j(not_equal, non_float); // argument in eax is not a number -> NaN |
| 722 | 720 |
| 723 // Fall-through: Both operands are numbers. | 721 // Fall-through: Both operands are numbers. |
| 724 __ bind(&done); | 722 __ bind(&done); |
| 725 } | 723 } |
| 726 | 724 |
| 727 | 725 |
| 728 void MathPowStub::Generate(MacroAssembler* masm) { | 726 void MathPowStub::Generate(MacroAssembler* masm) { |
| 729 CpuFeatureScope use_sse2(masm, SSE2); | |
| 730 Factory* factory = isolate()->factory(); | 727 Factory* factory = isolate()->factory(); |
| 731 const Register exponent = eax; | 728 const Register exponent = eax; |
| 732 const Register base = edx; | 729 const Register base = edx; |
| 733 const Register scratch = ecx; | 730 const Register scratch = ecx; |
| 734 const XMMRegister double_result = xmm3; | 731 const XMMRegister double_result = xmm3; |
| 735 const XMMRegister double_base = xmm2; | 732 const XMMRegister double_base = xmm2; |
| 736 const XMMRegister double_exponent = xmm1; | 733 const XMMRegister double_exponent = xmm1; |
| 737 const XMMRegister double_scratch = xmm4; | 734 const XMMRegister double_scratch = xmm4; |
| 738 | 735 |
| 739 Label call_runtime, done, exponent_not_smi, int_exponent; | 736 Label call_runtime, done, exponent_not_smi, int_exponent; |
| (...skipping 1294 matching lines...) | |
| 2034 __ j(equal, &return_not_equal); | 2031 __ j(equal, &return_not_equal); |
| 2035 | 2032 |
| 2036 // Fall through to the general case. | 2033 // Fall through to the general case. |
| 2037 __ bind(&slow); | 2034 __ bind(&slow); |
| 2038 } | 2035 } |
| 2039 | 2036 |
| 2040 // Generate the number comparison code. | 2037 // Generate the number comparison code. |
| 2041 Label non_number_comparison; | 2038 Label non_number_comparison; |
| 2042 Label unordered; | 2039 Label unordered; |
| 2043 __ bind(&generic_heap_number_comparison); | 2040 __ bind(&generic_heap_number_comparison); |
| 2044 if (CpuFeatures::IsSupported(SSE2)) { | 2041 |
| 2045 CpuFeatureScope use_sse2(masm, SSE2); | 2042 FloatingPointHelper::LoadSSE2Operands(masm, &non_number_comparison); |
| 2043 __ ucomisd(xmm0, xmm1); | |
| 2044 // Don't base result on EFLAGS when a NaN is involved. | |
| 2045 __ j(parity_even, &unordered, Label::kNear); | |
| 2046 | |
| 2047 if (CpuFeatures::IsSupported(CMOV)) { | |
| 2046 CpuFeatureScope use_cmov(masm, CMOV); | 2048 CpuFeatureScope use_cmov(masm, CMOV); |
| 2047 | |
| 2048 FloatingPointHelper::LoadSSE2Operands(masm, &non_number_comparison); | |
| 2049 __ ucomisd(xmm0, xmm1); | |
| 2050 | |
| 2051 // Don't base result on EFLAGS when a NaN is involved. | |
| 2052 __ j(parity_even, &unordered, Label::kNear); | |
| 2053 // Return a result of -1, 0, or 1, based on EFLAGS. | 2049 // Return a result of -1, 0, or 1, based on EFLAGS. |
| 2054 __ mov(eax, 0); // equal | 2050 __ mov(eax, 0); // equal |
| 2055 __ mov(ecx, Immediate(Smi::FromInt(1))); | 2051 __ mov(ecx, Immediate(Smi::FromInt(1))); |
| 2056 __ cmov(above, eax, ecx); | 2052 __ cmov(above, eax, ecx); |
| 2057 __ mov(ecx, Immediate(Smi::FromInt(-1))); | 2053 __ mov(ecx, Immediate(Smi::FromInt(-1))); |
| 2058 __ cmov(below, eax, ecx); | 2054 __ cmov(below, eax, ecx); |
| 2059 __ ret(0); | 2055 __ ret(0); |
| 2060 } else { | 2056 } else { |
| 2061 FloatingPointHelper::CheckFloatOperands( | |
| 2062 masm, &non_number_comparison, ebx); | |
| 2063 FloatingPointHelper::LoadFloatOperand(masm, eax); | |
| 2064 FloatingPointHelper::LoadFloatOperand(masm, edx); | |
| 2065 __ FCmp(); | |
| 2066 | |
| 2067 // Don't base result on EFLAGS when a NaN is involved. | |
| 2068 __ j(parity_even, &unordered, Label::kNear); | |
| 2069 | |
| 2070 Label below_label, above_label; | 2057 Label below_label, above_label; |
| 2071 // Return a result of -1, 0, or 1, based on EFLAGS. | 2058 // Return a result of -1, 0, or 1, based on EFLAGS. |
| 2072 __ j(below, &below_label, Label::kNear); | 2059 __ j(below, &below_label, Label::kNear); |
| 2073 __ j(above, &above_label, Label::kNear); | 2060 __ j(above, &above_label, Label::kNear); |
| 2074 | 2061 |
| 2075 __ Move(eax, Immediate(0)); | 2062 __ Move(eax, Immediate(0)); |
| 2076 __ ret(0); | 2063 __ ret(0); |
| 2077 | 2064 |
| 2078 __ bind(&below_label); | 2065 __ bind(&below_label); |
| 2079 __ mov(eax, Immediate(Smi::FromInt(-1))); | 2066 __ mov(eax, Immediate(Smi::FromInt(-1))); |
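
For reference, a hedged C++ model of the -1/0/1 mapping the `ucomisd`/`cmov` sequence above computes. `std::isunordered` stands in for the parity-flag check (`ucomisd` sets PF when either operand is NaN), and the caller handles the unordered case, mirroring `j(parity_even, &unordered)`:

```cpp
// Hedged model, not V8 code: the comparison contract of the stub above.
#include <cmath>
#include <cstdio>

// Returns -1, 0, or 1 like the stub; NaN is reported separately, mirroring
// the j(parity_even, &unordered) branch.
int CompareDoubles(double lhs, double rhs, bool* unordered) {
  *unordered = std::isunordered(lhs, rhs);
  if (*unordered) return 0;  // caller takes the &unordered path
  if (lhs > rhs) return 1;   // cmov(above, eax, ecx) with ecx = Smi(1)
  if (lhs < rhs) return -1;  // cmov(below, eax, ecx) with ecx = Smi(-1)
  return 0;                  // eax was preloaded with 0 (equal)
}

int main() {
  bool unordered;
  printf("%d\n", CompareDoubles(1.5, 2.5, &unordered));          // -1
  printf("%d\n", CompareDoubles(std::nan(""), 1.0, &unordered)); // unordered
  return 0;
}
```
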
| (...skipping 517 matching lines...) | |
| 2597 } | 2584 } |
| 2598 | 2585 |
| 2599 | 2586 |
| 2600 void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) { | 2587 void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) { |
| 2601 CEntryStub::GenerateAheadOfTime(isolate); | 2588 CEntryStub::GenerateAheadOfTime(isolate); |
| 2602 StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate); | 2589 StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate); |
| 2603 StubFailureTrampolineStub::GenerateAheadOfTime(isolate); | 2590 StubFailureTrampolineStub::GenerateAheadOfTime(isolate); |
| 2604 // It is important that the store buffer overflow stubs are generated first. | 2591 // It is important that the store buffer overflow stubs are generated first. |
| 2605 ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate); | 2592 ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate); |
| 2606 CreateAllocationSiteStub::GenerateAheadOfTime(isolate); | 2593 CreateAllocationSiteStub::GenerateAheadOfTime(isolate); |
| 2607 if (Serializer::enabled(isolate)) { | 2594 BinaryOpICStub::GenerateAheadOfTime(isolate); |
| 2608 PlatformFeatureScope sse2(isolate, SSE2); | 2595 BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate); |
| 2609 BinaryOpICStub::GenerateAheadOfTime(isolate); | |
| 2610 BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate); | |
| 2611 } else { | |
| 2612 BinaryOpICStub::GenerateAheadOfTime(isolate); | |
| 2613 BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate); | |
| 2614 } | |
| 2615 } | 2596 } |
| 2616 | 2597 |
| 2617 | 2598 |
| 2618 void CodeStub::GenerateFPStubs(Isolate* isolate) { | 2599 void CodeStub::GenerateFPStubs(Isolate* isolate) { |
| 2619 if (CpuFeatures::IsSupported(SSE2)) { | 2600 CEntryStub save_doubles(isolate, 1, kSaveFPRegs); |
| 2620 CEntryStub save_doubles(isolate, 1, kSaveFPRegs); | 2601 // Stubs might already be in the snapshot, detect that and don't regenerate, |
| 2621 // Stubs might already be in the snapshot, detect that and don't regenerate, | 2602 // which would lead to code stub initialization state being messed up. |
| 2622 // which would lead to code stub initialization state being messed up. | 2603 Code* save_doubles_code; |
| 2623 Code* save_doubles_code; | 2604 if (!save_doubles.FindCodeInCache(&save_doubles_code)) { |
| 2624 if (!save_doubles.FindCodeInCache(&save_doubles_code)) { | 2605 save_doubles_code = *(save_doubles.GetCode()); |
| 2625 save_doubles_code = *(save_doubles.GetCode()); | |
| 2626 } | |
| 2627 isolate->set_fp_stubs_generated(true); | |
| 2628 } | 2606 } |
| 2607 isolate->set_fp_stubs_generated(true); | |
| 2629 } | 2608 } |
| 2630 | 2609 |
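
`GenerateFPStubs` is a find-or-generate pattern: it looks the stub up first so a copy already baked into the snapshot is reused rather than regenerated, which would otherwise corrupt the stub's initialization state. A minimal sketch of the pattern under invented names (this is not the V8 stub-cache API):

```cpp
// Minimal sketch with invented names; not the V8 stub-cache API.
#include <map>
#include <string>

struct Code { /* generated machine code */ };

std::map<std::string, Code*> stub_cache;  // stands in for the snapshot/cache

Code* GetOrGenerateStub(const std::string& key) {
  auto it = stub_cache.find(key);  // FindCodeInCache(&save_doubles_code)
  if (it != stub_cache.end()) {
    return it->second;  // already in the snapshot: do not regenerate
  }
  Code* code = new Code();  // save_doubles.GetCode()
  stub_cache[key] = code;
  return code;
}
```
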
| 2631 | 2610 |
| 2632 void CEntryStub::GenerateAheadOfTime(Isolate* isolate) { | 2611 void CEntryStub::GenerateAheadOfTime(Isolate* isolate) { |
| 2633 CEntryStub stub(isolate, 1, kDontSaveFPRegs); | 2612 CEntryStub stub(isolate, 1, kDontSaveFPRegs); |
| 2634 stub.GetCode(); | 2613 stub.GetCode(); |
| 2635 } | 2614 } |
| 2636 | 2615 |
| 2637 | 2616 |
| 2638 void CEntryStub::Generate(MacroAssembler* masm) { | 2617 void CEntryStub::Generate(MacroAssembler* masm) { |
| (...skipping 1129 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 3768 | 3747 |
| 3769 if (left_ == CompareIC::SMI) { | 3748 if (left_ == CompareIC::SMI) { |
| 3770 __ JumpIfNotSmi(edx, &miss); | 3749 __ JumpIfNotSmi(edx, &miss); |
| 3771 } | 3750 } |
| 3772 if (right_ == CompareIC::SMI) { | 3751 if (right_ == CompareIC::SMI) { |
| 3773 __ JumpIfNotSmi(eax, &miss); | 3752 __ JumpIfNotSmi(eax, &miss); |
| 3774 } | 3753 } |
| 3775 | 3754 |
| 3776 // Inlining the double comparison and falling back to the general compare | 3755 // Inlining the double comparison and falling back to the general compare |
| 3777 // stub if NaN is involved or SSE2 or CMOV is unsupported. | 3756 // stub if NaN is involved or CMOV is unsupported. |
| 3778 if (CpuFeatures::IsSupported(SSE2) && CpuFeatures::IsSupported(CMOV)) { | 3757 if (CpuFeatures::IsSupported(CMOV)) { |
| 3779 CpuFeatureScope scope1(masm, SSE2); | |
| 3780 CpuFeatureScope scope2(masm, CMOV); | 3758 CpuFeatureScope scope2(masm, CMOV); |
| 3781 | 3759 |
| 3782 // Load left and right operand. | 3760 // Load left and right operand. |
| 3783 Label done, left, left_smi, right_smi; | 3761 Label done, left, left_smi, right_smi; |
| 3784 __ JumpIfSmi(eax, &right_smi, Label::kNear); | 3762 __ JumpIfSmi(eax, &right_smi, Label::kNear); |
| 3785 __ cmp(FieldOperand(eax, HeapObject::kMapOffset), | 3763 __ cmp(FieldOperand(eax, HeapObject::kMapOffset), |
| 3786 isolate()->factory()->heap_number_map()); | 3764 isolate()->factory()->heap_number_map()); |
| 3787 __ j(not_equal, &maybe_undefined1, Label::kNear); | 3765 __ j(not_equal, &maybe_undefined1, Label::kNear); |
| 3788 __ movsd(xmm1, FieldOperand(eax, HeapNumber::kValueOffset)); | 3766 __ movsd(xmm1, FieldOperand(eax, HeapNumber::kValueOffset)); |
| 3789 __ jmp(&left, Label::kNear); | 3767 __ jmp(&left, Label::kNear); |
| (...skipping 525 matching lines...) | |
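
Before comparing, the code above dispatches on each operand's tag: smis are converted in integer registers, while heap numbers are loaded with `movsd` from `kValueOffset` after a map check. A hedged C++ model of that dispatch, assuming V8's ia32 tagging scheme (smi tag 0 in the low bit with the value in the upper 31 bits, heap-object tag 1); names are invented:

```cpp
// Hedged model of the smi/heap-number dispatch; not V8 code.
#include <cstdint>

struct HeapNumber {
  const void* map;  // at HeapObject::kMapOffset
  double value;     // at HeapNumber::kValueOffset
};

// Returns false on the &maybe_undefined path: not a smi and the map
// check failed, so the operand is not a number.
bool LoadAsDouble(intptr_t tagged, const void* heap_number_map, double* out) {
  if ((tagged & 1) == 0) {  // JumpIfSmi: low tag bit is clear
    *out = static_cast<double>(tagged >> 1);
    return true;
  }
  const HeapNumber* obj =
      reinterpret_cast<const HeapNumber*>(tagged - 1);  // strip the heap tag
  if (obj->map != heap_number_map) return false;
  *out = obj->value;  // movsd(xmm, FieldOperand(..., kValueOffset))
  return true;
}
```
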
| 4315 __ mov(result_, Immediate(0)); | 4293 __ mov(result_, Immediate(0)); |
| 4316 __ Drop(1); | 4294 __ Drop(1); |
| 4317 __ ret(2 * kPointerSize); | 4295 __ ret(2 * kPointerSize); |
| 4318 } | 4296 } |
| 4319 | 4297 |
| 4320 | 4298 |
| 4321 void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime( | 4299 void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime( |
| 4322 Isolate* isolate) { | 4300 Isolate* isolate) { |
| 4323 StoreBufferOverflowStub stub(isolate, kDontSaveFPRegs); | 4301 StoreBufferOverflowStub stub(isolate, kDontSaveFPRegs); |
| 4324 stub.GetCode(); | 4302 stub.GetCode(); |
| 4325 if (CpuFeatures::IsSafeForSnapshot(isolate, SSE2)) { | 4303 StoreBufferOverflowStub stub2(isolate, kSaveFPRegs); |
| 4326 StoreBufferOverflowStub stub2(isolate, kSaveFPRegs); | 4304 stub2.GetCode(); |
| 4327 stub2.GetCode(); | |
| 4328 } | |
| 4329 } | 4305 } |
| 4330 | 4306 |
| 4331 | 4307 |
| 4332 bool CodeStub::CanUseFPRegisters() { | 4308 bool CodeStub::CanUseFPRegisters() { |

Sven Panne, 2014/05/07 13:46:03:
We can nuke CanUseFPRegisters, it is always true on ia32 now.

| 4333 return CpuFeatures::IsSupported(SSE2); | 4309 return true; // SSE2 support is mandatory. |
| 4334 } | 4310 } |
| 4335 | 4311 |
| 4336 | 4312 |
| 4337 // Takes the input in 3 registers: address_, value_, and object_. A pointer to | 4313 // Takes the input in 3 registers: address_, value_, and object_. A pointer to |
| 4338 // the value has just been written into the object, now this stub makes sure | 4314 // the value has just been written into the object, now this stub makes sure |
| 4339 // we keep the GC informed. The word in the object where the value has been | 4315 // we keep the GC informed. The word in the object where the value has been |
| 4340 // written is in the address register. | 4316 // written is in the address register. |
| 4341 void RecordWriteStub::Generate(MacroAssembler* masm) { | 4317 void RecordWriteStub::Generate(MacroAssembler* masm) { |
| 4342 Label skip_to_incremental_noncompacting; | 4318 Label skip_to_incremental_noncompacting; |
| 4343 Label skip_to_incremental_compacting; | 4319 Label skip_to_incremental_compacting; |
| (...skipping 255 matching lines...) | |
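
A hedged model of the write-barrier contract the comment above describes; all names are invented, and the real stub works directly on heap pages and the store buffer:

```cpp
// Hedged sketch, not V8's implementation: the RecordWriteStub contract.
#include <unordered_set>

struct Heap {
  std::unordered_set<void**> remembered_slots;  // stands in for the store buffer

  bool InNewSpace(void* object) const {
    (void)object;  // placeholder; the real heap checks the page the pointer is on
    return false;
  }

  // Called after `*slot = value` has already happened, matching the comment:
  // "a pointer to the value has just been written into the object".
  void RecordWrite(void* object, void** slot, void* value) {
    if (value == nullptr) return;    // nothing the GC needs to track
    if (InNewSpace(object)) return;  // young objects get re-scanned anyway
    remembered_slots.insert(slot);   // remember the slot for the next GC pass
  }
};
```
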
| 4599 // Array literal has ElementsKind of FAST_*_DOUBLE_ELEMENTS. | 4575 // Array literal has ElementsKind of FAST_*_DOUBLE_ELEMENTS. |
| 4600 __ bind(&double_elements); | 4576 __ bind(&double_elements); |
| 4601 | 4577 |
| 4602 __ push(edx); | 4578 __ push(edx); |
| 4603 __ mov(edx, FieldOperand(ebx, JSObject::kElementsOffset)); | 4579 __ mov(edx, FieldOperand(ebx, JSObject::kElementsOffset)); |
| 4604 __ StoreNumberToDoubleElements(eax, | 4580 __ StoreNumberToDoubleElements(eax, |
| 4605 edx, | 4581 edx, |
| 4606 ecx, | 4582 ecx, |
| 4607 edi, | 4583 edi, |
| 4608 xmm0, | 4584 xmm0, |
| 4609 &slow_elements_from_double, | 4585 &slow_elements_from_double); |
| 4610 false); | |
| 4611 __ pop(edx); | 4586 __ pop(edx); |
| 4612 __ ret(0); | 4587 __ ret(0); |
| 4613 } | 4588 } |
| 4614 | 4589 |
| 4615 | 4590 |
| 4616 void StubFailureTrampolineStub::Generate(MacroAssembler* masm) { | 4591 void StubFailureTrampolineStub::Generate(MacroAssembler* masm) { |
| 4617 CEntryStub ces(isolate(), 1, fp_registers_ ? kSaveFPRegs : kDontSaveFPRegs); | 4592 CEntryStub ces(isolate(), 1, fp_registers_ ? kSaveFPRegs : kDontSaveFPRegs); |
| 4618 __ call(ces.GetCode(), RelocInfo::CODE_TARGET); | 4593 __ call(ces.GetCode(), RelocInfo::CODE_TARGET); |
| 4619 int parameter_count_offset = | 4594 int parameter_count_offset = |
| 4620 StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset; | 4595 StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset; |
| (...skipping 513 matching lines...) | |
| 5134 Operand(ebp, 7 * kPointerSize), | 5109 Operand(ebp, 7 * kPointerSize), |
| 5135 NULL); | 5110 NULL); |
| 5136 } | 5111 } |
| 5137 | 5112 |
| 5138 | 5113 |
| 5139 #undef __ | 5114 #undef __ |
| 5140 | 5115 |
| 5141 } } // namespace v8::internal | 5116 } } // namespace v8::internal |
| 5142 | 5117 |
| 5143 #endif // V8_TARGET_ARCH_IA32 | 5118 #endif // V8_TARGET_ARCH_IA32 |