Chromium Code Reviews
Unified Diff: src/ia32/code-stubs-ia32.cc

Issue 275433004: Require SSE2 support for the ia32 port. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 6 years, 7 months ago
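
Note: this CL deletes the runtime SSE2 checks (CpuFeatureScope, PlatformFeatureScope, IsSafeForSnapshot) throughout the ia32 port, on the premise that SSE2 is now a hard baseline. For context, a minimal sketch of the kind of one-time startup probe that makes such a baseline safe on x86 hosts; this is illustrative only, not code from the CL. It assumes CPUID leaf 1 reports SSE2 in EDX bit 26.

#include <cstdio>
#include <cstdlib>
#if defined(_MSC_VER)
#include <intrin.h>
#else
#include <cpuid.h>
#endif

// Returns true when the CPU reports SSE2 (CPUID leaf 1, EDX bit 26).
static bool CpuHasSSE2() {
#if defined(_MSC_VER)
  int regs[4];
  __cpuid(regs, 1);
  return (regs[3] & (1 << 26)) != 0;
#else
  unsigned int eax, ebx, ecx, edx;
  if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx)) return false;
  return (edx & (1u << 26)) != 0;
#endif
}

int main() {
  // With SSE2 as a hard baseline, the check runs once at startup and all
  // generated code may use SSE2 instructions unconditionally afterwards.
  if (!CpuHasSSE2()) {
    std::fprintf(stderr, "This build requires SSE2.\n");
    return EXIT_FAILURE;
  }
  std::puts("SSE2 baseline satisfied.");
  return EXIT_SUCCESS;
}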
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "v8.h"

 #if V8_TARGET_ARCH_IA32

 #include "bootstrapper.h"
 #include "code-stubs.h"
(...skipping 448 matching lines...)
   __ ret(0);
 }


 void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
   // We don't allow a GC during a store buffer overflow so there is no need to
   // store the registers in any particular way, but we do have to store and
   // restore them.
   __ pushad();
   if (save_doubles_ == kSaveFPRegs) {
-    CpuFeatureScope scope(masm, SSE2);
     __ sub(esp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
     for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
       XMMRegister reg = XMMRegister::from_code(i);
       __ movsd(Operand(esp, i * kDoubleSize), reg);
     }
   }
   const int argument_count = 1;

   AllowExternalCallThatCantCauseGC scope(masm);
   __ PrepareCallCFunction(argument_count, ecx);
   __ mov(Operand(esp, 0 * kPointerSize),
          Immediate(ExternalReference::isolate_address(isolate())));
   __ CallCFunction(
       ExternalReference::store_buffer_overflow_function(isolate()),
       argument_count);
   if (save_doubles_ == kSaveFPRegs) {
-    CpuFeatureScope scope(masm, SSE2);
     for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
       XMMRegister reg = XMMRegister::from_code(i);
       __ movsd(reg, Operand(esp, i * kDoubleSize));
     }
     __ add(esp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
   }
   __ popad();
   __ ret(0);
 }

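Note on the hunk above: pushad saves only the eight general-purpose registers, which is why the stub spills the XMM registers by hand around the C call. A compilable sketch of the spill-slot arithmetic, assuming the ia32 values kDoubleSize == 8 and XMMRegister::kNumRegisters == 8:

#include <cassert>

// Assumed ia32 values, mirroring the stub's spill loop.
constexpr int kDoubleSize = 8;
constexpr int kNumXMMRegisters = 8;

// Byte offset from esp at which xmm<reg_code> is spilled, matching
// "__ movsd(Operand(esp, i * kDoubleSize), reg)".
constexpr int SpillSlotOffset(int reg_code) { return reg_code * kDoubleSize; }

int main() {
  // The stub reserves one contiguous 64-byte area for all eight registers.
  assert(SpillSlotOffset(kNumXMMRegisters) == 64);
  assert(SpillSlotOffset(3) == 24);  // xmm3 lives at [esp + 24]
  return 0;
}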
(...skipping 232 matching lines...)
   __ mov(scratch, FieldOperand(eax, HeapObject::kMapOffset));
   __ cmp(scratch, factory->heap_number_map());
   __ j(not_equal, non_float);  // argument in eax is not a number -> NaN

   // Fall-through: Both operands are numbers.
   __ bind(&done);
 }


 void MathPowStub::Generate(MacroAssembler* masm) {
-  CpuFeatureScope use_sse2(masm, SSE2);
   Factory* factory = isolate()->factory();
   const Register exponent = eax;
   const Register base = edx;
   const Register scratch = ecx;
   const XMMRegister double_result = xmm3;
   const XMMRegister double_base = xmm2;
   const XMMRegister double_exponent = xmm1;
   const XMMRegister double_scratch = xmm4;

   Label call_runtime, done, exponent_not_smi, int_exponent;
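The rest of MathPowStub is elided by the review tool. Its integer-exponent fast path is, in the usual formulation, a square-and-multiply loop; a C++ analogue of that general technique follows (a sketch of the algorithm, not a transcription of the stub):

#include <cassert>

// Binary exponentiation: O(log n) multiplications for an integer exponent.
double PowInt(double base, int exponent) {
  unsigned int e = exponent < 0 ? 0u - static_cast<unsigned int>(exponent)
                                : static_cast<unsigned int>(exponent);
  double result = 1.0;
  for (double b = base; e != 0; e >>= 1) {
    if (e & 1u) result *= b;  // fold in the current bit of the exponent
    b *= b;                   // square for the next bit
  }
  return exponent < 0 ? 1.0 / result : result;
}

int main() {
  assert(PowInt(2.0, 10) == 1024.0);
  assert(PowInt(2.0, -2) == 0.25);
  assert(PowInt(7.0, 0) == 1.0);
  return 0;
}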
(...skipping 1294 matching lines...)
     __ j(equal, &return_not_equal);

     // Fall through to the general case.
     __ bind(&slow);
   }

   // Generate the number comparison code.
   Label non_number_comparison;
   Label unordered;
   __ bind(&generic_heap_number_comparison);
-  if (CpuFeatures::IsSupported(SSE2)) {
-    CpuFeatureScope use_sse2(masm, SSE2);
+
+  FloatingPointHelper::LoadSSE2Operands(masm, &non_number_comparison);
+  __ ucomisd(xmm0, xmm1);
+  // Don't base result on EFLAGS when a NaN is involved.
+  __ j(parity_even, &unordered, Label::kNear);
+
+  if (CpuFeatures::IsSupported(CMOV)) {
     CpuFeatureScope use_cmov(masm, CMOV);
-
-    FloatingPointHelper::LoadSSE2Operands(masm, &non_number_comparison);
-    __ ucomisd(xmm0, xmm1);
-
-    // Don't base result on EFLAGS when a NaN is involved.
-    __ j(parity_even, &unordered, Label::kNear);
     // Return a result of -1, 0, or 1, based on EFLAGS.
     __ mov(eax, 0);  // equal
     __ mov(ecx, Immediate(Smi::FromInt(1)));
     __ cmov(above, eax, ecx);
     __ mov(ecx, Immediate(Smi::FromInt(-1)));
     __ cmov(below, eax, ecx);
     __ ret(0);
   } else {
-    FloatingPointHelper::CheckFloatOperands(
-        masm, &non_number_comparison, ebx);
-    FloatingPointHelper::LoadFloatOperand(masm, eax);
-    FloatingPointHelper::LoadFloatOperand(masm, edx);
-    __ FCmp();
-
-    // Don't base result on EFLAGS when a NaN is involved.
-    __ j(parity_even, &unordered, Label::kNear);
-
     Label below_label, above_label;
     // Return a result of -1, 0, or 1, based on EFLAGS.
     __ j(below, &below_label, Label::kNear);
     __ j(above, &above_label, Label::kNear);

     __ Move(eax, Immediate(0));
     __ ret(0);

     __ bind(&below_label);
     __ mov(eax, Immediate(Smi::FromInt(-1)));
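Background for the parity check above: ucomisd flags NaN operands as unordered (ZF, PF and CF all set), so a three-way result derived from the ordinary flags would be wrong whenever a NaN is involved. A scalar C++ analogue of the -1/0/1 cmov sequence, with the unordered case split out the way the "j(parity_even, &unordered)" branch does:

#include <cassert>
#include <cmath>

// Three-way compare mirroring the stub: -1, 0, or 1, with NaN reported
// separately via *unordered.
int CompareDoubles(double lhs, double rhs, bool* unordered) {
  *unordered = std::isnan(lhs) || std::isnan(rhs);
  if (*unordered) return 0;  // caller must consult *unordered first
  if (lhs > rhs) return 1;   // the "cmov(above, ...)" path
  if (lhs < rhs) return -1;  // the "cmov(below, ...)" path
  return 0;                  // equal
}

int main() {
  bool unordered = false;
  assert(CompareDoubles(1.0, 2.0, &unordered) == -1 && !unordered);
  CompareDoubles(1.0, std::nan(""), &unordered);
  assert(unordered);  // any comparison involving NaN is unordered
  return 0;
}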
(...skipping 517 matching lines...)
 }


 void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
   CEntryStub::GenerateAheadOfTime(isolate);
   StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
   StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
   // It is important that the store buffer overflow stubs are generated first.
   ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
   CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
-  if (Serializer::enabled(isolate)) {
-    PlatformFeatureScope sse2(isolate, SSE2);
-    BinaryOpICStub::GenerateAheadOfTime(isolate);
-    BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
-  } else {
-    BinaryOpICStub::GenerateAheadOfTime(isolate);
-    BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
-  }
+  BinaryOpICStub::GenerateAheadOfTime(isolate);
+  BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
 }


 void CodeStub::GenerateFPStubs(Isolate* isolate) {
-  if (CpuFeatures::IsSupported(SSE2)) {
-    CEntryStub save_doubles(isolate, 1, kSaveFPRegs);
-    // Stubs might already be in the snapshot, detect that and don't regenerate,
-    // which would lead to code stub initialization state being messed up.
-    Code* save_doubles_code;
-    if (!save_doubles.FindCodeInCache(&save_doubles_code)) {
-      save_doubles_code = *(save_doubles.GetCode());
-    }
-    isolate->set_fp_stubs_generated(true);
-  }
+  CEntryStub save_doubles(isolate, 1, kSaveFPRegs);
+  // Stubs might already be in the snapshot, detect that and don't regenerate,
+  // which would lead to code stub initialization state being messed up.
+  Code* save_doubles_code;
+  if (!save_doubles.FindCodeInCache(&save_doubles_code)) {
+    save_doubles_code = *(save_doubles.GetCode());
+  }
+  isolate->set_fp_stubs_generated(true);
 }
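GenerateFPStubs now runs unconditionally, and the FindCodeInCache test keeps it idempotent: a stub already present from the snapshot must not be regenerated. The same get-or-create shape in miniature, with hypothetical names rather than the V8 API:

#include <memory>
#include <string>
#include <unordered_map>

struct Code {};  // stand-in for a compiled stub

class StubCacheModel {
 public:
  // Mirrors the FindCodeInCache / GetCode pairing: generation happens at
  // most once per key, no matter how often this is called.
  Code* GetOrGenerate(const std::string& key) {
    auto it = cache_.find(key);
    if (it != cache_.end()) return it->second.get();  // e.g. from snapshot
    auto inserted = cache_.emplace(key, std::make_unique<Code>());
    return inserted.first->second.get();
  }

 private:
  std::unordered_map<std::string, std::unique_ptr<Code>> cache_;
};

int main() {
  StubCacheModel cache;
  Code* first = cache.GetOrGenerate("CEntryStub/save_doubles");
  Code* second = cache.GetOrGenerate("CEntryStub/save_doubles");
  return first == second ? 0 : 1;  // generated exactly once
}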


 void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
   CEntryStub stub(isolate, 1, kDontSaveFPRegs);
   stub.GetCode();
 }


 void CEntryStub::Generate(MacroAssembler* masm) {
(...skipping 1129 matching lines...)

   if (left_ == CompareIC::SMI) {
     __ JumpIfNotSmi(edx, &miss);
   }
   if (right_ == CompareIC::SMI) {
     __ JumpIfNotSmi(eax, &miss);
   }

   // Inlining the double comparison and falling back to the general compare
-  // stub if NaN is involved or SSE2 or CMOV is unsupported.
-  if (CpuFeatures::IsSupported(SSE2) && CpuFeatures::IsSupported(CMOV)) {
-    CpuFeatureScope scope1(masm, SSE2);
+  // stub if NaN is involved or CMOV is unsupported.
+  if (CpuFeatures::IsSupported(CMOV)) {
     CpuFeatureScope scope2(masm, CMOV);

     // Load left and right operand.
     Label done, left, left_smi, right_smi;
     __ JumpIfSmi(eax, &right_smi, Label::kNear);
     __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
            isolate()->factory()->heap_number_map());
     __ j(not_equal, &maybe_undefined1, Label::kNear);
     __ movsd(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
     __ jmp(&left, Label::kNear);
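For readers new to the Smi checks in this hunk: on ia32, V8 tags a small integer (Smi) by shifting the value left one bit, leaving the low bit clear, so JumpIfNotSmi is a single-bit test. A sketch of that tagging scheme as assumed here, not a transcription of V8's headers:

#include <cassert>
#include <cstdint>

// Assumed 32-bit tagging: Smi = value << 1 (low bit 0); heap object
// pointers carry low bit 1, so the bit test distinguishes the two.
constexpr intptr_t kSmiTagMask = 1;

inline bool IsSmi(intptr_t tagged) { return (tagged & kSmiTagMask) == 0; }
inline intptr_t SmiFromInt(int32_t value) {
  // Shift on the unsigned representation to avoid UB on negative values.
  return static_cast<intptr_t>(static_cast<uintptr_t>(value) << 1);
}
inline int32_t SmiToInt(intptr_t tagged) {
  return static_cast<int32_t>(tagged >> 1);  // arithmetic shift untags
}

int main() {
  intptr_t tagged = SmiFromInt(-42);
  assert(IsSmi(tagged));
  assert(SmiToInt(tagged) == -42);
  return 0;
}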
(...skipping 525 matching lines...)
   __ mov(result_, Immediate(0));
   __ Drop(1);
   __ ret(2 * kPointerSize);
 }


 void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
     Isolate* isolate) {
   StoreBufferOverflowStub stub(isolate, kDontSaveFPRegs);
   stub.GetCode();
-  if (CpuFeatures::IsSafeForSnapshot(isolate, SSE2)) {
-    StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
-    stub2.GetCode();
-  }
+  StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
+  stub2.GetCode();
 }


-bool CodeStub::CanUseFPRegisters() {
-  return CpuFeatures::IsSupported(SSE2);
-}
-
-
 // Takes the input in 3 registers: address_ value_ and object_. A pointer to
 // the value has just been written into the object, now this stub makes sure
 // we keep the GC informed. The word in the object where the value has been
 // written is in the address register.
 void RecordWriteStub::Generate(MacroAssembler* masm) {
   Label skip_to_incremental_noncompacting;
   Label skip_to_incremental_compacting;

   // The first two instructions are generated with labels so as to get the
   // offset fixed up correctly by the bind(Label*) call. We patch it back and
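The comment opening RecordWriteStub::Generate above describes a write barrier: after a pointer store, the GC must learn about the written slot so later collection phases can revisit it. A toy model of that bookkeeping, with hypothetical names, not the V8 implementation:

#include <cstddef>
#include <cstdint>
#include <unordered_set>

// Toy model: a "slot" is the address within an object where a pointer was
// just stored; the barrier records it for the GC to re-scan.
using Slot = std::uintptr_t;

class WriteBarrierModel {
 public:
  void RecordWrite(Slot slot) { remembered_slots_.insert(slot); }

  // A later GC phase (scavenge / incremental marking) consumes the set.
  std::size_t PendingSlots() const { return remembered_slots_.size(); }

 private:
  std::unordered_set<Slot> remembered_slots_;
};

int main() {
  WriteBarrierModel barrier;
  barrier.RecordWrite(0x1000);  // object_ + offset where value_ was written
  return barrier.PendingSlots() == 1 ? 0 : 1;
}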
(...skipping 252 matching lines...)
   // Array literal has ElementsKind of FAST_*_DOUBLE_ELEMENTS.
   __ bind(&double_elements);

   __ push(edx);
   __ mov(edx, FieldOperand(ebx, JSObject::kElementsOffset));
   __ StoreNumberToDoubleElements(eax,
                                  edx,
                                  ecx,
                                  edi,
                                  xmm0,
-                                 &slow_elements_from_double,
-                                 false);
+                                 &slow_elements_from_double);
   __ pop(edx);
   __ ret(0);
 }


 void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
-  CEntryStub ces(isolate(), 1, fp_registers_ ? kSaveFPRegs : kDontSaveFPRegs);
+  CEntryStub ces(isolate(), 1, kSaveFPRegs);
   __ call(ces.GetCode(), RelocInfo::CODE_TARGET);
   int parameter_count_offset =
       StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
   __ mov(ebx, MemOperand(ebp, parameter_count_offset));
   masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
   __ pop(ecx);
   int additional_offset = function_mode_ == JS_FUNCTION_STUB_MODE
                               ? kPointerSize
                               : 0;
   __ lea(esp, MemOperand(esp, ebx, times_pointer_size, additional_offset));
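The epilogue above pops the return address into ecx and then drops ebx caller stack parameters, plus one extra receiver slot in JS_FUNCTION_STUB_MODE. The byte arithmetic worked through in plain C++, assuming kPointerSize == 4 on ia32:

#include <cassert>

constexpr int kPointerSize = 4;  // ia32

// Number of bytes "lea esp, [esp + ebx*4 + additional_offset]" advances
// the stack pointer by, given the parameter count in ebx.
constexpr int StackBytesDropped(int parameter_count, bool js_function_mode) {
  return parameter_count * kPointerSize +
         (js_function_mode ? kPointerSize : 0);
}

int main() {
  assert(StackBytesDropped(3, false) == 12);  // three parameters
  assert(StackBytesDropped(3, true) == 16);   // plus the receiver slot
  return 0;
}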
(...skipping 506 matching lines...)
                            Operand(ebp, 7 * kPointerSize),
                            NULL);
 }


 #undef __

 } }  // namespace v8::internal

 #endif  // V8_TARGET_ARCH_IA32