OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #if V8_TARGET_ARCH_X64 | 5 #if V8_TARGET_ARCH_X64 |
6 | 6 |
7 #include "src/base/bits.h" | 7 #include "src/base/bits.h" |
8 #include "src/base/division-by-constant.h" | 8 #include "src/base/division-by-constant.h" |
9 #include "src/bootstrapper.h" | 9 #include "src/bootstrapper.h" |
10 #include "src/codegen.h" | 10 #include "src/codegen.h" |
(...skipping 3241 matching lines...)
3252 shrq(dst, Immediate(32)); | 3252 shrq(dst, Immediate(32)); |
3253 } | 3253 } |
3254 | 3254 |
3255 | 3255 |
3256 void MacroAssembler::Pinsrd(XMMRegister dst, Register src, int8_t imm8) { | 3256 void MacroAssembler::Pinsrd(XMMRegister dst, Register src, int8_t imm8) { |
3257 if (CpuFeatures::IsSupported(SSE4_1)) { | 3257 if (CpuFeatures::IsSupported(SSE4_1)) { |
3258 CpuFeatureScope sse_scope(this, SSE4_1); | 3258 CpuFeatureScope sse_scope(this, SSE4_1); |
3259 pinsrd(dst, src, imm8); | 3259 pinsrd(dst, src, imm8); |
3260 return; | 3260 return; |
3261 } | 3261 } |
3262 Movd(xmm0, src); | 3262 Movd(kScratchDoubleReg, src); |
3263 if (imm8 == 1) { | 3263 if (imm8 == 1) { |
3264 punpckldq(dst, xmm0); | 3264 punpckldq(dst, kScratchDoubleReg); |
3265 } else { | 3265 } else { |
3266 DCHECK_EQ(0, imm8); | 3266 DCHECK_EQ(0, imm8); |
3267 Movss(dst, xmm0); | 3267 Movss(dst, kScratchDoubleReg); |
3268 } | 3268 } |
3269 } | 3269 } |
3270 | 3270 |
3271 | 3271 |
3272 void MacroAssembler::Pinsrd(XMMRegister dst, const Operand& src, int8_t imm8) { | 3272 void MacroAssembler::Pinsrd(XMMRegister dst, const Operand& src, int8_t imm8) { |
3273 DCHECK(imm8 == 0 || imm8 == 1); | 3273 DCHECK(imm8 == 0 || imm8 == 1); |
3274 if (CpuFeatures::IsSupported(SSE4_1)) { | 3274 if (CpuFeatures::IsSupported(SSE4_1)) { |
3275 CpuFeatureScope sse_scope(this, SSE4_1); | 3275 CpuFeatureScope sse_scope(this, SSE4_1); |
3276 pinsrd(dst, src, imm8); | 3276 pinsrd(dst, src, imm8); |
3277 return; | 3277 return; |
3278 } | 3278 } |
3279 Movd(xmm0, src); | 3279 Movd(kScratchDoubleReg, src); |
3280 if (imm8 == 1) { | 3280 if (imm8 == 1) { |
3281 punpckldq(dst, xmm0); | 3281 punpckldq(dst, kScratchDoubleReg); |
3282 } else { | 3282 } else { |
3283 DCHECK_EQ(0, imm8); | 3283 DCHECK_EQ(0, imm8); |
3284 Movss(dst, xmm0); | 3284 Movss(dst, kScratchDoubleReg); |
3285 } | 3285 } |
3286 } | 3286 } |
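
Reviewer note: both Pinsrd overloads above use the same fallback: when SSE4.1's pinsrd is unavailable, the scalar is moved into the scratch XMM register and merged into lane 0 (Movss) or lane 1 (punpckldq). The standalone SSE2 sketch below is not V8 code (it uses the usual <emmintrin.h> intrinsics); it just illustrates why punpckldq reproduces pinsrd's result for the low two lanes, which is all that matters while the register holds a double.

    #include <emmintrin.h>  // SSE2 intrinsics
    #include <cstdint>
    #include <cstdio>

    int main() {
      // dst starts as {11, 22, ?, ?}; we want to insert 99 into lane 1.
      __m128i dst = _mm_set_epi32(0, 0, 22, 11);
      __m128i scratch = _mm_cvtsi32_si128(99);             // Movd(kScratchDoubleReg, src)
      __m128i merged = _mm_unpacklo_epi32(dst, scratch);   // punpckldq(dst, kScratchDoubleReg)
      alignas(16) int32_t out[4];
      _mm_store_si128(reinterpret_cast<__m128i*>(out), merged);
      std::printf("lane0=%d lane1=%d\n", out[0], out[1]);  // prints lane0=11 lane1=99
      return 0;
    }
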
3287 | 3287 |
3288 | 3288 |
3289 void MacroAssembler::Lzcntl(Register dst, Register src) { | 3289 void MacroAssembler::Lzcntl(Register dst, Register src) { |
3290 if (CpuFeatures::IsSupported(LZCNT)) { | 3290 if (CpuFeatures::IsSupported(LZCNT)) { |
3291 CpuFeatureScope scope(this, LZCNT); | 3291 CpuFeatureScope scope(this, LZCNT); |
3292 lzcntl(dst, src); | 3292 lzcntl(dst, src); |
3293 return; | 3293 return; |
3294 } | 3294 } |
(...skipping 441 matching lines...)
3736 Register input_reg, | 3736 Register input_reg, |
3737 int offset) { | 3737 int offset) { |
3738 DoubleToIStub stub(isolate(), input_reg, result_reg, offset, true); | 3738 DoubleToIStub stub(isolate(), input_reg, result_reg, offset, true); |
3739 call(stub.GetCode(), RelocInfo::CODE_TARGET); | 3739 call(stub.GetCode(), RelocInfo::CODE_TARGET); |
3740 } | 3740 } |
3741 | 3741 |
3742 | 3742 |
3743 void MacroAssembler::TruncateHeapNumberToI(Register result_reg, | 3743 void MacroAssembler::TruncateHeapNumberToI(Register result_reg, |
3744 Register input_reg) { | 3744 Register input_reg) { |
3745 Label done; | 3745 Label done; |
3746 Movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset)); | 3746 Movsd(kScratchDoubleReg, FieldOperand(input_reg, HeapNumber::kValueOffset)); |
3747 Cvttsd2siq(result_reg, xmm0); | 3747 Cvttsd2siq(result_reg, kScratchDoubleReg); |
3748 cmpq(result_reg, Immediate(1)); | 3748 cmpq(result_reg, Immediate(1)); |
3749 j(no_overflow, &done, Label::kNear); | 3749 j(no_overflow, &done, Label::kNear); |
3750 | 3750 |
3751 // Slow case. | 3751 // Slow case. |
3752 if (input_reg.is(result_reg)) { | 3752 if (input_reg.is(result_reg)) { |
3753 subp(rsp, Immediate(kDoubleSize)); | 3753 subp(rsp, Immediate(kDoubleSize)); |
3754 Movsd(MemOperand(rsp, 0), xmm0); | 3754 Movsd(MemOperand(rsp, 0), kScratchDoubleReg); |
3755 SlowTruncateToI(result_reg, rsp, 0); | 3755 SlowTruncateToI(result_reg, rsp, 0); |
3756 addp(rsp, Immediate(kDoubleSize)); | 3756 addp(rsp, Immediate(kDoubleSize)); |
3757 } else { | 3757 } else { |
3758 SlowTruncateToI(result_reg, input_reg); | 3758 SlowTruncateToI(result_reg, input_reg); |
3759 } | 3759 } |
3760 | 3760 |
3761 bind(&done); | 3761 bind(&done); |
3762 // Keep our invariant that the upper 32 bits are zero. | 3762 // Keep our invariant that the upper 32 bits are zero. |
3763 movl(result_reg, result_reg); | 3763 movl(result_reg, result_reg); |
3764 } | 3764 } |
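
Reviewer note: the fast path above relies on cvttsd2siq returning the "integer indefinite" value 0x8000000000000000 (INT64_MIN) for NaN or out-of-range inputs; `cmpq result, 1` then sets the signed-overflow flag exactly for that sentinel, so `j(no_overflow, &done)` skips the slow path on success. A minimal standalone check of that behavior (not V8 code):

    #include <emmintrin.h>  // _mm_cvttsd_si64 (x86-64 SSE2)
    #include <cstdint>
    #include <cstdio>
    #include <limits>

    int main() {
      int64_t ok  = _mm_cvttsd_si64(_mm_set_sd(123.9));  // 123
      int64_t bad = _mm_cvttsd_si64(_mm_set_sd(1e30));   // out of range -> INT64_MIN sentinel
      std::printf("ok=%lld  bad_is_sentinel=%d\n",
                  static_cast<long long>(ok),
                  bad == std::numeric_limits<int64_t>::min());
      return 0;
    }
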
(...skipping 16 matching lines...)
3781 movl(result_reg, result_reg); | 3781 movl(result_reg, result_reg); |
3782 } | 3782 } |
3783 | 3783 |
3784 | 3784 |
3785 void MacroAssembler::DoubleToI(Register result_reg, XMMRegister input_reg, | 3785 void MacroAssembler::DoubleToI(Register result_reg, XMMRegister input_reg, |
3786 XMMRegister scratch, | 3786 XMMRegister scratch, |
3787 MinusZeroMode minus_zero_mode, | 3787 MinusZeroMode minus_zero_mode, |
3788 Label* lost_precision, Label* is_nan, | 3788 Label* lost_precision, Label* is_nan, |
3789 Label* minus_zero, Label::Distance dst) { | 3789 Label* minus_zero, Label::Distance dst) { |
3790 Cvttsd2si(result_reg, input_reg); | 3790 Cvttsd2si(result_reg, input_reg); |
3791 Cvtlsi2sd(xmm0, result_reg); | 3791 Cvtlsi2sd(kScratchDoubleReg, result_reg); |
3792 Ucomisd(xmm0, input_reg); | 3792 Ucomisd(kScratchDoubleReg, input_reg); |
3793 j(not_equal, lost_precision, dst); | 3793 j(not_equal, lost_precision, dst); |
3794 j(parity_even, is_nan, dst); // NaN. | 3794 j(parity_even, is_nan, dst); // NaN. |
3795 if (minus_zero_mode == FAIL_ON_MINUS_ZERO) { | 3795 if (minus_zero_mode == FAIL_ON_MINUS_ZERO) { |
3796 Label done; | 3796 Label done; |
3797 // The integer converted back is equal to the original. We | 3797 // The integer converted back is equal to the original. We |
3798 // only have to test if we got -0 as an input. | 3798 // only have to test if we got -0 as an input. |
3799 testl(result_reg, result_reg); | 3799 testl(result_reg, result_reg); |
3800 j(not_zero, &done, Label::kNear); | 3800 j(not_zero, &done, Label::kNear); |
3801 Movmskpd(result_reg, input_reg); | 3801 Movmskpd(result_reg, input_reg); |
3802 // Bit 0 contains the sign of the double in input_reg. | 3802 // Bit 0 contains the sign of the double in input_reg. |
(...skipping 1918 matching lines...)
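
Reviewer note: the DoubleToI sequence above is the usual round-trip test: truncate with Cvttsd2si, widen back with Cvtlsi2sd, and Ucomisd the two values; inequality signals lost precision, the parity flag signals NaN, and Movmskpd exposes the sign bit so a -0 input can be rejected in FAIL_ON_MINUS_ZERO mode. A scalar C++ sketch of the same idea (not V8 code; the plain cast does not model the NaN/parity case, so only finite inputs are exercised):

    #include <cmath>
    #include <cstdio>

    // Returns true iff d converts to int exactly and is not -0.
    bool DoubleToInt32Exact(double d, int* out) {
      int i = static_cast<int>(d);                    // Cvttsd2si
      if (static_cast<double>(i) != d) return false;  // Cvtlsi2sd + Ucomisd: precision lost
      if (i == 0 && std::signbit(d)) return false;    // Movmskpd sign check: reject -0
      *out = i;
      return true;
    }

    int main() {
      int v = 0;
      std::printf("%d %d %d\n", DoubleToInt32Exact(42.0, &v),
                  DoubleToInt32Exact(42.5, &v), DoubleToInt32Exact(-0.0, &v));  // 1 0 0
      return 0;
    }
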
5721 movl(rax, dividend); | 5721 movl(rax, dividend); |
5722 shrl(rax, Immediate(31)); | 5722 shrl(rax, Immediate(31)); |
5723 addl(rdx, rax); | 5723 addl(rdx, rax); |
5724 } | 5724 } |
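
Reviewer note: the three instructions above (movl / shrl 31 / addl) look like the standard sign fixup at the end of a magic-number signed division helper: rdx holds the (shifted) high half of the multiply, and adding the dividend's sign bit rounds the quotient toward zero. A standalone sketch for division by 3 (not V8 code; 0x55555556 with shift 0 is the textbook multiplier, see Hacker's Delight):

    #include <cstdint>
    #include <cstdio>

    // Divide by 3 without a div instruction.
    int32_t DivBy3(int32_t n) {
      int32_t q = static_cast<int32_t>((INT64_C(0x55555556) * n) >> 32);  // imull; high half
      q += static_cast<uint32_t>(n) >> 31;                                // shrl 31; addl (sign fixup)
      return q;
    }

    int main() {
      std::printf("%d %d\n", DivBy3(7), DivBy3(-7));  // 2 -2
      return 0;
    }
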
5725 | 5725 |
5726 | 5726 |
5727 } // namespace internal | 5727 } // namespace internal |
5728 } // namespace v8 | 5728 } // namespace v8 |
5729 | 5729 |
5730 #endif // V8_TARGET_ARCH_X64 | 5730 #endif // V8_TARGET_ARCH_X64 |