OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #if V8_TARGET_ARCH_X64 | 5 #if V8_TARGET_ARCH_X64 |
6 | 6 |
7 #include "src/base/bits.h" | 7 #include "src/base/bits.h" |
8 #include "src/base/division-by-constant.h" | 8 #include "src/base/division-by-constant.h" |
9 #include "src/bootstrapper.h" | 9 #include "src/bootstrapper.h" |
10 #include "src/codegen.h" | 10 #include "src/codegen.h" |
(...skipping 793 matching lines...)
804 CpuFeatureScope scope(this, AVX); | 804 CpuFeatureScope scope(this, AVX); |
805 vxorpd(dst, dst, dst); | 805 vxorpd(dst, dst, dst); |
806 vcvtlsi2sd(dst, dst, src); | 806 vcvtlsi2sd(dst, dst, src); |
807 } else { | 807 } else { |
808 xorpd(dst, dst); | 808 xorpd(dst, dst); |
809 cvtlsi2sd(dst, src); | 809 cvtlsi2sd(dst, src); |
810 } | 810 } |
811 } | 811 } |
812 | 812 |
813 | 813 |
| 814 void MacroAssembler::Cvttsd2si(Register dst, XMMRegister src) { |
| 815 if (CpuFeatures::IsSupported(AVX)) { |
| 816 CpuFeatureScope scope(this, AVX); |
| 817 vcvttsd2si(dst, src); |
| 818 } else { |
| 819 cvttsd2si(dst, src); |
| 820 } |
| 821 } |
| 822 |
| 823 |
| 824 void MacroAssembler::Cvttsd2si(Register dst, const Operand& src) { |
| 825 if (CpuFeatures::IsSupported(AVX)) { |
| 826 CpuFeatureScope scope(this, AVX); |
| 827 vcvttsd2si(dst, src); |
| 828 } else { |
| 829 cvttsd2si(dst, src); |
| 830 } |
| 831 } |
| 832 |
| 833 |
| 834 void MacroAssembler::Cvttsd2siq(Register dst, XMMRegister src) { |
| 835 if (CpuFeatures::IsSupported(AVX)) { |
| 836 CpuFeatureScope scope(this, AVX); |
| 837 vcvttsd2siq(dst, src); |
| 838 } else { |
| 839 cvttsd2siq(dst, src); |
| 840 } |
| 841 } |
| 842 |
| 843 |
| 844 void MacroAssembler::Cvttsd2siq(Register dst, const Operand& src) { |
| 845 if (CpuFeatures::IsSupported(AVX)) { |
| 846 CpuFeatureScope scope(this, AVX); |
| 847 vcvttsd2siq(dst, src); |
| 848 } else { |
| 849 cvttsd2siq(dst, src); |
| 850 } |
| 851 } |
| 852 |
| 853 |
814 void MacroAssembler::Load(Register dst, const Operand& src, Representation r) { | 854 void MacroAssembler::Load(Register dst, const Operand& src, Representation r) { |
815 DCHECK(!r.IsDouble()); | 855 DCHECK(!r.IsDouble()); |
816 if (r.IsInteger8()) { | 856 if (r.IsInteger8()) { |
817 movsxbq(dst, src); | 857 movsxbq(dst, src); |
818 } else if (r.IsUInteger8()) { | 858 } else if (r.IsUInteger8()) { |
819 movzxbl(dst, src); | 859 movzxbl(dst, src); |
820 } else if (r.IsInteger16()) { | 860 } else if (r.IsInteger16()) { |
821 movsxwq(dst, src); | 861 movsxwq(dst, src); |
822 } else if (r.IsUInteger16()) { | 862 } else if (r.IsUInteger16()) { |
823 movzxwl(dst, src); | 863 movzxwl(dst, src); |
(...skipping 2455 matching lines...)
3279 int offset) { | 3319 int offset) { |
3280 DoubleToIStub stub(isolate(), input_reg, result_reg, offset, true); | 3320 DoubleToIStub stub(isolate(), input_reg, result_reg, offset, true); |
3281 call(stub.GetCode(), RelocInfo::CODE_TARGET); | 3321 call(stub.GetCode(), RelocInfo::CODE_TARGET); |
3282 } | 3322 } |
3283 | 3323 |
3284 | 3324 |
3285 void MacroAssembler::TruncateHeapNumberToI(Register result_reg, | 3325 void MacroAssembler::TruncateHeapNumberToI(Register result_reg, |
3286 Register input_reg) { | 3326 Register input_reg) { |
3287 Label done; | 3327 Label done; |
3288 Movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset)); | 3328 Movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset)); |
3289 cvttsd2siq(result_reg, xmm0); | 3329 Cvttsd2siq(result_reg, xmm0); |
3290 cmpq(result_reg, Immediate(1)); | 3330 cmpq(result_reg, Immediate(1)); |
3291 j(no_overflow, &done, Label::kNear); | 3331 j(no_overflow, &done, Label::kNear); |
3292 | 3332 |
3293 // Slow case. | 3333 // Slow case. |
3294 if (input_reg.is(result_reg)) { | 3334 if (input_reg.is(result_reg)) { |
3295 subp(rsp, Immediate(kDoubleSize)); | 3335 subp(rsp, Immediate(kDoubleSize)); |
3296 Movsd(MemOperand(rsp, 0), xmm0); | 3336 Movsd(MemOperand(rsp, 0), xmm0); |
3297 SlowTruncateToI(result_reg, rsp, 0); | 3337 SlowTruncateToI(result_reg, rsp, 0); |
3298 addp(rsp, Immediate(kDoubleSize)); | 3338 addp(rsp, Immediate(kDoubleSize)); |
3299 } else { | 3339 } else { |
3300 SlowTruncateToI(result_reg, input_reg); | 3340 SlowTruncateToI(result_reg, input_reg); |
3301 } | 3341 } |
3302 | 3342 |
3303 bind(&done); | 3343 bind(&done); |
3304 // Keep our invariant that the upper 32 bits are zero. | 3344 // Keep our invariant that the upper 32 bits are zero. |
3305 movl(result_reg, result_reg); | 3345 movl(result_reg, result_reg); |
3306 } | 3346 } |
3307 | 3347 |
3308 | 3348 |
3309 void MacroAssembler::TruncateDoubleToI(Register result_reg, | 3349 void MacroAssembler::TruncateDoubleToI(Register result_reg, |
3310 XMMRegister input_reg) { | 3350 XMMRegister input_reg) { |
3311 Label done; | 3351 Label done; |
3312 cvttsd2siq(result_reg, input_reg); | 3352 Cvttsd2siq(result_reg, input_reg); |
3313 cmpq(result_reg, Immediate(1)); | 3353 cmpq(result_reg, Immediate(1)); |
3314 j(no_overflow, &done, Label::kNear); | 3354 j(no_overflow, &done, Label::kNear); |
3315 | 3355 |
3316 subp(rsp, Immediate(kDoubleSize)); | 3356 subp(rsp, Immediate(kDoubleSize)); |
3317 Movsd(MemOperand(rsp, 0), input_reg); | 3357 Movsd(MemOperand(rsp, 0), input_reg); |
3318 SlowTruncateToI(result_reg, rsp, 0); | 3358 SlowTruncateToI(result_reg, rsp, 0); |
3319 addp(rsp, Immediate(kDoubleSize)); | 3359 addp(rsp, Immediate(kDoubleSize)); |
3320 | 3360 |
3321 bind(&done); | 3361 bind(&done); |
3322 // Keep our invariant that the upper 32 bits are zero. | 3362 // Keep our invariant that the upper 32 bits are zero. |
3323 movl(result_reg, result_reg); | 3363 movl(result_reg, result_reg); |
3324 } | 3364 } |
3325 | 3365 |
3326 | 3366 |
3327 void MacroAssembler::DoubleToI(Register result_reg, XMMRegister input_reg, | 3367 void MacroAssembler::DoubleToI(Register result_reg, XMMRegister input_reg, |
3328 XMMRegister scratch, | 3368 XMMRegister scratch, |
3329 MinusZeroMode minus_zero_mode, | 3369 MinusZeroMode minus_zero_mode, |
3330 Label* lost_precision, Label* is_nan, | 3370 Label* lost_precision, Label* is_nan, |
3331 Label* minus_zero, Label::Distance dst) { | 3371 Label* minus_zero, Label::Distance dst) { |
3332 cvttsd2si(result_reg, input_reg); | 3372 Cvttsd2si(result_reg, input_reg); |
3333 Cvtlsi2sd(xmm0, result_reg); | 3373 Cvtlsi2sd(xmm0, result_reg); |
3334 ucomisd(xmm0, input_reg); | 3374 ucomisd(xmm0, input_reg); |
3335 j(not_equal, lost_precision, dst); | 3375 j(not_equal, lost_precision, dst); |
3336 j(parity_even, is_nan, dst); // NaN. | 3376 j(parity_even, is_nan, dst); // NaN. |
3337 if (minus_zero_mode == FAIL_ON_MINUS_ZERO) { | 3377 if (minus_zero_mode == FAIL_ON_MINUS_ZERO) { |
3338 Label done; | 3378 Label done; |
3339 // The integer converted back is equal to the original. We | 3379 // The integer converted back is equal to the original. We |
3340 // only have to test if we got -0 as an input. | 3380 // only have to test if we got -0 as an input. |
3341 testl(result_reg, result_reg); | 3381 testl(result_reg, result_reg); |
3342 j(not_zero, &done, Label::kNear); | 3382 j(not_zero, &done, Label::kNear); |
(...skipping 1818 matching lines...)
5161 movl(rax, dividend); | 5201 movl(rax, dividend); |
5162 shrl(rax, Immediate(31)); | 5202 shrl(rax, Immediate(31)); |
5163 addl(rdx, rax); | 5203 addl(rdx, rax); |
5164 } | 5204 } |
5165 | 5205 |
5166 | 5206 |
5167 } // namespace internal | 5207 } // namespace internal |
5168 } // namespace v8 | 5208 } // namespace v8 |
5169 | 5209 |
5170 #endif // V8_TARGET_ARCH_X64 | 5210 #endif // V8_TARGET_ARCH_X64 |
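
The new Cvttsd2si/Cvttsd2siq wrappers follow the same shape as the existing Cvtlsi2sd above: check once for AVX support and emit either the VEX-encoded form (vcvttsd2si/vcvttsd2siq) or the legacy SSE2 form of the truncating conversion. The following is a minimal, standalone C++ sketch of that dispatch idea, not V8 code; the helper names are invented, and the GCC/Clang builtin __builtin_cpu_supports stands in for CpuFeatures::IsSupported(AVX).

    #include <cstdint>

    // Hypothetical stand-ins for the two encodings; in the macro assembler
    // these would emit vcvttsd2siq (AVX) and cvttsd2siq (SSE2) respectively.
    static int64_t TruncateAvx(double d) { return static_cast<int64_t>(d); }
    static int64_t TruncateSse2(double d) { return static_cast<int64_t>(d); }

    // Pick the encoding at runtime, mirroring MacroAssembler::Cvttsd2siq.
    int64_t Cvttsd2siqSketch(double src) {
      if (__builtin_cpu_supports("avx")) {
        return TruncateAvx(src);
      }
      return TruncateSse2(src);
    }

The updated call sites in TruncateHeapNumberToI and TruncateDoubleToI rely on a property of the hardware instruction: on NaN or out-of-range input, cvttsd2siq writes the "integer indefinite" value 0x8000000000000000 (INT64_MIN), and that is the only value for which subtracting 1 overflows, so cmpq(result_reg, Immediate(1)) followed by j(no_overflow, &done) takes the fast path for every usable result (an input of exactly -2^63 also falls through to the slow path, which handles it correctly, just more slowly). A small sketch of the same check, assuming the value came from such a truncating conversion:

    #include <cstdint>
    #include <limits>

    // True when a cvttsd2siq-style result is the indefinite/overflow marker,
    // meaning the slow, stub-based truncation has to run instead.
    bool NeedsSlowTruncation(int64_t truncated) {
      return truncated == std::numeric_limits<int64_t>::min();
    }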