| OLD | NEW |
| 1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #if V8_TARGET_ARCH_X64 | 5 #if V8_TARGET_ARCH_X64 |
| 6 | 6 |
| 7 #include "src/base/bits.h" | 7 #include "src/base/bits.h" |
| 8 #include "src/code-factory.h" | 8 #include "src/code-factory.h" |
| 9 #include "src/code-stubs.h" | 9 #include "src/code-stubs.h" |
| 10 #include "src/hydrogen-osr.h" | 10 #include "src/hydrogen-osr.h" |
| (...skipping 70 matching lines...) |
| 81 | 81 |
| 82 | 82 |
| 83 void LCodeGen::SaveCallerDoubles() { | 83 void LCodeGen::SaveCallerDoubles() { |
| 84 DCHECK(info()->saves_caller_doubles()); | 84 DCHECK(info()->saves_caller_doubles()); |
| 85 DCHECK(NeedsEagerFrame()); | 85 DCHECK(NeedsEagerFrame()); |
| 86 Comment(";;; Save clobbered callee double registers"); | 86 Comment(";;; Save clobbered callee double registers"); |
| 87 int count = 0; | 87 int count = 0; |
| 88 BitVector* doubles = chunk()->allocated_double_registers(); | 88 BitVector* doubles = chunk()->allocated_double_registers(); |
| 89 BitVector::Iterator save_iterator(doubles); | 89 BitVector::Iterator save_iterator(doubles); |
| 90 while (!save_iterator.Done()) { | 90 while (!save_iterator.Done()) { |
| 91 __ movsd(MemOperand(rsp, count * kDoubleSize), | 91 __ Movsd(MemOperand(rsp, count * kDoubleSize), |
| 92 XMMRegister::from_code(save_iterator.Current())); | 92 XMMRegister::from_code(save_iterator.Current())); |
| 93 save_iterator.Advance(); | 93 save_iterator.Advance(); |
| 94 count++; | 94 count++; |
| 95 } | 95 } |
| 96 } | 96 } |
| 97 | 97 |
| 98 | 98 |
| 99 void LCodeGen::RestoreCallerDoubles() { | 99 void LCodeGen::RestoreCallerDoubles() { |
| 100 DCHECK(info()->saves_caller_doubles()); | 100 DCHECK(info()->saves_caller_doubles()); |
| 101 DCHECK(NeedsEagerFrame()); | 101 DCHECK(NeedsEagerFrame()); |
| 102 Comment(";;; Restore clobbered callee double registers"); | 102 Comment(";;; Restore clobbered callee double registers"); |
| 103 BitVector* doubles = chunk()->allocated_double_registers(); | 103 BitVector* doubles = chunk()->allocated_double_registers(); |
| 104 BitVector::Iterator save_iterator(doubles); | 104 BitVector::Iterator save_iterator(doubles); |
| 105 int count = 0; | 105 int count = 0; |
| 106 while (!save_iterator.Done()) { | 106 while (!save_iterator.Done()) { |
| 107 __ movsd(XMMRegister::from_code(save_iterator.Current()), | 107 __ Movsd(XMMRegister::from_code(save_iterator.Current()), |
| 108 MemOperand(rsp, count * kDoubleSize)); | 108 MemOperand(rsp, count * kDoubleSize)); |
| 109 save_iterator.Advance(); | 109 save_iterator.Advance(); |
| 110 count++; | 110 count++; |
| 111 } | 111 } |
| 112 } | 112 } |
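The substantive change in this file is the switch from the lower-case SSE2 emitters (movsd, movaps) to the capitalized MacroAssembler wrappers (Movsd, Movapd). The wrapper definitions are not part of this diff; a minimal sketch of what such a wrapper presumably looks like, assuming it follows the same AVX-dispatch pattern already visible in DoArithmeticD below:

    // Sketch only (assumption): the real definitions live in the x64
    // macro assembler, which is outside this diff.
    void MacroAssembler::Movsd(XMMRegister dst, const Operand& src) {
      if (CpuFeatures::IsSupported(AVX)) {
        CpuFeatureScope scope(this, AVX);
        vmovsd(dst, src);  // VEX-encoded form when AVX is available
      } else {
        movsd(dst, src);   // legacy SSE2 encoding otherwise
      }
    }

Routing every double move through one helper keeps the generated code consistently VEX-encoded on AVX-capable CPUs, which presumably avoids the penalties some microarchitectures impose for mixing legacy SSE and AVX instructions.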
| 113 | 113 |
| 114 | 114 |
| 115 bool LCodeGen::GeneratePrologue() { | 115 bool LCodeGen::GeneratePrologue() { |
| 116 DCHECK(is_generating()); | 116 DCHECK(is_generating()); |
| 117 | 117 |
| (...skipping 1866 matching lines...) |
| 1984 } else { | 1984 } else { |
| 1985 // Since we operate on +0 and/or -0, addsd and andsd have the same effect. | 1985 // Since we operate on +0 and/or -0, addsd and andsd have the same effect. |
| 1986 __ addsd(left_reg, right_reg); | 1986 __ addsd(left_reg, right_reg); |
| 1987 } | 1987 } |
| 1988 __ jmp(&return_left, Label::kNear); | 1988 __ jmp(&return_left, Label::kNear); |
| 1989 | 1989 |
| 1990 __ bind(&check_nan_left); | 1990 __ bind(&check_nan_left); |
| 1991 __ ucomisd(left_reg, left_reg); // NaN check. | 1991 __ ucomisd(left_reg, left_reg); // NaN check. |
| 1992 __ j(parity_even, &return_left, Label::kNear); | 1992 __ j(parity_even, &return_left, Label::kNear); |
| 1993 __ bind(&return_right); | 1993 __ bind(&return_right); |
| 1994 __ movaps(left_reg, right_reg); | 1994 __ Movapd(left_reg, right_reg); |
| 1995 | 1995 |
| 1996 __ bind(&return_left); | 1996 __ bind(&return_left); |
| 1997 } | 1997 } |
| 1998 } | 1998 } |
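A quick check of the comment in the else branch above (this appears to be the Math.max path): +0.0 has the bit pattern 0x0000000000000000 and -0.0 has 0x8000000000000000. addsd gives (+0) + (-0) = +0 under the default rounding mode, and a bitwise AND of the two patterns is likewise all zeros, i.e. +0, which is exactly what Math.max(+0, -0) must produce; so the two instructions really are interchangeable here.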
| 1999 | 1999 |
| 2000 | 2000 |
| 2001 void LCodeGen::DoArithmeticD(LArithmeticD* instr) { | 2001 void LCodeGen::DoArithmeticD(LArithmeticD* instr) { |
| 2002 XMMRegister left = ToDoubleRegister(instr->left()); | 2002 XMMRegister left = ToDoubleRegister(instr->left()); |
| 2003 XMMRegister right = ToDoubleRegister(instr->right()); | 2003 XMMRegister right = ToDoubleRegister(instr->right()); |
| 2004 XMMRegister result = ToDoubleRegister(instr->result()); | 2004 XMMRegister result = ToDoubleRegister(instr->result()); |
| (...skipping 28 matching lines...) |
| 2033 case Token::DIV: | 2033 case Token::DIV: |
| 2034 if (CpuFeatures::IsSupported(AVX)) { | 2034 if (CpuFeatures::IsSupported(AVX)) { |
| 2035 CpuFeatureScope scope(masm(), AVX); | 2035 CpuFeatureScope scope(masm(), AVX); |
| 2036 __ vdivsd(result, left, right); | 2036 __ vdivsd(result, left, right); |
| 2037 } else { | 2037 } else { |
| 2038 DCHECK(result.is(left)); | 2038 DCHECK(result.is(left)); |
| 2039 __ divsd(left, right); | 2039 __ divsd(left, right); |
| 2040 } | 2040 } |
| 2041 // Don't delete this mov. It may improve performance on some CPUs, | 2041 // Don't delete this mov. It may improve performance on some CPUs, |
| 2042 // when there is a (v)mulsd depending on the result | 2042 // when there is a (v)mulsd depending on the result |
| 2043 __ movaps(result, result); | 2043 __ Movapd(result, result); |
| 2044 break; | 2044 break; |
| 2045 case Token::MOD: { | 2045 case Token::MOD: { |
| 2046 XMMRegister xmm_scratch = double_scratch0(); | 2046 XMMRegister xmm_scratch = double_scratch0(); |
| 2047 __ PrepareCallCFunction(2); | 2047 __ PrepareCallCFunction(2); |
| 2048 __ movaps(xmm_scratch, left); | 2048 __ Movapd(xmm_scratch, left); |
| 2049 DCHECK(right.is(xmm1)); | 2049 DCHECK(right.is(xmm1)); |
| 2050 __ CallCFunction( | 2050 __ CallCFunction( |
| 2051 ExternalReference::mod_two_doubles_operation(isolate()), 2); | 2051 ExternalReference::mod_two_doubles_operation(isolate()), 2); |
| 2052 __ movaps(result, xmm_scratch); | 2052 __ Movapd(result, xmm_scratch); |
| 2053 break; | 2053 break; |
| 2054 } | 2054 } |
| 2055 default: | 2055 default: |
| 2056 UNREACHABLE(); | 2056 UNREACHABLE(); |
| 2057 break; | 2057 break; |
| 2058 } | 2058 } |
| 2059 } | 2059 } |
| 2060 | 2060 |
| 2061 | 2061 |
| 2062 void LCodeGen::DoArithmeticT(LArithmeticT* instr) { | 2062 void LCodeGen::DoArithmeticT(LArithmeticT* instr) { |
| (...skipping 321 matching lines...) |
| 2384 __ Cmp(input_reg, factory()->the_hole_value()); | 2384 __ Cmp(input_reg, factory()->the_hole_value()); |
| 2385 EmitBranch(instr, equal); | 2385 EmitBranch(instr, equal); |
| 2386 return; | 2386 return; |
| 2387 } | 2387 } |
| 2388 | 2388 |
| 2389 XMMRegister input_reg = ToDoubleRegister(instr->object()); | 2389 XMMRegister input_reg = ToDoubleRegister(instr->object()); |
| 2390 __ ucomisd(input_reg, input_reg); | 2390 __ ucomisd(input_reg, input_reg); |
| 2391 EmitFalseBranch(instr, parity_odd); | 2391 EmitFalseBranch(instr, parity_odd); |
| 2392 | 2392 |
| 2393 __ subp(rsp, Immediate(kDoubleSize)); | 2393 __ subp(rsp, Immediate(kDoubleSize)); |
| 2394 __ movsd(MemOperand(rsp, 0), input_reg); | 2394 __ Movsd(MemOperand(rsp, 0), input_reg); |
| 2395 __ addp(rsp, Immediate(kDoubleSize)); | 2395 __ addp(rsp, Immediate(kDoubleSize)); |
| 2396 | 2396 |
| 2397 int offset = sizeof(kHoleNanUpper32); | 2397 int offset = sizeof(kHoleNanUpper32); |
| 2398 __ cmpl(MemOperand(rsp, -offset), Immediate(kHoleNanUpper32)); | 2398 __ cmpl(MemOperand(rsp, -offset), Immediate(kHoleNanUpper32)); |
| 2399 EmitBranch(instr, equal); | 2399 EmitBranch(instr, equal); |
| 2400 } | 2400 } |
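For context on the check above: the "hole" marker in V8's double arrays is one specific NaN bit pattern, so once ucomisd has excluded ordinary non-NaN values, comparing only the upper 32 bits of the spilled double against kHoleNanUpper32 identifies it. A rough C++ equivalent, as an illustration only (IsHoleNan is a made-up name, not V8 API):

    #include <cstdint>
    #include <cstring>

    // Illustrative helper; mirrors the stack spill + 32-bit compare above.
    static bool IsHoleNan(double d, uint32_t hole_nan_upper32) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof(bits));  // like the Movsd to the stack slot
      return static_cast<uint32_t>(bits >> 32) == hole_nan_upper32;
    }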
| 2401 | 2401 |
| 2402 | 2402 |
| 2403 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) { | 2403 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) { |
| 2404 Representation rep = instr->hydrogen()->value()->representation(); | 2404 Representation rep = instr->hydrogen()->value()->representation(); |
| (...skipping 457 matching lines...) |
| 2862 Register object = ToRegister(instr->object()); | 2862 Register object = ToRegister(instr->object()); |
| 2863 __ Load(result, MemOperand(object, offset), access.representation()); | 2863 __ Load(result, MemOperand(object, offset), access.representation()); |
| 2864 } | 2864 } |
| 2865 return; | 2865 return; |
| 2866 } | 2866 } |
| 2867 | 2867 |
| 2868 Register object = ToRegister(instr->object()); | 2868 Register object = ToRegister(instr->object()); |
| 2869 if (instr->hydrogen()->representation().IsDouble()) { | 2869 if (instr->hydrogen()->representation().IsDouble()) { |
| 2870 DCHECK(access.IsInobject()); | 2870 DCHECK(access.IsInobject()); |
| 2871 XMMRegister result = ToDoubleRegister(instr->result()); | 2871 XMMRegister result = ToDoubleRegister(instr->result()); |
| 2872 __ movsd(result, FieldOperand(object, offset)); | 2872 __ Movsd(result, FieldOperand(object, offset)); |
| 2873 return; | 2873 return; |
| 2874 } | 2874 } |
| 2875 | 2875 |
| 2876 Register result = ToRegister(instr->result()); | 2876 Register result = ToRegister(instr->result()); |
| 2877 if (!access.IsInobject()) { | 2877 if (!access.IsInobject()) { |
| 2878 __ movp(result, FieldOperand(object, JSObject::kPropertiesOffset)); | 2878 __ movp(result, FieldOperand(object, JSObject::kPropertiesOffset)); |
| 2879 object = result; | 2879 object = result; |
| 2880 } | 2880 } |
| 2881 | 2881 |
| 2882 Representation representation = access.representation(); | 2882 Representation representation = access.representation(); |
| (...skipping 112 matching lines...) |
| 2995 key, | 2995 key, |
| 2996 instr->hydrogen()->key()->representation(), | 2996 instr->hydrogen()->key()->representation(), |
| 2997 elements_kind, | 2997 elements_kind, |
| 2998 instr->base_offset())); | 2998 instr->base_offset())); |
| 2999 | 2999 |
| 3000 if (elements_kind == FLOAT32_ELEMENTS) { | 3000 if (elements_kind == FLOAT32_ELEMENTS) { |
| 3001 XMMRegister result(ToDoubleRegister(instr->result())); | 3001 XMMRegister result(ToDoubleRegister(instr->result())); |
| 3002 __ movss(result, operand); | 3002 __ movss(result, operand); |
| 3003 __ cvtss2sd(result, result); | 3003 __ cvtss2sd(result, result); |
| 3004 } else if (elements_kind == FLOAT64_ELEMENTS) { | 3004 } else if (elements_kind == FLOAT64_ELEMENTS) { |
| 3005 __ movsd(ToDoubleRegister(instr->result()), operand); | 3005 __ Movsd(ToDoubleRegister(instr->result()), operand); |
| 3006 } else { | 3006 } else { |
| 3007 Register result(ToRegister(instr->result())); | 3007 Register result(ToRegister(instr->result())); |
| 3008 switch (elements_kind) { | 3008 switch (elements_kind) { |
| 3009 case INT8_ELEMENTS: | 3009 case INT8_ELEMENTS: |
| 3010 __ movsxbl(result, operand); | 3010 __ movsxbl(result, operand); |
| 3011 break; | 3011 break; |
| 3012 case UINT8_ELEMENTS: | 3012 case UINT8_ELEMENTS: |
| 3013 case UINT8_CLAMPED_ELEMENTS: | 3013 case UINT8_CLAMPED_ELEMENTS: |
| 3014 __ movzxbl(result, operand); | 3014 __ movzxbl(result, operand); |
| 3015 break; | 3015 break; |
| (...skipping 50 matching lines...) |
| 3066 __ cmpl(hole_check_operand, Immediate(kHoleNanUpper32)); | 3066 __ cmpl(hole_check_operand, Immediate(kHoleNanUpper32)); |
| 3067 DeoptimizeIf(equal, instr, Deoptimizer::kHole); | 3067 DeoptimizeIf(equal, instr, Deoptimizer::kHole); |
| 3068 } | 3068 } |
| 3069 | 3069 |
| 3070 Operand double_load_operand = BuildFastArrayOperand( | 3070 Operand double_load_operand = BuildFastArrayOperand( |
| 3071 instr->elements(), | 3071 instr->elements(), |
| 3072 key, | 3072 key, |
| 3073 instr->hydrogen()->key()->representation(), | 3073 instr->hydrogen()->key()->representation(), |
| 3074 FAST_DOUBLE_ELEMENTS, | 3074 FAST_DOUBLE_ELEMENTS, |
| 3075 instr->base_offset()); | 3075 instr->base_offset()); |
| 3076 __ movsd(result, double_load_operand); | 3076 __ Movsd(result, double_load_operand); |
| 3077 } | 3077 } |
| 3078 | 3078 |
| 3079 | 3079 |
| 3080 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { | 3080 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { |
| 3081 HLoadKeyed* hinstr = instr->hydrogen(); | 3081 HLoadKeyed* hinstr = instr->hydrogen(); |
| 3082 Register result = ToRegister(instr->result()); | 3082 Register result = ToRegister(instr->result()); |
| 3083 LOperand* key = instr->key(); | 3083 LOperand* key = instr->key(); |
| 3084 bool requires_hole_check = hinstr->RequiresHoleCheck(); | 3084 bool requires_hole_check = hinstr->RequiresHoleCheck(); |
| 3085 Representation representation = hinstr->representation(); | 3085 Representation representation = hinstr->representation(); |
| 3086 int offset = instr->base_offset(); | 3086 int offset = instr->base_offset(); |
| (...skipping 731 matching lines...) |
| 3818 __ xorps(xmm_scratch, xmm_scratch); | 3818 __ xorps(xmm_scratch, xmm_scratch); |
| 3819 __ ucomisd(input_reg, xmm_scratch); | 3819 __ ucomisd(input_reg, xmm_scratch); |
| 3820 __ j(above, &positive, Label::kNear); | 3820 __ j(above, &positive, Label::kNear); |
| 3821 __ j(not_carry, &zero, Label::kNear); | 3821 __ j(not_carry, &zero, Label::kNear); |
| 3822 __ pcmpeqd(input_reg, input_reg); | 3822 __ pcmpeqd(input_reg, input_reg); |
| 3823 __ jmp(&done, Label::kNear); | 3823 __ jmp(&done, Label::kNear); |
| 3824 __ bind(&zero); | 3824 __ bind(&zero); |
| 3825 ExternalReference ninf = | 3825 ExternalReference ninf = |
| 3826 ExternalReference::address_of_negative_infinity(); | 3826 ExternalReference::address_of_negative_infinity(); |
| 3827 Operand ninf_operand = masm()->ExternalOperand(ninf); | 3827 Operand ninf_operand = masm()->ExternalOperand(ninf); |
| 3828 __ movsd(input_reg, ninf_operand); | 3828 __ Movsd(input_reg, ninf_operand); |
| 3829 __ jmp(&done, Label::kNear); | 3829 __ jmp(&done, Label::kNear); |
| 3830 __ bind(&positive); | 3830 __ bind(&positive); |
| 3831 __ fldln2(); | 3831 __ fldln2(); |
| 3832 __ subp(rsp, Immediate(kDoubleSize)); | 3832 __ subp(rsp, Immediate(kDoubleSize)); |
| 3833 __ movsd(Operand(rsp, 0), input_reg); | 3833 __ Movsd(Operand(rsp, 0), input_reg); |
| 3834 __ fld_d(Operand(rsp, 0)); | 3834 __ fld_d(Operand(rsp, 0)); |
| 3835 __ fyl2x(); | 3835 __ fyl2x(); |
| 3836 __ fstp_d(Operand(rsp, 0)); | 3836 __ fstp_d(Operand(rsp, 0)); |
| 3837 __ movsd(input_reg, Operand(rsp, 0)); | 3837 __ Movsd(input_reg, Operand(rsp, 0)); |
| 3838 __ addp(rsp, Immediate(kDoubleSize)); | 3838 __ addp(rsp, Immediate(kDoubleSize)); |
| 3839 __ bind(&done); | 3839 __ bind(&done); |
| 3840 } | 3840 } |
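The x87 sequence in the positive case computes the natural logarithm via the identity ln(x) = ln(2) * log2(x): fldln2 pushes the constant ln 2, and fyl2x replaces ST(1) with ST(1) * log2(ST(0)), so the value stored back through the stack slot is ln(2) * log2(input) = ln(input). The special cases are dispatched beforehand in SSE registers: negative (or NaN) inputs get the all-ones NaN pattern from pcmpeqd, and zero gets -infinity loaded from memory.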
| 3841 | 3841 |
| 3842 | 3842 |
| 3843 void LCodeGen::DoMathClz32(LMathClz32* instr) { | 3843 void LCodeGen::DoMathClz32(LMathClz32* instr) { |
| 3844 Register input = ToRegister(instr->value()); | 3844 Register input = ToRegister(instr->value()); |
| 3845 Register result = ToRegister(instr->result()); | 3845 Register result = ToRegister(instr->result()); |
| 3846 | 3846 |
| 3847 __ Lzcntl(result, input); | 3847 __ Lzcntl(result, input); |
| (...skipping 170 matching lines...) |
| 4018 __ AssertNotSmi(object); | 4018 __ AssertNotSmi(object); |
| 4019 | 4019 |
| 4020 DCHECK(!representation.IsSmi() || | 4020 DCHECK(!representation.IsSmi() || |
| 4021 !instr->value()->IsConstantOperand() || | 4021 !instr->value()->IsConstantOperand() || |
| 4022 IsInteger32Constant(LConstantOperand::cast(instr->value()))); | 4022 IsInteger32Constant(LConstantOperand::cast(instr->value()))); |
| 4023 if (!FLAG_unbox_double_fields && representation.IsDouble()) { | 4023 if (!FLAG_unbox_double_fields && representation.IsDouble()) { |
| 4024 DCHECK(access.IsInobject()); | 4024 DCHECK(access.IsInobject()); |
| 4025 DCHECK(!hinstr->has_transition()); | 4025 DCHECK(!hinstr->has_transition()); |
| 4026 DCHECK(!hinstr->NeedsWriteBarrier()); | 4026 DCHECK(!hinstr->NeedsWriteBarrier()); |
| 4027 XMMRegister value = ToDoubleRegister(instr->value()); | 4027 XMMRegister value = ToDoubleRegister(instr->value()); |
| 4028 __ movsd(FieldOperand(object, offset), value); | 4028 __ Movsd(FieldOperand(object, offset), value); |
| 4029 return; | 4029 return; |
| 4030 } | 4030 } |
| 4031 | 4031 |
| 4032 if (hinstr->has_transition()) { | 4032 if (hinstr->has_transition()) { |
| 4033 Handle<Map> transition = hinstr->transition_map(); | 4033 Handle<Map> transition = hinstr->transition_map(); |
| 4034 AddDeprecationDependency(transition); | 4034 AddDeprecationDependency(transition); |
| 4035 if (!hinstr->NeedsWriteBarrierForMap()) { | 4035 if (!hinstr->NeedsWriteBarrierForMap()) { |
| 4036 __ Move(FieldOperand(object, HeapObject::kMapOffset), transition); | 4036 __ Move(FieldOperand(object, HeapObject::kMapOffset), transition); |
| 4037 } else { | 4037 } else { |
| 4038 Register temp = ToRegister(instr->temp()); | 4038 Register temp = ToRegister(instr->temp()); |
| (...skipping 27 matching lines...) |
| 4066 DCHECK(kSmiTagSize + kSmiShiftSize == 32); | 4066 DCHECK(kSmiTagSize + kSmiShiftSize == 32); |
| 4067 offset += kPointerSize / 2; | 4067 offset += kPointerSize / 2; |
| 4068 representation = Representation::Integer32(); | 4068 representation = Representation::Integer32(); |
| 4069 } | 4069 } |
| 4070 | 4070 |
| 4071 Operand operand = FieldOperand(write_register, offset); | 4071 Operand operand = FieldOperand(write_register, offset); |
| 4072 | 4072 |
| 4073 if (FLAG_unbox_double_fields && representation.IsDouble()) { | 4073 if (FLAG_unbox_double_fields && representation.IsDouble()) { |
| 4074 DCHECK(access.IsInobject()); | 4074 DCHECK(access.IsInobject()); |
| 4075 XMMRegister value = ToDoubleRegister(instr->value()); | 4075 XMMRegister value = ToDoubleRegister(instr->value()); |
| 4076 __ movsd(operand, value); | 4076 __ Movsd(operand, value); |
| 4077 | 4077 |
| 4078 } else if (instr->value()->IsRegister()) { | 4078 } else if (instr->value()->IsRegister()) { |
| 4079 Register value = ToRegister(instr->value()); | 4079 Register value = ToRegister(instr->value()); |
| 4080 __ Store(operand, value, representation); | 4080 __ Store(operand, value, representation); |
| 4081 } else { | 4081 } else { |
| 4082 LConstantOperand* operand_value = LConstantOperand::cast(instr->value()); | 4082 LConstantOperand* operand_value = LConstantOperand::cast(instr->value()); |
| 4083 if (IsInteger32Constant(operand_value)) { | 4083 if (IsInteger32Constant(operand_value)) { |
| 4084 DCHECK(!hinstr->NeedsWriteBarrier()); | 4084 DCHECK(!hinstr->NeedsWriteBarrier()); |
| 4085 int32_t value = ToInteger32(operand_value); | 4085 int32_t value = ToInteger32(operand_value); |
| 4086 if (representation.IsSmi()) { | 4086 if (representation.IsSmi()) { |
| (...skipping 152 matching lines...) |
| 4239 key, | 4239 key, |
| 4240 instr->hydrogen()->key()->representation(), | 4240 instr->hydrogen()->key()->representation(), |
| 4241 elements_kind, | 4241 elements_kind, |
| 4242 instr->base_offset())); | 4242 instr->base_offset())); |
| 4243 | 4243 |
| 4244 if (elements_kind == FLOAT32_ELEMENTS) { | 4244 if (elements_kind == FLOAT32_ELEMENTS) { |
| 4245 XMMRegister value(ToDoubleRegister(instr->value())); | 4245 XMMRegister value(ToDoubleRegister(instr->value())); |
| 4246 __ cvtsd2ss(value, value); | 4246 __ cvtsd2ss(value, value); |
| 4247 __ movss(operand, value); | 4247 __ movss(operand, value); |
| 4248 } else if (elements_kind == FLOAT64_ELEMENTS) { | 4248 } else if (elements_kind == FLOAT64_ELEMENTS) { |
| 4249 __ movsd(operand, ToDoubleRegister(instr->value())); | 4249 __ Movsd(operand, ToDoubleRegister(instr->value())); |
| 4250 } else { | 4250 } else { |
| 4251 Register value(ToRegister(instr->value())); | 4251 Register value(ToRegister(instr->value())); |
| 4252 switch (elements_kind) { | 4252 switch (elements_kind) { |
| 4253 case INT8_ELEMENTS: | 4253 case INT8_ELEMENTS: |
| 4254 case UINT8_ELEMENTS: | 4254 case UINT8_ELEMENTS: |
| 4255 case UINT8_CLAMPED_ELEMENTS: | 4255 case UINT8_CLAMPED_ELEMENTS: |
| 4256 __ movb(operand, value); | 4256 __ movb(operand, value); |
| 4257 break; | 4257 break; |
| 4258 case INT16_ELEMENTS: | 4258 case INT16_ELEMENTS: |
| 4259 case UINT16_ELEMENTS: | 4259 case UINT16_ELEMENTS: |
| (...skipping 37 matching lines...) |
| 4297 __ subsd(value, xmm_scratch); | 4297 __ subsd(value, xmm_scratch); |
| 4298 } | 4298 } |
| 4299 | 4299 |
| 4300 Operand double_store_operand = BuildFastArrayOperand( | 4300 Operand double_store_operand = BuildFastArrayOperand( |
| 4301 instr->elements(), | 4301 instr->elements(), |
| 4302 key, | 4302 key, |
| 4303 instr->hydrogen()->key()->representation(), | 4303 instr->hydrogen()->key()->representation(), |
| 4304 FAST_DOUBLE_ELEMENTS, | 4304 FAST_DOUBLE_ELEMENTS, |
| 4305 instr->base_offset()); | 4305 instr->base_offset()); |
| 4306 | 4306 |
| 4307 __ movsd(double_store_operand, value); | 4307 __ Movsd(double_store_operand, value); |
| 4308 } | 4308 } |
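On the canonicalization just above (the subsd against what is presumably an xmm_scratch zeroed in the elided lines): subtracting +0 leaves every ordinary double unchanged, including -0 under the default rounding mode, but it pushes any signaling NaN through an arithmetic operation and thereby quiets it. That presumably guarantees the value written to a FAST_DOUBLE_ELEMENTS backing store can never alias the hole NaN pattern that the hole checks elsewhere in this file (DoCmpHoleAndBranch, DoLoadKeyedFixedDoubleArray) look for.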
| 4309 | 4309 |
| 4310 | 4310 |
| 4311 void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) { | 4311 void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) { |
| 4312 HStoreKeyed* hinstr = instr->hydrogen(); | 4312 HStoreKeyed* hinstr = instr->hydrogen(); |
| 4313 LOperand* key = instr->key(); | 4313 LOperand* key = instr->key(); |
| 4314 int offset = instr->base_offset(); | 4314 int offset = instr->base_offset(); |
| 4315 Representation representation = hinstr->value()->representation(); | 4315 Representation representation = hinstr->value()->representation(); |
| 4316 | 4316 |
| 4317 if (kPointerSize == kInt32Size && !key->IsConstantOperand() && | 4317 if (kPointerSize == kInt32Size && !key->IsConstantOperand() && |
| (...skipping 486 matching lines...) |
| 4804 __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset)); | 4804 __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset)); |
| 4805 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); | 4805 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); |
| 4806 RecordSafepointWithRegisters( | 4806 RecordSafepointWithRegisters( |
| 4807 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); | 4807 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); |
| 4808 __ StoreToSafepointRegisterSlot(reg, rax); | 4808 __ StoreToSafepointRegisterSlot(reg, rax); |
| 4809 } | 4809 } |
| 4810 | 4810 |
| 4811 // Done. Put the value in temp_xmm into the value of the allocated heap | 4811 // Done. Put the value in temp_xmm into the value of the allocated heap |
| 4812 // number. | 4812 // number. |
| 4813 __ bind(&done); | 4813 __ bind(&done); |
| 4814 __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), temp_xmm); | 4814 __ Movsd(FieldOperand(reg, HeapNumber::kValueOffset), temp_xmm); |
| 4815 } | 4815 } |
| 4816 | 4816 |
| 4817 | 4817 |
| 4818 void LCodeGen::DoNumberTagD(LNumberTagD* instr) { | 4818 void LCodeGen::DoNumberTagD(LNumberTagD* instr) { |
| 4819 class DeferredNumberTagD final : public LDeferredCode { | 4819 class DeferredNumberTagD final : public LDeferredCode { |
| 4820 public: | 4820 public: |
| 4821 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr) | 4821 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr) |
| 4822 : LDeferredCode(codegen), instr_(instr) { } | 4822 : LDeferredCode(codegen), instr_(instr) { } |
| 4823 void Generate() override { codegen()->DoDeferredNumberTagD(instr_); } | 4823 void Generate() override { codegen()->DoDeferredNumberTagD(instr_); } |
| 4824 LInstruction* instr() override { return instr_; } | 4824 LInstruction* instr() override { return instr_; } |
| 4825 | 4825 |
| 4826 private: | 4826 private: |
| 4827 LNumberTagD* instr_; | 4827 LNumberTagD* instr_; |
| 4828 }; | 4828 }; |
| 4829 | 4829 |
| 4830 XMMRegister input_reg = ToDoubleRegister(instr->value()); | 4830 XMMRegister input_reg = ToDoubleRegister(instr->value()); |
| 4831 Register reg = ToRegister(instr->result()); | 4831 Register reg = ToRegister(instr->result()); |
| 4832 Register tmp = ToRegister(instr->temp()); | 4832 Register tmp = ToRegister(instr->temp()); |
| 4833 | 4833 |
| 4834 DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr); | 4834 DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr); |
| 4835 if (FLAG_inline_new) { | 4835 if (FLAG_inline_new) { |
| 4836 __ AllocateHeapNumber(reg, tmp, deferred->entry()); | 4836 __ AllocateHeapNumber(reg, tmp, deferred->entry()); |
| 4837 } else { | 4837 } else { |
| 4838 __ jmp(deferred->entry()); | 4838 __ jmp(deferred->entry()); |
| 4839 } | 4839 } |
| 4840 __ bind(deferred->exit()); | 4840 __ bind(deferred->exit()); |
| 4841 __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg); | 4841 __ Movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg); |
| 4842 } | 4842 } |
| 4843 | 4843 |
| 4844 | 4844 |
| 4845 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { | 4845 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { |
| 4846 // TODO(3095996): Get rid of this. For now, we need to make the | 4846 // TODO(3095996): Get rid of this. For now, we need to make the |
| 4847 // result register contain a valid pointer because it is already | 4847 // result register contain a valid pointer because it is already |
| 4848 // contained in the register pointer map. | 4848 // contained in the register pointer map. |
| 4849 Register reg = ToRegister(instr->result()); | 4849 Register reg = ToRegister(instr->result()); |
| 4850 __ Move(reg, Smi::FromInt(0)); | 4850 __ Move(reg, Smi::FromInt(0)); |
| 4851 | 4851 |
| (...skipping 55 matching lines...) |
| 4907 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { | 4907 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { |
| 4908 // Smi check. | 4908 // Smi check. |
| 4909 __ JumpIfSmi(input_reg, &load_smi, Label::kNear); | 4909 __ JumpIfSmi(input_reg, &load_smi, Label::kNear); |
| 4910 | 4910 |
| 4911 // Heap number map check. | 4911 // Heap number map check. |
| 4912 __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset), | 4912 __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset), |
| 4913 Heap::kHeapNumberMapRootIndex); | 4913 Heap::kHeapNumberMapRootIndex); |
| 4914 | 4914 |
| 4915 // On x64 it is safe to load at heap number offset before evaluating the map | 4915 // On x64 it is safe to load at heap number offset before evaluating the map |
| 4916 // check, since all heap objects are at least two words long. | 4916 // check, since all heap objects are at least two words long. |
| 4917 __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset)); | 4917 __ Movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset)); |
| 4918 | 4918 |
| 4919 if (can_convert_undefined_to_nan) { | 4919 if (can_convert_undefined_to_nan) { |
| 4920 __ j(not_equal, &convert, Label::kNear); | 4920 __ j(not_equal, &convert, Label::kNear); |
| 4921 } else { | 4921 } else { |
| 4922 DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber); | 4922 DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber); |
| 4923 } | 4923 } |
| 4924 | 4924 |
| 4925 if (deoptimize_on_minus_zero) { | 4925 if (deoptimize_on_minus_zero) { |
| 4926 XMMRegister xmm_scratch = double_scratch0(); | 4926 XMMRegister xmm_scratch = double_scratch0(); |
| 4927 __ xorps(xmm_scratch, xmm_scratch); | 4927 __ xorps(xmm_scratch, xmm_scratch); |
| (...skipping 58 matching lines...) |
| 4986 __ CompareRoot(input_reg, Heap::kFalseValueRootIndex); | 4986 __ CompareRoot(input_reg, Heap::kFalseValueRootIndex); |
| 4987 DeoptimizeIf(not_equal, instr, | 4987 DeoptimizeIf(not_equal, instr, |
| 4988 Deoptimizer::kNotAHeapNumberUndefinedBoolean); | 4988 Deoptimizer::kNotAHeapNumberUndefinedBoolean); |
| 4989 __ Set(input_reg, 0); | 4989 __ Set(input_reg, 0); |
| 4990 } else { | 4990 } else { |
| 4991 XMMRegister scratch = ToDoubleRegister(instr->temp()); | 4991 XMMRegister scratch = ToDoubleRegister(instr->temp()); |
| 4992 DCHECK(!scratch.is(xmm0)); | 4992 DCHECK(!scratch.is(xmm0)); |
| 4993 __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset), | 4993 __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset), |
| 4994 Heap::kHeapNumberMapRootIndex); | 4994 Heap::kHeapNumberMapRootIndex); |
| 4995 DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber); | 4995 DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber); |
| 4996 __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset)); | 4996 __ Movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset)); |
| 4997 __ cvttsd2si(input_reg, xmm0); | 4997 __ cvttsd2si(input_reg, xmm0); |
| 4998 __ Cvtlsi2sd(scratch, input_reg); | 4998 __ Cvtlsi2sd(scratch, input_reg); |
| 4999 __ ucomisd(xmm0, scratch); | 4999 __ ucomisd(xmm0, scratch); |
| 5000 DeoptimizeIf(not_equal, instr, Deoptimizer::kLostPrecision); | 5000 DeoptimizeIf(not_equal, instr, Deoptimizer::kLostPrecision); |
| 5001 DeoptimizeIf(parity_even, instr, Deoptimizer::kNaN); | 5001 DeoptimizeIf(parity_even, instr, Deoptimizer::kNaN); |
| 5002 if (instr->hydrogen()->GetMinusZeroMode() == FAIL_ON_MINUS_ZERO) { | 5002 if (instr->hydrogen()->GetMinusZeroMode() == FAIL_ON_MINUS_ZERO) { |
| 5003 __ testl(input_reg, input_reg); | 5003 __ testl(input_reg, input_reg); |
| 5004 __ j(not_zero, done); | 5004 __ j(not_zero, done); |
| 5005 __ movmskpd(input_reg, xmm0); | 5005 __ movmskpd(input_reg, xmm0); |
| 5006 __ andl(input_reg, Immediate(1)); | 5006 __ andl(input_reg, Immediate(1)); |
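The non-truncating branch above verifies that the heap number's value is exactly representable as an int32: it truncates with cvttsd2si, converts back with Cvtlsi2sd, and deoptimizes if the round trip compares unequal (lost precision) or unordered (NaN); a -0 input is then caught through the sign bit that movmskpd extracts. A rough C++ rendering of the same conditions, purely as a sketch (the helper name and signature are invented for illustration, not V8 API):

    #include <cmath>
    #include <cstdint>

    // Illustrative only: mirrors the deopt conditions emitted above.
    static bool DoubleToInt32Exact(double value, bool fail_on_minus_zero,
                                   int32_t* out) {
      if (std::isnan(value) ||
          value < static_cast<double>(INT32_MIN) ||
          value > static_cast<double>(INT32_MAX)) {
        return false;  // NaN / out of range -> deoptimize
      }
      int32_t truncated = static_cast<int32_t>(value);            // cvttsd2si
      if (static_cast<double>(truncated) != value) return false;  // lost precision
      if (fail_on_minus_zero && truncated == 0 && std::signbit(value)) {
        return false;  // -0, detected via the sign bit (movmskpd above)
      }
      *out = truncated;
      return true;
    }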
| (...skipping 291 matching lines...) |
| 5298 | 5298 |
| 5299 // Check for undefined. Undefined is converted to zero for clamping | 5299 // Check for undefined. Undefined is converted to zero for clamping |
| 5300 // conversions. | 5300 // conversions. |
| 5301 __ Cmp(input_reg, factory()->undefined_value()); | 5301 __ Cmp(input_reg, factory()->undefined_value()); |
| 5302 DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumberUndefined); | 5302 DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumberUndefined); |
| 5303 __ xorl(input_reg, input_reg); | 5303 __ xorl(input_reg, input_reg); |
| 5304 __ jmp(&done, Label::kNear); | 5304 __ jmp(&done, Label::kNear); |
| 5305 | 5305 |
| 5306 // Heap number | 5306 // Heap number |
| 5307 __ bind(&heap_number); | 5307 __ bind(&heap_number); |
| 5308 __ movsd(xmm_scratch, FieldOperand(input_reg, HeapNumber::kValueOffset)); | 5308 __ Movsd(xmm_scratch, FieldOperand(input_reg, HeapNumber::kValueOffset)); |
| 5309 __ ClampDoubleToUint8(xmm_scratch, temp_xmm_reg, input_reg); | 5309 __ ClampDoubleToUint8(xmm_scratch, temp_xmm_reg, input_reg); |
| 5310 __ jmp(&done, Label::kNear); | 5310 __ jmp(&done, Label::kNear); |
| 5311 | 5311 |
| 5312 // smi | 5312 // smi |
| 5313 __ bind(&is_smi); | 5313 __ bind(&is_smi); |
| 5314 __ SmiToInteger32(input_reg, input_reg); | 5314 __ SmiToInteger32(input_reg, input_reg); |
| 5315 __ ClampUint8(input_reg); | 5315 __ ClampUint8(input_reg); |
| 5316 | 5316 |
| 5317 __ bind(&done); | 5317 __ bind(&done); |
| 5318 } | 5318 } |
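The three-way dispatch above implements the clamped uint8 conversion used for clamped-array stores: smis are clamped directly, undefined becomes 0, and heap numbers go through ClampDoubleToUint8. As a sketch of the intended double semantics only (assuming round-to-nearest; this helper is illustrative, not V8's implementation):

    #include <cmath>
    #include <cstdint>

    // Illustrative clamp-to-uint8; assumes the default round-to-nearest mode.
    static uint8_t ClampedUint8(double d) {
      if (!(d > 0.0)) return 0;    // NaN, -0 and negative values clamp to 0
      if (d >= 255.0) return 255;
      return static_cast<uint8_t>(std::nearbyint(d));
    }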
| (...skipping 586 matching lines...) |
| 5905 RecordSafepoint(Safepoint::kNoLazyDeopt); | 5905 RecordSafepoint(Safepoint::kNoLazyDeopt); |
| 5906 } | 5906 } |
| 5907 | 5907 |
| 5908 | 5908 |
| 5909 #undef __ | 5909 #undef __ |
| 5910 | 5910 |
| 5911 } // namespace internal | 5911 } // namespace internal |
| 5912 } // namespace v8 | 5912 } // namespace v8 |
| 5913 | 5913 |
| 5914 #endif // V8_TARGET_ARCH_X64 | 5914 #endif // V8_TARGET_ARCH_X64 |