Chromium Code Reviews

Unified Diff: src/ia32/lithium-codegen-ia32.cc

Issue 153913002: A64: Synchronize with r16756. (Closed) Base URL: https://v8.googlecode.com/svn/branches/experimental/a64
Patch Set: Created 6 years, 10 months ago
@@ -1,10 +1,10 @@
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 668 matching lines...)
@@ -679,21 +679,21 @@
       }
       Free(reg);
       if (i < stack_depth_-1) i++;
     }
   }
   if (instr->IsReturn()) {
     while (stack_depth_ > 0) {
       __ fstp(0);
       stack_depth_--;
     }
-    __ VerifyX87StackDepth(0);
+    if (FLAG_debug_code && FLAG_enable_slow_asserts) __ VerifyX87StackDepth(0);
   }
 }
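
The x87 register stack must be empty when a block returns, so the IsReturn() path pops every live entry with fstp(0); the change above additionally gates the depth check behind --debug-code and slow asserts. A minimal C++ sketch of the invariant being enforced (an illustrative toy model, not V8 code):

    #include <cassert>

    // Toy model of the bookkeeping above; X87Stack is a compile-time
    // mirror of what fld/fstp do to the hardware stack at run time.
    struct X87StackModel {
      int depth = 0;                  // number of live st(i) entries
      void Push() { ++depth; }        // e.g. fld
      void PopTop() { --depth; }      // fstp(0) pops st(0)
      void FlushForReturn() {         // mirrors the IsReturn() path
        while (depth > 0) PopTop();
        assert(depth == 0);           // VerifyX87StackDepth(0), debug-only
      }
    };
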


 void LCodeGen::X87Stack::LeavingBlock(int current_block_id, LGoto* goto_instr) {
   ASSERT(stack_depth_ <= 1);
   // If ever used for new stubs producing two pairs of doubles joined into two
   // phis this assert hits. That situation is not handled, since the two stacks
   // might have st0 and st1 swapped.
   if (current_block_id + 1 != goto_instr->block_id()) {
(...skipping 1026 matching lines...)
@@ -1726,23 +1726,23 @@
           break;
         case 5:
           __ lea(left, Operand(left, left, times_4, 0));
           break;
         case 8:
           __ shl(left, 3);
           break;
         case 9:
           __ lea(left, Operand(left, left, times_8, 0));
           break;
         case 16:
           __ shl(left, 4);
           break;
         default:
           __ imul(left, left, constant);
           break;
       }
     } else {
       __ imul(left, left, constant);
     }
   } else {
     if (instr->hydrogen()->representation().IsSmi()) {
       __ SmiUntag(left);
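
This switch strength-reduces small constant multiplies into lea/shl forms, which have better latency than imul; judging from the surrounding code it only fires when the multiplication is known not to overflow. As plain C++ (an illustrative sketch, not V8 code), each case computes:

    #include <cstdint>

    // Each case matches the emitted instruction: lea computes
    // base + index*scale in one address-generation step, shl is a shift.
    int32_t MulByConstant(int32_t left, int32_t constant) {
      switch (constant) {
        case 5:  return left + left * 4;  // lea left, [left + left*4]
        case 8:  return left << 3;        // shl left, 3
        case 9:  return left + left * 8;  // lea left, [left + left*8]
        case 16: return left << 4;        // shl left, 4
        default: return left * constant;  // imul left, left, constant
      }
    }
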
(...skipping 452 matching lines...)
@@ -2201,49 +2201,47 @@
   }
 }


 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
   if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
     CpuFeatureScope scope(masm(), SSE2);
     XMMRegister left = ToDoubleRegister(instr->left());
     XMMRegister right = ToDoubleRegister(instr->right());
     XMMRegister result = ToDoubleRegister(instr->result());
-    // Modulo uses a fixed result register.
-    ASSERT(instr->op() == Token::MOD || left.is(result));
     switch (instr->op()) {
       case Token::ADD:
         __ addsd(left, right);
         break;
       case Token::SUB:
         __ subsd(left, right);
         break;
       case Token::MUL:
         __ mulsd(left, right);
         break;
       case Token::DIV:
         __ divsd(left, right);
         // Don't delete this mov. It may improve performance on some CPUs,
         // when there is a mulsd depending on the result
         __ movaps(left, left);
         break;
       case Token::MOD: {
         // Pass two doubles as arguments on the stack.
         __ PrepareCallCFunction(4, eax);
         __ movdbl(Operand(esp, 0 * kDoubleSize), left);
         __ movdbl(Operand(esp, 1 * kDoubleSize), right);
         __ CallCFunction(
             ExternalReference::double_fp_operation(Token::MOD, isolate()),
             4);

         // Return value is in st(0) on ia32.
-        // Store it into the (fixed) result register.
+        // Store it into the result register.
         __ sub(Operand(esp), Immediate(kDoubleSize));
         __ fstp_d(Operand(esp, 0));
         __ movdbl(result, Operand(esp, 0));
         __ add(Operand(esp), Immediate(kDoubleSize));
         break;
       }
       default:
         UNREACHABLE();
         break;
     }
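
The Token::MOD case has no single SSE2 instruction, so both doubles are passed on the stack to a C runtime helper; on ia32 a C function returns its double result in x87 st(0), which is why the code spills it with fstp_d and reloads it into an XMM register. Behaviourally the helper acts like C's fmod (a sketch of the semantics, not the actual runtime function):

    #include <cmath>

    // double_fp_operation(Token::MOD, ...) resolves to a C helper with
    // fmod semantics: the result carries the sign of the dividend.
    double DoubleMod(double left, double right) {
      return std::fmod(left, right);
    }
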
(...skipping 299 matching lines...)
@@ -2549,24 +2547,32 @@

   if (left->IsConstantOperand() && right->IsConstantOperand()) {
     // We can statically evaluate the comparison.
     double left_val = ToDouble(LConstantOperand::cast(left));
     double right_val = ToDouble(LConstantOperand::cast(right));
     int next_block = EvalComparison(instr->op(), left_val, right_val) ?
         instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
     EmitGoto(next_block);
   } else {
     if (instr->is_double()) {
-      CpuFeatureScope scope(masm(), SSE2);
+      if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
+        CpuFeatureScope scope(masm(), SSE2);
+        __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
+      } else {
+        X87Fxch(ToX87Register(right));
+        X87Fxch(ToX87Register(left), 1);
+        __ fld(0);
+        __ fld(2);
+        __ FCmp();
+      }
       // Don't base result on EFLAGS when a NaN is involved. Instead
       // jump to the false block.
-      __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
       __ j(parity_even, instr->FalseLabel(chunk_));
     } else {
       if (right->IsConstantOperand()) {
         __ cmp(ToOperand(left),
                ToImmediate(right, instr->hydrogen()->representation()));
       } else if (left->IsConstantOperand()) {
         __ cmp(ToOperand(right),
                ToImmediate(left, instr->hydrogen()->representation()));
         // We transposed the operands. Reverse the condition.
         cc = ReverseCondition(cc);
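
On the SSE2 path, ucomisd sets the parity flag when the comparison is unordered, i.e. when either operand is NaN, so j(parity_even, ...) routes every NaN comparison to the false block before the ordered condition is tested. A plain C++ sketch of the rule being implemented:

    #include <cmath>

    // Any ordered comparison involving NaN must evaluate to false;
    // the parity_even jump is the hardware form of this check.
    bool DoubleLess(double a, double b) {
      if (std::isnan(a) || std::isnan(b)) return false;  // unordered
      return a < b;
    }
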
(...skipping 1397 matching lines...)
@@ -3970,20 +3976,20 @@
     __ cvttsd2si(output_reg, Operand(input_reg));
     // Overflow is signalled with minint.
     __ cmp(output_reg, 0x80000000u);
     DeoptimizeIf(equal, instr->environment());
     __ jmp(&done, Label::kNear);

     // Non-zero negative reaches here.
     __ bind(&negative_sign);
     // Truncate, then compare and compensate.
     __ cvttsd2si(output_reg, Operand(input_reg));
-    __ cvtsi2sd(xmm_scratch, output_reg);
+    __ Cvtsi2sd(xmm_scratch, output_reg);
     __ ucomisd(input_reg, xmm_scratch);
     __ j(equal, &done, Label::kNear);
     __ sub(output_reg, Immediate(1));
     DeoptimizeIf(overflow, instr->environment());

     __ bind(&done);
   }
 }

(...skipping 29 matching lines...)
@@ -4020,21 +4026,21 @@
   // CVTTSD2SI rounds towards zero, we use ceil(x - (-0.5)) and then
   // compare and compensate.
   __ movsd(input_temp, input_reg);  // Do not alter input_reg.
   __ subsd(input_temp, xmm_scratch);
   __ cvttsd2si(output_reg, Operand(input_temp));
   // Catch minint due to overflow, and to prevent overflow when compensating.
   __ cmp(output_reg, 0x80000000u);
   __ RecordComment("D2I conversion overflow");
   DeoptimizeIf(equal, instr->environment());

-  __ cvtsi2sd(xmm_scratch, output_reg);
+  __ Cvtsi2sd(xmm_scratch, output_reg);
   __ ucomisd(xmm_scratch, input_temp);
   __ j(equal, &done);
   __ sub(output_reg, Immediate(1));
   // No overflow because we already ruled out minint.
   __ jmp(&done);

   __ bind(&round_to_zero);
   // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
   // we can ignore the difference between a result of -0 and +0.
   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
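
This branch handles x < -0.5, and xmm_scratch is understood to hold -0.5 here (loaded in the elided lines above): it computes t = x - (-0.5), truncates, and subtracts one when the truncation was inexact, which yields floor(x + 0.5), the round-half-up result. A C++ sketch under those assumptions:

    #include <cstdint>

    int32_t RoundNegative(double x) {  // assumes -2^31 < x < -0.5
      double t = x + 0.5;                           // x - (-0.5)
      int32_t out = static_cast<int32_t>(t);        // trunc == ceil for t < 0
      if (static_cast<double>(out) != t) out -= 1;  // now floor(x + 0.5)
      return out;
    }
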
(...skipping 78 matching lines...)
@@ -4119,102 +4125,83 @@
     __ CallStub(&stub);
   } else {
     ASSERT(exponent_type.IsDouble());
     MathPowStub stub(MathPowStub::DOUBLE);
     __ CallStub(&stub);
   }
 }


 void LCodeGen::DoRandom(LRandom* instr) {
-  class DeferredDoRandom V8_FINAL : public LDeferredCode {
-   public:
-    DeferredDoRandom(LCodeGen* codegen,
-                     LRandom* instr,
-                     const X87Stack& x87_stack)
-        : LDeferredCode(codegen, x87_stack), instr_(instr) { }
-    virtual void Generate() V8_OVERRIDE { codegen()->DoDeferredRandom(instr_); }
-    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
-   private:
-    LRandom* instr_;
-  };
+  CpuFeatureScope scope(masm(), SSE2);

-  DeferredDoRandom* deferred =
-      new(zone()) DeferredDoRandom(this, instr, x87_stack_);
-
-  CpuFeatureScope scope(masm(), SSE2);
-  // Having marked this instruction as a call we can use any
-  // registers.
-  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
-  ASSERT(ToRegister(instr->global_object()).is(eax));
   // Assert that the register size is indeed the size of each seed.
   static const int kSeedSize = sizeof(uint32_t);
   STATIC_ASSERT(kPointerSize == kSeedSize);

-  __ mov(eax, FieldOperand(eax, GlobalObject::kNativeContextOffset));
+  // Load native context
+  Register global_object = ToRegister(instr->global_object());
+  Register native_context = global_object;
+  __ mov(native_context, FieldOperand(
+          global_object, GlobalObject::kNativeContextOffset));
+
+  // Load state (FixedArray of the native context's random seeds)
   static const int kRandomSeedOffset =
       FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
-  __ mov(ebx, FieldOperand(eax, kRandomSeedOffset));
-  // ebx: FixedArray of the native context's random seeds
+  Register state = native_context;
+  __ mov(state, FieldOperand(native_context, kRandomSeedOffset));

   // Load state[0].
-  __ mov(ecx, FieldOperand(ebx, ByteArray::kHeaderSize));
-  // If state[0] == 0, call runtime to initialize seeds.
-  __ test(ecx, ecx);
-  __ j(zero, deferred->entry());
+  Register state0 = ToRegister(instr->scratch());
+  __ mov(state0, FieldOperand(state, ByteArray::kHeaderSize));
   // Load state[1].
-  __ mov(eax, FieldOperand(ebx, ByteArray::kHeaderSize + kSeedSize));
-  // ecx: state[0]
-  // eax: state[1]
+  Register state1 = ToRegister(instr->scratch2());
+  __ mov(state1, FieldOperand(state, ByteArray::kHeaderSize + kSeedSize));

   // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
-  __ movzx_w(edx, ecx);
-  __ imul(edx, edx, 18273);
-  __ shr(ecx, 16);
-  __ add(ecx, edx);
+  Register scratch3 = ToRegister(instr->scratch3());
+  __ movzx_w(scratch3, state0);
+  __ imul(scratch3, scratch3, 18273);
+  __ shr(state0, 16);
+  __ add(state0, scratch3);
   // Save state[0].
-  __ mov(FieldOperand(ebx, ByteArray::kHeaderSize), ecx);
+  __ mov(FieldOperand(state, ByteArray::kHeaderSize), state0);

   // state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
-  __ movzx_w(edx, eax);
-  __ imul(edx, edx, 36969);
-  __ shr(eax, 16);
-  __ add(eax, edx);
+  __ movzx_w(scratch3, state1);
+  __ imul(scratch3, scratch3, 36969);
+  __ shr(state1, 16);
+  __ add(state1, scratch3);
   // Save state[1].
-  __ mov(FieldOperand(ebx, ByteArray::kHeaderSize + kSeedSize), eax);
+  __ mov(FieldOperand(state, ByteArray::kHeaderSize + kSeedSize), state1);

   // Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
-  __ shl(ecx, 14);
-  __ and_(eax, Immediate(0x3FFFF));
-  __ add(eax, ecx);
+  Register random = state0;
+  __ shl(random, 14);
+  __ and_(state1, Immediate(0x3FFFF));
+  __ add(random, state1);

-  __ bind(deferred->exit());
-  // Convert 32 random bits in eax to 0.(32 random bits) in a double
+  // Convert 32 random bits in random to 0.(32 random bits) in a double
   // by computing:
   // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
-  __ mov(ebx, Immediate(0x49800000));  // 1.0 x 2^20 as single.
-  __ movd(xmm2, ebx);
-  __ movd(xmm1, eax);
-  __ cvtss2sd(xmm2, xmm2);
-  __ xorps(xmm1, xmm2);
-  __ subsd(xmm1, xmm2);
+  XMMRegister result = ToDoubleRegister(instr->result());
+  // We use xmm0 as fixed scratch register here.
+  XMMRegister scratch4 = xmm0;
+  __ mov(scratch3, Immediate(0x49800000));  // 1.0 x 2^20 as single.
+  __ movd(scratch4, scratch3);
+  __ movd(result, random);
+  __ cvtss2sd(scratch4, scratch4);
+  __ xorps(result, scratch4);
+  __ subsd(result, scratch4);
 }


-void LCodeGen::DoDeferredRandom(LRandom* instr) {
-  __ PrepareCallCFunction(1, ebx);
-  __ mov(Operand(esp, 0), eax);
-  __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
-  // Return value is in eax.
-}
-
-
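
The rewritten DoRandom no longer pins eax/ebx/ecx/edx and drops the deferred seed-initialization call, but the generator itself is unchanged: two 16-bit multiply-with-carry steps combined into 32 random bits, then a bit trick that plants those bits in the low mantissa of 1.0 x 2^20 and subtracts 1.0 x 2^20 to leave a double in [0, 1). A portable C++ sketch of what the emitted code computes (illustrative only; the real seeds live in the native context, not in globals):

    #include <cstdint>
    #include <cstring>

    static uint32_t state0 = 1, state1 = 2;  // stand-ins for context seeds

    double RandomSketch() {
      // Two multiply-with-carry steps (constants as in the diff).
      state0 = 18273 * (state0 & 0xFFFF) + (state0 >> 16);
      state1 = 36969 * (state1 & 0xFFFF) + (state1 >> 16);
      uint32_t random = (state0 << 14) + (state1 & 0x3FFFF);

      // XOR the 32 random bits into the low mantissa of 1.0 x 2^20,
      // giving 1.(20 zeros)(32 random bits) x 2^20, then subtract 2^20.
      double two_pow_20 = 1048576.0;                // 1.0 x 2^20
      uint64_t bits;
      std::memcpy(&bits, &two_pow_20, sizeof bits);
      bits ^= random;                               // low 32 mantissa bits
      double d;
      std::memcpy(&d, &bits, sizeof d);
      return d - two_pow_20;                        // in [0, 1)
    }
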
 void LCodeGen::DoMathLog(LMathLog* instr) {
   CpuFeatureScope scope(masm(), SSE2);
   ASSERT(instr->value()->Equals(instr->result()));
   XMMRegister input_reg = ToDoubleRegister(instr->value());
   Label positive, done, zero;
   __ xorps(xmm0, xmm0);
   __ ucomisd(input_reg, xmm0);
   __ j(above, &positive, Label::kNear);
   __ j(equal, &zero, Label::kNear);
   ExternalReference nan =
(...skipping 769 matching lines...)
@@ -4990,20 +4977,20 @@
 }


 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
   LOperand* input = instr->value();
   LOperand* output = instr->result();
   ASSERT(input->IsRegister() || input->IsStackSlot());
   ASSERT(output->IsDoubleRegister());
   if (CpuFeatures::IsSupported(SSE2)) {
     CpuFeatureScope scope(masm(), SSE2);
-    __ cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
+    __ Cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
   } else if (input->IsRegister()) {
     Register input_reg = ToRegister(input);
     __ push(input_reg);
     X87Mov(ToX87Register(output), Operand(esp, 0), kX87IntOperand);
     __ pop(input_reg);
   } else {
     X87Mov(ToX87Register(output), ToOperand(input), kX87IntOperand);
   }
 }
(...skipping 88 matching lines...)
@@ -5099,21 +5086,21 @@
   Label done;

   if (signedness == SIGNED_INT32) {
     // There was overflow, so bits 30 and 31 of the original integer
     // disagree. Try to allocate a heap number in new space and store
     // the value in there. If that fails, call the runtime system.
     __ SmiUntag(reg);
     __ xor_(reg, 0x80000000);
     if (CpuFeatures::IsSupported(SSE2)) {
       CpuFeatureScope feature_scope(masm(), SSE2);
-      __ cvtsi2sd(xmm0, Operand(reg));
+      __ Cvtsi2sd(xmm0, Operand(reg));
     } else {
       __ push(reg);
       __ fild_s(Operand(esp, 0));
       __ pop(reg);
     }
   } else {
     if (CpuFeatures::IsSupported(SSE2)) {
       CpuFeatureScope feature_scope(masm(), SSE2);
       __ LoadUint32(xmm0, reg,
                     ToDoubleRegister(LNumberTagU::cast(instr)->temp()));
(...skipping 181 matching lines...)
@@ -5301,85 +5288,90 @@
       // Pop FPU stack before deoptimizing.
       __ fstp(0);
       DeoptimizeIf(not_zero, env);
     }
     __ jmp(&done, Label::kNear);
   } else {
     ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
   }

   __ bind(&load_smi);
-  __ SmiUntag(input_reg);  // Untag smi before converting to float.
-  __ push(input_reg);
+  // Clobbering a temp is faster than re-tagging the
+  // input register since we avoid dependencies.
+  __ mov(temp_reg, input_reg);
+  __ SmiUntag(temp_reg);  // Untag smi before converting to float.
+  __ push(temp_reg);
   __ fild_s(Operand(esp, 0));
-  __ pop(input_reg);
-  __ SmiTag(input_reg);  // Retag smi.
+  __ add(esp, Immediate(kPointerSize));
   __ bind(&done);
   X87CommitWrite(res_reg);
 }


 void LCodeGen::EmitNumberUntagD(Register input_reg,
                                 Register temp_reg,
                                 XMMRegister result_reg,
                                 bool can_convert_undefined_to_nan,
                                 bool deoptimize_on_minus_zero,
                                 LEnvironment* env,
                                 NumberUntagDMode mode) {
-  Label load_smi, done;
+  Label convert, load_smi, done;

   if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
     // Smi check.
     __ JumpIfSmi(input_reg, &load_smi, Label::kNear);

     // Heap number map check.
     __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
            factory()->heap_number_map());
-    if (!can_convert_undefined_to_nan) {
+    if (can_convert_undefined_to_nan) {
+      __ j(not_equal, &convert, Label::kNear);
+    } else {
       DeoptimizeIf(not_equal, env);
-    } else {
-      Label heap_number, convert;
-      __ j(equal, &heap_number, Label::kNear);
+    }

-      // Convert undefined (and hole) to NaN.
-      __ cmp(input_reg, factory()->undefined_value());
-      DeoptimizeIf(not_equal, env);
-
-      __ bind(&convert);
-      ExternalReference nan =
-          ExternalReference::address_of_canonical_non_hole_nan();
-      __ movdbl(result_reg, Operand::StaticVariable(nan));
-      __ jmp(&done, Label::kNear);
-
-      __ bind(&heap_number);
-    }
     // Heap number to XMM conversion.
     __ movdbl(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
+
     if (deoptimize_on_minus_zero) {
       XMMRegister xmm_scratch = xmm0;
       __ xorps(xmm_scratch, xmm_scratch);
       __ ucomisd(result_reg, xmm_scratch);
       __ j(not_zero, &done, Label::kNear);
       __ movmskpd(temp_reg, result_reg);
       __ test_b(temp_reg, 1);
       DeoptimizeIf(not_zero, env);
     }
     __ jmp(&done, Label::kNear);
+
+    if (can_convert_undefined_to_nan) {
+      __ bind(&convert);
+
+      // Convert undefined (and hole) to NaN.
+      __ cmp(input_reg, factory()->undefined_value());
+      DeoptimizeIf(not_equal, env);
+
+      ExternalReference nan =
+          ExternalReference::address_of_canonical_non_hole_nan();
+      __ movdbl(result_reg, Operand::StaticVariable(nan));
+      __ jmp(&done, Label::kNear);
+    }
   } else {
     ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
   }

-  // Smi to XMM conversion
   __ bind(&load_smi);
-  __ SmiUntag(input_reg);  // Untag smi before converting to float.
-  __ cvtsi2sd(result_reg, Operand(input_reg));
-  __ SmiTag(input_reg);  // Retag smi.
+  // Smi to XMM conversion. Clobbering a temp is faster than re-tagging the
+  // input register since we avoid dependencies.
+  __ mov(temp_reg, input_reg);
+  __ SmiUntag(temp_reg);  // Untag smi before converting to float.
+  __ Cvtsi2sd(result_reg, Operand(temp_reg));
   __ bind(&done);
 }
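
Both smi-to-double paths above now copy the tagged value into a temp before untagging, instead of untagging and retagging the input register in place; the untag/retag pair is two dependent shifts on one register and serializes everything downstream that reads it. For reference, ia32 smi tagging as a C++ sketch (illustrative, not V8 code):

    #include <cstdint>

    // A smi stores a 31-bit integer shifted left by one; bit 0 clear
    // distinguishes smis from heap object pointers.
    int32_t SmiTag(int32_t value)    { return value << 1; }   // add reg, reg
    int32_t SmiUntag(int32_t tagged) { return tagged >> 1; }  // sar reg, 1
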


 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
   Register input_reg = ToRegister(instr->value());


   if (instr->truncating()) {
     Label heap_number, slow_case;
(...skipping 40 matching lines...)
@@ -5426,41 +5418,45 @@
     virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
    private:
     LTaggedToI* instr_;
   };

   LOperand* input = instr->value();
   ASSERT(input->IsRegister());
   Register input_reg = ToRegister(input);
   ASSERT(input_reg.is(ToRegister(instr->result())));

-  DeferredTaggedToI* deferred =
-      new(zone()) DeferredTaggedToI(this, instr, x87_stack_);
+  if (instr->hydrogen()->value()->representation().IsSmi()) {
+    __ SmiUntag(input_reg);
+  } else {
+    DeferredTaggedToI* deferred =
+        new(zone()) DeferredTaggedToI(this, instr, x87_stack_);

-  __ JumpIfNotSmi(input_reg, deferred->entry());
-  __ SmiUntag(input_reg);
-  __ bind(deferred->exit());
+    __ JumpIfNotSmi(input_reg, deferred->entry());
+    __ SmiUntag(input_reg);
+    __ bind(deferred->exit());
+  }
 }


 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
   LOperand* input = instr->value();
   ASSERT(input->IsRegister());
   LOperand* temp = instr->temp();
-  ASSERT(temp == NULL || temp->IsRegister());
+  ASSERT(temp->IsRegister());
   LOperand* result = instr->result();
   ASSERT(result->IsDoubleRegister());

   Register input_reg = ToRegister(input);
   bool deoptimize_on_minus_zero =
       instr->hydrogen()->deoptimize_on_minus_zero();
-  Register temp_reg = deoptimize_on_minus_zero ? ToRegister(temp) : no_reg;
+  Register temp_reg = ToRegister(temp);

   HValue* value = instr->hydrogen()->value();
   NumberUntagDMode mode = value->representation().IsSmi()
       ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;

   if (CpuFeatures::IsSupported(SSE2)) {
     CpuFeatureScope scope(masm(), SSE2);
     XMMRegister result_reg = ToDoubleRegister(result);
     EmitNumberUntagD(input_reg,
                      temp_reg,
(...skipping 906 matching lines...)
@@ -6373,10 +6369,10 @@
                        FixedArray::kHeaderSize - kPointerSize));
   __ bind(&done);
 }


 #undef __

 } }  // namespace v8::internal

 #endif  // V8_TARGET_ARCH_IA32