Chromium Code Reviews

Side by Side Diff: src/ia32/lithium-codegen-ia32.cc

Issue 24205004: Rollback trunk to 3.21.16.2 (Closed)
Base URL: https://v8.googlecode.com/svn/trunk
Patch Set: Created 7 years, 3 months ago
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 1715 matching lines...)
1726 break; 1726 break;
1727 case 5: 1727 case 5:
1728 __ lea(left, Operand(left, left, times_4, 0)); 1728 __ lea(left, Operand(left, left, times_4, 0));
1729 break; 1729 break;
1730 case 8: 1730 case 8:
1731 __ shl(left, 3); 1731 __ shl(left, 3);
1732 break; 1732 break;
1733 case 9: 1733 case 9:
1734 __ lea(left, Operand(left, left, times_8, 0)); 1734 __ lea(left, Operand(left, left, times_8, 0));
1735 break; 1735 break;
1736 case 16: 1736 case 16:
1737 __ shl(left, 4); 1737 __ shl(left, 4);
1738 break; 1738 break;
1739 default: 1739 default:
1740 __ imul(left, left, constant); 1740 __ imul(left, left, constant);
1741 break; 1741 break;
1742 } 1742 }
1743 } else { 1743 } else {
1744 __ imul(left, left, constant); 1744 __ imul(left, left, constant);
1745 } 1745 }
1746 } else { 1746 } else {
1747 if (instr->hydrogen()->representation().IsSmi()) { 1747 if (instr->hydrogen()->representation().IsSmi()) {
1748 __ SmiUntag(left); 1748 __ SmiUntag(left);
(...skipping 452 matching lines...)
2201 } 2201 }
2202 } 2202 }
2203 2203
2204 2204
2205 void LCodeGen::DoArithmeticD(LArithmeticD* instr) { 2205 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
2206 if (CpuFeatures::IsSafeForSnapshot(SSE2)) { 2206 if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
2207 CpuFeatureScope scope(masm(), SSE2); 2207 CpuFeatureScope scope(masm(), SSE2);
2208 XMMRegister left = ToDoubleRegister(instr->left()); 2208 XMMRegister left = ToDoubleRegister(instr->left());
2209 XMMRegister right = ToDoubleRegister(instr->right()); 2209 XMMRegister right = ToDoubleRegister(instr->right());
2210 XMMRegister result = ToDoubleRegister(instr->result()); 2210 XMMRegister result = ToDoubleRegister(instr->result());
2211 // Modulo uses a fixed result register.
2212 ASSERT(instr->op() == Token::MOD || left.is(result));
2211 switch (instr->op()) { 2213 switch (instr->op()) {
2212 case Token::ADD: 2214 case Token::ADD:
2213 __ addsd(left, right); 2215 __ addsd(left, right);
2214 break; 2216 break;
2215 case Token::SUB: 2217 case Token::SUB:
2216 __ subsd(left, right); 2218 __ subsd(left, right);
2217 break; 2219 break;
2218 case Token::MUL: 2220 case Token::MUL:
2219 __ mulsd(left, right); 2221 __ mulsd(left, right);
2220 break; 2222 break;
2221 case Token::DIV: 2223 case Token::DIV:
2222 __ divsd(left, right); 2224 __ divsd(left, right);
2223 // Don't delete this mov. It may improve performance on some CPUs, 2225 // Don't delete this mov. It may improve performance on some CPUs,
2224 // when there is a mulsd depending on the result 2226 // when there is a mulsd depending on the result
2225 __ movaps(left, left); 2227 __ movaps(left, left);
2226 break; 2228 break;
2227 case Token::MOD: { 2229 case Token::MOD: {
2228 // Pass two doubles as arguments on the stack. 2230 // Pass two doubles as arguments on the stack.
2229 __ PrepareCallCFunction(4, eax); 2231 __ PrepareCallCFunction(4, eax);
2230 __ movdbl(Operand(esp, 0 * kDoubleSize), left); 2232 __ movdbl(Operand(esp, 0 * kDoubleSize), left);
2231 __ movdbl(Operand(esp, 1 * kDoubleSize), right); 2233 __ movdbl(Operand(esp, 1 * kDoubleSize), right);
2232 __ CallCFunction( 2234 __ CallCFunction(
2233 ExternalReference::double_fp_operation(Token::MOD, isolate()), 2235 ExternalReference::double_fp_operation(Token::MOD, isolate()),
2234 4); 2236 4);
2235 2237
2236 // Return value is in st(0) on ia32. 2238 // Return value is in st(0) on ia32.
2237 // Store it into the result register. 2239 // Store it into the (fixed) result register.
2238 __ sub(Operand(esp), Immediate(kDoubleSize)); 2240 __ sub(Operand(esp), Immediate(kDoubleSize));
2239 __ fstp_d(Operand(esp, 0)); 2241 __ fstp_d(Operand(esp, 0));
2240 __ movdbl(result, Operand(esp, 0)); 2242 __ movdbl(result, Operand(esp, 0));
2241 __ add(Operand(esp), Immediate(kDoubleSize)); 2243 __ add(Operand(esp), Immediate(kDoubleSize));
2242 break; 2244 break;
2243 } 2245 }
2244 default: 2246 default:
2245 UNREACHABLE(); 2247 UNREACHABLE();
2246 break; 2248 break;
2247 } 2249 }
(...skipping 83 matching lines...)
2331 void LCodeGen::EmitFalseBranch(InstrType instr, Condition cc) { 2333 void LCodeGen::EmitFalseBranch(InstrType instr, Condition cc) {
2332 int false_block = instr->FalseDestination(chunk_); 2334 int false_block = instr->FalseDestination(chunk_);
2333 if (cc == no_condition) { 2335 if (cc == no_condition) {
2334 __ jmp(chunk_->GetAssemblyLabel(false_block)); 2336 __ jmp(chunk_->GetAssemblyLabel(false_block));
2335 } else { 2337 } else {
2336 __ j(cc, chunk_->GetAssemblyLabel(false_block)); 2338 __ j(cc, chunk_->GetAssemblyLabel(false_block));
2337 } 2339 }
2338 } 2340 }
2339 2341
2340 2342
2343 void LCodeGen::DoIsNumberAndBranch(LIsNumberAndBranch* instr) {
2344 Representation r = instr->hydrogen()->value()->representation();
2345 if (r.IsSmiOrInteger32() || r.IsDouble()) {
2346 EmitBranch(instr, no_condition);
2347 } else {
2348 ASSERT(r.IsTagged());
2349 Register reg = ToRegister(instr->value());
2350 HType type = instr->hydrogen()->value()->type();
2351 if (type.IsTaggedNumber()) {
2352 EmitBranch(instr, no_condition);
2353 }
2354 __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
2355 __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
2356 factory()->heap_number_map());
2357 EmitBranch(instr, equal);
2358 }
2359 }
2360
2361
2341 void LCodeGen::DoBranch(LBranch* instr) { 2362 void LCodeGen::DoBranch(LBranch* instr) {
2342 Representation r = instr->hydrogen()->value()->representation(); 2363 Representation r = instr->hydrogen()->value()->representation();
2343 if (r.IsSmiOrInteger32()) { 2364 if (r.IsSmiOrInteger32()) {
2344 Register reg = ToRegister(instr->value()); 2365 Register reg = ToRegister(instr->value());
2345 __ test(reg, Operand(reg)); 2366 __ test(reg, Operand(reg));
2346 EmitBranch(instr, not_zero); 2367 EmitBranch(instr, not_zero);
2347 } else if (r.IsDouble()) { 2368 } else if (r.IsDouble()) {
2348 ASSERT(!info()->IsStub()); 2369 ASSERT(!info()->IsStub());
2349 CpuFeatureScope scope(masm(), SSE2); 2370 CpuFeatureScope scope(masm(), SSE2);
2350 XMMRegister reg = ToDoubleRegister(instr->value()); 2371 XMMRegister reg = ToDoubleRegister(instr->value());
(...skipping 177 matching lines...)
2528 2549
2529 if (left->IsConstantOperand() && right->IsConstantOperand()) { 2550 if (left->IsConstantOperand() && right->IsConstantOperand()) {
2530 // We can statically evaluate the comparison. 2551 // We can statically evaluate the comparison.
2531 double left_val = ToDouble(LConstantOperand::cast(left)); 2552 double left_val = ToDouble(LConstantOperand::cast(left));
2532 double right_val = ToDouble(LConstantOperand::cast(right)); 2553 double right_val = ToDouble(LConstantOperand::cast(right));
2533 int next_block = EvalComparison(instr->op(), left_val, right_val) ? 2554 int next_block = EvalComparison(instr->op(), left_val, right_val) ?
2534 instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_); 2555 instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
2535 EmitGoto(next_block); 2556 EmitGoto(next_block);
2536 } else { 2557 } else {
2537 if (instr->is_double()) { 2558 if (instr->is_double()) {
2538 if (CpuFeatures::IsSafeForSnapshot(SSE2)) { 2559 CpuFeatureScope scope(masm(), SSE2);
2539 CpuFeatureScope scope(masm(), SSE2);
2540 __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
2541 } else {
2542 X87Fxch(ToX87Register(right));
2543 X87Fxch(ToX87Register(left), 1);
2544 __ fld(0);
2545 __ fld(2);
2546 __ FCmp();
2547 }
2548 // Don't base result on EFLAGS when a NaN is involved. Instead 2560 // Don't base result on EFLAGS when a NaN is involved. Instead
2549 // jump to the false block. 2561 // jump to the false block.
2562 __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
2550 __ j(parity_even, instr->FalseLabel(chunk_)); 2563 __ j(parity_even, instr->FalseLabel(chunk_));
2551 } else { 2564 } else {
2552 if (right->IsConstantOperand()) { 2565 if (right->IsConstantOperand()) {
2553 __ cmp(ToOperand(left), 2566 __ cmp(ToOperand(left),
2554 ToImmediate(right, instr->hydrogen()->representation())); 2567 ToImmediate(right, instr->hydrogen()->representation()));
2555 } else if (left->IsConstantOperand()) { 2568 } else if (left->IsConstantOperand()) {
2556 __ cmp(ToOperand(right), 2569 __ cmp(ToOperand(right),
2557 ToImmediate(left, instr->hydrogen()->representation())); 2570 ToImmediate(left, instr->hydrogen()->representation()));
2558 // We transposed the operands. Reverse the condition. 2571 // We transposed the operands. Reverse the condition.
2559 cc = ReverseCondition(cc); 2572 cc = ReverseCondition(cc);
(...skipping 1397 matching lines...)
3957 __ cvttsd2si(output_reg, Operand(input_reg)); 3970 __ cvttsd2si(output_reg, Operand(input_reg));
3958 // Overflow is signalled with minint. 3971 // Overflow is signalled with minint.
3959 __ cmp(output_reg, 0x80000000u); 3972 __ cmp(output_reg, 0x80000000u);
3960 DeoptimizeIf(equal, instr->environment()); 3973 DeoptimizeIf(equal, instr->environment());
3961 __ jmp(&done, Label::kNear); 3974 __ jmp(&done, Label::kNear);
3962 3975
3963 // Non-zero negative reaches here. 3976 // Non-zero negative reaches here.
3964 __ bind(&negative_sign); 3977 __ bind(&negative_sign);
3965 // Truncate, then compare and compensate. 3978 // Truncate, then compare and compensate.
3966 __ cvttsd2si(output_reg, Operand(input_reg)); 3979 __ cvttsd2si(output_reg, Operand(input_reg));
3967 __ Cvtsi2sd(xmm_scratch, output_reg); 3980 __ cvtsi2sd(xmm_scratch, output_reg);
3968 __ ucomisd(input_reg, xmm_scratch); 3981 __ ucomisd(input_reg, xmm_scratch);
3969 __ j(equal, &done, Label::kNear); 3982 __ j(equal, &done, Label::kNear);
3970 __ sub(output_reg, Immediate(1)); 3983 __ sub(output_reg, Immediate(1));
3971 DeoptimizeIf(overflow, instr->environment()); 3984 DeoptimizeIf(overflow, instr->environment());
3972 3985
3973 __ bind(&done); 3986 __ bind(&done);
3974 } 3987 }
3975 } 3988 }
3976 3989
3977 3990
(...skipping 29 matching lines...)
4007 // CVTTSD2SI rounds towards zero, we use ceil(x - (-0.5)) and then 4020 // CVTTSD2SI rounds towards zero, we use ceil(x - (-0.5)) and then
4008 // compare and compensate. 4021 // compare and compensate.
4009 __ movsd(input_temp, input_reg); // Do not alter input_reg. 4022 __ movsd(input_temp, input_reg); // Do not alter input_reg.
4010 __ subsd(input_temp, xmm_scratch); 4023 __ subsd(input_temp, xmm_scratch);
4011 __ cvttsd2si(output_reg, Operand(input_temp)); 4024 __ cvttsd2si(output_reg, Operand(input_temp));
4012 // Catch minint due to overflow, and to prevent overflow when compensating. 4025 // Catch minint due to overflow, and to prevent overflow when compensating.
4013 __ cmp(output_reg, 0x80000000u); 4026 __ cmp(output_reg, 0x80000000u);
4014 __ RecordComment("D2I conversion overflow"); 4027 __ RecordComment("D2I conversion overflow");
4015 DeoptimizeIf(equal, instr->environment()); 4028 DeoptimizeIf(equal, instr->environment());
4016 4029
4017 __ Cvtsi2sd(xmm_scratch, output_reg); 4030 __ cvtsi2sd(xmm_scratch, output_reg);
4018 __ ucomisd(xmm_scratch, input_temp); 4031 __ ucomisd(xmm_scratch, input_temp);
4019 __ j(equal, &done); 4032 __ j(equal, &done);
4020 __ sub(output_reg, Immediate(1)); 4033 __ sub(output_reg, Immediate(1));
4021 // No overflow because we already ruled out minint. 4034 // No overflow because we already ruled out minint.
4022 __ jmp(&done); 4035 __ jmp(&done);
4023 4036
4024 __ bind(&round_to_zero); 4037 __ bind(&round_to_zero);
4025 // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if 4038 // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
4026 // we can ignore the difference between a result of -0 and +0. 4039 // we can ignore the difference between a result of -0 and +0.
4027 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 4040 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
(...skipping 930 matching lines...)
4958 } 4971 }
4959 4972
4960 4973
4961 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { 4974 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
4962 LOperand* input = instr->value(); 4975 LOperand* input = instr->value();
4963 LOperand* output = instr->result(); 4976 LOperand* output = instr->result();
4964 ASSERT(input->IsRegister() || input->IsStackSlot()); 4977 ASSERT(input->IsRegister() || input->IsStackSlot());
4965 ASSERT(output->IsDoubleRegister()); 4978 ASSERT(output->IsDoubleRegister());
4966 if (CpuFeatures::IsSupported(SSE2)) { 4979 if (CpuFeatures::IsSupported(SSE2)) {
4967 CpuFeatureScope scope(masm(), SSE2); 4980 CpuFeatureScope scope(masm(), SSE2);
4968 __ Cvtsi2sd(ToDoubleRegister(output), ToOperand(input)); 4981 __ cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
4969 } else if (input->IsRegister()) { 4982 } else if (input->IsRegister()) {
4970 Register input_reg = ToRegister(input); 4983 Register input_reg = ToRegister(input);
4971 __ push(input_reg); 4984 __ push(input_reg);
4972 X87Mov(ToX87Register(output), Operand(esp, 0), kX87IntOperand); 4985 X87Mov(ToX87Register(output), Operand(esp, 0), kX87IntOperand);
4973 __ pop(input_reg); 4986 __ pop(input_reg);
4974 } else { 4987 } else {
4975 X87Mov(ToX87Register(output), ToOperand(input), kX87IntOperand); 4988 X87Mov(ToX87Register(output), ToOperand(input), kX87IntOperand);
4976 } 4989 }
4977 } 4990 }
4978 4991
(...skipping 88 matching lines...)
5067 Label done; 5080 Label done;
5068 5081
5069 if (signedness == SIGNED_INT32) { 5082 if (signedness == SIGNED_INT32) {
5070 // There was overflow, so bits 30 and 31 of the original integer 5083 // There was overflow, so bits 30 and 31 of the original integer
5071 // disagree. Try to allocate a heap number in new space and store 5084 // disagree. Try to allocate a heap number in new space and store
5072 // the value in there. If that fails, call the runtime system. 5085 // the value in there. If that fails, call the runtime system.
5073 __ SmiUntag(reg); 5086 __ SmiUntag(reg);
5074 __ xor_(reg, 0x80000000); 5087 __ xor_(reg, 0x80000000);
5075 if (CpuFeatures::IsSupported(SSE2)) { 5088 if (CpuFeatures::IsSupported(SSE2)) {
5076 CpuFeatureScope feature_scope(masm(), SSE2); 5089 CpuFeatureScope feature_scope(masm(), SSE2);
5077 __ Cvtsi2sd(xmm0, Operand(reg)); 5090 __ cvtsi2sd(xmm0, Operand(reg));
5078 } else { 5091 } else {
5079 __ push(reg); 5092 __ push(reg);
5080 __ fild_s(Operand(esp, 0)); 5093 __ fild_s(Operand(esp, 0));
5081 __ pop(reg); 5094 __ pop(reg);
5082 } 5095 }
5083 } else { 5096 } else {
5084 if (CpuFeatures::IsSupported(SSE2)) { 5097 if (CpuFeatures::IsSupported(SSE2)) {
5085 CpuFeatureScope feature_scope(masm(), SSE2); 5098 CpuFeatureScope feature_scope(masm(), SSE2);
5086 __ LoadUint32(xmm0, reg, 5099 __ LoadUint32(xmm0, reg,
5087 ToDoubleRegister(LNumberTagU::cast(instr)->temp())); 5100 ToDoubleRegister(LNumberTagU::cast(instr)->temp()));
(...skipping 200 matching lines...)
5288 } 5301 }
5289 5302
5290 5303
5291 void LCodeGen::EmitNumberUntagD(Register input_reg, 5304 void LCodeGen::EmitNumberUntagD(Register input_reg,
5292 Register temp_reg, 5305 Register temp_reg,
5293 XMMRegister result_reg, 5306 XMMRegister result_reg,
5294 bool can_convert_undefined_to_nan, 5307 bool can_convert_undefined_to_nan,
5295 bool deoptimize_on_minus_zero, 5308 bool deoptimize_on_minus_zero,
5296 LEnvironment* env, 5309 LEnvironment* env,
5297 NumberUntagDMode mode) { 5310 NumberUntagDMode mode) {
5298 Label convert, load_smi, done; 5311 Label load_smi, done;
5299 5312
5300 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { 5313 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
5301 // Smi check. 5314 // Smi check.
5302 __ JumpIfSmi(input_reg, &load_smi, Label::kNear); 5315 __ JumpIfSmi(input_reg, &load_smi, Label::kNear);
5303 5316
5304 // Heap number map check. 5317 // Heap number map check.
5305 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset), 5318 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
5306 factory()->heap_number_map()); 5319 factory()->heap_number_map());
5307 if (can_convert_undefined_to_nan) { 5320 if (!can_convert_undefined_to_nan) {
5308 __ j(not_equal, &convert, Label::kNear); 5321 DeoptimizeIf(not_equal, env);
5309 } else { 5322 } else {
5323 Label heap_number, convert;
5324 __ j(equal, &heap_number, Label::kNear);
5325
5326 // Convert undefined (and hole) to NaN.
5327 __ cmp(input_reg, factory()->undefined_value());
5310 DeoptimizeIf(not_equal, env); 5328 DeoptimizeIf(not_equal, env);
5329
5330 __ bind(&convert);
5331 ExternalReference nan =
5332 ExternalReference::address_of_canonical_non_hole_nan();
5333 __ movdbl(result_reg, Operand::StaticVariable(nan));
5334 __ jmp(&done, Label::kNear);
5335
5336 __ bind(&heap_number);
5311 } 5337 }
5312
5313 // Heap number to XMM conversion. 5338 // Heap number to XMM conversion.
5314 __ movdbl(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset)); 5339 __ movdbl(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
5315
5316 if (deoptimize_on_minus_zero) { 5340 if (deoptimize_on_minus_zero) {
5317 XMMRegister xmm_scratch = xmm0; 5341 XMMRegister xmm_scratch = xmm0;
5318 __ xorps(xmm_scratch, xmm_scratch); 5342 __ xorps(xmm_scratch, xmm_scratch);
5319 __ ucomisd(result_reg, xmm_scratch); 5343 __ ucomisd(result_reg, xmm_scratch);
5320 __ j(not_zero, &done, Label::kNear); 5344 __ j(not_zero, &done, Label::kNear);
5321 __ movmskpd(temp_reg, result_reg); 5345 __ movmskpd(temp_reg, result_reg);
5322 __ test_b(temp_reg, 1); 5346 __ test_b(temp_reg, 1);
5323 DeoptimizeIf(not_zero, env); 5347 DeoptimizeIf(not_zero, env);
5324 } 5348 }
5325 __ jmp(&done, Label::kNear); 5349 __ jmp(&done, Label::kNear);
5326
5327 if (can_convert_undefined_to_nan) {
5328 __ bind(&convert);
5329
5330 // Convert undefined (and hole) to NaN.
5331 __ cmp(input_reg, factory()->undefined_value());
5332 DeoptimizeIf(not_equal, env);
5333
5334 ExternalReference nan =
5335 ExternalReference::address_of_canonical_non_hole_nan();
5336 __ movdbl(result_reg, Operand::StaticVariable(nan));
5337 __ jmp(&done, Label::kNear);
5338 }
5339 } else { 5350 } else {
5340 ASSERT(mode == NUMBER_CANDIDATE_IS_SMI); 5351 ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
5341 } 5352 }
5342 5353
5343 __ bind(&load_smi); 5354 __ bind(&load_smi);
5344 // Smi to XMM conversion. Clobbering a temp is faster than re-tagging the 5355 // Smi to XMM conversion. Clobbering a temp is faster than re-tagging the
5345 // input register since we avoid dependencies. 5356 // input register since we avoid dependencies.
5346 __ mov(temp_reg, input_reg); 5357 __ mov(temp_reg, input_reg);
5347 __ SmiUntag(temp_reg); // Untag smi before converting to float. 5358 __ SmiUntag(temp_reg); // Untag smi before converting to float.
5348 __ Cvtsi2sd(result_reg, Operand(temp_reg)); 5359 __ cvtsi2sd(result_reg, Operand(temp_reg));
5349 __ bind(&done); 5360 __ bind(&done);
5350 } 5361 }
5351 5362
5352 5363
5353 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) { 5364 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
5354 Register input_reg = ToRegister(instr->value()); 5365 Register input_reg = ToRegister(instr->value());
5355 5366
5356 5367
5357 if (instr->truncating()) { 5368 if (instr->truncating()) {
5358 Label heap_number, slow_case; 5369 Label heap_number, slow_case;
(...skipping 40 matching lines...)
5399 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } 5410 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
5400 private: 5411 private:
5401 LTaggedToI* instr_; 5412 LTaggedToI* instr_;
5402 }; 5413 };
5403 5414
5404 LOperand* input = instr->value(); 5415 LOperand* input = instr->value();
5405 ASSERT(input->IsRegister()); 5416 ASSERT(input->IsRegister());
5406 Register input_reg = ToRegister(input); 5417 Register input_reg = ToRegister(input);
5407 ASSERT(input_reg.is(ToRegister(instr->result()))); 5418 ASSERT(input_reg.is(ToRegister(instr->result())));
5408 5419
5409 if (instr->hydrogen()->value()->representation().IsSmi()) { 5420 DeferredTaggedToI* deferred =
5410 __ SmiUntag(input_reg); 5421 new(zone()) DeferredTaggedToI(this, instr, x87_stack_);
5411 } else {
5412 DeferredTaggedToI* deferred =
5413 new(zone()) DeferredTaggedToI(this, instr, x87_stack_);
5414 5422
5415 __ JumpIfNotSmi(input_reg, deferred->entry()); 5423 __ JumpIfNotSmi(input_reg, deferred->entry());
5416 __ SmiUntag(input_reg); 5424 __ SmiUntag(input_reg);
5417 __ bind(deferred->exit()); 5425 __ bind(deferred->exit());
5418 }
5419 } 5426 }
5420 5427
5421 5428
5422 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { 5429 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
5423 LOperand* input = instr->value(); 5430 LOperand* input = instr->value();
5424 ASSERT(input->IsRegister()); 5431 ASSERT(input->IsRegister());
5425 LOperand* temp = instr->temp(); 5432 LOperand* temp = instr->temp();
5426 ASSERT(temp->IsRegister()); 5433 ASSERT(temp->IsRegister());
5427 LOperand* result = instr->result(); 5434 LOperand* result = instr->result();
5428 ASSERT(result->IsDoubleRegister()); 5435 ASSERT(result->IsDoubleRegister());
(...skipping 921 matching lines...)
6350 FixedArray::kHeaderSize - kPointerSize)); 6357 FixedArray::kHeaderSize - kPointerSize));
6351 __ bind(&done); 6358 __ bind(&done);
6352 } 6359 }
6353 6360
6354 6361
6355 #undef __ 6362 #undef __
6356 6363
6357 } } // namespace v8::internal 6364 } } // namespace v8::internal
6358 6365
6359 #endif // V8_TARGET_ARCH_IA32 6366 #endif // V8_TARGET_ARCH_IA32