Chromium Code Reviews

Side by Side Diff: src/ia32/lithium-codegen-ia32.cc

Issue 22290005: Move ToI conversions to the MacroAssembler (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: address review Created 7 years, 3 months ago
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 428 matching lines...)
439 ASSERT(info()->IsStub()); 439 ASSERT(info()->IsStub());
440 frame_is_built_ = true; 440 frame_is_built_ = true;
441 // Build the frame in such a way that esi isn't trashed. 441 // Build the frame in such a way that esi isn't trashed.
442 __ push(ebp); // Caller's frame pointer. 442 __ push(ebp); // Caller's frame pointer.
443 __ push(Operand(ebp, StandardFrameConstants::kContextOffset)); 443 __ push(Operand(ebp, StandardFrameConstants::kContextOffset));
444 __ push(Immediate(Smi::FromInt(StackFrame::STUB))); 444 __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
445 __ lea(ebp, Operand(esp, 2 * kPointerSize)); 445 __ lea(ebp, Operand(esp, 2 * kPointerSize));
446 Comment(";;; Deferred code"); 446 Comment(";;; Deferred code");
447 } 447 }
448 code->Generate(); 448 code->Generate();
449 __ bind(code->done());
449 if (NeedsDeferredFrame()) { 450 if (NeedsDeferredFrame()) {
450 Comment(";;; Destroy frame"); 451 Comment(";;; Destroy frame");
451 ASSERT(frame_is_built_); 452 ASSERT(frame_is_built_);
452 frame_is_built_ = false; 453 frame_is_built_ = false;
453 __ mov(esp, ebp); 454 __ mov(esp, ebp);
454 __ pop(ebp); 455 __ pop(ebp);
455 } 456 }
456 __ jmp(code->exit()); 457 __ jmp(code->exit());
457 } 458 }
458 } 459 }
(...skipping 4807 matching lines...)
5266 5267
5267 // Smi to XMM conversion 5268 // Smi to XMM conversion
5268 __ bind(&load_smi); 5269 __ bind(&load_smi);
5269 __ SmiUntag(input_reg); // Untag smi before converting to float. 5270 __ SmiUntag(input_reg); // Untag smi before converting to float.
5270 __ cvtsi2sd(result_reg, Operand(input_reg)); 5271 __ cvtsi2sd(result_reg, Operand(input_reg));
5271 __ SmiTag(input_reg); // Retag smi. 5272 __ SmiTag(input_reg); // Retag smi.
5272 __ bind(&done); 5273 __ bind(&done);
5273 } 5274 }
5274 5275
5275 5276
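The smi-to-XMM block above untags the value in place, converts it with cvtsi2sd, and then retags so the input register still holds the original tagged smi on exit. A minimal C++ sketch of the same idea, assuming the one-bit smi tagging used on ia32 (the helper names are illustrative, not the V8 ones):

    #include <cstdint>

    // Illustrative one-bit smi tagging: the integer is stored shifted left
    // by one, with a zero tag bit in the least significant position.
    static int32_t SmiUntag(int32_t tagged) { return tagged >> 1; }
    static int32_t SmiTag(int32_t value) {
      return static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
    }

    double SmiToDouble(int32_t* tagged_smi) {
      int32_t untagged = SmiUntag(*tagged_smi);        // untag smi
      double result = static_cast<double>(untagged);   // cvtsi2sd equivalent
      *tagged_smi = SmiTag(untagged);                  // retag smi
      return result;
    }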
5276 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { 5277 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
5277 Label done, heap_number;
5278 Register input_reg = ToRegister(instr->value()); 5278 Register input_reg = ToRegister(instr->value());
5279 5279
5280 // Heap number map check.
5281 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
5282 factory()->heap_number_map());
5283 5280
5284 if (instr->truncating()) { 5281 if (instr->truncating()) {
5282 Label heap_number, slow_case;
5283
5284 // Heap number map check.
5285 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
5286 factory()->heap_number_map());
5285 __ j(equal, &heap_number, Label::kNear); 5287 __ j(equal, &heap_number, Label::kNear);
5288
5286 // Check for undefined. Undefined is converted to zero for truncating 5289 // Check for undefined. Undefined is converted to zero for truncating
5287 // conversions. 5290 // conversions.
5288 __ cmp(input_reg, factory()->undefined_value()); 5291 __ cmp(input_reg, factory()->undefined_value());
5289 __ RecordComment("Deferred TaggedToI: cannot truncate"); 5292 __ RecordComment("Deferred TaggedToI: cannot truncate");
5290 DeoptimizeIf(not_equal, instr->environment()); 5293 DeoptimizeIf(not_equal, instr->environment());
5291 __ mov(input_reg, 0); 5294 __ mov(input_reg, 0);
5292 __ jmp(&done, Label::kNear); 5295 __ jmp(done);
5293 5296
5294 __ bind(&heap_number); 5297 __ bind(&heap_number);
5295 if (CpuFeatures::IsSupported(SSE3)) { 5298 __ TruncateHeapNumberToI(input_reg, input_reg);
5296 CpuFeatureScope scope(masm(), SSE3);
5297 Label convert;
5298 // Use more powerful conversion when sse3 is available.
5299 // Load x87 register with heap number.
5300 __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
5301 // Get exponent alone and check for too-big exponent.
5302 __ mov(input_reg, FieldOperand(input_reg, HeapNumber::kExponentOffset));
5303 __ and_(input_reg, HeapNumber::kExponentMask);
5304 const uint32_t kTooBigExponent =
5305 (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
5306 __ cmp(Operand(input_reg), Immediate(kTooBigExponent));
5307 __ j(less, &convert, Label::kNear);
5308 // Pop FPU stack before deoptimizing.
5309 __ fstp(0);
5310 __ RecordComment("Deferred TaggedToI: exponent too big");
5311 DeoptimizeIf(no_condition, instr->environment());
5312
5313 // Reserve space for 64 bit answer.
5314 __ bind(&convert);
5315 __ sub(Operand(esp), Immediate(kDoubleSize));
5316 // Do conversion, which cannot fail because we checked the exponent.
5317 __ fisttp_d(Operand(esp, 0));
5318 __ mov(input_reg, Operand(esp, 0)); // Low word of answer is the result.
5319 __ add(Operand(esp), Immediate(kDoubleSize));
5320 } else if (CpuFeatures::IsSupported(SSE2)) {
5321 CpuFeatureScope scope(masm(), SSE2);
5322 XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
5323 __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
5324 __ cvttsd2si(input_reg, Operand(xmm0));
5325 __ cmp(input_reg, 0x80000000u);
5326 __ j(not_equal, &done);
5327 // Check if the input was 0x80000000 (kMinInt).
5328 // If no, then we got an overflow and we deoptimize.
5329 ExternalReference min_int = ExternalReference::address_of_min_int();
5330 __ movdbl(xmm_temp, Operand::StaticVariable(min_int));
5331 __ ucomisd(xmm_temp, xmm0);
5332 DeoptimizeIf(not_equal, instr->environment());
5333 DeoptimizeIf(parity_even, instr->environment()); // NaN.
5334 } else {
5335 UNREACHABLE();
5336 }
5337 } else if (CpuFeatures::IsSupported(SSE2)) {
5338 CpuFeatureScope scope(masm(), SSE2);
5339 // Deoptimize if we don't have a heap number.
5340 __ RecordComment("Deferred TaggedToI: not a heap number");
5341 DeoptimizeIf(not_equal, instr->environment());
5342
5343 XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
5344 __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
5345 __ cvttsd2si(input_reg, Operand(xmm0));
5346 __ cvtsi2sd(xmm_temp, Operand(input_reg));
5347 __ ucomisd(xmm0, xmm_temp);
5348 __ RecordComment("Deferred TaggedToI: lost precision");
5349 DeoptimizeIf(not_equal, instr->environment());
5350 __ RecordComment("Deferred TaggedToI: NaN");
5351 DeoptimizeIf(parity_even, instr->environment()); // NaN.
5352 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
5353 __ test(input_reg, Operand(input_reg));
5354 __ j(not_zero, &done);
5355 __ movmskpd(input_reg, xmm0);
5356 __ and_(input_reg, 1);
5357 __ RecordComment("Deferred TaggedToI: minus zero");
5358 DeoptimizeIf(not_zero, instr->environment());
5359 }
5360 } else { 5299 } else {
5361 UNREACHABLE(); 5300 Label bailout;
5301 XMMRegister scratch = (instr->temp() != NULL)
5302 ? ToDoubleRegister(instr->temp())
5303 : no_xmm_reg;
5304 __ TaggedToI(input_reg, input_reg, scratch,
5305 instr->hydrogen()->GetMinusZeroMode(), &bailout);
5306 __ jmp(done);
5307 __ bind(&bailout);
5308 DeoptimizeIf(no_condition, instr->environment());
5362 } 5309 }
5363 __ bind(&done);
5364 } 5310 }
5365 5311
5366 5312
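In the rewritten deferred path above, a truncating conversion maps undefined to zero and hands heap numbers to TruncateHeapNumberToI, while the non-truncating case calls TaggedToI and must be lossless; anything else jumps to the bailout label and deoptimizes. A sketch of the non-truncating semantics in plain C++ (illustrative only, not the MacroAssembler implementation):

    #include <cmath>
    #include <cstdint>
    #include <limits>

    // Returns false where the generated code would deoptimize.
    bool NonTruncatingToInt32(double input, bool bailout_on_minus_zero,
                              int32_t* result) {
      if (std::isnan(input)) return false;                     // NaN
      if (std::trunc(input) != input) return false;            // lost precision
      if (input < std::numeric_limits<int32_t>::min() ||
          input > std::numeric_limits<int32_t>::max()) {
        return false;                                          // out of int32 range
      }
      int32_t converted = static_cast<int32_t>(input);
      if (bailout_on_minus_zero && converted == 0 && std::signbit(input)) {
        return false;                                          // -0.0
      }
      *result = converted;
      return true;
    }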
5367 void LCodeGen::DoTaggedToI(LTaggedToI* instr) { 5313 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
5368 class DeferredTaggedToI V8_FINAL : public LDeferredCode { 5314 class DeferredTaggedToI V8_FINAL : public LDeferredCode {
5369 public: 5315 public:
5370 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) 5316 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
5371 : LDeferredCode(codegen), instr_(instr) { } 5317 : LDeferredCode(codegen), instr_(instr) { }
5372 virtual void Generate() V8_OVERRIDE { 5318 virtual void Generate() V8_OVERRIDE {
5373 codegen()->DoDeferredTaggedToI(instr_); 5319 codegen()->DoDeferredTaggedToI(instr_, done());
5374 } 5320 }
5375 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } 5321 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
5376 private: 5322 private:
5377 LTaggedToI* instr_; 5323 LTaggedToI* instr_;
5378 }; 5324 };
5379 5325
5380 LOperand* input = instr->value(); 5326 LOperand* input = instr->value();
5381 ASSERT(input->IsRegister()); 5327 ASSERT(input->IsRegister());
5382 Register input_reg = ToRegister(input); 5328 Register input_reg = ToRegister(input);
5383 ASSERT(input_reg.is(ToRegister(instr->result()))); 5329 ASSERT(input_reg.is(ToRegister(instr->result())));
5384 5330
5385 DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr); 5331 DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
5386 5332
5387 __ JumpIfNotSmi(input_reg, deferred->entry()); 5333 __ JumpIfNotSmi(input_reg, deferred->entry());
5388 __ SmiUntag(input_reg); 5334 __ SmiUntag(input_reg);
5389 __ bind(deferred->exit()); 5335 __ bind(deferred->exit());
5390 } 5336 }
5391 5337
5392 5338
5393 void LCodeGen::DoDeferredTaggedToINoSSE2(LTaggedToINoSSE2* instr) {
5394 Label done, heap_number;
5395 Register result_reg = ToRegister(instr->result());
5396 Register input_reg = ToRegister(instr->value());
5397
5398 // Heap number map check.
5399 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
5400 factory()->heap_number_map());
5401 if (instr->truncating()) {
5402 __ j(equal, &heap_number, Label::kNear);
5403 // Check for undefined. Undefined is converted to zero for truncating
5404 // conversions.
5405 __ cmp(input_reg, factory()->undefined_value());
5406 __ RecordComment("Deferred TaggedToI: cannot truncate");
5407 DeoptimizeIf(not_equal, instr->environment());
5408 __ xor_(result_reg, result_reg);
5409 __ jmp(&done, Label::kFar);
5410 __ bind(&heap_number);
5411 } else {
5412 // Deoptimize if we don't have a heap number.
5413 DeoptimizeIf(not_equal, instr->environment());
5414 }
5415
5416 // Surprisingly, all of this crazy bit manipulation is considerably
5417 // faster than using the built-in x86 CPU conversion functions (about 6x).
5418 Label right_exponent, adjust_bias, zero_result;
5419 Register scratch = ToRegister(instr->scratch());
5420 Register scratch2 = ToRegister(instr->scratch2());
5421 // Get exponent word.
5422 __ mov(scratch, FieldOperand(input_reg, HeapNumber::kExponentOffset));
5423 // Get exponent alone in scratch2.
5424 __ mov(scratch2, scratch);
5425 __ and_(scratch2, HeapNumber::kExponentMask);
5426 __ shr(scratch2, HeapNumber::kExponentShift);
5427 if (instr->truncating()) {
5428 __ j(zero, &zero_result);
5429 } else {
5430 __ j(not_zero, &adjust_bias);
5431 __ test(scratch, Immediate(HeapNumber::kMantissaMask));
5432 DeoptimizeIf(not_zero, instr->environment());
5433 __ cmp(FieldOperand(input_reg, HeapNumber::kMantissaOffset), Immediate(0));
5434 DeoptimizeIf(not_equal, instr->environment());
5435 __ bind(&adjust_bias);
5436 }
5437 __ sub(scratch2, Immediate(HeapNumber::kExponentBias));
5438 if (!instr->truncating()) {
5439 DeoptimizeIf(negative, instr->environment());
5440 } else {
5441 __ j(negative, &zero_result);
5442 }
5443
5444 // Get the second half of the double. For some exponents we don't
5445 // actually need this because the bits get shifted out again, but
5446 // it's probably slower to test than just to do it.
5447 Register scratch3 = ToRegister(instr->scratch3());
5448 __ mov(scratch3, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
5449 __ xor_(result_reg, result_reg);
5450
5451 const uint32_t non_int32_exponent = 31;
5452 __ cmp(scratch2, Immediate(non_int32_exponent));
5453 // If we have a match of the int32 exponent then skip some logic.
5454 __ j(equal, &right_exponent, Label::kNear);
5455 // If the number doesn't fit in an int32, deopt.
5456 DeoptimizeIf(greater, instr->environment());
5457
5458 // Exponent word in scratch, exponent in scratch2. We know that 0 <= exponent
5459 // < 31.
5460 __ mov(result_reg, Immediate(31));
5461 __ sub(result_reg, scratch2);
5462
5463 __ bind(&right_exponent);
5464
5465 // Save off exponent for negative check later.
5466 __ mov(scratch2, scratch);
5467
5468 // Here result_reg is the shift, scratch is the exponent word.
5469 // Get the top bits of the mantissa.
5470 __ and_(scratch, HeapNumber::kMantissaMask);
5471 // Put back the implicit 1.
5472 __ or_(scratch, 1 << HeapNumber::kExponentShift);
5473 // Shift up the mantissa bits to take up the space the exponent used to
5474 // take. We have kExponentShift + 1 significant bits in the low end of the
5475 // word. Shift them to the top bits.
5476 const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 1;
5477 __ shl(scratch, shift_distance);
5478 if (!instr->truncating()) {
5479 // If not truncating, a non-zero value in the bottom 22 bits means a
5480 // non-integral value --> trigger a deopt.
5481 __ test(scratch3, Immediate((1 << (32 - shift_distance)) - 1));
5482 DeoptimizeIf(not_equal, instr->environment());
5483 }
5484 // Shift down 22 bits to get the most significant 10 bits of the low
5485 // mantissa word.
5486 __ shr(scratch3, 32 - shift_distance);
5487 __ or_(scratch3, scratch);
5488 if (!instr->truncating()) {
5489 // If not truncating, a non-zero value in the bits that will be shifted away
5490 // when adjusting the exponent means rounding --> deopt.
5491 __ mov(scratch, 0x1);
5492 ASSERT(result_reg.is(ecx));
5493 __ shl_cl(scratch);
5494 __ dec(scratch);
5495 __ test(scratch3, scratch);
5496 DeoptimizeIf(not_equal, instr->environment());
5497 }
5498 // Move down according to the exponent.
5499 ASSERT(result_reg.is(ecx));
5500 __ shr_cl(scratch3);
5501 // Now the unsigned 32-bit answer is in scratch3. We need to move it to
5502 // result_reg and we may need to fix the sign.
5503 Label negative_result;
5504 __ xor_(result_reg, result_reg);
5505 __ cmp(scratch2, result_reg);
5506 __ j(less, &negative_result, Label::kNear);
5507 __ cmp(scratch3, result_reg);
5508 __ mov(result_reg, scratch3);
5509 // If the result is > MAX_INT, result doesn't fit in signed 32-bit --> deopt.
5510 DeoptimizeIf(less, instr->environment());
5511 __ jmp(&done, Label::kNear);
5512 __ bind(&zero_result);
5513 __ xor_(result_reg, result_reg);
5514 __ jmp(&done, Label::kNear);
5515 __ bind(&negative_result);
5516 __ sub(result_reg, scratch3);
5517 if (!instr->truncating()) {
5518 // -0.0 triggers a deopt.
5519 DeoptimizeIf(zero, instr->environment());
5520 }
5521 // If the negative subtraction overflows into a positive number, there was an
5522 // overflow --> deopt.
5523 DeoptimizeIf(positive, instr->environment());
5524 __ bind(&done);
5525 }
5526
5527
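The no-SSE2 deferred code above converts a heap number by working directly on its IEEE-754 bits: isolate the exponent, restore the implicit mantissa bit, and shift the mantissa into place, deoptimizing whenever bits that matter would be lost. A compact C++ sketch of that bit manipulation, assuming the integer part of the value fits in an int32 (the case where the code above does not deoptimize); the function name is illustrative:

    #include <cstdint>
    #include <cstring>

    int32_t DoubleToInt32ViaBits(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));           // raw IEEE-754 bits
      bool negative = (bits >> 63) != 0;
      int exponent = static_cast<int>((bits >> 52) & 0x7FF) - 1023;
      if (exponent < 0) return 0;                         // |value| < 1 truncates to zero
      // 53-bit significand with the implicit leading 1 restored (the
      // "put back the implicit 1" step above).
      uint64_t significand =
          (bits & ((uint64_t{1} << 52) - 1)) | (uint64_t{1} << 52);
      // Shift the integer part down; with the int32 assumption the
      // exponent is at most 31, so the shift count stays in range.
      int64_t magnitude = static_cast<int64_t>(significand >> (52 - exponent));
      return static_cast<int32_t>(negative ? -magnitude : magnitude);
    }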
5528 void LCodeGen::DoTaggedToINoSSE2(LTaggedToINoSSE2* instr) {
5529 class DeferredTaggedToINoSSE2 V8_FINAL : public LDeferredCode {
5530 public:
5531 DeferredTaggedToINoSSE2(LCodeGen* codegen, LTaggedToINoSSE2* instr)
5532 : LDeferredCode(codegen), instr_(instr) { }
5533 virtual void Generate() V8_OVERRIDE {
5534 codegen()->DoDeferredTaggedToINoSSE2(instr_);
5535 }
5536 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
5537 private:
5538 LTaggedToINoSSE2* instr_;
5539 };
5540
5541 LOperand* input = instr->value();
5542 ASSERT(input->IsRegister());
5543 Register input_reg = ToRegister(input);
5544 ASSERT(input_reg.is(ToRegister(instr->result())));
5545
5546 DeferredTaggedToINoSSE2* deferred =
5547 new(zone()) DeferredTaggedToINoSSE2(this, instr);
5548
5549 // Smi check.
5550 __ JumpIfNotSmi(input_reg, deferred->entry());
5551 __ SmiUntag(input_reg); // Untag smi.
5552 __ bind(deferred->exit());
5553 }
5554
5555
5556 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { 5339 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
5557 LOperand* input = instr->value(); 5340 LOperand* input = instr->value();
5558 ASSERT(input->IsRegister()); 5341 ASSERT(input->IsRegister());
5559 LOperand* temp = instr->temp(); 5342 LOperand* temp = instr->temp();
5560 ASSERT(temp == NULL || temp->IsRegister()); 5343 ASSERT(temp == NULL || temp->IsRegister());
5561 LOperand* result = instr->result(); 5344 LOperand* result = instr->result();
5562 ASSERT(result->IsDoubleRegister()); 5345 ASSERT(result->IsDoubleRegister());
5563 5346
5564 Register input_reg = ToRegister(input); 5347 Register input_reg = ToRegister(input);
5565 bool deoptimize_on_minus_zero = 5348 bool deoptimize_on_minus_zero =
(...skipping 26 matching lines...)
5592 } 5375 }
5593 5376
5594 5377
5595 void LCodeGen::DoDoubleToI(LDoubleToI* instr) { 5378 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
5596 LOperand* input = instr->value(); 5379 LOperand* input = instr->value();
5597 ASSERT(input->IsDoubleRegister()); 5380 ASSERT(input->IsDoubleRegister());
5598 LOperand* result = instr->result(); 5381 LOperand* result = instr->result();
5599 ASSERT(result->IsRegister()); 5382 ASSERT(result->IsRegister());
5600 Register result_reg = ToRegister(result); 5383 Register result_reg = ToRegister(result);
5601 5384
5602 Label done; 5385 if (instr->truncating()) {
5603 if (CpuFeatures::IsSafeForSnapshot(SSE2)) { 5386 if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
5604 CpuFeatureScope scope(masm(), SSE2); 5387 CpuFeatureScope scope(masm(), SSE2);
5605 5388 XMMRegister input_reg = ToDoubleRegister(input);
5606 XMMRegister input_reg = ToDoubleRegister(input); 5389 __ TruncateDoubleToI(result_reg, input_reg);
5607
5608 __ cvttsd2si(result_reg, Operand(input_reg));
5609
5610 if (instr->truncating()) {
5611 // Performs a truncating conversion of a floating point number as used by
5612 // the JS bitwise operations.
5613 Label fast_case_succeeded;
5614 __ cmp(result_reg, 0x80000000u);
5615 __ j(not_equal, &fast_case_succeeded);
5616 __ sub(esp, Immediate(kDoubleSize));
5617 __ movdbl(MemOperand(esp, 0), input_reg);
5618 DoubleToIStub stub(esp, result_reg, 0, true);
5619 __ call(stub.GetCode(isolate()), RelocInfo::CODE_TARGET);
5620 __ add(esp, Immediate(kDoubleSize));
5621 __ bind(&fast_case_succeeded);
5622 } else { 5390 } else {
5623 __ cvtsi2sd(xmm0, Operand(result_reg)); 5391 X87Register input_reg = ToX87Register(input);
5624 __ ucomisd(xmm0, input_reg); 5392 X87Fxch(input_reg);
5625 DeoptimizeIf(not_equal, instr->environment()); 5393 __ TruncateX87TOSToI(result_reg);
5626 DeoptimizeIf(parity_even, instr->environment()); // NaN.
5627 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
5628 // The integer converted back is equal to the original. We
5629 // only have to test if we got -0 as an input.
5630 __ test(result_reg, Operand(result_reg));
5631 __ j(not_zero, &done, Label::kNear);
5632 __ movmskpd(result_reg, input_reg);
5633 // Bit 0 contains the sign of the double in input_reg.
5634 // If input was positive, we are ok and return 0, otherwise
5635 // deoptimize.
5636 __ and_(result_reg, 1);
5637 DeoptimizeIf(not_zero, instr->environment());
5638 }
5639 __ bind(&done);
5640 } 5394 }
5641 } else { 5395 } else {
5642 X87Register input_reg = ToX87Register(input); 5396 Label bailout, done;
5643 __ push(result_reg); 5397 if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
5644 X87Mov(Operand(esp, 0), input_reg, kX87IntOperand); 5398 CpuFeatureScope scope(masm(), SSE2);
5645 if (instr->truncating()) { 5399 XMMRegister input_reg = ToDoubleRegister(input);
5646 __ pop(result_reg); 5400 __ DoubleToI(result_reg, input_reg, xmm0,
5401 instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
5647 } else { 5402 } else {
5403 X87Register input_reg = ToX87Register(input);
5648 X87Fxch(input_reg); 5404 X87Fxch(input_reg);
5649 __ fld(0); 5405 __ X87TOSToI(result_reg, instr->hydrogen()->GetMinusZeroMode(),
5650 __ fild_s(Operand(esp, 0)); 5406 &bailout, Label::kNear);
5651 __ pop(result_reg);
5652 __ FCmp();
5653 DeoptimizeIf(not_equal, instr->environment());
5654 DeoptimizeIf(parity_even, instr->environment()); // NaN.
5655 } 5407 }
5656 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 5408 __ jmp(&done, Label::kNear);
5657 __ test(result_reg, Operand(result_reg)); 5409 __ bind(&bailout);
5658 __ j(not_zero, &done, Label::kNear); 5410 DeoptimizeIf(no_condition, instr->environment());
5659 // To check for minus zero, we load the value again as float, and check 5411 __ bind(&done);
5660 // if that is still 0.
5661 X87Fxch(input_reg);
5662 __ push(result_reg);
5663 __ fst_s(Operand(esp, 0));
5664 __ pop(result_reg);
5665 __ test(result_reg, Operand(result_reg));
5666 DeoptimizeIf(not_zero, instr->environment());
5667 __ bind(&done);
5668 }
5669 } 5412 }
5670 } 5413 }
5671 5414
5672 5415
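The deleted fast path in DoDoubleToI relied on cvttsd2si returning the "integer indefinite" value 0x80000000 for NaN and out-of-range inputs, and only then fell back to DoubleToIStub; the new TruncateDoubleToI presumably wraps the same pattern. A sketch with SSE2 intrinsics (illustrative; note that an input of exactly kMinInt also takes the slow path even though the fast result was already correct):

    #include <cstdint>
    #include <emmintrin.h>  // SSE2

    int32_t TruncateDoubleToInt32(double input, int32_t (*slow_path)(double)) {
      int32_t result = _mm_cvttsd_si32(_mm_set_sd(input));  // cvttsd2si
      if (result != INT32_MIN) return result;               // fast case succeeded
      // 0x80000000 signals NaN or out-of-range: let the slow path (what
      // DoubleToIStub did in the deleted code) compute the truncating,
      // modulo-2^32 result.
      return slow_path(input);
    }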
5673 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) { 5416 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
5674 LOperand* input = instr->value(); 5417 LOperand* input = instr->value();
5675 ASSERT(input->IsDoubleRegister()); 5418 ASSERT(input->IsDoubleRegister());
5676 LOperand* result = instr->result(); 5419 LOperand* result = instr->result();
5677 ASSERT(result->IsRegister()); 5420 ASSERT(result->IsRegister());
5678 Register result_reg = ToRegister(result); 5421 Register result_reg = ToRegister(result);
5679 5422
5680 Label done; 5423 Label bailout, done;
5681 if (CpuFeatures::IsSafeForSnapshot(SSE2)) { 5424 if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
5682 CpuFeatureScope scope(masm(), SSE2); 5425 CpuFeatureScope scope(masm(), SSE2);
5683
5684 XMMRegister input_reg = ToDoubleRegister(input); 5426 XMMRegister input_reg = ToDoubleRegister(input);
5685 5427 __ DoubleToI(result_reg, input_reg, xmm0,
5686 __ cvttsd2si(result_reg, Operand(input_reg)); 5428 instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
5687 __ cvtsi2sd(xmm0, Operand(result_reg));
5688 __ ucomisd(xmm0, input_reg);
5689 DeoptimizeIf(not_equal, instr->environment());
5690 DeoptimizeIf(parity_even, instr->environment()); // NaN.
5691 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
5692 // The integer converted back is equal to the original. We
5693 // only have to test if we got -0 as an input.
5694 __ test(result_reg, Operand(result_reg));
5695 __ j(not_zero, &done, Label::kNear);
5696 __ movmskpd(result_reg, input_reg);
5697 // Bit 0 contains the sign of the double in input_reg.
5698 // If input was positive, we are ok and return 0, otherwise
5699 // deoptimize.
5700 __ and_(result_reg, 1);
5701 DeoptimizeIf(not_zero, instr->environment());
5702 __ bind(&done);
5703 }
5704 } else { 5429 } else {
5705 X87Register input_reg = ToX87Register(input); 5430 X87Register input_reg = ToX87Register(input);
5706 X87Fxch(input_reg); 5431 X87Fxch(input_reg);
5707 __ push(result_reg); 5432 __ X87TOSToI(result_reg, instr->hydrogen()->GetMinusZeroMode(),
5708 X87Mov(Operand(esp, 0), input_reg, kX87IntOperand); 5433 &bailout, Label::kNear);
5709 __ fld(0); 5434 }
5710 __ fild_s(Operand(esp, 0)); 5435 __ jmp(&done, Label::kNear);
5711 __ pop(result_reg); 5436 __ bind(&bailout);
5712 __ FCmp(); 5437 DeoptimizeIf(no_condition, instr->environment());
5713 DeoptimizeIf(not_equal, instr->environment()); 5438 __ bind(&done);
5714 DeoptimizeIf(parity_even, instr->environment()); // NaN.
5715 5439
5716 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
5717 __ test(result_reg, Operand(result_reg));
5718 __ j(not_zero, &done, Label::kNear);
5719 // To check for minus zero, we load the value again as float, and check
5720 // if that is still 0.
5721 __ push(result_reg);
5722 __ fst_s(Operand(esp, 0));
5723 __ pop(result_reg);
5724 __ test(result_reg, Operand(result_reg));
5725 DeoptimizeIf(not_zero, instr->environment());
5726 __ bind(&done);
5727 }
5728 }
5729 __ SmiTag(result_reg); 5440 __ SmiTag(result_reg);
5730 DeoptimizeIf(overflow, instr->environment()); 5441 DeoptimizeIf(overflow, instr->environment());
5731 } 5442 }
5732 5443
5733 5444
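The SmiTag at the end of DoDoubleToSmi is a left shift by one on ia32, so the overflow deoptimization fires exactly when the converted integer needs more than 31 bits. A small illustrative check (not the V8 helper):

    #include <cstdint>

    // Returns false where "DeoptimizeIf(overflow, ...)" would fire above.
    bool SmiTagChecked(int32_t value, int32_t* tagged) {
      if (value < -(1 << 30) || value > (1 << 30) - 1) return false;  // needs > 31 bits
      *tagged = static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
      return true;
    }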
5734 void LCodeGen::DoCheckSmi(LCheckSmi* instr) { 5445 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
5735 LOperand* input = instr->value(); 5446 LOperand* input = instr->value();
5736 __ test(ToOperand(input), Immediate(kSmiTagMask)); 5447 __ test(ToOperand(input), Immediate(kSmiTagMask));
5737 DeoptimizeIf(not_zero, instr->environment()); 5448 DeoptimizeIf(not_zero, instr->environment());
5738 } 5449 }
(...skipping 811 matching lines...)
6550 FixedArray::kHeaderSize - kPointerSize)); 6261 FixedArray::kHeaderSize - kPointerSize));
6551 __ bind(&done); 6262 __ bind(&done);
6552 } 6263 }
6553 6264
6554 6265
6555 #undef __ 6266 #undef __
6556 6267
6557 } } // namespace v8::internal 6268 } } // namespace v8::internal
6558 6269
6559 #endif // V8_TARGET_ARCH_IA32 6270 #endif // V8_TARGET_ARCH_IA32