| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 341 matching lines...) |
| 352 instr->hydrogen_value()->id(), | 352 instr->hydrogen_value()->id(), |
| 353 instr->Mnemonic()); | 353 instr->Mnemonic()); |
| 354 } | 354 } |
| 355 | 355 |
| 356 if (!CpuFeatures::IsSupported(SSE2)) FlushX87StackIfNecessary(instr); | 356 if (!CpuFeatures::IsSupported(SSE2)) FlushX87StackIfNecessary(instr); |
| 357 | 357 |
| 358 RecordAndUpdatePosition(instr->position()); | 358 RecordAndUpdatePosition(instr->position()); |
| 359 | 359 |
| 360 instr->CompileToNative(this); | 360 instr->CompileToNative(this); |
| 361 | 361 |
| 362 if (!CpuFeatures::IsSupported(SSE2)) { | 362 if (!CpuFeatures::IsSupported(SSE2) && |
| 363 if (FLAG_debug_code && FLAG_enable_slow_asserts) { | 363 FLAG_debug_code && FLAG_enable_slow_asserts) { |
| 364 __ VerifyX87StackDepth(x87_stack_depth_); | 364 __ VerifyX87StackDepth(x87_stack_.depth()); |
| 365 } | |
| 366 } | 365 } |
| 367 } | 366 } |
| 368 EnsureSpaceForLazyDeopt(); | 367 EnsureSpaceForLazyDeopt(); |
| 369 return !is_aborted(); | 368 return !is_aborted(); |
| 370 } | 369 } |
| 371 | 370 |
| 372 | 371 |
| 373 bool LCodeGen::GenerateJumpTable() { | 372 bool LCodeGen::GenerateJumpTable() { |
| 374 Label needs_frame; | 373 Label needs_frame; |
| 375 if (jump_table_.length() > 0) { | 374 if (jump_table_.length() > 0) { |
| (...skipping 114 matching lines...) |
| 490 return X87Register::FromAllocationIndex(index); | 489 return X87Register::FromAllocationIndex(index); |
| 491 } | 490 } |
| 492 | 491 |
| 493 | 492 |
| 494 XMMRegister LCodeGen::ToDoubleRegister(int index) const { | 493 XMMRegister LCodeGen::ToDoubleRegister(int index) const { |
| 495 return XMMRegister::FromAllocationIndex(index); | 494 return XMMRegister::FromAllocationIndex(index); |
| 496 } | 495 } |
| 497 | 496 |
| 498 | 497 |
| 499 void LCodeGen::X87LoadForUsage(X87Register reg) { | 498 void LCodeGen::X87LoadForUsage(X87Register reg) { |
| 500 ASSERT(X87StackContains(reg)); | 499 ASSERT(x87_stack_.Contains(reg)); |
| 501 X87Fxch(reg); | 500 x87_stack_.Fxch(reg); |
| 502 x87_stack_depth_--; | 501 x87_stack_.pop(); |
| 503 } | 502 } |
| 504 | 503 |
| 505 | 504 |
| 506 void LCodeGen::X87Fxch(X87Register reg, int other_slot) { | 505 void LCodeGen::X87Stack::Fxch(X87Register reg, int other_slot) { |
| 507 ASSERT(X87StackContains(reg) && x87_stack_depth_ > other_slot); | 506 ASSERT(Contains(reg) && stack_depth_ > other_slot); |
| 508 int i = X87ArrayIndex(reg); | 507 int i = ArrayIndex(reg); |
| 509 int st = x87_st2idx(i); | 508 int st = st2idx(i); |
| 510 if (st != other_slot) { | 509 if (st != other_slot) { |
| 511 int other_i = x87_st2idx(other_slot); | 510 int other_i = st2idx(other_slot); |
| 512 X87Register other = x87_stack_[other_i]; | 511 X87Register other = stack_[other_i]; |
| 513 x87_stack_[other_i] = reg; | 512 stack_[other_i] = reg; |
| 514 x87_stack_[i] = other; | 513 stack_[i] = other; |
| 515 if (st == 0) { | 514 if (st == 0) { |
| 516 __ fxch(other_slot); | 515 __ fxch(other_slot); |
| 517 } else if (other_slot == 0) { | 516 } else if (other_slot == 0) { |
| 518 __ fxch(st); | 517 __ fxch(st); |
| 519 } else { | 518 } else { |
| 520 __ fxch(st); | 519 __ fxch(st); |
| 521 __ fxch(other_slot); | 520 __ fxch(other_slot); |
| 522 __ fxch(st); | 521 __ fxch(st); |
| 523 } | 522 } |
| 524 } | 523 } |
| 525 } | 524 } |
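A quick sanity check on the X87Stack::Fxch hunk above: when neither the register's hardware slot (st) nor the requested slot (other_slot) is st(0), the patch emits fxch(st); fxch(other_slot); fxch(st). The standalone C++ sketch below is not part of the patch; it only models the register-stack swap to confirm that this sequence exchanges the two slots while leaving st(0) untouched.

    #include <cassert>
    #include <utility>
    #include <vector>

    // Minimal model of the x87 register stack: index 0 is st(0).
    struct Fpu {
      std::vector<double> st;
      // fxch st(i): exchange st(0) with st(i).
      void fxch(int i) { std::swap(st[0], st[i]); }
    };

    int main() {
      Fpu fpu;
      fpu.st = {1.0, 2.0, 3.0, 4.0};  // st(0)..st(3)
      int slot = 3, other_slot = 1;   // neither is st(0)
      // The sequence emitted by X87Stack::Fxch when st != 0 and other_slot != 0.
      fpu.fxch(slot);
      fpu.fxch(other_slot);
      fpu.fxch(slot);
      // st(1) and st(3) are exchanged; st(0) and st(2) are unchanged.
      assert(fpu.st[0] == 1.0);
      assert(fpu.st[1] == 4.0);
      assert(fpu.st[2] == 3.0);
      assert(fpu.st[3] == 2.0);
      return 0;
    }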
| 526 | 525 |
| 527 | 526 |
| 528 int LCodeGen::x87_st2idx(int pos) { | 527 int LCodeGen::X87Stack::st2idx(int pos) { |
| 529 return x87_stack_depth_ - pos - 1; | 528 return stack_depth_ - pos - 1; |
| 530 } | 529 } |
| 531 | 530 |
| 532 | 531 |
| 533 int LCodeGen::X87ArrayIndex(X87Register reg) { | 532 int LCodeGen::X87Stack::ArrayIndex(X87Register reg) { |
| 534 for (int i = 0; i < x87_stack_depth_; i++) { | 533 for (int i = 0; i < stack_depth_; i++) { |
| 535 if (x87_stack_[i].is(reg)) return i; | 534 if (stack_[i].is(reg)) return i; |
| 536 } | 535 } |
| 537 UNREACHABLE(); | 536 UNREACHABLE(); |
| 538 return -1; | 537 return -1; |
| 539 } | 538 } |
| 540 | 539 |
| 541 | 540 |
| 542 bool LCodeGen::X87StackContains(X87Register reg) { | 541 bool LCodeGen::X87Stack::Contains(X87Register reg) { |
| 543 for (int i = 0; i < x87_stack_depth_; i++) { | 542 for (int i = 0; i < stack_depth_; i++) { |
| 544 if (x87_stack_[i].is(reg)) return true; | 543 if (stack_[i].is(reg)) return true; |
| 545 } | 544 } |
| 546 return false; | 545 return false; |
| 547 } | 546 } |
| 548 | 547 |
| 549 | 548 |
| 550 void LCodeGen::X87Free(X87Register reg) { | 549 void LCodeGen::X87Stack::Free(X87Register reg) { |
| 551 ASSERT(X87StackContains(reg)); | 550 ASSERT(Contains(reg)); |
| 552 int i = X87ArrayIndex(reg); | 551 int i = ArrayIndex(reg); |
| 553 int st = x87_st2idx(i); | 552 int st = st2idx(i); |
| 554 if (st > 0) { | 553 if (st > 0) { |
| 555 // keep track of how fstp(i) changes the order of elements | 554 // keep track of how fstp(i) changes the order of elements |
| 556 int tos_i = x87_st2idx(0); | 555 int tos_i = st2idx(0); |
| 557 x87_stack_[i] = x87_stack_[tos_i]; | 556 stack_[i] = stack_[tos_i]; |
| 558 } | 557 } |
| 559 x87_stack_depth_--; | 558 pop(); |
| 560 __ fstp(st); | 559 __ fstp(st); |
| 561 } | 560 } |
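On X87Stack::Free above: fstp st(i) stores st(0) into st(i) and then pops the register stack, so the value that was on top ends up in the freed slot. The tracking array mirrors this by copying the entry at st2idx(0) into the freed entry before shrinking the depth. A minimal standalone C++ model of that invariant, assuming the mapping st2idx(pos) = depth - pos - 1 used by the patch; the register names are illustrative only.

    #include <cassert>
    #include <string>
    #include <vector>

    // Tracking array: tracked[depth - 1] corresponds to st(0), tracked[0] to the
    // deepest slot, i.e. st2idx(pos) = depth - pos - 1.
    struct Model {
      std::vector<std::string> tracked;  // virtual registers, bottom first
      std::vector<std::string> hw;       // hardware stack, hw[0] == st(0)
      int st2idx(int pos) const { return static_cast<int>(tracked.size()) - pos - 1; }

      void Free(int i) {  // i = array index of the register to drop
        int st = st2idx(i);
        if (st > 0) tracked[i] = tracked[st2idx(0)];  // old top takes the freed slot
        tracked.pop_back();                           // pop(): depth--
        // fstp st(st): copy st(0) into st(st), then pop the hardware stack.
        hw[st] = hw[0];
        hw.erase(hw.begin());
      }

      void Check() const {  // every tracked entry maps to the right st slot
        for (int i = 0; i < static_cast<int>(tracked.size()); i++)
          assert(tracked[i] == hw[st2idx(i)]);
      }
    };

    int main() {
      Model m;
      m.tracked = {"d0", "d1", "d2"};  // d2 is on top
      m.hw      = {"d2", "d1", "d0"};
      m.Free(0);   // free the deepest register, d0: d2 takes its slot, depth is 2
      m.Check();
      m.Free(1);   // free the register now at st(0), which is d1
      m.Check();
      return 0;
    }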
| 562 | 561 |
| 563 | 562 |
| 564 void LCodeGen::X87Mov(X87Register dst, Operand src, X87OperandType opts) { | 563 void LCodeGen::X87Mov(X87Register dst, Operand src, X87OperandType opts) { |
| 565 if (X87StackContains(dst)) { | 564 if (x87_stack_.Contains(dst)) { |
| 566 X87Fxch(dst); | 565 x87_stack_.Fxch(dst); |
| 567 __ fstp(0); | 566 __ fstp(0); |
| 568 } else { | 567 } else { |
| 569 ASSERT(x87_stack_depth_ < X87Register::kNumAllocatableRegisters); | 568 x87_stack_.push(dst); |
| 570 x87_stack_[x87_stack_depth_] = dst; | |
| 571 x87_stack_depth_++; | |
| 572 } | 569 } |
| 573 X87Fld(src, opts); | 570 X87Fld(src, opts); |
| 574 } | 571 } |
| 575 | 572 |
| 576 | 573 |
| 577 void LCodeGen::X87Fld(Operand src, X87OperandType opts) { | 574 void LCodeGen::X87Fld(Operand src, X87OperandType opts) { |
| 578 if (opts == kX87DoubleOperand) { | 575 ASSERT(!src.is_reg_only()); |
| 579 __ fld_d(src); | 576 switch (opts) { |
| 580 } else if (opts == kX87FloatOperand) { | 577 case kX87DoubleOperand: |
| 581 __ fld_s(src); | 578 __ fld_d(src); |
| 582 } else if (opts == kX87IntOperand) { | 579 break; |
| 583 __ fild_s(src); | 580 case kX87FloatOperand: |
| 584 } else { | 581 __ fld_s(src); |
| 585 UNREACHABLE(); | 582 break; |
| 583 case kX87IntOperand: |
| 584 __ fild_s(src); |
| 585 break; |
| 586 default: |
| 587 UNREACHABLE(); |
| 586 } | 588 } |
| 587 } | 589 } |
| 588 | 590 |
| 589 | 591 |
| 590 void LCodeGen::X87Mov(Operand dst, X87Register src) { | 592 void LCodeGen::X87Mov(Operand dst, X87Register src, X87OperandType opts) { |
| 591 X87Fxch(src); | 593 ASSERT(!dst.is_reg_only()); |
| 592 __ fst_d(dst); | 594 x87_stack_.Fxch(src); |
| 595 switch (opts) { |
| 596 case kX87DoubleOperand: |
| 597 __ fst_d(dst); |
| 598 break; |
| 599 case kX87IntOperand: |
| 600 __ fist_s(dst); |
| 601 break; |
| 602 default: |
| 603 UNREACHABLE(); |
| 604 } |
| 593 } | 605 } |
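On the new X87Mov(Operand, X87Register, X87OperandType) overload above: kX87DoubleOperand stores the full 64-bit double with fst_d, while kX87IntOperand stores a 32-bit integer with fist_s, which converts using the FPU's current rounding mode (round-to-nearest-even under the default control word) rather than truncating. The sketch below only models that semantic difference in portable C++; it emits no x87 code and assumes the default rounding mode is in effect.

    #include <cassert>
    #include <cmath>

    // fst_d keeps the full double; fist_s converts to int32 with round-to-nearest-even
    // (default FPU mode), which is not the same as C++'s truncating static_cast.
    int main() {
      double v = 2.5;
      double as_double = v;                                     // fst_d: value unchanged
      int as_int_nearest = static_cast<int>(std::nearbyint(v)); // fist_s under the default mode
      int as_int_truncated = static_cast<int>(v);               // truncating conversion
      assert(as_double == 2.5);
      assert(as_int_nearest == 2);    // 2.5 rounds to even
      assert(as_int_truncated == 2);  // happens to agree here...
      v = 3.5;
      assert(static_cast<int>(std::nearbyint(v)) == 4);  // ...but not in general
      assert(static_cast<int>(v) == 3);
      return 0;
    }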
| 594 | 606 |
| 595 | 607 |
| 596 void LCodeGen::X87PrepareToWrite(X87Register reg) { | 608 void LCodeGen::X87Stack::PrepareToWrite(X87Register reg) { |
| 597 if (X87StackContains(reg)) { | 609 if (Contains(reg)) { |
| 598 X87Free(reg); | 610 Free(reg); |
| 599 } | 611 } |
| 600 // Mark this register as the next register to write to | 612 // Mark this register as the next register to write to |
| 601 x87_stack_[x87_stack_depth_] = reg; | 613 stack_[stack_depth_] = reg; |
| 602 } | 614 } |
| 603 | 615 |
| 604 | 616 |
| 605 void LCodeGen::X87CommitWrite(X87Register reg) { | 617 void LCodeGen::X87Stack::CommitWrite(X87Register reg) { |
| 606 // Assert the reg is prepared to write, but not on the virtual stack yet | 618 // Assert the reg is prepared to write, but not on the virtual stack yet |
| 607 ASSERT(!X87StackContains(reg) && x87_stack_[x87_stack_depth_].is(reg) && | 619 ASSERT(!Contains(reg) && stack_[stack_depth_].is(reg) && |
| 608 x87_stack_depth_ < X87Register::kNumAllocatableRegisters); | 620 stack_depth_ < X87Register::kNumAllocatableRegisters); |
| 609 x87_stack_depth_++; | 621 stack_depth_++; |
| 610 } | 622 } |
| 611 | 623 |
| 612 | 624 |
| 613 void LCodeGen::X87PrepareBinaryOp( | 625 void LCodeGen::X87PrepareBinaryOp( |
| 614 X87Register left, X87Register right, X87Register result) { | 626 X87Register left, X87Register right, X87Register result) { |
| 615 // You need to use DefineSameAsFirst for x87 instructions | 627 // You need to use DefineSameAsFirst for x87 instructions |
| 616 ASSERT(result.is(left)); | 628 ASSERT(result.is(left)); |
| 617 X87Fxch(right, 1); | 629 x87_stack_.Fxch(right, 1); |
| 618 X87Fxch(left); | 630 x87_stack_.Fxch(left); |
| 619 } | 631 } |
| 620 | 632 |
| 621 | 633 |
| 622 void LCodeGen::FlushX87StackIfNecessary(LInstruction* instr) { | 634 void LCodeGen::X87Stack::FlushIfNecessary(LInstruction* instr, LCodeGen* cgen) { |
| 623 if (x87_stack_depth_ > 0 && instr->ClobbersDoubleRegisters()) { | 635 if (stack_depth_ > 0 && instr->ClobbersDoubleRegisters()) { |
| 624 bool double_inputs = instr->HasDoubleRegisterInput(); | 636 bool double_inputs = instr->HasDoubleRegisterInput(); |
| 625 | 637 |
| 626 // Flush stack from tos down, since FreeX87() will mess with tos | 638 // Flush stack from tos down, since FreeX87() will mess with tos |
| 627 for (int i = x87_stack_depth_-1; i >= 0; i--) { | 639 for (int i = stack_depth_-1; i >= 0; i--) { |
| 628 X87Register reg = x87_stack_[i]; | 640 X87Register reg = stack_[i]; |
| 629 // Skip registers which contain the inputs for the next instruction | 641 // Skip registers which contain the inputs for the next instruction |
| 630 // when flushing the stack | 642 // when flushing the stack |
| 631 if (double_inputs && instr->IsDoubleInput(reg, this)) { | 643 if (double_inputs && instr->IsDoubleInput(reg, cgen)) { |
| 632 continue; | 644 continue; |
| 633 } | 645 } |
| 634 X87Free(reg); | 646 Free(reg); |
| 635 if (i < x87_stack_depth_-1) i++; | 647 if (i < stack_depth_-1) i++; |
| 636 } | 648 } |
| 637 } | 649 } |
| 638 if (instr->IsReturn()) { | 650 if (instr->IsReturn()) { |
| 639 while (x87_stack_depth_ > 0) { | 651 while (stack_depth_ > 0) { |
| 640 __ fstp(0); | 652 __ fstp(0); |
| 641 x87_stack_depth_--; | 653 stack_depth_--; |
| 642 } | 654 } |
| 643 } | 655 } |
| 644 } | 656 } |
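On X87Stack::FlushIfNecessary above: because Free() relocates the top tracking entry into the freed slot, the loop bumps the index back up after a free so the relocated entry is inspected again before continuing downward. A standalone C++ sketch of that loop over the tracking array alone (hardware effects omitted; the register names and the inputs set are illustrative):

    #include <cassert>
    #include <set>
    #include <string>
    #include <vector>

    int main() {
      // Virtual x87 stack, bottom first; the last element corresponds to st(0).
      std::vector<std::string> stack = {"d0", "d1", "d2", "d3"};
      // Registers that are inputs of the next instruction and must survive the flush.
      const std::set<std::string> inputs = {"d1", "d3"};

      // Mirrors X87Stack::FlushIfNecessary: walk from the top down and free everything
      // that is not an input.  Free moves the top entry into slot i, so the index is
      // bumped to revisit that slot on the next pass.
      for (int i = static_cast<int>(stack.size()) - 1; i >= 0; i--) {
        if (inputs.count(stack[i])) continue;  // skip instruction inputs
        // Free(stack[i]): old top takes the freed slot, depth shrinks by one.
        stack[i] = stack.back();
        stack.pop_back();
        if (i < static_cast<int>(stack.size()) - 1) i++;
      }

      // Only the inputs are left on the virtual stack.
      assert(stack.size() == 2);
      for (const std::string& reg : stack) assert(inputs.count(reg) == 1);
      return 0;
    }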
| 645 | 657 |
| 646 | 658 |
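As a usage note on the sketch above: the loop deliberately iterates over array indices rather than hardware slots, so the only coupling to the physical FPU stack is through Free(), which keeps both views consistent.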
| 647 void LCodeGen::EmitFlushX87ForDeopt() { | 659 void LCodeGen::EmitFlushX87ForDeopt() { |
| 648 for (int i = 0; i < x87_stack_depth_; i++) __ fstp(0); | 660 // The deoptimizer does not support X87 Registers. But as long as we |
| 661 // deopt from a stub it's not a problem, since we will re-materialize the |
| 662 // original stub inputs, which can't be double registers. |
| 663 ASSERT(info()->IsStub()); |
| 664 if (FLAG_debug_code && FLAG_enable_slow_asserts) { |
| 665 __ pushfd(); |
| 666 __ VerifyX87StackDepth(x87_stack_.depth()); |
| 667 __ popfd(); |
| 668 } |
| 669 for (int i = 0; i < x87_stack_.depth(); i++) __ fstp(0); |
| 649 } | 670 } |
| 650 | 671 |
| 651 | 672 |
| 652 Register LCodeGen::ToRegister(LOperand* op) const { | 673 Register LCodeGen::ToRegister(LOperand* op) const { |
| 653 ASSERT(op->IsRegister()); | 674 ASSERT(op->IsRegister()); |
| 654 return ToRegister(op->index()); | 675 return ToRegister(op->index()); |
| 655 } | 676 } |
| 656 | 677 |
| 657 | 678 |
| 658 X87Register LCodeGen::ToX87Register(LOperand* op) const { | 679 X87Register LCodeGen::ToX87Register(LOperand* op) const { |
| (...skipping 337 matching lines...) |
| 996 __ bind(&no_deopt); | 1017 __ bind(&no_deopt); |
| 997 __ mov(Operand::StaticVariable(count), eax); | 1018 __ mov(Operand::StaticVariable(count), eax); |
| 998 __ pop(eax); | 1019 __ pop(eax); |
| 999 __ popfd(); | 1020 __ popfd(); |
| 1000 } | 1021 } |
| 1001 | 1022 |
| 1002 // Before Instructions which can deopt, we normally flush the x87 stack. But | 1023 // Before Instructions which can deopt, we normally flush the x87 stack. But |
| 1003 // we can have inputs or outputs of the current instruction on the stack, | 1024 // we can have inputs or outputs of the current instruction on the stack, |
| 1004 // thus we need to flush them here from the physical stack to leave it in a | 1025 // thus we need to flush them here from the physical stack to leave it in a |
| 1005 // consistent state. | 1026 // consistent state. |
| 1006 if (x87_stack_depth_ > 0) { | 1027 if (x87_stack_.depth() > 0) { |
| 1007 Label done; | 1028 Label done; |
| 1008 if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear); | 1029 if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear); |
| 1009 EmitFlushX87ForDeopt(); | 1030 EmitFlushX87ForDeopt(); |
| 1010 __ bind(&done); | 1031 __ bind(&done); |
| 1011 } | 1032 } |
| 1012 | 1033 |
| 1013 if (info()->ShouldTrapOnDeopt()) { | 1034 if (info()->ShouldTrapOnDeopt()) { |
| 1014 Label done; | 1035 Label done; |
| 1015 if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear); | 1036 if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear); |
| 1016 __ int3(); | 1037 __ int3(); |
| (...skipping 841 matching lines...) |
| 1858 void LCodeGen::DoConstantS(LConstantS* instr) { | 1879 void LCodeGen::DoConstantS(LConstantS* instr) { |
| 1859 __ Set(ToRegister(instr->result()), Immediate(instr->value())); | 1880 __ Set(ToRegister(instr->result()), Immediate(instr->value())); |
| 1860 } | 1881 } |
| 1861 | 1882 |
| 1862 | 1883 |
| 1863 void LCodeGen::DoConstantD(LConstantD* instr) { | 1884 void LCodeGen::DoConstantD(LConstantD* instr) { |
| 1864 double v = instr->value(); | 1885 double v = instr->value(); |
| 1865 uint64_t int_val = BitCast<uint64_t, double>(v); | 1886 uint64_t int_val = BitCast<uint64_t, double>(v); |
| 1866 int32_t lower = static_cast<int32_t>(int_val); | 1887 int32_t lower = static_cast<int32_t>(int_val); |
| 1867 int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt)); | 1888 int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt)); |
| 1889 ASSERT(instr->result()->IsDoubleRegister()); |
| 1868 | 1890 |
| 1869 if (!CpuFeatures::IsSafeForSnapshot(SSE2)) { | 1891 if (!CpuFeatures::IsSafeForSnapshot(SSE2)) { |
| 1870 __ push(Immediate(upper)); | 1892 __ push(Immediate(upper)); |
| 1871 __ push(Immediate(lower)); | 1893 __ push(Immediate(lower)); |
| 1872 X87Mov(ToX87Register(instr->result()), Operand(esp, 0)); | 1894 X87Register reg = ToX87Register(instr->result()); |
| 1895 X87Mov(reg, Operand(esp, 0)); |
| 1873 __ add(Operand(esp), Immediate(kDoubleSize)); | 1896 __ add(Operand(esp), Immediate(kDoubleSize)); |
| 1874 } else { | 1897 } else { |
| 1875 CpuFeatureScope scope1(masm(), SSE2); | 1898 CpuFeatureScope scope1(masm(), SSE2); |
| 1876 ASSERT(instr->result()->IsDoubleRegister()); | |
| 1877 XMMRegister res = ToDoubleRegister(instr->result()); | 1899 XMMRegister res = ToDoubleRegister(instr->result()); |
| 1878 if (int_val == 0) { | 1900 if (int_val == 0) { |
| 1879 __ xorps(res, res); | 1901 __ xorps(res, res); |
| 1880 } else { | 1902 } else { |
| 1881 Register temp = ToRegister(instr->temp()); | 1903 Register temp = ToRegister(instr->temp()); |
| 1882 if (CpuFeatures::IsSupported(SSE4_1)) { | 1904 if (CpuFeatures::IsSupported(SSE4_1)) { |
| 1883 CpuFeatureScope scope2(masm(), SSE4_1); | 1905 CpuFeatureScope scope2(masm(), SSE4_1); |
| 1884 if (lower != 0) { | 1906 if (lower != 0) { |
| 1885 __ Set(temp, Immediate(lower)); | 1907 __ Set(temp, Immediate(lower)); |
| 1886 __ movd(res, Operand(temp)); | 1908 __ movd(res, Operand(temp)); |
| (...skipping 372 matching lines...) |
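Back in the DoConstantD hunk above, the non-SSE2 path materializes the constant by pushing its upper and lower 32-bit halves and loading the resulting 8-byte slot with fld_d. A standalone C++ sketch of that split and reassembly, assuming ia32's little-endian layout and modelling the patch's BitCast with memcpy:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main() {
      double v = -123.456;

      // BitCast<uint64_t, double>(v) from the patch, modelled with memcpy.
      uint64_t int_val;
      std::memcpy(&int_val, &v, sizeof(int_val));
      int32_t lower = static_cast<int32_t>(int_val);
      int32_t upper = static_cast<int32_t>(int_val >> 32);  // kBitsPerInt == 32 on ia32

      // push(upper); push(lower) leaves the low half at the lower address, which is
      // exactly the little-endian memory image of the double that fld_d expects.
      int32_t slot[2] = {lower, upper};
      double reloaded;
      std::memcpy(&reloaded, slot, sizeof(reloaded));
      assert(reloaded == v);
      return 0;
    }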
| 2259 __ cmp(FieldOperand(reg, HeapObject::kMapOffset), | 2281 __ cmp(FieldOperand(reg, HeapObject::kMapOffset), |
| 2260 factory()->heap_number_map()); | 2282 factory()->heap_number_map()); |
| 2261 EmitBranch(instr, equal); | 2283 EmitBranch(instr, equal); |
| 2262 } | 2284 } |
| 2263 } | 2285 } |
| 2264 | 2286 |
| 2265 | 2287 |
| 2266 void LCodeGen::DoBranch(LBranch* instr) { | 2288 void LCodeGen::DoBranch(LBranch* instr) { |
| 2267 Representation r = instr->hydrogen()->value()->representation(); | 2289 Representation r = instr->hydrogen()->value()->representation(); |
| 2268 if (r.IsSmiOrInteger32()) { | 2290 if (r.IsSmiOrInteger32()) { |
| 2269 ASSERT(!info()->IsStub()); | |
| 2270 Register reg = ToRegister(instr->value()); | 2291 Register reg = ToRegister(instr->value()); |
| 2271 __ test(reg, Operand(reg)); | 2292 __ test(reg, Operand(reg)); |
| 2272 EmitBranch(instr, not_zero); | 2293 EmitBranch(instr, not_zero); |
| 2273 } else if (r.IsDouble()) { | 2294 } else if (r.IsDouble()) { |
| 2274 ASSERT(!info()->IsStub()); | 2295 ASSERT(!info()->IsStub()); |
| 2275 CpuFeatureScope scope(masm(), SSE2); | 2296 CpuFeatureScope scope(masm(), SSE2); |
| 2276 XMMRegister reg = ToDoubleRegister(instr->value()); | 2297 XMMRegister reg = ToDoubleRegister(instr->value()); |
| 2277 __ xorps(xmm0, xmm0); | 2298 __ xorps(xmm0, xmm0); |
| 2278 __ ucomisd(reg, xmm0); | 2299 __ ucomisd(reg, xmm0); |
| 2279 EmitBranch(instr, not_equal); | 2300 EmitBranch(instr, not_equal); |
| (...skipping 2587 matching lines...) |
| 4867 | 4888 |
| 4868 void LCodeGen::DoStringAdd(LStringAdd* instr) { | 4889 void LCodeGen::DoStringAdd(LStringAdd* instr) { |
| 4869 EmitPushTaggedOperand(instr->left()); | 4890 EmitPushTaggedOperand(instr->left()); |
| 4870 EmitPushTaggedOperand(instr->right()); | 4891 EmitPushTaggedOperand(instr->right()); |
| 4871 StringAddStub stub(instr->hydrogen()->flags()); | 4892 StringAddStub stub(instr->hydrogen()->flags()); |
| 4872 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); | 4893 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); |
| 4873 } | 4894 } |
| 4874 | 4895 |
| 4875 | 4896 |
| 4876 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { | 4897 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { |
| 4898 LOperand* input = instr->value(); |
| 4899 LOperand* output = instr->result(); |
| 4900 ASSERT(input->IsRegister() || input->IsStackSlot()); |
| 4901 ASSERT(output->IsDoubleRegister()); |
| 4877 if (CpuFeatures::IsSupported(SSE2)) { | 4902 if (CpuFeatures::IsSupported(SSE2)) { |
| 4878 CpuFeatureScope scope(masm(), SSE2); | 4903 CpuFeatureScope scope(masm(), SSE2); |
| 4879 LOperand* input = instr->value(); | |
| 4880 ASSERT(input->IsRegister() || input->IsStackSlot()); | |
| 4881 LOperand* output = instr->result(); | |
| 4882 ASSERT(output->IsDoubleRegister()); | |
| 4883 __ cvtsi2sd(ToDoubleRegister(output), ToOperand(input)); | 4904 __ cvtsi2sd(ToDoubleRegister(output), ToOperand(input)); |
| 4905 } else if (input->IsRegister()) { |
| 4906 Register input_reg = ToRegister(input); |
| 4907 __ push(input_reg); |
| 4908 X87Mov(ToX87Register(output), Operand(esp, 0), kX87IntOperand); |
| 4909 __ pop(input_reg); |
| 4884 } else { | 4910 } else { |
| 4885 UNREACHABLE(); | 4911 X87Mov(ToX87Register(output), ToOperand(input), kX87IntOperand); |
| 4886 } | 4912 } |
| 4887 } | 4913 } |
| 4888 | 4914 |
| 4889 | 4915 |
| 4890 void LCodeGen::DoInteger32ToSmi(LInteger32ToSmi* instr) { | 4916 void LCodeGen::DoInteger32ToSmi(LInteger32ToSmi* instr) { |
| 4891 Register input = ToRegister(instr->value()); | 4917 Register input = ToRegister(instr->value()); |
| 4892 __ SmiTag(input); | 4918 __ SmiTag(input); |
| 4893 if (!instr->hydrogen()->value()->HasRange() || | 4919 if (!instr->hydrogen()->value()->HasRange() || |
| 4894 !instr->hydrogen()->value()->range()->IsInSmiRange()) { | 4920 !instr->hydrogen()->value()->range()->IsInSmiRange()) { |
| 4895 DeoptimizeIf(overflow, instr->environment()); | 4921 DeoptimizeIf(overflow, instr->environment()); |
| (...skipping 667 matching lines...) |
| 5563 mode); | 5589 mode); |
| 5564 } | 5590 } |
| 5565 } | 5591 } |
| 5566 | 5592 |
| 5567 | 5593 |
| 5568 void LCodeGen::DoDoubleToI(LDoubleToI* instr) { | 5594 void LCodeGen::DoDoubleToI(LDoubleToI* instr) { |
| 5569 LOperand* input = instr->value(); | 5595 LOperand* input = instr->value(); |
| 5570 ASSERT(input->IsDoubleRegister()); | 5596 ASSERT(input->IsDoubleRegister()); |
| 5571 LOperand* result = instr->result(); | 5597 LOperand* result = instr->result(); |
| 5572 ASSERT(result->IsRegister()); | 5598 ASSERT(result->IsRegister()); |
| 5573 CpuFeatureScope scope(masm(), SSE2); | |
| 5574 | |
| 5575 XMMRegister input_reg = ToDoubleRegister(input); | |
| 5576 Register result_reg = ToRegister(result); | 5599 Register result_reg = ToRegister(result); |
| 5577 | 5600 |
| 5578 __ cvttsd2si(result_reg, Operand(input_reg)); | 5601 Label done; |
| 5602 if (CpuFeatures::IsSafeForSnapshot(SSE2)) { |
| 5603 CpuFeatureScope scope(masm(), SSE2); |
| 5579 | 5604 |
| 5580 if (instr->truncating()) { | 5605 XMMRegister input_reg = ToDoubleRegister(input); |
| 5581 // Performs a truncating conversion of a floating point number as used by | 5606 |
| 5582 // the JS bitwise operations. | 5607 __ cvttsd2si(result_reg, Operand(input_reg)); |
| 5583 Label fast_case_succeeded; | 5608 |
| 5584 __ cmp(result_reg, 0x80000000u); | 5609 if (instr->truncating()) { |
| 5585 __ j(not_equal, &fast_case_succeeded); | 5610 // Performs a truncating conversion of a floating point number as used by |
| 5586 __ sub(esp, Immediate(kDoubleSize)); | 5611 // the JS bitwise operations. |
| 5587 __ movdbl(MemOperand(esp, 0), input_reg); | 5612 Label fast_case_succeeded; |
| 5588 DoubleToIStub stub(esp, result_reg, 0, true); | 5613 __ cmp(result_reg, 0x80000000u); |
| 5589 __ call(stub.GetCode(isolate()), RelocInfo::CODE_TARGET); | 5614 __ j(not_equal, &fast_case_succeeded); |
| 5590 __ add(esp, Immediate(kDoubleSize)); | 5615 __ sub(esp, Immediate(kDoubleSize)); |
| 5591 __ bind(&fast_case_succeeded); | 5616 __ movdbl(MemOperand(esp, 0), input_reg); |
| 5617 DoubleToIStub stub(esp, result_reg, 0, true); |
| 5618 __ call(stub.GetCode(isolate()), RelocInfo::CODE_TARGET); |
| 5619 __ add(esp, Immediate(kDoubleSize)); |
| 5620 __ bind(&fast_case_succeeded); |
| 5621 } else { |
| 5622 __ cvtsi2sd(xmm0, Operand(result_reg)); |
| 5623 __ ucomisd(xmm0, input_reg); |
| 5624 DeoptimizeIf(not_equal, instr->environment()); |
| 5625 DeoptimizeIf(parity_even, instr->environment()); // NaN. |
| 5626 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 5627 // The integer converted back is equal to the original. We |
| 5628 // only have to test if we got -0 as an input. |
| 5629 __ test(result_reg, Operand(result_reg)); |
| 5630 __ j(not_zero, &done, Label::kNear); |
| 5631 __ movmskpd(result_reg, input_reg); |
| 5632 // Bit 0 contains the sign of the double in input_reg. |
| 5633 // If input was positive, we are ok and return 0, otherwise |
| 5634 // deoptimize. |
| 5635 __ and_(result_reg, 1); |
| 5636 DeoptimizeIf(not_zero, instr->environment()); |
| 5637 } |
| 5638 __ bind(&done); |
| 5639 } |
| 5592 } else { | 5640 } else { |
| 5593 Label done; | 5641 X87Register input_reg = ToX87Register(input); |
| 5594 __ cvtsi2sd(xmm0, Operand(result_reg)); | 5642 __ push(result_reg); |
| 5595 __ ucomisd(xmm0, input_reg); | 5643 X87Mov(Operand(esp, 0), input_reg, kX87IntOperand); |
| 5596 DeoptimizeIf(not_equal, instr->environment()); | 5644 if (instr->truncating()) { |
| 5597 DeoptimizeIf(parity_even, instr->environment()); // NaN. | 5645 __ pop(result_reg); |
| 5646 } else { |
| 5647 X87Fxch(input_reg); |
| 5648 __ fld(0); |
| 5649 __ fild_s(Operand(esp, 0)); |
| 5650 __ pop(result_reg); |
| 5651 __ FCmp(); |
| 5652 DeoptimizeIf(not_equal, instr->environment()); |
| 5653 DeoptimizeIf(parity_even, instr->environment()); // NaN. |
| 5654 } |
| 5598 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 5655 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 5599 // The integer converted back is equal to the original. We | |
| 5600 // only have to test if we got -0 as an input. | |
| 5601 __ test(result_reg, Operand(result_reg)); | 5656 __ test(result_reg, Operand(result_reg)); |
| 5602 __ j(not_zero, &done, Label::kNear); | 5657 __ j(not_zero, &done, Label::kNear); |
| 5603 __ movmskpd(result_reg, input_reg); | 5658 // To check for minus zero, we load the value again as float, and check |
| 5604 // Bit 0 contains the sign of the double in input_reg. | 5659 // if that is still 0. |
| 5605 // If input was positive, we are ok and return 0, otherwise | 5660 X87Fxch(input_reg); |
| 5606 // deoptimize. | 5661 __ push(result_reg); |
| 5607 __ and_(result_reg, 1); | 5662 __ fst_s(Operand(esp, 0)); |
| 5663 __ pop(result_reg); |
| 5664 __ test(result_reg, Operand(result_reg)); |
| 5608 DeoptimizeIf(not_zero, instr->environment()); | 5665 DeoptimizeIf(not_zero, instr->environment()); |
| 5666 __ bind(&done); |
| 5609 } | 5667 } |
| 5610 __ bind(&done); | |
| 5611 } | 5668 } |
| 5612 } | 5669 } |
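On the new x87 path in DoDoubleToI above: the non-truncating case stores the value as an int32 (kX87IntOperand), loads that integer back onto the FPU and deoptimizes if it no longer compares equal to the original; minus zero is then caught by re-storing the value as a 32-bit float and testing its bit pattern, which is non-zero for -0.0f (only the sign bit is set) and all-zero for +0.0f. The standalone C++ sketch below models those two checks; std::nearbyint stands in for the FPU's round-to-nearest integer store, and no x87 code is emitted.

    #include <cassert>
    #include <cmath>
    #include <cstdint>
    #include <cstring>

    // (a) Round-trip check: convert to int32 with the FPU's rounding (modelled by
    // nearbyint), load it back as a double, and compare with the original.  Any
    // input that is not an exact int32 compares unequal, which is where the patch
    // deoptimizes.
    static bool RoundTripIsExact(double v) {
      int32_t as_int = static_cast<int32_t>(std::nearbyint(v));
      return static_cast<double>(as_int) == v;
    }

    // (b) Minus-zero check: store the value as a 32-bit float and test its bits;
    // only the sign bit survives for -0.0f, so "test reg, reg" is non-zero.
    static bool LooksLikeMinusZero(double v) {
      float f = static_cast<float>(v);
      uint32_t bits;
      std::memcpy(&bits, &f, sizeof(bits));
      return bits != 0;  // the patch only reaches this after ruling out non-zero results
    }

    int main() {
      assert(RoundTripIsExact(42.0));
      assert(!RoundTripIsExact(42.5));   // would deoptimize
      assert(!LooksLikeMinusZero(0.0));
      assert(LooksLikeMinusZero(-0.0));  // would deoptimize under kBailoutOnMinusZero
      return 0;
    }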
| 5613 | 5670 |
| 5614 | 5671 |
| 5615 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) { | 5672 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) { |
| 5616 LOperand* input = instr->value(); | 5673 LOperand* input = instr->value(); |
| 5617 ASSERT(input->IsDoubleRegister()); | 5674 ASSERT(input->IsDoubleRegister()); |
| 5618 LOperand* result = instr->result(); | 5675 LOperand* result = instr->result(); |
| 5619 ASSERT(result->IsRegister()); | 5676 ASSERT(result->IsRegister()); |
| 5620 CpuFeatureScope scope(masm(), SSE2); | |
| 5621 | |
| 5622 XMMRegister input_reg = ToDoubleRegister(input); | |
| 5623 Register result_reg = ToRegister(result); | 5677 Register result_reg = ToRegister(result); |
| 5624 | 5678 |
| 5625 Label done; | 5679 Label done; |
| 5626 __ cvttsd2si(result_reg, Operand(input_reg)); | 5680 if (CpuFeatures::IsSafeForSnapshot(SSE2)) { |
| 5627 __ cvtsi2sd(xmm0, Operand(result_reg)); | 5681 CpuFeatureScope scope(masm(), SSE2); |
| 5628 __ ucomisd(xmm0, input_reg); | |
| 5629 DeoptimizeIf(not_equal, instr->environment()); | |
| 5630 DeoptimizeIf(parity_even, instr->environment()); // NaN. | |
| 5631 | 5682 |
| 5632 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 5683 XMMRegister input_reg = ToDoubleRegister(input); |
| 5633 // The integer converted back is equal to the original. We | 5684 |
| 5634 // only have to test if we got -0 as an input. | 5685 __ cvttsd2si(result_reg, Operand(input_reg)); |
| 5635 __ test(result_reg, Operand(result_reg)); | 5686 __ cvtsi2sd(xmm0, Operand(result_reg)); |
| 5636 __ j(not_zero, &done, Label::kNear); | 5687 __ ucomisd(xmm0, input_reg); |
| 5637 __ movmskpd(result_reg, input_reg); | 5688 DeoptimizeIf(not_equal, instr->environment()); |
| 5638 // Bit 0 contains the sign of the double in input_reg. | 5689 DeoptimizeIf(parity_even, instr->environment()); // NaN. |
| 5639 // If input was positive, we are ok and return 0, otherwise | 5690 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 5640 // deoptimize. | 5691 // The integer converted back is equal to the original. We |
| 5641 __ and_(result_reg, 1); | 5692 // only have to test if we got -0 as an input. |
| 5642 DeoptimizeIf(not_zero, instr->environment()); | 5693 __ test(result_reg, Operand(result_reg)); |
| 5643 __ bind(&done); | 5694 __ j(not_zero, &done, Label::kNear); |
| 5695 __ movmskpd(result_reg, input_reg); |
| 5696 // Bit 0 contains the sign of the double in input_reg. |
| 5697 // If input was positive, we are ok and return 0, otherwise |
| 5698 // deoptimize. |
| 5699 __ and_(result_reg, 1); |
| 5700 DeoptimizeIf(not_zero, instr->environment()); |
| 5701 __ bind(&done); |
| 5702 } |
| 5703 } else { |
| 5704 X87Register input_reg = ToX87Register(input); |
| 5705 X87Fxch(input_reg); |
| 5706 __ push(result_reg); |
| 5707 X87Mov(Operand(esp, 0), input_reg, kX87IntOperand); |
| 5708 __ fld(0); |
| 5709 __ fild_s(Operand(esp, 0)); |
| 5710 __ pop(result_reg); |
| 5711 __ FCmp(); |
| 5712 DeoptimizeIf(not_equal, instr->environment()); |
| 5713 DeoptimizeIf(parity_even, instr->environment()); // NaN. |
| 5714 |
| 5715 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 5716 __ test(result_reg, Operand(result_reg)); |
| 5717 __ j(not_zero, &done, Label::kNear); |
| 5718 // To check for minus zero, we load the value again as float, and check |
| 5719 // if that is still 0. |
| 5720 __ push(result_reg); |
| 5721 __ fst_s(Operand(esp, 0)); |
| 5722 __ pop(result_reg); |
| 5723 __ test(result_reg, Operand(result_reg)); |
| 5724 DeoptimizeIf(not_zero, instr->environment()); |
| 5725 __ bind(&done); |
| 5726 } |
| 5644 } | 5727 } |
| 5645 __ SmiTag(result_reg); | 5728 __ SmiTag(result_reg); |
| 5646 DeoptimizeIf(overflow, instr->environment()); | 5729 DeoptimizeIf(overflow, instr->environment()); |
| 5647 } | 5730 } |
| 5648 | 5731 |
| 5649 | 5732 |
| 5650 void LCodeGen::DoCheckSmi(LCheckSmi* instr) { | 5733 void LCodeGen::DoCheckSmi(LCheckSmi* instr) { |
| 5651 LOperand* input = instr->value(); | 5734 LOperand* input = instr->value(); |
| 5652 __ test(ToOperand(input), Immediate(kSmiTagMask)); | 5735 __ test(ToOperand(input), Immediate(kSmiTagMask)); |
| 5653 DeoptimizeIf(not_zero, instr->environment()); | 5736 DeoptimizeIf(not_zero, instr->environment()); |
| (...skipping 812 matching lines...) |
| 6466 FixedArray::kHeaderSize - kPointerSize)); | 6549 FixedArray::kHeaderSize - kPointerSize)); |
| 6467 __ bind(&done); | 6550 __ bind(&done); |
| 6468 } | 6551 } |
| 6469 | 6552 |
| 6470 | 6553 |
| 6471 #undef __ | 6554 #undef __ |
| 6472 | 6555 |
| 6473 } } // namespace v8::internal | 6556 } } // namespace v8::internal |
| 6474 | 6557 |
| 6475 #endif // V8_TARGET_ARCH_IA32 | 6558 #endif // V8_TARGET_ARCH_IA32 |