Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 335 matching lines...) | |
| 346 current_instruction_, | 346 current_instruction_, |
| 347 instr->hydrogen_value()->id(), | 347 instr->hydrogen_value()->id(), |
| 348 instr->Mnemonic()); | 348 instr->Mnemonic()); |
| 349 } | 349 } |
| 350 | 350 |
| 351 if (!CpuFeatures::IsSupported(SSE2)) FlushX87StackIfNecessary(instr); | 351 if (!CpuFeatures::IsSupported(SSE2)) FlushX87StackIfNecessary(instr); |
| 352 | 352 |
| 353 instr->CompileToNative(this); | 353 instr->CompileToNative(this); |
| 354 | 354 |
| 355 if (!CpuFeatures::IsSupported(SSE2)) { | 355 if (!CpuFeatures::IsSupported(SSE2)) { |
| 356 ASSERT(!instr->HasDoubleRegisterResult() || x87_stack_depth_ == 1); | |
| 357 if (FLAG_debug_code && FLAG_enable_slow_asserts) { | 356 if (FLAG_debug_code && FLAG_enable_slow_asserts) { |
| 358 __ VerifyX87StackDepth(x87_stack_depth_); | 357 __ VerifyX87StackDepth(x87_stack_depth_); |
| 359 } | 358 } |
| 360 } | 359 } |
| 361 } | 360 } |
| 362 EnsureSpaceForLazyDeopt(); | 361 EnsureSpaceForLazyDeopt(); |
| 363 return !is_aborted(); | 362 return !is_aborted(); |
| 364 } | 363 } |
| 365 | 364 |
| 366 | 365 |
| (...skipping 127 matching lines...) | |
| 494 safepoints_.Emit(masm(), GetStackSlotCount()); | 493 safepoints_.Emit(masm(), GetStackSlotCount()); |
| 495 return !is_aborted(); | 494 return !is_aborted(); |
| 496 } | 495 } |
| 497 | 496 |
| 498 | 497 |
| 499 Register LCodeGen::ToRegister(int index) const { | 498 Register LCodeGen::ToRegister(int index) const { |
| 500 return Register::FromAllocationIndex(index); | 499 return Register::FromAllocationIndex(index); |
| 501 } | 500 } |
| 502 | 501 |
| 503 | 502 |
| 503 X87Register LCodeGen::ToX87Register(int index) const { | |
| 504 return X87Register::FromAllocationIndex(index); | |
| 505 } | |
| 506 | |
| 507 | |
| 504 XMMRegister LCodeGen::ToDoubleRegister(int index) const { | 508 XMMRegister LCodeGen::ToDoubleRegister(int index) const { |
| 505 return XMMRegister::FromAllocationIndex(index); | 509 return XMMRegister::FromAllocationIndex(index); |
| 506 } | 510 } |
| 507 | 511 |
| 508 | 512 |
| 509 bool LCodeGen::IsX87TopOfStack(LOperand* op) const { | 513 void LCodeGen::X87LoadForUsage(X87Register reg) { |
| 510 return op->IsDoubleRegister(); | 514 ASSERT(X87StackContains(reg)); |
| 515 X87Fxch(reg); | |
| 516 x87_stack_depth_--; | |
| 511 } | 517 } |
| 512 | 518 |
| 513 | 519 |
| 514 void LCodeGen::ReadX87Operand(Operand dst) { | 520 void LCodeGen::X87Fxch(X87Register reg, int other_slot) { |
| 515 ASSERT(x87_stack_depth_ == 1); | 521 int i = X87ArrayIndex(reg); |
| 522 int st = x87_st2idx(i); | |
| 523 if (st != other_slot) { | |
| 524 int other_i = x87_st2idx(other_slot); | |
| 525 X87Register other = x87_stack_[other_i]; | |
| 526 x87_stack_[other_i] = reg; | |
| 527 x87_stack_[i] = other; | |
| 528 if (st == 0) { | |
| 529 __ fxch(other_slot); | |
| 530 } else if (other_slot == 0) { | |
| 531 __ fxch(st); | |
| 532 } else { | |
| 533 __ fxch(st); | |
| 534 __ fxch(other_slot); | |
| 535 __ fxch(st); | |
| 536 } | |
| 537 } | |
| 538 } | |
| 539 | |
| 540 | |
| 541 int LCodeGen::x87_st2idx(int pos) { | |
| 542 return x87_stack_depth_ - pos - 1; | |
| 543 } | |
| 544 | |
| 545 | |
| 546 int LCodeGen::X87ArrayIndex(X87Register reg) { | |
| 547 for (int i = 0; i < x87_stack_depth_; i++) { | |
| 548 if (x87_stack_[i].is(reg)) return i; | |
| 549 } | |
| 550 UNREACHABLE(); | |
| 551 return -1; | |
| 552 } | |
| 553 | |
| 554 | |
| 555 bool LCodeGen::X87StackContains(X87Register reg) { | |
| 556 for (int i = 0; i < x87_stack_depth_; i++) { | |
| 557 if (x87_stack_[i].is(reg)) return true; | |
| 558 } | |
| 559 return false; | |
| 560 } | |
| 561 | |
| 562 | |
| 563 void LCodeGen::X87Free(X87Register reg) { | |
| 564 ASSERT(x87_stack_depth_ > 0); | |
| 565 int i = X87ArrayIndex(reg); | |
| 566 int st = x87_st2idx(i); | |
| 567 if (st > 0) x87_stack_[i] = x87_stack_[x87_st2idx(0)]; | |

mvstanton (2013/07/04 12:30:55): Could you use { } around the assignment? I'm usual

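Editorial sketch, not part of the CL and using made-up register numbers: the assignment on new-file line 567 above mirrors the hardware semantics of `fstp st(i)`, which copies st(0) into st(i) and then pops, so the register tracked at the top must be re-recorded in the freed slot. A minimal standalone model of that bookkeeping:

```cpp
#include <cassert>
#include <vector>

// Standalone model (illustrative, not V8 code) of X87Free's bookkeeping.
// The vector's last element tracks st(0).
void Free(std::vector<int>& stack, int reg) {
  int depth = static_cast<int>(stack.size());
  int i = 0;
  while (stack[i] != reg) ++i;              // X87ArrayIndex(reg)
  int st = depth - i - 1;                   // x87_st2idx(i)
  if (st > 0) stack[i] = stack[depth - 1];  // the old top now lives in slot i
  stack.pop_back();                         // models the emitted fstp(st)
}

int main() {
  std::vector<int> stack = {7, 8, 9};  // slot 2 tracks st(0), slot 0 tracks st(2)
  Free(stack, 8);                      // free the middle register, st(1)
  // Hardware: fstp st(1) stores st(0)'s value over the freed slot and pops,
  // leaving [7, 9] with 9 on top, exactly what the virtual stack now records.
  assert((stack == std::vector<int>{7, 9}));
}
```
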
| 568 x87_stack_depth_--; | |
| 569 __ fstp(st); | |
| 570 } | |
| 571 | |
| 572 | |
| 573 void LCodeGen::X87Mov(X87Register dst, Operand src, X87OperandType opts) { | |
| 574 if (X87StackContains(dst)) { | |
| 575 X87Fxch(dst); | |
| 576 __ fstp(0); | |
| 577 } else { | |
| 578 x87_stack_[x87_stack_depth_] = dst; | |
| 579 x87_stack_depth_++; | |
| 580 } | |
| 581 X87Fld(src, opts); | |
| 582 } | |
| 583 | |
| 584 | |
| 585 void LCodeGen::X87Fld(Operand src, X87OperandType opts) { | |
| 586 if (opts == kX87DoubleOperand) { | |
| 587 __ fld_d(src); | |
| 588 } else if (opts == kX87FloatOperand) { | |
| 589 __ fld_s(src); | |
| 590 } else if (opts == kX87IntOperand) { | |
| 591 __ fild_s(src); | |
| 592 } else { | |
| 593 UNREACHABLE(); | |
| 594 } | |
| 595 } | |
| 596 | |
| 597 | |
| 598 void LCodeGen::X87Mov(Operand dst, X87Register src) { | |
| 599 X87Fxch(src); | |
| 516 __ fst_d(dst); | 600 __ fst_d(dst); |
| 517 } | 601 } |
| 518 | 602 |
| 519 | 603 |
| 520 void LCodeGen::PushX87DoubleOperand(Operand src) { | 604 void LCodeGen::X87PrepareToWrite(X87Register reg) { |
| 521 ASSERT(x87_stack_depth_ == 0); | 605 if (X87StackContains(reg)) { |
| 522 x87_stack_depth_++; | 606 X87Free(reg); |
| 523 __ fld_d(src); | 607 } |
| 608 // Mark this register as the next register to write to | |
| 609 x87_stack_[x87_stack_depth_] = reg; | |
| 524 } | 610 } |
| 525 | 611 |
| 526 | 612 |
| 527 void LCodeGen::PushX87FloatOperand(Operand src) { | 613 void LCodeGen::X87CommitWrite(X87Register reg) { |
| 528 ASSERT(x87_stack_depth_ == 0); | 614 // Assert the reg is prepared to write, but not on the virtual stack yet |
| 615 ASSERT(!X87StackContains(reg) && x87_stack_[x87_stack_depth_].is(reg)); | |
| 529 x87_stack_depth_++; | 616 x87_stack_depth_++; |
| 530 __ fld_s(src); | |
| 531 } | 617 } |
| 532 | 618 |
| 533 | 619 |
| 534 void LCodeGen::PopX87() { | 620 void LCodeGen::X87PrepareBinaryOp( |
| 535 ASSERT(x87_stack_depth_ == 1); | 621 X87Register left, X87Register right, X87Register result) { |
| 536 x87_stack_depth_--; | 622 // You need to use DefineSameAsFirst for x87 instructions |
| 537 __ fstp(0); | 623 ASSERT(result.is(left)); |
| 538 } | 624 X87Fxch(right, 1); |
| 539 | 625 X87Fxch(left); |
| 540 | |
| 541 void LCodeGen::CurrentInstructionReturnsX87Result() { | |
| 542 ASSERT(x87_stack_depth_ <= 1); | |
| 543 if (x87_stack_depth_ == 0) { | |
| 544 x87_stack_depth_ = 1; | |
| 545 } | |
| 546 } | 626 } |
| 547 | 627 |
| 548 | 628 |
| 549 void LCodeGen::FlushX87StackIfNecessary(LInstruction* instr) { | 629 void LCodeGen::FlushX87StackIfNecessary(LInstruction* instr) { |
| 550 if (x87_stack_depth_ > 0) { | 630 if (x87_stack_depth_ > 0 && instr->ClobbersDoubleRegisters()) { |
| 551 if ((instr->ClobbersDoubleRegisters() || | 631 bool double_inputs = instr->HasDoubleRegisterInput(); |
| 552 instr->HasDoubleRegisterResult()) && | 632 |
| 553 !instr->HasDoubleRegisterInput()) { | 633 // Flush stack from tos down, since PopX87() will mess with tos |

mvstanton (2013/07/04 12:30:55): Update the comment to reflect that PopX87() is gon

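Editorial sketch, not part of the CL (register numbers are hypothetical): a standalone trace of the flush loop introduced here, showing that after the top-down walk only the instruction's double inputs remain tracked.

```cpp
#include <cassert>
#include <vector>

// Mirrors the loop above with hypothetical contents: three live registers,
// the instruction clobbers doubles, and register 1 is a double input that
// must survive the flush.
int main() {
  std::vector<int> stack = {0, 1, 2};      // last element tracks st(0)
  auto is_double_input = [](int reg) { return reg == 1; };

  for (int i = static_cast<int>(stack.size()) - 1; i >= 0; i--) {
    if (is_double_input(stack[i])) continue;            // skip inputs
    // X87Free: the old top is re-recorded in the freed slot, then popped.
    if (i != static_cast<int>(stack.size()) - 1) stack[i] = stack.back();
    stack.pop_back();
    if (i < static_cast<int>(stack.size()) - 1) i++;    // the CL's index adjustment
  }
  assert(stack.size() == 1 && stack[0] == 1);  // only the double input remains
}
```
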
| 554 PopX87(); | 634 for (int i = x87_stack_depth_-1; i >= 0; i--) { |
| 635 X87Register reg = x87_stack_[i]; | |
| 636 // Skip input registers when flushing | |
| 637 if (double_inputs && instr->IsDoubleInput(reg, this)) { | |
| 638 continue; | |
| 639 } | |
| 640 X87Free(reg); | |
| 641 if (i < x87_stack_depth_-1) i++; | |
| 642 } | |
| 643 } | |
| 644 if (instr->IsReturn()) { | |
| 645 while (x87_stack_depth_ > 0) { | |
| 646 __ fstp(0); | |
| 647 x87_stack_depth_--; | |
| 555 } | 648 } |
| 556 } | 649 } |
| 557 } | 650 } |
| 558 | 651 |
| 559 | 652 |
| 560 Register LCodeGen::ToRegister(LOperand* op) const { | 653 Register LCodeGen::ToRegister(LOperand* op) const { |
| 561 ASSERT(op->IsRegister()); | 654 ASSERT(op->IsRegister()); |
| 562 return ToRegister(op->index()); | 655 return ToRegister(op->index()); |
| 563 } | 656 } |
| 564 | 657 |
| 565 | 658 |
| 659 X87Register LCodeGen::ToX87Register(LOperand* op) const { | |
| 660 ASSERT(op->IsDoubleRegister()); | |
| 661 return ToX87Register(op->index()); | |
| 662 } | |
| 663 | |
| 664 | |
| 566 XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const { | 665 XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const { |
| 567 ASSERT(op->IsDoubleRegister()); | 666 ASSERT(op->IsDoubleRegister()); |
| 568 return ToDoubleRegister(op->index()); | 667 return ToDoubleRegister(op->index()); |
| 569 } | 668 } |
| 570 | 669 |
| 571 | 670 |
| 572 int LCodeGen::ToInteger32(LConstantOperand* op) const { | 671 int LCodeGen::ToInteger32(LConstantOperand* op) const { |
| 573 HConstant* constant = chunk_->LookupConstant(op); | 672 HConstant* constant = chunk_->LookupConstant(op); |
| 574 return constant->Integer32Value(); | 673 return constant->Integer32Value(); |
| 575 } | 674 } |
| (...skipping 252 matching lines...) | |
| 828 deoptimizations_.Add(environment, zone()); | 927 deoptimizations_.Add(environment, zone()); |
| 829 } | 928 } |
| 830 } | 929 } |
| 831 | 930 |
| 832 | 931 |
| 833 void LCodeGen::DeoptimizeIf(Condition cc, | 932 void LCodeGen::DeoptimizeIf(Condition cc, |
| 834 LEnvironment* environment, | 933 LEnvironment* environment, |
| 835 Deoptimizer::BailoutType bailout_type) { | 934 Deoptimizer::BailoutType bailout_type) { |
| 836 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); | 935 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); |
| 837 ASSERT(environment->HasBeenRegistered()); | 936 ASSERT(environment->HasBeenRegistered()); |
| 838 // It's an error to deoptimize with the x87 fp stack in use. | |
| 839 ASSERT(x87_stack_depth_ == 0); | |
| 840 int id = environment->deoptimization_index(); | 937 int id = environment->deoptimization_index(); |
| 841 ASSERT(info()->IsOptimizing() || info()->IsStub()); | 938 ASSERT(info()->IsOptimizing() || info()->IsStub()); |
| 842 Address entry = | 939 Address entry = |
| 843 Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type); | 940 Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type); |
| 844 if (entry == NULL) { | 941 if (entry == NULL) { |
| 845 Abort("bailout was not prepared"); | 942 Abort("bailout was not prepared"); |
| 846 return; | 943 return; |
| 847 } | 944 } |
| 848 | 945 |
| 849 if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) { | 946 if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) { |
| (...skipping 17 matching lines...) | |
| 867 __ jmp(entry, RelocInfo::RUNTIME_ENTRY); | 964 __ jmp(entry, RelocInfo::RUNTIME_ENTRY); |
| 868 | 965 |
| 869 __ bind(&no_deopt); | 966 __ bind(&no_deopt); |
| 870 __ mov(FieldOperand(ebx, SharedFunctionInfo::kStressDeoptCounterOffset), | 967 __ mov(FieldOperand(ebx, SharedFunctionInfo::kStressDeoptCounterOffset), |
| 871 eax); | 968 eax); |
| 872 __ pop(ebx); | 969 __ pop(ebx); |
| 873 __ pop(eax); | 970 __ pop(eax); |
| 874 __ popfd(); | 971 __ popfd(); |
| 875 } | 972 } |
| 876 | 973 |
| 974 if (x87_stack_depth_ > 0) { | |
| 975 Label done; | |

mvstanton (2013/07/04 12:30:55): Could you explain what is happening here with a co

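Editorial note rather than a reviewer reply: the new block pops every tracked x87 value, but only on the path that will actually deoptimize; the `NegateCondition` jump skips the pops when `cc` does not hold, so live values survive on the fall-through path while the deopt entry is reached with an empty FPU stack. With the hypothetical values `cc == equal` and `x87_stack_depth_ == 2`, the loop expands to roughly this (same MacroAssembler macro style as the surrounding file, not standalone code):

```cpp
// Illustrative expansion only; the loop below emits the equivalent.
__ j(not_equal, &done, Label::kNear);  // NegateCondition(equal): not deopting
__ fstp(0);                            // deopting: pop both tracked values so
__ fstp(0);                            // nothing is left on the FPU stack
__ bind(&done);
```
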
| 976 if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear); | |
| 977 for (int i = 0; i < x87_stack_depth_; i++) __ fstp(0); | |
| 978 __ bind(&done); | |
| 979 } | |
| 980 | |
| 877 if (FLAG_trap_on_deopt && info()->IsOptimizing()) { | 981 if (FLAG_trap_on_deopt && info()->IsOptimizing()) { |
| 878 Label done; | 982 Label done; |
| 879 if (cc != no_condition) { | 983 if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear); |
| 880 __ j(NegateCondition(cc), &done, Label::kNear); | |
| 881 } | |
| 882 __ int3(); | 984 __ int3(); |
| 883 __ bind(&done); | 985 __ bind(&done); |
| 884 } | 986 } |
| 885 | 987 |
| 886 ASSERT(info()->IsStub() || frame_is_built_); | 988 ASSERT(info()->IsStub() || frame_is_built_); |
| 887 if (cc == no_condition && frame_is_built_) { | 989 if (cc == no_condition && frame_is_built_) { |
| 888 if (bailout_type == Deoptimizer::LAZY) { | 990 if (bailout_type == Deoptimizer::LAZY) { |
| 889 __ call(entry, RelocInfo::RUNTIME_ENTRY); | 991 __ call(entry, RelocInfo::RUNTIME_ENTRY); |
| 890 } else { | 992 } else { |
| 891 __ jmp(entry, RelocInfo::RUNTIME_ENTRY); | 993 __ jmp(entry, RelocInfo::RUNTIME_ENTRY); |
| (...skipping 822 matching lines...) | |
| 1714 } | 1816 } |
| 1715 | 1817 |
| 1716 | 1818 |
| 1717 void LCodeGen::DoConstantD(LConstantD* instr) { | 1819 void LCodeGen::DoConstantD(LConstantD* instr) { |
| 1718 double v = instr->value(); | 1820 double v = instr->value(); |
| 1719 uint64_t int_val = BitCast<uint64_t, double>(v); | 1821 uint64_t int_val = BitCast<uint64_t, double>(v); |
| 1720 int32_t lower = static_cast<int32_t>(int_val); | 1822 int32_t lower = static_cast<int32_t>(int_val); |
| 1721 int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt)); | 1823 int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt)); |
| 1722 | 1824 |
| 1723 if (!CpuFeatures::IsSafeForSnapshot(SSE2)) { | 1825 if (!CpuFeatures::IsSafeForSnapshot(SSE2)) { |
| 1826 __ push(Immediate(upper)); | |
| 1724 __ push(Immediate(lower)); | 1827 __ push(Immediate(lower)); |
| 1725 __ push(Immediate(upper)); | 1828 X87Mov(ToX87Register(instr->result()), Operand(esp, 0)); |
| 1726 PushX87DoubleOperand(Operand(esp, 0)); | |
| 1727 __ add(Operand(esp), Immediate(kDoubleSize)); | 1829 __ add(Operand(esp), Immediate(kDoubleSize)); |
| 1728 CurrentInstructionReturnsX87Result(); | |
| 1729 } else { | 1830 } else { |
| 1730 CpuFeatureScope scope1(masm(), SSE2); | 1831 CpuFeatureScope scope1(masm(), SSE2); |
| 1731 ASSERT(instr->result()->IsDoubleRegister()); | 1832 ASSERT(instr->result()->IsDoubleRegister()); |
| 1732 XMMRegister res = ToDoubleRegister(instr->result()); | 1833 XMMRegister res = ToDoubleRegister(instr->result()); |
| 1733 if (int_val == 0) { | 1834 if (int_val == 0) { |
| 1734 __ xorps(res, res); | 1835 __ xorps(res, res); |
| 1735 } else { | 1836 } else { |
| 1736 Register temp = ToRegister(instr->temp()); | 1837 Register temp = ToRegister(instr->temp()); |
| 1737 if (CpuFeatures::IsSupported(SSE4_1)) { | 1838 if (CpuFeatures::IsSupported(SSE4_1)) { |
| 1738 CpuFeatureScope scope2(masm(), SSE4_1); | 1839 CpuFeatureScope scope2(masm(), SSE4_1); |
| (...skipping 244 matching lines...) | |
| 1983 __ j(parity_even, &return_left, Label::kNear); // left == NaN. | 2084 __ j(parity_even, &return_left, Label::kNear); // left == NaN. |
| 1984 __ bind(&return_right); | 2085 __ bind(&return_right); |
| 1985 __ movsd(left_reg, right_reg); | 2086 __ movsd(left_reg, right_reg); |
| 1986 | 2087 |
| 1987 __ bind(&return_left); | 2088 __ bind(&return_left); |
| 1988 } | 2089 } |
| 1989 } | 2090 } |
| 1990 | 2091 |
| 1991 | 2092 |
| 1992 void LCodeGen::DoArithmeticD(LArithmeticD* instr) { | 2093 void LCodeGen::DoArithmeticD(LArithmeticD* instr) { |
| 1993 CpuFeatureScope scope(masm(), SSE2); | 2094 if (CpuFeatures::IsSafeForSnapshot(SSE2)) { |
| 1994 XMMRegister left = ToDoubleRegister(instr->left()); | 2095 CpuFeatureScope scope(masm(), SSE2); |
| 1995 XMMRegister right = ToDoubleRegister(instr->right()); | 2096 XMMRegister left = ToDoubleRegister(instr->left()); |
| 1996 XMMRegister result = ToDoubleRegister(instr->result()); | 2097 XMMRegister right = ToDoubleRegister(instr->right()); |
| 1997 // Modulo uses a fixed result register. | 2098 XMMRegister result = ToDoubleRegister(instr->result()); |
| 1998 ASSERT(instr->op() == Token::MOD || left.is(result)); | 2099 // Modulo uses a fixed result register. |
| 1999 switch (instr->op()) { | 2100 ASSERT(instr->op() == Token::MOD || left.is(result)); |
| 2000 case Token::ADD: | 2101 switch (instr->op()) { |
| 2001 __ addsd(left, right); | 2102 case Token::ADD: |
| 2002 break; | 2103 __ addsd(left, right); |
| 2003 case Token::SUB: | 2104 break; |
| 2004 __ subsd(left, right); | 2105 case Token::SUB: |
| 2005 break; | 2106 __ subsd(left, right); |
| 2006 case Token::MUL: | 2107 break; |
| 2007 __ mulsd(left, right); | 2108 case Token::MUL: |
| 2008 break; | 2109 __ mulsd(left, right); |
| 2009 case Token::DIV: | 2110 break; |
| 2010 __ divsd(left, right); | 2111 case Token::DIV: |
| 2011 // Don't delete this mov. It may improve performance on some CPUs, | 2112 __ divsd(left, right); |
| 2012 // when there is a mulsd depending on the result | 2113 // Don't delete this mov. It may improve performance on some CPUs, |
| 2013 __ movaps(left, left); | 2114 // when there is a mulsd depending on the result |
| 2014 break; | 2115 __ movaps(left, left); |
| 2015 case Token::MOD: { | 2116 break; |
| 2016 // Pass two doubles as arguments on the stack. | 2117 case Token::MOD: { |
| 2017 __ PrepareCallCFunction(4, eax); | 2118 // Pass two doubles as arguments on the stack. |
| 2018 __ movdbl(Operand(esp, 0 * kDoubleSize), left); | 2119 __ PrepareCallCFunction(4, eax); |
| 2019 __ movdbl(Operand(esp, 1 * kDoubleSize), right); | 2120 __ movdbl(Operand(esp, 0 * kDoubleSize), left); |
| 2020 __ CallCFunction( | 2121 __ movdbl(Operand(esp, 1 * kDoubleSize), right); |
| 2021 ExternalReference::double_fp_operation(Token::MOD, isolate()), | 2122 __ CallCFunction( |
| 2022 4); | 2123 ExternalReference::double_fp_operation(Token::MOD, isolate()), |
| 2124 4); | |
| 2023 | 2125 |
| 2024 // Return value is in st(0) on ia32. | 2126 // Return value is in st(0) on ia32. |
| 2025 // Store it into the (fixed) result register. | 2127 // Store it into the (fixed) result register. |
| 2026 __ sub(Operand(esp), Immediate(kDoubleSize)); | 2128 __ sub(Operand(esp), Immediate(kDoubleSize)); |
| 2027 __ fstp_d(Operand(esp, 0)); | 2129 __ fstp_d(Operand(esp, 0)); |
| 2028 __ movdbl(result, Operand(esp, 0)); | 2130 __ movdbl(result, Operand(esp, 0)); |
| 2029 __ add(Operand(esp), Immediate(kDoubleSize)); | 2131 __ add(Operand(esp), Immediate(kDoubleSize)); |
| 2030 break; | 2132 break; |
| 2133 } | |
| 2134 default: | |
| 2135 UNREACHABLE(); | |
| 2136 break; | |
| 2031 } | 2137 } |
| 2032 default: | 2138 } else { |
| 2033 UNREACHABLE(); | 2139 X87Register left = ToX87Register(instr->left()); |
| 2034 break; | 2140 X87Register right = ToX87Register(instr->right()); |
| 2141 X87Register result = ToX87Register(instr->result()); | |
| 2142 X87PrepareBinaryOp(left, right, result); | |
| 2143 switch (instr->op()) { | |
| 2144 case Token::MUL: | |
| 2145 __ fmul_i(1); | |
| 2146 break; | |
| 2147 default: | |
| 2148 UNREACHABLE(); | |
| 2149 break; | |
| 2150 } | |
| 2035 } | 2151 } |
| 2036 } | 2152 } |
| 2037 | 2153 |
| 2038 | 2154 |
| 2039 void LCodeGen::DoNegateNoSSE2D(LNegateNoSSE2D* instr) { | |
| 2040 __ push(Immediate(-1)); | |
| 2041 __ fild_s(Operand(esp, 0)); | |
| 2042 __ add(esp, Immediate(kPointerSize)); | |
| 2043 __ fmulp(); | |
| 2044 CurrentInstructionReturnsX87Result(); | |
| 2045 } | |
| 2046 | |
| 2047 | |
| 2048 | |
| 2049 void LCodeGen::DoArithmeticT(LArithmeticT* instr) { | 2155 void LCodeGen::DoArithmeticT(LArithmeticT* instr) { |
| 2050 ASSERT(ToRegister(instr->context()).is(esi)); | 2156 ASSERT(ToRegister(instr->context()).is(esi)); |
| 2051 ASSERT(ToRegister(instr->left()).is(edx)); | 2157 ASSERT(ToRegister(instr->left()).is(edx)); |
| 2052 ASSERT(ToRegister(instr->right()).is(eax)); | 2158 ASSERT(ToRegister(instr->right()).is(eax)); |
| 2053 ASSERT(ToRegister(instr->result()).is(eax)); | 2159 ASSERT(ToRegister(instr->result()).is(eax)); |
| 2054 | 2160 |
| 2055 BinaryOpStub stub(instr->op(), NO_OVERWRITE); | 2161 BinaryOpStub stub(instr->op(), NO_OVERWRITE); |
| 2056 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); | 2162 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); |
| 2057 __ nop(); // Signals no inlined code. | 2163 __ nop(); // Signals no inlined code. |
| 2058 } | 2164 } |
| (...skipping 897 matching lines...) | |
| 2956 HObjectAccess access = instr->hydrogen()->access(); | 3062 HObjectAccess access = instr->hydrogen()->access(); |
| 2957 int offset = access.offset(); | 3063 int offset = access.offset(); |
| 2958 Register object = ToRegister(instr->object()); | 3064 Register object = ToRegister(instr->object()); |
| 2959 if (FLAG_track_double_fields && | 3065 if (FLAG_track_double_fields && |
| 2960 instr->hydrogen()->representation().IsDouble()) { | 3066 instr->hydrogen()->representation().IsDouble()) { |
| 2961 if (CpuFeatures::IsSupported(SSE2)) { | 3067 if (CpuFeatures::IsSupported(SSE2)) { |
| 2962 CpuFeatureScope scope(masm(), SSE2); | 3068 CpuFeatureScope scope(masm(), SSE2); |
| 2963 XMMRegister result = ToDoubleRegister(instr->result()); | 3069 XMMRegister result = ToDoubleRegister(instr->result()); |
| 2964 __ movdbl(result, FieldOperand(object, offset)); | 3070 __ movdbl(result, FieldOperand(object, offset)); |
| 2965 } else { | 3071 } else { |
| 2966 PushX87DoubleOperand(FieldOperand(object, offset)); | 3072 X87Mov(ToX87Register(instr->result()), FieldOperand(object, offset)); |
| 2967 CurrentInstructionReturnsX87Result(); | |
| 2968 } | 3073 } |
| 2969 return; | 3074 return; |
| 2970 } | 3075 } |
| 2971 | 3076 |
| 2972 Register result = ToRegister(instr->result()); | 3077 Register result = ToRegister(instr->result()); |
| 2973 if (access.IsInobject()) { | 3078 if (access.IsInobject()) { |
| 2974 __ mov(result, FieldOperand(object, offset)); | 3079 __ mov(result, FieldOperand(object, offset)); |
| 2975 } else { | 3080 } else { |
| 2976 __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset)); | 3081 __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset)); |
| 2977 __ mov(result, FieldOperand(result, offset)); | 3082 __ mov(result, FieldOperand(result, offset)); |
| (...skipping 224 matching lines...) | |
| 3202 elements_kind, | 3307 elements_kind, |
| 3203 0, | 3308 0, |
| 3204 instr->additional_index())); | 3309 instr->additional_index())); |
| 3205 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { | 3310 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { |
| 3206 if (CpuFeatures::IsSupported(SSE2)) { | 3311 if (CpuFeatures::IsSupported(SSE2)) { |
| 3207 CpuFeatureScope scope(masm(), SSE2); | 3312 CpuFeatureScope scope(masm(), SSE2); |
| 3208 XMMRegister result(ToDoubleRegister(instr->result())); | 3313 XMMRegister result(ToDoubleRegister(instr->result())); |
| 3209 __ movss(result, operand); | 3314 __ movss(result, operand); |
| 3210 __ cvtss2sd(result, result); | 3315 __ cvtss2sd(result, result); |
| 3211 } else { | 3316 } else { |
| 3212 PushX87FloatOperand(operand); | 3317 X87Mov(ToX87Register(instr->result()), operand, kX87FloatOperand); |
| 3213 CurrentInstructionReturnsX87Result(); | |
| 3214 } | 3318 } |
| 3215 } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { | 3319 } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { |
| 3216 if (CpuFeatures::IsSupported(SSE2)) { | 3320 if (CpuFeatures::IsSupported(SSE2)) { |
| 3217 CpuFeatureScope scope(masm(), SSE2); | 3321 CpuFeatureScope scope(masm(), SSE2); |
| 3218 __ movdbl(ToDoubleRegister(instr->result()), operand); | 3322 __ movdbl(ToDoubleRegister(instr->result()), operand); |
| 3219 } else { | 3323 } else { |
| 3220 PushX87DoubleOperand(operand); | 3324 X87Mov(ToX87Register(instr->result()), operand); |
| 3221 CurrentInstructionReturnsX87Result(); | |
| 3222 } | 3325 } |
| 3223 } else { | 3326 } else { |
| 3224 Register result(ToRegister(instr->result())); | 3327 Register result(ToRegister(instr->result())); |
| 3225 switch (elements_kind) { | 3328 switch (elements_kind) { |
| 3226 case EXTERNAL_BYTE_ELEMENTS: | 3329 case EXTERNAL_BYTE_ELEMENTS: |
| 3227 __ movsx_b(result, operand); | 3330 __ movsx_b(result, operand); |
| 3228 break; | 3331 break; |
| 3229 case EXTERNAL_PIXEL_ELEMENTS: | 3332 case EXTERNAL_PIXEL_ELEMENTS: |
| 3230 case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: | 3333 case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: |
| 3231 __ movzx_b(result, operand); | 3334 __ movzx_b(result, operand); |
| (...skipping 50 matching lines...) | |
| 3282 instr->key(), | 3385 instr->key(), |
| 3283 instr->hydrogen()->key()->representation(), | 3386 instr->hydrogen()->key()->representation(), |
| 3284 FAST_DOUBLE_ELEMENTS, | 3387 FAST_DOUBLE_ELEMENTS, |
| 3285 FixedDoubleArray::kHeaderSize - kHeapObjectTag, | 3388 FixedDoubleArray::kHeaderSize - kHeapObjectTag, |
| 3286 instr->additional_index()); | 3389 instr->additional_index()); |
| 3287 if (CpuFeatures::IsSupported(SSE2)) { | 3390 if (CpuFeatures::IsSupported(SSE2)) { |
| 3288 CpuFeatureScope scope(masm(), SSE2); | 3391 CpuFeatureScope scope(masm(), SSE2); |
| 3289 XMMRegister result = ToDoubleRegister(instr->result()); | 3392 XMMRegister result = ToDoubleRegister(instr->result()); |
| 3290 __ movdbl(result, double_load_operand); | 3393 __ movdbl(result, double_load_operand); |
| 3291 } else { | 3394 } else { |
| 3292 PushX87DoubleOperand(double_load_operand); | 3395 X87Mov(ToX87Register(instr->result()), double_load_operand); |
| 3293 CurrentInstructionReturnsX87Result(); | |
| 3294 } | 3396 } |
| 3295 } | 3397 } |
| 3296 | 3398 |
| 3297 | 3399 |
| 3298 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { | 3400 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { |
| 3299 Register result = ToRegister(instr->result()); | 3401 Register result = ToRegister(instr->result()); |
| 3300 | 3402 |
| 3301 // Load the result. | 3403 // Load the result. |
| 3302 __ mov(result, | 3404 __ mov(result, |
| 3303 BuildFastArrayOperand(instr->elements(), | 3405 BuildFastArrayOperand(instr->elements(), |
| (...skipping 1650 matching lines...) | |
| 4954 | 5056 |
| 4955 Register reg = ToRegister(instr->result()); | 5057 Register reg = ToRegister(instr->result()); |
| 4956 | 5058 |
| 4957 bool convert_hole = false; | 5059 bool convert_hole = false; |
| 4958 HValue* change_input = instr->hydrogen()->value(); | 5060 HValue* change_input = instr->hydrogen()->value(); |
| 4959 if (change_input->IsLoadKeyed()) { | 5061 if (change_input->IsLoadKeyed()) { |
| 4960 HLoadKeyed* load = HLoadKeyed::cast(change_input); | 5062 HLoadKeyed* load = HLoadKeyed::cast(change_input); |
| 4961 convert_hole = load->UsesMustHandleHole(); | 5063 convert_hole = load->UsesMustHandleHole(); |
| 4962 } | 5064 } |
| 4963 | 5065 |
| 5066 bool use_sse2 = CpuFeatures::IsSupported(SSE2); | |
| 5067 if (!use_sse2) { | |
| 5068 // Put the value to the top of stack | |
| 5069 X87Register src = ToX87Register(instr->value()); | |
| 5070 X87LoadForUsage(src); | |
| 5071 } | |
| 5072 | |
| 4964 Label no_special_nan_handling; | 5073 Label no_special_nan_handling; |
| 4965 Label done; | 5074 Label done; |
| 4966 if (convert_hole) { | 5075 if (convert_hole) { |
| 4967 bool use_sse2 = CpuFeatures::IsSupported(SSE2); | |
| 4968 if (use_sse2) { | 5076 if (use_sse2) { |
| 4969 CpuFeatureScope scope(masm(), SSE2); | 5077 CpuFeatureScope scope(masm(), SSE2); |
| 4970 XMMRegister input_reg = ToDoubleRegister(instr->value()); | 5078 XMMRegister input_reg = ToDoubleRegister(instr->value()); |
| 4971 __ ucomisd(input_reg, input_reg); | 5079 __ ucomisd(input_reg, input_reg); |
| 4972 } else { | 5080 } else { |
| 4973 __ fld(0); | 5081 __ fld(0); |
| 4974 __ fld(0); | |
| 4975 __ FCmp(); | 5082 __ FCmp(); |
| 4976 } | 5083 } |
| 4977 | 5084 |
| 4978 __ j(parity_odd, &no_special_nan_handling); | 5085 __ j(parity_odd, &no_special_nan_handling); |
| 4979 __ sub(esp, Immediate(kDoubleSize)); | 5086 __ sub(esp, Immediate(kDoubleSize)); |
| 4980 if (use_sse2) { | 5087 if (use_sse2) { |
| 4981 CpuFeatureScope scope(masm(), SSE2); | 5088 CpuFeatureScope scope(masm(), SSE2); |
| 4982 XMMRegister input_reg = ToDoubleRegister(instr->value()); | 5089 XMMRegister input_reg = ToDoubleRegister(instr->value()); |
| 4983 __ movdbl(MemOperand(esp, 0), input_reg); | 5090 __ movdbl(MemOperand(esp, 0), input_reg); |
| 4984 } else { | 5091 } else { |
| (...skipping 33 matching lines...) | |
| 5018 __ jmp(deferred->entry()); | 5125 __ jmp(deferred->entry()); |
| 5019 } | 5126 } |
| 5020 __ bind(deferred->exit()); | 5127 __ bind(deferred->exit()); |
| 5021 if (CpuFeatures::IsSupported(SSE2)) { | 5128 if (CpuFeatures::IsSupported(SSE2)) { |
| 5022 CpuFeatureScope scope(masm(), SSE2); | 5129 CpuFeatureScope scope(masm(), SSE2); |
| 5023 XMMRegister input_reg = ToDoubleRegister(instr->value()); | 5130 XMMRegister input_reg = ToDoubleRegister(instr->value()); |
| 5024 __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), input_reg); | 5131 __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), input_reg); |
| 5025 } else { | 5132 } else { |
| 5026 __ fst_d(FieldOperand(reg, HeapNumber::kValueOffset)); | 5133 __ fst_d(FieldOperand(reg, HeapNumber::kValueOffset)); |
| 5027 } | 5134 } |
| 5135 if (!use_sse2) { | |
| 5136 // clean up the stack | |
| 5137 __ fstp(0); | |
| 5138 } | |
| 5028 __ bind(&done); | 5139 __ bind(&done); |
| 5029 } | 5140 } |
| 5030 | 5141 |
| 5031 | 5142 |
| 5032 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { | 5143 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { |
| 5033 // TODO(3095996): Get rid of this. For now, we need to make the | 5144 // TODO(3095996): Get rid of this. For now, we need to make the |
| 5034 // result register contain a valid pointer because it is already | 5145 // result register contain a valid pointer because it is already |
| 5035 // contained in the register pointer map. | 5146 // contained in the register pointer map. |
| 5036 Register reg = ToRegister(instr->result()); | 5147 Register reg = ToRegister(instr->result()); |
| 5037 __ Set(reg, Immediate(0)); | 5148 __ Set(reg, Immediate(0)); |
| (...skipping 29 matching lines...) | |
| 5067 DeoptimizeIf(not_zero, instr->environment()); | 5178 DeoptimizeIf(not_zero, instr->environment()); |
| 5068 } else { | 5179 } else { |
| 5069 __ AssertSmi(result); | 5180 __ AssertSmi(result); |
| 5070 } | 5181 } |
| 5071 __ SmiUntag(result); | 5182 __ SmiUntag(result); |
| 5072 } | 5183 } |
| 5073 | 5184 |
| 5074 | 5185 |
| 5075 void LCodeGen::EmitNumberUntagDNoSSE2(Register input_reg, | 5186 void LCodeGen::EmitNumberUntagDNoSSE2(Register input_reg, |
| 5076 Register temp_reg, | 5187 Register temp_reg, |
| 5188 X87Register res_reg, | |
| 5077 bool allow_undefined_as_nan, | 5189 bool allow_undefined_as_nan, |
| 5078 bool deoptimize_on_minus_zero, | 5190 bool deoptimize_on_minus_zero, |
| 5079 LEnvironment* env, | 5191 LEnvironment* env, |
| 5080 NumberUntagDMode mode) { | 5192 NumberUntagDMode mode) { |
| 5081 Label load_smi, done; | 5193 Label load_smi, done; |
| 5082 | 5194 |
| 5195 X87PrepareToWrite(res_reg); | |
| 5083 STATIC_ASSERT(NUMBER_CANDIDATE_IS_ANY_TAGGED_CONVERT_HOLE > | 5196 STATIC_ASSERT(NUMBER_CANDIDATE_IS_ANY_TAGGED_CONVERT_HOLE > |
| 5084 NUMBER_CANDIDATE_IS_ANY_TAGGED); | 5197 NUMBER_CANDIDATE_IS_ANY_TAGGED); |
| 5085 if (mode >= NUMBER_CANDIDATE_IS_ANY_TAGGED) { | 5198 if (mode >= NUMBER_CANDIDATE_IS_ANY_TAGGED) { |
| 5086 // Smi check. | 5199 // Smi check. |
| 5087 __ JumpIfSmi(input_reg, &load_smi, Label::kNear); | 5200 __ JumpIfSmi(input_reg, &load_smi, Label::kNear); |
| 5088 | 5201 |
| 5089 // Heap number map check. | 5202 // Heap number map check. |
| 5090 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset), | 5203 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset), |
| 5091 factory()->heap_number_map()); | 5204 factory()->heap_number_map()); |
| 5092 if (!allow_undefined_as_nan) { | 5205 if (!allow_undefined_as_nan) { |
| (...skipping 40 matching lines...) | |
| 5133 ASSERT(mode == NUMBER_CANDIDATE_IS_SMI); | 5246 ASSERT(mode == NUMBER_CANDIDATE_IS_SMI); |
| 5134 } | 5247 } |
| 5135 | 5248 |
| 5136 __ bind(&load_smi); | 5249 __ bind(&load_smi); |
| 5137 __ SmiUntag(input_reg); // Untag smi before converting to float. | 5250 __ SmiUntag(input_reg); // Untag smi before converting to float. |
| 5138 __ push(input_reg); | 5251 __ push(input_reg); |
| 5139 __ fild_s(Operand(esp, 0)); | 5252 __ fild_s(Operand(esp, 0)); |
| 5140 __ pop(input_reg); | 5253 __ pop(input_reg); |
| 5141 __ SmiTag(input_reg); // Retag smi. | 5254 __ SmiTag(input_reg); // Retag smi. |
| 5142 __ bind(&done); | 5255 __ bind(&done); |
| 5256 X87CommitWrite(res_reg); | |
| 5143 } | 5257 } |
| 5144 | 5258 |
| 5145 | 5259 |
| 5146 void LCodeGen::EmitNumberUntagD(Register input_reg, | 5260 void LCodeGen::EmitNumberUntagD(Register input_reg, |
| 5147 Register temp_reg, | 5261 Register temp_reg, |
| 5148 XMMRegister result_reg, | 5262 XMMRegister result_reg, |
| 5149 bool allow_undefined_as_nan, | 5263 bool allow_undefined_as_nan, |
| 5150 bool deoptimize_on_minus_zero, | 5264 bool deoptimize_on_minus_zero, |
| 5151 LEnvironment* env, | 5265 LEnvironment* env, |
| 5152 NumberUntagDMode mode) { | 5266 NumberUntagDMode mode) { |
| (...skipping 361 matching lines...) | |
| 5514 EmitNumberUntagD(input_reg, | 5628 EmitNumberUntagD(input_reg, |
| 5515 temp_reg, | 5629 temp_reg, |
| 5516 result_reg, | 5630 result_reg, |
| 5517 instr->hydrogen()->allow_undefined_as_nan(), | 5631 instr->hydrogen()->allow_undefined_as_nan(), |
| 5518 deoptimize_on_minus_zero, | 5632 deoptimize_on_minus_zero, |
| 5519 instr->environment(), | 5633 instr->environment(), |
| 5520 mode); | 5634 mode); |
| 5521 } else { | 5635 } else { |
| 5522 EmitNumberUntagDNoSSE2(input_reg, | 5636 EmitNumberUntagDNoSSE2(input_reg, |
| 5523 temp_reg, | 5637 temp_reg, |
| 5638 ToX87Register(instr->result()), | |
| 5524 instr->hydrogen()->allow_undefined_as_nan(), | 5639 instr->hydrogen()->allow_undefined_as_nan(), |
| 5525 deoptimize_on_minus_zero, | 5640 deoptimize_on_minus_zero, |
| 5526 instr->environment(), | 5641 instr->environment(), |
| 5527 mode); | 5642 mode); |
| 5528 CurrentInstructionReturnsX87Result(); | |
| 5529 } | 5643 } |
| 5530 } | 5644 } |
| 5531 | 5645 |
| 5532 | 5646 |
| 5533 void LCodeGen::DoDoubleToI(LDoubleToI* instr) { | 5647 void LCodeGen::DoDoubleToI(LDoubleToI* instr) { |
| 5534 LOperand* input = instr->value(); | 5648 LOperand* input = instr->value(); |
| 5535 ASSERT(input->IsDoubleRegister()); | 5649 ASSERT(input->IsDoubleRegister()); |
| 5536 LOperand* result = instr->result(); | 5650 LOperand* result = instr->result(); |
| 5537 ASSERT(result->IsRegister()); | 5651 ASSERT(result->IsRegister()); |
| 5538 CpuFeatureScope scope(masm(), SSE2); | 5652 CpuFeatureScope scope(masm(), SSE2); |
| (...skipping 1033 matching lines...) | |
| 6572 FixedArray::kHeaderSize - kPointerSize)); | 6686 FixedArray::kHeaderSize - kPointerSize)); |
| 6573 __ bind(&done); | 6687 __ bind(&done); |
| 6574 } | 6688 } |
| 6575 | 6689 |
| 6576 | 6690 |
| 6577 #undef __ | 6691 #undef __ |
| 6578 | 6692 |
| 6579 } } // namespace v8::internal | 6693 } } // namespace v8::internal |
| 6580 | 6694 |
| 6581 #endif // V8_TARGET_ARCH_IA32 | 6695 #endif // V8_TARGET_ARCH_IA32 |
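
Aside (editorial, not part of the CL): the subtlest piece of the new bookkeeping is the three-exchange path in X87Fxch, which swaps two tracked slots when neither is st(0) by bouncing both through the top. A minimal standalone check of that identity, with made-up values:

```cpp
#include <cassert>
#include <utility>
#include <vector>

// fxch st(i) exchanges st(0) with st(i); doing it three times swaps st(a)
// and st(b) while putting the old top value back on top, which is what the
// else branch of X87Fxch relies on.
void Fxch(std::vector<int>& st, int i) { std::swap(st[0], st[i]); }  // st[0] is the top

int main() {
  std::vector<int> st = {10, 11, 12, 13};
  int a = 1, b = 3;
  Fxch(st, a);  // st(0) <-> st(a)
  Fxch(st, b);  // st(0) <-> st(b)
  Fxch(st, a);  // st(0) <-> st(a)
  // Net effect: st(a) and st(b) are swapped and st(0) is unchanged.
  assert(st[0] == 10 && st[a] == 13 && st[b] == 11 && st[2] == 12);
}
```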