| OLD | NEW |
| 1 // Copyright 2014 the V8 project authors. All rights reserved. | 1 // Copyright 2014 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/compiler/code-generator.h" | 5 #include "src/compiler/code-generator.h" |
| 6 #include "src/compiler/code-generator-impl.h" | 6 #include "src/compiler/code-generator-impl.h" |
| 7 #include "src/compiler/gap-resolver.h" | 7 #include "src/compiler/gap-resolver.h" |
| 8 #include "src/compiler/node-matchers.h" | 8 #include "src/compiler/node-matchers.h" |
| 9 #include "src/mips/macro-assembler-mips.h" | 9 #include "src/mips/macro-assembler-mips.h" |
| 10 #include "src/scopes.h" | 10 #include "src/scopes.h" |
| (...skipping 524 matching lines...) |
| 535 case kMipsMov: | 535 case kMipsMov: |
| 536 // TODO(plind): Should we combine mov/li like this, or use separate instr? | 536 // TODO(plind): Should we combine mov/li like this, or use separate instr? |
| 537 // - Also see x64 ASSEMBLE_BINOP & RegisterOrOperandType | 537 // - Also see x64 ASSEMBLE_BINOP & RegisterOrOperandType |
| 538 if (HasRegisterInput(instr, 0)) { | 538 if (HasRegisterInput(instr, 0)) { |
| 539 __ mov(i.OutputRegister(), i.InputRegister(0)); | 539 __ mov(i.OutputRegister(), i.InputRegister(0)); |
| 540 } else { | 540 } else { |
| 541 __ li(i.OutputRegister(), i.InputOperand(0)); | 541 __ li(i.OutputRegister(), i.InputOperand(0)); |
| 542 } | 542 } |
| 543 break; | 543 break; |
| 544 | 544 |
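A side note on the kMipsMov case kept as context above: MIPS `mov` only copies register to register, while `li` materializes an immediate, so the case dispatches on the operand kind via HasRegisterInput(). The sketch below is a minimal, standalone model of that decision; the Operand struct and EmitMove function are illustrative and are not V8's operand-converter API.

```cpp
#include <cstdint>
#include <iostream>
#include <string>

// Illustrative only: models the register-vs-immediate choice kMipsMov makes.
struct Operand {
  bool is_register;
  int reg;      // used when is_register
  int32_t imm;  // used when !is_register
};

// Returns the instruction a hypothetical assembler would emit.
std::string EmitMove(int dst, const Operand& src) {
  if (src.is_register) {
    return "mov $" + std::to_string(dst) + ", $" + std::to_string(src.reg);
  }
  return "li $" + std::to_string(dst) + ", " + std::to_string(src.imm);
}

int main() {
  std::cout << EmitMove(2, {true, 3, 0}) << "\n";    // register source -> mov
  std::cout << EmitMove(2, {false, 0, 42}) << "\n";  // immediate source -> li
}
```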
| 545 case kMipsCmpS: |
| 546 // Pseudo-instruction used for FP cmp/branch. No opcode emitted here. |
| 547 break; |
| 548 case kMipsAddS: |
| 549 // TODO(plind): add special case: combine mult & add. |
| 550 __ add_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0), |
| 551 i.InputDoubleRegister(1)); |
| 552 break; |
| 553 case kMipsSubS: |
| 554 __ sub_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0), |
| 555 i.InputDoubleRegister(1)); |
| 556 break; |
| 557 case kMipsMulS: |
| 558 // TODO(plind): add special case: right op is -1.0, see arm port. |
| 559 __ mul_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0), |
| 560 i.InputDoubleRegister(1)); |
| 561 break; |
| 562 case kMipsDivS: |
| 563 __ div_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0), |
| 564 i.InputDoubleRegister(1)); |
| 565 break; |
| 566 case kMipsModS: { |
| 567 // TODO(bmeurer): We should really get rid of this special instruction, |
| 568 // and generate a CallAddress instruction instead. |
| 569 FrameScope scope(masm(), StackFrame::MANUAL); |
| 570 __ PrepareCallCFunction(0, 2, kScratchReg); |
| 571 __ MovToFloatParameters(i.InputDoubleRegister(0), |
| 572 i.InputDoubleRegister(1)); |
| 573 // TODO(balazs.kilvady): implement mod_two_floats_operation(isolate()) |
| 574 __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()), |
| 575 0, 2); |
| 576 // Move the result to the single-precision output register. |
| 577 __ MovFromFloatResult(i.OutputSingleRegister()); |
| 578 break; |
| 579 } |
| 580 case kMipsSqrtS: { |
| 581 __ sqrt_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); |
| 582 break; |
| 583 } |
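The new kMipsModS case above has no single FPU instruction it can use, so it calls out to a C helper through CallCFunction; per the TODO it currently reuses the double-precision mod_two_doubles_operation until a float variant is implemented. Conceptually that helper is a thin wrapper around the C library remainder. Below is a hedged sketch of both versions; the mod_two_floats name and signature are only an assumption taken from the TODO, not an existing V8 function.

```cpp
#include <cmath>
#include <cstdio>

// Conceptual equivalent of the double-precision helper the case calls today.
double mod_two_doubles(double x, double y) {
  return std::fmod(x, y);  // remainder with the sign of x; NaN when y == 0
}

// Hypothetical single-precision variant suggested by the TODO (assumed name).
float mod_two_floats(float x, float y) {
  return std::fmod(x, y);  // float overload of std::fmod
}

int main() {
  std::printf("%f %f\n", mod_two_doubles(5.5, 2.0), mod_two_floats(5.5f, 2.0f));
}
```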
| 545 case kMipsCmpD: | 584 case kMipsCmpD: |
| 546 // Pseudo-instruction used for FP cmp/branch. No opcode emitted here. | 585 // Pseudo-instruction used for FP cmp/branch. No opcode emitted here. |
| 547 break; | 586 break; |
| 548 case kMipsAddD: | 587 case kMipsAddD: |
| 549 // TODO(plind): add special case: combine mult & add. | 588 // TODO(plind): add special case: combine mult & add. |
| 550 __ add_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), | 589 __ add_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), |
| 551 i.InputDoubleRegister(1)); | 590 i.InputDoubleRegister(1)); |
| 552 break; | 591 break; |
| 553 case kMipsSubD: | 592 case kMipsSubD: |
| 554 __ sub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), | 593 __ sub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), |
| (...skipping 14 matching lines...) |
| 569 FrameScope scope(masm(), StackFrame::MANUAL); | 608 FrameScope scope(masm(), StackFrame::MANUAL); |
| 570 __ PrepareCallCFunction(0, 2, kScratchReg); | 609 __ PrepareCallCFunction(0, 2, kScratchReg); |
| 571 __ MovToFloatParameters(i.InputDoubleRegister(0), | 610 __ MovToFloatParameters(i.InputDoubleRegister(0), |
| 572 i.InputDoubleRegister(1)); | 611 i.InputDoubleRegister(1)); |
| 573 __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()), | 612 __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()), |
| 574 0, 2); | 613 0, 2); |
| 575 // Move the result to the double result register. | 614 // Move the result to the double result register. |
| 576 __ MovFromFloatResult(i.OutputDoubleRegister()); | 615 __ MovFromFloatResult(i.OutputDoubleRegister()); |
| 577 break; | 616 break; |
| 578 } | 617 } |
| 618 case kMipsSqrtD: { |
| 619 __ sqrt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); |
| 620 break; |
| 621 } |
| 579 case kMipsFloat64RoundDown: { | 622 case kMipsFloat64RoundDown: { |
| 580 ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(floor_l_d, Floor); | 623 ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(floor_l_d, Floor); |
| 581 break; | 624 break; |
| 582 } | 625 } |
| 583 case kMipsFloat64RoundTruncate: { | 626 case kMipsFloat64RoundTruncate: { |
| 584 ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(trunc_l_d, Truncate); | 627 ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(trunc_l_d, Truncate); |
| 585 break; | 628 break; |
| 586 } | 629 } |
| 587 case kMipsFloat64RoundUp: { | 630 case kMipsFloat64RoundUp: { |
| 588 ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(ceil_l_d, Ceil); | 631 ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(ceil_l_d, Ceil); |
| 589 break; | 632 break; |
| 590 } | 633 } |
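For reference, the three kMipsFloat64Round* cases above correspond to the standard floor/truncate/ceil rounding modes (hence floor_l_d, trunc_l_d, ceil_l_d in the macro arguments). A small self-contained illustration of the intended double-to-double semantics, independent of the ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE macro:

```cpp
#include <cmath>
#include <cstdio>

int main() {
  const double x = -2.5;
  std::printf("RoundDown(%g)     = %g\n", x, std::floor(x));  // -3
  std::printf("RoundTruncate(%g) = %g\n", x, std::trunc(x));  // -2
  std::printf("RoundUp(%g)       = %g\n", x, std::ceil(x));   // -2
}
```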
| 591 case kMipsSqrtD: { | |
| 592 __ sqrt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); | |
| 593 break; | |
| 594 } | |
| 595 case kMipsCvtSD: { | 634 case kMipsCvtSD: { |
| 596 __ cvt_s_d(i.OutputSingleRegister(), i.InputDoubleRegister(0)); | 635 __ cvt_s_d(i.OutputSingleRegister(), i.InputDoubleRegister(0)); |
| 597 break; | 636 break; |
| 598 } | 637 } |
| 599 case kMipsCvtDS: { | 638 case kMipsCvtDS: { |
| 600 __ cvt_d_s(i.OutputDoubleRegister(), i.InputSingleRegister(0)); | 639 __ cvt_d_s(i.OutputDoubleRegister(), i.InputSingleRegister(0)); |
| 601 break; | 640 break; |
| 602 } | 641 } |
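The two conversion cases above behave like ordinary C++ float/double casts: cvt.s.d narrows to single precision (rounding to the nearest representable float under the default rounding mode), while cvt.d.s widens exactly. A minimal illustration of that round trip:

```cpp
#include <cstdio>

int main() {
  double d = 0.1;                        // not exactly representable
  float s = static_cast<float>(d);       // cvt.s.d: round to single precision
  double back = static_cast<double>(s);  // cvt.d.s: exact widening
  std::printf("%.17g -> %.9g -> %.17g\n", d, s, back);
}
```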
| 603 case kMipsCvtDW: { | 642 case kMipsCvtDW: { |
| 604 FPURegister scratch = kScratchDoubleReg; | 643 FPURegister scratch = kScratchDoubleReg; |
| (...skipping 169 matching lines...) |
| 774 // kMipsAddOvf, SubOvf emit negative result to 'kCompareReg' on overflow. | 813 // kMipsAddOvf, SubOvf emit negative result to 'kCompareReg' on overflow. |
| 775 cc = FlagsConditionToConditionOvf(branch->condition); | 814 cc = FlagsConditionToConditionOvf(branch->condition); |
| 776 __ Branch(tlabel, cc, kCompareReg, Operand(zero_reg)); | 815 __ Branch(tlabel, cc, kCompareReg, Operand(zero_reg)); |
| 777 | 816 |
| 778 } else if (instr->arch_opcode() == kMipsCmp) { | 817 } else if (instr->arch_opcode() == kMipsCmp) { |
| 779 cc = FlagsConditionToConditionCmp(branch->condition); | 818 cc = FlagsConditionToConditionCmp(branch->condition); |
| 780 __ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1)); | 819 __ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1)); |
| 781 | 820 |
| 782 if (!branch->fallthru) __ Branch(flabel); // no fallthru to flabel. | 821 if (!branch->fallthru) __ Branch(flabel); // no fallthru to flabel. |
| 783 | 822 |
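The comment above notes that kMipsAddOvf/kMipsSubOvf leave a negative value in kCompareReg when the operation overflowed, so the branch only has to test the sign. The sketch below shows one standard way such a flag can be derived for 32-bit signed addition; it is an illustration of the idea, not the instruction sequence the macro assembler actually emits.

```cpp
#include <cstdint>
#include <cstdio>

// Returns the wrapped sum and writes a value whose sign bit is set exactly
// when the signed 32-bit addition overflowed.
int32_t AddWithOverflowFlag(int32_t a, int32_t b, int32_t* flag) {
  int32_t sum = static_cast<int32_t>(static_cast<uint32_t>(a) +
                                     static_cast<uint32_t>(b));
  // Overflow iff the operands share a sign and the sum's sign differs.
  *flag = ~(a ^ b) & (a ^ sum);  // negative (bit 31 set) on overflow
  return sum;
}

int main() {
  int32_t flag;
  AddWithOverflowFlag(INT32_MAX, 1, &flag);
  std::printf("overflowed: %s\n", flag < 0 ? "yes" : "no");
}
```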
| 823 } else if (instr->arch_opcode() == kMipsCmpS) { |
| 824 // TODO(dusmil) optimize unordered checks to use fewer instructions |
| 825 // even if we have to unfold BranchF macro. |
| 826 Label* nan = flabel; |
| 827 switch (branch->condition) { |
| 828 case kEqual: |
| 829 cc = eq; |
| 830 break; |
| 831 case kNotEqual: |
| 832 cc = ne; |
| 833 nan = tlabel; |
| 834 break; |
| 835 case kUnsignedLessThan: |
| 836 cc = lt; |
| 837 break; |
| 838 case kUnsignedGreaterThanOrEqual: |
| 839 cc = ge; |
| 840 nan = tlabel; |
| 841 break; |
| 842 case kUnsignedLessThanOrEqual: |
| 843 cc = le; |
| 844 break; |
| 845 case kUnsignedGreaterThan: |
| 846 cc = gt; |
| 847 nan = tlabel; |
| 848 break; |
| 849 default: |
| 850 UNSUPPORTED_COND(kMipsCmpS, branch->condition); |
| 851 break; |
| 852 } |
| 853 __ BranchFS(tlabel, nan, cc, i.InputDoubleRegister(0), |
| 854 i.InputDoubleRegister(1)); |
| 855 |
| 856 if (!branch->fallthru) __ Branch(flabel); // no fallthru to flabel. |
| 857 |
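The switch in the new kMipsCmpS branch above picks two things at once: the FPU condition to test and which label an unordered (NaN) comparison should reach. eq/lt/le are false when either operand is NaN, so only their negations (ne/ge/gt) redirect `nan` to the true label. A compact standalone sketch of that mapping, using illustrative enum and struct names rather than V8's types:

```cpp
#include <cstdlib>

// Mirrors the conditions the kMipsCmpS branch handles above.
enum Condition {
  kEqual, kNotEqual, kUnsignedLessThan, kUnsignedGreaterThanOrEqual,
  kUnsignedLessThanOrEqual, kUnsignedGreaterThan
};

struct FpBranch {
  const char* cc;    // FPU compare condition to test
  bool nan_to_true;  // should a NaN operand take the true label?
};

// A NaN operand makes eq/lt/le compare false, so their negations must treat
// the unordered case as branch-taken.
FpBranch MapFpCondition(Condition cond) {
  switch (cond) {
    case kEqual:                      return {"eq", false};
    case kNotEqual:                   return {"ne", true};
    case kUnsignedLessThan:           return {"lt", false};
    case kUnsignedGreaterThanOrEqual: return {"ge", true};
    case kUnsignedLessThanOrEqual:    return {"le", false};
    case kUnsignedGreaterThan:        return {"gt", true};
  }
  std::abort();  // unsupported condition
}
```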
| 784 } else if (instr->arch_opcode() == kMipsCmpD) { | 858 } else if (instr->arch_opcode() == kMipsCmpD) { |
| 785 // TODO(dusmil) optimize unordered checks to use fewer instructions | 859 // TODO(dusmil) optimize unordered checks to use fewer instructions |
| 786 // even if we have to unfold BranchF macro. | 860 // even if we have to unfold BranchF macro. |
| 787 Label* nan = flabel; | 861 Label* nan = flabel; |
| 788 switch (branch->condition) { | 862 switch (branch->condition) { |
| 789 case kEqual: | 863 case kEqual: |
| 790 cc = eq; | 864 cc = eq; |
| 791 break; | 865 break; |
| 792 case kNotEqual: | 866 case kNotEqual: |
| 793 cc = ne; | 867 cc = ne; |
| (...skipping 428 matching lines...) |
| 1222 } | 1296 } |
| 1223 } | 1297 } |
| 1224 MarkLazyDeoptSite(); | 1298 MarkLazyDeoptSite(); |
| 1225 } | 1299 } |
| 1226 | 1300 |
| 1227 #undef __ | 1301 #undef __ |
| 1228 | 1302 |
| 1229 } // namespace compiler | 1303 } // namespace compiler |
| 1230 } // namespace internal | 1304 } // namespace internal |
| 1231 } // namespace v8 | 1305 } // namespace v8 |