Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(160)

Side by Side Diff: src/compiler/mips64/code-generator-mips64.cc

Issue 1045203003: MIPS64: [turbofan] Add backend support for float32 operations. (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Created 5 years, 8 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « no previous file | src/compiler/mips64/instruction-codes-mips64.h » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2014 the V8 project authors. All rights reserved. 1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "src/compiler/code-generator.h" 5 #include "src/compiler/code-generator.h"
6 #include "src/compiler/code-generator-impl.h" 6 #include "src/compiler/code-generator-impl.h"
7 #include "src/compiler/gap-resolver.h" 7 #include "src/compiler/gap-resolver.h"
8 #include "src/compiler/node-matchers.h" 8 #include "src/compiler/node-matchers.h"
9 #include "src/mips/macro-assembler-mips.h" 9 #include "src/mips/macro-assembler-mips.h"
10 #include "src/scopes.h" 10 #include "src/scopes.h"
(...skipping 586 matching lines...) Expand 10 before | Expand all | Expand 10 after
597 case kMips64Mov: 597 case kMips64Mov:
598 // TODO(plind): Should we combine mov/li like this, or use separate instr? 598 // TODO(plind): Should we combine mov/li like this, or use separate instr?
599 // - Also see x64 ASSEMBLE_BINOP & RegisterOrOperandType 599 // - Also see x64 ASSEMBLE_BINOP & RegisterOrOperandType
600 if (HasRegisterInput(instr, 0)) { 600 if (HasRegisterInput(instr, 0)) {
601 __ mov(i.OutputRegister(), i.InputRegister(0)); 601 __ mov(i.OutputRegister(), i.InputRegister(0));
602 } else { 602 } else {
603 __ li(i.OutputRegister(), i.InputOperand(0)); 603 __ li(i.OutputRegister(), i.InputOperand(0));
604 } 604 }
605 break; 605 break;
606 606
607 case kMips64CmpS:
608 // Pseudo-instruction used for FP cmp/branch. No opcode emitted here.
609 break;
610 case kMips64AddS:
611 // TODO(plind): add special case: combine mult & add.
612 __ add_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
613 i.InputDoubleRegister(1));
614 break;
615 case kMips64SubS:
616 __ sub_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
617 i.InputDoubleRegister(1));
618 break;
619 case kMips64MulS:
620 // TODO(plind): add special case: right op is -1.0, see arm port.
621 __ mul_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
622 i.InputDoubleRegister(1));
623 break;
624 case kMips64DivS:
625 __ div_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
626 i.InputDoubleRegister(1));
627 break;
628 case kMips64ModS: {
629 // TODO(bmeurer): We should really get rid of this special instruction,
630 // and generate a CallAddress instruction instead.
631 FrameScope scope(masm(), StackFrame::MANUAL);
632 __ PrepareCallCFunction(0, 2, kScratchReg);
633 __ MovToFloatParameters(i.InputDoubleRegister(0),
634 i.InputDoubleRegister(1));
635 // TODO(balazs.kilvady): implement mod_two_floats_operation(isolate())
636 __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
637 0, 2);
638 // Move the result into the double result register.
639 __ MovFromFloatResult(i.OutputSingleRegister());
640 break;
641 }
642 case kMips64SqrtS: {
643 __ sqrt_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
644 break;
645 }
607 case kMips64CmpD: 646 case kMips64CmpD:
608 // Pseudo-instruction used for FP cmp/branch. No opcode emitted here. 647 // Pseudo-instruction used for FP cmp/branch. No opcode emitted here.
609 break; 648 break;
610 case kMips64AddD: 649 case kMips64AddD:
611 // TODO(plind): add special case: combine mult & add. 650 // TODO(plind): add special case: combine mult & add.
612 __ add_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), 651 __ add_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
613 i.InputDoubleRegister(1)); 652 i.InputDoubleRegister(1));
614 break; 653 break;
615 case kMips64SubD: 654 case kMips64SubD:
616 __ sub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), 655 __ sub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
(...skipping 14 matching lines...) Expand all
631 FrameScope scope(masm(), StackFrame::MANUAL); 670 FrameScope scope(masm(), StackFrame::MANUAL);
632 __ PrepareCallCFunction(0, 2, kScratchReg); 671 __ PrepareCallCFunction(0, 2, kScratchReg);
633 __ MovToFloatParameters(i.InputDoubleRegister(0), 672 __ MovToFloatParameters(i.InputDoubleRegister(0),
634 i.InputDoubleRegister(1)); 673 i.InputDoubleRegister(1));
635 __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()), 674 __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
636 0, 2); 675 0, 2);
637 // Move the result into the double result register. 676 // Move the result into the double result register.
638 __ MovFromFloatResult(i.OutputDoubleRegister()); 677 __ MovFromFloatResult(i.OutputDoubleRegister());
639 break; 678 break;
640 } 679 }
680 case kMips64SqrtD: {
681 __ sqrt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
682 break;
683 }
641 case kMips64Float64RoundDown: { 684 case kMips64Float64RoundDown: {
642 ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(floor_l_d, Floor); 685 ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(floor_l_d, Floor);
643 break; 686 break;
644 } 687 }
645 case kMips64Float64RoundTruncate: { 688 case kMips64Float64RoundTruncate: {
646 ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(trunc_l_d, Truncate); 689 ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(trunc_l_d, Truncate);
647 break; 690 break;
648 } 691 }
649 case kMips64Float64RoundUp: { 692 case kMips64Float64RoundUp: {
650 ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(ceil_l_d, Ceil); 693 ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(ceil_l_d, Ceil);
651 break; 694 break;
652 } 695 }
653 case kMips64SqrtD: {
654 __ sqrt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
655 break;
656 }
657 case kMips64CvtSD: { 696 case kMips64CvtSD: {
658 __ cvt_s_d(i.OutputSingleRegister(), i.InputDoubleRegister(0)); 697 __ cvt_s_d(i.OutputSingleRegister(), i.InputDoubleRegister(0));
659 break; 698 break;
660 } 699 }
661 case kMips64CvtDS: { 700 case kMips64CvtDS: {
662 __ cvt_d_s(i.OutputDoubleRegister(), i.InputSingleRegister(0)); 701 __ cvt_d_s(i.OutputDoubleRegister(), i.InputSingleRegister(0));
663 break; 702 break;
664 } 703 }
665 case kMips64CvtDW: { 704 case kMips64CvtDW: {
666 FPURegister scratch = kScratchDoubleReg; 705 FPURegister scratch = kScratchDoubleReg;
(...skipping 172 matching lines...) Expand 10 before | Expand all | Expand 10 after
839 cc = FlagsConditionToConditionOvf(branch->condition); 878 cc = FlagsConditionToConditionOvf(branch->condition);
840 879
841 __ dsra32(kScratchReg, i.OutputRegister(), 0); 880 __ dsra32(kScratchReg, i.OutputRegister(), 0);
842 __ sra(at, i.OutputRegister(), 31); 881 __ sra(at, i.OutputRegister(), 31);
843 __ Branch(tlabel, cc, at, Operand(kScratchReg)); 882 __ Branch(tlabel, cc, at, Operand(kScratchReg));
844 } else if (instr->arch_opcode() == kMips64Cmp) { 883 } else if (instr->arch_opcode() == kMips64Cmp) {
845 cc = FlagsConditionToConditionCmp(branch->condition); 884 cc = FlagsConditionToConditionCmp(branch->condition);
846 __ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1)); 885 __ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
847 886
848 if (!branch->fallthru) __ Branch(flabel); // no fallthru to flabel. 887 if (!branch->fallthru) __ Branch(flabel); // no fallthru to flabel.
888 } else if (instr->arch_opcode() == kMips64CmpS) {
889 // TODO(dusmil) optimize unordered checks to use fewer instructions
890 // even if we have to unfold BranchF macro.
891 Label* nan = flabel;
892 switch (branch->condition) {
893 case kEqual:
894 cc = eq;
895 break;
896 case kNotEqual:
897 cc = ne;
898 nan = tlabel;
899 break;
900 case kUnsignedLessThan:
901 cc = lt;
902 break;
903 case kUnsignedGreaterThanOrEqual:
904 cc = ge;
905 nan = tlabel;
906 break;
907 case kUnsignedLessThanOrEqual:
908 cc = le;
909 break;
910 case kUnsignedGreaterThan:
911 cc = gt;
912 nan = tlabel;
913 break;
914 default:
915 UNSUPPORTED_COND(kMips64CmpS, branch->condition);
916 break;
917 }
918 __ BranchFS(tlabel, nan, cc, i.InputDoubleRegister(0),
919 i.InputDoubleRegister(1));
920
921 if (!branch->fallthru) __ Branch(flabel); // no fallthru to flabel.
922
849 } else if (instr->arch_opcode() == kMips64CmpD) { 923 } else if (instr->arch_opcode() == kMips64CmpD) {
850 // TODO(dusmil) optimize unordered checks to use fewer instructions 924 // TODO(dusmil) optimize unordered checks to use fewer instructions
851 // even if we have to unfold BranchF macro. 925 // even if we have to unfold BranchF macro.
852 Label* nan = flabel; 926 Label* nan = flabel;
853 switch (branch->condition) { 927 switch (branch->condition) {
854 case kEqual: 928 case kEqual:
855 cc = eq; 929 cc = eq;
856 break; 930 break;
857 case kNotEqual: 931 case kNotEqual:
858 cc = ne; 932 cc = ne;
(...skipping 429 matching lines...) Expand 10 before | Expand all | Expand 10 after
1288 } 1362 }
1289 } 1363 }
1290 MarkLazyDeoptSite(); 1364 MarkLazyDeoptSite();
1291 } 1365 }
1292 1366
1293 #undef __ 1367 #undef __
1294 1368
1295 } // namespace compiler 1369 } // namespace compiler
1296 } // namespace internal 1370 } // namespace internal
1297 } // namespace v8 1371 } // namespace v8
OLDNEW
« no previous file with comments | « no previous file | src/compiler/mips64/instruction-codes-mips64.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698