| OLD | NEW |
| 1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/compiler/code-generator.h" | 5 #include "src/compiler/code-generator.h" |
| 6 | 6 |
| 7 #include "src/compiler/code-generator-impl.h" | 7 #include "src/compiler/code-generator-impl.h" |
| 8 #include "src/compiler/gap-resolver.h" | 8 #include "src/compiler/gap-resolver.h" |
| 9 #include "src/compiler/node-matchers.h" | 9 #include "src/compiler/node-matchers.h" |
| 10 #include "src/scopes.h" | 10 #include "src/scopes.h" |
| (...skipping 247 matching lines...) |
| 258 if (instr->addressing_mode() != kMode_None) { \ | 258 if (instr->addressing_mode() != kMode_None) { \ |
| 259 __ asm_instr(i.OutputRegister(), i.MemoryOperand()); \ | 259 __ asm_instr(i.OutputRegister(), i.MemoryOperand()); \ |
| 260 } else if (instr->InputAt(0)->IsRegister()) { \ | 260 } else if (instr->InputAt(0)->IsRegister()) { \ |
| 261 __ asm_instr(i.OutputRegister(), i.InputRegister(0)); \ | 261 __ asm_instr(i.OutputRegister(), i.InputRegister(0)); \ |
| 262 } else { \ | 262 } else { \ |
| 263 __ asm_instr(i.OutputRegister(), i.InputOperand(0)); \ | 263 __ asm_instr(i.OutputRegister(), i.InputOperand(0)); \ |
| 264 } \ | 264 } \ |
| 265 } while (0) | 265 } while (0) |
| 266 | 266 |
| 267 | 267 |
| 268 #define ASSEMBLE_DOUBLE_BINOP(asm_instr) \ | 268 #define ASSEMBLE_SSE_BINOP(asm_instr) \ |
| 269 do { \ | 269 do { \ |
| 270 if (instr->InputAt(1)->IsDoubleRegister()) { \ | 270 if (instr->InputAt(1)->IsDoubleRegister()) { \ |
| 271 __ asm_instr(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); \ | 271 __ asm_instr(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); \ |
| 272 } else { \ | 272 } else { \ |
| 273 __ asm_instr(i.InputDoubleRegister(0), i.InputOperand(1)); \ | 273 __ asm_instr(i.InputDoubleRegister(0), i.InputOperand(1)); \ |
| 274 } \ | 274 } \ |
| 275 } while (0) | 275 } while (0) |
| 276 | 276 |
| 277 | 277 |
| 278 #define ASSEMBLE_AVX_DOUBLE_BINOP(asm_instr) \ | 278 #define ASSEMBLE_SSE_UNOP(asm_instr) \ |
| | 279 do { \ |
| | 280 if (instr->InputAt(0)->IsDoubleRegister()) { \ |
| | 281 __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \ |
| | 282 } else { \ |
| | 283 __ asm_instr(i.OutputDoubleRegister(), i.InputOperand(0)); \ |
| | 284 } \ |
| | 285 } while (0) |
| | 286 |
| | 287 |
| | 288 #define ASSEMBLE_AVX_BINOP(asm_instr) \ |
| 279 do { \ | 289 do { \ |
| 280 CpuFeatureScope avx_scope(masm(), AVX); \ | 290 CpuFeatureScope avx_scope(masm(), AVX); \ |
| 281 if (instr->InputAt(1)->IsDoubleRegister()) { \ | 291 if (instr->InputAt(1)->IsDoubleRegister()) { \ |
| 282 __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \ | 292 __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \ |
| 283 i.InputDoubleRegister(1)); \ | 293 i.InputDoubleRegister(1)); \ |
| 284 } else { \ | 294 } else { \ |
| 285 __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \ | 295 __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \ |
| 286 i.InputOperand(1)); \ | 296 i.InputOperand(1)); \ |
| 287 } \ | 297 } \ |
| 288 } while (0) | 298 } while (0) |
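A note on the macro changes above: ASSEMBLE_SSE_BINOP keeps the destructive two-operand SSE form (input 0 doubles as the destination), ASSEMBLE_SSE_UNOP covers the single-input conversions and sqrt, and the new ASSEMBLE_AVX_BINOP emits the non-destructive three-operand VEX form under a CpuFeatureScope. As an illustrative expansion only — `instr`, the operand converter `i`, and the `__` shorthand for masm() come from the enclosing AssembleArchInstruction, as at the use sites below:

    // Illustrative expansion of ASSEMBLE_SSE_BINOP(addss): SSE scalar
    // ops overwrite their first operand, so input 0 is also the result.
    do {
      if (instr->InputAt(1)->IsDoubleRegister()) {
        __ addss(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      } else {
        __ addss(i.InputDoubleRegister(0), i.InputOperand(1));
      }
    } while (0);

    // Illustrative expansion of ASSEMBLE_AVX_BINOP(vaddss): the VEX
    // encoding names the destination separately from both inputs.
    do {
      CpuFeatureScope avx_scope(masm(), AVX);
      if (instr->InputAt(1)->IsDoubleRegister()) {
        __ vaddss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                  i.InputDoubleRegister(1));
      } else {
        __ vaddss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                  i.InputOperand(1));
      }
    } while (0);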
| (...skipping 405 matching lines...) |
| 694 case kX64Ror: | 704 case kX64Ror: |
| 695 ASSEMBLE_SHIFT(rorq, 6); | 705 ASSEMBLE_SHIFT(rorq, 6); |
| 696 break; | 706 break; |
| 697 case kX64Lzcnt32: | 707 case kX64Lzcnt32: |
| 698 if (instr->InputAt(0)->IsRegister()) { | 708 if (instr->InputAt(0)->IsRegister()) { |
| 699 __ Lzcntl(i.OutputRegister(), i.InputRegister(0)); | 709 __ Lzcntl(i.OutputRegister(), i.InputRegister(0)); |
| 700 } else { | 710 } else { |
| 701 __ Lzcntl(i.OutputRegister(), i.InputOperand(0)); | 711 __ Lzcntl(i.OutputRegister(), i.InputOperand(0)); |
| 702 } | 712 } |
| 703 break; | 713 break; |
| | 714 case kSSEFloat32Cmp: |
| | 715 ASSEMBLE_SSE_BINOP(ucomiss); |
| | 716 break; |
| | 717 case kSSEFloat32Add: |
| | 718 ASSEMBLE_SSE_BINOP(addss); |
| | 719 break; |
| | 720 case kSSEFloat32Sub: |
| | 721 ASSEMBLE_SSE_BINOP(subss); |
| | 722 break; |
| | 723 case kSSEFloat32Mul: |
| | 724 ASSEMBLE_SSE_BINOP(mulss); |
| | 725 break; |
| | 726 case kSSEFloat32Div: |
| | 727 ASSEMBLE_SSE_BINOP(divss); |
| | 728 break; |
| | 729 case kSSEFloat32Max: |
| | 730 ASSEMBLE_SSE_BINOP(maxss); |
| | 731 break; |
| | 732 case kSSEFloat32Min: |
| | 733 ASSEMBLE_SSE_BINOP(minss); |
| | 734 break; |
| | 735 case kSSEFloat32Sqrt: |
| | 736 ASSEMBLE_SSE_UNOP(sqrtss); |
| | 737 break; |
| | 738 case kSSEFloat32ToFloat64: |
| | 739 ASSEMBLE_SSE_UNOP(cvtss2sd); |
| | 740 break; |
| 704 case kSSEFloat64Cmp: | 741 case kSSEFloat64Cmp: |
| 705 ASSEMBLE_DOUBLE_BINOP(ucomisd); | 742 ASSEMBLE_SSE_BINOP(ucomisd); |
| 706 break; | 743 break; |
| 707 case kSSEFloat64Add: | 744 case kSSEFloat64Add: |
| 708 ASSEMBLE_DOUBLE_BINOP(addsd); | 745 ASSEMBLE_SSE_BINOP(addsd); |
| 709 break; | 746 break; |
| 710 case kSSEFloat64Sub: | 747 case kSSEFloat64Sub: |
| 711 ASSEMBLE_DOUBLE_BINOP(subsd); | 748 ASSEMBLE_SSE_BINOP(subsd); |
| 712 break; | 749 break; |
| 713 case kSSEFloat64Mul: | 750 case kSSEFloat64Mul: |
| 714 ASSEMBLE_DOUBLE_BINOP(mulsd); | 751 ASSEMBLE_SSE_BINOP(mulsd); |
| 715 break; | 752 break; |
| 716 case kSSEFloat64Div: | 753 case kSSEFloat64Div: |
| 717 ASSEMBLE_DOUBLE_BINOP(divsd); | 754 ASSEMBLE_SSE_BINOP(divsd); |
| 718 break; | 755 break; |
| 719 case kSSEFloat64Mod: { | 756 case kSSEFloat64Mod: { |
| 720 __ subq(rsp, Immediate(kDoubleSize)); | 757 __ subq(rsp, Immediate(kDoubleSize)); |
| 721 // Move values to st(0) and st(1). | 758 // Move values to st(0) and st(1). |
| 722 __ movsd(Operand(rsp, 0), i.InputDoubleRegister(1)); | 759 __ movsd(Operand(rsp, 0), i.InputDoubleRegister(1)); |
| 723 __ fld_d(Operand(rsp, 0)); | 760 __ fld_d(Operand(rsp, 0)); |
| 724 __ movsd(Operand(rsp, 0), i.InputDoubleRegister(0)); | 761 __ movsd(Operand(rsp, 0), i.InputDoubleRegister(0)); |
| 725 __ fld_d(Operand(rsp, 0)); | 762 __ fld_d(Operand(rsp, 0)); |
| 726 // Loop while fprem isn't done. | 763 // Loop while fprem isn't done. |
| 727 Label mod_loop; | 764 Label mod_loop; |
| (...skipping 14 matching lines...) |
| 742 } | 779 } |
| 743 __ j(parity_even, &mod_loop); | 780 __ j(parity_even, &mod_loop); |
| 744 // Move output to stack and clean up. | 781 // Move output to stack and clean up. |
| 745 __ fstp(1); | 782 __ fstp(1); |
| 746 __ fstp_d(Operand(rsp, 0)); | 783 __ fstp_d(Operand(rsp, 0)); |
| 747 __ movsd(i.OutputDoubleRegister(), Operand(rsp, 0)); | 784 __ movsd(i.OutputDoubleRegister(), Operand(rsp, 0)); |
| 748 __ addq(rsp, Immediate(kDoubleSize)); | 785 __ addq(rsp, Immediate(kDoubleSize)); |
| 749 break; | 786 break; |
| 750 } | 787 } |
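A note on the kSSEFloat64Mod case above: SSE2 has no scalar remainder instruction, so both inputs are spilled and pushed through the x87 stack. fprem only reduces the dividend's exponent by a bounded amount per pass, which is why the emitted code loops while parity_even holds — once the FPU status word has been transferred into EFLAGS, the parity flag mirrors fprem's C2 "reduction incomplete" bit. Semantically the case computes a truncated remainder, i.e. C's fmod (fprem truncates; fprem1 would give the IEEE round-to-nearest remainder). A minimal reference model, illustrative only:

    #include <cmath>

    // Reference semantics for kSSEFloat64Mod: the truncated remainder
    // that the fprem loop above converges to.
    double Float64Mod(double lhs, double rhs) {
      return std::fmod(lhs, rhs);
    }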
| 751 case kSSEFloat64Max: | 788 case kSSEFloat64Max: |
| 752 ASSEMBLE_DOUBLE_BINOP(maxsd); | 789 ASSEMBLE_SSE_BINOP(maxsd); |
| 753 break; | 790 break; |
| 754 case kSSEFloat64Min: | 791 case kSSEFloat64Min: |
| 755 ASSEMBLE_DOUBLE_BINOP(minsd); | 792 ASSEMBLE_SSE_BINOP(minsd); |
| 756 break; | 793 break; |
| 757 case kSSEFloat64Sqrt: | 794 case kSSEFloat64Sqrt: |
| 758 if (instr->InputAt(0)->IsDoubleRegister()) { | 795 ASSEMBLE_SSE_UNOP(sqrtsd); |
| 759 __ sqrtsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); | |
| 760 } else { | |
| 761 __ sqrtsd(i.OutputDoubleRegister(), i.InputOperand(0)); | |
| 762 } | |
| 763 break; | 796 break; |
| 764 case kSSEFloat64Round: { | 797 case kSSEFloat64Round: { |
| 765 CpuFeatureScope sse_scope(masm(), SSE4_1); | 798 CpuFeatureScope sse_scope(masm(), SSE4_1); |
| 766 RoundingMode const mode = | 799 RoundingMode const mode = |
| 767 static_cast<RoundingMode>(MiscField::decode(instr->opcode())); | 800 static_cast<RoundingMode>(MiscField::decode(instr->opcode())); |
| 768 __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode); | 801 __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode); |
| 769 break; | 802 break; |
| 770 } | 803 } |
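For kSSEFloat64Round, the rounding mode rides along inside the opcode word: the instruction selector packs it into MiscField, and the case above decodes it back out and hands it to roundsd (SSE4.1, hence the CpuFeatureScope). A sketch of the encoding side — the kRoundDown constant from the x64 assembler's RoundingMode enum is assumed here for illustration:

    // Sketch (encoding side, illustrative): a round-down operation is
    // selected by packing the mode bits next to the arch opcode...
    InstructionCode const opcode =
        kSSEFloat64Round | MiscField::encode(kRoundDown);
    // ...which the decode in the case above recovers:
    //   static_cast<RoundingMode>(MiscField::decode(instr->opcode()))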
| 771 case kSSECvtss2sd: | 804 case kSSEFloat64ToFloat32: |
| 772 if (instr->InputAt(0)->IsDoubleRegister()) { | 805 ASSEMBLE_SSE_UNOP(cvtsd2ss); |
| 773 __ cvtss2sd(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); | |
| 774 } else { | |
| 775 __ cvtss2sd(i.OutputDoubleRegister(), i.InputOperand(0)); | |
| 776 } | |
| 777 break; | |
| 778 case kSSECvtsd2ss: | |
| 779 if (instr->InputAt(0)->IsDoubleRegister()) { | |
| 780 __ cvtsd2ss(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); | |
| 781 } else { | |
| 782 __ cvtsd2ss(i.OutputDoubleRegister(), i.InputOperand(0)); | |
| 783 } | |
| 784 break; | 806 break; |
| 785 case kSSEFloat64ToInt32: | 807 case kSSEFloat64ToInt32: |
| 786 if (instr->InputAt(0)->IsDoubleRegister()) { | 808 if (instr->InputAt(0)->IsDoubleRegister()) { |
| 787 __ cvttsd2si(i.OutputRegister(), i.InputDoubleRegister(0)); | 809 __ cvttsd2si(i.OutputRegister(), i.InputDoubleRegister(0)); |
| 788 } else { | 810 } else { |
| 789 __ cvttsd2si(i.OutputRegister(), i.InputOperand(0)); | 811 __ cvttsd2si(i.OutputRegister(), i.InputOperand(0)); |
| 790 } | 812 } |
| 791 break; | 813 break; |
| 792 case kSSEFloat64ToUint32: { | 814 case kSSEFloat64ToUint32: { |
| 793 if (instr->InputAt(0)->IsDoubleRegister()) { | 815 if (instr->InputAt(0)->IsDoubleRegister()) { |
| (...skipping 47 matching lines...) |
| 841 __ Pinsrd(i.OutputDoubleRegister(), i.InputOperand(1), 1); | 863 __ Pinsrd(i.OutputDoubleRegister(), i.InputOperand(1), 1); |
| 842 } | 864 } |
| 843 break; | 865 break; |
| 844 case kSSEFloat64LoadLowWord32: | 866 case kSSEFloat64LoadLowWord32: |
| 845 if (instr->InputAt(0)->IsRegister()) { | 867 if (instr->InputAt(0)->IsRegister()) { |
| 846 __ movd(i.OutputDoubleRegister(), i.InputRegister(0)); | 868 __ movd(i.OutputDoubleRegister(), i.InputRegister(0)); |
| 847 } else { | 869 } else { |
| 848 __ movd(i.OutputDoubleRegister(), i.InputOperand(0)); | 870 __ movd(i.OutputDoubleRegister(), i.InputOperand(0)); |
| 849 } | 871 } |
| 850 break; | 872 break; |
| | 873 case kAVXFloat32Cmp: { |
| | 874 CpuFeatureScope avx_scope(masm(), AVX); |
| | 875 if (instr->InputAt(1)->IsDoubleRegister()) { |
| | 876 __ vucomiss(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); |
| | 877 } else { |
| | 878 __ vucomiss(i.InputDoubleRegister(0), i.InputOperand(1)); |
| | 879 } |
| | 880 break; |
| | 881 } |
| | 882 case kAVXFloat32Add: |
| | 883 ASSEMBLE_AVX_BINOP(vaddss); |
| | 884 break; |
| | 885 case kAVXFloat32Sub: |
| | 886 ASSEMBLE_AVX_BINOP(vsubss); |
| | 887 break; |
| | 888 case kAVXFloat32Mul: |
| | 889 ASSEMBLE_AVX_BINOP(vmulss); |
| | 890 break; |
| | 891 case kAVXFloat32Div: |
| | 892 ASSEMBLE_AVX_BINOP(vdivss); |
| | 893 break; |
| | 894 case kAVXFloat32Max: |
| | 895 ASSEMBLE_AVX_BINOP(vmaxss); |
| | 896 break; |
| | 897 case kAVXFloat32Min: |
| | 898 ASSEMBLE_AVX_BINOP(vminss); |
| | 899 break; |
| | 900 case kAVXFloat64Cmp: { |
| | 901 CpuFeatureScope avx_scope(masm(), AVX); |
| | 902 if (instr->InputAt(1)->IsDoubleRegister()) { |
| | 903 __ vucomisd(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); |
| | 904 } else { |
| | 905 __ vucomisd(i.InputDoubleRegister(0), i.InputOperand(1)); |
| | 906 } |
| | 907 break; |
| | 908 } |
| 851 case kAVXFloat64Add: | 909 case kAVXFloat64Add: |
| 852 ASSEMBLE_AVX_DOUBLE_BINOP(vaddsd); | 910 ASSEMBLE_AVX_BINOP(vaddsd); |
| 853 break; | 911 break; |
| 854 case kAVXFloat64Sub: | 912 case kAVXFloat64Sub: |
| 855 ASSEMBLE_AVX_DOUBLE_BINOP(vsubsd); | 913 ASSEMBLE_AVX_BINOP(vsubsd); |
| 856 break; | 914 break; |
| 857 case kAVXFloat64Mul: | 915 case kAVXFloat64Mul: |
| 858 ASSEMBLE_AVX_DOUBLE_BINOP(vmulsd); | 916 ASSEMBLE_AVX_BINOP(vmulsd); |
| 859 break; | 917 break; |
| 860 case kAVXFloat64Div: | 918 case kAVXFloat64Div: |
| 861 ASSEMBLE_AVX_DOUBLE_BINOP(vdivsd); | 919 ASSEMBLE_AVX_BINOP(vdivsd); |
| 862 break; | 920 break; |
| 863 case kAVXFloat64Max: | 921 case kAVXFloat64Max: |
| 864 ASSEMBLE_AVX_DOUBLE_BINOP(vmaxsd); | 922 ASSEMBLE_AVX_BINOP(vmaxsd); |
| 865 break; | 923 break; |
| 866 case kAVXFloat64Min: | 924 case kAVXFloat64Min: |
| 867 ASSEMBLE_AVX_DOUBLE_BINOP(vminsd); | 925 ASSEMBLE_AVX_BINOP(vminsd); |
| 868 break; | 926 break; |
| 869 case kX64Movsxbl: | 927 case kX64Movsxbl: |
| 870 ASSEMBLE_MOVX(movsxbl); | 928 ASSEMBLE_MOVX(movsxbl); |
| 871 __ AssertZeroExtended(i.OutputRegister()); | 929 __ AssertZeroExtended(i.OutputRegister()); |
| 872 break; | 930 break; |
| 873 case kX64Movzxbl: | 931 case kX64Movzxbl: |
| 874 ASSEMBLE_MOVX(movzxbl); | 932 ASSEMBLE_MOVX(movzxbl); |
| 875 __ AssertZeroExtended(i.OutputRegister()); | 933 __ AssertZeroExtended(i.OutputRegister()); |
| 876 break; | 934 break; |
| 877 case kX64Movb: { | 935 case kX64Movb: { |
| (...skipping 624 matching lines...) |
| 1502 } | 1560 } |
| 1503 } | 1561 } |
| 1504 MarkLazyDeoptSite(); | 1562 MarkLazyDeoptSite(); |
| 1505 } | 1563 } |
| 1506 | 1564 |
| 1507 #undef __ | 1565 #undef __ |
| 1508 | 1566 |
| 1509 } // namespace internal | 1567 } // namespace internal |
| 1510 } // namespace compiler | 1568 } // namespace compiler |
| 1511 } // namespace v8 | 1569 } // namespace v8 |