Chromium Code Reviews

Side by Side Diff: runtime/vm/assembler_x64_test.cc

Issue 2481873005: clang-format runtime/vm (Closed)
Patch Set: Merge (created 4 years, 1 month ago)
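Note (not part of the reviewed file): the hunks below are pure reformatting. Long statements are repacked up to the 80-column limit, braced initializers lose their inner padding ({ 1.0, 2.0 } becomes {1.0, 2.0}), and delete [] becomes delete[]. A minimal, self-contained C++ sketch of those conventions, using made-up names (SumDigitsBelowLimit, kConstant) that do not appear in this CL:

#include <cstdint>

// The old style in this file wrapped each clause of a long for-header onto
// its own line; clang-format packs clauses up to the 80-column limit and
// indents any continuation to the opening parenthesis.
static int SumDigitsBelowLimit(const int32_t* digits, int digit_count,
                               int32_t upper_limit) {
  int total = 0;
  for (int index = 0; index < digit_count && digits[index] < upper_limit;
       index = index + 1) {
    total += digits[index];
  }
  return total;
}

// Braced initializers are written without inner spaces: {1.0, 2.0}.
static const struct {
  double a;
  double b;
} kConstant = {1.0, 2.0};

int main() {
  int32_t* digits = new int32_t[3]{1, 2, 3};
  const int sum = SumDigitsBelowLimit(digits, 3, 10);
  // The old "delete [] digits" spelling becomes "delete[] digits".
  delete[] digits;
  return (sum == 6 && kConstant.a == 1.0) ? 0 : 1;
}

Presumably the new column was produced by running the formatter over the file (for example, clang-format -i runtime/vm/assembler_x64_test.cc with the repository's .clang-format); nothing in the hunks changes the assembled instructions or the expected values.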
1 // Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file 1 // Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file
2 // for details. All rights reserved. Use of this source code is governed by a 2 // for details. All rights reserved. Use of this source code is governed by a
3 // BSD-style license that can be found in the LICENSE file. 3 // BSD-style license that can be found in the LICENSE file.
4 4
5 #include "vm/globals.h" 5 #include "vm/globals.h"
6 #if defined(TARGET_ARCH_X64) 6 #if defined(TARGET_ARCH_X64)
7 7
8 #include "vm/assembler.h" 8 #include "vm/assembler.h"
9 #include "vm/os.h" 9 #include "vm/os.h"
10 #include "vm/unit_test.h" 10 #include "vm/unit_test.h"
(...skipping 215 matching lines...)
226 226
227 227
228 ASSEMBLER_TEST_RUN(AddressingModes, test) { 228 ASSEMBLER_TEST_RUN(AddressingModes, test) {
229 // Avoid running the code since it is constructed to lead to crashes. 229 // Avoid running the code since it is constructed to lead to crashes.
230 } 230 }
231 231
232 232
233 ASSEMBLER_TEST_GENERATE(JumpAroundCrash, assembler) { 233 ASSEMBLER_TEST_GENERATE(JumpAroundCrash, assembler) {
234 Label done; 234 Label done;
235 // Make sure all the condition jumps work. 235 // Make sure all the condition jumps work.
236 for (Condition condition = OVERFLOW; 236 for (Condition condition = OVERFLOW; condition <= GREATER;
237 condition <= GREATER;
238 condition = static_cast<Condition>(condition + 1)) { 237 condition = static_cast<Condition>(condition + 1)) {
239 __ j(condition, &done); 238 __ j(condition, &done);
240 } 239 }
241 // This isn't strictly necessary, but we do an unconditional 240 // This isn't strictly necessary, but we do an unconditional
242 // jump around the crashing code anyway. 241 // jump around the crashing code anyway.
243 __ jmp(&done); 242 __ jmp(&done);
244 243
245 // Be sure to skip this crashing code. 244 // Be sure to skip this crashing code.
246 __ movq(RAX, Immediate(0)); 245 __ movq(RAX, Immediate(0));
247 __ movq(Address(RAX, 0), RAX); 246 __ movq(Address(RAX, 0), RAX);
(...skipping 158 matching lines...)
406 405
407 ASSEMBLER_TEST_RUN(SignedMultiply, test) { 406 ASSEMBLER_TEST_RUN(SignedMultiply, test) {
408 typedef int (*SignedMultiply)(); 407 typedef int (*SignedMultiply)();
409 EXPECT_EQ(8000, reinterpret_cast<SignedMultiply>(test->entry())()); 408 EXPECT_EQ(8000, reinterpret_cast<SignedMultiply>(test->entry())());
410 } 409 }
411 410
412 411
413 ASSEMBLER_TEST_GENERATE(UnsignedMultiply, assembler) { 412 ASSEMBLER_TEST_GENERATE(UnsignedMultiply, assembler) {
414 __ movl(RAX, Immediate(-1)); // RAX = 0xFFFFFFFF 413 __ movl(RAX, Immediate(-1)); // RAX = 0xFFFFFFFF
415 __ movl(RCX, Immediate(16)); // RCX = 0x10 414 __ movl(RCX, Immediate(16)); // RCX = 0x10
416 __ mull(RCX); // RDX:RAX = RAX * RCX = 0x0FFFFFFFF0 415 __ mull(RCX); // RDX:RAX = RAX * RCX = 0x0FFFFFFFF0
417 __ movq(RAX, RDX); // Return high32(0x0FFFFFFFF0) == 0x0F 416 __ movq(RAX, RDX); // Return high32(0x0FFFFFFFF0) == 0x0F
418 __ ret(); 417 __ ret();
419 } 418 }
420 419
421 420
422 ASSEMBLER_TEST_RUN(UnsignedMultiply, test) { 421 ASSEMBLER_TEST_RUN(UnsignedMultiply, test) {
423 typedef int (*UnsignedMultiply)(); 422 typedef int (*UnsignedMultiply)();
424 EXPECT_EQ(15, reinterpret_cast<UnsignedMultiply>(test->entry())()); 423 EXPECT_EQ(15, reinterpret_cast<UnsignedMultiply>(test->entry())());
425 } 424 }
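Aside (not part of the reviewed change): the comments in the generator above state that mull leaves the 64-bit product of two 32-bit operands in RDX:RAX and that the test returns the high half. A quick host-side check of that arithmetic, in plain standard C++:

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t rax = 0xFFFFFFFFu;  // movl(RAX, Immediate(-1))
  const uint32_t rcx = 16u;          // movl(RCX, Immediate(16))
  // mull(RCX) leaves the full 64-bit product in RDX:RAX.
  const uint64_t product = static_cast<uint64_t>(rax) * rcx;  // 0xFFFFFFFF0
  assert((product >> 32) == 0xF);  // high 32 bits == 15, matching EXPECT_EQ(15, ...)
  return 0;
}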
426 425
427 426
(...skipping 99 matching lines...)
527 526
528 ASSEMBLER_TEST_RUN(SignedMultiply2, test) { 527 ASSEMBLER_TEST_RUN(SignedMultiply2, test) {
529 typedef int (*SignedMultiply2)(); 528 typedef int (*SignedMultiply2)();
530 EXPECT_EQ(2000, reinterpret_cast<SignedMultiply2>(test->entry())()); 529 EXPECT_EQ(2000, reinterpret_cast<SignedMultiply2>(test->entry())());
531 } 530 }
532 531
533 532
534 ASSEMBLER_TEST_GENERATE(UnsignedMultiplyLong, assembler) { 533 ASSEMBLER_TEST_GENERATE(UnsignedMultiplyLong, assembler) {
535 __ movq(RAX, Immediate(-1)); // RAX = 0xFFFFFFFFFFFFFFFF 534 __ movq(RAX, Immediate(-1)); // RAX = 0xFFFFFFFFFFFFFFFF
536 __ movq(RCX, Immediate(16)); // RCX = 0x10 535 __ movq(RCX, Immediate(16)); // RCX = 0x10
537 __ mulq(RCX); // RDX:RAX = RAX * RCX = 0x0FFFFFFFFFFFFFFFF0 536 __ mulq(RCX); // RDX:RAX = RAX * RCX = 0x0FFFFFFFFFFFFFFFF0
538 __ movq(RAX, RDX); // Return high64(0x0FFFFFFFFFFFFFFFF0) == 0x0F 537 __ movq(RAX, RDX); // Return high64(0x0FFFFFFFFFFFFFFFF0) == 0x0F
539 __ ret(); 538 __ ret();
540 } 539 }
541 540
542 541
543 ASSEMBLER_TEST_RUN(UnsignedMultiplyLong, test) { 542 ASSEMBLER_TEST_RUN(UnsignedMultiplyLong, test) {
544 typedef int64_t (*UnsignedMultiplyLong)(); 543 typedef int64_t (*UnsignedMultiplyLong)();
545 EXPECT_EQ(15, reinterpret_cast<UnsignedMultiplyLong>(test->entry())()); 544 EXPECT_EQ(15, reinterpret_cast<UnsignedMultiplyLong>(test->entry())());
546 } 545 }
547 546
548 547
(...skipping 245 matching lines...)
794 b = 600000; 793 b = 600000;
795 res = reinterpret_cast<LongAddRegCode>(test->entry())(a, b); 794 res = reinterpret_cast<LongAddRegCode>(test->entry())(a, b);
796 EXPECT_EQ((a + b), res); 795 EXPECT_EQ((a + b), res);
797 } 796 }
798 797
799 798
800 ASSEMBLER_TEST_GENERATE(LongAddImmediate, assembler) { 799 ASSEMBLER_TEST_GENERATE(LongAddImmediate, assembler) {
801 __ pushq(CallingConventions::kArg1Reg); 800 __ pushq(CallingConventions::kArg1Reg);
802 __ movl(RAX, Address(RSP, 0)); // left low. 801 __ movl(RAX, Address(RSP, 0)); // left low.
803 __ movl(RDX, Address(RSP, 4)); // left high. 802 __ movl(RDX, Address(RSP, 4)); // left high.
804 __ addl(RAX, Immediate(12)); // right low immediate. 803 __ addl(RAX, Immediate(12)); // right low immediate.
805 __ adcl(RDX, Immediate(11)); // right high immediate. 804 __ adcl(RDX, Immediate(11)); // right high immediate.
806 // Result is in RAX/RDX. 805 // Result is in RAX/RDX.
807 __ movl(Address(RSP, 0), RAX); // result low. 806 __ movl(Address(RSP, 0), RAX); // result low.
808 __ movl(Address(RSP, 4), RDX); // result high. 807 __ movl(Address(RSP, 4), RDX); // result high.
809 __ popq(RAX); 808 __ popq(RAX);
810 __ ret(); 809 __ ret();
811 } 810 }
812 811
813 812
814 ASSEMBLER_TEST_RUN(LongAddImmediate, test) { 813 ASSEMBLER_TEST_RUN(LongAddImmediate, test) {
815 typedef int64_t (*LongAddImmediateCode)(int64_t a); 814 typedef int64_t (*LongAddImmediateCode)(int64_t a);
816 int64_t a = (13LL << 32) + 14; 815 int64_t a = (13LL << 32) + 14;
817 int64_t b = (11LL << 32) + 12; 816 int64_t b = (11LL << 32) + 12;
818 int64_t res = reinterpret_cast<LongAddImmediateCode>(test->entry())(a); 817 int64_t res = reinterpret_cast<LongAddImmediateCode>(test->entry())(a);
819 EXPECT_EQ((a + b), res); 818 EXPECT_EQ((a + b), res);
820 a = (13LL << 32) - 1; 819 a = (13LL << 32) - 1;
821 res = reinterpret_cast<LongAddImmediateCode>(test->entry())(a); 820 res = reinterpret_cast<LongAddImmediateCode>(test->entry())(a);
822 EXPECT_EQ((a + b), res); 821 EXPECT_EQ((a + b), res);
823 } 822 }
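Aside (not part of the CL): LongAddImmediate adds the two 32-bit halves separately, addl on the low half and then adcl on the high half so the carry propagates; that carry path is exactly what the second test input (low half 0xFFFFFFFF) exercises. A small host-side model of that split; the helper name AddViaHalves is invented purely for this illustration:

#include <cassert>
#include <cstdint>

static int64_t AddViaHalves(int64_t a, uint32_t b_low, uint32_t b_high) {
  const uint32_t a_low = static_cast<uint32_t>(a);
  const uint32_t a_high = static_cast<uint32_t>(static_cast<uint64_t>(a) >> 32);
  const uint64_t low_sum = static_cast<uint64_t>(a_low) + b_low;  // addl
  const uint32_t carry = static_cast<uint32_t>(low_sum >> 32);    // carry flag
  const uint32_t high_sum = a_high + b_high + carry;              // adcl
  return static_cast<int64_t>((static_cast<uint64_t>(high_sum) << 32) |
                              static_cast<uint32_t>(low_sum));
}

int main() {
  const int64_t b = (11LL << 32) + 12;
  int64_t a = (13LL << 32) + 14;
  assert(AddViaHalves(a, 12, 11) == a + b);  // no carry out of the low half
  a = (13LL << 32) - 1;  // low half is 0xFFFFFFFF, so adding 12 carries
  assert(AddViaHalves(a, 12, 11) == a + b);  // adcl picks up the carry
  return 0;
}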
824 823
825 824
826 ASSEMBLER_TEST_GENERATE(LongAddAddress, assembler) { 825 ASSEMBLER_TEST_GENERATE(LongAddAddress, assembler) {
827 __ pushq(CallingConventions::kArg2Reg); 826 __ pushq(CallingConventions::kArg2Reg);
828 __ pushq(CallingConventions::kArg1Reg); 827 __ pushq(CallingConventions::kArg1Reg);
829 __ movl(RAX, Address(RSP, 0)); // left low. 828 __ movl(RAX, Address(RSP, 0)); // left low.
830 __ movl(RDX, Address(RSP, 4)); // left high. 829 __ movl(RDX, Address(RSP, 4)); // left high.
831 __ addl(RAX, Address(RSP, 8)); // low. 830 __ addl(RAX, Address(RSP, 8)); // low.
832 __ adcl(RDX, Address(RSP, 12)); // high. 831 __ adcl(RDX, Address(RSP, 12)); // high.
833 // Result is in RAX/RDX. 832 // Result is in RAX/RDX.
834 __ movl(Address(RSP, 0), RAX); // result low. 833 __ movl(Address(RSP, 0), RAX); // result low.
835 __ movl(Address(RSP, 4), RDX); // result high. 834 __ movl(Address(RSP, 4), RDX); // result high.
836 __ popq(RAX); 835 __ popq(RAX);
837 __ popq(RDX); 836 __ popq(RDX);
838 __ ret(); 837 __ ret();
839 } 838 }
840 839
841 840
(...skipping 38 matching lines...)
880 b = 2147483647; 879 b = 2147483647;
881 res = reinterpret_cast<LongSubRegCode>(test->entry())(a, b); 880 res = reinterpret_cast<LongSubRegCode>(test->entry())(a, b);
882 EXPECT_EQ((a - b), res); 881 EXPECT_EQ((a - b), res);
883 } 882 }
884 883
885 884
886 ASSEMBLER_TEST_GENERATE(LongSubImmediate, assembler) { 885 ASSEMBLER_TEST_GENERATE(LongSubImmediate, assembler) {
887 __ pushq(CallingConventions::kArg1Reg); 886 __ pushq(CallingConventions::kArg1Reg);
888 __ movl(RAX, Address(RSP, 0)); // left low. 887 __ movl(RAX, Address(RSP, 0)); // left low.
889 __ movl(RDX, Address(RSP, 4)); // left high. 888 __ movl(RDX, Address(RSP, 4)); // left high.
890 __ subl(RAX, Immediate(12)); // right low immediate. 889 __ subl(RAX, Immediate(12)); // right low immediate.
891 __ sbbl(RDX, Immediate(11)); // right high immediate. 890 __ sbbl(RDX, Immediate(11)); // right high immediate.
892 // Result is in RAX/RDX. 891 // Result is in RAX/RDX.
893 __ movl(Address(RSP, 0), RAX); // result low. 892 __ movl(Address(RSP, 0), RAX); // result low.
894 __ movl(Address(RSP, 4), RDX); // result high. 893 __ movl(Address(RSP, 4), RDX); // result high.
895 __ popq(RAX); 894 __ popq(RAX);
896 __ ret(); 895 __ ret();
897 } 896 }
898 897
899 898
900 ASSEMBLER_TEST_RUN(LongSubImmediate, test) { 899 ASSEMBLER_TEST_RUN(LongSubImmediate, test) {
901 typedef int64_t (*LongSubImmediateCode)(int64_t a); 900 typedef int64_t (*LongSubImmediateCode)(int64_t a);
902 int64_t a = (13LL << 32) + 14; 901 int64_t a = (13LL << 32) + 14;
903 int64_t b = (11LL << 32) + 12; 902 int64_t b = (11LL << 32) + 12;
904 int64_t res = reinterpret_cast<LongSubImmediateCode>(test->entry())(a); 903 int64_t res = reinterpret_cast<LongSubImmediateCode>(test->entry())(a);
905 EXPECT_EQ((a - b), res); 904 EXPECT_EQ((a - b), res);
906 a = (13LL << 32) + 10; 905 a = (13LL << 32) + 10;
907 res = reinterpret_cast<LongSubImmediateCode>(test->entry())(a); 906 res = reinterpret_cast<LongSubImmediateCode>(test->entry())(a);
908 EXPECT_EQ((a - b), res); 907 EXPECT_EQ((a - b), res);
909 } 908 }
910 909
911 910
912 ASSEMBLER_TEST_GENERATE(LongSubAddress, assembler) { 911 ASSEMBLER_TEST_GENERATE(LongSubAddress, assembler) {
913 __ pushq(CallingConventions::kArg2Reg); 912 __ pushq(CallingConventions::kArg2Reg);
914 __ pushq(CallingConventions::kArg1Reg); 913 __ pushq(CallingConventions::kArg1Reg);
915 __ movl(RAX, Address(RSP, 0)); // left low. 914 __ movl(RAX, Address(RSP, 0)); // left low.
916 __ movl(RDX, Address(RSP, 4)); // left high. 915 __ movl(RDX, Address(RSP, 4)); // left high.
917 __ subl(RAX, Address(RSP, 8)); // low. 916 __ subl(RAX, Address(RSP, 8)); // low.
918 __ sbbl(RDX, Address(RSP, 12)); // high. 917 __ sbbl(RDX, Address(RSP, 12)); // high.
919 // Result is in RAX/RDX. 918 // Result is in RAX/RDX.
920 __ movl(Address(RSP, 0), RAX); // result low. 919 __ movl(Address(RSP, 0), RAX); // result low.
921 __ movl(Address(RSP, 4), RDX); // result high. 920 __ movl(Address(RSP, 4), RDX); // result high.
922 __ popq(RAX); 921 __ popq(RAX);
923 __ popq(RDX); 922 __ popq(RDX);
924 __ ret(); 923 __ ret();
925 } 924 }
926 925
927 926
(...skipping 29 matching lines...)
957 int64_t res = reinterpret_cast<AddRegCode>(test->entry())(al, ah, bl, bh); 956 int64_t res = reinterpret_cast<AddRegCode>(test->entry())(al, ah, bl, bh);
958 EXPECT_EQ((ah + bh), res); 957 EXPECT_EQ((ah + bh), res);
959 al = -1; 958 al = -1;
960 res = reinterpret_cast<AddRegCode>(test->entry())(al, ah, bl, bh); 959 res = reinterpret_cast<AddRegCode>(test->entry())(al, ah, bl, bh);
961 EXPECT_EQ((ah + bh + 1), res); 960 EXPECT_EQ((ah + bh + 1), res);
962 } 961 }
963 962
964 963
965 ASSEMBLER_TEST_GENERATE(AddImmediate, assembler) { 964 ASSEMBLER_TEST_GENERATE(AddImmediate, assembler) {
966 __ movq(R10, CallingConventions::kArg1Reg); // al. 965 __ movq(R10, CallingConventions::kArg1Reg); // al.
967 __ addq(R10, Immediate(13)); // bl. 966 __ addq(R10, Immediate(13)); // bl.
968 __ movq(RAX, CallingConventions::kArg2Reg); // ah. 967 __ movq(RAX, CallingConventions::kArg2Reg); // ah.
969 __ adcq(RAX, Immediate(14)); // bh. 968 __ adcq(RAX, Immediate(14)); // bh.
970 // RAX = high64(ah:al + bh:bl). 969 // RAX = high64(ah:al + bh:bl).
971 __ ret(); 970 __ ret();
972 } 971 }
973 972
974 973
975 ASSEMBLER_TEST_RUN(AddImmediate, test) { 974 ASSEMBLER_TEST_RUN(AddImmediate, test) {
976 typedef int64_t (*AddImmediateCode)(int64_t al, int64_t ah); 975 typedef int64_t (*AddImmediateCode)(int64_t al, int64_t ah);
977 int64_t al = 11; 976 int64_t al = 11;
978 int64_t ah = 12; 977 int64_t ah = 12;
979 int64_t bh = 14; 978 int64_t bh = 14;
(...skipping 53 matching lines...)
1033 int64_t res = reinterpret_cast<SubRegCode>(test->entry())(al, ah, bl, bh); 1032 int64_t res = reinterpret_cast<SubRegCode>(test->entry())(al, ah, bl, bh);
1034 EXPECT_EQ((ah - bh), res); 1033 EXPECT_EQ((ah - bh), res);
1035 al = 10; 1034 al = 10;
1036 res = reinterpret_cast<SubRegCode>(test->entry())(al, ah, bl, bh); 1035 res = reinterpret_cast<SubRegCode>(test->entry())(al, ah, bl, bh);
1037 EXPECT_EQ((ah - bh - 1), res); 1036 EXPECT_EQ((ah - bh - 1), res);
1038 } 1037 }
1039 1038
1040 1039
1041 ASSEMBLER_TEST_GENERATE(SubImmediate, assembler) { 1040 ASSEMBLER_TEST_GENERATE(SubImmediate, assembler) {
1042 __ movq(R10, CallingConventions::kArg1Reg); // al. 1041 __ movq(R10, CallingConventions::kArg1Reg); // al.
1043 __ subq(R10, Immediate(12)); // bl. 1042 __ subq(R10, Immediate(12)); // bl.
1044 __ movq(RAX, CallingConventions::kArg2Reg); // ah. 1043 __ movq(RAX, CallingConventions::kArg2Reg); // ah.
1045 __ sbbq(RAX, Immediate(11)); // bh. 1044 __ sbbq(RAX, Immediate(11)); // bh.
1046 // RAX = high64(ah:al - bh:bl). 1045 // RAX = high64(ah:al - bh:bl).
1047 __ ret(); 1046 __ ret();
1048 } 1047 }
1049 1048
1050 1049
1051 ASSEMBLER_TEST_RUN(SubImmediate, test) { 1050 ASSEMBLER_TEST_RUN(SubImmediate, test) {
1052 typedef int64_t (*SubImmediateCode)(int64_t al, int64_t ah); 1051 typedef int64_t (*SubImmediateCode)(int64_t al, int64_t ah);
1053 int64_t al = 14; 1052 int64_t al = 14;
1054 int64_t ah = 13; 1053 int64_t ah = 13;
1055 int64_t bh = 11; 1054 int64_t bh = 11;
(...skipping 748 matching lines...)
1804 1803
1805 ASSEMBLER_TEST_RUN(LargeConstant, test) { 1804 ASSEMBLER_TEST_RUN(LargeConstant, test) {
1806 typedef int64_t (*LargeConstantCode)(); 1805 typedef int64_t (*LargeConstantCode)();
1807 EXPECT_EQ(kLargeConstant, 1806 EXPECT_EQ(kLargeConstant,
1808 reinterpret_cast<LargeConstantCode>(test->entry())()); 1807 reinterpret_cast<LargeConstantCode>(test->entry())());
1809 } 1808 }
1810 1809
1811 1810
1812 static int ComputeStackSpaceReservation(int needed, int fixed) { 1811 static int ComputeStackSpaceReservation(int needed, int fixed) {
1813 return (OS::ActivationFrameAlignment() > 1) 1812 return (OS::ActivationFrameAlignment() > 1)
1814 ? Utils::RoundUp(needed + fixed, OS::ActivationFrameAlignment()) - fixed 1813 ? Utils::RoundUp(needed + fixed, OS::ActivationFrameAlignment()) -
1815 : needed; 1814 fixed
1815 : needed;
1816 } 1816 }
1817 1817
1818 1818
1819 static int LeafReturn42() { 1819 static int LeafReturn42() {
1820 return 42; 1820 return 42;
1821 } 1821 }
1822 1822
1823 1823
1824 static int LeafReturnArgument(int x) { 1824 static int LeafReturnArgument(int x) {
1825 return x + 87; 1825 return x + 87;
(...skipping 139 matching lines...)
1965 ASSEMBLER_TEST_RUN(SingleFPMoves2, test) { 1965 ASSEMBLER_TEST_RUN(SingleFPMoves2, test) {
1966 typedef float (*SingleFPMoves2Code)(); 1966 typedef float (*SingleFPMoves2Code)();
1967 EXPECT_EQ(234, reinterpret_cast<SingleFPMoves2Code>(test->entry())()); 1967 EXPECT_EQ(234, reinterpret_cast<SingleFPMoves2Code>(test->entry())());
1968 } 1968 }
1969 1969
1970 1970
1971 ASSEMBLER_TEST_GENERATE(PackedDoubleAdd, assembler) { 1971 ASSEMBLER_TEST_GENERATE(PackedDoubleAdd, assembler) {
1972 static const struct ALIGN16 { 1972 static const struct ALIGN16 {
1973 double a; 1973 double a;
1974 double b; 1974 double b;
1975 } constant0 = { 1.0, 2.0 }; 1975 } constant0 = {1.0, 2.0};
1976 static const struct ALIGN16 { 1976 static const struct ALIGN16 {
1977 double a; 1977 double a;
1978 double b; 1978 double b;
1979 } constant1 = { 3.0, 4.0 }; 1979 } constant1 = {3.0, 4.0};
1980 __ movq(RAX, Immediate(reinterpret_cast<uword>(&constant0))); 1980 __ movq(RAX, Immediate(reinterpret_cast<uword>(&constant0)));
1981 __ movups(XMM10, Address(RAX, 0)); 1981 __ movups(XMM10, Address(RAX, 0));
1982 __ movq(RAX, Immediate(reinterpret_cast<uword>(&constant1))); 1982 __ movq(RAX, Immediate(reinterpret_cast<uword>(&constant1)));
1983 __ movups(XMM11, Address(RAX, 0)); 1983 __ movups(XMM11, Address(RAX, 0));
1984 __ addpd(XMM10, XMM11); 1984 __ addpd(XMM10, XMM11);
1985 __ movaps(XMM0, XMM10); 1985 __ movaps(XMM0, XMM10);
1986 __ ret(); 1986 __ ret();
1987 } 1987 }
1988 1988
1989 1989
1990 ASSEMBLER_TEST_RUN(PackedDoubleAdd, test) { 1990 ASSEMBLER_TEST_RUN(PackedDoubleAdd, test) {
1991 typedef double (*PackedDoubleAdd)(); 1991 typedef double (*PackedDoubleAdd)();
1992 double res = reinterpret_cast<PackedDoubleAdd>(test->entry())(); 1992 double res = reinterpret_cast<PackedDoubleAdd>(test->entry())();
1993 EXPECT_FLOAT_EQ(4.0, res, 0.000001f); 1993 EXPECT_FLOAT_EQ(4.0, res, 0.000001f);
1994 } 1994 }
1995 1995
1996 1996
1997 ASSEMBLER_TEST_GENERATE(PackedDoubleSub, assembler) { 1997 ASSEMBLER_TEST_GENERATE(PackedDoubleSub, assembler) {
1998 static const struct ALIGN16 { 1998 static const struct ALIGN16 {
1999 double a; 1999 double a;
2000 double b; 2000 double b;
2001 } constant0 = { 1.0, 2.0 }; 2001 } constant0 = {1.0, 2.0};
2002 static const struct ALIGN16 { 2002 static const struct ALIGN16 {
2003 double a; 2003 double a;
2004 double b; 2004 double b;
2005 } constant1 = { 3.0, 4.0 }; 2005 } constant1 = {3.0, 4.0};
2006 __ movq(RAX, Immediate(reinterpret_cast<uword>(&constant0))); 2006 __ movq(RAX, Immediate(reinterpret_cast<uword>(&constant0)));
2007 __ movups(XMM10, Address(RAX, 0)); 2007 __ movups(XMM10, Address(RAX, 0));
2008 __ movq(RAX, Immediate(reinterpret_cast<uword>(&constant1))); 2008 __ movq(RAX, Immediate(reinterpret_cast<uword>(&constant1)));
2009 __ movups(XMM11, Address(RAX, 0)); 2009 __ movups(XMM11, Address(RAX, 0));
2010 __ subpd(XMM10, XMM11); 2010 __ subpd(XMM10, XMM11);
2011 __ movaps(XMM0, XMM10); 2011 __ movaps(XMM0, XMM10);
2012 __ ret(); 2012 __ ret();
2013 } 2013 }
2014 2014
2015 2015
(...skipping 23 matching lines...)
2039 __ popq(PP); 2039 __ popq(PP);
2040 __ popq(CODE_REG); 2040 __ popq(CODE_REG);
2041 __ LeaveFrame(); 2041 __ LeaveFrame();
2042 } 2042 }
2043 2043
2044 2044
2045 ASSEMBLER_TEST_GENERATE(PackedDoubleNegate, assembler) { 2045 ASSEMBLER_TEST_GENERATE(PackedDoubleNegate, assembler) {
2046 static const struct ALIGN16 { 2046 static const struct ALIGN16 {
2047 double a; 2047 double a;
2048 double b; 2048 double b;
2049 } constant0 = { 1.0, 2.0 }; 2049 } constant0 = {1.0, 2.0};
2050 EnterTestFrame(assembler); 2050 EnterTestFrame(assembler);
2051 __ movq(RAX, Immediate(reinterpret_cast<uword>(&constant0))); 2051 __ movq(RAX, Immediate(reinterpret_cast<uword>(&constant0)));
2052 __ movups(XMM10, Address(RAX, 0)); 2052 __ movups(XMM10, Address(RAX, 0));
2053 __ negatepd(XMM10); 2053 __ negatepd(XMM10);
2054 __ movaps(XMM0, XMM10); 2054 __ movaps(XMM0, XMM10);
2055 LeaveTestFrame(assembler); 2055 LeaveTestFrame(assembler);
2056 __ ret(); 2056 __ ret();
2057 } 2057 }
2058 2058
2059 2059
2060 ASSEMBLER_TEST_RUN(PackedDoubleNegate, test) { 2060 ASSEMBLER_TEST_RUN(PackedDoubleNegate, test) {
2061 double res = test->InvokeWithCodeAndThread<double>(); 2061 double res = test->InvokeWithCodeAndThread<double>();
2062 EXPECT_FLOAT_EQ(-1.0, res, 0.000001f); 2062 EXPECT_FLOAT_EQ(-1.0, res, 0.000001f);
2063 } 2063 }
2064 2064
2065 2065
2066 ASSEMBLER_TEST_GENERATE(PackedDoubleAbsolute, assembler) { 2066 ASSEMBLER_TEST_GENERATE(PackedDoubleAbsolute, assembler) {
2067 static const struct ALIGN16 { 2067 static const struct ALIGN16 {
2068 double a; 2068 double a;
2069 double b; 2069 double b;
2070 } constant0 = { -1.0, 2.0 }; 2070 } constant0 = {-1.0, 2.0};
2071 EnterTestFrame(assembler); 2071 EnterTestFrame(assembler);
2072 __ movq(RAX, Immediate(reinterpret_cast<uword>(&constant0))); 2072 __ movq(RAX, Immediate(reinterpret_cast<uword>(&constant0)));
2073 __ movups(XMM10, Address(RAX, 0)); 2073 __ movups(XMM10, Address(RAX, 0));
2074 __ abspd(XMM10); 2074 __ abspd(XMM10);
2075 __ movaps(XMM0, XMM10); 2075 __ movaps(XMM0, XMM10);
2076 LeaveTestFrame(assembler); 2076 LeaveTestFrame(assembler);
2077 __ ret(); 2077 __ ret();
2078 } 2078 }
2079 2079
2080 2080
2081 ASSEMBLER_TEST_RUN(PackedDoubleAbsolute, test) { 2081 ASSEMBLER_TEST_RUN(PackedDoubleAbsolute, test) {
2082 double res = test->InvokeWithCodeAndThread<double>(); 2082 double res = test->InvokeWithCodeAndThread<double>();
2083 EXPECT_FLOAT_EQ(1.0, res, 0.000001f); 2083 EXPECT_FLOAT_EQ(1.0, res, 0.000001f);
2084 } 2084 }
2085 2085
2086 2086
2087 ASSEMBLER_TEST_GENERATE(PackedDoubleMul, assembler) { 2087 ASSEMBLER_TEST_GENERATE(PackedDoubleMul, assembler) {
2088 static const struct ALIGN16 { 2088 static const struct ALIGN16 {
2089 double a; 2089 double a;
2090 double b; 2090 double b;
2091 } constant0 = { 3.0, 2.0 }; 2091 } constant0 = {3.0, 2.0};
2092 static const struct ALIGN16 { 2092 static const struct ALIGN16 {
2093 double a; 2093 double a;
2094 double b; 2094 double b;
2095 } constant1 = { 3.0, 4.0 }; 2095 } constant1 = {3.0, 4.0};
2096 __ movq(RAX, Immediate(reinterpret_cast<uword>(&constant0))); 2096 __ movq(RAX, Immediate(reinterpret_cast<uword>(&constant0)));
2097 __ movups(XMM10, Address(RAX, 0)); 2097 __ movups(XMM10, Address(RAX, 0));
2098 __ movq(RAX, Immediate(reinterpret_cast<uword>(&constant1))); 2098 __ movq(RAX, Immediate(reinterpret_cast<uword>(&constant1)));
2099 __ movups(XMM11, Address(RAX, 0)); 2099 __ movups(XMM11, Address(RAX, 0));
2100 __ mulpd(XMM10, XMM11); 2100 __ mulpd(XMM10, XMM11);
2101 __ movaps(XMM0, XMM10); 2101 __ movaps(XMM0, XMM10);
2102 __ ret(); 2102 __ ret();
2103 } 2103 }
2104 2104
2105 2105
2106 ASSEMBLER_TEST_RUN(PackedDoubleMul, test) { 2106 ASSEMBLER_TEST_RUN(PackedDoubleMul, test) {
2107 typedef double (*PackedDoubleMul)(); 2107 typedef double (*PackedDoubleMul)();
2108 double res = reinterpret_cast<PackedDoubleMul>(test->entry())(); 2108 double res = reinterpret_cast<PackedDoubleMul>(test->entry())();
2109 EXPECT_FLOAT_EQ(9.0, res, 0.000001f); 2109 EXPECT_FLOAT_EQ(9.0, res, 0.000001f);
2110 } 2110 }
2111 2111
2112 2112
2113 ASSEMBLER_TEST_GENERATE(PackedDoubleDiv, assembler) { 2113 ASSEMBLER_TEST_GENERATE(PackedDoubleDiv, assembler) {
2114 static const struct ALIGN16 { 2114 static const struct ALIGN16 {
2115 double a; 2115 double a;
2116 double b; 2116 double b;
2117 } constant0 = { 9.0, 2.0 }; 2117 } constant0 = {9.0, 2.0};
2118 static const struct ALIGN16 { 2118 static const struct ALIGN16 {
2119 double a; 2119 double a;
2120 double b; 2120 double b;
2121 } constant1 = { 3.0, 4.0 }; 2121 } constant1 = {3.0, 4.0};
2122 __ movq(RAX, Immediate(reinterpret_cast<uword>(&constant0))); 2122 __ movq(RAX, Immediate(reinterpret_cast<uword>(&constant0)));
2123 __ movups(XMM10, Address(RAX, 0)); 2123 __ movups(XMM10, Address(RAX, 0));
2124 __ movq(RAX, Immediate(reinterpret_cast<uword>(&constant1))); 2124 __ movq(RAX, Immediate(reinterpret_cast<uword>(&constant1)));
2125 __ movups(XMM11, Address(RAX, 0)); 2125 __ movups(XMM11, Address(RAX, 0));
2126 __ divpd(XMM10, XMM11); 2126 __ divpd(XMM10, XMM11);
2127 __ movaps(XMM0, XMM10); 2127 __ movaps(XMM0, XMM10);
2128 __ ret(); 2128 __ ret();
2129 } 2129 }
2130 2130
2131 2131
2132 ASSEMBLER_TEST_RUN(PackedDoubleDiv, test) { 2132 ASSEMBLER_TEST_RUN(PackedDoubleDiv, test) {
2133 typedef double (*PackedDoubleDiv)(); 2133 typedef double (*PackedDoubleDiv)();
2134 double res = reinterpret_cast<PackedDoubleDiv>(test->entry())(); 2134 double res = reinterpret_cast<PackedDoubleDiv>(test->entry())();
2135 EXPECT_FLOAT_EQ(3.0, res, 0.000001f); 2135 EXPECT_FLOAT_EQ(3.0, res, 0.000001f);
2136 } 2136 }
2137 2137
2138 2138
2139 ASSEMBLER_TEST_GENERATE(PackedDoubleSqrt, assembler) { 2139 ASSEMBLER_TEST_GENERATE(PackedDoubleSqrt, assembler) {
2140 static const struct ALIGN16 { 2140 static const struct ALIGN16 {
2141 double a; 2141 double a;
2142 double b; 2142 double b;
2143 } constant0 = { 16.0, 2.0 }; 2143 } constant0 = {16.0, 2.0};
2144 __ movq(RAX, Immediate(reinterpret_cast<uword>(&constant0))); 2144 __ movq(RAX, Immediate(reinterpret_cast<uword>(&constant0)));
2145 __ movups(XMM10, Address(RAX, 0)); 2145 __ movups(XMM10, Address(RAX, 0));
2146 __ sqrtpd(XMM10); 2146 __ sqrtpd(XMM10);
2147 __ movaps(XMM0, XMM10); 2147 __ movaps(XMM0, XMM10);
2148 __ ret(); 2148 __ ret();
2149 } 2149 }
2150 2150
2151 2151
2152 ASSEMBLER_TEST_RUN(PackedDoubleSqrt, test) { 2152 ASSEMBLER_TEST_RUN(PackedDoubleSqrt, test) {
2153 typedef double (*PackedDoubleSqrt)(); 2153 typedef double (*PackedDoubleSqrt)();
2154 double res = reinterpret_cast<PackedDoubleSqrt>(test->entry())(); 2154 double res = reinterpret_cast<PackedDoubleSqrt>(test->entry())();
2155 EXPECT_FLOAT_EQ(4.0, res, 0.000001f); 2155 EXPECT_FLOAT_EQ(4.0, res, 0.000001f);
2156 } 2156 }
2157 2157
2158 2158
2159 ASSEMBLER_TEST_GENERATE(PackedDoubleMin, assembler) { 2159 ASSEMBLER_TEST_GENERATE(PackedDoubleMin, assembler) {
2160 static const struct ALIGN16 { 2160 static const struct ALIGN16 {
2161 double a; 2161 double a;
2162 double b; 2162 double b;
2163 } constant0 = { 9.0, 2.0 }; 2163 } constant0 = {9.0, 2.0};
2164 static const struct ALIGN16 { 2164 static const struct ALIGN16 {
2165 double a; 2165 double a;
2166 double b; 2166 double b;
2167 } constant1 = { 3.0, 4.0 }; 2167 } constant1 = {3.0, 4.0};
2168 __ movq(RAX, Immediate(reinterpret_cast<uword>(&constant0))); 2168 __ movq(RAX, Immediate(reinterpret_cast<uword>(&constant0)));
2169 __ movups(XMM10, Address(RAX, 0)); 2169 __ movups(XMM10, Address(RAX, 0));
2170 __ movq(RAX, Immediate(reinterpret_cast<uword>(&constant1))); 2170 __ movq(RAX, Immediate(reinterpret_cast<uword>(&constant1)));
2171 __ movups(XMM11, Address(RAX, 0)); 2171 __ movups(XMM11, Address(RAX, 0));
2172 __ minpd(XMM10, XMM11); 2172 __ minpd(XMM10, XMM11);
2173 __ movaps(XMM0, XMM10); 2173 __ movaps(XMM0, XMM10);
2174 __ ret(); 2174 __ ret();
2175 } 2175 }
2176 2176
2177 2177
2178 ASSEMBLER_TEST_RUN(PackedDoubleMin, test) { 2178 ASSEMBLER_TEST_RUN(PackedDoubleMin, test) {
2179 typedef double (*PackedDoubleMin)(); 2179 typedef double (*PackedDoubleMin)();
2180 double res = reinterpret_cast<PackedDoubleMin>(test->entry())(); 2180 double res = reinterpret_cast<PackedDoubleMin>(test->entry())();
2181 EXPECT_FLOAT_EQ(3.0, res, 0.000001f); 2181 EXPECT_FLOAT_EQ(3.0, res, 0.000001f);
2182 } 2182 }
2183 2183
2184 2184
2185 ASSEMBLER_TEST_GENERATE(PackedDoubleMax, assembler) { 2185 ASSEMBLER_TEST_GENERATE(PackedDoubleMax, assembler) {
2186 static const struct ALIGN16 { 2186 static const struct ALIGN16 {
2187 double a; 2187 double a;
2188 double b; 2188 double b;
2189 } constant0 = { 9.0, 2.0 }; 2189 } constant0 = {9.0, 2.0};
2190 static const struct ALIGN16 { 2190 static const struct ALIGN16 {
2191 double a; 2191 double a;
2192 double b; 2192 double b;
2193 } constant1 = { 3.0, 4.0 }; 2193 } constant1 = {3.0, 4.0};
2194 __ movq(RAX, Immediate(reinterpret_cast<uword>(&constant0))); 2194 __ movq(RAX, Immediate(reinterpret_cast<uword>(&constant0)));
2195 __ movups(XMM10, Address(RAX, 0)); 2195 __ movups(XMM10, Address(RAX, 0));
2196 __ movq(RAX, Immediate(reinterpret_cast<uword>(&constant1))); 2196 __ movq(RAX, Immediate(reinterpret_cast<uword>(&constant1)));
2197 __ movups(XMM11, Address(RAX, 0)); 2197 __ movups(XMM11, Address(RAX, 0));
2198 __ maxpd(XMM10, XMM11); 2198 __ maxpd(XMM10, XMM11);
2199 __ movaps(XMM0, XMM10); 2199 __ movaps(XMM0, XMM10);
2200 __ ret(); 2200 __ ret();
2201 } 2201 }
2202 2202
2203 2203
2204 ASSEMBLER_TEST_RUN(PackedDoubleMax, test) { 2204 ASSEMBLER_TEST_RUN(PackedDoubleMax, test) {
2205 typedef double (*PackedDoubleMax)(); 2205 typedef double (*PackedDoubleMax)();
2206 double res = reinterpret_cast<PackedDoubleMax>(test->entry())(); 2206 double res = reinterpret_cast<PackedDoubleMax>(test->entry())();
2207 EXPECT_FLOAT_EQ(9.0, res, 0.000001f); 2207 EXPECT_FLOAT_EQ(9.0, res, 0.000001f);
2208 } 2208 }
2209 2209
2210 2210
2211 ASSEMBLER_TEST_GENERATE(PackedDoubleShuffle, assembler) { 2211 ASSEMBLER_TEST_GENERATE(PackedDoubleShuffle, assembler) {
2212 static const struct ALIGN16 { 2212 static const struct ALIGN16 {
2213 double a; 2213 double a;
2214 double b; 2214 double b;
2215 } constant0 = { 2.0, 9.0 }; 2215 } constant0 = {2.0, 9.0};
2216 __ movq(RAX, Immediate(reinterpret_cast<uword>(&constant0))); 2216 __ movq(RAX, Immediate(reinterpret_cast<uword>(&constant0)));
2217 __ movups(XMM10, Address(RAX, 0)); 2217 __ movups(XMM10, Address(RAX, 0));
2218 // Splat Y across all lanes. 2218 // Splat Y across all lanes.
2219 __ shufpd(XMM10, XMM10, Immediate(0x33)); 2219 __ shufpd(XMM10, XMM10, Immediate(0x33));
2220 // Splat X across all lanes. 2220 // Splat X across all lanes.
2221 __ shufpd(XMM10, XMM10, Immediate(0x0)); 2221 __ shufpd(XMM10, XMM10, Immediate(0x0));
2222 // Set return value. 2222 // Set return value.
2223 __ movaps(XMM0, XMM10); 2223 __ movaps(XMM0, XMM10);
2224 __ ret(); 2224 __ ret();
2225 } 2225 }
2226 2226
2227 2227
2228 ASSEMBLER_TEST_RUN(PackedDoubleShuffle, test) { 2228 ASSEMBLER_TEST_RUN(PackedDoubleShuffle, test) {
2229 typedef double (*PackedDoubleShuffle)(); 2229 typedef double (*PackedDoubleShuffle)();
2230 double res = reinterpret_cast<PackedDoubleShuffle>(test->entry())(); 2230 double res = reinterpret_cast<PackedDoubleShuffle>(test->entry())();
2231 EXPECT_FLOAT_EQ(9.0, res, 0.000001f); 2231 EXPECT_FLOAT_EQ(9.0, res, 0.000001f);
2232 } 2232 }
2233 2233
2234 2234
2235 ASSEMBLER_TEST_GENERATE(PackedDoubleToSingle, assembler) { 2235 ASSEMBLER_TEST_GENERATE(PackedDoubleToSingle, assembler) {
2236 static const struct ALIGN16 { 2236 static const struct ALIGN16 {
2237 double a; 2237 double a;
2238 double b; 2238 double b;
2239 } constant0 = { 9.0, 2.0 }; 2239 } constant0 = {9.0, 2.0};
2240 __ movq(RAX, Immediate(reinterpret_cast<uword>(&constant0))); 2240 __ movq(RAX, Immediate(reinterpret_cast<uword>(&constant0)));
2241 __ movups(XMM11, Address(RAX, 0)); 2241 __ movups(XMM11, Address(RAX, 0));
2242 __ cvtpd2ps(XMM10, XMM11); 2242 __ cvtpd2ps(XMM10, XMM11);
2243 __ movaps(XMM0, XMM10); 2243 __ movaps(XMM0, XMM10);
2244 __ ret(); 2244 __ ret();
2245 } 2245 }
2246 2246
2247 2247
2248 ASSEMBLER_TEST_RUN(PackedDoubleToSingle, test) { 2248 ASSEMBLER_TEST_RUN(PackedDoubleToSingle, test) {
2249 typedef float (*PackedDoubleToSingle)(); 2249 typedef float (*PackedDoubleToSingle)();
2250 float res = reinterpret_cast<PackedDoubleToSingle>(test->entry())(); 2250 float res = reinterpret_cast<PackedDoubleToSingle>(test->entry())();
2251 EXPECT_FLOAT_EQ(9.0f, res, 0.000001f); 2251 EXPECT_FLOAT_EQ(9.0f, res, 0.000001f);
2252 } 2252 }
2253 2253
2254 2254
2255 ASSEMBLER_TEST_GENERATE(PackedSingleToDouble, assembler) { 2255 ASSEMBLER_TEST_GENERATE(PackedSingleToDouble, assembler) {
2256 static const struct ALIGN16 { 2256 static const struct ALIGN16 {
2257 float a; 2257 float a;
2258 float b; 2258 float b;
2259 float c; 2259 float c;
2260 float d; 2260 float d;
2261 } constant0 = { 9.0f, 2.0f, 3.0f, 4.0f }; 2261 } constant0 = {9.0f, 2.0f, 3.0f, 4.0f};
2262 __ movq(RAX, Immediate(reinterpret_cast<uword>(&constant0))); 2262 __ movq(RAX, Immediate(reinterpret_cast<uword>(&constant0)));
2263 __ movups(XMM11, Address(RAX, 0)); 2263 __ movups(XMM11, Address(RAX, 0));
2264 __ cvtps2pd(XMM10, XMM11); 2264 __ cvtps2pd(XMM10, XMM11);
2265 __ movaps(XMM0, XMM10); 2265 __ movaps(XMM0, XMM10);
2266 __ ret(); 2266 __ ret();
2267 } 2267 }
2268 2268
2269 2269
2270 ASSEMBLER_TEST_RUN(PackedSingleToDouble, test) { 2270 ASSEMBLER_TEST_RUN(PackedSingleToDouble, test) {
2271 typedef double (*PackedSingleToDouble)(); 2271 typedef double (*PackedSingleToDouble)();
(...skipping 108 matching lines...)
2380 uint32_t res = reinterpret_cast<PackedIntOperationsCode>(test->entry())(); 2380 uint32_t res = reinterpret_cast<PackedIntOperationsCode>(test->entry())();
2381 EXPECT_EQ(static_cast<uword>(0x5), res); 2381 EXPECT_EQ(static_cast<uword>(0x5), res);
2382 } 2382 }
2383 2383
2384 2384
2385 ASSEMBLER_TEST_GENERATE(PackedFPOperations2, assembler) { 2385 ASSEMBLER_TEST_GENERATE(PackedFPOperations2, assembler) {
2386 __ movq(RAX, Immediate(bit_cast<int32_t, float>(4.0f))); 2386 __ movq(RAX, Immediate(bit_cast<int32_t, float>(4.0f)));
2387 __ movd(XMM0, RAX); 2387 __ movd(XMM0, RAX);
2388 __ shufps(XMM0, XMM0, Immediate(0x0)); 2388 __ shufps(XMM0, XMM0, Immediate(0x0));
2389 2389
2390 __ movaps(XMM11, XMM0); // Copy XMM0 2390 __ movaps(XMM11, XMM0); // Copy XMM0
2391 __ reciprocalps(XMM11); // 0.25 2391 __ reciprocalps(XMM11); // 0.25
2392 __ sqrtps(XMM11); // 0.5 2392 __ sqrtps(XMM11); // 0.5
2393 __ rsqrtps(XMM0); // ~0.5 2393 __ rsqrtps(XMM0); // ~0.5
2394 __ subps(XMM0, XMM11); // ~0.0 2394 __ subps(XMM0, XMM11); // ~0.0
2395 __ shufps(XMM0, XMM0, Immediate(0x00)); // Copy second lane into all 4 lanes. 2395 __ shufps(XMM0, XMM0, Immediate(0x00)); // Copy second lane into all 4 lanes.
2396 __ ret(); 2396 __ ret();
2397 } 2397 }
2398 2398
2399 2399
2400 ASSEMBLER_TEST_RUN(PackedFPOperations2, test) { 2400 ASSEMBLER_TEST_RUN(PackedFPOperations2, test) {
2401 typedef float (*PackedFPOperations2Code)(); 2401 typedef float (*PackedFPOperations2Code)();
2402 float res = reinterpret_cast<PackedFPOperations2Code>(test->entry())(); 2402 float res = reinterpret_cast<PackedFPOperations2Code>(test->entry())();
2403 EXPECT_FLOAT_EQ(0.0f, res, 0.001f); 2403 EXPECT_FLOAT_EQ(0.0f, res, 0.001f);
2404 } 2404 }
(...skipping 188 matching lines...)
2593 EXPECT_FLOAT_EQ(4.0f, res, 0.001f); 2593 EXPECT_FLOAT_EQ(4.0f, res, 0.001f);
2594 } 2594 }
2595 2595
2596 2596
2597 ASSEMBLER_TEST_GENERATE(PackedLogicalOr, assembler) { 2597 ASSEMBLER_TEST_GENERATE(PackedLogicalOr, assembler) {
2598 static const struct ALIGN16 { 2598 static const struct ALIGN16 {
2599 uint32_t a; 2599 uint32_t a;
2600 uint32_t b; 2600 uint32_t b;
2601 uint32_t c; 2601 uint32_t c;
2602 uint32_t d; 2602 uint32_t d;
2603 } constant1 = 2603 } constant1 = {0xF0F0F0F0, 0xF0F0F0F0, 0xF0F0F0F0, 0xF0F0F0F0};
2604 { 0xF0F0F0F0, 0xF0F0F0F0, 0xF0F0F0F0, 0xF0F0F0F0 };
2605 static const struct ALIGN16 { 2604 static const struct ALIGN16 {
2606 uint32_t a; 2605 uint32_t a;
2607 uint32_t b; 2606 uint32_t b;
2608 uint32_t c; 2607 uint32_t c;
2609 uint32_t d; 2608 uint32_t d;
2610 } constant2 = 2609 } constant2 = {0x0F0F0F0F, 0x0F0F0F0F, 0x0F0F0F0F, 0x0F0F0F0F};
2611 { 0x0F0F0F0F, 0x0F0F0F0F, 0x0F0F0F0F, 0x0F0F0F0F };
2612 __ movq(RAX, Immediate(reinterpret_cast<intptr_t>(&constant1))); 2610 __ movq(RAX, Immediate(reinterpret_cast<intptr_t>(&constant1)));
2613 __ movups(XMM0, Address(RAX, 0)); 2611 __ movups(XMM0, Address(RAX, 0));
2614 __ movq(RAX, Immediate(reinterpret_cast<intptr_t>(&constant2))); 2612 __ movq(RAX, Immediate(reinterpret_cast<intptr_t>(&constant2)));
2615 __ movups(XMM1, Address(RAX, 0)); 2613 __ movups(XMM1, Address(RAX, 0));
2616 __ orps(XMM0, XMM1); 2614 __ orps(XMM0, XMM1);
2617 __ pushq(RAX); 2615 __ pushq(RAX);
2618 __ movss(Address(RSP, 0), XMM0); 2616 __ movss(Address(RSP, 0), XMM0);
2619 __ popq(RAX); 2617 __ popq(RAX);
2620 __ ret(); 2618 __ ret();
2621 } 2619 }
2622 2620
2623 2621
2624 ASSEMBLER_TEST_RUN(PackedLogicalOr, test) { 2622 ASSEMBLER_TEST_RUN(PackedLogicalOr, test) {
2625 typedef uint32_t (*PackedLogicalOrCode)(); 2623 typedef uint32_t (*PackedLogicalOrCode)();
2626 uint32_t res = reinterpret_cast<PackedLogicalOrCode>(test->entry())(); 2624 uint32_t res = reinterpret_cast<PackedLogicalOrCode>(test->entry())();
2627 EXPECT_EQ(0xFFFFFFFF, res); 2625 EXPECT_EQ(0xFFFFFFFF, res);
2628 } 2626 }
2629 2627
2630 2628
2631 ASSEMBLER_TEST_GENERATE(PackedLogicalAnd, assembler) { 2629 ASSEMBLER_TEST_GENERATE(PackedLogicalAnd, assembler) {
2632 static const struct ALIGN16 { 2630 static const struct ALIGN16 {
2633 uint32_t a; 2631 uint32_t a;
2634 uint32_t b; 2632 uint32_t b;
2635 uint32_t c; 2633 uint32_t c;
2636 uint32_t d; 2634 uint32_t d;
2637 } constant1 = 2635 } constant1 = {0xF0F0F0F0, 0xF0F0F0F0, 0xF0F0F0F0, 0xF0F0F0F0};
2638 { 0xF0F0F0F0, 0xF0F0F0F0, 0xF0F0F0F0, 0xF0F0F0F0 };
2639 static const struct ALIGN16 { 2636 static const struct ALIGN16 {
2640 uint32_t a; 2637 uint32_t a;
2641 uint32_t b; 2638 uint32_t b;
2642 uint32_t c; 2639 uint32_t c;
2643 uint32_t d; 2640 uint32_t d;
2644 } constant2 = 2641 } constant2 = {0x0F0FFF0F, 0x0F0F0F0F, 0x0F0F0F0F, 0x0F0F0F0F};
2645 { 0x0F0FFF0F, 0x0F0F0F0F, 0x0F0F0F0F, 0x0F0F0F0F };
2646 __ movq(RAX, Immediate(reinterpret_cast<intptr_t>(&constant1))); 2642 __ movq(RAX, Immediate(reinterpret_cast<intptr_t>(&constant1)));
2647 __ movups(XMM0, Address(RAX, 0)); 2643 __ movups(XMM0, Address(RAX, 0));
2648 __ movq(RAX, Immediate(reinterpret_cast<intptr_t>(&constant2))); 2644 __ movq(RAX, Immediate(reinterpret_cast<intptr_t>(&constant2)));
2649 __ andps(XMM0, Address(RAX, 0)); 2645 __ andps(XMM0, Address(RAX, 0));
2650 __ pushq(RAX); 2646 __ pushq(RAX);
2651 __ movss(Address(RSP, 0), XMM0); 2647 __ movss(Address(RSP, 0), XMM0);
2652 __ popq(RAX); 2648 __ popq(RAX);
2653 __ ret(); 2649 __ ret();
2654 } 2650 }
2655 2651
2656 2652
2657 ASSEMBLER_TEST_RUN(PackedLogicalAnd, test) { 2653 ASSEMBLER_TEST_RUN(PackedLogicalAnd, test) {
2658 typedef uint32_t (*PackedLogicalAndCode)(); 2654 typedef uint32_t (*PackedLogicalAndCode)();
2659 uint32_t res = reinterpret_cast<PackedLogicalAndCode>(test->entry())(); 2655 uint32_t res = reinterpret_cast<PackedLogicalAndCode>(test->entry())();
2660 EXPECT_EQ(static_cast<uword>(0x0000F000), res); 2656 EXPECT_EQ(static_cast<uword>(0x0000F000), res);
2661 } 2657 }
2662 2658
2663 2659
2664 ASSEMBLER_TEST_GENERATE(PackedLogicalNot, assembler) { 2660 ASSEMBLER_TEST_GENERATE(PackedLogicalNot, assembler) {
2665 static const struct ALIGN16 { 2661 static const struct ALIGN16 {
2666 uint32_t a; 2662 uint32_t a;
2667 uint32_t b; 2663 uint32_t b;
2668 uint32_t c; 2664 uint32_t c;
2669 uint32_t d; 2665 uint32_t d;
2670 } constant1 = 2666 } constant1 = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF};
2671 { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };
2672 EnterTestFrame(assembler); 2667 EnterTestFrame(assembler);
2673 __ LoadImmediate(RAX, Immediate(reinterpret_cast<intptr_t>(&constant1))); 2668 __ LoadImmediate(RAX, Immediate(reinterpret_cast<intptr_t>(&constant1)));
2674 __ movups(XMM9, Address(RAX, 0)); 2669 __ movups(XMM9, Address(RAX, 0));
2675 __ notps(XMM9); 2670 __ notps(XMM9);
2676 __ movaps(XMM0, XMM9); 2671 __ movaps(XMM0, XMM9);
2677 __ pushq(RAX); 2672 __ pushq(RAX);
2678 __ movss(Address(RSP, 0), XMM0); 2673 __ movss(Address(RSP, 0), XMM0);
2679 __ popq(RAX); 2674 __ popq(RAX);
2680 LeaveTestFrame(assembler); 2675 LeaveTestFrame(assembler);
2681 __ ret(); 2676 __ ret();
2682 } 2677 }
2683 2678
2684 2679
2685 ASSEMBLER_TEST_RUN(PackedLogicalNot, test) { 2680 ASSEMBLER_TEST_RUN(PackedLogicalNot, test) {
2686 uint32_t res = test->InvokeWithCodeAndThread<uint32_t>(); 2681 uint32_t res = test->InvokeWithCodeAndThread<uint32_t>();
2687 EXPECT_EQ(static_cast<uword>(0x0), res); 2682 EXPECT_EQ(static_cast<uword>(0x0), res);
2688 } 2683 }
2689 2684
2690 2685
2691 ASSEMBLER_TEST_GENERATE(PackedMoveHighLow, assembler) { 2686 ASSEMBLER_TEST_GENERATE(PackedMoveHighLow, assembler) {
2692 static const struct ALIGN16 { 2687 static const struct ALIGN16 {
2693 float a; 2688 float a;
2694 float b; 2689 float b;
2695 float c; 2690 float c;
2696 float d; 2691 float d;
2697 } constant0 = { 1.0, 2.0, 3.0, 4.0 }; 2692 } constant0 = {1.0, 2.0, 3.0, 4.0};
2698 static const struct ALIGN16 { 2693 static const struct ALIGN16 {
2699 float a; 2694 float a;
2700 float b; 2695 float b;
2701 float c; 2696 float c;
2702 float d; 2697 float d;
2703 } constant1 = { 5.0, 6.0, 7.0, 8.0 }; 2698 } constant1 = {5.0, 6.0, 7.0, 8.0};
2704 // XMM9 = 1.0f, 2.0f, 3.0f, 4.0f. 2699 // XMM9 = 1.0f, 2.0f, 3.0f, 4.0f.
2705 __ movq(RAX, Immediate(reinterpret_cast<intptr_t>(&constant0))); 2700 __ movq(RAX, Immediate(reinterpret_cast<intptr_t>(&constant0)));
2706 __ movups(XMM9, Address(RAX, 0)); 2701 __ movups(XMM9, Address(RAX, 0));
2707 // XMM1 = 5.0f, 6.0f, 7.0f, 8.0f. 2702 // XMM1 = 5.0f, 6.0f, 7.0f, 8.0f.
2708 __ movq(RAX, Immediate(reinterpret_cast<intptr_t>(&constant1))); 2703 __ movq(RAX, Immediate(reinterpret_cast<intptr_t>(&constant1)));
2709 __ movups(XMM1, Address(RAX, 0)); 2704 __ movups(XMM1, Address(RAX, 0));
2710 // XMM9 = 7.0f, 8.0f, 3.0f, 4.0f. 2705 // XMM9 = 7.0f, 8.0f, 3.0f, 4.0f.
2711 __ movhlps(XMM9, XMM1); 2706 __ movhlps(XMM9, XMM1);
2712 __ xorps(XMM1, XMM1); 2707 __ xorps(XMM1, XMM1);
2713 // XMM1 = 7.0f, 8.0f, 3.0f, 4.0f. 2708 // XMM1 = 7.0f, 8.0f, 3.0f, 4.0f.
2714 __ movaps(XMM1, XMM9); 2709 __ movaps(XMM1, XMM9);
2715 __ shufps(XMM9, XMM9, Immediate(0x00)); // 7.0f. 2710 __ shufps(XMM9, XMM9, Immediate(0x00)); // 7.0f.
2716 __ shufps(XMM1, XMM1, Immediate(0x55)); // 8.0f. 2711 __ shufps(XMM1, XMM1, Immediate(0x55)); // 8.0f.
2717 __ addss(XMM9, XMM1); // 15.0f. 2712 __ addss(XMM9, XMM1); // 15.0f.
2718 __ movaps(XMM0, XMM9); 2713 __ movaps(XMM0, XMM9);
2719 __ ret(); 2714 __ ret();
2720 } 2715 }
2721 2716
2722 2717
2723 ASSEMBLER_TEST_RUN(PackedMoveHighLow, test) { 2718 ASSEMBLER_TEST_RUN(PackedMoveHighLow, test) {
2724 typedef float (*PackedMoveHighLow)(); 2719 typedef float (*PackedMoveHighLow)();
2725 float res = reinterpret_cast<PackedMoveHighLow>(test->entry())(); 2720 float res = reinterpret_cast<PackedMoveHighLow>(test->entry())();
2726 EXPECT_FLOAT_EQ(15.0f, res, 0.001f); 2721 EXPECT_FLOAT_EQ(15.0f, res, 0.001f);
2727 } 2722 }
2728 2723
2729 2724
2730 ASSEMBLER_TEST_GENERATE(PackedMoveLowHigh, assembler) { 2725 ASSEMBLER_TEST_GENERATE(PackedMoveLowHigh, assembler) {
2731 static const struct ALIGN16 { 2726 static const struct ALIGN16 {
2732 float a; 2727 float a;
2733 float b; 2728 float b;
2734 float c; 2729 float c;
2735 float d; 2730 float d;
2736 } constant0 = { 1.0, 2.0, 3.0, 4.0 }; 2731 } constant0 = {1.0, 2.0, 3.0, 4.0};
2737 static const struct ALIGN16 { 2732 static const struct ALIGN16 {
2738 float a; 2733 float a;
2739 float b; 2734 float b;
2740 float c; 2735 float c;
2741 float d; 2736 float d;
2742 } constant1 = { 5.0, 6.0, 7.0, 8.0 }; 2737 } constant1 = {5.0, 6.0, 7.0, 8.0};
2743 // XMM9 = 1.0f, 2.0f, 3.0f, 4.0f. 2738 // XMM9 = 1.0f, 2.0f, 3.0f, 4.0f.
2744 __ movq(RAX, Immediate(reinterpret_cast<intptr_t>(&constant0))); 2739 __ movq(RAX, Immediate(reinterpret_cast<intptr_t>(&constant0)));
2745 __ movups(XMM9, Address(RAX, 0)); 2740 __ movups(XMM9, Address(RAX, 0));
2746 // XMM1 = 5.0f, 6.0f, 7.0f, 8.0f. 2741 // XMM1 = 5.0f, 6.0f, 7.0f, 8.0f.
2747 __ movq(RAX, Immediate(reinterpret_cast<intptr_t>(&constant1))); 2742 __ movq(RAX, Immediate(reinterpret_cast<intptr_t>(&constant1)));
2748 __ movups(XMM1, Address(RAX, 0)); 2743 __ movups(XMM1, Address(RAX, 0));
2749 // XMM9 = 1.0f, 2.0f, 5.0f, 6.0f 2744 // XMM9 = 1.0f, 2.0f, 5.0f, 6.0f
2750 __ movlhps(XMM9, XMM1); 2745 __ movlhps(XMM9, XMM1);
2751 __ xorps(XMM1, XMM1); 2746 __ xorps(XMM1, XMM1);
2752 // XMM1 = 1.0f, 2.0f, 5.0f, 6.0f 2747 // XMM1 = 1.0f, 2.0f, 5.0f, 6.0f
2753 __ movaps(XMM1, XMM9); 2748 __ movaps(XMM1, XMM9);
2754 __ shufps(XMM9, XMM9, Immediate(0xAA)); // 5.0f. 2749 __ shufps(XMM9, XMM9, Immediate(0xAA)); // 5.0f.
2755 __ shufps(XMM1, XMM1, Immediate(0xFF)); // 6.0f. 2750 __ shufps(XMM1, XMM1, Immediate(0xFF)); // 6.0f.
2756 __ addss(XMM9, XMM1); // 11.0f. 2751 __ addss(XMM9, XMM1); // 11.0f.
2757 __ movaps(XMM0, XMM9); 2752 __ movaps(XMM0, XMM9);
2758 __ ret(); 2753 __ ret();
2759 } 2754 }
2760 2755
2761 2756
2762 ASSEMBLER_TEST_RUN(PackedMoveLowHigh, test) { 2757 ASSEMBLER_TEST_RUN(PackedMoveLowHigh, test) {
2763 typedef float (*PackedMoveLowHigh)(); 2758 typedef float (*PackedMoveLowHigh)();
2764 float res = reinterpret_cast<PackedMoveLowHigh>(test->entry())(); 2759 float res = reinterpret_cast<PackedMoveLowHigh>(test->entry())();
2765 EXPECT_FLOAT_EQ(11.0f, res, 0.001f); 2760 EXPECT_FLOAT_EQ(11.0f, res, 0.001f);
2766 } 2761 }
2767 2762
2768 2763
2769 ASSEMBLER_TEST_GENERATE(PackedUnpackLow, assembler) { 2764 ASSEMBLER_TEST_GENERATE(PackedUnpackLow, assembler) {
2770 static const struct ALIGN16 { 2765 static const struct ALIGN16 {
2771 float a; 2766 float a;
2772 float b; 2767 float b;
2773 float c; 2768 float c;
2774 float d; 2769 float d;
2775 } constant0 = { 1.0, 2.0, 3.0, 4.0 }; 2770 } constant0 = {1.0, 2.0, 3.0, 4.0};
2776 static const struct ALIGN16 { 2771 static const struct ALIGN16 {
2777 float a; 2772 float a;
2778 float b; 2773 float b;
2779 float c; 2774 float c;
2780 float d; 2775 float d;
2781 } constant1 = { 5.0, 6.0, 7.0, 8.0 }; 2776 } constant1 = {5.0, 6.0, 7.0, 8.0};
2782 // XMM9 = 1.0f, 2.0f, 3.0f, 4.0f. 2777 // XMM9 = 1.0f, 2.0f, 3.0f, 4.0f.
2783 __ movq(RAX, Immediate(reinterpret_cast<intptr_t>(&constant0))); 2778 __ movq(RAX, Immediate(reinterpret_cast<intptr_t>(&constant0)));
2784 __ movups(XMM9, Address(RAX, 0)); 2779 __ movups(XMM9, Address(RAX, 0));
2785 // XMM1 = 5.0f, 6.0f, 7.0f, 8.0f. 2780 // XMM1 = 5.0f, 6.0f, 7.0f, 8.0f.
2786 __ movq(RAX, Immediate(reinterpret_cast<intptr_t>(&constant1))); 2781 __ movq(RAX, Immediate(reinterpret_cast<intptr_t>(&constant1)));
2787 __ movups(XMM1, Address(RAX, 0)); 2782 __ movups(XMM1, Address(RAX, 0));
2788 // XMM9 = 1.0f, 5.0f, 2.0f, 6.0f. 2783 // XMM9 = 1.0f, 5.0f, 2.0f, 6.0f.
2789 __ unpcklps(XMM9, XMM1); 2784 __ unpcklps(XMM9, XMM1);
2790 // XMM1 = 1.0f, 5.0f, 2.0f, 6.0f. 2785 // XMM1 = 1.0f, 5.0f, 2.0f, 6.0f.
2791 __ movaps(XMM1, XMM9); 2786 __ movaps(XMM1, XMM9);
(...skipping 11 matching lines...)
2803 EXPECT_FLOAT_EQ(11.0f, res, 0.001f); 2798 EXPECT_FLOAT_EQ(11.0f, res, 0.001f);
2804 } 2799 }
2805 2800
2806 2801
2807 ASSEMBLER_TEST_GENERATE(PackedUnpackHigh, assembler) { 2802 ASSEMBLER_TEST_GENERATE(PackedUnpackHigh, assembler) {
2808 static const struct ALIGN16 { 2803 static const struct ALIGN16 {
2809 float a; 2804 float a;
2810 float b; 2805 float b;
2811 float c; 2806 float c;
2812 float d; 2807 float d;
2813 } constant0 = { 1.0, 2.0, 3.0, 4.0 }; 2808 } constant0 = {1.0, 2.0, 3.0, 4.0};
2814 static const struct ALIGN16 { 2809 static const struct ALIGN16 {
2815 float a; 2810 float a;
2816 float b; 2811 float b;
2817 float c; 2812 float c;
2818 float d; 2813 float d;
2819 } constant1 = { 5.0, 6.0, 7.0, 8.0 }; 2814 } constant1 = {5.0, 6.0, 7.0, 8.0};
2820 // XMM9 = 1.0f, 2.0f, 3.0f, 4.0f. 2815 // XMM9 = 1.0f, 2.0f, 3.0f, 4.0f.
2821 __ movq(RAX, Immediate(reinterpret_cast<intptr_t>(&constant0))); 2816 __ movq(RAX, Immediate(reinterpret_cast<intptr_t>(&constant0)));
2822 __ movups(XMM9, Address(RAX, 0)); 2817 __ movups(XMM9, Address(RAX, 0));
2823 // XMM1 = 5.0f, 6.0f, 7.0f, 8.0f. 2818 // XMM1 = 5.0f, 6.0f, 7.0f, 8.0f.
2824 __ movq(RAX, Immediate(reinterpret_cast<intptr_t>(&constant1))); 2819 __ movq(RAX, Immediate(reinterpret_cast<intptr_t>(&constant1)));
2825 __ movups(XMM1, Address(RAX, 0)); 2820 __ movups(XMM1, Address(RAX, 0));
2826 // XMM9 = 3.0f, 7.0f, 4.0f, 8.0f. 2821 // XMM9 = 3.0f, 7.0f, 4.0f, 8.0f.
2827 __ unpckhps(XMM9, XMM1); 2822 __ unpckhps(XMM9, XMM1);
2828 // XMM1 = 3.0f, 7.0f, 4.0f, 8.0f. 2823 // XMM1 = 3.0f, 7.0f, 4.0f, 8.0f.
2829 __ movaps(XMM1, XMM9); 2824 __ movaps(XMM1, XMM9);
(...skipping 11 matching lines...)
2841 EXPECT_FLOAT_EQ(7.0f, res, 0.001f); 2836 EXPECT_FLOAT_EQ(7.0f, res, 0.001f);
2842 } 2837 }
2843 2838
2844 2839
2845 ASSEMBLER_TEST_GENERATE(PackedUnpackLowPair, assembler) { 2840 ASSEMBLER_TEST_GENERATE(PackedUnpackLowPair, assembler) {
2846 static const struct ALIGN16 { 2841 static const struct ALIGN16 {
2847 float a; 2842 float a;
2848 float b; 2843 float b;
2849 float c; 2844 float c;
2850 float d; 2845 float d;
2851 } constant0 = { 1.0, 2.0, 3.0, 4.0 }; 2846 } constant0 = {1.0, 2.0, 3.0, 4.0};
2852 static const struct ALIGN16 { 2847 static const struct ALIGN16 {
2853 float a; 2848 float a;
2854 float b; 2849 float b;
2855 float c; 2850 float c;
2856 float d; 2851 float d;
2857 } constant1 = { 5.0, 6.0, 7.0, 8.0 }; 2852 } constant1 = {5.0, 6.0, 7.0, 8.0};
2858 // XMM9 = 1.0f, 2.0f, 3.0f, 4.0f. 2853 // XMM9 = 1.0f, 2.0f, 3.0f, 4.0f.
2859 __ movq(RAX, Immediate(reinterpret_cast<intptr_t>(&constant0))); 2854 __ movq(RAX, Immediate(reinterpret_cast<intptr_t>(&constant0)));
2860 __ movups(XMM9, Address(RAX, 0)); 2855 __ movups(XMM9, Address(RAX, 0));
2861 // XMM1 = 5.0f, 6.0f, 7.0f, 8.0f. 2856 // XMM1 = 5.0f, 6.0f, 7.0f, 8.0f.
2862 __ movq(RAX, Immediate(reinterpret_cast<intptr_t>(&constant1))); 2857 __ movq(RAX, Immediate(reinterpret_cast<intptr_t>(&constant1)));
2863 __ movups(XMM1, Address(RAX, 0)); 2858 __ movups(XMM1, Address(RAX, 0));
2864 // XMM9 = 1.0f, 2.0f, 5.0f, 6.0f. 2859 // XMM9 = 1.0f, 2.0f, 5.0f, 6.0f.
2865 __ unpcklpd(XMM9, XMM1); 2860 __ unpcklpd(XMM9, XMM1);
2866 // XMM1 = 1.0f, 2.0f, 5.0f, 6.0f. 2861 // XMM1 = 1.0f, 2.0f, 5.0f, 6.0f.
2867 __ movaps(XMM1, XMM9); 2862 __ movaps(XMM1, XMM9);
(...skipping 11 matching lines...)
2879 EXPECT_FLOAT_EQ(6.0f, res, 0.001f); 2874 EXPECT_FLOAT_EQ(6.0f, res, 0.001f);
2880 } 2875 }
2881 2876
2882 2877
2883 ASSEMBLER_TEST_GENERATE(PackedUnpackHighPair, assembler) { 2878 ASSEMBLER_TEST_GENERATE(PackedUnpackHighPair, assembler) {
2884 static const struct ALIGN16 { 2879 static const struct ALIGN16 {
2885 float a; 2880 float a;
2886 float b; 2881 float b;
2887 float c; 2882 float c;
2888 float d; 2883 float d;
2889 } constant0 = { 1.0, 2.0, 3.0, 4.0 }; 2884 } constant0 = {1.0, 2.0, 3.0, 4.0};
2890 static const struct ALIGN16 { 2885 static const struct ALIGN16 {
2891 float a; 2886 float a;
2892 float b; 2887 float b;
2893 float c; 2888 float c;
2894 float d; 2889 float d;
2895 } constant1 = { 5.0, 6.0, 7.0, 8.0 }; 2890 } constant1 = {5.0, 6.0, 7.0, 8.0};
2896 // XMM9 = 1.0f, 2.0f, 3.0f, 4.0f. 2891 // XMM9 = 1.0f, 2.0f, 3.0f, 4.0f.
2897 __ movq(RAX, Immediate(reinterpret_cast<intptr_t>(&constant0))); 2892 __ movq(RAX, Immediate(reinterpret_cast<intptr_t>(&constant0)));
2898 __ movups(XMM9, Address(RAX, 0)); 2893 __ movups(XMM9, Address(RAX, 0));
2899 // XMM1 = 5.0f, 6.0f, 7.0f, 8.0f. 2894 // XMM1 = 5.0f, 6.0f, 7.0f, 8.0f.
2900 __ movq(RAX, Immediate(reinterpret_cast<intptr_t>(&constant1))); 2895 __ movq(RAX, Immediate(reinterpret_cast<intptr_t>(&constant1)));
2901 __ movups(XMM1, Address(RAX, 0)); 2896 __ movups(XMM1, Address(RAX, 0));
2902 // XMM9 = 3.0f, 4.0f, 7.0f, 8.0f. 2897 // XMM9 = 3.0f, 4.0f, 7.0f, 8.0f.
2903 __ unpckhpd(XMM9, XMM1); 2898 __ unpckhpd(XMM9, XMM1);
2904 // XMM1 = 3.0f, 4.0f, 7.0f, 8.0f. 2899 // XMM1 = 3.0f, 4.0f, 7.0f, 8.0f.
2905 __ movaps(XMM1, XMM9); 2900 __ movaps(XMM1, XMM9);
(...skipping 62 matching lines...)
2968 __ movaps(XMM1, XMM2); 2963 __ movaps(XMM1, XMM2);
2969 __ movaps(XMM0, XMM1); 2964 __ movaps(XMM0, XMM1);
2970 __ popq(RAX); 2965 __ popq(RAX);
2971 __ popq(R15); // Callee saved. 2966 __ popq(R15); // Callee saved.
2972 __ ret(); 2967 __ ret();
2973 } 2968 }
2974 2969
2975 2970
2976 ASSEMBLER_TEST_RUN(DoubleFPMoves, test) { 2971 ASSEMBLER_TEST_RUN(DoubleFPMoves, test) {
2977 typedef double (*DoubleFPMovesCode)(); 2972 typedef double (*DoubleFPMovesCode)();
2978 EXPECT_FLOAT_EQ(1024.67, 2973 EXPECT_FLOAT_EQ(1024.67, reinterpret_cast<DoubleFPMovesCode>(test->entry())(),
2979 reinterpret_cast<DoubleFPMovesCode>(test->entry())(), 0.001); 2974 0.001);
2980 } 2975 }
2981 2976
2982 2977
2983 ASSEMBLER_TEST_GENERATE(DoubleFPOperations, assembler) { 2978 ASSEMBLER_TEST_GENERATE(DoubleFPOperations, assembler) {
2984 __ movq(RAX, Immediate(bit_cast<int64_t, double>(12.3))); 2979 __ movq(RAX, Immediate(bit_cast<int64_t, double>(12.3)));
2985 __ pushq(RAX); 2980 __ pushq(RAX);
2986 __ movsd(XMM0, Address(RSP, 0)); 2981 __ movsd(XMM0, Address(RSP, 0));
2987 __ movsd(XMM8, Address(RSP, 0)); 2982 __ movsd(XMM8, Address(RSP, 0));
2988 __ movq(RAX, Immediate(bit_cast<int64_t, double>(3.4))); 2983 __ movq(RAX, Immediate(bit_cast<int64_t, double>(3.4)));
2989 __ movq(Address(RSP, 0), RAX); 2984 __ movq(Address(RSP, 0), RAX);
2990 __ movsd(XMM12, Address(RSP, 0)); 2985 __ movsd(XMM12, Address(RSP, 0));
2991 __ addsd(XMM8, XMM12); // 15.7 2986 __ addsd(XMM8, XMM12); // 15.7
2992 __ mulsd(XMM8, XMM12); // 53.38 2987 __ mulsd(XMM8, XMM12); // 53.38
2993 __ subsd(XMM8, XMM12); // 49.98 2988 __ subsd(XMM8, XMM12); // 49.98
2994 __ divsd(XMM8, XMM12); // 14.7 2989 __ divsd(XMM8, XMM12); // 14.7
2995 __ sqrtsd(XMM8, XMM8); // 3.834 2990 __ sqrtsd(XMM8, XMM8); // 3.834
2996 __ movsd(XMM1, Address(RSP, 0)); 2991 __ movsd(XMM1, Address(RSP, 0));
2997 __ addsd(XMM0, XMM1); // 15.7 2992 __ addsd(XMM0, XMM1); // 15.7
2998 __ mulsd(XMM0, XMM1); // 53.38 2993 __ mulsd(XMM0, XMM1); // 53.38
2999 __ subsd(XMM0, XMM1); // 49.98 2994 __ subsd(XMM0, XMM1); // 49.98
3000 __ divsd(XMM0, XMM1); // 14.7 2995 __ divsd(XMM0, XMM1); // 14.7
3001 __ sqrtsd(XMM0, XMM0); // 3.834057902 2996 __ sqrtsd(XMM0, XMM0); // 3.834057902
3002 __ addsd(XMM0, XMM8); // 7.6681 2997 __ addsd(XMM0, XMM8); // 7.6681
3003 __ popq(RAX); 2998 __ popq(RAX);
3004 __ ret(); 2999 __ ret();
3005 } 3000 }
3006 3001
3007 3002
3008 ASSEMBLER_TEST_RUN(DoubleFPOperations, test) { 3003 ASSEMBLER_TEST_RUN(DoubleFPOperations, test) {
3009 typedef double (*SingleFPOperationsCode)(); 3004 typedef double (*SingleFPOperationsCode)();
3010 double res = reinterpret_cast<SingleFPOperationsCode>(test->entry())(); 3005 double res = reinterpret_cast<SingleFPOperationsCode>(test->entry())();
3011 EXPECT_FLOAT_EQ(7.668, res, 0.001); 3006 EXPECT_FLOAT_EQ(7.668, res, 0.001);
3012 } 3007 }
(...skipping 15 matching lines...)
3028 typedef double (*Int32ToDoubleConversion)(); 3023 typedef double (*Int32ToDoubleConversion)();
3029 double res = reinterpret_cast<Int32ToDoubleConversion>(test->entry())(); 3024 double res = reinterpret_cast<Int32ToDoubleConversion>(test->entry())();
3030 EXPECT_FLOAT_EQ(-2.0, res, 0.001); 3025 EXPECT_FLOAT_EQ(-2.0, res, 0.001);
3031 } 3026 }
3032 3027
3033 3028
3034 ASSEMBLER_TEST_GENERATE(Int64ToDoubleConversion, assembler) { 3029 ASSEMBLER_TEST_GENERATE(Int64ToDoubleConversion, assembler) {
3035 __ movq(RDX, Immediate(12LL << 32)); 3030 __ movq(RDX, Immediate(12LL << 32));
3036 __ cvtsi2sdq(XMM0, RDX); 3031 __ cvtsi2sdq(XMM0, RDX);
3037 __ movsd(XMM15, XMM0); // Move to high register 3032 __ movsd(XMM15, XMM0); // Move to high register
3038 __ addsd(XMM0, XMM0); // Stomp XMM0 3033 __ addsd(XMM0, XMM0); // Stomp XMM0
3039 __ movsd(XMM0, XMM15); // Move back to XMM0 3034 __ movsd(XMM0, XMM15); // Move back to XMM0
3040 __ ret(); 3035 __ ret();
3041 } 3036 }
3042 3037
3043 3038
3044 ASSEMBLER_TEST_RUN(Int64ToDoubleConversion, test) { 3039 ASSEMBLER_TEST_RUN(Int64ToDoubleConversion, test) {
3045 typedef double (*Int64ToDoubleConversionCode)(); 3040 typedef double (*Int64ToDoubleConversionCode)();
3046 double res = reinterpret_cast<Int64ToDoubleConversionCode>(test->entry())(); 3041 double res = reinterpret_cast<Int64ToDoubleConversionCode>(test->entry())();
3047 EXPECT_FLOAT_EQ(static_cast<double>(12LL << 32), res, 0.001); 3042 EXPECT_FLOAT_EQ(static_cast<double>(12LL << 32), res, 0.001);
3048 } 3043 }
(...skipping 379 matching lines...)
3428 // independently. 3423 // independently.
3429 __ DoubleAbs(XMM0); 3424 __ DoubleAbs(XMM0);
3430 #endif 3425 #endif
3431 LeaveTestFrame(assembler); 3426 LeaveTestFrame(assembler);
3432 __ ret(); 3427 __ ret();
3433 } 3428 }
3434 3429
3435 3430
3436 ASSEMBLER_TEST_RUN(DoubleAbs, test) { 3431 ASSEMBLER_TEST_RUN(DoubleAbs, test) {
3437 double val = -12.45; 3432 double val = -12.45;
3438 double res = test->InvokeWithCodeAndThread<double, double>(val); 3433 double res = test->InvokeWithCodeAndThread<double, double>(val);
3439 EXPECT_FLOAT_EQ(-val, res, 0.001); 3434 EXPECT_FLOAT_EQ(-val, res, 0.001);
3440 val = 12.45; 3435 val = 12.45;
3441 res = test->InvokeWithCodeAndThread<double, double>(val); 3436 res = test->InvokeWithCodeAndThread<double, double>(val);
3442 EXPECT_FLOAT_EQ(val, res, 0.001); 3437 EXPECT_FLOAT_EQ(val, res, 0.001);
3443 } 3438 }
3444 3439
3445 3440
3446 ASSEMBLER_TEST_GENERATE(ExtractSignBits, assembler) { 3441 ASSEMBLER_TEST_GENERATE(ExtractSignBits, assembler) {
3447 __ movmskpd(RAX, XMM0); 3442 __ movmskpd(RAX, XMM0);
3448 __ andq(RAX, Immediate(0x1)); 3443 __ andq(RAX, Immediate(0x1));
(...skipping 23 matching lines...)
3472 ASSEMBLER_TEST_RUN(TestSetCC, test) { 3467 ASSEMBLER_TEST_RUN(TestSetCC, test) {
3473 typedef uword (*TestSetCC)(); 3468 typedef uword (*TestSetCC)();
3474 uword res = reinterpret_cast<TestSetCC>(test->entry())(); 3469 uword res = reinterpret_cast<TestSetCC>(test->entry())();
3475 EXPECT_EQ(0xFFFFFF00, res); 3470 EXPECT_EQ(0xFFFFFF00, res);
3476 } 3471 }
3477 3472
3478 3473
3479 ASSEMBLER_TEST_GENERATE(TestRepMovsBytes, assembler) { 3474 ASSEMBLER_TEST_GENERATE(TestRepMovsBytes, assembler) {
3480 __ pushq(RSI); 3475 __ pushq(RSI);
3481 __ pushq(RDI); 3476 __ pushq(RDI);
3482 __ pushq(CallingConventions::kArg1Reg); // from. 3477 __ pushq(CallingConventions::kArg1Reg); // from.
3483 __ pushq(CallingConventions::kArg2Reg); // to. 3478 __ pushq(CallingConventions::kArg2Reg); // to.
3484 __ pushq(CallingConventions::kArg3Reg); // count. 3479 __ pushq(CallingConventions::kArg3Reg); // count.
3485 __ movq(RSI, Address(RSP, 2 * kWordSize)); // from. 3480 __ movq(RSI, Address(RSP, 2 * kWordSize)); // from.
3486 __ movq(RDI, Address(RSP, 1 * kWordSize)); // to. 3481 __ movq(RDI, Address(RSP, 1 * kWordSize)); // to.
3487 __ movq(RCX, Address(RSP, 0 * kWordSize)); // count. 3482 __ movq(RCX, Address(RSP, 0 * kWordSize)); // count.
3488 __ rep_movsb(); 3483 __ rep_movsb();
3489 // Remove saved arguments. 3484 // Remove saved arguments.
3490 __ popq(RAX); 3485 __ popq(RAX);
3491 __ popq(RAX); 3486 __ popq(RAX);
3492 __ popq(RAX); 3487 __ popq(RAX);
3493 __ popq(RDI); 3488 __ popq(RDI);
3494 __ popq(RSI); 3489 __ popq(RSI);
3495 __ ret(); 3490 __ ret();
3496 } 3491 }
3497 3492
3498 3493
3499 ASSEMBLER_TEST_RUN(TestRepMovsBytes, test) { 3494 ASSEMBLER_TEST_RUN(TestRepMovsBytes, test) {
3500 const char* from = "0123456789"; 3495 const char* from = "0123456789";
3501 const char* to = new char[10]; 3496 const char* to = new char[10];
3502 typedef void (*TestRepMovsBytes)(const char* from, const char* to, int count); 3497 typedef void (*TestRepMovsBytes)(const char* from, const char* to, int count);
3503 reinterpret_cast<TestRepMovsBytes>(test->entry())(from, to, 10); 3498 reinterpret_cast<TestRepMovsBytes>(test->entry())(from, to, 10);
3504 EXPECT_EQ(to[0], '0'); 3499 EXPECT_EQ(to[0], '0');
3505 for (int i = 0; i < 10; i++) { 3500 for (int i = 0; i < 10; i++) {
3506 EXPECT_EQ(from[i], to[i]); 3501 EXPECT_EQ(from[i], to[i]);
3507 } 3502 }
3508 delete [] to; 3503 delete[] to;
3509 } 3504 }
3510 3505
3511 3506
3512 ASSEMBLER_TEST_GENERATE(ConditionalMovesCompare, assembler) { 3507 ASSEMBLER_TEST_GENERATE(ConditionalMovesCompare, assembler) {
3513 __ cmpq(CallingConventions::kArg1Reg, CallingConventions::kArg2Reg); 3508 __ cmpq(CallingConventions::kArg1Reg, CallingConventions::kArg2Reg);
3514 __ movq(RDX, Immediate(1)); // Greater equal. 3509 __ movq(RDX, Immediate(1)); // Greater equal.
3515 __ movq(RCX, Immediate(-1)); // Less 3510 __ movq(RCX, Immediate(-1)); // Less
3516 __ cmovlessq(RAX, RCX); 3511 __ cmovlessq(RAX, RCX);
3517 __ cmovgeq(RAX, RDX); 3512 __ cmovgeq(RAX, RDX);
3518 __ ret(); 3513 __ ret();
3519 } 3514 }
3520 3515
3521 3516
3522 ASSEMBLER_TEST_RUN(ConditionalMovesCompare, test) { 3517 ASSEMBLER_TEST_RUN(ConditionalMovesCompare, test) {
3523 typedef int (*ConditionalMovesCompareCode)(int i, int j); 3518 typedef int (*ConditionalMovesCompareCode)(int i, int j);
3524 int res = reinterpret_cast<ConditionalMovesCompareCode>(test->entry())(10, 5); 3519 int res = reinterpret_cast<ConditionalMovesCompareCode>(test->entry())(10, 5);
(...skipping 50 matching lines...) Expand 10 before | Expand all | Expand 10 after
3575 __ addq(RDX, CallingConventions::kArg2Reg); 3570 __ addq(RDX, CallingConventions::kArg2Reg);
3576 __ movq(RAX, Immediate(1)); 3571 __ movq(RAX, Immediate(1));
3577 __ movq(RCX, Immediate(0)); 3572 __ movq(RCX, Immediate(0));
3578 __ cmovnoq(RAX, RCX); 3573 __ cmovnoq(RAX, RCX);
3579 __ ret(); 3574 __ ret();
3580 } 3575 }
3581 3576
3582 3577
3583 ASSEMBLER_TEST_RUN(ConditionalMovesNoOverflow, test) { 3578 ASSEMBLER_TEST_RUN(ConditionalMovesNoOverflow, test) {
3584 typedef int (*ConditionalMovesNoOverflowCode)(int64_t i, int64_t j); 3579 typedef int (*ConditionalMovesNoOverflowCode)(int64_t i, int64_t j);
3585 int res = reinterpret_cast<ConditionalMovesNoOverflowCode>( 3580 int res = reinterpret_cast<ConditionalMovesNoOverflowCode>(test->entry())(
3586 test->entry())(0x7fffffffffffffff, 2); 3581 0x7fffffffffffffff, 2);
3587 EXPECT_EQ(1, res); 3582 EXPECT_EQ(1, res);
3588 res = reinterpret_cast<ConditionalMovesNoOverflowCode>(test->entry())(1, 1); 3583 res = reinterpret_cast<ConditionalMovesNoOverflowCode>(test->entry())(1, 1);
3589 EXPECT_EQ(0, res); 3584 EXPECT_EQ(0, res);
3590 } 3585 }
3591 3586
3592 } // namespace dart 3587 } // namespace dart
3593 3588
3594 #endif // defined TARGET_ARCH_X64 3589 #endif // defined TARGET_ARCH_X64