Chromium Code Reviews

Diff: test/cctest/test-assembler-arm64.cc

Issue 2896303003: Reland of Reland of "ARM64: Add NEON support" (Closed)
Patch Set: Created 3 years, 7 months ago
1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
(...skipping 218 matching lines...)
229
230 #define CHECK_EQUAL_FP32(expected, result) \
231 CHECK(EqualFP32(expected, &core, result))
232
233 #define CHECK_EQUAL_64(expected, result) \
234 CHECK(Equal64(expected, &core, result))
235
236 #define CHECK_EQUAL_FP64(expected, result) \
237 CHECK(EqualFP64(expected, &core, result))
238
239 // Expected values for 128-bit comparisons are passed as two 64-bit values,
240 // where expected_h (high) is <127:64> and expected_l (low) is <63:0>.
241 #define CHECK_EQUAL_128(expected_h, expected_l, result) \
242 CHECK(Equal128(expected_h, expected_l, &core, result))
243
244 #ifdef DEBUG
245 #define CHECK_LITERAL_POOL_SIZE(expected) \
246 CHECK((expected) == (__ LiteralPoolSize()))
247 #else
248 #define CHECK_LITERAL_POOL_SIZE(expected) ((void)0)
249 #endif
250
251
252 TEST(stack_ops) {
253 INIT_V8();
(...skipping 2257 matching lines...)
2511 CHECK_EQUAL_64(src_base + 4096 * sizeof(src[0]), x24);
2512 CHECK_EQUAL_64(dst_base + 4096 * sizeof(dst[0]), x25);
2513 CHECK_EQUAL_32(6144, w2);
2514 CHECK_EQUAL_32(6144, dst[6144]);
2515 CHECK_EQUAL_64(src_base + 6144 * sizeof(src[0]), x26);
2516 CHECK_EQUAL_64(dst_base + 6144 * sizeof(dst[0]), x27);
2517
2518 TEARDOWN();
2519 }
2520
2521 TEST(ldr_str_preindex) {
2522 INIT_V8();
2523 SETUP();
2524
2525 uint64_t src[2] = {0xfedcba9876543210UL, 0x0123456789abcdefUL};
2526 uint64_t dst[6] = {0, 0, 0, 0, 0, 0};
2527 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
2528 uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
2529
2530 START();
(...skipping 38 matching lines...)
2569 CHECK_EQUAL_64(src_base + 12, x21);
2570 CHECK_EQUAL_64(dst_base + 36, x22);
2571 CHECK_EQUAL_64(src_base + 1, x23);
2572 CHECK_EQUAL_64(dst_base + 25, x24);
2573 CHECK_EQUAL_64(src_base + 3, x25);
2574 CHECK_EQUAL_64(dst_base + 41, x26);
2575
2576 TEARDOWN();
2577 }
2578
2579 TEST(ldr_str_postindex) {
2580 INIT_V8();
2581 SETUP();
2582
2583 uint64_t src[2] = {0xfedcba9876543210UL, 0x0123456789abcdefUL};
2584 uint64_t dst[6] = {0, 0, 0, 0, 0, 0};
2585 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
2586 uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
2587
2588 START();
(...skipping 38 matching lines...)
2627 CHECK_EQUAL_64(src_base, x21);
2628 CHECK_EQUAL_64(dst_base, x22);
2629 CHECK_EQUAL_64(src_base + 2, x23);
2630 CHECK_EQUAL_64(dst_base + 30, x24);
2631 CHECK_EQUAL_64(src_base, x25);
2632 CHECK_EQUAL_64(dst_base, x26);
2633
2634 TEARDOWN();
2635 }
2636
2637 TEST(load_signed) {
2638 INIT_V8();
2639 SETUP();
2640
2641 uint32_t src[2] = {0x80008080, 0x7fff7f7f};
2642 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
2643
2644 START();
2645 __ Mov(x24, src_base);
2646 __ Ldrsb(w0, MemOperand(x24));
(...skipping 17 matching lines...)
2664 CHECK_EQUAL_64(0xffffffffffffff80UL, x4);
2665 CHECK_EQUAL_64(0x000000000000007fUL, x5);
2666 CHECK_EQUAL_64(0xffffffffffff8080UL, x6);
2667 CHECK_EQUAL_64(0x0000000000007f7fUL, x7);
2668 CHECK_EQUAL_64(0xffffffff80008080UL, x8);
2669 CHECK_EQUAL_64(0x000000007fff7f7fUL, x9);
2670
2671 TEARDOWN();
2672 }
2673
2674 TEST(load_store_regoffset) {
2675 INIT_V8();
2676 SETUP();
2677
2678 uint32_t src[3] = {1, 2, 3};
2679 uint32_t dst[4] = {0, 0, 0, 0};
2680 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
2681 uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
2682
2683 START();
(...skipping 27 matching lines...)
2711 CHECK_EQUAL_64(3, x3);
2712 CHECK_EQUAL_64(2, x4);
2713 CHECK_EQUAL_32(1, dst[0]);
2714 CHECK_EQUAL_32(2, dst[1]);
2715 CHECK_EQUAL_32(3, dst[2]);
2716 CHECK_EQUAL_32(3, dst[3]);
2717
2718 TEARDOWN();
2719 }
2720
2721 TEST(load_store_float) {
2722 INIT_V8();
2723 SETUP();
2724
2725 float src[3] = {1.0, 2.0, 3.0};
2726 float dst[3] = {0.0, 0.0, 0.0};
2727 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
2728 uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
2729
2730 START();
(...skipping 22 matching lines...)
2753 CHECK_EQUAL_64(src_base, x17);
2754 CHECK_EQUAL_64(dst_base + sizeof(dst[0]), x18);
2755 CHECK_EQUAL_64(src_base + sizeof(src[0]), x19);
2756 CHECK_EQUAL_64(dst_base + 2 * sizeof(dst[0]), x20);
2757 CHECK_EQUAL_64(src_base + 2 * sizeof(src[0]), x21);
2758 CHECK_EQUAL_64(dst_base, x22);
2759
2760 TEARDOWN();
2761 }
2762
2763 TEST(load_store_double) {
2764 INIT_V8();
2765 SETUP();
2766
2767 double src[3] = {1.0, 2.0, 3.0};
2768 double dst[3] = {0.0, 0.0, 0.0};
2769 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
2770 uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
2771
2772 START();
(...skipping 21 matching lines...)
2794 CHECK_EQUAL_FP64(3.0, dst[1]);
2795 CHECK_EQUAL_64(src_base, x17);
2796 CHECK_EQUAL_64(dst_base + sizeof(dst[0]), x18);
2797 CHECK_EQUAL_64(src_base + sizeof(src[0]), x19);
2798 CHECK_EQUAL_64(dst_base + 2 * sizeof(dst[0]), x20);
2799 CHECK_EQUAL_64(src_base + 2 * sizeof(src[0]), x21);
2800 CHECK_EQUAL_64(dst_base, x22);
2801
2802 TEARDOWN();
2803 }
2804
2805 TEST(load_store_b) {
2806 INIT_V8();
2807 SETUP();
2808
2809 uint8_t src[3] = {0x12, 0x23, 0x34};
2810 uint8_t dst[3] = {0, 0, 0};
2811 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
2812 uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
2813
2814 START();
2815 __ Mov(x17, src_base);
2816 __ Mov(x18, dst_base);
2817 __ Mov(x19, src_base);
2818 __ Mov(x20, dst_base);
2819 __ Mov(x21, src_base);
2820 __ Mov(x22, dst_base);
2821 __ Ldr(b0, MemOperand(x17, sizeof(src[0])));
2822 __ Str(b0, MemOperand(x18, sizeof(dst[0]), PostIndex));
2823 __ Ldr(b1, MemOperand(x19, sizeof(src[0]), PostIndex));
2824 __ Str(b1, MemOperand(x20, 2 * sizeof(dst[0]), PreIndex));
2825 __ Ldr(b2, MemOperand(x21, 2 * sizeof(src[0]), PreIndex));
2826 __ Str(b2, MemOperand(x22, sizeof(dst[0])));
2827 END();
2828
2829 RUN();
2830
2831 CHECK_EQUAL_128(0, 0x23, q0);
2832 CHECK_EQUAL_64(0x23, dst[0]);
2833 CHECK_EQUAL_128(0, 0x12, q1);
2834 CHECK_EQUAL_64(0x12, dst[2]);
2835 CHECK_EQUAL_128(0, 0x34, q2);
2836 CHECK_EQUAL_64(0x34, dst[1]);
2837 CHECK_EQUAL_64(src_base, x17);
2838 CHECK_EQUAL_64(dst_base + sizeof(dst[0]), x18);
2839 CHECK_EQUAL_64(src_base + sizeof(src[0]), x19);
2840 CHECK_EQUAL_64(dst_base + 2 * sizeof(dst[0]), x20);
2841 CHECK_EQUAL_64(src_base + 2 * sizeof(src[0]), x21);
2842 CHECK_EQUAL_64(dst_base, x22);
2843
2844 TEARDOWN();
2845 }
2846
2847 TEST(load_store_h) {
2848 INIT_V8();
2849 SETUP();
2850
2851 uint16_t src[3] = {0x1234, 0x2345, 0x3456};
2852 uint16_t dst[3] = {0, 0, 0};
2853 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
2854 uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
2855
2856 START();
2857 __ Mov(x17, src_base);
2858 __ Mov(x18, dst_base);
2859 __ Mov(x19, src_base);
2860 __ Mov(x20, dst_base);
2861 __ Mov(x21, src_base);
2862 __ Mov(x22, dst_base);
2863 __ Ldr(h0, MemOperand(x17, sizeof(src[0])));
2864 __ Str(h0, MemOperand(x18, sizeof(dst[0]), PostIndex));
2865 __ Ldr(h1, MemOperand(x19, sizeof(src[0]), PostIndex));
2866 __ Str(h1, MemOperand(x20, 2 * sizeof(dst[0]), PreIndex));
2867 __ Ldr(h2, MemOperand(x21, 2 * sizeof(src[0]), PreIndex));
2868 __ Str(h2, MemOperand(x22, sizeof(dst[0])));
2869 END();
2870
2871 RUN();
2872
2873 CHECK_EQUAL_128(0, 0x2345, q0);
2874 CHECK_EQUAL_64(0x2345, dst[0]);
2875 CHECK_EQUAL_128(0, 0x1234, q1);
2876 CHECK_EQUAL_64(0x1234, dst[2]);
2877 CHECK_EQUAL_128(0, 0x3456, q2);
2878 CHECK_EQUAL_64(0x3456, dst[1]);
2879 CHECK_EQUAL_64(src_base, x17);
2880 CHECK_EQUAL_64(dst_base + sizeof(dst[0]), x18);
2881 CHECK_EQUAL_64(src_base + sizeof(src[0]), x19);
2882 CHECK_EQUAL_64(dst_base + 2 * sizeof(dst[0]), x20);
2883 CHECK_EQUAL_64(src_base + 2 * sizeof(src[0]), x21);
2884 CHECK_EQUAL_64(dst_base, x22);
2885
2886 TEARDOWN();
2887 }
2888
2889 TEST(load_store_q) {
2890 INIT_V8();
2891 SETUP();
2892
2893 uint8_t src[48] = {0x10, 0x32, 0x54, 0x76, 0x98, 0xba, 0xdc, 0xfe, 0x01, 0x23,
2894 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0x21, 0x43, 0x65, 0x87,
2895 0xa9, 0xcb, 0xed, 0x0f, 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc,
2896 0xde, 0xf0, 0x24, 0x46, 0x68, 0x8a, 0xac, 0xce, 0xe0, 0x02,
2897 0x42, 0x64, 0x86, 0xa8, 0xca, 0xec, 0x0e, 0x20};
2898
2899 uint64_t dst[6] = {0, 0, 0, 0, 0, 0};
2900 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
2901 uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
2902
2903 START();
2904 __ Mov(x17, src_base);
2905 __ Mov(x18, dst_base);
2906 __ Mov(x19, src_base);
2907 __ Mov(x20, dst_base);
2908 __ Mov(x21, src_base);
2909 __ Mov(x22, dst_base);
2910 __ Ldr(q0, MemOperand(x17, 16));
2911 __ Str(q0, MemOperand(x18, 16, PostIndex));
2912 __ Ldr(q1, MemOperand(x19, 16, PostIndex));
2913 __ Str(q1, MemOperand(x20, 32, PreIndex));
2914 __ Ldr(q2, MemOperand(x21, 32, PreIndex));
2915 __ Str(q2, MemOperand(x22, 16));
2916 END();
2917
2918 RUN();
2919
2920 CHECK_EQUAL_128(0xf0debc9a78563412, 0x0fedcba987654321, q0);
2921 CHECK_EQUAL_64(0x0fedcba987654321, dst[0]);
2922 CHECK_EQUAL_64(0xf0debc9a78563412, dst[1]);
2923 CHECK_EQUAL_128(0xefcdab8967452301, 0xfedcba9876543210, q1);
2924 CHECK_EQUAL_64(0xfedcba9876543210, dst[4]);
2925 CHECK_EQUAL_64(0xefcdab8967452301, dst[5]);
2926 CHECK_EQUAL_128(0x200eeccaa8866442, 0x02e0ceac8a684624, q2);
2927 CHECK_EQUAL_64(0x02e0ceac8a684624, dst[2]);
2928 CHECK_EQUAL_64(0x200eeccaa8866442, dst[3]);
2929 CHECK_EQUAL_64(src_base, x17);
2930 CHECK_EQUAL_64(dst_base + 16, x18);
2931 CHECK_EQUAL_64(src_base + 16, x19);
2932 CHECK_EQUAL_64(dst_base + 32, x20);
2933 CHECK_EQUAL_64(src_base + 32, x21);
2934 CHECK_EQUAL_64(dst_base, x22);
2935
2936 TEARDOWN();
2937 }
2938
2939 TEST(neon_ld1_d) {
2940 INIT_V8();
2941 SETUP();
2942
2943 uint8_t src[32 + 5];
2944 for (unsigned i = 0; i < sizeof(src); i++) {
2945 src[i] = i;
2946 }
2947 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
2948
2949 START();
2950 __ Mov(x17, src_base);
2951 __ Ldr(q2, MemOperand(x17)); // Initialise top 64-bits of Q register.
2952 __ Ld1(v2.V8B(), MemOperand(x17));
2953 __ Add(x17, x17, 1);
2954 __ Ld1(v3.V8B(), v4.V8B(), MemOperand(x17));
2955 __ Add(x17, x17, 1);
2956 __ Ld1(v5.V4H(), v6.V4H(), v7.V4H(), MemOperand(x17));
2957 __ Add(x17, x17, 1);
2958 __ Ld1(v16.V2S(), v17.V2S(), v18.V2S(), v19.V2S(), MemOperand(x17));
2959 __ Add(x17, x17, 1);
2960 __ Ld1(v30.V2S(), v31.V2S(), v0.V2S(), v1.V2S(), MemOperand(x17));
2961 __ Add(x17, x17, 1);
2962 __ Ld1(v20.V1D(), v21.V1D(), v22.V1D(), v23.V1D(), MemOperand(x17));
2963 END();
2964
2965 RUN();
2966
2967 CHECK_EQUAL_128(0, 0x0706050403020100, q2);
2968 CHECK_EQUAL_128(0, 0x0807060504030201, q3);
2969 CHECK_EQUAL_128(0, 0x100f0e0d0c0b0a09, q4);
2970 CHECK_EQUAL_128(0, 0x0908070605040302, q5);
2971 CHECK_EQUAL_128(0, 0x11100f0e0d0c0b0a, q6);
2972 CHECK_EQUAL_128(0, 0x1918171615141312, q7);
2973 CHECK_EQUAL_128(0, 0x0a09080706050403, q16);
2974 CHECK_EQUAL_128(0, 0x1211100f0e0d0c0b, q17);
2975 CHECK_EQUAL_128(0, 0x1a19181716151413, q18);
2976 CHECK_EQUAL_128(0, 0x2221201f1e1d1c1b, q19);
2977 CHECK_EQUAL_128(0, 0x0b0a090807060504, q30);
2978 CHECK_EQUAL_128(0, 0x131211100f0e0d0c, q31);
2979 CHECK_EQUAL_128(0, 0x1b1a191817161514, q0);
2980 CHECK_EQUAL_128(0, 0x232221201f1e1d1c, q1);
2981 CHECK_EQUAL_128(0, 0x0c0b0a0908070605, q20);
2982 CHECK_EQUAL_128(0, 0x14131211100f0e0d, q21);
2983 CHECK_EQUAL_128(0, 0x1c1b1a1918171615, q22);
2984 CHECK_EQUAL_128(0, 0x24232221201f1e1d, q23);
2985
2986 TEARDOWN();
2987 }
2988
2989 TEST(neon_ld1_d_postindex) {
2990 INIT_V8();
2991 SETUP();
2992
2993 uint8_t src[32 + 5];
2994 for (unsigned i = 0; i < sizeof(src); i++) {
2995 src[i] = i;
2996 }
2997 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
2998
2999 START();
3000 __ Mov(x17, src_base);
3001 __ Mov(x18, src_base + 1);
3002 __ Mov(x19, src_base + 2);
3003 __ Mov(x20, src_base + 3);
3004 __ Mov(x21, src_base + 4);
3005 __ Mov(x22, src_base + 5);
3006 __ Mov(x23, 1);
3007 __ Ldr(q2, MemOperand(x17)); // Initialise top 64-bits of Q register.
3008 __ Ld1(v2.V8B(), MemOperand(x17, x23, PostIndex));
3009 __ Ld1(v3.V8B(), v4.V8B(), MemOperand(x18, 16, PostIndex));
3010 __ Ld1(v5.V4H(), v6.V4H(), v7.V4H(), MemOperand(x19, 24, PostIndex));
3011 __ Ld1(v16.V2S(), v17.V2S(), v18.V2S(), v19.V2S(),
3012 MemOperand(x20, 32, PostIndex));
3013 __ Ld1(v30.V2S(), v31.V2S(), v0.V2S(), v1.V2S(),
3014 MemOperand(x21, 32, PostIndex));
3015 __ Ld1(v20.V1D(), v21.V1D(), v22.V1D(), v23.V1D(),
3016 MemOperand(x22, 32, PostIndex));
3017 END();
3018
3019 RUN();
3020
3021 CHECK_EQUAL_128(0, 0x0706050403020100, q2);
3022 CHECK_EQUAL_128(0, 0x0807060504030201, q3);
3023 CHECK_EQUAL_128(0, 0x100f0e0d0c0b0a09, q4);
3024 CHECK_EQUAL_128(0, 0x0908070605040302, q5);
3025 CHECK_EQUAL_128(0, 0x11100f0e0d0c0b0a, q6);
3026 CHECK_EQUAL_128(0, 0x1918171615141312, q7);
3027 CHECK_EQUAL_128(0, 0x0a09080706050403, q16);
3028 CHECK_EQUAL_128(0, 0x1211100f0e0d0c0b, q17);
3029 CHECK_EQUAL_128(0, 0x1a19181716151413, q18);
3030 CHECK_EQUAL_128(0, 0x2221201f1e1d1c1b, q19);
3031 CHECK_EQUAL_128(0, 0x0b0a090807060504, q30);
3032 CHECK_EQUAL_128(0, 0x131211100f0e0d0c, q31);
3033 CHECK_EQUAL_128(0, 0x1b1a191817161514, q0);
3034 CHECK_EQUAL_128(0, 0x232221201f1e1d1c, q1);
3035 CHECK_EQUAL_128(0, 0x0c0b0a0908070605, q20);
3036 CHECK_EQUAL_128(0, 0x14131211100f0e0d, q21);
3037 CHECK_EQUAL_128(0, 0x1c1b1a1918171615, q22);
3038 CHECK_EQUAL_128(0, 0x24232221201f1e1d, q23);
3039 CHECK_EQUAL_64(src_base + 1, x17);
3040 CHECK_EQUAL_64(src_base + 1 + 16, x18);
3041 CHECK_EQUAL_64(src_base + 2 + 24, x19);
3042 CHECK_EQUAL_64(src_base + 3 + 32, x20);
3043 CHECK_EQUAL_64(src_base + 4 + 32, x21);
3044 CHECK_EQUAL_64(src_base + 5 + 32, x22);
3045
3046 TEARDOWN();
3047 }
3048
3049 TEST(neon_ld1_q) {
3050 INIT_V8();
3051 SETUP();
3052
3053 uint8_t src[64 + 4];
3054 for (unsigned i = 0; i < sizeof(src); i++) {
3055 src[i] = i;
3056 }
3057 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
3058
3059 START();
3060 __ Mov(x17, src_base);
3061 __ Ld1(v2.V16B(), MemOperand(x17));
3062 __ Add(x17, x17, 1);
3063 __ Ld1(v3.V16B(), v4.V16B(), MemOperand(x17));
3064 __ Add(x17, x17, 1);
3065 __ Ld1(v5.V8H(), v6.V8H(), v7.V8H(), MemOperand(x17));
3066 __ Add(x17, x17, 1);
3067 __ Ld1(v16.V4S(), v17.V4S(), v18.V4S(), v19.V4S(), MemOperand(x17));
3068 __ Add(x17, x17, 1);
3069 __ Ld1(v30.V2D(), v31.V2D(), v0.V2D(), v1.V2D(), MemOperand(x17));
3070 END();
3071
3072 RUN();
3073
3074 CHECK_EQUAL_128(0x0f0e0d0c0b0a0908, 0x0706050403020100, q2);
3075 CHECK_EQUAL_128(0x100f0e0d0c0b0a09, 0x0807060504030201, q3);
3076 CHECK_EQUAL_128(0x201f1e1d1c1b1a19, 0x1817161514131211, q4);
3077 CHECK_EQUAL_128(0x11100f0e0d0c0b0a, 0x0908070605040302, q5);
3078 CHECK_EQUAL_128(0x21201f1e1d1c1b1a, 0x1918171615141312, q6);
3079 CHECK_EQUAL_128(0x31302f2e2d2c2b2a, 0x2928272625242322, q7);
3080 CHECK_EQUAL_128(0x1211100f0e0d0c0b, 0x0a09080706050403, q16);
3081 CHECK_EQUAL_128(0x2221201f1e1d1c1b, 0x1a19181716151413, q17);
3082 CHECK_EQUAL_128(0x3231302f2e2d2c2b, 0x2a29282726252423, q18);
3083 CHECK_EQUAL_128(0x4241403f3e3d3c3b, 0x3a39383736353433, q19);
3084 CHECK_EQUAL_128(0x131211100f0e0d0c, 0x0b0a090807060504, q30);
3085 CHECK_EQUAL_128(0x232221201f1e1d1c, 0x1b1a191817161514, q31);
3086 CHECK_EQUAL_128(0x333231302f2e2d2c, 0x2b2a292827262524, q0);
3087 CHECK_EQUAL_128(0x434241403f3e3d3c, 0x3b3a393837363534, q1);
3088
3089 TEARDOWN();
3090 }
3091
3092 TEST(neon_ld1_q_postindex) {
3093 INIT_V8();
3094 SETUP();
3095
3096 uint8_t src[64 + 4];
3097 for (unsigned i = 0; i < sizeof(src); i++) {
3098 src[i] = i;
3099 }
3100 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
3101
3102 START();
3103 __ Mov(x17, src_base);
3104 __ Mov(x18, src_base + 1);
3105 __ Mov(x19, src_base + 2);
3106 __ Mov(x20, src_base + 3);
3107 __ Mov(x21, src_base + 4);
3108 __ Mov(x22, 1);
3109 __ Ld1(v2.V16B(), MemOperand(x17, x22, PostIndex));
3110 __ Ld1(v3.V16B(), v4.V16B(), MemOperand(x18, 32, PostIndex));
3111 __ Ld1(v5.V8H(), v6.V8H(), v7.V8H(), MemOperand(x19, 48, PostIndex));
3112 __ Ld1(v16.V4S(), v17.V4S(), v18.V4S(), v19.V4S(),
3113 MemOperand(x20, 64, PostIndex));
3114 __ Ld1(v30.V2D(), v31.V2D(), v0.V2D(), v1.V2D(),
3115 MemOperand(x21, 64, PostIndex));
3116 END();
3117
3118 RUN();
3119
3120 CHECK_EQUAL_128(0x0f0e0d0c0b0a0908, 0x0706050403020100, q2);
3121 CHECK_EQUAL_128(0x100f0e0d0c0b0a09, 0x0807060504030201, q3);
3122 CHECK_EQUAL_128(0x201f1e1d1c1b1a19, 0x1817161514131211, q4);
3123 CHECK_EQUAL_128(0x11100f0e0d0c0b0a, 0x0908070605040302, q5);
3124 CHECK_EQUAL_128(0x21201f1e1d1c1b1a, 0x1918171615141312, q6);
3125 CHECK_EQUAL_128(0x31302f2e2d2c2b2a, 0x2928272625242322, q7);
3126 CHECK_EQUAL_128(0x1211100f0e0d0c0b, 0x0a09080706050403, q16);
3127 CHECK_EQUAL_128(0x2221201f1e1d1c1b, 0x1a19181716151413, q17);
3128 CHECK_EQUAL_128(0x3231302f2e2d2c2b, 0x2a29282726252423, q18);
3129 CHECK_EQUAL_128(0x4241403f3e3d3c3b, 0x3a39383736353433, q19);
3130 CHECK_EQUAL_128(0x131211100f0e0d0c, 0x0b0a090807060504, q30);
3131 CHECK_EQUAL_128(0x232221201f1e1d1c, 0x1b1a191817161514, q31);
3132 CHECK_EQUAL_128(0x333231302f2e2d2c, 0x2b2a292827262524, q0);
3133 CHECK_EQUAL_128(0x434241403f3e3d3c, 0x3b3a393837363534, q1);
3134 CHECK_EQUAL_64(src_base + 1, x17);
3135 CHECK_EQUAL_64(src_base + 1 + 32, x18);
3136 CHECK_EQUAL_64(src_base + 2 + 48, x19);
3137 CHECK_EQUAL_64(src_base + 3 + 64, x20);
3138 CHECK_EQUAL_64(src_base + 4 + 64, x21);
3139
3140 TEARDOWN();
3141 }
3142
3143 TEST(neon_ld1_lane) {
3144 INIT_V8();
3145 SETUP();
3146
3147 uint8_t src[64];
3148 for (unsigned i = 0; i < sizeof(src); i++) {
3149 src[i] = i;
3150 }
3151 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
3152
3153 START();
3154
3155 // Test loading whole register by element.
3156 __ Mov(x17, src_base);
3157 for (int i = 15; i >= 0; i--) {
3158 __ Ld1(v0.B(), i, MemOperand(x17));
3159 __ Add(x17, x17, 1);
3160 }
3161
3162 __ Mov(x17, src_base);
3163 for (int i = 7; i >= 0; i--) {
3164 __ Ld1(v1.H(), i, MemOperand(x17));
3165 __ Add(x17, x17, 1);
3166 }
3167
3168 __ Mov(x17, src_base);
3169 for (int i = 3; i >= 0; i--) {
3170 __ Ld1(v2.S(), i, MemOperand(x17));
3171 __ Add(x17, x17, 1);
3172 }
3173
3174 __ Mov(x17, src_base);
3175 for (int i = 1; i >= 0; i--) {
3176 __ Ld1(v3.D(), i, MemOperand(x17));
3177 __ Add(x17, x17, 1);
3178 }
3179
3180 // Test loading a single element into an initialised register.
3181 __ Mov(x17, src_base);
3182 __ Ldr(q4, MemOperand(x17));
3183 __ Ld1(v4.B(), 4, MemOperand(x17));
3184 __ Ldr(q5, MemOperand(x17));
3185 __ Ld1(v5.H(), 3, MemOperand(x17));
3186 __ Ldr(q6, MemOperand(x17));
3187 __ Ld1(v6.S(), 2, MemOperand(x17));
3188 __ Ldr(q7, MemOperand(x17));
3189 __ Ld1(v7.D(), 1, MemOperand(x17));
3190
3191 END();
3192
3193 RUN();
3194
3195 CHECK_EQUAL_128(0x0001020304050607, 0x08090a0b0c0d0e0f, q0);
3196 CHECK_EQUAL_128(0x0100020103020403, 0x0504060507060807, q1);
3197 CHECK_EQUAL_128(0x0302010004030201, 0x0504030206050403, q2);
3198 CHECK_EQUAL_128(0x0706050403020100, 0x0807060504030201, q3);
3199 CHECK_EQUAL_128(0x0f0e0d0c0b0a0908, 0x0706050003020100, q4);
3200 CHECK_EQUAL_128(0x0f0e0d0c0b0a0908, 0x0100050403020100, q5);
3201 CHECK_EQUAL_128(0x0f0e0d0c03020100, 0x0706050403020100, q6);
3202 CHECK_EQUAL_128(0x0706050403020100, 0x0706050403020100, q7);
3203
3204 TEARDOWN();
3205 }
3206
3207 TEST(neon_ld2_d) {
3208 INIT_V8();
3209 SETUP();
3210
3211 uint8_t src[64 + 4];
3212 for (unsigned i = 0; i < sizeof(src); i++) {
3213 src[i] = i;
3214 }
3215 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
3216
3217 START();
3218 __ Mov(x17, src_base);
3219 __ Ld2(v2.V8B(), v3.V8B(), MemOperand(x17));
3220 __ Add(x17, x17, 1);
3221 __ Ld2(v4.V8B(), v5.V8B(), MemOperand(x17));
3222 __ Add(x17, x17, 1);
3223 __ Ld2(v6.V4H(), v7.V4H(), MemOperand(x17));
3224 __ Add(x17, x17, 1);
3225 __ Ld2(v31.V2S(), v0.V2S(), MemOperand(x17));
3226 END();
3227
3228 RUN();
3229
3230 CHECK_EQUAL_128(0, 0x0e0c0a0806040200, q2);
3231 CHECK_EQUAL_128(0, 0x0f0d0b0907050301, q3);
3232 CHECK_EQUAL_128(0, 0x0f0d0b0907050301, q4);
3233 CHECK_EQUAL_128(0, 0x100e0c0a08060402, q5);
3234 CHECK_EQUAL_128(0, 0x0f0e0b0a07060302, q6);
3235 CHECK_EQUAL_128(0, 0x11100d0c09080504, q7);
3236 CHECK_EQUAL_128(0, 0x0e0d0c0b06050403, q31);
3237 CHECK_EQUAL_128(0, 0x1211100f0a090807, q0);
3238
3239 TEARDOWN();
3240 }
3241
3242 TEST(neon_ld2_d_postindex) {
3243 INIT_V8();
3244 SETUP();
3245
3246 uint8_t src[32 + 4];
3247 for (unsigned i = 0; i < sizeof(src); i++) {
3248 src[i] = i;
3249 }
3250 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
3251
3252 START();
3253 __ Mov(x17, src_base);
3254 __ Mov(x18, src_base + 1);
3255 __ Mov(x19, src_base + 2);
3256 __ Mov(x20, src_base + 3);
3257 __ Mov(x21, src_base + 4);
3258 __ Mov(x22, 1);
3259 __ Ld2(v2.V8B(), v3.V8B(), MemOperand(x17, x22, PostIndex));
3260 __ Ld2(v4.V8B(), v5.V8B(), MemOperand(x18, 16, PostIndex));
3261 __ Ld2(v5.V4H(), v6.V4H(), MemOperand(x19, 16, PostIndex));
3262 __ Ld2(v16.V2S(), v17.V2S(), MemOperand(x20, 16, PostIndex));
3263 __ Ld2(v31.V2S(), v0.V2S(), MemOperand(x21, 16, PostIndex));
3264 END();
3265
3266 RUN();
3267
3268 CHECK_EQUAL_128(0, 0x0e0c0a0806040200, q2);
3269 CHECK_EQUAL_128(0, 0x0f0d0b0907050301, q3);
3270 CHECK_EQUAL_128(0, 0x0f0d0b0907050301, q4);
3271 CHECK_EQUAL_128(0, 0x0f0e0b0a07060302, q5);
3272 CHECK_EQUAL_128(0, 0x11100d0c09080504, q6);
3273 CHECK_EQUAL_128(0, 0x0e0d0c0b06050403, q16);
3274 CHECK_EQUAL_128(0, 0x1211100f0a090807, q17);
3275 CHECK_EQUAL_128(0, 0x0f0e0d0c07060504, q31);
3276 CHECK_EQUAL_128(0, 0x131211100b0a0908, q0);
3277
3278 CHECK_EQUAL_64(src_base + 1, x17);
3279 CHECK_EQUAL_64(src_base + 1 + 16, x18);
3280 CHECK_EQUAL_64(src_base + 2 + 16, x19);
3281 CHECK_EQUAL_64(src_base + 3 + 16, x20);
3282 CHECK_EQUAL_64(src_base + 4 + 16, x21);
3283
3284 TEARDOWN();
3285 }
3286
3287 TEST(neon_ld2_q) {
3288 INIT_V8();
3289 SETUP();
3290
3291 uint8_t src[64 + 4];
3292 for (unsigned i = 0; i < sizeof(src); i++) {
3293 src[i] = i;
3294 }
3295 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
3296
3297 START();
3298 __ Mov(x17, src_base);
3299 __ Ld2(v2.V16B(), v3.V16B(), MemOperand(x17));
3300 __ Add(x17, x17, 1);
3301 __ Ld2(v4.V16B(), v5.V16B(), MemOperand(x17));
3302 __ Add(x17, x17, 1);
3303 __ Ld2(v6.V8H(), v7.V8H(), MemOperand(x17));
3304 __ Add(x17, x17, 1);
3305 __ Ld2(v16.V4S(), v17.V4S(), MemOperand(x17));
3306 __ Add(x17, x17, 1);
3307 __ Ld2(v31.V2D(), v0.V2D(), MemOperand(x17));
3308 END();
3309
3310 RUN();
3311
3312 CHECK_EQUAL_128(0x1e1c1a1816141210, 0x0e0c0a0806040200, q2);
3313 CHECK_EQUAL_128(0x1f1d1b1917151311, 0x0f0d0b0907050301, q3);
3314 CHECK_EQUAL_128(0x1f1d1b1917151311, 0x0f0d0b0907050301, q4);
3315 CHECK_EQUAL_128(0x201e1c1a18161412, 0x100e0c0a08060402, q5);
3316 CHECK_EQUAL_128(0x1f1e1b1a17161312, 0x0f0e0b0a07060302, q6);
3317 CHECK_EQUAL_128(0x21201d1c19181514, 0x11100d0c09080504, q7);
3318 CHECK_EQUAL_128(0x1e1d1c1b16151413, 0x0e0d0c0b06050403, q16);
3319 CHECK_EQUAL_128(0x2221201f1a191817, 0x1211100f0a090807, q17);
3320 CHECK_EQUAL_128(0x1b1a191817161514, 0x0b0a090807060504, q31);
3321 CHECK_EQUAL_128(0x232221201f1e1d1c, 0x131211100f0e0d0c, q0);
3322
3323 TEARDOWN();
3324 }
3325
3326 TEST(neon_ld2_q_postindex) {
3327 INIT_V8();
3328 SETUP();
3329
3330 uint8_t src[64 + 4];
3331 for (unsigned i = 0; i < sizeof(src); i++) {
3332 src[i] = i;
3333 }
3334 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
3335
3336 START();
3337 __ Mov(x17, src_base);
3338 __ Mov(x18, src_base + 1);
3339 __ Mov(x19, src_base + 2);
3340 __ Mov(x20, src_base + 3);
3341 __ Mov(x21, src_base + 4);
3342 __ Mov(x22, 1);
3343 __ Ld2(v2.V16B(), v3.V16B(), MemOperand(x17, x22, PostIndex));
3344 __ Ld2(v4.V16B(), v5.V16B(), MemOperand(x18, 32, PostIndex));
3345 __ Ld2(v6.V8H(), v7.V8H(), MemOperand(x19, 32, PostIndex));
3346 __ Ld2(v16.V4S(), v17.V4S(), MemOperand(x20, 32, PostIndex));
3347 __ Ld2(v31.V2D(), v0.V2D(), MemOperand(x21, 32, PostIndex));
3348 END();
3349
3350 RUN();
3351
3352 CHECK_EQUAL_128(0x1e1c1a1816141210, 0x0e0c0a0806040200, q2);
3353 CHECK_EQUAL_128(0x1f1d1b1917151311, 0x0f0d0b0907050301, q3);
3354 CHECK_EQUAL_128(0x1f1d1b1917151311, 0x0f0d0b0907050301, q4);
3355 CHECK_EQUAL_128(0x201e1c1a18161412, 0x100e0c0a08060402, q5);
3356 CHECK_EQUAL_128(0x1f1e1b1a17161312, 0x0f0e0b0a07060302, q6);
3357 CHECK_EQUAL_128(0x21201d1c19181514, 0x11100d0c09080504, q7);
3358 CHECK_EQUAL_128(0x1e1d1c1b16151413, 0x0e0d0c0b06050403, q16);
3359 CHECK_EQUAL_128(0x2221201f1a191817, 0x1211100f0a090807, q17);
3360 CHECK_EQUAL_128(0x1b1a191817161514, 0x0b0a090807060504, q31);
3361 CHECK_EQUAL_128(0x232221201f1e1d1c, 0x131211100f0e0d0c, q0);
3362
3363 CHECK_EQUAL_64(src_base + 1, x17);
3364 CHECK_EQUAL_64(src_base + 1 + 32, x18);
3365 CHECK_EQUAL_64(src_base + 2 + 32, x19);
3366 CHECK_EQUAL_64(src_base + 3 + 32, x20);
3367 CHECK_EQUAL_64(src_base + 4 + 32, x21);
3368
3369 TEARDOWN();
3370 }
3371
3372 TEST(neon_ld2_lane) {
3373 INIT_V8();
3374 SETUP();
3375
3376 uint8_t src[64];
3377 for (unsigned i = 0; i < sizeof(src); i++) {
3378 src[i] = i;
3379 }
3380 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
3381
3382 START();
3383
3384 // Test loading whole register by element.
3385 __ Mov(x17, src_base);
3386 for (int i = 15; i >= 0; i--) {
3387 __ Ld2(v0.B(), v1.B(), i, MemOperand(x17));
3388 __ Add(x17, x17, 1);
3389 }
3390
3391 __ Mov(x17, src_base);
3392 for (int i = 7; i >= 0; i--) {
3393 __ Ld2(v2.H(), v3.H(), i, MemOperand(x17));
3394 __ Add(x17, x17, 1);
3395 }
3396
3397 __ Mov(x17, src_base);
3398 for (int i = 3; i >= 0; i--) {
3399 __ Ld2(v4.S(), v5.S(), i, MemOperand(x17));
3400 __ Add(x17, x17, 1);
3401 }
3402
3403 __ Mov(x17, src_base);
3404 for (int i = 1; i >= 0; i--) {
3405 __ Ld2(v6.D(), v7.D(), i, MemOperand(x17));
3406 __ Add(x17, x17, 1);
3407 }
3408
3409 // Test loading a single element into an initialised register.
3410 __ Mov(x17, src_base);
3411 __ Mov(x4, x17);
3412 __ Ldr(q8, MemOperand(x4, 16, PostIndex));
3413 __ Ldr(q9, MemOperand(x4));
3414 __ Ld2(v8_.B(), v9.B(), 4, MemOperand(x17));
3415 __ Mov(x5, x17);
3416 __ Ldr(q10, MemOperand(x5, 16, PostIndex));
3417 __ Ldr(q11, MemOperand(x5));
3418 __ Ld2(v10.H(), v11.H(), 3, MemOperand(x17));
3419 __ Mov(x6, x17);
3420 __ Ldr(q12, MemOperand(x6, 16, PostIndex));
3421 __ Ldr(q13, MemOperand(x6));
3422 __ Ld2(v12.S(), v13.S(), 2, MemOperand(x17));
3423 __ Mov(x7, x17);
3424 __ Ldr(q14, MemOperand(x7, 16, PostIndex));
3425 __ Ldr(q15, MemOperand(x7));
3426 __ Ld2(v14.D(), v15.D(), 1, MemOperand(x17));
3427
3428 END();
3429
3430 RUN();
3431
3432 CHECK_EQUAL_128(0x0001020304050607, 0x08090a0b0c0d0e0f, q0);
3433 CHECK_EQUAL_128(0x0102030405060708, 0x090a0b0c0d0e0f10, q1);
3434 CHECK_EQUAL_128(0x0100020103020403, 0x0504060507060807, q2);
3435 CHECK_EQUAL_128(0x0302040305040605, 0x0706080709080a09, q3);
3436 CHECK_EQUAL_128(0x0302010004030201, 0x0504030206050403, q4);
3437 CHECK_EQUAL_128(0x0706050408070605, 0x090807060a090807, q5);
3438 CHECK_EQUAL_128(0x0706050403020100, 0x0807060504030201, q6);
3439 CHECK_EQUAL_128(0x0f0e0d0c0b0a0908, 0x100f0e0d0c0b0a09, q7);
3440 CHECK_EQUAL_128(0x0f0e0d0c0b0a0908, 0x0706050003020100, q8);
3441 CHECK_EQUAL_128(0x1f1e1d1c1b1a1918, 0x1716150113121110, q9);
3442 CHECK_EQUAL_128(0x0f0e0d0c0b0a0908, 0x0100050403020100, q10);
3443 CHECK_EQUAL_128(0x1f1e1d1c1b1a1918, 0x0302151413121110, q11);
3444 CHECK_EQUAL_128(0x0f0e0d0c03020100, 0x0706050403020100, q12);
3445 CHECK_EQUAL_128(0x1f1e1d1c07060504, 0x1716151413121110, q13);
3446 CHECK_EQUAL_128(0x0706050403020100, 0x0706050403020100, q14);
3447 CHECK_EQUAL_128(0x0f0e0d0c0b0a0908, 0x1716151413121110, q15);
3448
3449 TEARDOWN();
3450 }
3451
3452 TEST(neon_ld2_lane_postindex) {
3453 INIT_V8();
3454 SETUP();
3455
3456 uint8_t src[64];
3457 for (unsigned i = 0; i < sizeof(src); i++) {
3458 src[i] = i;
3459 }
3460 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
3461
3462 START();
3463 __ Mov(x17, src_base);
3464 __ Mov(x18, src_base);
3465 __ Mov(x19, src_base);
3466 __ Mov(x20, src_base);
3467 __ Mov(x21, src_base);
3468 __ Mov(x22, src_base);
3469 __ Mov(x23, src_base);
3470 __ Mov(x24, src_base);
3471
3472 // Test loading whole register by element.
3473 for (int i = 15; i >= 0; i--) {
3474 __ Ld2(v0.B(), v1.B(), i, MemOperand(x17, 2, PostIndex));
3475 }
3476
3477 for (int i = 7; i >= 0; i--) {
3478 __ Ld2(v2.H(), v3.H(), i, MemOperand(x18, 4, PostIndex));
3479 }
3480
3481 for (int i = 3; i >= 0; i--) {
3482 __ Ld2(v4.S(), v5.S(), i, MemOperand(x19, 8, PostIndex));
3483 }
3484
3485 for (int i = 1; i >= 0; i--) {
3486 __ Ld2(v6.D(), v7.D(), i, MemOperand(x20, 16, PostIndex));
3487 }
3488
3489 // Test loading a single element into an initialised register.
3490 __ Mov(x25, 1);
3491 __ Mov(x4, x21);
3492 __ Ldr(q8, MemOperand(x4, 16, PostIndex));
3493 __ Ldr(q9, MemOperand(x4));
3494 __ Ld2(v8_.B(), v9.B(), 4, MemOperand(x21, x25, PostIndex));
3495 __ Add(x25, x25, 1);
3496
3497 __ Mov(x5, x22);
3498 __ Ldr(q10, MemOperand(x5, 16, PostIndex));
3499 __ Ldr(q11, MemOperand(x5));
3500 __ Ld2(v10.H(), v11.H(), 3, MemOperand(x22, x25, PostIndex));
3501 __ Add(x25, x25, 1);
3502
3503 __ Mov(x6, x23);
3504 __ Ldr(q12, MemOperand(x6, 16, PostIndex));
3505 __ Ldr(q13, MemOperand(x6));
3506 __ Ld2(v12.S(), v13.S(), 2, MemOperand(x23, x25, PostIndex));
3507 __ Add(x25, x25, 1);
3508
3509 __ Mov(x7, x24);
3510 __ Ldr(q14, MemOperand(x7, 16, PostIndex));
3511 __ Ldr(q15, MemOperand(x7));
3512 __ Ld2(v14.D(), v15.D(), 1, MemOperand(x24, x25, PostIndex));
3513
3514 END();
3515
3516 RUN();
3517
3518 CHECK_EQUAL_128(0x00020406080a0c0e, 0x10121416181a1c1e, q0);
3519 CHECK_EQUAL_128(0x01030507090b0d0f, 0x11131517191b1d1f, q1);
3520 CHECK_EQUAL_128(0x0100050409080d0c, 0x1110151419181d1c, q2);
3521 CHECK_EQUAL_128(0x030207060b0a0f0e, 0x131217161b1a1f1e, q3);
3522 CHECK_EQUAL_128(0x030201000b0a0908, 0x131211101b1a1918, q4);
3523 CHECK_EQUAL_128(0x070605040f0e0d0c, 0x171615141f1e1d1c, q5);
3524 CHECK_EQUAL_128(0x0706050403020100, 0x1716151413121110, q6);
3525 CHECK_EQUAL_128(0x0f0e0d0c0b0a0908, 0x1f1e1d1c1b1a1918, q7);
3526 CHECK_EQUAL_128(0x0f0e0d0c0b0a0908, 0x0706050003020100, q8);
3527 CHECK_EQUAL_128(0x1f1e1d1c1b1a1918, 0x1716150113121110, q9);
3528 CHECK_EQUAL_128(0x0f0e0d0c0b0a0908, 0x0100050403020100, q10);
3529 CHECK_EQUAL_128(0x1f1e1d1c1b1a1918, 0x0302151413121110, q11);
3530 CHECK_EQUAL_128(0x0f0e0d0c03020100, 0x0706050403020100, q12);
3531 CHECK_EQUAL_128(0x1f1e1d1c07060504, 0x1716151413121110, q13);
3532 CHECK_EQUAL_128(0x0706050403020100, 0x0706050403020100, q14);
3533 CHECK_EQUAL_128(0x0f0e0d0c0b0a0908, 0x1716151413121110, q15);
3534
3535 CHECK_EQUAL_64(src_base + 32, x17);
3536 CHECK_EQUAL_64(src_base + 32, x18);
3537 CHECK_EQUAL_64(src_base + 32, x19);
3538 CHECK_EQUAL_64(src_base + 32, x20);
3539 CHECK_EQUAL_64(src_base + 1, x21);
3540 CHECK_EQUAL_64(src_base + 2, x22);
3541 CHECK_EQUAL_64(src_base + 3, x23);
3542 CHECK_EQUAL_64(src_base + 4, x24);
3543
3544 TEARDOWN();
3545 }
3546
3547 TEST(neon_ld2_alllanes) {
3548 INIT_V8();
3549 SETUP();
3550
3551 uint8_t src[64];
3552 for (unsigned i = 0; i < sizeof(src); i++) {
3553 src[i] = i;
3554 }
3555 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
3556
3557 START();
3558 __ Mov(x17, src_base + 1);
3559 __ Mov(x18, 1);
3560 __ Ld2r(v0.V8B(), v1.V8B(), MemOperand(x17));
3561 __ Add(x17, x17, 2);
3562 __ Ld2r(v2.V16B(), v3.V16B(), MemOperand(x17));
3563 __ Add(x17, x17, 1);
3564 __ Ld2r(v4.V4H(), v5.V4H(), MemOperand(x17));
3565 __ Add(x17, x17, 1);
3566 __ Ld2r(v6.V8H(), v7.V8H(), MemOperand(x17));
3567 __ Add(x17, x17, 4);
3568 __ Ld2r(v8_.V2S(), v9.V2S(), MemOperand(x17));
3569 __ Add(x17, x17, 1);
3570 __ Ld2r(v10.V4S(), v11.V4S(), MemOperand(x17));
3571 __ Add(x17, x17, 8);
3572 __ Ld2r(v12.V2D(), v13.V2D(), MemOperand(x17));
3573 END();
3574
3575 RUN();
3576
3577 CHECK_EQUAL_128(0x0000000000000000, 0x0101010101010101, q0);
3578 CHECK_EQUAL_128(0x0000000000000000, 0x0202020202020202, q1);
3579 CHECK_EQUAL_128(0x0303030303030303, 0x0303030303030303, q2);
3580 CHECK_EQUAL_128(0x0404040404040404, 0x0404040404040404, q3);
3581 CHECK_EQUAL_128(0x0000000000000000, 0x0504050405040504, q4);
3582 CHECK_EQUAL_128(0x0000000000000000, 0x0706070607060706, q5);
3583 CHECK_EQUAL_128(0x0605060506050605, 0x0605060506050605, q6);
3584 CHECK_EQUAL_128(0x0807080708070807, 0x0807080708070807, q7);
3585 CHECK_EQUAL_128(0x0000000000000000, 0x0c0b0a090c0b0a09, q8);
3586 CHECK_EQUAL_128(0x0000000000000000, 0x100f0e0d100f0e0d, q9);
3587 CHECK_EQUAL_128(0x0d0c0b0a0d0c0b0a, 0x0d0c0b0a0d0c0b0a, q10);
3588 CHECK_EQUAL_128(0x11100f0e11100f0e, 0x11100f0e11100f0e, q11);
3589 CHECK_EQUAL_128(0x1918171615141312, 0x1918171615141312, q12);
3590 CHECK_EQUAL_128(0x21201f1e1d1c1b1a, 0x21201f1e1d1c1b1a, q13);
3591
3592 TEARDOWN();
3593 }
3594
3595 TEST(neon_ld2_alllanes_postindex) {
3596 INIT_V8();
3597 SETUP();
3598
3599 uint8_t src[64];
3600 for (unsigned i = 0; i < sizeof(src); i++) {
3601 src[i] = i;
3602 }
3603 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
3604
3605 START();
3606 __ Mov(x17, src_base + 1);
3607 __ Mov(x18, 1);
3608 __ Ld2r(v0.V8B(), v1.V8B(), MemOperand(x17, 2, PostIndex));
3609 __ Ld2r(v2.V16B(), v3.V16B(), MemOperand(x17, x18, PostIndex));
3610 __ Ld2r(v4.V4H(), v5.V4H(), MemOperand(x17, x18, PostIndex));
3611 __ Ld2r(v6.V8H(), v7.V8H(), MemOperand(x17, 4, PostIndex));
3612 __ Ld2r(v8_.V2S(), v9.V2S(), MemOperand(x17, x18, PostIndex));
3613 __ Ld2r(v10.V4S(), v11.V4S(), MemOperand(x17, 8, PostIndex));
3614 __ Ld2r(v12.V2D(), v13.V2D(), MemOperand(x17, 16, PostIndex));
3615 END();
3616
3617 RUN();
3618
3619 CHECK_EQUAL_128(0x0000000000000000, 0x0101010101010101, q0);
3620 CHECK_EQUAL_128(0x0000000000000000, 0x0202020202020202, q1);
3621 CHECK_EQUAL_128(0x0303030303030303, 0x0303030303030303, q2);
3622 CHECK_EQUAL_128(0x0404040404040404, 0x0404040404040404, q3);
3623 CHECK_EQUAL_128(0x0000000000000000, 0x0504050405040504, q4);
3624 CHECK_EQUAL_128(0x0000000000000000, 0x0706070607060706, q5);
3625 CHECK_EQUAL_128(0x0605060506050605, 0x0605060506050605, q6);
3626 CHECK_EQUAL_128(0x0807080708070807, 0x0807080708070807, q7);
3627 CHECK_EQUAL_128(0x0000000000000000, 0x0c0b0a090c0b0a09, q8);
3628 CHECK_EQUAL_128(0x0000000000000000, 0x100f0e0d100f0e0d, q9);
3629 CHECK_EQUAL_128(0x0d0c0b0a0d0c0b0a, 0x0d0c0b0a0d0c0b0a, q10);
3630 CHECK_EQUAL_128(0x11100f0e11100f0e, 0x11100f0e11100f0e, q11);
3631 CHECK_EQUAL_128(0x1918171615141312, 0x1918171615141312, q12);
3632 CHECK_EQUAL_128(0x21201f1e1d1c1b1a, 0x21201f1e1d1c1b1a, q13);
3633 CHECK_EQUAL_64(src_base + 34, x17);
3634
3635 TEARDOWN();
3636 }
3637
3638 TEST(neon_ld3_d) {
3639 INIT_V8();
3640 SETUP();
3641
3642 uint8_t src[64 + 4];
3643 for (unsigned i = 0; i < sizeof(src); i++) {
3644 src[i] = i;
3645 }
3646 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
3647
3648 START();
3649 __ Mov(x17, src_base);
3650 __ Ld3(v2.V8B(), v3.V8B(), v4.V8B(), MemOperand(x17));
3651 __ Add(x17, x17, 1);
3652 __ Ld3(v5.V8B(), v6.V8B(), v7.V8B(), MemOperand(x17));
3653 __ Add(x17, x17, 1);
3654 __ Ld3(v8_.V4H(), v9.V4H(), v10.V4H(), MemOperand(x17));
3655 __ Add(x17, x17, 1);
3656 __ Ld3(v31.V2S(), v0.V2S(), v1.V2S(), MemOperand(x17));
3657 END();
3658
3659 RUN();
3660
3661 CHECK_EQUAL_128(0, 0x15120f0c09060300, q2);
3662 CHECK_EQUAL_128(0, 0x1613100d0a070401, q3);
3663 CHECK_EQUAL_128(0, 0x1714110e0b080502, q4);
3664 CHECK_EQUAL_128(0, 0x1613100d0a070401, q5);
3665 CHECK_EQUAL_128(0, 0x1714110e0b080502, q6);
3666 CHECK_EQUAL_128(0, 0x1815120f0c090603, q7);
3667 CHECK_EQUAL_128(0, 0x15140f0e09080302, q8);
3668 CHECK_EQUAL_128(0, 0x171611100b0a0504, q9);
3669 CHECK_EQUAL_128(0, 0x191813120d0c0706, q10);
3670 CHECK_EQUAL_128(0, 0x1211100f06050403, q31);
3671 CHECK_EQUAL_128(0, 0x161514130a090807, q0);
3672 CHECK_EQUAL_128(0, 0x1a1918170e0d0c0b, q1);
3673
3674 TEARDOWN();
3675 }
3676
3677 TEST(neon_ld3_d_postindex) {
3678 INIT_V8();
3679 SETUP();
3680
3681 uint8_t src[32 + 4];
3682 for (unsigned i = 0; i < sizeof(src); i++) {
3683 src[i] = i;
3684 }
3685 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
3686
3687 START();
3688 __ Mov(x17, src_base);
3689 __ Mov(x18, src_base + 1);
3690 __ Mov(x19, src_base + 2);
3691 __ Mov(x20, src_base + 3);
3692 __ Mov(x21, src_base + 4);
3693 __ Mov(x22, 1);
3694 __ Ld3(v2.V8B(), v3.V8B(), v4.V8B(), MemOperand(x17, x22, PostIndex));
3695 __ Ld3(v5.V8B(), v6.V8B(), v7.V8B(), MemOperand(x18, 24, PostIndex));
3696 __ Ld3(v8_.V4H(), v9.V4H(), v10.V4H(), MemOperand(x19, 24, PostIndex));
3697 __ Ld3(v11.V2S(), v12.V2S(), v13.V2S(), MemOperand(x20, 24, PostIndex));
3698 __ Ld3(v31.V2S(), v0.V2S(), v1.V2S(), MemOperand(x21, 24, PostIndex));
3699 END();
3700
3701 RUN();
3702
3703 CHECK_EQUAL_128(0, 0x15120f0c09060300, q2);
3704 CHECK_EQUAL_128(0, 0x1613100d0a070401, q3);
3705 CHECK_EQUAL_128(0, 0x1714110e0b080502, q4);
3706 CHECK_EQUAL_128(0, 0x1613100d0a070401, q5);
3707 CHECK_EQUAL_128(0, 0x1714110e0b080502, q6);
3708 CHECK_EQUAL_128(0, 0x1815120f0c090603, q7);
3709 CHECK_EQUAL_128(0, 0x15140f0e09080302, q8);
3710 CHECK_EQUAL_128(0, 0x171611100b0a0504, q9);
3711 CHECK_EQUAL_128(0, 0x191813120d0c0706, q10);
3712 CHECK_EQUAL_128(0, 0x1211100f06050403, q11);
3713 CHECK_EQUAL_128(0, 0x161514130a090807, q12);
3714 CHECK_EQUAL_128(0, 0x1a1918170e0d0c0b, q13);
3715 CHECK_EQUAL_128(0, 0x1312111007060504, q31);
3716 CHECK_EQUAL_128(0, 0x171615140b0a0908, q0);
3717 CHECK_EQUAL_128(0, 0x1b1a19180f0e0d0c, q1);
3718
3719 CHECK_EQUAL_64(src_base + 1, x17);
3720 CHECK_EQUAL_64(src_base + 1 + 24, x18);
3721 CHECK_EQUAL_64(src_base + 2 + 24, x19);
3722 CHECK_EQUAL_64(src_base + 3 + 24, x20);
3723 CHECK_EQUAL_64(src_base + 4 + 24, x21);
3724
3725 TEARDOWN();
3726 }
3727
3728 TEST(neon_ld3_q) {
3729 INIT_V8();
3730 SETUP();
3731
3732 uint8_t src[64 + 4];
3733 for (unsigned i = 0; i < sizeof(src); i++) {
3734 src[i] = i;
3735 }
3736 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
3737
3738 START();
3739 __ Mov(x17, src_base);
3740 __ Ld3(v2.V16B(), v3.V16B(), v4.V16B(), MemOperand(x17));
3741 __ Add(x17, x17, 1);
3742 __ Ld3(v5.V16B(), v6.V16B(), v7.V16B(), MemOperand(x17));
3743 __ Add(x17, x17, 1);
3744 __ Ld3(v8_.V8H(), v9.V8H(), v10.V8H(), MemOperand(x17));
3745 __ Add(x17, x17, 1);
3746 __ Ld3(v11.V4S(), v12.V4S(), v13.V4S(), MemOperand(x17));
3747 __ Add(x17, x17, 1);
3748 __ Ld3(v31.V2D(), v0.V2D(), v1.V2D(), MemOperand(x17));
3749 END();
3750
3751 RUN();
3752
3753 CHECK_EQUAL_128(0x2d2a2724211e1b18, 0x15120f0c09060300, q2);
3754 CHECK_EQUAL_128(0x2e2b2825221f1c19, 0x1613100d0a070401, q3);
3755 CHECK_EQUAL_128(0x2f2c292623201d1a, 0x1714110e0b080502, q4);
3756 CHECK_EQUAL_128(0x2e2b2825221f1c19, 0x1613100d0a070401, q5);
3757 CHECK_EQUAL_128(0x2f2c292623201d1a, 0x1714110e0b080502, q6);
3758 CHECK_EQUAL_128(0x302d2a2724211e1b, 0x1815120f0c090603, q7);
3759 CHECK_EQUAL_128(0x2d2c272621201b1a, 0x15140f0e09080302, q8);
3760 CHECK_EQUAL_128(0x2f2e292823221d1c, 0x171611100b0a0504, q9);
3761 CHECK_EQUAL_128(0x31302b2a25241f1e, 0x191813120d0c0706, q10);
3762 CHECK_EQUAL_128(0x2a2928271e1d1c1b, 0x1211100f06050403, q11);
3763 CHECK_EQUAL_128(0x2e2d2c2b2221201f, 0x161514130a090807, q12);
3764 CHECK_EQUAL_128(0x3231302f26252423, 0x1a1918170e0d0c0b, q13);
3765 CHECK_EQUAL_128(0x232221201f1e1d1c, 0x0b0a090807060504, q31);
3766 CHECK_EQUAL_128(0x2b2a292827262524, 0x131211100f0e0d0c, q0);
3767 CHECK_EQUAL_128(0x333231302f2e2d2c, 0x1b1a191817161514, q1);
3768
3769 TEARDOWN();
3770 }
3771
3772 TEST(neon_ld3_q_postindex) {
3773 INIT_V8();
3774 SETUP();
3775
3776 uint8_t src[64 + 4];
3777 for (unsigned i = 0; i < sizeof(src); i++) {
3778 src[i] = i;
3779 }
3780 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
3781
3782 START();
3783 __ Mov(x17, src_base);
3784 __ Mov(x18, src_base + 1);
3785 __ Mov(x19, src_base + 2);
3786 __ Mov(x20, src_base + 3);
3787 __ Mov(x21, src_base + 4);
3788 __ Mov(x22, 1);
3789
3790 __ Ld3(v2.V16B(), v3.V16B(), v4.V16B(), MemOperand(x17, x22, PostIndex));
3791 __ Ld3(v5.V16B(), v6.V16B(), v7.V16B(), MemOperand(x18, 48, PostIndex));
3792 __ Ld3(v8_.V8H(), v9.V8H(), v10.V8H(), MemOperand(x19, 48, PostIndex));
3793 __ Ld3(v11.V4S(), v12.V4S(), v13.V4S(), MemOperand(x20, 48, PostIndex));
3794 __ Ld3(v31.V2D(), v0.V2D(), v1.V2D(), MemOperand(x21, 48, PostIndex));
3795 END();
3796
3797 RUN();
3798
3799 CHECK_EQUAL_128(0x2d2a2724211e1b18, 0x15120f0c09060300, q2);
3800 CHECK_EQUAL_128(0x2e2b2825221f1c19, 0x1613100d0a070401, q3);
3801 CHECK_EQUAL_128(0x2f2c292623201d1a, 0x1714110e0b080502, q4);
3802 CHECK_EQUAL_128(0x2e2b2825221f1c19, 0x1613100d0a070401, q5);
3803 CHECK_EQUAL_128(0x2f2c292623201d1a, 0x1714110e0b080502, q6);
3804 CHECK_EQUAL_128(0x302d2a2724211e1b, 0x1815120f0c090603, q7);
3805 CHECK_EQUAL_128(0x2d2c272621201b1a, 0x15140f0e09080302, q8);
3806 CHECK_EQUAL_128(0x2f2e292823221d1c, 0x171611100b0a0504, q9);
3807 CHECK_EQUAL_128(0x31302b2a25241f1e, 0x191813120d0c0706, q10);
3808 CHECK_EQUAL_128(0x2a2928271e1d1c1b, 0x1211100f06050403, q11);
3809 CHECK_EQUAL_128(0x2e2d2c2b2221201f, 0x161514130a090807, q12);
3810 CHECK_EQUAL_128(0x3231302f26252423, 0x1a1918170e0d0c0b, q13);
3811 CHECK_EQUAL_128(0x232221201f1e1d1c, 0x0b0a090807060504, q31);
3812 CHECK_EQUAL_128(0x2b2a292827262524, 0x131211100f0e0d0c, q0);
3813 CHECK_EQUAL_128(0x333231302f2e2d2c, 0x1b1a191817161514, q1);
3814
3815 CHECK_EQUAL_64(src_base + 1, x17);
3816 CHECK_EQUAL_64(src_base + 1 + 48, x18);
3817 CHECK_EQUAL_64(src_base + 2 + 48, x19);
3818 CHECK_EQUAL_64(src_base + 3 + 48, x20);
3819 CHECK_EQUAL_64(src_base + 4 + 48, x21);
3820
3821 TEARDOWN();
3822 }
3823
3824 TEST(neon_ld3_lane) {
3825 INIT_V8();
3826 SETUP();
3827
3828 uint8_t src[64];
3829 for (unsigned i = 0; i < sizeof(src); i++) {
3830 src[i] = i;
3831 }
3832 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
3833
3834 START();
3835
3836 // Test loading whole register by element.
3837 __ Mov(x17, src_base);
3838 for (int i = 15; i >= 0; i--) {
3839 __ Ld3(v0.B(), v1.B(), v2.B(), i, MemOperand(x17));
3840 __ Add(x17, x17, 1);
3841 }
3842
3843 __ Mov(x17, src_base);
3844 for (int i = 7; i >= 0; i--) {
3845 __ Ld3(v3.H(), v4.H(), v5.H(), i, MemOperand(x17));
3846 __ Add(x17, x17, 1);
3847 }
3848
3849 __ Mov(x17, src_base);
3850 for (int i = 3; i >= 0; i--) {
3851 __ Ld3(v6.S(), v7.S(), v8_.S(), i, MemOperand(x17));
3852 __ Add(x17, x17, 1);
3853 }
3854
3855 __ Mov(x17, src_base);
3856 for (int i = 1; i >= 0; i--) {
3857 __ Ld3(v9.D(), v10.D(), v11.D(), i, MemOperand(x17));
3858 __ Add(x17, x17, 1);
3859 }
3860
3861 // Test loading a single element into an initialised register.
3862 __ Mov(x17, src_base);
3863 __ Mov(x4, x17);
3864 __ Ldr(q12, MemOperand(x4, 16, PostIndex));
3865 __ Ldr(q13, MemOperand(x4, 16, PostIndex));
3866 __ Ldr(q14, MemOperand(x4));
3867 __ Ld3(v12.B(), v13.B(), v14.B(), 4, MemOperand(x17));
3868 __ Mov(x5, x17);
3869 __ Ldr(q15, MemOperand(x5, 16, PostIndex));
3870 __ Ldr(q16, MemOperand(x5, 16, PostIndex));
3871 __ Ldr(q17, MemOperand(x5));
3872 __ Ld3(v15.H(), v16.H(), v17.H(), 3, MemOperand(x17));
3873 __ Mov(x6, x17);
3874 __ Ldr(q18, MemOperand(x6, 16, PostIndex));
3875 __ Ldr(q19, MemOperand(x6, 16, PostIndex));
3876 __ Ldr(q20, MemOperand(x6));
3877 __ Ld3(v18.S(), v19.S(), v20.S(), 2, MemOperand(x17));
3878 __ Mov(x7, x17);
3879 __ Ldr(q21, MemOperand(x7, 16, PostIndex));
3880 __ Ldr(q22, MemOperand(x7, 16, PostIndex));
3881 __ Ldr(q23, MemOperand(x7));
3882 __ Ld3(v21.D(), v22.D(), v23.D(), 1, MemOperand(x17));
3883
3884 END();
3885
3886 RUN();
3887
3888 CHECK_EQUAL_128(0x0001020304050607, 0x08090a0b0c0d0e0f, q0);
3889 CHECK_EQUAL_128(0x0102030405060708, 0x090a0b0c0d0e0f10, q1);
3890 CHECK_EQUAL_128(0x0203040506070809, 0x0a0b0c0d0e0f1011, q2);
3891 CHECK_EQUAL_128(0x0100020103020403, 0x0504060507060807, q3);
3892 CHECK_EQUAL_128(0x0302040305040605, 0x0706080709080a09, q4);
3893 CHECK_EQUAL_128(0x0504060507060807, 0x09080a090b0a0c0b, q5);
3894 CHECK_EQUAL_128(0x0302010004030201, 0x0504030206050403, q6);
3895 CHECK_EQUAL_128(0x0706050408070605, 0x090807060a090807, q7);
3896 CHECK_EQUAL_128(0x0b0a09080c0b0a09, 0x0d0c0b0a0e0d0c0b, q8);
3897 CHECK_EQUAL_128(0x0706050403020100, 0x0807060504030201, q9);
3898 CHECK_EQUAL_128(0x0f0e0d0c0b0a0908, 0x100f0e0d0c0b0a09, q10);
3899 CHECK_EQUAL_128(0x1716151413121110, 0x1817161514131211, q11);
3900 CHECK_EQUAL_128(0x0f0e0d0c0b0a0908, 0x0706050003020100, q12);
3901 CHECK_EQUAL_128(0x1f1e1d1c1b1a1918, 0x1716150113121110, q13);
3902 CHECK_EQUAL_128(0x2f2e2d2c2b2a2928, 0x2726250223222120, q14);
3903 CHECK_EQUAL_128(0x0f0e0d0c0b0a0908, 0x0100050403020100, q15);
3904 CHECK_EQUAL_128(0x1f1e1d1c1b1a1918, 0x0302151413121110, q16);
3905 CHECK_EQUAL_128(0x2f2e2d2c2b2a2928, 0x0504252423222120, q17);
3906
3907 TEARDOWN();
3908 }
3909
3910 TEST(neon_ld3_lane_postindex) {
3911 INIT_V8();
3912 SETUP();
3913
3914 uint8_t src[64];
3915 for (unsigned i = 0; i < sizeof(src); i++) {
3916 src[i] = i;
3917 }
3918 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
3919
3920 START();
3921
3922 // Test loading whole register by element.
3923 __ Mov(x17, src_base);
3924 __ Mov(x18, src_base);
3925 __ Mov(x19, src_base);
3926 __ Mov(x20, src_base);
3927 __ Mov(x21, src_base);
3928 __ Mov(x22, src_base);
3929 __ Mov(x23, src_base);
3930 __ Mov(x24, src_base);
3931 for (int i = 15; i >= 0; i--) {
3932 __ Ld3(v0.B(), v1.B(), v2.B(), i, MemOperand(x17, 3, PostIndex));
3933 }
3934
3935 for (int i = 7; i >= 0; i--) {
3936 __ Ld3(v3.H(), v4.H(), v5.H(), i, MemOperand(x18, 6, PostIndex));
3937 }
3938
3939 for (int i = 3; i >= 0; i--) {
3940 __ Ld3(v6.S(), v7.S(), v8_.S(), i, MemOperand(x19, 12, PostIndex));
3941 }
3942
3943 for (int i = 1; i >= 0; i--) {
3944 __ Ld3(v9.D(), v10.D(), v11.D(), i, MemOperand(x20, 24, PostIndex));
3945 }
3946
3947 // Test loading a single element into an initialised register.
3948 __ Mov(x25, 1);
3949 __ Mov(x4, x21);
3950 __ Ldr(q12, MemOperand(x4, 16, PostIndex));
3951 __ Ldr(q13, MemOperand(x4, 16, PostIndex));
3952 __ Ldr(q14, MemOperand(x4));
3953 __ Ld3(v12.B(), v13.B(), v14.B(), 4, MemOperand(x21, x25, PostIndex));
3954 __ Add(x25, x25, 1);
3955
3956 __ Mov(x5, x22);
3957 __ Ldr(q15, MemOperand(x5, 16, PostIndex));
3958 __ Ldr(q16, MemOperand(x5, 16, PostIndex));
3959 __ Ldr(q17, MemOperand(x5));
3960 __ Ld3(v15.H(), v16.H(), v17.H(), 3, MemOperand(x22, x25, PostIndex));
3961 __ Add(x25, x25, 1);
3962
3963 __ Mov(x6, x23);
3964 __ Ldr(q18, MemOperand(x6, 16, PostIndex));
3965 __ Ldr(q19, MemOperand(x6, 16, PostIndex));
3966 __ Ldr(q20, MemOperand(x6));
3967 __ Ld3(v18.S(), v19.S(), v20.S(), 2, MemOperand(x23, x25, PostIndex));
3968 __ Add(x25, x25, 1);
3969
3970 __ Mov(x7, x24);
3971 __ Ldr(q21, MemOperand(x7, 16, PostIndex));
3972 __ Ldr(q22, MemOperand(x7, 16, PostIndex));
3973 __ Ldr(q23, MemOperand(x7));
3974 __ Ld3(v21.D(), v22.D(), v23.D(), 1, MemOperand(x24, x25, PostIndex));
3975
3976 END();
3977
3978 RUN();
3979
3980 CHECK_EQUAL_128(0x000306090c0f1215, 0x181b1e2124272a2d, q0);
3981 CHECK_EQUAL_128(0x0104070a0d101316, 0x191c1f2225282b2e, q1);
3982 CHECK_EQUAL_128(0x0205080b0e111417, 0x1a1d202326292c2f, q2);
3983 CHECK_EQUAL_128(0x010007060d0c1312, 0x19181f1e25242b2a, q3);
3984 CHECK_EQUAL_128(0x030209080f0e1514, 0x1b1a212027262d2c, q4);
3985 CHECK_EQUAL_128(0x05040b0a11101716, 0x1d1c232229282f2e, q5);
3986 CHECK_EQUAL_128(0x030201000f0e0d0c, 0x1b1a191827262524, q6);
3987 CHECK_EQUAL_128(0x0706050413121110, 0x1f1e1d1c2b2a2928, q7);
3988 CHECK_EQUAL_128(0x0b0a090817161514, 0x232221202f2e2d2c, q8);
3989 CHECK_EQUAL_128(0x0706050403020100, 0x1f1e1d1c1b1a1918, q9);
3990 CHECK_EQUAL_128(0x0f0e0d0c0b0a0908, 0x2726252423222120, q10);
3991 CHECK_EQUAL_128(0x1716151413121110, 0x2f2e2d2c2b2a2928, q11);
3992 CHECK_EQUAL_128(0x0f0e0d0c0b0a0908, 0x0706050003020100, q12);
3993 CHECK_EQUAL_128(0x1f1e1d1c1b1a1918, 0x1716150113121110, q13);
3994 CHECK_EQUAL_128(0x2f2e2d2c2b2a2928, 0x2726250223222120, q14);
3995 CHECK_EQUAL_128(0x0f0e0d0c0b0a0908, 0x0100050403020100, q15);
3996 CHECK_EQUAL_128(0x1f1e1d1c1b1a1918, 0x0302151413121110, q16);
3997 CHECK_EQUAL_128(0x2f2e2d2c2b2a2928, 0x0504252423222120, q17);
3998 CHECK_EQUAL_128(0x0f0e0d0c03020100, 0x0706050403020100, q18);
3999 CHECK_EQUAL_128(0x1f1e1d1c07060504, 0x1716151413121110, q19);
4000 CHECK_EQUAL_128(0x2f2e2d2c0b0a0908, 0x2726252423222120, q20);
4001 CHECK_EQUAL_128(0x0706050403020100, 0x0706050403020100, q21);
4002 CHECK_EQUAL_128(0x0f0e0d0c0b0a0908, 0x1716151413121110, q22);
4003 CHECK_EQUAL_128(0x1716151413121110, 0x2726252423222120, q23);
4004
4005 CHECK_EQUAL_64(src_base + 48, x17);
4006 CHECK_EQUAL_64(src_base + 48, x18);
4007 CHECK_EQUAL_64(src_base + 48, x19);
4008 CHECK_EQUAL_64(src_base + 48, x20);
4009 CHECK_EQUAL_64(src_base + 1, x21);
4010 CHECK_EQUAL_64(src_base + 2, x22);
4011 CHECK_EQUAL_64(src_base + 3, x23);
4012 CHECK_EQUAL_64(src_base + 4, x24);
4013
4014 TEARDOWN();
4015 }
4016
4017 TEST(neon_ld3_alllanes) {
4018 INIT_V8();
4019 SETUP();
4020
4021 uint8_t src[64];
4022 for (unsigned i = 0; i < sizeof(src); i++) {
4023 src[i] = i;
4024 }
4025 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
4026
4027 START();
4028 __ Mov(x17, src_base + 1);
4029 __ Mov(x18, 1);
4030 __ Ld3r(v0.V8B(), v1.V8B(), v2.V8B(), MemOperand(x17));
4031 __ Add(x17, x17, 3);
4032 __ Ld3r(v3.V16B(), v4.V16B(), v5.V16B(), MemOperand(x17));
4033 __ Add(x17, x17, 1);
4034 __ Ld3r(v6.V4H(), v7.V4H(), v8_.V4H(), MemOperand(x17));
4035 __ Add(x17, x17, 1);
4036 __ Ld3r(v9.V8H(), v10.V8H(), v11.V8H(), MemOperand(x17));
4037 __ Add(x17, x17, 6);
4038 __ Ld3r(v12.V2S(), v13.V2S(), v14.V2S(), MemOperand(x17));
4039 __ Add(x17, x17, 1);
4040 __ Ld3r(v15.V4S(), v16.V4S(), v17.V4S(), MemOperand(x17));
4041 __ Add(x17, x17, 12);
4042 __ Ld3r(v18.V2D(), v19.V2D(), v20.V2D(), MemOperand(x17));
4043 END();
4044
4045 RUN();
4046
4047 CHECK_EQUAL_128(0x0000000000000000, 0x0101010101010101, q0);
4048 CHECK_EQUAL_128(0x0000000000000000, 0x0202020202020202, q1);
4049 CHECK_EQUAL_128(0x0000000000000000, 0x0303030303030303, q2);
4050 CHECK_EQUAL_128(0x0404040404040404, 0x0404040404040404, q3);
4051 CHECK_EQUAL_128(0x0505050505050505, 0x0505050505050505, q4);
4052 CHECK_EQUAL_128(0x0606060606060606, 0x0606060606060606, q5);
4053 CHECK_EQUAL_128(0x0000000000000000, 0x0605060506050605, q6);
4054 CHECK_EQUAL_128(0x0000000000000000, 0x0807080708070807, q7);
4055 CHECK_EQUAL_128(0x0000000000000000, 0x0a090a090a090a09, q8);
4056 CHECK_EQUAL_128(0x0706070607060706, 0x0706070607060706, q9);
4057 CHECK_EQUAL_128(0x0908090809080908, 0x0908090809080908, q10);
4058 CHECK_EQUAL_128(0x0b0a0b0a0b0a0b0a, 0x0b0a0b0a0b0a0b0a, q11);
4059 CHECK_EQUAL_128(0x0000000000000000, 0x0f0e0d0c0f0e0d0c, q12);
4060 CHECK_EQUAL_128(0x0000000000000000, 0x1312111013121110, q13);
4061 CHECK_EQUAL_128(0x0000000000000000, 0x1716151417161514, q14);
4062 CHECK_EQUAL_128(0x100f0e0d100f0e0d, 0x100f0e0d100f0e0d, q15);
4063 CHECK_EQUAL_128(0x1413121114131211, 0x1413121114131211, q16);
4064 CHECK_EQUAL_128(0x1817161518171615, 0x1817161518171615, q17);
4065 CHECK_EQUAL_128(0x201f1e1d1c1b1a19, 0x201f1e1d1c1b1a19, q18);
4066 CHECK_EQUAL_128(0x2827262524232221, 0x2827262524232221, q19);
4067 CHECK_EQUAL_128(0x302f2e2d2c2b2a29, 0x302f2e2d2c2b2a29, q20);
4068
4069 TEARDOWN();
4070 }
4071
4072 TEST(neon_ld3_alllanes_postindex) {
4073 INIT_V8();
4074 SETUP();
4075
4076 uint8_t src[64];
4077 for (unsigned i = 0; i < sizeof(src); i++) {
4078 src[i] = i;
4079 }
4080 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
4081 __ Mov(x17, src_base + 1);
4082 __ Mov(x18, 1);
4083
4084 START();
4085 __ Mov(x17, src_base + 1);
4086 __ Mov(x18, 1);
4087 __ Ld3r(v0.V8B(), v1.V8B(), v2.V8B(), MemOperand(x17, 3, PostIndex));
4088 __ Ld3r(v3.V16B(), v4.V16B(), v5.V16B(), MemOperand(x17, x18, PostIndex));
4089 __ Ld3r(v6.V4H(), v7.V4H(), v8_.V4H(), MemOperand(x17, x18, PostIndex));
4090 __ Ld3r(v9.V8H(), v10.V8H(), v11.V8H(), MemOperand(x17, 6, PostIndex));
4091 __ Ld3r(v12.V2S(), v13.V2S(), v14.V2S(), MemOperand(x17, x18, PostIndex));
4092 __ Ld3r(v15.V4S(), v16.V4S(), v17.V4S(), MemOperand(x17, 12, PostIndex));
4093 __ Ld3r(v18.V2D(), v19.V2D(), v20.V2D(), MemOperand(x17, 24, PostIndex));
4094 END();
4095
4096 RUN();
4097
4098 CHECK_EQUAL_128(0x0000000000000000, 0x0101010101010101, q0);
4099 CHECK_EQUAL_128(0x0000000000000000, 0x0202020202020202, q1);
4100 CHECK_EQUAL_128(0x0000000000000000, 0x0303030303030303, q2);
4101 CHECK_EQUAL_128(0x0404040404040404, 0x0404040404040404, q3);
4102 CHECK_EQUAL_128(0x0505050505050505, 0x0505050505050505, q4);
4103 CHECK_EQUAL_128(0x0606060606060606, 0x0606060606060606, q5);
4104 CHECK_EQUAL_128(0x0000000000000000, 0x0605060506050605, q6);
4105 CHECK_EQUAL_128(0x0000000000000000, 0x0807080708070807, q7);
4106 CHECK_EQUAL_128(0x0000000000000000, 0x0a090a090a090a09, q8);
4107 CHECK_EQUAL_128(0x0706070607060706, 0x0706070607060706, q9);
4108 CHECK_EQUAL_128(0x0908090809080908, 0x0908090809080908, q10);
4109 CHECK_EQUAL_128(0x0b0a0b0a0b0a0b0a, 0x0b0a0b0a0b0a0b0a, q11);
4110 CHECK_EQUAL_128(0x0000000000000000, 0x0f0e0d0c0f0e0d0c, q12);
4111 CHECK_EQUAL_128(0x0000000000000000, 0x1312111013121110, q13);
4112 CHECK_EQUAL_128(0x0000000000000000, 0x1716151417161514, q14);
4113 CHECK_EQUAL_128(0x100f0e0d100f0e0d, 0x100f0e0d100f0e0d, q15);
4114 CHECK_EQUAL_128(0x1413121114131211, 0x1413121114131211, q16);
4115 CHECK_EQUAL_128(0x1817161518171615, 0x1817161518171615, q17);
4116 CHECK_EQUAL_128(0x201f1e1d1c1b1a19, 0x201f1e1d1c1b1a19, q18);
4117 CHECK_EQUAL_128(0x2827262524232221, 0x2827262524232221, q19);
4118 CHECK_EQUAL_128(0x302f2e2d2c2b2a29, 0x302f2e2d2c2b2a29, q20);
4119
4120 TEARDOWN();
4121 }
4122
4123 TEST(neon_ld4_d) {
4124 INIT_V8();
4125 SETUP();
4126
4127 uint8_t src[64 + 4];
4128 for (unsigned i = 0; i < sizeof(src); i++) {
4129 src[i] = i;
4130 }
4131 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
4132
4133 START();
4134 __ Mov(x17, src_base);
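// Ld4 de-interleaves 4-element structures: the first destination register
// receives elements 0, 4, 8, ..., the second elements 1, 5, 9, ..., and so on.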
4135 __ Ld4(v2.V8B(), v3.V8B(), v4.V8B(), v5.V8B(), MemOperand(x17));
4136 __ Add(x17, x17, 1);
4137 __ Ld4(v6.V8B(), v7.V8B(), v8_.V8B(), v9.V8B(), MemOperand(x17));
4138 __ Add(x17, x17, 1);
4139 __ Ld4(v10.V4H(), v11.V4H(), v12.V4H(), v13.V4H(), MemOperand(x17));
4140 __ Add(x17, x17, 1);
4141 __ Ld4(v30.V2S(), v31.V2S(), v0.V2S(), v1.V2S(), MemOperand(x17));
4142 END();
4143
4144 RUN();
4145
4146 CHECK_EQUAL_128(0, 0x1c1814100c080400, q2);
4147 CHECK_EQUAL_128(0, 0x1d1915110d090501, q3);
4148 CHECK_EQUAL_128(0, 0x1e1a16120e0a0602, q4);
4149 CHECK_EQUAL_128(0, 0x1f1b17130f0b0703, q5);
4150 CHECK_EQUAL_128(0, 0x1d1915110d090501, q6);
4151 CHECK_EQUAL_128(0, 0x1e1a16120e0a0602, q7);
4152 CHECK_EQUAL_128(0, 0x1f1b17130f0b0703, q8);
4153 CHECK_EQUAL_128(0, 0x201c1814100c0804, q9);
4154 CHECK_EQUAL_128(0, 0x1b1a13120b0a0302, q10);
4155 CHECK_EQUAL_128(0, 0x1d1c15140d0c0504, q11);
4156 CHECK_EQUAL_128(0, 0x1f1e17160f0e0706, q12);
4157 CHECK_EQUAL_128(0, 0x2120191811100908, q13);
4158 CHECK_EQUAL_128(0, 0x1615141306050403, q30);
4159 CHECK_EQUAL_128(0, 0x1a1918170a090807, q31);
4160 CHECK_EQUAL_128(0, 0x1e1d1c1b0e0d0c0b, q0);
4161 CHECK_EQUAL_128(0, 0x2221201f1211100f, q1);
4162
4163 TEARDOWN();
4164 }
4165
4166 TEST(neon_ld4_d_postindex) {
4167 INIT_V8();
4168 SETUP();
4169
4170 uint8_t src[32 + 4];
4171 for (unsigned i = 0; i < sizeof(src); i++) {
4172 src[i] = i;
4173 }
4174 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
4175
4176 START();
4177 __ Mov(x17, src_base);
4178 __ Mov(x18, src_base + 1);
4179 __ Mov(x19, src_base + 2);
4180 __ Mov(x20, src_base + 3);
4181 __ Mov(x21, src_base + 4);
4182 __ Mov(x22, 1);
4183 __ Ld4(v2.V8B(), v3.V8B(), v4.V8B(), v5.V8B(),
4184 MemOperand(x17, x22, PostIndex));
4185 __ Ld4(v6.V8B(), v7.V8B(), v8_.V8B(), v9.V8B(),
4186 MemOperand(x18, 32, PostIndex));
4187 __ Ld4(v10.V4H(), v11.V4H(), v12.V4H(), v13.V4H(),
4188 MemOperand(x19, 32, PostIndex));
4189 __ Ld4(v14.V2S(), v15.V2S(), v16.V2S(), v17.V2S(),
4190 MemOperand(x20, 32, PostIndex));
4191 __ Ld4(v30.V2S(), v31.V2S(), v0.V2S(), v1.V2S(),
4192 MemOperand(x21, 32, PostIndex));
4193 END();
4194
4195 RUN();
4196
4197 CHECK_EQUAL_128(0, 0x1c1814100c080400, q2);
4198 CHECK_EQUAL_128(0, 0x1d1915110d090501, q3);
4199 CHECK_EQUAL_128(0, 0x1e1a16120e0a0602, q4);
4200 CHECK_EQUAL_128(0, 0x1f1b17130f0b0703, q5);
4201 CHECK_EQUAL_128(0, 0x1d1915110d090501, q6);
4202 CHECK_EQUAL_128(0, 0x1e1a16120e0a0602, q7);
4203 CHECK_EQUAL_128(0, 0x1f1b17130f0b0703, q8);
4204 CHECK_EQUAL_128(0, 0x201c1814100c0804, q9);
4205 CHECK_EQUAL_128(0, 0x1b1a13120b0a0302, q10);
4206 CHECK_EQUAL_128(0, 0x1d1c15140d0c0504, q11);
4207 CHECK_EQUAL_128(0, 0x1f1e17160f0e0706, q12);
4208 CHECK_EQUAL_128(0, 0x2120191811100908, q13);
4209 CHECK_EQUAL_128(0, 0x1615141306050403, q14);
4210 CHECK_EQUAL_128(0, 0x1a1918170a090807, q15);
4211 CHECK_EQUAL_128(0, 0x1e1d1c1b0e0d0c0b, q16);
4212 CHECK_EQUAL_128(0, 0x2221201f1211100f, q17);
4213 CHECK_EQUAL_128(0, 0x1716151407060504, q30);
4214 CHECK_EQUAL_128(0, 0x1b1a19180b0a0908, q31);
4215 CHECK_EQUAL_128(0, 0x1f1e1d1c0f0e0d0c, q0);
4216 CHECK_EQUAL_128(0, 0x2322212013121110, q1);
4217
4218 CHECK_EQUAL_64(src_base + 1, x17);
4219 CHECK_EQUAL_64(src_base + 1 + 32, x18);
4220 CHECK_EQUAL_64(src_base + 2 + 32, x19);
4221 CHECK_EQUAL_64(src_base + 3 + 32, x20);
4222 CHECK_EQUAL_64(src_base + 4 + 32, x21);
4223 TEARDOWN();
4224 }
4225
4226 TEST(neon_ld4_q) {
4227 INIT_V8();
4228 SETUP();
4229
4230 uint8_t src[64 + 4];
4231 for (unsigned i = 0; i < sizeof(src); i++) {
4232 src[i] = i;
4233 }
4234 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
4235
4236 START();
4237 __ Mov(x17, src_base);
4238 __ Ld4(v2.V16B(), v3.V16B(), v4.V16B(), v5.V16B(), MemOperand(x17));
4239 __ Add(x17, x17, 1);
4240 __ Ld4(v6.V16B(), v7.V16B(), v8_.V16B(), v9.V16B(), MemOperand(x17));
4241 __ Add(x17, x17, 1);
4242 __ Ld4(v10.V8H(), v11.V8H(), v12.V8H(), v13.V8H(), MemOperand(x17));
4243 __ Add(x17, x17, 1);
4244 __ Ld4(v14.V4S(), v15.V4S(), v16.V4S(), v17.V4S(), MemOperand(x17));
4245 __ Add(x17, x17, 1);
4246 __ Ld4(v18.V2D(), v19.V2D(), v20.V2D(), v21.V2D(), MemOperand(x17));
4247 END();
4248
4249 RUN();
4250
4251 CHECK_EQUAL_128(0x3c3834302c282420, 0x1c1814100c080400, q2);
4252 CHECK_EQUAL_128(0x3d3935312d292521, 0x1d1915110d090501, q3);
4253 CHECK_EQUAL_128(0x3e3a36322e2a2622, 0x1e1a16120e0a0602, q4);
4254 CHECK_EQUAL_128(0x3f3b37332f2b2723, 0x1f1b17130f0b0703, q5);
4255 CHECK_EQUAL_128(0x3d3935312d292521, 0x1d1915110d090501, q6);
4256 CHECK_EQUAL_128(0x3e3a36322e2a2622, 0x1e1a16120e0a0602, q7);
4257 CHECK_EQUAL_128(0x3f3b37332f2b2723, 0x1f1b17130f0b0703, q8);
4258 CHECK_EQUAL_128(0x403c3834302c2824, 0x201c1814100c0804, q9);
4259 CHECK_EQUAL_128(0x3b3a33322b2a2322, 0x1b1a13120b0a0302, q10);
4260 CHECK_EQUAL_128(0x3d3c35342d2c2524, 0x1d1c15140d0c0504, q11);
4261 CHECK_EQUAL_128(0x3f3e37362f2e2726, 0x1f1e17160f0e0706, q12);
4262 CHECK_EQUAL_128(0x4140393831302928, 0x2120191811100908, q13);
4263 CHECK_EQUAL_128(0x3635343326252423, 0x1615141306050403, q14);
4264 CHECK_EQUAL_128(0x3a3938372a292827, 0x1a1918170a090807, q15);
4265 CHECK_EQUAL_128(0x3e3d3c3b2e2d2c2b, 0x1e1d1c1b0e0d0c0b, q16);
4266 CHECK_EQUAL_128(0x4241403f3231302f, 0x2221201f1211100f, q17);
4267 CHECK_EQUAL_128(0x2b2a292827262524, 0x0b0a090807060504, q18);
4268 CHECK_EQUAL_128(0x333231302f2e2d2c, 0x131211100f0e0d0c, q19);
4269 CHECK_EQUAL_128(0x3b3a393837363534, 0x1b1a191817161514, q20);
4270 CHECK_EQUAL_128(0x434241403f3e3d3c, 0x232221201f1e1d1c, q21);
4271 TEARDOWN();
4272 }
4273
4274 TEST(neon_ld4_q_postindex) {
4275 INIT_V8();
4276 SETUP();
4277
4278 uint8_t src[64 + 4];
4279 for (unsigned i = 0; i < sizeof(src); i++) {
4280 src[i] = i;
4281 }
4282 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
4283
4284 START();
4285 __ Mov(x17, src_base);
4286 __ Mov(x18, src_base + 1);
4287 __ Mov(x19, src_base + 2);
4288 __ Mov(x20, src_base + 3);
4289 __ Mov(x21, src_base + 4);
4290 __ Mov(x22, 1);
4291
4292 __ Ld4(v2.V16B(), v3.V16B(), v4.V16B(), v5.V16B(),
4293 MemOperand(x17, x22, PostIndex));
4294 __ Ld4(v6.V16B(), v7.V16B(), v8_.V16B(), v9.V16B(),
4295 MemOperand(x18, 64, PostIndex));
4296 __ Ld4(v10.V8H(), v11.V8H(), v12.V8H(), v13.V8H(),
4297 MemOperand(x19, 64, PostIndex));
4298 __ Ld4(v14.V4S(), v15.V4S(), v16.V4S(), v17.V4S(),
4299 MemOperand(x20, 64, PostIndex));
4300 __ Ld4(v30.V2D(), v31.V2D(), v0.V2D(), v1.V2D(),
4301 MemOperand(x21, 64, PostIndex));
4302 END();
4303
4304 RUN();
4305
4306 CHECK_EQUAL_128(0x3c3834302c282420, 0x1c1814100c080400, q2);
4307 CHECK_EQUAL_128(0x3d3935312d292521, 0x1d1915110d090501, q3);
4308 CHECK_EQUAL_128(0x3e3a36322e2a2622, 0x1e1a16120e0a0602, q4);
4309 CHECK_EQUAL_128(0x3f3b37332f2b2723, 0x1f1b17130f0b0703, q5);
4310 CHECK_EQUAL_128(0x3d3935312d292521, 0x1d1915110d090501, q6);
4311 CHECK_EQUAL_128(0x3e3a36322e2a2622, 0x1e1a16120e0a0602, q7);
4312 CHECK_EQUAL_128(0x3f3b37332f2b2723, 0x1f1b17130f0b0703, q8);
4313 CHECK_EQUAL_128(0x403c3834302c2824, 0x201c1814100c0804, q9);
4314 CHECK_EQUAL_128(0x3b3a33322b2a2322, 0x1b1a13120b0a0302, q10);
4315 CHECK_EQUAL_128(0x3d3c35342d2c2524, 0x1d1c15140d0c0504, q11);
4316 CHECK_EQUAL_128(0x3f3e37362f2e2726, 0x1f1e17160f0e0706, q12);
4317 CHECK_EQUAL_128(0x4140393831302928, 0x2120191811100908, q13);
4318 CHECK_EQUAL_128(0x3635343326252423, 0x1615141306050403, q14);
4319 CHECK_EQUAL_128(0x3a3938372a292827, 0x1a1918170a090807, q15);
4320 CHECK_EQUAL_128(0x3e3d3c3b2e2d2c2b, 0x1e1d1c1b0e0d0c0b, q16);
4321 CHECK_EQUAL_128(0x4241403f3231302f, 0x2221201f1211100f, q17);
4322 CHECK_EQUAL_128(0x2b2a292827262524, 0x0b0a090807060504, q30);
4323 CHECK_EQUAL_128(0x333231302f2e2d2c, 0x131211100f0e0d0c, q31);
4324 CHECK_EQUAL_128(0x3b3a393837363534, 0x1b1a191817161514, q0);
4325 CHECK_EQUAL_128(0x434241403f3e3d3c, 0x232221201f1e1d1c, q1);
4326
4327 CHECK_EQUAL_64(src_base + 1, x17);
4328 CHECK_EQUAL_64(src_base + 1 + 64, x18);
4329 CHECK_EQUAL_64(src_base + 2 + 64, x19);
4330 CHECK_EQUAL_64(src_base + 3 + 64, x20);
4331 CHECK_EQUAL_64(src_base + 4 + 64, x21);
4332
4333 TEARDOWN();
4334 }
4335
4336 TEST(neon_ld4_lane) {
4337 INIT_V8();
4338 SETUP();
4339
4340 uint8_t src[64];
4341 for (unsigned i = 0; i < sizeof(src); i++) {
4342 src[i] = i;
4343 }
4344 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
4345
4346 START();
4347
4348 // Test loading whole register by element.
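// The lane form of Ld4 writes one element into the given lane of each of the
// four destination registers; iterating over every lane fills the registers.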
4349 __ Mov(x17, src_base);
4350 for (int i = 15; i >= 0; i--) {
4351 __ Ld4(v0.B(), v1.B(), v2.B(), v3.B(), i, MemOperand(x17));
4352 __ Add(x17, x17, 1);
4353 }
4354
4355 __ Mov(x17, src_base);
4356 for (int i = 7; i >= 0; i--) {
4357 __ Ld4(v4.H(), v5.H(), v6.H(), v7.H(), i, MemOperand(x17));
4358 __ Add(x17, x17, 1);
4359 }
4360
4361 __ Mov(x17, src_base);
4362 for (int i = 3; i >= 0; i--) {
4363 __ Ld4(v8_.S(), v9.S(), v10.S(), v11.S(), i, MemOperand(x17));
4364 __ Add(x17, x17, 1);
4365 }
4366
4367 __ Mov(x17, src_base);
4368 for (int i = 1; i >= 0; i--) {
4369 __ Ld4(v12.D(), v13.D(), v14.D(), v15.D(), i, MemOperand(x17));
4370 __ Add(x17, x17, 1);
4371 }
4372
4373 // Test loading a single element into an initialised register.
4374 __ Mov(x17, src_base);
4375 __ Mov(x4, x17);
4376 __ Ldr(q16, MemOperand(x4, 16, PostIndex));
4377 __ Ldr(q17, MemOperand(x4, 16, PostIndex));
4378 __ Ldr(q18, MemOperand(x4, 16, PostIndex));
4379 __ Ldr(q19, MemOperand(x4));
4380 __ Ld4(v16.B(), v17.B(), v18.B(), v19.B(), 4, MemOperand(x17));
4381
4382 __ Mov(x5, x17);
4383 __ Ldr(q20, MemOperand(x5, 16, PostIndex));
4384 __ Ldr(q21, MemOperand(x5, 16, PostIndex));
4385 __ Ldr(q22, MemOperand(x5, 16, PostIndex));
4386 __ Ldr(q23, MemOperand(x5));
4387 __ Ld4(v20.H(), v21.H(), v22.H(), v23.H(), 3, MemOperand(x17));
4388
4389 __ Mov(x6, x17);
4390 __ Ldr(q24, MemOperand(x6, 16, PostIndex));
4391 __ Ldr(q25, MemOperand(x6, 16, PostIndex));
4392 __ Ldr(q26, MemOperand(x6, 16, PostIndex));
4393 __ Ldr(q27, MemOperand(x6));
4394 __ Ld4(v24.S(), v25.S(), v26.S(), v27.S(), 2, MemOperand(x17));
4395
4396 __ Mov(x7, x17);
4397 __ Ldr(q28, MemOperand(x7, 16, PostIndex));
4398 __ Ldr(q29, MemOperand(x7, 16, PostIndex));
4399 __ Ldr(q30, MemOperand(x7, 16, PostIndex));
4400 __ Ldr(q31, MemOperand(x7));
4401 __ Ld4(v28.D(), v29.D(), v30.D(), v31.D(), 1, MemOperand(x17));
4402
4403 END();
4404
4405 RUN();
4406
4407 CHECK_EQUAL_128(0x0001020304050607, 0x08090a0b0c0d0e0f, q0);
4408 CHECK_EQUAL_128(0x0102030405060708, 0x090a0b0c0d0e0f10, q1);
4409 CHECK_EQUAL_128(0x0203040506070809, 0x0a0b0c0d0e0f1011, q2);
4410 CHECK_EQUAL_128(0x030405060708090a, 0x0b0c0d0e0f101112, q3);
4411 CHECK_EQUAL_128(0x0100020103020403, 0x0504060507060807, q4);
4412 CHECK_EQUAL_128(0x0302040305040605, 0x0706080709080a09, q5);
4413 CHECK_EQUAL_128(0x0504060507060807, 0x09080a090b0a0c0b, q6);
4414 CHECK_EQUAL_128(0x0706080709080a09, 0x0b0a0c0b0d0c0e0d, q7);
4415 CHECK_EQUAL_128(0x0302010004030201, 0x0504030206050403, q8);
4416 CHECK_EQUAL_128(0x0706050408070605, 0x090807060a090807, q9);
4417 CHECK_EQUAL_128(0x0b0a09080c0b0a09, 0x0d0c0b0a0e0d0c0b, q10);
4418 CHECK_EQUAL_128(0x0f0e0d0c100f0e0d, 0x11100f0e1211100f, q11);
4419 CHECK_EQUAL_128(0x0706050403020100, 0x0807060504030201, q12);
4420 CHECK_EQUAL_128(0x0f0e0d0c0b0a0908, 0x100f0e0d0c0b0a09, q13);
4421 CHECK_EQUAL_128(0x1716151413121110, 0x1817161514131211, q14);
4422 CHECK_EQUAL_128(0x1f1e1d1c1b1a1918, 0x201f1e1d1c1b1a19, q15);
4423 CHECK_EQUAL_128(0x0f0e0d0c0b0a0908, 0x0706050003020100, q16);
4424 CHECK_EQUAL_128(0x1f1e1d1c1b1a1918, 0x1716150113121110, q17);
4425 CHECK_EQUAL_128(0x2f2e2d2c2b2a2928, 0x2726250223222120, q18);
4426 CHECK_EQUAL_128(0x3f3e3d3c3b3a3938, 0x3736350333323130, q19);
4427 CHECK_EQUAL_128(0x0f0e0d0c0b0a0908, 0x0100050403020100, q20);
4428 CHECK_EQUAL_128(0x1f1e1d1c1b1a1918, 0x0302151413121110, q21);
4429 CHECK_EQUAL_128(0x2f2e2d2c2b2a2928, 0x0504252423222120, q22);
4430 CHECK_EQUAL_128(0x3f3e3d3c3b3a3938, 0x0706353433323130, q23);
4431 CHECK_EQUAL_128(0x0f0e0d0c03020100, 0x0706050403020100, q24);
4432 CHECK_EQUAL_128(0x1f1e1d1c07060504, 0x1716151413121110, q25);
4433 CHECK_EQUAL_128(0x2f2e2d2c0b0a0908, 0x2726252423222120, q26);
4434 CHECK_EQUAL_128(0x3f3e3d3c0f0e0d0c, 0x3736353433323130, q27);
4435 CHECK_EQUAL_128(0x0706050403020100, 0x0706050403020100, q28);
4436 CHECK_EQUAL_128(0x0f0e0d0c0b0a0908, 0x1716151413121110, q29);
4437 CHECK_EQUAL_128(0x1716151413121110, 0x2726252423222120, q30);
4438 CHECK_EQUAL_128(0x1f1e1d1c1b1a1918, 0x3736353433323130, q31);
4439
4440 TEARDOWN();
4441 }
4442
4443 TEST(neon_ld4_lane_postindex) {
4444 INIT_V8();
4445 SETUP();
4446
4447 uint8_t src[64];
4448 for (unsigned i = 0; i < sizeof(src); i++) {
4449 src[i] = i;
4450 }
4451 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
4452
4453 START();
4454
4455 // Test loading whole register by element.
4456 __ Mov(x17, src_base);
4457 for (int i = 15; i >= 0; i--) {
4458 __ Ld4(v0.B(), v1.B(), v2.B(), v3.B(), i, MemOperand(x17, 4, PostIndex));
4459 }
4460
4461 __ Mov(x18, src_base);
4462 for (int i = 7; i >= 0; i--) {
4463 __ Ld4(v4.H(), v5.H(), v6.H(), v7.H(), i, MemOperand(x18, 8, PostIndex));
4464 }
4465
4466 __ Mov(x19, src_base);
4467 for (int i = 3; i >= 0; i--) {
4468 __ Ld4(v8_.S(), v9.S(), v10.S(), v11.S(), i,
4469 MemOperand(x19, 16, PostIndex));
4470 }
4471
4472 __ Mov(x20, src_base);
4473 for (int i = 1; i >= 0; i--) {
4474 __ Ld4(v12.D(), v13.D(), v14.D(), v15.D(), i,
4475 MemOperand(x20, 32, PostIndex));
4476 }
4477
4478 // Test loading a single element into an initialised register.
4479 __ Mov(x25, 1);
4480 __ Mov(x21, src_base);
4481 __ Mov(x22, src_base);
4482 __ Mov(x23, src_base);
4483 __ Mov(x24, src_base);
4484
4485 __ Mov(x4, x21);
4486 __ Ldr(q16, MemOperand(x4, 16, PostIndex));
4487 __ Ldr(q17, MemOperand(x4, 16, PostIndex));
4488 __ Ldr(q18, MemOperand(x4, 16, PostIndex));
4489 __ Ldr(q19, MemOperand(x4));
4490 __ Ld4(v16.B(), v17.B(), v18.B(), v19.B(), 4,
4491 MemOperand(x21, x25, PostIndex));
4492 __ Add(x25, x25, 1);
4493
4494 __ Mov(x5, x22);
4495 __ Ldr(q20, MemOperand(x5, 16, PostIndex));
4496 __ Ldr(q21, MemOperand(x5, 16, PostIndex));
4497 __ Ldr(q22, MemOperand(x5, 16, PostIndex));
4498 __ Ldr(q23, MemOperand(x5));
4499 __ Ld4(v20.H(), v21.H(), v22.H(), v23.H(), 3,
4500 MemOperand(x22, x25, PostIndex));
4501 __ Add(x25, x25, 1);
4502
4503 __ Mov(x6, x23);
4504 __ Ldr(q24, MemOperand(x6, 16, PostIndex));
4505 __ Ldr(q25, MemOperand(x6, 16, PostIndex));
4506 __ Ldr(q26, MemOperand(x6, 16, PostIndex));
4507 __ Ldr(q27, MemOperand(x6));
4508 __ Ld4(v24.S(), v25.S(), v26.S(), v27.S(), 2,
4509 MemOperand(x23, x25, PostIndex));
4510 __ Add(x25, x25, 1);
4511
4512 __ Mov(x7, x24);
4513 __ Ldr(q28, MemOperand(x7, 16, PostIndex));
4514 __ Ldr(q29, MemOperand(x7, 16, PostIndex));
4515 __ Ldr(q30, MemOperand(x7, 16, PostIndex));
4516 __ Ldr(q31, MemOperand(x7));
4517 __ Ld4(v28.D(), v29.D(), v30.D(), v31.D(), 1,
4518 MemOperand(x24, x25, PostIndex));
4519
4520 END();
4521
4522 RUN();
4523
4524 CHECK_EQUAL_128(0x0004080c1014181c, 0x2024282c3034383c, q0);
4525 CHECK_EQUAL_128(0x0105090d1115191d, 0x2125292d3135393d, q1);
4526 CHECK_EQUAL_128(0x02060a0e12161a1e, 0x22262a2e32363a3e, q2);
4527 CHECK_EQUAL_128(0x03070b0f13171b1f, 0x23272b2f33373b3f, q3);
4528 CHECK_EQUAL_128(0x0100090811101918, 0x2120292831303938, q4);
4529 CHECK_EQUAL_128(0x03020b0a13121b1a, 0x23222b2a33323b3a, q5);
4530 CHECK_EQUAL_128(0x05040d0c15141d1c, 0x25242d2c35343d3c, q6);
4531 CHECK_EQUAL_128(0x07060f0e17161f1e, 0x27262f2e37363f3e, q7);
4532 CHECK_EQUAL_128(0x0302010013121110, 0x2322212033323130, q8);
4533 CHECK_EQUAL_128(0x0706050417161514, 0x2726252437363534, q9);
4534 CHECK_EQUAL_128(0x0b0a09081b1a1918, 0x2b2a29283b3a3938, q10);
4535 CHECK_EQUAL_128(0x0f0e0d0c1f1e1d1c, 0x2f2e2d2c3f3e3d3c, q11);
4536 CHECK_EQUAL_128(0x0706050403020100, 0x2726252423222120, q12);
4537 CHECK_EQUAL_128(0x0f0e0d0c0b0a0908, 0x2f2e2d2c2b2a2928, q13);
4538 CHECK_EQUAL_128(0x1716151413121110, 0x3736353433323130, q14);
4539 CHECK_EQUAL_128(0x1f1e1d1c1b1a1918, 0x3f3e3d3c3b3a3938, q15);
4540 CHECK_EQUAL_128(0x0f0e0d0c0b0a0908, 0x0706050003020100, q16);
4541 CHECK_EQUAL_128(0x1f1e1d1c1b1a1918, 0x1716150113121110, q17);
4542 CHECK_EQUAL_128(0x2f2e2d2c2b2a2928, 0x2726250223222120, q18);
4543 CHECK_EQUAL_128(0x3f3e3d3c3b3a3938, 0x3736350333323130, q19);
4544 CHECK_EQUAL_128(0x0f0e0d0c0b0a0908, 0x0100050403020100, q20);
4545 CHECK_EQUAL_128(0x1f1e1d1c1b1a1918, 0x0302151413121110, q21);
4546 CHECK_EQUAL_128(0x2f2e2d2c2b2a2928, 0x0504252423222120, q22);
4547 CHECK_EQUAL_128(0x3f3e3d3c3b3a3938, 0x0706353433323130, q23);
4548 CHECK_EQUAL_128(0x0f0e0d0c03020100, 0x0706050403020100, q24);
4549 CHECK_EQUAL_128(0x1f1e1d1c07060504, 0x1716151413121110, q25);
4550 CHECK_EQUAL_128(0x2f2e2d2c0b0a0908, 0x2726252423222120, q26);
4551 CHECK_EQUAL_128(0x3f3e3d3c0f0e0d0c, 0x3736353433323130, q27);
4552 CHECK_EQUAL_128(0x0706050403020100, 0x0706050403020100, q28);
4553 CHECK_EQUAL_128(0x0f0e0d0c0b0a0908, 0x1716151413121110, q29);
4554 CHECK_EQUAL_128(0x1716151413121110, 0x2726252423222120, q30);
4555 CHECK_EQUAL_128(0x1f1e1d1c1b1a1918, 0x3736353433323130, q31);
4556
4557 CHECK_EQUAL_64(src_base + 64, x17);
4558 CHECK_EQUAL_64(src_base + 64, x18);
4559 CHECK_EQUAL_64(src_base + 64, x19);
4560 CHECK_EQUAL_64(src_base + 64, x20);
4561 CHECK_EQUAL_64(src_base + 1, x21);
4562 CHECK_EQUAL_64(src_base + 2, x22);
4563 CHECK_EQUAL_64(src_base + 3, x23);
4564 CHECK_EQUAL_64(src_base + 4, x24);
4565
4566 TEARDOWN();
4567 }
4568
4569 TEST(neon_ld4_alllanes) {
4570 INIT_V8();
4571 SETUP();
4572
4573 uint8_t src[64];
4574 for (unsigned i = 0; i < sizeof(src); i++) {
4575 src[i] = i;
4576 }
4577 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
4578
4579 START();
4580 __ Mov(x17, src_base + 1);
4581 __ Mov(x18, 1);
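// Ld4r loads four consecutive elements and replicates each one across all
// lanes of its destination register.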
4582 __ Ld4r(v0.V8B(), v1.V8B(), v2.V8B(), v3.V8B(), MemOperand(x17));
4583 __ Add(x17, x17, 4);
4584 __ Ld4r(v4.V16B(), v5.V16B(), v6.V16B(), v7.V16B(), MemOperand(x17));
4585 __ Add(x17, x17, 1);
4586 __ Ld4r(v8_.V4H(), v9.V4H(), v10.V4H(), v11.V4H(), MemOperand(x17));
4587 __ Add(x17, x17, 1);
4588 __ Ld4r(v12.V8H(), v13.V8H(), v14.V8H(), v15.V8H(), MemOperand(x17));
4589 __ Add(x17, x17, 8);
4590 __ Ld4r(v16.V2S(), v17.V2S(), v18.V2S(), v19.V2S(), MemOperand(x17));
4591 __ Add(x17, x17, 1);
4592 __ Ld4r(v20.V4S(), v21.V4S(), v22.V4S(), v23.V4S(), MemOperand(x17));
4593 __ Add(x17, x17, 16);
4594 __ Ld4r(v24.V2D(), v25.V2D(), v26.V2D(), v27.V2D(), MemOperand(x17));
4595
4596 END();
4597
4598 RUN();
4599
4600 CHECK_EQUAL_128(0x0000000000000000, 0x0101010101010101, q0);
4601 CHECK_EQUAL_128(0x0000000000000000, 0x0202020202020202, q1);
4602 CHECK_EQUAL_128(0x0000000000000000, 0x0303030303030303, q2);
4603 CHECK_EQUAL_128(0x0000000000000000, 0x0404040404040404, q3);
4604 CHECK_EQUAL_128(0x0505050505050505, 0x0505050505050505, q4);
4605 CHECK_EQUAL_128(0x0606060606060606, 0x0606060606060606, q5);
4606 CHECK_EQUAL_128(0x0707070707070707, 0x0707070707070707, q6);
4607 CHECK_EQUAL_128(0x0808080808080808, 0x0808080808080808, q7);
4608 CHECK_EQUAL_128(0x0000000000000000, 0x0706070607060706, q8);
4609 CHECK_EQUAL_128(0x0000000000000000, 0x0908090809080908, q9);
4610 CHECK_EQUAL_128(0x0000000000000000, 0x0b0a0b0a0b0a0b0a, q10);
4611 CHECK_EQUAL_128(0x0000000000000000, 0x0d0c0d0c0d0c0d0c, q11);
4612 CHECK_EQUAL_128(0x0807080708070807, 0x0807080708070807, q12);
4613 CHECK_EQUAL_128(0x0a090a090a090a09, 0x0a090a090a090a09, q13);
4614 CHECK_EQUAL_128(0x0c0b0c0b0c0b0c0b, 0x0c0b0c0b0c0b0c0b, q14);
4615 CHECK_EQUAL_128(0x0e0d0e0d0e0d0e0d, 0x0e0d0e0d0e0d0e0d, q15);
4616 CHECK_EQUAL_128(0x0000000000000000, 0x1211100f1211100f, q16);
4617 CHECK_EQUAL_128(0x0000000000000000, 0x1615141316151413, q17);
4618 CHECK_EQUAL_128(0x0000000000000000, 0x1a1918171a191817, q18);
4619 CHECK_EQUAL_128(0x0000000000000000, 0x1e1d1c1b1e1d1c1b, q19);
4620 CHECK_EQUAL_128(0x1312111013121110, 0x1312111013121110, q20);
4621 CHECK_EQUAL_128(0x1716151417161514, 0x1716151417161514, q21);
4622 CHECK_EQUAL_128(0x1b1a19181b1a1918, 0x1b1a19181b1a1918, q22);
4623 CHECK_EQUAL_128(0x1f1e1d1c1f1e1d1c, 0x1f1e1d1c1f1e1d1c, q23);
4624 CHECK_EQUAL_128(0x2726252423222120, 0x2726252423222120, q24);
4625 CHECK_EQUAL_128(0x2f2e2d2c2b2a2928, 0x2f2e2d2c2b2a2928, q25);
4626 CHECK_EQUAL_128(0x3736353433323130, 0x3736353433323130, q26);
4627 CHECK_EQUAL_128(0x3f3e3d3c3b3a3938, 0x3f3e3d3c3b3a3938, q27);
4628
4629 TEARDOWN();
4630 }
4631
4632 TEST(neon_ld4_alllanes_postindex) {
4633 INIT_V8();
4634 SETUP();
4635
4636 uint8_t src[64];
4637 for (unsigned i = 0; i < sizeof(src); i++) {
4638 src[i] = i;
4639 }
4640 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
4641 __ Mov(x17, src_base + 1);
4642 __ Mov(x18, 1);
4643
4644 START();
4645 __ Mov(x17, src_base + 1);
4646 __ Mov(x18, 1);
4647 __ Ld4r(v0.V8B(), v1.V8B(), v2.V8B(), v3.V8B(),
4648 MemOperand(x17, 4, PostIndex));
4649 __ Ld4r(v4.V16B(), v5.V16B(), v6.V16B(), v7.V16B(),
4650 MemOperand(x17, x18, PostIndex));
4651 __ Ld4r(v8_.V4H(), v9.V4H(), v10.V4H(), v11.V4H(),
4652 MemOperand(x17, x18, PostIndex));
4653 __ Ld4r(v12.V8H(), v13.V8H(), v14.V8H(), v15.V8H(),
4654 MemOperand(x17, 8, PostIndex));
4655 __ Ld4r(v16.V2S(), v17.V2S(), v18.V2S(), v19.V2S(),
4656 MemOperand(x17, x18, PostIndex));
4657 __ Ld4r(v20.V4S(), v21.V4S(), v22.V4S(), v23.V4S(),
4658 MemOperand(x17, 16, PostIndex));
4659 __ Ld4r(v24.V2D(), v25.V2D(), v26.V2D(), v27.V2D(),
4660 MemOperand(x17, 32, PostIndex));
4661 END();
4662
4663 RUN();
4664
4665 CHECK_EQUAL_128(0x0000000000000000, 0x0101010101010101, q0);
4666 CHECK_EQUAL_128(0x0000000000000000, 0x0202020202020202, q1);
4667 CHECK_EQUAL_128(0x0000000000000000, 0x0303030303030303, q2);
4668 CHECK_EQUAL_128(0x0000000000000000, 0x0404040404040404, q3);
4669 CHECK_EQUAL_128(0x0505050505050505, 0x0505050505050505, q4);
4670 CHECK_EQUAL_128(0x0606060606060606, 0x0606060606060606, q5);
4671 CHECK_EQUAL_128(0x0707070707070707, 0x0707070707070707, q6);
4672 CHECK_EQUAL_128(0x0808080808080808, 0x0808080808080808, q7);
4673 CHECK_EQUAL_128(0x0000000000000000, 0x0706070607060706, q8);
4674 CHECK_EQUAL_128(0x0000000000000000, 0x0908090809080908, q9);
4675 CHECK_EQUAL_128(0x0000000000000000, 0x0b0a0b0a0b0a0b0a, q10);
4676 CHECK_EQUAL_128(0x0000000000000000, 0x0d0c0d0c0d0c0d0c, q11);
4677 CHECK_EQUAL_128(0x0807080708070807, 0x0807080708070807, q12);
4678 CHECK_EQUAL_128(0x0a090a090a090a09, 0x0a090a090a090a09, q13);
4679 CHECK_EQUAL_128(0x0c0b0c0b0c0b0c0b, 0x0c0b0c0b0c0b0c0b, q14);
4680 CHECK_EQUAL_128(0x0e0d0e0d0e0d0e0d, 0x0e0d0e0d0e0d0e0d, q15);
4681 CHECK_EQUAL_128(0x0000000000000000, 0x1211100f1211100f, q16);
4682 CHECK_EQUAL_128(0x0000000000000000, 0x1615141316151413, q17);
4683 CHECK_EQUAL_128(0x0000000000000000, 0x1a1918171a191817, q18);
4684 CHECK_EQUAL_128(0x0000000000000000, 0x1e1d1c1b1e1d1c1b, q19);
4685 CHECK_EQUAL_128(0x1312111013121110, 0x1312111013121110, q20);
4686 CHECK_EQUAL_128(0x1716151417161514, 0x1716151417161514, q21);
4687 CHECK_EQUAL_128(0x1b1a19181b1a1918, 0x1b1a19181b1a1918, q22);
4688 CHECK_EQUAL_128(0x1f1e1d1c1f1e1d1c, 0x1f1e1d1c1f1e1d1c, q23);
4689 CHECK_EQUAL_128(0x2726252423222120, 0x2726252423222120, q24);
4690 CHECK_EQUAL_128(0x2f2e2d2c2b2a2928, 0x2f2e2d2c2b2a2928, q25);
4691 CHECK_EQUAL_128(0x3736353433323130, 0x3736353433323130, q26);
4692 CHECK_EQUAL_128(0x3f3e3d3c3b3a3938, 0x3f3e3d3c3b3a3938, q27);
4693 CHECK_EQUAL_64(src_base + 64, x17);
4694
4695 TEARDOWN();
4696 }
4697
4698 TEST(neon_st1_lane) {
4699 INIT_V8();
4700 SETUP();
4701
4702 uint8_t src[64];
4703 for (unsigned i = 0; i < sizeof(src); i++) {
4704 src[i] = i;
4705 }
4706 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
4707
4708 START();
4709 __ Mov(x17, src_base);
4710 __ Mov(x18, -16);
4711 __ Ldr(q0, MemOperand(x17));
4712
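// The lane form of St1 stores a single element. Storing lanes 15..0 at
// successive addresses writes the register back with its elements reversed.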
4713 for (int i = 15; i >= 0; i--) {
4714 __ St1(v0.B(), i, MemOperand(x17));
4715 __ Add(x17, x17, 1);
4716 }
4717 __ Ldr(q1, MemOperand(x17, x18));
4718
4719 for (int i = 7; i >= 0; i--) {
4720 __ St1(v0.H(), i, MemOperand(x17));
4721 __ Add(x17, x17, 2);
4722 }
4723 __ Ldr(q2, MemOperand(x17, x18));
4724
4725 for (int i = 3; i >= 0; i--) {
4726 __ St1(v0.S(), i, MemOperand(x17));
4727 __ Add(x17, x17, 4);
4728 }
4729 __ Ldr(q3, MemOperand(x17, x18));
4730
4731 for (int i = 1; i >= 0; i--) {
4732 __ St1(v0.D(), i, MemOperand(x17));
4733 __ Add(x17, x17, 8);
4734 }
4735 __ Ldr(q4, MemOperand(x17, x18));
4736
4737 END();
4738
4739 RUN();
4740
4741 CHECK_EQUAL_128(0x0001020304050607, 0x08090a0b0c0d0e0f, q1);
4742 CHECK_EQUAL_128(0x0100030205040706, 0x09080b0a0d0c0f0e, q2);
4743 CHECK_EQUAL_128(0x0302010007060504, 0x0b0a09080f0e0d0c, q3);
4744 CHECK_EQUAL_128(0x0706050403020100, 0x0f0e0d0c0b0a0908, q4);
4745
4746 TEARDOWN();
4747 }
4748
4749 TEST(neon_st2_lane) {
4750 INIT_V8();
4751 SETUP();
4752
4753 // Struct size * addressing modes * element sizes * vector size.
4754 uint8_t dst[2 * 2 * 4 * 16];
4755 memset(dst, 0, sizeof(dst));
4756 uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
4757
4758 START();
4759 __ Mov(x17, dst_base);
4760 __ Mov(x18, dst_base);
4761 __ Movi(v0.V2D(), 0x0001020304050607, 0x08090a0b0c0d0e0f);
4762 __ Movi(v1.V2D(), 0x1011121314151617, 0x18191a1b1c1d1e1f);
4763
4764 // Test B stores with and without post index.
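// The lane form of St2 writes the selected lane of v0 and v1 to memory as an
// adjacent pair, so the loops below interleave the two source registers.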
4765 for (int i = 15; i >= 0; i--) {
4766 __ St2(v0.B(), v1.B(), i, MemOperand(x18));
4767 __ Add(x18, x18, 2);
4768 }
4769 for (int i = 15; i >= 0; i--) {
4770 __ St2(v0.B(), v1.B(), i, MemOperand(x18, 2, PostIndex));
4771 }
4772 __ Ldr(q2, MemOperand(x17, 0 * 16));
4773 __ Ldr(q3, MemOperand(x17, 1 * 16));
4774 __ Ldr(q4, MemOperand(x17, 2 * 16));
4775 __ Ldr(q5, MemOperand(x17, 3 * 16));
4776
4777 // Test H stores with and without post index.
4778 __ Mov(x0, 4);
4779 for (int i = 7; i >= 0; i--) {
4780 __ St2(v0.H(), v1.H(), i, MemOperand(x18));
4781 __ Add(x18, x18, 4);
4782 }
4783 for (int i = 7; i >= 0; i--) {
4784 __ St2(v0.H(), v1.H(), i, MemOperand(x18, x0, PostIndex));
4785 }
4786 __ Ldr(q6, MemOperand(x17, 4 * 16));
4787 __ Ldr(q7, MemOperand(x17, 5 * 16));
4788 __ Ldr(q16, MemOperand(x17, 6 * 16));
4789 __ Ldr(q17, MemOperand(x17, 7 * 16));
4790
4791 // Test S stores with and without post index.
4792 for (int i = 3; i >= 0; i--) {
4793 __ St2(v0.S(), v1.S(), i, MemOperand(x18));
4794 __ Add(x18, x18, 8);
4795 }
4796 for (int i = 3; i >= 0; i--) {
4797 __ St2(v0.S(), v1.S(), i, MemOperand(x18, 8, PostIndex));
4798 }
4799 __ Ldr(q18, MemOperand(x17, 8 * 16));
4800 __ Ldr(q19, MemOperand(x17, 9 * 16));
4801 __ Ldr(q20, MemOperand(x17, 10 * 16));
4802 __ Ldr(q21, MemOperand(x17, 11 * 16));
4803
4804 // Test D stores with and without post index.
4805 __ Mov(x0, 16);
4806 __ St2(v0.D(), v1.D(), 1, MemOperand(x18));
4807 __ Add(x18, x18, 16);
4808 __ St2(v0.D(), v1.D(), 0, MemOperand(x18, 16, PostIndex));
4809 __ St2(v0.D(), v1.D(), 1, MemOperand(x18, x0, PostIndex));
4810 __ St2(v0.D(), v1.D(), 0, MemOperand(x18, x0, PostIndex));
4811 __ Ldr(q22, MemOperand(x17, 12 * 16));
4812 __ Ldr(q23, MemOperand(x17, 13 * 16));
4813 __ Ldr(q24, MemOperand(x17, 14 * 16));
4814 __ Ldr(q25, MemOperand(x17, 15 * 16));
4815 END();
4816
4817 RUN();
4818
4819 CHECK_EQUAL_128(0x1707160615051404, 0x1303120211011000, q2);
4820 CHECK_EQUAL_128(0x1f0f1e0e1d0d1c0c, 0x1b0b1a0a19091808, q3);
4821 CHECK_EQUAL_128(0x1707160615051404, 0x1303120211011000, q4);
4822 CHECK_EQUAL_128(0x1f0f1e0e1d0d1c0c, 0x1b0b1a0a19091808, q5);
4823
4824 CHECK_EQUAL_128(0x1617060714150405, 0x1213020310110001, q6);
4825 CHECK_EQUAL_128(0x1e1f0e0f1c1d0c0d, 0x1a1b0a0b18190809, q7);
4826 CHECK_EQUAL_128(0x1617060714150405, 0x1213020310110001, q16);
4827 CHECK_EQUAL_128(0x1e1f0e0f1c1d0c0d, 0x1a1b0a0b18190809, q17);
4828
4829 CHECK_EQUAL_128(0x1415161704050607, 0x1011121300010203, q18);
4830 CHECK_EQUAL_128(0x1c1d1e1f0c0d0e0f, 0x18191a1b08090a0b, q19);
4831 CHECK_EQUAL_128(0x1415161704050607, 0x1011121300010203, q20);
4832 CHECK_EQUAL_128(0x1c1d1e1f0c0d0e0f, 0x18191a1b08090a0b, q21);
4833
4834 CHECK_EQUAL_128(0x1011121314151617, 0x0001020304050607, q22);
4835 CHECK_EQUAL_128(0x18191a1b1c1d1e1f, 0x08090a0b0c0d0e0f, q23);
4836 CHECK_EQUAL_128(0x1011121314151617, 0x0001020304050607, q24);
4837 CHECK_EQUAL_128(0x18191a1b1c1d1e1f, 0x08090a0b0c0d0e0f, q25);
4838
4839 TEARDOWN();
4840 }
4841
4842 TEST(neon_st3_lane) {
4843 INIT_V8();
4844 SETUP();
4845
4846 // Struct size * addressing modes * element sizes * vector size.
4847 uint8_t dst[3 * 2 * 4 * 16];
4848 memset(dst, 0, sizeof(dst));
4849 uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
4850
4851 START();
4852 __ Mov(x17, dst_base);
4853 __ Mov(x18, dst_base);
4854 __ Movi(v0.V2D(), 0x0001020304050607, 0x08090a0b0c0d0e0f);
4855 __ Movi(v1.V2D(), 0x1011121314151617, 0x18191a1b1c1d1e1f);
4856 __ Movi(v2.V2D(), 0x2021222324252627, 0x28292a2b2c2d2e2f);
4857
4858 // Test B stores with and without post index.
4859 for (int i = 15; i >= 0; i--) {
4860 __ St3(v0.B(), v1.B(), v2.B(), i, MemOperand(x18));
4861 __ Add(x18, x18, 3);
4862 }
4863 for (int i = 15; i >= 0; i--) {
4864 __ St3(v0.B(), v1.B(), v2.B(), i, MemOperand(x18, 3, PostIndex));
4865 }
4866 __ Ldr(q3, MemOperand(x17, 0 * 16));
4867 __ Ldr(q4, MemOperand(x17, 1 * 16));
4868 __ Ldr(q5, MemOperand(x17, 2 * 16));
4869 __ Ldr(q6, MemOperand(x17, 3 * 16));
4870 __ Ldr(q7, MemOperand(x17, 4 * 16));
4871 __ Ldr(q16, MemOperand(x17, 5 * 16));
4872
4873 // Test H stores with and without post index.
4874 __ Mov(x0, 6);
4875 for (int i = 7; i >= 0; i--) {
4876 __ St3(v0.H(), v1.H(), v2.H(), i, MemOperand(x18));
4877 __ Add(x18, x18, 6);
4878 }
4879 for (int i = 7; i >= 0; i--) {
4880 __ St3(v0.H(), v1.H(), v2.H(), i, MemOperand(x18, x0, PostIndex));
4881 }
4882 __ Ldr(q17, MemOperand(x17, 6 * 16));
4883 __ Ldr(q18, MemOperand(x17, 7 * 16));
4884 __ Ldr(q19, MemOperand(x17, 8 * 16));
4885 __ Ldr(q20, MemOperand(x17, 9 * 16));
4886 __ Ldr(q21, MemOperand(x17, 10 * 16));
4887 __ Ldr(q22, MemOperand(x17, 11 * 16));
4888
4889 // Test S stores with and without post index.
4890 for (int i = 3; i >= 0; i--) {
4891 __ St3(v0.S(), v1.S(), v2.S(), i, MemOperand(x18));
4892 __ Add(x18, x18, 12);
4893 }
4894 for (int i = 3; i >= 0; i--) {
4895 __ St3(v0.S(), v1.S(), v2.S(), i, MemOperand(x18, 12, PostIndex));
4896 }
4897 __ Ldr(q23, MemOperand(x17, 12 * 16));
4898 __ Ldr(q24, MemOperand(x17, 13 * 16));
4899 __ Ldr(q25, MemOperand(x17, 14 * 16));
4900 __ Ldr(q26, MemOperand(x17, 15 * 16));
4901 __ Ldr(q27, MemOperand(x17, 16 * 16));
4902 __ Ldr(q28, MemOperand(x17, 17 * 16));
4903
4904 // Test D stores with and without post index.
4905 __ Mov(x0, 24);
4906 __ St3(v0.D(), v1.D(), v2.D(), 1, MemOperand(x18));
4907 __ Add(x18, x18, 24);
4908 __ St3(v0.D(), v1.D(), v2.D(), 0, MemOperand(x18, 24, PostIndex));
4909 __ St3(v0.D(), v1.D(), v2.D(), 1, MemOperand(x18, x0, PostIndex));
4910 __ Ldr(q29, MemOperand(x17, 18 * 16));
4911 __ Ldr(q30, MemOperand(x17, 19 * 16));
4912 __ Ldr(q31, MemOperand(x17, 20 * 16));
4913 END();
4914
4915 RUN();
4916
4917 CHECK_EQUAL_128(0x0524140423130322, 0x1202211101201000, q3);
4918 CHECK_EQUAL_128(0x1a0a291909281808, 0x2717072616062515, q4);
4919 CHECK_EQUAL_128(0x2f1f0f2e1e0e2d1d, 0x0d2c1c0c2b1b0b2a, q5);
4920 CHECK_EQUAL_128(0x0524140423130322, 0x1202211101201000, q6);
4921 CHECK_EQUAL_128(0x1a0a291909281808, 0x2717072616062515, q7);
4922 CHECK_EQUAL_128(0x2f1f0f2e1e0e2d1d, 0x0d2c1c0c2b1b0b2a, q16);
4923
4924 CHECK_EQUAL_128(0x1415040522231213, 0x0203202110110001, q17);
4925 CHECK_EQUAL_128(0x0a0b282918190809, 0x2627161706072425, q18);
4926 CHECK_EQUAL_128(0x2e2f1e1f0e0f2c2d, 0x1c1d0c0d2a2b1a1b, q19);
4927 CHECK_EQUAL_128(0x1415040522231213, 0x0203202110110001, q20);
4928 CHECK_EQUAL_128(0x0a0b282918190809, 0x2627161706072425, q21);
4929 CHECK_EQUAL_128(0x2e2f1e1f0e0f2c2d, 0x1c1d0c0d2a2b1a1b, q22);
4930
4931 CHECK_EQUAL_128(0x0405060720212223, 0x1011121300010203, q23);
4932 CHECK_EQUAL_128(0x18191a1b08090a0b, 0x2425262714151617, q24);
4933 CHECK_EQUAL_128(0x2c2d2e2f1c1d1e1f, 0x0c0d0e0f28292a2b, q25);
4934 CHECK_EQUAL_128(0x0405060720212223, 0x1011121300010203, q26);
4935 CHECK_EQUAL_128(0x18191a1b08090a0b, 0x2425262714151617, q27);
4936 CHECK_EQUAL_128(0x2c2d2e2f1c1d1e1f, 0x0c0d0e0f28292a2b, q28);
4937
4938 TEARDOWN();
4939 }
4940
4941 TEST(neon_st4_lane) {
4942 INIT_V8();
4943 SETUP();
4944
4945 // Struct size * element sizes * vector size.
4946 uint8_t dst[4 * 4 * 16];
4947 memset(dst, 0, sizeof(dst));
4948 uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
4949
4950 START();
4951 __ Mov(x17, dst_base);
4952 __ Mov(x18, dst_base);
4953 __ Movi(v0.V2D(), 0x0001020304050607, 0x08090a0b0c0d0e0f);
4954 __ Movi(v1.V2D(), 0x1011121314151617, 0x18191a1b1c1d1e1f);
4955 __ Movi(v2.V2D(), 0x2021222324252627, 0x28292a2b2c2d2e2f);
4956 __ Movi(v3.V2D(), 0x2021222324252627, 0x28292a2b2c2d2e2f);
4957
4958 // Test B stores without post index.
4959 for (int i = 15; i >= 0; i--) {
4960 __ St4(v0.B(), v1.B(), v2.B(), v3.B(), i, MemOperand(x18));
4961 __ Add(x18, x18, 4);
4962 }
4963 __ Ldr(q4, MemOperand(x17, 0 * 16));
4964 __ Ldr(q5, MemOperand(x17, 1 * 16));
4965 __ Ldr(q6, MemOperand(x17, 2 * 16));
4966 __ Ldr(q7, MemOperand(x17, 3 * 16));
4967
4968 // Test H stores with post index.
4969 __ Mov(x0, 8);
4970 for (int i = 7; i >= 0; i--) {
4971 __ St4(v0.H(), v1.H(), v2.H(), v3.H(), i, MemOperand(x18, x0, PostIndex));
4972 }
4973 __ Ldr(q16, MemOperand(x17, 4 * 16));
4974 __ Ldr(q17, MemOperand(x17, 5 * 16));
4975 __ Ldr(q18, MemOperand(x17, 6 * 16));
4976 __ Ldr(q19, MemOperand(x17, 7 * 16));
4977
4978 // Test S stores without post index.
4979 for (int i = 3; i >= 0; i--) {
4980 __ St4(v0.S(), v1.S(), v2.S(), v3.S(), i, MemOperand(x18));
4981 __ Add(x18, x18, 16);
4982 }
4983 __ Ldr(q20, MemOperand(x17, 8 * 16));
4984 __ Ldr(q21, MemOperand(x17, 9 * 16));
4985 __ Ldr(q22, MemOperand(x17, 10 * 16));
4986 __ Ldr(q23, MemOperand(x17, 11 * 16));
4987
4988 // Test D stores with post index.
4989 __ Mov(x0, 32);
4990 __ St4(v0.D(), v1.D(), v2.D(), v3.D(), 0, MemOperand(x18, 32, PostIndex));
4991 __ St4(v0.D(), v1.D(), v2.D(), v3.D(), 1, MemOperand(x18, x0, PostIndex));
4992
4993 __ Ldr(q24, MemOperand(x17, 12 * 16));
4994 __ Ldr(q25, MemOperand(x17, 13 * 16));
4995 __ Ldr(q26, MemOperand(x17, 14 * 16));
4996 __ Ldr(q27, MemOperand(x17, 15 * 16));
4997 END();
4998
4999 RUN();
5000
5001 CHECK_EQUAL_128(0x2323130322221202, 0x2121110120201000, q4);
5002 CHECK_EQUAL_128(0x2727170726261606, 0x2525150524241404, q5);
5003 CHECK_EQUAL_128(0x2b2b1b0b2a2a1a0a, 0x2929190928281808, q6);
5004 CHECK_EQUAL_128(0x2f2f1f0f2e2e1e0e, 0x2d2d1d0d2c2c1c0c, q7);
5005
5006 CHECK_EQUAL_128(0x2223222312130203, 0x2021202110110001, q16);
5007 CHECK_EQUAL_128(0x2627262716170607, 0x2425242514150405, q17);
5008 CHECK_EQUAL_128(0x2a2b2a2b1a1b0a0b, 0x2829282918190809, q18);
5009 CHECK_EQUAL_128(0x2e2f2e2f1e1f0e0f, 0x2c2d2c2d1c1d0c0d, q19);
5010
5011 CHECK_EQUAL_128(0x2021222320212223, 0x1011121300010203, q20);
5012 CHECK_EQUAL_128(0x2425262724252627, 0x1415161704050607, q21);
5013 CHECK_EQUAL_128(0x28292a2b28292a2b, 0x18191a1b08090a0b, q22);
5014 CHECK_EQUAL_128(0x2c2d2e2f2c2d2e2f, 0x1c1d1e1f0c0d0e0f, q23);
5015
5016 CHECK_EQUAL_128(0x18191a1b1c1d1e1f, 0x08090a0b0c0d0e0f, q24);
5017 CHECK_EQUAL_128(0x28292a2b2c2d2e2f, 0x28292a2b2c2d2e2f, q25);
5018 CHECK_EQUAL_128(0x1011121314151617, 0x0001020304050607, q26);
5019 CHECK_EQUAL_128(0x2021222324252627, 0x2021222324252627, q27);
5020
5021 TEARDOWN();
5022 }
5023
5024 TEST(neon_ld1_lane_postindex) {
5025 INIT_V8();
5026 SETUP();
5027
5028 uint8_t src[64];
5029 for (unsigned i = 0; i < sizeof(src); i++) {
5030 src[i] = i;
5031 }
5032 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
5033
5034 START();
5035 __ Mov(x17, src_base);
5036 __ Mov(x18, src_base);
5037 __ Mov(x19, src_base);
5038 __ Mov(x20, src_base);
5039 __ Mov(x21, src_base);
5040 __ Mov(x22, src_base);
5041 __ Mov(x23, src_base);
5042 __ Mov(x24, src_base);
5043
5044 // Test loading whole register by element.
5045 for (int i = 15; i >= 0; i--) {
5046 __ Ld1(v0.B(), i, MemOperand(x17, 1, PostIndex));
5047 }
5048
5049 for (int i = 7; i >= 0; i--) {
5050 __ Ld1(v1.H(), i, MemOperand(x18, 2, PostIndex));
5051 }
5052
5053 for (int i = 3; i >= 0; i--) {
5054 __ Ld1(v2.S(), i, MemOperand(x19, 4, PostIndex));
5055 }
5056
5057 for (int i = 1; i >= 0; i--) {
5058 __ Ld1(v3.D(), i, MemOperand(x20, 8, PostIndex));
5059 }
5060
5061 // Test loading a single element into an initialised register.
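// Register post-index offsets are applied as-is, so the bases below advance by
// x25 bytes rather than by the element size.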
5062 __ Mov(x25, 1);
5063 __ Ldr(q4, MemOperand(x21));
5064 __ Ld1(v4.B(), 4, MemOperand(x21, x25, PostIndex));
5065 __ Add(x25, x25, 1);
5066
5067 __ Ldr(q5, MemOperand(x22));
5068 __ Ld1(v5.H(), 3, MemOperand(x22, x25, PostIndex));
5069 __ Add(x25, x25, 1);
5070
5071 __ Ldr(q6, MemOperand(x23));
5072 __ Ld1(v6.S(), 2, MemOperand(x23, x25, PostIndex));
5073 __ Add(x25, x25, 1);
5074
5075 __ Ldr(q7, MemOperand(x24));
5076 __ Ld1(v7.D(), 1, MemOperand(x24, x25, PostIndex));
5077
5078 END();
5079
5080 RUN();
5081
5082 CHECK_EQUAL_128(0x0001020304050607, 0x08090a0b0c0d0e0f, q0);
5083 CHECK_EQUAL_128(0x0100030205040706, 0x09080b0a0d0c0f0e, q1);
5084 CHECK_EQUAL_128(0x0302010007060504, 0x0b0a09080f0e0d0c, q2);
5085 CHECK_EQUAL_128(0x0706050403020100, 0x0f0e0d0c0b0a0908, q3);
5086 CHECK_EQUAL_128(0x0f0e0d0c0b0a0908, 0x0706050003020100, q4);
5087 CHECK_EQUAL_128(0x0f0e0d0c0b0a0908, 0x0100050403020100, q5);
5088 CHECK_EQUAL_128(0x0f0e0d0c03020100, 0x0706050403020100, q6);
5089 CHECK_EQUAL_128(0x0706050403020100, 0x0706050403020100, q7);
5090 CHECK_EQUAL_64(src_base + 16, x17);
5091 CHECK_EQUAL_64(src_base + 16, x18);
5092 CHECK_EQUAL_64(src_base + 16, x19);
5093 CHECK_EQUAL_64(src_base + 16, x20);
5094 CHECK_EQUAL_64(src_base + 1, x21);
5095 CHECK_EQUAL_64(src_base + 2, x22);
5096 CHECK_EQUAL_64(src_base + 3, x23);
5097 CHECK_EQUAL_64(src_base + 4, x24);
5098
5099 TEARDOWN();
5100 }
5101
5102 TEST(neon_st1_lane_postindex) {
5103 INIT_V8();
5104 SETUP();
5105
5106 uint8_t src[64];
5107 for (unsigned i = 0; i < sizeof(src); i++) {
5108 src[i] = i;
5109 }
5110 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
5111
5112 START();
5113 __ Mov(x17, src_base);
5114 __ Mov(x18, -16);
5115 __ Ldr(q0, MemOperand(x17));
5116
5117 for (int i = 15; i >= 0; i--) {
5118 __ St1(v0.B(), i, MemOperand(x17, 1, PostIndex));
5119 }
5120 __ Ldr(q1, MemOperand(x17, x18));
5121
5122 for (int i = 7; i >= 0; i--) {
5123 __ St1(v0.H(), i, MemOperand(x17, 2, PostIndex));
5124 }
5125 __ Ldr(q2, MemOperand(x17, x18));
5126
5127 for (int i = 3; i >= 0; i--) {
5128 __ St1(v0.S(), i, MemOperand(x17, 4, PostIndex));
5129 }
5130 __ Ldr(q3, MemOperand(x17, x18));
5131
5132 for (int i = 1; i >= 0; i--) {
5133 __ St1(v0.D(), i, MemOperand(x17, 8, PostIndex));
5134 }
5135 __ Ldr(q4, MemOperand(x17, x18));
5136
5137 END();
5138
5139 RUN();
5140
5141 CHECK_EQUAL_128(0x0001020304050607, 0x08090a0b0c0d0e0f, q1);
5142 CHECK_EQUAL_128(0x0100030205040706, 0x09080b0a0d0c0f0e, q2);
5143 CHECK_EQUAL_128(0x0302010007060504, 0x0b0a09080f0e0d0c, q3);
5144 CHECK_EQUAL_128(0x0706050403020100, 0x0f0e0d0c0b0a0908, q4);
5145
5146 TEARDOWN();
5147 }
5148
5149 TEST(neon_ld1_alllanes) {
5150 INIT_V8();
5151 SETUP();
5152
5153 uint8_t src[64];
5154 for (unsigned i = 0; i < sizeof(src); i++) {
5155 src[i] = i;
5156 }
5157 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
5158
5159 START();
5160 __ Mov(x17, src_base + 1);
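// Ld1r loads a single element and replicates it across all lanes of the
// destination register.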
5161 __ Ld1r(v0.V8B(), MemOperand(x17));
5162 __ Add(x17, x17, 1);
5163 __ Ld1r(v1.V16B(), MemOperand(x17));
5164 __ Add(x17, x17, 1);
5165 __ Ld1r(v2.V4H(), MemOperand(x17));
5166 __ Add(x17, x17, 1);
5167 __ Ld1r(v3.V8H(), MemOperand(x17));
5168 __ Add(x17, x17, 1);
5169 __ Ld1r(v4.V2S(), MemOperand(x17));
5170 __ Add(x17, x17, 1);
5171 __ Ld1r(v5.V4S(), MemOperand(x17));
5172 __ Add(x17, x17, 1);
5173 __ Ld1r(v6.V1D(), MemOperand(x17));
5174 __ Add(x17, x17, 1);
5175 __ Ld1r(v7.V2D(), MemOperand(x17));
5176 END();
5177
5178 RUN();
5179
5180 CHECK_EQUAL_128(0, 0x0101010101010101, q0);
5181 CHECK_EQUAL_128(0x0202020202020202, 0x0202020202020202, q1);
5182 CHECK_EQUAL_128(0, 0x0403040304030403, q2);
5183 CHECK_EQUAL_128(0x0504050405040504, 0x0504050405040504, q3);
5184 CHECK_EQUAL_128(0, 0x0807060508070605, q4);
5185 CHECK_EQUAL_128(0x0908070609080706, 0x0908070609080706, q5);
5186 CHECK_EQUAL_128(0, 0x0e0d0c0b0a090807, q6);
5187 CHECK_EQUAL_128(0x0f0e0d0c0b0a0908, 0x0f0e0d0c0b0a0908, q7);
5188
5189 TEARDOWN();
5190 }
5191
5192 TEST(neon_ld1_alllanes_postindex) {
5193 INIT_V8();
5194 SETUP();
5195
5196 uint8_t src[64];
5197 for (unsigned i = 0; i < sizeof(src); i++) {
5198 src[i] = i;
5199 }
5200 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
5201
5202 START();
5203 __ Mov(x17, src_base + 1);
5204 __ Mov(x18, 1);
5205 __ Ld1r(v0.V8B(), MemOperand(x17, 1, PostIndex));
5206 __ Ld1r(v1.V16B(), MemOperand(x17, x18, PostIndex));
5207 __ Ld1r(v2.V4H(), MemOperand(x17, x18, PostIndex));
5208 __ Ld1r(v3.V8H(), MemOperand(x17, 2, PostIndex));
5209 __ Ld1r(v4.V2S(), MemOperand(x17, x18, PostIndex));
5210 __ Ld1r(v5.V4S(), MemOperand(x17, 4, PostIndex));
5211 __ Ld1r(v6.V2D(), MemOperand(x17, 8, PostIndex));
5212 END();
5213
5214 RUN();
5215
5216 CHECK_EQUAL_128(0, 0x0101010101010101, q0);
5217 CHECK_EQUAL_128(0x0202020202020202, 0x0202020202020202, q1);
5218 CHECK_EQUAL_128(0, 0x0403040304030403, q2);
5219 CHECK_EQUAL_128(0x0504050405040504, 0x0504050405040504, q3);
5220 CHECK_EQUAL_128(0, 0x0908070609080706, q4);
5221 CHECK_EQUAL_128(0x0a0908070a090807, 0x0a0908070a090807, q5);
5222 CHECK_EQUAL_128(0x1211100f0e0d0c0b, 0x1211100f0e0d0c0b, q6);
5223 CHECK_EQUAL_64(src_base + 19, x17);
5224
5225 TEARDOWN();
5226 }
5227
5228 TEST(neon_st1_d) {
5229 INIT_V8();
5230 SETUP();
5231
5232 uint8_t src[14 * kDRegSize];
5233 for (unsigned i = 0; i < sizeof(src); i++) {
5234 src[i] = i;
5235 }
5236 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
5237
5238 START();
5239 __ Mov(x17, src_base);
5240 __ Ldr(q0, MemOperand(x17, 16, PostIndex));
5241 __ Ldr(q1, MemOperand(x17, 16, PostIndex));
5242 __ Ldr(q2, MemOperand(x17, 16, PostIndex));
5243 __ Ldr(q3, MemOperand(x17, 16, PostIndex));
5244 __ Mov(x17, src_base);
5245
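// The multi-register forms of St1 store the registers sequentially, without
// interleaving elements.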
5246 __ St1(v0.V8B(), MemOperand(x17));
5247 __ Ldr(d16, MemOperand(x17, 8, PostIndex));
5248
5249 __ St1(v0.V8B(), v1.V8B(), MemOperand(x17));
5250 __ Ldr(q17, MemOperand(x17, 16, PostIndex));
5251
5252 __ St1(v0.V4H(), v1.V4H(), v2.V4H(), MemOperand(x17));
5253 __ Ldr(d18, MemOperand(x17, 8, PostIndex));
5254 __ Ldr(d19, MemOperand(x17, 8, PostIndex));
5255 __ Ldr(d20, MemOperand(x17, 8, PostIndex));
5256
5257 __ St1(v0.V2S(), v1.V2S(), v2.V2S(), v3.V2S(), MemOperand(x17));
5258 __ Ldr(q21, MemOperand(x17, 16, PostIndex));
5259 __ Ldr(q22, MemOperand(x17, 16, PostIndex));
5260
5261 __ St1(v0.V1D(), v1.V1D(), v2.V1D(), v3.V1D(), MemOperand(x17));
5262 __ Ldr(q23, MemOperand(x17, 16, PostIndex));
5263 __ Ldr(q24, MemOperand(x17));
5264 END();
5265
5266 RUN();
5267
5268 CHECK_EQUAL_128(0x0f0e0d0c0b0a0908, 0x0706050403020100, q0);
5269 CHECK_EQUAL_128(0x1f1e1d1c1b1a1918, 0x1716151413121110, q1);
5270 CHECK_EQUAL_128(0x2f2e2d2c2b2a2928, 0x2726252423222120, q2);
5271 CHECK_EQUAL_128(0x3f3e3d3c3b3a3938, 0x3736353433323130, q3);
5272 CHECK_EQUAL_128(0, 0x0706050403020100, q16);
5273 CHECK_EQUAL_128(0x1716151413121110, 0x0706050403020100, q17);
5274 CHECK_EQUAL_128(0, 0x0706050403020100, q18);
5275 CHECK_EQUAL_128(0, 0x1716151413121110, q19);
5276 CHECK_EQUAL_128(0, 0x2726252423222120, q20);
5277 CHECK_EQUAL_128(0x1716151413121110, 0x0706050403020100, q21);
5278 CHECK_EQUAL_128(0x3736353433323130, 0x2726252423222120, q22);
5279 CHECK_EQUAL_128(0x1716151413121110, 0x0706050403020100, q23);
5280 CHECK_EQUAL_128(0x3736353433323130, 0x2726252423222120, q24);
5281
5282 TEARDOWN();
5283 }
5284
5285 TEST(neon_st1_d_postindex) {
5286 INIT_V8();
5287 SETUP();
5288
5289 uint8_t src[64 + 14 * kDRegSize];
5290 for (unsigned i = 0; i < sizeof(src); i++) {
5291 src[i] = i;
5292 }
5293 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
5294
5295 START();
5296 __ Mov(x17, src_base);
5297 __ Mov(x18, -8);
5298 __ Mov(x19, -16);
5299 __ Mov(x20, -24);
5300 __ Mov(x21, -32);
5301 __ Ldr(q0, MemOperand(x17, 16, PostIndex));
5302 __ Ldr(q1, MemOperand(x17, 16, PostIndex));
5303 __ Ldr(q2, MemOperand(x17, 16, PostIndex));
5304 __ Ldr(q3, MemOperand(x17, 16, PostIndex));
5305 __ Mov(x17, src_base);
5306
5307 __ St1(v0.V8B(), MemOperand(x17, 8, PostIndex));
5308 __ Ldr(d16, MemOperand(x17, x18));
5309
5310 __ St1(v0.V8B(), v1.V8B(), MemOperand(x17, 16, PostIndex));
5311 __ Ldr(q17, MemOperand(x17, x19));
5312
5313 __ St1(v0.V4H(), v1.V4H(), v2.V4H(), MemOperand(x17, 24, PostIndex));
5314 __ Ldr(d18, MemOperand(x17, x20));
5315 __ Ldr(d19, MemOperand(x17, x19));
5316 __ Ldr(d20, MemOperand(x17, x18));
5317
5318 __ St1(v0.V2S(), v1.V2S(), v2.V2S(), v3.V2S(),
5319 MemOperand(x17, 32, PostIndex));
5320 __ Ldr(q21, MemOperand(x17, x21));
5321 __ Ldr(q22, MemOperand(x17, x19));
5322
5323 __ St1(v0.V1D(), v1.V1D(), v2.V1D(), v3.V1D(),
5324 MemOperand(x17, 32, PostIndex));
5325 __ Ldr(q23, MemOperand(x17, x21));
5326 __ Ldr(q24, MemOperand(x17, x19));
5327 END();
5328
5329 RUN();
5330
5331 CHECK_EQUAL_128(0, 0x0706050403020100, q16);
5332 CHECK_EQUAL_128(0x1716151413121110, 0x0706050403020100, q17);
5333 CHECK_EQUAL_128(0, 0x0706050403020100, q18);
5334 CHECK_EQUAL_128(0, 0x1716151413121110, q19);
5335 CHECK_EQUAL_128(0, 0x2726252423222120, q20);
5336 CHECK_EQUAL_128(0x1716151413121110, 0x0706050403020100, q21);
5337 CHECK_EQUAL_128(0x3736353433323130, 0x2726252423222120, q22);
5338 CHECK_EQUAL_128(0x1716151413121110, 0x0706050403020100, q23);
5339 CHECK_EQUAL_128(0x3736353433323130, 0x2726252423222120, q24);
5340
5341 TEARDOWN();
5342 }
5343
5344 TEST(neon_st1_q) {
5345 INIT_V8();
5346 SETUP();
5347
5348 uint8_t src[64 + 160];
5349 for (unsigned i = 0; i < sizeof(src); i++) {
5350 src[i] = i;
5351 }
5352 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
5353
5354 START();
5355 __ Mov(x17, src_base);
5356 __ Ldr(q0, MemOperand(x17, 16, PostIndex));
5357 __ Ldr(q1, MemOperand(x17, 16, PostIndex));
5358 __ Ldr(q2, MemOperand(x17, 16, PostIndex));
5359 __ Ldr(q3, MemOperand(x17, 16, PostIndex));
5360
5361 __ St1(v0.V16B(), MemOperand(x17));
5362 __ Ldr(q16, MemOperand(x17, 16, PostIndex));
5363
5364 __ St1(v0.V8H(), v1.V8H(), MemOperand(x17));
5365 __ Ldr(q17, MemOperand(x17, 16, PostIndex));
5366 __ Ldr(q18, MemOperand(x17, 16, PostIndex));
5367
5368 __ St1(v0.V4S(), v1.V4S(), v2.V4S(), MemOperand(x17));
5369 __ Ldr(q19, MemOperand(x17, 16, PostIndex));
5370 __ Ldr(q20, MemOperand(x17, 16, PostIndex));
5371 __ Ldr(q21, MemOperand(x17, 16, PostIndex));
5372
5373 __ St1(v0.V2D(), v1.V2D(), v2.V2D(), v3.V2D(), MemOperand(x17));
5374 __ Ldr(q22, MemOperand(x17, 16, PostIndex));
5375 __ Ldr(q23, MemOperand(x17, 16, PostIndex));
5376 __ Ldr(q24, MemOperand(x17, 16, PostIndex));
5377 __ Ldr(q25, MemOperand(x17));
5378 END();
5379
5380 RUN();
5381
5382 CHECK_EQUAL_128(0x0f0e0d0c0b0a0908, 0x0706050403020100, q16);
5383 CHECK_EQUAL_128(0x0f0e0d0c0b0a0908, 0x0706050403020100, q17);
5384 CHECK_EQUAL_128(0x1f1e1d1c1b1a1918, 0x1716151413121110, q18);
5385 CHECK_EQUAL_128(0x0f0e0d0c0b0a0908, 0x0706050403020100, q19);
5386 CHECK_EQUAL_128(0x1f1e1d1c1b1a1918, 0x1716151413121110, q20);
5387 CHECK_EQUAL_128(0x2f2e2d2c2b2a2928, 0x2726252423222120, q21);
5388 CHECK_EQUAL_128(0x0f0e0d0c0b0a0908, 0x0706050403020100, q22);
5389 CHECK_EQUAL_128(0x1f1e1d1c1b1a1918, 0x1716151413121110, q23);
5390 CHECK_EQUAL_128(0x2f2e2d2c2b2a2928, 0x2726252423222120, q24);
5391 CHECK_EQUAL_128(0x3f3e3d3c3b3a3938, 0x3736353433323130, q25);
5392
5393 TEARDOWN();
5394 }
5395
5396 TEST(neon_st1_q_postindex) {
5397 INIT_V8();
5398 SETUP();
5399
5400 uint8_t src[64 + 160];
5401 for (unsigned i = 0; i < sizeof(src); i++) {
5402 src[i] = i;
5403 }
5404 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
5405
5406 START();
5407 __ Mov(x17, src_base);
5408 __ Mov(x18, -16);
5409 __ Mov(x19, -32);
5410 __ Mov(x20, -48);
5411 __ Mov(x21, -64);
5412 __ Ldr(q0, MemOperand(x17, 16, PostIndex));
5413 __ Ldr(q1, MemOperand(x17, 16, PostIndex));
5414 __ Ldr(q2, MemOperand(x17, 16, PostIndex));
5415 __ Ldr(q3, MemOperand(x17, 16, PostIndex));
5416
5417 __ St1(v0.V16B(), MemOperand(x17, 16, PostIndex));
5418 __ Ldr(q16, MemOperand(x17, x18));
5419
5420 __ St1(v0.V8H(), v1.V8H(), MemOperand(x17, 32, PostIndex));
5421 __ Ldr(q17, MemOperand(x17, x19));
5422 __ Ldr(q18, MemOperand(x17, x18));
5423
5424 __ St1(v0.V4S(), v1.V4S(), v2.V4S(), MemOperand(x17, 48, PostIndex));
5425 __ Ldr(q19, MemOperand(x17, x20));
5426 __ Ldr(q20, MemOperand(x17, x19));
5427 __ Ldr(q21, MemOperand(x17, x18));
5428
5429 __ St1(v0.V2D(), v1.V2D(), v2.V2D(), v3.V2D(),
5430 MemOperand(x17, 64, PostIndex));
5431 __ Ldr(q22, MemOperand(x17, x21));
5432 __ Ldr(q23, MemOperand(x17, x20));
5433 __ Ldr(q24, MemOperand(x17, x19));
5434 __ Ldr(q25, MemOperand(x17, x18));
5435
5436 END();
5437
5438 RUN();
5439
5440 CHECK_EQUAL_128(0x0f0e0d0c0b0a0908, 0x0706050403020100, q16);
5441 CHECK_EQUAL_128(0x0f0e0d0c0b0a0908, 0x0706050403020100, q17);
5442 CHECK_EQUAL_128(0x1f1e1d1c1b1a1918, 0x1716151413121110, q18);
5443 CHECK_EQUAL_128(0x0f0e0d0c0b0a0908, 0x0706050403020100, q19);
5444 CHECK_EQUAL_128(0x1f1e1d1c1b1a1918, 0x1716151413121110, q20);
5445 CHECK_EQUAL_128(0x2f2e2d2c2b2a2928, 0x2726252423222120, q21);
5446 CHECK_EQUAL_128(0x0f0e0d0c0b0a0908, 0x0706050403020100, q22);
5447 CHECK_EQUAL_128(0x1f1e1d1c1b1a1918, 0x1716151413121110, q23);
5448 CHECK_EQUAL_128(0x2f2e2d2c2b2a2928, 0x2726252423222120, q24);
5449 CHECK_EQUAL_128(0x3f3e3d3c3b3a3938, 0x3736353433323130, q25);
5450
5451 TEARDOWN();
5452 }
5453
5454 TEST(neon_st2_d) {
5455 INIT_V8();
5456 SETUP();
5457
5458 uint8_t src[4 * 16];
5459 for (unsigned i = 0; i < sizeof(src); i++) {
5460 src[i] = i;
5461 }
5462 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
5463
5464 START();
5465 __ Mov(x17, src_base);
5466 __ Mov(x18, src_base);
5467 __ Ldr(q0, MemOperand(x17, 16, PostIndex));
5468 __ Ldr(q1, MemOperand(x17, 16, PostIndex));
5469
5470 __ St2(v0.V8B(), v1.V8B(), MemOperand(x18));
5471 __ Add(x18, x18, 22);
5472 __ St2(v0.V4H(), v1.V4H(), MemOperand(x18));
5473 __ Add(x18, x18, 11);
5474 __ St2(v0.V2S(), v1.V2S(), MemOperand(x18));
5475
5476 __ Mov(x19, src_base);
5477 __ Ldr(q0, MemOperand(x19, 16, PostIndex));
5478 __ Ldr(q1, MemOperand(x19, 16, PostIndex));
5479 __ Ldr(q2, MemOperand(x19, 16, PostIndex));
5480 __ Ldr(q3, MemOperand(x19, 16, PostIndex));
5481
5482 END();
5483
5484 RUN();
5485
5486 CHECK_EQUAL_128(0x1707160615051404, 0x1303120211011000, q0);
5487 CHECK_EQUAL_128(0x0504131203021110, 0x0100151413121110, q1);
5488 CHECK_EQUAL_128(0x1615140706050413, 0x1211100302010014, q2);
5489 CHECK_EQUAL_128(0x3f3e3d3c3b3a3938, 0x3736353433323117, q3);
5490
5491 TEARDOWN();
5492 }
5493
5494 TEST(neon_st2_d_postindex) {
5495 INIT_V8();
5496 SETUP();
5497
5498 uint8_t src[4 * 16];
5499 for (unsigned i = 0; i < sizeof(src); i++) {
5500 src[i] = i;
5501 }
5502 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
5503
5504 START();
5505 __ Mov(x22, 5);
5506 __ Mov(x17, src_base);
5507 __ Mov(x18, src_base);
5508 __ Ldr(q0, MemOperand(x17, 16, PostIndex));
5509 __ Ldr(q1, MemOperand(x17, 16, PostIndex));
5510
5511 __ St2(v0.V8B(), v1.V8B(), MemOperand(x18, x22, PostIndex));
5512 __ St2(v0.V4H(), v1.V4H(), MemOperand(x18, 16, PostIndex));
5513 __ St2(v0.V2S(), v1.V2S(), MemOperand(x18));
5514
5515 __ Mov(x19, src_base);
5516 __ Ldr(q0, MemOperand(x19, 16, PostIndex));
5517 __ Ldr(q1, MemOperand(x19, 16, PostIndex));
5518 __ Ldr(q2, MemOperand(x19, 16, PostIndex));
5519
5520 END();
5521
5522 RUN();
5523
5524 CHECK_EQUAL_128(0x1405041312030211, 0x1001000211011000, q0);
5525 CHECK_EQUAL_128(0x0605041312111003, 0x0201001716070615, q1);
5526 CHECK_EQUAL_128(0x2f2e2d2c2b2a2928, 0x2726251716151407, q2);
5527
5528 TEARDOWN();
5529 }
5530
5531 TEST(neon_st2_q) {
5532 INIT_V8();
5533 SETUP();
5534
5535 uint8_t src[5 * 16];
5536 for (unsigned i = 0; i < sizeof(src); i++) {
5537 src[i] = i;
5538 }
5539 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
5540
5541 START();
5542 __ Mov(x17, src_base);
5543 __ Mov(x18, src_base);
5544 __ Ldr(q0, MemOperand(x17, 16, PostIndex));
5545 __ Ldr(q1, MemOperand(x17, 16, PostIndex));
5546
5547 __ St2(v0.V16B(), v1.V16B(), MemOperand(x18));
5548 __ Add(x18, x18, 8);
5549 __ St2(v0.V8H(), v1.V8H(), MemOperand(x18));
5550 __ Add(x18, x18, 22);
5551 __ St2(v0.V4S(), v1.V4S(), MemOperand(x18));
5552 __ Add(x18, x18, 2);
5553 __ St2(v0.V2D(), v1.V2D(), MemOperand(x18));
5554
5555 __ Mov(x19, src_base);
5556 __ Ldr(q0, MemOperand(x19, 16, PostIndex));
5557 __ Ldr(q1, MemOperand(x19, 16, PostIndex));
5558 __ Ldr(q2, MemOperand(x19, 16, PostIndex));
5559 __ Ldr(q3, MemOperand(x19, 16, PostIndex));
5560
5561 END();
5562
5563 RUN();
5564
5565 CHECK_EQUAL_128(0x1312030211100100, 0x1303120211011000, q0);
5566 CHECK_EQUAL_128(0x01000b0a19180908, 0x1716070615140504, q1);
5567 CHECK_EQUAL_128(0x1716151413121110, 0x0706050403020100, q2);
5568 CHECK_EQUAL_128(0x1f1e1d1c1b1a1918, 0x0f0e0d0c0b0a0908, q3);
5569 TEARDOWN();
5570 }
5571
5572 TEST(neon_st2_q_postindex) {
5573 INIT_V8();
5574 SETUP();
5575
5576 uint8_t src[5 * 16];
5577 for (unsigned i = 0; i < sizeof(src); i++) {
5578 src[i] = i;
5579 }
5580 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
5581
5582 START();
5583 __ Mov(x22, 5);
5584 __ Mov(x17, src_base);
5585 __ Mov(x18, src_base);
5586 __ Ldr(q0, MemOperand(x17, 16, PostIndex));
5587 __ Ldr(q1, MemOperand(x17, 16, PostIndex));
5588
5589 __ St2(v0.V16B(), v1.V16B(), MemOperand(x18, x22, PostIndex));
5590 __ St2(v0.V8H(), v1.V8H(), MemOperand(x18, 32, PostIndex));
5591 __ St2(v0.V4S(), v1.V4S(), MemOperand(x18, x22, PostIndex));
5592 __ St2(v0.V2D(), v1.V2D(), MemOperand(x18));
5593
5594 __ Mov(x19, src_base);
5595 __ Ldr(q0, MemOperand(x19, 16, PostIndex));
5596 __ Ldr(q1, MemOperand(x19, 16, PostIndex));
5597 __ Ldr(q2, MemOperand(x19, 16, PostIndex));
5598 __ Ldr(q3, MemOperand(x19, 16, PostIndex));
5599 __ Ldr(q4, MemOperand(x19, 16, PostIndex));
5600
5601 END();
5602
5603 RUN();
5604
5605 CHECK_EQUAL_128(0x1405041312030211, 0x1001000211011000, q0);
5606 CHECK_EQUAL_128(0x1c0d0c1b1a0b0a19, 0x1809081716070615, q1);
5607 CHECK_EQUAL_128(0x0504030201001003, 0x0201001f1e0f0e1d, q2);
5608 CHECK_EQUAL_128(0x0d0c0b0a09081716, 0x1514131211100706, q3);
5609 CHECK_EQUAL_128(0x4f4e4d4c4b4a1f1e, 0x1d1c1b1a19180f0e, q4);
5610
5611 TEARDOWN();
5612 }
5613
5614 TEST(neon_st3_d) {
5615 INIT_V8();
5616 SETUP();
5617
5618 uint8_t src[3 * 16];
5619 for (unsigned i = 0; i < sizeof(src); i++) {
5620 src[i] = i;
5621 }
5622 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
5623
5624 START();
5625 __ Mov(x17, src_base);
5626 __ Mov(x18, src_base);
5627 __ Ldr(q0, MemOperand(x17, 16, PostIndex));
5628 __ Ldr(q1, MemOperand(x17, 16, PostIndex));
5629 __ Ldr(q2, MemOperand(x17, 16, PostIndex));
5630
5631 __ St3(v0.V8B(), v1.V8B(), v2.V8B(), MemOperand(x18));
5632 __ Add(x18, x18, 3);
5633 __ St3(v0.V4H(), v1.V4H(), v2.V4H(), MemOperand(x18));
5634 __ Add(x18, x18, 2);
5635 __ St3(v0.V2S(), v1.V2S(), v2.V2S(), MemOperand(x18));
5636
5637 __ Mov(x19, src_base);
5638 __ Ldr(q0, MemOperand(x19, 16, PostIndex));
5639 __ Ldr(q1, MemOperand(x19, 16, PostIndex));
5640
5641 END();
5642
5643 RUN();
5644
5645 CHECK_EQUAL_128(0x2221201312111003, 0x0201000100201000, q0);
5646 CHECK_EQUAL_128(0x1f1e1d2726252417, 0x1615140706050423, q1);
5647
5648 TEARDOWN();
5649 }
5650
5651 TEST(neon_st3_d_postindex) {
5652 INIT_V8();
5653 SETUP();
5654
5655 uint8_t src[4 * 16];
5656 for (unsigned i = 0; i < sizeof(src); i++) {
5657 src[i] = i;
5658 }
5659 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
5660
5661 START();
5662 __ Mov(x22, 5);
5663 __ Mov(x17, src_base);
5664 __ Mov(x18, src_base);
5665 __ Ldr(q0, MemOperand(x17, 16, PostIndex));
5666 __ Ldr(q1, MemOperand(x17, 16, PostIndex));
5667 __ Ldr(q2, MemOperand(x17, 16, PostIndex));
5668
5669 __ St3(v0.V8B(), v1.V8B(), v2.V8B(), MemOperand(x18, x22, PostIndex));
5670 __ St3(v0.V4H(), v1.V4H(), v2.V4H(), MemOperand(x18, 24, PostIndex));
5671 __ St3(v0.V2S(), v1.V2S(), v2.V2S(), MemOperand(x18));
5672
5673 __ Mov(x19, src_base);
5674 __ Ldr(q0, MemOperand(x19, 16, PostIndex));
5675 __ Ldr(q1, MemOperand(x19, 16, PostIndex));
5676 __ Ldr(q2, MemOperand(x19, 16, PostIndex));
5677 __ Ldr(q3, MemOperand(x19, 16, PostIndex));
5678
5679 END();
5680
5681 RUN();
5682
5683 CHECK_EQUAL_128(0x2213120302212011, 0x1001001101201000, q0);
5684 CHECK_EQUAL_128(0x0201002726171607, 0x0625241514050423, q1);
5685 CHECK_EQUAL_128(0x1615140706050423, 0x2221201312111003, q2);
5686 CHECK_EQUAL_128(0x3f3e3d3c3b3a3938, 0x3736352726252417, q3);
5687
5688 TEARDOWN();
5689 }
5690
5691 TEST(neon_st3_q) {
5692 INIT_V8();
5693 SETUP();
5694
5695 uint8_t src[6 * 16];
5696 for (unsigned i = 0; i < sizeof(src); i++) {
5697 src[i] = i;
5698 }
5699 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
5700
5701 START();
5702 __ Mov(x17, src_base);
5703 __ Mov(x18, src_base);
5704 __ Ldr(q0, MemOperand(x17, 16, PostIndex));
5705 __ Ldr(q1, MemOperand(x17, 16, PostIndex));
5706 __ Ldr(q2, MemOperand(x17, 16, PostIndex));
5707
5708 __ St3(v0.V16B(), v1.V16B(), v2.V16B(), MemOperand(x18));
5709 __ Add(x18, x18, 5);
5710 __ St3(v0.V8H(), v1.V8H(), v2.V8H(), MemOperand(x18));
5711 __ Add(x18, x18, 12);
5712 __ St3(v0.V4S(), v1.V4S(), v2.V4S(), MemOperand(x18));
5713 __ Add(x18, x18, 22);
5714 __ St3(v0.V2D(), v1.V2D(), v2.V2D(), MemOperand(x18));
5715
5716 __ Mov(x19, src_base);
5717 __ Ldr(q0, MemOperand(x19, 16, PostIndex));
5718 __ Ldr(q1, MemOperand(x19, 16, PostIndex));
5719 __ Ldr(q2, MemOperand(x19, 16, PostIndex));
5720 __ Ldr(q3, MemOperand(x19, 16, PostIndex));
5721 __ Ldr(q4, MemOperand(x19, 16, PostIndex));
5722 __ Ldr(q5, MemOperand(x19, 16, PostIndex));
5723
5724 END();
5725
5726 RUN();
5727
5728 CHECK_EQUAL_128(0x2213120302212011, 0x1001001101201000, q0);
5729 CHECK_EQUAL_128(0x0605042322212013, 0x1211100302010023, q1);
5730 CHECK_EQUAL_128(0x1007060504030201, 0x0025241716151407, q2);
5731 CHECK_EQUAL_128(0x0827262524232221, 0x2017161514131211, q3);
5732 CHECK_EQUAL_128(0x281f1e1d1c1b1a19, 0x180f0e0d0c0b0a09, q4);
5733 CHECK_EQUAL_128(0x5f5e5d5c5b5a5958, 0x572f2e2d2c2b2a29, q5);
5734
5735 TEARDOWN();
5736 }
5737
5738 TEST(neon_st3_q_postindex) {
5739 INIT_V8();
5740 SETUP();
5741
5742 uint8_t src[7 * 16];
5743 for (unsigned i = 0; i < sizeof(src); i++) {
5744 src[i] = i;
5745 }
5746 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
5747
5748 START();
5749 __ Mov(x22, 5);
5750 __ Mov(x17, src_base);
5751 __ Mov(x18, src_base);
5752 __ Ldr(q0, MemOperand(x17, 16, PostIndex));
5753 __ Ldr(q1, MemOperand(x17, 16, PostIndex));
5754 __ Ldr(q2, MemOperand(x17, 16, PostIndex));
5755
5756 __ St3(v0.V16B(), v1.V16B(), v2.V16B(), MemOperand(x18, x22, PostIndex));
5757 __ St3(v0.V8H(), v1.V8H(), v2.V8H(), MemOperand(x18, 48, PostIndex));
5758 __ St3(v0.V4S(), v1.V4S(), v2.V4S(), MemOperand(x18, x22, PostIndex));
5759 __ St3(v0.V2D(), v1.V2D(), v2.V2D(), MemOperand(x18));
5760
5761 __ Mov(x19, src_base);
5762 __ Ldr(q0, MemOperand(x19, 16, PostIndex));
5763 __ Ldr(q1, MemOperand(x19, 16, PostIndex));
5764 __ Ldr(q2, MemOperand(x19, 16, PostIndex));
5765 __ Ldr(q3, MemOperand(x19, 16, PostIndex));
5766 __ Ldr(q4, MemOperand(x19, 16, PostIndex));
5767 __ Ldr(q5, MemOperand(x19, 16, PostIndex));
5768 __ Ldr(q6, MemOperand(x19, 16, PostIndex));
5769
5770 END();
5771
5772 RUN();
5773
5774 CHECK_EQUAL_128(0x2213120302212011, 0x1001001101201000, q0);
5775 CHECK_EQUAL_128(0x1809082726171607, 0x0625241514050423, q1);
5776 CHECK_EQUAL_128(0x0e2d2c1d1c0d0c2b, 0x2a1b1a0b0a292819, q2);
5777 CHECK_EQUAL_128(0x0504030201001003, 0x0201002f2e1f1e0f, q3);
5778 CHECK_EQUAL_128(0x2524232221201716, 0x1514131211100706, q4);
5779 CHECK_EQUAL_128(0x1d1c1b1a19180f0e, 0x0d0c0b0a09082726, q5);
5780 CHECK_EQUAL_128(0x6f6e6d6c6b6a2f2e, 0x2d2c2b2a29281f1e, q6);
5781
5782 TEARDOWN();
5783 }
5784
5785 TEST(neon_st4_d) {
5786 INIT_V8();
5787 SETUP();
5788
5789 uint8_t src[4 * 16];
5790 for (unsigned i = 0; i < sizeof(src); i++) {
5791 src[i] = i;
5792 }
5793 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
5794
5795 START();
5796 __ Mov(x17, src_base);
5797 __ Mov(x18, src_base);
5798 __ Ldr(q0, MemOperand(x17, 16, PostIndex));
5799 __ Ldr(q1, MemOperand(x17, 16, PostIndex));
5800 __ Ldr(q2, MemOperand(x17, 16, PostIndex));
5801 __ Ldr(q3, MemOperand(x17, 16, PostIndex));
5802
5803 __ St4(v0.V8B(), v1.V8B(), v2.V8B(), v3.V8B(), MemOperand(x18));
5804 __ Add(x18, x18, 12);
5805 __ St4(v0.V4H(), v1.V4H(), v2.V4H(), v3.V4H(), MemOperand(x18));
5806 __ Add(x18, x18, 15);
5807 __ St4(v0.V2S(), v1.V2S(), v2.V2S(), v3.V2S(), MemOperand(x18));
5808
5809 __ Mov(x19, src_base);
5810 __ Ldr(q0, MemOperand(x19, 16, PostIndex));
5811 __ Ldr(q1, MemOperand(x19, 16, PostIndex));
5812 __ Ldr(q2, MemOperand(x19, 16, PostIndex));
5813 __ Ldr(q3, MemOperand(x19, 16, PostIndex));
5814
5815 END();
5816
5817 RUN();
5818
5819 CHECK_EQUAL_128(0x1110010032221202, 0x3121110130201000, q0);
5820 CHECK_EQUAL_128(0x1003020100322322, 0x1312030231302120, q1);
5821 CHECK_EQUAL_128(0x1407060504333231, 0x3023222120131211, q2);
5822 CHECK_EQUAL_128(0x3f3e3d3c3b373635, 0x3427262524171615, q3);
5823
5824 TEARDOWN();
5825 }
5826
5827 TEST(neon_st4_d_postindex) {
5828 INIT_V8();
5829 SETUP();
5830
5831 uint8_t src[5 * 16];
5832 for (unsigned i = 0; i < sizeof(src); i++) {
5833 src[i] = i;
5834 }
5835 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
5836
5837 START();
5838 __ Mov(x22, 5);
5839 __ Mov(x17, src_base);
5840 __ Mov(x18, src_base);
5841 __ Ldr(q0, MemOperand(x17, 16, PostIndex));
5842 __ Ldr(q1, MemOperand(x17, 16, PostIndex));
5843 __ Ldr(q2, MemOperand(x17, 16, PostIndex));
5844 __ Ldr(q3, MemOperand(x17, 16, PostIndex));
5845
5846 __ St4(v0.V8B(), v1.V8B(), v2.V8B(), v3.V8B(),
5847 MemOperand(x18, x22, PostIndex));
5848 __ St4(v0.V4H(), v1.V4H(), v2.V4H(), v3.V4H(),
5849 MemOperand(x18, 32, PostIndex));
5850 __ St4(v0.V2S(), v1.V2S(), v2.V2S(), v3.V2S(), MemOperand(x18));
5851
5852 __ Mov(x19, src_base);
5853 __ Ldr(q0, MemOperand(x19, 16, PostIndex));
5854 __ Ldr(q1, MemOperand(x19, 16, PostIndex));
5855 __ Ldr(q2, MemOperand(x19, 16, PostIndex));
5856 __ Ldr(q3, MemOperand(x19, 16, PostIndex));
5857 __ Ldr(q4, MemOperand(x19, 16, PostIndex));
5858
5859 END();
5860
5861 RUN();
5862
5863 CHECK_EQUAL_128(0x1203023130212011, 0x1001000130201000, q0);
5864 CHECK_EQUAL_128(0x1607063534252415, 0x1405043332232213, q1);
5865 CHECK_EQUAL_128(0x2221201312111003, 0x0201003736272617, q2);
5866 CHECK_EQUAL_128(0x2625241716151407, 0x0605043332313023, q3);
5867 CHECK_EQUAL_128(0x4f4e4d4c4b4a4948, 0x4746453736353427, q4);
5868
5869 TEARDOWN();
5870 }
5871
5872 TEST(neon_st4_q) {
5873 INIT_V8();
5874 SETUP();
5875
5876 uint8_t src[7 * 16];
5877 for (unsigned i = 0; i < sizeof(src); i++) {
5878 src[i] = i;
5879 }
5880 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
5881
5882 START();
5883 __ Mov(x17, src_base);
5884 __ Mov(x18, src_base);
5885 __ Ldr(q0, MemOperand(x17, 16, PostIndex));
5886 __ Ldr(q1, MemOperand(x17, 16, PostIndex));
5887 __ Ldr(q2, MemOperand(x17, 16, PostIndex));
5888 __ Ldr(q3, MemOperand(x17, 16, PostIndex));
5889
5890 __ St4(v0.V16B(), v1.V16B(), v2.V16B(), v3.V16B(), MemOperand(x18));
5891 __ Add(x18, x18, 5);
5892 __ St4(v0.V8H(), v1.V8H(), v2.V8H(), v3.V8H(), MemOperand(x18));
5893 __ Add(x18, x18, 12);
5894 __ St4(v0.V4S(), v1.V4S(), v2.V4S(), v3.V4S(), MemOperand(x18));
5895 __ Add(x18, x18, 22);
5896 __ St4(v0.V2D(), v1.V2D(), v2.V2D(), v3.V2D(), MemOperand(x18));
5897 __ Add(x18, x18, 10);
5898
5899 __ Mov(x19, src_base);
5900 __ Ldr(q0, MemOperand(x19, 16, PostIndex));
5901 __ Ldr(q1, MemOperand(x19, 16, PostIndex));
5902 __ Ldr(q2, MemOperand(x19, 16, PostIndex));
5903 __ Ldr(q3, MemOperand(x19, 16, PostIndex));
5904 __ Ldr(q4, MemOperand(x19, 16, PostIndex));
5905 __ Ldr(q5, MemOperand(x19, 16, PostIndex));
5906 __ Ldr(q6, MemOperand(x19, 16, PostIndex));
5907
5908 END();
5909
5910 RUN();
5911
5912 CHECK_EQUAL_128(0x1203023130212011, 0x1001000130201000, q0);
5913 CHECK_EQUAL_128(0x3231302322212013, 0x1211100302010013, q1);
5914 CHECK_EQUAL_128(0x1007060504030201, 0x0015140706050433, q2);
5915 CHECK_EQUAL_128(0x3027262524232221, 0x2017161514131211, q3);
5916 CHECK_EQUAL_128(0x180f0e0d0c0b0a09, 0x0837363534333231, q4);
5917 CHECK_EQUAL_128(0x382f2e2d2c2b2a29, 0x281f1e1d1c1b1a19, q5);
5918 CHECK_EQUAL_128(0x6f6e6d6c6b6a6968, 0x673f3e3d3c3b3a39, q6);
5919
5920 TEARDOWN();
5921 }
5922
5923 TEST(neon_st4_q_postindex) {
5924 INIT_V8();
5925 SETUP();
5926
5927 uint8_t src[9 * 16];
5928 for (unsigned i = 0; i < sizeof(src); i++) {
5929 src[i] = i;
5930 }
5931 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
5932
5933 START();
5934 __ Mov(x22, 5);
5935 __ Mov(x17, src_base);
5936 __ Mov(x18, src_base);
5937 __ Ldr(q0, MemOperand(x17, 16, PostIndex));
5938 __ Ldr(q1, MemOperand(x17, 16, PostIndex));
5939 __ Ldr(q2, MemOperand(x17, 16, PostIndex));
5940 __ Ldr(q3, MemOperand(x17, 16, PostIndex));
5941
5942 __ St4(v0.V16B(), v1.V16B(), v2.V16B(), v3.V16B(),
5943 MemOperand(x18, x22, PostIndex));
5944 __ St4(v0.V8H(), v1.V8H(), v2.V8H(), v3.V8H(),
5945 MemOperand(x18, 64, PostIndex));
5946 __ St4(v0.V4S(), v1.V4S(), v2.V4S(), v3.V4S(),
5947 MemOperand(x18, x22, PostIndex));
5948 __ St4(v0.V2D(), v1.V2D(), v2.V2D(), v3.V2D(), MemOperand(x18));
5949
5950 __ Mov(x19, src_base);
5951 __ Ldr(q0, MemOperand(x19, 16, PostIndex));
5952 __ Ldr(q1, MemOperand(x19, 16, PostIndex));
5953 __ Ldr(q2, MemOperand(x19, 16, PostIndex));
5954 __ Ldr(q3, MemOperand(x19, 16, PostIndex));
5955 __ Ldr(q4, MemOperand(x19, 16, PostIndex));
5956 __ Ldr(q5, MemOperand(x19, 16, PostIndex));
5957 __ Ldr(q6, MemOperand(x19, 16, PostIndex));
5958 __ Ldr(q7, MemOperand(x19, 16, PostIndex));
5959 __ Ldr(q8, MemOperand(x19, 16, PostIndex));
5960
5961 END();
5962
5963 RUN();
5964
5965 CHECK_EQUAL_128(0x1203023130212011, 0x1001000130201000, q0);
5966 CHECK_EQUAL_128(0x1607063534252415, 0x1405043332232213, q1);
5967 CHECK_EQUAL_128(0x1a0b0a3938292819, 0x1809083736272617, q2);
5968 CHECK_EQUAL_128(0x1e0f0e3d3c2d2c1d, 0x1c0d0c3b3a2b2a1b, q3);
5969 CHECK_EQUAL_128(0x0504030201001003, 0x0201003f3e2f2e1f, q4);
5970 CHECK_EQUAL_128(0x2524232221201716, 0x1514131211100706, q5);
5971 CHECK_EQUAL_128(0x0d0c0b0a09083736, 0x3534333231302726, q6);
5972 CHECK_EQUAL_128(0x2d2c2b2a29281f1e, 0x1d1c1b1a19180f0e, q7);
5973 CHECK_EQUAL_128(0x8f8e8d8c8b8a3f3e, 0x3d3c3b3a39382f2e, q8);
5974
5975 TEARDOWN();
5976 }
5977
5978 TEST(neon_destructive_minmaxp) {
5979 INIT_V8();
5980 SETUP();
5981
5982 START();
5983 __ Movi(v0.V2D(), 0, 0x2222222233333333);
5984 __ Movi(v1.V2D(), 0, 0x0000000011111111);
5985
5986 __ Sminp(v16.V2S(), v0.V2S(), v1.V2S());
5987 __ Mov(v17, v0);
5988 __ Sminp(v17.V2S(), v17.V2S(), v1.V2S());
5989 __ Mov(v18, v1);
5990 __ Sminp(v18.V2S(), v0.V2S(), v18.V2S());
5991 __ Mov(v19, v0);
5992 __ Sminp(v19.V2S(), v19.V2S(), v19.V2S());
5993
5994 __ Smaxp(v20.V2S(), v0.V2S(), v1.V2S());
5995 __ Mov(v21, v0);
5996 __ Smaxp(v21.V2S(), v21.V2S(), v1.V2S());
5997 __ Mov(v22, v1);
5998 __ Smaxp(v22.V2S(), v0.V2S(), v22.V2S());
5999 __ Mov(v23, v0);
6000 __ Smaxp(v23.V2S(), v23.V2S(), v23.V2S());
6001
6002 __ Uminp(v24.V2S(), v0.V2S(), v1.V2S());
6003 __ Mov(v25, v0);
6004 __ Uminp(v25.V2S(), v25.V2S(), v1.V2S());
6005 __ Mov(v26, v1);
6006 __ Uminp(v26.V2S(), v0.V2S(), v26.V2S());
6007 __ Mov(v27, v0);
6008 __ Uminp(v27.V2S(), v27.V2S(), v27.V2S());
6009
6010 __ Umaxp(v28.V2S(), v0.V2S(), v1.V2S());
6011 __ Mov(v29, v0);
6012 __ Umaxp(v29.V2S(), v29.V2S(), v1.V2S());
6013 __ Mov(v30, v1);
6014 __ Umaxp(v30.V2S(), v0.V2S(), v30.V2S());
6015 __ Mov(v31, v0);
6016 __ Umaxp(v31.V2S(), v31.V2S(), v31.V2S());
6017 END();
6018
6019 RUN();
6020
6021 CHECK_EQUAL_128(0, 0x0000000022222222, q16);
6022 CHECK_EQUAL_128(0, 0x0000000022222222, q17);
6023 CHECK_EQUAL_128(0, 0x0000000022222222, q18);
6024 CHECK_EQUAL_128(0, 0x2222222222222222, q19);
6025
6026 CHECK_EQUAL_128(0, 0x1111111133333333, q20);
6027 CHECK_EQUAL_128(0, 0x1111111133333333, q21);
6028 CHECK_EQUAL_128(0, 0x1111111133333333, q22);
6029 CHECK_EQUAL_128(0, 0x3333333333333333, q23);
6030
6031 CHECK_EQUAL_128(0, 0x0000000022222222, q24);
6032 CHECK_EQUAL_128(0, 0x0000000022222222, q25);
6033 CHECK_EQUAL_128(0, 0x0000000022222222, q26);
6034 CHECK_EQUAL_128(0, 0x2222222222222222, q27);
6035
6036 CHECK_EQUAL_128(0, 0x1111111133333333, q28);
6037 CHECK_EQUAL_128(0, 0x1111111133333333, q29);
6038 CHECK_EQUAL_128(0, 0x1111111133333333, q30);
6039 CHECK_EQUAL_128(0, 0x3333333333333333, q31);
6040
6041 TEARDOWN();
6042 }
6043
6044 TEST(neon_destructive_tbl) {
6045 INIT_V8();
6046 SETUP();
6047
6048 START();
6049 __ Movi(v0.V2D(), 0x0041424334353627, 0x28291a1b1c0d0e0f);
6050 __ Movi(v1.V2D(), 0xafaeadacabaaa9a8, 0xa7a6a5a4a3a2a1a0);
6051 __ Movi(v2.V2D(), 0xbfbebdbcbbbab9b8, 0xb7b6b5b4b3b2b1b0);
6052 __ Movi(v3.V2D(), 0xcfcecdcccbcac9c8, 0xc7c6c5c4c3c2c1c0);
6053 __ Movi(v4.V2D(), 0xdfdedddcdbdad9d8, 0xd7d6d5d4d3d2d1d0);
6054
6055 __ Movi(v16.V2D(), 0x5555555555555555, 0x5555555555555555);
6056 __ Tbl(v16.V16B(), v1.V16B(), v0.V16B());
6057 __ Mov(v17, v0);
6058 __ Tbl(v17.V16B(), v1.V16B(), v17.V16B());
6059 __ Mov(v18, v1);
6060 __ Tbl(v18.V16B(), v18.V16B(), v0.V16B());
6061 __ Mov(v19, v0);
6062 __ Tbl(v19.V16B(), v19.V16B(), v19.V16B());
6063
6064 __ Movi(v20.V2D(), 0x5555555555555555, 0x5555555555555555);
6065 __ Tbl(v20.V16B(), v1.V16B(), v2.V16B(), v3.V16B(), v4.V16B(), v0.V16B());
6066 __ Mov(v21, v0);
6067 __ Tbl(v21.V16B(), v1.V16B(), v2.V16B(), v3.V16B(), v4.V16B(), v21.V16B());
6068 __ Mov(v22, v1);
6069 __ Mov(v23, v2);
6070 __ Mov(v24, v3);
6071 __ Mov(v25, v4);
6072 __ Tbl(v22.V16B(), v22.V16B(), v23.V16B(), v24.V16B(), v25.V16B(), v0.V16B());
6073 __ Mov(v26, v0);
6074 __ Mov(v27, v1);
6075 __ Mov(v28, v2);
6076 __ Mov(v29, v3);
6077 __ Tbl(v26.V16B(), v26.V16B(), v27.V16B(), v28.V16B(), v29.V16B(),
6078 v26.V16B());
6079 END();
6080
6081 RUN();
6082
6083 CHECK_EQUAL_128(0xa000000000000000, 0x0000000000adaeaf, q16);
6084 CHECK_EQUAL_128(0xa000000000000000, 0x0000000000adaeaf, q17);
6085 CHECK_EQUAL_128(0xa000000000000000, 0x0000000000adaeaf, q18);
6086 CHECK_EQUAL_128(0x0f00000000000000, 0x0000000000424100, q19);
6087
6088 CHECK_EQUAL_128(0xa0000000d4d5d6c7, 0xc8c9babbbcadaeaf, q20);
6089 CHECK_EQUAL_128(0xa0000000d4d5d6c7, 0xc8c9babbbcadaeaf, q21);
6090 CHECK_EQUAL_128(0xa0000000d4d5d6c7, 0xc8c9babbbcadaeaf, q22);
6091 CHECK_EQUAL_128(0x0f000000c4c5c6b7, 0xb8b9aaabac424100, q26);
6092
6093 TEARDOWN();
6094 }
6095
6096 TEST(neon_destructive_tbx) {
6097 INIT_V8();
6098 SETUP();
6099
6100 START();
6101 __ Movi(v0.V2D(), 0x0041424334353627, 0x28291a1b1c0d0e0f);
6102 __ Movi(v1.V2D(), 0xafaeadacabaaa9a8, 0xa7a6a5a4a3a2a1a0);
6103 __ Movi(v2.V2D(), 0xbfbebdbcbbbab9b8, 0xb7b6b5b4b3b2b1b0);
6104 __ Movi(v3.V2D(), 0xcfcecdcccbcac9c8, 0xc7c6c5c4c3c2c1c0);
6105 __ Movi(v4.V2D(), 0xdfdedddcdbdad9d8, 0xd7d6d5d4d3d2d1d0);
6106
6107 __ Movi(v16.V2D(), 0x5555555555555555, 0x5555555555555555);
6108 __ Tbx(v16.V16B(), v1.V16B(), v0.V16B());
6109 __ Mov(v17, v0);
6110 __ Tbx(v17.V16B(), v1.V16B(), v17.V16B());
6111 __ Mov(v18, v1);
6112 __ Tbx(v18.V16B(), v18.V16B(), v0.V16B());
6113 __ Mov(v19, v0);
6114 __ Tbx(v19.V16B(), v19.V16B(), v19.V16B());
6115
6116 __ Movi(v20.V2D(), 0x5555555555555555, 0x5555555555555555);
6117 __ Tbx(v20.V16B(), v1.V16B(), v2.V16B(), v3.V16B(), v4.V16B(), v0.V16B());
6118 __ Mov(v21, v0);
6119 __ Tbx(v21.V16B(), v1.V16B(), v2.V16B(), v3.V16B(), v4.V16B(), v21.V16B());
6120 __ Mov(v22, v1);
6121 __ Mov(v23, v2);
6122 __ Mov(v24, v3);
6123 __ Mov(v25, v4);
6124 __ Tbx(v22.V16B(), v22.V16B(), v23.V16B(), v24.V16B(), v25.V16B(), v0.V16B());
6125 __ Mov(v26, v0);
6126 __ Mov(v27, v1);
6127 __ Mov(v28, v2);
6128 __ Mov(v29, v3);
6129 __ Tbx(v26.V16B(), v26.V16B(), v27.V16B(), v28.V16B(), v29.V16B(),
6130 v26.V16B());
6131 END();
6132
6133 RUN();
6134
6135 CHECK_EQUAL_128(0xa055555555555555, 0x5555555555adaeaf, q16);
6136 CHECK_EQUAL_128(0xa041424334353627, 0x28291a1b1cadaeaf, q17);
6137 CHECK_EQUAL_128(0xa0aeadacabaaa9a8, 0xa7a6a5a4a3adaeaf, q18);
6138 CHECK_EQUAL_128(0x0f41424334353627, 0x28291a1b1c424100, q19);
6139
6140 CHECK_EQUAL_128(0xa0555555d4d5d6c7, 0xc8c9babbbcadaeaf, q20);
6141 CHECK_EQUAL_128(0xa0414243d4d5d6c7, 0xc8c9babbbcadaeaf, q21);
6142 CHECK_EQUAL_128(0xa0aeadacd4d5d6c7, 0xc8c9babbbcadaeaf, q22);
6143 CHECK_EQUAL_128(0x0f414243c4c5c6b7, 0xb8b9aaabac424100, q26);
6144
6145 TEARDOWN();
6146 }
6147
6148 TEST(neon_destructive_fcvtl) {
6149 INIT_V8();
6150 SETUP();
6151
6152 START();
6153 __ Movi(v0.V2D(), 0x400000003f800000, 0xbf800000c0000000);
6154 __ Fcvtl(v16.V2D(), v0.V2S());
6155 __ Fcvtl2(v17.V2D(), v0.V4S());
6156 __ Mov(v18, v0);
6157 __ Mov(v19, v0);
6158 __ Fcvtl(v18.V2D(), v18.V2S());
6159 __ Fcvtl2(v19.V2D(), v19.V4S());
6160
6161 __ Movi(v1.V2D(), 0x40003c003c004000, 0xc000bc00bc00c000);
6162 __ Fcvtl(v20.V4S(), v1.V4H());
6163 __ Fcvtl2(v21.V4S(), v1.V8H());
6164 __ Mov(v22, v1);
6165 __ Mov(v23, v1);
6166 __ Fcvtl(v22.V4S(), v22.V4H());
6167 __ Fcvtl2(v23.V4S(), v23.V8H());
6168
6169 END();
6170
6171 RUN();
6172
6173 CHECK_EQUAL_128(0xbff0000000000000, 0xc000000000000000, q16);
6174 CHECK_EQUAL_128(0x4000000000000000, 0x3ff0000000000000, q17);
6175 CHECK_EQUAL_128(0xbff0000000000000, 0xc000000000000000, q18);
6176 CHECK_EQUAL_128(0x4000000000000000, 0x3ff0000000000000, q19);
6177
6178 CHECK_EQUAL_128(0xc0000000bf800000, 0xbf800000c0000000, q20);
6179 CHECK_EQUAL_128(0x400000003f800000, 0x3f80000040000000, q21);
6180 CHECK_EQUAL_128(0xc0000000bf800000, 0xbf800000c0000000, q22);
6181 CHECK_EQUAL_128(0x400000003f800000, 0x3f80000040000000, q23);
6182
6183 TEARDOWN();
6184 }
2805 6185
2806 6186
2807 TEST(ldp_stp_float) { 6187 TEST(ldp_stp_float) {
2808 INIT_V8(); 6188 INIT_V8();
2809 SETUP(); 6189 SETUP();
2810 6190
2811 float src[2] = {1.0, 2.0}; 6191 float src[2] = {1.0, 2.0};
2812 float dst[3] = {0.0, 0.0, 0.0}; 6192 float dst[3] = {0.0, 0.0, 0.0};
2813 uintptr_t src_base = reinterpret_cast<uintptr_t>(src); 6193 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
2814 uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst); 6194 uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
(...skipping 41 matching lines...)
2856 CHECK_EQUAL_FP64(2.0, d0); 6236 CHECK_EQUAL_FP64(2.0, d0);
2857 CHECK_EQUAL_FP64(0.0, dst[0]); 6237 CHECK_EQUAL_FP64(0.0, dst[0]);
2858 CHECK_EQUAL_FP64(2.0, dst[1]); 6238 CHECK_EQUAL_FP64(2.0, dst[1]);
2859 CHECK_EQUAL_FP64(1.0, dst[2]); 6239 CHECK_EQUAL_FP64(1.0, dst[2]);
2860 CHECK_EQUAL_64(src_base + 2 * sizeof(src[0]), x16); 6240 CHECK_EQUAL_64(src_base + 2 * sizeof(src[0]), x16);
2861 CHECK_EQUAL_64(dst_base + sizeof(dst[1]), x17); 6241 CHECK_EQUAL_64(dst_base + sizeof(dst[1]), x17);
2862 6242
2863 TEARDOWN(); 6243 TEARDOWN();
2864 } 6244 }
2865 6245
6246 TEST(ldp_stp_quad) {
6247 SETUP();
6248
6249 uint64_t src[4] = {0x0123456789abcdef, 0xaaaaaaaa55555555, 0xfedcba9876543210,
6250 0x55555555aaaaaaaa};
6251 uint64_t dst[6] = {0, 0, 0, 0, 0, 0};
6252 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
6253 uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
6254
6255 START();
6256 __ Mov(x16, src_base);
6257 __ Mov(x17, dst_base);
6258 __ Ldp(q31, q0, MemOperand(x16, 4 * sizeof(src[0]), PostIndex));
6259 __ Stp(q0, q31, MemOperand(x17, 2 * sizeof(dst[1]), PreIndex));
6260 END();
6261
6262 RUN();
6263
6264 CHECK_EQUAL_128(0xaaaaaaaa55555555, 0x0123456789abcdef, q31);
6265 CHECK_EQUAL_128(0x55555555aaaaaaaa, 0xfedcba9876543210, q0);
6266 CHECK_EQUAL_64(0, dst[0]);
6267 CHECK_EQUAL_64(0, dst[1]);
6268 CHECK_EQUAL_64(0xfedcba9876543210, dst[2]);
6269 CHECK_EQUAL_64(0x55555555aaaaaaaa, dst[3]);
6270 CHECK_EQUAL_64(0x0123456789abcdef, dst[4]);
6271 CHECK_EQUAL_64(0xaaaaaaaa55555555, dst[5]);
6272 CHECK_EQUAL_64(src_base + 4 * sizeof(src[0]), x16);
6273 CHECK_EQUAL_64(dst_base + 2 * sizeof(dst[1]), x17);
6274
6275 TEARDOWN();
6276 }
2866 6277
2867 TEST(ldp_stp_offset) { 6278 TEST(ldp_stp_offset) {
2868 INIT_V8(); 6279 INIT_V8();
2869 SETUP(); 6280 SETUP();
2870 6281
2871 uint64_t src[3] = {0x0011223344556677UL, 0x8899aabbccddeeffUL, 6282 uint64_t src[3] = {0x0011223344556677UL, 0x8899aabbccddeeffUL,
2872 0xffeeddccbbaa9988UL}; 6283 0xffeeddccbbaa9988UL};
2873 uint64_t dst[7] = {0, 0, 0, 0, 0, 0, 0}; 6284 uint64_t dst[7] = {0, 0, 0, 0, 0, 0, 0};
2874 uintptr_t src_base = reinterpret_cast<uintptr_t>(src); 6285 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
2875 uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst); 6286 uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
(...skipping 2476 matching lines...)
5352 8763
5353 START(); 8764 START();
5354 __ Fmov(s20, 1.0); 8765 __ Fmov(s20, 1.0);
5355 __ Fmov(w10, s20); 8766 __ Fmov(w10, s20);
5356 __ Fmov(s30, w10); 8767 __ Fmov(s30, w10);
5357 __ Fmov(s5, s20); 8768 __ Fmov(s5, s20);
5358 __ Fmov(d1, -13.0); 8769 __ Fmov(d1, -13.0);
5359 __ Fmov(x1, d1); 8770 __ Fmov(x1, d1);
5360 __ Fmov(d2, x1); 8771 __ Fmov(d2, x1);
5361 __ Fmov(d4, d1); 8772 __ Fmov(d4, d1);
5362 __ Fmov(d6, rawbits_to_double(0x0123456789abcdefL)); 8773 __ Fmov(d6, bit_cast<double>(0x0123456789abcdefL));
5363 __ Fmov(s6, s6); 8774 __ Fmov(s6, s6);
5364 END(); 8775 END();
5365 8776
5366 RUN(); 8777 RUN();
5367 8778
5368 CHECK_EQUAL_32(float_to_rawbits(1.0), w10); 8779 CHECK_EQUAL_32(bit_cast<uint32_t>(1.0f), w10);
5369 CHECK_EQUAL_FP32(1.0, s30); 8780 CHECK_EQUAL_FP32(1.0, s30);
5370 CHECK_EQUAL_FP32(1.0, s5); 8781 CHECK_EQUAL_FP32(1.0, s5);
5371 CHECK_EQUAL_64(double_to_rawbits(-13.0), x1); 8782 CHECK_EQUAL_64(bit_cast<uint64_t>(-13.0), x1);
5372 CHECK_EQUAL_FP64(-13.0, d2); 8783 CHECK_EQUAL_FP64(-13.0, d2);
5373 CHECK_EQUAL_FP64(-13.0, d4); 8784 CHECK_EQUAL_FP64(-13.0, d4);
5374 CHECK_EQUAL_FP32(rawbits_to_float(0x89abcdef), s6); 8785 CHECK_EQUAL_FP32(bit_cast<float>(0x89abcdef), s6);
5375 8786
5376 TEARDOWN(); 8787 TEARDOWN();
5377 } 8788 }
5378 8789
5379 8790
5380 TEST(fadd) { 8791 TEST(fadd) {
5381 INIT_V8(); 8792 INIT_V8();
5382 SETUP(); 8793 SETUP();
5383 8794
5384 START(); 8795 START();
(...skipping 295 matching lines...)
5680 kFP32DefaultNaN, // inf + (-inf * 1) = NaN 9091 kFP32DefaultNaN, // inf + (-inf * 1) = NaN
5681 kFP32PositiveInfinity, // inf + ( inf * 1) = inf 9092 kFP32PositiveInfinity, // inf + ( inf * 1) = inf
5682 kFP32DefaultNaN, // -inf + ( inf * 1) = NaN 9093 kFP32DefaultNaN, // -inf + ( inf * 1) = NaN
5683 kFP32NegativeInfinity); // -inf + (-inf * 1) = -inf 9094 kFP32NegativeInfinity); // -inf + (-inf * 1) = -inf
5684 } 9095 }
5685 9096
5686 9097
5687 TEST(fmadd_fmsub_double_nans) { 9098 TEST(fmadd_fmsub_double_nans) {
5688 INIT_V8(); 9099 INIT_V8();
5689 // Make sure that NaN propagation works correctly. 9100 // Make sure that NaN propagation works correctly.
5690 double s1 = rawbits_to_double(0x7ff5555511111111); 9101 double s1 = bit_cast<double>(0x7ff5555511111111);
5691 double s2 = rawbits_to_double(0x7ff5555522222222); 9102 double s2 = bit_cast<double>(0x7ff5555522222222);
5692 double sa = rawbits_to_double(0x7ff55555aaaaaaaa); 9103 double sa = bit_cast<double>(0x7ff55555aaaaaaaa);
5693 double q1 = rawbits_to_double(0x7ffaaaaa11111111); 9104 double q1 = bit_cast<double>(0x7ffaaaaa11111111);
5694 double q2 = rawbits_to_double(0x7ffaaaaa22222222); 9105 double q2 = bit_cast<double>(0x7ffaaaaa22222222);
5695 double qa = rawbits_to_double(0x7ffaaaaaaaaaaaaa); 9106 double qa = bit_cast<double>(0x7ffaaaaaaaaaaaaa);
5696 CHECK(IsSignallingNaN(s1)); 9107 CHECK(IsSignallingNaN(s1));
5697 CHECK(IsSignallingNaN(s2)); 9108 CHECK(IsSignallingNaN(s2));
5698 CHECK(IsSignallingNaN(sa)); 9109 CHECK(IsSignallingNaN(sa));
5699 CHECK(IsQuietNaN(q1)); 9110 CHECK(IsQuietNaN(q1));
5700 CHECK(IsQuietNaN(q2)); 9111 CHECK(IsQuietNaN(q2));
5701 CHECK(IsQuietNaN(qa)); 9112 CHECK(IsQuietNaN(qa));
5702 9113
5703 // The input NaNs after passing through ProcessNaN. 9114 // The input NaNs after passing through ProcessNaN.
5704 double s1_proc = rawbits_to_double(0x7ffd555511111111); 9115 double s1_proc = bit_cast<double>(0x7ffd555511111111);
5705 double s2_proc = rawbits_to_double(0x7ffd555522222222); 9116 double s2_proc = bit_cast<double>(0x7ffd555522222222);
5706 double sa_proc = rawbits_to_double(0x7ffd5555aaaaaaaa); 9117 double sa_proc = bit_cast<double>(0x7ffd5555aaaaaaaa);
5707 double q1_proc = q1; 9118 double q1_proc = q1;
5708 double q2_proc = q2; 9119 double q2_proc = q2;
5709 double qa_proc = qa; 9120 double qa_proc = qa;
5710 CHECK(IsQuietNaN(s1_proc)); 9121 CHECK(IsQuietNaN(s1_proc));
5711 CHECK(IsQuietNaN(s2_proc)); 9122 CHECK(IsQuietNaN(s2_proc));
5712 CHECK(IsQuietNaN(sa_proc)); 9123 CHECK(IsQuietNaN(sa_proc));
5713 CHECK(IsQuietNaN(q1_proc)); 9124 CHECK(IsQuietNaN(q1_proc));
5714 CHECK(IsQuietNaN(q2_proc)); 9125 CHECK(IsQuietNaN(q2_proc));
5715 CHECK(IsQuietNaN(qa_proc)); 9126 CHECK(IsQuietNaN(qa_proc));
5716 9127
5717 // Negated NaNs as it would be done on ARMv8 hardware. 9128 // Negated NaNs as it would be done on ARMv8 hardware.
5718 double s1_proc_neg = rawbits_to_double(0xfffd555511111111); 9129 double s1_proc_neg = bit_cast<double>(0xfffd555511111111);
5719 double sa_proc_neg = rawbits_to_double(0xfffd5555aaaaaaaa); 9130 double sa_proc_neg = bit_cast<double>(0xfffd5555aaaaaaaa);
5720 double q1_proc_neg = rawbits_to_double(0xfffaaaaa11111111); 9131 double q1_proc_neg = bit_cast<double>(0xfffaaaaa11111111);
5721 double qa_proc_neg = rawbits_to_double(0xfffaaaaaaaaaaaaa); 9132 double qa_proc_neg = bit_cast<double>(0xfffaaaaaaaaaaaaa);
5722 CHECK(IsQuietNaN(s1_proc_neg)); 9133 CHECK(IsQuietNaN(s1_proc_neg));
5723 CHECK(IsQuietNaN(sa_proc_neg)); 9134 CHECK(IsQuietNaN(sa_proc_neg));
5724 CHECK(IsQuietNaN(q1_proc_neg)); 9135 CHECK(IsQuietNaN(q1_proc_neg));
5725 CHECK(IsQuietNaN(qa_proc_neg)); 9136 CHECK(IsQuietNaN(qa_proc_neg));
5726 9137
5727 // Quiet NaNs are propagated. 9138 // Quiet NaNs are propagated.
5728 FmaddFmsubHelper(q1, 0, 0, q1_proc, q1_proc_neg, q1_proc_neg, q1_proc); 9139 FmaddFmsubHelper(q1, 0, 0, q1_proc, q1_proc_neg, q1_proc_neg, q1_proc);
5729 FmaddFmsubHelper(0, q2, 0, q2_proc, q2_proc, q2_proc, q2_proc); 9140 FmaddFmsubHelper(0, q2, 0, q2_proc, q2_proc, q2_proc, q2_proc);
5730 FmaddFmsubHelper(0, 0, qa, qa_proc, qa_proc, qa_proc_neg, qa_proc_neg); 9141 FmaddFmsubHelper(0, 0, qa, qa_proc, qa_proc, qa_proc_neg, qa_proc_neg);
5731 FmaddFmsubHelper(q1, q2, 0, q1_proc, q1_proc_neg, q1_proc_neg, q1_proc); 9142 FmaddFmsubHelper(q1, q2, 0, q1_proc, q1_proc_neg, q1_proc_neg, q1_proc);
(...skipping 31 matching lines...)
5763 kFP64DefaultNaN, kFP64DefaultNaN); 9174 kFP64DefaultNaN, kFP64DefaultNaN);
5764 FmaddFmsubHelper(kFP64NegativeInfinity, 0, qa, 9175 FmaddFmsubHelper(kFP64NegativeInfinity, 0, qa,
5765 kFP64DefaultNaN, kFP64DefaultNaN, 9176 kFP64DefaultNaN, kFP64DefaultNaN,
5766 kFP64DefaultNaN, kFP64DefaultNaN); 9177 kFP64DefaultNaN, kFP64DefaultNaN);
5767 } 9178 }
5768 9179
5769 9180
5770 TEST(fmadd_fmsub_float_nans) { 9181 TEST(fmadd_fmsub_float_nans) {
5771 INIT_V8(); 9182 INIT_V8();
5772 // Make sure that NaN propagation works correctly. 9183 // Make sure that NaN propagation works correctly.
5773 float s1 = rawbits_to_float(0x7f951111); 9184 float s1 = bit_cast<float>(0x7f951111);
5774 float s2 = rawbits_to_float(0x7f952222); 9185 float s2 = bit_cast<float>(0x7f952222);
5775 float sa = rawbits_to_float(0x7f95aaaa); 9186 float sa = bit_cast<float>(0x7f95aaaa);
5776 float q1 = rawbits_to_float(0x7fea1111); 9187 float q1 = bit_cast<float>(0x7fea1111);
5777 float q2 = rawbits_to_float(0x7fea2222); 9188 float q2 = bit_cast<float>(0x7fea2222);
5778 float qa = rawbits_to_float(0x7feaaaaa); 9189 float qa = bit_cast<float>(0x7feaaaaa);
5779 CHECK(IsSignallingNaN(s1)); 9190 CHECK(IsSignallingNaN(s1));
5780 CHECK(IsSignallingNaN(s2)); 9191 CHECK(IsSignallingNaN(s2));
5781 CHECK(IsSignallingNaN(sa)); 9192 CHECK(IsSignallingNaN(sa));
5782 CHECK(IsQuietNaN(q1)); 9193 CHECK(IsQuietNaN(q1));
5783 CHECK(IsQuietNaN(q2)); 9194 CHECK(IsQuietNaN(q2));
5784 CHECK(IsQuietNaN(qa)); 9195 CHECK(IsQuietNaN(qa));
5785 9196
5786 // The input NaNs after passing through ProcessNaN. 9197 // The input NaNs after passing through ProcessNaN.
5787 float s1_proc = rawbits_to_float(0x7fd51111); 9198 float s1_proc = bit_cast<float>(0x7fd51111);
5788 float s2_proc = rawbits_to_float(0x7fd52222); 9199 float s2_proc = bit_cast<float>(0x7fd52222);
5789 float sa_proc = rawbits_to_float(0x7fd5aaaa); 9200 float sa_proc = bit_cast<float>(0x7fd5aaaa);
5790 float q1_proc = q1; 9201 float q1_proc = q1;
5791 float q2_proc = q2; 9202 float q2_proc = q2;
5792 float qa_proc = qa; 9203 float qa_proc = qa;
5793 CHECK(IsQuietNaN(s1_proc)); 9204 CHECK(IsQuietNaN(s1_proc));
5794 CHECK(IsQuietNaN(s2_proc)); 9205 CHECK(IsQuietNaN(s2_proc));
5795 CHECK(IsQuietNaN(sa_proc)); 9206 CHECK(IsQuietNaN(sa_proc));
5796 CHECK(IsQuietNaN(q1_proc)); 9207 CHECK(IsQuietNaN(q1_proc));
5797 CHECK(IsQuietNaN(q2_proc)); 9208 CHECK(IsQuietNaN(q2_proc));
5798 CHECK(IsQuietNaN(qa_proc)); 9209 CHECK(IsQuietNaN(qa_proc));
5799 9210
5800 // Negated NaNs as it would be done on ARMv8 hardware. 9211 // Negated NaNs as it would be done on ARMv8 hardware.
5801 float s1_proc_neg = rawbits_to_float(0xffd51111); 9212 float s1_proc_neg = bit_cast<float>(0xffd51111);
5802 float sa_proc_neg = rawbits_to_float(0xffd5aaaa); 9213 float sa_proc_neg = bit_cast<float>(0xffd5aaaa);
5803 float q1_proc_neg = rawbits_to_float(0xffea1111); 9214 float q1_proc_neg = bit_cast<float>(0xffea1111);
5804 float qa_proc_neg = rawbits_to_float(0xffeaaaaa); 9215 float qa_proc_neg = bit_cast<float>(0xffeaaaaa);
5805 CHECK(IsQuietNaN(s1_proc_neg)); 9216 CHECK(IsQuietNaN(s1_proc_neg));
5806 CHECK(IsQuietNaN(sa_proc_neg)); 9217 CHECK(IsQuietNaN(sa_proc_neg));
5807 CHECK(IsQuietNaN(q1_proc_neg)); 9218 CHECK(IsQuietNaN(q1_proc_neg));
5808 CHECK(IsQuietNaN(qa_proc_neg)); 9219 CHECK(IsQuietNaN(qa_proc_neg));
5809 9220
5810 // Quiet NaNs are propagated. 9221 // Quiet NaNs are propagated.
5811 FmaddFmsubHelper(q1, 0, 0, q1_proc, q1_proc_neg, q1_proc_neg, q1_proc); 9222 FmaddFmsubHelper(q1, 0, 0, q1_proc, q1_proc_neg, q1_proc_neg, q1_proc);
5812 FmaddFmsubHelper(0, q2, 0, q2_proc, q2_proc, q2_proc, q2_proc); 9223 FmaddFmsubHelper(0, q2, 0, q2_proc, q2_proc, q2_proc, q2_proc);
5813 FmaddFmsubHelper(0, 0, qa, qa_proc, qa_proc, qa_proc_neg, qa_proc_neg); 9224 FmaddFmsubHelper(0, 0, qa, qa_proc, qa_proc, qa_proc_neg, qa_proc_neg);
5814 FmaddFmsubHelper(q1, q2, 0, q1_proc, q1_proc_neg, q1_proc_neg, q1_proc); 9225 FmaddFmsubHelper(q1, q2, 0, q1_proc, q1_proc_neg, q1_proc_neg, q1_proc);
(...skipping 90 matching lines...)
5905 CHECK_EQUAL_FP64(kFP64DefaultNaN, d13); 9316 CHECK_EQUAL_FP64(kFP64DefaultNaN, d13);
5906 9317
5907 TEARDOWN(); 9318 TEARDOWN();
5908 } 9319 }
5909 9320
5910 9321
5911 static float MinMaxHelper(float n, 9322 static float MinMaxHelper(float n,
5912 float m, 9323 float m,
5913 bool min, 9324 bool min,
5914 float quiet_nan_substitute = 0.0) { 9325 float quiet_nan_substitute = 0.0) {
5915 uint32_t raw_n = float_to_rawbits(n); 9326 uint32_t raw_n = bit_cast<uint32_t>(n);
5916 uint32_t raw_m = float_to_rawbits(m); 9327 uint32_t raw_m = bit_cast<uint32_t>(m);
5917 9328
5918 if (std::isnan(n) && ((raw_n & kSQuietNanMask) == 0)) { 9329 if (std::isnan(n) && ((raw_n & kSQuietNanMask) == 0)) {
5919 // n is signalling NaN. 9330 // n is signalling NaN.
5920 return rawbits_to_float(raw_n | kSQuietNanMask); 9331 return bit_cast<float>(raw_n | static_cast<uint32_t>(kSQuietNanMask));
5921 } else if (std::isnan(m) && ((raw_m & kSQuietNanMask) == 0)) { 9332 } else if (std::isnan(m) && ((raw_m & kSQuietNanMask) == 0)) {
5922 // m is signalling NaN. 9333 // m is signalling NaN.
5923 return rawbits_to_float(raw_m | kSQuietNanMask); 9334 return bit_cast<float>(raw_m | static_cast<uint32_t>(kSQuietNanMask));
5924 } else if (quiet_nan_substitute == 0.0) { 9335 } else if (quiet_nan_substitute == 0.0) {
5925 if (std::isnan(n)) { 9336 if (std::isnan(n)) {
5926 // n is quiet NaN. 9337 // n is quiet NaN.
5927 return n; 9338 return n;
5928 } else if (std::isnan(m)) { 9339 } else if (std::isnan(m)) {
5929 // m is quiet NaN. 9340 // m is quiet NaN.
5930 return m; 9341 return m;
5931 } 9342 }
5932 } else { 9343 } else {
5933 // Substitute n or m if one is quiet, but not both. 9344 // Substitute n or m if one is quiet, but not both.
(...skipping 12 matching lines...)
5946 } 9357 }
5947 9358
5948 return min ? fminf(n, m) : fmaxf(n, m); 9359 return min ? fminf(n, m) : fmaxf(n, m);
5949 } 9360 }
5950 9361
5951 9362
5952 static double MinMaxHelper(double n, 9363 static double MinMaxHelper(double n,
5953 double m, 9364 double m,
5954 bool min, 9365 bool min,
5955 double quiet_nan_substitute = 0.0) { 9366 double quiet_nan_substitute = 0.0) {
5956 uint64_t raw_n = double_to_rawbits(n); 9367 uint64_t raw_n = bit_cast<uint64_t>(n);
5957 uint64_t raw_m = double_to_rawbits(m); 9368 uint64_t raw_m = bit_cast<uint64_t>(m);
5958 9369
5959 if (std::isnan(n) && ((raw_n & kDQuietNanMask) == 0)) { 9370 if (std::isnan(n) && ((raw_n & kDQuietNanMask) == 0)) {
5960 // n is signalling NaN. 9371 // n is signalling NaN.
5961 return rawbits_to_double(raw_n | kDQuietNanMask); 9372 return bit_cast<double>(raw_n | kDQuietNanMask);
5962 } else if (std::isnan(m) && ((raw_m & kDQuietNanMask) == 0)) { 9373 } else if (std::isnan(m) && ((raw_m & kDQuietNanMask) == 0)) {
5963 // m is signalling NaN. 9374 // m is signalling NaN.
5964 return rawbits_to_double(raw_m | kDQuietNanMask); 9375 return bit_cast<double>(raw_m | kDQuietNanMask);
5965 } else if (quiet_nan_substitute == 0.0) { 9376 } else if (quiet_nan_substitute == 0.0) {
5966 if (std::isnan(n)) { 9377 if (std::isnan(n)) {
5967 // n is quiet NaN. 9378 // n is quiet NaN.
5968 return n; 9379 return n;
5969 } else if (std::isnan(m)) { 9380 } else if (std::isnan(m)) {
5970 // m is quiet NaN. 9381 // m is quiet NaN.
5971 return m; 9382 return m;
5972 } 9383 }
5973 } else { 9384 } else {
5974 // Substitute n or m if one is quiet, but not both. 9385 // Substitute n or m if one is quiet, but not both.
(...skipping 35 matching lines...)
6010 CHECK_EQUAL_FP64(minnm, d30); 9421 CHECK_EQUAL_FP64(minnm, d30);
6011 CHECK_EQUAL_FP64(maxnm, d31); 9422 CHECK_EQUAL_FP64(maxnm, d31);
6012 9423
6013 TEARDOWN(); 9424 TEARDOWN();
6014 } 9425 }
6015 9426
6016 9427
6017 TEST(fmax_fmin_d) { 9428 TEST(fmax_fmin_d) {
6018 INIT_V8(); 9429 INIT_V8();
6019 // Use non-standard NaNs to check that the payload bits are preserved. 9430 // Use non-standard NaNs to check that the payload bits are preserved.
6020 double snan = rawbits_to_double(0x7ff5555512345678); 9431 double snan = bit_cast<double>(0x7ff5555512345678);
6021 double qnan = rawbits_to_double(0x7ffaaaaa87654321); 9432 double qnan = bit_cast<double>(0x7ffaaaaa87654321);
6022 9433
6023 double snan_processed = rawbits_to_double(0x7ffd555512345678); 9434 double snan_processed = bit_cast<double>(0x7ffd555512345678);
6024 double qnan_processed = qnan; 9435 double qnan_processed = qnan;
6025 9436
6026 CHECK(IsSignallingNaN(snan)); 9437 CHECK(IsSignallingNaN(snan));
6027 CHECK(IsQuietNaN(qnan)); 9438 CHECK(IsQuietNaN(qnan));
6028 CHECK(IsQuietNaN(snan_processed)); 9439 CHECK(IsQuietNaN(snan_processed));
6029 CHECK(IsQuietNaN(qnan_processed)); 9440 CHECK(IsQuietNaN(qnan_processed));
6030 9441
6031 // Bootstrap tests. 9442 // Bootstrap tests.
6032 FminFmaxDoubleHelper(0, 0, 0, 0, 0, 0); 9443 FminFmaxDoubleHelper(0, 0, 0, 0, 0, 0);
6033 FminFmaxDoubleHelper(0, 1, 0, 1, 0, 1); 9444 FminFmaxDoubleHelper(0, 1, 0, 1, 0, 1);
(...skipping 61 matching lines...)
6095 CHECK_EQUAL_FP32(minnm, s30); 9506 CHECK_EQUAL_FP32(minnm, s30);
6096 CHECK_EQUAL_FP32(maxnm, s31); 9507 CHECK_EQUAL_FP32(maxnm, s31);
6097 9508
6098 TEARDOWN(); 9509 TEARDOWN();
6099 } 9510 }
6100 9511
6101 9512
6102 TEST(fmax_fmin_s) { 9513 TEST(fmax_fmin_s) {
6103 INIT_V8(); 9514 INIT_V8();
6104 // Use non-standard NaNs to check that the payload bits are preserved. 9515 // Use non-standard NaNs to check that the payload bits are preserved.
6105 float snan = rawbits_to_float(0x7f951234); 9516 float snan = bit_cast<float>(0x7f951234);
6106 float qnan = rawbits_to_float(0x7fea8765); 9517 float qnan = bit_cast<float>(0x7fea8765);
6107 9518
6108 float snan_processed = rawbits_to_float(0x7fd51234); 9519 float snan_processed = bit_cast<float>(0x7fd51234);
6109 float qnan_processed = qnan; 9520 float qnan_processed = qnan;
6110 9521
6111 CHECK(IsSignallingNaN(snan)); 9522 CHECK(IsSignallingNaN(snan));
6112 CHECK(IsQuietNaN(qnan)); 9523 CHECK(IsQuietNaN(qnan));
6113 CHECK(IsQuietNaN(snan_processed)); 9524 CHECK(IsQuietNaN(snan_processed));
6114 CHECK(IsQuietNaN(qnan_processed)); 9525 CHECK(IsQuietNaN(qnan_processed));
6115 9526
6116 // Bootstrap tests. 9527 // Bootstrap tests.
6117 FminFmaxFloatHelper(0, 0, 0, 0, 0, 0); 9528 FminFmaxFloatHelper(0, 0, 0, 0, 0, 0);
6118 FminFmaxFloatHelper(0, 1, 0, 1, 0, 1); 9529 FminFmaxFloatHelper(0, 1, 0, 1, 0, 1);
(...skipping 814 matching lines...)
6933 __ Fmov(s19, 1.9); 10344 __ Fmov(s19, 1.9);
6934 __ Fmov(s20, 2.5); 10345 __ Fmov(s20, 2.5);
6935 __ Fmov(s21, -1.5); 10346 __ Fmov(s21, -1.5);
6936 __ Fmov(s22, -2.5); 10347 __ Fmov(s22, -2.5);
6937 __ Fmov(s23, kFP32PositiveInfinity); 10348 __ Fmov(s23, kFP32PositiveInfinity);
6938 __ Fmov(s24, kFP32NegativeInfinity); 10349 __ Fmov(s24, kFP32NegativeInfinity);
6939 __ Fmov(s25, 0.0); 10350 __ Fmov(s25, 0.0);
6940 __ Fmov(s26, -0.0); 10351 __ Fmov(s26, -0.0);
6941 __ Fmov(s27, FLT_MAX); 10352 __ Fmov(s27, FLT_MAX);
6942 __ Fmov(s28, FLT_MIN); 10353 __ Fmov(s28, FLT_MIN);
6943 __ Fmov(s29, rawbits_to_float(0x7fc12345)); // Quiet NaN. 10354 __ Fmov(s29, bit_cast<float>(0x7fc12345)); // Quiet NaN.
6944 __ Fmov(s30, rawbits_to_float(0x7f812345)); // Signalling NaN. 10355 __ Fmov(s30, bit_cast<float>(0x7f812345)); // Signalling NaN.
6945 10356
6946 __ Fcvt(d0, s16); 10357 __ Fcvt(d0, s16);
6947 __ Fcvt(d1, s17); 10358 __ Fcvt(d1, s17);
6948 __ Fcvt(d2, s18); 10359 __ Fcvt(d2, s18);
6949 __ Fcvt(d3, s19); 10360 __ Fcvt(d3, s19);
6950 __ Fcvt(d4, s20); 10361 __ Fcvt(d4, s20);
6951 __ Fcvt(d5, s21); 10362 __ Fcvt(d5, s21);
6952 __ Fcvt(d6, s22); 10363 __ Fcvt(d6, s22);
6953 __ Fcvt(d7, s23); 10364 __ Fcvt(d7, s23);
6954 __ Fcvt(d8, s24); 10365 __ Fcvt(d8, s24);
(...skipping 20 matching lines...)
6975 CHECK_EQUAL_FP64(-0.0f, d10); 10386 CHECK_EQUAL_FP64(-0.0f, d10);
6976 CHECK_EQUAL_FP64(FLT_MAX, d11); 10387 CHECK_EQUAL_FP64(FLT_MAX, d11);
6977 CHECK_EQUAL_FP64(FLT_MIN, d12); 10388 CHECK_EQUAL_FP64(FLT_MIN, d12);
6978 10389
6979 // Check that the NaN payload is preserved according to ARM64 conversion 10390 // Check that the NaN payload is preserved according to ARM64 conversion
6980 // rules: 10391 // rules:
6981 // - The sign bit is preserved. 10392 // - The sign bit is preserved.
6982 // - The top bit of the mantissa is forced to 1 (making it a quiet NaN). 10393 // - The top bit of the mantissa is forced to 1 (making it a quiet NaN).
6983 // - The remaining mantissa bits are copied until they run out. 10394 // - The remaining mantissa bits are copied until they run out.
6984 // - The low-order bits that haven't already been assigned are set to 0. 10395 // - The low-order bits that haven't already been assigned are set to 0.
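// As a worked sketch of these rules applied to the inputs above: s30 is
// 0x7f812345, so the sign is 0, the exponent becomes all ones, and the
// mantissa 0x012345 has its top bit forced, giving 0x412345. Left-aligning
// those 23 bits in the 52-bit double mantissa is a shift by 29, and
// 0x412345 << 29 = 0x82468a0000000, which yields the expected value
// 0x7ff82468a0000000 checked below (s29, already quiet, converts to the
// same value).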
6985 CHECK_EQUAL_FP64(rawbits_to_double(0x7ff82468a0000000), d13); 10396 CHECK_EQUAL_FP64(bit_cast<double>(0x7ff82468a0000000), d13);
6986 CHECK_EQUAL_FP64(rawbits_to_double(0x7ff82468a0000000), d14); 10397 CHECK_EQUAL_FP64(bit_cast<double>(0x7ff82468a0000000), d14);
6987 10398
6988 TEARDOWN(); 10399 TEARDOWN();
6989 } 10400 }
6990 10401
6991 10402
6992 TEST(fcvt_sd) { 10403 TEST(fcvt_sd) {
6993 INIT_V8(); 10404 INIT_V8();
6994 // There are a huge number of corner-cases to check, so this test iterates 10405 // There are a huge number of corner-cases to check, so this test iterates
6995 // through a list. The list is then negated and checked again (since the sign 10406 // through a list. The list is then negated and checked again (since the sign
6996 // is irrelevant in ties-to-even rounding), so the list shouldn't include any 10407 // is irrelevant in ties-to-even rounding), so the list shouldn't include any
6997 // negative values. 10408 // negative values.
6998 // 10409 //
6999 // Note that this test only checks ties-to-even rounding, because that is all 10410 // Note that this test only checks ties-to-even rounding, because that is all
7000 // that the simulator supports. 10411 // that the simulator supports.
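// As a worked example of the ties-to-even cases in the table below (using
// its convention that bit 29 is the lowest double-mantissa bit that fits in
// the float): 0x3ff0000010000000 lies exactly halfway between 0x3f800000
// and 0x3f800001, so rounding picks the even mantissa, 0x3f800000; the next
// double up, 0x3ff0000010000001, is past the halfway point and rounds to
// 0x3f800001.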
7001 struct {double in; float expected;} test[] = { 10412 struct {
7002 // Check some simple conversions. 10413 double in;
7003 {0.0, 0.0f}, 10414 float expected;
7004 {1.0, 1.0f}, 10415 } test[] = {
7005 {1.5, 1.5f}, 10416 // Check some simple conversions.
7006 {2.0, 2.0f}, 10417 {0.0, 0.0f},
7007 {FLT_MAX, FLT_MAX}, 10418 {1.0, 1.0f},
7008 // - The smallest normalized float. 10419 {1.5, 1.5f},
7009 {pow(2.0, -126), powf(2, -126)}, 10420 {2.0, 2.0f},
7010 // - Normal floats that need (ties-to-even) rounding. 10421 {FLT_MAX, FLT_MAX},
7011 // For normalized numbers: 10422 // - The smallest normalized float.
7012 // bit 29 (0x0000000020000000) is the lowest-order bit which will 10423 {pow(2.0, -126), powf(2, -126)},
7013 // fit in the float's mantissa. 10424 // - Normal floats that need (ties-to-even) rounding.
7014 {rawbits_to_double(0x3ff0000000000000), rawbits_to_float(0x3f800000)}, 10425 // For normalized numbers:
7015 {rawbits_to_double(0x3ff0000000000001), rawbits_to_float(0x3f800000)}, 10426 // bit 29 (0x0000000020000000) is the lowest-order bit which will
7016 {rawbits_to_double(0x3ff0000010000000), rawbits_to_float(0x3f800000)}, 10427 // fit in the float's mantissa.
7017 {rawbits_to_double(0x3ff0000010000001), rawbits_to_float(0x3f800001)}, 10428 {bit_cast<double>(0x3ff0000000000000), bit_cast<float>(0x3f800000)},
7018 {rawbits_to_double(0x3ff0000020000000), rawbits_to_float(0x3f800001)}, 10429 {bit_cast<double>(0x3ff0000000000001), bit_cast<float>(0x3f800000)},
7019 {rawbits_to_double(0x3ff0000020000001), rawbits_to_float(0x3f800001)}, 10430 {bit_cast<double>(0x3ff0000010000000), bit_cast<float>(0x3f800000)},
7020 {rawbits_to_double(0x3ff0000030000000), rawbits_to_float(0x3f800002)}, 10431 {bit_cast<double>(0x3ff0000010000001), bit_cast<float>(0x3f800001)},
7021 {rawbits_to_double(0x3ff0000030000001), rawbits_to_float(0x3f800002)}, 10432 {bit_cast<double>(0x3ff0000020000000), bit_cast<float>(0x3f800001)},
7022 {rawbits_to_double(0x3ff0000040000000), rawbits_to_float(0x3f800002)}, 10433 {bit_cast<double>(0x3ff0000020000001), bit_cast<float>(0x3f800001)},
7023 {rawbits_to_double(0x3ff0000040000001), rawbits_to_float(0x3f800002)}, 10434 {bit_cast<double>(0x3ff0000030000000), bit_cast<float>(0x3f800002)},
7024 {rawbits_to_double(0x3ff0000050000000), rawbits_to_float(0x3f800002)}, 10435 {bit_cast<double>(0x3ff0000030000001), bit_cast<float>(0x3f800002)},
7025 {rawbits_to_double(0x3ff0000050000001), rawbits_to_float(0x3f800003)}, 10436 {bit_cast<double>(0x3ff0000040000000), bit_cast<float>(0x3f800002)},
7026 {rawbits_to_double(0x3ff0000060000000), rawbits_to_float(0x3f800003)}, 10437 {bit_cast<double>(0x3ff0000040000001), bit_cast<float>(0x3f800002)},
7027 // - A mantissa that overflows into the exponent during rounding. 10438 {bit_cast<double>(0x3ff0000050000000), bit_cast<float>(0x3f800002)},
7028 {rawbits_to_double(0x3feffffff0000000), rawbits_to_float(0x3f800000)}, 10439 {bit_cast<double>(0x3ff0000050000001), bit_cast<float>(0x3f800003)},
7029 // - The largest double that rounds to a normal float. 10440 {bit_cast<double>(0x3ff0000060000000), bit_cast<float>(0x3f800003)},
7030 {rawbits_to_double(0x47efffffefffffff), rawbits_to_float(0x7f7fffff)}, 10441 // - A mantissa that overflows into the exponent during rounding.
10442 {bit_cast<double>(0x3feffffff0000000), bit_cast<float>(0x3f800000)},
10443 // - The largest double that rounds to a normal float.
10444 {bit_cast<double>(0x47efffffefffffff), bit_cast<float>(0x7f7fffff)},
7031 10445
7032 // Doubles that are too big for a float. 10446 // Doubles that are too big for a float.
7033 {kFP64PositiveInfinity, kFP32PositiveInfinity}, 10447 {kFP64PositiveInfinity, kFP32PositiveInfinity},
7034 {DBL_MAX, kFP32PositiveInfinity}, 10448 {DBL_MAX, kFP32PositiveInfinity},
7035 // - The smallest exponent that's too big for a float. 10449 // - The smallest exponent that's too big for a float.
7036 {pow(2.0, 128), kFP32PositiveInfinity}, 10450 {pow(2.0, 128), kFP32PositiveInfinity},
7037 // - This exponent is in range, but the value rounds to infinity. 10451 // - This exponent is in range, but the value rounds to infinity.
7038 {rawbits_to_double(0x47effffff0000000), kFP32PositiveInfinity}, 10452 {bit_cast<double>(0x47effffff0000000), kFP32PositiveInfinity},
7039 10453
7040 // Doubles that are too small for a float. 10454 // Doubles that are too small for a float.
7041 // - The smallest (subnormal) double. 10455 // - The smallest (subnormal) double.
7042 {DBL_MIN, 0.0}, 10456 {DBL_MIN, 0.0},
7043 // - The largest double which is too small for a subnormal float. 10457 // - The largest double which is too small for a subnormal float.
7044 {rawbits_to_double(0x3690000000000000), rawbits_to_float(0x00000000)}, 10458 {bit_cast<double>(0x3690000000000000), bit_cast<float>(0x00000000)},
7045 10459
7046 // Normal doubles that become subnormal floats. 10460 // Normal doubles that become subnormal floats.
7047 // - The largest subnormal float. 10461 // - The largest subnormal float.
7048 {rawbits_to_double(0x380fffffc0000000), rawbits_to_float(0x007fffff)}, 10462 {bit_cast<double>(0x380fffffc0000000), bit_cast<float>(0x007fffff)},
7049 // - The smallest subnormal float. 10463 // - The smallest subnormal float.
7050 {rawbits_to_double(0x36a0000000000000), rawbits_to_float(0x00000001)}, 10464 {bit_cast<double>(0x36a0000000000000), bit_cast<float>(0x00000001)},
7051 // - Subnormal floats that need (ties-to-even) rounding. 10465 // - Subnormal floats that need (ties-to-even) rounding.
7052 // For these subnormals: 10466 // For these subnormals:
7053 // bit 34 (0x0000000400000000) is the lowest-order bit which will 10467 // bit 34 (0x0000000400000000) is the lowest-order bit which will
7054 // fit in the float's mantissa. 10468 // fit in the float's mantissa.
7055 {rawbits_to_double(0x37c159e000000000), rawbits_to_float(0x00045678)}, 10469 {bit_cast<double>(0x37c159e000000000), bit_cast<float>(0x00045678)},
7056 {rawbits_to_double(0x37c159e000000001), rawbits_to_float(0x00045678)}, 10470 {bit_cast<double>(0x37c159e000000001), bit_cast<float>(0x00045678)},
7057 {rawbits_to_double(0x37c159e200000000), rawbits_to_float(0x00045678)}, 10471 {bit_cast<double>(0x37c159e200000000), bit_cast<float>(0x00045678)},
7058 {rawbits_to_double(0x37c159e200000001), rawbits_to_float(0x00045679)}, 10472 {bit_cast<double>(0x37c159e200000001), bit_cast<float>(0x00045679)},
7059 {rawbits_to_double(0x37c159e400000000), rawbits_to_float(0x00045679)}, 10473 {bit_cast<double>(0x37c159e400000000), bit_cast<float>(0x00045679)},
7060 {rawbits_to_double(0x37c159e400000001), rawbits_to_float(0x00045679)}, 10474 {bit_cast<double>(0x37c159e400000001), bit_cast<float>(0x00045679)},
7061 {rawbits_to_double(0x37c159e600000000), rawbits_to_float(0x0004567a)}, 10475 {bit_cast<double>(0x37c159e600000000), bit_cast<float>(0x0004567a)},
7062 {rawbits_to_double(0x37c159e600000001), rawbits_to_float(0x0004567a)}, 10476 {bit_cast<double>(0x37c159e600000001), bit_cast<float>(0x0004567a)},
7063 {rawbits_to_double(0x37c159e800000000), rawbits_to_float(0x0004567a)}, 10477 {bit_cast<double>(0x37c159e800000000), bit_cast<float>(0x0004567a)},
7064 {rawbits_to_double(0x37c159e800000001), rawbits_to_float(0x0004567a)}, 10478 {bit_cast<double>(0x37c159e800000001), bit_cast<float>(0x0004567a)},
7065 {rawbits_to_double(0x37c159ea00000000), rawbits_to_float(0x0004567a)}, 10479 {bit_cast<double>(0x37c159ea00000000), bit_cast<float>(0x0004567a)},
7066 {rawbits_to_double(0x37c159ea00000001), rawbits_to_float(0x0004567b)}, 10480 {bit_cast<double>(0x37c159ea00000001), bit_cast<float>(0x0004567b)},
7067 {rawbits_to_double(0x37c159ec00000000), rawbits_to_float(0x0004567b)}, 10481 {bit_cast<double>(0x37c159ec00000000), bit_cast<float>(0x0004567b)},
7068 // - The smallest double which rounds up to become a subnormal float. 10482 // - The smallest double which rounds up to become a subnormal float.
7069 {rawbits_to_double(0x3690000000000001), rawbits_to_float(0x00000001)}, 10483 {bit_cast<double>(0x3690000000000001), bit_cast<float>(0x00000001)},
7070 10484
7071 // Check NaN payload preservation. 10485 // Check NaN payload preservation.
7072 {rawbits_to_double(0x7ff82468a0000000), rawbits_to_float(0x7fc12345)}, 10486 {bit_cast<double>(0x7ff82468a0000000), bit_cast<float>(0x7fc12345)},
7073 {rawbits_to_double(0x7ff82468bfffffff), rawbits_to_float(0x7fc12345)}, 10487 {bit_cast<double>(0x7ff82468bfffffff), bit_cast<float>(0x7fc12345)},
7074 // - Signalling NaNs become quiet NaNs. 10488 // - Signalling NaNs become quiet NaNs.
7075 {rawbits_to_double(0x7ff02468a0000000), rawbits_to_float(0x7fc12345)}, 10489 {bit_cast<double>(0x7ff02468a0000000), bit_cast<float>(0x7fc12345)},
7076 {rawbits_to_double(0x7ff02468bfffffff), rawbits_to_float(0x7fc12345)}, 10490 {bit_cast<double>(0x7ff02468bfffffff), bit_cast<float>(0x7fc12345)},
7077 {rawbits_to_double(0x7ff000001fffffff), rawbits_to_float(0x7fc00000)}, 10491 {bit_cast<double>(0x7ff000001fffffff), bit_cast<float>(0x7fc00000)},
7078 }; 10492 };
7079 int count = sizeof(test) / sizeof(test[0]); 10493 int count = sizeof(test) / sizeof(test[0]);
7080 10494
7081 for (int i = 0; i < count; i++) { 10495 for (int i = 0; i < count; i++) {
7082 double in = test[i].in; 10496 double in = test[i].in;
7083 float expected = test[i].expected; 10497 float expected = test[i].expected;
7084 10498
7085 // We only expect positive input. 10499 // We only expect positive input.
7086 CHECK(std::signbit(in) == 0); 10500 CHECK(std::signbit(in) == 0);
7087 CHECK(std::signbit(expected) == 0); 10501 CHECK(std::signbit(expected) == 0);
(...skipping 911 matching lines...)
7999 __ Scvtf(d0, x10, fbits); 11413 __ Scvtf(d0, x10, fbits);
8000 __ Ucvtf(d1, x10, fbits); 11414 __ Ucvtf(d1, x10, fbits);
8001 __ Str(d0, MemOperand(x0, fbits * kDRegSize)); 11415 __ Str(d0, MemOperand(x0, fbits * kDRegSize));
8002 __ Str(d1, MemOperand(x1, fbits * kDRegSize)); 11416 __ Str(d1, MemOperand(x1, fbits * kDRegSize));
8003 } 11417 }
8004 11418
8005 END(); 11419 END();
8006 RUN(); 11420 RUN();
8007 11421
8008 // Check the results. 11422 // Check the results.
8009 double expected_scvtf_base = rawbits_to_double(expected_scvtf_bits); 11423 double expected_scvtf_base = bit_cast<double>(expected_scvtf_bits);
8010 double expected_ucvtf_base = rawbits_to_double(expected_ucvtf_bits); 11424 double expected_ucvtf_base = bit_cast<double>(expected_ucvtf_bits);
8011 11425
8012 for (int fbits = 0; fbits <= 32; fbits++) { 11426 for (int fbits = 0; fbits <= 32; fbits++) {
8013 double expected_scvtf = expected_scvtf_base / pow(2.0, fbits); 11427 double expected_scvtf = expected_scvtf_base / pow(2.0, fbits);
8014 double expected_ucvtf = expected_ucvtf_base / pow(2.0, fbits); 11428 double expected_ucvtf = expected_ucvtf_base / pow(2.0, fbits);
8015 CHECK_EQUAL_FP64(expected_scvtf, results_scvtf_x[fbits]); 11429 CHECK_EQUAL_FP64(expected_scvtf, results_scvtf_x[fbits]);
8016 CHECK_EQUAL_FP64(expected_ucvtf, results_ucvtf_x[fbits]); 11430 CHECK_EQUAL_FP64(expected_ucvtf, results_ucvtf_x[fbits]);
8017 if (cvtf_s32) CHECK_EQUAL_FP64(expected_scvtf, results_scvtf_w[fbits]); 11431 if (cvtf_s32) CHECK_EQUAL_FP64(expected_scvtf, results_scvtf_w[fbits]);
8018 if (cvtf_u32) CHECK_EQUAL_FP64(expected_ucvtf, results_ucvtf_w[fbits]); 11432 if (cvtf_u32) CHECK_EQUAL_FP64(expected_ucvtf, results_ucvtf_w[fbits]);
8019 } 11433 }
8020 for (int fbits = 33; fbits <= 64; fbits++) { 11434 for (int fbits = 33; fbits <= 64; fbits++) {
(...skipping 133 matching lines...)
8154 __ Scvtf(s0, x10, fbits); 11568 __ Scvtf(s0, x10, fbits);
8155 __ Ucvtf(s1, x10, fbits); 11569 __ Ucvtf(s1, x10, fbits);
8156 __ Str(s0, MemOperand(x0, fbits * kSRegSize)); 11570 __ Str(s0, MemOperand(x0, fbits * kSRegSize));
8157 __ Str(s1, MemOperand(x1, fbits * kSRegSize)); 11571 __ Str(s1, MemOperand(x1, fbits * kSRegSize));
8158 } 11572 }
8159 11573
8160 END(); 11574 END();
8161 RUN(); 11575 RUN();
8162 11576
8163 // Check the results. 11577 // Check the results.
8164 float expected_scvtf_base = rawbits_to_float(expected_scvtf_bits); 11578 float expected_scvtf_base = bit_cast<float>(expected_scvtf_bits);
8165 float expected_ucvtf_base = rawbits_to_float(expected_ucvtf_bits); 11579 float expected_ucvtf_base = bit_cast<float>(expected_ucvtf_bits);
8166 11580
8167 for (int fbits = 0; fbits <= 32; fbits++) { 11581 for (int fbits = 0; fbits <= 32; fbits++) {
8168 float expected_scvtf = expected_scvtf_base / powf(2, fbits); 11582 float expected_scvtf = expected_scvtf_base / powf(2, fbits);
8169 float expected_ucvtf = expected_ucvtf_base / powf(2, fbits); 11583 float expected_ucvtf = expected_ucvtf_base / powf(2, fbits);
8170 CHECK_EQUAL_FP32(expected_scvtf, results_scvtf_x[fbits]); 11584 CHECK_EQUAL_FP32(expected_scvtf, results_scvtf_x[fbits]);
8171 CHECK_EQUAL_FP32(expected_ucvtf, results_ucvtf_x[fbits]); 11585 CHECK_EQUAL_FP32(expected_ucvtf, results_ucvtf_x[fbits]);
8172 if (cvtf_s32) CHECK_EQUAL_FP32(expected_scvtf, results_scvtf_w[fbits]); 11586 if (cvtf_s32) CHECK_EQUAL_FP32(expected_scvtf, results_scvtf_w[fbits]);
8173 if (cvtf_u32) CHECK_EQUAL_FP32(expected_ucvtf, results_ucvtf_w[fbits]); 11587 if (cvtf_u32) CHECK_EQUAL_FP32(expected_ucvtf, results_ucvtf_w[fbits]);
8174 break; 11588 break;
8175 } 11589 }
(...skipping 877 matching lines...)
9053 PushPopMethod push_method, 12467 PushPopMethod push_method,
9054 PushPopMethod pop_method) { 12468 PushPopMethod pop_method) {
9055 SETUP(); 12469 SETUP();
9056 12470
9057 START(); 12471 START();
9058 12472
9059 // We can use any floating-point register. None of them are reserved for 12473 // We can use any floating-point register. None of them are reserved for
9060 // debug code, for example. 12474 // debug code, for example.
9061 static RegList const allowed = ~0; 12475 static RegList const allowed = ~0;
9062 if (reg_count == kPushPopFPJsspMaxRegCount) { 12476 if (reg_count == kPushPopFPJsspMaxRegCount) {
9063 reg_count = CountSetBits(allowed, kNumberOfFPRegisters); 12477 reg_count = CountSetBits(allowed, kNumberOfVRegisters);
9064 } 12478 }
9065 // Work out which registers to use, based on reg_size. 12479 // Work out which registers to use, based on reg_size.
9066 FPRegister v[kNumberOfRegisters]; 12480 VRegister v[kNumberOfRegisters];
9067 FPRegister d[kNumberOfRegisters]; 12481 VRegister d[kNumberOfRegisters];
9068 RegList list = PopulateFPRegisterArray(NULL, d, v, reg_size, reg_count, 12482 RegList list =
9069 allowed); 12483 PopulateVRegisterArray(NULL, d, v, reg_size, reg_count, allowed);
9070 12484
9071 // The literal base is chosen to have three useful properties: 12485 // The literal base is chosen to have three useful properties:
9072 // * When multiplied (using an integer) by small values (such as a register 12486 // * When multiplied (using an integer) by small values (such as a register
9073 // index), this value is clearly readable in the result. 12487 // index), this value is clearly readable in the result.
9074 // * The value is not formed from repeating fixed-size smaller values, so it 12488 // * The value is not formed from repeating fixed-size smaller values, so it
9075 // can be used to detect endianness-related errors. 12489 // can be used to detect endianness-related errors.
9076 // * It is never a floating-point NaN, and will therefore always compare 12490 // * It is never a floating-point NaN, and will therefore always compare
9077 // equal to itself. 12491 // equal to itself.
9078 uint64_t literal_base = 0x0100001000100101UL; 12492 uint64_t literal_base = 0x0100001000100101UL;
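// For illustration, multiplying the base by the first few register indices
// gives:
//   0x0100001000100101 * 1 == 0x0100001000100101
//   0x0100001000100101 * 2 == 0x0200002000200202
//   0x0100001000100101 * 3 == 0x0300003000300303
// For small factors no nibble carries, so the multiplier stays visible in
// every digit group of the result.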
9079 12493
(...skipping 28 matching lines...)
9108 switch (i) { 12522 switch (i) {
9109 case 3: __ Push(v[2], v[1], v[0]); break; 12523 case 3: __ Push(v[2], v[1], v[0]); break;
9110 case 2: __ Push(v[1], v[0]); break; 12524 case 2: __ Push(v[1], v[0]); break;
9111 case 1: __ Push(v[0]); break; 12525 case 1: __ Push(v[0]); break;
9112 default: 12526 default:
9113 CHECK(i == 0); 12527 CHECK(i == 0);
9114 break; 12528 break;
9115 } 12529 }
9116 break; 12530 break;
9117 case PushPopRegList: 12531 case PushPopRegList:
9118 __ PushSizeRegList(list, reg_size, CPURegister::kFPRegister); 12532 __ PushSizeRegList(list, reg_size, CPURegister::kVRegister);
9119 break; 12533 break;
9120 } 12534 }
9121 12535
9122 // Clobber all the registers, to ensure that they get repopulated by Pop. 12536 // Clobber all the registers, to ensure that they get repopulated by Pop.
9123 ClobberFP(&masm, list); 12537 ClobberFP(&masm, list);
9124 12538
9125 switch (pop_method) { 12539 switch (pop_method) {
9126 case PushPopByFour: 12540 case PushPopByFour:
9127 // Pop low-numbered registers first (from the lowest addresses). 12541 // Pop low-numbered registers first (from the lowest addresses).
9128 for (i = 0; i <= (reg_count-4); i += 4) { 12542 for (i = 0; i <= (reg_count-4); i += 4) {
9129 __ Pop(v[i], v[i+1], v[i+2], v[i+3]); 12543 __ Pop(v[i], v[i+1], v[i+2], v[i+3]);
9130 } 12544 }
9131 // Finish off the leftovers. 12545 // Finish off the leftovers.
9132 switch (reg_count - i) { 12546 switch (reg_count - i) {
9133 case 3: __ Pop(v[i], v[i+1], v[i+2]); break; 12547 case 3: __ Pop(v[i], v[i+1], v[i+2]); break;
9134 case 2: __ Pop(v[i], v[i+1]); break; 12548 case 2: __ Pop(v[i], v[i+1]); break;
9135 case 1: __ Pop(v[i]); break; 12549 case 1: __ Pop(v[i]); break;
9136 default: 12550 default:
9137 CHECK(i == reg_count); 12551 CHECK(i == reg_count);
9138 break; 12552 break;
9139 } 12553 }
9140 break; 12554 break;
9141 case PushPopRegList: 12555 case PushPopRegList:
9142 __ PopSizeRegList(list, reg_size, CPURegister::kFPRegister); 12556 __ PopSizeRegList(list, reg_size, CPURegister::kVRegister);
9143 break; 12557 break;
9144 } 12558 }
9145 12559
9146 // Drop memory to restore jssp. 12560 // Drop memory to restore jssp.
9147 __ Drop(claim, kByteSizeInBytes); 12561 __ Drop(claim, kByteSizeInBytes);
9148 12562
9149 __ Mov(csp, __ StackPointer()); 12563 __ Mov(csp, __ StackPointer());
9150 __ SetStackPointer(csp); 12564 __ SetStackPointer(csp);
9151 } 12565 }
9152 12566
(...skipping 516 matching lines...)
9669 __ Mov(w5, 0x12340005); 13083 __ Mov(w5, 0x12340005);
9670 __ Mov(w6, 0x12340006); 13084 __ Mov(w6, 0x12340006);
9671 __ Fmov(d0, 123400.0); 13085 __ Fmov(d0, 123400.0);
9672 __ Fmov(d1, 123401.0); 13086 __ Fmov(d1, 123401.0);
9673 __ Fmov(s2, 123402.0); 13087 __ Fmov(s2, 123402.0);
9674 13088
9675 // Actually push them. 13089 // Actually push them.
9676 queue.PushQueued(); 13090 queue.PushQueued();
9677 13091
9678 Clobber(&masm, CPURegList(CPURegister::kRegister, kXRegSizeInBits, 0, 6)); 13092 Clobber(&masm, CPURegList(CPURegister::kRegister, kXRegSizeInBits, 0, 6));
9679 Clobber(&masm, CPURegList(CPURegister::kFPRegister, kDRegSizeInBits, 0, 2)); 13093 Clobber(&masm, CPURegList(CPURegister::kVRegister, kDRegSizeInBits, 0, 2));
9680 13094
9681 // Pop them conventionally. 13095 // Pop them conventionally.
9682 __ Pop(s2); 13096 __ Pop(s2);
9683 __ Pop(d1, d0); 13097 __ Pop(d1, d0);
9684 __ Pop(w6, w5, w4); 13098 __ Pop(w6, w5, w4);
9685 __ Pop(x3, x2, x1, x0); 13099 __ Pop(x3, x2, x1, x0);
9686 13100
9687 __ Mov(csp, __ StackPointer()); 13101 __ Mov(csp, __ StackPointer());
9688 __ SetStackPointer(csp); 13102 __ SetStackPointer(csp);
9689 13103
(...skipping 57 matching lines...)
9747 queue.Queue(w6); 13161 queue.Queue(w6);
9748 queue.Queue(w5); 13162 queue.Queue(w5);
9749 queue.Queue(w4); 13163 queue.Queue(w4);
9750 13164
9751 queue.Queue(x3); 13165 queue.Queue(x3);
9752 queue.Queue(x2); 13166 queue.Queue(x2);
9753 queue.Queue(x1); 13167 queue.Queue(x1);
9754 queue.Queue(x0); 13168 queue.Queue(x0);
9755 13169
9756 Clobber(&masm, CPURegList(CPURegister::kRegister, kXRegSizeInBits, 0, 6)); 13170 Clobber(&masm, CPURegList(CPURegister::kRegister, kXRegSizeInBits, 0, 6));
9757 Clobber(&masm, CPURegList(CPURegister::kFPRegister, kDRegSizeInBits, 0, 2)); 13171 Clobber(&masm, CPURegList(CPURegister::kVRegister, kDRegSizeInBits, 0, 2));
9758 13172
9759 // Actually pop them. 13173 // Actually pop them.
9760 queue.PopQueued(); 13174 queue.PopQueued();
9761 13175
9762 __ Mov(csp, __ StackPointer()); 13176 __ Mov(csp, __ StackPointer());
9763 __ SetStackPointer(csp); 13177 __ SetStackPointer(csp);
9764 13178
9765 END(); 13179 END();
9766 13180
9767 RUN(); 13181 RUN();
(...skipping 158 matching lines...)
9926 CHECK_EQUAL_64(1, x6); 13340 CHECK_EQUAL_64(1, x6);
9927 CHECK_EQUAL_64(1, x7); 13341 CHECK_EQUAL_64(1, x7);
9928 13342
9929 TEARDOWN(); 13343 TEARDOWN();
9930 } 13344 }
9931 13345
9932 13346
9933 TEST(noreg) { 13347 TEST(noreg) {
9934 // This test doesn't generate any code, but it verifies some invariants 13348 // This test doesn't generate any code, but it verifies some invariants
9935 // related to NoReg. 13349 // related to NoReg.
9936 CHECK(NoReg.Is(NoFPReg)); 13350 CHECK(NoReg.Is(NoVReg));
9937 CHECK(NoFPReg.Is(NoReg)); 13351 CHECK(NoVReg.Is(NoReg));
9938 CHECK(NoReg.Is(NoCPUReg)); 13352 CHECK(NoReg.Is(NoCPUReg));
9939 CHECK(NoCPUReg.Is(NoReg)); 13353 CHECK(NoCPUReg.Is(NoReg));
9940 CHECK(NoFPReg.Is(NoCPUReg)); 13354 CHECK(NoVReg.Is(NoCPUReg));
9941 CHECK(NoCPUReg.Is(NoFPReg)); 13355 CHECK(NoCPUReg.Is(NoVReg));
9942 13356
9943 CHECK(NoReg.IsNone()); 13357 CHECK(NoReg.IsNone());
9944 CHECK(NoFPReg.IsNone()); 13358 CHECK(NoVReg.IsNone());
9945 CHECK(NoCPUReg.IsNone()); 13359 CHECK(NoCPUReg.IsNone());
9946 } 13360 }
9947 13361
13362 TEST(vreg) {
 13363 // This test doesn't generate any code, but it verifies the helper
 13364 // functions and methods pertaining to VRegister logic.
13365
13366 CHECK_EQ(8U, RegisterSizeInBitsFromFormat(kFormatB));
13367 CHECK_EQ(16U, RegisterSizeInBitsFromFormat(kFormatH));
13368 CHECK_EQ(32U, RegisterSizeInBitsFromFormat(kFormatS));
13369 CHECK_EQ(64U, RegisterSizeInBitsFromFormat(kFormatD));
13370 CHECK_EQ(64U, RegisterSizeInBitsFromFormat(kFormat8B));
13371 CHECK_EQ(64U, RegisterSizeInBitsFromFormat(kFormat4H));
13372 CHECK_EQ(64U, RegisterSizeInBitsFromFormat(kFormat2S));
13373 CHECK_EQ(64U, RegisterSizeInBitsFromFormat(kFormat1D));
13374 CHECK_EQ(128U, RegisterSizeInBitsFromFormat(kFormat16B));
13375 CHECK_EQ(128U, RegisterSizeInBitsFromFormat(kFormat8H));
13376 CHECK_EQ(128U, RegisterSizeInBitsFromFormat(kFormat4S));
13377 CHECK_EQ(128U, RegisterSizeInBitsFromFormat(kFormat2D));
13378
13379 CHECK_EQ(16, LaneCountFromFormat(kFormat16B));
13380 CHECK_EQ(8, LaneCountFromFormat(kFormat8B));
13381 CHECK_EQ(8, LaneCountFromFormat(kFormat8H));
13382 CHECK_EQ(4, LaneCountFromFormat(kFormat4H));
13383 CHECK_EQ(4, LaneCountFromFormat(kFormat4S));
13384 CHECK_EQ(2, LaneCountFromFormat(kFormat2S));
13385 CHECK_EQ(2, LaneCountFromFormat(kFormat2D));
13386 CHECK_EQ(1, LaneCountFromFormat(kFormat1D));
13387 CHECK_EQ(1, LaneCountFromFormat(kFormatB));
13388 CHECK_EQ(1, LaneCountFromFormat(kFormatH));
13389 CHECK_EQ(1, LaneCountFromFormat(kFormatS));
13390 CHECK_EQ(1, LaneCountFromFormat(kFormatD));
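// The two groups of checks above are consistent with each other: lane count
// times lane size always equals the register size, e.g. kFormat4S is 4 lanes
// of 32 bits (128 bits in total) and kFormat8B is 8 lanes of 8 bits (64 bits).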
13391
13392 CHECK(!IsVectorFormat(kFormatB));
13393 CHECK(!IsVectorFormat(kFormatH));
13394 CHECK(!IsVectorFormat(kFormatS));
13395 CHECK(!IsVectorFormat(kFormatD));
13396 CHECK(IsVectorFormat(kFormat16B));
13397 CHECK(IsVectorFormat(kFormat8B));
13398 CHECK(IsVectorFormat(kFormat8H));
13399 CHECK(IsVectorFormat(kFormat4H));
13400 CHECK(IsVectorFormat(kFormat4S));
13401 CHECK(IsVectorFormat(kFormat2S));
13402 CHECK(IsVectorFormat(kFormat2D));
13403 CHECK(IsVectorFormat(kFormat1D));
13404
13405 CHECK(!d0.Is8B());
13406 CHECK(!d0.Is16B());
13407 CHECK(!d0.Is4H());
13408 CHECK(!d0.Is8H());
13409 CHECK(!d0.Is2S());
13410 CHECK(!d0.Is4S());
13411 CHECK(d0.Is1D());
13412 CHECK(!d0.Is1S());
13413 CHECK(!d0.Is1H());
13414 CHECK(!d0.Is1B());
13415 CHECK(!d0.IsVector());
13416 CHECK(d0.IsScalar());
13417 CHECK(d0.IsFPRegister());
13418
13419 CHECK(!d0.IsW());
13420 CHECK(!d0.IsX());
13421 CHECK(d0.IsV());
13422 CHECK(!d0.IsB());
13423 CHECK(!d0.IsH());
13424 CHECK(!d0.IsS());
13425 CHECK(d0.IsD());
13426 CHECK(!d0.IsQ());
13427
13428 CHECK(!s0.Is8B());
13429 CHECK(!s0.Is16B());
13430 CHECK(!s0.Is4H());
13431 CHECK(!s0.Is8H());
13432 CHECK(!s0.Is2S());
13433 CHECK(!s0.Is4S());
13434 CHECK(!s0.Is1D());
13435 CHECK(s0.Is1S());
13436 CHECK(!s0.Is1H());
13437 CHECK(!s0.Is1B());
13438 CHECK(!s0.IsVector());
13439 CHECK(s0.IsScalar());
13440 CHECK(s0.IsFPRegister());
13441
13442 CHECK(!s0.IsW());
13443 CHECK(!s0.IsX());
13444 CHECK(s0.IsV());
13445 CHECK(!s0.IsB());
13446 CHECK(!s0.IsH());
13447 CHECK(s0.IsS());
13448 CHECK(!s0.IsD());
13449 CHECK(!s0.IsQ());
13450
13451 CHECK(!h0.Is8B());
13452 CHECK(!h0.Is16B());
13453 CHECK(!h0.Is4H());
13454 CHECK(!h0.Is8H());
13455 CHECK(!h0.Is2S());
13456 CHECK(!h0.Is4S());
13457 CHECK(!h0.Is1D());
13458 CHECK(!h0.Is1S());
13459 CHECK(h0.Is1H());
13460 CHECK(!h0.Is1B());
13461 CHECK(!h0.IsVector());
13462 CHECK(h0.IsScalar());
13463 CHECK(!h0.IsFPRegister());
13464
13465 CHECK(!h0.IsW());
13466 CHECK(!h0.IsX());
13467 CHECK(h0.IsV());
13468 CHECK(!h0.IsB());
13469 CHECK(h0.IsH());
13470 CHECK(!h0.IsS());
13471 CHECK(!h0.IsD());
13472 CHECK(!h0.IsQ());
13473
13474 CHECK(!b0.Is8B());
13475 CHECK(!b0.Is16B());
13476 CHECK(!b0.Is4H());
13477 CHECK(!b0.Is8H());
13478 CHECK(!b0.Is2S());
13479 CHECK(!b0.Is4S());
13480 CHECK(!b0.Is1D());
13481 CHECK(!b0.Is1S());
13482 CHECK(!b0.Is1H());
13483 CHECK(b0.Is1B());
13484 CHECK(!b0.IsVector());
13485 CHECK(b0.IsScalar());
13486 CHECK(!b0.IsFPRegister());
13487
13488 CHECK(!b0.IsW());
13489 CHECK(!b0.IsX());
13490 CHECK(b0.IsV());
13491 CHECK(b0.IsB());
13492 CHECK(!b0.IsH());
13493 CHECK(!b0.IsS());
13494 CHECK(!b0.IsD());
13495 CHECK(!b0.IsQ());
13496
13497 CHECK(!q0.Is8B());
13498 CHECK(!q0.Is16B());
13499 CHECK(!q0.Is4H());
13500 CHECK(!q0.Is8H());
13501 CHECK(!q0.Is2S());
13502 CHECK(!q0.Is4S());
13503 CHECK(!q0.Is1D());
13504 CHECK(!q0.Is2D());
13505 CHECK(!q0.Is1S());
13506 CHECK(!q0.Is1H());
13507 CHECK(!q0.Is1B());
13508 CHECK(!q0.IsVector());
13509 CHECK(q0.IsScalar());
13510 CHECK(!q0.IsFPRegister());
13511
13512 CHECK(!q0.IsW());
13513 CHECK(!q0.IsX());
13514 CHECK(q0.IsV());
13515 CHECK(!q0.IsB());
13516 CHECK(!q0.IsH());
13517 CHECK(!q0.IsS());
13518 CHECK(!q0.IsD());
13519 CHECK(q0.IsQ());
13520
13521 CHECK(w0.IsW());
13522 CHECK(!w0.IsX());
13523 CHECK(!w0.IsV());
13524 CHECK(!w0.IsB());
13525 CHECK(!w0.IsH());
13526 CHECK(!w0.IsS());
13527 CHECK(!w0.IsD());
13528 CHECK(!w0.IsQ());
13529
13530 CHECK(!x0.IsW());
13531 CHECK(x0.IsX());
13532 CHECK(!x0.IsV());
13533 CHECK(!x0.IsB());
13534 CHECK(!x0.IsH());
13535 CHECK(!x0.IsS());
13536 CHECK(!x0.IsD());
13537 CHECK(!x0.IsQ());
13538
13539 CHECK(v0.V().IsV());
13540 CHECK(v0.B().IsB());
13541 CHECK(v0.H().IsH());
13542 CHECK(v0.D().IsD());
13543 CHECK(v0.S().IsS());
13544 CHECK(v0.Q().IsQ());
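// B(), H(), S(), D() and Q() are the 8-, 16-, 32-, 64- and 128-bit scalar
// views of the same register code, matching the *RegFromCode size checks
// further down.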
13545
13546 VRegister test_8b(VRegister::Create(0, 64, 8));
13547 CHECK(test_8b.Is8B());
13548 CHECK(!test_8b.Is16B());
13549 CHECK(!test_8b.Is4H());
13550 CHECK(!test_8b.Is8H());
13551 CHECK(!test_8b.Is2S());
13552 CHECK(!test_8b.Is4S());
13553 CHECK(!test_8b.Is1D());
13554 CHECK(!test_8b.Is2D());
13555 CHECK(!test_8b.Is1H());
13556 CHECK(!test_8b.Is1B());
13557 CHECK(test_8b.IsVector());
13558 CHECK(!test_8b.IsScalar());
13559 CHECK(test_8b.IsFPRegister());
13560
13561 VRegister test_16b(VRegister::Create(0, 128, 16));
13562 CHECK(!test_16b.Is8B());
13563 CHECK(test_16b.Is16B());
13564 CHECK(!test_16b.Is4H());
13565 CHECK(!test_16b.Is8H());
13566 CHECK(!test_16b.Is2S());
13567 CHECK(!test_16b.Is4S());
13568 CHECK(!test_16b.Is1D());
13569 CHECK(!test_16b.Is2D());
13570 CHECK(!test_16b.Is1H());
13571 CHECK(!test_16b.Is1B());
13572 CHECK(test_16b.IsVector());
13573 CHECK(!test_16b.IsScalar());
13574 CHECK(!test_16b.IsFPRegister());
13575
13576 VRegister test_4h(VRegister::Create(0, 64, 4));
13577 CHECK(!test_4h.Is8B());
13578 CHECK(!test_4h.Is16B());
13579 CHECK(test_4h.Is4H());
13580 CHECK(!test_4h.Is8H());
13581 CHECK(!test_4h.Is2S());
13582 CHECK(!test_4h.Is4S());
13583 CHECK(!test_4h.Is1D());
13584 CHECK(!test_4h.Is2D());
13585 CHECK(!test_4h.Is1H());
13586 CHECK(!test_4h.Is1B());
13587 CHECK(test_4h.IsVector());
13588 CHECK(!test_4h.IsScalar());
13589 CHECK(test_4h.IsFPRegister());
13590
13591 VRegister test_8h(VRegister::Create(0, 128, 8));
13592 CHECK(!test_8h.Is8B());
13593 CHECK(!test_8h.Is16B());
13594 CHECK(!test_8h.Is4H());
13595 CHECK(test_8h.Is8H());
13596 CHECK(!test_8h.Is2S());
13597 CHECK(!test_8h.Is4S());
13598 CHECK(!test_8h.Is1D());
13599 CHECK(!test_8h.Is2D());
13600 CHECK(!test_8h.Is1H());
13601 CHECK(!test_8h.Is1B());
13602 CHECK(test_8h.IsVector());
13603 CHECK(!test_8h.IsScalar());
13604 CHECK(!test_8h.IsFPRegister());
13605
13606 VRegister test_2s(VRegister::Create(0, 64, 2));
13607 CHECK(!test_2s.Is8B());
13608 CHECK(!test_2s.Is16B());
13609 CHECK(!test_2s.Is4H());
13610 CHECK(!test_2s.Is8H());
13611 CHECK(test_2s.Is2S());
13612 CHECK(!test_2s.Is4S());
13613 CHECK(!test_2s.Is1D());
13614 CHECK(!test_2s.Is2D());
13615 CHECK(!test_2s.Is1H());
13616 CHECK(!test_2s.Is1B());
13617 CHECK(test_2s.IsVector());
13618 CHECK(!test_2s.IsScalar());
13619 CHECK(test_2s.IsFPRegister());
13620
13621 VRegister test_4s(VRegister::Create(0, 128, 4));
13622 CHECK(!test_4s.Is8B());
13623 CHECK(!test_4s.Is16B());
13624 CHECK(!test_4s.Is4H());
13625 CHECK(!test_4s.Is8H());
13626 CHECK(!test_4s.Is2S());
13627 CHECK(test_4s.Is4S());
13628 CHECK(!test_4s.Is1D());
13629 CHECK(!test_4s.Is2D());
13630 CHECK(!test_4s.Is1S());
13631 CHECK(!test_4s.Is1H());
13632 CHECK(!test_4s.Is1B());
13633 CHECK(test_4s.IsVector());
13634 CHECK(!test_4s.IsScalar());
13635 CHECK(!test_4s.IsFPRegister());
13636
13637 VRegister test_1d(VRegister::Create(0, 64, 1));
13638 CHECK(!test_1d.Is8B());
13639 CHECK(!test_1d.Is16B());
13640 CHECK(!test_1d.Is4H());
13641 CHECK(!test_1d.Is8H());
13642 CHECK(!test_1d.Is2S());
13643 CHECK(!test_1d.Is4S());
13644 CHECK(test_1d.Is1D());
13645 CHECK(!test_1d.Is2D());
13646 CHECK(!test_1d.Is1S());
13647 CHECK(!test_1d.Is1H());
13648 CHECK(!test_1d.Is1B());
13649 CHECK(!test_1d.IsVector());
13650 CHECK(test_1d.IsScalar());
13651 CHECK(test_1d.IsFPRegister());
13652
13653 VRegister test_2d(VRegister::Create(0, 128, 2));
13654 CHECK(!test_2d.Is8B());
13655 CHECK(!test_2d.Is16B());
13656 CHECK(!test_2d.Is4H());
13657 CHECK(!test_2d.Is8H());
13658 CHECK(!test_2d.Is2S());
13659 CHECK(!test_2d.Is4S());
13660 CHECK(!test_2d.Is1D());
13661 CHECK(test_2d.Is2D());
13662 CHECK(!test_2d.Is1H());
13663 CHECK(!test_2d.Is1B());
13664 CHECK(test_2d.IsVector());
13665 CHECK(!test_2d.IsScalar());
13666 CHECK(!test_2d.IsFPRegister());
13667
13668 VRegister test_1s(VRegister::Create(0, 32, 1));
13669 CHECK(!test_1s.Is8B());
13670 CHECK(!test_1s.Is16B());
13671 CHECK(!test_1s.Is4H());
13672 CHECK(!test_1s.Is8H());
13673 CHECK(!test_1s.Is2S());
13674 CHECK(!test_1s.Is4S());
13675 CHECK(!test_1s.Is1D());
13676 CHECK(!test_1s.Is2D());
13677 CHECK(test_1s.Is1S());
13678 CHECK(!test_1s.Is1H());
13679 CHECK(!test_1s.Is1B());
13680 CHECK(!test_1s.IsVector());
13681 CHECK(test_1s.IsScalar());
13682 CHECK(test_1s.IsFPRegister());
13683
13684 VRegister test_1h(VRegister::Create(0, 16, 1));
13685 CHECK(!test_1h.Is8B());
13686 CHECK(!test_1h.Is16B());
13687 CHECK(!test_1h.Is4H());
13688 CHECK(!test_1h.Is8H());
13689 CHECK(!test_1h.Is2S());
13690 CHECK(!test_1h.Is4S());
13691 CHECK(!test_1h.Is1D());
13692 CHECK(!test_1h.Is2D());
13693 CHECK(!test_1h.Is1S());
13694 CHECK(test_1h.Is1H());
13695 CHECK(!test_1h.Is1B());
13696 CHECK(!test_1h.IsVector());
13697 CHECK(test_1h.IsScalar());
13698 CHECK(!test_1h.IsFPRegister());
13699
13700 VRegister test_1b(VRegister::Create(0, 8, 1));
13701 CHECK(!test_1b.Is8B());
13702 CHECK(!test_1b.Is16B());
13703 CHECK(!test_1b.Is4H());
13704 CHECK(!test_1b.Is8H());
13705 CHECK(!test_1b.Is2S());
13706 CHECK(!test_1b.Is4S());
13707 CHECK(!test_1b.Is1D());
13708 CHECK(!test_1b.Is2D());
13709 CHECK(!test_1b.Is1S());
13710 CHECK(!test_1b.Is1H());
13711 CHECK(test_1b.Is1B());
13712 CHECK(!test_1b.IsVector());
13713 CHECK(test_1b.IsScalar());
13714 CHECK(!test_1b.IsFPRegister());
13715
13716 VRegister test_breg_from_code(VRegister::BRegFromCode(0));
13717 CHECK_EQ(test_breg_from_code.SizeInBits(), kBRegSizeInBits);
13718
13719 VRegister test_hreg_from_code(VRegister::HRegFromCode(0));
13720 CHECK_EQ(test_hreg_from_code.SizeInBits(), kHRegSizeInBits);
13721
13722 VRegister test_sreg_from_code(VRegister::SRegFromCode(0));
13723 CHECK_EQ(test_sreg_from_code.SizeInBits(), kSRegSizeInBits);
13724
13725 VRegister test_dreg_from_code(VRegister::DRegFromCode(0));
13726 CHECK_EQ(test_dreg_from_code.SizeInBits(), kDRegSizeInBits);
13727
13728 VRegister test_qreg_from_code(VRegister::QRegFromCode(0));
13729 CHECK_EQ(test_qreg_from_code.SizeInBits(), kQRegSizeInBits);
13730
13731 VRegister test_vreg_from_code(VRegister::VRegFromCode(0));
13732 CHECK_EQ(test_vreg_from_code.SizeInBits(), kVRegSizeInBits);
13733
13734 VRegister test_v8b(VRegister::VRegFromCode(31).V8B());
13735 CHECK_EQ(test_v8b.code(), 31);
13736 CHECK_EQ(test_v8b.SizeInBits(), kDRegSizeInBits);
13737 CHECK(test_v8b.IsLaneSizeB());
13738 CHECK(!test_v8b.IsLaneSizeH());
13739 CHECK(!test_v8b.IsLaneSizeS());
13740 CHECK(!test_v8b.IsLaneSizeD());
13741 CHECK_EQ(test_v8b.LaneSizeInBits(), 8U);
13742
13743 VRegister test_v16b(VRegister::VRegFromCode(31).V16B());
13744 CHECK_EQ(test_v16b.code(), 31);
13745 CHECK_EQ(test_v16b.SizeInBits(), kQRegSizeInBits);
13746 CHECK(test_v16b.IsLaneSizeB());
13747 CHECK(!test_v16b.IsLaneSizeH());
13748 CHECK(!test_v16b.IsLaneSizeS());
13749 CHECK(!test_v16b.IsLaneSizeD());
13750 CHECK_EQ(test_v16b.LaneSizeInBits(), 8U);
13751
13752 VRegister test_v4h(VRegister::VRegFromCode(31).V4H());
13753 CHECK_EQ(test_v4h.code(), 31);
13754 CHECK_EQ(test_v4h.SizeInBits(), kDRegSizeInBits);
13755 CHECK(!test_v4h.IsLaneSizeB());
13756 CHECK(test_v4h.IsLaneSizeH());
13757 CHECK(!test_v4h.IsLaneSizeS());
13758 CHECK(!test_v4h.IsLaneSizeD());
13759 CHECK_EQ(test_v4h.LaneSizeInBits(), 16U);
13760
13761 VRegister test_v8h(VRegister::VRegFromCode(31).V8H());
13762 CHECK_EQ(test_v8h.code(), 31);
13763 CHECK_EQ(test_v8h.SizeInBits(), kQRegSizeInBits);
13764 CHECK(!test_v8h.IsLaneSizeB());
13765 CHECK(test_v8h.IsLaneSizeH());
13766 CHECK(!test_v8h.IsLaneSizeS());
13767 CHECK(!test_v8h.IsLaneSizeD());
13768 CHECK_EQ(test_v8h.LaneSizeInBits(), 16U);
13769
13770 VRegister test_v2s(VRegister::VRegFromCode(31).V2S());
13771 CHECK_EQ(test_v2s.code(), 31);
13772 CHECK_EQ(test_v2s.SizeInBits(), kDRegSizeInBits);
13773 CHECK(!test_v2s.IsLaneSizeB());
13774 CHECK(!test_v2s.IsLaneSizeH());
13775 CHECK(test_v2s.IsLaneSizeS());
13776 CHECK(!test_v2s.IsLaneSizeD());
13777 CHECK_EQ(test_v2s.LaneSizeInBits(), 32U);
13778
13779 VRegister test_v4s(VRegister::VRegFromCode(31).V4S());
13780 CHECK_EQ(test_v4s.code(), 31);
13781 CHECK_EQ(test_v4s.SizeInBits(), kQRegSizeInBits);
13782 CHECK(!test_v4s.IsLaneSizeB());
13783 CHECK(!test_v4s.IsLaneSizeH());
13784 CHECK(test_v4s.IsLaneSizeS());
13785 CHECK(!test_v4s.IsLaneSizeD());
13786 CHECK_EQ(test_v4s.LaneSizeInBits(), 32U);
13787
13788 VRegister test_v1d(VRegister::VRegFromCode(31).V1D());
13789 CHECK_EQ(test_v1d.code(), 31);
13790 CHECK_EQ(test_v1d.SizeInBits(), kDRegSizeInBits);
13791 CHECK(!test_v1d.IsLaneSizeB());
13792 CHECK(!test_v1d.IsLaneSizeH());
13793 CHECK(!test_v1d.IsLaneSizeS());
13794 CHECK(test_v1d.IsLaneSizeD());
13795 CHECK_EQ(test_v1d.LaneSizeInBits(), 64U);
13796
13797 VRegister test_v2d(VRegister::VRegFromCode(31).V2D());
13798 CHECK_EQ(test_v2d.code(), 31);
13799 CHECK_EQ(test_v2d.SizeInBits(), kQRegSizeInBits);
13800 CHECK(!test_v2d.IsLaneSizeB());
13801 CHECK(!test_v2d.IsLaneSizeH());
13802 CHECK(!test_v2d.IsLaneSizeS());
13803 CHECK(test_v2d.IsLaneSizeD());
13804 CHECK_EQ(test_v2d.LaneSizeInBits(), 64U);
13805
13806 CHECK(test_v1d.IsSameFormat(test_v1d));
13807 CHECK(test_v2d.IsSameFormat(test_v2d));
13808 CHECK(!test_v1d.IsSameFormat(test_v2d));
13809 CHECK(!test_v2s.IsSameFormat(test_v2d));
13810 }
9948 13811
9949 TEST(isvalid) { 13812 TEST(isvalid) {
9950 // This test doesn't generate any code, but it verifies some invariants 13813 // This test doesn't generate any code, but it verifies some invariants
9951 // related to IsValid(). 13814 // related to IsValid().
9952 CHECK(!NoReg.IsValid()); 13815 CHECK(!NoReg.IsValid());
9953 CHECK(!NoFPReg.IsValid()); 13816 CHECK(!NoVReg.IsValid());
9954 CHECK(!NoCPUReg.IsValid()); 13817 CHECK(!NoCPUReg.IsValid());
9955 13818
9956 CHECK(x0.IsValid()); 13819 CHECK(x0.IsValid());
9957 CHECK(w0.IsValid()); 13820 CHECK(w0.IsValid());
9958 CHECK(x30.IsValid()); 13821 CHECK(x30.IsValid());
9959 CHECK(w30.IsValid()); 13822 CHECK(w30.IsValid());
9960 CHECK(xzr.IsValid()); 13823 CHECK(xzr.IsValid());
9961 CHECK(wzr.IsValid()); 13824 CHECK(wzr.IsValid());
9962 13825
9963 CHECK(csp.IsValid()); 13826 CHECK(csp.IsValid());
9964 CHECK(wcsp.IsValid()); 13827 CHECK(wcsp.IsValid());
9965 13828
9966 CHECK(d0.IsValid()); 13829 CHECK(d0.IsValid());
9967 CHECK(s0.IsValid()); 13830 CHECK(s0.IsValid());
9968 CHECK(d31.IsValid()); 13831 CHECK(d31.IsValid());
9969 CHECK(s31.IsValid()); 13832 CHECK(s31.IsValid());
9970 13833
9971 CHECK(x0.IsValidRegister()); 13834 CHECK(x0.IsValidRegister());
9972 CHECK(w0.IsValidRegister()); 13835 CHECK(w0.IsValidRegister());
9973 CHECK(xzr.IsValidRegister()); 13836 CHECK(xzr.IsValidRegister());
9974 CHECK(wzr.IsValidRegister()); 13837 CHECK(wzr.IsValidRegister());
9975 CHECK(csp.IsValidRegister()); 13838 CHECK(csp.IsValidRegister());
9976 CHECK(wcsp.IsValidRegister()); 13839 CHECK(wcsp.IsValidRegister());
9977 CHECK(!x0.IsValidFPRegister()); 13840 CHECK(!x0.IsValidVRegister());
9978 CHECK(!w0.IsValidFPRegister()); 13841 CHECK(!w0.IsValidVRegister());
9979 CHECK(!xzr.IsValidFPRegister()); 13842 CHECK(!xzr.IsValidVRegister());
9980 CHECK(!wzr.IsValidFPRegister()); 13843 CHECK(!wzr.IsValidVRegister());
9981 CHECK(!csp.IsValidFPRegister()); 13844 CHECK(!csp.IsValidVRegister());
9982 CHECK(!wcsp.IsValidFPRegister()); 13845 CHECK(!wcsp.IsValidVRegister());
9983 13846
9984 CHECK(d0.IsValidFPRegister()); 13847 CHECK(d0.IsValidVRegister());
9985 CHECK(s0.IsValidFPRegister()); 13848 CHECK(s0.IsValidVRegister());
9986 CHECK(!d0.IsValidRegister()); 13849 CHECK(!d0.IsValidRegister());
9987 CHECK(!s0.IsValidRegister()); 13850 CHECK(!s0.IsValidRegister());
9988 13851
9989 // Test the same as before, but using CPURegister types. This shouldn't make 13852 // Test the same as before, but using CPURegister types. This shouldn't make
9990 // any difference. 13853 // any difference.
9991 CHECK(static_cast<CPURegister>(x0).IsValid()); 13854 CHECK(static_cast<CPURegister>(x0).IsValid());
9992 CHECK(static_cast<CPURegister>(w0).IsValid()); 13855 CHECK(static_cast<CPURegister>(w0).IsValid());
9993 CHECK(static_cast<CPURegister>(x30).IsValid()); 13856 CHECK(static_cast<CPURegister>(x30).IsValid());
9994 CHECK(static_cast<CPURegister>(w30).IsValid()); 13857 CHECK(static_cast<CPURegister>(w30).IsValid());
9995 CHECK(static_cast<CPURegister>(xzr).IsValid()); 13858 CHECK(static_cast<CPURegister>(xzr).IsValid());
9996 CHECK(static_cast<CPURegister>(wzr).IsValid()); 13859 CHECK(static_cast<CPURegister>(wzr).IsValid());
9997 13860
9998 CHECK(static_cast<CPURegister>(csp).IsValid()); 13861 CHECK(static_cast<CPURegister>(csp).IsValid());
9999 CHECK(static_cast<CPURegister>(wcsp).IsValid()); 13862 CHECK(static_cast<CPURegister>(wcsp).IsValid());
10000 13863
10001 CHECK(static_cast<CPURegister>(d0).IsValid()); 13864 CHECK(static_cast<CPURegister>(d0).IsValid());
10002 CHECK(static_cast<CPURegister>(s0).IsValid()); 13865 CHECK(static_cast<CPURegister>(s0).IsValid());
10003 CHECK(static_cast<CPURegister>(d31).IsValid()); 13866 CHECK(static_cast<CPURegister>(d31).IsValid());
10004 CHECK(static_cast<CPURegister>(s31).IsValid()); 13867 CHECK(static_cast<CPURegister>(s31).IsValid());
10005 13868
10006 CHECK(static_cast<CPURegister>(x0).IsValidRegister()); 13869 CHECK(static_cast<CPURegister>(x0).IsValidRegister());
10007 CHECK(static_cast<CPURegister>(w0).IsValidRegister()); 13870 CHECK(static_cast<CPURegister>(w0).IsValidRegister());
10008 CHECK(static_cast<CPURegister>(xzr).IsValidRegister()); 13871 CHECK(static_cast<CPURegister>(xzr).IsValidRegister());
10009 CHECK(static_cast<CPURegister>(wzr).IsValidRegister()); 13872 CHECK(static_cast<CPURegister>(wzr).IsValidRegister());
10010 CHECK(static_cast<CPURegister>(csp).IsValidRegister()); 13873 CHECK(static_cast<CPURegister>(csp).IsValidRegister());
10011 CHECK(static_cast<CPURegister>(wcsp).IsValidRegister()); 13874 CHECK(static_cast<CPURegister>(wcsp).IsValidRegister());
10012 CHECK(!static_cast<CPURegister>(x0).IsValidFPRegister()); 13875 CHECK(!static_cast<CPURegister>(x0).IsValidVRegister());
10013 CHECK(!static_cast<CPURegister>(w0).IsValidFPRegister()); 13876 CHECK(!static_cast<CPURegister>(w0).IsValidVRegister());
10014 CHECK(!static_cast<CPURegister>(xzr).IsValidFPRegister()); 13877 CHECK(!static_cast<CPURegister>(xzr).IsValidVRegister());
10015 CHECK(!static_cast<CPURegister>(wzr).IsValidFPRegister()); 13878 CHECK(!static_cast<CPURegister>(wzr).IsValidVRegister());
10016 CHECK(!static_cast<CPURegister>(csp).IsValidFPRegister()); 13879 CHECK(!static_cast<CPURegister>(csp).IsValidVRegister());
10017 CHECK(!static_cast<CPURegister>(wcsp).IsValidFPRegister()); 13880 CHECK(!static_cast<CPURegister>(wcsp).IsValidVRegister());
10018 13881
10019 CHECK(static_cast<CPURegister>(d0).IsValidFPRegister()); 13882 CHECK(static_cast<CPURegister>(d0).IsValidVRegister());
10020 CHECK(static_cast<CPURegister>(s0).IsValidFPRegister()); 13883 CHECK(static_cast<CPURegister>(s0).IsValidVRegister());
10021 CHECK(!static_cast<CPURegister>(d0).IsValidRegister()); 13884 CHECK(!static_cast<CPURegister>(d0).IsValidRegister());
10022 CHECK(!static_cast<CPURegister>(s0).IsValidRegister()); 13885 CHECK(!static_cast<CPURegister>(s0).IsValidRegister());
10023 } 13886 }
10024 13887
13888 TEST(areconsecutive) {
13889 // This test generates no code; it just checks that AreConsecutive works.
13890 CHECK(AreConsecutive(b0, NoVReg));
13891 CHECK(AreConsecutive(b1, b2));
13892 CHECK(AreConsecutive(b3, b4, b5));
13893 CHECK(AreConsecutive(b6, b7, b8, b9));
13894 CHECK(AreConsecutive(h10, NoVReg));
13895 CHECK(AreConsecutive(h11, h12));
13896 CHECK(AreConsecutive(h13, h14, h15));
13897 CHECK(AreConsecutive(h16, h17, h18, h19));
13898 CHECK(AreConsecutive(s20, NoVReg));
13899 CHECK(AreConsecutive(s21, s22));
13900 CHECK(AreConsecutive(s23, s24, s25));
13901 CHECK(AreConsecutive(s26, s27, s28, s29));
13902 CHECK(AreConsecutive(d30, NoVReg));
13903 CHECK(AreConsecutive(d31, d0));
13904 CHECK(AreConsecutive(d1, d2, d3));
13905 CHECK(AreConsecutive(d4, d5, d6, d7));
13906 CHECK(AreConsecutive(q8, NoVReg));
13907 CHECK(AreConsecutive(q9, q10));
13908 CHECK(AreConsecutive(q11, q12, q13));
13909 CHECK(AreConsecutive(q14, q15, q16, q17));
13910 CHECK(AreConsecutive(v18, NoVReg));
13911 CHECK(AreConsecutive(v19, v20));
13912 CHECK(AreConsecutive(v21, v22, v23));
13913 CHECK(AreConsecutive(v24, v25, v26, v27));
13914 CHECK(AreConsecutive(b29, h30));
13915 CHECK(AreConsecutive(s31, d0, q1));
13916 CHECK(AreConsecutive(v2, b3, h4, s5));
13917
13918 CHECK(AreConsecutive(b26, b27, NoVReg, NoVReg));
13919 CHECK(AreConsecutive(h28, NoVReg, NoVReg, NoVReg));
13920
13921 CHECK(!AreConsecutive(b0, b2));
13922 CHECK(!AreConsecutive(h1, h0));
13923 CHECK(!AreConsecutive(s31, s1));
13924 CHECK(!AreConsecutive(d12, d12));
13925 CHECK(!AreConsecutive(q31, q1));
13926
13927 CHECK(!AreConsecutive(b5, b4, b3));
13928 CHECK(!AreConsecutive(h15, h16, h15, h14));
13929 CHECK(!AreConsecutive(s25, s24, s23, s22));
13930 CHECK(!AreConsecutive(d5, d6, d7, d6));
13931 CHECK(!AreConsecutive(q15, q16, q17, q6));
13932
13933 CHECK(!AreConsecutive(b0, b1, b3));
13934 CHECK(!AreConsecutive(h4, h5, h6, h6));
13935 CHECK(!AreConsecutive(d15, d16, d18, NoVReg));
13936 CHECK(!AreConsecutive(s28, s30, NoVReg, NoVReg));
13937 }
10025 13938
10026 TEST(cpureglist_utils_x) { 13939 TEST(cpureglist_utils_x) {
10027 // This test doesn't generate any code, but it verifies the behaviour of 13940 // This test doesn't generate any code, but it verifies the behaviour of
10028 // the CPURegList utility methods. 13941 // the CPURegList utility methods.
10029 13942
10030 // Test a list of X registers. 13943 // Test a list of X registers.
10031 CPURegList test(x0, x1, x2, x3); 13944 CPURegList test(x0, x1, x2, x3);
10032 13945
10033 CHECK(test.IncludesAliasOf(x0)); 13946 CHECK(test.IncludesAliasOf(x0));
10034 CHECK(test.IncludesAliasOf(x1)); 13947 CHECK(test.IncludesAliasOf(x1));
(...skipping 206 matching lines...)
10241 14154
10242 TEST(cpureglist_utils_empty) { 14155 TEST(cpureglist_utils_empty) {
10243 // This test doesn't generate any code, but it verifies the behaviour of 14156 // This test doesn't generate any code, but it verifies the behaviour of
10244 // the CPURegList utility methods. 14157 // the CPURegList utility methods.
10245 14158
10246 // Test an empty list. 14159 // Test an empty list.
10247 // Empty lists can have type and size properties. Check that we can create 14160 // Empty lists can have type and size properties. Check that we can create
10248 // them, and that they are empty. 14161 // them, and that they are empty.
10249 CPURegList reg32(CPURegister::kRegister, kWRegSizeInBits, 0); 14162 CPURegList reg32(CPURegister::kRegister, kWRegSizeInBits, 0);
10250 CPURegList reg64(CPURegister::kRegister, kXRegSizeInBits, 0); 14163 CPURegList reg64(CPURegister::kRegister, kXRegSizeInBits, 0);
10251 CPURegList fpreg32(CPURegister::kFPRegister, kSRegSizeInBits, 0); 14164 CPURegList fpreg32(CPURegister::kVRegister, kSRegSizeInBits, 0);
10252 CPURegList fpreg64(CPURegister::kFPRegister, kDRegSizeInBits, 0); 14165 CPURegList fpreg64(CPURegister::kVRegister, kDRegSizeInBits, 0);
10253 14166
10254 CHECK(reg32.IsEmpty()); 14167 CHECK(reg32.IsEmpty());
10255 CHECK(reg64.IsEmpty()); 14168 CHECK(reg64.IsEmpty());
10256 CHECK(fpreg32.IsEmpty()); 14169 CHECK(fpreg32.IsEmpty());
10257 CHECK(fpreg64.IsEmpty()); 14170 CHECK(fpreg64.IsEmpty());
10258 14171
10259 CHECK(reg32.PopLowestIndex().IsNone()); 14172 CHECK(reg32.PopLowestIndex().IsNone());
10260 CHECK(reg64.PopLowestIndex().IsNone()); 14173 CHECK(reg64.PopLowestIndex().IsNone());
10261 CHECK(fpreg32.PopLowestIndex().IsNone()); 14174 CHECK(fpreg32.PopLowestIndex().IsNone());
10262 CHECK(fpreg64.PopLowestIndex().IsNone()); 14175 CHECK(fpreg64.PopLowestIndex().IsNone());
(...skipping 317 matching lines...)
10580 14493
10581 RUN(); 14494 RUN();
10582 14495
10583 TEARDOWN(); 14496 TEARDOWN();
10584 } 14497 }
10585 14498
10586 14499
10587 TEST(process_nan_double) { 14500 TEST(process_nan_double) {
10588 INIT_V8(); 14501 INIT_V8();
10589 // Make sure that NaN propagation works correctly. 14502 // Make sure that NaN propagation works correctly.
10590 double sn = rawbits_to_double(0x7ff5555511111111); 14503 double sn = bit_cast<double>(0x7ff5555511111111);
10591 double qn = rawbits_to_double(0x7ffaaaaa11111111); 14504 double qn = bit_cast<double>(0x7ffaaaaa11111111);
10592 CHECK(IsSignallingNaN(sn)); 14505 CHECK(IsSignallingNaN(sn));
10593 CHECK(IsQuietNaN(qn)); 14506 CHECK(IsQuietNaN(qn));
10594 14507
10595 // The input NaNs after passing through ProcessNaN. 14508 // The input NaNs after passing through ProcessNaN.
10596 double sn_proc = rawbits_to_double(0x7ffd555511111111); 14509 double sn_proc = bit_cast<double>(0x7ffd555511111111);
10597 double qn_proc = qn; 14510 double qn_proc = qn;
10598 CHECK(IsQuietNaN(sn_proc)); 14511 CHECK(IsQuietNaN(sn_proc));
10599 CHECK(IsQuietNaN(qn_proc)); 14512 CHECK(IsQuietNaN(qn_proc));
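// For reference: in these encodings the exponent field is all ones and the
// fraction is non-zero, and bit 51 (the top fraction bit) distinguishes the
// two kinds - clear for a signalling NaN (0x7ff5...), set for a quiet NaN
// (0x7ffa...). ProcessNaN quiets a signalling NaN by setting that bit, which
// is why 0x7ff5555511111111 is expected to become 0x7ffd555511111111.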
10600 14513
10601 SETUP(); 14514 SETUP();
10602 START(); 14515 START();
10603 14516
10604 // Execute a number of instructions which all use ProcessNaN, and check that 14517 // Execute a number of instructions which all use ProcessNaN, and check that
10605 // they all handle the NaN correctly. 14518 // they all handle the NaN correctly.
10606 __ Fmov(d0, sn); 14519 __ Fmov(d0, sn);
(...skipping 19 matching lines...)
10626 __ Fsqrt(d14, d10); 14539 __ Fsqrt(d14, d10);
10627 __ Frinta(d15, d10); 14540 __ Frinta(d15, d10);
10628 __ Frintn(d16, d10); 14541 __ Frintn(d16, d10);
10629 __ Frintz(d17, d10); 14542 __ Frintz(d17, d10);
10630 14543
10631 // The behaviour of fcvt is checked in TEST(fcvt_sd). 14544 // The behaviour of fcvt is checked in TEST(fcvt_sd).
10632 14545
10633 END(); 14546 END();
10634 RUN(); 14547 RUN();
10635 14548
10636 uint64_t qn_raw = double_to_rawbits(qn); 14549 uint64_t qn_raw = bit_cast<uint64_t>(qn);
10637 uint64_t sn_raw = double_to_rawbits(sn); 14550 uint64_t sn_raw = bit_cast<uint64_t>(sn);
10638 14551
10639 // - Signalling NaN 14552 // - Signalling NaN
10640 CHECK_EQUAL_FP64(sn, d1); 14553 CHECK_EQUAL_FP64(sn, d1);
10641 CHECK_EQUAL_FP64(rawbits_to_double(sn_raw & ~kDSignMask), d2); 14554 CHECK_EQUAL_FP64(bit_cast<double>(sn_raw & ~kDSignMask), d2);
10642 CHECK_EQUAL_FP64(rawbits_to_double(sn_raw ^ kDSignMask), d3); 14555 CHECK_EQUAL_FP64(bit_cast<double>(sn_raw ^ kDSignMask), d3);
10643 // - Quiet NaN 14556 // - Quiet NaN
10644 CHECK_EQUAL_FP64(qn, d11); 14557 CHECK_EQUAL_FP64(qn, d11);
10645 CHECK_EQUAL_FP64(rawbits_to_double(qn_raw & ~kDSignMask), d12); 14558 CHECK_EQUAL_FP64(bit_cast<double>(qn_raw & ~kDSignMask), d12);
10646 CHECK_EQUAL_FP64(rawbits_to_double(qn_raw ^ kDSignMask), d13); 14559 CHECK_EQUAL_FP64(bit_cast<double>(qn_raw ^ kDSignMask), d13);
10647 14560
10648 // - Signalling NaN 14561 // - Signalling NaN
10649 CHECK_EQUAL_FP64(sn_proc, d4); 14562 CHECK_EQUAL_FP64(sn_proc, d4);
10650 CHECK_EQUAL_FP64(sn_proc, d5); 14563 CHECK_EQUAL_FP64(sn_proc, d5);
10651 CHECK_EQUAL_FP64(sn_proc, d6); 14564 CHECK_EQUAL_FP64(sn_proc, d6);
10652 CHECK_EQUAL_FP64(sn_proc, d7); 14565 CHECK_EQUAL_FP64(sn_proc, d7);
10653 // - Quiet NaN 14566 // - Quiet NaN
10654 CHECK_EQUAL_FP64(qn_proc, d14); 14567 CHECK_EQUAL_FP64(qn_proc, d14);
10655 CHECK_EQUAL_FP64(qn_proc, d15); 14568 CHECK_EQUAL_FP64(qn_proc, d15);
10656 CHECK_EQUAL_FP64(qn_proc, d16); 14569 CHECK_EQUAL_FP64(qn_proc, d16);
10657 CHECK_EQUAL_FP64(qn_proc, d17); 14570 CHECK_EQUAL_FP64(qn_proc, d17);
10658 14571
10659 TEARDOWN(); 14572 TEARDOWN();
10660 } 14573 }
10661 14574
10662 14575
10663 TEST(process_nan_float) { 14576 TEST(process_nan_float) {
10664 INIT_V8(); 14577 INIT_V8();
10665 // Make sure that NaN propagation works correctly. 14578 // Make sure that NaN propagation works correctly.
10666 float sn = rawbits_to_float(0x7f951111); 14579 float sn = bit_cast<float>(0x7f951111);
10667 float qn = rawbits_to_float(0x7fea1111); 14580 float qn = bit_cast<float>(0x7fea1111);
10668 CHECK(IsSignallingNaN(sn)); 14581 CHECK(IsSignallingNaN(sn));
10669 CHECK(IsQuietNaN(qn)); 14582 CHECK(IsQuietNaN(qn));
10670 14583
10671 // The input NaNs after passing through ProcessNaN. 14584 // The input NaNs after passing through ProcessNaN.
10672 float sn_proc = rawbits_to_float(0x7fd51111); 14585 float sn_proc = bit_cast<float>(0x7fd51111);
10673 float qn_proc = qn; 14586 float qn_proc = qn;
10674 CHECK(IsQuietNaN(sn_proc)); 14587 CHECK(IsQuietNaN(sn_proc));
10675 CHECK(IsQuietNaN(qn_proc)); 14588 CHECK(IsQuietNaN(qn_proc));
10676 14589
10677 SETUP(); 14590 SETUP();
10678 START(); 14591 START();
10679 14592
10680 // Execute a number of instructions which all use ProcessNaN, and check that 14593 // Execute a number of instructions which all use ProcessNaN, and check that
10681 // they all handle the NaN correctly. 14594 // they all handle the NaN correctly.
10682 __ Fmov(s0, sn); 14595 __ Fmov(s0, sn);
(...skipping 19 matching lines...)
10702 __ Fsqrt(s14, s10); 14615 __ Fsqrt(s14, s10);
10703 __ Frinta(s15, s10); 14616 __ Frinta(s15, s10);
10704 __ Frintn(s16, s10); 14617 __ Frintn(s16, s10);
10705 __ Frintz(s17, s10); 14618 __ Frintz(s17, s10);
10706 14619
10707 // The behaviour of fcvt is checked in TEST(fcvt_sd). 14620 // The behaviour of fcvt is checked in TEST(fcvt_sd).
10708 14621
10709 END(); 14622 END();
10710 RUN(); 14623 RUN();
10711 14624
10712 uint32_t qn_raw = float_to_rawbits(qn); 14625 uint32_t qn_raw = bit_cast<uint32_t>(qn);
10713 uint32_t sn_raw = float_to_rawbits(sn); 14626 uint32_t sn_raw = bit_cast<uint32_t>(sn);
14627 uint32_t sign_mask = static_cast<uint32_t>(kSSignMask);
10714 14628
10715 // - Signalling NaN 14629 // - Signalling NaN
10716 CHECK_EQUAL_FP32(sn, s1); 14630 CHECK_EQUAL_FP32(sn, s1);
10717 CHECK_EQUAL_FP32(rawbits_to_float(sn_raw & ~kSSignMask), s2); 14631 CHECK_EQUAL_FP32(bit_cast<float>(sn_raw & ~sign_mask), s2);
10718 CHECK_EQUAL_FP32(rawbits_to_float(sn_raw ^ kSSignMask), s3); 14632 CHECK_EQUAL_FP32(bit_cast<float>(sn_raw ^ sign_mask), s3);
10719 // - Quiet NaN 14633 // - Quiet NaN
10720 CHECK_EQUAL_FP32(qn, s11); 14634 CHECK_EQUAL_FP32(qn, s11);
10721 CHECK_EQUAL_FP32(rawbits_to_float(qn_raw & ~kSSignMask), s12); 14635 CHECK_EQUAL_FP32(bit_cast<float>(qn_raw & ~sign_mask), s12);
10722 CHECK_EQUAL_FP32(rawbits_to_float(qn_raw ^ kSSignMask), s13); 14636 CHECK_EQUAL_FP32(bit_cast<float>(qn_raw ^ sign_mask), s13);
10723 14637
10724 // - Signalling NaN 14638 // - Signalling NaN
10725 CHECK_EQUAL_FP32(sn_proc, s4); 14639 CHECK_EQUAL_FP32(sn_proc, s4);
10726 CHECK_EQUAL_FP32(sn_proc, s5); 14640 CHECK_EQUAL_FP32(sn_proc, s5);
10727 CHECK_EQUAL_FP32(sn_proc, s6); 14641 CHECK_EQUAL_FP32(sn_proc, s6);
10728 CHECK_EQUAL_FP32(sn_proc, s7); 14642 CHECK_EQUAL_FP32(sn_proc, s7);
10729 // - Quiet NaN 14643 // - Quiet NaN
10730 CHECK_EQUAL_FP32(qn_proc, s14); 14644 CHECK_EQUAL_FP32(qn_proc, s14);
10731 CHECK_EQUAL_FP32(qn_proc, s15); 14645 CHECK_EQUAL_FP32(qn_proc, s15);
10732 CHECK_EQUAL_FP32(qn_proc, s16); 14646 CHECK_EQUAL_FP32(qn_proc, s16);
(...skipping 32 matching lines...)
10765 CHECK_EQUAL_FP64(expected, d6); 14679 CHECK_EQUAL_FP64(expected, d6);
10766 CHECK_EQUAL_FP64(expected, d7); 14680 CHECK_EQUAL_FP64(expected, d7);
10767 14681
10768 TEARDOWN(); 14682 TEARDOWN();
10769 } 14683 }
10770 14684
10771 14685
10772 TEST(process_nans_double) { 14686 TEST(process_nans_double) {
10773 INIT_V8(); 14687 INIT_V8();
10774 // Make sure that NaN propagation works correctly. 14688 // Make sure that NaN propagation works correctly.
10775 double sn = rawbits_to_double(0x7ff5555511111111); 14689 double sn = bit_cast<double>(0x7ff5555511111111);
10776 double sm = rawbits_to_double(0x7ff5555522222222); 14690 double sm = bit_cast<double>(0x7ff5555522222222);
10777 double qn = rawbits_to_double(0x7ffaaaaa11111111); 14691 double qn = bit_cast<double>(0x7ffaaaaa11111111);
10778 double qm = rawbits_to_double(0x7ffaaaaa22222222); 14692 double qm = bit_cast<double>(0x7ffaaaaa22222222);
10779 CHECK(IsSignallingNaN(sn)); 14693 CHECK(IsSignallingNaN(sn));
10780 CHECK(IsSignallingNaN(sm)); 14694 CHECK(IsSignallingNaN(sm));
10781 CHECK(IsQuietNaN(qn)); 14695 CHECK(IsQuietNaN(qn));
10782 CHECK(IsQuietNaN(qm)); 14696 CHECK(IsQuietNaN(qm));
10783 14697
10784 // The input NaNs after passing through ProcessNaN. 14698 // The input NaNs after passing through ProcessNaN.
10785 double sn_proc = rawbits_to_double(0x7ffd555511111111); 14699 double sn_proc = bit_cast<double>(0x7ffd555511111111);
10786 double sm_proc = rawbits_to_double(0x7ffd555522222222); 14700 double sm_proc = bit_cast<double>(0x7ffd555522222222);
10787 double qn_proc = qn; 14701 double qn_proc = qn;
10788 double qm_proc = qm; 14702 double qm_proc = qm;
10789 CHECK(IsQuietNaN(sn_proc)); 14703 CHECK(IsQuietNaN(sn_proc));
10790 CHECK(IsQuietNaN(sm_proc)); 14704 CHECK(IsQuietNaN(sm_proc));
10791 CHECK(IsQuietNaN(qn_proc)); 14705 CHECK(IsQuietNaN(qn_proc));
10792 CHECK(IsQuietNaN(qm_proc)); 14706 CHECK(IsQuietNaN(qm_proc));
10793 14707
10794 // Quiet NaNs are propagated. 14708 // Quiet NaNs are propagated.
10795 ProcessNaNsHelper(qn, 0, qn_proc); 14709 ProcessNaNsHelper(qn, 0, qn_proc);
10796 ProcessNaNsHelper(0, qm, qm_proc); 14710 ProcessNaNsHelper(0, qm, qm_proc);
(...skipping 40 matching lines...)
10837 CHECK_EQUAL_FP32(expected, s6); 14751 CHECK_EQUAL_FP32(expected, s6);
10838 CHECK_EQUAL_FP32(expected, s7); 14752 CHECK_EQUAL_FP32(expected, s7);
10839 14753
10840 TEARDOWN(); 14754 TEARDOWN();
10841 } 14755 }
10842 14756
10843 14757
10844 TEST(process_nans_float) { 14758 TEST(process_nans_float) {
10845 INIT_V8(); 14759 INIT_V8();
10846 // Make sure that NaN propagation works correctly. 14760 // Make sure that NaN propagation works correctly.
10847 float sn = rawbits_to_float(0x7f951111); 14761 float sn = bit_cast<float>(0x7f951111);
10848 float sm = rawbits_to_float(0x7f952222); 14762 float sm = bit_cast<float>(0x7f952222);
10849 float qn = rawbits_to_float(0x7fea1111); 14763 float qn = bit_cast<float>(0x7fea1111);
10850 float qm = rawbits_to_float(0x7fea2222); 14764 float qm = bit_cast<float>(0x7fea2222);
10851 CHECK(IsSignallingNaN(sn)); 14765 CHECK(IsSignallingNaN(sn));
10852 CHECK(IsSignallingNaN(sm)); 14766 CHECK(IsSignallingNaN(sm));
10853 CHECK(IsQuietNaN(qn)); 14767 CHECK(IsQuietNaN(qn));
10854 CHECK(IsQuietNaN(qm)); 14768 CHECK(IsQuietNaN(qm));
10855 14769
10856 // The input NaNs after passing through ProcessNaN. 14770 // The input NaNs after passing through ProcessNaN.
10857 float sn_proc = rawbits_to_float(0x7fd51111); 14771 float sn_proc = bit_cast<float>(0x7fd51111);
10858 float sm_proc = rawbits_to_float(0x7fd52222); 14772 float sm_proc = bit_cast<float>(0x7fd52222);
10859 float qn_proc = qn; 14773 float qn_proc = qn;
10860 float qm_proc = qm; 14774 float qm_proc = qm;
10861 CHECK(IsQuietNaN(sn_proc)); 14775 CHECK(IsQuietNaN(sn_proc));
10862 CHECK(IsQuietNaN(sm_proc)); 14776 CHECK(IsQuietNaN(sm_proc));
10863 CHECK(IsQuietNaN(qn_proc)); 14777 CHECK(IsQuietNaN(qn_proc));
10864 CHECK(IsQuietNaN(qm_proc)); 14778 CHECK(IsQuietNaN(qm_proc));
10865 14779
10866 // Quiet NaNs are propagated. 14780 // Quiet NaNs are propagated.
10867 ProcessNaNsHelper(qn, 0, qn_proc); 14781 ProcessNaNsHelper(qn, 0, qn_proc);
10868 ProcessNaNsHelper(0, qm, qm_proc); 14782 ProcessNaNsHelper(0, qm, qm_proc);
(...skipping 61 matching lines...)
10930 __ Fnmadd(s26, s0, s1, s2); 14844 __ Fnmadd(s26, s0, s1, s2);
10931 __ Fnmsub(s27, s0, s1, s2); 14845 __ Fnmsub(s27, s0, s1, s2);
10932 14846
10933 // Restore FPCR. 14847 // Restore FPCR.
10934 __ Msr(FPCR, x0); 14848 __ Msr(FPCR, x0);
10935 14849
10936 END(); 14850 END();
10937 RUN(); 14851 RUN();
10938 14852
10939 if (test_1op) { 14853 if (test_1op) {
10940 uint32_t n_raw = float_to_rawbits(n); 14854 uint32_t n_raw = bit_cast<uint32_t>(n);
14855 uint32_t sign_mask = static_cast<uint32_t>(kSSignMask);
10941 CHECK_EQUAL_FP32(n, s10); 14856 CHECK_EQUAL_FP32(n, s10);
10942 CHECK_EQUAL_FP32(rawbits_to_float(n_raw & ~kSSignMask), s11); 14857 CHECK_EQUAL_FP32(bit_cast<float>(n_raw & ~sign_mask), s11);
10943 CHECK_EQUAL_FP32(rawbits_to_float(n_raw ^ kSSignMask), s12); 14858 CHECK_EQUAL_FP32(bit_cast<float>(n_raw ^ sign_mask), s12);
10944 CHECK_EQUAL_FP32(kFP32DefaultNaN, s13); 14859 CHECK_EQUAL_FP32(kFP32DefaultNaN, s13);
10945 CHECK_EQUAL_FP32(kFP32DefaultNaN, s14); 14860 CHECK_EQUAL_FP32(kFP32DefaultNaN, s14);
10946 CHECK_EQUAL_FP32(kFP32DefaultNaN, s15); 14861 CHECK_EQUAL_FP32(kFP32DefaultNaN, s15);
10947 CHECK_EQUAL_FP32(kFP32DefaultNaN, s16); 14862 CHECK_EQUAL_FP32(kFP32DefaultNaN, s16);
10948 CHECK_EQUAL_FP64(kFP64DefaultNaN, d17); 14863 CHECK_EQUAL_FP64(kFP64DefaultNaN, d17);
10949 } 14864 }
10950 14865
10951 if (test_2op) { 14866 if (test_2op) {
10952 CHECK_EQUAL_FP32(kFP32DefaultNaN, s18); 14867 CHECK_EQUAL_FP32(kFP32DefaultNaN, s18);
10953 CHECK_EQUAL_FP32(kFP32DefaultNaN, s19); 14868 CHECK_EQUAL_FP32(kFP32DefaultNaN, s19);
10954 CHECK_EQUAL_FP32(kFP32DefaultNaN, s20); 14869 CHECK_EQUAL_FP32(kFP32DefaultNaN, s20);
10955 CHECK_EQUAL_FP32(kFP32DefaultNaN, s21); 14870 CHECK_EQUAL_FP32(kFP32DefaultNaN, s21);
10956 CHECK_EQUAL_FP32(kFP32DefaultNaN, s22); 14871 CHECK_EQUAL_FP32(kFP32DefaultNaN, s22);
10957 CHECK_EQUAL_FP32(kFP32DefaultNaN, s23); 14872 CHECK_EQUAL_FP32(kFP32DefaultNaN, s23);
10958 } 14873 }
10959 14874
10960 CHECK_EQUAL_FP32(kFP32DefaultNaN, s24); 14875 CHECK_EQUAL_FP32(kFP32DefaultNaN, s24);
10961 CHECK_EQUAL_FP32(kFP32DefaultNaN, s25); 14876 CHECK_EQUAL_FP32(kFP32DefaultNaN, s25);
10962 CHECK_EQUAL_FP32(kFP32DefaultNaN, s26); 14877 CHECK_EQUAL_FP32(kFP32DefaultNaN, s26);
10963 CHECK_EQUAL_FP32(kFP32DefaultNaN, s27); 14878 CHECK_EQUAL_FP32(kFP32DefaultNaN, s27);
10964 14879
10965 TEARDOWN(); 14880 TEARDOWN();
10966 } 14881 }
10967 14882
10968 14883
10969 TEST(default_nan_float) { 14884 TEST(default_nan_float) {
10970 INIT_V8(); 14885 INIT_V8();
10971 float sn = rawbits_to_float(0x7f951111); 14886 float sn = bit_cast<float>(0x7f951111);
10972 float sm = rawbits_to_float(0x7f952222); 14887 float sm = bit_cast<float>(0x7f952222);
10973 float sa = rawbits_to_float(0x7f95aaaa); 14888 float sa = bit_cast<float>(0x7f95aaaa);
10974 float qn = rawbits_to_float(0x7fea1111); 14889 float qn = bit_cast<float>(0x7fea1111);
10975 float qm = rawbits_to_float(0x7fea2222); 14890 float qm = bit_cast<float>(0x7fea2222);
10976 float qa = rawbits_to_float(0x7feaaaaa); 14891 float qa = bit_cast<float>(0x7feaaaaa);
10977 CHECK(IsSignallingNaN(sn)); 14892 CHECK(IsSignallingNaN(sn));
10978 CHECK(IsSignallingNaN(sm)); 14893 CHECK(IsSignallingNaN(sm));
10979 CHECK(IsSignallingNaN(sa)); 14894 CHECK(IsSignallingNaN(sa));
10980 CHECK(IsQuietNaN(qn)); 14895 CHECK(IsQuietNaN(qn));
10981 CHECK(IsQuietNaN(qm)); 14896 CHECK(IsQuietNaN(qm));
10982 CHECK(IsQuietNaN(qa)); 14897 CHECK(IsQuietNaN(qa));
10983 14898
10984 // - Signalling NaNs 14899 // - Signalling NaNs
10985 DefaultNaNHelper(sn, 0.0f, 0.0f); 14900 DefaultNaNHelper(sn, 0.0f, 0.0f);
10986 DefaultNaNHelper(0.0f, sm, 0.0f); 14901 DefaultNaNHelper(0.0f, sm, 0.0f);
(...skipping 71 matching lines...)
11058 __ Fnmadd(d26, d0, d1, d2); 14973 __ Fnmadd(d26, d0, d1, d2);
11059 __ Fnmsub(d27, d0, d1, d2); 14974 __ Fnmsub(d27, d0, d1, d2);
11060 14975
11061 // Restore FPCR. 14976 // Restore FPCR.
11062 __ Msr(FPCR, x0); 14977 __ Msr(FPCR, x0);
11063 14978
11064 END(); 14979 END();
11065 RUN(); 14980 RUN();
11066 14981
11067 if (test_1op) { 14982 if (test_1op) {
11068 uint64_t n_raw = double_to_rawbits(n); 14983 uint64_t n_raw = bit_cast<uint64_t>(n);
11069 CHECK_EQUAL_FP64(n, d10); 14984 CHECK_EQUAL_FP64(n, d10);
11070 CHECK_EQUAL_FP64(rawbits_to_double(n_raw & ~kDSignMask), d11); 14985 CHECK_EQUAL_FP64(bit_cast<double>(n_raw & ~kDSignMask), d11);
11071 CHECK_EQUAL_FP64(rawbits_to_double(n_raw ^ kDSignMask), d12); 14986 CHECK_EQUAL_FP64(bit_cast<double>(n_raw ^ kDSignMask), d12);
11072 CHECK_EQUAL_FP64(kFP64DefaultNaN, d13); 14987 CHECK_EQUAL_FP64(kFP64DefaultNaN, d13);
11073 CHECK_EQUAL_FP64(kFP64DefaultNaN, d14); 14988 CHECK_EQUAL_FP64(kFP64DefaultNaN, d14);
11074 CHECK_EQUAL_FP64(kFP64DefaultNaN, d15); 14989 CHECK_EQUAL_FP64(kFP64DefaultNaN, d15);
11075 CHECK_EQUAL_FP64(kFP64DefaultNaN, d16); 14990 CHECK_EQUAL_FP64(kFP64DefaultNaN, d16);
11076 CHECK_EQUAL_FP32(kFP32DefaultNaN, s17); 14991 CHECK_EQUAL_FP32(kFP32DefaultNaN, s17);
11077 } 14992 }
11078 14993
11079 if (test_2op) { 14994 if (test_2op) {
11080 CHECK_EQUAL_FP64(kFP64DefaultNaN, d18); 14995 CHECK_EQUAL_FP64(kFP64DefaultNaN, d18);
11081 CHECK_EQUAL_FP64(kFP64DefaultNaN, d19); 14996 CHECK_EQUAL_FP64(kFP64DefaultNaN, d19);
11082 CHECK_EQUAL_FP64(kFP64DefaultNaN, d20); 14997 CHECK_EQUAL_FP64(kFP64DefaultNaN, d20);
11083 CHECK_EQUAL_FP64(kFP64DefaultNaN, d21); 14998 CHECK_EQUAL_FP64(kFP64DefaultNaN, d21);
11084 CHECK_EQUAL_FP64(kFP64DefaultNaN, d22); 14999 CHECK_EQUAL_FP64(kFP64DefaultNaN, d22);
11085 CHECK_EQUAL_FP64(kFP64DefaultNaN, d23); 15000 CHECK_EQUAL_FP64(kFP64DefaultNaN, d23);
11086 } 15001 }
11087 15002
11088 CHECK_EQUAL_FP64(kFP64DefaultNaN, d24); 15003 CHECK_EQUAL_FP64(kFP64DefaultNaN, d24);
11089 CHECK_EQUAL_FP64(kFP64DefaultNaN, d25); 15004 CHECK_EQUAL_FP64(kFP64DefaultNaN, d25);
11090 CHECK_EQUAL_FP64(kFP64DefaultNaN, d26); 15005 CHECK_EQUAL_FP64(kFP64DefaultNaN, d26);
11091 CHECK_EQUAL_FP64(kFP64DefaultNaN, d27); 15006 CHECK_EQUAL_FP64(kFP64DefaultNaN, d27);
11092 15007
11093 TEARDOWN(); 15008 TEARDOWN();
11094 } 15009 }
11095 15010
11096 15011
11097 TEST(default_nan_double) { 15012 TEST(default_nan_double) {
11098 INIT_V8(); 15013 INIT_V8();
11099 double sn = rawbits_to_double(0x7ff5555511111111); 15014 double sn = bit_cast<double>(0x7ff5555511111111);
11100 double sm = rawbits_to_double(0x7ff5555522222222); 15015 double sm = bit_cast<double>(0x7ff5555522222222);
11101 double sa = rawbits_to_double(0x7ff55555aaaaaaaa); 15016 double sa = bit_cast<double>(0x7ff55555aaaaaaaa);
11102 double qn = rawbits_to_double(0x7ffaaaaa11111111); 15017 double qn = bit_cast<double>(0x7ffaaaaa11111111);
11103 double qm = rawbits_to_double(0x7ffaaaaa22222222); 15018 double qm = bit_cast<double>(0x7ffaaaaa22222222);
11104 double qa = rawbits_to_double(0x7ffaaaaaaaaaaaaa); 15019 double qa = bit_cast<double>(0x7ffaaaaaaaaaaaaa);
11105 CHECK(IsSignallingNaN(sn)); 15020 CHECK(IsSignallingNaN(sn));
11106 CHECK(IsSignallingNaN(sm)); 15021 CHECK(IsSignallingNaN(sm));
11107 CHECK(IsSignallingNaN(sa)); 15022 CHECK(IsSignallingNaN(sa));
11108 CHECK(IsQuietNaN(qn)); 15023 CHECK(IsQuietNaN(qn));
11109 CHECK(IsQuietNaN(qm)); 15024 CHECK(IsQuietNaN(qm));
11110 CHECK(IsQuietNaN(qa)); 15025 CHECK(IsQuietNaN(qa));
11111 15026
11112 // - Signalling NaNs 15027 // - Signalling NaNs
11113 DefaultNaNHelper(sn, 0.0, 0.0); 15028 DefaultNaNHelper(sn, 0.0, 0.0);
11114 DefaultNaNHelper(0.0, sm, 0.0); 15029 DefaultNaNHelper(0.0, sm, 0.0);
(...skipping 406 matching lines...)
11521 __ Mov(x0, 1); 15436 __ Mov(x0, 1);
11522 15437
11523 END(); 15438 END();
11524 15439
11525 RUN(); 15440 RUN();
11526 15441
11527 CHECK_EQUAL_64(0x1, x0); 15442 CHECK_EQUAL_64(0x1, x0);
11528 15443
11529 TEARDOWN(); 15444 TEARDOWN();
11530 } 15445 }