Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(7)

Side by Side Diff: test/cctest/test-assembler-a64.cc

Issue 196133017: Experimental parser: merge r19949 (Closed) Base URL: https://v8.googlecode.com/svn/branches/experimental/parser
Patch Set: Created 6 years, 9 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « test/cctest/test-api.cc ('k') | test/cctest/test-atomicops.cc » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2013 the V8 project authors. All rights reserved. 1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 4897 matching lines...) Expand 10 before | Expand all | Expand 10 after
4908 4908
4909 TEARDOWN(); 4909 TEARDOWN();
4910 } 4910 }
4911 4911
4912 4912
4913 TEST(fadd) { 4913 TEST(fadd) {
4914 INIT_V8(); 4914 INIT_V8();
4915 SETUP(); 4915 SETUP();
4916 4916
4917 START(); 4917 START();
4918 __ Fmov(s13, -0.0); 4918 __ Fmov(s14, -0.0f);
4919 __ Fmov(s14, kFP32PositiveInfinity); 4919 __ Fmov(s15, kFP32PositiveInfinity);
4920 __ Fmov(s15, kFP32NegativeInfinity); 4920 __ Fmov(s16, kFP32NegativeInfinity);
4921 __ Fmov(s16, 3.25); 4921 __ Fmov(s17, 3.25f);
4922 __ Fmov(s17, 1.0); 4922 __ Fmov(s18, 1.0f);
4923 __ Fmov(s18, 0); 4923 __ Fmov(s19, 0.0f);
4924 4924
4925 __ Fmov(d26, -0.0); 4925 __ Fmov(d26, -0.0);
4926 __ Fmov(d27, kFP64PositiveInfinity); 4926 __ Fmov(d27, kFP64PositiveInfinity);
4927 __ Fmov(d28, kFP64NegativeInfinity); 4927 __ Fmov(d28, kFP64NegativeInfinity);
4928 __ Fmov(d29, 0); 4928 __ Fmov(d29, 0.0);
4929 __ Fmov(d30, -2.0); 4929 __ Fmov(d30, -2.0);
4930 __ Fmov(d31, 2.25); 4930 __ Fmov(d31, 2.25);
4931 4931
4932 __ Fadd(s0, s16, s17); 4932 __ Fadd(s0, s17, s18);
4933 __ Fadd(s1, s17, s18); 4933 __ Fadd(s1, s18, s19);
4934 __ Fadd(s2, s13, s17); 4934 __ Fadd(s2, s14, s18);
4935 __ Fadd(s3, s14, s17); 4935 __ Fadd(s3, s15, s18);
4936 __ Fadd(s4, s15, s17); 4936 __ Fadd(s4, s16, s18);
4937 __ Fadd(s5, s15, s16);
4938 __ Fadd(s6, s16, s15);
4937 4939
4938 __ Fadd(d5, d30, d31); 4940 __ Fadd(d7, d30, d31);
4939 __ Fadd(d6, d29, d31); 4941 __ Fadd(d8, d29, d31);
4940 __ Fadd(d7, d26, d31); 4942 __ Fadd(d9, d26, d31);
4941 __ Fadd(d8, d27, d31); 4943 __ Fadd(d10, d27, d31);
4942 __ Fadd(d9, d28, d31); 4944 __ Fadd(d11, d28, d31);
4945 __ Fadd(d12, d27, d28);
4946 __ Fadd(d13, d28, d27);
4943 END(); 4947 END();
4944 4948
4945 RUN(); 4949 RUN();
4946 4950
4947 ASSERT_EQUAL_FP32(4.25, s0); 4951 ASSERT_EQUAL_FP32(4.25, s0);
4948 ASSERT_EQUAL_FP32(1.0, s1); 4952 ASSERT_EQUAL_FP32(1.0, s1);
4949 ASSERT_EQUAL_FP32(1.0, s2); 4953 ASSERT_EQUAL_FP32(1.0, s2);
4950 ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s3); 4954 ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s3);
4951 ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s4); 4955 ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s4);
4952 ASSERT_EQUAL_FP64(0.25, d5); 4956 ASSERT_EQUAL_FP32(kFP32DefaultNaN, s5);
4953 ASSERT_EQUAL_FP64(2.25, d6); 4957 ASSERT_EQUAL_FP32(kFP32DefaultNaN, s6);
4954 ASSERT_EQUAL_FP64(2.25, d7); 4958 ASSERT_EQUAL_FP64(0.25, d7);
4955 ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d8); 4959 ASSERT_EQUAL_FP64(2.25, d8);
4956 ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d9); 4960 ASSERT_EQUAL_FP64(2.25, d9);
4961 ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d10);
4962 ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d11);
4963 ASSERT_EQUAL_FP64(kFP64DefaultNaN, d12);
4964 ASSERT_EQUAL_FP64(kFP64DefaultNaN, d13);
4957 4965
4958 TEARDOWN(); 4966 TEARDOWN();
4959 } 4967 }
4960 4968
4961 4969
4962 TEST(fsub) { 4970 TEST(fsub) {
4963 INIT_V8(); 4971 INIT_V8();
4964 SETUP(); 4972 SETUP();
4965 4973
4966 START(); 4974 START();
4967 __ Fmov(s13, -0.0); 4975 __ Fmov(s14, -0.0f);
4968 __ Fmov(s14, kFP32PositiveInfinity); 4976 __ Fmov(s15, kFP32PositiveInfinity);
4969 __ Fmov(s15, kFP32NegativeInfinity); 4977 __ Fmov(s16, kFP32NegativeInfinity);
4970 __ Fmov(s16, 3.25); 4978 __ Fmov(s17, 3.25f);
4971 __ Fmov(s17, 1.0); 4979 __ Fmov(s18, 1.0f);
4972 __ Fmov(s18, 0); 4980 __ Fmov(s19, 0.0f);
4973 4981
4974 __ Fmov(d26, -0.0); 4982 __ Fmov(d26, -0.0);
4975 __ Fmov(d27, kFP64PositiveInfinity); 4983 __ Fmov(d27, kFP64PositiveInfinity);
4976 __ Fmov(d28, kFP64NegativeInfinity); 4984 __ Fmov(d28, kFP64NegativeInfinity);
4977 __ Fmov(d29, 0); 4985 __ Fmov(d29, 0.0);
4978 __ Fmov(d30, -2.0); 4986 __ Fmov(d30, -2.0);
4979 __ Fmov(d31, 2.25); 4987 __ Fmov(d31, 2.25);
4980 4988
4981 __ Fsub(s0, s16, s17); 4989 __ Fsub(s0, s17, s18);
4982 __ Fsub(s1, s17, s18); 4990 __ Fsub(s1, s18, s19);
4983 __ Fsub(s2, s13, s17); 4991 __ Fsub(s2, s14, s18);
4984 __ Fsub(s3, s17, s14); 4992 __ Fsub(s3, s18, s15);
4985 __ Fsub(s4, s17, s15); 4993 __ Fsub(s4, s18, s16);
4994 __ Fsub(s5, s15, s15);
4995 __ Fsub(s6, s16, s16);
4986 4996
4987 __ Fsub(d5, d30, d31); 4997 __ Fsub(d7, d30, d31);
4988 __ Fsub(d6, d29, d31); 4998 __ Fsub(d8, d29, d31);
4989 __ Fsub(d7, d26, d31); 4999 __ Fsub(d9, d26, d31);
4990 __ Fsub(d8, d31, d27); 5000 __ Fsub(d10, d31, d27);
4991 __ Fsub(d9, d31, d28); 5001 __ Fsub(d11, d31, d28);
5002 __ Fsub(d12, d27, d27);
5003 __ Fsub(d13, d28, d28);
4992 END(); 5004 END();
4993 5005
4994 RUN(); 5006 RUN();
4995 5007
4996 ASSERT_EQUAL_FP32(2.25, s0); 5008 ASSERT_EQUAL_FP32(2.25, s0);
4997 ASSERT_EQUAL_FP32(1.0, s1); 5009 ASSERT_EQUAL_FP32(1.0, s1);
4998 ASSERT_EQUAL_FP32(-1.0, s2); 5010 ASSERT_EQUAL_FP32(-1.0, s2);
4999 ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s3); 5011 ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s3);
5000 ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s4); 5012 ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s4);
5001 ASSERT_EQUAL_FP64(-4.25, d5); 5013 ASSERT_EQUAL_FP32(kFP32DefaultNaN, s5);
5002 ASSERT_EQUAL_FP64(-2.25, d6); 5014 ASSERT_EQUAL_FP32(kFP32DefaultNaN, s6);
5003 ASSERT_EQUAL_FP64(-2.25, d7); 5015 ASSERT_EQUAL_FP64(-4.25, d7);
5004 ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d8); 5016 ASSERT_EQUAL_FP64(-2.25, d8);
5005 ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d9); 5017 ASSERT_EQUAL_FP64(-2.25, d9);
5018 ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d10);
5019 ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d11);
5020 ASSERT_EQUAL_FP64(kFP64DefaultNaN, d12);
5021 ASSERT_EQUAL_FP64(kFP64DefaultNaN, d13);
5006 5022
5007 TEARDOWN(); 5023 TEARDOWN();
5008 } 5024 }
5009 5025
5010 5026
5011 TEST(fmul) { 5027 TEST(fmul) {
5012 INIT_V8(); 5028 INIT_V8();
5013 SETUP(); 5029 SETUP();
5014 5030
5015 START(); 5031 START();
5016 __ Fmov(s13, -0.0); 5032 __ Fmov(s14, -0.0f);
5017 __ Fmov(s14, kFP32PositiveInfinity); 5033 __ Fmov(s15, kFP32PositiveInfinity);
5018 __ Fmov(s15, kFP32NegativeInfinity); 5034 __ Fmov(s16, kFP32NegativeInfinity);
5019 __ Fmov(s16, 3.25); 5035 __ Fmov(s17, 3.25f);
5020 __ Fmov(s17, 2.0); 5036 __ Fmov(s18, 2.0f);
5021 __ Fmov(s18, 0); 5037 __ Fmov(s19, 0.0f);
5022 __ Fmov(s19, -2.0); 5038 __ Fmov(s20, -2.0f);
5023 5039
5024 __ Fmov(d26, -0.0); 5040 __ Fmov(d26, -0.0);
5025 __ Fmov(d27, kFP64PositiveInfinity); 5041 __ Fmov(d27, kFP64PositiveInfinity);
5026 __ Fmov(d28, kFP64NegativeInfinity); 5042 __ Fmov(d28, kFP64NegativeInfinity);
5027 __ Fmov(d29, 0); 5043 __ Fmov(d29, 0.0);
5028 __ Fmov(d30, -2.0); 5044 __ Fmov(d30, -2.0);
5029 __ Fmov(d31, 2.25); 5045 __ Fmov(d31, 2.25);
5030 5046
5031 __ Fmul(s0, s16, s17); 5047 __ Fmul(s0, s17, s18);
5032 __ Fmul(s1, s17, s18); 5048 __ Fmul(s1, s18, s19);
5033 __ Fmul(s2, s13, s13); 5049 __ Fmul(s2, s14, s14);
5034 __ Fmul(s3, s14, s19); 5050 __ Fmul(s3, s15, s20);
5035 __ Fmul(s4, s15, s19); 5051 __ Fmul(s4, s16, s20);
5052 __ Fmul(s5, s15, s19);
5053 __ Fmul(s6, s19, s16);
5036 5054
5037 __ Fmul(d5, d30, d31); 5055 __ Fmul(d7, d30, d31);
5038 __ Fmul(d6, d29, d31); 5056 __ Fmul(d8, d29, d31);
5039 __ Fmul(d7, d26, d26); 5057 __ Fmul(d9, d26, d26);
5040 __ Fmul(d8, d27, d30); 5058 __ Fmul(d10, d27, d30);
5041 __ Fmul(d9, d28, d30); 5059 __ Fmul(d11, d28, d30);
5060 __ Fmul(d12, d27, d29);
5061 __ Fmul(d13, d29, d28);
5042 END(); 5062 END();
5043 5063
5044 RUN(); 5064 RUN();
5045 5065
5046 ASSERT_EQUAL_FP32(6.5, s0); 5066 ASSERT_EQUAL_FP32(6.5, s0);
5047 ASSERT_EQUAL_FP32(0.0, s1); 5067 ASSERT_EQUAL_FP32(0.0, s1);
5048 ASSERT_EQUAL_FP32(0.0, s2); 5068 ASSERT_EQUAL_FP32(0.0, s2);
5049 ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s3); 5069 ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s3);
5050 ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s4); 5070 ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s4);
5051 ASSERT_EQUAL_FP64(-4.5, d5); 5071 ASSERT_EQUAL_FP32(kFP32DefaultNaN, s5);
5052 ASSERT_EQUAL_FP64(0.0, d6); 5072 ASSERT_EQUAL_FP32(kFP32DefaultNaN, s6);
5053 ASSERT_EQUAL_FP64(0.0, d7); 5073 ASSERT_EQUAL_FP64(-4.5, d7);
5054 ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d8); 5074 ASSERT_EQUAL_FP64(0.0, d8);
5055 ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d9); 5075 ASSERT_EQUAL_FP64(0.0, d9);
5076 ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d10);
5077 ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d11);
5078 ASSERT_EQUAL_FP64(kFP64DefaultNaN, d12);
5079 ASSERT_EQUAL_FP64(kFP64DefaultNaN, d13);
5056 5080
5057 TEARDOWN(); 5081 TEARDOWN();
5058 } 5082 }
5059 5083
5060 5084
5061 static void FmaddFmsubDoubleHelper(double n, double m, double a, 5085 static void FmaddFmsubHelper(double n, double m, double a,
5062 double fmadd, double fmsub) { 5086 double fmadd, double fmsub,
5087 double fnmadd, double fnmsub) {
5063 SETUP(); 5088 SETUP();
5064 START(); 5089 START();
5065 5090
5066 __ Fmov(d0, n); 5091 __ Fmov(d0, n);
5067 __ Fmov(d1, m); 5092 __ Fmov(d1, m);
5068 __ Fmov(d2, a); 5093 __ Fmov(d2, a);
5069 __ Fmadd(d28, d0, d1, d2); 5094 __ Fmadd(d28, d0, d1, d2);
5070 __ Fmsub(d29, d0, d1, d2); 5095 __ Fmsub(d29, d0, d1, d2);
5071 __ Fnmadd(d30, d0, d1, d2); 5096 __ Fnmadd(d30, d0, d1, d2);
5072 __ Fnmsub(d31, d0, d1, d2); 5097 __ Fnmsub(d31, d0, d1, d2);
5073 5098
5074 END(); 5099 END();
5075 RUN(); 5100 RUN();
5076 5101
5077 ASSERT_EQUAL_FP64(fmadd, d28); 5102 ASSERT_EQUAL_FP64(fmadd, d28);
5078 ASSERT_EQUAL_FP64(fmsub, d29); 5103 ASSERT_EQUAL_FP64(fmsub, d29);
5079 ASSERT_EQUAL_FP64(-fmadd, d30); 5104 ASSERT_EQUAL_FP64(fnmadd, d30);
5080 ASSERT_EQUAL_FP64(-fmsub, d31); 5105 ASSERT_EQUAL_FP64(fnmsub, d31);
5081 5106
5082 TEARDOWN(); 5107 TEARDOWN();
5083 } 5108 }
5084 5109
5085 5110
5086 TEST(fmadd_fmsub_double) { 5111 TEST(fmadd_fmsub_double) {
5087 INIT_V8(); 5112 INIT_V8();
5088 double inputs[] = {
5089 // Normal numbers, including -0.0.
5090 DBL_MAX, DBL_MIN, 3.25, 2.0, 0.0,
5091 -DBL_MAX, -DBL_MIN, -3.25, -2.0, -0.0,
5092 // Infinities.
5093 kFP64NegativeInfinity, kFP64PositiveInfinity,
5094 // Subnormal numbers.
5095 rawbits_to_double(0x000fffffffffffff),
5096 rawbits_to_double(0x0000000000000001),
5097 rawbits_to_double(0x000123456789abcd),
5098 -rawbits_to_double(0x000fffffffffffff),
5099 -rawbits_to_double(0x0000000000000001),
5100 -rawbits_to_double(0x000123456789abcd),
5101 // NaN.
5102 kFP64QuietNaN,
5103 -kFP64QuietNaN,
5104 };
5105 const int count = sizeof(inputs) / sizeof(inputs[0]);
5106 5113
5107 for (int in = 0; in < count; in++) { 5114 // It's hard to check the result of fused operations because the only way to
5108 double n = inputs[in]; 5115 // calculate the result is using fma, which is what the simulator uses anyway.
5109 for (int im = 0; im < count; im++) { 5116 // TODO(jbramley): Add tests to check behaviour against a hardware trace.
5110 double m = inputs[im];
5111 for (int ia = 0; ia < count; ia++) {
5112 double a = inputs[ia];
5113 double fmadd = fma(n, m, a);
5114 double fmsub = fma(-n, m, a);
5115 5117
5116 FmaddFmsubDoubleHelper(n, m, a, fmadd, fmsub); 5118 // Basic operation.
5117 } 5119 FmaddFmsubHelper(1.0, 2.0, 3.0, 5.0, 1.0, -5.0, -1.0);
5118 } 5120 FmaddFmsubHelper(-1.0, 2.0, 3.0, 1.0, 5.0, -1.0, -5.0);
5119 } 5121
5122 // Check the sign of exact zeroes.
5123 // n m a fmadd fmsub fnmadd fnmsub
5124 FmaddFmsubHelper(-0.0, +0.0, -0.0, -0.0, +0.0, +0.0, +0.0);
5125 FmaddFmsubHelper(+0.0, +0.0, -0.0, +0.0, -0.0, +0.0, +0.0);
5126 FmaddFmsubHelper(+0.0, +0.0, +0.0, +0.0, +0.0, -0.0, +0.0);
5127 FmaddFmsubHelper(-0.0, +0.0, +0.0, +0.0, +0.0, +0.0, -0.0);
5128 FmaddFmsubHelper(+0.0, -0.0, -0.0, -0.0, +0.0, +0.0, +0.0);
5129 FmaddFmsubHelper(-0.0, -0.0, -0.0, +0.0, -0.0, +0.0, +0.0);
5130 FmaddFmsubHelper(-0.0, -0.0, +0.0, +0.0, +0.0, -0.0, +0.0);
5131 FmaddFmsubHelper(+0.0, -0.0, +0.0, +0.0, +0.0, +0.0, -0.0);
5132
5133 // Check NaN generation.
5134 FmaddFmsubHelper(kFP64PositiveInfinity, 0.0, 42.0,
5135 kFP64DefaultNaN, kFP64DefaultNaN,
5136 kFP64DefaultNaN, kFP64DefaultNaN);
5137 FmaddFmsubHelper(0.0, kFP64PositiveInfinity, 42.0,
5138 kFP64DefaultNaN, kFP64DefaultNaN,
5139 kFP64DefaultNaN, kFP64DefaultNaN);
5140 FmaddFmsubHelper(kFP64PositiveInfinity, 1.0, kFP64PositiveInfinity,
5141 kFP64PositiveInfinity, // inf + ( inf * 1) = inf
5142 kFP64DefaultNaN, // inf + (-inf * 1) = NaN
5143 kFP64NegativeInfinity, // -inf + (-inf * 1) = -inf
5144 kFP64DefaultNaN); // -inf + ( inf * 1) = NaN
5145 FmaddFmsubHelper(kFP64NegativeInfinity, 1.0, kFP64PositiveInfinity,
5146 kFP64DefaultNaN, // inf + (-inf * 1) = NaN
5147 kFP64PositiveInfinity, // inf + ( inf * 1) = inf
5148 kFP64DefaultNaN, // -inf + ( inf * 1) = NaN
5149 kFP64NegativeInfinity); // -inf + (-inf * 1) = -inf
5120 } 5150 }
5121 5151
5122 5152
5123 TEST(fmadd_fmsub_double_rounding) { 5153 static void FmaddFmsubHelper(float n, float m, float a,
5124 INIT_V8(); 5154 float fmadd, float fmsub,
5125 // Make sure we run plenty of tests where an intermediate rounding stage would 5155 float fnmadd, float fnmsub) {
5126 // produce an incorrect result.
5127 const int limit = 1000;
5128 int count_fmadd = 0;
5129 int count_fmsub = 0;
5130
5131 uint16_t seed[3] = {42, 43, 44};
5132 seed48(seed);
5133
5134 while ((count_fmadd < limit) || (count_fmsub < limit)) {
5135 double n, m, a;
5136 uint32_t r[2];
5137 ASSERT(sizeof(r) == sizeof(n));
5138
5139 r[0] = mrand48();
5140 r[1] = mrand48();
5141 memcpy(&n, r, sizeof(r));
5142 r[0] = mrand48();
5143 r[1] = mrand48();
5144 memcpy(&m, r, sizeof(r));
5145 r[0] = mrand48();
5146 r[1] = mrand48();
5147 memcpy(&a, r, sizeof(r));
5148
5149 if (!std::isfinite(a) || !std::isfinite(n) || !std::isfinite(m)) {
5150 continue;
5151 }
5152
5153 // Calculate the expected results.
5154 double fmadd = fma(n, m, a);
5155 double fmsub = fma(-n, m, a);
5156
5157 bool test_fmadd = (fmadd != (a + n * m));
5158 bool test_fmsub = (fmsub != (a - n * m));
5159
5160 // If rounding would produce a different result, increment the test count.
5161 count_fmadd += test_fmadd;
5162 count_fmsub += test_fmsub;
5163
5164 if (test_fmadd || test_fmsub) {
5165 FmaddFmsubDoubleHelper(n, m, a, fmadd, fmsub);
5166 }
5167 }
5168 }
5169
5170
5171 static void FmaddFmsubFloatHelper(float n, float m, float a,
5172 float fmadd, float fmsub) {
5173 SETUP(); 5156 SETUP();
5174 START(); 5157 START();
5175 5158
5176 __ Fmov(s0, n); 5159 __ Fmov(s0, n);
5177 __ Fmov(s1, m); 5160 __ Fmov(s1, m);
5178 __ Fmov(s2, a); 5161 __ Fmov(s2, a);
5179 __ Fmadd(s30, s0, s1, s2); 5162 __ Fmadd(s28, s0, s1, s2);
5180 __ Fmsub(s31, s0, s1, s2); 5163 __ Fmsub(s29, s0, s1, s2);
5164 __ Fnmadd(s30, s0, s1, s2);
5165 __ Fnmsub(s31, s0, s1, s2);
5181 5166
5182 END(); 5167 END();
5183 RUN(); 5168 RUN();
5184 5169
5185 ASSERT_EQUAL_FP32(fmadd, s30); 5170 ASSERT_EQUAL_FP32(fmadd, s28);
5186 ASSERT_EQUAL_FP32(fmsub, s31); 5171 ASSERT_EQUAL_FP32(fmsub, s29);
5172 ASSERT_EQUAL_FP32(fnmadd, s30);
5173 ASSERT_EQUAL_FP32(fnmsub, s31);
5187 5174
5188 TEARDOWN(); 5175 TEARDOWN();
5189 } 5176 }
5190 5177
5191 5178
5192 TEST(fmadd_fmsub_float) { 5179 TEST(fmadd_fmsub_float) {
5193 INIT_V8(); 5180 INIT_V8();
5194 float inputs[] = { 5181 // It's hard to check the result of fused operations because the only way to
5195 // Normal numbers, including -0.0f. 5182 // calculate the result is using fma, which is what the simulator uses anyway.
5196 FLT_MAX, FLT_MIN, 3.25f, 2.0f, 0.0f, 5183 // TODO(jbramley): Add tests to check behaviour against a hardware trace.
5197 -FLT_MAX, -FLT_MIN, -3.25f, -2.0f, -0.0f,
5198 // Infinities.
5199 kFP32NegativeInfinity, kFP32PositiveInfinity,
5200 // Subnormal numbers.
5201 rawbits_to_float(0x07ffffff),
5202 rawbits_to_float(0x00000001),
5203 rawbits_to_float(0x01234567),
5204 -rawbits_to_float(0x07ffffff),
5205 -rawbits_to_float(0x00000001),
5206 -rawbits_to_float(0x01234567),
5207 // NaN.
5208 kFP32QuietNaN,
5209 -kFP32QuietNaN,
5210 };
5211 const int count = sizeof(inputs) / sizeof(inputs[0]);
5212 5184
5213 for (int in = 0; in < count; in++) { 5185 // Basic operation.
5214 float n = inputs[in]; 5186 FmaddFmsubHelper(1.0f, 2.0f, 3.0f, 5.0f, 1.0f, -5.0f, -1.0f);
5215 for (int im = 0; im < count; im++) { 5187 FmaddFmsubHelper(-1.0f, 2.0f, 3.0f, 1.0f, 5.0f, -1.0f, -5.0f);
5216 float m = inputs[im];
5217 for (int ia = 0; ia < count; ia++) {
5218 float a = inputs[ia];
5219 float fmadd = fmaf(n, m, a);
5220 float fmsub = fmaf(-n, m, a);
5221 5188
5222 FmaddFmsubFloatHelper(n, m, a, fmadd, fmsub); 5189 // Check the sign of exact zeroes.
5223 } 5190 // n m a fmadd fmsub fnmadd fnmsub
5224 } 5191 FmaddFmsubHelper(-0.0f, +0.0f, -0.0f, -0.0f, +0.0f, +0.0f, +0.0f);
5225 } 5192 FmaddFmsubHelper(+0.0f, +0.0f, -0.0f, +0.0f, -0.0f, +0.0f, +0.0f);
5193 FmaddFmsubHelper(+0.0f, +0.0f, +0.0f, +0.0f, +0.0f, -0.0f, +0.0f);
5194 FmaddFmsubHelper(-0.0f, +0.0f, +0.0f, +0.0f, +0.0f, +0.0f, -0.0f);
5195 FmaddFmsubHelper(+0.0f, -0.0f, -0.0f, -0.0f, +0.0f, +0.0f, +0.0f);
5196 FmaddFmsubHelper(-0.0f, -0.0f, -0.0f, +0.0f, -0.0f, +0.0f, +0.0f);
5197 FmaddFmsubHelper(-0.0f, -0.0f, +0.0f, +0.0f, +0.0f, -0.0f, +0.0f);
5198 FmaddFmsubHelper(+0.0f, -0.0f, +0.0f, +0.0f, +0.0f, +0.0f, -0.0f);
5199
5200 // Check NaN generation.
5201 FmaddFmsubHelper(kFP32PositiveInfinity, 0.0f, 42.0f,
5202 kFP32DefaultNaN, kFP32DefaultNaN,
5203 kFP32DefaultNaN, kFP32DefaultNaN);
5204 FmaddFmsubHelper(0.0f, kFP32PositiveInfinity, 42.0f,
5205 kFP32DefaultNaN, kFP32DefaultNaN,
5206 kFP32DefaultNaN, kFP32DefaultNaN);
5207 FmaddFmsubHelper(kFP32PositiveInfinity, 1.0f, kFP32PositiveInfinity,
5208 kFP32PositiveInfinity, // inf + ( inf * 1) = inf
5209 kFP32DefaultNaN, // inf + (-inf * 1) = NaN
5210 kFP32NegativeInfinity, // -inf + (-inf * 1) = -inf
5211 kFP32DefaultNaN); // -inf + ( inf * 1) = NaN
5212 FmaddFmsubHelper(kFP32NegativeInfinity, 1.0f, kFP32PositiveInfinity,
5213 kFP32DefaultNaN, // inf + (-inf * 1) = NaN
5214 kFP32PositiveInfinity, // inf + ( inf * 1) = inf
5215 kFP32DefaultNaN, // -inf + ( inf * 1) = NaN
5216 kFP32NegativeInfinity); // -inf + (-inf * 1) = -inf
5226 } 5217 }
5227 5218
5228 5219
5229 TEST(fmadd_fmsub_float_rounding) { 5220 TEST(fmadd_fmsub_double_nans) {
5230 INIT_V8(); 5221 INIT_V8();
5231 // Make sure we run plenty of tests where an intermediate rounding stage would 5222 // Make sure that NaN propagation works correctly.
5232 // produce an incorrect result. 5223 double s1 = rawbits_to_double(0x7ff5555511111111);
5233 const int limit = 1000; 5224 double s2 = rawbits_to_double(0x7ff5555522222222);
5234 int count_fmadd = 0; 5225 double sa = rawbits_to_double(0x7ff55555aaaaaaaa);
5235 int count_fmsub = 0; 5226 double q1 = rawbits_to_double(0x7ffaaaaa11111111);
5227 double q2 = rawbits_to_double(0x7ffaaaaa22222222);
5228 double qa = rawbits_to_double(0x7ffaaaaaaaaaaaaa);
5229 ASSERT(IsSignallingNaN(s1));
5230 ASSERT(IsSignallingNaN(s2));
5231 ASSERT(IsSignallingNaN(sa));
5232 ASSERT(IsQuietNaN(q1));
5233 ASSERT(IsQuietNaN(q2));
5234 ASSERT(IsQuietNaN(qa));
5236 5235
5237 uint16_t seed[3] = {42, 43, 44}; 5236 // The input NaNs after passing through ProcessNaN.
5238 seed48(seed); 5237 double s1_proc = rawbits_to_double(0x7ffd555511111111);
5238 double s2_proc = rawbits_to_double(0x7ffd555522222222);
5239 double sa_proc = rawbits_to_double(0x7ffd5555aaaaaaaa);
5240 double q1_proc = q1;
5241 double q2_proc = q2;
5242 double qa_proc = qa;
5243 ASSERT(IsQuietNaN(s1_proc));
5244 ASSERT(IsQuietNaN(s2_proc));
5245 ASSERT(IsQuietNaN(sa_proc));
5246 ASSERT(IsQuietNaN(q1_proc));
5247 ASSERT(IsQuietNaN(q2_proc));
5248 ASSERT(IsQuietNaN(qa_proc));
5239 5249
5240 while ((count_fmadd < limit) || (count_fmsub < limit)) { 5250 // Quiet NaNs are propagated.
5241 float n, m, a; 5251 FmaddFmsubHelper(q1, 0, 0, q1_proc, -q1_proc, -q1_proc, q1_proc);
5242 uint32_t r; 5252 FmaddFmsubHelper(0, q2, 0, q2_proc, q2_proc, q2_proc, q2_proc);
5243 ASSERT(sizeof(r) == sizeof(n)); 5253 FmaddFmsubHelper(0, 0, qa, qa_proc, qa_proc, -qa_proc, -qa_proc);
5254 FmaddFmsubHelper(q1, q2, 0, q1_proc, -q1_proc, -q1_proc, q1_proc);
5255 FmaddFmsubHelper(0, q2, qa, qa_proc, qa_proc, -qa_proc, -qa_proc);
5256 FmaddFmsubHelper(q1, 0, qa, qa_proc, qa_proc, -qa_proc, -qa_proc);
5257 FmaddFmsubHelper(q1, q2, qa, qa_proc, qa_proc, -qa_proc, -qa_proc);
5244 5258
5245 r = mrand48(); 5259 // Signalling NaNs are propagated, and made quiet.
5246 memcpy(&n, &r, sizeof(r)); 5260 FmaddFmsubHelper(s1, 0, 0, s1_proc, -s1_proc, -s1_proc, s1_proc);
5247 r = mrand48(); 5261 FmaddFmsubHelper(0, s2, 0, s2_proc, s2_proc, s2_proc, s2_proc);
5248 memcpy(&m, &r, sizeof(r)); 5262 FmaddFmsubHelper(0, 0, sa, sa_proc, sa_proc, -sa_proc, -sa_proc);
5249 r = mrand48(); 5263 FmaddFmsubHelper(s1, s2, 0, s1_proc, -s1_proc, -s1_proc, s1_proc);
5250 memcpy(&a, &r, sizeof(r)); 5264 FmaddFmsubHelper(0, s2, sa, sa_proc, sa_proc, -sa_proc, -sa_proc);
5265 FmaddFmsubHelper(s1, 0, sa, sa_proc, sa_proc, -sa_proc, -sa_proc);
5266 FmaddFmsubHelper(s1, s2, sa, sa_proc, sa_proc, -sa_proc, -sa_proc);
5251 5267
5252 if (!std::isfinite(a) || !std::isfinite(n) || !std::isfinite(m)) { 5268 // Signalling NaNs take precedence over quiet NaNs.
5253 continue; 5269 FmaddFmsubHelper(s1, q2, qa, s1_proc, -s1_proc, -s1_proc, s1_proc);
5254 } 5270 FmaddFmsubHelper(q1, s2, qa, s2_proc, s2_proc, s2_proc, s2_proc);
5271 FmaddFmsubHelper(q1, q2, sa, sa_proc, sa_proc, -sa_proc, -sa_proc);
5272 FmaddFmsubHelper(s1, s2, qa, s1_proc, -s1_proc, -s1_proc, s1_proc);
5273 FmaddFmsubHelper(q1, s2, sa, sa_proc, sa_proc, -sa_proc, -sa_proc);
5274 FmaddFmsubHelper(s1, q2, sa, sa_proc, sa_proc, -sa_proc, -sa_proc);
5275 FmaddFmsubHelper(s1, s2, sa, sa_proc, sa_proc, -sa_proc, -sa_proc);
5255 5276
5256 // Calculate the expected results. 5277 // A NaN generated by the intermediate op1 * op2 overrides a quiet NaN in a.
5257 float fmadd = fmaf(n, m, a); 5278 FmaddFmsubHelper(0, kFP64PositiveInfinity, qa,
5258 float fmsub = fmaf(-n, m, a); 5279 kFP64DefaultNaN, kFP64DefaultNaN,
5259 5280 kFP64DefaultNaN, kFP64DefaultNaN);
5260 bool test_fmadd = (fmadd != (a + n * m)); 5281 FmaddFmsubHelper(kFP64PositiveInfinity, 0, qa,
5261 bool test_fmsub = (fmsub != (a - n * m)); 5282 kFP64DefaultNaN, kFP64DefaultNaN,
5262 5283 kFP64DefaultNaN, kFP64DefaultNaN);
5263 // If rounding would produce a different result, increment the test count. 5284 FmaddFmsubHelper(0, kFP64NegativeInfinity, qa,
5264 count_fmadd += test_fmadd; 5285 kFP64DefaultNaN, kFP64DefaultNaN,
5265 count_fmsub += test_fmsub; 5286 kFP64DefaultNaN, kFP64DefaultNaN);
5266 5287 FmaddFmsubHelper(kFP64NegativeInfinity, 0, qa,
5267 if (test_fmadd || test_fmsub) { 5288 kFP64DefaultNaN, kFP64DefaultNaN,
5268 FmaddFmsubFloatHelper(n, m, a, fmadd, fmsub); 5289 kFP64DefaultNaN, kFP64DefaultNaN);
5269 }
5270 }
5271 } 5290 }
5272 5291
5273 5292
5293 TEST(fmadd_fmsub_float_nans) {
5294 INIT_V8();
5295 // Make sure that NaN propagation works correctly.
5296 float s1 = rawbits_to_float(0x7f951111);
5297 float s2 = rawbits_to_float(0x7f952222);
5298 float sa = rawbits_to_float(0x7f95aaaa);
5299 float q1 = rawbits_to_float(0x7fea1111);
5300 float q2 = rawbits_to_float(0x7fea2222);
5301 float qa = rawbits_to_float(0x7feaaaaa);
5302 ASSERT(IsSignallingNaN(s1));
5303 ASSERT(IsSignallingNaN(s2));
5304 ASSERT(IsSignallingNaN(sa));
5305 ASSERT(IsQuietNaN(q1));
5306 ASSERT(IsQuietNaN(q2));
5307 ASSERT(IsQuietNaN(qa));
5308
5309 // The input NaNs after passing through ProcessNaN.
5310 float s1_proc = rawbits_to_float(0x7fd51111);
5311 float s2_proc = rawbits_to_float(0x7fd52222);
5312 float sa_proc = rawbits_to_float(0x7fd5aaaa);
5313 float q1_proc = q1;
5314 float q2_proc = q2;
5315 float qa_proc = qa;
5316 ASSERT(IsQuietNaN(s1_proc));
5317 ASSERT(IsQuietNaN(s2_proc));
5318 ASSERT(IsQuietNaN(sa_proc));
5319 ASSERT(IsQuietNaN(q1_proc));
5320 ASSERT(IsQuietNaN(q2_proc));
5321 ASSERT(IsQuietNaN(qa_proc));
5322
5323 // Quiet NaNs are propagated.
5324 FmaddFmsubHelper(q1, 0, 0, q1_proc, -q1_proc, -q1_proc, q1_proc);
5325 FmaddFmsubHelper(0, q2, 0, q2_proc, q2_proc, q2_proc, q2_proc);
5326 FmaddFmsubHelper(0, 0, qa, qa_proc, qa_proc, -qa_proc, -qa_proc);
5327 FmaddFmsubHelper(q1, q2, 0, q1_proc, -q1_proc, -q1_proc, q1_proc);
5328 FmaddFmsubHelper(0, q2, qa, qa_proc, qa_proc, -qa_proc, -qa_proc);
5329 FmaddFmsubHelper(q1, 0, qa, qa_proc, qa_proc, -qa_proc, -qa_proc);
5330 FmaddFmsubHelper(q1, q2, qa, qa_proc, qa_proc, -qa_proc, -qa_proc);
5331
5332 // Signalling NaNs are propagated, and made quiet.
5333 FmaddFmsubHelper(s1, 0, 0, s1_proc, -s1_proc, -s1_proc, s1_proc);
5334 FmaddFmsubHelper(0, s2, 0, s2_proc, s2_proc, s2_proc, s2_proc);
5335 FmaddFmsubHelper(0, 0, sa, sa_proc, sa_proc, -sa_proc, -sa_proc);
5336 FmaddFmsubHelper(s1, s2, 0, s1_proc, -s1_proc, -s1_proc, s1_proc);
5337 FmaddFmsubHelper(0, s2, sa, sa_proc, sa_proc, -sa_proc, -sa_proc);
5338 FmaddFmsubHelper(s1, 0, sa, sa_proc, sa_proc, -sa_proc, -sa_proc);
5339 FmaddFmsubHelper(s1, s2, sa, sa_proc, sa_proc, -sa_proc, -sa_proc);
5340
5341 // Signalling NaNs take precedence over quiet NaNs.
5342 FmaddFmsubHelper(s1, q2, qa, s1_proc, -s1_proc, -s1_proc, s1_proc);
5343 FmaddFmsubHelper(q1, s2, qa, s2_proc, s2_proc, s2_proc, s2_proc);
5344 FmaddFmsubHelper(q1, q2, sa, sa_proc, sa_proc, -sa_proc, -sa_proc);
5345 FmaddFmsubHelper(s1, s2, qa, s1_proc, -s1_proc, -s1_proc, s1_proc);
5346 FmaddFmsubHelper(q1, s2, sa, sa_proc, sa_proc, -sa_proc, -sa_proc);
5347 FmaddFmsubHelper(s1, q2, sa, sa_proc, sa_proc, -sa_proc, -sa_proc);
5348 FmaddFmsubHelper(s1, s2, sa, sa_proc, sa_proc, -sa_proc, -sa_proc);
5349
5350 // A NaN generated by the intermediate op1 * op2 overrides a quiet NaN in a.
5351 FmaddFmsubHelper(0, kFP32PositiveInfinity, qa,
5352 kFP32DefaultNaN, kFP32DefaultNaN,
5353 kFP32DefaultNaN, kFP32DefaultNaN);
5354 FmaddFmsubHelper(kFP32PositiveInfinity, 0, qa,
5355 kFP32DefaultNaN, kFP32DefaultNaN,
5356 kFP32DefaultNaN, kFP32DefaultNaN);
5357 FmaddFmsubHelper(0, kFP32NegativeInfinity, qa,
5358 kFP32DefaultNaN, kFP32DefaultNaN,
5359 kFP32DefaultNaN, kFP32DefaultNaN);
5360 FmaddFmsubHelper(kFP32NegativeInfinity, 0, qa,
5361 kFP32DefaultNaN, kFP32DefaultNaN,
5362 kFP32DefaultNaN, kFP32DefaultNaN);
5363 }
5364
5365
5274 TEST(fdiv) { 5366 TEST(fdiv) {
5275 INIT_V8(); 5367 INIT_V8();
5276 SETUP(); 5368 SETUP();
5277 5369
5278 START(); 5370 START();
5279 __ Fmov(s13, -0.0); 5371 __ Fmov(s14, -0.0f);
5280 __ Fmov(s14, kFP32PositiveInfinity); 5372 __ Fmov(s15, kFP32PositiveInfinity);
5281 __ Fmov(s15, kFP32NegativeInfinity); 5373 __ Fmov(s16, kFP32NegativeInfinity);
5282 __ Fmov(s16, 3.25); 5374 __ Fmov(s17, 3.25f);
5283 __ Fmov(s17, 2.0); 5375 __ Fmov(s18, 2.0f);
5284 __ Fmov(s18, 2.0); 5376 __ Fmov(s19, 2.0f);
5285 __ Fmov(s19, -2.0); 5377 __ Fmov(s20, -2.0f);
5286 5378
5287 __ Fmov(d26, -0.0); 5379 __ Fmov(d26, -0.0);
5288 __ Fmov(d27, kFP64PositiveInfinity); 5380 __ Fmov(d27, kFP64PositiveInfinity);
5289 __ Fmov(d28, kFP64NegativeInfinity); 5381 __ Fmov(d28, kFP64NegativeInfinity);
5290 __ Fmov(d29, 0); 5382 __ Fmov(d29, 0.0);
5291 __ Fmov(d30, -2.0); 5383 __ Fmov(d30, -2.0);
5292 __ Fmov(d31, 2.25); 5384 __ Fmov(d31, 2.25);
5293 5385
5294 __ Fdiv(s0, s16, s17); 5386 __ Fdiv(s0, s17, s18);
5295 __ Fdiv(s1, s17, s18); 5387 __ Fdiv(s1, s18, s19);
5296 __ Fdiv(s2, s13, s17); 5388 __ Fdiv(s2, s14, s18);
5297 __ Fdiv(s3, s17, s14); 5389 __ Fdiv(s3, s18, s15);
5298 __ Fdiv(s4, s17, s15); 5390 __ Fdiv(s4, s18, s16);
5299 __ Fdiv(d5, d31, d30); 5391 __ Fdiv(s5, s15, s16);
5300 __ Fdiv(d6, d29, d31); 5392 __ Fdiv(s6, s14, s14);
5301 __ Fdiv(d7, d26, d31); 5393
5302 __ Fdiv(d8, d31, d27); 5394 __ Fdiv(d7, d31, d30);
5303 __ Fdiv(d9, d31, d28); 5395 __ Fdiv(d8, d29, d31);
5396 __ Fdiv(d9, d26, d31);
5397 __ Fdiv(d10, d31, d27);
5398 __ Fdiv(d11, d31, d28);
5399 __ Fdiv(d12, d28, d27);
5400 __ Fdiv(d13, d29, d29);
5304 END(); 5401 END();
5305 5402
5306 RUN(); 5403 RUN();
5307 5404
5308 ASSERT_EQUAL_FP32(1.625, s0); 5405 ASSERT_EQUAL_FP32(1.625f, s0);
5309 ASSERT_EQUAL_FP32(1.0, s1); 5406 ASSERT_EQUAL_FP32(1.0f, s1);
5310 ASSERT_EQUAL_FP32(-0.0, s2); 5407 ASSERT_EQUAL_FP32(-0.0f, s2);
5311 ASSERT_EQUAL_FP32(0.0, s3); 5408 ASSERT_EQUAL_FP32(0.0f, s3);
5312 ASSERT_EQUAL_FP32(-0.0, s4); 5409 ASSERT_EQUAL_FP32(-0.0f, s4);
5313 ASSERT_EQUAL_FP64(-1.125, d5); 5410 ASSERT_EQUAL_FP32(kFP32DefaultNaN, s5);
5314 ASSERT_EQUAL_FP64(0.0, d6); 5411 ASSERT_EQUAL_FP32(kFP32DefaultNaN, s6);
5315 ASSERT_EQUAL_FP64(-0.0, d7); 5412 ASSERT_EQUAL_FP64(-1.125, d7);
5316 ASSERT_EQUAL_FP64(0.0, d8); 5413 ASSERT_EQUAL_FP64(0.0, d8);
5317 ASSERT_EQUAL_FP64(-0.0, d9); 5414 ASSERT_EQUAL_FP64(-0.0, d9);
5415 ASSERT_EQUAL_FP64(0.0, d10);
5416 ASSERT_EQUAL_FP64(-0.0, d11);
5417 ASSERT_EQUAL_FP64(kFP64DefaultNaN, d12);
5418 ASSERT_EQUAL_FP64(kFP64DefaultNaN, d13);
5318 5419
5319 TEARDOWN(); 5420 TEARDOWN();
5320 } 5421 }
5321 5422
5322 5423
5323 static float MinMaxHelper(float n, 5424 static float MinMaxHelper(float n,
5324 float m, 5425 float m,
5325 bool min, 5426 bool min,
5326 float quiet_nan_substitute = 0.0) { 5427 float quiet_nan_substitute = 0.0) {
5327 const uint64_t kFP32QuietNaNMask = 0x00400000UL;
5328 uint32_t raw_n = float_to_rawbits(n); 5428 uint32_t raw_n = float_to_rawbits(n);
5329 uint32_t raw_m = float_to_rawbits(m); 5429 uint32_t raw_m = float_to_rawbits(m);
5330 5430
5331 if (std::isnan(n) && ((raw_n & kFP32QuietNaNMask) == 0)) { 5431 if (std::isnan(n) && ((raw_n & kSQuietNanMask) == 0)) {
5332 // n is signalling NaN. 5432 // n is signalling NaN.
5333 return n; 5433 return rawbits_to_float(raw_n | kSQuietNanMask);
5334 } else if (std::isnan(m) && ((raw_m & kFP32QuietNaNMask) == 0)) { 5434 } else if (std::isnan(m) && ((raw_m & kSQuietNanMask) == 0)) {
5335 // m is signalling NaN. 5435 // m is signalling NaN.
5336 return m; 5436 return rawbits_to_float(raw_m | kSQuietNanMask);
5337 } else if (quiet_nan_substitute == 0.0) { 5437 } else if (quiet_nan_substitute == 0.0) {
5338 if (std::isnan(n)) { 5438 if (std::isnan(n)) {
5339 // n is quiet NaN. 5439 // n is quiet NaN.
5340 return n; 5440 return n;
5341 } else if (std::isnan(m)) { 5441 } else if (std::isnan(m)) {
5342 // m is quiet NaN. 5442 // m is quiet NaN.
5343 return m; 5443 return m;
5344 } 5444 }
5345 } else { 5445 } else {
5346 // Substitute n or m if one is quiet, but not both. 5446 // Substitute n or m if one is quiet, but not both.
(...skipping 12 matching lines...) Expand all
5359 } 5459 }
5360 5460
5361 return min ? fminf(n, m) : fmaxf(n, m); 5461 return min ? fminf(n, m) : fmaxf(n, m);
5362 } 5462 }
5363 5463
5364 5464
5365 static double MinMaxHelper(double n, 5465 static double MinMaxHelper(double n,
5366 double m, 5466 double m,
5367 bool min, 5467 bool min,
5368 double quiet_nan_substitute = 0.0) { 5468 double quiet_nan_substitute = 0.0) {
5369 const uint64_t kFP64QuietNaNMask = 0x0008000000000000UL;
5370 uint64_t raw_n = double_to_rawbits(n); 5469 uint64_t raw_n = double_to_rawbits(n);
5371 uint64_t raw_m = double_to_rawbits(m); 5470 uint64_t raw_m = double_to_rawbits(m);
5372 5471
5373 if (std::isnan(n) && ((raw_n & kFP64QuietNaNMask) == 0)) { 5472 if (std::isnan(n) && ((raw_n & kDQuietNanMask) == 0)) {
5374 // n is signalling NaN. 5473 // n is signalling NaN.
5375 return n; 5474 return rawbits_to_double(raw_n | kDQuietNanMask);
5376 } else if (std::isnan(m) && ((raw_m & kFP64QuietNaNMask) == 0)) { 5475 } else if (std::isnan(m) && ((raw_m & kDQuietNanMask) == 0)) {
5377 // m is signalling NaN. 5476 // m is signalling NaN.
5378 return m; 5477 return rawbits_to_double(raw_m | kDQuietNanMask);
5379 } else if (quiet_nan_substitute == 0.0) { 5478 } else if (quiet_nan_substitute == 0.0) {
5380 if (std::isnan(n)) { 5479 if (std::isnan(n)) {
5381 // n is quiet NaN. 5480 // n is quiet NaN.
5382 return n; 5481 return n;
5383 } else if (std::isnan(m)) { 5482 } else if (std::isnan(m)) {
5384 // m is quiet NaN. 5483 // m is quiet NaN.
5385 return m; 5484 return m;
5386 } 5485 }
5387 } else { 5486 } else {
5388 // Substitute n or m if one is quiet, but not both. 5487 // Substitute n or m if one is quiet, but not both.
(...skipping 34 matching lines...) Expand 10 before | Expand all | Expand 10 after
5423 ASSERT_EQUAL_FP64(max, d29); 5522 ASSERT_EQUAL_FP64(max, d29);
5424 ASSERT_EQUAL_FP64(minnm, d30); 5523 ASSERT_EQUAL_FP64(minnm, d30);
5425 ASSERT_EQUAL_FP64(maxnm, d31); 5524 ASSERT_EQUAL_FP64(maxnm, d31);
5426 5525
5427 TEARDOWN(); 5526 TEARDOWN();
5428 } 5527 }
5429 5528
5430 5529
5431 TEST(fmax_fmin_d) { 5530 TEST(fmax_fmin_d) {
5432 INIT_V8(); 5531 INIT_V8();
5532 // Use non-standard NaNs to check that the payload bits are preserved.
5533 double snan = rawbits_to_double(0x7ff5555512345678);
5534 double qnan = rawbits_to_double(0x7ffaaaaa87654321);
5535
5536 double snan_processed = rawbits_to_double(0x7ffd555512345678);
5537 double qnan_processed = qnan;
5538
5539 ASSERT(IsSignallingNaN(snan));
5540 ASSERT(IsQuietNaN(qnan));
5541 ASSERT(IsQuietNaN(snan_processed));
5542 ASSERT(IsQuietNaN(qnan_processed));
5543
5433 // Bootstrap tests. 5544 // Bootstrap tests.
5434 FminFmaxDoubleHelper(0, 0, 0, 0, 0, 0); 5545 FminFmaxDoubleHelper(0, 0, 0, 0, 0, 0);
5435 FminFmaxDoubleHelper(0, 1, 0, 1, 0, 1); 5546 FminFmaxDoubleHelper(0, 1, 0, 1, 0, 1);
5436 FminFmaxDoubleHelper(kFP64PositiveInfinity, kFP64NegativeInfinity, 5547 FminFmaxDoubleHelper(kFP64PositiveInfinity, kFP64NegativeInfinity,
5437 kFP64NegativeInfinity, kFP64PositiveInfinity, 5548 kFP64NegativeInfinity, kFP64PositiveInfinity,
5438 kFP64NegativeInfinity, kFP64PositiveInfinity); 5549 kFP64NegativeInfinity, kFP64PositiveInfinity);
5439 FminFmaxDoubleHelper(kFP64SignallingNaN, 0, 5550 FminFmaxDoubleHelper(snan, 0,
5440 kFP64SignallingNaN, kFP64SignallingNaN, 5551 snan_processed, snan_processed,
5441 kFP64SignallingNaN, kFP64SignallingNaN); 5552 snan_processed, snan_processed);
5442 FminFmaxDoubleHelper(kFP64QuietNaN, 0, 5553 FminFmaxDoubleHelper(0, snan,
5443 kFP64QuietNaN, kFP64QuietNaN, 5554 snan_processed, snan_processed,
5555 snan_processed, snan_processed);
5556 FminFmaxDoubleHelper(qnan, 0,
5557 qnan_processed, qnan_processed,
5444 0, 0); 5558 0, 0);
5445 FminFmaxDoubleHelper(kFP64QuietNaN, kFP64SignallingNaN, 5559 FminFmaxDoubleHelper(0, qnan,
5446 kFP64SignallingNaN, kFP64SignallingNaN, 5560 qnan_processed, qnan_processed,
5447 kFP64SignallingNaN, kFP64SignallingNaN); 5561 0, 0);
5562 FminFmaxDoubleHelper(qnan, snan,
5563 snan_processed, snan_processed,
5564 snan_processed, snan_processed);
5565 FminFmaxDoubleHelper(snan, qnan,
5566 snan_processed, snan_processed,
5567 snan_processed, snan_processed);
5448 5568
5449 // Iterate over all combinations of inputs. 5569 // Iterate over all combinations of inputs.
5450 double inputs[] = { DBL_MAX, DBL_MIN, 1.0, 0.0, 5570 double inputs[] = { DBL_MAX, DBL_MIN, 1.0, 0.0,
5451 -DBL_MAX, -DBL_MIN, -1.0, -0.0, 5571 -DBL_MAX, -DBL_MIN, -1.0, -0.0,
5452 kFP64PositiveInfinity, kFP64NegativeInfinity, 5572 kFP64PositiveInfinity, kFP64NegativeInfinity,
5453 kFP64QuietNaN, kFP64SignallingNaN }; 5573 kFP64QuietNaN, kFP64SignallingNaN };
5454 5574
5455 const int count = sizeof(inputs) / sizeof(inputs[0]); 5575 const int count = sizeof(inputs) / sizeof(inputs[0]);
5456 5576
5457 for (int in = 0; in < count; in++) { 5577 for (int in = 0; in < count; in++) {
5458 double n = inputs[in]; 5578 double n = inputs[in];
5459 for (int im = 0; im < count; im++) { 5579 for (int im = 0; im < count; im++) {
5460 double m = inputs[im]; 5580 double m = inputs[im];
5461 FminFmaxDoubleHelper(n, m, 5581 FminFmaxDoubleHelper(n, m,
5462 MinMaxHelper(n, m, true), 5582 MinMaxHelper(n, m, true),
5463 MinMaxHelper(n, m, false), 5583 MinMaxHelper(n, m, false),
5464 MinMaxHelper(n, m, true, kFP64PositiveInfinity), 5584 MinMaxHelper(n, m, true, kFP64PositiveInfinity),
5465 MinMaxHelper(n, m, false, kFP64NegativeInfinity)); 5585 MinMaxHelper(n, m, false, kFP64NegativeInfinity));
5466 } 5586 }
5467 } 5587 }
5468 } 5588 }
5469 5589
5470 5590
5471 static void FminFmaxFloatHelper(float n, float m, float min, float max, 5591 static void FminFmaxFloatHelper(float n, float m, float min, float max,
5472 float minnm, float maxnm) { 5592 float minnm, float maxnm) {
5473 SETUP(); 5593 SETUP();
5474 5594
5475 START(); 5595 START();
5476 // TODO(all): Signalling NaNs are sometimes converted by the C compiler to 5596 __ Fmov(s0, n);
5477 // quiet NaNs on implicit casts from float to double. Here, we move the raw 5597 __ Fmov(s1, m);
5478 // bits into a W register first, so we get the correct value. Fix Fmov so this
5479 // additional step is no longer needed.
5480 __ Mov(w0, float_to_rawbits(n));
5481 __ Fmov(s0, w0);
5482 __ Mov(w0, float_to_rawbits(m));
5483 __ Fmov(s1, w0);
5484 __ Fmin(s28, s0, s1); 5598 __ Fmin(s28, s0, s1);
5485 __ Fmax(s29, s0, s1); 5599 __ Fmax(s29, s0, s1);
5486 __ Fminnm(s30, s0, s1); 5600 __ Fminnm(s30, s0, s1);
5487 __ Fmaxnm(s31, s0, s1); 5601 __ Fmaxnm(s31, s0, s1);
5488 END(); 5602 END();
5489 5603
5490 RUN(); 5604 RUN();
5491 5605
5492 ASSERT_EQUAL_FP32(min, s28); 5606 ASSERT_EQUAL_FP32(min, s28);
5493 ASSERT_EQUAL_FP32(max, s29); 5607 ASSERT_EQUAL_FP32(max, s29);
5494 ASSERT_EQUAL_FP32(minnm, s30); 5608 ASSERT_EQUAL_FP32(minnm, s30);
5495 ASSERT_EQUAL_FP32(maxnm, s31); 5609 ASSERT_EQUAL_FP32(maxnm, s31);
5496 5610
5497 TEARDOWN(); 5611 TEARDOWN();
5498 } 5612 }
5499 5613
5500 5614
5501 TEST(fmax_fmin_s) { 5615 TEST(fmax_fmin_s) {
5502 INIT_V8(); 5616 INIT_V8();
5617 // Use non-standard NaNs to check that the payload bits are preserved.
5618 float snan = rawbits_to_float(0x7f951234);
5619 float qnan = rawbits_to_float(0x7fea8765);
5620
5621 float snan_processed = rawbits_to_float(0x7fd51234);
5622 float qnan_processed = qnan;
5623
5624 ASSERT(IsSignallingNaN(snan));
5625 ASSERT(IsQuietNaN(qnan));
5626 ASSERT(IsQuietNaN(snan_processed));
5627 ASSERT(IsQuietNaN(qnan_processed));
5628
5503 // Bootstrap tests. 5629 // Bootstrap tests.
5504 FminFmaxFloatHelper(0, 0, 0, 0, 0, 0); 5630 FminFmaxFloatHelper(0, 0, 0, 0, 0, 0);
5505 FminFmaxFloatHelper(0, 1, 0, 1, 0, 1); 5631 FminFmaxFloatHelper(0, 1, 0, 1, 0, 1);
5506 FminFmaxFloatHelper(kFP32PositiveInfinity, kFP32NegativeInfinity, 5632 FminFmaxFloatHelper(kFP32PositiveInfinity, kFP32NegativeInfinity,
5507 kFP32NegativeInfinity, kFP32PositiveInfinity, 5633 kFP32NegativeInfinity, kFP32PositiveInfinity,
5508 kFP32NegativeInfinity, kFP32PositiveInfinity); 5634 kFP32NegativeInfinity, kFP32PositiveInfinity);
5509 FminFmaxFloatHelper(kFP32SignallingNaN, 0, 5635 FminFmaxFloatHelper(snan, 0,
5510 kFP32SignallingNaN, kFP32SignallingNaN, 5636 snan_processed, snan_processed,
5511 kFP32SignallingNaN, kFP32SignallingNaN); 5637 snan_processed, snan_processed);
5512 FminFmaxFloatHelper(kFP32QuietNaN, 0, 5638 FminFmaxFloatHelper(0, snan,
5513 kFP32QuietNaN, kFP32QuietNaN, 5639 snan_processed, snan_processed,
5640 snan_processed, snan_processed);
5641 FminFmaxFloatHelper(qnan, 0,
5642 qnan_processed, qnan_processed,
5514 0, 0); 5643 0, 0);
5515 FminFmaxFloatHelper(kFP32QuietNaN, kFP32SignallingNaN, 5644 FminFmaxFloatHelper(0, qnan,
5516 kFP32SignallingNaN, kFP32SignallingNaN, 5645 qnan_processed, qnan_processed,
5517 kFP32SignallingNaN, kFP32SignallingNaN); 5646 0, 0);
5647 FminFmaxFloatHelper(qnan, snan,
5648 snan_processed, snan_processed,
5649 snan_processed, snan_processed);
5650 FminFmaxFloatHelper(snan, qnan,
5651 snan_processed, snan_processed,
5652 snan_processed, snan_processed);
5518 5653
5519 // Iterate over all combinations of inputs. 5654 // Iterate over all combinations of inputs.
5520 float inputs[] = { FLT_MAX, FLT_MIN, 1.0, 0.0, 5655 float inputs[] = { FLT_MAX, FLT_MIN, 1.0, 0.0,
5521 -FLT_MAX, -FLT_MIN, -1.0, -0.0, 5656 -FLT_MAX, -FLT_MIN, -1.0, -0.0,
5522 kFP32PositiveInfinity, kFP32NegativeInfinity, 5657 kFP32PositiveInfinity, kFP32NegativeInfinity,
5523 kFP32QuietNaN, kFP32SignallingNaN }; 5658 kFP32QuietNaN, kFP32SignallingNaN };
5524 5659
5525 const int count = sizeof(inputs) / sizeof(inputs[0]); 5660 const int count = sizeof(inputs) / sizeof(inputs[0]);
5526 5661
5527 for (int in = 0; in < count; in++) { 5662 for (int in = 0; in < count; in++) {
(...skipping 79 matching lines...) Expand 10 before | Expand all | Expand 10 after
5607 5742
5608 5743
5609 TEST(fcmp) { 5744 TEST(fcmp) {
5610 INIT_V8(); 5745 INIT_V8();
5611 SETUP(); 5746 SETUP();
5612 5747
5613 START(); 5748 START();
5614 5749
5615 // Some of these tests require a floating-point scratch register assigned to 5750 // Some of these tests require a floating-point scratch register assigned to
5616 // the macro assembler, but most do not. 5751 // the macro assembler, but most do not.
5617 __ SetFPScratchRegister(NoFPReg); 5752 {
5753 // We're going to mess around with the available scratch registers in this
5754 // test. A UseScratchRegisterScope will make sure that they are restored to
5755 // the default values once we're finished.
5756 UseScratchRegisterScope temps(&masm);
5757 masm.FPTmpList()->set_list(0);
5618 5758
5619 __ Fmov(s8, 0.0); 5759 __ Fmov(s8, 0.0);
5620 __ Fmov(s9, 0.5); 5760 __ Fmov(s9, 0.5);
5621 __ Mov(w18, 0x7f800001); // Single precision NaN. 5761 __ Mov(w18, 0x7f800001); // Single precision NaN.
5622 __ Fmov(s18, w18); 5762 __ Fmov(s18, w18);
5623 5763
5624 __ Fcmp(s8, s8); 5764 __ Fcmp(s8, s8);
5625 __ Mrs(x0, NZCV); 5765 __ Mrs(x0, NZCV);
5626 __ Fcmp(s8, s9); 5766 __ Fcmp(s8, s9);
5627 __ Mrs(x1, NZCV); 5767 __ Mrs(x1, NZCV);
5628 __ Fcmp(s9, s8); 5768 __ Fcmp(s9, s8);
5629 __ Mrs(x2, NZCV); 5769 __ Mrs(x2, NZCV);
5630 __ Fcmp(s8, s18); 5770 __ Fcmp(s8, s18);
5631 __ Mrs(x3, NZCV); 5771 __ Mrs(x3, NZCV);
5632 __ Fcmp(s18, s18); 5772 __ Fcmp(s18, s18);
5633 __ Mrs(x4, NZCV); 5773 __ Mrs(x4, NZCV);
5634 __ Fcmp(s8, 0.0); 5774 __ Fcmp(s8, 0.0);
5635 __ Mrs(x5, NZCV); 5775 __ Mrs(x5, NZCV);
5636 __ SetFPScratchRegister(d0); 5776 masm.FPTmpList()->set_list(d0.Bit());
5637 __ Fcmp(s8, 255.0); 5777 __ Fcmp(s8, 255.0);
5638 __ SetFPScratchRegister(NoFPReg); 5778 masm.FPTmpList()->set_list(0);
5639 __ Mrs(x6, NZCV); 5779 __ Mrs(x6, NZCV);
5640 5780
5641 __ Fmov(d19, 0.0); 5781 __ Fmov(d19, 0.0);
5642 __ Fmov(d20, 0.5); 5782 __ Fmov(d20, 0.5);
5643 __ Mov(x21, 0x7ff0000000000001UL); // Double precision NaN. 5783 __ Mov(x21, 0x7ff0000000000001UL); // Double precision NaN.
5644 __ Fmov(d21, x21); 5784 __ Fmov(d21, x21);
5645 5785
5646 __ Fcmp(d19, d19); 5786 __ Fcmp(d19, d19);
5647 __ Mrs(x10, NZCV); 5787 __ Mrs(x10, NZCV);
5648 __ Fcmp(d19, d20); 5788 __ Fcmp(d19, d20);
5649 __ Mrs(x11, NZCV); 5789 __ Mrs(x11, NZCV);
5650 __ Fcmp(d20, d19); 5790 __ Fcmp(d20, d19);
5651 __ Mrs(x12, NZCV); 5791 __ Mrs(x12, NZCV);
5652 __ Fcmp(d19, d21); 5792 __ Fcmp(d19, d21);
5653 __ Mrs(x13, NZCV); 5793 __ Mrs(x13, NZCV);
5654 __ Fcmp(d21, d21); 5794 __ Fcmp(d21, d21);
5655 __ Mrs(x14, NZCV); 5795 __ Mrs(x14, NZCV);
5656 __ Fcmp(d19, 0.0); 5796 __ Fcmp(d19, 0.0);
5657 __ Mrs(x15, NZCV); 5797 __ Mrs(x15, NZCV);
5658 __ SetFPScratchRegister(d0); 5798 masm.FPTmpList()->set_list(d0.Bit());
5659 __ Fcmp(d19, 12.3456); 5799 __ Fcmp(d19, 12.3456);
5660 __ SetFPScratchRegister(NoFPReg); 5800 masm.FPTmpList()->set_list(0);
5661 __ Mrs(x16, NZCV); 5801 __ Mrs(x16, NZCV);
5802 }
5803
5662 END(); 5804 END();
5663 5805
5664 RUN(); 5806 RUN();
5665 5807
5666 ASSERT_EQUAL_32(ZCFlag, w0); 5808 ASSERT_EQUAL_32(ZCFlag, w0);
5667 ASSERT_EQUAL_32(NFlag, w1); 5809 ASSERT_EQUAL_32(NFlag, w1);
5668 ASSERT_EQUAL_32(CFlag, w2); 5810 ASSERT_EQUAL_32(CFlag, w2);
5669 ASSERT_EQUAL_32(CVFlag, w3); 5811 ASSERT_EQUAL_32(CVFlag, w3);
5670 ASSERT_EQUAL_32(CVFlag, w4); 5812 ASSERT_EQUAL_32(CVFlag, w4);
5671 ASSERT_EQUAL_32(ZCFlag, w5); 5813 ASSERT_EQUAL_32(ZCFlag, w5);
(...skipping 129 matching lines...) Expand 10 before | Expand all | Expand 10 after
5801 INIT_V8(); 5943 INIT_V8();
5802 SETUP(); 5944 SETUP();
5803 5945
5804 START(); 5946 START();
5805 __ Fmov(s16, 0.0); 5947 __ Fmov(s16, 0.0);
5806 __ Fmov(s17, 1.0); 5948 __ Fmov(s17, 1.0);
5807 __ Fmov(s18, 0.25); 5949 __ Fmov(s18, 0.25);
5808 __ Fmov(s19, 65536.0); 5950 __ Fmov(s19, 65536.0);
5809 __ Fmov(s20, -0.0); 5951 __ Fmov(s20, -0.0);
5810 __ Fmov(s21, kFP32PositiveInfinity); 5952 __ Fmov(s21, kFP32PositiveInfinity);
5811 __ Fmov(d22, 0.0); 5953 __ Fmov(s22, -1.0);
5812 __ Fmov(d23, 1.0); 5954 __ Fmov(d23, 0.0);
5813 __ Fmov(d24, 0.25); 5955 __ Fmov(d24, 1.0);
5814 __ Fmov(d25, 4294967296.0); 5956 __ Fmov(d25, 0.25);
5815 __ Fmov(d26, -0.0); 5957 __ Fmov(d26, 4294967296.0);
5816 __ Fmov(d27, kFP64PositiveInfinity); 5958 __ Fmov(d27, -0.0);
5959 __ Fmov(d28, kFP64PositiveInfinity);
5960 __ Fmov(d29, -1.0);
5817 5961
5818 __ Fsqrt(s0, s16); 5962 __ Fsqrt(s0, s16);
5819 __ Fsqrt(s1, s17); 5963 __ Fsqrt(s1, s17);
5820 __ Fsqrt(s2, s18); 5964 __ Fsqrt(s2, s18);
5821 __ Fsqrt(s3, s19); 5965 __ Fsqrt(s3, s19);
5822 __ Fsqrt(s4, s20); 5966 __ Fsqrt(s4, s20);
5823 __ Fsqrt(s5, s21); 5967 __ Fsqrt(s5, s21);
5824 __ Fsqrt(d6, d22); 5968 __ Fsqrt(s6, s22);
5825 __ Fsqrt(d7, d23); 5969 __ Fsqrt(d7, d23);
5826 __ Fsqrt(d8, d24); 5970 __ Fsqrt(d8, d24);
5827 __ Fsqrt(d9, d25); 5971 __ Fsqrt(d9, d25);
5828 __ Fsqrt(d10, d26); 5972 __ Fsqrt(d10, d26);
5829 __ Fsqrt(d11, d27); 5973 __ Fsqrt(d11, d27);
5974 __ Fsqrt(d12, d28);
5975 __ Fsqrt(d13, d29);
5830 END(); 5976 END();
5831 5977
5832 RUN(); 5978 RUN();
5833 5979
5834 ASSERT_EQUAL_FP32(0.0, s0); 5980 ASSERT_EQUAL_FP32(0.0, s0);
5835 ASSERT_EQUAL_FP32(1.0, s1); 5981 ASSERT_EQUAL_FP32(1.0, s1);
5836 ASSERT_EQUAL_FP32(0.5, s2); 5982 ASSERT_EQUAL_FP32(0.5, s2);
5837 ASSERT_EQUAL_FP32(256.0, s3); 5983 ASSERT_EQUAL_FP32(256.0, s3);
5838 ASSERT_EQUAL_FP32(-0.0, s4); 5984 ASSERT_EQUAL_FP32(-0.0, s4);
5839 ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s5); 5985 ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s5);
5840 ASSERT_EQUAL_FP64(0.0, d6); 5986 ASSERT_EQUAL_FP32(kFP32DefaultNaN, s6);
5841 ASSERT_EQUAL_FP64(1.0, d7); 5987 ASSERT_EQUAL_FP64(0.0, d7);
5842 ASSERT_EQUAL_FP64(0.5, d8); 5988 ASSERT_EQUAL_FP64(1.0, d8);
5843 ASSERT_EQUAL_FP64(65536.0, d9); 5989 ASSERT_EQUAL_FP64(0.5, d9);
5844 ASSERT_EQUAL_FP64(-0.0, d10); 5990 ASSERT_EQUAL_FP64(65536.0, d10);
5845 ASSERT_EQUAL_FP64(kFP32PositiveInfinity, d11); 5991 ASSERT_EQUAL_FP64(-0.0, d11);
5992 ASSERT_EQUAL_FP64(kFP32PositiveInfinity, d12);
5993 ASSERT_EQUAL_FP64(kFP64DefaultNaN, d13);
5846 5994
5847 TEARDOWN(); 5995 TEARDOWN();
5848 } 5996 }
5849 5997
5850 5998
5851 TEST(frinta) { 5999 TEST(frinta) {
5852 INIT_V8(); 6000 INIT_V8();
5853 SETUP(); 6001 SETUP();
5854 6002
5855 START(); 6003 START();
(...skipping 1280 matching lines...) Expand 10 before | Expand all | Expand 10 after
7136 __ Mov(x0, reinterpret_cast<int64_t>(results_scvtf_x)); 7284 __ Mov(x0, reinterpret_cast<int64_t>(results_scvtf_x));
7137 __ Mov(x1, reinterpret_cast<int64_t>(results_ucvtf_x)); 7285 __ Mov(x1, reinterpret_cast<int64_t>(results_ucvtf_x));
7138 __ Mov(x2, reinterpret_cast<int64_t>(results_scvtf_w)); 7286 __ Mov(x2, reinterpret_cast<int64_t>(results_scvtf_w));
7139 __ Mov(x3, reinterpret_cast<int64_t>(results_ucvtf_w)); 7287 __ Mov(x3, reinterpret_cast<int64_t>(results_ucvtf_w));
7140 7288
7141 __ Mov(x10, s64); 7289 __ Mov(x10, s64);
7142 7290
7143 // Corrupt the top word, in case it is accidentally used during W-register 7291 // Corrupt the top word, in case it is accidentally used during W-register
7144 // conversions. 7292 // conversions.
7145 __ Mov(x11, 0x5555555555555555); 7293 __ Mov(x11, 0x5555555555555555);
7146 __ Bfi(x11, x10, 0, kWRegSize); 7294 __ Bfi(x11, x10, 0, kWRegSizeInBits);
7147 7295
7148 // Test integer conversions. 7296 // Test integer conversions.
7149 __ Scvtf(d0, x10); 7297 __ Scvtf(d0, x10);
7150 __ Ucvtf(d1, x10); 7298 __ Ucvtf(d1, x10);
7151 __ Scvtf(d2, w11); 7299 __ Scvtf(d2, w11);
7152 __ Ucvtf(d3, w11); 7300 __ Ucvtf(d3, w11);
7153 __ Str(d0, MemOperand(x0)); 7301 __ Str(d0, MemOperand(x0));
7154 __ Str(d1, MemOperand(x1)); 7302 __ Str(d1, MemOperand(x1));
7155 __ Str(d2, MemOperand(x2)); 7303 __ Str(d2, MemOperand(x2));
7156 __ Str(d3, MemOperand(x3)); 7304 __ Str(d3, MemOperand(x3));
7157 7305
7158 // Test all possible values of fbits. 7306 // Test all possible values of fbits.
7159 for (int fbits = 1; fbits <= 32; fbits++) { 7307 for (int fbits = 1; fbits <= 32; fbits++) {
7160 __ Scvtf(d0, x10, fbits); 7308 __ Scvtf(d0, x10, fbits);
7161 __ Ucvtf(d1, x10, fbits); 7309 __ Ucvtf(d1, x10, fbits);
7162 __ Scvtf(d2, w11, fbits); 7310 __ Scvtf(d2, w11, fbits);
7163 __ Ucvtf(d3, w11, fbits); 7311 __ Ucvtf(d3, w11, fbits);
7164 __ Str(d0, MemOperand(x0, fbits * kDRegSizeInBytes)); 7312 __ Str(d0, MemOperand(x0, fbits * kDRegSize));
7165 __ Str(d1, MemOperand(x1, fbits * kDRegSizeInBytes)); 7313 __ Str(d1, MemOperand(x1, fbits * kDRegSize));
7166 __ Str(d2, MemOperand(x2, fbits * kDRegSizeInBytes)); 7314 __ Str(d2, MemOperand(x2, fbits * kDRegSize));
7167 __ Str(d3, MemOperand(x3, fbits * kDRegSizeInBytes)); 7315 __ Str(d3, MemOperand(x3, fbits * kDRegSize));
7168 } 7316 }
7169 7317
7170 // Conversions from W registers can only handle fbits values <= 32, so just 7318 // Conversions from W registers can only handle fbits values <= 32, so just
7171 // test conversions from X registers for 32 < fbits <= 64. 7319 // test conversions from X registers for 32 < fbits <= 64.
7172 for (int fbits = 33; fbits <= 64; fbits++) { 7320 for (int fbits = 33; fbits <= 64; fbits++) {
7173 __ Scvtf(d0, x10, fbits); 7321 __ Scvtf(d0, x10, fbits);
7174 __ Ucvtf(d1, x10, fbits); 7322 __ Ucvtf(d1, x10, fbits);
7175 __ Str(d0, MemOperand(x0, fbits * kDRegSizeInBytes)); 7323 __ Str(d0, MemOperand(x0, fbits * kDRegSize));
7176 __ Str(d1, MemOperand(x1, fbits * kDRegSizeInBytes)); 7324 __ Str(d1, MemOperand(x1, fbits * kDRegSize));
7177 } 7325 }
7178 7326
7179 END(); 7327 END();
7180 RUN(); 7328 RUN();
7181 7329
7182 // Check the results. 7330 // Check the results.
7183 double expected_scvtf_base = rawbits_to_double(expected_scvtf_bits); 7331 double expected_scvtf_base = rawbits_to_double(expected_scvtf_bits);
7184 double expected_ucvtf_base = rawbits_to_double(expected_ucvtf_bits); 7332 double expected_ucvtf_base = rawbits_to_double(expected_ucvtf_bits);
7185 7333
7186 for (int fbits = 0; fbits <= 32; fbits++) { 7334 for (int fbits = 0; fbits <= 32; fbits++) {
(...skipping 104 matching lines...) Expand 10 before | Expand all | Expand 10 after
7291 __ Mov(x0, reinterpret_cast<int64_t>(results_scvtf_x)); 7439 __ Mov(x0, reinterpret_cast<int64_t>(results_scvtf_x));
7292 __ Mov(x1, reinterpret_cast<int64_t>(results_ucvtf_x)); 7440 __ Mov(x1, reinterpret_cast<int64_t>(results_ucvtf_x));
7293 __ Mov(x2, reinterpret_cast<int64_t>(results_scvtf_w)); 7441 __ Mov(x2, reinterpret_cast<int64_t>(results_scvtf_w));
7294 __ Mov(x3, reinterpret_cast<int64_t>(results_ucvtf_w)); 7442 __ Mov(x3, reinterpret_cast<int64_t>(results_ucvtf_w));
7295 7443
7296 __ Mov(x10, s64); 7444 __ Mov(x10, s64);
7297 7445
7298 // Corrupt the top word, in case it is accidentally used during W-register 7446 // Corrupt the top word, in case it is accidentally used during W-register
7299 // conversions. 7447 // conversions.
7300 __ Mov(x11, 0x5555555555555555); 7448 __ Mov(x11, 0x5555555555555555);
7301 __ Bfi(x11, x10, 0, kWRegSize); 7449 __ Bfi(x11, x10, 0, kWRegSizeInBits);
7302 7450
7303 // Test integer conversions. 7451 // Test integer conversions.
7304 __ Scvtf(s0, x10); 7452 __ Scvtf(s0, x10);
7305 __ Ucvtf(s1, x10); 7453 __ Ucvtf(s1, x10);
7306 __ Scvtf(s2, w11); 7454 __ Scvtf(s2, w11);
7307 __ Ucvtf(s3, w11); 7455 __ Ucvtf(s3, w11);
7308 __ Str(s0, MemOperand(x0)); 7456 __ Str(s0, MemOperand(x0));
7309 __ Str(s1, MemOperand(x1)); 7457 __ Str(s1, MemOperand(x1));
7310 __ Str(s2, MemOperand(x2)); 7458 __ Str(s2, MemOperand(x2));
7311 __ Str(s3, MemOperand(x3)); 7459 __ Str(s3, MemOperand(x3));
7312 7460
7313 // Test all possible values of fbits. 7461 // Test all possible values of fbits.
7314 for (int fbits = 1; fbits <= 32; fbits++) { 7462 for (int fbits = 1; fbits <= 32; fbits++) {
7315 __ Scvtf(s0, x10, fbits); 7463 __ Scvtf(s0, x10, fbits);
7316 __ Ucvtf(s1, x10, fbits); 7464 __ Ucvtf(s1, x10, fbits);
7317 __ Scvtf(s2, w11, fbits); 7465 __ Scvtf(s2, w11, fbits);
7318 __ Ucvtf(s3, w11, fbits); 7466 __ Ucvtf(s3, w11, fbits);
7319 __ Str(s0, MemOperand(x0, fbits * kSRegSizeInBytes)); 7467 __ Str(s0, MemOperand(x0, fbits * kSRegSize));
7320 __ Str(s1, MemOperand(x1, fbits * kSRegSizeInBytes)); 7468 __ Str(s1, MemOperand(x1, fbits * kSRegSize));
7321 __ Str(s2, MemOperand(x2, fbits * kSRegSizeInBytes)); 7469 __ Str(s2, MemOperand(x2, fbits * kSRegSize));
7322 __ Str(s3, MemOperand(x3, fbits * kSRegSizeInBytes)); 7470 __ Str(s3, MemOperand(x3, fbits * kSRegSize));
7323 } 7471 }
7324 7472
7325 // Conversions from W registers can only handle fbits values <= 32, so just 7473 // Conversions from W registers can only handle fbits values <= 32, so just
7326 // test conversions from X registers for 32 < fbits <= 64. 7474 // test conversions from X registers for 32 < fbits <= 64.
7327 for (int fbits = 33; fbits <= 64; fbits++) { 7475 for (int fbits = 33; fbits <= 64; fbits++) {
7328 __ Scvtf(s0, x10, fbits); 7476 __ Scvtf(s0, x10, fbits);
7329 __ Ucvtf(s1, x10, fbits); 7477 __ Ucvtf(s1, x10, fbits);
7330 __ Str(s0, MemOperand(x0, fbits * kSRegSizeInBytes)); 7478 __ Str(s0, MemOperand(x0, fbits * kSRegSize));
7331 __ Str(s1, MemOperand(x1, fbits * kSRegSizeInBytes)); 7479 __ Str(s1, MemOperand(x1, fbits * kSRegSize));
7332 } 7480 }
7333 7481
7334 END(); 7482 END();
7335 RUN(); 7483 RUN();
7336 7484
7337 // Check the results. 7485 // Check the results.
7338 float expected_scvtf_base = rawbits_to_float(expected_scvtf_bits); 7486 float expected_scvtf_base = rawbits_to_float(expected_scvtf_bits);
7339 float expected_ucvtf_base = rawbits_to_float(expected_ucvtf_bits); 7487 float expected_ucvtf_base = rawbits_to_float(expected_ucvtf_bits);
7340 7488
7341 for (int fbits = 0; fbits <= 32; fbits++) { 7489 for (int fbits = 0; fbits <= 32; fbits++) {
(...skipping 625 matching lines...) Expand 10 before | Expand all | Expand 10 after
7967 // x6 should match x1[31:0]:x0[63:32] 8115 // x6 should match x1[31:0]:x0[63:32]
7968 // w7 should match x1[15:0]:x0[63:48] 8116 // w7 should match x1[15:0]:x0[63:48]
7969 __ Poke(x1, 8); 8117 __ Poke(x1, 8);
7970 __ Poke(x0, 0); 8118 __ Poke(x0, 0);
7971 { 8119 {
7972 ASSERT(__ StackPointer().Is(csp)); 8120 ASSERT(__ StackPointer().Is(csp));
7973 __ Mov(x4, __ StackPointer()); 8121 __ Mov(x4, __ StackPointer());
7974 __ SetStackPointer(x4); 8122 __ SetStackPointer(x4);
7975 8123
7976 __ Poke(wzr, 0); // Clobber the space we're about to drop. 8124 __ Poke(wzr, 0); // Clobber the space we're about to drop.
7977 __ Drop(1, kWRegSizeInBytes); 8125 __ Drop(1, kWRegSize);
7978 __ Peek(x6, 0); 8126 __ Peek(x6, 0);
7979 __ Claim(1); 8127 __ Claim(1);
7980 __ Peek(w7, 10); 8128 __ Peek(w7, 10);
7981 __ Poke(x3, 28); 8129 __ Poke(x3, 28);
7982 __ Poke(xzr, 0); // Clobber the space we're about to drop. 8130 __ Poke(xzr, 0); // Clobber the space we're about to drop.
7983 __ Drop(1); 8131 __ Drop(1);
7984 __ Poke(x2, 12); 8132 __ Poke(x2, 12);
7985 __ Push(w0); 8133 __ Push(w0);
7986 8134
7987 __ Mov(csp, __ StackPointer()); 8135 __ Mov(csp, __ StackPointer());
(...skipping 161 matching lines...) Expand 10 before | Expand all | Expand 10 after
8149 } 8297 }
8150 8298
8151 TEARDOWN(); 8299 TEARDOWN();
8152 } 8300 }
8153 8301
8154 8302
8155 TEST(push_pop_jssp_simple_32) { 8303 TEST(push_pop_jssp_simple_32) {
8156 INIT_V8(); 8304 INIT_V8();
8157 for (int claim = 0; claim <= 8; claim++) { 8305 for (int claim = 0; claim <= 8; claim++) {
8158 for (int count = 0; count <= 8; count++) { 8306 for (int count = 0; count <= 8; count++) {
8159 PushPopJsspSimpleHelper(count, claim, kWRegSize, 8307 PushPopJsspSimpleHelper(count, claim, kWRegSizeInBits,
8160 PushPopByFour, PushPopByFour); 8308 PushPopByFour, PushPopByFour);
8161 PushPopJsspSimpleHelper(count, claim, kWRegSize, 8309 PushPopJsspSimpleHelper(count, claim, kWRegSizeInBits,
8162 PushPopByFour, PushPopRegList); 8310 PushPopByFour, PushPopRegList);
8163 PushPopJsspSimpleHelper(count, claim, kWRegSize, 8311 PushPopJsspSimpleHelper(count, claim, kWRegSizeInBits,
8164 PushPopRegList, PushPopByFour); 8312 PushPopRegList, PushPopByFour);
8165 PushPopJsspSimpleHelper(count, claim, kWRegSize, 8313 PushPopJsspSimpleHelper(count, claim, kWRegSizeInBits,
8166 PushPopRegList, PushPopRegList); 8314 PushPopRegList, PushPopRegList);
8167 } 8315 }
8168 // Test with the maximum number of registers. 8316 // Test with the maximum number of registers.
8169 PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kWRegSize, 8317 PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kWRegSizeInBits,
8170 PushPopByFour, PushPopByFour); 8318 PushPopByFour, PushPopByFour);
8171 PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kWRegSize, 8319 PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kWRegSizeInBits,
8172 PushPopByFour, PushPopRegList); 8320 PushPopByFour, PushPopRegList);
8173 PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kWRegSize, 8321 PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kWRegSizeInBits,
8174 PushPopRegList, PushPopByFour); 8322 PushPopRegList, PushPopByFour);
8175 PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kWRegSize, 8323 PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kWRegSizeInBits,
8176 PushPopRegList, PushPopRegList); 8324 PushPopRegList, PushPopRegList);
8177 } 8325 }
8178 } 8326 }
8179 8327
8180 8328
8181 TEST(push_pop_jssp_simple_64) { 8329 TEST(push_pop_jssp_simple_64) {
8182 INIT_V8(); 8330 INIT_V8();
8183 for (int claim = 0; claim <= 8; claim++) { 8331 for (int claim = 0; claim <= 8; claim++) {
8184 for (int count = 0; count <= 8; count++) { 8332 for (int count = 0; count <= 8; count++) {
8185 PushPopJsspSimpleHelper(count, claim, kXRegSize, 8333 PushPopJsspSimpleHelper(count, claim, kXRegSizeInBits,
8186 PushPopByFour, PushPopByFour); 8334 PushPopByFour, PushPopByFour);
8187 PushPopJsspSimpleHelper(count, claim, kXRegSize, 8335 PushPopJsspSimpleHelper(count, claim, kXRegSizeInBits,
8188 PushPopByFour, PushPopRegList); 8336 PushPopByFour, PushPopRegList);
8189 PushPopJsspSimpleHelper(count, claim, kXRegSize, 8337 PushPopJsspSimpleHelper(count, claim, kXRegSizeInBits,
8190 PushPopRegList, PushPopByFour); 8338 PushPopRegList, PushPopByFour);
8191 PushPopJsspSimpleHelper(count, claim, kXRegSize, 8339 PushPopJsspSimpleHelper(count, claim, kXRegSizeInBits,
8192 PushPopRegList, PushPopRegList); 8340 PushPopRegList, PushPopRegList);
8193 } 8341 }
8194 // Test with the maximum number of registers. 8342 // Test with the maximum number of registers.
8195 PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kXRegSize, 8343 PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kXRegSizeInBits,
8196 PushPopByFour, PushPopByFour); 8344 PushPopByFour, PushPopByFour);
8197 PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kXRegSize, 8345 PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kXRegSizeInBits,
8198 PushPopByFour, PushPopRegList); 8346 PushPopByFour, PushPopRegList);
8199 PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kXRegSize, 8347 PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kXRegSizeInBits,
8200 PushPopRegList, PushPopByFour); 8348 PushPopRegList, PushPopByFour);
8201 PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kXRegSize, 8349 PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kXRegSizeInBits,
8202 PushPopRegList, PushPopRegList); 8350 PushPopRegList, PushPopRegList);
8203 } 8351 }
8204 } 8352 }
8205 8353
8206 8354
8207 // The maximum number of registers that can be used by the PushPopFPJssp* tests, 8355 // The maximum number of registers that can be used by the PushPopFPJssp* tests,
8208 // where a reg_count field is provided. 8356 // where a reg_count field is provided.
8209 static int const kPushPopFPJsspMaxRegCount = -1; 8357 static int const kPushPopFPJsspMaxRegCount = -1;
8210 8358
8211 // Test a simple push-pop pattern: 8359 // Test a simple push-pop pattern:
(...skipping 120 matching lines...) Expand 10 before | Expand all | Expand 10 after
8332 } 8480 }
8333 8481
8334 TEARDOWN(); 8482 TEARDOWN();
8335 } 8483 }
8336 8484
8337 8485
8338 TEST(push_pop_fp_jssp_simple_32) { 8486 TEST(push_pop_fp_jssp_simple_32) {
8339 INIT_V8(); 8487 INIT_V8();
8340 for (int claim = 0; claim <= 8; claim++) { 8488 for (int claim = 0; claim <= 8; claim++) {
8341 for (int count = 0; count <= 8; count++) { 8489 for (int count = 0; count <= 8; count++) {
8342 PushPopFPJsspSimpleHelper(count, claim, kSRegSize, 8490 PushPopFPJsspSimpleHelper(count, claim, kSRegSizeInBits,
8343 PushPopByFour, PushPopByFour); 8491 PushPopByFour, PushPopByFour);
8344 PushPopFPJsspSimpleHelper(count, claim, kSRegSize, 8492 PushPopFPJsspSimpleHelper(count, claim, kSRegSizeInBits,
8345 PushPopByFour, PushPopRegList); 8493 PushPopByFour, PushPopRegList);
8346 PushPopFPJsspSimpleHelper(count, claim, kSRegSize, 8494 PushPopFPJsspSimpleHelper(count, claim, kSRegSizeInBits,
8347 PushPopRegList, PushPopByFour); 8495 PushPopRegList, PushPopByFour);
8348 PushPopFPJsspSimpleHelper(count, claim, kSRegSize, 8496 PushPopFPJsspSimpleHelper(count, claim, kSRegSizeInBits,
8349 PushPopRegList, PushPopRegList); 8497 PushPopRegList, PushPopRegList);
8350 } 8498 }
8351 // Test with the maximum number of registers. 8499 // Test with the maximum number of registers.
8352 PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kSRegSize, 8500 PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kSRegSizeInBits,
8353 PushPopByFour, PushPopByFour); 8501 PushPopByFour, PushPopByFour);
8354 PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kSRegSize, 8502 PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kSRegSizeInBits,
8355 PushPopByFour, PushPopRegList); 8503 PushPopByFour, PushPopRegList);
8356 PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kSRegSize, 8504 PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kSRegSizeInBits,
8357 PushPopRegList, PushPopByFour); 8505 PushPopRegList, PushPopByFour);
8358 PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kSRegSize, 8506 PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kSRegSizeInBits,
8359 PushPopRegList, PushPopRegList); 8507 PushPopRegList, PushPopRegList);
8360 } 8508 }
8361 } 8509 }
8362 8510
8363 8511
8364 TEST(push_pop_fp_jssp_simple_64) { 8512 TEST(push_pop_fp_jssp_simple_64) {
8365 INIT_V8(); 8513 INIT_V8();
8366 for (int claim = 0; claim <= 8; claim++) { 8514 for (int claim = 0; claim <= 8; claim++) {
8367 for (int count = 0; count <= 8; count++) { 8515 for (int count = 0; count <= 8; count++) {
8368 PushPopFPJsspSimpleHelper(count, claim, kDRegSize, 8516 PushPopFPJsspSimpleHelper(count, claim, kDRegSizeInBits,
8369 PushPopByFour, PushPopByFour); 8517 PushPopByFour, PushPopByFour);
8370 PushPopFPJsspSimpleHelper(count, claim, kDRegSize, 8518 PushPopFPJsspSimpleHelper(count, claim, kDRegSizeInBits,
8371 PushPopByFour, PushPopRegList); 8519 PushPopByFour, PushPopRegList);
8372 PushPopFPJsspSimpleHelper(count, claim, kDRegSize, 8520 PushPopFPJsspSimpleHelper(count, claim, kDRegSizeInBits,
8373 PushPopRegList, PushPopByFour); 8521 PushPopRegList, PushPopByFour);
8374 PushPopFPJsspSimpleHelper(count, claim, kDRegSize, 8522 PushPopFPJsspSimpleHelper(count, claim, kDRegSizeInBits,
8375 PushPopRegList, PushPopRegList); 8523 PushPopRegList, PushPopRegList);
8376 } 8524 }
8377 // Test with the maximum number of registers. 8525 // Test with the maximum number of registers.
8378 PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kDRegSize, 8526 PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kDRegSizeInBits,
8379 PushPopByFour, PushPopByFour); 8527 PushPopByFour, PushPopByFour);
8380 PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kDRegSize, 8528 PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kDRegSizeInBits,
8381 PushPopByFour, PushPopRegList); 8529 PushPopByFour, PushPopRegList);
8382 PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kDRegSize, 8530 PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kDRegSizeInBits,
8383 PushPopRegList, PushPopByFour); 8531 PushPopRegList, PushPopByFour);
8384 PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kDRegSize, 8532 PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kDRegSizeInBits,
8385 PushPopRegList, PushPopRegList); 8533 PushPopRegList, PushPopRegList);
8386 } 8534 }
8387 } 8535 }
8388 8536
8389 8537
8390 // Push and pop data using an overlapping combination of Push/Pop and 8538 // Push and pop data using an overlapping combination of Push/Pop and
8391 // RegList-based methods. 8539 // RegList-based methods.
8392 static void PushPopJsspMixedMethodsHelper(int claim, int reg_size) { 8540 static void PushPopJsspMixedMethodsHelper(int claim, int reg_size) {
8393 SETUP(); 8541 SETUP();
8394 8542
(...skipping 77 matching lines...) Expand 10 before | Expand all | Expand 10 after
8472 ASSERT_EQUAL_64(literal_base * 1, x[5]); 8620 ASSERT_EQUAL_64(literal_base * 1, x[5]);
8473 ASSERT_EQUAL_64(literal_base * 2, x[4]); 8621 ASSERT_EQUAL_64(literal_base * 2, x[4]);
8474 8622
8475 TEARDOWN(); 8623 TEARDOWN();
8476 } 8624 }
8477 8625
8478 8626
8479 TEST(push_pop_jssp_mixed_methods_64) { 8627 TEST(push_pop_jssp_mixed_methods_64) {
8480 INIT_V8(); 8628 INIT_V8();
8481 for (int claim = 0; claim <= 8; claim++) { 8629 for (int claim = 0; claim <= 8; claim++) {
8482 PushPopJsspMixedMethodsHelper(claim, kXRegSize); 8630 PushPopJsspMixedMethodsHelper(claim, kXRegSizeInBits);
8483 } 8631 }
8484 } 8632 }
8485 8633
8486 8634
8487 TEST(push_pop_jssp_mixed_methods_32) { 8635 TEST(push_pop_jssp_mixed_methods_32) {
8488 INIT_V8(); 8636 INIT_V8();
8489 for (int claim = 0; claim <= 8; claim++) { 8637 for (int claim = 0; claim <= 8; claim++) {
8490 PushPopJsspMixedMethodsHelper(claim, kWRegSize); 8638 PushPopJsspMixedMethodsHelper(claim, kWRegSizeInBits);
8491 } 8639 }
8492 } 8640 }
8493 8641
8494 8642
8495 // Push and pop data using overlapping X- and W-sized quantities. 8643 // Push and pop data using overlapping X- and W-sized quantities.
8496 static void PushPopJsspWXOverlapHelper(int reg_count, int claim) { 8644 static void PushPopJsspWXOverlapHelper(int reg_count, int claim) {
8497 // This test emits rather a lot of code. 8645 // This test emits rather a lot of code.
8498 SETUP_SIZE(BUF_SIZE * 2); 8646 SETUP_SIZE(BUF_SIZE * 2);
8499 8647
8500 // Work out which registers to use, based on reg_size. 8648 // Work out which registers to use, based on reg_size.
(...skipping 118 matching lines...) Expand 10 before | Expand all | Expand 10 after
8619 } else { 8767 } else {
8620 stack[active_w_slots++] = literal_base_hi * i; 8768 stack[active_w_slots++] = literal_base_hi * i;
8621 stack[active_w_slots++] = literal_base_lo * i; 8769 stack[active_w_slots++] = literal_base_lo * i;
8622 } 8770 }
8623 } 8771 }
8624 } 8772 }
8625 } 8773 }
8626 // Because we were pushing several registers at a time, we probably pushed 8774 // Because we were pushing several registers at a time, we probably pushed
8627 // more than we needed to. 8775 // more than we needed to.
8628 if (active_w_slots > requested_w_slots) { 8776 if (active_w_slots > requested_w_slots) {
8629 __ Drop(active_w_slots - requested_w_slots, kWRegSizeInBytes); 8777 __ Drop(active_w_slots - requested_w_slots, kWRegSize);
8630 // Bump the number of active W-sized slots back to where it should be, 8778 // Bump the number of active W-sized slots back to where it should be,
8631 // and fill the empty space with a dummy value. 8779 // and fill the empty space with a dummy value.
8632 do { 8780 do {
8633 stack[active_w_slots--] = 0xdeadbeef; 8781 stack[active_w_slots--] = 0xdeadbeef;
8634 } while (active_w_slots > requested_w_slots); 8782 } while (active_w_slots > requested_w_slots);
8635 } 8783 }
8636 8784
8637 // ---- Pop ---- 8785 // ---- Pop ----
8638 8786
8639 Clobber(&masm, list); 8787 Clobber(&masm, list);
(...skipping 194 matching lines...) Expand 10 before | Expand all | Expand 10 after
8834 __ Mov(w4, 0x12340004); 8982 __ Mov(w4, 0x12340004);
8835 __ Mov(w5, 0x12340005); 8983 __ Mov(w5, 0x12340005);
8836 __ Mov(w6, 0x12340006); 8984 __ Mov(w6, 0x12340006);
8837 __ Fmov(d0, 123400.0); 8985 __ Fmov(d0, 123400.0);
8838 __ Fmov(d1, 123401.0); 8986 __ Fmov(d1, 123401.0);
8839 __ Fmov(s2, 123402.0); 8987 __ Fmov(s2, 123402.0);
8840 8988
8841 // Actually push them. 8989 // Actually push them.
8842 queue.PushQueued(); 8990 queue.PushQueued();
8843 8991
8844 Clobber(&masm, CPURegList(CPURegister::kRegister, kXRegSize, 0, 6)); 8992 Clobber(&masm, CPURegList(CPURegister::kRegister, kXRegSizeInBits, 0, 6));
8845 Clobber(&masm, CPURegList(CPURegister::kFPRegister, kDRegSize, 0, 2)); 8993 Clobber(&masm, CPURegList(CPURegister::kFPRegister, kDRegSizeInBits, 0, 2));
8846 8994
8847 // Pop them conventionally. 8995 // Pop them conventionally.
8848 __ Pop(s2); 8996 __ Pop(s2);
8849 __ Pop(d1, d0); 8997 __ Pop(d1, d0);
8850 __ Pop(w6, w5, w4); 8998 __ Pop(w6, w5, w4);
8851 __ Pop(x3, x2, x1, x0); 8999 __ Pop(x3, x2, x1, x0);
8852 9000
8853 __ Mov(csp, __ StackPointer()); 9001 __ Mov(csp, __ StackPointer());
8854 __ SetStackPointer(csp); 9002 __ SetStackPointer(csp);
8855 9003
(...skipping 56 matching lines...) Expand 10 before | Expand all | Expand 10 after
8912 9060
8913 queue.Queue(w6); 9061 queue.Queue(w6);
8914 queue.Queue(w5); 9062 queue.Queue(w5);
8915 queue.Queue(w4); 9063 queue.Queue(w4);
8916 9064
8917 queue.Queue(x3); 9065 queue.Queue(x3);
8918 queue.Queue(x2); 9066 queue.Queue(x2);
8919 queue.Queue(x1); 9067 queue.Queue(x1);
8920 queue.Queue(x0); 9068 queue.Queue(x0);
8921 9069
8922 Clobber(&masm, CPURegList(CPURegister::kRegister, kXRegSize, 0, 6)); 9070 Clobber(&masm, CPURegList(CPURegister::kRegister, kXRegSizeInBits, 0, 6));
8923 Clobber(&masm, CPURegList(CPURegister::kFPRegister, kDRegSize, 0, 2)); 9071 Clobber(&masm, CPURegList(CPURegister::kFPRegister, kDRegSizeInBits, 0, 2));
8924 9072
8925 // Actually pop them. 9073 // Actually pop them.
8926 queue.PopQueued(); 9074 queue.PopQueued();
8927 9075
8928 __ Mov(csp, __ StackPointer()); 9076 __ Mov(csp, __ StackPointer());
8929 __ SetStackPointer(csp); 9077 __ SetStackPointer(csp);
8930 9078
8931 END(); 9079 END();
8932 9080
8933 RUN(); 9081 RUN();
(...skipping 471 matching lines...) Expand 10 before | Expand all | Expand 10 after
9405 } 9553 }
9406 9554
9407 9555
9408 TEST(cpureglist_utils_empty) { 9556 TEST(cpureglist_utils_empty) {
9409 // This test doesn't generate any code, but it verifies the behaviour of 9557 // This test doesn't generate any code, but it verifies the behaviour of
9410 // the CPURegList utility methods. 9558 // the CPURegList utility methods.
9411 9559
9412 // Test an empty list. 9560 // Test an empty list.
9413 // Empty lists can have type and size properties. Check that we can create 9561 // Empty lists can have type and size properties. Check that we can create
9414 // them, and that they are empty. 9562 // them, and that they are empty.
9415 CPURegList reg32(CPURegister::kRegister, kWRegSize, 0); 9563 CPURegList reg32(CPURegister::kRegister, kWRegSizeInBits, 0);
9416 CPURegList reg64(CPURegister::kRegister, kXRegSize, 0); 9564 CPURegList reg64(CPURegister::kRegister, kXRegSizeInBits, 0);
9417 CPURegList fpreg32(CPURegister::kFPRegister, kSRegSize, 0); 9565 CPURegList fpreg32(CPURegister::kFPRegister, kSRegSizeInBits, 0);
9418 CPURegList fpreg64(CPURegister::kFPRegister, kDRegSize, 0); 9566 CPURegList fpreg64(CPURegister::kFPRegister, kDRegSizeInBits, 0);
9419 9567
9420 CHECK(reg32.IsEmpty()); 9568 CHECK(reg32.IsEmpty());
9421 CHECK(reg64.IsEmpty()); 9569 CHECK(reg64.IsEmpty());
9422 CHECK(fpreg32.IsEmpty()); 9570 CHECK(fpreg32.IsEmpty());
9423 CHECK(fpreg64.IsEmpty()); 9571 CHECK(fpreg64.IsEmpty());
9424 9572
9425 CHECK(reg32.PopLowestIndex().IsNone()); 9573 CHECK(reg32.PopLowestIndex().IsNone());
9426 CHECK(reg64.PopLowestIndex().IsNone()); 9574 CHECK(reg64.PopLowestIndex().IsNone());
9427 CHECK(fpreg32.PopLowestIndex().IsNone()); 9575 CHECK(fpreg32.PopLowestIndex().IsNone());
9428 CHECK(fpreg64.PopLowestIndex().IsNone()); 9576 CHECK(fpreg64.PopLowestIndex().IsNone());
(...skipping 416 matching lines...) Expand 10 before | Expand all | Expand 10 after
9845 __ Isb(); 9993 __ Isb();
9846 9994
9847 END(); 9995 END();
9848 9996
9849 RUN(); 9997 RUN();
9850 9998
9851 TEARDOWN(); 9999 TEARDOWN();
9852 } 10000 }
9853 10001
9854 10002
// Check per-instruction NaN handling for doubles. Fmov/Fabs/Fneg must pass a
// NaN through bit-for-bit (modulo the sign bit), even a signalling NaN, while
// Fsqrt and the Frint* rounding instructions quieten a signalling NaN
// (0x7ff55555... -> 0x7ffd5555..., i.e. the top mantissa bit is set) and pass
// a quiet NaN through unchanged.
10003 TEST(process_nan_double) {
10004   INIT_V8();
10005   // Make sure that NaN propagation works correctly.
10006   double sn = rawbits_to_double(0x7ff5555511111111);
10007   double qn = rawbits_to_double(0x7ffaaaaa11111111);
10008   ASSERT(IsSignallingNaN(sn));
10009   ASSERT(IsQuietNaN(qn));
10010
10011   // The input NaNs after passing through ProcessNaN.
// sn_proc is sn with the quiet bit set; quiet NaNs are expected unchanged.
10012   double sn_proc = rawbits_to_double(0x7ffd555511111111);
10013   double qn_proc = qn;
10014   ASSERT(IsQuietNaN(sn_proc));
10015   ASSERT(IsQuietNaN(qn_proc));
10016
10017   SETUP();
10018   START();
10019
10020   // Execute a number of instructions which all use ProcessNaN, and check that
10021   // they all handle the NaN correctly.
10022   __ Fmov(d0, sn);
10023   __ Fmov(d10, qn);
10024
10025   // Operations that always propagate NaNs unchanged, even signalling NaNs.
10026   //   - Signalling NaN
10027   __ Fmov(d1, d0);
10028   __ Fabs(d2, d0);
10029   __ Fneg(d3, d0);
10030   //   - Quiet NaN
10031   __ Fmov(d11, d10);
10032   __ Fabs(d12, d10);
10033   __ Fneg(d13, d10);
10034
10035   // Operations that use ProcessNaN.
10036   //   - Signalling NaN
10037   __ Fsqrt(d4, d0);
10038   __ Frinta(d5, d0);
10039   __ Frintn(d6, d0);
10040   __ Frintz(d7, d0);
10041   //   - Quiet NaN
10042   __ Fsqrt(d14, d10);
10043   __ Frinta(d15, d10);
10044   __ Frintn(d16, d10);
10045   __ Frintz(d17, d10);
10046
10047   // The behaviour of fcvt is checked in TEST(fcvt_sd).
10048
10049   END();
10050   RUN();
10051
10052   uint64_t qn_raw = double_to_rawbits(qn);
10053   uint64_t sn_raw = double_to_rawbits(sn);
10054
// Propagating ops: exact bit pattern preserved; Fabs clears and Fneg flips
// only the sign bit.
10055   //   - Signalling NaN
10056   ASSERT_EQUAL_FP64(sn, d1);
10057   ASSERT_EQUAL_FP64(rawbits_to_double(sn_raw & ~kDSignMask), d2);
10058   ASSERT_EQUAL_FP64(rawbits_to_double(sn_raw ^ kDSignMask), d3);
10059   //   - Quiet NaN
10060   ASSERT_EQUAL_FP64(qn, d11);
10061   ASSERT_EQUAL_FP64(rawbits_to_double(qn_raw & ~kDSignMask), d12);
10062   ASSERT_EQUAL_FP64(rawbits_to_double(qn_raw ^ kDSignMask), d13);
10063
// ProcessNaN ops: signalling NaN comes out quietened, quiet NaN unchanged.
10064   //   - Signalling NaN
10065   ASSERT_EQUAL_FP64(sn_proc, d4);
10066   ASSERT_EQUAL_FP64(sn_proc, d5);
10067   ASSERT_EQUAL_FP64(sn_proc, d6);
10068   ASSERT_EQUAL_FP64(sn_proc, d7);
10069   //   - Quiet NaN
10070   ASSERT_EQUAL_FP64(qn_proc, d14);
10071   ASSERT_EQUAL_FP64(qn_proc, d15);
10072   ASSERT_EQUAL_FP64(qn_proc, d16);
10073   ASSERT_EQUAL_FP64(qn_proc, d17);
10074
10075   TEARDOWN();
10076 }
10077
10078
// Single-precision analogue of process_nan_double: Fmov/Fabs/Fneg propagate
// NaNs bit-for-bit (modulo sign), Fsqrt/Frint* quieten a signalling NaN
// (0x7f951111 -> 0x7fd51111) and leave a quiet NaN unchanged.
10079 TEST(process_nan_float) {
10080   INIT_V8();
10081   // Make sure that NaN propagation works correctly.
10082   float sn = rawbits_to_float(0x7f951111);
10083   float qn = rawbits_to_float(0x7fea1111);
10084   ASSERT(IsSignallingNaN(sn));
10085   ASSERT(IsQuietNaN(qn));
10086
10087   // The input NaNs after passing through ProcessNaN.
// sn_proc is sn with the quiet bit set; quiet NaNs are expected unchanged.
10088   float sn_proc = rawbits_to_float(0x7fd51111);
10089   float qn_proc = qn;
10090   ASSERT(IsQuietNaN(sn_proc));
10091   ASSERT(IsQuietNaN(qn_proc));
10092
10093   SETUP();
10094   START();
10095
10096   // Execute a number of instructions which all use ProcessNaN, and check that
10097   // they all handle the NaN correctly.
10098   __ Fmov(s0, sn);
10099   __ Fmov(s10, qn);
10100
10101   // Operations that always propagate NaNs unchanged, even signalling NaNs.
10102   //   - Signalling NaN
10103   __ Fmov(s1, s0);
10104   __ Fabs(s2, s0);
10105   __ Fneg(s3, s0);
10106   //   - Quiet NaN
10107   __ Fmov(s11, s10);
10108   __ Fabs(s12, s10);
10109   __ Fneg(s13, s10);
10110
10111   // Operations that use ProcessNaN.
10112   //   - Signalling NaN
10113   __ Fsqrt(s4, s0);
10114   __ Frinta(s5, s0);
10115   __ Frintn(s6, s0);
10116   __ Frintz(s7, s0);
10117   //   - Quiet NaN
10118   __ Fsqrt(s14, s10);
10119   __ Frinta(s15, s10);
10120   __ Frintn(s16, s10);
10121   __ Frintz(s17, s10);
10122
10123   // The behaviour of fcvt is checked in TEST(fcvt_sd).
10124
10125   END();
10126   RUN();
10127
10128   uint32_t qn_raw = float_to_rawbits(qn);
10129   uint32_t sn_raw = float_to_rawbits(sn);
10130
// Propagating ops: exact bit pattern preserved; Fabs clears and Fneg flips
// only the sign bit.
10131   //   - Signalling NaN
10132   ASSERT_EQUAL_FP32(sn, s1);
10133   ASSERT_EQUAL_FP32(rawbits_to_float(sn_raw & ~kSSignMask), s2);
10134   ASSERT_EQUAL_FP32(rawbits_to_float(sn_raw ^ kSSignMask), s3);
10135   //   - Quiet NaN
10136   ASSERT_EQUAL_FP32(qn, s11);
10137   ASSERT_EQUAL_FP32(rawbits_to_float(qn_raw & ~kSSignMask), s12);
10138   ASSERT_EQUAL_FP32(rawbits_to_float(qn_raw ^ kSSignMask), s13);
10139
// ProcessNaN ops: signalling NaN comes out quietened, quiet NaN unchanged.
10140   //   - Signalling NaN
10141   ASSERT_EQUAL_FP32(sn_proc, s4);
10142   ASSERT_EQUAL_FP32(sn_proc, s5);
10143   ASSERT_EQUAL_FP32(sn_proc, s6);
10144   ASSERT_EQUAL_FP32(sn_proc, s7);
10145   //   - Quiet NaN
10146   ASSERT_EQUAL_FP32(qn_proc, s14);
10147   ASSERT_EQUAL_FP32(qn_proc, s15);
10148   ASSERT_EQUAL_FP32(qn_proc, s16);
10149   ASSERT_EQUAL_FP32(qn_proc, s17);
10150
10151   TEARDOWN();
10152 }
10153
10154
// Run the two-operand FP instructions (Fadd/Fsub/Fmul/Fdiv/Fmax/Fmin) on
// (n, m), where at least one operand is a NaN, and check that every result
// equals `expected` — the NaN the caller expects to be propagated (already
// quietened if it was signalling). All six instructions must agree.
10155 static void ProcessNaNsHelper(double n, double m, double expected) {
10156   ASSERT(isnan(n) || isnan(m));
10157   ASSERT(isnan(expected));
10158
10159   SETUP();
10160   START();
10161
10162   // Execute a number of instructions which all use ProcessNaNs, and check that
10163   // they all propagate NaNs correctly.
10164   __ Fmov(d0, n);
10165   __ Fmov(d1, m);
10166
10167   __ Fadd(d2, d0, d1);
10168   __ Fsub(d3, d0, d1);
10169   __ Fmul(d4, d0, d1);
10170   __ Fdiv(d5, d0, d1);
10171   __ Fmax(d6, d0, d1);
10172   __ Fmin(d7, d0, d1);
10173
10174   END();
10175   RUN();
10176
10177   ASSERT_EQUAL_FP64(expected, d2);
10178   ASSERT_EQUAL_FP64(expected, d3);
10179   ASSERT_EQUAL_FP64(expected, d4);
10180   ASSERT_EQUAL_FP64(expected, d5);
10181   ASSERT_EQUAL_FP64(expected, d6);
10182   ASSERT_EQUAL_FP64(expected, d7);
10183
10184   TEARDOWN();
10185 }
10186
10187
// Check two-operand NaN selection rules for doubles via ProcessNaNsHelper:
// a lone quiet NaN propagates unchanged; a lone signalling NaN propagates
// quietened; with two NaNs, a signalling operand wins over a quiet one, and
// otherwise the first (n) operand wins.
10188 TEST(process_nans_double) {
10189   INIT_V8();
10190   // Make sure that NaN propagation works correctly.
10191   double sn = rawbits_to_double(0x7ff5555511111111);
10192   double sm = rawbits_to_double(0x7ff5555522222222);
10193   double qn = rawbits_to_double(0x7ffaaaaa11111111);
10194   double qm = rawbits_to_double(0x7ffaaaaa22222222);
10195   ASSERT(IsSignallingNaN(sn));
10196   ASSERT(IsSignallingNaN(sm));
10197   ASSERT(IsQuietNaN(qn));
10198   ASSERT(IsQuietNaN(qm));
10199
10200   // The input NaNs after passing through ProcessNaN.
10201   double sn_proc = rawbits_to_double(0x7ffd555511111111);
10202   double sm_proc = rawbits_to_double(0x7ffd555522222222);
10203   double qn_proc = qn;
10204   double qm_proc = qm;
10205   ASSERT(IsQuietNaN(sn_proc));
10206   ASSERT(IsQuietNaN(sm_proc));
10207   ASSERT(IsQuietNaN(qn_proc));
10208   ASSERT(IsQuietNaN(qm_proc));
10209
10210   // Quiet NaNs are propagated.
10211   ProcessNaNsHelper(qn, 0, qn_proc);
10212   ProcessNaNsHelper(0, qm, qm_proc);
10213   ProcessNaNsHelper(qn, qm, qn_proc);
10214
10215   // Signalling NaNs are propagated, and made quiet.
10216   ProcessNaNsHelper(sn, 0, sn_proc);
10217   ProcessNaNsHelper(0, sm, sm_proc);
10218   ProcessNaNsHelper(sn, sm, sn_proc);
10219
10220   // Signalling NaNs take precedence over quiet NaNs.
10221   ProcessNaNsHelper(sn, qm, sn_proc);
10222   ProcessNaNsHelper(qn, sm, sm_proc);
// NOTE(review): (sn, sm) was already exercised in the group above; this call
// is redundant — presumably a (sn, sn)-style case or a leftover. Harmless.
10223   ProcessNaNsHelper(sn, sm, sn_proc);
10224 }
10225
10226
// Single-precision analogue of ProcessNaNsHelper(double): run the six
// two-operand FP instructions on (n, m), at least one of which is a NaN, and
// check that all of them produce `expected` (the propagated, quietened NaN).
10227 static void ProcessNaNsHelper(float n, float m, float expected) {
10228   ASSERT(isnan(n) || isnan(m));
10229   ASSERT(isnan(expected));
10230
10231   SETUP();
10232   START();
10233
10234   // Execute a number of instructions which all use ProcessNaNs, and check that
10235   // they all propagate NaNs correctly.
10236   __ Fmov(s0, n);
10237   __ Fmov(s1, m);
10238
10239   __ Fadd(s2, s0, s1);
10240   __ Fsub(s3, s0, s1);
10241   __ Fmul(s4, s0, s1);
10242   __ Fdiv(s5, s0, s1);
10243   __ Fmax(s6, s0, s1);
10244   __ Fmin(s7, s0, s1);
10245
10246   END();
10247   RUN();
10248
10249   ASSERT_EQUAL_FP32(expected, s2);
10250   ASSERT_EQUAL_FP32(expected, s3);
10251   ASSERT_EQUAL_FP32(expected, s4);
10252   ASSERT_EQUAL_FP32(expected, s5);
10253   ASSERT_EQUAL_FP32(expected, s6);
10254   ASSERT_EQUAL_FP32(expected, s7);
10255
10256   TEARDOWN();
10257 }
10258
10259
// Single-precision analogue of process_nans_double: a lone quiet NaN
// propagates unchanged, a lone signalling NaN propagates quietened, and with
// two NaNs a signalling operand takes precedence over a quiet one.
10260 TEST(process_nans_float) {
10261   INIT_V8();
10262   // Make sure that NaN propagation works correctly.
10263   float sn = rawbits_to_float(0x7f951111);
10264   float sm = rawbits_to_float(0x7f952222);
10265   float qn = rawbits_to_float(0x7fea1111);
10266   float qm = rawbits_to_float(0x7fea2222);
10267   ASSERT(IsSignallingNaN(sn));
10268   ASSERT(IsSignallingNaN(sm));
10269   ASSERT(IsQuietNaN(qn));
10270   ASSERT(IsQuietNaN(qm));
10271
10272   // The input NaNs after passing through ProcessNaN.
10273   float sn_proc = rawbits_to_float(0x7fd51111);
10274   float sm_proc = rawbits_to_float(0x7fd52222);
10275   float qn_proc = qn;
10276   float qm_proc = qm;
10277   ASSERT(IsQuietNaN(sn_proc));
10278   ASSERT(IsQuietNaN(sm_proc));
10279   ASSERT(IsQuietNaN(qn_proc));
10280   ASSERT(IsQuietNaN(qm_proc));
10281
10282   // Quiet NaNs are propagated.
10283   ProcessNaNsHelper(qn, 0, qn_proc);
10284   ProcessNaNsHelper(0, qm, qm_proc);
10285   ProcessNaNsHelper(qn, qm, qn_proc);
10286
10287   // Signalling NaNs are propagated, and made quiet.
10288   ProcessNaNsHelper(sn, 0, sn_proc);
10289   ProcessNaNsHelper(0, sm, sm_proc);
10290   ProcessNaNsHelper(sn, sm, sn_proc);
10291
10292   // Signalling NaNs take precedence over quiet NaNs.
10293   ProcessNaNsHelper(sn, qm, sn_proc);
10294   ProcessNaNsHelper(qn, sm, sm_proc);
// NOTE(review): (sn, sm) was already exercised in the group above; this call
// is redundant — presumably a leftover. Harmless.
10295   ProcessNaNsHelper(sn, sm, sn_proc);
10296 }
10297
10298
// With Default-NaN mode enabled (FPCR.DN set), every NaN-producing FP
// instruction must return the default NaN instead of propagating an input
// NaN — except pure bit-copy ops (Fmov/Fabs/Fneg), which still propagate.
// At least one of n, m, a must be a NaN. One-operand ops are only checked
// when n is NaN, two-operand ops when n or m is NaN; the three-operand
// Fmadd family is always checked (some of n, m, a is NaN by precondition).
10299 static void DefaultNaNHelper(float n, float m, float a) {
10300   ASSERT(isnan(n) || isnan(m) || isnan(a));
10301
10302   bool test_1op = isnan(n);
10303   bool test_2op = isnan(n) || isnan(m);
10304
10305   SETUP();
10306   START();
10307
10308   // Enable Default-NaN mode in the FPCR.
10309   __ Mrs(x0, FPCR);
10310   __ Orr(x1, x0, DN_mask);
10311   __ Msr(FPCR, x1);
10312
10313   // Execute a number of instructions which all use ProcessNaNs, and check that
10314   // they all produce the default NaN.
10315   __ Fmov(s0, n);
10316   __ Fmov(s1, m);
10317   __ Fmov(s2, a);
10318
10319   if (test_1op) {
10320     // Operations that always propagate NaNs unchanged, even signalling NaNs.
10321     __ Fmov(s10, s0);
10322     __ Fabs(s11, s0);
10323     __ Fneg(s12, s0);
10324
10325     // Operations that use ProcessNaN.
10326     __ Fsqrt(s13, s0);
10327     __ Frinta(s14, s0);
10328     __ Frintn(s15, s0);
10329     __ Frintz(s16, s0);
10330
10331     // Fcvt usually has special NaN handling, but it respects default-NaN mode.
10332     __ Fcvt(d17, s0);
10333   }
10334
10335   if (test_2op) {
10336     __ Fadd(s18, s0, s1);
10337     __ Fsub(s19, s0, s1);
10338     __ Fmul(s20, s0, s1);
10339     __ Fdiv(s21, s0, s1);
10340     __ Fmax(s22, s0, s1);
10341     __ Fmin(s23, s0, s1);
10342   }
10343
10344   __ Fmadd(s24, s0, s1, s2);
10345   __ Fmsub(s25, s0, s1, s2);
10346   __ Fnmadd(s26, s0, s1, s2);
10347   __ Fnmsub(s27, s0, s1, s2);
10348
10349   // Restore FPCR.
10350   __ Msr(FPCR, x0);
10351
10352   END();
10353   RUN();
10354
10355   if (test_1op) {
10356     uint32_t n_raw = float_to_rawbits(n);
// Bit-copy ops keep the original NaN (Fabs clears / Fneg flips the sign bit
// only); everything else must yield the default NaN.
10357     ASSERT_EQUAL_FP32(n, s10);
10358     ASSERT_EQUAL_FP32(rawbits_to_float(n_raw & ~kSSignMask), s11);
10359     ASSERT_EQUAL_FP32(rawbits_to_float(n_raw ^ kSSignMask), s12);
10360     ASSERT_EQUAL_FP32(kFP32DefaultNaN, s13);
10361     ASSERT_EQUAL_FP32(kFP32DefaultNaN, s14);
10362     ASSERT_EQUAL_FP32(kFP32DefaultNaN, s15);
10363     ASSERT_EQUAL_FP32(kFP32DefaultNaN, s16);
10364     ASSERT_EQUAL_FP64(kFP64DefaultNaN, d17);
10365   }
10366
10367   if (test_2op) {
10368     ASSERT_EQUAL_FP32(kFP32DefaultNaN, s18);
10369     ASSERT_EQUAL_FP32(kFP32DefaultNaN, s19);
10370     ASSERT_EQUAL_FP32(kFP32DefaultNaN, s20);
10371     ASSERT_EQUAL_FP32(kFP32DefaultNaN, s21);
10372     ASSERT_EQUAL_FP32(kFP32DefaultNaN, s22);
10373     ASSERT_EQUAL_FP32(kFP32DefaultNaN, s23);
10374   }
10375
10376   ASSERT_EQUAL_FP32(kFP32DefaultNaN, s24);
10377   ASSERT_EQUAL_FP32(kFP32DefaultNaN, s25);
10378   ASSERT_EQUAL_FP32(kFP32DefaultNaN, s26);
10379   ASSERT_EQUAL_FP32(kFP32DefaultNaN, s27);
10380
10381   TEARDOWN();
10382 }
10383
10384
// Drive DefaultNaNHelper(float) over every combination of signalling, quiet
// and mixed NaNs in each of the three operand positions.
10385 TEST(default_nan_float) {
10386   INIT_V8();
10387   float sn = rawbits_to_float(0x7f951111);
10388   float sm = rawbits_to_float(0x7f952222);
10389   float sa = rawbits_to_float(0x7f95aaaa);
10390   float qn = rawbits_to_float(0x7fea1111);
10391   float qm = rawbits_to_float(0x7fea2222);
10392   float qa = rawbits_to_float(0x7feaaaaa);
10393   ASSERT(IsSignallingNaN(sn));
10394   ASSERT(IsSignallingNaN(sm));
10395   ASSERT(IsSignallingNaN(sa));
10396   ASSERT(IsQuietNaN(qn));
10397   ASSERT(IsQuietNaN(qm));
10398   ASSERT(IsQuietNaN(qa));
10399
10400   //   - Signalling NaNs
10401   DefaultNaNHelper(sn, 0.0f, 0.0f);
10402   DefaultNaNHelper(0.0f, sm, 0.0f);
10403   DefaultNaNHelper(0.0f, 0.0f, sa);
10404   DefaultNaNHelper(sn, sm, 0.0f);
10405   DefaultNaNHelper(0.0f, sm, sa);
10406   DefaultNaNHelper(sn, 0.0f, sa);
10407   DefaultNaNHelper(sn, sm, sa);
10408   //   - Quiet NaNs
10409   DefaultNaNHelper(qn, 0.0f, 0.0f);
10410   DefaultNaNHelper(0.0f, qm, 0.0f);
10411   DefaultNaNHelper(0.0f, 0.0f, qa);
10412   DefaultNaNHelper(qn, qm, 0.0f);
10413   DefaultNaNHelper(0.0f, qm, qa);
10414   DefaultNaNHelper(qn, 0.0f, qa);
10415   DefaultNaNHelper(qn, qm, qa);
10416   //   - Mixed NaNs
10417   DefaultNaNHelper(qn, sm, sa);
10418   DefaultNaNHelper(sn, qm, sa);
10419   DefaultNaNHelper(sn, sm, qa);
10420   DefaultNaNHelper(qn, qm, sa);
10421   DefaultNaNHelper(sn, qm, qa);
10422   DefaultNaNHelper(qn, sm, qa);
10423   DefaultNaNHelper(qn, qm, qa);
10424 }
10425
10426
// Double-precision analogue of DefaultNaNHelper(float): with FPCR.DN set,
// every NaN-producing FP instruction must return the default NaN, while the
// bit-copy ops (Fmov/Fabs/Fneg) still propagate the input NaN. At least one
// of n, m, a must be a NaN; 1-op checks need isnan(n), 2-op checks need
// isnan(n) || isnan(m), and the Fmadd family is always checked.
10427 static void DefaultNaNHelper(double n, double m, double a) {
10428   ASSERT(isnan(n) || isnan(m) || isnan(a));
10429
10430   bool test_1op = isnan(n);
10431   bool test_2op = isnan(n) || isnan(m);
10432
10433   SETUP();
10434   START();
10435
10436   // Enable Default-NaN mode in the FPCR.
10437   __ Mrs(x0, FPCR);
10438   __ Orr(x1, x0, DN_mask);
10439   __ Msr(FPCR, x1);
10440
10441   // Execute a number of instructions which all use ProcessNaNs, and check that
10442   // they all produce the default NaN.
10443   __ Fmov(d0, n);
10444   __ Fmov(d1, m);
10445   __ Fmov(d2, a);
10446
10447   if (test_1op) {
10448     // Operations that always propagate NaNs unchanged, even signalling NaNs.
10449     __ Fmov(d10, d0);
10450     __ Fabs(d11, d0);
10451     __ Fneg(d12, d0);
10452
10453     // Operations that use ProcessNaN.
10454     __ Fsqrt(d13, d0);
10455     __ Frinta(d14, d0);
10456     __ Frintn(d15, d0);
10457     __ Frintz(d16, d0);
10458
10459     // Fcvt usually has special NaN handling, but it respects default-NaN mode.
10460     __ Fcvt(s17, d0);
10461   }
10462
10463   if (test_2op) {
10464     __ Fadd(d18, d0, d1);
10465     __ Fsub(d19, d0, d1);
10466     __ Fmul(d20, d0, d1);
10467     __ Fdiv(d21, d0, d1);
10468     __ Fmax(d22, d0, d1);
10469     __ Fmin(d23, d0, d1);
10470   }
10471
10472   __ Fmadd(d24, d0, d1, d2);
10473   __ Fmsub(d25, d0, d1, d2);
10474   __ Fnmadd(d26, d0, d1, d2);
10475   __ Fnmsub(d27, d0, d1, d2);
10476
10477   // Restore FPCR.
10478   __ Msr(FPCR, x0);
10479
10480   END();
10481   RUN();
10482
10483   if (test_1op) {
10484     uint64_t n_raw = double_to_rawbits(n);
// Bit-copy ops keep the original NaN (Fabs clears / Fneg flips the sign bit
// only); everything else must yield the default NaN.
10485     ASSERT_EQUAL_FP64(n, d10);
10486     ASSERT_EQUAL_FP64(rawbits_to_double(n_raw & ~kDSignMask), d11);
10487     ASSERT_EQUAL_FP64(rawbits_to_double(n_raw ^ kDSignMask), d12);
10488     ASSERT_EQUAL_FP64(kFP64DefaultNaN, d13);
10489     ASSERT_EQUAL_FP64(kFP64DefaultNaN, d14);
10490     ASSERT_EQUAL_FP64(kFP64DefaultNaN, d15);
10491     ASSERT_EQUAL_FP64(kFP64DefaultNaN, d16);
10492     ASSERT_EQUAL_FP32(kFP32DefaultNaN, s17);
10493   }
10494
10495   if (test_2op) {
10496     ASSERT_EQUAL_FP64(kFP64DefaultNaN, d18);
10497     ASSERT_EQUAL_FP64(kFP64DefaultNaN, d19);
10498     ASSERT_EQUAL_FP64(kFP64DefaultNaN, d20);
10499     ASSERT_EQUAL_FP64(kFP64DefaultNaN, d21);
10500     ASSERT_EQUAL_FP64(kFP64DefaultNaN, d22);
10501     ASSERT_EQUAL_FP64(kFP64DefaultNaN, d23);
10502   }
10503
10504   ASSERT_EQUAL_FP64(kFP64DefaultNaN, d24);
10505   ASSERT_EQUAL_FP64(kFP64DefaultNaN, d25);
10506   ASSERT_EQUAL_FP64(kFP64DefaultNaN, d26);
10507   ASSERT_EQUAL_FP64(kFP64DefaultNaN, d27);
10508
10509   TEARDOWN();
10510 }
10511
10512
// Drive DefaultNaNHelper(double) over every combination of signalling, quiet
// and mixed NaNs in each of the three operand positions.
10513 TEST(default_nan_double) {
10514   INIT_V8();
10515   double sn = rawbits_to_double(0x7ff5555511111111);
10516   double sm = rawbits_to_double(0x7ff5555522222222);
10517   double sa = rawbits_to_double(0x7ff55555aaaaaaaa);
10518   double qn = rawbits_to_double(0x7ffaaaaa11111111);
10519   double qm = rawbits_to_double(0x7ffaaaaa22222222);
10520   double qa = rawbits_to_double(0x7ffaaaaaaaaaaaaa);
10521   ASSERT(IsSignallingNaN(sn));
10522   ASSERT(IsSignallingNaN(sm));
10523   ASSERT(IsSignallingNaN(sa));
10524   ASSERT(IsQuietNaN(qn));
10525   ASSERT(IsQuietNaN(qm));
10526   ASSERT(IsQuietNaN(qa));
10527
10528   //   - Signalling NaNs
10529   DefaultNaNHelper(sn, 0.0, 0.0);
10530   DefaultNaNHelper(0.0, sm, 0.0);
10531   DefaultNaNHelper(0.0, 0.0, sa);
10532   DefaultNaNHelper(sn, sm, 0.0);
10533   DefaultNaNHelper(0.0, sm, sa);
10534   DefaultNaNHelper(sn, 0.0, sa);
10535   DefaultNaNHelper(sn, sm, sa);
10536   //   - Quiet NaNs
10537   DefaultNaNHelper(qn, 0.0, 0.0);
10538   DefaultNaNHelper(0.0, qm, 0.0);
10539   DefaultNaNHelper(0.0, 0.0, qa);
10540   DefaultNaNHelper(qn, qm, 0.0);
10541   DefaultNaNHelper(0.0, qm, qa);
10542   DefaultNaNHelper(qn, 0.0, qa);
10543   DefaultNaNHelper(qn, qm, qa);
10544   //   - Mixed NaNs
10545   DefaultNaNHelper(qn, sm, sa);
10546   DefaultNaNHelper(sn, qm, sa);
10547   DefaultNaNHelper(sn, sm, qa);
10548   DefaultNaNHelper(qn, qm, sa);
10549   DefaultNaNHelper(sn, qm, qa);
10550   DefaultNaNHelper(qn, sm, qa);
10551   DefaultNaNHelper(qn, qm, qa);
10552 }
10553
10554
9855 TEST(call_no_relocation) { 10555 TEST(call_no_relocation) {
9856 Address call_start; 10556 Address call_start;
9857 Address return_address; 10557 Address return_address;
9858 10558
9859 INIT_V8(); 10559 INIT_V8();
9860 SETUP(); 10560 SETUP();
9861 10561
9862 START(); 10562 START();
9863 10563
9864 Label function; 10564 Label function;
(...skipping 151 matching lines...) Expand 10 before | Expand all | Expand 10 after
10016 AbsHelperX(-42); 10716 AbsHelperX(-42);
10017 AbsHelperX(kXMinInt); 10717 AbsHelperX(kXMinInt);
10018 AbsHelperX(kXMaxInt); 10718 AbsHelperX(kXMaxInt);
10019 10719
10020 AbsHelperW(0); 10720 AbsHelperW(0);
10021 AbsHelperW(42); 10721 AbsHelperW(42);
10022 AbsHelperW(-42); 10722 AbsHelperW(-42);
10023 AbsHelperW(kWMinInt); 10723 AbsHelperW(kWMinInt);
10024 AbsHelperW(kWMaxInt); 10724 AbsHelperW(kWMaxInt);
10025 } 10725 }
10726
10727
// Emit a constant pool and a veneer pool of known byte sizes, record them via
// RecordConstPool/RecordVeneerPool, then iterate the generated Code's
// RelocInfo and check that exactly one CONST_POOL and one VENEER_POOL entry
// exist and that each carries the recorded size in its data() payload.
10728 TEST(pool_size) {
10729   INIT_V8();
10730   SETUP();
10731
10732   // This test does not execute any code. It only tests that the size of the
10733   // pools is read correctly from the RelocInfo.
10734
10735   Label exit;
10736   __ b(&exit);
10737
10738   const unsigned constant_pool_size = 312;
10739   const unsigned veneer_pool_size = 184;
10740
// Emit constant_pool_size bytes of pool data, 4 bytes per dc32.
10741   __ RecordConstPool(constant_pool_size);
10742   for (unsigned i = 0; i < constant_pool_size / 4; ++i) {
10743     __ dc32(0);
10744   }
10745
// Emit veneer_pool_size bytes as nops, one instruction at a time.
10746   __ RecordVeneerPool(masm.pc_offset(), veneer_pool_size);
10747   for (unsigned i = 0; i < veneer_pool_size / kInstructionSize; ++i) {
10748     __ nop();
10749   }
10750
10751   __ bind(&exit);
10752
10753   Heap* heap = isolate->heap();
10754   CodeDesc desc;
10755   Object* code_object = NULL;
10756   Code* code;
10757   masm.GetCode(&desc);
10758   MaybeObject* maybe_code = heap->CreateCode(desc, 0, masm.CodeObject());
// NOTE(review): the ToObject result is not checked; presumably allocation
// failure cannot occur here in a test, but Code::cast would then crash.
10759   maybe_code->ToObject(&code_object);
10760   code = Code::cast(code_object);
10761
10762   unsigned pool_count = 0;
10763   int pool_mask = RelocInfo::ModeMask(RelocInfo::CONST_POOL) |
10764                   RelocInfo::ModeMask(RelocInfo::VENEER_POOL);
10765   for (RelocIterator it(code, pool_mask); !it.done(); it.next()) {
10766     RelocInfo* info = it.rinfo();
10767     if (RelocInfo::IsConstPool(info->rmode())) {
10768       ASSERT(info->data() == constant_pool_size);
10769       ++pool_count;
10770     }
10771     if (RelocInfo::IsVeneerPool(info->rmode())) {
10772       ASSERT(info->data() == veneer_pool_size);
10773       ++pool_count;
10774     }
10775   }
10776
// One constant-pool entry plus one veneer-pool entry were recorded above.
10777   ASSERT(pool_count == 2);
10778
10779   TEARDOWN();
10780 }
OLDNEW
« no previous file with comments | « test/cctest/test-api.cc ('k') | test/cctest/test-atomicops.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698