| OLD | NEW |
| 1 // Copyright 2009 the V8 project authors. All rights reserved. | 1 // Copyright 2009 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 1348 matching lines...) |
| 1359 __ vmovsd(xmm5, Operand(rsp, kDoubleSize)); | 1359 __ vmovsd(xmm5, Operand(rsp, kDoubleSize)); |
| 1360 __ vmovsd(xmm6, xmm5); | 1360 __ vmovsd(xmm6, xmm5); |
| 1361 __ vmovapd(xmm3, xmm6); | 1361 __ vmovapd(xmm3, xmm6); |
| 1362 | 1362 |
| 1363 __ movl(rdx, Immediate(6)); | 1363 __ movl(rdx, Immediate(6)); |
| 1364 __ vcvtlsi2sd(xmm6, xmm6, rdx); | 1364 __ vcvtlsi2sd(xmm6, xmm6, rdx); |
| 1365 __ movl(Operand(rsp, 0), Immediate(5)); | 1365 __ movl(Operand(rsp, 0), Immediate(5)); |
| 1366 __ vcvtlsi2sd(xmm7, xmm7, Operand(rsp, 0)); | 1366 __ vcvtlsi2sd(xmm7, xmm7, Operand(rsp, 0)); |
| 1367 __ vsubsd(xmm7, xmm6, xmm7); // xmm7 is 1.0 | 1367 __ vsubsd(xmm7, xmm6, xmm7); // xmm7 is 1.0 |
| 1368 __ vmulsd(xmm1, xmm1, xmm7); | 1368 __ vmulsd(xmm1, xmm1, xmm7); |
| | 1369 |
| | 1370 __ movq(rdx, V8_INT64_C(0x3ff0000000000000)); // 1.0 |
| | 1371 __ vmovq(xmm7, rdx); |
| | 1372 __ vmulsd(xmm1, xmm1, xmm7); |
| | 1373 __ movq(Operand(rsp, 0), rdx); |
| | 1374 __ vmovq(xmm6, Operand(rsp, 0)); |
| | 1375 __ vmulsd(xmm1, xmm1, xmm6); |
| 1369 __ addq(rsp, Immediate(kDoubleSize * 2)); | 1376 __ addq(rsp, Immediate(kDoubleSize * 2)); |
| 1370 | 1377 |
| 1371 __ vucomisd(xmm3, xmm1); | 1378 __ vucomisd(xmm3, xmm1); |
| 1372 __ j(parity_even, &exit); | 1379 __ j(parity_even, &exit); |
| 1373 __ j(not_equal, &exit); | 1380 __ j(not_equal, &exit); |
| 1374 __ movl(rax, Immediate(1)); | 1381 __ movl(rax, Immediate(1)); |
| 1375 | 1382 |
| 1376 __ vminsd(xmm3, xmm1, xmm2); | 1383 __ vminsd(xmm3, xmm1, xmm2); |
| 1377 __ vucomisd(xmm3, xmm1); | 1384 __ vucomisd(xmm3, xmm1); |
| 1378 __ j(parity_even, &exit); | 1385 __ j(parity_even, &exit); |
| (...skipping 704 matching lines...) |
| 2083 | 2090 |
| 2084 F1 f = FUNCTION_CAST<F1>(code->entry()); | 2091 F1 f = FUNCTION_CAST<F1>(code->entry()); |
| 2085 for (int i = 0; i < kNumCases; ++i) { | 2092 for (int i = 0; i < kNumCases; ++i) { |
| 2086 int res = f(i); | 2093 int res = f(i); |
| 2087 PrintF("f(%d) = %d\n", i, res); | 2094 PrintF("f(%d) = %d\n", i, res); |
| 2088 CHECK_EQ(values[i], res); | 2095 CHECK_EQ(values[i], res); |
| 2089 } | 2096 } |
| 2090 } | 2097 } |
| 2091 | 2098 |
| 2092 #undef __ | 2099 #undef __ |
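Notes on the diff above (illustration only, not part of the patch):

Lines 1363–1367 build the constant 1.0 without loading a double literal: the integer 6 is converted from a register and 5 from a memory operand, exercising both `vcvtlsi2sd` forms, and their difference is 1.0. A minimal standalone C++ sketch of the arithmetic the test expects:

```cpp
#include <cassert>

int main() {
  int from_register = 6;  // movl(rdx, Immediate(6)), then vcvtlsi2sd from rdx
  int from_memory = 5;    // movl(Operand(rsp, 0), ...), then vcvtlsi2sd from memory
  double six = static_cast<double>(from_register);
  double five = static_cast<double>(from_memory);
  assert(six - five == 1.0);  // vsubsd(xmm7, xmm6, xmm7): 6.0 - 5.0, "xmm7 is 1.0"
  return 0;
}
```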
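The new lines 1370–1375 load 1.0 a second way, as its raw IEEE-754 bit pattern `0x3ff0000000000000`, moving it into an XMM register first from a GPR (`vmovq(xmm7, rdx)`) and then from memory, and use it as a multiplicative identity on xmm1 so the final `vucomisd` comparison still passes. A minimal sketch checking that bit pattern:

```cpp
#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  const uint64_t kOneBits = 0x3ff0000000000000ULL;  // constant from line 1370
  double one;
  std::memcpy(&one, &kOneBits, sizeof one);  // bit-cast without aliasing UB
  assert(one == 1.0);                        // IEEE-754 encoding of 1.0
  double x = 6.0;
  assert(x * one == x);  // the added vmulsd calls must leave xmm1 unchanged
  return 0;
}
```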
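The `vucomisd` / `j(parity_even, &exit)` pairs rely on `ucomisd` reporting an unordered compare (a NaN operand) through the parity flag, so the test bails out on NaN before checking `not_equal`. The same unordered semantics expressed in portable C++, for reference:

```cpp
#include <cassert>
#include <cmath>

int main() {
  double nan = std::nan("");
  // Unordered: with a NaN operand, ordering and equality predicates are
  // all false, mirroring ucomisd setting the parity flag.
  assert(!(nan == nan));
  assert(!(nan < 1.0) && !(nan >= 1.0));
  // An ordinary value compares equal to itself, so the not_equal branch
  // falls through and the test proceeds to set rax = 1.
  assert(1.0 == 1.0);
  return 0;
}
```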