Chromium Code Reviews

Side by Side Diff: test/cctest/test-assembler-a64.cc

Issue 181453002: Reset trunk to 3.24.35.4 (Closed) Base URL: https://v8.googlecode.com/svn/trunk
Patch Set: Created 6 years, 9 months ago
1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28 #include <stdio.h>
29 #include <stdlib.h>
30 #include <string.h>
31 #include <cmath>
32 #include <limits>
33
34 #include "v8.h"
35
36 #include "macro-assembler.h"
37 #include "a64/simulator-a64.h"
38 #include "a64/disasm-a64.h"
39 #include "a64/utils-a64.h"
40 #include "cctest.h"
41 #include "test-utils-a64.h"
42
43 using namespace v8::internal;
44
45 // Test infrastructure.
46 //
47 // Tests are functions which accept no parameters and have no return values.
48 // The testing code should not perform an explicit return once completed. For
49 // example, to test the mov immediate instruction, a very simple test would be:
50 //
51 // TEST(mov_x0_one) {
52 // SETUP();
53 //
54 // START();
55 // __ mov(x0, Operand(1));
56 // END();
57 //
58 // RUN();
59 //
60 // ASSERT_EQUAL_64(1, x0);
61 //
62 // TEARDOWN();
63 // }
64 //
65 // Within a START ... END block, all registers but sp can be modified. sp has to
66 // be explicitly saved/restored. The END() macro replaces the function return
67 // so it may appear multiple times in a test if the test has multiple exit
68 // points.
69 //
70 // Once the test has been run, all integer and floating point registers, as
71 // well as flags, are accessible through a RegisterDump instance; see
72 // utils-a64.cc for more info on RegisterDump.
73 //
74 // We provide some helper asserts to handle common cases:
75 //
76 // ASSERT_EQUAL_32(int32_t, int32_t)
77 // ASSERT_EQUAL_FP32(float, float)
78 // ASSERT_EQUAL_32(int32_t, W register)
79 // ASSERT_EQUAL_FP32(float, S register)
80 // ASSERT_EQUAL_64(int64_t, int64_t)
81 // ASSERT_EQUAL_FP64(double, double)
82 // ASSERT_EQUAL_64(int64_t, X register)
83 // ASSERT_EQUAL_64(X register, X register)
84 // ASSERT_EQUAL_FP64(double, D register)
85 //
86 // e.g. ASSERT_EQUAL_64(0.5, d30);
87 //
88 // If more advanced computation is required before the assert, then access the
89 // RegisterDump named core directly:
90 //
91 // ASSERT_EQUAL_64(0x1234, core.xreg(0) & 0xffff);
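//
// For example, a test that checks the status flags might look like this (an
// illustrative sketch only; ASSERT_EQUAL_NZCV is defined further down):
//
// TEST(flags_example) {
//   SETUP();
//
//   START();
//   __ Mov(w0, 0);
//   __ Tst(w0, Operand(w0));  // 0 & 0 == 0, so only the Z flag is set.
//   END();
//
//   RUN();
//
//   ASSERT_EQUAL_NZCV(ZFlag);
//
//   TEARDOWN();
// }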
92
93
94 #if 0 // TODO(all): enable.
95 static v8::Persistent<v8::Context> env;
96
97 static void InitializeVM() {
98 if (env.IsEmpty()) {
99 env = v8::Context::New();
100 }
101 }
102 #endif
103
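// The '__' shorthand below makes the code-generation statements in these
// tests read like assembly: '__ Mov(x0, 1)' expands to 'masm.Mov(x0, 1)'.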
104 #define __ masm.
105
106 #define BUF_SIZE 8192
107 #define SETUP() SETUP_SIZE(BUF_SIZE)
108
109 #define INIT_V8() \
110 CcTest::InitializeVM(); \
111
112 #ifdef USE_SIMULATOR
113
114 // Run tests with the simulator.
115 #define SETUP_SIZE(buf_size) \
116 Isolate* isolate = Isolate::Current(); \
117 HandleScope scope(isolate); \
118 ASSERT(isolate != NULL); \
119 byte* buf = new byte[buf_size]; \
120 MacroAssembler masm(isolate, buf, buf_size); \
121 Decoder decoder; \
122 Simulator simulator(&decoder); \
123 PrintDisassembler* pdis = NULL; \
124 RegisterDump core;
125
126 /* if (CcTest::trace_sim()) {                                               \
127 pdis = new PrintDisassembler(stdout); \
128 decoder.PrependVisitor(pdis); \
129 } \
130 */
131
132 // Reset the assembler and simulator, so that instructions can be generated,
133 // but don't actually emit any code. This can be used by tests that need to
134 // emit instructions at the start of the buffer. Note that START_AFTER_RESET
135 // must be called before any callee-saved register is modified, and before an
136 // END is encountered.
137 //
138 // Most tests should call START, rather than call RESET directly.
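// (TEST(branch_at_start), further down in this file, uses RESET and
// START_AFTER_RESET directly to place a branch at the very start of the
// buffer.)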
139 #define RESET() \
140 __ Reset(); \
141 simulator.ResetState();
142
143 #define START_AFTER_RESET() \
144 __ SetStackPointer(csp); \
145 __ PushCalleeSavedRegisters(); \
146 __ Debug("Start test.", __LINE__, TRACE_ENABLE | LOG_ALL);
147
148 #define START() \
149 RESET(); \
150 START_AFTER_RESET();
151
152 #define RUN() \
153 simulator.RunFrom(reinterpret_cast<Instruction*>(buf))
154
155 #define END() \
156 __ Debug("End test.", __LINE__, TRACE_DISABLE | LOG_ALL); \
157 core.Dump(&masm); \
158 __ PopCalleeSavedRegisters(); \
159 __ Ret(); \
160 __ GetCode(NULL);
161
162 #define TEARDOWN() \
163 delete pdis; \
164 delete[] buf;
165
166 #else // ifdef USE_SIMULATOR.
167 // Run the test on real hardware or models.
168 #define SETUP_SIZE(buf_size) \
169 Isolate* isolate = Isolate::Current(); \
170 HandleScope scope(isolate); \
171 ASSERT(isolate != NULL); \
172 byte* buf = new byte[buf_size]; \
173 MacroAssembler masm(isolate, buf, buf_size); \
174 RegisterDump core; \
175 CPU::SetUp();
176
177 #define RESET() \
178 __ Reset();
179
180 #define START_AFTER_RESET() \
181 __ SetStackPointer(csp); \
182 __ PushCalleeSavedRegisters();
183
184 #define START() \
185 RESET(); \
186 START_AFTER_RESET();
187
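// The generated code buffer is converted to a function pointer using memcpy,
// since standard C++ provides no direct conversion between object pointers
// and function pointers.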
188 #define RUN() \
189 CPU::FlushICache(buf, masm.SizeOfGeneratedCode()); \
190 { \
191 void (*test_function)(void); \
192 memcpy(&test_function, &buf, sizeof(buf)); \
193 test_function(); \
194 }
195
196 #define END() \
197 core.Dump(&masm); \
198 __ PopCalleeSavedRegisters(); \
199 __ Ret(); \
200 __ GetCode(NULL);
201
202 #define TEARDOWN() \
203 delete[] buf;
204
205 #endif // ifdef USE_SIMULATOR.
206
207 #define ASSERT_EQUAL_NZCV(expected) \
208 CHECK(EqualNzcv(expected, core.flags_nzcv()))
209
210 #define ASSERT_EQUAL_REGISTERS(expected) \
211 CHECK(EqualRegisters(&expected, &core))
212
213 #define ASSERT_EQUAL_32(expected, result) \
214 CHECK(Equal32(static_cast<uint32_t>(expected), &core, result))
215
216 #define ASSERT_EQUAL_FP32(expected, result) \
217 CHECK(EqualFP32(expected, &core, result))
218
219 #define ASSERT_EQUAL_64(expected, result) \
220 CHECK(Equal64(expected, &core, result))
221
222 #define ASSERT_EQUAL_FP64(expected, result) \
223 CHECK(EqualFP64(expected, &core, result))
224
225 #ifdef DEBUG
226 #define ASSERT_LITERAL_POOL_SIZE(expected) \
227 CHECK((expected) == (__ LiteralPoolSize()))
228 #else
229 #define ASSERT_LITERAL_POOL_SIZE(expected) \
230 ((void) 0)
231 #endif
232
233
234 TEST(stack_ops) {
235 INIT_V8();
236 SETUP();
237
238 START();
239 // Save csp.
240 __ Mov(x29, csp);
241
242 // Set the csp to a known value.
243 __ Mov(x16, 0x1000);
244 __ Mov(csp, x16);
245 __ Mov(x0, csp);
246
247 // Add immediate to the csp, and move the result to a normal register.
248 __ Add(csp, csp, Operand(0x50));
249 __ Mov(x1, csp);
250
251 // Add extended to the csp, and move the result to a normal register.
252 __ Mov(x17, 0xfff);
253 __ Add(csp, csp, Operand(x17, SXTB));
254 __ Mov(x2, csp);
255
256 // Set the csp using a logical instruction, and move to a normal register.
257 __ Orr(csp, xzr, Operand(0x1fff));
258 __ Mov(x3, csp);
259
260 // Write wcsp using a logical instruction.
261 __ Orr(wcsp, wzr, Operand(0xfffffff8L));
262 __ Mov(x4, csp);
263
264 // Write csp, and read back wcsp.
265 __ Orr(csp, xzr, Operand(0xfffffff8L));
266 __ Mov(w5, wcsp);
267
268 // Restore csp.
269 __ Mov(csp, x29);
270 END();
271
272 RUN();
273
274 ASSERT_EQUAL_64(0x1000, x0);
275 ASSERT_EQUAL_64(0x1050, x1);
276 ASSERT_EQUAL_64(0x104f, x2);
277 ASSERT_EQUAL_64(0x1fff, x3);
278 ASSERT_EQUAL_64(0xfffffff8, x4);
279 ASSERT_EQUAL_64(0xfffffff8, x5);
280
281 TEARDOWN();
282 }
283
284
285 TEST(mvn) {
286 INIT_V8();
287 SETUP();
288
289 START();
290 __ Mvn(w0, 0xfff);
291 __ Mvn(x1, 0xfff);
292 __ Mvn(w2, Operand(w0, LSL, 1));
293 __ Mvn(x3, Operand(x1, LSL, 2));
294 __ Mvn(w4, Operand(w0, LSR, 3));
295 __ Mvn(x5, Operand(x1, LSR, 4));
296 __ Mvn(w6, Operand(w0, ASR, 11));
297 __ Mvn(x7, Operand(x1, ASR, 12));
298 __ Mvn(w8, Operand(w0, ROR, 13));
299 __ Mvn(x9, Operand(x1, ROR, 14));
300 __ Mvn(w10, Operand(w2, UXTB));
301 __ Mvn(x11, Operand(x2, SXTB, 1));
302 __ Mvn(w12, Operand(w2, UXTH, 2));
303 __ Mvn(x13, Operand(x2, SXTH, 3));
304 __ Mvn(x14, Operand(w2, UXTW, 4));
305 __ Mvn(x15, Operand(w2, SXTW, 4));
306 END();
307
308 RUN();
309
310 ASSERT_EQUAL_64(0xfffff000, x0);
311 ASSERT_EQUAL_64(0xfffffffffffff000UL, x1);
312 ASSERT_EQUAL_64(0x00001fff, x2);
313 ASSERT_EQUAL_64(0x0000000000003fffUL, x3);
314 ASSERT_EQUAL_64(0xe00001ff, x4);
315 ASSERT_EQUAL_64(0xf0000000000000ffUL, x5);
316 ASSERT_EQUAL_64(0x00000001, x6);
317 ASSERT_EQUAL_64(0x0, x7);
318 ASSERT_EQUAL_64(0x7ff80000, x8);
319 ASSERT_EQUAL_64(0x3ffc000000000000UL, x9);
320 ASSERT_EQUAL_64(0xffffff00, x10);
321 ASSERT_EQUAL_64(0x0000000000000001UL, x11);
322 ASSERT_EQUAL_64(0xffff8003, x12);
323 ASSERT_EQUAL_64(0xffffffffffff0007UL, x13);
324 ASSERT_EQUAL_64(0xfffffffffffe000fUL, x14);
325 ASSERT_EQUAL_64(0xfffffffffffe000fUL, x15);
326
327 TEARDOWN();
328 }
329
330
331 TEST(mov) {
332 INIT_V8();
333 SETUP();
334
335 START();
336 __ Mov(x0, 0xffffffffffffffffL);
337 __ Mov(x1, 0xffffffffffffffffL);
338 __ Mov(x2, 0xffffffffffffffffL);
339 __ Mov(x3, 0xffffffffffffffffL);
340
341 __ Mov(x0, 0x0123456789abcdefL);
342
343 __ movz(x1, 0xabcdL << 16);
344 __ movk(x2, 0xabcdL << 32);
345 __ movn(x3, 0xabcdL << 48);
346
347 __ Mov(x4, 0x0123456789abcdefL);
348 __ Mov(x5, x4);
349
350 __ Mov(w6, -1);
351
352 // Test that moves back to the same register have the desired effect. This
353 // is a no-op for X registers, and a truncation for W registers.
354 __ Mov(x7, 0x0123456789abcdefL);
355 __ Mov(x7, x7);
356 __ Mov(x8, 0x0123456789abcdefL);
357 __ Mov(w8, w8);
358 __ Mov(x9, 0x0123456789abcdefL);
359 __ Mov(x9, Operand(x9));
360 __ Mov(x10, 0x0123456789abcdefL);
361 __ Mov(w10, Operand(w10));
362
363 __ Mov(w11, 0xfff);
364 __ Mov(x12, 0xfff);
365 __ Mov(w13, Operand(w11, LSL, 1));
366 __ Mov(x14, Operand(x12, LSL, 2));
367 __ Mov(w15, Operand(w11, LSR, 3));
368 __ Mov(x18, Operand(x12, LSR, 4));
369 __ Mov(w19, Operand(w11, ASR, 11));
370 __ Mov(x20, Operand(x12, ASR, 12));
371 __ Mov(w21, Operand(w11, ROR, 13));
372 __ Mov(x22, Operand(x12, ROR, 14));
373 __ Mov(w23, Operand(w13, UXTB));
374 __ Mov(x24, Operand(x13, SXTB, 1));
375 __ Mov(w25, Operand(w13, UXTH, 2));
376 __ Mov(x26, Operand(x13, SXTH, 3));
377 __ Mov(x27, Operand(w13, UXTW, 4));
378 END();
379
380 RUN();
381
382 ASSERT_EQUAL_64(0x0123456789abcdefL, x0);
383 ASSERT_EQUAL_64(0x00000000abcd0000L, x1);
384 ASSERT_EQUAL_64(0xffffabcdffffffffL, x2);
385 ASSERT_EQUAL_64(0x5432ffffffffffffL, x3);
386 ASSERT_EQUAL_64(x4, x5);
387 ASSERT_EQUAL_32(-1, w6);
388 ASSERT_EQUAL_64(0x0123456789abcdefL, x7);
389 ASSERT_EQUAL_32(0x89abcdefL, w8);
390 ASSERT_EQUAL_64(0x0123456789abcdefL, x9);
391 ASSERT_EQUAL_32(0x89abcdefL, w10);
392 ASSERT_EQUAL_64(0x00000fff, x11);
393 ASSERT_EQUAL_64(0x0000000000000fffUL, x12);
394 ASSERT_EQUAL_64(0x00001ffe, x13);
395 ASSERT_EQUAL_64(0x0000000000003ffcUL, x14);
396 ASSERT_EQUAL_64(0x000001ff, x15);
397 ASSERT_EQUAL_64(0x00000000000000ffUL, x18);
398 ASSERT_EQUAL_64(0x00000001, x19);
399 ASSERT_EQUAL_64(0x0, x20);
400 ASSERT_EQUAL_64(0x7ff80000, x21);
401 ASSERT_EQUAL_64(0x3ffc000000000000UL, x22);
402 ASSERT_EQUAL_64(0x000000fe, x23);
403 ASSERT_EQUAL_64(0xfffffffffffffffcUL, x24);
404 ASSERT_EQUAL_64(0x00007ff8, x25);
405 ASSERT_EQUAL_64(0x000000000000fff0UL, x26);
406 ASSERT_EQUAL_64(0x000000000001ffe0UL, x27);
407
408 TEARDOWN();
409 }
410
411
412 TEST(mov_imm_w) {
413 INIT_V8();
414 SETUP();
415
416 START();
417 __ Mov(w0, 0xffffffffL);
418 __ Mov(w1, 0xffff1234L);
419 __ Mov(w2, 0x1234ffffL);
420 __ Mov(w3, 0x00000000L);
421 __ Mov(w4, 0x00001234L);
422 __ Mov(w5, 0x12340000L);
423 __ Mov(w6, 0x12345678L);
424 END();
425
426 RUN();
427
428 ASSERT_EQUAL_64(0xffffffffL, x0);
429 ASSERT_EQUAL_64(0xffff1234L, x1);
430 ASSERT_EQUAL_64(0x1234ffffL, x2);
431 ASSERT_EQUAL_64(0x00000000L, x3);
432 ASSERT_EQUAL_64(0x00001234L, x4);
433 ASSERT_EQUAL_64(0x12340000L, x5);
434 ASSERT_EQUAL_64(0x12345678L, x6);
435
436 TEARDOWN();
437 }
438
439
440 TEST(mov_imm_x) {
441 INIT_V8();
442 SETUP();
443
444 START();
445 __ Mov(x0, 0xffffffffffffffffL);
446 __ Mov(x1, 0xffffffffffff1234L);
447 __ Mov(x2, 0xffffffff12345678L);
448 __ Mov(x3, 0xffff1234ffff5678L);
449 __ Mov(x4, 0x1234ffffffff5678L);
450 __ Mov(x5, 0x1234ffff5678ffffL);
451 __ Mov(x6, 0x12345678ffffffffL);
452 __ Mov(x7, 0x1234ffffffffffffL);
453 __ Mov(x8, 0x123456789abcffffL);
454 __ Mov(x9, 0x12345678ffff9abcL);
455 __ Mov(x10, 0x1234ffff56789abcL);
456 __ Mov(x11, 0xffff123456789abcL);
457 __ Mov(x12, 0x0000000000000000L);
458 __ Mov(x13, 0x0000000000001234L);
459 __ Mov(x14, 0x0000000012345678L);
460 __ Mov(x15, 0x0000123400005678L);
461 __ Mov(x18, 0x1234000000005678L);
462 __ Mov(x19, 0x1234000056780000L);
463 __ Mov(x20, 0x1234567800000000L);
464 __ Mov(x21, 0x1234000000000000L);
465 __ Mov(x22, 0x123456789abc0000L);
466 __ Mov(x23, 0x1234567800009abcL);
467 __ Mov(x24, 0x1234000056789abcL);
468 __ Mov(x25, 0x0000123456789abcL);
469 __ Mov(x26, 0x123456789abcdef0L);
470 __ Mov(x27, 0xffff000000000001L);
471 __ Mov(x28, 0x8000ffff00000000L);
472 END();
473
474 RUN();
475
476 ASSERT_EQUAL_64(0xffffffffffff1234L, x1);
477 ASSERT_EQUAL_64(0xffffffff12345678L, x2);
478 ASSERT_EQUAL_64(0xffff1234ffff5678L, x3);
479 ASSERT_EQUAL_64(0x1234ffffffff5678L, x4);
480 ASSERT_EQUAL_64(0x1234ffff5678ffffL, x5);
481 ASSERT_EQUAL_64(0x12345678ffffffffL, x6);
482 ASSERT_EQUAL_64(0x1234ffffffffffffL, x7);
483 ASSERT_EQUAL_64(0x123456789abcffffL, x8);
484 ASSERT_EQUAL_64(0x12345678ffff9abcL, x9);
485 ASSERT_EQUAL_64(0x1234ffff56789abcL, x10);
486 ASSERT_EQUAL_64(0xffff123456789abcL, x11);
487 ASSERT_EQUAL_64(0x0000000000000000L, x12);
488 ASSERT_EQUAL_64(0x0000000000001234L, x13);
489 ASSERT_EQUAL_64(0x0000000012345678L, x14);
490 ASSERT_EQUAL_64(0x0000123400005678L, x15);
491 ASSERT_EQUAL_64(0x1234000000005678L, x18);
492 ASSERT_EQUAL_64(0x1234000056780000L, x19);
493 ASSERT_EQUAL_64(0x1234567800000000L, x20);
494 ASSERT_EQUAL_64(0x1234000000000000L, x21);
495 ASSERT_EQUAL_64(0x123456789abc0000L, x22);
496 ASSERT_EQUAL_64(0x1234567800009abcL, x23);
497 ASSERT_EQUAL_64(0x1234000056789abcL, x24);
498 ASSERT_EQUAL_64(0x0000123456789abcL, x25);
499 ASSERT_EQUAL_64(0x123456789abcdef0L, x26);
500 ASSERT_EQUAL_64(0xffff000000000001L, x27);
501 ASSERT_EQUAL_64(0x8000ffff00000000L, x28);
502
503 TEARDOWN();
504 }
505
506
507 TEST(orr) {
508 INIT_V8();
509 SETUP();
510
511 START();
512 __ Mov(x0, 0xf0f0);
513 __ Mov(x1, 0xf00000ff);
514
515 __ Orr(x2, x0, Operand(x1));
516 __ Orr(w3, w0, Operand(w1, LSL, 28));
517 __ Orr(x4, x0, Operand(x1, LSL, 32));
518 __ Orr(x5, x0, Operand(x1, LSR, 4));
519 __ Orr(w6, w0, Operand(w1, ASR, 4));
520 __ Orr(x7, x0, Operand(x1, ASR, 4));
521 __ Orr(w8, w0, Operand(w1, ROR, 12));
522 __ Orr(x9, x0, Operand(x1, ROR, 12));
523 __ Orr(w10, w0, Operand(0xf));
524 __ Orr(x11, x0, Operand(0xf0000000f0000000L));
525 END();
526
527 RUN();
528
529 ASSERT_EQUAL_64(0xf000f0ff, x2);
530 ASSERT_EQUAL_64(0xf000f0f0, x3);
531 ASSERT_EQUAL_64(0xf00000ff0000f0f0L, x4);
532 ASSERT_EQUAL_64(0x0f00f0ff, x5);
533 ASSERT_EQUAL_64(0xff00f0ff, x6);
534 ASSERT_EQUAL_64(0x0f00f0ff, x7);
535 ASSERT_EQUAL_64(0x0ffff0f0, x8);
536 ASSERT_EQUAL_64(0x0ff00000000ff0f0L, x9);
537 ASSERT_EQUAL_64(0xf0ff, x10);
538 ASSERT_EQUAL_64(0xf0000000f000f0f0L, x11);
539
540 TEARDOWN();
541 }
542
543
544 TEST(orr_extend) {
545 INIT_V8();
546 SETUP();
547
548 START();
549 __ Mov(x0, 1);
550 __ Mov(x1, 0x8000000080008080UL);
551 __ Orr(w6, w0, Operand(w1, UXTB));
552 __ Orr(x7, x0, Operand(x1, UXTH, 1));
553 __ Orr(w8, w0, Operand(w1, UXTW, 2));
554 __ Orr(x9, x0, Operand(x1, UXTX, 3));
555 __ Orr(w10, w0, Operand(w1, SXTB));
556 __ Orr(x11, x0, Operand(x1, SXTH, 1));
557 __ Orr(x12, x0, Operand(x1, SXTW, 2));
558 __ Orr(x13, x0, Operand(x1, SXTX, 3));
559 END();
560
561 RUN();
562
563 ASSERT_EQUAL_64(0x00000081, x6);
564 ASSERT_EQUAL_64(0x00010101, x7);
565 ASSERT_EQUAL_64(0x00020201, x8);
566 ASSERT_EQUAL_64(0x0000000400040401UL, x9);
567 ASSERT_EQUAL_64(0x00000000ffffff81UL, x10);
568 ASSERT_EQUAL_64(0xffffffffffff0101UL, x11);
569 ASSERT_EQUAL_64(0xfffffffe00020201UL, x12);
570 ASSERT_EQUAL_64(0x0000000400040401UL, x13);
571
572 TEARDOWN();
573 }
574
575
576 TEST(bitwise_wide_imm) {
577 INIT_V8();
578 SETUP();
579
580 START();
581 __ Mov(x0, 0);
582 __ Mov(x1, 0xf0f0f0f0f0f0f0f0UL);
583
584 __ Orr(x10, x0, Operand(0x1234567890abcdefUL));
585 __ Orr(w11, w1, Operand(0x90abcdef));
586 END();
587
588 RUN();
589
590 ASSERT_EQUAL_64(0, x0);
591 ASSERT_EQUAL_64(0xf0f0f0f0f0f0f0f0UL, x1);
592 ASSERT_EQUAL_64(0x1234567890abcdefUL, x10);
593 ASSERT_EQUAL_64(0xf0fbfdffUL, x11);
594
595 TEARDOWN();
596 }
597
598
599 TEST(orn) {
600 INIT_V8();
601 SETUP();
602
603 START();
604 __ Mov(x0, 0xf0f0);
605 __ Mov(x1, 0xf00000ff);
606
607 __ Orn(x2, x0, Operand(x1));
608 __ Orn(w3, w0, Operand(w1, LSL, 4));
609 __ Orn(x4, x0, Operand(x1, LSL, 4));
610 __ Orn(x5, x0, Operand(x1, LSR, 1));
611 __ Orn(w6, w0, Operand(w1, ASR, 1));
612 __ Orn(x7, x0, Operand(x1, ASR, 1));
613 __ Orn(w8, w0, Operand(w1, ROR, 16));
614 __ Orn(x9, x0, Operand(x1, ROR, 16));
615 __ Orn(w10, w0, Operand(0xffff));
616 __ Orn(x11, x0, Operand(0xffff0000ffffL));
617 END();
618
619 RUN();
620
621 ASSERT_EQUAL_64(0xffffffff0ffffff0L, x2);
622 ASSERT_EQUAL_64(0xfffff0ff, x3);
623 ASSERT_EQUAL_64(0xfffffff0fffff0ffL, x4);
624 ASSERT_EQUAL_64(0xffffffff87fffff0L, x5);
625 ASSERT_EQUAL_64(0x07fffff0, x6);
626 ASSERT_EQUAL_64(0xffffffff87fffff0L, x7);
627 ASSERT_EQUAL_64(0xff00ffff, x8);
628 ASSERT_EQUAL_64(0xff00ffffffffffffL, x9);
629 ASSERT_EQUAL_64(0xfffff0f0, x10);
630 ASSERT_EQUAL_64(0xffff0000fffff0f0L, x11);
631
632 TEARDOWN();
633 }
634
635
636 TEST(orn_extend) {
637 INIT_V8();
638 SETUP();
639
640 START();
641 __ Mov(x0, 1);
642 __ Mov(x1, 0x8000000080008081UL);
643 __ Orn(w6, w0, Operand(w1, UXTB));
644 __ Orn(x7, x0, Operand(x1, UXTH, 1));
645 __ Orn(w8, w0, Operand(w1, UXTW, 2));
646 __ Orn(x9, x0, Operand(x1, UXTX, 3));
647 __ Orn(w10, w0, Operand(w1, SXTB));
648 __ Orn(x11, x0, Operand(x1, SXTH, 1));
649 __ Orn(x12, x0, Operand(x1, SXTW, 2));
650 __ Orn(x13, x0, Operand(x1, SXTX, 3));
651 END();
652
653 RUN();
654
655 ASSERT_EQUAL_64(0xffffff7f, x6);
656 ASSERT_EQUAL_64(0xfffffffffffefefdUL, x7);
657 ASSERT_EQUAL_64(0xfffdfdfb, x8);
658 ASSERT_EQUAL_64(0xfffffffbfffbfbf7UL, x9);
659 ASSERT_EQUAL_64(0x0000007f, x10);
660 ASSERT_EQUAL_64(0x0000fefd, x11);
661 ASSERT_EQUAL_64(0x00000001fffdfdfbUL, x12);
662 ASSERT_EQUAL_64(0xfffffffbfffbfbf7UL, x13);
663
664 TEARDOWN();
665 }
666
667
668 TEST(and_) {
669 INIT_V8();
670 SETUP();
671
672 START();
673 __ Mov(x0, 0xfff0);
674 __ Mov(x1, 0xf00000ff);
675
676 __ And(x2, x0, Operand(x1));
677 __ And(w3, w0, Operand(w1, LSL, 4));
678 __ And(x4, x0, Operand(x1, LSL, 4));
679 __ And(x5, x0, Operand(x1, LSR, 1));
680 __ And(w6, w0, Operand(w1, ASR, 20));
681 __ And(x7, x0, Operand(x1, ASR, 20));
682 __ And(w8, w0, Operand(w1, ROR, 28));
683 __ And(x9, x0, Operand(x1, ROR, 28));
684 __ And(w10, w0, Operand(0xff00));
685 __ And(x11, x0, Operand(0xff));
686 END();
687
688 RUN();
689
690 ASSERT_EQUAL_64(0x000000f0, x2);
691 ASSERT_EQUAL_64(0x00000ff0, x3);
692 ASSERT_EQUAL_64(0x00000ff0, x4);
693 ASSERT_EQUAL_64(0x00000070, x5);
694 ASSERT_EQUAL_64(0x0000ff00, x6);
695 ASSERT_EQUAL_64(0x00000f00, x7);
696 ASSERT_EQUAL_64(0x00000ff0, x8);
697 ASSERT_EQUAL_64(0x00000000, x9);
698 ASSERT_EQUAL_64(0x0000ff00, x10);
699 ASSERT_EQUAL_64(0x000000f0, x11);
700
701 TEARDOWN();
702 }
703
704
705 TEST(and_extend) {
706 INIT_V8();
707 SETUP();
708
709 START();
710 __ Mov(x0, 0xffffffffffffffffUL);
711 __ Mov(x1, 0x8000000080008081UL);
712 __ And(w6, w0, Operand(w1, UXTB));
713 __ And(x7, x0, Operand(x1, UXTH, 1));
714 __ And(w8, w0, Operand(w1, UXTW, 2));
715 __ And(x9, x0, Operand(x1, UXTX, 3));
716 __ And(w10, w0, Operand(w1, SXTB));
717 __ And(x11, x0, Operand(x1, SXTH, 1));
718 __ And(x12, x0, Operand(x1, SXTW, 2));
719 __ And(x13, x0, Operand(x1, SXTX, 3));
720 END();
721
722 RUN();
723
724 ASSERT_EQUAL_64(0x00000081, x6);
725 ASSERT_EQUAL_64(0x00010102, x7);
726 ASSERT_EQUAL_64(0x00020204, x8);
727 ASSERT_EQUAL_64(0x0000000400040408UL, x9);
728 ASSERT_EQUAL_64(0xffffff81, x10);
729 ASSERT_EQUAL_64(0xffffffffffff0102UL, x11);
730 ASSERT_EQUAL_64(0xfffffffe00020204UL, x12);
731 ASSERT_EQUAL_64(0x0000000400040408UL, x13);
732
733 TEARDOWN();
734 }
735
736
737 TEST(ands) {
738 INIT_V8();
739 SETUP();
740
741 START();
742 __ Mov(x1, 0xf00000ff);
743 __ Ands(w0, w1, Operand(w1));
744 END();
745
746 RUN();
747
748 ASSERT_EQUAL_NZCV(NFlag);
749 ASSERT_EQUAL_64(0xf00000ff, x0);
750
751 START();
752 __ Mov(x0, 0xfff0);
753 __ Mov(x1, 0xf00000ff);
754 __ Ands(w0, w0, Operand(w1, LSR, 4));
755 END();
756
757 RUN();
758
759 ASSERT_EQUAL_NZCV(ZFlag);
760 ASSERT_EQUAL_64(0x00000000, x0);
761
762 START();
763 __ Mov(x0, 0x8000000000000000L);
764 __ Mov(x1, 0x00000001);
765 __ Ands(x0, x0, Operand(x1, ROR, 1));
766 END();
767
768 RUN();
769
770 ASSERT_EQUAL_NZCV(NFlag);
771 ASSERT_EQUAL_64(0x8000000000000000L, x0);
772
773 START();
774 __ Mov(x0, 0xfff0);
775 __ Ands(w0, w0, Operand(0xf));
776 END();
777
778 RUN();
779
780 ASSERT_EQUAL_NZCV(ZFlag);
781 ASSERT_EQUAL_64(0x00000000, x0);
782
783 START();
784 __ Mov(x0, 0xff000000);
785 __ Ands(w0, w0, Operand(0x80000000));
786 END();
787
788 RUN();
789
790 ASSERT_EQUAL_NZCV(NFlag);
791 ASSERT_EQUAL_64(0x80000000, x0);
792
793 TEARDOWN();
794 }
795
796
797 TEST(bic) {
798 INIT_V8();
799 SETUP();
800
801 START();
802 __ Mov(x0, 0xfff0);
803 __ Mov(x1, 0xf00000ff);
804
805 __ Bic(x2, x0, Operand(x1));
806 __ Bic(w3, w0, Operand(w1, LSL, 4));
807 __ Bic(x4, x0, Operand(x1, LSL, 4));
808 __ Bic(x5, x0, Operand(x1, LSR, 1));
809 __ Bic(w6, w0, Operand(w1, ASR, 20));
810 __ Bic(x7, x0, Operand(x1, ASR, 20));
811 __ Bic(w8, w0, Operand(w1, ROR, 28));
812 __ Bic(x9, x0, Operand(x1, ROR, 24));
813 __ Bic(x10, x0, Operand(0x1f));
814 __ Bic(x11, x0, Operand(0x100));
815
816 // Test bic into csp when the constant cannot be encoded in the immediate
817 // field.
818 // Use x20 to preserve csp. We check the result via x21 because the
819 // test infrastructure requires that csp be restored to its original value.
820 __ Mov(x20, csp);
821 __ Mov(x0, 0xffffff);
822 __ Bic(csp, x0, Operand(0xabcdef));
823 __ Mov(x21, csp);
824 __ Mov(csp, x20);
825 END();
826
827 RUN();
828
829 ASSERT_EQUAL_64(0x0000ff00, x2);
830 ASSERT_EQUAL_64(0x0000f000, x3);
831 ASSERT_EQUAL_64(0x0000f000, x4);
832 ASSERT_EQUAL_64(0x0000ff80, x5);
833 ASSERT_EQUAL_64(0x000000f0, x6);
834 ASSERT_EQUAL_64(0x0000f0f0, x7);
835 ASSERT_EQUAL_64(0x0000f000, x8);
836 ASSERT_EQUAL_64(0x0000ff00, x9);
837 ASSERT_EQUAL_64(0x0000ffe0, x10);
838 ASSERT_EQUAL_64(0x0000fef0, x11);
839
840 ASSERT_EQUAL_64(0x543210, x21);
841
842 TEARDOWN();
843 }
844
845
846 TEST(bic_extend) {
847 INIT_V8();
848 SETUP();
849
850 START();
851 __ Mov(x0, 0xffffffffffffffffUL);
852 __ Mov(x1, 0x8000000080008081UL);
853 __ Bic(w6, w0, Operand(w1, UXTB));
854 __ Bic(x7, x0, Operand(x1, UXTH, 1));
855 __ Bic(w8, w0, Operand(w1, UXTW, 2));
856 __ Bic(x9, x0, Operand(x1, UXTX, 3));
857 __ Bic(w10, w0, Operand(w1, SXTB));
858 __ Bic(x11, x0, Operand(x1, SXTH, 1));
859 __ Bic(x12, x0, Operand(x1, SXTW, 2));
860 __ Bic(x13, x0, Operand(x1, SXTX, 3));
861 END();
862
863 RUN();
864
865 ASSERT_EQUAL_64(0xffffff7e, x6);
866 ASSERT_EQUAL_64(0xfffffffffffefefdUL, x7);
867 ASSERT_EQUAL_64(0xfffdfdfb, x8);
868 ASSERT_EQUAL_64(0xfffffffbfffbfbf7UL, x9);
869 ASSERT_EQUAL_64(0x0000007e, x10);
870 ASSERT_EQUAL_64(0x0000fefd, x11);
871 ASSERT_EQUAL_64(0x00000001fffdfdfbUL, x12);
872 ASSERT_EQUAL_64(0xfffffffbfffbfbf7UL, x13);
873
874 TEARDOWN();
875 }
876
877
878 TEST(bics) {
879 INIT_V8();
880 SETUP();
881
882 START();
883 __ Mov(x1, 0xffff);
884 __ Bics(w0, w1, Operand(w1));
885 END();
886
887 RUN();
888
889 ASSERT_EQUAL_NZCV(ZFlag);
890 ASSERT_EQUAL_64(0x00000000, x0);
891
892 START();
893 __ Mov(x0, 0xffffffff);
894 __ Bics(w0, w0, Operand(w0, LSR, 1));
895 END();
896
897 RUN();
898
899 ASSERT_EQUAL_NZCV(NFlag);
900 ASSERT_EQUAL_64(0x80000000, x0);
901
902 START();
903 __ Mov(x0, 0x8000000000000000L);
904 __ Mov(x1, 0x00000001);
905 __ Bics(x0, x0, Operand(x1, ROR, 1));
906 END();
907
908 RUN();
909
910 ASSERT_EQUAL_NZCV(ZFlag);
911 ASSERT_EQUAL_64(0x00000000, x0);
912
913 START();
914 __ Mov(x0, 0xffffffffffffffffL);
915 __ Bics(x0, x0, Operand(0x7fffffffffffffffL));
916 END();
917
918 RUN();
919
920 ASSERT_EQUAL_NZCV(NFlag);
921 ASSERT_EQUAL_64(0x8000000000000000L, x0);
922
923 START();
924 __ Mov(w0, 0xffff0000);
925 __ Bics(w0, w0, Operand(0xfffffff0));
926 END();
927
928 RUN();
929
930 ASSERT_EQUAL_NZCV(ZFlag);
931 ASSERT_EQUAL_64(0x00000000, x0);
932
933 TEARDOWN();
934 }
935
936
937 TEST(eor) {
938 INIT_V8();
939 SETUP();
940
941 START();
942 __ Mov(x0, 0xfff0);
943 __ Mov(x1, 0xf00000ff);
944
945 __ Eor(x2, x0, Operand(x1));
946 __ Eor(w3, w0, Operand(w1, LSL, 4));
947 __ Eor(x4, x0, Operand(x1, LSL, 4));
948 __ Eor(x5, x0, Operand(x1, LSR, 1));
949 __ Eor(w6, w0, Operand(w1, ASR, 20));
950 __ Eor(x7, x0, Operand(x1, ASR, 20));
951 __ Eor(w8, w0, Operand(w1, ROR, 28));
952 __ Eor(x9, x0, Operand(x1, ROR, 28));
953 __ Eor(w10, w0, Operand(0xff00ff00));
954 __ Eor(x11, x0, Operand(0xff00ff00ff00ff00L));
955 END();
956
957 RUN();
958
959 ASSERT_EQUAL_64(0xf000ff0f, x2);
960 ASSERT_EQUAL_64(0x0000f000, x3);
961 ASSERT_EQUAL_64(0x0000000f0000f000L, x4);
962 ASSERT_EQUAL_64(0x7800ff8f, x5);
963 ASSERT_EQUAL_64(0xffff00f0, x6);
964 ASSERT_EQUAL_64(0x0000f0f0, x7);
965 ASSERT_EQUAL_64(0x0000f00f, x8);
966 ASSERT_EQUAL_64(0x00000ff00000ffffL, x9);
967 ASSERT_EQUAL_64(0xff0000f0, x10);
968 ASSERT_EQUAL_64(0xff00ff00ff0000f0L, x11);
969
970 TEARDOWN();
971 }
972
973
974 TEST(eor_extend) {
975 INIT_V8();
976 SETUP();
977
978 START();
979 __ Mov(x0, 0x1111111111111111UL);
980 __ Mov(x1, 0x8000000080008081UL);
981 __ Eor(w6, w0, Operand(w1, UXTB));
982 __ Eor(x7, x0, Operand(x1, UXTH, 1));
983 __ Eor(w8, w0, Operand(w1, UXTW, 2));
984 __ Eor(x9, x0, Operand(x1, UXTX, 3));
985 __ Eor(w10, w0, Operand(w1, SXTB));
986 __ Eor(x11, x0, Operand(x1, SXTH, 1));
987 __ Eor(x12, x0, Operand(x1, SXTW, 2));
988 __ Eor(x13, x0, Operand(x1, SXTX, 3));
989 END();
990
991 RUN();
992
993 ASSERT_EQUAL_64(0x11111190, x6);
994 ASSERT_EQUAL_64(0x1111111111101013UL, x7);
995 ASSERT_EQUAL_64(0x11131315, x8);
996 ASSERT_EQUAL_64(0x1111111511151519UL, x9);
997 ASSERT_EQUAL_64(0xeeeeee90, x10);
998 ASSERT_EQUAL_64(0xeeeeeeeeeeee1013UL, x11);
999 ASSERT_EQUAL_64(0xeeeeeeef11131315UL, x12);
1000 ASSERT_EQUAL_64(0x1111111511151519UL, x13);
1001
1002 TEARDOWN();
1003 }
1004
1005
1006 TEST(eon) {
1007 INIT_V8();
1008 SETUP();
1009
1010 START();
1011 __ Mov(x0, 0xfff0);
1012 __ Mov(x1, 0xf00000ff);
1013
1014 __ Eon(x2, x0, Operand(x1));
1015 __ Eon(w3, w0, Operand(w1, LSL, 4));
1016 __ Eon(x4, x0, Operand(x1, LSL, 4));
1017 __ Eon(x5, x0, Operand(x1, LSR, 1));
1018 __ Eon(w6, w0, Operand(w1, ASR, 20));
1019 __ Eon(x7, x0, Operand(x1, ASR, 20));
1020 __ Eon(w8, w0, Operand(w1, ROR, 28));
1021 __ Eon(x9, x0, Operand(x1, ROR, 28));
1022 __ Eon(w10, w0, Operand(0x03c003c0));
1023 __ Eon(x11, x0, Operand(0x0000100000001000L));
1024 END();
1025
1026 RUN();
1027
1028 ASSERT_EQUAL_64(0xffffffff0fff00f0L, x2);
1029 ASSERT_EQUAL_64(0xffff0fff, x3);
1030 ASSERT_EQUAL_64(0xfffffff0ffff0fffL, x4);
1031 ASSERT_EQUAL_64(0xffffffff87ff0070L, x5);
1032 ASSERT_EQUAL_64(0x0000ff0f, x6);
1033 ASSERT_EQUAL_64(0xffffffffffff0f0fL, x7);
1034 ASSERT_EQUAL_64(0xffff0ff0, x8);
1035 ASSERT_EQUAL_64(0xfffff00fffff0000L, x9);
1036 ASSERT_EQUAL_64(0xfc3f03cf, x10);
1037 ASSERT_EQUAL_64(0xffffefffffff100fL, x11);
1038
1039 TEARDOWN();
1040 }
1041
1042
1043 TEST(eon_extend) {
1044 INIT_V8();
1045 SETUP();
1046
1047 START();
1048 __ Mov(x0, 0x1111111111111111UL);
1049 __ Mov(x1, 0x8000000080008081UL);
1050 __ Eon(w6, w0, Operand(w1, UXTB));
1051 __ Eon(x7, x0, Operand(x1, UXTH, 1));
1052 __ Eon(w8, w0, Operand(w1, UXTW, 2));
1053 __ Eon(x9, x0, Operand(x1, UXTX, 3));
1054 __ Eon(w10, w0, Operand(w1, SXTB));
1055 __ Eon(x11, x0, Operand(x1, SXTH, 1));
1056 __ Eon(x12, x0, Operand(x1, SXTW, 2));
1057 __ Eon(x13, x0, Operand(x1, SXTX, 3));
1058 END();
1059
1060 RUN();
1061
1062 ASSERT_EQUAL_64(0xeeeeee6f, x6);
1063 ASSERT_EQUAL_64(0xeeeeeeeeeeefefecUL, x7);
1064 ASSERT_EQUAL_64(0xeeececea, x8);
1065 ASSERT_EQUAL_64(0xeeeeeeeaeeeaeae6UL, x9);
1066 ASSERT_EQUAL_64(0x1111116f, x10);
1067 ASSERT_EQUAL_64(0x111111111111efecUL, x11);
1068 ASSERT_EQUAL_64(0x11111110eeececeaUL, x12);
1069 ASSERT_EQUAL_64(0xeeeeeeeaeeeaeae6UL, x13);
1070
1071 TEARDOWN();
1072 }
1073
1074
1075 TEST(mul) {
1076 INIT_V8();
1077 SETUP();
1078
1079 START();
1080 __ Mov(x16, 0);
1081 __ Mov(x17, 1);
1082 __ Mov(x18, 0xffffffff);
1083 __ Mov(x19, 0xffffffffffffffffUL);
1084
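// Mul yields the low half of the product; Smull yields the full 64-bit
// product of two W registers; Mneg yields the negated product.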
1085 __ Mul(w0, w16, w16);
1086 __ Mul(w1, w16, w17);
1087 __ Mul(w2, w17, w18);
1088 __ Mul(w3, w18, w19);
1089 __ Mul(x4, x16, x16);
1090 __ Mul(x5, x17, x18);
1091 __ Mul(x6, x18, x19);
1092 __ Mul(x7, x19, x19);
1093 __ Smull(x8, w17, w18);
1094 __ Smull(x9, w18, w18);
1095 __ Smull(x10, w19, w19);
1096 __ Mneg(w11, w16, w16);
1097 __ Mneg(w12, w16, w17);
1098 __ Mneg(w13, w17, w18);
1099 __ Mneg(w14, w18, w19);
1100 __ Mneg(x20, x16, x16);
1101 __ Mneg(x21, x17, x18);
1102 __ Mneg(x22, x18, x19);
1103 __ Mneg(x23, x19, x19);
1104 END();
1105
1106 RUN();
1107
1108 ASSERT_EQUAL_64(0, x0);
1109 ASSERT_EQUAL_64(0, x1);
1110 ASSERT_EQUAL_64(0xffffffff, x2);
1111 ASSERT_EQUAL_64(1, x3);
1112 ASSERT_EQUAL_64(0, x4);
1113 ASSERT_EQUAL_64(0xffffffff, x5);
1114 ASSERT_EQUAL_64(0xffffffff00000001UL, x6);
1115 ASSERT_EQUAL_64(1, x7);
1116 ASSERT_EQUAL_64(0xffffffffffffffffUL, x8);
1117 ASSERT_EQUAL_64(1, x9);
1118 ASSERT_EQUAL_64(1, x10);
1119 ASSERT_EQUAL_64(0, x11);
1120 ASSERT_EQUAL_64(0, x12);
1121 ASSERT_EQUAL_64(1, x13);
1122 ASSERT_EQUAL_64(0xffffffff, x14);
1123 ASSERT_EQUAL_64(0, x20);
1124 ASSERT_EQUAL_64(0xffffffff00000001UL, x21);
1125 ASSERT_EQUAL_64(0xffffffff, x22);
1126 ASSERT_EQUAL_64(0xffffffffffffffffUL, x23);
1127
1128 TEARDOWN();
1129 }
1130
1131
1132 static void SmullHelper(int64_t expected, int64_t a, int64_t b) {
1133 SETUP();
1134 START();
1135 __ Mov(w0, a);
1136 __ Mov(w1, b);
1137 __ Smull(x2, w0, w1);
1138 END();
1139 RUN();
1140 ASSERT_EQUAL_64(expected, x2);
1141 TEARDOWN();
1142 }
1143
1144
1145 TEST(smull) {
1146 INIT_V8();
1147 SmullHelper(0, 0, 0);
1148 SmullHelper(1, 1, 1);
1149 SmullHelper(-1, -1, 1);
1150 SmullHelper(1, -1, -1);
1151 SmullHelper(0xffffffff80000000, 0x80000000, 1);
1152 SmullHelper(0x0000000080000000, 0x00010000, 0x00008000);
1153 }
1154
1155
1156 TEST(madd) {
1157 INIT_V8();
1158 SETUP();
1159
1160 START();
1161 __ Mov(x16, 0);
1162 __ Mov(x17, 1);
1163 __ Mov(x18, 0xffffffff);
1164 __ Mov(x19, 0xffffffffffffffffUL);
1165
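// Madd(rd, rn, rm, ra) computes rd = ra + (rn * rm). Msub, tested below,
// computes rd = ra - (rn * rm).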
1166 __ Madd(w0, w16, w16, w16);
1167 __ Madd(w1, w16, w16, w17);
1168 __ Madd(w2, w16, w16, w18);
1169 __ Madd(w3, w16, w16, w19);
1170 __ Madd(w4, w16, w17, w17);
1171 __ Madd(w5, w17, w17, w18);
1172 __ Madd(w6, w17, w17, w19);
1173 __ Madd(w7, w17, w18, w16);
1174 __ Madd(w8, w17, w18, w18);
1175 __ Madd(w9, w18, w18, w17);
1176 __ Madd(w10, w18, w19, w18);
1177 __ Madd(w11, w19, w19, w19);
1178
1179 __ Madd(x12, x16, x16, x16);
1180 __ Madd(x13, x16, x16, x17);
1181 __ Madd(x14, x16, x16, x18);
1182 __ Madd(x15, x16, x16, x19);
1183 __ Madd(x20, x16, x17, x17);
1184 __ Madd(x21, x17, x17, x18);
1185 __ Madd(x22, x17, x17, x19);
1186 __ Madd(x23, x17, x18, x16);
1187 __ Madd(x24, x17, x18, x18);
1188 __ Madd(x25, x18, x18, x17);
1189 __ Madd(x26, x18, x19, x18);
1190 __ Madd(x27, x19, x19, x19);
1191
1192 END();
1193
1194 RUN();
1195
1196 ASSERT_EQUAL_64(0, x0);
1197 ASSERT_EQUAL_64(1, x1);
1198 ASSERT_EQUAL_64(0xffffffff, x2);
1199 ASSERT_EQUAL_64(0xffffffff, x3);
1200 ASSERT_EQUAL_64(1, x4);
1201 ASSERT_EQUAL_64(0, x5);
1202 ASSERT_EQUAL_64(0, x6);
1203 ASSERT_EQUAL_64(0xffffffff, x7);
1204 ASSERT_EQUAL_64(0xfffffffe, x8);
1205 ASSERT_EQUAL_64(2, x9);
1206 ASSERT_EQUAL_64(0, x10);
1207 ASSERT_EQUAL_64(0, x11);
1208
1209 ASSERT_EQUAL_64(0, x12);
1210 ASSERT_EQUAL_64(1, x13);
1211 ASSERT_EQUAL_64(0xffffffff, x14);
1212   ASSERT_EQUAL_64(0xffffffffffffffffUL, x15);
1213 ASSERT_EQUAL_64(1, x20);
1214 ASSERT_EQUAL_64(0x100000000UL, x21);
1215 ASSERT_EQUAL_64(0, x22);
1216 ASSERT_EQUAL_64(0xffffffff, x23);
1217 ASSERT_EQUAL_64(0x1fffffffe, x24);
1218 ASSERT_EQUAL_64(0xfffffffe00000002UL, x25);
1219 ASSERT_EQUAL_64(0, x26);
1220 ASSERT_EQUAL_64(0, x27);
1221
1222 TEARDOWN();
1223 }
1224
1225
1226 TEST(msub) {
1227 INIT_V8();
1228 SETUP();
1229
1230 START();
1231 __ Mov(x16, 0);
1232 __ Mov(x17, 1);
1233 __ Mov(x18, 0xffffffff);
1234 __ Mov(x19, 0xffffffffffffffffUL);
1235
1236 __ Msub(w0, w16, w16, w16);
1237 __ Msub(w1, w16, w16, w17);
1238 __ Msub(w2, w16, w16, w18);
1239 __ Msub(w3, w16, w16, w19);
1240 __ Msub(w4, w16, w17, w17);
1241 __ Msub(w5, w17, w17, w18);
1242 __ Msub(w6, w17, w17, w19);
1243 __ Msub(w7, w17, w18, w16);
1244 __ Msub(w8, w17, w18, w18);
1245 __ Msub(w9, w18, w18, w17);
1246 __ Msub(w10, w18, w19, w18);
1247 __ Msub(w11, w19, w19, w19);
1248
1249 __ Msub(x12, x16, x16, x16);
1250 __ Msub(x13, x16, x16, x17);
1251 __ Msub(x14, x16, x16, x18);
1252 __ Msub(x15, x16, x16, x19);
1253 __ Msub(x20, x16, x17, x17);
1254 __ Msub(x21, x17, x17, x18);
1255 __ Msub(x22, x17, x17, x19);
1256 __ Msub(x23, x17, x18, x16);
1257 __ Msub(x24, x17, x18, x18);
1258 __ Msub(x25, x18, x18, x17);
1259 __ Msub(x26, x18, x19, x18);
1260 __ Msub(x27, x19, x19, x19);
1261
1262 END();
1263
1264 RUN();
1265
1266 ASSERT_EQUAL_64(0, x0);
1267 ASSERT_EQUAL_64(1, x1);
1268 ASSERT_EQUAL_64(0xffffffff, x2);
1269 ASSERT_EQUAL_64(0xffffffff, x3);
1270 ASSERT_EQUAL_64(1, x4);
1271 ASSERT_EQUAL_64(0xfffffffe, x5);
1272 ASSERT_EQUAL_64(0xfffffffe, x6);
1273 ASSERT_EQUAL_64(1, x7);
1274 ASSERT_EQUAL_64(0, x8);
1275 ASSERT_EQUAL_64(0, x9);
1276 ASSERT_EQUAL_64(0xfffffffe, x10);
1277 ASSERT_EQUAL_64(0xfffffffe, x11);
1278
1279 ASSERT_EQUAL_64(0, x12);
1280 ASSERT_EQUAL_64(1, x13);
1281 ASSERT_EQUAL_64(0xffffffff, x14);
1282 ASSERT_EQUAL_64(0xffffffffffffffffUL, x15);
1283 ASSERT_EQUAL_64(1, x20);
1284 ASSERT_EQUAL_64(0xfffffffeUL, x21);
1285 ASSERT_EQUAL_64(0xfffffffffffffffeUL, x22);
1286 ASSERT_EQUAL_64(0xffffffff00000001UL, x23);
1287 ASSERT_EQUAL_64(0, x24);
1288 ASSERT_EQUAL_64(0x200000000UL, x25);
1289 ASSERT_EQUAL_64(0x1fffffffeUL, x26);
1290 ASSERT_EQUAL_64(0xfffffffffffffffeUL, x27);
1291
1292 TEARDOWN();
1293 }
1294
1295
1296 TEST(smulh) {
1297 INIT_V8();
1298 SETUP();
1299
1300 START();
1301 __ Mov(x20, 0);
1302 __ Mov(x21, 1);
1303 __ Mov(x22, 0x0000000100000000L);
1304 __ Mov(x23, 0x12345678);
1305 __ Mov(x24, 0x0123456789abcdefL);
1306 __ Mov(x25, 0x0000000200000000L);
1307 __ Mov(x26, 0x8000000000000000UL);
1308 __ Mov(x27, 0xffffffffffffffffUL);
1309 __ Mov(x28, 0x5555555555555555UL);
1310 __ Mov(x29, 0xaaaaaaaaaaaaaaaaUL);
1311
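// Smulh returns the upper 64 bits of the 128-bit signed product.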
1312 __ Smulh(x0, x20, x24);
1313 __ Smulh(x1, x21, x24);
1314 __ Smulh(x2, x22, x23);
1315 __ Smulh(x3, x22, x24);
1316 __ Smulh(x4, x24, x25);
1317 __ Smulh(x5, x23, x27);
1318 __ Smulh(x6, x26, x26);
1319 __ Smulh(x7, x26, x27);
1320 __ Smulh(x8, x27, x27);
1321 __ Smulh(x9, x28, x28);
1322 __ Smulh(x10, x28, x29);
1323 __ Smulh(x11, x29, x29);
1324 END();
1325
1326 RUN();
1327
1328 ASSERT_EQUAL_64(0, x0);
1329 ASSERT_EQUAL_64(0, x1);
1330 ASSERT_EQUAL_64(0, x2);
1331 ASSERT_EQUAL_64(0x01234567, x3);
1332 ASSERT_EQUAL_64(0x02468acf, x4);
1333 ASSERT_EQUAL_64(0xffffffffffffffffUL, x5);
1334 ASSERT_EQUAL_64(0x4000000000000000UL, x6);
1335 ASSERT_EQUAL_64(0, x7);
1336 ASSERT_EQUAL_64(0, x8);
1337 ASSERT_EQUAL_64(0x1c71c71c71c71c71UL, x9);
1338 ASSERT_EQUAL_64(0xe38e38e38e38e38eUL, x10);
1339 ASSERT_EQUAL_64(0x1c71c71c71c71c72UL, x11);
1340
1341 TEARDOWN();
1342 }
1343
1344
1345 TEST(smaddl_umaddl) {
1346 INIT_V8();
1347 SETUP();
1348
1349 START();
1350 __ Mov(x17, 1);
1351 __ Mov(x18, 0xffffffff);
1352 __ Mov(x19, 0xffffffffffffffffUL);
1353 __ Mov(x20, 4);
1354 __ Mov(x21, 0x200000000UL);
1355
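// Smaddl and Umaddl compute xd = xa + (wn * wm), sign- and zero-extending
// the 32-bit operands respectively.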
1356 __ Smaddl(x9, w17, w18, x20);
1357 __ Smaddl(x10, w18, w18, x20);
1358 __ Smaddl(x11, w19, w19, x20);
1359 __ Smaddl(x12, w19, w19, x21);
1360 __ Umaddl(x13, w17, w18, x20);
1361 __ Umaddl(x14, w18, w18, x20);
1362 __ Umaddl(x15, w19, w19, x20);
1363 __ Umaddl(x22, w19, w19, x21);
1364 END();
1365
1366 RUN();
1367
1368 ASSERT_EQUAL_64(3, x9);
1369 ASSERT_EQUAL_64(5, x10);
1370 ASSERT_EQUAL_64(5, x11);
1371 ASSERT_EQUAL_64(0x200000001UL, x12);
1372 ASSERT_EQUAL_64(0x100000003UL, x13);
1373 ASSERT_EQUAL_64(0xfffffffe00000005UL, x14);
1374 ASSERT_EQUAL_64(0xfffffffe00000005UL, x15);
1375 ASSERT_EQUAL_64(0x1, x22);
1376
1377 TEARDOWN();
1378 }
1379
1380
1381 TEST(smsubl_umsubl) {
1382 INIT_V8();
1383 SETUP();
1384
1385 START();
1386 __ Mov(x17, 1);
1387 __ Mov(x18, 0xffffffff);
1388 __ Mov(x19, 0xffffffffffffffffUL);
1389 __ Mov(x20, 4);
1390 __ Mov(x21, 0x200000000UL);
1391
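// Smsubl and Umsubl compute xd = xa - (wn * wm), sign- and zero-extending
// the 32-bit operands respectively.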
1392 __ Smsubl(x9, w17, w18, x20);
1393 __ Smsubl(x10, w18, w18, x20);
1394 __ Smsubl(x11, w19, w19, x20);
1395 __ Smsubl(x12, w19, w19, x21);
1396 __ Umsubl(x13, w17, w18, x20);
1397 __ Umsubl(x14, w18, w18, x20);
1398 __ Umsubl(x15, w19, w19, x20);
1399 __ Umsubl(x22, w19, w19, x21);
1400 END();
1401
1402 RUN();
1403
1404 ASSERT_EQUAL_64(5, x9);
1405 ASSERT_EQUAL_64(3, x10);
1406 ASSERT_EQUAL_64(3, x11);
1407 ASSERT_EQUAL_64(0x1ffffffffUL, x12);
1408 ASSERT_EQUAL_64(0xffffffff00000005UL, x13);
1409 ASSERT_EQUAL_64(0x200000003UL, x14);
1410 ASSERT_EQUAL_64(0x200000003UL, x15);
1411 ASSERT_EQUAL_64(0x3ffffffffUL, x22);
1412
1413 TEARDOWN();
1414 }
1415
1416
1417 TEST(div) {
1418 INIT_V8();
1419 SETUP();
1420
1421 START();
1422 __ Mov(x16, 1);
1423 __ Mov(x17, 0xffffffff);
1424 __ Mov(x18, 0xffffffffffffffffUL);
1425 __ Mov(x19, 0x80000000);
1426 __ Mov(x20, 0x8000000000000000UL);
1427 __ Mov(x21, 2);
1428
1429 __ Udiv(w0, w16, w16);
1430 __ Udiv(w1, w17, w16);
1431 __ Sdiv(w2, w16, w16);
1432 __ Sdiv(w3, w16, w17);
1433 __ Sdiv(w4, w17, w18);
1434
1435 __ Udiv(x5, x16, x16);
1436 __ Udiv(x6, x17, x18);
1437 __ Sdiv(x7, x16, x16);
1438 __ Sdiv(x8, x16, x17);
1439 __ Sdiv(x9, x17, x18);
1440
1441 __ Udiv(w10, w19, w21);
1442 __ Sdiv(w11, w19, w21);
1443 __ Udiv(x12, x19, x21);
1444 __ Sdiv(x13, x19, x21);
1445 __ Udiv(x14, x20, x21);
1446 __ Sdiv(x15, x20, x21);
1447
1448 __ Udiv(w22, w19, w17);
1449 __ Sdiv(w23, w19, w17);
1450 __ Udiv(x24, x20, x18);
1451 __ Sdiv(x25, x20, x18);
1452
1453 __ Udiv(x26, x16, x21);
1454 __ Sdiv(x27, x16, x21);
1455 __ Udiv(x28, x18, x21);
1456 __ Sdiv(x29, x18, x21);
1457
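// Division by zero does not trap on A64; it writes zero to the destination.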
1458 __ Mov(x17, 0);
1459 __ Udiv(w18, w16, w17);
1460 __ Sdiv(w19, w16, w17);
1461 __ Udiv(x20, x16, x17);
1462 __ Sdiv(x21, x16, x17);
1463 END();
1464
1465 RUN();
1466
1467 ASSERT_EQUAL_64(1, x0);
1468 ASSERT_EQUAL_64(0xffffffff, x1);
1469 ASSERT_EQUAL_64(1, x2);
1470 ASSERT_EQUAL_64(0xffffffff, x3);
1471 ASSERT_EQUAL_64(1, x4);
1472 ASSERT_EQUAL_64(1, x5);
1473 ASSERT_EQUAL_64(0, x6);
1474 ASSERT_EQUAL_64(1, x7);
1475 ASSERT_EQUAL_64(0, x8);
1476 ASSERT_EQUAL_64(0xffffffff00000001UL, x9);
1477 ASSERT_EQUAL_64(0x40000000, x10);
1478   ASSERT_EQUAL_64(0xc0000000, x11);
1479 ASSERT_EQUAL_64(0x40000000, x12);
1480 ASSERT_EQUAL_64(0x40000000, x13);
1481 ASSERT_EQUAL_64(0x4000000000000000UL, x14);
1482   ASSERT_EQUAL_64(0xc000000000000000UL, x15);
1483 ASSERT_EQUAL_64(0, x22);
1484 ASSERT_EQUAL_64(0x80000000, x23);
1485 ASSERT_EQUAL_64(0, x24);
1486 ASSERT_EQUAL_64(0x8000000000000000UL, x25);
1487 ASSERT_EQUAL_64(0, x26);
1488 ASSERT_EQUAL_64(0, x27);
1489 ASSERT_EQUAL_64(0x7fffffffffffffffUL, x28);
1490 ASSERT_EQUAL_64(0, x29);
1491 ASSERT_EQUAL_64(0, x18);
1492 ASSERT_EQUAL_64(0, x19);
1493 ASSERT_EQUAL_64(0, x20);
1494 ASSERT_EQUAL_64(0, x21);
1495
1496 TEARDOWN();
1497 }
1498
1499
1500 TEST(rbit_rev) {
1501 INIT_V8();
1502 SETUP();
1503
1504 START();
1505 __ Mov(x24, 0xfedcba9876543210UL);
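// Rbit reverses the bit order. Rev16 reverses the bytes within each
// halfword, Rev32 within each word, and Rev across the whole register.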
1506 __ Rbit(w0, w24);
1507 __ Rbit(x1, x24);
1508 __ Rev16(w2, w24);
1509 __ Rev16(x3, x24);
1510 __ Rev(w4, w24);
1511 __ Rev32(x5, x24);
1512 __ Rev(x6, x24);
1513 END();
1514
1515 RUN();
1516
1517 ASSERT_EQUAL_64(0x084c2a6e, x0);
1518 ASSERT_EQUAL_64(0x084c2a6e195d3b7fUL, x1);
1519 ASSERT_EQUAL_64(0x54761032, x2);
1520 ASSERT_EQUAL_64(0xdcfe98ba54761032UL, x3);
1521 ASSERT_EQUAL_64(0x10325476, x4);
1522 ASSERT_EQUAL_64(0x98badcfe10325476UL, x5);
1523 ASSERT_EQUAL_64(0x1032547698badcfeUL, x6);
1524
1525 TEARDOWN();
1526 }
1527
1528
1529 TEST(clz_cls) {
1530 INIT_V8();
1531 SETUP();
1532
1533 START();
1534 __ Mov(x24, 0x0008000000800000UL);
1535 __ Mov(x25, 0xff800000fff80000UL);
1536 __ Mov(x26, 0);
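// Clz counts leading zero bits. Cls counts leading bits that match the sign
// bit, excluding the sign bit itself, so Cls of zero is 31 (W) or 63 (X).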
1537 __ Clz(w0, w24);
1538 __ Clz(x1, x24);
1539 __ Clz(w2, w25);
1540 __ Clz(x3, x25);
1541 __ Clz(w4, w26);
1542 __ Clz(x5, x26);
1543 __ Cls(w6, w24);
1544 __ Cls(x7, x24);
1545 __ Cls(w8, w25);
1546 __ Cls(x9, x25);
1547 __ Cls(w10, w26);
1548 __ Cls(x11, x26);
1549 END();
1550
1551 RUN();
1552
1553 ASSERT_EQUAL_64(8, x0);
1554 ASSERT_EQUAL_64(12, x1);
1555 ASSERT_EQUAL_64(0, x2);
1556 ASSERT_EQUAL_64(0, x3);
1557 ASSERT_EQUAL_64(32, x4);
1558 ASSERT_EQUAL_64(64, x5);
1559 ASSERT_EQUAL_64(7, x6);
1560 ASSERT_EQUAL_64(11, x7);
1561 ASSERT_EQUAL_64(12, x8);
1562 ASSERT_EQUAL_64(8, x9);
1563 ASSERT_EQUAL_64(31, x10);
1564 ASSERT_EQUAL_64(63, x11);
1565
1566 TEARDOWN();
1567 }
1568
1569
1570 TEST(label) {
1571 INIT_V8();
1572 SETUP();
1573
1574 Label label_1, label_2, label_3, label_4;
1575
1576 START();
1577 __ Mov(x0, 0x1);
1578 __ Mov(x1, 0x0);
1579 __ Mov(x22, lr); // Save lr.
1580
1581 __ B(&label_1);
1582 __ B(&label_1);
1583 __ B(&label_1); // Multiple branches to the same label.
1584 __ Mov(x0, 0x0);
1585 __ Bind(&label_2);
1586 __ B(&label_3); // Forward branch.
1587 __ Mov(x0, 0x0);
1588 __ Bind(&label_1);
1589 __ B(&label_2); // Backward branch.
1590 __ Mov(x0, 0x0);
1591 __ Bind(&label_3);
1592 __ Bl(&label_4);
1593 END();
1594
1595 __ Bind(&label_4);
1596 __ Mov(x1, 0x1);
1597 __ Mov(lr, x22);
1598 END();
1599
1600 RUN();
1601
1602 ASSERT_EQUAL_64(0x1, x0);
1603 ASSERT_EQUAL_64(0x1, x1);
1604
1605 TEARDOWN();
1606 }
1607
1608
1609 TEST(branch_at_start) {
1610 INIT_V8();
1611 SETUP();
1612
1613 Label good, exit;
1614
1615 // Test that branches can exist at the start of the buffer. (This is a
1616 // boundary condition in the label-handling code.) To achieve this, we have
1617 // to work around the code generated by START.
1618 RESET();
1619 __ B(&good);
1620
1621 START_AFTER_RESET();
1622 __ Mov(x0, 0x0);
1623 END();
1624
1625 __ Bind(&exit);
1626 START_AFTER_RESET();
1627 __ Mov(x0, 0x1);
1628 END();
1629
1630 __ Bind(&good);
1631 __ B(&exit);
1632 END();
1633
1634 RUN();
1635
1636 ASSERT_EQUAL_64(0x1, x0);
1637 TEARDOWN();
1638 }
1639
1640
1641 TEST(adr) {
1642 INIT_V8();
1643 SETUP();
1644
1645 Label label_1, label_2, label_3, label_4;
1646
1647 START();
1648 __ Mov(x0, 0x0); // Set to non-zero to indicate failure.
1649 __ Adr(x1, &label_3); // Set to zero to indicate success.
1650
1651 __ Adr(x2, &label_1); // Multiple forward references to the same label.
1652 __ Adr(x3, &label_1);
1653 __ Adr(x4, &label_1);
1654
1655 __ Bind(&label_2);
1656   __ Eor(x5, x2, Operand(x3));  // Ensure that x2, x3 and x4 are identical.
1657 __ Eor(x6, x2, Operand(x4));
1658 __ Orr(x0, x0, Operand(x5));
1659 __ Orr(x0, x0, Operand(x6));
1660 __ Br(x2); // label_1, label_3
1661
1662 __ Bind(&label_3);
1663 __ Adr(x2, &label_3); // Self-reference (offset 0).
1664 __ Eor(x1, x1, Operand(x2));
1665 __ Adr(x2, &label_4); // Simple forward reference.
1666 __ Br(x2); // label_4
1667
1668 __ Bind(&label_1);
1669 __ Adr(x2, &label_3); // Multiple reverse references to the same label.
1670 __ Adr(x3, &label_3);
1671 __ Adr(x4, &label_3);
1672 __ Adr(x5, &label_2); // Simple reverse reference.
1673 __ Br(x5); // label_2
1674
1675 __ Bind(&label_4);
1676 END();
1677
1678 RUN();
1679
1680 ASSERT_EQUAL_64(0x0, x0);
1681 ASSERT_EQUAL_64(0x0, x1);
1682
1683 TEARDOWN();
1684 }
1685
1686
1687 TEST(branch_cond) {
1688 INIT_V8();
1689 SETUP();
1690
1691 Label wrong;
1692
1693 START();
1694 __ Mov(x0, 0x1);
1695 __ Mov(x1, 0x1);
1696 __ Mov(x2, 0x8000000000000000L);
1697
1698   // For each 'cmp' instruction below, the listed condition codes must not be
1699   // taken; any other condition code would branch.
1700
1701 __ Cmp(x1, 0);
1702 __ B(&wrong, eq);
1703 __ B(&wrong, lo);
1704 __ B(&wrong, mi);
1705 __ B(&wrong, vs);
1706 __ B(&wrong, ls);
1707 __ B(&wrong, lt);
1708 __ B(&wrong, le);
1709 Label ok_1;
1710 __ B(&ok_1, ne);
1711 __ Mov(x0, 0x0);
1712 __ Bind(&ok_1);
1713
1714 __ Cmp(x1, 1);
1715 __ B(&wrong, ne);
1716 __ B(&wrong, lo);
1717 __ B(&wrong, mi);
1718 __ B(&wrong, vs);
1719 __ B(&wrong, hi);
1720 __ B(&wrong, lt);
1721 __ B(&wrong, gt);
1722 Label ok_2;
1723 __ B(&ok_2, pl);
1724 __ Mov(x0, 0x0);
1725 __ Bind(&ok_2);
1726
1727 __ Cmp(x1, 2);
1728 __ B(&wrong, eq);
1729 __ B(&wrong, hs);
1730 __ B(&wrong, pl);
1731 __ B(&wrong, vs);
1732 __ B(&wrong, hi);
1733 __ B(&wrong, ge);
1734 __ B(&wrong, gt);
1735 Label ok_3;
1736 __ B(&ok_3, vc);
1737 __ Mov(x0, 0x0);
1738 __ Bind(&ok_3);
1739
1740 __ Cmp(x2, 1);
1741 __ B(&wrong, eq);
1742 __ B(&wrong, lo);
1743 __ B(&wrong, mi);
1744 __ B(&wrong, vc);
1745 __ B(&wrong, ls);
1746 __ B(&wrong, ge);
1747 __ B(&wrong, gt);
1748 Label ok_4;
1749 __ B(&ok_4, le);
1750 __ Mov(x0, 0x0);
1751 __ Bind(&ok_4);
1752
1753 Label ok_5;
1754 __ b(&ok_5, al);
1755 __ Mov(x0, 0x0);
1756 __ Bind(&ok_5);
1757
1758 Label ok_6;
1759 __ b(&ok_6, nv);
1760 __ Mov(x0, 0x0);
1761 __ Bind(&ok_6);
1762
1763 END();
1764
1765 __ Bind(&wrong);
1766 __ Mov(x0, 0x0);
1767 END();
1768
1769 RUN();
1770
1771 ASSERT_EQUAL_64(0x1, x0);
1772
1773 TEARDOWN();
1774 }
1775
1776
1777 TEST(branch_to_reg) {
1778 INIT_V8();
1779 SETUP();
1780
1781 // Test br.
1782 Label fn1, after_fn1;
1783
1784 START();
1785 __ Mov(x29, lr);
1786
1787 __ Mov(x1, 0);
1788 __ B(&after_fn1);
1789
1790 __ Bind(&fn1);
1791 __ Mov(x0, lr);
1792 __ Mov(x1, 42);
1793 __ Br(x0);
1794
1795 __ Bind(&after_fn1);
1796 __ Bl(&fn1);
1797
1798 // Test blr.
1799 Label fn2, after_fn2;
1800
1801 __ Mov(x2, 0);
1802 __ B(&after_fn2);
1803
1804 __ Bind(&fn2);
1805 __ Mov(x0, lr);
1806 __ Mov(x2, 84);
1807 __ Blr(x0);
1808
1809 __ Bind(&after_fn2);
1810 __ Bl(&fn2);
1811 __ Mov(x3, lr);
1812
1813 __ Mov(lr, x29);
1814 END();
1815
1816 RUN();
1817
1818 ASSERT_EQUAL_64(core.xreg(3) + kInstructionSize, x0);
1819 ASSERT_EQUAL_64(42, x1);
1820 ASSERT_EQUAL_64(84, x2);
1821
1822 TEARDOWN();
1823 }
1824
1825
1826 TEST(compare_branch) {
1827 INIT_V8();
1828 SETUP();
1829
1830 START();
1831 __ Mov(x0, 0);
1832 __ Mov(x1, 0);
1833 __ Mov(x2, 0);
1834 __ Mov(x3, 0);
1835 __ Mov(x4, 0);
1836 __ Mov(x5, 0);
1837 __ Mov(x16, 0);
1838 __ Mov(x17, 42);
1839
1840 Label zt, zt_end;
1841 __ Cbz(w16, &zt);
1842 __ B(&zt_end);
1843 __ Bind(&zt);
1844 __ Mov(x0, 1);
1845 __ Bind(&zt_end);
1846
1847 Label zf, zf_end;
1848 __ Cbz(x17, &zf);
1849 __ B(&zf_end);
1850 __ Bind(&zf);
1851 __ Mov(x1, 1);
1852 __ Bind(&zf_end);
1853
1854 Label nzt, nzt_end;
1855 __ Cbnz(w17, &nzt);
1856 __ B(&nzt_end);
1857 __ Bind(&nzt);
1858 __ Mov(x2, 1);
1859 __ Bind(&nzt_end);
1860
1861 Label nzf, nzf_end;
1862 __ Cbnz(x16, &nzf);
1863 __ B(&nzf_end);
1864 __ Bind(&nzf);
1865 __ Mov(x3, 1);
1866 __ Bind(&nzf_end);
1867
1868 __ Mov(x18, 0xffffffff00000000UL);
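// The W view of x18 is zero, so Cbz on w18 is taken and Cbnz is not.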
1869
1870 Label a, a_end;
1871 __ Cbz(w18, &a);
1872 __ B(&a_end);
1873 __ Bind(&a);
1874 __ Mov(x4, 1);
1875 __ Bind(&a_end);
1876
1877 Label b, b_end;
1878 __ Cbnz(w18, &b);
1879 __ B(&b_end);
1880 __ Bind(&b);
1881 __ Mov(x5, 1);
1882 __ Bind(&b_end);
1883
1884 END();
1885
1886 RUN();
1887
1888 ASSERT_EQUAL_64(1, x0);
1889 ASSERT_EQUAL_64(0, x1);
1890 ASSERT_EQUAL_64(1, x2);
1891 ASSERT_EQUAL_64(0, x3);
1892 ASSERT_EQUAL_64(1, x4);
1893 ASSERT_EQUAL_64(0, x5);
1894
1895 TEARDOWN();
1896 }
1897
1898
1899 TEST(test_branch) {
1900 INIT_V8();
1901 SETUP();
1902
1903 START();
1904 __ Mov(x0, 0);
1905 __ Mov(x1, 0);
1906 __ Mov(x2, 0);
1907 __ Mov(x3, 0);
1908 __ Mov(x16, 0xaaaaaaaaaaaaaaaaUL);
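// x16 has every odd-numbered bit set, so bits 0 and 2 are clear while bits
// 61 and 63 are set.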
1909
1910 Label bz, bz_end;
1911 __ Tbz(w16, 0, &bz);
1912 __ B(&bz_end);
1913 __ Bind(&bz);
1914 __ Mov(x0, 1);
1915 __ Bind(&bz_end);
1916
1917 Label bo, bo_end;
1918 __ Tbz(x16, 63, &bo);
1919 __ B(&bo_end);
1920 __ Bind(&bo);
1921 __ Mov(x1, 1);
1922 __ Bind(&bo_end);
1923
1924 Label nbz, nbz_end;
1925 __ Tbnz(x16, 61, &nbz);
1926 __ B(&nbz_end);
1927 __ Bind(&nbz);
1928 __ Mov(x2, 1);
1929 __ Bind(&nbz_end);
1930
1931 Label nbo, nbo_end;
1932 __ Tbnz(w16, 2, &nbo);
1933 __ B(&nbo_end);
1934 __ Bind(&nbo);
1935 __ Mov(x3, 1);
1936 __ Bind(&nbo_end);
1937 END();
1938
1939 RUN();
1940
1941 ASSERT_EQUAL_64(1, x0);
1942 ASSERT_EQUAL_64(0, x1);
1943 ASSERT_EQUAL_64(1, x2);
1944 ASSERT_EQUAL_64(0, x3);
1945
1946 TEARDOWN();
1947 }
1948
1949
1950 TEST(far_branch_backward) {
1951 INIT_V8();
1952
1953 // Test that the MacroAssembler correctly resolves backward branches to labels
1954 // that are outside the immediate range of branch instructions.
1955 int max_range =
1956 std::max(Instruction::ImmBranchRange(TestBranchType),
1957 std::max(Instruction::ImmBranchRange(CompareBranchType),
1958 Instruction::ImmBranchRange(CondBranchType)));
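// (Tbz/Tbnz encode a 14-bit branch offset, giving a range of +/-32KB;
// Cbz/Cbnz and conditional branches encode 19 bits, giving +/-1MB, which is
// therefore max_range.)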
1959
1960 SETUP_SIZE(max_range + 1000 * kInstructionSize);
1961
1962 START();
1963
1964 Label done, fail;
1965 Label test_tbz, test_cbz, test_bcond;
1966 Label success_tbz, success_cbz, success_bcond;
1967
1968 __ Mov(x0, 0);
1969 __ Mov(x1, 1);
1970 __ Mov(x10, 0);
1971
1972 __ B(&test_tbz);
1973 __ Bind(&success_tbz);
1974 __ Orr(x0, x0, 1 << 0);
1975 __ B(&test_cbz);
1976 __ Bind(&success_cbz);
1977 __ Orr(x0, x0, 1 << 1);
1978 __ B(&test_bcond);
1979 __ Bind(&success_bcond);
1980 __ Orr(x0, x0, 1 << 2);
1981
1982 __ B(&done);
1983
1984 // Generate enough code to overflow the immediate range of the three types of
1985 // branches below.
1986 for (unsigned i = 0; i < max_range / kInstructionSize + 1; ++i) {
1987 if (i % 100 == 0) {
1988 // If we do land in this code, we do not want to execute so many nops
1989       // before reaching the end of the test (especially if tracing is activated).
1990 __ B(&fail);
1991 } else {
1992 __ Nop();
1993 }
1994 }
1995 __ B(&fail);
1996
1997 __ Bind(&test_tbz);
1998 __ Tbz(x10, 7, &success_tbz);
1999 __ Bind(&test_cbz);
2000 __ Cbz(x10, &success_cbz);
2001 __ Bind(&test_bcond);
2002 __ Cmp(x10, 0);
2003 __ B(eq, &success_bcond);
2004
2005   // For each out-of-range branch instruction, at least two instructions should
2006   // have been generated.
2007 CHECK_GE(7 * kInstructionSize, __ SizeOfCodeGeneratedSince(&test_tbz));
2008
2009 __ Bind(&fail);
2010 __ Mov(x1, 0);
2011 __ Bind(&done);
2012
2013 END();
2014
2015 RUN();
2016
2017 ASSERT_EQUAL_64(0x7, x0);
2018 ASSERT_EQUAL_64(0x1, x1);
2019
2020 TEARDOWN();
2021 }
2022
2023
2024 TEST(far_branch_simple_veneer) {
2025 INIT_V8();
2026
2027 // Test that the MacroAssembler correctly emits veneers for forward branches
2028 // to labels that are outside the immediate range of branch instructions.
2029 int max_range =
2030 std::max(Instruction::ImmBranchRange(TestBranchType),
2031 std::max(Instruction::ImmBranchRange(CompareBranchType),
2032 Instruction::ImmBranchRange(CondBranchType)));
2033
2034 SETUP_SIZE(max_range + 1000 * kInstructionSize);
2035
2036 START();
2037
2038 Label done, fail;
2039 Label test_tbz, test_cbz, test_bcond;
2040 Label success_tbz, success_cbz, success_bcond;
2041
2042 __ Mov(x0, 0);
2043 __ Mov(x1, 1);
2044 __ Mov(x10, 0);
2045
2046 __ Bind(&test_tbz);
2047 __ Tbz(x10, 7, &success_tbz);
2048 __ Bind(&test_cbz);
2049 __ Cbz(x10, &success_cbz);
2050 __ Bind(&test_bcond);
2051 __ Cmp(x10, 0);
2052 __ B(eq, &success_bcond);
2053
2054 // Generate enough code to overflow the immediate range of the three types of
2055 // branches below.
2056 for (unsigned i = 0; i < max_range / kInstructionSize + 1; ++i) {
2057 if (i % 100 == 0) {
2058 // If we do land in this code, we do not want to execute so many nops
2059       // before reaching the end of the test (especially if tracing is activated).
2060 // Also, the branches give the MacroAssembler the opportunity to emit the
2061 // veneers.
2062 __ B(&fail);
2063 } else {
2064 __ Nop();
2065 }
2066 }
2067 __ B(&fail);
2068
2069 __ Bind(&success_tbz);
2070 __ Orr(x0, x0, 1 << 0);
2071 __ B(&test_cbz);
2072 __ Bind(&success_cbz);
2073 __ Orr(x0, x0, 1 << 1);
2074 __ B(&test_bcond);
2075 __ Bind(&success_bcond);
2076 __ Orr(x0, x0, 1 << 2);
2077
2078 __ B(&done);
2079 __ Bind(&fail);
2080 __ Mov(x1, 0);
2081 __ Bind(&done);
2082
2083 END();
2084
2085 RUN();
2086
2087 ASSERT_EQUAL_64(0x7, x0);
2088 ASSERT_EQUAL_64(0x1, x1);
2089
2090 TEARDOWN();
2091 }
2092
2093
2094 TEST(far_branch_veneer_link_chain) {
2095 INIT_V8();
2096
2097 // Test that the MacroAssembler correctly emits veneers for forward branches
2098   // that target out-of-range labels and are part of a chain of multiple
2099   // instructions jumping to that label.
2100 //
2101 // We test the three situations with the different types of instruction:
2102 // (1)- When the branch is at the start of the chain with tbz.
2103 // (2)- When the branch is in the middle of the chain with cbz.
2104 // (3)- When the branch is at the end of the chain with bcond.
2105 int max_range =
2106 std::max(Instruction::ImmBranchRange(TestBranchType),
2107 std::max(Instruction::ImmBranchRange(CompareBranchType),
2108 Instruction::ImmBranchRange(CondBranchType)));
2109
2110 SETUP_SIZE(max_range + 1000 * kInstructionSize);
2111
2112 START();
2113
2114 Label skip, fail, done;
2115 Label test_tbz, test_cbz, test_bcond;
2116 Label success_tbz, success_cbz, success_bcond;
2117
2118 __ Mov(x0, 0);
2119 __ Mov(x1, 1);
2120 __ Mov(x10, 0);
2121
2122 __ B(&skip);
2123 // Branches at the start of the chain for situations (2) and (3).
2124 __ B(&success_cbz);
2125 __ B(&success_bcond);
2126 __ Nop();
2127 __ B(&success_bcond);
2128 __ B(&success_cbz);
2129 __ Bind(&skip);
2130
2131 __ Bind(&test_tbz);
2132 __ Tbz(x10, 7, &success_tbz);
2133 __ Bind(&test_cbz);
2134 __ Cbz(x10, &success_cbz);
2135 __ Bind(&test_bcond);
2136 __ Cmp(x10, 0);
2137 __ B(eq, &success_bcond);
2138
2139 skip.Unuse();
2140 __ B(&skip);
2141 // Branches at the end of the chain for situations (1) and (2).
2142 __ B(&success_cbz);
2143 __ B(&success_tbz);
2144 __ Nop();
2145 __ B(&success_tbz);
2146 __ B(&success_cbz);
2147 __ Bind(&skip);
2148
2149 // Generate enough code to overflow the immediate range of the three types of
2150 // branches below.
2151 for (unsigned i = 0; i < max_range / kInstructionSize + 1; ++i) {
2152 if (i % 100 == 0) {
2153 // If we do land in this code, we do not want to execute so many nops
2154       // before reaching the end of the test (especially if tracing is activated).
2155 // Also, the branches give the MacroAssembler the opportunity to emit the
2156 // veneers.
2157 __ B(&fail);
2158 } else {
2159 __ Nop();
2160 }
2161 }
2162 __ B(&fail);
2163
2164 __ Bind(&success_tbz);
2165 __ Orr(x0, x0, 1 << 0);
2166 __ B(&test_cbz);
2167 __ Bind(&success_cbz);
2168 __ Orr(x0, x0, 1 << 1);
2169 __ B(&test_bcond);
2170 __ Bind(&success_bcond);
2171 __ Orr(x0, x0, 1 << 2);
2172
2173 __ B(&done);
2174 __ Bind(&fail);
2175 __ Mov(x1, 0);
2176 __ Bind(&done);
2177
2178 END();
2179
2180 RUN();
2181
2182 ASSERT_EQUAL_64(0x7, x0);
2183 ASSERT_EQUAL_64(0x1, x1);
2184
2185 TEARDOWN();
2186 }
2187
2188
2189 TEST(far_branch_veneer_broken_link_chain) {
2190 INIT_V8();
2191
2192 // Check that the MacroAssembler correctly handles removing a branch from the
2193 // link chain of a label when the two links on either side of the removed
2194 // branch cannot reach each other (they are out of range).
2195 //
2196 // We test with tbz because it has a small range.
2197 int max_range = Instruction::ImmBranchRange(TestBranchType);
2198 int inter_range = max_range / 2 + max_range / 10;
2199
2200 SETUP_SIZE(3 * inter_range + 1000 * kInstructionSize);
2201
2202 START();
2203
2204 Label skip, fail, done;
2205 Label test_1, test_2, test_3;
2206 Label far_target;
2207
2208 __ Mov(x0, 0); // Indicates the origin of the branch.
2209 __ Mov(x1, 1);
2210 __ Mov(x10, 0);
2211
2212 // First instruction in the label chain.
2213 __ Bind(&test_1);
2214 __ Mov(x0, 1);
2215 __ B(&far_target);
2216
2217 for (unsigned i = 0; i < inter_range / kInstructionSize; ++i) {
2218 if (i % 100 == 0) {
2219 // Use the raw b() so that no veneers can be generated; none should be needed.
2220 __ b(&fail);
2221 } else {
2222 __ Nop();
2223 }
2224 }
2225
2226 // Will need a veneer to reach the target.
2227 __ Bind(&test_2);
2228 __ Mov(x0, 2);
2229 __ Tbz(x10, 7, &far_target);
2230
2231 for (unsigned i = 0; i < inter_range / kInstructionSize; ++i) {
2232 if (i % 100 == 0) {
2233 // Use the raw b() so that no veneers can be generated; none should be needed.
2234 __ b(&fail);
2235 } else {
2236 __ Nop();
2237 }
2238 }
2239
2240 // Does not need a veneer to reach the target, but the first branch of the
2241 // chain is out of range of this instruction's link field.
2242 __ Bind(&test_3);
2243 __ Mov(x0, 3);
2244 __ Tbz(x10, 7, &far_target);
2245
2246 for (unsigned i = 0; i < inter_range / kInstructionSize; ++i) {
2247 if (i % 100 == 0) {
2248 // Allow generating veneers.
2249 __ B(&fail);
2250 } else {
2251 __ Nop();
2252 }
2253 }
2254
2255 __ B(&fail);
2256
2257 __ Bind(&far_target);
2258 __ Cmp(x0, 1);
2259 __ B(eq, &test_2);
2260 __ Cmp(x0, 2);
2261 __ B(eq, &test_3);
2262
2263 __ B(&done);
2264 __ Bind(&fail);
2265 __ Mov(x1, 0);
2266 __ Bind(&done);
2267
2268 END();
2269
2270 RUN();
2271
2272 ASSERT_EQUAL_64(0x3, x0);
2273 ASSERT_EQUAL_64(0x1, x1);
2274
2275 TEARDOWN();
2276 }
2277
2278
2279 TEST(branch_type) {
2280 INIT_V8();
2281
2282 SETUP();
2283
2284 Label fail, done;
2285
2286 START();
2287 __ Mov(x0, 0x0);
2288 __ Mov(x10, 0x7);
2289 __ Mov(x11, 0x0);
2290
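// B() with a BranchType covers condition codes, always/never, and the
// register-based forms (reg_zero, reg_not_zero, reg_bit_clear, reg_bit_set).
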
2291 // Test branches that are not taken.
2292 __ Cmp(x10, 0x7);
2293 __ B(&fail, ne);
2294 __ B(&fail, never);
2295 __ B(&fail, reg_zero, x10);
2296 __ B(&fail, reg_not_zero, x11);
2297 __ B(&fail, reg_bit_clear, x10, 0);
2298 __ B(&fail, reg_bit_set, x10, 3);
2299
2300 // Test taken branches.
2301 Label l1, l2, l3, l4, l5;
2302 __ Cmp(x10, 0x7);
2303 __ B(&l1, eq);
2304 __ B(&fail);
2305 __ Bind(&l1);
2306 __ B(&l2, always);
2307 __ B(&fail);
2308 __ Bind(&l2);
2309 __ B(&l3, reg_not_zero, x10);
2310 __ B(&fail);
2311 __ Bind(&l3);
2312 __ B(&l4, reg_bit_clear, x10, 15);
2313 __ B(&fail);
2314 __ Bind(&l4);
2315 __ B(&l5, reg_bit_set, x10, 1);
2316 __ B(&fail);
2317 __ Bind(&l5);
2318
2319 __ B(&done);
2320
2321 __ Bind(&fail);
2322 __ Mov(x0, 0x1);
2323
2324 __ Bind(&done);
2325
2326 END();
2327
2328 RUN();
2329
2330 ASSERT_EQUAL_64(0x0, x0);
2331
2332 TEARDOWN();
2333 }
2334
2335
2336 TEST(ldr_str_offset) {
2337 INIT_V8();
2338 SETUP();
2339
2340 uint64_t src[2] = {0xfedcba9876543210UL, 0x0123456789abcdefUL};
2341 uint64_t dst[5] = {0, 0, 0, 0, 0};
2342 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
2343 uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
2344
2345 START();
2346 __ Mov(x17, src_base);
2347 __ Mov(x18, dst_base);
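// Immediate-offset addressing: the offset applies to the access only, so the
// base registers must be left unchanged.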
2348 __ Ldr(w0, MemOperand(x17));
2349 __ Str(w0, MemOperand(x18));
2350 __ Ldr(w1, MemOperand(x17, 4));
2351 __ Str(w1, MemOperand(x18, 12));
2352 __ Ldr(x2, MemOperand(x17, 8));
2353 __ Str(x2, MemOperand(x18, 16));
2354 __ Ldrb(w3, MemOperand(x17, 1));
2355 __ Strb(w3, MemOperand(x18, 25));
2356 __ Ldrh(w4, MemOperand(x17, 2));
2357 __ Strh(w4, MemOperand(x18, 33));
2358 END();
2359
2360 RUN();
2361
2362 ASSERT_EQUAL_64(0x76543210, x0);
2363 ASSERT_EQUAL_64(0x76543210, dst[0]);
2364 ASSERT_EQUAL_64(0xfedcba98, x1);
2365 ASSERT_EQUAL_64(0xfedcba9800000000UL, dst[1]);
2366 ASSERT_EQUAL_64(0x0123456789abcdefUL, x2);
2367 ASSERT_EQUAL_64(0x0123456789abcdefUL, dst[2]);
2368 ASSERT_EQUAL_64(0x32, x3);
2369 ASSERT_EQUAL_64(0x3200, dst[3]);
2370 ASSERT_EQUAL_64(0x7654, x4);
2371 ASSERT_EQUAL_64(0x765400, dst[4]);
2372 ASSERT_EQUAL_64(src_base, x17);
2373 ASSERT_EQUAL_64(dst_base, x18);
2374
2375 TEARDOWN();
2376 }
2377
2378
2379 TEST(ldr_str_wide) {
2380 INIT_V8();
2381 SETUP();
2382
2383 uint32_t src[8192];
2384 uint32_t dst[8192];
2385 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
2386 uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
2387 memset(src, 0xaa, 8192 * sizeof(src[0]));
2388 memset(dst, 0xaa, 8192 * sizeof(dst[0]));
2389 src[0] = 0;
2390 src[6144] = 6144;
2391 src[8191] = 8191;
2392
2393 START();
2394 __ Mov(x22, src_base);
2395 __ Mov(x23, dst_base);
2396 __ Mov(x24, src_base);
2397 __ Mov(x25, dst_base);
2398 __ Mov(x26, src_base);
2399 __ Mov(x27, dst_base);
2400
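// These offsets are too large to encode in a single load/store instruction,
// so the MacroAssembler must synthesize the address arithmetic.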
2401 __ Ldr(w0, MemOperand(x22, 8191 * sizeof(src[0])));
2402 __ Str(w0, MemOperand(x23, 8191 * sizeof(dst[0])));
2403 __ Ldr(w1, MemOperand(x24, 4096 * sizeof(src[0]), PostIndex));
2404 __ Str(w1, MemOperand(x25, 4096 * sizeof(dst[0]), PostIndex));
2405 __ Ldr(w2, MemOperand(x26, 6144 * sizeof(src[0]), PreIndex));
2406 __ Str(w2, MemOperand(x27, 6144 * sizeof(dst[0]), PreIndex));
2407 END();
2408
2409 RUN();
2410
2411 ASSERT_EQUAL_32(8191, w0);
2412 ASSERT_EQUAL_32(8191, dst[8191]);
2413 ASSERT_EQUAL_64(src_base, x22);
2414 ASSERT_EQUAL_64(dst_base, x23);
2415 ASSERT_EQUAL_32(0, w1);
2416 ASSERT_EQUAL_32(0, dst[0]);
2417 ASSERT_EQUAL_64(src_base + 4096 * sizeof(src[0]), x24);
2418 ASSERT_EQUAL_64(dst_base + 4096 * sizeof(dst[0]), x25);
2419 ASSERT_EQUAL_32(6144, w2);
2420 ASSERT_EQUAL_32(6144, dst[6144]);
2421 ASSERT_EQUAL_64(src_base + 6144 * sizeof(src[0]), x26);
2422 ASSERT_EQUAL_64(dst_base + 6144 * sizeof(dst[0]), x27);
2423
2424 TEARDOWN();
2425 }
2426
2427
2428 TEST(ldr_str_preindex) {
2429 INIT_V8();
2430 SETUP();
2431
2432 uint64_t src[2] = {0xfedcba9876543210UL, 0x0123456789abcdefUL};
2433 uint64_t dst[6] = {0, 0, 0, 0, 0, 0};
2434 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
2435 uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
2436
2437 START();
2438 __ Mov(x17, src_base);
2439 __ Mov(x18, dst_base);
2440 __ Mov(x19, src_base);
2441 __ Mov(x20, dst_base);
2442 __ Mov(x21, src_base + 16);
2443 __ Mov(x22, dst_base + 40);
2444 __ Mov(x23, src_base);
2445 __ Mov(x24, dst_base);
2446 __ Mov(x25, src_base);
2447 __ Mov(x26, dst_base);
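// Pre-index addressing updates the base register before the access; each base
// must end up pointing at the last address accessed.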
2448 __ Ldr(w0, MemOperand(x17, 4, PreIndex));
2449 __ Str(w0, MemOperand(x18, 12, PreIndex));
2450 __ Ldr(x1, MemOperand(x19, 8, PreIndex));
2451 __ Str(x1, MemOperand(x20, 16, PreIndex));
2452 __ Ldr(w2, MemOperand(x21, -4, PreIndex));
2453 __ Str(w2, MemOperand(x22, -4, PreIndex));
2454 __ Ldrb(w3, MemOperand(x23, 1, PreIndex));
2455 __ Strb(w3, MemOperand(x24, 25, PreIndex));
2456 __ Ldrh(w4, MemOperand(x25, 3, PreIndex));
2457 __ Strh(w4, MemOperand(x26, 41, PreIndex));
2458 END();
2459
2460 RUN();
2461
2462 ASSERT_EQUAL_64(0xfedcba98, x0);
2463 ASSERT_EQUAL_64(0xfedcba9800000000UL, dst[1]);
2464 ASSERT_EQUAL_64(0x0123456789abcdefUL, x1);
2465 ASSERT_EQUAL_64(0x0123456789abcdefUL, dst[2]);
2466 ASSERT_EQUAL_64(0x01234567, x2);
2467 ASSERT_EQUAL_64(0x0123456700000000UL, dst[4]);
2468 ASSERT_EQUAL_64(0x32, x3);
2469 ASSERT_EQUAL_64(0x3200, dst[3]);
2470 ASSERT_EQUAL_64(0x9876, x4);
2471 ASSERT_EQUAL_64(0x987600, dst[5]);
2472 ASSERT_EQUAL_64(src_base + 4, x17);
2473 ASSERT_EQUAL_64(dst_base + 12, x18);
2474 ASSERT_EQUAL_64(src_base + 8, x19);
2475 ASSERT_EQUAL_64(dst_base + 16, x20);
2476 ASSERT_EQUAL_64(src_base + 12, x21);
2477 ASSERT_EQUAL_64(dst_base + 36, x22);
2478 ASSERT_EQUAL_64(src_base + 1, x23);
2479 ASSERT_EQUAL_64(dst_base + 25, x24);
2480 ASSERT_EQUAL_64(src_base + 3, x25);
2481 ASSERT_EQUAL_64(dst_base + 41, x26);
2482
2483 TEARDOWN();
2484 }
2485
2486
2487 TEST(ldr_str_postindex) {
2488 INIT_V8();
2489 SETUP();
2490
2491 uint64_t src[2] = {0xfedcba9876543210UL, 0x0123456789abcdefUL};
2492 uint64_t dst[6] = {0, 0, 0, 0, 0, 0};
2493 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
2494 uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
2495
2496 START();
2497 __ Mov(x17, src_base + 4);
2498 __ Mov(x18, dst_base + 12);
2499 __ Mov(x19, src_base + 8);
2500 __ Mov(x20, dst_base + 16);
2501 __ Mov(x21, src_base + 8);
2502 __ Mov(x22, dst_base + 32);
2503 __ Mov(x23, src_base + 1);
2504 __ Mov(x24, dst_base + 25);
2505 __ Mov(x25, src_base + 3);
2506 __ Mov(x26, dst_base + 41);
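// Post-index addressing accesses memory at the unmodified base, then adds the
// (possibly negative) offset to the base register.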
2507 __ Ldr(w0, MemOperand(x17, 4, PostIndex));
2508 __ Str(w0, MemOperand(x18, 12, PostIndex));
2509 __ Ldr(x1, MemOperand(x19, 8, PostIndex));
2510 __ Str(x1, MemOperand(x20, 16, PostIndex));
2511 __ Ldr(x2, MemOperand(x21, -8, PostIndex));
2512 __ Str(x2, MemOperand(x22, -32, PostIndex));
2513 __ Ldrb(w3, MemOperand(x23, 1, PostIndex));
2514 __ Strb(w3, MemOperand(x24, 5, PostIndex));
2515 __ Ldrh(w4, MemOperand(x25, -3, PostIndex));
2516 __ Strh(w4, MemOperand(x26, -41, PostIndex));
2517 END();
2518
2519 RUN();
2520
2521 ASSERT_EQUAL_64(0xfedcba98, x0);
2522 ASSERT_EQUAL_64(0xfedcba9800000000UL, dst[1]);
2523 ASSERT_EQUAL_64(0x0123456789abcdefUL, x1);
2524 ASSERT_EQUAL_64(0x0123456789abcdefUL, dst[2]);
2525 ASSERT_EQUAL_64(0x0123456789abcdefUL, x2);
2526 ASSERT_EQUAL_64(0x0123456789abcdefUL, dst[4]);
2527 ASSERT_EQUAL_64(0x32, x3);
2528 ASSERT_EQUAL_64(0x3200, dst[3]);
2529 ASSERT_EQUAL_64(0x9876, x4);
2530 ASSERT_EQUAL_64(0x987600, dst[5]);
2531 ASSERT_EQUAL_64(src_base + 8, x17);
2532 ASSERT_EQUAL_64(dst_base + 24, x18);
2533 ASSERT_EQUAL_64(src_base + 16, x19);
2534 ASSERT_EQUAL_64(dst_base + 32, x20);
2535 ASSERT_EQUAL_64(src_base, x21);
2536 ASSERT_EQUAL_64(dst_base, x22);
2537 ASSERT_EQUAL_64(src_base + 2, x23);
2538 ASSERT_EQUAL_64(dst_base + 30, x24);
2539 ASSERT_EQUAL_64(src_base, x25);
2540 ASSERT_EQUAL_64(dst_base, x26);
2541
2542 TEARDOWN();
2543 }
2544
2545
2546 TEST(load_signed) {
2547 INIT_V8();
2548 SETUP();
2549
2550 uint32_t src[2] = {0x80008080, 0x7fff7f7f};
2551 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
2552
2553 START();
2554 __ Mov(x24, src_base);
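// Sign-extending loads: a W destination extends to 32 bits (and zeroes the
// upper 32 bits of the X register), an X destination extends to 64 bits.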
2555 __ Ldrsb(w0, MemOperand(x24));
2556 __ Ldrsb(w1, MemOperand(x24, 4));
2557 __ Ldrsh(w2, MemOperand(x24));
2558 __ Ldrsh(w3, MemOperand(x24, 4));
2559 __ Ldrsb(x4, MemOperand(x24));
2560 __ Ldrsb(x5, MemOperand(x24, 4));
2561 __ Ldrsh(x6, MemOperand(x24));
2562 __ Ldrsh(x7, MemOperand(x24, 4));
2563 __ Ldrsw(x8, MemOperand(x24));
2564 __ Ldrsw(x9, MemOperand(x24, 4));
2565 END();
2566
2567 RUN();
2568
2569 ASSERT_EQUAL_64(0xffffff80, x0);
2570 ASSERT_EQUAL_64(0x0000007f, x1);
2571 ASSERT_EQUAL_64(0xffff8080, x2);
2572 ASSERT_EQUAL_64(0x00007f7f, x3);
2573 ASSERT_EQUAL_64(0xffffffffffffff80UL, x4);
2574 ASSERT_EQUAL_64(0x000000000000007fUL, x5);
2575 ASSERT_EQUAL_64(0xffffffffffff8080UL, x6);
2576 ASSERT_EQUAL_64(0x0000000000007f7fUL, x7);
2577 ASSERT_EQUAL_64(0xffffffff80008080UL, x8);
2578 ASSERT_EQUAL_64(0x000000007fff7f7fUL, x9);
2579
2580 TEARDOWN();
2581 }
2582
2583
2584 TEST(load_store_regoffset) {
2585 INIT_V8();
2586 SETUP();
2587
2588 uint32_t src[3] = {1, 2, 3};
2589 uint32_t dst[4] = {0, 0, 0, 0};
2590 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
2591 uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
2592
2593 START();
2594 __ Mov(x16, src_base);
2595 __ Mov(x17, dst_base);
2596 __ Mov(x18, src_base + 3 * sizeof(src[0]));
2597 __ Mov(x19, dst_base + 3 * sizeof(dst[0]));
2598 __ Mov(x20, dst_base + 4 * sizeof(dst[0]));
2599 __ Mov(x24, 0);
2600 __ Mov(x25, 4);
2601 __ Mov(x26, -4);
2602 __ Mov(x27, 0xfffffffc); // 32-bit -4.
2603 __ Mov(x28, 0xfffffffe); // 32-bit -2.
2604 __ Mov(x29, 0xffffffff); // 32-bit -1.
2605
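// Register-offset addressing: an X offset is used as-is, SXTW sign-extends a
// W offset, and the optional shift scales the offset by the access size.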
2606 __ Ldr(w0, MemOperand(x16, x24));
2607 __ Ldr(x1, MemOperand(x16, x25));
2608 __ Ldr(w2, MemOperand(x18, x26));
2609 __ Ldr(w3, MemOperand(x18, x27, SXTW));
2610 __ Ldr(w4, MemOperand(x18, x28, SXTW, 2));
2611 __ Str(w0, MemOperand(x17, x24));
2612 __ Str(x1, MemOperand(x17, x25));
2613 __ Str(w2, MemOperand(x20, x29, SXTW, 2));
2614 END();
2615
2616 RUN();
2617
2618 ASSERT_EQUAL_64(1, x0);
2619 ASSERT_EQUAL_64(0x0000000300000002UL, x1);
2620 ASSERT_EQUAL_64(3, x2);
2621 ASSERT_EQUAL_64(3, x3);
2622 ASSERT_EQUAL_64(2, x4);
2623 ASSERT_EQUAL_32(1, dst[0]);
2624 ASSERT_EQUAL_32(2, dst[1]);
2625 ASSERT_EQUAL_32(3, dst[2]);
2626 ASSERT_EQUAL_32(3, dst[3]);
2627
2628 TEARDOWN();
2629 }
2630
2631
2632 TEST(load_store_float) {
2633 INIT_V8();
2634 SETUP();
2635
2636 float src[3] = {1.0, 2.0, 3.0};
2637 float dst[3] = {0.0, 0.0, 0.0};
2638 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
2639 uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
2640
2641 START();
2642 __ Mov(x17, src_base);
2643 __ Mov(x18, dst_base);
2644 __ Mov(x19, src_base);
2645 __ Mov(x20, dst_base);
2646 __ Mov(x21, src_base);
2647 __ Mov(x22, dst_base);
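// FP loads and stores support the same addressing modes as their integer
// counterparts.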
2648 __ Ldr(s0, MemOperand(x17, sizeof(src[0])));
2649 __ Str(s0, MemOperand(x18, sizeof(dst[0]), PostIndex));
2650 __ Ldr(s1, MemOperand(x19, sizeof(src[0]), PostIndex));
2651 __ Str(s1, MemOperand(x20, 2 * sizeof(dst[0]), PreIndex));
2652 __ Ldr(s2, MemOperand(x21, 2 * sizeof(src[0]), PreIndex));
2653 __ Str(s2, MemOperand(x22, sizeof(dst[0])));
2654 END();
2655
2656 RUN();
2657
2658 ASSERT_EQUAL_FP32(2.0, s0);
2659 ASSERT_EQUAL_FP32(2.0, dst[0]);
2660 ASSERT_EQUAL_FP32(1.0, s1);
2661 ASSERT_EQUAL_FP32(1.0, dst[2]);
2662 ASSERT_EQUAL_FP32(3.0, s2);
2663 ASSERT_EQUAL_FP32(3.0, dst[1]);
2664 ASSERT_EQUAL_64(src_base, x17);
2665 ASSERT_EQUAL_64(dst_base + sizeof(dst[0]), x18);
2666 ASSERT_EQUAL_64(src_base + sizeof(src[0]), x19);
2667 ASSERT_EQUAL_64(dst_base + 2 * sizeof(dst[0]), x20);
2668 ASSERT_EQUAL_64(src_base + 2 * sizeof(src[0]), x21);
2669 ASSERT_EQUAL_64(dst_base, x22);
2670
2671 TEARDOWN();
2672 }
2673
2674
2675 TEST(load_store_double) {
2676 INIT_V8();
2677 SETUP();
2678
2679 double src[3] = {1.0, 2.0, 3.0};
2680 double dst[3] = {0.0, 0.0, 0.0};
2681 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
2682 uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
2683
2684 START();
2685 __ Mov(x17, src_base);
2686 __ Mov(x18, dst_base);
2687 __ Mov(x19, src_base);
2688 __ Mov(x20, dst_base);
2689 __ Mov(x21, src_base);
2690 __ Mov(x22, dst_base);
2691 __ Ldr(d0, MemOperand(x17, sizeof(src[0])));
2692 __ Str(d0, MemOperand(x18, sizeof(dst[0]), PostIndex));
2693 __ Ldr(d1, MemOperand(x19, sizeof(src[0]), PostIndex));
2694 __ Str(d1, MemOperand(x20, 2 * sizeof(dst[0]), PreIndex));
2695 __ Ldr(d2, MemOperand(x21, 2 * sizeof(src[0]), PreIndex));
2696 __ Str(d2, MemOperand(x22, sizeof(dst[0])));
2697 END();
2698
2699 RUN();
2700
2701 ASSERT_EQUAL_FP64(2.0, d0);
2702 ASSERT_EQUAL_FP64(2.0, dst[0]);
2703 ASSERT_EQUAL_FP64(1.0, d1);
2704 ASSERT_EQUAL_FP64(1.0, dst[2]);
2705 ASSERT_EQUAL_FP64(3.0, d2);
2706 ASSERT_EQUAL_FP64(3.0, dst[1]);
2707 ASSERT_EQUAL_64(src_base, x17);
2708 ASSERT_EQUAL_64(dst_base + sizeof(dst[0]), x18);
2709 ASSERT_EQUAL_64(src_base + sizeof(src[0]), x19);
2710 ASSERT_EQUAL_64(dst_base + 2 * sizeof(dst[0]), x20);
2711 ASSERT_EQUAL_64(src_base + 2 * sizeof(src[0]), x21);
2712 ASSERT_EQUAL_64(dst_base, x22);
2713
2714 TEARDOWN();
2715 }
2716
2717
2718 TEST(ldp_stp_float) {
2719 INIT_V8();
2720 SETUP();
2721
2722 float src[2] = {1.0, 2.0};
2723 float dst[3] = {0.0, 0.0, 0.0};
2724 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
2725 uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
2726
2727 START();
2728 __ Mov(x16, src_base);
2729 __ Mov(x17, dst_base);
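// Ldp/Stp transfer a pair of registers in one instruction; any writeback is
// applied once for the whole pair.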
2730 __ Ldp(s31, s0, MemOperand(x16, 2 * sizeof(src[0]), PostIndex));
2731 __ Stp(s0, s31, MemOperand(x17, sizeof(dst[1]), PreIndex));
2732 END();
2733
2734 RUN();
2735
2736 ASSERT_EQUAL_FP32(1.0, s31);
2737 ASSERT_EQUAL_FP32(2.0, s0);
2738 ASSERT_EQUAL_FP32(0.0, dst[0]);
2739 ASSERT_EQUAL_FP32(2.0, dst[1]);
2740 ASSERT_EQUAL_FP32(1.0, dst[2]);
2741 ASSERT_EQUAL_64(src_base + 2 * sizeof(src[0]), x16);
2742 ASSERT_EQUAL_64(dst_base + sizeof(dst[1]), x17);
2743
2744 TEARDOWN();
2745 }
2746
2747
2748 TEST(ldp_stp_double) {
2749 INIT_V8();
2750 SETUP();
2751
2752 double src[2] = {1.0, 2.0};
2753 double dst[3] = {0.0, 0.0, 0.0};
2754 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
2755 uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
2756
2757 START();
2758 __ Mov(x16, src_base);
2759 __ Mov(x17, dst_base);
2760 __ Ldp(d31, d0, MemOperand(x16, 2 * sizeof(src[0]), PostIndex));
2761 __ Stp(d0, d31, MemOperand(x17, sizeof(dst[1]), PreIndex));
2762 END();
2763
2764 RUN();
2765
2766 ASSERT_EQUAL_FP64(1.0, d31);
2767 ASSERT_EQUAL_FP64(2.0, d0);
2768 ASSERT_EQUAL_FP64(0.0, dst[0]);
2769 ASSERT_EQUAL_FP64(2.0, dst[1]);
2770 ASSERT_EQUAL_FP64(1.0, dst[2]);
2771 ASSERT_EQUAL_64(src_base + 2 * sizeof(src[0]), x16);
2772 ASSERT_EQUAL_64(dst_base + sizeof(dst[1]), x17);
2773
2774 TEARDOWN();
2775 }
2776
2777
2778 TEST(ldp_stp_offset) {
2779 INIT_V8();
2780 SETUP();
2781
2782 uint64_t src[3] = {0x0011223344556677UL, 0x8899aabbccddeeffUL,
2783 0xffeeddccbbaa9988UL};
2784 uint64_t dst[7] = {0, 0, 0, 0, 0, 0, 0};
2785 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
2786 uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
2787
2788 START();
2789 __ Mov(x16, src_base);
2790 __ Mov(x17, dst_base);
2791 __ Mov(x18, src_base + 24);
2792 __ Mov(x19, dst_base + 56);
2793 __ Ldp(w0, w1, MemOperand(x16));
2794 __ Ldp(w2, w3, MemOperand(x16, 4));
2795 __ Ldp(x4, x5, MemOperand(x16, 8));
2796 __ Ldp(w6, w7, MemOperand(x18, -12));
2797 __ Ldp(x8, x9, MemOperand(x18, -16));
2798 __ Stp(w0, w1, MemOperand(x17));
2799 __ Stp(w2, w3, MemOperand(x17, 8));
2800 __ Stp(x4, x5, MemOperand(x17, 16));
2801 __ Stp(w6, w7, MemOperand(x19, -24));
2802 __ Stp(x8, x9, MemOperand(x19, -16));
2803 END();
2804
2805 RUN();
2806
2807 ASSERT_EQUAL_64(0x44556677, x0);
2808 ASSERT_EQUAL_64(0x00112233, x1);
2809 ASSERT_EQUAL_64(0x0011223344556677UL, dst[0]);
2810 ASSERT_EQUAL_64(0x00112233, x2);
2811 ASSERT_EQUAL_64(0xccddeeff, x3);
2812 ASSERT_EQUAL_64(0xccddeeff00112233UL, dst[1]);
2813 ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x4);
2814 ASSERT_EQUAL_64(0x8899aabbccddeeffUL, dst[2]);
2815 ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, x5);
2816 ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, dst[3]);
2817 ASSERT_EQUAL_64(0x8899aabb, x6);
2818 ASSERT_EQUAL_64(0xbbaa9988, x7);
2819 ASSERT_EQUAL_64(0xbbaa99888899aabbUL, dst[4]);
2820 ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x8);
2821 ASSERT_EQUAL_64(0x8899aabbccddeeffUL, dst[5]);
2822 ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, x9);
2823 ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, dst[6]);
2824 ASSERT_EQUAL_64(src_base, x16);
2825 ASSERT_EQUAL_64(dst_base, x17);
2826 ASSERT_EQUAL_64(src_base + 24, x18);
2827 ASSERT_EQUAL_64(dst_base + 56, x19);
2828
2829 TEARDOWN();
2830 }
2831
2832
2833 TEST(ldnp_stnp_offset) {
2834 INIT_V8();
2835 SETUP();
2836
2837 uint64_t src[3] = {0x0011223344556677UL, 0x8899aabbccddeeffUL,
2838 0xffeeddccbbaa9988UL};
2839 uint64_t dst[7] = {0, 0, 0, 0, 0, 0, 0};
2840 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
2841 uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
2842
2843 START();
2844 __ Mov(x16, src_base);
2845 __ Mov(x17, dst_base);
2846 __ Mov(x18, src_base + 24);
2847 __ Mov(x19, dst_base + 56);
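// Ldnp/Stnp behave like Ldp/Stp but carry a non-temporal hint, telling the
// core not to expect the data to be reused soon.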
2848 __ Ldnp(w0, w1, MemOperand(x16));
2849 __ Ldnp(w2, w3, MemOperand(x16, 4));
2850 __ Ldnp(x4, x5, MemOperand(x16, 8));
2851 __ Ldnp(w6, w7, MemOperand(x18, -12));
2852 __ Ldnp(x8, x9, MemOperand(x18, -16));
2853 __ Stnp(w0, w1, MemOperand(x17));
2854 __ Stnp(w2, w3, MemOperand(x17, 8));
2855 __ Stnp(x4, x5, MemOperand(x17, 16));
2856 __ Stnp(w6, w7, MemOperand(x19, -24));
2857 __ Stnp(x8, x9, MemOperand(x19, -16));
2858 END();
2859
2860 RUN();
2861
2862 ASSERT_EQUAL_64(0x44556677, x0);
2863 ASSERT_EQUAL_64(0x00112233, x1);
2864 ASSERT_EQUAL_64(0x0011223344556677UL, dst[0]);
2865 ASSERT_EQUAL_64(0x00112233, x2);
2866 ASSERT_EQUAL_64(0xccddeeff, x3);
2867 ASSERT_EQUAL_64(0xccddeeff00112233UL, dst[1]);
2868 ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x4);
2869 ASSERT_EQUAL_64(0x8899aabbccddeeffUL, dst[2]);
2870 ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, x5);
2871 ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, dst[3]);
2872 ASSERT_EQUAL_64(0x8899aabb, x6);
2873 ASSERT_EQUAL_64(0xbbaa9988, x7);
2874 ASSERT_EQUAL_64(0xbbaa99888899aabbUL, dst[4]);
2875 ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x8);
2876 ASSERT_EQUAL_64(0x8899aabbccddeeffUL, dst[5]);
2877 ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, x9);
2878 ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, dst[6]);
2879 ASSERT_EQUAL_64(src_base, x16);
2880 ASSERT_EQUAL_64(dst_base, x17);
2881 ASSERT_EQUAL_64(src_base + 24, x18);
2882 ASSERT_EQUAL_64(dst_base + 56, x19);
2883
2884 TEARDOWN();
2885 }
2886
2887
2888 TEST(ldp_stp_preindex) {
2889 INIT_V8();
2890 SETUP();
2891
2892 uint64_t src[3] = {0x0011223344556677UL, 0x8899aabbccddeeffUL,
2893 0xffeeddccbbaa9988UL};
2894 uint64_t dst[5] = {0, 0, 0, 0, 0};
2895 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
2896 uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
2897
2898 START();
2899 __ Mov(x16, src_base);
2900 __ Mov(x17, dst_base);
2901 __ Mov(x18, dst_base + 16);
2902 __ Ldp(w0, w1, MemOperand(x16, 4, PreIndex));
2903 __ Mov(x19, x16);
2904 __ Ldp(w2, w3, MemOperand(x16, -4, PreIndex));
2905 __ Stp(w2, w3, MemOperand(x17, 4, PreIndex));
2906 __ Mov(x20, x17);
2907 __ Stp(w0, w1, MemOperand(x17, -4, PreIndex));
2908 __ Ldp(x4, x5, MemOperand(x16, 8, PreIndex));
2909 __ Mov(x21, x16);
2910 __ Ldp(x6, x7, MemOperand(x16, -8, PreIndex));
2911 __ Stp(x7, x6, MemOperand(x18, 8, PreIndex));
2912 __ Mov(x22, x18);
2913 __ Stp(x5, x4, MemOperand(x18, -8, PreIndex));
2914 END();
2915
2916 RUN();
2917
2918 ASSERT_EQUAL_64(0x00112233, x0);
2919 ASSERT_EQUAL_64(0xccddeeff, x1);
2920 ASSERT_EQUAL_64(0x44556677, x2);
2921 ASSERT_EQUAL_64(0x00112233, x3);
2922 ASSERT_EQUAL_64(0xccddeeff00112233UL, dst[0]);
2923 ASSERT_EQUAL_64(0x0000000000112233UL, dst[1]);
2924 ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x4);
2925 ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, x5);
2926 ASSERT_EQUAL_64(0x0011223344556677UL, x6);
2927 ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x7);
2928 ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, dst[2]);
2929 ASSERT_EQUAL_64(0x8899aabbccddeeffUL, dst[3]);
2930 ASSERT_EQUAL_64(0x0011223344556677UL, dst[4]);
2931 ASSERT_EQUAL_64(src_base, x16);
2932 ASSERT_EQUAL_64(dst_base, x17);
2933 ASSERT_EQUAL_64(dst_base + 16, x18);
2934 ASSERT_EQUAL_64(src_base + 4, x19);
2935 ASSERT_EQUAL_64(dst_base + 4, x20);
2936 ASSERT_EQUAL_64(src_base + 8, x21);
2937 ASSERT_EQUAL_64(dst_base + 24, x22);
2938
2939 TEARDOWN();
2940 }
2941
2942
2943 TEST(ldp_stp_postindex) {
2944 INIT_V8();
2945 SETUP();
2946
2947 uint64_t src[4] = {0x0011223344556677UL, 0x8899aabbccddeeffUL,
2948 0xffeeddccbbaa9988UL, 0x7766554433221100UL};
2949 uint64_t dst[5] = {0, 0, 0, 0, 0};
2950 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
2951 uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
2952
2953 START();
2954 __ Mov(x16, src_base);
2955 __ Mov(x17, dst_base);
2956 __ Mov(x18, dst_base + 16);
2957 __ Ldp(w0, w1, MemOperand(x16, 4, PostIndex));
2958 __ Mov(x19, x16);
2959 __ Ldp(w2, w3, MemOperand(x16, -4, PostIndex));
2960 __ Stp(w2, w3, MemOperand(x17, 4, PostIndex));
2961 __ Mov(x20, x17);
2962 __ Stp(w0, w1, MemOperand(x17, -4, PostIndex));
2963 __ Ldp(x4, x5, MemOperand(x16, 8, PostIndex));
2964 __ Mov(x21, x16);
2965 __ Ldp(x6, x7, MemOperand(x16, -8, PostIndex));
2966 __ Stp(x7, x6, MemOperand(x18, 8, PostIndex));
2967 __ Mov(x22, x18);
2968 __ Stp(x5, x4, MemOperand(x18, -8, PostIndex));
2969 END();
2970
2971 RUN();
2972
2973 ASSERT_EQUAL_64(0x44556677, x0);
2974 ASSERT_EQUAL_64(0x00112233, x1);
2975 ASSERT_EQUAL_64(0x00112233, x2);
2976 ASSERT_EQUAL_64(0xccddeeff, x3);
2977 ASSERT_EQUAL_64(0x4455667700112233UL, dst[0]);
2978 ASSERT_EQUAL_64(0x0000000000112233UL, dst[1]);
2979 ASSERT_EQUAL_64(0x0011223344556677UL, x4);
2980 ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x5);
2981 ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x6);
2982 ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, x7);
2983 ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, dst[2]);
2984 ASSERT_EQUAL_64(0x8899aabbccddeeffUL, dst[3]);
2985 ASSERT_EQUAL_64(0x0011223344556677UL, dst[4]);
2986 ASSERT_EQUAL_64(src_base, x16);
2987 ASSERT_EQUAL_64(dst_base, x17);
2988 ASSERT_EQUAL_64(dst_base + 16, x18);
2989 ASSERT_EQUAL_64(src_base + 4, x19);
2990 ASSERT_EQUAL_64(dst_base + 4, x20);
2991 ASSERT_EQUAL_64(src_base + 8, x21);
2992 ASSERT_EQUAL_64(dst_base + 24, x22);
2993
2994 TEARDOWN();
2995 }
2996
2997
2998 TEST(ldp_sign_extend) {
2999 INIT_V8();
3000 SETUP();
3001
3002 uint32_t src[2] = {0x80000000, 0x7fffffff};
3003 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
3004
3005 START();
3006 __ Mov(x24, src_base);
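// Ldpsw loads two 32-bit words and sign-extends each into an X register.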
3007 __ Ldpsw(x0, x1, MemOperand(x24));
3008 END();
3009
3010 RUN();
3011
3012 ASSERT_EQUAL_64(0xffffffff80000000UL, x0);
3013 ASSERT_EQUAL_64(0x000000007fffffffUL, x1);
3014
3015 TEARDOWN();
3016 }
3017
3018
3019 TEST(ldur_stur) {
3020 INIT_V8();
3021 SETUP();
3022
3023 int64_t src[2] = {0x0123456789abcdefUL, 0x0123456789abcdefUL};
3024 int64_t dst[5] = {0, 0, 0, 0, 0};
3025 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
3026 uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
3027
3028 START();
3029 __ Mov(x17, src_base);
3030 __ Mov(x18, dst_base);
3031 __ Mov(x19, src_base + 16);
3032 __ Mov(x20, dst_base + 32);
3033 __ Mov(x21, dst_base + 40);
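// The offsets below are unaligned or negative, so the scaled unsigned-offset
// encodings cannot be used; the unscaled ldur/stur forms are emitted instead.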
3034 __ Ldr(w0, MemOperand(x17, 1));
3035 __ Str(w0, MemOperand(x18, 2));
3036 __ Ldr(x1, MemOperand(x17, 3));
3037 __ Str(x1, MemOperand(x18, 9));
3038 __ Ldr(w2, MemOperand(x19, -9));
3039 __ Str(w2, MemOperand(x20, -5));
3040 __ Ldrb(w3, MemOperand(x19, -1));
3041 __ Strb(w3, MemOperand(x21, -1));
3042 END();
3043
3044 RUN();
3045
3046 ASSERT_EQUAL_64(0x6789abcd, x0);
3047 ASSERT_EQUAL_64(0x6789abcd0000L, dst[0]);
3048 ASSERT_EQUAL_64(0xabcdef0123456789L, x1);
3049 ASSERT_EQUAL_64(0xcdef012345678900L, dst[1]);
3050 ASSERT_EQUAL_64(0x000000ab, dst[2]);
3051 ASSERT_EQUAL_64(0xabcdef01, x2);
3052 ASSERT_EQUAL_64(0x00abcdef01000000L, dst[3]);
3053 ASSERT_EQUAL_64(0x00000001, x3);
3054 ASSERT_EQUAL_64(0x0100000000000000L, dst[4]);
3055 ASSERT_EQUAL_64(src_base, x17);
3056 ASSERT_EQUAL_64(dst_base, x18);
3057 ASSERT_EQUAL_64(src_base + 16, x19);
3058 ASSERT_EQUAL_64(dst_base + 32, x20);
3059
3060 TEARDOWN();
3061 }
3062
3063
3064 #if 0 // TODO(all) enable.
3065 // TODO(rodolph): Adapt w16 Literal tests for RelocInfo.
3066 TEST(ldr_literal) {
3067 INIT_V8();
3068 SETUP();
3069
3070 START();
3071 __ Ldr(x2, 0x1234567890abcdefUL);
3072 __ Ldr(w3, 0xfedcba09);
3073 __ Ldr(d13, 1.234);
3074 __ Ldr(s25, 2.5);
3075 END();
3076
3077 RUN();
3078
3079 ASSERT_EQUAL_64(0x1234567890abcdefUL, x2);
3080 ASSERT_EQUAL_64(0xfedcba09, x3);
3081 ASSERT_EQUAL_FP64(1.234, d13);
3082 ASSERT_EQUAL_FP32(2.5, s25);
3083
3084 TEARDOWN();
3085 }
3086
3087
3088 static void LdrLiteralRangeHelper(ptrdiff_t range_,
3089 LiteralPoolEmitOption option,
3090 bool expect_dump) {
3091 ASSERT(range_ > 0);
3092 SETUP_SIZE(range_ + 1024);
3093
3094 Label label_1, label_2;
3095
3096 size_t range = static_cast<size_t>(range_);
3097 size_t code_size = 0;
3098 size_t pool_guard_size;
3099
3100 if (option == NoJumpRequired) {
3101 // Space for an explicit branch.
3102 pool_guard_size = sizeof(Instr);
3103 } else {
3104 pool_guard_size = 0;
3105 }
3106
3107 START();
3108 // Force a pool dump so the pool starts off empty.
3109 __ EmitLiteralPool(JumpRequired);
3110 ASSERT_LITERAL_POOL_SIZE(0);
3111
3112 __ Ldr(x0, 0x1234567890abcdefUL);
3113 __ Ldr(w1, 0xfedcba09);
3114 __ Ldr(d0, 1.234);
3115 __ Ldr(s1, 2.5);
3116 ASSERT_LITERAL_POOL_SIZE(4);
3117
3118 code_size += 4 * sizeof(Instr);
3119
3120 // Check that the requested range (allowing space for a branch over the pool)
3121 // can be handled by this test.
3122 ASSERT((code_size + pool_guard_size) <= range);
3123
3124 // Emit NOPs up to 'range', leaving space for the pool guard.
3125 while ((code_size + pool_guard_size) < range) {
3126 __ Nop();
3127 code_size += sizeof(Instr);
3128 }
3129
3130 // Emit the guard sequence before the literal pool.
3131 if (option == NoJumpRequired) {
3132 __ B(&label_1);
3133 code_size += sizeof(Instr);
3134 }
3135
3136 ASSERT(code_size == range);
3137 ASSERT_LITERAL_POOL_SIZE(4);
3138
3139 // Possibly generate a literal pool.
3140 __ CheckLiteralPool(option);
3141 __ Bind(&label_1);
3142 if (expect_dump) {
3143 ASSERT_LITERAL_POOL_SIZE(0);
3144 } else {
3145 ASSERT_LITERAL_POOL_SIZE(4);
3146 }
3147
3148 // Force a pool flush to check that a second pool functions correctly.
3149 __ EmitLiteralPool(JumpRequired);
3150 ASSERT_LITERAL_POOL_SIZE(0);
3151
3152 // These loads should be after the pool (and will require a new one).
3153 __ Ldr(x4, 0x34567890abcdef12UL);
3154 __ Ldr(w5, 0xdcba09fe);
3155 __ Ldr(d4, 123.4);
3156 __ Ldr(s5, 250.0);
3157 ASSERT_LITERAL_POOL_SIZE(4);
3158 END();
3159
3160 RUN();
3161
3162 // Check that the literals loaded correctly.
3163 ASSERT_EQUAL_64(0x1234567890abcdefUL, x0);
3164 ASSERT_EQUAL_64(0xfedcba09, x1);
3165 ASSERT_EQUAL_FP64(1.234, d0);
3166 ASSERT_EQUAL_FP32(2.5, s1);
3167 ASSERT_EQUAL_64(0x34567890abcdef12UL, x4);
3168 ASSERT_EQUAL_64(0xdcba09fe, x5);
3169 ASSERT_EQUAL_FP64(123.4, d4);
3170 ASSERT_EQUAL_FP32(250.0, s5);
3171
3172 TEARDOWN();
3173 }
3174
3175
3176 TEST(ldr_literal_range_1) {
3177 INIT_V8();
3178 LdrLiteralRangeHelper(kRecommendedLiteralPoolRange,
3179 NoJumpRequired,
3180 true);
3181 }
3182
3183
3184 TEST(ldr_literal_range_2) {
3185 INIT_V8();
3186 LdrLiteralRangeHelper(kRecommendedLiteralPoolRange - sizeof(Instr),
3187 NoJumpRequired,
3188 false);
3189 }
3190
3191
3192 TEST(ldr_literal_range_3) {
3193 INIT_V8();
3194 LdrLiteralRangeHelper(2 * kRecommendedLiteralPoolRange,
3195 JumpRequired,
3196 true);
3197 }
3198
3199
3200 TEST(ldr_literal_range_4) {
3201 INIT_V8();
3202 LdrLiteralRangeHelper(2 * kRecommendedLiteralPoolRange - sizeof(Instr),
3203 JumpRequired,
3204 false);
3205 }
3206
3207
3208 TEST(ldr_literal_range_5) {
3209 INIT_V8();
3210 LdrLiteralRangeHelper(kLiteralPoolCheckInterval,
3211 JumpRequired,
3212 false);
3213 }
3214
3215
3216 TEST(ldr_literal_range_6) {
3217 INIT_V8();
3218 LdrLiteralRangeHelper(kLiteralPoolCheckInterval - sizeof(Instr),
3219 JumpRequired,
3220 false);
3221 }
3222 #endif
3223
3224 TEST(add_sub_imm) {
3225 INIT_V8();
3226 SETUP();
3227
3228 START();
3229 __ Mov(x0, 0x0);
3230 __ Mov(x1, 0x1111);
3231 __ Mov(x2, 0xffffffffffffffffL);
3232 __ Mov(x3, 0x8000000000000000L);
3233
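// Arithmetic immediates are 12 bits wide and optionally shifted left by 12
// (e.g. 0x122000 == 0x122 << 12).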
3234 __ Add(x10, x0, Operand(0x123));
3235 __ Add(x11, x1, Operand(0x122000));
3236 __ Add(x12, x0, Operand(0xabc << 12));
3237 __ Add(x13, x2, Operand(1));
3238
3239 __ Add(w14, w0, Operand(0x123));
3240 __ Add(w15, w1, Operand(0x122000));
3241 __ Add(w16, w0, Operand(0xabc << 12));
3242 __ Add(w17, w2, Operand(1));
3243
3244 __ Sub(x20, x0, Operand(0x1));
3245 __ Sub(x21, x1, Operand(0x111));
3246 __ Sub(x22, x1, Operand(0x1 << 12));
3247 __ Sub(x23, x3, Operand(1));
3248
3249 __ Sub(w24, w0, Operand(0x1));
3250 __ Sub(w25, w1, Operand(0x111));
3251 __ Sub(w26, w1, Operand(0x1 << 12));
3252 __ Sub(w27, w3, Operand(1));
3253 END();
3254
3255 RUN();
3256
3257 ASSERT_EQUAL_64(0x123, x10);
3258 ASSERT_EQUAL_64(0x123111, x11);
3259 ASSERT_EQUAL_64(0xabc000, x12);
3260 ASSERT_EQUAL_64(0x0, x13);
3261
3262 ASSERT_EQUAL_32(0x123, w14);
3263 ASSERT_EQUAL_32(0x123111, w15);
3264 ASSERT_EQUAL_32(0xabc000, w16);
3265 ASSERT_EQUAL_32(0x0, w17);
3266
3267 ASSERT_EQUAL_64(0xffffffffffffffffL, x20);
3268 ASSERT_EQUAL_64(0x1000, x21);
3269 ASSERT_EQUAL_64(0x111, x22);
3270 ASSERT_EQUAL_64(0x7fffffffffffffffL, x23);
3271
3272 ASSERT_EQUAL_32(0xffffffff, w24);
3273 ASSERT_EQUAL_32(0x1000, w25);
3274 ASSERT_EQUAL_32(0x111, w26);
3275 ASSERT_EQUAL_32(0xffffffff, w27);
3276
3277 TEARDOWN();
3278 }
3279
3280
3281 TEST(add_sub_wide_imm) {
3282 INIT_V8();
3283 SETUP();
3284
3285 START();
3286 __ Mov(x0, 0x0);
3287 __ Mov(x1, 0x1);
3288
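// These immediates cannot be encoded directly, so the MacroAssembler must
// first materialize them in a scratch register.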
3289 __ Add(x10, x0, Operand(0x1234567890abcdefUL));
3290 __ Add(x11, x1, Operand(0xffffffff));
3291
3292 __ Add(w12, w0, Operand(0x12345678));
3293 __ Add(w13, w1, Operand(0xffffffff));
3294
3295 __ Sub(x20, x0, Operand(0x1234567890abcdefUL));
3296
3297 __ Sub(w21, w0, Operand(0x12345678));
3298 END();
3299
3300 RUN();
3301
3302 ASSERT_EQUAL_64(0x1234567890abcdefUL, x10);
3303 ASSERT_EQUAL_64(0x100000000UL, x11);
3304
3305 ASSERT_EQUAL_32(0x12345678, w12);
3306 ASSERT_EQUAL_64(0x0, x13);
3307
3308 ASSERT_EQUAL_64(-0x1234567890abcdefUL, x20);
3309
3310 ASSERT_EQUAL_32(-0x12345678, w21);
3311
3312 TEARDOWN();
3313 }
3314
3315
3316 TEST(add_sub_shifted) {
3317 INIT_V8();
3318 SETUP();
3319
3320 START();
3321 __ Mov(x0, 0);
3322 __ Mov(x1, 0x0123456789abcdefL);
3323 __ Mov(x2, 0xfedcba9876543210L);
3324 __ Mov(x3, 0xffffffffffffffffL);
3325
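// Shifted-register operands. Add/sub instructions encode only LSL, LSR and
// ASR, so the ROR cases have to be resolved by the MacroAssembler.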
3326 __ Add(x10, x1, Operand(x2));
3327 __ Add(x11, x0, Operand(x1, LSL, 8));
3328 __ Add(x12, x0, Operand(x1, LSR, 8));
3329 __ Add(x13, x0, Operand(x1, ASR, 8));
3330 __ Add(x14, x0, Operand(x2, ASR, 8));
3331 __ Add(w15, w0, Operand(w1, ASR, 8));
3332 __ Add(w18, w3, Operand(w1, ROR, 8));
3333 __ Add(x19, x3, Operand(x1, ROR, 8));
3334
3335 __ Sub(x20, x3, Operand(x2));
3336 __ Sub(x21, x3, Operand(x1, LSL, 8));
3337 __ Sub(x22, x3, Operand(x1, LSR, 8));
3338 __ Sub(x23, x3, Operand(x1, ASR, 8));
3339 __ Sub(x24, x3, Operand(x2, ASR, 8));
3340 __ Sub(w25, w3, Operand(w1, ASR, 8));
3341 __ Sub(w26, w3, Operand(w1, ROR, 8));
3342 __ Sub(x27, x3, Operand(x1, ROR, 8));
3343 END();
3344
3345 RUN();
3346
3347 ASSERT_EQUAL_64(0xffffffffffffffffL, x10);
3348 ASSERT_EQUAL_64(0x23456789abcdef00L, x11);
3349 ASSERT_EQUAL_64(0x000123456789abcdL, x12);
3350 ASSERT_EQUAL_64(0x000123456789abcdL, x13);
3351 ASSERT_EQUAL_64(0xfffedcba98765432L, x14);
3352 ASSERT_EQUAL_64(0xff89abcd, x15);
3353 ASSERT_EQUAL_64(0xef89abcc, x18);
3354 ASSERT_EQUAL_64(0xef0123456789abccL, x19);
3355
3356 ASSERT_EQUAL_64(0x0123456789abcdefL, x20);
3357 ASSERT_EQUAL_64(0xdcba9876543210ffL, x21);
3358 ASSERT_EQUAL_64(0xfffedcba98765432L, x22);
3359 ASSERT_EQUAL_64(0xfffedcba98765432L, x23);
3360 ASSERT_EQUAL_64(0x000123456789abcdL, x24);
3361 ASSERT_EQUAL_64(0x00765432, x25);
3362 ASSERT_EQUAL_64(0x10765432, x26);
3363 ASSERT_EQUAL_64(0x10fedcba98765432L, x27);
3364
3365 TEARDOWN();
3366 }
3367
3368
3369 TEST(add_sub_extended) {
3370 INIT_V8();
3371 SETUP();
3372
3373 START();
3374 __ Mov(x0, 0);
3375 __ Mov(x1, 0x0123456789abcdefL);
3376 __ Mov(x2, 0xfedcba9876543210L);
3377 __ Mov(w3, 0x80);
3378
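// Extended-register operands zero- or sign-extend a byte, half-word or word
// and can then shift the result left by up to four bits.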
3379 __ Add(x10, x0, Operand(x1, UXTB, 0));
3380 __ Add(x11, x0, Operand(x1, UXTB, 1));
3381 __ Add(x12, x0, Operand(x1, UXTH, 2));
3382 __ Add(x13, x0, Operand(x1, UXTW, 4));
3383
3384 __ Add(x14, x0, Operand(x1, SXTB, 0));
3385 __ Add(x15, x0, Operand(x1, SXTB, 1));
3386 __ Add(x16, x0, Operand(x1, SXTH, 2));
3387 __ Add(x17, x0, Operand(x1, SXTW, 3));
3388 __ Add(x18, x0, Operand(x2, SXTB, 0));
3389 __ Add(x19, x0, Operand(x2, SXTB, 1));
3390 __ Add(x20, x0, Operand(x2, SXTH, 2));
3391 __ Add(x21, x0, Operand(x2, SXTW, 3));
3392
3393 __ Add(x22, x1, Operand(x2, SXTB, 1));
3394 __ Sub(x23, x1, Operand(x2, SXTB, 1));
3395
3396 __ Add(w24, w1, Operand(w2, UXTB, 2));
3397 __ Add(w25, w0, Operand(w1, SXTB, 0));
3398 __ Add(w26, w0, Operand(w1, SXTB, 1));
3399 __ Add(w27, w2, Operand(w1, SXTW, 3));
3400
3401 __ Add(w28, w0, Operand(w1, SXTW, 3));
3402 __ Add(x29, x0, Operand(w1, SXTW, 3));
3403
3404 __ Sub(x30, x0, Operand(w3, SXTB, 1));
3405 END();
3406
3407 RUN();
3408
3409 ASSERT_EQUAL_64(0xefL, x10);
3410 ASSERT_EQUAL_64(0x1deL, x11);
3411 ASSERT_EQUAL_64(0x337bcL, x12);
3412 ASSERT_EQUAL_64(0x89abcdef0L, x13);
3413
3414 ASSERT_EQUAL_64(0xffffffffffffffefL, x14);
3415 ASSERT_EQUAL_64(0xffffffffffffffdeL, x15);
3416 ASSERT_EQUAL_64(0xffffffffffff37bcL, x16);
3417 ASSERT_EQUAL_64(0xfffffffc4d5e6f78L, x17);
3418 ASSERT_EQUAL_64(0x10L, x18);
3419 ASSERT_EQUAL_64(0x20L, x19);
3420 ASSERT_EQUAL_64(0xc840L, x20);
3421 ASSERT_EQUAL_64(0x3b2a19080L, x21);
3422
3423 ASSERT_EQUAL_64(0x0123456789abce0fL, x22);
3424 ASSERT_EQUAL_64(0x0123456789abcdcfL, x23);
3425
3426 ASSERT_EQUAL_32(0x89abce2f, w24);
3427 ASSERT_EQUAL_32(0xffffffef, w25);
3428 ASSERT_EQUAL_32(0xffffffde, w26);
3429 ASSERT_EQUAL_32(0xc3b2a188, w27);
3430
3431 ASSERT_EQUAL_32(0x4d5e6f78, w28);
3432 ASSERT_EQUAL_64(0xfffffffc4d5e6f78L, x29);
3433
3434 ASSERT_EQUAL_64(256, x30);
3435
3436 TEARDOWN();
3437 }
3438
3439
3440 TEST(add_sub_negative) {
3441 INIT_V8();
3442 SETUP();
3443
3444 START();
3445 __ Mov(x0, 0);
3446 __ Mov(x1, 4687);
3447 __ Mov(x2, 0x1122334455667788);
3448 __ Mov(w3, 0x11223344);
3449 __ Mov(w4, 400000);
3450
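// An Add with a negative immediate should be emitted as a Sub of its absolute
// value, and vice versa.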
3451 __ Add(x10, x0, -42);
3452 __ Add(x11, x1, -687);
3453 __ Add(x12, x2, -0x88);
3454
3455 __ Sub(x13, x0, -600);
3456 __ Sub(x14, x1, -313);
3457 __ Sub(x15, x2, -0x555);
3458
3459 __ Add(w19, w3, -0x344);
3460 __ Add(w20, w4, -2000);
3461
3462 __ Sub(w21, w3, -0xbc);
3463 __ Sub(w22, w4, -2000);
3464 END();
3465
3466 RUN();
3467
3468 ASSERT_EQUAL_64(-42, x10);
3469 ASSERT_EQUAL_64(4000, x11);
3470 ASSERT_EQUAL_64(0x1122334455667700, x12);
3471
3472 ASSERT_EQUAL_64(600, x13);
3473 ASSERT_EQUAL_64(5000, x14);
3474 ASSERT_EQUAL_64(0x1122334455667cdd, x15);
3475
3476 ASSERT_EQUAL_32(0x11223000, w19);
3477 ASSERT_EQUAL_32(398000, w20);
3478
3479 ASSERT_EQUAL_32(0x11223400, w21);
3480 ASSERT_EQUAL_32(402000, w22);
3481
3482 TEARDOWN();
3483 }
3484
3485
3486 TEST(add_sub_zero) {
3487 INIT_V8();
3488 SETUP();
3489
3490 START();
3491 __ Mov(x0, 0);
3492 __ Mov(x1, 0);
3493 __ Mov(x2, 0);
3494
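// Adding zero to an X register can be elided entirely, but the W forms must
// still emit code because they clear the upper 32 bits of the X register.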
3495 Label blob1;
3496 __ Bind(&blob1);
3497 __ Add(x0, x0, 0);
3498 __ Sub(x1, x1, 0);
3499 __ Sub(x2, x2, xzr);
3500 CHECK_EQ(0, __ SizeOfCodeGeneratedSince(&blob1));
3501
3502 Label blob2;
3503 __ Bind(&blob2);
3504 __ Add(w3, w3, 0);
3505 CHECK_NE(0, __ SizeOfCodeGeneratedSince(&blob2));
3506
3507 Label blob3;
3508 __ Bind(&blob3);
3509 __ Sub(w3, w3, wzr);
3510 CHECK_NE(0, __ SizeOfCodeGeneratedSince(&blob3));
3511
3512 END();
3513
3514 RUN();
3515
3516 ASSERT_EQUAL_64(0, x0);
3517 ASSERT_EQUAL_64(0, x1);
3518 ASSERT_EQUAL_64(0, x2);
3519
3520 TEARDOWN();
3521 }
3522
3523
3524 TEST(claim_drop_zero) {
3525 INIT_V8();
3526 SETUP();
3527
3528 START();
3529
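// Claiming or dropping zero bytes of stack should generate no code at all.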
3530 Label start;
3531 __ Bind(&start);
3532 __ Claim(0);
3533 __ Drop(0);
3534 __ Claim(xzr, 8);
3535 __ Drop(xzr, 8);
3536 __ Claim(xzr, 0);
3537 __ Drop(xzr, 0);
3538 __ Claim(x7, 0);
3539 __ Drop(x7, 0);
3540 __ ClaimBySMI(xzr, 8);
3541 __ DropBySMI(xzr, 8);
3542 __ ClaimBySMI(xzr, 0);
3543 __ DropBySMI(xzr, 0);
3544 CHECK_EQ(0, __ SizeOfCodeGeneratedSince(&start));
3545
3546 END();
3547
3548 RUN();
3549
3550 TEARDOWN();
3551 }
3552
3553
3554 TEST(neg) {
3555 INIT_V8();
3556 SETUP();
3557
3558 START();
3559 __ Mov(x0, 0xf123456789abcdefL);
3560
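// Neg subtracts its operand from the zero register, so all Sub operand forms
// are available to it.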
3561 // Immediate.
3562 __ Neg(x1, 0x123);
3563 __ Neg(w2, 0x123);
3564
3565 // Shifted.
3566 __ Neg(x3, Operand(x0, LSL, 1));
3567 __ Neg(w4, Operand(w0, LSL, 2));
3568 __ Neg(x5, Operand(x0, LSR, 3));
3569 __ Neg(w6, Operand(w0, LSR, 4));
3570 __ Neg(x7, Operand(x0, ASR, 5));
3571 __ Neg(w8, Operand(w0, ASR, 6));
3572
3573 // Extended.
3574 __ Neg(w9, Operand(w0, UXTB));
3575 __ Neg(x10, Operand(x0, SXTB, 1));
3576 __ Neg(w11, Operand(w0, UXTH, 2));
3577 __ Neg(x12, Operand(x0, SXTH, 3));
3578 __ Neg(w13, Operand(w0, UXTW, 4));
3579 __ Neg(x14, Operand(x0, SXTW, 4));
3580 END();
3581
3582 RUN();
3583
3584 ASSERT_EQUAL_64(0xfffffffffffffeddUL, x1);
3585 ASSERT_EQUAL_64(0xfffffedd, x2);
3586 ASSERT_EQUAL_64(0x1db97530eca86422UL, x3);
3587 ASSERT_EQUAL_64(0xd950c844, x4);
3588 ASSERT_EQUAL_64(0xe1db97530eca8643UL, x5);
3589 ASSERT_EQUAL_64(0xf7654322, x6);
3590 ASSERT_EQUAL_64(0x0076e5d4c3b2a191UL, x7);
3591 ASSERT_EQUAL_64(0x01d950c9, x8);
3592 ASSERT_EQUAL_64(0xffffff11, x9);
3593 ASSERT_EQUAL_64(0x0000000000000022UL, x10);
3594 ASSERT_EQUAL_64(0xfffcc844, x11);
3595 ASSERT_EQUAL_64(0x0000000000019088UL, x12);
3596 ASSERT_EQUAL_64(0x65432110, x13);
3597 ASSERT_EQUAL_64(0x0000000765432110UL, x14);
3598
3599 TEARDOWN();
3600 }
3601
3602
3603 TEST(adc_sbc_shift) {
3604 INIT_V8();
3605 SETUP();
3606
3607 START();
3608 __ Mov(x0, 0);
3609 __ Mov(x1, 1);
3610 __ Mov(x2, 0x0123456789abcdefL);
3611 __ Mov(x3, 0xfedcba9876543210L);
3612 __ Mov(x4, 0xffffffffffffffffL);
3613
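// Adc computes rn + operand + C; Sbc computes rn + ~operand + C, that is
// rn - operand - 1 + C. Each block runs once with C clear, once with C set.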
3614 // Clear the C flag.
3615 __ Adds(x0, x0, Operand(0));
3616
3617 __ Adc(x5, x2, Operand(x3));
3618 __ Adc(x6, x0, Operand(x1, LSL, 60));
3619 __ Sbc(x7, x4, Operand(x3, LSR, 4));
3620 __ Adc(x8, x2, Operand(x3, ASR, 4));
3621 __ Adc(x9, x2, Operand(x3, ROR, 8));
3622
3623 __ Adc(w10, w2, Operand(w3));
3624 __ Adc(w11, w0, Operand(w1, LSL, 30));
3625 __ Sbc(w12, w4, Operand(w3, LSR, 4));
3626 __ Adc(w13, w2, Operand(w3, ASR, 4));
3627 __ Adc(w14, w2, Operand(w3, ROR, 8));
3628
3629 // Set the C flag.
3630 __ Cmp(w0, Operand(w0));
3631
3632 __ Adc(x18, x2, Operand(x3));
3633 __ Adc(x19, x0, Operand(x1, LSL, 60));
3634 __ Sbc(x20, x4, Operand(x3, LSR, 4));
3635 __ Adc(x21, x2, Operand(x3, ASR, 4));
3636 __ Adc(x22, x2, Operand(x3, ROR, 8));
3637
3638 __ Adc(w23, w2, Operand(w3));
3639 __ Adc(w24, w0, Operand(w1, LSL, 30));
3640 __ Sbc(w25, w4, Operand(w3, LSR, 4));
3641 __ Adc(w26, w2, Operand(w3, ASR, 4));
3642 __ Adc(w27, w2, Operand(w3, ROR, 8));
3643 END();
3644
3645 RUN();
3646
3647 ASSERT_EQUAL_64(0xffffffffffffffffL, x5);
3648 ASSERT_EQUAL_64(1L << 60, x6);
3649 ASSERT_EQUAL_64(0xf0123456789abcddL, x7);
3650 ASSERT_EQUAL_64(0x0111111111111110L, x8);
3651 ASSERT_EQUAL_64(0x1222222222222221L, x9);
3652
3653 ASSERT_EQUAL_32(0xffffffff, w10);
3654 ASSERT_EQUAL_32(1 << 30, w11);
3655 ASSERT_EQUAL_32(0xf89abcdd, w12);
3656 ASSERT_EQUAL_32(0x91111110, w13);
3657 ASSERT_EQUAL_32(0x9a222221, w14);
3658
3659 ASSERT_EQUAL_64(0xffffffffffffffffL + 1, x18);
3660 ASSERT_EQUAL_64((1L << 60) + 1, x19);
3661 ASSERT_EQUAL_64(0xf0123456789abcddL + 1, x20);
3662 ASSERT_EQUAL_64(0x0111111111111110L + 1, x21);
3663 ASSERT_EQUAL_64(0x1222222222222221L + 1, x22);
3664
3665 ASSERT_EQUAL_32(0xffffffff + 1, w23);
3666 ASSERT_EQUAL_32((1 << 30) + 1, w24);
3667 ASSERT_EQUAL_32(0xf89abcdd + 1, w25);
3668 ASSERT_EQUAL_32(0x91111110 + 1, w26);
3669 ASSERT_EQUAL_32(0x9a222221 + 1, w27);
3670
3671 // Check that adc correctly sets the condition flags.
3672 START();
3673 __ Mov(x0, 1);
3674 __ Mov(x1, 0xffffffffffffffffL);
3675 // Clear the C flag.
3676 __ Adds(x0, x0, Operand(0));
3677 __ Adcs(x10, x0, Operand(x1));
3678 END();
3679
3680 RUN();
3681
3682 ASSERT_EQUAL_NZCV(ZCFlag);
3683 ASSERT_EQUAL_64(0, x10);
3684
3685 START();
3686 __ Mov(x0, 1);
3687 __ Mov(x1, 0x8000000000000000L);
3688 // Clear the C flag.
3689 __ Adds(x0, x0, Operand(0));
3690 __ Adcs(x10, x0, Operand(x1, ASR, 63));
3691 END();
3692
3693 RUN();
3694
3695 ASSERT_EQUAL_NZCV(ZCFlag);
3696 ASSERT_EQUAL_64(0, x10);
3697
3698 START();
3699 __ Mov(x0, 0x10);
3700 __ Mov(x1, 0x07ffffffffffffffL);
3701 // Clear the C flag.
3702 __ Adds(x0, x0, Operand(0));
3703 __ Adcs(x10, x0, Operand(x1, LSL, 4));
3704 END();
3705
3706 RUN();
3707
3708 ASSERT_EQUAL_NZCV(NVFlag);
3709 ASSERT_EQUAL_64(0x8000000000000000L, x10);
3710
3711 // Check that sbc correctly sets the condition flags.
3712 START();
3713 __ Mov(x0, 0);
3714 __ Mov(x1, 0xffffffffffffffffL);
3715 // Clear the C flag.
3716 __ Adds(x0, x0, Operand(0));
3717 __ Sbcs(x10, x0, Operand(x1));
3718 END();
3719
3720 RUN();
3721
3722 ASSERT_EQUAL_NZCV(ZFlag);
3723 ASSERT_EQUAL_64(0, x10);
3724
3725 START();
3726 __ Mov(x0, 1);
3727 __ Mov(x1, 0xffffffffffffffffL);
3728 // Clear the C flag.
3729 __ Adds(x0, x0, Operand(0));
3730 __ Sbcs(x10, x0, Operand(x1, LSR, 1));
3731 END();
3732
3733 RUN();
3734
3735 ASSERT_EQUAL_NZCV(NFlag);
3736 ASSERT_EQUAL_64(0x8000000000000001L, x10);
3737
3738 START();
3739 __ Mov(x0, 0);
3740 // Clear the C flag.
3741 __ Adds(x0, x0, Operand(0));
3742 __ Sbcs(x10, x0, Operand(0xffffffffffffffffL));
3743 END();
3744
3745 RUN();
3746
3747 ASSERT_EQUAL_NZCV(ZFlag);
3748 ASSERT_EQUAL_64(0, x10);
3749
3750 START();
3751 __ Mov(w0, 0x7fffffff);
3752 // Clear the C flag.
3753 __ Adds(x0, x0, Operand(0));
3754 __ Ngcs(w10, w0);
3755 END();
3756
3757 RUN();
3758
3759 ASSERT_EQUAL_NZCV(NFlag);
3760 ASSERT_EQUAL_64(0x80000000, x10);
3761
3762 START();
3763 // Clear the C flag.
3764 __ Adds(x0, x0, Operand(0));
3765 __ Ngcs(x10, 0x7fffffffffffffffL);
3766 END();
3767
3768 RUN();
3769
3770 ASSERT_EQUAL_NZCV(NFlag);
3771 ASSERT_EQUAL_64(0x8000000000000000L, x10);
3772
3773 START();
3774 __ Mov(x0, 0);
3775 // Set the C flag.
3776 __ Cmp(x0, Operand(x0));
3777 __ Sbcs(x10, x0, Operand(1));
3778 END();
3779
3780 RUN();
3781
3782 ASSERT_EQUAL_NZCV(NFlag);
3783 ASSERT_EQUAL_64(0xffffffffffffffffL, x10);
3784
3785 START();
3786 __ Mov(x0, 0);
3787 // Set the C flag.
3788 __ Cmp(x0, Operand(x0));
3789 __ Ngcs(x10, 0x7fffffffffffffffL);
3790 END();
3791
3792 RUN();
3793
3794 ASSERT_EQUAL_NZCV(NFlag);
3795 ASSERT_EQUAL_64(0x8000000000000001L, x10);
3796
3797 TEARDOWN();
3798 }
3799
3800
3801 TEST(adc_sbc_extend) {
3802 INIT_V8();
3803 SETUP();
3804
3805 START();
3806 // Clear the C flag.
3807 __ Adds(x0, x0, Operand(0));
3808
3809 __ Mov(x0, 0);
3810 __ Mov(x1, 1);
3811 __ Mov(x2, 0x0123456789abcdefL);
3812
3813 __ Adc(x10, x1, Operand(w2, UXTB, 1));
3814 __ Adc(x11, x1, Operand(x2, SXTH, 2));
3815 __ Sbc(x12, x1, Operand(w2, UXTW, 4));
3816 __ Adc(x13, x1, Operand(x2, UXTX, 4));
3817
3818 __ Adc(w14, w1, Operand(w2, UXTB, 1));
3819 __ Adc(w15, w1, Operand(w2, SXTH, 2));
3820 __ Adc(w9, w1, Operand(w2, UXTW, 4));
3821
3822 // Set the C flag.
3823 __ Cmp(w0, Operand(w0));
3824
3825 __ Adc(x20, x1, Operand(w2, UXTB, 1));
3826 __ Adc(x21, x1, Operand(x2, SXTH, 2));
3827 __ Sbc(x22, x1, Operand(w2, UXTW, 4));
3828 __ Adc(x23, x1, Operand(x2, UXTX, 4));
3829
3830 __ Adc(w24, w1, Operand(w2, UXTB, 1));
3831 __ Adc(w25, w1, Operand(w2, SXTH, 2));
3832 __ Adc(w26, w1, Operand(w2, UXTW, 4));
3833 END();
3834
3835 RUN();
3836
3837 ASSERT_EQUAL_64(0x1df, x10);
3838 ASSERT_EQUAL_64(0xffffffffffff37bdL, x11);
3839 ASSERT_EQUAL_64(0xfffffff765432110L, x12);
3840 ASSERT_EQUAL_64(0x123456789abcdef1L, x13);
3841
3842 ASSERT_EQUAL_32(0x1df, w14);
3843 ASSERT_EQUAL_32(0xffff37bd, w15);
3844 ASSERT_EQUAL_32(0x9abcdef1, w9);
3845
3846 ASSERT_EQUAL_64(0x1df + 1, x20);
3847 ASSERT_EQUAL_64(0xffffffffffff37bdL + 1, x21);
3848 ASSERT_EQUAL_64(0xfffffff765432110L + 1, x22);
3849 ASSERT_EQUAL_64(0x123456789abcdef1L + 1, x23);
3850
3851 ASSERT_EQUAL_32(0x1df + 1, w24);
3852 ASSERT_EQUAL_32(0xffff37bd + 1, w25);
3853 ASSERT_EQUAL_32(0x9abcdef1 + 1, w26);
3854
3855 // Check that adc correctly sets the condition flags.
3856 START();
3857 __ Mov(x0, 0xff);
3858 __ Mov(x1, 0xffffffffffffffffL);
3859 // Clear the C flag.
3860 __ Adds(x0, x0, Operand(0));
3861 __ Adcs(x10, x0, Operand(x1, SXTX, 1));
3862 END();
3863
3864 RUN();
3865
3866 ASSERT_EQUAL_NZCV(CFlag);
3867
3868 START();
3869 __ Mov(x0, 0x7fffffffffffffffL);
3870 __ Mov(x1, 1);
3871 // Clear the C flag.
3872 __ Adds(x0, x0, Operand(0));
3873 __ Adcs(x10, x0, Operand(x1, UXTB, 2));
3874 END();
3875
3876 RUN();
3877
3878 ASSERT_EQUAL_NZCV(NVFlag);
3879
3880 START();
3881 __ Mov(x0, 0x7fffffffffffffffL);
3882 // Clear the C flag.
3883 __ Adds(x0, x0, Operand(0));
3884 __ Adcs(x10, x0, Operand(1));
3885 END();
3886
3887 RUN();
3888
3889 ASSERT_EQUAL_NZCV(NVFlag);
3890
3891 TEARDOWN();
3892 }
3893
3894
3895 TEST(adc_sbc_wide_imm) {
3896 INIT_V8();
3897 SETUP();
3898
3899 START();
3900 __ Mov(x0, 0);
3901
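// Ngc(rd, operand) is Sbc(rd, zr, operand), i.e. ~operand + C.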
3902 // Clear the C flag.
3903 __ Adds(x0, x0, Operand(0));
3904
3905 __ Adc(x7, x0, Operand(0x1234567890abcdefUL));
3906 __ Adc(w8, w0, Operand(0xffffffff));
3907 __ Sbc(x9, x0, Operand(0x1234567890abcdefUL));
3908 __ Sbc(w10, w0, Operand(0xffffffff));
3909 __ Ngc(x11, Operand(0xffffffff00000000UL));
3910 __ Ngc(w12, Operand(0xffff0000));
3911
3912 // Set the C flag.
3913 __ Cmp(w0, Operand(w0));
3914
3915 __ Adc(x18, x0, Operand(0x1234567890abcdefUL));
3916 __ Adc(w19, w0, Operand(0xffffffff));
3917 __ Sbc(x20, x0, Operand(0x1234567890abcdefUL));
3918 __ Sbc(w21, w0, Operand(0xffffffff));
3919 __ Ngc(x22, Operand(0xffffffff00000000UL));
3920 __ Ngc(w23, Operand(0xffff0000));
3921 END();
3922
3923 RUN();
3924
3925 ASSERT_EQUAL_64(0x1234567890abcdefUL, x7);
3926 ASSERT_EQUAL_64(0xffffffff, x8);
3927 ASSERT_EQUAL_64(0xedcba9876f543210UL, x9);
3928 ASSERT_EQUAL_64(0, x10);
3929 ASSERT_EQUAL_64(0xffffffff, x11);
3930 ASSERT_EQUAL_64(0xffff, x12);
3931
3932 ASSERT_EQUAL_64(0x1234567890abcdefUL + 1, x18);
3933 ASSERT_EQUAL_64(0, x19);
3934 ASSERT_EQUAL_64(0xedcba9876f543211UL, x20);
3935 ASSERT_EQUAL_64(1, x21);
3936 ASSERT_EQUAL_64(0x100000000UL, x22);
3937 ASSERT_EQUAL_64(0x10000, x23);
3938
3939 TEARDOWN();
3940 }
3941
3942
3943 TEST(flags) {
3944 INIT_V8();
3945 SETUP();
3946
3947 START();
3948 __ Mov(x0, 0);
3949 __ Mov(x1, 0x1111111111111111L);
3950 __ Neg(x10, Operand(x0));
3951 __ Neg(x11, Operand(x1));
3952 __ Neg(w12, Operand(w1));
3953 // Clear the C flag.
3954 __ Adds(x0, x0, Operand(0));
3955 __ Ngc(x13, Operand(x0));
3956 // Set the C flag.
3957 __ Cmp(x0, Operand(x0));
3958 __ Ngc(w14, Operand(w0));
3959 END();
3960
3961 RUN();
3962
3963 ASSERT_EQUAL_64(0, x10);
3964 ASSERT_EQUAL_64(-0x1111111111111111L, x11);
3965 ASSERT_EQUAL_32(-0x11111111, w12);
3966 ASSERT_EQUAL_64(-1L, x13);
3967 ASSERT_EQUAL_32(0, w14);
3968
3969 START();
3970 __ Mov(x0, 0);
3971 __ Cmp(x0, Operand(x0));
3972 END();
3973
3974 RUN();
3975
3976 ASSERT_EQUAL_NZCV(ZCFlag);
3977
3978 START();
3979 __ Mov(w0, 0);
3980 __ Cmp(w0, Operand(w0));
3981 END();
3982
3983 RUN();
3984
3985 ASSERT_EQUAL_NZCV(ZCFlag);
3986
3987 START();
3988 __ Mov(x0, 0);
3989 __ Mov(x1, 0x1111111111111111L);
3990 __ Cmp(x0, Operand(x1));
3991 END();
3992
3993 RUN();
3994
3995 ASSERT_EQUAL_NZCV(NFlag);
3996
3997 START();
3998 __ Mov(w0, 0);
3999 __ Mov(w1, 0x11111111);
4000 __ Cmp(w0, Operand(w1));
4001 END();
4002
4003 RUN();
4004
4005 ASSERT_EQUAL_NZCV(NFlag);
4006
4007 START();
4008 __ Mov(x1, 0x1111111111111111L);
4009 __ Cmp(x1, Operand(0));
4010 END();
4011
4012 RUN();
4013
4014 ASSERT_EQUAL_NZCV(CFlag);
4015
4016 START();
4017 __ Mov(w1, 0x11111111);
4018 __ Cmp(w1, Operand(0));
4019 END();
4020
4021 RUN();
4022
4023 ASSERT_EQUAL_NZCV(CFlag);
4024
4025 START();
4026 __ Mov(x0, 1);
4027 __ Mov(x1, 0x7fffffffffffffffL);
4028 __ Cmn(x1, Operand(x0));
4029 END();
4030
4031 RUN();
4032
4033 ASSERT_EQUAL_NZCV(NVFlag);
4034
4035 START();
4036 __ Mov(w0, 1);
4037 __ Mov(w1, 0x7fffffff);
4038 __ Cmn(w1, Operand(w0));
4039 END();
4040
4041 RUN();
4042
4043 ASSERT_EQUAL_NZCV(NVFlag);
4044
4045 START();
4046 __ Mov(x0, 1);
4047 __ Mov(x1, 0xffffffffffffffffL);
4048 __ Cmn(x1, Operand(x0));
4049 END();
4050
4051 RUN();
4052
4053 ASSERT_EQUAL_NZCV(ZCFlag);
4054
4055 START();
4056 __ Mov(w0, 1);
4057 __ Mov(w1, 0xffffffff);
4058 __ Cmn(w1, Operand(w0));
4059 END();
4060
4061 RUN();
4062
4063 ASSERT_EQUAL_NZCV(ZCFlag);
4064
4065 START();
4066 __ Mov(w0, 0);
4067 __ Mov(w1, 1);
4068 // Clear the C flag.
4069 __ Adds(w0, w0, Operand(0));
4070 __ Ngcs(w0, Operand(w1));
4071 END();
4072
4073 RUN();
4074
4075 ASSERT_EQUAL_NZCV(NFlag);
4076
4077 START();
4078 __ Mov(w0, 0);
4079 __ Mov(w1, 0);
4080 // Set the C flag.
4081 __ Cmp(w0, Operand(w0));
4082 __ Ngcs(w0, Operand(w1));
4083 END();
4084
4085 RUN();
4086
4087 ASSERT_EQUAL_NZCV(ZCFlag);
4088
4089 TEARDOWN();
4090 }
4091
4092
4093 TEST(cmp_shift) {
4094 INIT_V8();
4095 SETUP();
4096
4097 START();
4098 __ Mov(x18, 0xf0000000);
4099 __ Mov(x19, 0xf000000010000000UL);
4100 __ Mov(x20, 0xf0000000f0000000UL);
4101 __ Mov(x21, 0x7800000078000000UL);
4102 __ Mov(x22, 0x3c0000003c000000UL);
4103 __ Mov(x23, 0x8000000780000000UL);
4104 __ Mov(x24, 0x0000000f00000000UL);
4105 __ Mov(x25, 0x00000003c0000000UL);
4106 __ Mov(x26, 0x8000000780000000UL);
4107 __ Mov(x27, 0xc0000003);
4108
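// Each comparison below is constructed so that the shifted operand equals the
// first operand, so every result should be ZC.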
4109 __ Cmp(w20, Operand(w21, LSL, 1));
4110 __ Mrs(x0, NZCV);
4111
4112 __ Cmp(x20, Operand(x22, LSL, 2));
4113 __ Mrs(x1, NZCV);
4114
4115 __ Cmp(w19, Operand(w23, LSR, 3));
4116 __ Mrs(x2, NZCV);
4117
4118 __ Cmp(x18, Operand(x24, LSR, 4));
4119 __ Mrs(x3, NZCV);
4120
4121 __ Cmp(w20, Operand(w25, ASR, 2));
4122 __ Mrs(x4, NZCV);
4123
4124 __ Cmp(x20, Operand(x26, ASR, 3));
4125 __ Mrs(x5, NZCV);
4126
4127 __ Cmp(w27, Operand(w22, ROR, 28));
4128 __ Mrs(x6, NZCV);
4129
4130 __ Cmp(x20, Operand(x21, ROR, 31));
4131 __ Mrs(x7, NZCV);
4132 END();
4133
4134 RUN();
4135
4136 ASSERT_EQUAL_32(ZCFlag, w0);
4137 ASSERT_EQUAL_32(ZCFlag, w1);
4138 ASSERT_EQUAL_32(ZCFlag, w2);
4139 ASSERT_EQUAL_32(ZCFlag, w3);
4140 ASSERT_EQUAL_32(ZCFlag, w4);
4141 ASSERT_EQUAL_32(ZCFlag, w5);
4142 ASSERT_EQUAL_32(ZCFlag, w6);
4143 ASSERT_EQUAL_32(ZCFlag, w7);
4144
4145 TEARDOWN();
4146 }
4147
4148
4149 TEST(cmp_extend) {
4150 INIT_V8();
4151 SETUP();
4152
4153 START();
4154 __ Mov(w20, 0x2);
4155 __ Mov(w21, 0x1);
4156 __ Mov(x22, 0xffffffffffffffffUL);
4157 __ Mov(x23, 0xff);
4158 __ Mov(x24, 0xfffffffffffffffeUL);
4159 __ Mov(x25, 0xffff);
4160 __ Mov(x26, 0xffffffff);
4161
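// The sign-extended cases are constructed to compare equal (ZC); the
// zero-extended cases leave a negative result with no borrow (NC).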
4162 __ Cmp(w20, Operand(w21, LSL, 1));
4163 __ Mrs(x0, NZCV);
4164
4165 __ Cmp(x22, Operand(x23, SXTB, 0));
4166 __ Mrs(x1, NZCV);
4167
4168 __ Cmp(x24, Operand(x23, SXTB, 1));
4169 __ Mrs(x2, NZCV);
4170
4171 __ Cmp(x24, Operand(x23, UXTB, 1));
4172 __ Mrs(x3, NZCV);
4173
4174 __ Cmp(w22, Operand(w25, UXTH));
4175 __ Mrs(x4, NZCV);
4176
4177 __ Cmp(x22, Operand(x25, SXTH));
4178 __ Mrs(x5, NZCV);
4179
4180 __ Cmp(x22, Operand(x26, UXTW));
4181 __ Mrs(x6, NZCV);
4182
4183 __ Cmp(x24, Operand(x26, SXTW, 1));
4184 __ Mrs(x7, NZCV);
4185 END();
4186
4187 RUN();
4188
4189 ASSERT_EQUAL_32(ZCFlag, w0);
4190 ASSERT_EQUAL_32(ZCFlag, w1);
4191 ASSERT_EQUAL_32(ZCFlag, w2);
4192 ASSERT_EQUAL_32(NCFlag, w3);
4193 ASSERT_EQUAL_32(NCFlag, w4);
4194 ASSERT_EQUAL_32(ZCFlag, w5);
4195 ASSERT_EQUAL_32(NCFlag, w6);
4196 ASSERT_EQUAL_32(ZCFlag, w7);
4197
4198 TEARDOWN();
4199 }
4200
4201
4202 TEST(ccmp) {
4203 INIT_V8();
4204 SETUP();
4205
4206 START();
4207 __ Mov(w16, 0);
4208 __ Mov(w17, 1);
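// Ccmp/Ccmn perform the comparison only if the condition holds; otherwise
// NZCV is set directly to the supplied flag literal. The raw ccmp is used
// below for al and nv, which the macro rejects.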
4209 __ Cmp(w16, w16);
4210 __ Ccmp(w16, w17, NCFlag, eq);
4211 __ Mrs(x0, NZCV);
4212
4213 __ Cmp(w16, w16);
4214 __ Ccmp(w16, w17, NCFlag, ne);
4215 __ Mrs(x1, NZCV);
4216
4217 __ Cmp(x16, x16);
4218 __ Ccmn(x16, 2, NZCVFlag, eq);
4219 __ Mrs(x2, NZCV);
4220
4221 __ Cmp(x16, x16);
4222 __ Ccmn(x16, 2, NZCVFlag, ne);
4223 __ Mrs(x3, NZCV);
4224
4225 __ ccmp(x16, x16, NZCVFlag, al);
4226 __ Mrs(x4, NZCV);
4227
4228 __ ccmp(x16, x16, NZCVFlag, nv);
4229 __ Mrs(x5, NZCV);
4230
4231 END();
4232
4233 RUN();
4234
4235 ASSERT_EQUAL_32(NFlag, w0);
4236 ASSERT_EQUAL_32(NCFlag, w1);
4237 ASSERT_EQUAL_32(NoFlag, w2);
4238 ASSERT_EQUAL_32(NZCVFlag, w3);
4239 ASSERT_EQUAL_32(ZCFlag, w4);
4240 ASSERT_EQUAL_32(ZCFlag, w5);
4241
4242 TEARDOWN();
4243 }
4244
4245
4246 TEST(ccmp_wide_imm) {
4247 INIT_V8();
4248 SETUP();
4249
4250 START();
4251 __ Mov(w20, 0);
4252
4253 __ Cmp(w20, Operand(w20));
4254 __ Ccmp(w20, Operand(0x12345678), NZCVFlag, eq);
4255 __ Mrs(x0, NZCV);
4256
4257 __ Cmp(w20, Operand(w20));
4258 __ Ccmp(x20, Operand(0xffffffffffffffffUL), NZCVFlag, eq);
4259 __ Mrs(x1, NZCV);
4260 END();
4261
4262 RUN();
4263
4264 ASSERT_EQUAL_32(NFlag, w0);
4265 ASSERT_EQUAL_32(NoFlag, w1);
4266
4267 TEARDOWN();
4268 }
4269
4270
4271 TEST(ccmp_shift_extend) {
4272 INIT_V8();
4273 SETUP();
4274
4275 START();
4276 __ Mov(w20, 0x2);
4277 __ Mov(w21, 0x1);
4278 __ Mov(x22, 0xffffffffffffffffUL);
4279 __ Mov(x23, 0xff);
4280 __ Mov(x24, 0xfffffffffffffffeUL);
4281
4282 __ Cmp(w20, Operand(w20));
4283 __ Ccmp(w20, Operand(w21, LSL, 1), NZCVFlag, eq);
4284 __ Mrs(x0, NZCV);
4285
4286 __ Cmp(w20, Operand(w20));
4287 __ Ccmp(x22, Operand(x23, SXTB, 0), NZCVFlag, eq);
4288 __ Mrs(x1, NZCV);
4289
4290 __ Cmp(w20, Operand(w20));
4291 __ Ccmp(x24, Operand(x23, SXTB, 1), NZCVFlag, eq);
4292 __ Mrs(x2, NZCV);
4293
4294 __ Cmp(w20, Operand(w20));
4295 __ Ccmp(x24, Operand(x23, UXTB, 1), NZCVFlag, eq);
4296 __ Mrs(x3, NZCV);
4297
4298 __ Cmp(w20, Operand(w20));
4299 __ Ccmp(x24, Operand(x23, UXTB, 1), NZCVFlag, ne);
4300 __ Mrs(x4, NZCV);
4301 END();
4302
4303 RUN();
4304
4305 ASSERT_EQUAL_32(ZCFlag, w0);
4306 ASSERT_EQUAL_32(ZCFlag, w1);
4307 ASSERT_EQUAL_32(ZCFlag, w2);
4308 ASSERT_EQUAL_32(NCFlag, w3);
4309 ASSERT_EQUAL_32(NZCVFlag, w4);
4310
4311 TEARDOWN();
4312 }
4313
4314
4315 TEST(csel) {
4316 INIT_V8();
4317 SETUP();
4318
4319 START();
4320 __ Mov(x16, 0);
4321 __ Mov(x24, 0x0000000f0000000fUL);
4322 __ Mov(x25, 0x0000001f0000001fUL);
4323 __ Mov(x26, 0);
4324 __ Mov(x27, 0);
4325
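// Csel selects the first source if the condition holds and the second
// otherwise; Csinc/Csinv/Csneg select the incremented, inverted or negated
// second source. Cset, Csetm, Cinc, Cinv and Cneg are aliases of these.
// The raw csel is used for al and nv, which the macro rejects.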
4326 __ Cmp(w16, 0);
4327 __ Csel(w0, w24, w25, eq);
4328 __ Csel(w1, w24, w25, ne);
4329 __ Csinc(w2, w24, w25, mi);
4330 __ Csinc(w3, w24, w25, pl);
4331
4332 __ csel(w13, w24, w25, al);
4333 __ csel(x14, x24, x25, nv);
4334
4335 __ Cmp(x16, 1);
4336 __ Csinv(x4, x24, x25, gt);
4337 __ Csinv(x5, x24, x25, le);
4338 __ Csneg(x6, x24, x25, hs);
4339 __ Csneg(x7, x24, x25, lo);
4340
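  // Aliases.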
4341 __ Cset(w8, ne);
4342 __ Csetm(w9, ne);
4343 __ Cinc(x10, x25, ne);
4344 __ Cinv(x11, x24, ne);
4345 __ Cneg(x12, x24, ne);
4346
4347 __ csel(w15, w24, w25, al);
4348 __ csel(x18, x24, x25, nv);
4349
4350 __ CzeroX(x24, ne);
4351 __ CzeroX(x25, eq);
4352
4353 __ CmovX(x26, x25, ne);
4354 __ CmovX(x27, x25, eq);
4355 END();
4356
4357 RUN();
4358
4359 ASSERT_EQUAL_64(0x0000000f, x0);
4360 ASSERT_EQUAL_64(0x0000001f, x1);
4361 ASSERT_EQUAL_64(0x00000020, x2);
4362 ASSERT_EQUAL_64(0x0000000f, x3);
4363 ASSERT_EQUAL_64(0xffffffe0ffffffe0UL, x4);
4364 ASSERT_EQUAL_64(0x0000000f0000000fUL, x5);
4365 ASSERT_EQUAL_64(0xffffffe0ffffffe1UL, x6);
4366 ASSERT_EQUAL_64(0x0000000f0000000fUL, x7);
4367 ASSERT_EQUAL_64(0x00000001, x8);
4368 ASSERT_EQUAL_64(0xffffffff, x9);
4369 ASSERT_EQUAL_64(0x0000001f00000020UL, x10);
4370 ASSERT_EQUAL_64(0xfffffff0fffffff0UL, x11);
4371 ASSERT_EQUAL_64(0xfffffff0fffffff1UL, x12);
4372 ASSERT_EQUAL_64(0x0000000f, x13);
4373 ASSERT_EQUAL_64(0x0000000f0000000fUL, x14);
4374 ASSERT_EQUAL_64(0x0000000f, x15);
4375 ASSERT_EQUAL_64(0x0000000f0000000fUL, x18);
4376 ASSERT_EQUAL_64(0, x24);
4377 ASSERT_EQUAL_64(0x0000001f0000001fUL, x25);
4378 ASSERT_EQUAL_64(0x0000001f0000001fUL, x26);
4379 ASSERT_EQUAL_64(0, x27);
4380
4381 TEARDOWN();
4382 }
4383
4384
4385 TEST(csel_imm) {
4386 INIT_V8();
4387 SETUP();
4388
4389 START();
4390 __ Mov(x18, 0);
4391 __ Mov(x19, 0x80000000);
4392 __ Mov(x20, 0x8000000000000000UL);
4393
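  // The macro assembler encodes Csel immediates of 0, 1 and -1 as csel,
  // csinc and csinv against the zero register; other immediates must be
  // materialized first.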
4394 __ Cmp(x18, Operand(0));
4395 __ Csel(w0, w19, -2, ne);
4396 __ Csel(w1, w19, -1, ne);
4397 __ Csel(w2, w19, 0, ne);
4398 __ Csel(w3, w19, 1, ne);
4399 __ Csel(w4, w19, 2, ne);
4400 __ Csel(w5, w19, Operand(w19, ASR, 31), ne);
4401 __ Csel(w6, w19, Operand(w19, ROR, 1), ne);
4402 __ Csel(w7, w19, 3, eq);
4403
4404 __ Csel(x8, x20, -2, ne);
4405 __ Csel(x9, x20, -1, ne);
4406 __ Csel(x10, x20, 0, ne);
4407 __ Csel(x11, x20, 1, ne);
4408 __ Csel(x12, x20, 2, ne);
4409 __ Csel(x13, x20, Operand(x20, ASR, 63), ne);
4410 __ Csel(x14, x20, Operand(x20, ROR, 1), ne);
4411 __ Csel(x15, x20, 3, eq);
4412
4413 END();
4414
4415 RUN();
4416
4417 ASSERT_EQUAL_32(-2, w0);
4418 ASSERT_EQUAL_32(-1, w1);
4419 ASSERT_EQUAL_32(0, w2);
4420 ASSERT_EQUAL_32(1, w3);
4421 ASSERT_EQUAL_32(2, w4);
4422 ASSERT_EQUAL_32(-1, w5);
4423 ASSERT_EQUAL_32(0x40000000, w6);
4424 ASSERT_EQUAL_32(0x80000000, w7);
4425
4426 ASSERT_EQUAL_64(-2, x8);
4427 ASSERT_EQUAL_64(-1, x9);
4428 ASSERT_EQUAL_64(0, x10);
4429 ASSERT_EQUAL_64(1, x11);
4430 ASSERT_EQUAL_64(2, x12);
4431 ASSERT_EQUAL_64(-1, x13);
4432 ASSERT_EQUAL_64(0x4000000000000000UL, x14);
4433 ASSERT_EQUAL_64(0x8000000000000000UL, x15);
4434
4435 TEARDOWN();
4436 }
4437
4438
4439 TEST(lslv) {
4440 INIT_V8();
4441 SETUP();
4442
4443 uint64_t value = 0x0123456789abcdefUL;
4444 int shift[] = {1, 3, 5, 9, 17, 33};
4445
4446 START();
4447 __ Mov(x0, value);
4448 __ Mov(w1, shift[0]);
4449 __ Mov(w2, shift[1]);
4450 __ Mov(w3, shift[2]);
4451 __ Mov(w4, shift[3]);
4452 __ Mov(w5, shift[4]);
4453 __ Mov(w6, shift[5]);
4454
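  // The variable shift amount is taken from the second source register,
  // modulo the register size, so shifting by xzr (zero) must leave the value
  // unchanged.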
4455 __ lslv(x0, x0, xzr);
4456
4457 __ Lsl(x16, x0, x1);
4458 __ Lsl(x17, x0, x2);
4459 __ Lsl(x18, x0, x3);
4460 __ Lsl(x19, x0, x4);
4461 __ Lsl(x20, x0, x5);
4462 __ Lsl(x21, x0, x6);
4463
4464 __ Lsl(w22, w0, w1);
4465 __ Lsl(w23, w0, w2);
4466 __ Lsl(w24, w0, w3);
4467 __ Lsl(w25, w0, w4);
4468 __ Lsl(w26, w0, w5);
4469 __ Lsl(w27, w0, w6);
4470 END();
4471
4472 RUN();
4473
4474 ASSERT_EQUAL_64(value, x0);
4475 ASSERT_EQUAL_64(value << (shift[0] & 63), x16);
4476 ASSERT_EQUAL_64(value << (shift[1] & 63), x17);
4477 ASSERT_EQUAL_64(value << (shift[2] & 63), x18);
4478 ASSERT_EQUAL_64(value << (shift[3] & 63), x19);
4479 ASSERT_EQUAL_64(value << (shift[4] & 63), x20);
4480 ASSERT_EQUAL_64(value << (shift[5] & 63), x21);
4481 ASSERT_EQUAL_32(value << (shift[0] & 31), w22);
4482 ASSERT_EQUAL_32(value << (shift[1] & 31), w23);
4483 ASSERT_EQUAL_32(value << (shift[2] & 31), w24);
4484 ASSERT_EQUAL_32(value << (shift[3] & 31), w25);
4485 ASSERT_EQUAL_32(value << (shift[4] & 31), w26);
4486 ASSERT_EQUAL_32(value << (shift[5] & 31), w27);
4487
4488 TEARDOWN();
4489 }
4490
4491
4492 TEST(lsrv) {
4493 INIT_V8();
4494 SETUP();
4495
4496 uint64_t value = 0x0123456789abcdefUL;
4497 int shift[] = {1, 3, 5, 9, 17, 33};
4498
4499 START();
4500 __ Mov(x0, value);
4501 __ Mov(w1, shift[0]);
4502 __ Mov(w2, shift[1]);
4503 __ Mov(w3, shift[2]);
4504 __ Mov(w4, shift[3]);
4505 __ Mov(w5, shift[4]);
4506 __ Mov(w6, shift[5]);
4507
4508 __ lsrv(x0, x0, xzr);
4509
4510 __ Lsr(x16, x0, x1);
4511 __ Lsr(x17, x0, x2);
4512 __ Lsr(x18, x0, x3);
4513 __ Lsr(x19, x0, x4);
4514 __ Lsr(x20, x0, x5);
4515 __ Lsr(x21, x0, x6);
4516
4517 __ Lsr(w22, w0, w1);
4518 __ Lsr(w23, w0, w2);
4519 __ Lsr(w24, w0, w3);
4520 __ Lsr(w25, w0, w4);
4521 __ Lsr(w26, w0, w5);
4522 __ Lsr(w27, w0, w6);
4523 END();
4524
4525 RUN();
4526
4527 ASSERT_EQUAL_64(value, x0);
4528 ASSERT_EQUAL_64(value >> (shift[0] & 63), x16);
4529 ASSERT_EQUAL_64(value >> (shift[1] & 63), x17);
4530 ASSERT_EQUAL_64(value >> (shift[2] & 63), x18);
4531 ASSERT_EQUAL_64(value >> (shift[3] & 63), x19);
4532 ASSERT_EQUAL_64(value >> (shift[4] & 63), x20);
4533 ASSERT_EQUAL_64(value >> (shift[5] & 63), x21);
4534
4535 value &= 0xffffffffUL;
4536 ASSERT_EQUAL_32(value >> (shift[0] & 31), w22);
4537 ASSERT_EQUAL_32(value >> (shift[1] & 31), w23);
4538 ASSERT_EQUAL_32(value >> (shift[2] & 31), w24);
4539 ASSERT_EQUAL_32(value >> (shift[3] & 31), w25);
4540 ASSERT_EQUAL_32(value >> (shift[4] & 31), w26);
4541 ASSERT_EQUAL_32(value >> (shift[5] & 31), w27);
4542
4543 TEARDOWN();
4544 }
4545
4546
4547 TEST(asrv) {
4548 INIT_V8();
4549 SETUP();
4550
4551 int64_t value = 0xfedcba98fedcba98UL;
4552 int shift[] = {1, 3, 5, 9, 17, 33};
4553
4554 START();
4555 __ Mov(x0, value);
4556 __ Mov(w1, shift[0]);
4557 __ Mov(w2, shift[1]);
4558 __ Mov(w3, shift[2]);
4559 __ Mov(w4, shift[3]);
4560 __ Mov(w5, shift[4]);
4561 __ Mov(w6, shift[5]);
4562
4563 __ asrv(x0, x0, xzr);
4564
4565 __ Asr(x16, x0, x1);
4566 __ Asr(x17, x0, x2);
4567 __ Asr(x18, x0, x3);
4568 __ Asr(x19, x0, x4);
4569 __ Asr(x20, x0, x5);
4570 __ Asr(x21, x0, x6);
4571
4572 __ Asr(w22, w0, w1);
4573 __ Asr(w23, w0, w2);
4574 __ Asr(w24, w0, w3);
4575 __ Asr(w25, w0, w4);
4576 __ Asr(w26, w0, w5);
4577 __ Asr(w27, w0, w6);
4578 END();
4579
4580 RUN();
4581
4582 ASSERT_EQUAL_64(value, x0);
4583 ASSERT_EQUAL_64(value >> (shift[0] & 63), x16);
4584 ASSERT_EQUAL_64(value >> (shift[1] & 63), x17);
4585 ASSERT_EQUAL_64(value >> (shift[2] & 63), x18);
4586 ASSERT_EQUAL_64(value >> (shift[3] & 63), x19);
4587 ASSERT_EQUAL_64(value >> (shift[4] & 63), x20);
4588 ASSERT_EQUAL_64(value >> (shift[5] & 63), x21);
4589
4590 int32_t value32 = static_cast<int32_t>(value & 0xffffffffUL);
4591 ASSERT_EQUAL_32(value32 >> (shift[0] & 31), w22);
4592 ASSERT_EQUAL_32(value32 >> (shift[1] & 31), w23);
4593 ASSERT_EQUAL_32(value32 >> (shift[2] & 31), w24);
4594 ASSERT_EQUAL_32(value32 >> (shift[3] & 31), w25);
4595 ASSERT_EQUAL_32(value32 >> (shift[4] & 31), w26);
4596 ASSERT_EQUAL_32(value32 >> (shift[5] & 31), w27);
4597
4598 TEARDOWN();
4599 }
4600
4601
4602 TEST(rorv) {
4603 INIT_V8();
4604 SETUP();
4605
4606 uint64_t value = 0x0123456789abcdefUL;
4607 int shift[] = {4, 8, 12, 16, 24, 36};
4608
4609 START();
4610 __ Mov(x0, value);
4611 __ Mov(w1, shift[0]);
4612 __ Mov(w2, shift[1]);
4613 __ Mov(w3, shift[2]);
4614 __ Mov(w4, shift[3]);
4615 __ Mov(w5, shift[4]);
4616 __ Mov(w6, shift[5]);
4617
4618 __ rorv(x0, x0, xzr);
4619
4620 __ Ror(x16, x0, x1);
4621 __ Ror(x17, x0, x2);
4622 __ Ror(x18, x0, x3);
4623 __ Ror(x19, x0, x4);
4624 __ Ror(x20, x0, x5);
4625 __ Ror(x21, x0, x6);
4626
4627 __ Ror(w22, w0, w1);
4628 __ Ror(w23, w0, w2);
4629 __ Ror(w24, w0, w3);
4630 __ Ror(w25, w0, w4);
4631 __ Ror(w26, w0, w5);
4632 __ Ror(w27, w0, w6);
4633 END();
4634
4635 RUN();
4636
4637 ASSERT_EQUAL_64(value, x0);
4638 ASSERT_EQUAL_64(0xf0123456789abcdeUL, x16);
4639 ASSERT_EQUAL_64(0xef0123456789abcdUL, x17);
4640 ASSERT_EQUAL_64(0xdef0123456789abcUL, x18);
4641 ASSERT_EQUAL_64(0xcdef0123456789abUL, x19);
4642 ASSERT_EQUAL_64(0xabcdef0123456789UL, x20);
4643 ASSERT_EQUAL_64(0x789abcdef0123456UL, x21);
4644 ASSERT_EQUAL_32(0xf89abcde, w22);
4645 ASSERT_EQUAL_32(0xef89abcd, w23);
4646 ASSERT_EQUAL_32(0xdef89abc, w24);
4647 ASSERT_EQUAL_32(0xcdef89ab, w25);
4648 ASSERT_EQUAL_32(0xabcdef89, w26);
4649 ASSERT_EQUAL_32(0xf89abcde, w27);
4650
4651 TEARDOWN();
4652 }
4653
4654
4655 TEST(bfm) {
4656 INIT_V8();
4657 SETUP();
4658
4659 START();
4660 __ Mov(x1, 0x0123456789abcdefL);
4661
4662 __ Mov(x10, 0x8888888888888888L);
4663 __ Mov(x11, 0x8888888888888888L);
4664 __ Mov(x12, 0x8888888888888888L);
4665 __ Mov(x13, 0x8888888888888888L);
4666 __ Mov(w20, 0x88888888);
4667 __ Mov(w21, 0x88888888);
4668
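  // bfm(rd, rn, immr, imms): if imms >= immr, bits <imms:immr> of rn are
  // copied to the low bits of rd (the bfxil alias); if imms < immr, the low
  // (imms + 1) bits of rn are inserted at bit (regsize - immr) (the bfi
  // alias). All other bits of rd are preserved.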
4669 __ bfm(x10, x1, 16, 31);
4670 __ bfm(x11, x1, 32, 15);
4671
4672 __ bfm(w20, w1, 16, 23);
4673 __ bfm(w21, w1, 24, 15);
4674
4675 // Aliases.
4676 __ Bfi(x12, x1, 16, 8);
4677 __ Bfxil(x13, x1, 16, 8);
4678 END();
4679
4680 RUN();
4681
4682
4683 ASSERT_EQUAL_64(0x88888888888889abL, x10);
4684 ASSERT_EQUAL_64(0x8888cdef88888888L, x11);
4685
4686 ASSERT_EQUAL_32(0x888888ab, w20);
4687 ASSERT_EQUAL_32(0x88cdef88, w21);
4688
4689 ASSERT_EQUAL_64(0x8888888888ef8888L, x12);
4690 ASSERT_EQUAL_64(0x88888888888888abL, x13);
4691
4692 TEARDOWN();
4693 }
4694
4695
4696 TEST(sbfm) {
4697 INIT_V8();
4698 SETUP();
4699
4700 START();
4701 __ Mov(x1, 0x0123456789abcdefL);
4702 __ Mov(x2, 0xfedcba9876543210L);
4703
4704 __ sbfm(x10, x1, 16, 31);
4705 __ sbfm(x11, x1, 32, 15);
4706 __ sbfm(x12, x1, 32, 47);
4707 __ sbfm(x13, x1, 48, 35);
4708
4709 __ sbfm(w14, w1, 16, 23);
4710 __ sbfm(w15, w1, 24, 15);
4711 __ sbfm(w16, w2, 16, 23);
4712 __ sbfm(w17, w2, 24, 15);
4713
4714 // Aliases.
4715 __ Asr(x18, x1, 32);
4716 __ Asr(x19, x2, 32);
4717 __ Sbfiz(x20, x1, 8, 16);
4718 __ Sbfiz(x21, x2, 8, 16);
4719 __ Sbfx(x22, x1, 8, 16);
4720 __ Sbfx(x23, x2, 8, 16);
4721 __ Sxtb(x24, w1);
4722 __ Sxtb(x25, x2);
4723 __ Sxth(x26, w1);
4724 __ Sxth(x27, x2);
4725 __ Sxtw(x28, w1);
4726 __ Sxtw(x29, x2);
4727 END();
4728
4729 RUN();
4730
4731
4732 ASSERT_EQUAL_64(0xffffffffffff89abL, x10);
4733 ASSERT_EQUAL_64(0xffffcdef00000000L, x11);
4734 ASSERT_EQUAL_64(0x4567L, x12);
4735 ASSERT_EQUAL_64(0x789abcdef0000L, x13);
4736
4737 ASSERT_EQUAL_32(0xffffffab, w14);
4738 ASSERT_EQUAL_32(0xffcdef00, w15);
4739 ASSERT_EQUAL_32(0x54, w16);
4740 ASSERT_EQUAL_32(0x00321000, w17);
4741
4742 ASSERT_EQUAL_64(0x01234567L, x18);
4743 ASSERT_EQUAL_64(0xfffffffffedcba98L, x19);
4744 ASSERT_EQUAL_64(0xffffffffffcdef00L, x20);
4745 ASSERT_EQUAL_64(0x321000L, x21);
4746 ASSERT_EQUAL_64(0xffffffffffffabcdL, x22);
4747 ASSERT_EQUAL_64(0x5432L, x23);
4748 ASSERT_EQUAL_64(0xffffffffffffffefL, x24);
4749 ASSERT_EQUAL_64(0x10, x25);
4750 ASSERT_EQUAL_64(0xffffffffffffcdefL, x26);
4751 ASSERT_EQUAL_64(0x3210, x27);
4752 ASSERT_EQUAL_64(0xffffffff89abcdefL, x28);
4753 ASSERT_EQUAL_64(0x76543210, x29);
4754
4755 TEARDOWN();
4756 }
4757
4758
4759 TEST(ubfm) {
4760 INIT_V8();
4761 SETUP();
4762
4763 START();
4764 __ Mov(x1, 0x0123456789abcdefL);
4765 __ Mov(x2, 0xfedcba9876543210L);
4766
4767 __ Mov(x10, 0x8888888888888888L);
4768 __ Mov(x11, 0x8888888888888888L);
4769
4770 __ ubfm(x10, x1, 16, 31);
4771 __ ubfm(x11, x1, 32, 15);
4772 __ ubfm(x12, x1, 32, 47);
4773 __ ubfm(x13, x1, 48, 35);
4774
4775 __ ubfm(w25, w1, 16, 23);
4776 __ ubfm(w26, w1, 24, 15);
4777 __ ubfm(w27, w2, 16, 23);
4778 __ ubfm(w28, w2, 24, 15);
4779
4780 // Aliases.
4781 __ Lsl(x15, x1, 63);
4782 __ Lsl(x16, x1, 0);
4783 __ Lsr(x17, x1, 32);
4784 __ Ubfiz(x18, x1, 8, 16);
4785 __ Ubfx(x19, x1, 8, 16);
4786 __ Uxtb(x20, x1);
4787 __ Uxth(x21, x1);
4788 __ Uxtw(x22, x1);
4789 END();
4790
4791 RUN();
4792
4793 ASSERT_EQUAL_64(0x00000000000089abL, x10);
4794 ASSERT_EQUAL_64(0x0000cdef00000000L, x11);
4795 ASSERT_EQUAL_64(0x4567L, x12);
4796 ASSERT_EQUAL_64(0x789abcdef0000L, x13);
4797
4798 ASSERT_EQUAL_32(0x000000ab, w25);
4799 ASSERT_EQUAL_32(0x00cdef00, w26);
4800 ASSERT_EQUAL_32(0x54, w27);
4801 ASSERT_EQUAL_32(0x00321000, w28);
4802
4803 ASSERT_EQUAL_64(0x8000000000000000L, x15);
4804 ASSERT_EQUAL_64(0x0123456789abcdefL, x16);
4805 ASSERT_EQUAL_64(0x01234567L, x17);
4806 ASSERT_EQUAL_64(0xcdef00L, x18);
4807 ASSERT_EQUAL_64(0xabcdL, x19);
4808 ASSERT_EQUAL_64(0xefL, x20);
4809 ASSERT_EQUAL_64(0xcdefL, x21);
4810 ASSERT_EQUAL_64(0x89abcdefL, x22);
4811
4812 TEARDOWN();
4813 }
4814
4815
4816 TEST(extr) {
4817 INIT_V8();
4818 SETUP();
4819
4820 START();
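  // Extr extracts a register-sized field from the concatenation rn:rm,
  // starting at the given least-significant bit of rm; Ror of a single
  // register is its alias with rn == rm.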
4821 __ Mov(x1, 0x0123456789abcdefL);
4822 __ Mov(x2, 0xfedcba9876543210L);
4823
4824 __ Extr(w10, w1, w2, 0);
4825 __ Extr(w11, w1, w2, 1);
4826 __ Extr(x12, x2, x1, 2);
4827
4828 __ Ror(w13, w1, 0);
4829 __ Ror(w14, w2, 17);
4830 __ Ror(w15, w1, 31);
4831 __ Ror(x18, x2, 1);
4832 __ Ror(x19, x1, 63);
4833 END();
4834
4835 RUN();
4836
4837 ASSERT_EQUAL_64(0x76543210, x10);
4838 ASSERT_EQUAL_64(0xbb2a1908, x11);
4839 ASSERT_EQUAL_64(0x0048d159e26af37bUL, x12);
4840 ASSERT_EQUAL_64(0x89abcdef, x13);
4841 ASSERT_EQUAL_64(0x19083b2a, x14);
4842 ASSERT_EQUAL_64(0x13579bdf, x15);
4843 ASSERT_EQUAL_64(0x7f6e5d4c3b2a1908UL, x18);
4844 ASSERT_EQUAL_64(0x02468acf13579bdeUL, x19);
4845
4846 TEARDOWN();
4847 }
4848
4849
4850 TEST(fmov_imm) {
4851 INIT_V8();
4852 SETUP();
4853
4854 START();
4855 __ Fmov(s11, 1.0);
4856 __ Fmov(d22, -13.0);
4857 __ Fmov(s1, 255.0);
4858 __ Fmov(d2, 12.34567);
4859 __ Fmov(s3, 0.0);
4860 __ Fmov(d4, 0.0);
4861 __ Fmov(s5, kFP32PositiveInfinity);
4862 __ Fmov(d6, kFP64NegativeInfinity);
4863 END();
4864
4865 RUN();
4866
4867 ASSERT_EQUAL_FP32(1.0, s11);
4868 ASSERT_EQUAL_FP64(-13.0, d22);
4869 ASSERT_EQUAL_FP32(255.0, s1);
4870 ASSERT_EQUAL_FP64(12.34567, d2);
4871 ASSERT_EQUAL_FP32(0.0, s3);
4872 ASSERT_EQUAL_FP64(0.0, d4);
4873 ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s5);
4874 ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d6);
4875
4876 TEARDOWN();
4877 }
4878
4879
4880 TEST(fmov_reg) {
4881 INIT_V8();
4882 SETUP();
4883
4884 START();
4885 __ Fmov(s20, 1.0);
4886 __ Fmov(w10, s20);
4887 __ Fmov(s30, w10);
4888 __ Fmov(s5, s20);
4889 __ Fmov(d1, -13.0);
4890 __ Fmov(x1, d1);
4891 __ Fmov(d2, x1);
4892 __ Fmov(d4, d1);
4893 __ Fmov(d6, rawbits_to_double(0x0123456789abcdefL));
4894 __ Fmov(s6, s6);
4895 END();
4896
4897 RUN();
4898
4899 ASSERT_EQUAL_32(float_to_rawbits(1.0), w10);
4900 ASSERT_EQUAL_FP32(1.0, s30);
4901 ASSERT_EQUAL_FP32(1.0, s5);
4902 ASSERT_EQUAL_64(double_to_rawbits(-13.0), x1);
4903 ASSERT_EQUAL_FP64(-13.0, d2);
4904 ASSERT_EQUAL_FP64(-13.0, d4);
4905 ASSERT_EQUAL_FP32(rawbits_to_float(0x89abcdef), s6);
4906
4907 TEARDOWN();
4908 }
4909
4910
4911 TEST(fadd) {
4912 INIT_V8();
4913 SETUP();
4914
4915 START();
4916 __ Fmov(s13, -0.0);
4917 __ Fmov(s14, kFP32PositiveInfinity);
4918 __ Fmov(s15, kFP32NegativeInfinity);
4919 __ Fmov(s16, 3.25);
4920 __ Fmov(s17, 1.0);
4921 __ Fmov(s18, 0);
4922
4923 __ Fmov(d26, -0.0);
4924 __ Fmov(d27, kFP64PositiveInfinity);
4925 __ Fmov(d28, kFP64NegativeInfinity);
4926 __ Fmov(d29, 0);
4927 __ Fmov(d30, -2.0);
4928 __ Fmov(d31, 2.25);
4929
4930 __ Fadd(s0, s16, s17);
4931 __ Fadd(s1, s17, s18);
4932 __ Fadd(s2, s13, s17);
4933 __ Fadd(s3, s14, s17);
4934 __ Fadd(s4, s15, s17);
4935
4936 __ Fadd(d5, d30, d31);
4937 __ Fadd(d6, d29, d31);
4938 __ Fadd(d7, d26, d31);
4939 __ Fadd(d8, d27, d31);
4940 __ Fadd(d9, d28, d31);
4941 END();
4942
4943 RUN();
4944
4945 ASSERT_EQUAL_FP32(4.25, s0);
4946 ASSERT_EQUAL_FP32(1.0, s1);
4947 ASSERT_EQUAL_FP32(1.0, s2);
4948 ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s3);
4949 ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s4);
4950 ASSERT_EQUAL_FP64(0.25, d5);
4951 ASSERT_EQUAL_FP64(2.25, d6);
4952 ASSERT_EQUAL_FP64(2.25, d7);
4953 ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d8);
4954 ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d9);
4955
4956 TEARDOWN();
4957 }
4958
4959
4960 TEST(fsub) {
4961 INIT_V8();
4962 SETUP();
4963
4964 START();
4965 __ Fmov(s13, -0.0);
4966 __ Fmov(s14, kFP32PositiveInfinity);
4967 __ Fmov(s15, kFP32NegativeInfinity);
4968 __ Fmov(s16, 3.25);
4969 __ Fmov(s17, 1.0);
4970 __ Fmov(s18, 0);
4971
4972 __ Fmov(d26, -0.0);
4973 __ Fmov(d27, kFP64PositiveInfinity);
4974 __ Fmov(d28, kFP64NegativeInfinity);
4975 __ Fmov(d29, 0);
4976 __ Fmov(d30, -2.0);
4977 __ Fmov(d31, 2.25);
4978
4979 __ Fsub(s0, s16, s17);
4980 __ Fsub(s1, s17, s18);
4981 __ Fsub(s2, s13, s17);
4982 __ Fsub(s3, s17, s14);
4983 __ Fsub(s4, s17, s15);
4984
4985 __ Fsub(d5, d30, d31);
4986 __ Fsub(d6, d29, d31);
4987 __ Fsub(d7, d26, d31);
4988 __ Fsub(d8, d31, d27);
4989 __ Fsub(d9, d31, d28);
4990 END();
4991
4992 RUN();
4993
4994 ASSERT_EQUAL_FP32(2.25, s0);
4995 ASSERT_EQUAL_FP32(1.0, s1);
4996 ASSERT_EQUAL_FP32(-1.0, s2);
4997 ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s3);
4998 ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s4);
4999 ASSERT_EQUAL_FP64(-4.25, d5);
5000 ASSERT_EQUAL_FP64(-2.25, d6);
5001 ASSERT_EQUAL_FP64(-2.25, d7);
5002 ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d8);
5003 ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d9);
5004
5005 TEARDOWN();
5006 }
5007
5008
5009 TEST(fmul) {
5010 INIT_V8();
5011 SETUP();
5012
5013 START();
5014 __ Fmov(s13, -0.0);
5015 __ Fmov(s14, kFP32PositiveInfinity);
5016 __ Fmov(s15, kFP32NegativeInfinity);
5017 __ Fmov(s16, 3.25);
5018 __ Fmov(s17, 2.0);
5019 __ Fmov(s18, 0);
5020 __ Fmov(s19, -2.0);
5021
5022 __ Fmov(d26, -0.0);
5023 __ Fmov(d27, kFP64PositiveInfinity);
5024 __ Fmov(d28, kFP64NegativeInfinity);
5025 __ Fmov(d29, 0);
5026 __ Fmov(d30, -2.0);
5027 __ Fmov(d31, 2.25);
5028
5029 __ Fmul(s0, s16, s17);
5030 __ Fmul(s1, s17, s18);
5031 __ Fmul(s2, s13, s13);
5032 __ Fmul(s3, s14, s19);
5033 __ Fmul(s4, s15, s19);
5034
5035 __ Fmul(d5, d30, d31);
5036 __ Fmul(d6, d29, d31);
5037 __ Fmul(d7, d26, d26);
5038 __ Fmul(d8, d27, d30);
5039 __ Fmul(d9, d28, d30);
5040 END();
5041
5042 RUN();
5043
5044 ASSERT_EQUAL_FP32(6.5, s0);
5045 ASSERT_EQUAL_FP32(0.0, s1);
5046 ASSERT_EQUAL_FP32(0.0, s2);
5047 ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s3);
5048 ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s4);
5049 ASSERT_EQUAL_FP64(-4.5, d5);
5050 ASSERT_EQUAL_FP64(0.0, d6);
5051 ASSERT_EQUAL_FP64(0.0, d7);
5052 ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d8);
5053 ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d9);
5054
5055 TEARDOWN();
5056 }
5057
5058
5059 static void FmaddFmsubDoubleHelper(double n, double m, double a,
5060 double fmadd, double fmsub) {
5061 SETUP();
5062 START();
5063
5064 __ Fmov(d0, n);
5065 __ Fmov(d1, m);
5066 __ Fmov(d2, a);
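  // Each result is computed with a single rounding step: Fmadd gives
  // (n * m) + a, Fmsub gives a - (n * m), and Fnmadd/Fnmsub negate those
  // results respectively.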
5067 __ Fmadd(d28, d0, d1, d2);
5068 __ Fmsub(d29, d0, d1, d2);
5069 __ Fnmadd(d30, d0, d1, d2);
5070 __ Fnmsub(d31, d0, d1, d2);
5071
5072 END();
5073 RUN();
5074
5075 ASSERT_EQUAL_FP64(fmadd, d28);
5076 ASSERT_EQUAL_FP64(fmsub, d29);
5077 ASSERT_EQUAL_FP64(-fmadd, d30);
5078 ASSERT_EQUAL_FP64(-fmsub, d31);
5079
5080 TEARDOWN();
5081 }
5082
5083
5084 TEST(fmadd_fmsub_double) {
5085 INIT_V8();
5086 double inputs[] = {
5087 // Normal numbers, including -0.0.
5088 DBL_MAX, DBL_MIN, 3.25, 2.0, 0.0,
5089 -DBL_MAX, -DBL_MIN, -3.25, -2.0, -0.0,
5090 // Infinities.
5091 kFP64NegativeInfinity, kFP64PositiveInfinity,
5092 // Subnormal numbers.
5093 rawbits_to_double(0x000fffffffffffff),
5094 rawbits_to_double(0x0000000000000001),
5095 rawbits_to_double(0x000123456789abcd),
5096 -rawbits_to_double(0x000fffffffffffff),
5097 -rawbits_to_double(0x0000000000000001),
5098 -rawbits_to_double(0x000123456789abcd),
5099 // NaN.
5100 kFP64QuietNaN,
5101 -kFP64QuietNaN,
5102 };
5103 const int count = sizeof(inputs) / sizeof(inputs[0]);
5104
5105 for (int in = 0; in < count; in++) {
5106 double n = inputs[in];
5107 for (int im = 0; im < count; im++) {
5108 double m = inputs[im];
5109 for (int ia = 0; ia < count; ia++) {
5110 double a = inputs[ia];
5111 double fmadd = fma(n, m, a);
5112 double fmsub = fma(-n, m, a);
5113
5114 FmaddFmsubDoubleHelper(n, m, a, fmadd, fmsub);
5115 }
5116 }
5117 }
5118 }
5119
5120
5121 TEST(fmadd_fmsub_double_rounding) {
5122 INIT_V8();
5123 // Make sure we run plenty of tests where an intermediate rounding stage would
5124 // produce an incorrect result; the sketch after this test shows one such case.
5125 const int limit = 1000;
5126 int count_fmadd = 0;
5127 int count_fmsub = 0;
5128
5129 uint16_t seed[3] = {42, 43, 44};
5130 seed48(seed);
5131
5132 while ((count_fmadd < limit) || (count_fmsub < limit)) {
5133 double n, m, a;
5134 uint32_t r[2];
5135 ASSERT(sizeof(r) == sizeof(n));
5136
5137 r[0] = mrand48();
5138 r[1] = mrand48();
5139 memcpy(&n, r, sizeof(r));
5140 r[0] = mrand48();
5141 r[1] = mrand48();
5142 memcpy(&m, r, sizeof(r));
5143 r[0] = mrand48();
5144 r[1] = mrand48();
5145 memcpy(&a, r, sizeof(r));
5146
5147 if (!std::isfinite(a) || !std::isfinite(n) || !std::isfinite(m)) {
5148 continue;
5149 }
5150
5151 // Calculate the expected results.
5152 double fmadd = fma(n, m, a);
5153 double fmsub = fma(-n, m, a);
5154
5155 bool test_fmadd = (fmadd != (a + n * m));
5156 bool test_fmsub = (fmsub != (a - n * m));
5157
5158 // If rounding would produce a different result, increment the test count.
5159 count_fmadd += test_fmadd;
5160 count_fmsub += test_fmsub;
5161
5162 if (test_fmadd || test_fmsub) {
5163 FmaddFmsubDoubleHelper(n, m, a, fmadd, fmsub);
5164 }
5165 }
5166 }
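
// A minimal standalone sketch (not part of the original test) of why
// fma(n, m, a) can differ from the separately rounded a + n * m, assuming
// round-to-nearest-even and the correctly rounded fma() from <cmath>.
// With n = m = 1 + 2^-27 and a = -1.0, the exact product is
// 1 + 2^-26 + 2^-54; rounding it to a double drops the 2^-54 term, so the
// split computation yields exactly 2^-26 while the fused one keeps it.
static bool FusedDiffersFromSplitExample() {  // Hypothetical helper.
  double n = 1.0 + ldexp(1.0, -27);  // 1 + 2^-27, exactly representable.
  double a = -1.0;
  double fused = fma(n, n, a);       // 2^-26 + 2^-54.
  double split = a + (n * n);        // n * n rounds to 1 + 2^-26, so 2^-26.
  return fused != split;             // True: the results differ by 2^-54.
}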
5167
5168
5169 static void FmaddFmsubFloatHelper(float n, float m, float a,
5170 float fmadd, float fmsub) {
5171 SETUP();
5172 START();
5173
5174 __ Fmov(s0, n);
5175 __ Fmov(s1, m);
5176 __ Fmov(s2, a);
5177 __ Fmadd(s30, s0, s1, s2);
5178 __ Fmsub(s31, s0, s1, s2);
5179
5180 END();
5181 RUN();
5182
5183 ASSERT_EQUAL_FP32(fmadd, s30);
5184 ASSERT_EQUAL_FP32(fmsub, s31);
5185
5186 TEARDOWN();
5187 }
5188
5189
5190 TEST(fmadd_fmsub_float) {
5191 INIT_V8();
5192 float inputs[] = {
5193 // Normal numbers, including -0.0f.
5194 FLT_MAX, FLT_MIN, 3.25f, 2.0f, 0.0f,
5195 -FLT_MAX, -FLT_MIN, -3.25f, -2.0f, -0.0f,
5196 // Infinities.
5197 kFP32NegativeInfinity, kFP32PositiveInfinity,
5198 // Subnormal numbers.
5199 rawbits_to_float(0x07ffffff),
5200 rawbits_to_float(0x00000001),
5201 rawbits_to_float(0x01234567),
5202 -rawbits_to_float(0x07ffffff),
5203 -rawbits_to_float(0x00000001),
5204 -rawbits_to_float(0x01234567),
5205 // NaN.
5206 kFP32QuietNaN,
5207 -kFP32QuietNaN,
5208 };
5209 const int count = sizeof(inputs) / sizeof(inputs[0]);
5210
5211 for (int in = 0; in < count; in++) {
5212 float n = inputs[in];
5213 for (int im = 0; im < count; im++) {
5214 float m = inputs[im];
5215 for (int ia = 0; ia < count; ia++) {
5216 float a = inputs[ia];
5217 float fmadd = fmaf(n, m, a);
5218 float fmsub = fmaf(-n, m, a);
5219
5220 FmaddFmsubFloatHelper(n, m, a, fmadd, fmsub);
5221 }
5222 }
5223 }
5224 }
5225
5226
5227 TEST(fmadd_fmsub_float_rounding) {
5228 INIT_V8();
5229 // Make sure we run plenty of tests where an intermediate rounding stage would
5230 // produce an incorrect result.
5231 const int limit = 1000;
5232 int count_fmadd = 0;
5233 int count_fmsub = 0;
5234
5235 uint16_t seed[3] = {42, 43, 44};
5236 seed48(seed);
5237
5238 while ((count_fmadd < limit) || (count_fmsub < limit)) {
5239 float n, m, a;
5240 uint32_t r;
5241 ASSERT(sizeof(r) == sizeof(n));
5242
5243 r = mrand48();
5244 memcpy(&n, &r, sizeof(r));
5245 r = mrand48();
5246 memcpy(&m, &r, sizeof(r));
5247 r = mrand48();
5248 memcpy(&a, &r, sizeof(r));
5249
5250 if (!std::isfinite(a) || !std::isfinite(n) || !std::isfinite(m)) {
5251 continue;
5252 }
5253
5254 // Calculate the expected results.
5255 float fmadd = fmaf(n, m, a);
5256 float fmsub = fmaf(-n, m, a);
5257
5258 bool test_fmadd = (fmadd != (a + n * m));
5259 bool test_fmsub = (fmsub != (a - n * m));
5260
5261 // If rounding would produce a different result, increment the test count.
5262 count_fmadd += test_fmadd;
5263 count_fmsub += test_fmsub;
5264
5265 if (test_fmadd || test_fmsub) {
5266 FmaddFmsubFloatHelper(n, m, a, fmadd, fmsub);
5267 }
5268 }
5269 }
5270
5271
5272 TEST(fdiv) {
5273 INIT_V8();
5274 SETUP();
5275
5276 START();
5277 __ Fmov(s13, -0.0);
5278 __ Fmov(s14, kFP32PositiveInfinity);
5279 __ Fmov(s15, kFP32NegativeInfinity);
5280 __ Fmov(s16, 3.25);
5281 __ Fmov(s17, 2.0);
5282 __ Fmov(s18, 2.0);
5283 __ Fmov(s19, -2.0);
5284
5285 __ Fmov(d26, -0.0);
5286 __ Fmov(d27, kFP64PositiveInfinity);
5287 __ Fmov(d28, kFP64NegativeInfinity);
5288 __ Fmov(d29, 0);
5289 __ Fmov(d30, -2.0);
5290 __ Fmov(d31, 2.25);
5291
5292 __ Fdiv(s0, s16, s17);
5293 __ Fdiv(s1, s17, s18);
5294 __ Fdiv(s2, s13, s17);
5295 __ Fdiv(s3, s17, s14);
5296 __ Fdiv(s4, s17, s15);
5297 __ Fdiv(d5, d31, d30);
5298 __ Fdiv(d6, d29, d31);
5299 __ Fdiv(d7, d26, d31);
5300 __ Fdiv(d8, d31, d27);
5301 __ Fdiv(d9, d31, d28);
5302 END();
5303
5304 RUN();
5305
5306 ASSERT_EQUAL_FP32(1.625, s0);
5307 ASSERT_EQUAL_FP32(1.0, s1);
5308 ASSERT_EQUAL_FP32(-0.0, s2);
5309 ASSERT_EQUAL_FP32(0.0, s3);
5310 ASSERT_EQUAL_FP32(-0.0, s4);
5311 ASSERT_EQUAL_FP64(-1.125, d5);
5312 ASSERT_EQUAL_FP64(0.0, d6);
5313 ASSERT_EQUAL_FP64(-0.0, d7);
5314 ASSERT_EQUAL_FP64(0.0, d8);
5315 ASSERT_EQUAL_FP64(-0.0, d9);
5316
5317 TEARDOWN();
5318 }
5319
5320
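// The MinMaxHelper overloads model the A64 Fmin/Fmax/Fminnm/Fmaxnm rules:
// a signalling NaN operand always wins; a quiet NaN wins unless
// quiet_nan_substitute is given (the Fminnm/Fmaxnm behaviour, where the
// numerical operand wins); and a 0.0/-0.0 pair is resolved by sign.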
5321 static float MinMaxHelper(float n,
5322 float m,
5323 bool min,
5324 float quiet_nan_substitute = 0.0) {
5325 const uint64_t kFP32QuietNaNMask = 0x00400000UL;
5326 uint32_t raw_n = float_to_rawbits(n);
5327 uint32_t raw_m = float_to_rawbits(m);
5328
5329 if (std::isnan(n) && ((raw_n & kFP32QuietNaNMask) == 0)) {
5330 // n is signalling NaN.
5331 return n;
5332 } else if (std::isnan(m) && ((raw_m & kFP32QuietNaNMask) == 0)) {
5333 // m is signalling NaN.
5334 return m;
5335 } else if (quiet_nan_substitute == 0.0) {
5336 if (std::isnan(n)) {
5337 // n is quiet NaN.
5338 return n;
5339 } else if (std::isnan(m)) {
5340 // m is quiet NaN.
5341 return m;
5342 }
5343 } else {
5344 // Substitute n or m if one is quiet, but not both.
5345 if (std::isnan(n) && !std::isnan(m)) {
5346 // n is quiet NaN: replace with substitute.
5347 n = quiet_nan_substitute;
5348 } else if (!std::isnan(n) && std::isnan(m)) {
5349 // m is quiet NaN: replace with substitute.
5350 m = quiet_nan_substitute;
5351 }
5352 }
5353
5354 if ((n == 0.0) && (m == 0.0) &&
5355 (copysign(1.0, n) != copysign(1.0, m))) {
5356 return min ? -0.0 : 0.0;
5357 }
5358
5359 return min ? fminf(n, m) : fmaxf(n, m);
5360 }
5361
5362
5363 static double MinMaxHelper(double n,
5364 double m,
5365 bool min,
5366 double quiet_nan_substitute = 0.0) {
5367 const uint64_t kFP64QuietNaNMask = 0x0008000000000000UL;
5368 uint64_t raw_n = double_to_rawbits(n);
5369 uint64_t raw_m = double_to_rawbits(m);
5370
5371 if (std::isnan(n) && ((raw_n & kFP64QuietNaNMask) == 0)) {
5372 // n is signalling NaN.
5373 return n;
5374 } else if (std::isnan(m) && ((raw_m & kFP64QuietNaNMask) == 0)) {
5375 // m is signalling NaN.
5376 return m;
5377 } else if (quiet_nan_substitute == 0.0) {
5378 if (std::isnan(n)) {
5379 // n is quiet NaN.
5380 return n;
5381 } else if (std::isnan(m)) {
5382 // m is quiet NaN.
5383 return m;
5384 }
5385 } else {
5386 // Substitute n or m if one is quiet, but not both.
5387 if (std::isnan(n) && !std::isnan(m)) {
5388 // n is quiet NaN: replace with substitute.
5389 n = quiet_nan_substitute;
5390 } else if (!std::isnan(n) && std::isnan(m)) {
5391 // m is quiet NaN: replace with substitute.
5392 m = quiet_nan_substitute;
5393 }
5394 }
5395
5396 if ((n == 0.0) && (m == 0.0) &&
5397 (copysign(1.0, n) != copysign(1.0, m))) {
5398 return min ? -0.0 : 0.0;
5399 }
5400
5401 return min ? fmin(n, m) : fmax(n, m);
5402 }
5403
5404
5405 static void FminFmaxDoubleHelper(double n, double m, double min, double max,
5406 double minnm, double maxnm) {
5407 SETUP();
5408
5409 START();
5410 __ Fmov(d0, n);
5411 __ Fmov(d1, m);
5412 __ Fmin(d28, d0, d1);
5413 __ Fmax(d29, d0, d1);
5414 __ Fminnm(d30, d0, d1);
5415 __ Fmaxnm(d31, d0, d1);
5416 END();
5417
5418 RUN();
5419
5420 ASSERT_EQUAL_FP64(min, d28);
5421 ASSERT_EQUAL_FP64(max, d29);
5422 ASSERT_EQUAL_FP64(minnm, d30);
5423 ASSERT_EQUAL_FP64(maxnm, d31);
5424
5425 TEARDOWN();
5426 }
5427
5428
5429 TEST(fmax_fmin_d) {
5430 INIT_V8();
5431 // Bootstrap tests.
5432 FminFmaxDoubleHelper(0, 0, 0, 0, 0, 0);
5433 FminFmaxDoubleHelper(0, 1, 0, 1, 0, 1);
5434 FminFmaxDoubleHelper(kFP64PositiveInfinity, kFP64NegativeInfinity,
5435 kFP64NegativeInfinity, kFP64PositiveInfinity,
5436 kFP64NegativeInfinity, kFP64PositiveInfinity);
5437 FminFmaxDoubleHelper(kFP64SignallingNaN, 0,
5438 kFP64SignallingNaN, kFP64SignallingNaN,
5439 kFP64SignallingNaN, kFP64SignallingNaN);
5440 FminFmaxDoubleHelper(kFP64QuietNaN, 0,
5441 kFP64QuietNaN, kFP64QuietNaN,
5442 0, 0);
5443 FminFmaxDoubleHelper(kFP64QuietNaN, kFP64SignallingNaN,
5444 kFP64SignallingNaN, kFP64SignallingNaN,
5445 kFP64SignallingNaN, kFP64SignallingNaN);
5446
5447 // Iterate over all combinations of inputs.
5448 double inputs[] = { DBL_MAX, DBL_MIN, 1.0, 0.0,
5449 -DBL_MAX, -DBL_MIN, -1.0, -0.0,
5450 kFP64PositiveInfinity, kFP64NegativeInfinity,
5451 kFP64QuietNaN, kFP64SignallingNaN };
5452
5453 const int count = sizeof(inputs) / sizeof(inputs[0]);
5454
5455 for (int in = 0; in < count; in++) {
5456 double n = inputs[in];
5457 for (int im = 0; im < count; im++) {
5458 double m = inputs[im];
5459 FminFmaxDoubleHelper(n, m,
5460 MinMaxHelper(n, m, true),
5461 MinMaxHelper(n, m, false),
5462 MinMaxHelper(n, m, true, kFP64PositiveInfinity),
5463 MinMaxHelper(n, m, false, kFP64NegativeInfinity));
5464 }
5465 }
5466 }
5467
5468
5469 static void FminFmaxFloatHelper(float n, float m, float min, float max,
5470 float minnm, float maxnm) {
5471 SETUP();
5472
5473 START();
5474 // TODO(all): Signalling NaNs are sometimes converted by the C compiler to
5475 // quiet NaNs on implicit casts from float to double. Here, we move the raw
5476 // bits into a W register first, so we get the correct value. Fix Fmov so this
5477 // additional step is no longer needed.
5478 __ Mov(w0, float_to_rawbits(n));
5479 __ Fmov(s0, w0);
5480 __ Mov(w0, float_to_rawbits(m));
5481 __ Fmov(s1, w0);
5482 __ Fmin(s28, s0, s1);
5483 __ Fmax(s29, s0, s1);
5484 __ Fminnm(s30, s0, s1);
5485 __ Fmaxnm(s31, s0, s1);
5486 END();
5487
5488 RUN();
5489
5490 ASSERT_EQUAL_FP32(min, s28);
5491 ASSERT_EQUAL_FP32(max, s29);
5492 ASSERT_EQUAL_FP32(minnm, s30);
5493 ASSERT_EQUAL_FP32(maxnm, s31);
5494
5495 TEARDOWN();
5496 }
5497
5498
5499 TEST(fmax_fmin_s) {
5500 INIT_V8();
5501 // Bootstrap tests.
5502 FminFmaxFloatHelper(0, 0, 0, 0, 0, 0);
5503 FminFmaxFloatHelper(0, 1, 0, 1, 0, 1);
5504 FminFmaxFloatHelper(kFP32PositiveInfinity, kFP32NegativeInfinity,
5505 kFP32NegativeInfinity, kFP32PositiveInfinity,
5506 kFP32NegativeInfinity, kFP32PositiveInfinity);
5507 FminFmaxFloatHelper(kFP32SignallingNaN, 0,
5508 kFP32SignallingNaN, kFP32SignallingNaN,
5509 kFP32SignallingNaN, kFP32SignallingNaN);
5510 FminFmaxFloatHelper(kFP32QuietNaN, 0,
5511 kFP32QuietNaN, kFP32QuietNaN,
5512 0, 0);
5513 FminFmaxFloatHelper(kFP32QuietNaN, kFP32SignallingNaN,
5514 kFP32SignallingNaN, kFP32SignallingNaN,
5515 kFP32SignallingNaN, kFP32SignallingNaN);
5516
5517 // Iterate over all combinations of inputs.
5518 float inputs[] = { FLT_MAX, FLT_MIN, 1.0, 0.0,
5519 -FLT_MAX, -FLT_MIN, -1.0, -0.0,
5520 kFP32PositiveInfinity, kFP32NegativeInfinity,
5521 kFP32QuietNaN, kFP32SignallingNaN };
5522
5523 const int count = sizeof(inputs) / sizeof(inputs[0]);
5524
5525 for (int in = 0; in < count; in++) {
5526 float n = inputs[in];
5527 for (int im = 0; im < count; im++) {
5528 float m = inputs[im];
5529 FminFmaxFloatHelper(n, m,
5530 MinMaxHelper(n, m, true),
5531 MinMaxHelper(n, m, false),
5532 MinMaxHelper(n, m, true, kFP32PositiveInfinity),
5533 MinMaxHelper(n, m, false, kFP32NegativeInfinity));
5534 }
5535 }
5536 }
5537
5538
5539 TEST(fccmp) {
5540 INIT_V8();
5541 SETUP();
5542
5543 START();
5544 __ Fmov(s16, 0.0);
5545 __ Fmov(s17, 0.5);
5546 __ Fmov(d18, -0.5);
5547 __ Fmov(d19, -1.0);
5548 __ Mov(x20, 0);
5549
5550 __ Cmp(x20, 0);
5551 __ Fccmp(s16, s16, NoFlag, eq);
5552 __ Mrs(x0, NZCV);
5553
5554 __ Cmp(x20, 0);
5555 __ Fccmp(s16, s16, VFlag, ne);
5556 __ Mrs(x1, NZCV);
5557
5558 __ Cmp(x20, 0);
5559 __ Fccmp(s16, s17, CFlag, ge);
5560 __ Mrs(x2, NZCV);
5561
5562 __ Cmp(x20, 0);
5563 __ Fccmp(s16, s17, CVFlag, lt);
5564 __ Mrs(x3, NZCV);
5565
5566 __ Cmp(x20, 0);
5567 __ Fccmp(d18, d18, ZFlag, le);
5568 __ Mrs(x4, NZCV);
5569
5570 __ Cmp(x20, 0);
5571 __ Fccmp(d18, d18, ZVFlag, gt);
5572 __ Mrs(x5, NZCV);
5573
5574 __ Cmp(x20, 0);
5575 __ Fccmp(d18, d19, ZCVFlag, ls);
5576 __ Mrs(x6, NZCV);
5577
5578 __ Cmp(x20, 0);
5579 __ Fccmp(d18, d19, NFlag, hi);
5580 __ Mrs(x7, NZCV);
5581
5582 __ fccmp(s16, s16, NFlag, al);
5583 __ Mrs(x8, NZCV);
5584
5585 __ fccmp(d18, d18, NFlag, nv);
5586 __ Mrs(x9, NZCV);
5587
5588 END();
5589
5590 RUN();
5591
5592 ASSERT_EQUAL_32(ZCFlag, w0);
5593 ASSERT_EQUAL_32(VFlag, w1);
5594 ASSERT_EQUAL_32(NFlag, w2);
5595 ASSERT_EQUAL_32(CVFlag, w3);
5596 ASSERT_EQUAL_32(ZCFlag, w4);
5597 ASSERT_EQUAL_32(ZVFlag, w5);
5598 ASSERT_EQUAL_32(CFlag, w6);
5599 ASSERT_EQUAL_32(NFlag, w7);
5600 ASSERT_EQUAL_32(ZCFlag, w8);
5601 ASSERT_EQUAL_32(ZCFlag, w9);
5602
5603 TEARDOWN();
5604 }
5605
5606
5607 TEST(fcmp) {
5608 INIT_V8();
5609 SETUP();
5610
5611 START();
5612
5613 // Fcmp with a non-zero immediate needs a floating-point scratch register to
5614 // materialize the immediate; register comparisons and Fcmp with 0.0 do not.
5615 __ SetFPScratchRegister(NoFPReg);
5616
5617 __ Fmov(s8, 0.0);
5618 __ Fmov(s9, 0.5);
5619 __ Mov(w18, 0x7f800001); // Single precision NaN.
5620 __ Fmov(s18, w18);
5621
5622 __ Fcmp(s8, s8);
5623 __ Mrs(x0, NZCV);
5624 __ Fcmp(s8, s9);
5625 __ Mrs(x1, NZCV);
5626 __ Fcmp(s9, s8);
5627 __ Mrs(x2, NZCV);
5628 __ Fcmp(s8, s18);
5629 __ Mrs(x3, NZCV);
5630 __ Fcmp(s18, s18);
5631 __ Mrs(x4, NZCV);
5632 __ Fcmp(s8, 0.0);
5633 __ Mrs(x5, NZCV);
5634 __ SetFPScratchRegister(d0);
5635 __ Fcmp(s8, 255.0);
5636 __ SetFPScratchRegister(NoFPReg);
5637 __ Mrs(x6, NZCV);
5638
5639 __ Fmov(d19, 0.0);
5640 __ Fmov(d20, 0.5);
5641 __ Mov(x21, 0x7ff0000000000001UL); // Double precision NaN.
5642 __ Fmov(d21, x21);
5643
5644 __ Fcmp(d19, d19);
5645 __ Mrs(x10, NZCV);
5646 __ Fcmp(d19, d20);
5647 __ Mrs(x11, NZCV);
5648 __ Fcmp(d20, d19);
5649 __ Mrs(x12, NZCV);
5650 __ Fcmp(d19, d21);
5651 __ Mrs(x13, NZCV);
5652 __ Fcmp(d21, d21);
5653 __ Mrs(x14, NZCV);
5654 __ Fcmp(d19, 0.0);
5655 __ Mrs(x15, NZCV);
5656 __ SetFPScratchRegister(d0);
5657 __ Fcmp(d19, 12.3456);
5658 __ SetFPScratchRegister(NoFPReg);
5659 __ Mrs(x16, NZCV);
5660 END();
5661
5662 RUN();
5663
5664 ASSERT_EQUAL_32(ZCFlag, w0);
5665 ASSERT_EQUAL_32(NFlag, w1);
5666 ASSERT_EQUAL_32(CFlag, w2);
5667 ASSERT_EQUAL_32(CVFlag, w3);
5668 ASSERT_EQUAL_32(CVFlag, w4);
5669 ASSERT_EQUAL_32(ZCFlag, w5);
5670 ASSERT_EQUAL_32(NFlag, w6);
5671 ASSERT_EQUAL_32(ZCFlag, w10);
5672 ASSERT_EQUAL_32(NFlag, w11);
5673 ASSERT_EQUAL_32(CFlag, w12);
5674 ASSERT_EQUAL_32(CVFlag, w13);
5675 ASSERT_EQUAL_32(CVFlag, w14);
5676 ASSERT_EQUAL_32(ZCFlag, w15);
5677 ASSERT_EQUAL_32(NFlag, w16);
5678
5679 TEARDOWN();
5680 }
5681
5682
5683 TEST(fcsel) {
5684 INIT_V8();
5685 SETUP();
5686
5687 START();
5688 __ Mov(x16, 0);
5689 __ Fmov(s16, 1.0);
5690 __ Fmov(s17, 2.0);
5691 __ Fmov(d18, 3.0);
5692 __ Fmov(d19, 4.0);
5693
5694 __ Cmp(x16, 0);
5695 __ Fcsel(s0, s16, s17, eq);
5696 __ Fcsel(s1, s16, s17, ne);
5697 __ Fcsel(d2, d18, d19, eq);
5698 __ Fcsel(d3, d18, d19, ne);
5699 __ fcsel(s4, s16, s17, al);
5700 __ fcsel(d5, d18, d19, nv);
5701 END();
5702
5703 RUN();
5704
5705 ASSERT_EQUAL_FP32(1.0, s0);
5706 ASSERT_EQUAL_FP32(2.0, s1);
5707 ASSERT_EQUAL_FP64(3.0, d2);
5708 ASSERT_EQUAL_FP64(4.0, d3);
5709 ASSERT_EQUAL_FP32(1.0, s4);
5710 ASSERT_EQUAL_FP64(3.0, d5);
5711
5712 TEARDOWN();
5713 }
5714
5715
5716 TEST(fneg) {
5717 INIT_V8();
5718 SETUP();
5719
5720 START();
5721 __ Fmov(s16, 1.0);
5722 __ Fmov(s17, 0.0);
5723 __ Fmov(s18, kFP32PositiveInfinity);
5724 __ Fmov(d19, 1.0);
5725 __ Fmov(d20, 0.0);
5726 __ Fmov(d21, kFP64PositiveInfinity);
5727
5728 __ Fneg(s0, s16);
5729 __ Fneg(s1, s0);
5730 __ Fneg(s2, s17);
5731 __ Fneg(s3, s2);
5732 __ Fneg(s4, s18);
5733 __ Fneg(s5, s4);
5734 __ Fneg(d6, d19);
5735 __ Fneg(d7, d6);
5736 __ Fneg(d8, d20);
5737 __ Fneg(d9, d8);
5738 __ Fneg(d10, d21);
5739 __ Fneg(d11, d10);
5740 END();
5741
5742 RUN();
5743
5744 ASSERT_EQUAL_FP32(-1.0, s0);
5745 ASSERT_EQUAL_FP32(1.0, s1);
5746 ASSERT_EQUAL_FP32(-0.0, s2);
5747 ASSERT_EQUAL_FP32(0.0, s3);
5748 ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s4);
5749 ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s5);
5750 ASSERT_EQUAL_FP64(-1.0, d6);
5751 ASSERT_EQUAL_FP64(1.0, d7);
5752 ASSERT_EQUAL_FP64(-0.0, d8);
5753 ASSERT_EQUAL_FP64(0.0, d9);
5754 ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d10);
5755 ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d11);
5756
5757 TEARDOWN();
5758 }
5759
5760
5761 TEST(fabs) {
5762 INIT_V8();
5763 SETUP();
5764
5765 START();
5766 __ Fmov(s16, -1.0);
5767 __ Fmov(s17, -0.0);
5768 __ Fmov(s18, kFP32NegativeInfinity);
5769 __ Fmov(d19, -1.0);
5770 __ Fmov(d20, -0.0);
5771 __ Fmov(d21, kFP64NegativeInfinity);
5772
5773 __ Fabs(s0, s16);
5774 __ Fabs(s1, s0);
5775 __ Fabs(s2, s17);
5776 __ Fabs(s3, s18);
5777 __ Fabs(d4, d19);
5778 __ Fabs(d5, d4);
5779 __ Fabs(d6, d20);
5780 __ Fabs(d7, d21);
5781 END();
5782
5783 RUN();
5784
5785 ASSERT_EQUAL_FP32(1.0, s0);
5786 ASSERT_EQUAL_FP32(1.0, s1);
5787 ASSERT_EQUAL_FP32(0.0, s2);
5788 ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s3);
5789 ASSERT_EQUAL_FP64(1.0, d4);
5790 ASSERT_EQUAL_FP64(1.0, d5);
5791 ASSERT_EQUAL_FP64(0.0, d6);
5792 ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d7);
5793
5794 TEARDOWN();
5795 }
5796
5797
5798 TEST(fsqrt) {
5799 INIT_V8();
5800 SETUP();
5801
5802 START();
5803 __ Fmov(s16, 0.0);
5804 __ Fmov(s17, 1.0);
5805 __ Fmov(s18, 0.25);
5806 __ Fmov(s19, 65536.0);
5807 __ Fmov(s20, -0.0);
5808 __ Fmov(s21, kFP32PositiveInfinity);
5809 __ Fmov(d22, 0.0);
5810 __ Fmov(d23, 1.0);
5811 __ Fmov(d24, 0.25);
5812 __ Fmov(d25, 4294967296.0);
5813 __ Fmov(d26, -0.0);
5814 __ Fmov(d27, kFP64PositiveInfinity);
5815
5816 __ Fsqrt(s0, s16);
5817 __ Fsqrt(s1, s17);
5818 __ Fsqrt(s2, s18);
5819 __ Fsqrt(s3, s19);
5820 __ Fsqrt(s4, s20);
5821 __ Fsqrt(s5, s21);
5822 __ Fsqrt(d6, d22);
5823 __ Fsqrt(d7, d23);
5824 __ Fsqrt(d8, d24);
5825 __ Fsqrt(d9, d25);
5826 __ Fsqrt(d10, d26);
5827 __ Fsqrt(d11, d27);
5828 END();
5829
5830 RUN();
5831
5832 ASSERT_EQUAL_FP32(0.0, s0);
5833 ASSERT_EQUAL_FP32(1.0, s1);
5834 ASSERT_EQUAL_FP32(0.5, s2);
5835 ASSERT_EQUAL_FP32(256.0, s3);
5836 ASSERT_EQUAL_FP32(-0.0, s4);
5837 ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s5);
5838 ASSERT_EQUAL_FP64(0.0, d6);
5839 ASSERT_EQUAL_FP64(1.0, d7);
5840 ASSERT_EQUAL_FP64(0.5, d8);
5841 ASSERT_EQUAL_FP64(65536.0, d9);
5842 ASSERT_EQUAL_FP64(-0.0, d10);
5843 ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d11);
5844
5845 TEARDOWN();
5846 }
5847
5848
5849 TEST(frinta) {
5850 INIT_V8();
5851 SETUP();
5852
5853 START();
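  // Frinta rounds to the nearest integral value, with ties away from zero,
  // so 2.5 -> 3.0 and -2.5 -> -3.0.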
5854 __ Fmov(s16, 1.0);
5855 __ Fmov(s17, 1.1);
5856 __ Fmov(s18, 1.5);
5857 __ Fmov(s19, 1.9);
5858 __ Fmov(s20, 2.5);
5859 __ Fmov(s21, -1.5);
5860 __ Fmov(s22, -2.5);
5861 __ Fmov(s23, kFP32PositiveInfinity);
5862 __ Fmov(s24, kFP32NegativeInfinity);
5863 __ Fmov(s25, 0.0);
5864 __ Fmov(s26, -0.0);
5865
5866 __ Frinta(s0, s16);
5867 __ Frinta(s1, s17);
5868 __ Frinta(s2, s18);
5869 __ Frinta(s3, s19);
5870 __ Frinta(s4, s20);
5871 __ Frinta(s5, s21);
5872 __ Frinta(s6, s22);
5873 __ Frinta(s7, s23);
5874 __ Frinta(s8, s24);
5875 __ Frinta(s9, s25);
5876 __ Frinta(s10, s26);
5877
5878 __ Fmov(d16, 1.0);
5879 __ Fmov(d17, 1.1);
5880 __ Fmov(d18, 1.5);
5881 __ Fmov(d19, 1.9);
5882 __ Fmov(d20, 2.5);
5883 __ Fmov(d21, -1.5);
5884 __ Fmov(d22, -2.5);
5885 __ Fmov(d23, kFP64PositiveInfinity);
5886 __ Fmov(d24, kFP64NegativeInfinity);
5887 __ Fmov(d25, 0.0);
5888 __ Fmov(d26, -0.0);
5889
5890 __ Frinta(d11, d16);
5891 __ Frinta(d12, d17);
5892 __ Frinta(d13, d18);
5893 __ Frinta(d14, d19);
5894 __ Frinta(d15, d20);
5895 __ Frinta(d16, d21);
5896 __ Frinta(d17, d22);
5897 __ Frinta(d18, d23);
5898 __ Frinta(d19, d24);
5899 __ Frinta(d20, d25);
5900 __ Frinta(d21, d26);
5901 END();
5902
5903 RUN();
5904
5905 ASSERT_EQUAL_FP32(1.0, s0);
5906 ASSERT_EQUAL_FP32(1.0, s1);
5907 ASSERT_EQUAL_FP32(2.0, s2);
5908 ASSERT_EQUAL_FP32(2.0, s3);
5909 ASSERT_EQUAL_FP32(3.0, s4);
5910 ASSERT_EQUAL_FP32(-2.0, s5);
5911 ASSERT_EQUAL_FP32(-3.0, s6);
5912 ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s7);
5913 ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s8);
5914 ASSERT_EQUAL_FP32(0.0, s9);
5915 ASSERT_EQUAL_FP32(-0.0, s10);
5916 ASSERT_EQUAL_FP64(1.0, d11);
5917 ASSERT_EQUAL_FP64(1.0, d12);
5918 ASSERT_EQUAL_FP64(2.0, d13);
5919 ASSERT_EQUAL_FP64(2.0, d14);
5920 ASSERT_EQUAL_FP64(3.0, d15);
5921 ASSERT_EQUAL_FP64(-2.0, d16);
5922 ASSERT_EQUAL_FP64(-3.0, d17);
5923 ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d18);
5924 ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d19);
5925 ASSERT_EQUAL_FP64(0.0, d20);
5926 ASSERT_EQUAL_FP64(-0.0, d21);
5927
5928 TEARDOWN();
5929 }
5930
5931
5932 TEST(frintn) {
5933 INIT_V8();
5934 SETUP();
5935
5936 START();
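  // Frintn rounds to the nearest integral value, with ties to even, so both
  // 1.5 and 2.5 round to 2.0.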
5937 __ Fmov(s16, 1.0);
5938 __ Fmov(s17, 1.1);
5939 __ Fmov(s18, 1.5);
5940 __ Fmov(s19, 1.9);
5941 __ Fmov(s20, 2.5);
5942 __ Fmov(s21, -1.5);
5943 __ Fmov(s22, -2.5);
5944 __ Fmov(s23, kFP32PositiveInfinity);
5945 __ Fmov(s24, kFP32NegativeInfinity);
5946 __ Fmov(s25, 0.0);
5947 __ Fmov(s26, -0.0);
5948
5949 __ Frintn(s0, s16);
5950 __ Frintn(s1, s17);
5951 __ Frintn(s2, s18);
5952 __ Frintn(s3, s19);
5953 __ Frintn(s4, s20);
5954 __ Frintn(s5, s21);
5955 __ Frintn(s6, s22);
5956 __ Frintn(s7, s23);
5957 __ Frintn(s8, s24);
5958 __ Frintn(s9, s25);
5959 __ Frintn(s10, s26);
5960
5961 __ Fmov(d16, 1.0);
5962 __ Fmov(d17, 1.1);
5963 __ Fmov(d18, 1.5);
5964 __ Fmov(d19, 1.9);
5965 __ Fmov(d20, 2.5);
5966 __ Fmov(d21, -1.5);
5967 __ Fmov(d22, -2.5);
5968 __ Fmov(d23, kFP64PositiveInfinity);
5969 __ Fmov(d24, kFP64NegativeInfinity);
5970 __ Fmov(d25, 0.0);
5971 __ Fmov(d26, -0.0);
5972
5973 __ Frintn(d11, d16);
5974 __ Frintn(d12, d17);
5975 __ Frintn(d13, d18);
5976 __ Frintn(d14, d19);
5977 __ Frintn(d15, d20);
5978 __ Frintn(d16, d21);
5979 __ Frintn(d17, d22);
5980 __ Frintn(d18, d23);
5981 __ Frintn(d19, d24);
5982 __ Frintn(d20, d25);
5983 __ Frintn(d21, d26);
5984 END();
5985
5986 RUN();
5987
5988 ASSERT_EQUAL_FP32(1.0, s0);
5989 ASSERT_EQUAL_FP32(1.0, s1);
5990 ASSERT_EQUAL_FP32(2.0, s2);
5991 ASSERT_EQUAL_FP32(2.0, s3);
5992 ASSERT_EQUAL_FP32(2.0, s4);
5993 ASSERT_EQUAL_FP32(-2.0, s5);
5994 ASSERT_EQUAL_FP32(-2.0, s6);
5995 ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s7);
5996 ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s8);
5997 ASSERT_EQUAL_FP32(0.0, s9);
5998 ASSERT_EQUAL_FP32(-0.0, s10);
5999 ASSERT_EQUAL_FP64(1.0, d11);
6000 ASSERT_EQUAL_FP64(1.0, d12);
6001 ASSERT_EQUAL_FP64(2.0, d13);
6002 ASSERT_EQUAL_FP64(2.0, d14);
6003 ASSERT_EQUAL_FP64(2.0, d15);
6004 ASSERT_EQUAL_FP64(-2.0, d16);
6005 ASSERT_EQUAL_FP64(-2.0, d17);
6006 ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d18);
6007 ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d19);
6008 ASSERT_EQUAL_FP64(0.0, d20);
6009 ASSERT_EQUAL_FP64(-0.0, d21);
6010
6011 TEARDOWN();
6012 }
6013
6014
6015 TEST(frintz) {
6016 INIT_V8();
6017 SETUP();
6018
6019 START();
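  // Frintz rounds toward zero, so 1.9 -> 1.0 and -1.5 -> -1.0.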
6020 __ Fmov(s16, 1.0);
6021 __ Fmov(s17, 1.1);
6022 __ Fmov(s18, 1.5);
6023 __ Fmov(s19, 1.9);
6024 __ Fmov(s20, 2.5);
6025 __ Fmov(s21, -1.5);
6026 __ Fmov(s22, -2.5);
6027 __ Fmov(s23, kFP32PositiveInfinity);
6028 __ Fmov(s24, kFP32NegativeInfinity);
6029 __ Fmov(s25, 0.0);
6030 __ Fmov(s26, -0.0);
6031
6032 __ Frintz(s0, s16);
6033 __ Frintz(s1, s17);
6034 __ Frintz(s2, s18);
6035 __ Frintz(s3, s19);
6036 __ Frintz(s4, s20);
6037 __ Frintz(s5, s21);
6038 __ Frintz(s6, s22);
6039 __ Frintz(s7, s23);
6040 __ Frintz(s8, s24);
6041 __ Frintz(s9, s25);
6042 __ Frintz(s10, s26);
6043
6044 __ Fmov(d16, 1.0);
6045 __ Fmov(d17, 1.1);
6046 __ Fmov(d18, 1.5);
6047 __ Fmov(d19, 1.9);
6048 __ Fmov(d20, 2.5);
6049 __ Fmov(d21, -1.5);
6050 __ Fmov(d22, -2.5);
6051 __ Fmov(d23, kFP64PositiveInfinity);
6052 __ Fmov(d24, kFP64NegativeInfinity);
6053 __ Fmov(d25, 0.0);
6054 __ Fmov(d26, -0.0);
6055
6056 __ Frintz(d11, d16);
6057 __ Frintz(d12, d17);
6058 __ Frintz(d13, d18);
6059 __ Frintz(d14, d19);
6060 __ Frintz(d15, d20);
6061 __ Frintz(d16, d21);
6062 __ Frintz(d17, d22);
6063 __ Frintz(d18, d23);
6064 __ Frintz(d19, d24);
6065 __ Frintz(d20, d25);
6066 __ Frintz(d21, d26);
6067 END();
6068
6069 RUN();
6070
6071 ASSERT_EQUAL_FP32(1.0, s0);
6072 ASSERT_EQUAL_FP32(1.0, s1);
6073 ASSERT_EQUAL_FP32(1.0, s2);
6074 ASSERT_EQUAL_FP32(1.0, s3);
6075 ASSERT_EQUAL_FP32(2.0, s4);
6076 ASSERT_EQUAL_FP32(-1.0, s5);
6077 ASSERT_EQUAL_FP32(-2.0, s6);
6078 ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s7);
6079 ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s8);
6080 ASSERT_EQUAL_FP32(0.0, s9);
6081 ASSERT_EQUAL_FP32(-0.0, s10);
6082 ASSERT_EQUAL_FP64(1.0, d11);
6083 ASSERT_EQUAL_FP64(1.0, d12);
6084 ASSERT_EQUAL_FP64(1.0, d13);
6085 ASSERT_EQUAL_FP64(1.0, d14);
6086 ASSERT_EQUAL_FP64(2.0, d15);
6087 ASSERT_EQUAL_FP64(-1.0, d16);
6088 ASSERT_EQUAL_FP64(-2.0, d17);
6089 ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d18);
6090 ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d19);
6091 ASSERT_EQUAL_FP64(0.0, d20);
6092 ASSERT_EQUAL_FP64(-0.0, d21);
6093
6094 TEARDOWN();
6095 }
6096
6097
6098 TEST(fcvt_ds) {
6099 INIT_V8();
6100 SETUP();
6101
6102 START();
6103 __ Fmov(s16, 1.0);
6104 __ Fmov(s17, 1.1);
6105 __ Fmov(s18, 1.5);
6106 __ Fmov(s19, 1.9);
6107 __ Fmov(s20, 2.5);
6108 __ Fmov(s21, -1.5);
6109 __ Fmov(s22, -2.5);
6110 __ Fmov(s23, kFP32PositiveInfinity);
6111 __ Fmov(s24, kFP32NegativeInfinity);
6112 __ Fmov(s25, 0.0);
6113 __ Fmov(s26, -0.0);
6114 __ Fmov(s27, FLT_MAX);
6115 __ Fmov(s28, FLT_MIN);
6116 __ Fmov(s29, rawbits_to_float(0x7fc12345)); // Quiet NaN.
6117 __ Fmov(s30, rawbits_to_float(0x7f812345)); // Signalling NaN.
6118
6119 __ Fcvt(d0, s16);
6120 __ Fcvt(d1, s17);
6121 __ Fcvt(d2, s18);
6122 __ Fcvt(d3, s19);
6123 __ Fcvt(d4, s20);
6124 __ Fcvt(d5, s21);
6125 __ Fcvt(d6, s22);
6126 __ Fcvt(d7, s23);
6127 __ Fcvt(d8, s24);
6128 __ Fcvt(d9, s25);
6129 __ Fcvt(d10, s26);
6130 __ Fcvt(d11, s27);
6131 __ Fcvt(d12, s28);
6132 __ Fcvt(d13, s29);
6133 __ Fcvt(d14, s30);
6134 END();
6135
6136 RUN();
6137
6138 ASSERT_EQUAL_FP64(1.0f, d0);
6139 ASSERT_EQUAL_FP64(1.1f, d1);
6140 ASSERT_EQUAL_FP64(1.5f, d2);
6141 ASSERT_EQUAL_FP64(1.9f, d3);
6142 ASSERT_EQUAL_FP64(2.5f, d4);
6143 ASSERT_EQUAL_FP64(-1.5f, d5);
6144 ASSERT_EQUAL_FP64(-2.5f, d6);
6145 ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d7);
6146 ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d8);
6147 ASSERT_EQUAL_FP64(0.0f, d9);
6148 ASSERT_EQUAL_FP64(-0.0f, d10);
6149 ASSERT_EQUAL_FP64(FLT_MAX, d11);
6150 ASSERT_EQUAL_FP64(FLT_MIN, d12);
6151
6152 // Check that the NaN payload is preserved according to A64 conversion rules:
6153 // - The sign bit is preserved.
6154 // - The top bit of the mantissa is forced to 1 (making it a quiet NaN).
6155 // - The remaining mantissa bits are copied until they run out.
6156 // - The low-order bits that haven't already been assigned are set to 0.
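  // For example, the quiet NaN 0x7fc12345 has mantissa 0x412345 (quiet bit
  // already set); shifting it into the top of the double mantissa
  // (<< (52 - 23)) and prepending the 0x7ff exponent gives
  // 0x7ff82468a0000000, as checked below. The signalling NaN 0x7f812345
  // gains the quiet bit and yields the same value.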
6157 ASSERT_EQUAL_FP64(rawbits_to_double(0x7ff82468a0000000), d13);
6158 ASSERT_EQUAL_FP64(rawbits_to_double(0x7ff82468a0000000), d14);
6159
6160 TEARDOWN();
6161 }
6162
6163
6164 TEST(fcvt_sd) {
6165 INIT_V8();
6166 // There are a huge number of corner-cases to check, so this test iterates
6167 // through a list. The list is then negated and checked again (since the sign
6168 // is irrelevant in ties-to-even rounding), so the list shouldn't include any
6169 // negative values.
6170 //
6171 // Note that this test only checks ties-to-even rounding, because that is all
6172 // that the simulator supports.
6173 struct {double in; float expected;} test[] = {
6174 // Check some simple conversions.
6175 {0.0, 0.0f},
6176 {1.0, 1.0f},
6177 {1.5, 1.5f},
6178 {2.0, 2.0f},
6179 {FLT_MAX, FLT_MAX},
6180 // - The smallest normalized float.
6181 {pow(2.0, -126), powf(2, -126)},
6182 // - Normal floats that need (ties-to-even) rounding.
6183 // For normalized numbers:
6184 // bit 29 (0x0000000020000000) is the lowest-order bit which will
6185 // fit in the float's mantissa.
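    // For example, 0x3ff0000010000000 has only the tie bit (bit 28) set
    // below the float mantissa, so it lies exactly halfway between two
    // adjacent floats and rounds to the even mantissa, 0x3f800000.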
6186 {rawbits_to_double(0x3ff0000000000000), rawbits_to_float(0x3f800000)},
6187 {rawbits_to_double(0x3ff0000000000001), rawbits_to_float(0x3f800000)},
6188 {rawbits_to_double(0x3ff0000010000000), rawbits_to_float(0x3f800000)},
6189 {rawbits_to_double(0x3ff0000010000001), rawbits_to_float(0x3f800001)},
6190 {rawbits_to_double(0x3ff0000020000000), rawbits_to_float(0x3f800001)},
6191 {rawbits_to_double(0x3ff0000020000001), rawbits_to_float(0x3f800001)},
6192 {rawbits_to_double(0x3ff0000030000000), rawbits_to_float(0x3f800002)},
6193 {rawbits_to_double(0x3ff0000030000001), rawbits_to_float(0x3f800002)},
6194 {rawbits_to_double(0x3ff0000040000000), rawbits_to_float(0x3f800002)},
6195 {rawbits_to_double(0x3ff0000040000001), rawbits_to_float(0x3f800002)},
6196 {rawbits_to_double(0x3ff0000050000000), rawbits_to_float(0x3f800002)},
6197 {rawbits_to_double(0x3ff0000050000001), rawbits_to_float(0x3f800003)},
6198 {rawbits_to_double(0x3ff0000060000000), rawbits_to_float(0x3f800003)},
6199 // - A mantissa that overflows into the exponent during rounding.
6200 {rawbits_to_double(0x3feffffff0000000), rawbits_to_float(0x3f800000)},
6201 // - The largest double that rounds to a normal float.
6202 {rawbits_to_double(0x47efffffefffffff), rawbits_to_float(0x7f7fffff)},
6203
6204 // Doubles that are too big for a float.
6205 {kFP64PositiveInfinity, kFP32PositiveInfinity},
6206 {DBL_MAX, kFP32PositiveInfinity},
6207 // - The smallest exponent that's too big for a float.
6208 {pow(2.0, 128), kFP32PositiveInfinity},
6209 // - This exponent is in range, but the value rounds to infinity.
6210 {rawbits_to_double(0x47effffff0000000), kFP32PositiveInfinity},
6211
6212 // Doubles that are too small for a float.
6213 // - The smallest (subnormal) double.
6214 {DBL_MIN, 0.0},
6215 // - The largest double which is too small for a subnormal float.
6216 {rawbits_to_double(0x3690000000000000), rawbits_to_float(0x00000000)},
6217
6218 // Normal doubles that become subnormal floats.
6219 // - The largest subnormal float.
6220 {rawbits_to_double(0x380fffffc0000000), rawbits_to_float(0x007fffff)},
6221 // - The smallest subnormal float.
6222 {rawbits_to_double(0x36a0000000000000), rawbits_to_float(0x00000001)},
6223 // - Subnormal floats that need (ties-to-even) rounding.
6224 // For these subnormals:
6225 // bit 34 (0x0000000400000000) is the lowest-order bit which will
6226 // fit in the float's mantissa.
6227 {rawbits_to_double(0x37c159e000000000), rawbits_to_float(0x00045678)},
6228 {rawbits_to_double(0x37c159e000000001), rawbits_to_float(0x00045678)},
6229 {rawbits_to_double(0x37c159e200000000), rawbits_to_float(0x00045678)},
6230 {rawbits_to_double(0x37c159e200000001), rawbits_to_float(0x00045679)},
6231 {rawbits_to_double(0x37c159e400000000), rawbits_to_float(0x00045679)},
6232 {rawbits_to_double(0x37c159e400000001), rawbits_to_float(0x00045679)},
6233 {rawbits_to_double(0x37c159e600000000), rawbits_to_float(0x0004567a)},
6234 {rawbits_to_double(0x37c159e600000001), rawbits_to_float(0x0004567a)},
6235 {rawbits_to_double(0x37c159e800000000), rawbits_to_float(0x0004567a)},
6236 {rawbits_to_double(0x37c159e800000001), rawbits_to_float(0x0004567a)},
6237 {rawbits_to_double(0x37c159ea00000000), rawbits_to_float(0x0004567a)},
6238 {rawbits_to_double(0x37c159ea00000001), rawbits_to_float(0x0004567b)},
6239 {rawbits_to_double(0x37c159ec00000000), rawbits_to_float(0x0004567b)},
6240 // - The smallest double which rounds up to become a subnormal float.
6241 {rawbits_to_double(0x3690000000000001), rawbits_to_float(0x00000001)},
6242
6243 // Check NaN payload preservation.
6244 {rawbits_to_double(0x7ff82468a0000000), rawbits_to_float(0x7fc12345)},
6245 {rawbits_to_double(0x7ff82468bfffffff), rawbits_to_float(0x7fc12345)},
6246 // - Signalling NaNs become quiet NaNs.
6247 {rawbits_to_double(0x7ff02468a0000000), rawbits_to_float(0x7fc12345)},
6248 {rawbits_to_double(0x7ff02468bfffffff), rawbits_to_float(0x7fc12345)},
6249 {rawbits_to_double(0x7ff000001fffffff), rawbits_to_float(0x7fc00000)},
6250 };
6251 int count = sizeof(test) / sizeof(test[0]);
6252
6253 for (int i = 0; i < count; i++) {
6254 double in = test[i].in;
6255 float expected = test[i].expected;
6256
6257 // We only expect positive input.
6258 ASSERT(std::signbit(in) == 0);
6259 ASSERT(std::signbit(expected) == 0);
6260
6261 SETUP();
6262 START();
6263
6264 __ Fmov(d10, in);
6265 __ Fcvt(s20, d10);
6266
6267 __ Fmov(d11, -in);
6268 __ Fcvt(s21, d11);
6269
6270 END();
6271 RUN();
6272 ASSERT_EQUAL_FP32(expected, s20);
6273 ASSERT_EQUAL_FP32(-expected, s21);
6274 TEARDOWN();
6275 }
6276 }
6277
6278
6279 TEST(fcvtas) {
6280 INIT_V8();
6281 SETUP();
6282
6283 START();
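  // Fcvtas converts to a signed integer, rounding to nearest with ties away
  // from zero, and saturates on overflow.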
6284 __ Fmov(s0, 1.0);
6285 __ Fmov(s1, 1.1);
6286 __ Fmov(s2, 2.5);
6287 __ Fmov(s3, -2.5);
6288 __ Fmov(s4, kFP32PositiveInfinity);
6289 __ Fmov(s5, kFP32NegativeInfinity);
6290 __ Fmov(s6, 0x7fffff80); // Largest float < INT32_MAX.
6291 __ Fneg(s7, s6); // Smallest float > INT32_MIN.
6292 __ Fmov(d8, 1.0);
6293 __ Fmov(d9, 1.1);
6294 __ Fmov(d10, 2.5);
6295 __ Fmov(d11, -2.5);
6296 __ Fmov(d12, kFP64PositiveInfinity);
6297 __ Fmov(d13, kFP64NegativeInfinity);
6298 __ Fmov(d14, kWMaxInt - 1);
6299 __ Fmov(d15, kWMinInt + 1);
6300 __ Fmov(s17, 1.1);
6301 __ Fmov(s18, 2.5);
6302 __ Fmov(s19, -2.5);
6303 __ Fmov(s20, kFP32PositiveInfinity);
6304 __ Fmov(s21, kFP32NegativeInfinity);
6305 __ Fmov(s22, 0x7fffff8000000000UL); // Largest float < INT64_MAX.
6306 __ Fneg(s23, s22); // Smallest float > INT64_MIN.
6307 __ Fmov(d24, 1.1);
6308 __ Fmov(d25, 2.5);
6309 __ Fmov(d26, -2.5);
6310 __ Fmov(d27, kFP64PositiveInfinity);
6311 __ Fmov(d28, kFP64NegativeInfinity);
6312 __ Fmov(d29, 0x7ffffffffffffc00UL); // Largest double < INT64_MAX.
6313 __ Fneg(d30, d29); // Smallest double > INT64_MIN.
6314
6315 __ Fcvtas(w0, s0);
6316 __ Fcvtas(w1, s1);
6317 __ Fcvtas(w2, s2);
6318 __ Fcvtas(w3, s3);
6319 __ Fcvtas(w4, s4);
6320 __ Fcvtas(w5, s5);
6321 __ Fcvtas(w6, s6);
6322 __ Fcvtas(w7, s7);
6323 __ Fcvtas(w8, d8);
6324 __ Fcvtas(w9, d9);
6325 __ Fcvtas(w10, d10);
6326 __ Fcvtas(w11, d11);
6327 __ Fcvtas(w12, d12);
6328 __ Fcvtas(w13, d13);
6329 __ Fcvtas(w14, d14);
6330 __ Fcvtas(w15, d15);
6331 __ Fcvtas(x17, s17);
6332 __ Fcvtas(x18, s18);
6333 __ Fcvtas(x19, s19);
6334 __ Fcvtas(x20, s20);
6335 __ Fcvtas(x21, s21);
6336 __ Fcvtas(x22, s22);
6337 __ Fcvtas(x23, s23);
6338 __ Fcvtas(x24, d24);
6339 __ Fcvtas(x25, d25);
6340 __ Fcvtas(x26, d26);
6341 __ Fcvtas(x27, d27);
6342 __ Fcvtas(x28, d28);
6343 __ Fcvtas(x29, d29);
6344 __ Fcvtas(x30, d30);
6345 END();
6346
6347 RUN();
6348
6349 ASSERT_EQUAL_64(1, x0);
6350 ASSERT_EQUAL_64(1, x1);
6351 ASSERT_EQUAL_64(3, x2);
6352 ASSERT_EQUAL_64(0xfffffffd, x3);
6353 ASSERT_EQUAL_64(0x7fffffff, x4);
6354 ASSERT_EQUAL_64(0x80000000, x5);
6355 ASSERT_EQUAL_64(0x7fffff80, x6);
6356 ASSERT_EQUAL_64(0x80000080, x7);
6357 ASSERT_EQUAL_64(1, x8);
6358 ASSERT_EQUAL_64(1, x9);
6359 ASSERT_EQUAL_64(3, x10);
6360 ASSERT_EQUAL_64(0xfffffffd, x11);
6361 ASSERT_EQUAL_64(0x7fffffff, x12);
6362 ASSERT_EQUAL_64(0x80000000, x13);
6363 ASSERT_EQUAL_64(0x7ffffffe, x14);
6364 ASSERT_EQUAL_64(0x80000001, x15);
6365 ASSERT_EQUAL_64(1, x17);
6366 ASSERT_EQUAL_64(3, x18);
6367 ASSERT_EQUAL_64(0xfffffffffffffffdUL, x19);
6368 ASSERT_EQUAL_64(0x7fffffffffffffffUL, x20);
6369 ASSERT_EQUAL_64(0x8000000000000000UL, x21);
6370 ASSERT_EQUAL_64(0x7fffff8000000000UL, x22);
6371 ASSERT_EQUAL_64(0x8000008000000000UL, x23);
6372 ASSERT_EQUAL_64(1, x24);
6373 ASSERT_EQUAL_64(3, x25);
6374 ASSERT_EQUAL_64(0xfffffffffffffffdUL, x26);
6375 ASSERT_EQUAL_64(0x7fffffffffffffffUL, x27);
6376 ASSERT_EQUAL_64(0x8000000000000000UL, x28);
6377 ASSERT_EQUAL_64(0x7ffffffffffffc00UL, x29);
6378 ASSERT_EQUAL_64(0x8000000000000400UL, x30);
6379
6380 TEARDOWN();
6381 }
6382
6383
6384 TEST(fcvtau) {
6385 INIT_V8();
6386 SETUP();
6387
6388 START();
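  // Fcvtau is the unsigned counterpart of Fcvtas: results saturate at zero
  // and at the maximum unsigned value.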
6389 __ Fmov(s0, 1.0);
6390 __ Fmov(s1, 1.1);
6391 __ Fmov(s2, 2.5);
6392 __ Fmov(s3, -2.5);
6393 __ Fmov(s4, kFP32PositiveInfinity);
6394 __ Fmov(s5, kFP32NegativeInfinity);
6395 __ Fmov(s6, 0xffffff00); // Largest float < UINT32_MAX.
6396 __ Fmov(d8, 1.0);
6397 __ Fmov(d9, 1.1);
6398 __ Fmov(d10, 2.5);
6399 __ Fmov(d11, -2.5);
6400 __ Fmov(d12, kFP64PositiveInfinity);
6401 __ Fmov(d13, kFP64NegativeInfinity);
6402 __ Fmov(d14, 0xfffffffe);
6403 __ Fmov(s16, 1.0);
6404 __ Fmov(s17, 1.1);
6405 __ Fmov(s18, 2.5);
6406 __ Fmov(s19, -2.5);
6407 __ Fmov(s20, kFP32PositiveInfinity);
6408 __ Fmov(s21, kFP32NegativeInfinity);
6409 __ Fmov(s22, 0xffffff0000000000UL); // Largest float < UINT64_MAX.
6410 __ Fmov(d24, 1.1);
6411 __ Fmov(d25, 2.5);
6412 __ Fmov(d26, -2.5);
6413 __ Fmov(d27, kFP64PositiveInfinity);
6414 __ Fmov(d28, kFP64NegativeInfinity);
6415 __ Fmov(d29, 0xfffffffffffff800UL); // Largest double < UINT64_MAX.
6416 __ Fmov(s30, 0x100000000UL);
6417
6418 __ Fcvtau(w0, s0);
6419 __ Fcvtau(w1, s1);
6420 __ Fcvtau(w2, s2);
6421 __ Fcvtau(w3, s3);
6422 __ Fcvtau(w4, s4);
6423 __ Fcvtau(w5, s5);
6424 __ Fcvtau(w6, s6);
6425 __ Fcvtau(w8, d8);
6426 __ Fcvtau(w9, d9);
6427 __ Fcvtau(w10, d10);
6428 __ Fcvtau(w11, d11);
6429 __ Fcvtau(w12, d12);
6430 __ Fcvtau(w13, d13);
6431 __ Fcvtau(w14, d14);
6432 __ Fcvtau(w15, d15);
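// d15 is not initialized by this test, so x15 is not checked below.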
6433 __ Fcvtau(x16, s16);
6434 __ Fcvtau(x17, s17);
6435 __ Fcvtau(x18, s18);
6436 __ Fcvtau(x19, s19);
6437 __ Fcvtau(x20, s20);
6438 __ Fcvtau(x21, s21);
6439 __ Fcvtau(x22, s22);
6440 __ Fcvtau(x24, d24);
6441 __ Fcvtau(x25, d25);
6442 __ Fcvtau(x26, d26);
6443 __ Fcvtau(x27, d27);
6444 __ Fcvtau(x28, d28);
6445 __ Fcvtau(x29, d29);
6446 __ Fcvtau(w30, s30);
6447 END();
6448
6449 RUN();
6450
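// Fcvtau uses the same ties-away-from-zero rounding, but converts to an
// unsigned range, so negative inputs (including -infinity) saturate to 0.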
6451 ASSERT_EQUAL_64(1, x0);
6452 ASSERT_EQUAL_64(1, x1);
6453 ASSERT_EQUAL_64(3, x2);
6454 ASSERT_EQUAL_64(0, x3);
6455 ASSERT_EQUAL_64(0xffffffff, x4);
6456 ASSERT_EQUAL_64(0, x5);
6457 ASSERT_EQUAL_64(0xffffff00, x6);
6458 ASSERT_EQUAL_64(1, x8);
6459 ASSERT_EQUAL_64(1, x9);
6460 ASSERT_EQUAL_64(3, x10);
6461 ASSERT_EQUAL_64(0, x11);
6462 ASSERT_EQUAL_64(0xffffffff, x12);
6463 ASSERT_EQUAL_64(0, x13);
6464 ASSERT_EQUAL_64(0xfffffffe, x14);
6465 ASSERT_EQUAL_64(1, x16);
6466 ASSERT_EQUAL_64(1, x17);
6467 ASSERT_EQUAL_64(3, x18);
6468 ASSERT_EQUAL_64(0, x19);
6469 ASSERT_EQUAL_64(0xffffffffffffffffUL, x20);
6470 ASSERT_EQUAL_64(0, x21);
6471 ASSERT_EQUAL_64(0xffffff0000000000UL, x22);
6472 ASSERT_EQUAL_64(1, x24);
6473 ASSERT_EQUAL_64(3, x25);
6474 ASSERT_EQUAL_64(0, x26);
6475 ASSERT_EQUAL_64(0xffffffffffffffffUL, x27);
6476 ASSERT_EQUAL_64(0, x28);
6477 ASSERT_EQUAL_64(0xfffffffffffff800UL, x29);
6478 ASSERT_EQUAL_64(0xffffffff, x30);
6479
6480 TEARDOWN();
6481 }
6482
6483
6484 TEST(fcvtms) {
6485 INIT_V8();
6486 SETUP();
6487
6488 START();
6489 __ Fmov(s0, 1.0);
6490 __ Fmov(s1, 1.1);
6491 __ Fmov(s2, 1.5);
6492 __ Fmov(s3, -1.5);
6493 __ Fmov(s4, kFP32PositiveInfinity);
6494 __ Fmov(s5, kFP32NegativeInfinity);
6495 __ Fmov(s6, 0x7fffff80); // Largest float < INT32_MAX.
6496 __ Fneg(s7, s6); // Smallest float > INT32_MIN.
6497 __ Fmov(d8, 1.0);
6498 __ Fmov(d9, 1.1);
6499 __ Fmov(d10, 1.5);
6500 __ Fmov(d11, -1.5);
6501 __ Fmov(d12, kFP64PositiveInfinity);
6502 __ Fmov(d13, kFP64NegativeInfinity);
6503 __ Fmov(d14, kWMaxInt - 1);
6504 __ Fmov(d15, kWMinInt + 1);
6505 __ Fmov(s17, 1.1);
6506 __ Fmov(s18, 1.5);
6507 __ Fmov(s19, -1.5);
6508 __ Fmov(s20, kFP32PositiveInfinity);
6509 __ Fmov(s21, kFP32NegativeInfinity);
6510 __ Fmov(s22, 0x7fffff8000000000UL); // Largest float < INT64_MAX.
6511 __ Fneg(s23, s22); // Smallest float > INT64_MIN.
6512 __ Fmov(d24, 1.1);
6513 __ Fmov(d25, 1.5);
6514 __ Fmov(d26, -1.5);
6515 __ Fmov(d27, kFP64PositiveInfinity);
6516 __ Fmov(d28, kFP64NegativeInfinity);
6517 __ Fmov(d29, 0x7ffffffffffffc00UL); // Largest double < INT64_MAX.
6518 __ Fneg(d30, d29); // Smallest double > INT64_MIN.
6519
6520 __ Fcvtms(w0, s0);
6521 __ Fcvtms(w1, s1);
6522 __ Fcvtms(w2, s2);
6523 __ Fcvtms(w3, s3);
6524 __ Fcvtms(w4, s4);
6525 __ Fcvtms(w5, s5);
6526 __ Fcvtms(w6, s6);
6527 __ Fcvtms(w7, s7);
6528 __ Fcvtms(w8, d8);
6529 __ Fcvtms(w9, d9);
6530 __ Fcvtms(w10, d10);
6531 __ Fcvtms(w11, d11);
6532 __ Fcvtms(w12, d12);
6533 __ Fcvtms(w13, d13);
6534 __ Fcvtms(w14, d14);
6535 __ Fcvtms(w15, d15);
6536 __ Fcvtms(x17, s17);
6537 __ Fcvtms(x18, s18);
6538 __ Fcvtms(x19, s19);
6539 __ Fcvtms(x20, s20);
6540 __ Fcvtms(x21, s21);
6541 __ Fcvtms(x22, s22);
6542 __ Fcvtms(x23, s23);
6543 __ Fcvtms(x24, d24);
6544 __ Fcvtms(x25, d25);
6545 __ Fcvtms(x26, d26);
6546 __ Fcvtms(x27, d27);
6547 __ Fcvtms(x28, d28);
6548 __ Fcvtms(x29, d29);
6549 __ Fcvtms(x30, d30);
6550 END();
6551
6552 RUN();
6553
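// Fcvtms rounds towards minus infinity (floor), so 1.5 converts to 1 and
// -1.5 to -2 (0xfffffffe as a W register).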
6554 ASSERT_EQUAL_64(1, x0);
6555 ASSERT_EQUAL_64(1, x1);
6556 ASSERT_EQUAL_64(1, x2);
6557 ASSERT_EQUAL_64(0xfffffffe, x3);
6558 ASSERT_EQUAL_64(0x7fffffff, x4);
6559 ASSERT_EQUAL_64(0x80000000, x5);
6560 ASSERT_EQUAL_64(0x7fffff80, x6);
6561 ASSERT_EQUAL_64(0x80000080, x7);
6562 ASSERT_EQUAL_64(1, x8);
6563 ASSERT_EQUAL_64(1, x9);
6564 ASSERT_EQUAL_64(1, x10);
6565 ASSERT_EQUAL_64(0xfffffffe, x11);
6566 ASSERT_EQUAL_64(0x7fffffff, x12);
6567 ASSERT_EQUAL_64(0x80000000, x13);
6568 ASSERT_EQUAL_64(0x7ffffffe, x14);
6569 ASSERT_EQUAL_64(0x80000001, x15);
6570 ASSERT_EQUAL_64(1, x17);
6571 ASSERT_EQUAL_64(1, x18);
6572 ASSERT_EQUAL_64(0xfffffffffffffffeUL, x19);
6573 ASSERT_EQUAL_64(0x7fffffffffffffffUL, x20);
6574 ASSERT_EQUAL_64(0x8000000000000000UL, x21);
6575 ASSERT_EQUAL_64(0x7fffff8000000000UL, x22);
6576 ASSERT_EQUAL_64(0x8000008000000000UL, x23);
6577 ASSERT_EQUAL_64(1, x24);
6578 ASSERT_EQUAL_64(1, x25);
6579 ASSERT_EQUAL_64(0xfffffffffffffffeUL, x26);
6580 ASSERT_EQUAL_64(0x7fffffffffffffffUL, x27);
6581 ASSERT_EQUAL_64(0x8000000000000000UL, x28);
6582 ASSERT_EQUAL_64(0x7ffffffffffffc00UL, x29);
6583 ASSERT_EQUAL_64(0x8000000000000400UL, x30);
6584
6585 TEARDOWN();
6586 }
6587
6588
6589 TEST(fcvtmu) {
6590 INIT_V8();
6591 SETUP();
6592
6593 START();
6594 __ Fmov(s0, 1.0);
6595 __ Fmov(s1, 1.1);
6596 __ Fmov(s2, 1.5);
6597 __ Fmov(s3, -1.5);
6598 __ Fmov(s4, kFP32PositiveInfinity);
6599 __ Fmov(s5, kFP32NegativeInfinity);
6600 __ Fmov(s6, 0x7fffff80); // Largest float < INT32_MAX.
6601 __ Fneg(s7, s6); // Smallest float > INT32_MIN.
6602 __ Fmov(d8, 1.0);
6603 __ Fmov(d9, 1.1);
6604 __ Fmov(d10, 1.5);
6605 __ Fmov(d11, -1.5);
6606 __ Fmov(d12, kFP64PositiveInfinity);
6607 __ Fmov(d13, kFP64NegativeInfinity);
6608 __ Fmov(d14, kWMaxInt - 1);
6609 __ Fmov(d15, kWMinInt + 1);
6610 __ Fmov(s17, 1.1);
6611 __ Fmov(s18, 1.5);
6612 __ Fmov(s19, -1.5);
6613 __ Fmov(s20, kFP32PositiveInfinity);
6614 __ Fmov(s21, kFP32NegativeInfinity);
6615 __ Fmov(s22, 0x7fffff8000000000UL); // Largest float < INT64_MAX.
6616 __ Fneg(s23, s22); // Smallest float > INT64_MIN.
6617 __ Fmov(d24, 1.1);
6618 __ Fmov(d25, 1.5);
6619 __ Fmov(d26, -1.5);
6620 __ Fmov(d27, kFP64PositiveInfinity);
6621 __ Fmov(d28, kFP64NegativeInfinity);
6622 __ Fmov(d29, 0x7ffffffffffffc00UL); // Largest double < INT64_MAX.
6623 __ Fneg(d30, d29); // Smallest double > INT64_MIN.
6624
6625 __ Fcvtmu(w0, s0);
6626 __ Fcvtmu(w1, s1);
6627 __ Fcvtmu(w2, s2);
6628 __ Fcvtmu(w3, s3);
6629 __ Fcvtmu(w4, s4);
6630 __ Fcvtmu(w5, s5);
6631 __ Fcvtmu(w6, s6);
6632 __ Fcvtmu(w7, s7);
6633 __ Fcvtmu(w8, d8);
6634 __ Fcvtmu(w9, d9);
6635 __ Fcvtmu(w10, d10);
6636 __ Fcvtmu(w11, d11);
6637 __ Fcvtmu(w12, d12);
6638 __ Fcvtmu(w13, d13);
6639 __ Fcvtmu(w14, d14);
6640 __ Fcvtmu(x17, s17);
6641 __ Fcvtmu(x18, s18);
6642 __ Fcvtmu(x19, s19);
6643 __ Fcvtmu(x20, s20);
6644 __ Fcvtmu(x21, s21);
6645 __ Fcvtmu(x22, s22);
6646 __ Fcvtmu(x23, s23);
6647 __ Fcvtmu(x24, d24);
6648 __ Fcvtmu(x25, d25);
6649 __ Fcvtmu(x26, d26);
6650 __ Fcvtmu(x27, d27);
6651 __ Fcvtmu(x28, d28);
6652 __ Fcvtmu(x29, d29);
6653 __ Fcvtmu(x30, d30);
6654 END();
6655
6656 RUN();
6657
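// Fcvtmu also rounds towards minus infinity, but saturates to the unsigned
// range, so every negative input below converts to 0.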
6658 ASSERT_EQUAL_64(1, x0);
6659 ASSERT_EQUAL_64(1, x1);
6660 ASSERT_EQUAL_64(1, x2);
6661 ASSERT_EQUAL_64(0, x3);
6662 ASSERT_EQUAL_64(0xffffffff, x4);
6663 ASSERT_EQUAL_64(0, x5);
6664 ASSERT_EQUAL_64(0x7fffff80, x6);
6665 ASSERT_EQUAL_64(0, x7);
6666 ASSERT_EQUAL_64(1, x8);
6667 ASSERT_EQUAL_64(1, x9);
6668 ASSERT_EQUAL_64(1, x10);
6669 ASSERT_EQUAL_64(0, x11);
6670 ASSERT_EQUAL_64(0xffffffff, x12);
6671 ASSERT_EQUAL_64(0, x13);
6672 ASSERT_EQUAL_64(0x7ffffffe, x14);
6673 ASSERT_EQUAL_64(1, x17);
6674 ASSERT_EQUAL_64(1, x18);
6675 ASSERT_EQUAL_64(0x0UL, x19);
6676 ASSERT_EQUAL_64(0xffffffffffffffffUL, x20);
6677 ASSERT_EQUAL_64(0x0UL, x21);
6678 ASSERT_EQUAL_64(0x7fffff8000000000UL, x22);
6679 ASSERT_EQUAL_64(0x0UL, x23);
6680 ASSERT_EQUAL_64(1, x24);
6681 ASSERT_EQUAL_64(1, x25);
6682 ASSERT_EQUAL_64(0x0UL, x26);
6683 ASSERT_EQUAL_64(0xffffffffffffffffUL, x27);
6684 ASSERT_EQUAL_64(0x0UL, x28);
6685 ASSERT_EQUAL_64(0x7ffffffffffffc00UL, x29);
6686 ASSERT_EQUAL_64(0x0UL, x30);
6687
6688 TEARDOWN();
6689 }
6690
6691
6692 TEST(fcvtns) {
6693 INIT_V8();
6694 SETUP();
6695
6696 START();
6697 __ Fmov(s0, 1.0);
6698 __ Fmov(s1, 1.1);
6699 __ Fmov(s2, 1.5);
6700 __ Fmov(s3, -1.5);
6701 __ Fmov(s4, kFP32PositiveInfinity);
6702 __ Fmov(s5, kFP32NegativeInfinity);
6703 __ Fmov(s6, 0x7fffff80); // Largest float < INT32_MAX.
6704 __ Fneg(s7, s6); // Smallest float > INT32_MIN.
6705 __ Fmov(d8, 1.0);
6706 __ Fmov(d9, 1.1);
6707 __ Fmov(d10, 1.5);
6708 __ Fmov(d11, -1.5);
6709 __ Fmov(d12, kFP64PositiveInfinity);
6710 __ Fmov(d13, kFP64NegativeInfinity);
6711 __ Fmov(d14, kWMaxInt - 1);
6712 __ Fmov(d15, kWMinInt + 1);
6713 __ Fmov(s17, 1.1);
6714 __ Fmov(s18, 1.5);
6715 __ Fmov(s19, -1.5);
6716 __ Fmov(s20, kFP32PositiveInfinity);
6717 __ Fmov(s21, kFP32NegativeInfinity);
6718 __ Fmov(s22, 0x7fffff8000000000UL); // Largest float < INT64_MAX.
6719 __ Fneg(s23, s22); // Smallest float > INT64_MIN.
6720 __ Fmov(d24, 1.1);
6721 __ Fmov(d25, 1.5);
6722 __ Fmov(d26, -1.5);
6723 __ Fmov(d27, kFP64PositiveInfinity);
6724 __ Fmov(d28, kFP64NegativeInfinity);
6725 __ Fmov(d29, 0x7ffffffffffffc00UL); // Largest double < INT64_MAX.
6726 __ Fneg(d30, d29); // Smallest double > INT64_MIN.
6727
6728 __ Fcvtns(w0, s0);
6729 __ Fcvtns(w1, s1);
6730 __ Fcvtns(w2, s2);
6731 __ Fcvtns(w3, s3);
6732 __ Fcvtns(w4, s4);
6733 __ Fcvtns(w5, s5);
6734 __ Fcvtns(w6, s6);
6735 __ Fcvtns(w7, s7);
6736 __ Fcvtns(w8, d8);
6737 __ Fcvtns(w9, d9);
6738 __ Fcvtns(w10, d10);
6739 __ Fcvtns(w11, d11);
6740 __ Fcvtns(w12, d12);
6741 __ Fcvtns(w13, d13);
6742 __ Fcvtns(w14, d14);
6743 __ Fcvtns(w15, d15);
6744 __ Fcvtns(x17, s17);
6745 __ Fcvtns(x18, s18);
6746 __ Fcvtns(x19, s19);
6747 __ Fcvtns(x20, s20);
6748 __ Fcvtns(x21, s21);
6749 __ Fcvtns(x22, s22);
6750 __ Fcvtns(x23, s23);
6751 __ Fcvtns(x24, d24);
6752 __ Fcvtns(x25, d25);
6753 __ Fcvtns(x26, d26);
6754 __ Fcvtns(x27, d27);
6755 // __ Fcvtns(x28, d28);
6756 __ Fcvtns(x29, d29);
6757 __ Fcvtns(x30, d30);
6758 END();
6759
6760 RUN();
6761
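// Fcvtns rounds to the nearest integer, with ties to even, so the tie case
// 1.5 converts to the even neighbour 2, and -1.5 to -2.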
6762 ASSERT_EQUAL_64(1, x0);
6763 ASSERT_EQUAL_64(1, x1);
6764 ASSERT_EQUAL_64(2, x2);
6765 ASSERT_EQUAL_64(0xfffffffe, x3);
6766 ASSERT_EQUAL_64(0x7fffffff, x4);
6767 ASSERT_EQUAL_64(0x80000000, x5);
6768 ASSERT_EQUAL_64(0x7fffff80, x6);
6769 ASSERT_EQUAL_64(0x80000080, x7);
6770 ASSERT_EQUAL_64(1, x8);
6771 ASSERT_EQUAL_64(1, x9);
6772 ASSERT_EQUAL_64(2, x10);
6773 ASSERT_EQUAL_64(0xfffffffe, x11);
6774 ASSERT_EQUAL_64(0x7fffffff, x12);
6775 ASSERT_EQUAL_64(0x80000000, x13);
6776 ASSERT_EQUAL_64(0x7ffffffe, x14);
6777 ASSERT_EQUAL_64(0x80000001, x15);
6778 ASSERT_EQUAL_64(1, x17);
6779 ASSERT_EQUAL_64(2, x18);
6780 ASSERT_EQUAL_64(0xfffffffffffffffeUL, x19);
6781 ASSERT_EQUAL_64(0x7fffffffffffffffUL, x20);
6782 ASSERT_EQUAL_64(0x8000000000000000UL, x21);
6783 ASSERT_EQUAL_64(0x7fffff8000000000UL, x22);
6784 ASSERT_EQUAL_64(0x8000008000000000UL, x23);
6785 ASSERT_EQUAL_64(1, x24);
6786 ASSERT_EQUAL_64(2, x25);
6787 ASSERT_EQUAL_64(0xfffffffffffffffeUL, x26);
6788 ASSERT_EQUAL_64(0x7fffffffffffffffUL, x27);
6789 // ASSERT_EQUAL_64(0x8000000000000000UL, x28);
6790 ASSERT_EQUAL_64(0x7ffffffffffffc00UL, x29);
6791 ASSERT_EQUAL_64(0x8000000000000400UL, x30);
6792
6793 TEARDOWN();
6794 }
6795
6796
6797 TEST(fcvtnu) {
6798 INIT_V8();
6799 SETUP();
6800
6801 START();
6802 __ Fmov(s0, 1.0);
6803 __ Fmov(s1, 1.1);
6804 __ Fmov(s2, 1.5);
6805 __ Fmov(s3, -1.5);
6806 __ Fmov(s4, kFP32PositiveInfinity);
6807 __ Fmov(s5, kFP32NegativeInfinity);
6808 __ Fmov(s6, 0xffffff00); // Largest float < UINT32_MAX.
6809 __ Fmov(d8, 1.0);
6810 __ Fmov(d9, 1.1);
6811 __ Fmov(d10, 1.5);
6812 __ Fmov(d11, -1.5);
6813 __ Fmov(d12, kFP64PositiveInfinity);
6814 __ Fmov(d13, kFP64NegativeInfinity);
6815 __ Fmov(d14, 0xfffffffe);
6816 __ Fmov(s16, 1.0);
6817 __ Fmov(s17, 1.1);
6818 __ Fmov(s18, 1.5);
6819 __ Fmov(s19, -1.5);
6820 __ Fmov(s20, kFP32PositiveInfinity);
6821 __ Fmov(s21, kFP32NegativeInfinity);
6822 __ Fmov(s22, 0xffffff0000000000UL); // Largest float < UINT64_MAX.
6823 __ Fmov(d24, 1.1);
6824 __ Fmov(d25, 1.5);
6825 __ Fmov(d26, -1.5);
6826 __ Fmov(d27, kFP64PositiveInfinity);
6827 __ Fmov(d28, kFP64NegativeInfinity);
6828 __ Fmov(d29, 0xfffffffffffff800UL); // Largest double < UINT64_MAX.
6829 __ Fmov(s30, 0x100000000UL);
6830
6831 __ Fcvtnu(w0, s0);
6832 __ Fcvtnu(w1, s1);
6833 __ Fcvtnu(w2, s2);
6834 __ Fcvtnu(w3, s3);
6835 __ Fcvtnu(w4, s4);
6836 __ Fcvtnu(w5, s5);
6837 __ Fcvtnu(w6, s6);
6838 __ Fcvtnu(w8, d8);
6839 __ Fcvtnu(w9, d9);
6840 __ Fcvtnu(w10, d10);
6841 __ Fcvtnu(w11, d11);
6842 __ Fcvtnu(w12, d12);
6843 __ Fcvtnu(w13, d13);
6844 __ Fcvtnu(w14, d14);
6845 __ Fcvtnu(w15, d15);
6846 __ Fcvtnu(x16, s16);
6847 __ Fcvtnu(x17, s17);
6848 __ Fcvtnu(x18, s18);
6849 __ Fcvtnu(x19, s19);
6850 __ Fcvtnu(x20, s20);
6851 __ Fcvtnu(x21, s21);
6852 __ Fcvtnu(x22, s22);
6853 __ Fcvtnu(x24, d24);
6854 __ Fcvtnu(x25, d25);
6855 __ Fcvtnu(x26, d26);
6856 __ Fcvtnu(x27, d27);
6857 // __ Fcvtnu(x28, d28);
6858 __ Fcvtnu(x29, d29);
6859 __ Fcvtnu(w30, s30);
6860 END();
6861
6862 RUN();
6863
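// Fcvtnu applies the same ties-to-even rounding, with negative results
// saturating to 0 in the unsigned range.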
6864 ASSERT_EQUAL_64(1, x0);
6865 ASSERT_EQUAL_64(1, x1);
6866 ASSERT_EQUAL_64(2, x2);
6867 ASSERT_EQUAL_64(0, x3);
6868 ASSERT_EQUAL_64(0xffffffff, x4);
6869 ASSERT_EQUAL_64(0, x5);
6870 ASSERT_EQUAL_64(0xffffff00, x6);
6871 ASSERT_EQUAL_64(1, x8);
6872 ASSERT_EQUAL_64(1, x9);
6873 ASSERT_EQUAL_64(2, x10);
6874 ASSERT_EQUAL_64(0, x11);
6875 ASSERT_EQUAL_64(0xffffffff, x12);
6876 ASSERT_EQUAL_64(0, x13);
6877 ASSERT_EQUAL_64(0xfffffffe, x14);
6878 ASSERT_EQUAL_64(1, x16);
6879 ASSERT_EQUAL_64(1, x17);
6880 ASSERT_EQUAL_64(2, x18);
6881 ASSERT_EQUAL_64(0, x19);
6882 ASSERT_EQUAL_64(0xffffffffffffffffUL, x20);
6883 ASSERT_EQUAL_64(0, x21);
6884 ASSERT_EQUAL_64(0xffffff0000000000UL, x22);
6885 ASSERT_EQUAL_64(1, x24);
6886 ASSERT_EQUAL_64(2, x25);
6887 ASSERT_EQUAL_64(0, x26);
6888 ASSERT_EQUAL_64(0xffffffffffffffffUL, x27);
6889 // ASSERT_EQUAL_64(0, x28);
6890 ASSERT_EQUAL_64(0xfffffffffffff800UL, x29);
6891 ASSERT_EQUAL_64(0xffffffff, x30);
6892
6893 TEARDOWN();
6894 }
6895
6896
6897 TEST(fcvtzs) {
6898 INIT_V8();
6899 SETUP();
6900
6901 START();
6902 __ Fmov(s0, 1.0);
6903 __ Fmov(s1, 1.1);
6904 __ Fmov(s2, 1.5);
6905 __ Fmov(s3, -1.5);
6906 __ Fmov(s4, kFP32PositiveInfinity);
6907 __ Fmov(s5, kFP32NegativeInfinity);
6908 __ Fmov(s6, 0x7fffff80); // Largest float < INT32_MAX.
6909 __ Fneg(s7, s6); // Smallest float > INT32_MIN.
6910 __ Fmov(d8, 1.0);
6911 __ Fmov(d9, 1.1);
6912 __ Fmov(d10, 1.5);
6913 __ Fmov(d11, -1.5);
6914 __ Fmov(d12, kFP64PositiveInfinity);
6915 __ Fmov(d13, kFP64NegativeInfinity);
6916 __ Fmov(d14, kWMaxInt - 1);
6917 __ Fmov(d15, kWMinInt + 1);
6918 __ Fmov(s17, 1.1);
6919 __ Fmov(s18, 1.5);
6920 __ Fmov(s19, -1.5);
6921 __ Fmov(s20, kFP32PositiveInfinity);
6922 __ Fmov(s21, kFP32NegativeInfinity);
6923 __ Fmov(s22, 0x7fffff8000000000UL); // Largest float < INT64_MAX.
6924 __ Fneg(s23, s22); // Smallest float > INT64_MIN.
6925 __ Fmov(d24, 1.1);
6926 __ Fmov(d25, 1.5);
6927 __ Fmov(d26, -1.5);
6928 __ Fmov(d27, kFP64PositiveInfinity);
6929 __ Fmov(d28, kFP64NegativeInfinity);
6930 __ Fmov(d29, 0x7ffffffffffffc00UL); // Largest double < INT64_MAX.
6931 __ Fneg(d30, d29); // Smallest double > INT64_MIN.
6932
6933 __ Fcvtzs(w0, s0);
6934 __ Fcvtzs(w1, s1);
6935 __ Fcvtzs(w2, s2);
6936 __ Fcvtzs(w3, s3);
6937 __ Fcvtzs(w4, s4);
6938 __ Fcvtzs(w5, s5);
6939 __ Fcvtzs(w6, s6);
6940 __ Fcvtzs(w7, s7);
6941 __ Fcvtzs(w8, d8);
6942 __ Fcvtzs(w9, d9);
6943 __ Fcvtzs(w10, d10);
6944 __ Fcvtzs(w11, d11);
6945 __ Fcvtzs(w12, d12);
6946 __ Fcvtzs(w13, d13);
6947 __ Fcvtzs(w14, d14);
6948 __ Fcvtzs(w15, d15);
6949 __ Fcvtzs(x17, s17);
6950 __ Fcvtzs(x18, s18);
6951 __ Fcvtzs(x19, s19);
6952 __ Fcvtzs(x20, s20);
6953 __ Fcvtzs(x21, s21);
6954 __ Fcvtzs(x22, s22);
6955 __ Fcvtzs(x23, s23);
6956 __ Fcvtzs(x24, d24);
6957 __ Fcvtzs(x25, d25);
6958 __ Fcvtzs(x26, d26);
6959 __ Fcvtzs(x27, d27);
6960 __ Fcvtzs(x28, d28);
6961 __ Fcvtzs(x29, d29);
6962 __ Fcvtzs(x30, d30);
6963 END();
6964
6965 RUN();
6966
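// Fcvtzs rounds towards zero (truncation), so -1.5 converts to -1
// (0xffffffff as a W register).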
6967 ASSERT_EQUAL_64(1, x0);
6968 ASSERT_EQUAL_64(1, x1);
6969 ASSERT_EQUAL_64(1, x2);
6970 ASSERT_EQUAL_64(0xffffffff, x3);
6971 ASSERT_EQUAL_64(0x7fffffff, x4);
6972 ASSERT_EQUAL_64(0x80000000, x5);
6973 ASSERT_EQUAL_64(0x7fffff80, x6);
6974 ASSERT_EQUAL_64(0x80000080, x7);
6975 ASSERT_EQUAL_64(1, x8);
6976 ASSERT_EQUAL_64(1, x9);
6977 ASSERT_EQUAL_64(1, x10);
6978 ASSERT_EQUAL_64(0xffffffff, x11);
6979 ASSERT_EQUAL_64(0x7fffffff, x12);
6980 ASSERT_EQUAL_64(0x80000000, x13);
6981 ASSERT_EQUAL_64(0x7ffffffe, x14);
6982 ASSERT_EQUAL_64(0x80000001, x15);
6983 ASSERT_EQUAL_64(1, x17);
6984 ASSERT_EQUAL_64(1, x18);
6985 ASSERT_EQUAL_64(0xffffffffffffffffUL, x19);
6986 ASSERT_EQUAL_64(0x7fffffffffffffffUL, x20);
6987 ASSERT_EQUAL_64(0x8000000000000000UL, x21);
6988 ASSERT_EQUAL_64(0x7fffff8000000000UL, x22);
6989 ASSERT_EQUAL_64(0x8000008000000000UL, x23);
6990 ASSERT_EQUAL_64(1, x24);
6991 ASSERT_EQUAL_64(1, x25);
6992 ASSERT_EQUAL_64(0xffffffffffffffffUL, x26);
6993 ASSERT_EQUAL_64(0x7fffffffffffffffUL, x27);
6994 ASSERT_EQUAL_64(0x8000000000000000UL, x28);
6995 ASSERT_EQUAL_64(0x7ffffffffffffc00UL, x29);
6996 ASSERT_EQUAL_64(0x8000000000000400UL, x30);
6997
6998 TEARDOWN();
6999 }
7000
7001
7002 TEST(fcvtzu) {
7003 INIT_V8();
7004 SETUP();
7005
7006 START();
7007 __ Fmov(s0, 1.0);
7008 __ Fmov(s1, 1.1);
7009 __ Fmov(s2, 1.5);
7010 __ Fmov(s3, -1.5);
7011 __ Fmov(s4, kFP32PositiveInfinity);
7012 __ Fmov(s5, kFP32NegativeInfinity);
7013 __ Fmov(s6, 0x7fffff80); // Largest float < INT32_MAX.
7014 __ Fneg(s7, s6); // Smallest float > INT32_MIN.
7015 __ Fmov(d8, 1.0);
7016 __ Fmov(d9, 1.1);
7017 __ Fmov(d10, 1.5);
7018 __ Fmov(d11, -1.5);
7019 __ Fmov(d12, kFP64PositiveInfinity);
7020 __ Fmov(d13, kFP64NegativeInfinity);
7021 __ Fmov(d14, kWMaxInt - 1);
7022 __ Fmov(d15, kWMinInt + 1);
7023 __ Fmov(s17, 1.1);
7024 __ Fmov(s18, 1.5);
7025 __ Fmov(s19, -1.5);
7026 __ Fmov(s20, kFP32PositiveInfinity);
7027 __ Fmov(s21, kFP32NegativeInfinity);
7028 __ Fmov(s22, 0x7fffff8000000000UL); // Largest float < INT64_MAX.
7029 __ Fneg(s23, s22); // Smallest float > INT64_MIN.
7030 __ Fmov(d24, 1.1);
7031 __ Fmov(d25, 1.5);
7032 __ Fmov(d26, -1.5);
7033 __ Fmov(d27, kFP64PositiveInfinity);
7034 __ Fmov(d28, kFP64NegativeInfinity);
7035 __ Fmov(d29, 0x7ffffffffffffc00UL); // Largest double < INT64_MAX.
7036 __ Fneg(d30, d29); // Smallest double > INT64_MIN.
7037
7038 __ Fcvtzu(w0, s0);
7039 __ Fcvtzu(w1, s1);
7040 __ Fcvtzu(w2, s2);
7041 __ Fcvtzu(w3, s3);
7042 __ Fcvtzu(w4, s4);
7043 __ Fcvtzu(w5, s5);
7044 __ Fcvtzu(w6, s6);
7045 __ Fcvtzu(w7, s7);
7046 __ Fcvtzu(w8, d8);
7047 __ Fcvtzu(w9, d9);
7048 __ Fcvtzu(w10, d10);
7049 __ Fcvtzu(w11, d11);
7050 __ Fcvtzu(w12, d12);
7051 __ Fcvtzu(w13, d13);
7052 __ Fcvtzu(w14, d14);
7053 __ Fcvtzu(x17, s17);
7054 __ Fcvtzu(x18, s18);
7055 __ Fcvtzu(x19, s19);
7056 __ Fcvtzu(x20, s20);
7057 __ Fcvtzu(x21, s21);
7058 __ Fcvtzu(x22, s22);
7059 __ Fcvtzu(x23, s23);
7060 __ Fcvtzu(x24, d24);
7061 __ Fcvtzu(x25, d25);
7062 __ Fcvtzu(x26, d26);
7063 __ Fcvtzu(x27, d27);
7064 __ Fcvtzu(x28, d28);
7065 __ Fcvtzu(x29, d29);
7066 __ Fcvtzu(x30, d30);
7067 END();
7068
7069 RUN();
7070
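// Fcvtzu truncates towards zero as well, and saturates negative results
// to 0.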
7071 ASSERT_EQUAL_64(1, x0);
7072 ASSERT_EQUAL_64(1, x1);
7073 ASSERT_EQUAL_64(1, x2);
7074 ASSERT_EQUAL_64(0, x3);
7075 ASSERT_EQUAL_64(0xffffffff, x4);
7076 ASSERT_EQUAL_64(0, x5);
7077 ASSERT_EQUAL_64(0x7fffff80, x6);
7078 ASSERT_EQUAL_64(0, x7);
7079 ASSERT_EQUAL_64(1, x8);
7080 ASSERT_EQUAL_64(1, x9);
7081 ASSERT_EQUAL_64(1, x10);
7082 ASSERT_EQUAL_64(0, x11);
7083 ASSERT_EQUAL_64(0xffffffff, x12);
7084 ASSERT_EQUAL_64(0, x13);
7085 ASSERT_EQUAL_64(0x7ffffffe, x14);
7086 ASSERT_EQUAL_64(1, x17);
7087 ASSERT_EQUAL_64(1, x18);
7088 ASSERT_EQUAL_64(0x0UL, x19);
7089 ASSERT_EQUAL_64(0xffffffffffffffffUL, x20);
7090 ASSERT_EQUAL_64(0x0UL, x21);
7091 ASSERT_EQUAL_64(0x7fffff8000000000UL, x22);
7092 ASSERT_EQUAL_64(0x0UL, x23);
7093 ASSERT_EQUAL_64(1, x24);
7094 ASSERT_EQUAL_64(1, x25);
7095 ASSERT_EQUAL_64(0x0UL, x26);
7096 ASSERT_EQUAL_64(0xffffffffffffffffUL, x27);
7097 ASSERT_EQUAL_64(0x0UL, x28);
7098 ASSERT_EQUAL_64(0x7ffffffffffffc00UL, x29);
7099 ASSERT_EQUAL_64(0x0UL, x30);
7100
7101 TEARDOWN();
7102 }
7103
7104
7105 // Test that scvtf and ucvtf can convert the 64-bit input into the expected
7106 // value. All possible values of 'fbits' are tested. The expected value is
7107 // modified accordingly in each case.
7108 //
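// With 'fbits' fraction bits, the result is the integer input divided by
// 2^fbits; for example, Scvtf(d0, x10, 4) with x10 = 16 produces 1.0.
//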
7109 // The expected value is specified as the bit encoding of the expected double
7110 // produced by scvtf (expected_scvtf_bits) as well as ucvtf
7111 // (expected_ucvtf_bits).
7112 //
7113 // Where the input value is representable by int32_t or uint32_t, conversions
7114 // from W registers will also be tested.
7115 static void TestUScvtfHelper(uint64_t in,
7116 uint64_t expected_scvtf_bits,
7117 uint64_t expected_ucvtf_bits) {
7118 uint64_t u64 = in;
7119 uint32_t u32 = u64 & 0xffffffff;
7120 int64_t s64 = static_cast<int64_t>(in);
7121 int32_t s32 = s64 & 0x7fffffff;
7122
7123 bool cvtf_s32 = (s64 == s32);
7124 bool cvtf_u32 = (u64 == u32);
7125
7126 double results_scvtf_x[65];
7127 double results_ucvtf_x[65];
7128 double results_scvtf_w[33];
7129 double results_ucvtf_w[33];
7130
7131 SETUP();
7132 START();
7133
7134 __ Mov(x0, reinterpret_cast<int64_t>(results_scvtf_x));
7135 __ Mov(x1, reinterpret_cast<int64_t>(results_ucvtf_x));
7136 __ Mov(x2, reinterpret_cast<int64_t>(results_scvtf_w));
7137 __ Mov(x3, reinterpret_cast<int64_t>(results_ucvtf_w));
7138
7139 __ Mov(x10, s64);
7140
7141 // Corrupt the top word, in case it is accidentally used during W-register
7142 // conversions.
7143 __ Mov(x11, 0x5555555555555555);
7144 __ Bfi(x11, x10, 0, kWRegSize);
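// x11 now holds the input's low word, with the 0x55555555 pattern left in
// its high word.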
7145
7146 // Test integer conversions.
7147 __ Scvtf(d0, x10);
7148 __ Ucvtf(d1, x10);
7149 __ Scvtf(d2, w11);
7150 __ Ucvtf(d3, w11);
7151 __ Str(d0, MemOperand(x0));
7152 __ Str(d1, MemOperand(x1));
7153 __ Str(d2, MemOperand(x2));
7154 __ Str(d3, MemOperand(x3));
7155
7156 // Test all possible values of fbits.
7157 for (int fbits = 1; fbits <= 32; fbits++) {
7158 __ Scvtf(d0, x10, fbits);
7159 __ Ucvtf(d1, x10, fbits);
7160 __ Scvtf(d2, w11, fbits);
7161 __ Ucvtf(d3, w11, fbits);
7162 __ Str(d0, MemOperand(x0, fbits * kDRegSizeInBytes));
7163 __ Str(d1, MemOperand(x1, fbits * kDRegSizeInBytes));
7164 __ Str(d2, MemOperand(x2, fbits * kDRegSizeInBytes));
7165 __ Str(d3, MemOperand(x3, fbits * kDRegSizeInBytes));
7166 }
7167
7168 // Conversions from W registers can only handle fbits values <= 32, so just
7169 // test conversions from X registers for 32 < fbits <= 64.
7170 for (int fbits = 33; fbits <= 64; fbits++) {
7171 __ Scvtf(d0, x10, fbits);
7172 __ Ucvtf(d1, x10, fbits);
7173 __ Str(d0, MemOperand(x0, fbits * kDRegSizeInBytes));
7174 __ Str(d1, MemOperand(x1, fbits * kDRegSizeInBytes));
7175 }
7176
7177 END();
7178 RUN();
7179
7180 // Check the results.
7181 double expected_scvtf_base = rawbits_to_double(expected_scvtf_bits);
7182 double expected_ucvtf_base = rawbits_to_double(expected_ucvtf_bits);
7183
7184 for (int fbits = 0; fbits <= 32; fbits++) {
7185 double expected_scvtf = expected_scvtf_base / pow(2.0, fbits);
7186 double expected_ucvtf = expected_ucvtf_base / pow(2.0, fbits);
7187 ASSERT_EQUAL_FP64(expected_scvtf, results_scvtf_x[fbits]);
7188 ASSERT_EQUAL_FP64(expected_ucvtf, results_ucvtf_x[fbits]);
7189 if (cvtf_s32) ASSERT_EQUAL_FP64(expected_scvtf, results_scvtf_w[fbits]);
7190 if (cvtf_u32) ASSERT_EQUAL_FP64(expected_ucvtf, results_ucvtf_w[fbits]);
7191 }
7192 for (int fbits = 33; fbits <= 64; fbits++) {
7193 double expected_scvtf = expected_scvtf_base / pow(2.0, fbits);
7194 double expected_ucvtf = expected_ucvtf_base / pow(2.0, fbits);
7195 ASSERT_EQUAL_FP64(expected_scvtf, results_scvtf_x[fbits]);
7196 ASSERT_EQUAL_FP64(expected_ucvtf, results_ucvtf_x[fbits]);
7197 }
7198
7199 TEARDOWN();
7200 }
7201
7202
7203 TEST(scvtf_ucvtf_double) {
7204 INIT_V8();
7205 // Simple conversions of positive numbers which require no rounding; the
7206 // results should not depend on the rounding mode, and ucvtf and scvtf should
7207 // produce the same result.
7208 TestUScvtfHelper(0x0000000000000000, 0x0000000000000000, 0x0000000000000000);
7209 TestUScvtfHelper(0x0000000000000001, 0x3ff0000000000000, 0x3ff0000000000000);
7210 TestUScvtfHelper(0x0000000040000000, 0x41d0000000000000, 0x41d0000000000000);
7211 TestUScvtfHelper(0x0000000100000000, 0x41f0000000000000, 0x41f0000000000000);
7212 TestUScvtfHelper(0x4000000000000000, 0x43d0000000000000, 0x43d0000000000000);
7213 // Test mantissa extremities.
7214 TestUScvtfHelper(0x4000000000000400, 0x43d0000000000001, 0x43d0000000000001);
7215 // The largest int32_t that fits in a double.
7216 TestUScvtfHelper(0x000000007fffffff, 0x41dfffffffc00000, 0x41dfffffffc00000);
7217 // Values that would be negative if treated as an int32_t.
7218 TestUScvtfHelper(0x00000000ffffffff, 0x41efffffffe00000, 0x41efffffffe00000);
7219 TestUScvtfHelper(0x0000000080000000, 0x41e0000000000000, 0x41e0000000000000);
7220 TestUScvtfHelper(0x0000000080000001, 0x41e0000000200000, 0x41e0000000200000);
7221 // The largest int64_t that fits in a double.
7222 TestUScvtfHelper(0x7ffffffffffffc00, 0x43dfffffffffffff, 0x43dfffffffffffff);
7223 // Check for bit pattern reproduction.
7224 TestUScvtfHelper(0x0123456789abcde0, 0x43723456789abcde, 0x43723456789abcde);
7225 TestUScvtfHelper(0x0000000012345678, 0x41b2345678000000, 0x41b2345678000000);
7226
7227 // Simple conversions of negative int64_t values. These require no rounding,
7228 // and the results should not depend on the rounding mode.
7229 TestUScvtfHelper(0xffffffffc0000000, 0xc1d0000000000000, 0x43effffffff80000);
7230 TestUScvtfHelper(0xffffffff00000000, 0xc1f0000000000000, 0x43efffffffe00000);
7231 TestUScvtfHelper(0xc000000000000000, 0xc3d0000000000000, 0x43e8000000000000);
7232
7233 // Conversions which require rounding.
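// Near 2^60 a double can only represent multiples of 2^8, so the low eight
// bits round to nearest, with ties going to the even significand.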
7234 TestUScvtfHelper(0x1000000000000000, 0x43b0000000000000, 0x43b0000000000000);
7235 TestUScvtfHelper(0x1000000000000001, 0x43b0000000000000, 0x43b0000000000000);
7236 TestUScvtfHelper(0x1000000000000080, 0x43b0000000000000, 0x43b0000000000000);
7237 TestUScvtfHelper(0x1000000000000081, 0x43b0000000000001, 0x43b0000000000001);
7238 TestUScvtfHelper(0x1000000000000100, 0x43b0000000000001, 0x43b0000000000001);
7239 TestUScvtfHelper(0x1000000000000101, 0x43b0000000000001, 0x43b0000000000001);
7240 TestUScvtfHelper(0x1000000000000180, 0x43b0000000000002, 0x43b0000000000002);
7241 TestUScvtfHelper(0x1000000000000181, 0x43b0000000000002, 0x43b0000000000002);
7242 TestUScvtfHelper(0x1000000000000200, 0x43b0000000000002, 0x43b0000000000002);
7243 TestUScvtfHelper(0x1000000000000201, 0x43b0000000000002, 0x43b0000000000002);
7244 TestUScvtfHelper(0x1000000000000280, 0x43b0000000000002, 0x43b0000000000002);
7245 TestUScvtfHelper(0x1000000000000281, 0x43b0000000000003, 0x43b0000000000003);
7246 TestUScvtfHelper(0x1000000000000300, 0x43b0000000000003, 0x43b0000000000003);
7247 // Check rounding of negative int64_t values (and large uint64_t values).
7248 TestUScvtfHelper(0x8000000000000000, 0xc3e0000000000000, 0x43e0000000000000);
7249 TestUScvtfHelper(0x8000000000000001, 0xc3e0000000000000, 0x43e0000000000000);
7250 TestUScvtfHelper(0x8000000000000200, 0xc3e0000000000000, 0x43e0000000000000);
7251 TestUScvtfHelper(0x8000000000000201, 0xc3dfffffffffffff, 0x43e0000000000000);
7252 TestUScvtfHelper(0x8000000000000400, 0xc3dfffffffffffff, 0x43e0000000000000);
7253 TestUScvtfHelper(0x8000000000000401, 0xc3dfffffffffffff, 0x43e0000000000001);
7254 TestUScvtfHelper(0x8000000000000600, 0xc3dffffffffffffe, 0x43e0000000000001);
7255 TestUScvtfHelper(0x8000000000000601, 0xc3dffffffffffffe, 0x43e0000000000001);
7256 TestUScvtfHelper(0x8000000000000800, 0xc3dffffffffffffe, 0x43e0000000000001);
7257 TestUScvtfHelper(0x8000000000000801, 0xc3dffffffffffffe, 0x43e0000000000001);
7258 TestUScvtfHelper(0x8000000000000a00, 0xc3dffffffffffffe, 0x43e0000000000001);
7259 TestUScvtfHelper(0x8000000000000a01, 0xc3dffffffffffffd, 0x43e0000000000001);
7260 TestUScvtfHelper(0x8000000000000c00, 0xc3dffffffffffffd, 0x43e0000000000002);
7261 // Round up to produce a result that's too big for the input to represent.
7262 TestUScvtfHelper(0x7ffffffffffffe00, 0x43e0000000000000, 0x43e0000000000000);
7263 TestUScvtfHelper(0x7fffffffffffffff, 0x43e0000000000000, 0x43e0000000000000);
7264 TestUScvtfHelper(0xfffffffffffffc00, 0xc090000000000000, 0x43f0000000000000);
7265 TestUScvtfHelper(0xffffffffffffffff, 0xbff0000000000000, 0x43f0000000000000);
7266 }
7267
7268
7269 // The same as TestUScvtfHelper, but convert to floats.
7270 static void TestUScvtf32Helper(uint64_t in,
7271 uint32_t expected_scvtf_bits,
7272 uint32_t expected_ucvtf_bits) {
7273 uint64_t u64 = in;
7274 uint32_t u32 = u64 & 0xffffffff;
7275 int64_t s64 = static_cast<int64_t>(in);
7276 int32_t s32 = s64 & 0x7fffffff;
7277
7278 bool cvtf_s32 = (s64 == s32);
7279 bool cvtf_u32 = (u64 == u32);
7280
7281 float results_scvtf_x[65];
7282 float results_ucvtf_x[65];
7283 float results_scvtf_w[33];
7284 float results_ucvtf_w[33];
7285
7286 SETUP();
7287 START();
7288
7289 __ Mov(x0, reinterpret_cast<int64_t>(results_scvtf_x));
7290 __ Mov(x1, reinterpret_cast<int64_t>(results_ucvtf_x));
7291 __ Mov(x2, reinterpret_cast<int64_t>(results_scvtf_w));
7292 __ Mov(x3, reinterpret_cast<int64_t>(results_ucvtf_w));
7293
7294 __ Mov(x10, s64);
7295
7296 // Corrupt the top word, in case it is accidentally used during W-register
7297 // conversions.
7298 __ Mov(x11, 0x5555555555555555);
7299 __ Bfi(x11, x10, 0, kWRegSize);
7300
7301 // Test integer conversions.
7302 __ Scvtf(s0, x10);
7303 __ Ucvtf(s1, x10);
7304 __ Scvtf(s2, w11);
7305 __ Ucvtf(s3, w11);
7306 __ Str(s0, MemOperand(x0));
7307 __ Str(s1, MemOperand(x1));
7308 __ Str(s2, MemOperand(x2));
7309 __ Str(s3, MemOperand(x3));
7310
7311 // Test all possible values of fbits.
7312 for (int fbits = 1; fbits <= 32; fbits++) {
7313 __ Scvtf(s0, x10, fbits);
7314 __ Ucvtf(s1, x10, fbits);
7315 __ Scvtf(s2, w11, fbits);
7316 __ Ucvtf(s3, w11, fbits);
7317 __ Str(s0, MemOperand(x0, fbits * kSRegSizeInBytes));
7318 __ Str(s1, MemOperand(x1, fbits * kSRegSizeInBytes));
7319 __ Str(s2, MemOperand(x2, fbits * kSRegSizeInBytes));
7320 __ Str(s3, MemOperand(x3, fbits * kSRegSizeInBytes));
7321 }
7322
7323 // Conversions from W registers can only handle fbits values <= 32, so just
7324 // test conversions from X registers for 32 < fbits <= 64.
7325 for (int fbits = 33; fbits <= 64; fbits++) {
7326 __ Scvtf(s0, x10, fbits);
7327 __ Ucvtf(s1, x10, fbits);
7328 __ Str(s0, MemOperand(x0, fbits * kSRegSizeInBytes));
7329 __ Str(s1, MemOperand(x1, fbits * kSRegSizeInBytes));
7330 }
7331
7332 END();
7333 RUN();
7334
7335 // Check the results.
7336 float expected_scvtf_base = rawbits_to_float(expected_scvtf_bits);
7337 float expected_ucvtf_base = rawbits_to_float(expected_ucvtf_bits);
7338
7339 for (int fbits = 0; fbits <= 32; fbits++) {
7340 float expected_scvtf = expected_scvtf_base / powf(2, fbits);
7341 float expected_ucvtf = expected_ucvtf_base / powf(2, fbits);
7342 ASSERT_EQUAL_FP32(expected_scvtf, results_scvtf_x[fbits]);
7343 ASSERT_EQUAL_FP32(expected_ucvtf, results_ucvtf_x[fbits]);
7344 if (cvtf_s32) ASSERT_EQUAL_FP32(expected_scvtf, results_scvtf_w[fbits]);
7345 if (cvtf_u32) ASSERT_EQUAL_FP32(expected_ucvtf, results_ucvtf_w[fbits]);
7347 }
7348 for (int fbits = 33; fbits <= 64; fbits++) {
7350 float expected_scvtf = expected_scvtf_base / powf(2, fbits);
7351 float expected_ucvtf = expected_ucvtf_base / powf(2, fbits);
7352 ASSERT_EQUAL_FP32(expected_scvtf, results_scvtf_x[fbits]);
7353 ASSERT_EQUAL_FP32(expected_ucvtf, results_ucvtf_x[fbits]);
7354 }
7355
7356 TEARDOWN();
7357 }
7358
7359
7360 TEST(scvtf_ucvtf_float) {
7361 INIT_V8();
7362 // Simple conversions of positive numbers which require no rounding; the
7363 // results should not depend on the rounding mode, and ucvtf and scvtf should
7364 // produce the same result.
7365 TestUScvtf32Helper(0x0000000000000000, 0x00000000, 0x00000000);
7366 TestUScvtf32Helper(0x0000000000000001, 0x3f800000, 0x3f800000);
7367 TestUScvtf32Helper(0x0000000040000000, 0x4e800000, 0x4e800000);
7368 TestUScvtf32Helper(0x0000000100000000, 0x4f800000, 0x4f800000);
7369 TestUScvtf32Helper(0x4000000000000000, 0x5e800000, 0x5e800000);
7370 // Test mantissa extremities.
7371 TestUScvtf32Helper(0x0000000000800001, 0x4b000001, 0x4b000001);
7372 TestUScvtf32Helper(0x4000008000000000, 0x5e800001, 0x5e800001);
7373 // The largest int32_t that fits in a float.
7374 TestUScvtf32Helper(0x000000007fffff80, 0x4effffff, 0x4effffff);
7375 // Values that would be negative if treated as an int32_t.
7376 TestUScvtf32Helper(0x00000000ffffff00, 0x4f7fffff, 0x4f7fffff);
7377 TestUScvtf32Helper(0x0000000080000000, 0x4f000000, 0x4f000000);
7378 TestUScvtf32Helper(0x0000000080000100, 0x4f000001, 0x4f000001);
7379 // The largest int64_t that fits in a float.
7380 TestUScvtf32Helper(0x7fffff8000000000, 0x5effffff, 0x5effffff);
7381 // Check for bit pattern reproduction.
7382 TestUScvtf32Helper(0x0000000000876543, 0x4b076543, 0x4b076543);
7383
7384 // Simple conversions of negative int64_t values. These require no rounding,
7385 // and the results should not depend on the rounding mode.
7386 TestUScvtf32Helper(0xfffffc0000000000, 0xd4800000, 0x5f7ffffc);
7387 TestUScvtf32Helper(0xc000000000000000, 0xde800000, 0x5f400000);
7388
7389 // Conversions which require rounding.
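// Near 2^47 a float can only represent multiples of 2^24, so the low 24 bits
// round to nearest, with ties going to the even significand.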
7390 TestUScvtf32Helper(0x0000800000000000, 0x57000000, 0x57000000);
7391 TestUScvtf32Helper(0x0000800000000001, 0x57000000, 0x57000000);
7392 TestUScvtf32Helper(0x0000800000800000, 0x57000000, 0x57000000);
7393 TestUScvtf32Helper(0x0000800000800001, 0x57000001, 0x57000001);
7394 TestUScvtf32Helper(0x0000800001000000, 0x57000001, 0x57000001);
7395 TestUScvtf32Helper(0x0000800001000001, 0x57000001, 0x57000001);
7396 TestUScvtf32Helper(0x0000800001800000, 0x57000002, 0x57000002);
7397 TestUScvtf32Helper(0x0000800001800001, 0x57000002, 0x57000002);
7398 TestUScvtf32Helper(0x0000800002000000, 0x57000002, 0x57000002);
7399 TestUScvtf32Helper(0x0000800002000001, 0x57000002, 0x57000002);
7400 TestUScvtf32Helper(0x0000800002800000, 0x57000002, 0x57000002);
7401 TestUScvtf32Helper(0x0000800002800001, 0x57000003, 0x57000003);
7402 TestUScvtf32Helper(0x0000800003000000, 0x57000003, 0x57000003);
7403 // Check rounding of negative int64_t values (and large uint64_t values).
7404 TestUScvtf32Helper(0x8000000000000000, 0xdf000000, 0x5f000000);
7405 TestUScvtf32Helper(0x8000000000000001, 0xdf000000, 0x5f000000);
7406 TestUScvtf32Helper(0x8000004000000000, 0xdf000000, 0x5f000000);
7407 TestUScvtf32Helper(0x8000004000000001, 0xdeffffff, 0x5f000000);
7408 TestUScvtf32Helper(0x8000008000000000, 0xdeffffff, 0x5f000000);
7409 TestUScvtf32Helper(0x8000008000000001, 0xdeffffff, 0x5f000001);
7410 TestUScvtf32Helper(0x800000c000000000, 0xdefffffe, 0x5f000001);
7411 TestUScvtf32Helper(0x800000c000000001, 0xdefffffe, 0x5f000001);
7412 TestUScvtf32Helper(0x8000010000000000, 0xdefffffe, 0x5f000001);
7413 TestUScvtf32Helper(0x8000010000000001, 0xdefffffe, 0x5f000001);
7414 TestUScvtf32Helper(0x8000014000000000, 0xdefffffe, 0x5f000001);
7415 TestUScvtf32Helper(0x8000014000000001, 0xdefffffd, 0x5f000001);
7416 TestUScvtf32Helper(0x8000018000000000, 0xdefffffd, 0x5f000002);
7417 // Round up to produce a result that's too big for the input to represent.
7418 TestUScvtf32Helper(0x000000007fffffc0, 0x4f000000, 0x4f000000);
7419 TestUScvtf32Helper(0x000000007fffffff, 0x4f000000, 0x4f000000);
7420 TestUScvtf32Helper(0x00000000ffffff80, 0x4f800000, 0x4f800000);
7421 TestUScvtf32Helper(0x00000000ffffffff, 0x4f800000, 0x4f800000);
7422 TestUScvtf32Helper(0x7fffffc000000000, 0x5f000000, 0x5f000000);
7423 TestUScvtf32Helper(0x7fffffffffffffff, 0x5f000000, 0x5f000000);
7424 TestUScvtf32Helper(0xffffff8000000000, 0xd3000000, 0x5f800000);
7425 TestUScvtf32Helper(0xffffffffffffffff, 0xbf800000, 0x5f800000);
7426 }
7427
7428
7429 TEST(system_mrs) {
7430 INIT_V8();
7431 SETUP();
7432
7433 START();
7434 __ Mov(w0, 0);
7435 __ Mov(w1, 1);
7436 __ Mov(w2, 0x80000000);
7437
7438 // Set the Z and C flags.
7439 __ Cmp(w0, w0);
7440 __ Mrs(x3, NZCV);
7441
7442 // Set the N flag.
7443 __ Cmp(w0, w1);
7444 __ Mrs(x4, NZCV);
7445
7446 // Set the Z, C and V flags.
7447 __ Adds(w0, w2, w2);
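// (0x80000000 + 0x80000000 wraps to zero, producing both a carry out and a
// signed overflow.)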
7448 __ Mrs(x5, NZCV);
7449
7450 // Read the default FPCR.
7451 __ Mrs(x6, FPCR);
7452 END();
7453
7454 RUN();
7455
7456 // NZCV
7457 ASSERT_EQUAL_32(ZCFlag, w3);
7458 ASSERT_EQUAL_32(NFlag, w4);
7459 ASSERT_EQUAL_32(ZCVFlag, w5);
7460
7461 // FPCR
7462 // The default FPCR on Linux-based platforms is 0.
7463 ASSERT_EQUAL_32(0, w6);
7464
7465 TEARDOWN();
7466 }
7467
7468
7469 TEST(system_msr) {
7470 INIT_V8();
7471 // All FPCR fields that must be implemented: AHP, DN, FZ, RMode
7472 const uint64_t fpcr_core = 0x07c00000;
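// (AHP is bit 26, DN bit 25, FZ bit 24 and RMode bits 23:22, which together
// form the 0x07c00000 mask.)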
7473
7474 // All FPCR fields (including fields which may be read-as-zero):
7475 // Stride, Len
7476 // IDE, IXE, UFE, OFE, DZE, IOE
7477 const uint64_t fpcr_all = fpcr_core | 0x00379f00;
7478
7479 SETUP();
7480
7481 START();
7482 __ Mov(w0, 0);
7483 __ Mov(w1, 0x7fffffff);
7484
7485 __ Mov(x7, 0);
7486
7487 __ Mov(x10, NVFlag);
7488 __ Cmp(w0, w0); // Set Z and C.
7489 __ Msr(NZCV, x10); // Set N and V.
7490 // The Msr should have overwritten every flag set by the Cmp.
7491 __ Cinc(x7, x7, mi); // N
7492 __ Cinc(x7, x7, ne); // !Z
7493 __ Cinc(x7, x7, lo); // !C
7494 __ Cinc(x7, x7, vs); // V
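// Each Cinc increments x7 only when its condition holds, so x7 counts the
// flag checks that pass.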
7495
7496 __ Mov(x10, ZCFlag);
7497 __ Cmn(w1, w1); // Set N and V.
7498 __ Msr(NZCV, x10); // Set Z and C.
7499 // The Msr should have overwritten every flag set by the Cmn.
7500 __ Cinc(x7, x7, pl); // !N
7501 __ Cinc(x7, x7, eq); // Z
7502 __ Cinc(x7, x7, hs); // C
7503 __ Cinc(x7, x7, vc); // !V
7504
7505 // All core FPCR fields must be writable.
7506 __ Mov(x8, fpcr_core);
7507 __ Msr(FPCR, x8);
7508 __ Mrs(x8, FPCR);
7509
7510 // All FPCR fields, including optional ones. This part of the test doesn't
7511 // achieve much other than ensuring that supported fields can be cleared by
7512 // the next test.
7513 __ Mov(x9, fpcr_all);
7514 __ Msr(FPCR, x9);
7515 __ Mrs(x9, FPCR);
7516 __ And(x9, x9, fpcr_core);
7517
7518 // The undefined bits must ignore writes.
7519 // It's conceivable that a future version of the architecture could use these
7520 // fields (making this test fail), but in the meantime this is a useful test
7521 // for the simulator.
7522 __ Mov(x10, ~fpcr_all);
7523 __ Msr(FPCR, x10);
7524 __ Mrs(x10, FPCR);
7525
7526 END();
7527
7528 RUN();
7529
7530 // We should have incremented x7 (from 0) exactly 8 times.
7531 ASSERT_EQUAL_64(8, x7);
7532
7533 ASSERT_EQUAL_64(fpcr_core, x8);
7534 ASSERT_EQUAL_64(fpcr_core, x9);
7535 ASSERT_EQUAL_64(0, x10);
7536
7537 TEARDOWN();
7538 }
7539
7540
7541 TEST(system_nop) {
7542 INIT_V8();
7543 SETUP();
7544 RegisterDump before;
7545
7546 START();
7547 before.Dump(&masm);
7548 __ Nop();
7549 END();
7550
7551 RUN();
7552
7553 ASSERT_EQUAL_REGISTERS(before);
7554 ASSERT_EQUAL_NZCV(before.flags_nzcv());
7555
7556 TEARDOWN();
7557 }
7558
7559
7560 TEST(zero_dest) {
7561 INIT_V8();
7562 SETUP();
7563 RegisterDump before;
7564
7565 START();
7566 // Preserve the system stack pointer, in case we clobber it.
7567 __ Mov(x30, csp);
7568 // Initialize the other registers used in this test.
7569 uint64_t literal_base = 0x0100001000100101UL;
7570 __ Mov(x0, 0);
7571 __ Mov(x1, literal_base);
7572 for (unsigned i = 2; i < x30.code(); i++) {
7573 __ Add(Register::XRegFromCode(i), Register::XRegFromCode(i-1), x1);
7574 }
7575 before.Dump(&masm);
7576
7577 // All of these instructions should be NOPs in these forms, but have
7578 // alternate forms which can write into the stack pointer.
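// (In the shifted-register forms used here, register code 31 encodes xzr; in
// the immediate and extended-register forms it would encode the stack
// pointer instead.)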
7579 __ add(xzr, x0, x1);
7580 __ add(xzr, x1, xzr);
7581 __ add(xzr, xzr, x1);
7582
7583 __ and_(xzr, x0, x2);
7584 __ and_(xzr, x2, xzr);
7585 __ and_(xzr, xzr, x2);
7586
7587 __ bic(xzr, x0, x3);
7588 __ bic(xzr, x3, xzr);
7589 __ bic(xzr, xzr, x3);
7590
7591 __ eon(xzr, x0, x4);
7592 __ eon(xzr, x4, xzr);
7593 __ eon(xzr, xzr, x4);
7594
7595 __ eor(xzr, x0, x5);
7596 __ eor(xzr, x5, xzr);
7597 __ eor(xzr, xzr, x5);
7598
7599 __ orr(xzr, x0, x6);
7600 __ orr(xzr, x6, xzr);
7601 __ orr(xzr, xzr, x6);
7602
7603 __ sub(xzr, x0, x7);
7604 __ sub(xzr, x7, xzr);
7605 __ sub(xzr, xzr, x7);
7606
7607 // Swap the saved system stack pointer with the real one. If csp was written
7608 // during the test, it will show up in x30. This is done because the test
7609 // framework assumes that csp will be valid at the end of the test.
7610 __ Mov(x29, x30);
7611 __ Mov(x30, csp);
7612 __ Mov(csp, x29);
7613 // We used x29 as a scratch register, so reset it to make sure it doesn't
7614 // trigger a test failure.
7615 __ Add(x29, x28, x1);
7616 END();
7617
7618 RUN();
7619
7620 ASSERT_EQUAL_REGISTERS(before);
7621 ASSERT_EQUAL_NZCV(before.flags_nzcv());
7622
7623 TEARDOWN();
7624 }
7625
7626
7627 TEST(zero_dest_setflags) {
7628 INIT_V8();
7629 SETUP();
7630 RegisterDump before;
7631
7632 START();
7633 // Preserve the system stack pointer, in case we clobber it.
7634 __ Mov(x30, csp);
7635 // Initialize the other registers used in this test.
7636 uint64_t literal_base = 0x0100001000100101UL;
7637 __ Mov(x0, 0);
7638 __ Mov(x1, literal_base);
7639 for (int i = 2; i < 30; i++) {
7640 __ Add(Register::XRegFromCode(i), Register::XRegFromCode(i-1), x1);
7641 }
7642 before.Dump(&masm);
7643
7644 // All of these instructions should only write to the flags in these forms,
7645 // but have alternate forms which can write into the stack pointer.
7646 __ adds(xzr, x0, Operand(x1, UXTX));
7647 __ adds(xzr, x1, Operand(xzr, UXTX));
7648 __ adds(xzr, x1, 1234);
7649 __ adds(xzr, x0, x1);
7650 __ adds(xzr, x1, xzr);
7651 __ adds(xzr, xzr, x1);
7652
7653 __ ands(xzr, x2, ~0xf);
7654 __ ands(xzr, xzr, ~0xf);
7655 __ ands(xzr, x0, x2);
7656 __ ands(xzr, x2, xzr);
7657 __ ands(xzr, xzr, x2);
7658
7659 __ bics(xzr, x3, ~0xf);
7660 __ bics(xzr, xzr, ~0xf);
7661 __ bics(xzr, x0, x3);
7662 __ bics(xzr, x3, xzr);
7663 __ bics(xzr, xzr, x3);
7664
7665 __ subs(xzr, x0, Operand(x3, UXTX));
7666 __ subs(xzr, x3, Operand(xzr, UXTX));
7667 __ subs(xzr, x3, 1234);
7668 __ subs(xzr, x0, x3);
7669 __ subs(xzr, x3, xzr);
7670 __ subs(xzr, xzr, x3);
7671
7672 // Swap the saved system stack pointer with the real one. If csp was written
7673 // during the test, it will show up in x30. This is done because the test
7674 // framework assumes that csp will be valid at the end of the test.
7675 __ Mov(x29, x30);
7676 __ Mov(x30, csp);
7677 __ Mov(csp, x29);
7678 // We used x29 as a scratch register, so reset it to make sure it doesn't
7679 // trigger a test failure.
7680 __ Add(x29, x28, x1);
7681 END();
7682
7683 RUN();
7684
7685 ASSERT_EQUAL_REGISTERS(before);
7686
7687 TEARDOWN();
7688 }
7689
7690
7691 TEST(register_bit) {
7692 // No code generation takes place in this test, so no need to setup and
7693 // teardown.
7694
7695 // Simple tests.
7696 CHECK(x0.Bit() == (1UL << 0));
7697 CHECK(x1.Bit() == (1UL << 1));
7698 CHECK(x10.Bit() == (1UL << 10));
7699
7700 // AAPCS64 definitions.
7701 CHECK(fp.Bit() == (1UL << kFramePointerRegCode));
7702 CHECK(lr.Bit() == (1UL << kLinkRegCode));
7703
7704 // Fixed (hardware) definitions.
7705 CHECK(xzr.Bit() == (1UL << kZeroRegCode));
7706
7707 // Internal ABI definitions.
7708 CHECK(jssp.Bit() == (1UL << kJSSPCode));
7709 CHECK(csp.Bit() == (1UL << kSPRegInternalCode));
7710 CHECK(csp.Bit() != xzr.Bit());
7711
7712 // xn.Bit() == wn.Bit() at all times, for the same n.
7713 CHECK(x0.Bit() == w0.Bit());
7714 CHECK(x1.Bit() == w1.Bit());
7715 CHECK(x10.Bit() == w10.Bit());
7716 CHECK(jssp.Bit() == wjssp.Bit());
7717 CHECK(xzr.Bit() == wzr.Bit());
7718 CHECK(csp.Bit() == wcsp.Bit());
7719 }
7720
7721
7722 TEST(stack_pointer_override) {
7723 // This test generates some stack maintenance code, but the test only checks
7724 // the reported state.
7725 INIT_V8();
7726 SETUP();
7727 START();
7728
7729 // The default stack pointer in V8 is jssp, but for compatibility with VIXL,
7730 // the test framework sets it to csp before calling the test.
7731 CHECK(csp.Is(__ StackPointer()));
7732 __ SetStackPointer(x0);
7733 CHECK(x0.Is(__ StackPointer()));
7734 __ SetStackPointer(jssp);
7735 CHECK(jssp.Is(__ StackPointer()));
7736 __ SetStackPointer(csp);
7737 CHECK(csp.Is(__ StackPointer()));
7738
7739 END();
7740 RUN();
7741 TEARDOWN();
7742 }
7743
7744
7745 TEST(peek_poke_simple) {
7746 INIT_V8();
7747 SETUP();
7748 START();
7749
7750 static const RegList x0_to_x3 = x0.Bit() | x1.Bit() | x2.Bit() | x3.Bit();
7751 static const RegList x10_to_x13 = x10.Bit() | x11.Bit() |
7752 x12.Bit() | x13.Bit();
7753
7754 // The literal base is chosen to have two useful properties:
7755 // * When multiplied by small values (such as a register index), this value
7756 // is clearly readable in the result.
7757 // * The value is not formed from repeating fixed-size smaller values, so it
7758 // can be used to detect endianness-related errors.
7759 uint64_t literal_base = 0x0100001000100101UL;
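// For example, literal_base * 3 is 0x0300003000300303, so the multiplier is
// visible in every byte group of the result.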
7760
7761 // Initialize the registers.
7762 __ Mov(x0, literal_base);
7763 __ Add(x1, x0, x0);
7764 __ Add(x2, x1, x0);
7765 __ Add(x3, x2, x0);
7766
7767 __ Claim(4);
7768
7769 // Simple exchange.
7770 // After this test:
7771 // x0-x3 should be unchanged.
7772 // w10-w13 should contain the lower words of x0-x3.
7773 __ Poke(x0, 0);
7774 __ Poke(x1, 8);
7775 __ Poke(x2, 16);
7776 __ Poke(x3, 24);
7777 Clobber(&masm, x0_to_x3);
7778 __ Peek(x0, 0);
7779 __ Peek(x1, 8);
7780 __ Peek(x2, 16);
7781 __ Peek(x3, 24);
7782
7783 __ Poke(w0, 0);
7784 __ Poke(w1, 4);
7785 __ Poke(w2, 8);
7786 __ Poke(w3, 12);
7787 Clobber(&masm, x10_to_x13);
7788 __ Peek(w10, 0);
7789 __ Peek(w11, 4);
7790 __ Peek(w12, 8);
7791 __ Peek(w13, 12);
7792
7793 __ Drop(4);
7794
7795 END();
7796 RUN();
7797
7798 ASSERT_EQUAL_64(literal_base * 1, x0);
7799 ASSERT_EQUAL_64(literal_base * 2, x1);
7800 ASSERT_EQUAL_64(literal_base * 3, x2);
7801 ASSERT_EQUAL_64(literal_base * 4, x3);
7802
7803 ASSERT_EQUAL_64((literal_base * 1) & 0xffffffff, x10);
7804 ASSERT_EQUAL_64((literal_base * 2) & 0xffffffff, x11);
7805 ASSERT_EQUAL_64((literal_base * 3) & 0xffffffff, x12);
7806 ASSERT_EQUAL_64((literal_base * 4) & 0xffffffff, x13);
7807
7808 TEARDOWN();
7809 }
7810
7811
7812 TEST(peek_poke_unaligned) {
7813 INIT_V8();
7814 SETUP();
7815 START();
7816
7817 // The literal base is chosen to have two useful properties:
7818 // * When multiplied by small values (such as a register index), this value
7819 // is clearly readable in the result.
7820 // * The value is not formed from repeating fixed-size smaller values, so it
7821 // can be used to detect endianness-related errors.
7822 uint64_t literal_base = 0x0100001000100101UL;
7823
7824 // Initialize the registers.
7825 __ Mov(x0, literal_base);
7826 __ Add(x1, x0, x0);
7827 __ Add(x2, x1, x0);
7828 __ Add(x3, x2, x0);
7829 __ Add(x4, x3, x0);
7830 __ Add(x5, x4, x0);
7831 __ Add(x6, x5, x0);
7832
7833 __ Claim(4);
7834
7835 // Unaligned exchanges.
7836 // After this test:
7837 // x0-x6 should be unchanged.
7838 // w10-w12 should contain the lower words of x0-x2.
7839 __ Poke(x0, 1);
7840 Clobber(&masm, x0.Bit());
7841 __ Peek(x0, 1);
7842 __ Poke(x1, 2);
7843 Clobber(&masm, x1.Bit());
7844 __ Peek(x1, 2);
7845 __ Poke(x2, 3);
7846 Clobber(&masm, x2.Bit());
7847 __ Peek(x2, 3);
7848 __ Poke(x3, 4);
7849 Clobber(&masm, x3.Bit());
7850 __ Peek(x3, 4);
7851 __ Poke(x4, 5);
7852 Clobber(&masm, x4.Bit());
7853 __ Peek(x4, 5);
7854 __ Poke(x5, 6);
7855 Clobber(&masm, x5.Bit());
7856 __ Peek(x5, 6);
7857 __ Poke(x6, 7);
7858 Clobber(&masm, x6.Bit());
7859 __ Peek(x6, 7);
7860
7861 __ Poke(w0, 1);
7862 Clobber(&masm, w10.Bit());
7863 __ Peek(w10, 1);
7864 __ Poke(w1, 2);
7865 Clobber(&masm, w11.Bit());
7866 __ Peek(w11, 2);
7867 __ Poke(w2, 3);
7868 Clobber(&masm, w12.Bit());
7869 __ Peek(w12, 3);
7870
7871 __ Drop(4);
7872
7873 END();
7874 RUN();
7875
7876 ASSERT_EQUAL_64(literal_base * 1, x0);
7877 ASSERT_EQUAL_64(literal_base * 2, x1);
7878 ASSERT_EQUAL_64(literal_base * 3, x2);
7879 ASSERT_EQUAL_64(literal_base * 4, x3);
7880 ASSERT_EQUAL_64(literal_base * 5, x4);
7881 ASSERT_EQUAL_64(literal_base * 6, x5);
7882 ASSERT_EQUAL_64(literal_base * 7, x6);
7883
7884 ASSERT_EQUAL_64((literal_base * 1) & 0xffffffff, x10);
7885 ASSERT_EQUAL_64((literal_base * 2) & 0xffffffff, x11);
7886 ASSERT_EQUAL_64((literal_base * 3) & 0xffffffff, x12);
7887
7888 TEARDOWN();
7889 }
7890
7891
7892 TEST(peek_poke_endianness) {
7893 INIT_V8();
7894 SETUP();
7895 START();
7896
7897 // The literal base is chosen to have two useful properties:
7898 // * When multiplied by small values (such as a register index), this value
7899 // is clearly readable in the result.
7900 // * The value is not formed from repeating fixed-size smaller values, so it
7901 // can be used to detect endianness-related errors.
7902 uint64_t literal_base = 0x0100001000100101UL;
7903
7904 // Initialize the registers.
7905 __ Mov(x0, literal_base);
7906 __ Add(x1, x0, x0);
7907
7908 __ Claim(4);
7909
7910 // Endianness tests.
7911 // After this section:
7912 // x4 should match x0[31:0]:x0[63:32]
7913 // w5 should match w1[15:0]:w1[31:16]
7914 __ Poke(x0, 0);
7915 __ Poke(x0, 8);
7916 __ Peek(x4, 4);
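// On a little-endian target, the load at offset 4 combines the high word of
// the first copy with the low word of the second, swapping the words of x0.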
7917
7918 __ Poke(w1, 0);
7919 __ Poke(w1, 4);
7920 __ Peek(w5, 2);
7921
7922 __ Drop(4);
7923
7924 END();
7925 RUN();
7926
7927 uint64_t x0_expected = literal_base * 1;
7928 uint64_t x1_expected = literal_base * 2;
7929 uint64_t x4_expected = (x0_expected << 32) | (x0_expected >> 32);
7930 uint64_t x5_expected = ((x1_expected << 16) & 0xffff0000) |
7931 ((x1_expected >> 16) & 0x0000ffff);
7932
7933 ASSERT_EQUAL_64(x0_expected, x0);
7934 ASSERT_EQUAL_64(x1_expected, x1);
7935 ASSERT_EQUAL_64(x4_expected, x4);
7936 ASSERT_EQUAL_64(x5_expected, x5);
7937
7938 TEARDOWN();
7939 }
7940
7941
7942 TEST(peek_poke_mixed) {
7943 INIT_V8();
7944 SETUP();
7945 START();
7946
7947 // The literal base is chosen to have two useful properties:
7948 // * When multiplied by small values (such as a register index), this value
7949 // is clearly readable in the result.
7950 // * The value is not formed from repeating fixed-size smaller values, so it
7951 // can be used to detect endianness-related errors.
7952 uint64_t literal_base = 0x0100001000100101UL;
7953
7954 // Initialize the registers.
7955 __ Mov(x0, literal_base);
7956 __ Add(x1, x0, x0);
7957 __ Add(x2, x1, x0);
7958 __ Add(x3, x2, x0);
7959
7960 __ Claim(4);
7961
7962 // Mix with other stack operations.
7963 // After this section:
7964 // x0-x3 should be unchanged.
7965 // x6 should match x1[31:0]:x0[63:32]
7966 // w7 should match x1[15:0]:x0[63:48]
7967 __ Poke(x1, 8);
7968 __ Poke(x0, 0);
7969 {
7970 ASSERT(__ StackPointer().Is(csp));
7971 __ Mov(x4, __ StackPointer());
7972 __ SetStackPointer(x4);
7973
7974 __ Poke(wzr, 0); // Clobber the space we're about to drop.
7975 __ Drop(1, kWRegSizeInBytes);
7976 __ Peek(x6, 0);
7977 __ Claim(1);
7978 __ Peek(w7, 10);
7979 __ Poke(x3, 28);
7980 __ Poke(xzr, 0); // Clobber the space we're about to drop.
7981 __ Drop(1);
7982 __ Poke(x2, 12);
7983 __ Push(w0);
7984
7985 __ Mov(csp, __ StackPointer());
7986 __ SetStackPointer(csp);
7987 }
7988
7989 __ Pop(x0, x1, x2, x3);
7990
7991 END();
7992 RUN();
7993
7994 uint64_t x0_expected = literal_base * 1;
7995 uint64_t x1_expected = literal_base * 2;
7996 uint64_t x2_expected = literal_base * 3;
7997 uint64_t x3_expected = literal_base * 4;
7998 uint64_t x6_expected = (x1_expected << 32) | (x0_expected >> 32);
7999 uint64_t x7_expected = ((x1_expected << 16) & 0xffff0000) |
8000 ((x0_expected >> 48) & 0x0000ffff);
8001
8002 ASSERT_EQUAL_64(x0_expected, x0);
8003 ASSERT_EQUAL_64(x1_expected, x1);
8004 ASSERT_EQUAL_64(x2_expected, x2);
8005 ASSERT_EQUAL_64(x3_expected, x3);
8006 ASSERT_EQUAL_64(x6_expected, x6);
8007 ASSERT_EQUAL_64(x7_expected, x7);
8008
8009 TEARDOWN();
8010 }
8011
8012
8013 // This enum is used only as an argument to the push-pop test helpers.
8014 enum PushPopMethod {
8015 // Push or Pop using the Push and Pop methods, with blocks of up to four
8016 // registers. (Smaller blocks will be used if necessary.)
8017 PushPopByFour,
8018
8019 // Use Push<Size>RegList and Pop<Size>RegList to transfer the registers.
8020 PushPopRegList
8021 };
8022
8023
8024 // The maximum number of registers that can be used by the PushPopJssp* tests,
8025 // where a reg_count field is provided.
8026 static int const kPushPopJsspMaxRegCount = -1;
8027
8028 // Test a simple push-pop pattern:
8029 // * Claim <claim> bytes to set the stack alignment.
8030 // * Push <reg_count> registers with size <reg_size>.
8031 // * Clobber the register contents.
8032 // * Pop <reg_count> registers to restore the original contents.
8033 // * Drop <claim> bytes to restore the original stack pointer.
8034 //
8035 // Different push and pop methods can be specified independently to test for
8036 // proper word-endian behaviour.
8037 static void PushPopJsspSimpleHelper(int reg_count,
8038 int claim,
8039 int reg_size,
8040 PushPopMethod push_method,
8041 PushPopMethod pop_method) {
8042 SETUP();
8043
8044 START();
8045
8046 // Registers x8 and x9 are used by the macro assembler for debug code (for
8047 // example in 'Pop'), so we can't use them here. We can't use jssp because it
8048 // will be the stack pointer for this test.
8049 static RegList const allowed = ~(x8.Bit() | x9.Bit() | jssp.Bit());
8050 if (reg_count == kPushPopJsspMaxRegCount) {
8051 reg_count = CountSetBits(allowed, kNumberOfRegisters);
8052 }
8053 // Work out which registers to use, based on reg_size.
8054 Register r[kNumberOfRegisters];
8055 Register x[kNumberOfRegisters];
8056 RegList list = PopulateRegisterArray(NULL, x, r, reg_size, reg_count,
8057 allowed);
8058
8059 // The literal base is chosen to have two useful properties:
8060 // * When multiplied by small values (such as a register index), this value
8061 // is clearly readable in the result.
8062 // * The value is not formed from repeating fixed-size smaller values, so it
8063 // can be used to detect endianness-related errors.
8064 uint64_t literal_base = 0x0100001000100101UL;
8065
8066 {
8067 ASSERT(__ StackPointer().Is(csp));
8068 __ Mov(jssp, __ StackPointer());
8069 __ SetStackPointer(jssp);
8070
8071 int i;
8072
8073 // Initialize the registers.
8074 for (i = 0; i < reg_count; i++) {
8075 // Always write into the X register, to ensure that the upper word is
8076 // properly ignored by Push when testing W registers.
8077 if (!x[i].IsZero()) {
8078 __ Mov(x[i], literal_base * i);
8079 }
8080 }
8081
8082 // Claim memory first, as requested.
8083 __ Claim(claim, kByteSizeInBytes);
8084
8085 switch (push_method) {
8086 case PushPopByFour:
8087 // Push high-numbered registers first (to the highest addresses).
8088 for (i = reg_count; i >= 4; i -= 4) {
8089 __ Push(r[i-1], r[i-2], r[i-3], r[i-4]);
8090 }
8091 // Finish off the leftovers.
8092 switch (i) {
8093 case 3: __ Push(r[2], r[1], r[0]); break;
8094 case 2: __ Push(r[1], r[0]); break;
8095 case 1: __ Push(r[0]); break;
8096 default: ASSERT(i == 0); break;
8097 }
8098 break;
8099 case PushPopRegList:
8100 __ PushSizeRegList(list, reg_size);
8101 break;
8102 }
8103
8104 // Clobber all the registers, to ensure that they get repopulated by Pop.
8105 Clobber(&masm, list);
8106
8107 switch (pop_method) {
8108 case PushPopByFour:
8109 // Pop low-numbered registers first (from the lowest addresses).
8110 for (i = 0; i <= (reg_count-4); i += 4) {
8111 __ Pop(r[i], r[i+1], r[i+2], r[i+3]);
8112 }
8113 // Finish off the leftovers.
8114 switch (reg_count - i) {
8115 case 3: __ Pop(r[i], r[i+1], r[i+2]); break;
8116 case 2: __ Pop(r[i], r[i+1]); break;
8117 case 1: __ Pop(r[i]); break;
8118 default: ASSERT(i == reg_count); break;
8119 }
8120 break;
8121 case PushPopRegList:
8122 __ PopSizeRegList(list, reg_size);
8123 break;
8124 }
8125
8126 // Drop memory to restore jssp.
8127 __ Drop(claim, kByteSizeInBytes);
8128
8129 __ Mov(csp, __ StackPointer());
8130 __ SetStackPointer(csp);
8131 }
8132
8133 END();
8134
8135 RUN();
8136
8137 // Check that the register contents were preserved.
8138 // Always use ASSERT_EQUAL_64, even when testing W registers, so we can test
8139 // that the upper word was properly cleared by Pop.
8140 literal_base &= (0xffffffffffffffffUL >> (64-reg_size));
8141 for (int i = 0; i < reg_count; i++) {
8142 if (x[i].IsZero()) {
8143 ASSERT_EQUAL_64(0, x[i]);
8144 } else {
8145 ASSERT_EQUAL_64(literal_base * i, x[i]);
8146 }
8147 }
8148
8149 TEARDOWN();
8150 }
8151
8152
8153 TEST(push_pop_jssp_simple_32) {
8154 INIT_V8();
8155 for (int claim = 0; claim <= 8; claim++) {
8156 for (int count = 0; count <= 8; count++) {
8157 PushPopJsspSimpleHelper(count, claim, kWRegSize,
8158 PushPopByFour, PushPopByFour);
8159 PushPopJsspSimpleHelper(count, claim, kWRegSize,
8160 PushPopByFour, PushPopRegList);
8161 PushPopJsspSimpleHelper(count, claim, kWRegSize,
8162 PushPopRegList, PushPopByFour);
8163 PushPopJsspSimpleHelper(count, claim, kWRegSize,
8164 PushPopRegList, PushPopRegList);
8165 }
8166 // Test with the maximum number of registers.
8167 PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kWRegSize,
8168 PushPopByFour, PushPopByFour);
8169 PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kWRegSize,
8170 PushPopByFour, PushPopRegList);
8171 PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kWRegSize,
8172 PushPopRegList, PushPopByFour);
8173 PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kWRegSize,
8174 PushPopRegList, PushPopRegList);
8175 }
8176 }
8177
8178
8179 TEST(push_pop_jssp_simple_64) {
8180 INIT_V8();
8181 for (int claim = 0; claim <= 8; claim++) {
8182 for (int count = 0; count <= 8; count++) {
8183 PushPopJsspSimpleHelper(count, claim, kXRegSize,
8184 PushPopByFour, PushPopByFour);
8185 PushPopJsspSimpleHelper(count, claim, kXRegSize,
8186 PushPopByFour, PushPopRegList);
8187 PushPopJsspSimpleHelper(count, claim, kXRegSize,
8188 PushPopRegList, PushPopByFour);
8189 PushPopJsspSimpleHelper(count, claim, kXRegSize,
8190 PushPopRegList, PushPopRegList);
8191 }
8192 // Test with the maximum number of registers.
8193 PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kXRegSize,
8194 PushPopByFour, PushPopByFour);
8195 PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kXRegSize,
8196 PushPopByFour, PushPopRegList);
8197 PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kXRegSize,
8198 PushPopRegList, PushPopByFour);
8199 PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kXRegSize,
8200 PushPopRegList, PushPopRegList);
8201 }
8202 }
8203
8204
8205 // A sentinel value for the reg_count argument of the PushPopFPJssp* tests:
8206 // passing it makes the helpers use the maximum number of FP registers.
8207 static int const kPushPopFPJsspMaxRegCount = -1;
8208
8209 // Test a simple push-pop pattern:
8210 // * Claim <claim> bytes to set the stack alignment.
8211 // * Push <reg_count> FP registers with size <reg_size>.
8212 // * Clobber the register contents.
8213 // * Pop <reg_count> FP registers to restore the original contents.
8214 // * Drop <claim> bytes to restore the original stack pointer.
8215 //
8216 // Different push and pop methods can be specified independently to test for
8217 // proper word-endian behaviour.
8218 static void PushPopFPJsspSimpleHelper(int reg_count,
8219 int claim,
8220 int reg_size,
8221 PushPopMethod push_method,
8222 PushPopMethod pop_method) {
8223 SETUP();
8224
8225 START();
8226
8227 // We can use any floating-point register; unlike x8 and x9, none of them
8228 // are reserved for debug code.
8229 static RegList const allowed = ~0;
8230 if (reg_count == kPushPopFPJsspMaxRegCount) {
8231 reg_count = CountSetBits(allowed, kNumberOfFPRegisters);
8232 }
8233 // Work out which registers to use, based on reg_size.
8234 FPRegister v[kNumberOfRegisters];
8235 FPRegister d[kNumberOfRegisters];
8236 RegList list = PopulateFPRegisterArray(NULL, d, v, reg_size, reg_count,
8237 allowed);
8238
8239 // The literal base is chosen to have two useful properties:
8240 // * When multiplied (using an integer) by small values (such as a register
8241 // index), this value is clearly readable in the result.
8242 // * The value is not formed from repeating fixed-size smaller values, so it
8243 // can be used to detect endianness-related errors.
8244 // * It is never a floating-point NaN, and will therefore always compare
8245 // equal to itself.
8246 uint64_t literal_base = 0x0100001000100101UL;
8247
8248 {
8249 ASSERT(__ StackPointer().Is(csp));
8250 __ Mov(jssp, __ StackPointer());
8251 __ SetStackPointer(jssp);
8252
8253 int i;
8254
8255 // Initialize the registers, using X registers to load the literal.
8256 __ Mov(x0, 0);
8257 __ Mov(x1, literal_base);
8258 for (i = 0; i < reg_count; i++) {
8259 // Always write into the D register, to ensure that the upper word is
8260 // properly ignored by Push when testing S registers.
8261 __ Fmov(d[i], x0);
8262 // Calculate the next literal.
8263 __ Add(x0, x0, x1);
8264 }
8265
8266 // Claim memory first, as requested.
8267 __ Claim(claim, kByteSizeInBytes);
8268
8269 switch (push_method) {
8270 case PushPopByFour:
8271 // Push high-numbered registers first (to the highest addresses).
8272 for (i = reg_count; i >= 4; i -= 4) {
8273 __ Push(v[i-1], v[i-2], v[i-3], v[i-4]);
8274 }
8275 // Finish off the leftovers.
8276 switch (i) {
8277 case 3: __ Push(v[2], v[1], v[0]); break;
8278 case 2: __ Push(v[1], v[0]); break;
8279 case 1: __ Push(v[0]); break;
8280 default: ASSERT(i == 0); break;
8281 }
8282 break;
8283 case PushPopRegList:
8284 __ PushSizeRegList(list, reg_size, CPURegister::kFPRegister);
8285 break;
8286 }
8287
8288 // Clobber all the registers, to ensure that they get repopulated by Pop.
8289 ClobberFP(&masm, list);
8290
8291 switch (pop_method) {
8292 case PushPopByFour:
8293 // Pop low-numbered registers first (from the lowest addresses).
8294 for (i = 0; i <= (reg_count-4); i += 4) {
8295 __ Pop(v[i], v[i+1], v[i+2], v[i+3]);
8296 }
8297 // Finish off the leftovers.
8298 switch (reg_count - i) {
8299 case 3: __ Pop(v[i], v[i+1], v[i+2]); break;
8300 case 2: __ Pop(v[i], v[i+1]); break;
8301 case 1: __ Pop(v[i]); break;
8302 default: ASSERT(i == reg_count); break;
8303 }
8304 break;
8305 case PushPopRegList:
8306 __ PopSizeRegList(list, reg_size, CPURegister::kFPRegister);
8307 break;
8308 }
8309
8310 // Drop memory to restore jssp.
8311 __ Drop(claim, kByteSizeInBytes);
8312
8313 __ Mov(csp, __ StackPointer());
8314 __ SetStackPointer(csp);
8315 }
8316
8317 END();
8318
8319 RUN();
8320
8321 // Check that the register contents were preserved.
8322 // Always use ASSERT_EQUAL_FP64, even when testing S registers, so we can
8323 // test that the upper word was properly cleared by Pop.
8324 literal_base &= (0xffffffffffffffffUL >> (64-reg_size));
8325 for (int i = 0; i < reg_count; i++) {
8326 uint64_t literal = literal_base * i;
8327 double expected;
8328 memcpy(&expected, &literal, sizeof(expected));
8329 ASSERT_EQUAL_FP64(expected, d[i]);
8330 }
8331
8332 TEARDOWN();
8333 }
8334
8335
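// Editorial sketch, not part of the original CL: the FP helper above relies
// on literal_base * i never being a NaN bit pattern. An IEEE-754 double is a
// NaN only when all eleven exponent bits (bits 62-52) are set; for the small
// multipliers used here the top bits stay well below that, as this host-side
// check (using the same memcpy reinterpretation as the final check loop)
// confirms.
//
//   #include <cmath>
//   #include <cstdint>
//   #include <cstdio>
//   #include <cstring>
//
//   int main() {
//     const uint64_t literal_base = 0x0100001000100101UL;
//     for (int i = 0; i < 32; i++) {
//       uint64_t bits = literal_base * i;
//       double value;
//       memcpy(&value, &bits, sizeof(value));
//       if (std::isnan(value)) printf("unexpected NaN at i = %d\n", i);
//     }
//     printf("no NaN patterns found\n");
//     return 0;
//   }
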
8336 TEST(push_pop_fp_jssp_simple_32) {
8337 INIT_V8();
8338 for (int claim = 0; claim <= 8; claim++) {
8339 for (int count = 0; count <= 8; count++) {
8340 PushPopFPJsspSimpleHelper(count, claim, kSRegSize,
8341 PushPopByFour, PushPopByFour);
8342 PushPopFPJsspSimpleHelper(count, claim, kSRegSize,
8343 PushPopByFour, PushPopRegList);
8344 PushPopFPJsspSimpleHelper(count, claim, kSRegSize,
8345 PushPopRegList, PushPopByFour);
8346 PushPopFPJsspSimpleHelper(count, claim, kSRegSize,
8347 PushPopRegList, PushPopRegList);
8348 }
8349 // Test with the maximum number of registers.
8350 PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kSRegSize,
8351 PushPopByFour, PushPopByFour);
8352 PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kSRegSize,
8353 PushPopByFour, PushPopRegList);
8354 PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kSRegSize,
8355 PushPopRegList, PushPopByFour);
8356 PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kSRegSize,
8357 PushPopRegList, PushPopRegList);
8358 }
8359 }
8360
8361
8362 TEST(push_pop_fp_jssp_simple_64) {
8363 INIT_V8();
8364 for (int claim = 0; claim <= 8; claim++) {
8365 for (int count = 0; count <= 8; count++) {
8366 PushPopFPJsspSimpleHelper(count, claim, kDRegSize,
8367 PushPopByFour, PushPopByFour);
8368 PushPopFPJsspSimpleHelper(count, claim, kDRegSize,
8369 PushPopByFour, PushPopRegList);
8370 PushPopFPJsspSimpleHelper(count, claim, kDRegSize,
8371 PushPopRegList, PushPopByFour);
8372 PushPopFPJsspSimpleHelper(count, claim, kDRegSize,
8373 PushPopRegList, PushPopRegList);
8374 }
8375 // Test with the maximum number of registers.
8376 PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kDRegSize,
8377 PushPopByFour, PushPopByFour);
8378 PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kDRegSize,
8379 PushPopByFour, PushPopRegList);
8380 PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kDRegSize,
8381 PushPopRegList, PushPopByFour);
8382 PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kDRegSize,
8383 PushPopRegList, PushPopRegList);
8384 }
8385 }
8386
8387
8388 // Push and pop data using an overlapping combination of Push/Pop and
8389 // RegList-based methods.
8390 static void PushPopJsspMixedMethodsHelper(int claim, int reg_size) {
8391 SETUP();
8392
8393 // Registers x8 and x9 are used by the macro assembler for debug code (for
8394 // example in 'Pop'), so we can't use them here. We can't use jssp because it
8395 // will be the stack pointer for this test.
8396 static RegList const allowed =
8397 ~(x8.Bit() | x9.Bit() | jssp.Bit() | xzr.Bit());
8398 // Work out which registers to use, based on reg_size.
8399 Register r[10];
8400 Register x[10];
8401 PopulateRegisterArray(NULL, x, r, reg_size, 10, allowed);
8402
8403 // Calculate some handy register lists.
8404 RegList r0_to_r3 = 0;
8405 for (int i = 0; i <= 3; i++) {
8406 r0_to_r3 |= x[i].Bit();
8407 }
8408 RegList r4_to_r5 = 0;
8409 for (int i = 4; i <= 5; i++) {
8410 r4_to_r5 |= x[i].Bit();
8411 }
8412 RegList r6_to_r9 = 0;
8413 for (int i = 6; i <= 9; i++) {
8414 r6_to_r9 |= x[i].Bit();
8415 }
8416
8417 // The literal base is chosen to have two useful properties:
8418 // * When multiplied by small values (such as a register index), this value
8419 // is clearly readable in the result.
8420 // * The value is not formed from repeating fixed-size smaller values, so it
8421 // can be used to detect endianness-related errors.
8422 uint64_t literal_base = 0x0100001000100101UL;
8423
8424 START();
8425 {
8426 ASSERT(__ StackPointer().Is(csp));
8427 __ Mov(jssp, __ StackPointer());
8428 __ SetStackPointer(jssp);
8429
8430 // Claim memory first, as requested.
8431 __ Claim(claim, kByteSizeInBytes);
8432
8433 __ Mov(x[3], literal_base * 3);
8434 __ Mov(x[2], literal_base * 2);
8435 __ Mov(x[1], literal_base * 1);
8436 __ Mov(x[0], literal_base * 0);
8437
8438 __ PushSizeRegList(r0_to_r3, reg_size);
8439 __ Push(r[3], r[2]);
8440
8441 Clobber(&masm, r0_to_r3);
8442 __ PopSizeRegList(r0_to_r3, reg_size);
8443
8444 __ Push(r[2], r[1], r[3], r[0]);
8445
8446 Clobber(&masm, r4_to_r5);
8447 __ Pop(r[4], r[5]);
8448 Clobber(&masm, r6_to_r9);
8449 __ Pop(r[6], r[7], r[8], r[9]);
8450
8451 // Drop memory to restore jssp.
8452 __ Drop(claim, kByteSizeInBytes);
8453
8454 __ Mov(csp, __ StackPointer());
8455 __ SetStackPointer(csp);
8456 }
8457
8458 END();
8459
8460 RUN();
8461
8462 // Always use ASSERT_EQUAL_64, even when testing W registers, so we can test
8463 // that the upper word was properly cleared by Pop.
8464 literal_base &= (0xffffffffffffffffUL >> (64-reg_size));
8465
8466 ASSERT_EQUAL_64(literal_base * 3, x[9]);
8467 ASSERT_EQUAL_64(literal_base * 2, x[8]);
8468 ASSERT_EQUAL_64(literal_base * 0, x[7]);
8469 ASSERT_EQUAL_64(literal_base * 3, x[6]);
8470 ASSERT_EQUAL_64(literal_base * 1, x[5]);
8471 ASSERT_EQUAL_64(literal_base * 2, x[4]);
8472
8473 TEARDOWN();
8474 }
8475
8476
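// Editorial sketch, not part of the original CL: the expected values in the
// ASSERT_EQUAL_64 checks of the helper above can be derived by replaying the
// push/pop sequence on an ordinary LIFO. The ordering assumed here matches
// the MacroAssembler conventions: Push(a, b) == Push(a); Push(b),
// Pop(a, b) == Pop(a); Pop(b), PushSizeRegList pushes the highest-numbered
// register first, and PopSizeRegList pops into the lowest-numbered first.
//
//   #include <cstdint>
//   #include <cstdio>
//   #include <stack>
//
//   int main() {
//     const uint64_t base = 0x0100001000100101UL;
//     uint64_t r[10] = {0 * base, 1 * base, 2 * base, 3 * base};
//     std::stack<uint64_t> s;
//
//     for (int i = 3; i >= 0; i--) s.push(r[i]);  // PushSizeRegList(r0_to_r3)
//     s.push(r[3]); s.push(r[2]);                 // Push(r[3], r[2])
//     for (int i = 0; i <= 3; i++) {              // PopSizeRegList(r0_to_r3)
//       r[i] = s.top(); s.pop();
//     }
//     s.push(r[2]); s.push(r[1]);                 // Push(r[2], r[1],
//     s.push(r[3]); s.push(r[0]);                 //         r[3], r[0])
//     for (int i = 4; i <= 9; i++) {              // Pop(r[4], r[5]); Pop(...)
//       r[i] = s.top(); s.pop();
//     }
//
//     // Prints 2 1 3 0 2 3 -- the multipliers asserted for x4..x9.
//     for (int i = 4; i <= 9; i++)
//       printf("%llu ", static_cast<unsigned long long>(r[i] / base));
//     printf("\n");
//     return 0;
//   }
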
8477 TEST(push_pop_jssp_mixed_methods_64) {
8478 INIT_V8();
8479 for (int claim = 0; claim <= 8; claim++) {
8480 PushPopJsspMixedMethodsHelper(claim, kXRegSize);
8481 }
8482 }
8483
8484
8485 TEST(push_pop_jssp_mixed_methods_32) {
8486 INIT_V8();
8487 for (int claim = 0; claim <= 8; claim++) {
8488 PushPopJsspMixedMethodsHelper(claim, kWRegSize);
8489 }
8490 }
8491
8492
8493 // Push and pop data using overlapping X- and W-sized quantities.
8494 static void PushPopJsspWXOverlapHelper(int reg_count, int claim) {
8495 // This test emits rather a lot of code.
8496 SETUP_SIZE(BUF_SIZE * 2);
8497
8498 // Work out which registers to use, based on reg_size.
8499 Register tmp = x8;
8500 static RegList const allowed = ~(tmp.Bit() | jssp.Bit());
8501 if (reg_count == kPushPopJsspMaxRegCount) {
8502 reg_count = CountSetBits(allowed, kNumberOfRegisters);
8503 }
8504 Register w[kNumberOfRegisters];
8505 Register x[kNumberOfRegisters];
8506 RegList list = PopulateRegisterArray(w, x, NULL, 0, reg_count, allowed);
8507
8508 // The number of W-sized slots we expect to pop. When we pop, we alternate
8509 // between W and X registers, so we need reg_count * 1.5 slots, rounded down.
8510 int const requested_w_slots = reg_count + reg_count / 2;
8511
8512 // Track what _should_ be on the stack, using W-sized slots.
8513 static int const kMaxWSlots = kNumberOfRegisters + kNumberOfRegisters / 2;
8514 uint32_t stack[kMaxWSlots];
8515 for (int i = 0; i < kMaxWSlots; i++) {
8516 stack[i] = 0xdeadbeef;
8517 }
8518
8519 // The literal base is chosen to have two useful properties:
8520 // * When multiplied by small values (such as a register index), this value
8521 // is clearly readable in the result.
8522 // * The value is not formed from repeating fixed-size smaller values, so it
8523 // can be used to detect endianness-related errors.
8524 static uint64_t const literal_base = 0x0100001000100101UL;
8525 static uint64_t const literal_base_hi = literal_base >> 32;
8526 static uint64_t const literal_base_lo = literal_base & 0xffffffff;
8527 static uint64_t const literal_base_w = literal_base & 0xffffffff;
8528
8529 START();
8530 {
8531 ASSERT(__ StackPointer().Is(csp));
8532 __ Mov(jssp, __ StackPointer());
8533 __ SetStackPointer(jssp);
8534
8535 // Initialize the registers.
8536 for (int i = 0; i < reg_count; i++) {
8537 // Always write into the X register, to ensure that the upper word is
8538 // properly ignored by Push when testing W registers.
8539 if (!x[i].IsZero()) {
8540 __ Mov(x[i], literal_base * i);
8541 }
8542 }
8543
8544 // Claim memory first, as requested.
8545 __ Claim(claim, kByteSizeInBytes);
8546
8547 // The push-pop pattern is as follows:
8548 // Push: Pop:
8549 // x[0](hi) -> w[0]
8550 // x[0](lo) -> x[1](hi)
8551 // w[1] -> x[1](lo)
8552 // w[1] -> w[2]
8553 // x[2](hi) -> x[2](hi)
8554 // x[2](lo) -> x[2](lo)
8555 // x[2](hi) -> w[3]
8556 // x[2](lo) -> x[4](hi)
8557 // x[2](hi) -> x[4](lo)
8558 // x[2](lo) -> w[5]
8559 // w[3] -> x[5](hi)
8560 // w[3] -> x[6](lo)
8561 // w[3] -> w[7]
8562 // w[3] -> x[8](hi)
8563 // x[4](hi) -> x[8](lo)
8564 // x[4](lo) -> w[9]
8565 // ... pattern continues ...
8566 //
8567 // That is, registers are pushed starting with the lower numbers,
8568 // alternating between x and w registers, and pushing i%4+1 copies of each,
8569 // where i is the register number.
8570 // Registers are popped starting with the higher numbers, alternating
8571 // between x and w registers, and popping only one register at a time.
8572 //
8573 // This pattern provides a wide variety of alignment effects and overlaps.
8574
8575 // ---- Push ----
8576
8577 int active_w_slots = 0;
8578 for (int i = 0; active_w_slots < requested_w_slots; i++) {
8579 ASSERT(i < reg_count);
8580 // In order to test various arguments to PushMultipleTimes, and to try to
8581 // exercise different alignment and overlap effects, we push each
8582 // register a different number of times.
8583 int times = i % 4 + 1;
8584 if (i & 1) {
8585 // Push odd-numbered registers as W registers.
8586 if (i & 2) {
8587 __ PushMultipleTimes(w[i], times);
8588 } else {
8589 // Use a register to specify the count.
8590 __ Mov(tmp.W(), times);
8591 __ PushMultipleTimes(w[i], tmp.W());
8592 }
8593 // Fill in the expected stack slots.
8594 for (int j = 0; j < times; j++) {
8595 if (w[i].Is(wzr)) {
8596 // The zero register always writes zeroes.
8597 stack[active_w_slots++] = 0;
8598 } else {
8599 stack[active_w_slots++] = literal_base_w * i;
8600 }
8601 }
8602 } else {
8603 // Push even-numbered registers as X registers.
8604 if (i & 2) {
8605 __ PushMultipleTimes(x[i], times);
8606 } else {
8607 // Use a register to specify the count.
8608 __ Mov(tmp, times);
8609 __ PushMultipleTimes(x[i], tmp);
8610 }
8611 // Fill in the expected stack slots.
8612 for (int j = 0; j < times; j++) {
8613 if (x[i].IsZero()) {
8614 // The zero register always writes zeroes.
8615 stack[active_w_slots++] = 0;
8616 stack[active_w_slots++] = 0;
8617 } else {
8618 stack[active_w_slots++] = literal_base_hi * i;
8619 stack[active_w_slots++] = literal_base_lo * i;
8620 }
8621 }
8622 }
8623 }
8624 // Because we were pushing several registers at a time, we probably pushed
8625 // more than we needed to.
8626 if (active_w_slots > requested_w_slots) {
8627 __ Drop(active_w_slots - requested_w_slots, kWRegSizeInBytes);
8628 // Bump the number of active W-sized slots back to where it should be,
8629 // and fill the empty space with a dummy value.
8630 do {
8631 stack[active_w_slots--] = 0xdeadbeef;
8632 } while (active_w_slots > requested_w_slots);
8633 }
8634
8635 // ---- Pop ----
8636
8637 Clobber(&masm, list);
8638
8639 // If popping an even number of registers, the first one will be X-sized.
8640 // Otherwise, the first one will be W-sized.
8641 bool next_is_64 = !(reg_count & 1);
8642 for (int i = reg_count-1; i >= 0; i--) {
8643 if (next_is_64) {
8644 __ Pop(x[i]);
8645 active_w_slots -= 2;
8646 } else {
8647 __ Pop(w[i]);
8648 active_w_slots -= 1;
8649 }
8650 next_is_64 = !next_is_64;
8651 }
8652 ASSERT(active_w_slots == 0);
8653
8654 // Drop memory to restore jssp.
8655 __ Drop(claim, kByteSizeInBytes);
8656
8657 __ Mov(csp, __ StackPointer());
8658 __ SetStackPointer(csp);
8659 }
8660
8661 END();
8662
8663 RUN();
8664
8665 int slot = 0;
8666 for (int i = 0; i < reg_count; i++) {
8667 // Even-numbered registers were written as W registers.
8668 // Odd-numbered registers were written as X registers.
8669 bool expect_64 = (i & 1);
8670 uint64_t expected;
8671
8672 if (expect_64) {
8673 uint64_t hi = stack[slot++];
8674 uint64_t lo = stack[slot++];
8675 expected = (hi << 32) | lo;
8676 } else {
8677 expected = stack[slot++];
8678 }
8679
8680 // Always use ASSERT_EQUAL_64, even when testing W registers, so we can
8681 // test that the upper word was properly cleared by Pop.
8682 if (x[i].IsZero()) {
8683 ASSERT_EQUAL_64(0, x[i]);
8684 } else {
8685 ASSERT_EQUAL_64(expected, x[i]);
8686 }
8687 }
8688 ASSERT(slot == requested_w_slots);
8689
8690 TEARDOWN();
8691 }
8692
8693
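// Editorial sketch, not part of the original CL: host-side bookkeeping for
// the push phase of the helper above. Register i is pushed (i % 4 + 1) times,
// W registers (odd i) filling one W-sized slot per copy and X registers
// (even i) two; pushing stops once requested_w_slots slots are filled, and
// any excess is dropped.
//
//   #include <cstdio>
//
//   int main() {
//     for (int reg_count = 1; reg_count <= 8; reg_count++) {
//       int requested_w_slots = reg_count + reg_count / 2;  // reg_count * 1.5
//       int active_w_slots = 0;
//       int regs_used = 0;
//       while (active_w_slots < requested_w_slots) {
//         int times = regs_used % 4 + 1;
//         active_w_slots += (regs_used & 1) ? times : 2 * times;
//         regs_used++;
//       }
//       printf("reg_count=%d: %d regs pushed, %d slots filled, %d requested\n",
//              reg_count, regs_used, active_w_slots, requested_w_slots);
//     }
//     return 0;
//   }
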
8694 TEST(push_pop_jssp_wx_overlap) {
8695 INIT_V8();
8696 for (int claim = 0; claim <= 8; claim++) {
8697 for (int count = 1; count <= 8; count++) {
8698 PushPopJsspWXOverlapHelper(count, claim);
8699 PushPopJsspWXOverlapHelper(count, claim);
8700 PushPopJsspWXOverlapHelper(count, claim);
8701 PushPopJsspWXOverlapHelper(count, claim);
8702 }
8703 // Test with the maximum number of registers.
8704 PushPopJsspWXOverlapHelper(kPushPopJsspMaxRegCount, claim);
8705 PushPopJsspWXOverlapHelper(kPushPopJsspMaxRegCount, claim);
8706 PushPopJsspWXOverlapHelper(kPushPopJsspMaxRegCount, claim);
8707 PushPopJsspWXOverlapHelper(kPushPopJsspMaxRegCount, claim);
8708 }
8709 }
8710
8711
8712 TEST(push_pop_csp) {
8713 INIT_V8();
8714 SETUP();
8715
8716 START();
8717
8718 ASSERT(csp.Is(__ StackPointer()));
8719
8720 __ Mov(x3, 0x3333333333333333UL);
8721 __ Mov(x2, 0x2222222222222222UL);
8722 __ Mov(x1, 0x1111111111111111UL);
8723 __ Mov(x0, 0x0000000000000000UL);
8724 __ Claim(2);
8725 __ PushXRegList(x0.Bit() | x1.Bit() | x2.Bit() | x3.Bit());
8726 __ Push(x3, x2);
8727 __ PopXRegList(x0.Bit() | x1.Bit() | x2.Bit() | x3.Bit());
8728 __ Push(x2, x1, x3, x0);
8729 __ Pop(x4, x5);
8730 __ Pop(x6, x7, x8, x9);
8731
8732 __ Claim(2);
8733 __ PushWRegList(w0.Bit() | w1.Bit() | w2.Bit() | w3.Bit());
8734 __ Push(w3, w1, w2, w0);
8735 __ PopWRegList(w10.Bit() | w11.Bit() | w12.Bit() | w13.Bit());
8736 __ Pop(w14, w15, w16, w17);
8737
8738 __ Claim(2);
8739 __ Push(w2, w2, w1, w1);
8740 __ Push(x3, x3);
8741 __ Pop(w18, w19, w20, w21);
8742 __ Pop(x22, x23);
8743
8744 __ Claim(2);
8745 __ PushXRegList(x1.Bit() | x22.Bit());
8746 __ PopXRegList(x24.Bit() | x26.Bit());
8747
8748 __ Claim(2);
8749 __ PushWRegList(w1.Bit() | w2.Bit() | w4.Bit() | w22.Bit());
8750 __ PopWRegList(w25.Bit() | w27.Bit() | w28.Bit() | w29.Bit());
8751
8752 __ Claim(2);
8753 __ PushXRegList(0);
8754 __ PopXRegList(0);
8755 __ PushXRegList(0xffffffff);
8756 __ PopXRegList(0xffffffff);
8757 __ Drop(12);
8758
8759 END();
8760
8761 RUN();
8762
8763 ASSERT_EQUAL_64(0x1111111111111111UL, x3);
8764 ASSERT_EQUAL_64(0x0000000000000000UL, x2);
8765 ASSERT_EQUAL_64(0x3333333333333333UL, x1);
8766 ASSERT_EQUAL_64(0x2222222222222222UL, x0);
8767 ASSERT_EQUAL_64(0x3333333333333333UL, x9);
8768 ASSERT_EQUAL_64(0x2222222222222222UL, x8);
8769 ASSERT_EQUAL_64(0x0000000000000000UL, x7);
8770 ASSERT_EQUAL_64(0x3333333333333333UL, x6);
8771 ASSERT_EQUAL_64(0x1111111111111111UL, x5);
8772 ASSERT_EQUAL_64(0x2222222222222222UL, x4);
8773
8774 ASSERT_EQUAL_32(0x11111111U, w13);
8775 ASSERT_EQUAL_32(0x33333333U, w12);
8776 ASSERT_EQUAL_32(0x00000000U, w11);
8777 ASSERT_EQUAL_32(0x22222222U, w10);
8778 ASSERT_EQUAL_32(0x11111111U, w17);
8779 ASSERT_EQUAL_32(0x00000000U, w16);
8780 ASSERT_EQUAL_32(0x33333333U, w15);
8781 ASSERT_EQUAL_32(0x22222222U, w14);
8782
8783 ASSERT_EQUAL_32(0x11111111U, w18);
8784 ASSERT_EQUAL_32(0x11111111U, w19);
8785 ASSERT_EQUAL_32(0x11111111U, w20);
8786 ASSERT_EQUAL_32(0x11111111U, w21);
8787 ASSERT_EQUAL_64(0x3333333333333333UL, x22);
8788 ASSERT_EQUAL_64(0x0000000000000000UL, x23);
8789
8790 ASSERT_EQUAL_64(0x3333333333333333UL, x24);
8791 ASSERT_EQUAL_64(0x3333333333333333UL, x26);
8792
8793 ASSERT_EQUAL_32(0x33333333U, w25);
8794 ASSERT_EQUAL_32(0x00000000U, w27);
8795 ASSERT_EQUAL_32(0x22222222U, w28);
8796 ASSERT_EQUAL_32(0x33333333U, w29);
8797 TEARDOWN();
8798 }
8799
8800
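// Editorial sketch, not part of the original CL: while csp is the stack
// pointer it must stay 16-byte aligned, so every adjustment in the test above
// is a multiple of 16 bytes (assuming Claim's default unit is an X-sized
// slot).
//
//   #include <cstdio>
//
//   int main() {
//     const int kXRegSizeInBytes = 8;
//     const int kWRegSizeInBytes = 4;
//     printf("Claim(2)              -> %2d bytes\n", 2 * kXRegSizeInBytes);
//     printf("Push of 4 W registers -> %2d bytes\n", 4 * kWRegSizeInBytes);
//     printf("Push of 2 X registers -> %2d bytes\n", 2 * kXRegSizeInBytes);
//     return 0;  // All three print 16: alignment is preserved throughout.
//   }
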
8801 TEST(push_queued) {
8802 INIT_V8();
8803 SETUP();
8804
8805 START();
8806
8807 ASSERT(__ StackPointer().Is(csp));
8808 __ Mov(jssp, __ StackPointer());
8809 __ SetStackPointer(jssp);
8810
8811 MacroAssembler::PushPopQueue queue(&masm);
8812
8813 // Queue up registers.
8814 queue.Queue(x0);
8815 queue.Queue(x1);
8816 queue.Queue(x2);
8817 queue.Queue(x3);
8818
8819 queue.Queue(w4);
8820 queue.Queue(w5);
8821 queue.Queue(w6);
8822
8823 queue.Queue(d0);
8824 queue.Queue(d1);
8825
8826 queue.Queue(s2);
8827
8828 __ Mov(x0, 0x1234000000000000);
8829 __ Mov(x1, 0x1234000100010001);
8830 __ Mov(x2, 0x1234000200020002);
8831 __ Mov(x3, 0x1234000300030003);
8832 __ Mov(w4, 0x12340004);
8833 __ Mov(w5, 0x12340005);
8834 __ Mov(w6, 0x12340006);
8835 __ Fmov(d0, 123400.0);
8836 __ Fmov(d1, 123401.0);
8837 __ Fmov(s2, 123402.0);
8838
8839 // Actually push them.
8840 queue.PushQueued();
8841
8842 Clobber(&masm, CPURegList(CPURegister::kRegister, kXRegSize, 0, 6));
8843 Clobber(&masm, CPURegList(CPURegister::kFPRegister, kDRegSize, 0, 2));
8844
8845 // Pop them conventionally.
8846 __ Pop(s2);
8847 __ Pop(d1, d0);
8848 __ Pop(w6, w5, w4);
8849 __ Pop(x3, x2, x1, x0);
8850
8851 __ Mov(csp, __ StackPointer());
8852 __ SetStackPointer(csp);
8853
8854 END();
8855
8856 RUN();
8857
8858 ASSERT_EQUAL_64(0x1234000000000000, x0);
8859 ASSERT_EQUAL_64(0x1234000100010001, x1);
8860 ASSERT_EQUAL_64(0x1234000200020002, x2);
8861 ASSERT_EQUAL_64(0x1234000300030003, x3);
8862
8863 ASSERT_EQUAL_32(0x12340004, w4);
8864 ASSERT_EQUAL_32(0x12340005, w5);
8865 ASSERT_EQUAL_32(0x12340006, w6);
8866
8867 ASSERT_EQUAL_FP64(123400.0, d0);
8868 ASSERT_EQUAL_FP64(123401.0, d1);
8869
8870 ASSERT_EQUAL_FP32(123402.0, s2);
8871
8872 TEARDOWN();
8873 }
8874
8875
8876 TEST(pop_queued) {
8877 INIT_V8();
8878 SETUP();
8879
8880 START();
8881
8882 ASSERT(__ StackPointer().Is(csp));
8883 __ Mov(jssp, __ StackPointer());
8884 __ SetStackPointer(jssp);
8885
8886 MacroAssembler::PushPopQueue queue(&masm);
8887
8888 __ Mov(x0, 0x1234000000000000);
8889 __ Mov(x1, 0x1234000100010001);
8890 __ Mov(x2, 0x1234000200020002);
8891 __ Mov(x3, 0x1234000300030003);
8892 __ Mov(w4, 0x12340004);
8893 __ Mov(w5, 0x12340005);
8894 __ Mov(w6, 0x12340006);
8895 __ Fmov(d0, 123400.0);
8896 __ Fmov(d1, 123401.0);
8897 __ Fmov(s2, 123402.0);
8898
8899 // Push registers conventionally.
8900 __ Push(x0, x1, x2, x3);
8901 __ Push(w4, w5, w6);
8902 __ Push(d0, d1);
8903 __ Push(s2);
8904
8905 // Queue up a pop.
8906 queue.Queue(s2);
8907
8908 queue.Queue(d1);
8909 queue.Queue(d0);
8910
8911 queue.Queue(w6);
8912 queue.Queue(w5);
8913 queue.Queue(w4);
8914
8915 queue.Queue(x3);
8916 queue.Queue(x2);
8917 queue.Queue(x1);
8918 queue.Queue(x0);
8919
8920 Clobber(&masm, CPURegList(CPURegister::kRegister, kXRegSize, 0, 6));
8921 Clobber(&masm, CPURegList(CPURegister::kFPRegister, kDRegSize, 0, 2));
8922
8923 // Actually pop them.
8924 queue.PopQueued();
8925
8926 __ Mov(csp, __ StackPointer());
8927 __ SetStackPointer(csp);
8928
8929 END();
8930
8931 RUN();
8932
8933 ASSERT_EQUAL_64(0x1234000000000000, x0);
8934 ASSERT_EQUAL_64(0x1234000100010001, x1);
8935 ASSERT_EQUAL_64(0x1234000200020002, x2);
8936 ASSERT_EQUAL_64(0x1234000300030003, x3);
8937
8938 ASSERT_EQUAL_64(0x0000000012340004, x4);
8939 ASSERT_EQUAL_64(0x0000000012340005, x5);
8940 ASSERT_EQUAL_64(0x0000000012340006, x6);
8941
8942 ASSERT_EQUAL_FP64(123400.0, d0);
8943 ASSERT_EQUAL_FP64(123401.0, d1);
8944
8945 ASSERT_EQUAL_FP32(123402.0, s2);
8946
8947 TEARDOWN();
8948 }
8949
8950
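// Editorial sketch, not part of the original CL: it assumes PushQueued()
// pushes the queued registers in queue order, which is why the conventional
// pops in push_queued (and, symmetrically, the queued pops in pop_queued)
// name the registers in reverse.
//
//   #include <cstdio>
//   #include <string>
//   #include <vector>
//
//   int main() {
//     const std::vector<std::string> queue = {"x0", "x1", "x2", "x3", "w4",
//                                             "w5", "w6", "d0", "d1", "s2"};
//     // PushQueued() behaves like Push(queue[0]); Push(queue[1]); ... so a
//     // LIFO pop sequence yields the reverse order:
//     for (auto it = queue.rbegin(); it != queue.rend(); ++it) {
//       printf("%s ", it->c_str());  // s2 d1 d0 w6 w5 w4 x3 x2 x1 x0
//     }
//     printf("\n");
//     return 0;
//   }
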
8951 TEST(jump_both_smi) {
8952 INIT_V8();
8953 SETUP();
8954
8955 Label cond_pass_00, cond_pass_01, cond_pass_10, cond_pass_11;
8956 Label cond_fail_00, cond_fail_01, cond_fail_10, cond_fail_11;
8957 Label return1, return2, return3, done;
8958
8959 START();
8960
8961 __ Mov(x0, 0x5555555500000001UL); // A pointer.
8962 __ Mov(x1, 0xaaaaaaaa00000001UL); // A pointer.
8963 __ Mov(x2, 0x1234567800000000UL); // A smi.
8964 __ Mov(x3, 0x8765432100000000UL); // A smi.
8965 __ Mov(x4, 0xdead);
8966 __ Mov(x5, 0xdead);
8967 __ Mov(x6, 0xdead);
8968 __ Mov(x7, 0xdead);
8969
8970 __ JumpIfBothSmi(x0, x1, &cond_pass_00, &cond_fail_00);
8971 __ Bind(&return1);
8972 __ JumpIfBothSmi(x0, x2, &cond_pass_01, &cond_fail_01);
8973 __ Bind(&return2);
8974 __ JumpIfBothSmi(x2, x1, &cond_pass_10, &cond_fail_10);
8975 __ Bind(&return3);
8976 __ JumpIfBothSmi(x2, x3, &cond_pass_11, &cond_fail_11);
8977
8978 __ Bind(&cond_fail_00);
8979 __ Mov(x4, 0);
8980 __ B(&return1);
8981 __ Bind(&cond_pass_00);
8982 __ Mov(x4, 1);
8983 __ B(&return1);
8984
8985 __ Bind(&cond_fail_01);
8986 __ Mov(x5, 0);
8987 __ B(&return2);
8988 __ Bind(&cond_pass_01);
8989 __ Mov(x5, 1);
8990 __ B(&return2);
8991
8992 __ Bind(&cond_fail_10);
8993 __ Mov(x6, 0);
8994 __ B(&return3);
8995 __ Bind(&cond_pass_10);
8996 __ Mov(x6, 1);
8997 __ B(&return3);
8998
8999 __ Bind(&cond_fail_11);
9000 __ Mov(x7, 0);
9001 __ B(&done);
9002 __ Bind(&cond_pass_11);
9003 __ Mov(x7, 1);
9004
9005 __ Bind(&done);
9006
9007 END();
9008
9009 RUN();
9010
9011 ASSERT_EQUAL_64(0x5555555500000001UL, x0);
9012 ASSERT_EQUAL_64(0xaaaaaaaa00000001UL, x1);
9013 ASSERT_EQUAL_64(0x1234567800000000UL, x2);
9014 ASSERT_EQUAL_64(0x8765432100000000UL, x3);
9015 ASSERT_EQUAL_64(0, x4);
9016 ASSERT_EQUAL_64(0, x5);
9017 ASSERT_EQUAL_64(0, x6);
9018 ASSERT_EQUAL_64(1, x7);
9019
9020 TEARDOWN();
9021 }
9022
9023
9024 TEST(jump_either_smi) {
9025 INIT_V8();
9026 SETUP();
9027
9028 Label cond_pass_00, cond_pass_01, cond_pass_10, cond_pass_11;
9029 Label cond_fail_00, cond_fail_01, cond_fail_10, cond_fail_11;
9030 Label return1, return2, return3, done;
9031
9032 START();
9033
9034 __ Mov(x0, 0x5555555500000001UL); // A pointer.
9035 __ Mov(x1, 0xaaaaaaaa00000001UL); // A pointer.
9036 __ Mov(x2, 0x1234567800000000UL); // A smi.
9037 __ Mov(x3, 0x8765432100000000UL); // A smi.
9038 __ Mov(x4, 0xdead);
9039 __ Mov(x5, 0xdead);
9040 __ Mov(x6, 0xdead);
9041 __ Mov(x7, 0xdead);
9042
9043 __ JumpIfEitherSmi(x0, x1, &cond_pass_00, &cond_fail_00);
9044 __ Bind(&return1);
9045 __ JumpIfEitherSmi(x0, x2, &cond_pass_01, &cond_fail_01);
9046 __ Bind(&return2);
9047 __ JumpIfEitherSmi(x2, x1, &cond_pass_10, &cond_fail_10);
9048 __ Bind(&return3);
9049 __ JumpIfEitherSmi(x2, x3, &cond_pass_11, &cond_fail_11);
9050
9051 __ Bind(&cond_fail_00);
9052 __ Mov(x4, 0);
9053 __ B(&return1);
9054 __ Bind(&cond_pass_00);
9055 __ Mov(x4, 1);
9056 __ B(&return1);
9057
9058 __ Bind(&cond_fail_01);
9059 __ Mov(x5, 0);
9060 __ B(&return2);
9061 __ Bind(&cond_pass_01);
9062 __ Mov(x5, 1);
9063 __ B(&return2);
9064
9065 __ Bind(&cond_fail_10);
9066 __ Mov(x6, 0);
9067 __ B(&return3);
9068 __ Bind(&cond_pass_10);
9069 __ Mov(x6, 1);
9070 __ B(&return3);
9071
9072 __ Bind(&cond_fail_11);
9073 __ Mov(x7, 0);
9074 __ B(&done);
9075 __ Bind(&cond_pass_11);
9076 __ Mov(x7, 1);
9077
9078 __ Bind(&done);
9079
9080 END();
9081
9082 RUN();
9083
9084 ASSERT_EQUAL_64(0x5555555500000001UL, x0);
9085 ASSERT_EQUAL_64(0xaaaaaaaa00000001UL, x1);
9086 ASSERT_EQUAL_64(0x1234567800000000UL, x2);
9087 ASSERT_EQUAL_64(0x8765432100000000UL, x3);
9088 ASSERT_EQUAL_64(0, x4);
9089 ASSERT_EQUAL_64(1, x5);
9090 ASSERT_EQUAL_64(1, x6);
9091 ASSERT_EQUAL_64(1, x7);
9092
9093 TEARDOWN();
9094 }
9095
9096
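// Editorial sketch, not part of the original CL: it assumes the A64 smi
// representation used in the two tests above (32-bit payload in the upper
// word, tag bit 0 clear; heap pointers have bit 0 set). "Both smi" then
// reduces to testing bit 0 of (a | b) and "either smi" to bit 0 of (a & b),
// which is what the JumpIfBothSmi / JumpIfEitherSmi macros compute.
//
//   #include <cstdint>
//   #include <cstdio>
//
//   static bool BothSmi(uint64_t a, uint64_t b) { return ((a | b) & 1) == 0; }
//   static bool EitherSmi(uint64_t a, uint64_t b) { return ((a & b) & 1) == 0; }
//
//   int main() {
//     const uint64_t pointer = 0x5555555500000001UL;  // Tag bit set.
//     const uint64_t smi = 0x1234567800000000UL;      // Tag bit clear.
//     printf("BothSmi(pointer, smi)   = %d\n", BothSmi(pointer, smi));    // 0
//     printf("EitherSmi(pointer, smi) = %d\n", EitherSmi(pointer, smi));  // 1
//     printf("BothSmi(smi, smi)       = %d\n", BothSmi(smi, smi));        // 1
//     return 0;
//   }
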
9097 TEST(noreg) {
9098 // This test doesn't generate any code, but it verifies some invariants
9099 // related to NoReg.
9100 CHECK(NoReg.Is(NoFPReg));
9101 CHECK(NoFPReg.Is(NoReg));
9102 CHECK(NoReg.Is(NoCPUReg));
9103 CHECK(NoCPUReg.Is(NoReg));
9104 CHECK(NoFPReg.Is(NoCPUReg));
9105 CHECK(NoCPUReg.Is(NoFPReg));
9106
9107 CHECK(NoReg.IsNone());
9108 CHECK(NoFPReg.IsNone());
9109 CHECK(NoCPUReg.IsNone());
9110 }
9111
9112
9113 TEST(isvalid) {
9114 // This test doesn't generate any code, but it verifies some invariants
9115 // related to IsValid().
9116 CHECK(!NoReg.IsValid());
9117 CHECK(!NoFPReg.IsValid());
9118 CHECK(!NoCPUReg.IsValid());
9119
9120 CHECK(x0.IsValid());
9121 CHECK(w0.IsValid());
9122 CHECK(x30.IsValid());
9123 CHECK(w30.IsValid());
9124 CHECK(xzr.IsValid());
9125 CHECK(wzr.IsValid());
9126
9127 CHECK(csp.IsValid());
9128 CHECK(wcsp.IsValid());
9129
9130 CHECK(d0.IsValid());
9131 CHECK(s0.IsValid());
9132 CHECK(d31.IsValid());
9133 CHECK(s31.IsValid());
9134
9135 CHECK(x0.IsValidRegister());
9136 CHECK(w0.IsValidRegister());
9137 CHECK(xzr.IsValidRegister());
9138 CHECK(wzr.IsValidRegister());
9139 CHECK(csp.IsValidRegister());
9140 CHECK(wcsp.IsValidRegister());
9141 CHECK(!x0.IsValidFPRegister());
9142 CHECK(!w0.IsValidFPRegister());
9143 CHECK(!xzr.IsValidFPRegister());
9144 CHECK(!wzr.IsValidFPRegister());
9145 CHECK(!csp.IsValidFPRegister());
9146 CHECK(!wcsp.IsValidFPRegister());
9147
9148 CHECK(d0.IsValidFPRegister());
9149 CHECK(s0.IsValidFPRegister());
9150 CHECK(!d0.IsValidRegister());
9151 CHECK(!s0.IsValidRegister());
9152
9153 // Test the same as before, but using CPURegister types. This shouldn't make
9154 // any difference.
9155 CHECK(static_cast<CPURegister>(x0).IsValid());
9156 CHECK(static_cast<CPURegister>(w0).IsValid());
9157 CHECK(static_cast<CPURegister>(x30).IsValid());
9158 CHECK(static_cast<CPURegister>(w30).IsValid());
9159 CHECK(static_cast<CPURegister>(xzr).IsValid());
9160 CHECK(static_cast<CPURegister>(wzr).IsValid());
9161
9162 CHECK(static_cast<CPURegister>(csp).IsValid());
9163 CHECK(static_cast<CPURegister>(wcsp).IsValid());
9164
9165 CHECK(static_cast<CPURegister>(d0).IsValid());
9166 CHECK(static_cast<CPURegister>(s0).IsValid());
9167 CHECK(static_cast<CPURegister>(d31).IsValid());
9168 CHECK(static_cast<CPURegister>(s31).IsValid());
9169
9170 CHECK(static_cast<CPURegister>(x0).IsValidRegister());
9171 CHECK(static_cast<CPURegister>(w0).IsValidRegister());
9172 CHECK(static_cast<CPURegister>(xzr).IsValidRegister());
9173 CHECK(static_cast<CPURegister>(wzr).IsValidRegister());
9174 CHECK(static_cast<CPURegister>(csp).IsValidRegister());
9175 CHECK(static_cast<CPURegister>(wcsp).IsValidRegister());
9176 CHECK(!static_cast<CPURegister>(x0).IsValidFPRegister());
9177 CHECK(!static_cast<CPURegister>(w0).IsValidFPRegister());
9178 CHECK(!static_cast<CPURegister>(xzr).IsValidFPRegister());
9179 CHECK(!static_cast<CPURegister>(wzr).IsValidFPRegister());
9180 CHECK(!static_cast<CPURegister>(csp).IsValidFPRegister());
9181 CHECK(!static_cast<CPURegister>(wcsp).IsValidFPRegister());
9182
9183 CHECK(static_cast<CPURegister>(d0).IsValidFPRegister());
9184 CHECK(static_cast<CPURegister>(s0).IsValidFPRegister());
9185 CHECK(!static_cast<CPURegister>(d0).IsValidRegister());
9186 CHECK(!static_cast<CPURegister>(s0).IsValidRegister());
9187 }
9188
9189
9190 TEST(cpureglist_utils_x) {
9191 // This test doesn't generate any code, but it verifies the behaviour of
9192 // the CPURegList utility methods.
9193
9194 // Test a list of X registers.
9195 CPURegList test(x0, x1, x2, x3);
9196
9197 CHECK(test.IncludesAliasOf(x0));
9198 CHECK(test.IncludesAliasOf(x1));
9199 CHECK(test.IncludesAliasOf(x2));
9200 CHECK(test.IncludesAliasOf(x3));
9201 CHECK(test.IncludesAliasOf(w0));
9202 CHECK(test.IncludesAliasOf(w1));
9203 CHECK(test.IncludesAliasOf(w2));
9204 CHECK(test.IncludesAliasOf(w3));
9205
9206 CHECK(!test.IncludesAliasOf(x4));
9207 CHECK(!test.IncludesAliasOf(x30));
9208 CHECK(!test.IncludesAliasOf(xzr));
9209 CHECK(!test.IncludesAliasOf(csp));
9210 CHECK(!test.IncludesAliasOf(w4));
9211 CHECK(!test.IncludesAliasOf(w30));
9212 CHECK(!test.IncludesAliasOf(wzr));
9213 CHECK(!test.IncludesAliasOf(wcsp));
9214
9215 CHECK(!test.IncludesAliasOf(d0));
9216 CHECK(!test.IncludesAliasOf(d1));
9217 CHECK(!test.IncludesAliasOf(d2));
9218 CHECK(!test.IncludesAliasOf(d3));
9219 CHECK(!test.IncludesAliasOf(s0));
9220 CHECK(!test.IncludesAliasOf(s1));
9221 CHECK(!test.IncludesAliasOf(s2));
9222 CHECK(!test.IncludesAliasOf(s3));
9223
9224 CHECK(!test.IsEmpty());
9225
9226 CHECK(test.type() == x0.type());
9227
9228 CHECK(test.PopHighestIndex().Is(x3));
9229 CHECK(test.PopLowestIndex().Is(x0));
9230
9231 CHECK(test.IncludesAliasOf(x1));
9232 CHECK(test.IncludesAliasOf(x2));
9233 CHECK(test.IncludesAliasOf(w1));
9234 CHECK(test.IncludesAliasOf(w2));
9235 CHECK(!test.IncludesAliasOf(x0));
9236 CHECK(!test.IncludesAliasOf(x3));
9237 CHECK(!test.IncludesAliasOf(w0));
9238 CHECK(!test.IncludesAliasOf(w3));
9239
9240 CHECK(test.PopHighestIndex().Is(x2));
9241 CHECK(test.PopLowestIndex().Is(x1));
9242
9243 CHECK(!test.IncludesAliasOf(x1));
9244 CHECK(!test.IncludesAliasOf(x2));
9245 CHECK(!test.IncludesAliasOf(w1));
9246 CHECK(!test.IncludesAliasOf(w2));
9247
9248 CHECK(test.IsEmpty());
9249 }
9250
9251
9252 TEST(cpureglist_utils_w) {
9253 // This test doesn't generate any code, but it verifies the behaviour of
9254 // the CPURegList utility methods.
9255
9256 // Test a list of W registers.
9257 CPURegList test(w10, w11, w12, w13);
9258
9259 CHECK(test.IncludesAliasOf(x10));
9260 CHECK(test.IncludesAliasOf(x11));
9261 CHECK(test.IncludesAliasOf(x12));
9262 CHECK(test.IncludesAliasOf(x13));
9263 CHECK(test.IncludesAliasOf(w10));
9264 CHECK(test.IncludesAliasOf(w11));
9265 CHECK(test.IncludesAliasOf(w12));
9266 CHECK(test.IncludesAliasOf(w13));
9267
9268 CHECK(!test.IncludesAliasOf(x0));
9269 CHECK(!test.IncludesAliasOf(x9));
9270 CHECK(!test.IncludesAliasOf(x14));
9271 CHECK(!test.IncludesAliasOf(x30));
9272 CHECK(!test.IncludesAliasOf(xzr));
9273 CHECK(!test.IncludesAliasOf(csp));
9274 CHECK(!test.IncludesAliasOf(w0));
9275 CHECK(!test.IncludesAliasOf(w9));
9276 CHECK(!test.IncludesAliasOf(w14));
9277 CHECK(!test.IncludesAliasOf(w30));
9278 CHECK(!test.IncludesAliasOf(wzr));
9279 CHECK(!test.IncludesAliasOf(wcsp));
9280
9281 CHECK(!test.IncludesAliasOf(d10));
9282 CHECK(!test.IncludesAliasOf(d11));
9283 CHECK(!test.IncludesAliasOf(d12));
9284 CHECK(!test.IncludesAliasOf(d13));
9285 CHECK(!test.IncludesAliasOf(s10));
9286 CHECK(!test.IncludesAliasOf(s11));
9287 CHECK(!test.IncludesAliasOf(s12));
9288 CHECK(!test.IncludesAliasOf(s13));
9289
9290 CHECK(!test.IsEmpty());
9291
9292 CHECK(test.type() == w10.type());
9293
9294 CHECK(test.PopHighestIndex().Is(w13));
9295 CHECK(test.PopLowestIndex().Is(w10));
9296
9297 CHECK(test.IncludesAliasOf(x11));
9298 CHECK(test.IncludesAliasOf(x12));
9299 CHECK(test.IncludesAliasOf(w11));
9300 CHECK(test.IncludesAliasOf(w12));
9301 CHECK(!test.IncludesAliasOf(x10));
9302 CHECK(!test.IncludesAliasOf(x13));
9303 CHECK(!test.IncludesAliasOf(w10));
9304 CHECK(!test.IncludesAliasOf(w13));
9305
9306 CHECK(test.PopHighestIndex().Is(w12));
9307 CHECK(test.PopLowestIndex().Is(w11));
9308
9309 CHECK(!test.IncludesAliasOf(x11));
9310 CHECK(!test.IncludesAliasOf(x12));
9311 CHECK(!test.IncludesAliasOf(w11));
9312 CHECK(!test.IncludesAliasOf(w12));
9313
9314 CHECK(test.IsEmpty());
9315 }
9316
9317
9318 TEST(cpureglist_utils_d) {
9319 // This test doesn't generate any code, but it verifies the behaviour of
9320 // the CPURegList utility methods.
9321
9322 // Test a list of D registers.
9323 CPURegList test(d20, d21, d22, d23);
9324
9325 CHECK(test.IncludesAliasOf(d20));
9326 CHECK(test.IncludesAliasOf(d21));
9327 CHECK(test.IncludesAliasOf(d22));
9328 CHECK(test.IncludesAliasOf(d23));
9329 CHECK(test.IncludesAliasOf(s20));
9330 CHECK(test.IncludesAliasOf(s21));
9331 CHECK(test.IncludesAliasOf(s22));
9332 CHECK(test.IncludesAliasOf(s23));
9333
9334 CHECK(!test.IncludesAliasOf(d0));
9335 CHECK(!test.IncludesAliasOf(d19));
9336 CHECK(!test.IncludesAliasOf(d24));
9337 CHECK(!test.IncludesAliasOf(d31));
9338 CHECK(!test.IncludesAliasOf(s0));
9339 CHECK(!test.IncludesAliasOf(s19));
9340 CHECK(!test.IncludesAliasOf(s24));
9341 CHECK(!test.IncludesAliasOf(s31));
9342
9343 CHECK(!test.IncludesAliasOf(x20));
9344 CHECK(!test.IncludesAliasOf(x21));
9345 CHECK(!test.IncludesAliasOf(x22));
9346 CHECK(!test.IncludesAliasOf(x23));
9347 CHECK(!test.IncludesAliasOf(w20));
9348 CHECK(!test.IncludesAliasOf(w21));
9349 CHECK(!test.IncludesAliasOf(w22));
9350 CHECK(!test.IncludesAliasOf(w23));
9351
9352 CHECK(!test.IncludesAliasOf(xzr));
9353 CHECK(!test.IncludesAliasOf(wzr));
9354 CHECK(!test.IncludesAliasOf(csp));
9355 CHECK(!test.IncludesAliasOf(wcsp));
9356
9357 CHECK(!test.IsEmpty());
9358
9359 CHECK(test.type() == d20.type());
9360
9361 CHECK(test.PopHighestIndex().Is(d23));
9362 CHECK(test.PopLowestIndex().Is(d20));
9363
9364 CHECK(test.IncludesAliasOf(d21));
9365 CHECK(test.IncludesAliasOf(d22));
9366 CHECK(test.IncludesAliasOf(s21));
9367 CHECK(test.IncludesAliasOf(s22));
9368 CHECK(!test.IncludesAliasOf(d20));
9369 CHECK(!test.IncludesAliasOf(d23));
9370 CHECK(!test.IncludesAliasOf(s20));
9371 CHECK(!test.IncludesAliasOf(s23));
9372
9373 CHECK(test.PopHighestIndex().Is(d22));
9374 CHECK(test.PopLowestIndex().Is(d21));
9375
9376 CHECK(!test.IncludesAliasOf(d21));
9377 CHECK(!test.IncludesAliasOf(d22));
9378 CHECK(!test.IncludesAliasOf(s21));
9379 CHECK(!test.IncludesAliasOf(s22));
9380
9381 CHECK(test.IsEmpty());
9382 }
9383
9384
9385 TEST(cpureglist_utils_s) {
9386 // This test doesn't generate any code, but it verifies the behaviour of
9387 // the CPURegList utility methods.
9388
9389 // Test a list of S registers.
9390 CPURegList test(s20, s21, s22, s23);
9391
9392 // The type and size mechanisms are already covered, so here we just test
9393 // that lists of S registers alias individual D registers.
9394
9395 CHECK(test.IncludesAliasOf(d20));
9396 CHECK(test.IncludesAliasOf(d21));
9397 CHECK(test.IncludesAliasOf(d22));
9398 CHECK(test.IncludesAliasOf(d23));
9399 CHECK(test.IncludesAliasOf(s20));
9400 CHECK(test.IncludesAliasOf(s21));
9401 CHECK(test.IncludesAliasOf(s22));
9402 CHECK(test.IncludesAliasOf(s23));
9403 }
9404
9405
9406 TEST(cpureglist_utils_empty) {
9407 // This test doesn't generate any code, but it verifies the behaviour of
9408 // the CPURegList utility methods.
9409
9410 // Test an empty list.
9411 // Empty lists can have type and size properties. Check that we can create
9412 // them, and that they are empty.
9413 CPURegList reg32(CPURegister::kRegister, kWRegSize, 0);
9414 CPURegList reg64(CPURegister::kRegister, kXRegSize, 0);
9415 CPURegList fpreg32(CPURegister::kFPRegister, kSRegSize, 0);
9416 CPURegList fpreg64(CPURegister::kFPRegister, kDRegSize, 0);
9417
9418 CHECK(reg32.IsEmpty());
9419 CHECK(reg64.IsEmpty());
9420 CHECK(fpreg32.IsEmpty());
9421 CHECK(fpreg64.IsEmpty());
9422
9423 CHECK(reg32.PopLowestIndex().IsNone());
9424 CHECK(reg64.PopLowestIndex().IsNone());
9425 CHECK(fpreg32.PopLowestIndex().IsNone());
9426 CHECK(fpreg64.PopLowestIndex().IsNone());
9427
9428 CHECK(reg32.PopHighestIndex().IsNone());
9429 CHECK(reg64.PopHighestIndex().IsNone());
9430 CHECK(fpreg32.PopHighestIndex().IsNone());
9431 CHECK(fpreg64.PopHighestIndex().IsNone());
9432
9433 CHECK(reg32.IsEmpty());
9434 CHECK(reg64.IsEmpty());
9435 CHECK(fpreg32.IsEmpty());
9436 CHECK(fpreg64.IsEmpty());
9437 }
9438
9439
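// Editorial sketch, not part of the original CL: a plausible model of
// CPURegList as a 64-bit mask over register codes, showing why the
// PopLowestIndex / PopHighestIndex calls in the tests above drain the list
// from both ends. (__builtin_ctzll / __builtin_clzll assume GCC or Clang.)
//
//   #include <cstdint>
//   #include <cstdio>
//
//   struct RegListModel {
//     uint64_t bits;
//     int PopLowestIndex() {
//       if (bits == 0) return -1;           // Models returning NoCPUReg.
//       int index = __builtin_ctzll(bits);  // Lowest set bit.
//       bits &= bits - 1;                   // Clear it.
//       return index;
//     }
//     int PopHighestIndex() {
//       if (bits == 0) return -1;
//       int index = 63 - __builtin_clzll(bits);
//       bits &= ~(uint64_t{1} << index);
//       return index;
//     }
//   };
//
//   int main() {
//     RegListModel test = {0xf};  // {x0, x1, x2, x3}
//     printf("%d %d\n", test.PopHighestIndex(), test.PopLowestIndex());  // 3 0
//     printf("%d %d\n", test.PopHighestIndex(), test.PopLowestIndex());  // 2 1
//     printf("%d\n", test.PopLowestIndex());  // -1: the list is now empty.
//     return 0;
//   }
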
9440 TEST(printf) {
9441 INIT_V8();
9442 SETUP();
9443 START();
9444
9445 char const * test_plain_string = "Printf with no arguments.\n";
9446 char const * test_substring = "'This is a substring.'";
9447 RegisterDump before;
9448
9449 // Initialize x29 to the value of the stack pointer. We will use x29 as a
9450 // temporary stack pointer later, and initializing it in this way allows the
9451 // RegisterDump check to pass.
9452 __ Mov(x29, __ StackPointer());
9453
9454 // Test simple integer arguments.
9455 __ Mov(x0, 1234);
9456 __ Mov(x1, 0x1234);
9457
9458 // Test simple floating-point arguments.
9459 __ Fmov(d0, 1.234);
9460
9461 // Test pointer (string) arguments.
9462 __ Mov(x2, reinterpret_cast<uintptr_t>(test_substring));
9463
9464 // Test the maximum number of arguments, and sign extension.
9465 __ Mov(w3, 0xffffffff);
9466 __ Mov(w4, 0xffffffff);
9467 __ Mov(x5, 0xffffffffffffffff);
9468 __ Mov(x6, 0xffffffffffffffff);
9469 __ Fmov(s1, 1.234);
9470 __ Fmov(s2, 2.345);
9471 __ Fmov(d3, 3.456);
9472 __ Fmov(d4, 4.567);
9473
9474 // Test printing callee-saved registers.
9475 __ Mov(x28, 0x123456789abcdef);
9476 __ Fmov(d10, 42.0);
9477
9478 // Test with three arguments.
9479 __ Mov(x10, 3);
9480 __ Mov(x11, 40);
9481 __ Mov(x12, 500);
9482
9483 // x8 and x9 are used by debug code in part of the macro assembler. However,
9484 // Printf guarantees to preserve them (so we can use Printf in debug code),
9485 // and we need to test that they are properly preserved. The above code
9486 // shouldn't need to use them, but we initialize x8 and x9 last to be on the
9487 // safe side. This test still assumes that none of the code from
9488 // before->Dump() to the end of the test can clobber x8 or x9, so where
9489 // possible we use the Assembler directly to be safe.
9490 __ orr(x8, xzr, 0x8888888888888888);
9491 __ orr(x9, xzr, 0x9999999999999999);
9492
9493 // Check that we don't clobber any registers, except those that we explicitly
9494 // write results into.
9495 before.Dump(&masm);
9496
9497 __ Printf(test_plain_string); // NOLINT(runtime/printf)
9498 __ Printf("x0: %" PRId64", x1: 0x%08" PRIx64 "\n", x0, x1);
9499 __ Printf("d0: %f\n", d0);
9500 __ Printf("Test %%s: %s\n", x2);
9501 __ Printf("w3(uint32): %" PRIu32 "\nw4(int32): %" PRId32 "\n"
9502 "x5(uint64): %" PRIu64 "\nx6(int64): %" PRId64 "\n",
9503 w3, w4, x5, x6);
9504 __ Printf("%%f: %f\n%%g: %g\n%%e: %e\n%%E: %E\n", s1, s2, d3, d4);
9505 __ Printf("0x%08" PRIx32 ", 0x%016" PRIx64 "\n", x28, x28);
9506 __ Printf("%g\n", d10);
9507
9508 // Test with a different stack pointer.
9509 const Register old_stack_pointer = __ StackPointer();
9510 __ mov(x29, old_stack_pointer);
9511 __ SetStackPointer(x29);
9512 __ Printf("old_stack_pointer: 0x%016" PRIx64 "\n", old_stack_pointer);
9513 __ mov(old_stack_pointer, __ StackPointer());
9514 __ SetStackPointer(old_stack_pointer);
9515
9516 __ Printf("3=%u, 4=%u, 5=%u\n", x10, x11, x12);
9517
9518 END();
9519 RUN();
9520
9521 // We cannot easily test the output of the Printf sequences, and because
9522 // Printf preserves all registers by default, we can't look at the number of
9523 // bytes that were printed. However, the printf_no_preserve test should check
9524 // that, and here we just test that we didn't clobber any registers.
9525 ASSERT_EQUAL_REGISTERS(before);
9526
9527 TEARDOWN();
9528 }
9529
9530
9531 TEST(printf_no_preserve) {
9532 INIT_V8();
9533 SETUP();
9534 START();
9535
9536 char const * test_plain_string = "Printf with no arguments.\n";
9537 char const * test_substring = "'This is a substring.'";
9538
9539 __ PrintfNoPreserve(test_plain_string); // NOLINT(runtime/printf)
9540 __ Mov(x19, x0);
9541
9542 // Test simple integer arguments.
9543 __ Mov(x0, 1234);
9544 __ Mov(x1, 0x1234);
9545 __ PrintfNoPreserve("x0: %" PRId64", x1: 0x%08" PRIx64 "\n", x0, x1);
9546 __ Mov(x20, x0);
9547
9548 // Test simple floating-point arguments.
9549 __ Fmov(d0, 1.234);
9550 __ PrintfNoPreserve("d0: %f\n", d0);
9551 __ Mov(x21, x0);
9552
9553 // Test pointer (string) arguments.
9554 __ Mov(x2, reinterpret_cast<uintptr_t>(test_substring));
9555 __ PrintfNoPreserve("Test %%s: %s\n", x2);
9556 __ Mov(x22, x0);
9557
9558 // Test the maximum number of arguments, and sign extension.
9559 __ Mov(w3, 0xffffffff);
9560 __ Mov(w4, 0xffffffff);
9561 __ Mov(x5, 0xffffffffffffffff);
9562 __ Mov(x6, 0xffffffffffffffff);
9563 __ PrintfNoPreserve("w3(uint32): %" PRIu32 "\nw4(int32): %" PRId32 "\n"
9564 "x5(uint64): %" PRIu64 "\nx6(int64): %" PRId64 "\n",
9565 w3, w4, x5, x6);
9566 __ Mov(x23, x0);
9567
9568 __ Fmov(s1, 1.234);
9569 __ Fmov(s2, 2.345);
9570 __ Fmov(d3, 3.456);
9571 __ Fmov(d4, 4.567);
9572 __ PrintfNoPreserve("%%f: %f\n%%g: %g\n%%e: %e\n%%E: %E\n", s1, s2, d3, d4);
9573 __ Mov(x24, x0);
9574
9575 // Test printing callee-saved registers.
9576 __ Mov(x28, 0x123456789abcdef);
9577 __ PrintfNoPreserve("0x%08" PRIx32 ", 0x%016" PRIx64 "\n", x28, x28);
9578 __ Mov(x25, x0);
9579
9580 __ Fmov(d10, 42.0);
9581 __ PrintfNoPreserve("%g\n", d10);
9582 __ Mov(x26, x0);
9583
9584 // Test with a different stack pointer.
9585 const Register old_stack_pointer = __ StackPointer();
9586 __ Mov(x29, old_stack_pointer);
9587 __ SetStackPointer(x29);
9588
9589 __ PrintfNoPreserve("old_stack_pointer: 0x%016" PRIx64 "\n",
9590 old_stack_pointer);
9591 __ Mov(x27, x0);
9592
9593 __ Mov(old_stack_pointer, __ StackPointer());
9594 __ SetStackPointer(old_stack_pointer);
9595
9596 // Test with three arguments.
9597 __ Mov(x3, 3);
9598 __ Mov(x4, 40);
9599 __ Mov(x5, 500);
9600 __ PrintfNoPreserve("3=%u, 4=%u, 5=%u\n", x3, x4, x5);
9601 __ Mov(x28, x0);
9602
9603 END();
9604 RUN();
9605
9606 // We cannot easily test the exact output of the Printf sequences, but we can
9607 // use the return code to check that the string length was correct.
9608
9609 // Printf with no arguments.
9610 ASSERT_EQUAL_64(strlen(test_plain_string), x19);
9611 // x0: 1234, x1: 0x00001234
9612 ASSERT_EQUAL_64(25, x20);
9613 // d0: 1.234000
9614 ASSERT_EQUAL_64(13, x21);
9615 // Test %s: 'This is a substring.'
9616 ASSERT_EQUAL_64(32, x22);
9617 // w3(uint32): 4294967295
9618 // w4(int32): -1
9619 // x5(uint64): 18446744073709551615
9620 // x6(int64): -1
9621 ASSERT_EQUAL_64(23 + 14 + 33 + 14, x23);
9622 // %f: 1.234000
9623 // %g: 2.345
9624 // %e: 3.456000e+00
9625 // %E: 4.567000E+00
9626 ASSERT_EQUAL_64(13 + 10 + 17 + 17, x24);
9627 // 0x89abcdef, 0x0123456789abcdef
9628 ASSERT_EQUAL_64(31, x25);
9629 // 42
9630 ASSERT_EQUAL_64(3, x26);
9631 // old_stack_pointer: 0x00007fb037ae2370
9632 // Note: This is an example value, but the field width is fixed here so the
9633 // string length is still predictable.
9634 ASSERT_EQUAL_64(38, x27);
9635 // 3=3, 4=40, 5=500
9636 ASSERT_EQUAL_64(17, x28);
9637
9638 TEARDOWN();
9639 }
9640
9641
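// Editorial sketch, not part of the original CL: the magic numbers asserted
// above are just the lengths of the formatted strings, which can be
// cross-checked on the host with snprintf and the same format strings.
//
//   #include <cinttypes>
//   #include <cstdint>
//   #include <cstdio>
//
//   int main() {
//     char buf[128];
//     int len = snprintf(buf, sizeof(buf),
//                        "x0: %" PRId64 ", x1: 0x%08" PRIx64 "\n",
//                        static_cast<int64_t>(1234),
//                        static_cast<uint64_t>(0x1234));
//     printf("%d\n", len);  // 25, matching the x20 check.
//     len = snprintf(buf, sizeof(buf), "d0: %f\n", 1.234);
//     printf("%d\n", len);  // 13, matching the x21 check.
//     len = snprintf(buf, sizeof(buf), "3=%u, 4=%u, 5=%u\n", 3u, 40u, 500u);
//     printf("%d\n", len);  // 17, matching the x28 check.
//     return 0;
//   }
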
9642 // This is a V8-specific test.
9643 static void CopyFieldsHelper(CPURegList temps) {
9644 static const uint64_t kLiteralBase = 0x0100001000100101UL;
9645 static const uint64_t src[] = {kLiteralBase * 1,
9646 kLiteralBase * 2,
9647 kLiteralBase * 3,
9648 kLiteralBase * 4,
9649 kLiteralBase * 5,
9650 kLiteralBase * 6,
9651 kLiteralBase * 7,
9652 kLiteralBase * 8,
9653 kLiteralBase * 9,
9654 kLiteralBase * 10,
9655 kLiteralBase * 11};
9656 static const uint64_t src_tagged =
9657 reinterpret_cast<uint64_t>(src) + kHeapObjectTag;
9658
9659 static const unsigned kTestCount = sizeof(src) / sizeof(src[0]) + 1;
9660 uint64_t* dst[kTestCount];
9661 uint64_t dst_tagged[kTestCount];
9662
9663 // The first test will be to copy 0 fields. The destination (and source)
9664 // should not be accessed in any way.
9665 dst[0] = NULL;
9666 dst_tagged[0] = kHeapObjectTag;
9667
9668 // Allocate memory for each other test. Each test <n> will have <n> fields.
9669 // This is intended to exercise as many paths in CopyFields as possible.
9670 for (unsigned i = 1; i < kTestCount; i++) {
9671 dst[i] = new uint64_t[i];
9672 memset(dst[i], 0, i * sizeof(kLiteralBase));
9673 dst_tagged[i] = reinterpret_cast<uint64_t>(dst[i]) + kHeapObjectTag;
9674 }
9675
9676 SETUP();
9677 START();
9678
9679 __ Mov(x0, dst_tagged[0]);
9680 __ Mov(x1, 0);
9681 __ CopyFields(x0, x1, temps, 0);
9682 for (unsigned i = 1; i < kTestCount; i++) {
9683 __ Mov(x0, dst_tagged[i]);
9684 __ Mov(x1, src_tagged);
9685 __ CopyFields(x0, x1, temps, i);
9686 }
9687
9688 END();
9689 RUN();
9690 TEARDOWN();
9691
9692 for (unsigned i = 1; i < kTestCount; i++) {
9693 for (unsigned j = 0; j < i; j++) {
9694 CHECK(src[j] == dst[i][j]);
9695 }
9696 delete [] dst[i];
9697 }
9698 }
9699
9700
9701 // This is a V8-specific test.
9702 TEST(copyfields) {
9703 INIT_V8();
9704 CopyFieldsHelper(CPURegList(x10));
9705 CopyFieldsHelper(CPURegList(x10, x11));
9706 CopyFieldsHelper(CPURegList(x10, x11, x12));
9707 CopyFieldsHelper(CPURegList(x10, x11, x12, x13));
9708 }
9709
9710
9711 static void DoSmiAbsTest(int32_t value, bool must_fail = false) {
9712 SETUP();
9713
9714 START();
9715 Label end, slow;
9716 __ Mov(x2, 0xc001c0de);
9717 __ Mov(x1, value);
9718 __ SmiTag(x1);
9719 __ SmiAbs(x1, &slow);
9720 __ SmiUntag(x1);
9721 __ B(&end);
9722
9723 __ Bind(&slow);
9724 __ Mov(x2, 0xbad);
9725
9726 __ Bind(&end);
9727 END();
9728
9729 RUN();
9730
9731 if (must_fail) {
9732 // We tested an invalid conversion. The code must have jumped to 'slow'.
9733 ASSERT_EQUAL_64(0xbad, x2);
9734 } else {
9735 // The conversion is valid; check the result.
9736 int32_t result = (value >= 0) ? value : -value;
9737 ASSERT_EQUAL_64(result, x1);
9738
9739 // Check that we didn't jump on slow.
9740 ASSERT_EQUAL_64(0xc001c0de, x2);
9741 }
9742
9743 TEARDOWN();
9744 }
9745
9746
9747 TEST(smi_abs) {
9748 INIT_V8();
9749 // Simple and edge cases.
9750 DoSmiAbsTest(0);
9751 DoSmiAbsTest(0x12345);
9752 DoSmiAbsTest(0x40000000);
9753 DoSmiAbsTest(0x7fffffff);
9754 DoSmiAbsTest(-1);
9755 DoSmiAbsTest(-12345);
9756 DoSmiAbsTest(0x80000001);
9757
9758 // Check that the most negative SMI is detected.
9759 DoSmiAbsTest(0x80000000, true);
9760 }
9761
9762
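// Editorial sketch, not part of the original CL: the slow path fires only for
// the most negative value because, in 32-bit two's complement, -INT32_MIN is
// not representable; every other input has a representable absolute value.
//
//   #include <cstdint>
//   #include <cstdio>
//
//   int main() {
//     const int64_t neg_min = -static_cast<int64_t>(INT32_MIN);  // 2147483648
//     printf("-INT32_MIN fits in int32: %d\n",
//            neg_min <= INT32_MAX);                      // 0: must take slow.
//     printf("-(-42) fits in int32:     %d\n",
//            -static_cast<int64_t>(-42) <= INT32_MAX);   // 1: fast path.
//     return 0;
//   }
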
9763 TEST(blr_lr) {
9764 // A simple test to check that the simulator correctly handles "blr lr".
9765 INIT_V8();
9766 SETUP();
9767
9768 START();
9769 Label target;
9770 Label end;
9771
9772 __ Mov(x0, 0x0);
9773 __ Adr(lr, &target);
9774
9775 __ Blr(lr);
9776 __ Mov(x0, 0xdeadbeef);
9777 __ B(&end);
9778
9779 __ Bind(&target);
9780 __ Mov(x0, 0xc001c0de);
9781
9782 __ Bind(&end);
9783 END();
9784
9785 RUN();
9786
9787 ASSERT_EQUAL_64(0xc001c0de, x0);
9788
9789 TEARDOWN();
9790 }
9791
9792
9793 TEST(barriers) {
9794 // Generate all supported barriers; this is just a smoke test.
9795 INIT_V8();
9796 SETUP();
9797
9798 START();
9799
9800 // DMB
9801 __ Dmb(FullSystem, BarrierAll);
9802 __ Dmb(FullSystem, BarrierReads);
9803 __ Dmb(FullSystem, BarrierWrites);
9804 __ Dmb(FullSystem, BarrierOther);
9805
9806 __ Dmb(InnerShareable, BarrierAll);
9807 __ Dmb(InnerShareable, BarrierReads);
9808 __ Dmb(InnerShareable, BarrierWrites);
9809 __ Dmb(InnerShareable, BarrierOther);
9810
9811 __ Dmb(NonShareable, BarrierAll);
9812 __ Dmb(NonShareable, BarrierReads);
9813 __ Dmb(NonShareable, BarrierWrites);
9814 __ Dmb(NonShareable, BarrierOther);
9815
9816 __ Dmb(OuterShareable, BarrierAll);
9817 __ Dmb(OuterShareable, BarrierReads);
9818 __ Dmb(OuterShareable, BarrierWrites);
9819 __ Dmb(OuterShareable, BarrierOther);
9820
9821 // DSB
9822 __ Dsb(FullSystem, BarrierAll);
9823 __ Dsb(FullSystem, BarrierReads);
9824 __ Dsb(FullSystem, BarrierWrites);
9825 __ Dsb(FullSystem, BarrierOther);
9826
9827 __ Dsb(InnerShareable, BarrierAll);
9828 __ Dsb(InnerShareable, BarrierReads);
9829 __ Dsb(InnerShareable, BarrierWrites);
9830 __ Dsb(InnerShareable, BarrierOther);
9831
9832 __ Dsb(NonShareable, BarrierAll);
9833 __ Dsb(NonShareable, BarrierReads);
9834 __ Dsb(NonShareable, BarrierWrites);
9835 __ Dsb(NonShareable, BarrierOther);
9836
9837 __ Dsb(OuterShareable, BarrierAll);
9838 __ Dsb(OuterShareable, BarrierReads);
9839 __ Dsb(OuterShareable, BarrierWrites);
9840 __ Dsb(OuterShareable, BarrierOther);
9841
9842 // ISB
9843 __ Isb();
9844
9845 END();
9846
9847 RUN();
9848
9849 TEARDOWN();
9850 }
9851
9852
9853 TEST(call_no_relocation) {
9854 Address call_start;
9855 Address return_address;
9856
9857 INIT_V8();
9858 SETUP();
9859
9860 START();
9861
9862 Label function;
9863 Label test;
9864
9865 __ B(&test);
9866
9867 __ Bind(&function);
9868 __ Mov(x0, 0x1);
9869 __ Ret();
9870
9871 __ Bind(&test);
9872 __ Mov(x0, 0x0);
9873 __ Push(lr, xzr);
9874 {
9875 Assembler::BlockConstPoolScope scope(&masm);
9876 call_start = buf + __ pc_offset();
9877 __ Call(buf + function.pos(), RelocInfo::NONE64);
9878 return_address = buf + __ pc_offset();
9879 }
9880 __ Pop(xzr, lr);
9881 END();
9882
9883 RUN();
9884
9885 ASSERT_EQUAL_64(1, x0);
9886
9887 // The return_address_from_call_start function is not otherwise exercised with
9888 // non-relocatable sequences, so we check it here to make sure it works.
9889 // TODO(jbramley): Once Crankshaft is complete, decide if we need to support
9890 // non-relocatable calls at all.
9891 CHECK(return_address ==
9892 Assembler::return_address_from_call_start(call_start));
9893
9894 TEARDOWN();
9895 }
9896
9897
9898 static void AbsHelperX(int64_t value) {
9899 int64_t expected;
9900
9901 SETUP();
9902 START();
9903
9904 Label fail;
9905 Label done;
9906
9907 __ Mov(x0, 0);
9908 __ Mov(x1, value);
9909
9910 if (value != kXMinInt) {
9911 expected = labs(value);
9912
9913 Label next;
9914 // The result is representable.
9915 __ Abs(x10, x1);
9916 __ Abs(x11, x1, &fail);
9917 __ Abs(x12, x1, &fail, &next);
9918 __ Bind(&next);
9919 __ Abs(x13, x1, NULL, &done);
9920 } else {
9921 // labs is undefined for kXMinInt, but our implementation in the
9922 // MacroAssembler will return kXMinInt in such a case.
9923 expected = kXMinInt;
9924
9925 Label next;
9926 // The result is not representable.
9927 __ Abs(x10, x1);
9928 __ Abs(x11, x1, NULL, &fail);
9929 __ Abs(x12, x1, &next, &fail);
9930 __ Bind(&next);
9931 __ Abs(x13, x1, &done);
9932 }
9933
9934 __ Bind(&fail);
9935 __ Mov(x0, -1);
9936
9937 __ Bind(&done);
9938
9939 END();
9940 RUN();
9941
9942 ASSERT_EQUAL_64(0, x0);
9943 ASSERT_EQUAL_64(value, x1);
9944 ASSERT_EQUAL_64(expected, x10);
9945 ASSERT_EQUAL_64(expected, x11);
9946 ASSERT_EQUAL_64(expected, x12);
9947 ASSERT_EQUAL_64(expected, x13);
9948
9949 TEARDOWN();
9950 }
9951
9952
9953 static void AbsHelperW(int32_t value) {
9954 int32_t expected;
9955
9956 SETUP();
9957 START();
9958
9959 Label fail;
9960 Label done;
9961
9962 __ Mov(w0, 0);
9963 // TODO(jbramley): The cast is needed to avoid a sign-extension bug in VIXL.
9964 // Once it is fixed, we should remove the cast.
9965 __ Mov(w1, static_cast<uint32_t>(value));
9966
9967 if (value != kWMinInt) {
9968 expected = abs(value);
9969
9970 Label next;
9971 // The result is representable.
9972 __ Abs(w10, w1);
9973 __ Abs(w11, w1, &fail);
9974 __ Abs(w12, w1, &fail, &next);
9975 __ Bind(&next);
9976 __ Abs(w13, w1, NULL, &done);
9977 } else {
9978 // abs is undefined for kWMinInt, but our implementation in the
9979 // MacroAssembler will return kWMinInt in such a case.
9980 expected = kWMinInt;
9981
9982 Label next;
9983 // The result is not representable.
9984 __ Abs(w10, w1);
9985 __ Abs(w11, w1, NULL, &fail);
9986 __ Abs(w12, w1, &next, &fail);
9987 __ Bind(&next);
9988 __ Abs(w13, w1, &done);
9989 }
9990
9991 __ Bind(&fail);
9992 __ Mov(w0, -1);
9993
9994 __ Bind(&done);
9995
9996 END();
9997 RUN();
9998
9999 ASSERT_EQUAL_32(0, w0);
10000 ASSERT_EQUAL_32(value, w1);
10001 ASSERT_EQUAL_32(expected, w10);
10002 ASSERT_EQUAL_32(expected, w11);
10003 ASSERT_EQUAL_32(expected, w12);
10004 ASSERT_EQUAL_32(expected, w13);
10005
10006 TEARDOWN();
10007 }
10008
10009
10010 TEST(abs) {
10011 INIT_V8();
10012 AbsHelperX(0);
10013 AbsHelperX(42);
10014 AbsHelperX(-42);
10015 AbsHelperX(kXMinInt);
10016 AbsHelperX(kXMaxInt);
10017
10018 AbsHelperW(0);
10019 AbsHelperW(42);
10020 AbsHelperW(-42);
10021 AbsHelperW(kWMinInt);
10022 AbsHelperW(kWMaxInt);
10023 }