Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(5)

Side by Side Diff: test/cctest/test-assembler-a64.cc

Issue 185653004: Experimental parser: merge to r19637 (Closed) Base URL: https://v8.googlecode.com/svn/branches/experimental/parser
Patch Set: Created 6 years, 9 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « test/cctest/test-api.cc ('k') | test/cctest/test-assembler-mips.cc » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
(Empty)
1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28 #include <stdio.h>
29 #include <stdlib.h>
30 #include <string.h>
31 #include <cmath>
32 #include <limits>
33
34 #include "v8.h"
35
36 #include "macro-assembler.h"
37 #include "a64/simulator-a64.h"
38 #include "a64/decoder-a64-inl.h"
39 #include "a64/disasm-a64.h"
40 #include "a64/utils-a64.h"
41 #include "cctest.h"
42 #include "test-utils-a64.h"
43
44 using namespace v8::internal;
45
46 // Test infrastructure.
47 //
48 // Tests are functions which accept no parameters and have no return values.
49 // The testing code should not perform an explicit return once completed. For
50 // example to test the mov immediate instruction a very simple test would be:
51 //
52 // TEST(mov_x0_one) {
53 // SETUP();
54 //
55 // START();
56 // __ mov(x0, Operand(1));
57 // END();
58 //
59 // RUN();
60 //
61 // ASSERT_EQUAL_64(1, x0);
62 //
63 // TEARDOWN();
64 // }
65 //
66 // Within a START ... END block all registers but sp can be modified. sp has to
67 // be explicitly saved/restored. The END() macro replaces the function return
68 // so it may appear multiple times in a test if the test has multiple exit
69 // points.
70 //
71 // Once the test has been run all integer and floating point registers as well
72 // as flags are accessible through a RegisterDump instance, see
73 // utils-a64.cc for more info on RegisterDump.
74 //
75 // We provide some helper assert macros to handle common cases:
76 //
77 // ASSERT_EQUAL_32(int32_t, int_32t)
78 // ASSERT_EQUAL_FP32(float, float)
79 // ASSERT_EQUAL_32(int32_t, W register)
80 // ASSERT_EQUAL_FP32(float, S register)
81 // ASSERT_EQUAL_64(int64_t, int_64t)
82 // ASSERT_EQUAL_FP64(double, double)
83 // ASSERT_EQUAL_64(int64_t, X register)
84 // ASSERT_EQUAL_64(X register, X register)
85 // ASSERT_EQUAL_FP64(double, D register)
86 //
87 // e.g. ASSERT_EQUAL_64(0.5, d30);
88 //
89 // If more advanced computation is required before the assert, then access
90 // the RegisterDump named core directly:
91 //
92 // ASSERT_EQUAL_64(0x1234, core.xreg(0) & 0xffff);
93
94
// Dead code: legacy per-test VM initialization, compiled out via #if 0.
// The active path uses the INIT_V8() macro (CcTest::InitializeVM) instead.
95 #if 0  // TODO(all): enable.
96 static v8::Persistent<v8::Context> env;
97
98 static void InitializeVM() {
99 if (env.IsEmpty()) {
100 env = v8::Context::New();
101 }
102 }
103 #endif
104
// Test-harness macros. Each TEST builds code into `buf` with MacroAssembler
// `masm` (via the `__` shorthand), runs it either under the simulator or on
// real hardware, and inspects the final machine state through `core`
// (a RegisterDump captured by END()).
105 #define __ masm.
106
107 #define BUF_SIZE 8192
108 #define SETUP() SETUP_SIZE(BUF_SIZE)
109
// NOTE(review): the trailing '\' after CcTest::InitializeVM() continues this
// macro onto the blank line below. Harmless today, but fragile if a statement
// is ever added directly after the macro definition — confirm intent.
110 #define INIT_V8() \
111 CcTest::InitializeVM(); \
112
113 #ifdef USE_SIMULATOR
114
115 // Run tests with the simulator.
// Simulator build: code is generated into a heap buffer and interpreted by
// `simulator`. `pdis` stays NULL unless the commented-out tracing block below
// is re-enabled; TEARDOWN()'s `delete pdis` is safe on NULL.
116 #define SETUP_SIZE(buf_size) \
117 Isolate* isolate = Isolate::Current(); \
118 HandleScope scope(isolate); \
119 ASSERT(isolate != NULL); \
120 byte* buf = new byte[buf_size]; \
121 MacroAssembler masm(isolate, buf, buf_size); \
122 Decoder<DispatchingDecoderVisitor>* decoder = \
123 new Decoder<DispatchingDecoderVisitor>(); \
124 Simulator simulator(decoder); \
125 PrintDisassembler* pdis = NULL; \
126 RegisterDump core;
127
128 /* if (Cctest::trace_sim()) { \
129 pdis = new PrintDisassembler(stdout); \
130 decoder.PrependVisitor(pdis); \
131 } \
132 */
133
134 // Reset the assembler and simulator, so that instructions can be generated,
135 // but don't actually emit any code. This can be used by tests that need to
136 // emit instructions at the start of the buffer. Note that START_AFTER_RESET
137 // must be called before any callee-saved register is modified, and before an
138 // END is encountered.
139 //
140 // Most tests should call START, rather than call RESET directly.
141 #define RESET() \
142 __ Reset(); \
143 simulator.ResetState();
144
145 #define START_AFTER_RESET() \
146 __ SetStackPointer(csp); \
147 __ PushCalleeSavedRegisters(); \
148 __ Debug("Start test.", __LINE__, TRACE_ENABLE | LOG_ALL);
149
150 #define START() \
151 RESET(); \
152 START_AFTER_RESET();
153
154 #define RUN() \
155 simulator.RunFrom(reinterpret_cast<Instruction*>(buf))
156
// END() dumps all registers/flags into `core` before restoring callee-saved
// registers and returning, so asserts observe the state at the end of the
// test body.
157 #define END() \
158 __ Debug("End test.", __LINE__, TRACE_DISABLE | LOG_ALL); \
159 core.Dump(&masm); \
160 __ PopCalleeSavedRegisters(); \
161 __ Ret(); \
162 __ GetCode(NULL);
163
164 #define TEARDOWN() \
165 delete pdis; \
166 delete[] buf;
167
168 #else  // ifdef USE_SIMULATOR.
169 // Run the test on real hardware or models.
170 #define SETUP_SIZE(buf_size) \
171 Isolate* isolate = Isolate::Current(); \
172 HandleScope scope(isolate); \
173 ASSERT(isolate != NULL); \
174 byte* buf = new byte[buf_size]; \
175 MacroAssembler masm(isolate, buf, buf_size); \
176 RegisterDump core; \
177 CPU::SetUp();
178
179 #define RESET() \
180 __ Reset();
181
182 #define START_AFTER_RESET() \
183 __ SetStackPointer(csp); \
184 __ PushCalleeSavedRegisters();
185
186 #define START() \
187 RESET(); \
188 START_AFTER_RESET();
189
// Hardware build: flush the I-cache, then call straight into the buffer.
// The buffer pointer is copied into a function pointer with memcpy, which
// avoids a direct object-pointer/function-pointer cast.
190 #define RUN() \
191 CPU::FlushICache(buf, masm.SizeOfGeneratedCode()); \
192 { \
193 void (*test_function)(void); \
194 memcpy(&test_function, &buf, sizeof(buf)); \
195 test_function(); \
196 }
197
198 #define END() \
199 core.Dump(&masm); \
200 __ PopCalleeSavedRegisters(); \
201 __ Ret(); \
202 __ GetCode(NULL);
203
204 #define TEARDOWN() \
205 delete[] buf;
206
207 #endif  // ifdef USE_SIMULATOR.
208
// Assertion helpers: all compare an expected value against the state captured
// in `core` by END(), via the Equal* helpers from test-utils-a64.
209 #define ASSERT_EQUAL_NZCV(expected) \
210 CHECK(EqualNzcv(expected, core.flags_nzcv()))
211
212 #define ASSERT_EQUAL_REGISTERS(expected) \
213 CHECK(EqualRegisters(&expected, &core))
214
215 #define ASSERT_EQUAL_32(expected, result) \
216 CHECK(Equal32(static_cast<uint32_t>(expected), &core, result))
217
218 #define ASSERT_EQUAL_FP32(expected, result) \
219 CHECK(EqualFP32(expected, &core, result))
220
221 #define ASSERT_EQUAL_64(expected, result) \
222 CHECK(Equal64(expected, &core, result))
223
224 #define ASSERT_EQUAL_FP64(expected, result) \
225 CHECK(EqualFP64(expected, &core, result))
226
// Literal pool size is only queryable in debug builds; no-op otherwise.
227 #ifdef DEBUG
228 #define ASSERT_LITERAL_POOL_SIZE(expected) \
229 CHECK((expected) == (__ LiteralPoolSize()))
230 #else
231 #define ASSERT_LITERAL_POOL_SIZE(expected) \
232 ((void) 0)
233 #endif
234
235
// Exercises csp (the stack pointer) as an operand and destination of Mov,
// Add and logical instructions, in both X (csp) and W (wcsp) widths.
// x29 preserves the real csp across the test so END() can clean up normally.
236 TEST(stack_ops) {
237 INIT_V8();
238 SETUP();
239
240 START();
241 // save csp.
242 __ Mov(x29, csp);
243
244 // Set the csp to a known value.
245 __ Mov(x16, 0x1000);
246 __ Mov(csp, x16);
247 __ Mov(x0, csp);
248
249 // Add immediate to the csp, and move the result to a normal register.
250 __ Add(csp, csp, Operand(0x50));
251 __ Mov(x1, csp);
252
253 // Add extended to the csp, and move the result to a normal register.
// SXTB of 0xfff sign-extends the low byte (0xff) to -1, so the assert below
// expects 0x1050 - 1 = 0x104f.
254 __ Mov(x17, 0xfff);
255 __ Add(csp, csp, Operand(x17, SXTB));
256 __ Mov(x2, csp);
257
258 // Create an csp using a logical instruction, and move to normal register.
259 __ Orr(csp, xzr, Operand(0x1fff));
260 __ Mov(x3, csp);
261
262 // Write wcsp using a logical instruction.
263 __ Orr(wcsp, wzr, Operand(0xfffffff8L));
264 __ Mov(x4, csp);
265
266 // Write csp, and read back wcsp.
267 __ Orr(csp, xzr, Operand(0xfffffff8L));
268 __ Mov(w5, wcsp);
269
270 // restore csp.
271 __ Mov(csp, x29);
272 END();
273
274 RUN();
275
276 ASSERT_EQUAL_64(0x1000, x0);
277 ASSERT_EQUAL_64(0x1050, x1);
278 ASSERT_EQUAL_64(0x104f, x2);
279 ASSERT_EQUAL_64(0x1fff, x3);
280 ASSERT_EQUAL_64(0xfffffff8, x4);
281 ASSERT_EQUAL_64(0xfffffff8, x5);
282
283 TEARDOWN();
284 }
285
286
// Mvn (bitwise NOT move) with immediate, shifted-register (LSL/LSR/ASR/ROR)
// and extended-register (UXT*/SXT*) operands, in both W and X widths.
287 TEST(mvn) {
288 INIT_V8();
289 SETUP();
290
291 START();
292 __ Mvn(w0, 0xfff);
293 __ Mvn(x1, 0xfff);
294 __ Mvn(w2, Operand(w0, LSL, 1));
295 __ Mvn(x3, Operand(x1, LSL, 2));
296 __ Mvn(w4, Operand(w0, LSR, 3));
297 __ Mvn(x5, Operand(x1, LSR, 4));
298 __ Mvn(w6, Operand(w0, ASR, 11));
299 __ Mvn(x7, Operand(x1, ASR, 12));
300 __ Mvn(w8, Operand(w0, ROR, 13));
301 __ Mvn(x9, Operand(x1, ROR, 14));
302 __ Mvn(w10, Operand(w2, UXTB));
303 __ Mvn(x11, Operand(x2, SXTB, 1));
304 __ Mvn(w12, Operand(w2, UXTH, 2));
305 __ Mvn(x13, Operand(x2, SXTH, 3));
306 __ Mvn(x14, Operand(w2, UXTW, 4));
307 __ Mvn(x15, Operand(w2, SXTW, 4));
308 END();
309
310 RUN();
311
312 ASSERT_EQUAL_64(0xfffff000, x0);
313 ASSERT_EQUAL_64(0xfffffffffffff000UL, x1);
314 ASSERT_EQUAL_64(0x00001fff, x2);
315 ASSERT_EQUAL_64(0x0000000000003fffUL, x3);
316 ASSERT_EQUAL_64(0xe00001ff, x4);
317 ASSERT_EQUAL_64(0xf0000000000000ffUL, x5);
318 ASSERT_EQUAL_64(0x00000001, x6);
319 ASSERT_EQUAL_64(0x0, x7);
320 ASSERT_EQUAL_64(0x7ff80000, x8);
321 ASSERT_EQUAL_64(0x3ffc000000000000UL, x9);
322 ASSERT_EQUAL_64(0xffffff00, x10);
323 ASSERT_EQUAL_64(0x0000000000000001UL, x11);
324 ASSERT_EQUAL_64(0xffff8003, x12);
325 ASSERT_EQUAL_64(0xffffffffffff0007UL, x13);
326 ASSERT_EQUAL_64(0xfffffffffffe000fUL, x14);
327 ASSERT_EQUAL_64(0xfffffffffffe000fUL, x15);
328
329 TEARDOWN();
330 }
331
332
// Mov in all its forms: 64-bit immediates, the raw movz/movk/movn encodings,
// register-to-register, self-moves (no-op for X, truncation for W), and
// shifted / extended register operands.
333 TEST(mov) {
334 INIT_V8();
335 SETUP();
336
337 START();
// Pre-fill x0-x3 with all-ones so the movz/movk/movn effects below are
// distinguishable (movk merges into, movz/movn replace, the old value).
338 __ Mov(x0, 0xffffffffffffffffL);
339 __ Mov(x1, 0xffffffffffffffffL);
340 __ Mov(x2, 0xffffffffffffffffL);
341 __ Mov(x3, 0xffffffffffffffffL);
342
343 __ Mov(x0, 0x0123456789abcdefL);
344
345 __ movz(x1, 0xabcdL << 16);
346 __ movk(x2, 0xabcdL << 32);
347 __ movn(x3, 0xabcdL << 48);
348
349 __ Mov(x4, 0x0123456789abcdefL);
350 __ Mov(x5, x4);
351
352 __ Mov(w6, -1);
353
354 // Test that moves back to the same register have the desired effect. This
355 // is a no-op for X registers, and a truncation for W registers.
356 __ Mov(x7, 0x0123456789abcdefL);
357 __ Mov(x7, x7);
358 __ Mov(x8, 0x0123456789abcdefL);
359 __ Mov(w8, w8);
360 __ Mov(x9, 0x0123456789abcdefL);
361 __ Mov(x9, Operand(x9));
362 __ Mov(x10, 0x0123456789abcdefL);
363 __ Mov(w10, Operand(w10));
364
// Shifted- and extended-register operand forms.
365 __ Mov(w11, 0xfff);
366 __ Mov(x12, 0xfff);
367 __ Mov(w13, Operand(w11, LSL, 1));
368 __ Mov(x14, Operand(x12, LSL, 2));
369 __ Mov(w15, Operand(w11, LSR, 3));
370 __ Mov(x18, Operand(x12, LSR, 4));
371 __ Mov(w19, Operand(w11, ASR, 11));
372 __ Mov(x20, Operand(x12, ASR, 12));
373 __ Mov(w21, Operand(w11, ROR, 13));
374 __ Mov(x22, Operand(x12, ROR, 14));
375 __ Mov(w23, Operand(w13, UXTB));
376 __ Mov(x24, Operand(x13, SXTB, 1));
377 __ Mov(w25, Operand(w13, UXTH, 2));
378 __ Mov(x26, Operand(x13, SXTH, 3));
379 __ Mov(x27, Operand(w13, UXTW, 4));
380 END();
381
382 RUN();
383
384 ASSERT_EQUAL_64(0x0123456789abcdefL, x0);
385 ASSERT_EQUAL_64(0x00000000abcd0000L, x1);
386 ASSERT_EQUAL_64(0xffffabcdffffffffL, x2);
387 ASSERT_EQUAL_64(0x5432ffffffffffffL, x3);
388 ASSERT_EQUAL_64(x4, x5);
389 ASSERT_EQUAL_32(-1, w6);
390 ASSERT_EQUAL_64(0x0123456789abcdefL, x7);
391 ASSERT_EQUAL_32(0x89abcdefL, w8);
392 ASSERT_EQUAL_64(0x0123456789abcdefL, x9);
393 ASSERT_EQUAL_32(0x89abcdefL, w10);
394 ASSERT_EQUAL_64(0x00000fff, x11);
395 ASSERT_EQUAL_64(0x0000000000000fffUL, x12);
396 ASSERT_EQUAL_64(0x00001ffe, x13);
397 ASSERT_EQUAL_64(0x0000000000003ffcUL, x14);
398 ASSERT_EQUAL_64(0x000001ff, x15);
399 ASSERT_EQUAL_64(0x00000000000000ffUL, x18);
400 ASSERT_EQUAL_64(0x00000001, x19);
401 ASSERT_EQUAL_64(0x0, x20);
402 ASSERT_EQUAL_64(0x7ff80000, x21);
403 ASSERT_EQUAL_64(0x3ffc000000000000UL, x22);
404 ASSERT_EQUAL_64(0x000000fe, x23);
405 ASSERT_EQUAL_64(0xfffffffffffffffcUL, x24);
406 ASSERT_EQUAL_64(0x00007ff8, x25);
407 ASSERT_EQUAL_64(0x000000000000fff0UL, x26);
408 ASSERT_EQUAL_64(0x000000000001ffe0UL, x27);
409
410 TEARDOWN();
411 }
412
413
// 32-bit immediate moves covering every halfword pattern the macro assembler
// must synthesize (all-ones, one inverted half, zero, single half, full word).
414 TEST(mov_imm_w) {
415 INIT_V8();
416 SETUP();
417
418 START();
419 __ Mov(w0, 0xffffffffL);
420 __ Mov(w1, 0xffff1234L);
421 __ Mov(w2, 0x1234ffffL);
422 __ Mov(w3, 0x00000000L);
423 __ Mov(w4, 0x00001234L);
424 __ Mov(w5, 0x12340000L);
425 __ Mov(w6, 0x12345678L);
426 END();
427
428 RUN();
429
430 ASSERT_EQUAL_64(0xffffffffL, x0);
431 ASSERT_EQUAL_64(0xffff1234L, x1);
432 ASSERT_EQUAL_64(0x1234ffffL, x2);
433 ASSERT_EQUAL_64(0x00000000L, x3);
434 ASSERT_EQUAL_64(0x00001234L, x4);
435 ASSERT_EQUAL_64(0x12340000L, x5);
436 ASSERT_EQUAL_64(0x12345678L, x6);
437
438 TEARDOWN();
439 }
440
441
// 64-bit immediate moves covering the halfword combinations the macro
// assembler synthesizes with movz/movn/movk sequences.
// NOTE(review): x0 is loaded with 0xffffffffffffffffL but never asserted —
// the checks start at x1. Presumably an oversight; confirm and add the assert.
442 TEST(mov_imm_x) {
443 INIT_V8();
444 SETUP();
445
446 START();
447 __ Mov(x0, 0xffffffffffffffffL);
448 __ Mov(x1, 0xffffffffffff1234L);
449 __ Mov(x2, 0xffffffff12345678L);
450 __ Mov(x3, 0xffff1234ffff5678L);
451 __ Mov(x4, 0x1234ffffffff5678L);
452 __ Mov(x5, 0x1234ffff5678ffffL);
453 __ Mov(x6, 0x12345678ffffffffL);
454 __ Mov(x7, 0x1234ffffffffffffL);
455 __ Mov(x8, 0x123456789abcffffL);
456 __ Mov(x9, 0x12345678ffff9abcL);
457 __ Mov(x10, 0x1234ffff56789abcL);
458 __ Mov(x11, 0xffff123456789abcL);
459 __ Mov(x12, 0x0000000000000000L);
460 __ Mov(x13, 0x0000000000001234L);
461 __ Mov(x14, 0x0000000012345678L);
462 __ Mov(x15, 0x0000123400005678L);
463 __ Mov(x18, 0x1234000000005678L);
464 __ Mov(x19, 0x1234000056780000L);
465 __ Mov(x20, 0x1234567800000000L);
466 __ Mov(x21, 0x1234000000000000L);
467 __ Mov(x22, 0x123456789abc0000L);
468 __ Mov(x23, 0x1234567800009abcL);
469 __ Mov(x24, 0x1234000056789abcL);
470 __ Mov(x25, 0x0000123456789abcL);
471 __ Mov(x26, 0x123456789abcdef0L);
472 __ Mov(x27, 0xffff000000000001L);
473 __ Mov(x28, 0x8000ffff00000000L);
474 END();
475
476 RUN();
477
478 ASSERT_EQUAL_64(0xffffffffffff1234L, x1);
479 ASSERT_EQUAL_64(0xffffffff12345678L, x2);
480 ASSERT_EQUAL_64(0xffff1234ffff5678L, x3);
481 ASSERT_EQUAL_64(0x1234ffffffff5678L, x4);
482 ASSERT_EQUAL_64(0x1234ffff5678ffffL, x5);
483 ASSERT_EQUAL_64(0x12345678ffffffffL, x6);
484 ASSERT_EQUAL_64(0x1234ffffffffffffL, x7);
485 ASSERT_EQUAL_64(0x123456789abcffffL, x8);
486 ASSERT_EQUAL_64(0x12345678ffff9abcL, x9);
487 ASSERT_EQUAL_64(0x1234ffff56789abcL, x10);
488 ASSERT_EQUAL_64(0xffff123456789abcL, x11);
489 ASSERT_EQUAL_64(0x0000000000000000L, x12);
490 ASSERT_EQUAL_64(0x0000000000001234L, x13);
491 ASSERT_EQUAL_64(0x0000000012345678L, x14);
492 ASSERT_EQUAL_64(0x0000123400005678L, x15);
493 ASSERT_EQUAL_64(0x1234000000005678L, x18);
494 ASSERT_EQUAL_64(0x1234000056780000L, x19);
495 ASSERT_EQUAL_64(0x1234567800000000L, x20);
496 ASSERT_EQUAL_64(0x1234000000000000L, x21);
497 ASSERT_EQUAL_64(0x123456789abc0000L, x22);
498 ASSERT_EQUAL_64(0x1234567800009abcL, x23);
499 ASSERT_EQUAL_64(0x1234000056789abcL, x24);
500 ASSERT_EQUAL_64(0x0000123456789abcL, x25);
501 ASSERT_EQUAL_64(0x123456789abcdef0L, x26);
502 ASSERT_EQUAL_64(0xffff000000000001L, x27);
503 ASSERT_EQUAL_64(0x8000ffff00000000L, x28);
504
505 TEARDOWN();
506 }
507
508
// Orr with plain, shifted (LSL/LSR/ASR/ROR) and immediate operands, in both
// W and X widths.
509 TEST(orr) {
510 INIT_V8();
511 SETUP();
512
513 START();
514 __ Mov(x0, 0xf0f0);
515 __ Mov(x1, 0xf00000ff);
516
517 __ Orr(x2, x0, Operand(x1));
518 __ Orr(w3, w0, Operand(w1, LSL, 28));
519 __ Orr(x4, x0, Operand(x1, LSL, 32));
520 __ Orr(x5, x0, Operand(x1, LSR, 4));
521 __ Orr(w6, w0, Operand(w1, ASR, 4));
522 __ Orr(x7, x0, Operand(x1, ASR, 4));
523 __ Orr(w8, w0, Operand(w1, ROR, 12));
524 __ Orr(x9, x0, Operand(x1, ROR, 12));
525 __ Orr(w10, w0, Operand(0xf));
526 __ Orr(x11, x0, Operand(0xf0000000f0000000L));
527 END();
528
529 RUN();
530
531 ASSERT_EQUAL_64(0xf000f0ff, x2);
532 ASSERT_EQUAL_64(0xf000f0f0, x3);
533 ASSERT_EQUAL_64(0xf00000ff0000f0f0L, x4);
534 ASSERT_EQUAL_64(0x0f00f0ff, x5);
535 ASSERT_EQUAL_64(0xff00f0ff, x6);
536 ASSERT_EQUAL_64(0x0f00f0ff, x7);
537 ASSERT_EQUAL_64(0x0ffff0f0, x8);
538 ASSERT_EQUAL_64(0x0ff00000000ff0f0L, x9);
539 ASSERT_EQUAL_64(0xf0ff, x10);
540 ASSERT_EQUAL_64(0xf0000000f000f0f0L, x11);
541
542 TEARDOWN();
543 }
544
545
// Orr with extended-register operands (UXTB/UXTH/UXTW/UXTX and the signed
// variants), each with an extra left shift encoded in the extend.
546 TEST(orr_extend) {
547 INIT_V8();
548 SETUP();
549
550 START();
551 __ Mov(x0, 1);
552 __ Mov(x1, 0x8000000080008080UL);
553 __ Orr(w6, w0, Operand(w1, UXTB));
554 __ Orr(x7, x0, Operand(x1, UXTH, 1));
555 __ Orr(w8, w0, Operand(w1, UXTW, 2));
556 __ Orr(x9, x0, Operand(x1, UXTX, 3));
557 __ Orr(w10, w0, Operand(w1, SXTB));
558 __ Orr(x11, x0, Operand(x1, SXTH, 1));
559 __ Orr(x12, x0, Operand(x1, SXTW, 2));
560 __ Orr(x13, x0, Operand(x1, SXTX, 3));
561 END();
562
563 RUN();
564
565 ASSERT_EQUAL_64(0x00000081, x6);
566 ASSERT_EQUAL_64(0x00010101, x7);
567 ASSERT_EQUAL_64(0x00020201, x8);
568 ASSERT_EQUAL_64(0x0000000400040401UL, x9);
569 ASSERT_EQUAL_64(0x00000000ffffff81UL, x10);
570 ASSERT_EQUAL_64(0xffffffffffff0101UL, x11);
571 ASSERT_EQUAL_64(0xfffffffe00020201UL, x12);
572 ASSERT_EQUAL_64(0x0000000400040401UL, x13);
573
574 TEARDOWN();
575 }
576
577
// Logical ops with immediates that cannot be encoded as a bitmask immediate,
// forcing the macro assembler to materialize them in a scratch register.
578 TEST(bitwise_wide_imm) {
579 INIT_V8();
580 SETUP();
581
582 START();
583 __ Mov(x0, 0);
584 __ Mov(x1, 0xf0f0f0f0f0f0f0f0UL);
585
586 __ Orr(x10, x0, Operand(0x1234567890abcdefUL));
587 __ Orr(w11, w1, Operand(0x90abcdef));
588 END();
589
590 RUN();
591
// Source registers must be left untouched by the synthesized sequences.
592 ASSERT_EQUAL_64(0, x0);
593 ASSERT_EQUAL_64(0xf0f0f0f0f0f0f0f0UL, x1);
594 ASSERT_EQUAL_64(0x1234567890abcdefUL, x10);
595 ASSERT_EQUAL_64(0xf0fbfdffUL, x11);
596
597 TEARDOWN();
598 }
599
600
// Orn (OR with inverted operand) with plain, shifted and immediate operands.
601 TEST(orn) {
602 INIT_V8();
603 SETUP();
604
605 START();
606 __ Mov(x0, 0xf0f0);
607 __ Mov(x1, 0xf00000ff);
608
609 __ Orn(x2, x0, Operand(x1));
610 __ Orn(w3, w0, Operand(w1, LSL, 4));
611 __ Orn(x4, x0, Operand(x1, LSL, 4));
612 __ Orn(x5, x0, Operand(x1, LSR, 1));
613 __ Orn(w6, w0, Operand(w1, ASR, 1));
614 __ Orn(x7, x0, Operand(x1, ASR, 1));
615 __ Orn(w8, w0, Operand(w1, ROR, 16));
616 __ Orn(x9, x0, Operand(x1, ROR, 16));
617 __ Orn(w10, w0, Operand(0xffff));
618 __ Orn(x11, x0, Operand(0xffff0000ffffL));
619 END();
620
621 RUN();
622
623 ASSERT_EQUAL_64(0xffffffff0ffffff0L, x2);
624 ASSERT_EQUAL_64(0xfffff0ff, x3);
625 ASSERT_EQUAL_64(0xfffffff0fffff0ffL, x4);
626 ASSERT_EQUAL_64(0xffffffff87fffff0L, x5);
627 ASSERT_EQUAL_64(0x07fffff0, x6);
628 ASSERT_EQUAL_64(0xffffffff87fffff0L, x7);
629 ASSERT_EQUAL_64(0xff00ffff, x8);
630 ASSERT_EQUAL_64(0xff00ffffffffffffL, x9);
631 ASSERT_EQUAL_64(0xfffff0f0, x10);
632 ASSERT_EQUAL_64(0xffff0000fffff0f0L, x11);
633
634 TEARDOWN();
635 }
636
637
// Orn with extended-register operands (unsigned and signed extends, each with
// an encoded left shift).
638 TEST(orn_extend) {
639 INIT_V8();
640 SETUP();
641
642 START();
643 __ Mov(x0, 1);
644 __ Mov(x1, 0x8000000080008081UL);
645 __ Orn(w6, w0, Operand(w1, UXTB));
646 __ Orn(x7, x0, Operand(x1, UXTH, 1));
647 __ Orn(w8, w0, Operand(w1, UXTW, 2));
648 __ Orn(x9, x0, Operand(x1, UXTX, 3));
649 __ Orn(w10, w0, Operand(w1, SXTB));
650 __ Orn(x11, x0, Operand(x1, SXTH, 1));
651 __ Orn(x12, x0, Operand(x1, SXTW, 2));
652 __ Orn(x13, x0, Operand(x1, SXTX, 3));
653 END();
654
655 RUN();
656
657 ASSERT_EQUAL_64(0xffffff7f, x6);
658 ASSERT_EQUAL_64(0xfffffffffffefefdUL, x7);
659 ASSERT_EQUAL_64(0xfffdfdfb, x8);
660 ASSERT_EQUAL_64(0xfffffffbfffbfbf7UL, x9);
661 ASSERT_EQUAL_64(0x0000007f, x10);
662 ASSERT_EQUAL_64(0x0000fefd, x11);
663 ASSERT_EQUAL_64(0x00000001fffdfdfbUL, x12);
664 ASSERT_EQUAL_64(0xfffffffbfffbfbf7UL, x13);
665
666 TEARDOWN();
667 }
668
669
// And with plain, shifted and immediate operands. (The trailing underscore in
// the test name avoids C++'s `and` alternative token.)
670 TEST(and_) {
671 INIT_V8();
672 SETUP();
673
674 START();
675 __ Mov(x0, 0xfff0);
676 __ Mov(x1, 0xf00000ff);
677
678 __ And(x2, x0, Operand(x1));
679 __ And(w3, w0, Operand(w1, LSL, 4));
680 __ And(x4, x0, Operand(x1, LSL, 4));
681 __ And(x5, x0, Operand(x1, LSR, 1));
682 __ And(w6, w0, Operand(w1, ASR, 20));
683 __ And(x7, x0, Operand(x1, ASR, 20));
684 __ And(w8, w0, Operand(w1, ROR, 28));
685 __ And(x9, x0, Operand(x1, ROR, 28));
686 __ And(w10, w0, Operand(0xff00));
687 __ And(x11, x0, Operand(0xff));
688 END();
689
690 RUN();
691
692 ASSERT_EQUAL_64(0x000000f0, x2);
693 ASSERT_EQUAL_64(0x00000ff0, x3);
694 ASSERT_EQUAL_64(0x00000ff0, x4);
695 ASSERT_EQUAL_64(0x00000070, x5);
696 ASSERT_EQUAL_64(0x0000ff00, x6);
697 ASSERT_EQUAL_64(0x00000f00, x7);
698 ASSERT_EQUAL_64(0x00000ff0, x8);
699 ASSERT_EQUAL_64(0x00000000, x9);
700 ASSERT_EQUAL_64(0x0000ff00, x10);
701 ASSERT_EQUAL_64(0x000000f0, x11);
702
703 TEARDOWN();
704 }
705
706
// And with extended-register operands; x0 is all-ones so the results expose
// exactly the extended (and shifted) form of x1.
707 TEST(and_extend) {
708 INIT_V8();
709 SETUP();
710
711 START();
712 __ Mov(x0, 0xffffffffffffffffUL);
713 __ Mov(x1, 0x8000000080008081UL);
714 __ And(w6, w0, Operand(w1, UXTB));
715 __ And(x7, x0, Operand(x1, UXTH, 1));
716 __ And(w8, w0, Operand(w1, UXTW, 2));
717 __ And(x9, x0, Operand(x1, UXTX, 3));
718 __ And(w10, w0, Operand(w1, SXTB));
719 __ And(x11, x0, Operand(x1, SXTH, 1));
720 __ And(x12, x0, Operand(x1, SXTW, 2));
721 __ And(x13, x0, Operand(x1, SXTX, 3));
722 END();
723
724 RUN();
725
726 ASSERT_EQUAL_64(0x00000081, x6);
727 ASSERT_EQUAL_64(0x00010102, x7);
728 ASSERT_EQUAL_64(0x00020204, x8);
729 ASSERT_EQUAL_64(0x0000000400040408UL, x9);
730 ASSERT_EQUAL_64(0xffffff81, x10);
731 ASSERT_EQUAL_64(0xffffffffffff0102UL, x11);
732 ASSERT_EQUAL_64(0xfffffffe00020204UL, x12);
733 ASSERT_EQUAL_64(0x0000000400040408UL, x13);
734
735 TEARDOWN();
736 }
737
738
// Ands (flag-setting AND). Each START..RUN cycle regenerates and reruns the
// buffer; the asserts check both the result register and the NZCV flags
// captured by the RegisterDump.
739 TEST(ands) {
740 INIT_V8();
741 SETUP();
742
// Negative result sets N.
743 START();
744 __ Mov(x1, 0xf00000ff);
745 __ Ands(w0, w1, Operand(w1));
746 END();
747
748 RUN();
749
750 ASSERT_EQUAL_NZCV(NFlag);
751 ASSERT_EQUAL_64(0xf00000ff, x0);
752
// Disjoint bit patterns give a zero result: Z set.
753 START();
754 __ Mov(x0, 0xfff0);
755 __ Mov(x1, 0xf00000ff);
756 __ Ands(w0, w0, Operand(w1, LSR, 4));
757 END();
758
759 RUN();
760
761 ASSERT_EQUAL_NZCV(ZFlag);
762 ASSERT_EQUAL_64(0x00000000, x0);
763
// ROR #1 of 1 produces the 64-bit sign bit: N set.
764 START();
765 __ Mov(x0, 0x8000000000000000L);
766 __ Mov(x1, 0x00000001);
767 __ Ands(x0, x0, Operand(x1, ROR, 1));
768 END();
769
770 RUN();
771
772 ASSERT_EQUAL_NZCV(NFlag);
773 ASSERT_EQUAL_64(0x8000000000000000L, x0);
774
// Immediate-operand form, zero result.
775 START();
776 __ Mov(x0, 0xfff0);
777 __ Ands(w0, w0, Operand(0xf));
778 END();
779
780 RUN();
781
782 ASSERT_EQUAL_NZCV(ZFlag);
783 ASSERT_EQUAL_64(0x00000000, x0);
784
// Immediate-operand form, negative (32-bit) result.
785 START();
786 __ Mov(x0, 0xff000000);
787 __ Ands(w0, w0, Operand(0x80000000));
788 END();
789
790 RUN();
791
792 ASSERT_EQUAL_NZCV(NFlag);
793 ASSERT_EQUAL_64(0x80000000, x0);
794
795 TEARDOWN();
796 }
797
798
// Bic (bit clear: AND with inverted operand) with plain, shifted and
// immediate operands, plus a wide-immediate write into csp.
799 TEST(bic) {
800 INIT_V8();
801 SETUP();
802
803 START();
804 __ Mov(x0, 0xfff0);
805 __ Mov(x1, 0xf00000ff);
806
807 __ Bic(x2, x0, Operand(x1));
808 __ Bic(w3, w0, Operand(w1, LSL, 4));
809 __ Bic(x4, x0, Operand(x1, LSL, 4));
810 __ Bic(x5, x0, Operand(x1, LSR, 1));
811 __ Bic(w6, w0, Operand(w1, ASR, 20));
812 __ Bic(x7, x0, Operand(x1, ASR, 20));
813 __ Bic(w8, w0, Operand(w1, ROR, 28));
814 __ Bic(x9, x0, Operand(x1, ROR, 24));
815 __ Bic(x10, x0, Operand(0x1f));
816 __ Bic(x11, x0, Operand(0x100));
817
818 // Test bic into csp when the constant cannot be encoded in the immediate
819 // field.
820 // Use x20 to preserve csp. We check for the result via x21 because the
821 // test infrastructure requires that csp be restored to its original value.
822 __ Mov(x20, csp);
823 __ Mov(x0, 0xffffff);
824 __ Bic(csp, x0, Operand(0xabcdef));
825 __ Mov(x21, csp);
826 __ Mov(csp, x20);
827 END();
828
829 RUN();
830
831 ASSERT_EQUAL_64(0x0000ff00, x2);
832 ASSERT_EQUAL_64(0x0000f000, x3);
833 ASSERT_EQUAL_64(0x0000f000, x4);
834 ASSERT_EQUAL_64(0x0000ff80, x5);
835 ASSERT_EQUAL_64(0x000000f0, x6);
836 ASSERT_EQUAL_64(0x0000f0f0, x7);
837 ASSERT_EQUAL_64(0x0000f000, x8);
838 ASSERT_EQUAL_64(0x0000ff00, x9);
839 ASSERT_EQUAL_64(0x0000ffe0, x10);
840 ASSERT_EQUAL_64(0x0000fef0, x11);
841
842 ASSERT_EQUAL_64(0x543210, x21);
843
844 TEARDOWN();
845 }
846
847
// Bic with extended-register operands; x0 is all-ones so results are the
// complement of the extended (and shifted) form of x1.
848 TEST(bic_extend) {
849 INIT_V8();
850 SETUP();
851
852 START();
853 __ Mov(x0, 0xffffffffffffffffUL);
854 __ Mov(x1, 0x8000000080008081UL);
855 __ Bic(w6, w0, Operand(w1, UXTB));
856 __ Bic(x7, x0, Operand(x1, UXTH, 1));
857 __ Bic(w8, w0, Operand(w1, UXTW, 2));
858 __ Bic(x9, x0, Operand(x1, UXTX, 3));
859 __ Bic(w10, w0, Operand(w1, SXTB));
860 __ Bic(x11, x0, Operand(x1, SXTH, 1));
861 __ Bic(x12, x0, Operand(x1, SXTW, 2));
862 __ Bic(x13, x0, Operand(x1, SXTX, 3));
863 END();
864
865 RUN();
866
867 ASSERT_EQUAL_64(0xffffff7e, x6);
868 ASSERT_EQUAL_64(0xfffffffffffefefdUL, x7);
869 ASSERT_EQUAL_64(0xfffdfdfb, x8);
870 ASSERT_EQUAL_64(0xfffffffbfffbfbf7UL, x9);
871 ASSERT_EQUAL_64(0x0000007e, x10);
872 ASSERT_EQUAL_64(0x0000fefd, x11);
873 ASSERT_EQUAL_64(0x00000001fffdfdfbUL, x12);
874 ASSERT_EQUAL_64(0xfffffffbfffbfbf7UL, x13);
875
876 TEARDOWN();
877 }
878
879
// Bics (flag-setting bit clear). Each START..RUN cycle regenerates the buffer;
// asserts check both the result and the NZCV flags from the RegisterDump.
880 TEST(bics) {
881 INIT_V8();
882 SETUP();
883
// Clearing a value with itself gives zero: Z set.
884 START();
885 __ Mov(x1, 0xffff);
886 __ Bics(w0, w1, Operand(w1));
887 END();
888
889 RUN();
890
891 ASSERT_EQUAL_NZCV(ZFlag);
892 ASSERT_EQUAL_64(0x00000000, x0);
893
// Only the sign bit survives: N set.
894 START();
895 __ Mov(x0, 0xffffffff);
896 __ Bics(w0, w0, Operand(w0, LSR, 1));
897 END();
898
899 RUN();
900
901 ASSERT_EQUAL_NZCV(NFlag);
902 ASSERT_EQUAL_64(0x80000000, x0);
903
// ROR #1 of 1 is the 64-bit sign bit, which clears x0: Z set.
904 START();
905 __ Mov(x0, 0x8000000000000000L);
906 __ Mov(x1, 0x00000001);
907 __ Bics(x0, x0, Operand(x1, ROR, 1));
908 END();
909
910 RUN();
911
912 ASSERT_EQUAL_NZCV(ZFlag);
913 ASSERT_EQUAL_64(0x00000000, x0);
914
// Immediate form, 64-bit negative result: N set.
915 START();
916 __ Mov(x0, 0xffffffffffffffffL);
917 __ Bics(x0, x0, Operand(0x7fffffffffffffffL));
918 END();
919
920 RUN();
921
922 ASSERT_EQUAL_NZCV(NFlag);
923 ASSERT_EQUAL_64(0x8000000000000000L, x0);
924
// Immediate form, zero result: Z set.
925 START();
926 __ Mov(w0, 0xffff0000);
927 __ Bics(w0, w0, Operand(0xfffffff0));
928 END();
929
930 RUN();
931
932 ASSERT_EQUAL_NZCV(ZFlag);
933 ASSERT_EQUAL_64(0x00000000, x0);
934
935 TEARDOWN();
936 }
937
938
// Eor (exclusive OR) with plain, shifted and immediate operands.
939 TEST(eor) {
940 INIT_V8();
941 SETUP();
942
943 START();
944 __ Mov(x0, 0xfff0);
945 __ Mov(x1, 0xf00000ff);
946
947 __ Eor(x2, x0, Operand(x1));
948 __ Eor(w3, w0, Operand(w1, LSL, 4));
949 __ Eor(x4, x0, Operand(x1, LSL, 4));
950 __ Eor(x5, x0, Operand(x1, LSR, 1));
951 __ Eor(w6, w0, Operand(w1, ASR, 20));
952 __ Eor(x7, x0, Operand(x1, ASR, 20));
953 __ Eor(w8, w0, Operand(w1, ROR, 28));
954 __ Eor(x9, x0, Operand(x1, ROR, 28));
955 __ Eor(w10, w0, Operand(0xff00ff00));
956 __ Eor(x11, x0, Operand(0xff00ff00ff00ff00L));
957 END();
958
959 RUN();
960
961 ASSERT_EQUAL_64(0xf000ff0f, x2);
962 ASSERT_EQUAL_64(0x0000f000, x3);
963 ASSERT_EQUAL_64(0x0000000f0000f000L, x4);
964 ASSERT_EQUAL_64(0x7800ff8f, x5);
965 ASSERT_EQUAL_64(0xffff00f0, x6);
966 ASSERT_EQUAL_64(0x0000f0f0, x7);
967 ASSERT_EQUAL_64(0x0000f00f, x8);
968 ASSERT_EQUAL_64(0x00000ff00000ffffL, x9);
969 ASSERT_EQUAL_64(0xff0000f0, x10);
970 ASSERT_EQUAL_64(0xff00ff00ff0000f0L, x11);
971
972 TEARDOWN();
973 }
974
975
// Eor with extended-register operands against a 0x11... pattern, so flipped
// nibbles make the extend width and shift visible in the expected values.
976 TEST(eor_extend) {
977 INIT_V8();
978 SETUP();
979
980 START();
981 __ Mov(x0, 0x1111111111111111UL);
982 __ Mov(x1, 0x8000000080008081UL);
983 __ Eor(w6, w0, Operand(w1, UXTB));
984 __ Eor(x7, x0, Operand(x1, UXTH, 1));
985 __ Eor(w8, w0, Operand(w1, UXTW, 2));
986 __ Eor(x9, x0, Operand(x1, UXTX, 3));
987 __ Eor(w10, w0, Operand(w1, SXTB));
988 __ Eor(x11, x0, Operand(x1, SXTH, 1));
989 __ Eor(x12, x0, Operand(x1, SXTW, 2));
990 __ Eor(x13, x0, Operand(x1, SXTX, 3));
991 END();
992
993 RUN();
994
995 ASSERT_EQUAL_64(0x11111190, x6);
996 ASSERT_EQUAL_64(0x1111111111101013UL, x7);
997 ASSERT_EQUAL_64(0x11131315, x8);
998 ASSERT_EQUAL_64(0x1111111511151519UL, x9);
999 ASSERT_EQUAL_64(0xeeeeee90, x10);
1000 ASSERT_EQUAL_64(0xeeeeeeeeeeee1013UL, x11);
1001 ASSERT_EQUAL_64(0xeeeeeeef11131315UL, x12);
1002 ASSERT_EQUAL_64(0x1111111511151519UL, x13);
1003
1004 TEARDOWN();
1005 }
1006
1007
// Eon (exclusive OR with inverted operand) with plain, shifted and immediate
// operands.
1008 TEST(eon) {
1009 INIT_V8();
1010 SETUP();
1011
1012 START();
1013 __ Mov(x0, 0xfff0);
1014 __ Mov(x1, 0xf00000ff);
1015
1016 __ Eon(x2, x0, Operand(x1));
1017 __ Eon(w3, w0, Operand(w1, LSL, 4));
1018 __ Eon(x4, x0, Operand(x1, LSL, 4));
1019 __ Eon(x5, x0, Operand(x1, LSR, 1));
1020 __ Eon(w6, w0, Operand(w1, ASR, 20));
1021 __ Eon(x7, x0, Operand(x1, ASR, 20));
1022 __ Eon(w8, w0, Operand(w1, ROR, 28));
1023 __ Eon(x9, x0, Operand(x1, ROR, 28));
1024 __ Eon(w10, w0, Operand(0x03c003c0));
1025 __ Eon(x11, x0, Operand(0x0000100000001000L));
1026 END();
1027
1028 RUN();
1029
1030 ASSERT_EQUAL_64(0xffffffff0fff00f0L, x2);
1031 ASSERT_EQUAL_64(0xffff0fff, x3);
1032 ASSERT_EQUAL_64(0xfffffff0ffff0fffL, x4);
1033 ASSERT_EQUAL_64(0xffffffff87ff0070L, x5);
1034 ASSERT_EQUAL_64(0x0000ff0f, x6);
1035 ASSERT_EQUAL_64(0xffffffffffff0f0fL, x7);
1036 ASSERT_EQUAL_64(0xffff0ff0, x8);
1037 ASSERT_EQUAL_64(0xfffff00fffff0000L, x9);
1038 ASSERT_EQUAL_64(0xfc3f03cf, x10);
1039 ASSERT_EQUAL_64(0xffffefffffff100fL, x11);
1040
1041 TEARDOWN();
1042 }
1043
1044
// Eon with extended-register operands, mirroring eor_extend with the operand
// inverted.
1045 TEST(eon_extend) {
1046 INIT_V8();
1047 SETUP();
1048
1049 START();
1050 __ Mov(x0, 0x1111111111111111UL);
1051 __ Mov(x1, 0x8000000080008081UL);
1052 __ Eon(w6, w0, Operand(w1, UXTB));
1053 __ Eon(x7, x0, Operand(x1, UXTH, 1));
1054 __ Eon(w8, w0, Operand(w1, UXTW, 2));
1055 __ Eon(x9, x0, Operand(x1, UXTX, 3));
1056 __ Eon(w10, w0, Operand(w1, SXTB));
1057 __ Eon(x11, x0, Operand(x1, SXTH, 1));
1058 __ Eon(x12, x0, Operand(x1, SXTW, 2));
1059 __ Eon(x13, x0, Operand(x1, SXTX, 3));
1060 END();
1061
1062 RUN();
1063
1064 ASSERT_EQUAL_64(0xeeeeee6f, x6);
1065 ASSERT_EQUAL_64(0xeeeeeeeeeeefefecUL, x7);
1066 ASSERT_EQUAL_64(0xeeececea, x8);
1067 ASSERT_EQUAL_64(0xeeeeeeeaeeeaeae6UL, x9);
1068 ASSERT_EQUAL_64(0x1111116f, x10);
1069 ASSERT_EQUAL_64(0x111111111111efecUL, x11);
1070 ASSERT_EQUAL_64(0x11111110eeececeaUL, x12);
1071 ASSERT_EQUAL_64(0xeeeeeeeaeeeaeae6UL, x13);
1072
1073 TEARDOWN();
1074 }
1075
1076
// Mul, Smull (signed 32x32->64) and Mneg (multiply-negate) across W and X
// widths, with 0, 1, 0xffffffff and all-ones as operands to cover wrap-around
// and sign behaviour.
1077 TEST(mul) {
1078 INIT_V8();
1079 SETUP();
1080
1081 START();
1082 __ Mov(x16, 0);
1083 __ Mov(x17, 1);
1084 __ Mov(x18, 0xffffffff);
1085 __ Mov(x19, 0xffffffffffffffffUL);
1086
1087 __ Mul(w0, w16, w16);
1088 __ Mul(w1, w16, w17);
1089 __ Mul(w2, w17, w18);
1090 __ Mul(w3, w18, w19);
1091 __ Mul(x4, x16, x16);
1092 __ Mul(x5, x17, x18);
1093 __ Mul(x6, x18, x19);
1094 __ Mul(x7, x19, x19);
1095 __ Smull(x8, w17, w18);
1096 __ Smull(x9, w18, w18);
1097 __ Smull(x10, w19, w19);
1098 __ Mneg(w11, w16, w16);
1099 __ Mneg(w12, w16, w17);
1100 __ Mneg(w13, w17, w18);
1101 __ Mneg(w14, w18, w19);
1102 __ Mneg(x20, x16, x16);
1103 __ Mneg(x21, x17, x18);
1104 __ Mneg(x22, x18, x19);
1105 __ Mneg(x23, x19, x19);
1106 END();
1107
1108 RUN();
1109
1110 ASSERT_EQUAL_64(0, x0);
1111 ASSERT_EQUAL_64(0, x1);
1112 ASSERT_EQUAL_64(0xffffffff, x2);
1113 ASSERT_EQUAL_64(1, x3);
1114 ASSERT_EQUAL_64(0, x4);
1115 ASSERT_EQUAL_64(0xffffffff, x5);
1116 ASSERT_EQUAL_64(0xffffffff00000001UL, x6);
1117 ASSERT_EQUAL_64(1, x7);
1118 ASSERT_EQUAL_64(0xffffffffffffffffUL, x8);
1119 ASSERT_EQUAL_64(1, x9);
1120 ASSERT_EQUAL_64(1, x10);
1121 ASSERT_EQUAL_64(0, x11);
1122 ASSERT_EQUAL_64(0, x12);
1123 ASSERT_EQUAL_64(1, x13);
1124 ASSERT_EQUAL_64(0xffffffff, x14);
1125 ASSERT_EQUAL_64(0, x20);
1126 ASSERT_EQUAL_64(0xffffffff00000001UL, x21);
1127 ASSERT_EQUAL_64(0xffffffff, x22);
1128 ASSERT_EQUAL_64(0xffffffffffffffffUL, x23);
1129
1130 TEARDOWN();
1131 }
1132
1133
// Loads |a| and |b| into w0/w1, executes Smull (signed 32x32 -> 64 multiply)
// into x2, and asserts the result equals |expected|. Shared driver for the
// TEST(smull) cases below.
1134 static void SmullHelper(int64_t expected, int64_t a, int64_t b) {
1135 SETUP();
1136 START();
1137 __ Mov(w0, a);
1138 __ Mov(w1, b);
1139 __ Smull(x2, w0, w1);
1140 END();
1141 RUN();
1142 ASSERT_EQUAL_64(expected, x2);
1143 TEARDOWN();
1144 }
1145
1146
// Smull edge cases via SmullHelper: zero, unit, sign combinations, the
// INT32_MIN operand (sign-extends to 0xffffffff80000000), and a product that
// sets bit 31 without being negative.
1147 TEST(smull) {
1148 INIT_V8();
1149 SmullHelper(0, 0, 0);
1150 SmullHelper(1, 1, 1);
1151 SmullHelper(-1, -1, 1);
1152 SmullHelper(1, -1, -1);
1153 SmullHelper(0xffffffff80000000, 0x80000000, 1);
1154 SmullHelper(0x0000000080000000, 0x00010000, 0x00008000);
1155 }
1156
1157
// Exercises multiply-add (Madd: dst = acc + a * b) for 32- and 64-bit
// registers, sweeping operands 0, 1, 0xffffffff and all-ones (-1) in every
// role (multiplicand, multiplier, accumulator).
1158 TEST(madd) {
1159 INIT_V8();
1160 SETUP();
1161
1162 START();
// Operands: x16 = 0, x17 = 1, x18 = 0xffffffff, x19 = all-ones (-1 signed).
1163 __ Mov(x16, 0);
1164 __ Mov(x17, 1);
1165 __ Mov(x18, 0xffffffff);
1166 __ Mov(x19, 0xffffffffffffffffUL);
1167
// 32-bit variants: results truncate to 32 bits.
1168 __ Madd(w0, w16, w16, w16);
1169 __ Madd(w1, w16, w16, w17);
1170 __ Madd(w2, w16, w16, w18);
1171 __ Madd(w3, w16, w16, w19);
1172 __ Madd(w4, w16, w17, w17);
1173 __ Madd(w5, w17, w17, w18);
1174 __ Madd(w6, w17, w17, w19);
1175 __ Madd(w7, w17, w18, w16);
1176 __ Madd(w8, w17, w18, w18);
1177 __ Madd(w9, w18, w18, w17);
1178 __ Madd(w10, w18, w19, w18);
1179 __ Madd(w11, w19, w19, w19);
1180
// 64-bit variants of the same operand sweep.
1181 __ Madd(x12, x16, x16, x16);
1182 __ Madd(x13, x16, x16, x17);
1183 __ Madd(x14, x16, x16, x18);
1184 __ Madd(x15, x16, x16, x19);
1185 __ Madd(x20, x16, x17, x17);
1186 __ Madd(x21, x17, x17, x18);
1187 __ Madd(x22, x17, x17, x19);
1188 __ Madd(x23, x17, x18, x16);
1189 __ Madd(x24, x17, x18, x18);
1190 __ Madd(x25, x18, x18, x17);
1191 __ Madd(x26, x18, x19, x18);
1192 __ Madd(x27, x19, x19, x19);
1193
1194 END();
1195
1196 RUN();
1197
1198 ASSERT_EQUAL_64(0, x0);
1199 ASSERT_EQUAL_64(1, x1);
1200 ASSERT_EQUAL_64(0xffffffff, x2);
1201 ASSERT_EQUAL_64(0xffffffff, x3);
1202 ASSERT_EQUAL_64(1, x4);
1203 ASSERT_EQUAL_64(0, x5);
1204 ASSERT_EQUAL_64(0, x6);
1205 ASSERT_EQUAL_64(0xffffffff, x7);
1206 ASSERT_EQUAL_64(0xfffffffe, x8);
1207 ASSERT_EQUAL_64(2, x9);
1208 ASSERT_EQUAL_64(0, x10);
1209 ASSERT_EQUAL_64(0, x11);
1210
1211 ASSERT_EQUAL_64(0, x12);
1212 ASSERT_EQUAL_64(1, x13);
1213 ASSERT_EQUAL_64(0xffffffff, x14);
// NOTE(review): sibling 64-bit literals carry a UL suffix; this one does not.
// Harmless (the literal already needs a 64-bit type) but inconsistent.
1214 ASSERT_EQUAL_64(0xffffffffffffffff, x15);
1215 ASSERT_EQUAL_64(1, x20);
1216 ASSERT_EQUAL_64(0x100000000UL, x21);
1217 ASSERT_EQUAL_64(0, x22);
1218 ASSERT_EQUAL_64(0xffffffff, x23);
1219 ASSERT_EQUAL_64(0x1fffffffe, x24);
1220 ASSERT_EQUAL_64(0xfffffffe00000002UL, x25);
1221 ASSERT_EQUAL_64(0, x26);
1222 ASSERT_EQUAL_64(0, x27);
1223
1224 TEARDOWN();
1225 }
1226
1227
// Exercises multiply-subtract (Msub: dst = acc - a * b) for 32- and 64-bit
// registers, mirroring the operand sweep used by TEST(madd) above.
1228 TEST(msub) {
1229 INIT_V8();
1230 SETUP();
1231
1232 START();
// Operands: x16 = 0, x17 = 1, x18 = 0xffffffff, x19 = all-ones (-1 signed).
1233 __ Mov(x16, 0);
1234 __ Mov(x17, 1);
1235 __ Mov(x18, 0xffffffff);
1236 __ Mov(x19, 0xffffffffffffffffUL);
1237
// 32-bit variants.
1238 __ Msub(w0, w16, w16, w16);
1239 __ Msub(w1, w16, w16, w17);
1240 __ Msub(w2, w16, w16, w18);
1241 __ Msub(w3, w16, w16, w19);
1242 __ Msub(w4, w16, w17, w17);
1243 __ Msub(w5, w17, w17, w18);
1244 __ Msub(w6, w17, w17, w19);
1245 __ Msub(w7, w17, w18, w16);
1246 __ Msub(w8, w17, w18, w18);
1247 __ Msub(w9, w18, w18, w17);
1248 __ Msub(w10, w18, w19, w18);
1249 __ Msub(w11, w19, w19, w19);
1250
// 64-bit variants.
1251 __ Msub(x12, x16, x16, x16);
1252 __ Msub(x13, x16, x16, x17);
1253 __ Msub(x14, x16, x16, x18);
1254 __ Msub(x15, x16, x16, x19);
1255 __ Msub(x20, x16, x17, x17);
1256 __ Msub(x21, x17, x17, x18);
1257 __ Msub(x22, x17, x17, x19);
1258 __ Msub(x23, x17, x18, x16);
1259 __ Msub(x24, x17, x18, x18);
1260 __ Msub(x25, x18, x18, x17);
1261 __ Msub(x26, x18, x19, x18);
1262 __ Msub(x27, x19, x19, x19);
1263
1264 END();
1265
1266 RUN();
1267
1268 ASSERT_EQUAL_64(0, x0);
1269 ASSERT_EQUAL_64(1, x1);
1270 ASSERT_EQUAL_64(0xffffffff, x2);
1271 ASSERT_EQUAL_64(0xffffffff, x3);
1272 ASSERT_EQUAL_64(1, x4);
1273 ASSERT_EQUAL_64(0xfffffffe, x5);
1274 ASSERT_EQUAL_64(0xfffffffe, x6);
1275 ASSERT_EQUAL_64(1, x7);
1276 ASSERT_EQUAL_64(0, x8);
1277 ASSERT_EQUAL_64(0, x9);
1278 ASSERT_EQUAL_64(0xfffffffe, x10);
1279 ASSERT_EQUAL_64(0xfffffffe, x11);
1280
1281 ASSERT_EQUAL_64(0, x12);
1282 ASSERT_EQUAL_64(1, x13);
1283 ASSERT_EQUAL_64(0xffffffff, x14);
1284 ASSERT_EQUAL_64(0xffffffffffffffffUL, x15);
1285 ASSERT_EQUAL_64(1, x20);
1286 ASSERT_EQUAL_64(0xfffffffeUL, x21);
1287 ASSERT_EQUAL_64(0xfffffffffffffffeUL, x22);
1288 ASSERT_EQUAL_64(0xffffffff00000001UL, x23);
1289 ASSERT_EQUAL_64(0, x24);
1290 ASSERT_EQUAL_64(0x200000000UL, x25);
1291 ASSERT_EQUAL_64(0x1fffffffeUL, x26);
1292 ASSERT_EQUAL_64(0xfffffffffffffffeUL, x27);
1293
1294 TEARDOWN();
1295 }
1296
1297
// Exercises Smulh (signed multiply-high: upper 64 bits of the signed 128-bit
// product of two 64-bit registers) across zero, small, large-positive and
// negative operands.
1298 TEST(smulh) {
1299 INIT_V8();
1300 SETUP();
1301
1302 START();
1303 __ Mov(x20, 0);
1304 __ Mov(x21, 1);
1305 __ Mov(x22, 0x0000000100000000L);
1306 __ Mov(x23, 0x12345678);
1307 __ Mov(x24, 0x0123456789abcdefL);
1308 __ Mov(x25, 0x0000000200000000L);
// x26 = INT64_MIN, x27 = -1; x28/x29 are alternating-bit patterns.
1309 __ Mov(x26, 0x8000000000000000UL);
1310 __ Mov(x27, 0xffffffffffffffffUL);
1311 __ Mov(x28, 0x5555555555555555UL);
1312 __ Mov(x29, 0xaaaaaaaaaaaaaaaaUL);
1313
1314 __ Smulh(x0, x20, x24);
1315 __ Smulh(x1, x21, x24);
1316 __ Smulh(x2, x22, x23);
1317 __ Smulh(x3, x22, x24);
1318 __ Smulh(x4, x24, x25);
1319 __ Smulh(x5, x23, x27);
1320 __ Smulh(x6, x26, x26);
1321 __ Smulh(x7, x26, x27);
1322 __ Smulh(x8, x27, x27);
1323 __ Smulh(x9, x28, x28);
1324 __ Smulh(x10, x28, x29);
1325 __ Smulh(x11, x29, x29);
1326 END();
1327
1328 RUN();
1329
1330 ASSERT_EQUAL_64(0, x0);
1331 ASSERT_EQUAL_64(0, x1);
1332 ASSERT_EQUAL_64(0, x2);
1333 ASSERT_EQUAL_64(0x01234567, x3);
1334 ASSERT_EQUAL_64(0x02468acf, x4);
// Positive * -1 has an all-ones (sign-extension) high half.
1335 ASSERT_EQUAL_64(0xffffffffffffffffUL, x5);
// INT64_MIN squared: high half is 2^126 >> 64 == 0x4000000000000000.
1336 ASSERT_EQUAL_64(0x4000000000000000UL, x6);
1337 ASSERT_EQUAL_64(0, x7);
1338 ASSERT_EQUAL_64(0, x8);
1339 ASSERT_EQUAL_64(0x1c71c71c71c71c71UL, x9);
1340 ASSERT_EQUAL_64(0xe38e38e38e38e38eUL, x10);
1341 ASSERT_EQUAL_64(0x1c71c71c71c71c72UL, x11);
1342
1343 TEARDOWN();
1344 }
1345
1346
// Exercises the widening multiply-accumulate instructions Smaddl (signed
// 32x32 + 64) and Umaddl (unsigned 32x32 + 64); the signed and unsigned
// forms differ when the 32-bit sources have bit 31 set.
1347 TEST(smaddl_umaddl) {
1348 INIT_V8();
1349 SETUP();
1350
1351 START();
// w18/w19 are 0xffffffff: -1 when sign-extended, 2^32-1 when zero-extended.
1352 __ Mov(x17, 1);
1353 __ Mov(x18, 0xffffffff);
1354 __ Mov(x19, 0xffffffffffffffffUL);
1355 __ Mov(x20, 4);
1356 __ Mov(x21, 0x200000000UL);
1357
1358 __ Smaddl(x9, w17, w18, x20);
1359 __ Smaddl(x10, w18, w18, x20);
1360 __ Smaddl(x11, w19, w19, x20);
1361 __ Smaddl(x12, w19, w19, x21);
1362 __ Umaddl(x13, w17, w18, x20);
1363 __ Umaddl(x14, w18, w18, x20);
1364 __ Umaddl(x15, w19, w19, x20);
1365 __ Umaddl(x22, w19, w19, x21);
1366 END();
1367
1368 RUN();
1369
1370 ASSERT_EQUAL_64(3, x9);
1371 ASSERT_EQUAL_64(5, x10);
1372 ASSERT_EQUAL_64(5, x11);
1373 ASSERT_EQUAL_64(0x200000001UL, x12);
1374 ASSERT_EQUAL_64(0x100000003UL, x13);
1375 ASSERT_EQUAL_64(0xfffffffe00000005UL, x14);
1376 ASSERT_EQUAL_64(0xfffffffe00000005UL, x15);
1377 ASSERT_EQUAL_64(0x1, x22);
1378
1379 TEARDOWN();
1380 }
1381
1382
// Exercises the widening multiply-subtract instructions Smsubl (signed
// 64 - 32x32) and Umsubl (unsigned 64 - 32x32), mirroring the operand set
// of TEST(smaddl_umaddl) above.
1383 TEST(smsubl_umsubl) {
1384 INIT_V8();
1385 SETUP();
1386
1387 START();
// w18/w19 are 0xffffffff: -1 when sign-extended, 2^32-1 when zero-extended.
1388 __ Mov(x17, 1);
1389 __ Mov(x18, 0xffffffff);
1390 __ Mov(x19, 0xffffffffffffffffUL);
1391 __ Mov(x20, 4);
1392 __ Mov(x21, 0x200000000UL);
1393
1394 __ Smsubl(x9, w17, w18, x20);
1395 __ Smsubl(x10, w18, w18, x20);
1396 __ Smsubl(x11, w19, w19, x20);
1397 __ Smsubl(x12, w19, w19, x21);
1398 __ Umsubl(x13, w17, w18, x20);
1399 __ Umsubl(x14, w18, w18, x20);
1400 __ Umsubl(x15, w19, w19, x20);
1401 __ Umsubl(x22, w19, w19, x21);
1402 END();
1403
1404 RUN();
1405
1406 ASSERT_EQUAL_64(5, x9);
1407 ASSERT_EQUAL_64(3, x10);
1408 ASSERT_EQUAL_64(3, x11);
1409 ASSERT_EQUAL_64(0x1ffffffffUL, x12);
1410 ASSERT_EQUAL_64(0xffffffff00000005UL, x13);
1411 ASSERT_EQUAL_64(0x200000003UL, x14);
1412 ASSERT_EQUAL_64(0x200000003UL, x15);
1413 ASSERT_EQUAL_64(0x3ffffffffUL, x22);
1414
1415 TEARDOWN();
1416 }
1417
1418
// Exercises Udiv/Sdiv for 32- and 64-bit registers, including the special
// cases the expected values below pin down: signed overflow (INT_MIN / -1
// yields INT_MIN, see x23/x25) and division by zero (yields 0, see x18-x21
// at the end — AArch64 integer division does not trap).
1419 TEST(div) {
1420 INIT_V8();
1421 SETUP();
1422
1423 START();
1424 __ Mov(x16, 1);
1425 __ Mov(x17, 0xffffffff);
1426 __ Mov(x18, 0xffffffffffffffffUL);
// x19/x20 hold INT32_MIN / INT64_MIN bit patterns for the overflow cases.
1427 __ Mov(x19, 0x80000000);
1428 __ Mov(x20, 0x8000000000000000UL);
1429 __ Mov(x21, 2);
1430
1431 __ Udiv(w0, w16, w16);
1432 __ Udiv(w1, w17, w16);
1433 __ Sdiv(w2, w16, w16);
1434 __ Sdiv(w3, w16, w17);
1435 __ Sdiv(w4, w17, w18);
1436
1437 __ Udiv(x5, x16, x16);
1438 __ Udiv(x6, x17, x18);
1439 __ Sdiv(x7, x16, x16);
1440 __ Sdiv(x8, x16, x17);
1441 __ Sdiv(x9, x17, x18);
1442
1443 __ Udiv(w10, w19, w21);
1444 __ Sdiv(w11, w19, w21);
1445 __ Udiv(x12, x19, x21);
1446 __ Sdiv(x13, x19, x21);
1447 __ Udiv(x14, x20, x21);
1448 __ Sdiv(x15, x20, x21);
1449
// Overflow cases: INT_MIN / -1.
1450 __ Udiv(w22, w19, w17);
1451 __ Sdiv(w23, w19, w17);
1452 __ Udiv(x24, x20, x18);
1453 __ Sdiv(x25, x20, x18);
1454
1455 __ Udiv(x26, x16, x21);
1456 __ Sdiv(x27, x16, x21);
1457 __ Udiv(x28, x18, x21);
1458 __ Sdiv(x29, x18, x21);
1459
// Division by zero: every quotient below is expected to be 0.
1460 __ Mov(x17, 0);
1461 __ Udiv(w18, w16, w17);
1462 __ Sdiv(w19, w16, w17);
1463 __ Udiv(x20, x16, x17);
1464 __ Sdiv(x21, x16, x17);
1465 END();
1466
1467 RUN();
1468
1469 ASSERT_EQUAL_64(1, x0);
1470 ASSERT_EQUAL_64(0xffffffff, x1);
1471 ASSERT_EQUAL_64(1, x2);
1472 ASSERT_EQUAL_64(0xffffffff, x3);
1473 ASSERT_EQUAL_64(1, x4);
1474 ASSERT_EQUAL_64(1, x5);
1475 ASSERT_EQUAL_64(0, x6);
1476 ASSERT_EQUAL_64(1, x7);
1477 ASSERT_EQUAL_64(0, x8);
1478 ASSERT_EQUAL_64(0xffffffff00000001UL, x9);
1479 ASSERT_EQUAL_64(0x40000000, x10);
1480 ASSERT_EQUAL_64(0xC0000000, x11);
1481 ASSERT_EQUAL_64(0x40000000, x12);
1482 ASSERT_EQUAL_64(0x40000000, x13);
1483 ASSERT_EQUAL_64(0x4000000000000000UL, x14);
1484 ASSERT_EQUAL_64(0xC000000000000000UL, x15);
1485 ASSERT_EQUAL_64(0, x22);
// INT32_MIN / -1 wraps back to INT32_MIN.
1486 ASSERT_EQUAL_64(0x80000000, x23);
1487 ASSERT_EQUAL_64(0, x24);
// INT64_MIN / -1 wraps back to INT64_MIN.
1488 ASSERT_EQUAL_64(0x8000000000000000UL, x25);
1489 ASSERT_EQUAL_64(0, x26);
1490 ASSERT_EQUAL_64(0, x27);
1491 ASSERT_EQUAL_64(0x7fffffffffffffffUL, x28);
1492 ASSERT_EQUAL_64(0, x29);
1493 ASSERT_EQUAL_64(0, x18);
1494 ASSERT_EQUAL_64(0, x19);
1495 ASSERT_EQUAL_64(0, x20);
1496 ASSERT_EQUAL_64(0, x21);
1497
1498 TEARDOWN();
1499 }
1500
1501
// Exercises the bit/byte reversal instructions on a recognizable pattern:
// Rbit (reverse bits), Rev16 (byte-swap each halfword), Rev (byte-swap the
// whole register) and Rev32 (byte-swap each word of an x register).
1502 TEST(rbit_rev) {
1503 INIT_V8();
1504 SETUP();
1505
1506 START();
1507 __ Mov(x24, 0xfedcba9876543210UL);
1508 __ Rbit(w0, w24);
1509 __ Rbit(x1, x24);
1510 __ Rev16(w2, w24);
1511 __ Rev16(x3, x24);
1512 __ Rev(w4, w24);
1513 __ Rev32(x5, x24);
1514 __ Rev(x6, x24);
1515 END();
1516
1517 RUN();
1518
1519 ASSERT_EQUAL_64(0x084c2a6e, x0);
1520 ASSERT_EQUAL_64(0x084c2a6e195d3b7fUL, x1);
1521 ASSERT_EQUAL_64(0x54761032, x2);
1522 ASSERT_EQUAL_64(0xdcfe98ba54761032UL, x3);
1523 ASSERT_EQUAL_64(0x10325476, x4);
1524 ASSERT_EQUAL_64(0x98badcfe10325476UL, x5);
1525 ASSERT_EQUAL_64(0x1032547698badcfeUL, x6);
1526
1527 TEARDOWN();
1528 }
1529
1530
// Exercises Clz (count leading zeros) and Cls (count leading sign bits,
// excluding the sign bit itself — hence cls == clz - 1 for the positive
// inputs below) on positive, negative and zero operands in both widths.
1531 TEST(clz_cls) {
1532 INIT_V8();
1533 SETUP();
1534
1535 START();
1536 __ Mov(x24, 0x0008000000800000UL);
1537 __ Mov(x25, 0xff800000fff80000UL);
1538 __ Mov(x26, 0);
1539 __ Clz(w0, w24);
1540 __ Clz(x1, x24);
1541 __ Clz(w2, w25);
1542 __ Clz(x3, x25);
1543 __ Clz(w4, w26);
1544 __ Clz(x5, x26);
1545 __ Cls(w6, w24);
1546 __ Cls(x7, x24);
1547 __ Cls(w8, w25);
1548 __ Cls(x9, x25);
1549 __ Cls(w10, w26);
1550 __ Cls(x11, x26);
1551 END();
1552
1553 RUN();
1554
1555 ASSERT_EQUAL_64(8, x0);
1556 ASSERT_EQUAL_64(12, x1);
1557 ASSERT_EQUAL_64(0, x2);
1558 ASSERT_EQUAL_64(0, x3);
// Clz of zero is the full register width; Cls of zero is width - 1.
1559 ASSERT_EQUAL_64(32, x4);
1560 ASSERT_EQUAL_64(64, x5);
1561 ASSERT_EQUAL_64(7, x6);
1562 ASSERT_EQUAL_64(11, x7);
1563 ASSERT_EQUAL_64(12, x8);
1564 ASSERT_EQUAL_64(8, x9);
1565 ASSERT_EQUAL_64(31, x10);
1566 ASSERT_EQUAL_64(63, x11);
1567
1568 TEARDOWN();
1569 }
1570
1571
// Checks label binding and branching: multiple branches to one label,
// forward and backward branches, and Bl to a label placed after END().
// x0 stays 1 only if none of the "__ Mov(x0, 0x0)" fall-through guards runs;
// x1 becomes 1 only if the Bl actually reaches label_4.
1572 TEST(label) {
1573 INIT_V8();
1574 SETUP();
1575
1576 Label label_1, label_2, label_3, label_4;
1577
1578 START();
1579 __ Mov(x0, 0x1);
1580 __ Mov(x1, 0x0);
// lr is saved in x22 because the Bl below clobbers it.
1581 __ Mov(x22, lr); // Save lr.
1582
1583 __ B(&label_1);
1584 __ B(&label_1);
1585 __ B(&label_1); // Multiple branches to the same label.
1586 __ Mov(x0, 0x0);
1587 __ Bind(&label_2);
1588 __ B(&label_3); // Forward branch.
1589 __ Mov(x0, 0x0);
1590 __ Bind(&label_1);
1591 __ B(&label_2); // Backward branch.
1592 __ Mov(x0, 0x0);
1593 __ Bind(&label_3);
1594 __ Bl(&label_4);
1595 END();
1596
1597 __ Bind(&label_4);
1598 __ Mov(x1, 0x1);
1599 __ Mov(lr, x22);
1600 END();
1601
1602 RUN();
1603
1604 ASSERT_EQUAL_64(0x1, x0);
1605 ASSERT_EQUAL_64(0x1, x1);
1606
1607 TEARDOWN();
1608 }
1609
1610
// Regression test for a label-handling boundary condition: a branch emitted
// as the very first instruction in the buffer. RESET/START_AFTER_RESET are
// used instead of START so nothing precedes the initial B.
1611 TEST(branch_at_start) {
1612 INIT_V8();
1613 SETUP();
1614
1615 Label good, exit;
1616
1617 // Test that branches can exist at the start of the buffer. (This is a
1618 // boundary condition in the label-handling code.) To achieve this, we have
1619 // to work around the code generated by START.
1620 RESET();
1621 __ B(&good);
1622
// This path must be skipped by the branch above; it would leave x0 == 0.
1623 START_AFTER_RESET();
1624 __ Mov(x0, 0x0);
1625 END();
1626
1627 __ Bind(&exit);
1628 START_AFTER_RESET();
1629 __ Mov(x0, 0x1);
1630 END();
1631
1632 __ Bind(&good);
1633 __ B(&exit);
1634 END();
1635
1636 RUN();
1637
1638 ASSERT_EQUAL_64(0x1, x0);
1639 TEARDOWN();
1640 }
1641
1642
// Exercises Adr (PC-relative address computation) with forward, backward,
// repeated and self references, walking the code via Br through the computed
// addresses. x0 accumulates any mismatch between duplicate Adr results; x1
// checks the zero-offset self-reference; both must end up 0.
1643 TEST(adr) {
1644 INIT_V8();
1645 SETUP();
1646
1647 Label label_1, label_2, label_3, label_4;
1648
1649 START();
1650 __ Mov(x0, 0x0); // Set to non-zero to indicate failure.
1651 __ Adr(x1, &label_3); // Set to zero to indicate success.
1652
1653 __ Adr(x2, &label_1); // Multiple forward references to the same label.
1654 __ Adr(x3, &label_1);
1655 __ Adr(x4, &label_1);
1656
1657 __ Bind(&label_2);
1658 __ Eor(x5, x2, Operand(x3)); // Ensure that x2,x3 and x4 are identical.
1659 __ Eor(x6, x2, Operand(x4));
1660 __ Orr(x0, x0, Operand(x5));
1661 __ Orr(x0, x0, Operand(x6));
// First pass branches to label_1; second pass (x2 rewritten below) to label_3.
1662 __ Br(x2); // label_1, label_3
1663
1664 __ Bind(&label_3);
// x1 was label_3's address; XORing with a fresh self-reference must give 0.
1665 __ Adr(x2, &label_3); // Self-reference (offset 0).
1666 __ Eor(x1, x1, Operand(x2));
1667 __ Adr(x2, &label_4); // Simple forward reference.
1668 __ Br(x2); // label_4
1669
1670 __ Bind(&label_1);
1671 __ Adr(x2, &label_3); // Multiple reverse references to the same label.
1672 __ Adr(x3, &label_3);
1673 __ Adr(x4, &label_3);
1674 __ Adr(x5, &label_2); // Simple reverse reference.
1675 __ Br(x5); // label_2
1676
1677 __ Bind(&label_4);
1678 END();
1679
1680 RUN();
1681
1682 ASSERT_EQUAL_64(0x0, x0);
1683 ASSERT_EQUAL_64(0x0, x1);
1684
1685 TEARDOWN();
1686 }
1687
1688
// Exhaustively checks conditional branches after Cmp: for each comparison,
// every condition that must NOT be taken branches to &wrong (forcing x0 to
// 0), and one condition that MUST be taken skips a guard that would clear
// x0. The test passes only if x0 is still 1 at the end.
1689 TEST(branch_cond) {
1690 INIT_V8();
1691 SETUP();
1692
1693 Label wrong;
1694
1695 START();
1696 __ Mov(x0, 0x1);
1697 __ Mov(x1, 0x1);
// x2 = INT64_MIN, used to exercise the overflow (vs/vc) conditions.
1698 __ Mov(x2, 0x8000000000000000L);
1699
1700 // For each 'cmp' instruction below, condition codes other than the ones
1701 // following it would branch.
1702
// 1 vs 0: result positive, no carry-out surprises.
1703 __ Cmp(x1, 0);
1704 __ B(&wrong, eq);
1705 __ B(&wrong, lo);
1706 __ B(&wrong, mi);
1707 __ B(&wrong, vs);
1708 __ B(&wrong, ls);
1709 __ B(&wrong, lt);
1710 __ B(&wrong, le);
1711 Label ok_1;
1712 __ B(&ok_1, ne);
1713 __ Mov(x0, 0x0);
1714 __ Bind(&ok_1);
1715
// 1 vs 1: equality.
1716 __ Cmp(x1, 1);
1717 __ B(&wrong, ne);
1718 __ B(&wrong, lo);
1719 __ B(&wrong, mi);
1720 __ B(&wrong, vs);
1721 __ B(&wrong, hi);
1722 __ B(&wrong, lt);
1723 __ B(&wrong, gt);
1724 Label ok_2;
1725 __ B(&ok_2, pl);
1726 __ Mov(x0, 0x0);
1727 __ Bind(&ok_2);
1728
// 1 vs 2: borrow/negative result.
1729 __ Cmp(x1, 2);
1730 __ B(&wrong, eq);
1731 __ B(&wrong, hs);
1732 __ B(&wrong, pl);
1733 __ B(&wrong, vs);
1734 __ B(&wrong, hi);
1735 __ B(&wrong, ge);
1736 __ B(&wrong, gt);
1737 Label ok_3;
1738 __ B(&ok_3, vc);
1739 __ Mov(x0, 0x0);
1740 __ Bind(&ok_3);
1741
// INT64_MIN - 1 overflows: vs must be taken (vc must not).
1742 __ Cmp(x2, 1);
1743 __ B(&wrong, eq);
1744 __ B(&wrong, lo);
1745 __ B(&wrong, mi);
1746 __ B(&wrong, vc);
1747 __ B(&wrong, ls);
1748 __ B(&wrong, ge);
1749 __ B(&wrong, gt);
1750 Label ok_4;
1751 __ B(&ok_4, le);
1752 __ Mov(x0, 0x0);
1753 __ Bind(&ok_4);
1754
// al/nv use the raw assembler 'b' — presumably the MacroAssembler's B()
// does not accept these always-taken conditions; confirm against its API.
1755 Label ok_5;
1756 __ b(&ok_5, al);
1757 __ Mov(x0, 0x0);
1758 __ Bind(&ok_5);
1759
1760 Label ok_6;
1761 __ b(&ok_6, nv);
1762 __ Mov(x0, 0x0);
1763 __ Bind(&ok_6);
1764
1765 END();
1766
1767 __ Bind(&wrong);
1768 __ Mov(x0, 0x0);
1769 END();
1770
1771 RUN();
1772
1773 ASSERT_EQUAL_64(0x1, x0);
1774
1775 TEARDOWN();
1776 }
1777
1778
// Exercises register-indirect branches: Br (jump to register) used as a
// return from a Bl'd "function", and Blr (branch-and-link to register).
// x1/x2 prove each function body ran; x0 captures the lr seen inside fn2.
1779 TEST(branch_to_reg) {
1780 INIT_V8();
1781 SETUP();
1782
1783 // Test br.
1784 Label fn1, after_fn1;
1785
1786 START();
// Save lr in x29; the Bl/Blr calls below clobber it.
1787 __ Mov(x29, lr);
1788
1789 __ Mov(x1, 0);
1790 __ B(&after_fn1);
1791
// fn1 returns to its caller via Br on the captured lr.
1792 __ Bind(&fn1);
1793 __ Mov(x0, lr);
1794 __ Mov(x1, 42);
1795 __ Br(x0);
1796
1797 __ Bind(&after_fn1);
1798 __ Bl(&fn1);
1799
1800 // Test blr.
1801 Label fn2, after_fn2;
1802
1803 __ Mov(x2, 0);
1804 __ B(&after_fn2);
1805
// fn2 "returns" via Blr, which also rewrites lr before the return lands.
1806 __ Bind(&fn2);
1807 __ Mov(x0, lr);
1808 __ Mov(x2, 84);
1809 __ Blr(x0);
1810
1811 __ Bind(&after_fn2);
1812 __ Bl(&fn2);
1813 __ Mov(x3, lr);
1814
1815 __ Mov(lr, x29);
1816 END();
1817
1818 RUN();
1819
// lr inside fn2 (saved to x0) must be one instruction past the Bl site
// recorded in x3.
1820 ASSERT_EQUAL_64(core.xreg(3) + kInstructionSize, x0);
1821 ASSERT_EQUAL_64(42, x1);
1822 ASSERT_EQUAL_64(84, x2);
1823
1824 TEARDOWN();
1825 }
1826
1827
// Exercises compare-and-branch: Cbz/Cbnz in both widths, for taken and
// not-taken cases. Each case sets one of x0-x5 to 1 only if the branch
// resolved the expected way; the final block checks that the w form ignores
// the upper 32 bits (x18's low word is zero).
1828 TEST(compare_branch) {
1829 INIT_V8();
1830 SETUP();
1831
1832 START();
1833 __ Mov(x0, 0);
1834 __ Mov(x1, 0);
1835 __ Mov(x2, 0);
1836 __ Mov(x3, 0);
1837 __ Mov(x4, 0);
1838 __ Mov(x5, 0);
1839 __ Mov(x16, 0);
1840 __ Mov(x17, 42);
1841
// Cbz on zero: taken.
1842 Label zt, zt_end;
1843 __ Cbz(w16, &zt);
1844 __ B(&zt_end);
1845 __ Bind(&zt);
1846 __ Mov(x0, 1);
1847 __ Bind(&zt_end);
1848
// Cbz on non-zero: not taken, x1 stays 0.
1849 Label zf, zf_end;
1850 __ Cbz(x17, &zf);
1851 __ B(&zf_end);
1852 __ Bind(&zf);
1853 __ Mov(x1, 1);
1854 __ Bind(&zf_end);
1855
// Cbnz on non-zero: taken.
1856 Label nzt, nzt_end;
1857 __ Cbnz(w17, &nzt);
1858 __ B(&nzt_end);
1859 __ Bind(&nzt);
1860 __ Mov(x2, 1);
1861 __ Bind(&nzt_end);
1862
// Cbnz on zero: not taken, x3 stays 0.
1863 Label nzf, nzf_end;
1864 __ Cbnz(x16, &nzf);
1865 __ B(&nzf_end);
1866 __ Bind(&nzf);
1867 __ Mov(x3, 1);
1868 __ Bind(&nzf_end);
1869
// Only the upper word of x18 is set; the w-form tests must see zero.
1870 __ Mov(x18, 0xffffffff00000000UL);
1871
1872 Label a, a_end;
1873 __ Cbz(w18, &a);
1874 __ B(&a_end);
1875 __ Bind(&a);
1876 __ Mov(x4, 1);
1877 __ Bind(&a_end);
1878
1879 Label b, b_end;
1880 __ Cbnz(w18, &b);
1881 __ B(&b_end);
1882 __ Bind(&b);
1883 __ Mov(x5, 1);
1884 __ Bind(&b_end);
1885
1886 END();
1887
1888 RUN();
1889
1890 ASSERT_EQUAL_64(1, x0);
1891 ASSERT_EQUAL_64(0, x1);
1892 ASSERT_EQUAL_64(1, x2);
1893 ASSERT_EQUAL_64(0, x3);
1894 ASSERT_EQUAL_64(1, x4);
1895 ASSERT_EQUAL_64(0, x5);
1896
1897 TEARDOWN();
1898 }
1899
1900
// Exercises test-bit-and-branch: Tbz/Tbnz against the alternating pattern
// 0xaaaa... (even bits clear, odd bits set), covering taken and not-taken
// cases in both widths, including the top bit (63) of an x register.
1901 TEST(test_branch) {
1902 INIT_V8();
1903 SETUP();
1904
1905 START();
1906 __ Mov(x0, 0);
1907 __ Mov(x1, 0);
1908 __ Mov(x2, 0);
1909 __ Mov(x3, 0);
1910 __ Mov(x16, 0xaaaaaaaaaaaaaaaaUL);
1911
// Bit 0 is clear: Tbz taken.
1912 Label bz, bz_end;
1913 __ Tbz(w16, 0, &bz);
1914 __ B(&bz_end);
1915 __ Bind(&bz);
1916 __ Mov(x0, 1);
1917 __ Bind(&bz_end);
1918
// Bit 63 is set: Tbz not taken, x1 stays 0.
1919 Label bo, bo_end;
1920 __ Tbz(x16, 63, &bo);
1921 __ B(&bo_end);
1922 __ Bind(&bo);
1923 __ Mov(x1, 1);
1924 __ Bind(&bo_end);
1925
// Bit 61 is set: Tbnz taken.
1926 Label nbz, nbz_end;
1927 __ Tbnz(x16, 61, &nbz);
1928 __ B(&nbz_end);
1929 __ Bind(&nbz);
1930 __ Mov(x2, 1);
1931 __ Bind(&nbz_end);
1932
// Bit 2 is clear: Tbnz not taken, x3 stays 0.
1933 Label nbo, nbo_end;
1934 __ Tbnz(w16, 2, &nbo);
1935 __ B(&nbo_end);
1936 __ Bind(&nbo);
1937 __ Mov(x3, 1);
1938 __ Bind(&nbo_end);
1939 END();
1940
1941 RUN();
1942
1943 ASSERT_EQUAL_64(1, x0);
1944 ASSERT_EQUAL_64(0, x1);
1945 ASSERT_EQUAL_64(1, x2);
1946 ASSERT_EQUAL_64(0, x3);
1947
1948 TEARDOWN();
1949 }
1950
1951
// Checks that the MacroAssembler resolves BACKWARD Tbz/Cbz/B(cond) branches
// to labels beyond the instructions' immediate range (by rewriting them as
// short branch + unconditional jump). Success sets one bit of x0 per branch
// type; x1 stays 1 only if the nop filler is never executed.
1952 TEST(far_branch_backward) {
1953 INIT_V8();
1954
1955 // Test that the MacroAssembler correctly resolves backward branches to labels
1956 // that are outside the immediate range of branch instructions.
1957 int max_range =
1958 std::max(Instruction::ImmBranchRange(TestBranchType),
1959 std::max(Instruction::ImmBranchRange(CompareBranchType),
1960 Instruction::ImmBranchRange(CondBranchType)));
1961
1962 SETUP_SIZE(max_range + 1000 * kInstructionSize);
1963
1964 START();
1965
1966 Label done, fail;
1967 Label test_tbz, test_cbz, test_bcond;
1968 Label success_tbz, success_cbz, success_bcond;
1969
1970 __ Mov(x0, 0);
1971 __ Mov(x1, 1);
1972 __ Mov(x10, 0);
1973
// Success blocks come BEFORE the filler so the branches back to them are
// out of range (backward).
1974 __ B(&test_tbz);
1975 __ Bind(&success_tbz);
1976 __ Orr(x0, x0, 1 << 0);
1977 __ B(&test_cbz);
1978 __ Bind(&success_cbz);
1979 __ Orr(x0, x0, 1 << 1);
1980 __ B(&test_bcond);
1981 __ Bind(&success_bcond);
1982 __ Orr(x0, x0, 1 << 2);
1983
1984 __ B(&done);
1985
1986 // Generate enough code to overflow the immediate range of the three types of
1987 // branches below.
1988 for (unsigned i = 0; i < max_range / kInstructionSize + 1; ++i) {
1989 if (i % 100 == 0) {
1990 // If we do land in this code, we do not want to execute so many nops
1991 // before reaching the end of test (especially if tracing is activated).
1992 __ B(&fail);
1993 } else {
1994 __ Nop();
1995 }
1996 }
1997 __ B(&fail);
1998
// x10 == 0, so all three branches must be taken.
1999 __ Bind(&test_tbz);
2000 __ Tbz(x10, 7, &success_tbz);
2001 __ Bind(&test_cbz);
2002 __ Cbz(x10, &success_cbz);
2003 __ Bind(&test_bcond);
2004 __ Cmp(x10, 0);
2005 __ B(eq, &success_bcond);
2006
2007 // For each out-of-range branch instructions, at least two instructions should
2008 // have been generated.
2009 CHECK_GE(7 * kInstructionSize, __ SizeOfCodeGeneratedSince(&test_tbz));
2010
2011 __ Bind(&fail);
2012 __ Mov(x1, 0);
2013 __ Bind(&done);
2014
2015 END();
2016
2017 RUN();
2018
2019 ASSERT_EQUAL_64(0x7, x0);
2020 ASSERT_EQUAL_64(0x1, x1);
2021
2022 TEARDOWN();
2023 }
2024
2025
// Checks that the MacroAssembler emits veneers for FORWARD Tbz/Cbz/B(cond)
// branches whose targets end up beyond the immediate range (the success
// blocks are placed after the nop filler). Mirrors far_branch_backward.
2026 TEST(far_branch_simple_veneer) {
2027 INIT_V8();
2028
2029 // Test that the MacroAssembler correctly emits veneers for forward branches
2030 // to labels that are outside the immediate range of branch instructions.
2031 int max_range =
2032 std::max(Instruction::ImmBranchRange(TestBranchType),
2033 std::max(Instruction::ImmBranchRange(CompareBranchType),
2034 Instruction::ImmBranchRange(CondBranchType)));
2035
2036 SETUP_SIZE(max_range + 1000 * kInstructionSize);
2037
2038 START();
2039
2040 Label done, fail;
2041 Label test_tbz, test_cbz, test_bcond;
2042 Label success_tbz, success_cbz, success_bcond;
2043
2044 __ Mov(x0, 0);
2045 __ Mov(x1, 1);
2046 __ Mov(x10, 0);
2047
// x10 == 0, so all three branches must be taken (through veneers).
2048 __ Bind(&test_tbz);
2049 __ Tbz(x10, 7, &success_tbz);
2050 __ Bind(&test_cbz);
2051 __ Cbz(x10, &success_cbz);
2052 __ Bind(&test_bcond);
2053 __ Cmp(x10, 0);
2054 __ B(eq, &success_bcond);
2055
2056 // Generate enough code to overflow the immediate range of the three types of
2057 // branches below.
2058 for (unsigned i = 0; i < max_range / kInstructionSize + 1; ++i) {
2059 if (i % 100 == 0) {
2060 // If we do land in this code, we do not want to execute so many nops
2061 // before reaching the end of test (especially if tracing is activated).
2062 // Also, the branches give the MacroAssembler the opportunity to emit the
2063 // veneers.
2064 __ B(&fail);
2065 } else {
2066 __ Nop();
2067 }
2068 }
2069 __ B(&fail);
2070
// Success blocks, now out of direct range of the branches above.
2071 __ Bind(&success_tbz);
2072 __ Orr(x0, x0, 1 << 0);
2073 __ B(&test_cbz);
2074 __ Bind(&success_cbz);
2075 __ Orr(x0, x0, 1 << 1);
2076 __ B(&test_bcond);
2077 __ Bind(&success_bcond);
2078 __ Orr(x0, x0, 1 << 2);
2079
2080 __ B(&done);
2081 __ Bind(&fail);
2082 __ Mov(x1, 0);
2083 __ Bind(&done);
2084
2085 END();
2086
2087 RUN();
2088
2089 ASSERT_EQUAL_64(0x7, x0);
2090 ASSERT_EQUAL_64(0x1, x1);
2091
2092 TEARDOWN();
2093 }
2094
2095
// Checks veneer emission when the out-of-range branch is part of a label's
// link chain of multiple referring instructions — at the start, middle and
// end of the chain (one situation per branch type, see the comment below).
2096 TEST(far_branch_veneer_link_chain) {
2097 INIT_V8();
2098
2099 // Test that the MacroAssembler correctly emits veneers for forward branches
2100 // that target out-of-range labels and are part of multiple instructions
2101 // jumping to that label.
2102 //
2103 // We test the three situations with the different types of instruction:
2104 // (1)- When the branch is at the start of the chain with tbz.
2105 // (2)- When the branch is in the middle of the chain with cbz.
2106 // (3)- When the branch is at the end of the chain with bcond.
2107 int max_range =
2108 std::max(Instruction::ImmBranchRange(TestBranchType),
2109 std::max(Instruction::ImmBranchRange(CompareBranchType),
2110 Instruction::ImmBranchRange(CondBranchType)));
2111
2112 SETUP_SIZE(max_range + 1000 * kInstructionSize);
2113
2114 START();
2115
2116 Label skip, fail, done;
2117 Label test_tbz, test_cbz, test_bcond;
2118 Label success_tbz, success_cbz, success_bcond;
2119
2120 __ Mov(x0, 0);
2121 __ Mov(x1, 1);
2122 __ Mov(x10, 0);
2123
// These skipped branches only populate the front of the link chains.
2124 __ B(&skip);
2125 // Branches at the start of the chain for situations (2) and (3).
2126 __ B(&success_cbz);
2127 __ B(&success_bcond);
2128 __ Nop();
2129 __ B(&success_bcond);
2130 __ B(&success_cbz);
2131 __ Bind(&skip);
2132
// The branches under test; x10 == 0 so all three must be taken.
2133 __ Bind(&test_tbz);
2134 __ Tbz(x10, 7, &success_tbz);
2135 __ Bind(&test_cbz);
2136 __ Cbz(x10, &success_cbz);
2137 __ Bind(&test_bcond);
2138 __ Cmp(x10, 0);
2139 __ B(eq, &success_bcond);
2140
// Unuse() lets the bound 'skip' label be reused for a second skipped block
// that appends branches to the back of the chains.
2141 skip.Unuse();
2142 __ B(&skip);
2143 // Branches at the end of the chain for situations (1) and (2).
2144 __ B(&success_cbz);
2145 __ B(&success_tbz);
2146 __ Nop();
2147 __ B(&success_tbz);
2148 __ B(&success_cbz);
2149 __ Bind(&skip);
2150
2151 // Generate enough code to overflow the immediate range of the three types of
2152 // branches below.
2153 for (unsigned i = 0; i < max_range / kInstructionSize + 1; ++i) {
2154 if (i % 100 == 0) {
2155 // If we do land in this code, we do not want to execute so many nops
2156 // before reaching the end of test (especially if tracing is activated).
2157 // Also, the branches give the MacroAssembler the opportunity to emit the
2158 // veneers.
2159 __ B(&fail);
2160 } else {
2161 __ Nop();
2162 }
2163 }
2164 __ B(&fail);
2165
2166 __ Bind(&success_tbz);
2167 __ Orr(x0, x0, 1 << 0);
2168 __ B(&test_cbz);
2169 __ Bind(&success_cbz);
2170 __ Orr(x0, x0, 1 << 1);
2171 __ B(&test_bcond);
2172 __ Bind(&success_bcond);
2173 __ Orr(x0, x0, 1 << 2);
2174
2175 __ B(&done);
2176 __ Bind(&fail);
2177 __ Mov(x1, 0);
2178 __ Bind(&done);
2179
2180 END();
2181
2182 RUN();
2183
2184 ASSERT_EQUAL_64(0x7, x0);
2185 ASSERT_EQUAL_64(0x1, x1);
2186
2187 TEARDOWN();
2188 }
2189
2190
// Checks link-chain repair: when a branch is removed from a label's chain
// and the neighbouring links are too far apart to re-link directly, the
// MacroAssembler must still resolve them. x0 records which referring site
// last ran; all three must be visited (final value 3).
2191 TEST(far_branch_veneer_broken_link_chain) {
2192 INIT_V8();
2193
2194 // Check that the MacroAssembler correctly handles the situation when removing
2195 // a branch from the link chain of a label and the two links on each side of
2196 // the removed branch cannot be linked together (out of range).
2197 //
2198 // We test with tbz because it has a small range.
2199 int max_range = Instruction::ImmBranchRange(TestBranchType);
// Filler sections are just over half the tbz range, so adjacent links are
// reachable but links two sections apart are not.
2200 int inter_range = max_range / 2 + max_range / 10;
2201
2202 SETUP_SIZE(3 * inter_range + 1000 * kInstructionSize);
2203
2204 START();
2205
2206 Label skip, fail, done;
2207 Label test_1, test_2, test_3;
2208 Label far_target;
2209
2210 __ Mov(x0, 0); // Indicates the origin of the branch.
2211 __ Mov(x1, 1);
2212 __ Mov(x10, 0);
2213
2214 // First instruction in the label chain.
2215 __ Bind(&test_1);
2216 __ Mov(x0, 1);
2217 __ B(&far_target);
2218
2219 for (unsigned i = 0; i < inter_range / kInstructionSize; ++i) {
2220 if (i % 100 == 0) {
// Lower-case 'b' is the raw assembler branch, which does not give the
// MacroAssembler a checkpoint to emit veneers.
2221 // Do not allow generating veneers. They should not be needed.
2222 __ b(&fail);
2223 } else {
2224 __ Nop();
2225 }
2226 }
2227
2228 // Will need a veneer to point to reach the target.
2229 __ Bind(&test_2);
2230 __ Mov(x0, 2);
2231 __ Tbz(x10, 7, &far_target);
2232
2233 for (unsigned i = 0; i < inter_range / kInstructionSize; ++i) {
2234 if (i % 100 == 0) {
2235 // Do not allow generating veneers. They should not be needed.
2236 __ b(&fail);
2237 } else {
2238 __ Nop();
2239 }
2240 }
2241
2242 // Does not need a veneer to reach the target, but the initial branch
2243 // instruction is out of range.
2244 __ Bind(&test_3);
2245 __ Mov(x0, 3);
2246 __ Tbz(x10, 7, &far_target);
2247
2248 for (unsigned i = 0; i < inter_range / kInstructionSize; ++i) {
2249 if (i % 100 == 0) {
2250 // Allow generating veneers.
2251 __ B(&fail);
2252 } else {
2253 __ Nop();
2254 }
2255 }
2256
2257 __ B(&fail);
2258
// Dispatch back to the next referring site based on who branched here.
2259 __ Bind(&far_target);
2260 __ Cmp(x0, 1);
2261 __ B(eq, &test_2);
2262 __ Cmp(x0, 2);
2263 __ B(eq, &test_3);
2264
2265 __ B(&done);
2266 __ Bind(&fail);
2267 __ Mov(x1, 0);
2268 __ Bind(&done);
2269
2270 END();
2271
2272 RUN();
2273
2274 ASSERT_EQUAL_64(0x3, x0);
2275 ASSERT_EQUAL_64(0x1, x1);
2276
2277 TEARDOWN();
2278 }
2279
2280
// Exercises the MacroAssembler's generic B(label, BranchType, reg, bit)
// overloads: condition-code types (ne/eq/never/always) and register types
// (reg_zero, reg_not_zero, reg_bit_clear, reg_bit_set). x0 stays 0 only if
// no not-taken branch fires and every taken branch lands.
2281 TEST(branch_type) {
2282 INIT_V8();
2283
2284 SETUP();
2285
2286 Label fail, done;
2287
2288 START();
2289 __ Mov(x0, 0x0);
// x10 = 0b111: bits 0-2 set, higher bits clear.
2290 __ Mov(x10, 0x7);
2291 __ Mov(x11, 0x0);
2292
2293 // Test non taken branches.
2294 __ Cmp(x10, 0x7);
2295 __ B(&fail, ne);
2296 __ B(&fail, never);
2297 __ B(&fail, reg_zero, x10);
2298 __ B(&fail, reg_not_zero, x11);
2299 __ B(&fail, reg_bit_clear, x10, 0);
2300 __ B(&fail, reg_bit_set, x10, 3);
2301
2302 // Test taken branches.
2303 Label l1, l2, l3, l4, l5;
2304 __ Cmp(x10, 0x7);
2305 __ B(&l1, eq);
2306 __ B(&fail);
2307 __ Bind(&l1);
2308 __ B(&l2, always);
2309 __ B(&fail);
2310 __ Bind(&l2);
2311 __ B(&l3, reg_not_zero, x10);
2312 __ B(&fail);
2313 __ Bind(&l3);
2314 __ B(&l4, reg_bit_clear, x10, 15);
2315 __ B(&fail);
2316 __ Bind(&l4);
2317 __ B(&l5, reg_bit_set, x10, 1);
2318 __ B(&fail);
2319 __ Bind(&l5);
2320
2321 __ B(&done);
2322
2323 __ Bind(&fail);
2324 __ Mov(x0, 0x1);
2325
2326 __ Bind(&done);
2327
2328 END();
2329
2330 RUN();
2331
2332 ASSERT_EQUAL_64(0x0, x0);
2333
2334 TEARDOWN();
2335 }
2336
2337
// Exercises loads/stores with immediate offset addressing: word (Ldr/Str w),
// doubleword (x), byte (Ldrb/Strb) and halfword (Ldrh/Strh). Expected values
// follow little-endian layout of the src pattern (e.g. byte 1 of
// 0xfedcba9876543210 is 0x32). Base registers must be unmodified.
2338 TEST(ldr_str_offset) {
2339 INIT_V8();
2340 SETUP();
2341
2342 uint64_t src[2] = {0xfedcba9876543210UL, 0x0123456789abcdefUL};
2343 uint64_t dst[5] = {0, 0, 0, 0, 0};
2344 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
2345 uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
2346
2347 START();
2348 __ Mov(x17, src_base);
2349 __ Mov(x18, dst_base);
2350 __ Ldr(w0, MemOperand(x17));
2351 __ Str(w0, MemOperand(x18));
2352 __ Ldr(w1, MemOperand(x17, 4));
2353 __ Str(w1, MemOperand(x18, 12));
2354 __ Ldr(x2, MemOperand(x17, 8));
2355 __ Str(x2, MemOperand(x18, 16));
2356 __ Ldrb(w3, MemOperand(x17, 1));
2357 __ Strb(w3, MemOperand(x18, 25));
2358 __ Ldrh(w4, MemOperand(x17, 2));
2359 __ Strh(w4, MemOperand(x18, 33));
2360 END();
2361
2362 RUN();
2363
2364 ASSERT_EQUAL_64(0x76543210, x0);
2365 ASSERT_EQUAL_64(0x76543210, dst[0]);
2366 ASSERT_EQUAL_64(0xfedcba98, x1);
2367 ASSERT_EQUAL_64(0xfedcba9800000000UL, dst[1]);
2368 ASSERT_EQUAL_64(0x0123456789abcdefUL, x2);
2369 ASSERT_EQUAL_64(0x0123456789abcdefUL, dst[2]);
2370 ASSERT_EQUAL_64(0x32, x3);
2371 ASSERT_EQUAL_64(0x3200, dst[3]);
2372 ASSERT_EQUAL_64(0x7654, x4);
2373 ASSERT_EQUAL_64(0x765400, dst[4]);
// Offset addressing must leave the base registers untouched.
2374 ASSERT_EQUAL_64(src_base, x17);
2375 ASSERT_EQUAL_64(dst_base, x18);
2376
2377 TEARDOWN();
2378 }
2379
2380
// Exercises loads/stores with large offsets (up to 8191 words) using plain
// offset, post-index and pre-index addressing, and verifies the base-
// register writeback for the indexed modes. Sentinel values are planted at
// src[0], src[6144] and src[8191]; the rest is 0xaa filler.
2381 TEST(ldr_str_wide) {
2382 INIT_V8();
2383 SETUP();
2384
2385 uint32_t src[8192];
2386 uint32_t dst[8192];
2387 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
2388 uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
2389 memset(src, 0xaa, 8192 * sizeof(src[0]));
2390 memset(dst, 0xaa, 8192 * sizeof(dst[0]));
2391 src[0] = 0;
2392 src[6144] = 6144;
2393 src[8191] = 8191;
2394
2395 START();
2396 __ Mov(x22, src_base);
2397 __ Mov(x23, dst_base);
2398 __ Mov(x24, src_base);
2399 __ Mov(x25, dst_base);
2400 __ Mov(x26, src_base);
2401 __ Mov(x27, dst_base);
2402
2403 __ Ldr(w0, MemOperand(x22, 8191 * sizeof(src[0])));
2404 __ Str(w0, MemOperand(x23, 8191 * sizeof(dst[0])));
// Post-index: access at the base, then advance the base.
2405 __ Ldr(w1, MemOperand(x24, 4096 * sizeof(src[0]), PostIndex));
2406 __ Str(w1, MemOperand(x25, 4096 * sizeof(dst[0]), PostIndex));
// Pre-index: advance the base, then access at the new base.
2407 __ Ldr(w2, MemOperand(x26, 6144 * sizeof(src[0]), PreIndex));
2408 __ Str(w2, MemOperand(x27, 6144 * sizeof(dst[0]), PreIndex));
2409 END();
2410
2411 RUN();
2412
2413 ASSERT_EQUAL_32(8191, w0);
2414 ASSERT_EQUAL_32(8191, dst[8191]);
// Plain offset addressing: bases unchanged.
2415 ASSERT_EQUAL_64(src_base, x22);
2416 ASSERT_EQUAL_64(dst_base, x23);
2417 ASSERT_EQUAL_32(0, w1);
2418 ASSERT_EQUAL_32(0, dst[0]);
2419 ASSERT_EQUAL_64(src_base + 4096 * sizeof(src[0]), x24);
2420 ASSERT_EQUAL_64(dst_base + 4096 * sizeof(dst[0]), x25);
2421 ASSERT_EQUAL_32(6144, w2);
2422 ASSERT_EQUAL_32(6144, dst[6144]);
2423 ASSERT_EQUAL_64(src_base + 6144 * sizeof(src[0]), x26);
2424 ASSERT_EQUAL_64(dst_base + 6144 * sizeof(dst[0]), x27);
2425
2426 TEARDOWN();
2427 }
2428
2429
// Exercises pre-index addressing for every access size (w/x/b/h loads and
// stores), including a negative offset, and verifies both the transferred
// data and the written-back base registers (base + offset, applied BEFORE
// the access).
2430 TEST(ldr_str_preindex) {
2431 INIT_V8();
2432 SETUP();
2433
2434 uint64_t src[2] = {0xfedcba9876543210UL, 0x0123456789abcdefUL};
2435 uint64_t dst[6] = {0, 0, 0, 0, 0, 0};
2436 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
2437 uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
2438
2439 START();
2440 __ Mov(x17, src_base);
2441 __ Mov(x18, dst_base);
2442 __ Mov(x19, src_base);
2443 __ Mov(x20, dst_base);
// x21/x22 start past the data so the negative pre-index can step back.
2444 __ Mov(x21, src_base + 16);
2445 __ Mov(x22, dst_base + 40);
2446 __ Mov(x23, src_base);
2447 __ Mov(x24, dst_base);
2448 __ Mov(x25, src_base);
2449 __ Mov(x26, dst_base);
2450 __ Ldr(w0, MemOperand(x17, 4, PreIndex));
2451 __ Str(w0, MemOperand(x18, 12, PreIndex));
2452 __ Ldr(x1, MemOperand(x19, 8, PreIndex));
2453 __ Str(x1, MemOperand(x20, 16, PreIndex));
2454 __ Ldr(w2, MemOperand(x21, -4, PreIndex));
2455 __ Str(w2, MemOperand(x22, -4, PreIndex));
2456 __ Ldrb(w3, MemOperand(x23, 1, PreIndex));
2457 __ Strb(w3, MemOperand(x24, 25, PreIndex));
2458 __ Ldrh(w4, MemOperand(x25, 3, PreIndex));
2459 __ Strh(w4, MemOperand(x26, 41, PreIndex));
2460 END();
2461
2462 RUN();
2463
2464 ASSERT_EQUAL_64(0xfedcba98, x0);
2465 ASSERT_EQUAL_64(0xfedcba9800000000UL, dst[1]);
2466 ASSERT_EQUAL_64(0x0123456789abcdefUL, x1);
2467 ASSERT_EQUAL_64(0x0123456789abcdefUL, dst[2]);
2468 ASSERT_EQUAL_64(0x01234567, x2);
2469 ASSERT_EQUAL_64(0x0123456700000000UL, dst[4]);
2470 ASSERT_EQUAL_64(0x32, x3);
2471 ASSERT_EQUAL_64(0x3200, dst[3]);
2472 ASSERT_EQUAL_64(0x9876, x4);
2473 ASSERT_EQUAL_64(0x987600, dst[5]);
// Pre-index writes back base + offset.
2474 ASSERT_EQUAL_64(src_base + 4, x17);
2475 ASSERT_EQUAL_64(dst_base + 12, x18);
2476 ASSERT_EQUAL_64(src_base + 8, x19);
2477 ASSERT_EQUAL_64(dst_base + 16, x20);
2478 ASSERT_EQUAL_64(src_base + 12, x21);
2479 ASSERT_EQUAL_64(dst_base + 36, x22);
2480 ASSERT_EQUAL_64(src_base + 1, x23);
2481 ASSERT_EQUAL_64(dst_base + 25, x24);
2482 ASSERT_EQUAL_64(src_base + 3, x25);
2483 ASSERT_EQUAL_64(dst_base + 41, x26);
2484
2485 TEARDOWN();
2486 }
2487
2488
// Test post-index addressing for Ldr/Str/Ldrb/Strb/Ldrh/Strh: the access
// uses the original base address and the base register is updated by the
// immediate offset *after* the access. Base registers are pre-offset so the
// accesses hit the same bytes as in the pre-index test.
TEST(ldr_str_postindex) {
  INIT_V8();
  SETUP();

  uint64_t src[2] = {0xfedcba9876543210UL, 0x0123456789abcdefUL};
  uint64_t dst[6] = {0, 0, 0, 0, 0, 0};
  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);

  START();
  __ Mov(x17, src_base + 4);
  __ Mov(x18, dst_base + 12);
  __ Mov(x19, src_base + 8);
  __ Mov(x20, dst_base + 16);
  __ Mov(x21, src_base + 8);
  __ Mov(x22, dst_base + 32);
  __ Mov(x23, src_base + 1);
  __ Mov(x24, dst_base + 25);
  __ Mov(x25, src_base + 3);
  __ Mov(x26, dst_base + 41);
  __ Ldr(w0, MemOperand(x17, 4, PostIndex));
  __ Str(w0, MemOperand(x18, 12, PostIndex));
  __ Ldr(x1, MemOperand(x19, 8, PostIndex));
  __ Str(x1, MemOperand(x20, 16, PostIndex));
  __ Ldr(x2, MemOperand(x21, -8, PostIndex));
  __ Str(x2, MemOperand(x22, -32, PostIndex));
  __ Ldrb(w3, MemOperand(x23, 1, PostIndex));
  __ Strb(w3, MemOperand(x24, 5, PostIndex));
  __ Ldrh(w4, MemOperand(x25, -3, PostIndex));
  __ Strh(w4, MemOperand(x26, -41, PostIndex));
  END();

  RUN();

  ASSERT_EQUAL_64(0xfedcba98, x0);
  ASSERT_EQUAL_64(0xfedcba9800000000UL, dst[1]);
  ASSERT_EQUAL_64(0x0123456789abcdefUL, x1);
  ASSERT_EQUAL_64(0x0123456789abcdefUL, dst[2]);
  ASSERT_EQUAL_64(0x0123456789abcdefUL, x2);
  ASSERT_EQUAL_64(0x0123456789abcdefUL, dst[4]);
  ASSERT_EQUAL_64(0x32, x3);
  ASSERT_EQUAL_64(0x3200, dst[3]);
  ASSERT_EQUAL_64(0x9876, x4);
  ASSERT_EQUAL_64(0x987600, dst[5]);
  // Each base register must hold original base + offset after the access.
  ASSERT_EQUAL_64(src_base + 8, x17);
  ASSERT_EQUAL_64(dst_base + 24, x18);
  ASSERT_EQUAL_64(src_base + 16, x19);
  ASSERT_EQUAL_64(dst_base + 32, x20);
  ASSERT_EQUAL_64(src_base, x21);
  ASSERT_EQUAL_64(dst_base, x22);
  ASSERT_EQUAL_64(src_base + 2, x23);
  ASSERT_EQUAL_64(dst_base + 30, x24);
  ASSERT_EQUAL_64(src_base, x25);
  ASSERT_EQUAL_64(dst_base, x26);

  TEARDOWN();
}
2546
2547
// Test the sign-extending loads (Ldrsb/Ldrsh/Ldrsw) into both W and X
// destinations. src[0] has its byte/halfword/word sign bits set, src[1] has
// them clear, so each pair of asserts checks both extension polarities.
TEST(load_signed) {
  INIT_V8();
  SETUP();

  uint32_t src[2] = {0x80008080, 0x7fff7f7f};
  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);

  START();
  __ Mov(x24, src_base);
  __ Ldrsb(w0, MemOperand(x24));
  __ Ldrsb(w1, MemOperand(x24, 4));
  __ Ldrsh(w2, MemOperand(x24));
  __ Ldrsh(w3, MemOperand(x24, 4));
  __ Ldrsb(x4, MemOperand(x24));
  __ Ldrsb(x5, MemOperand(x24, 4));
  __ Ldrsh(x6, MemOperand(x24));
  __ Ldrsh(x7, MemOperand(x24, 4));
  __ Ldrsw(x8, MemOperand(x24));
  __ Ldrsw(x9, MemOperand(x24, 4));
  END();

  RUN();

  ASSERT_EQUAL_64(0xffffff80, x0);
  ASSERT_EQUAL_64(0x0000007f, x1);
  ASSERT_EQUAL_64(0xffff8080, x2);
  ASSERT_EQUAL_64(0x00007f7f, x3);
  ASSERT_EQUAL_64(0xffffffffffffff80UL, x4);
  ASSERT_EQUAL_64(0x000000000000007fUL, x5);
  ASSERT_EQUAL_64(0xffffffffffff8080UL, x6);
  ASSERT_EQUAL_64(0x0000000000007f7fUL, x7);
  ASSERT_EQUAL_64(0xffffffff80008080UL, x8);
  ASSERT_EQUAL_64(0x000000007fff7f7fUL, x9);

  TEARDOWN();
}
2584
2585
// Test register-offset addressing for loads and stores: plain 64-bit
// register offsets (positive and negative) and 32-bit offsets with SXTW
// extension, with and without a shift by the access-size amount.
TEST(load_store_regoffset) {
  INIT_V8();
  SETUP();

  uint32_t src[3] = {1, 2, 3};
  uint32_t dst[4] = {0, 0, 0, 0};
  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);

  START();
  __ Mov(x16, src_base);
  __ Mov(x17, dst_base);
  __ Mov(x18, src_base + 3 * sizeof(src[0]));
  __ Mov(x19, dst_base + 3 * sizeof(dst[0]));
  __ Mov(x20, dst_base + 4 * sizeof(dst[0]));
  __ Mov(x24, 0);
  __ Mov(x25, 4);
  __ Mov(x26, -4);
  __ Mov(x27, 0xfffffffc);  // 32-bit -4.
  __ Mov(x28, 0xfffffffe);  // 32-bit -2.
  __ Mov(x29, 0xffffffff);  // 32-bit -1.

  __ Ldr(w0, MemOperand(x16, x24));
  __ Ldr(x1, MemOperand(x16, x25));
  __ Ldr(w2, MemOperand(x18, x26));
  __ Ldr(w3, MemOperand(x18, x27, SXTW));
  __ Ldr(w4, MemOperand(x18, x28, SXTW, 2));
  __ Str(w0, MemOperand(x17, x24));
  __ Str(x1, MemOperand(x17, x25));
  __ Str(w2, MemOperand(x20, x29, SXTW, 2));
  END();

  RUN();

  ASSERT_EQUAL_64(1, x0);
  // 64-bit load at src + 4 picks up src[1] and src[2] (little-endian).
  ASSERT_EQUAL_64(0x0000000300000002UL, x1);
  ASSERT_EQUAL_64(3, x2);
  ASSERT_EQUAL_64(3, x3);
  ASSERT_EQUAL_64(2, x4);
  ASSERT_EQUAL_32(1, dst[0]);
  ASSERT_EQUAL_32(2, dst[1]);
  ASSERT_EQUAL_32(3, dst[2]);
  ASSERT_EQUAL_32(3, dst[3]);

  TEARDOWN();
}
2632
2633
// Test single-precision FP loads and stores with immediate-offset,
// post-index and pre-index addressing, checking both the transferred values
// and the base-register writeback behaviour.
TEST(load_store_float) {
  INIT_V8();
  SETUP();

  float src[3] = {1.0, 2.0, 3.0};
  float dst[3] = {0.0, 0.0, 0.0};
  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);

  START();
  __ Mov(x17, src_base);
  __ Mov(x18, dst_base);
  __ Mov(x19, src_base);
  __ Mov(x20, dst_base);
  __ Mov(x21, src_base);
  __ Mov(x22, dst_base);
  __ Ldr(s0, MemOperand(x17, sizeof(src[0])));
  __ Str(s0, MemOperand(x18, sizeof(dst[0]), PostIndex));
  __ Ldr(s1, MemOperand(x19, sizeof(src[0]), PostIndex));
  __ Str(s1, MemOperand(x20, 2 * sizeof(dst[0]), PreIndex));
  __ Ldr(s2, MemOperand(x21, 2 * sizeof(src[0]), PreIndex));
  __ Str(s2, MemOperand(x22, sizeof(dst[0])));
  END();

  RUN();

  ASSERT_EQUAL_FP32(2.0, s0);
  ASSERT_EQUAL_FP32(2.0, dst[0]);
  ASSERT_EQUAL_FP32(1.0, s1);
  ASSERT_EQUAL_FP32(1.0, dst[2]);
  ASSERT_EQUAL_FP32(3.0, s2);
  ASSERT_EQUAL_FP32(3.0, dst[1]);
  // Only the PostIndex/PreIndex forms update the base register.
  ASSERT_EQUAL_64(src_base, x17);
  ASSERT_EQUAL_64(dst_base + sizeof(dst[0]), x18);
  ASSERT_EQUAL_64(src_base + sizeof(src[0]), x19);
  ASSERT_EQUAL_64(dst_base + 2 * sizeof(dst[0]), x20);
  ASSERT_EQUAL_64(src_base + 2 * sizeof(src[0]), x21);
  ASSERT_EQUAL_64(dst_base, x22);

  TEARDOWN();
}
2675
2676
// Test double-precision FP loads and stores; mirrors load_store_float with
// d registers and 8-byte element sizes.
TEST(load_store_double) {
  INIT_V8();
  SETUP();

  double src[3] = {1.0, 2.0, 3.0};
  double dst[3] = {0.0, 0.0, 0.0};
  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);

  START();
  __ Mov(x17, src_base);
  __ Mov(x18, dst_base);
  __ Mov(x19, src_base);
  __ Mov(x20, dst_base);
  __ Mov(x21, src_base);
  __ Mov(x22, dst_base);
  __ Ldr(d0, MemOperand(x17, sizeof(src[0])));
  __ Str(d0, MemOperand(x18, sizeof(dst[0]), PostIndex));
  __ Ldr(d1, MemOperand(x19, sizeof(src[0]), PostIndex));
  __ Str(d1, MemOperand(x20, 2 * sizeof(dst[0]), PreIndex));
  __ Ldr(d2, MemOperand(x21, 2 * sizeof(src[0]), PreIndex));
  __ Str(d2, MemOperand(x22, sizeof(dst[0])));
  END();

  RUN();

  ASSERT_EQUAL_FP64(2.0, d0);
  ASSERT_EQUAL_FP64(2.0, dst[0]);
  ASSERT_EQUAL_FP64(1.0, d1);
  ASSERT_EQUAL_FP64(1.0, dst[2]);
  ASSERT_EQUAL_FP64(3.0, d2);
  ASSERT_EQUAL_FP64(3.0, dst[1]);
  // Only the PostIndex/PreIndex forms update the base register.
  ASSERT_EQUAL_64(src_base, x17);
  ASSERT_EQUAL_64(dst_base + sizeof(dst[0]), x18);
  ASSERT_EQUAL_64(src_base + sizeof(src[0]), x19);
  ASSERT_EQUAL_64(dst_base + 2 * sizeof(dst[0]), x20);
  ASSERT_EQUAL_64(src_base + 2 * sizeof(src[0]), x21);
  ASSERT_EQUAL_64(dst_base, x22);

  TEARDOWN();
}
2718
2719
// Test single-precision load-pair/store-pair (Ldp/Stp) with post-index load
// and pre-index store writeback; the store swaps the register order so
// dst[1]/dst[2] receive the values reversed.
TEST(ldp_stp_float) {
  INIT_V8();
  SETUP();

  float src[2] = {1.0, 2.0};
  float dst[3] = {0.0, 0.0, 0.0};
  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);

  START();
  __ Mov(x16, src_base);
  __ Mov(x17, dst_base);
  __ Ldp(s31, s0, MemOperand(x16, 2 * sizeof(src[0]), PostIndex));
  __ Stp(s0, s31, MemOperand(x17, sizeof(dst[1]), PreIndex));
  END();

  RUN();

  ASSERT_EQUAL_FP32(1.0, s31);
  ASSERT_EQUAL_FP32(2.0, s0);
  ASSERT_EQUAL_FP32(0.0, dst[0]);  // Pre-index skips dst[0].
  ASSERT_EQUAL_FP32(2.0, dst[1]);
  ASSERT_EQUAL_FP32(1.0, dst[2]);
  ASSERT_EQUAL_64(src_base + 2 * sizeof(src[0]), x16);
  ASSERT_EQUAL_64(dst_base + sizeof(dst[1]), x17);

  TEARDOWN();
}
2748
2749
// Test double-precision load-pair/store-pair (Ldp/Stp); mirrors
// ldp_stp_float with d registers and 8-byte elements.
TEST(ldp_stp_double) {
  INIT_V8();
  SETUP();

  double src[2] = {1.0, 2.0};
  double dst[3] = {0.0, 0.0, 0.0};
  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);

  START();
  __ Mov(x16, src_base);
  __ Mov(x17, dst_base);
  __ Ldp(d31, d0, MemOperand(x16, 2 * sizeof(src[0]), PostIndex));
  __ Stp(d0, d31, MemOperand(x17, sizeof(dst[1]), PreIndex));
  END();

  RUN();

  ASSERT_EQUAL_FP64(1.0, d31);
  ASSERT_EQUAL_FP64(2.0, d0);
  ASSERT_EQUAL_FP64(0.0, dst[0]);  // Pre-index skips dst[0].
  ASSERT_EQUAL_FP64(2.0, dst[1]);
  ASSERT_EQUAL_FP64(1.0, dst[2]);
  ASSERT_EQUAL_64(src_base + 2 * sizeof(src[0]), x16);
  ASSERT_EQUAL_64(dst_base + sizeof(dst[1]), x17);

  TEARDOWN();
}
2778
2779
// Test Ldp/Stp with plain immediate offsets (no writeback), for 32-bit and
// 64-bit registers, including negative offsets from the ends of the arrays.
// The base registers must be unchanged afterwards.
TEST(ldp_stp_offset) {
  INIT_V8();
  SETUP();

  uint64_t src[3] = {0x0011223344556677UL, 0x8899aabbccddeeffUL,
                     0xffeeddccbbaa9988UL};
  uint64_t dst[7] = {0, 0, 0, 0, 0, 0, 0};
  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);

  START();
  __ Mov(x16, src_base);
  __ Mov(x17, dst_base);
  __ Mov(x18, src_base + 24);  // One past the end of src.
  __ Mov(x19, dst_base + 56);  // One past the end of dst.
  __ Ldp(w0, w1, MemOperand(x16));
  __ Ldp(w2, w3, MemOperand(x16, 4));
  __ Ldp(x4, x5, MemOperand(x16, 8));
  __ Ldp(w6, w7, MemOperand(x18, -12));
  __ Ldp(x8, x9, MemOperand(x18, -16));
  __ Stp(w0, w1, MemOperand(x17));
  __ Stp(w2, w3, MemOperand(x17, 8));
  __ Stp(x4, x5, MemOperand(x17, 16));
  __ Stp(w6, w7, MemOperand(x19, -24));
  __ Stp(x8, x9, MemOperand(x19, -16));
  END();

  RUN();

  // Expected values follow from little-endian byte order: the first
  // register of a pair gets the lower-addressed element.
  ASSERT_EQUAL_64(0x44556677, x0);
  ASSERT_EQUAL_64(0x00112233, x1);
  ASSERT_EQUAL_64(0x0011223344556677UL, dst[0]);
  ASSERT_EQUAL_64(0x00112233, x2);
  ASSERT_EQUAL_64(0xccddeeff, x3);
  ASSERT_EQUAL_64(0xccddeeff00112233UL, dst[1]);
  ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x4);
  ASSERT_EQUAL_64(0x8899aabbccddeeffUL, dst[2]);
  ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, x5);
  ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, dst[3]);
  ASSERT_EQUAL_64(0x8899aabb, x6);
  ASSERT_EQUAL_64(0xbbaa9988, x7);
  ASSERT_EQUAL_64(0xbbaa99888899aabbUL, dst[4]);
  ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x8);
  ASSERT_EQUAL_64(0x8899aabbccddeeffUL, dst[5]);
  ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, x9);
  ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, dst[6]);
  // No writeback: base registers are unchanged.
  ASSERT_EQUAL_64(src_base, x16);
  ASSERT_EQUAL_64(dst_base, x17);
  ASSERT_EQUAL_64(src_base + 24, x18);
  ASSERT_EQUAL_64(dst_base + 56, x19);

  TEARDOWN();
}
2833
2834
// Test the non-temporal pair instructions (Ldnp/Stnp). Functionally this is
// identical to ldp_stp_offset — the non-temporal forms differ only in the
// cache-usage hint they give the hardware, not in architectural results.
TEST(ldnp_stnp_offset) {
  INIT_V8();
  SETUP();

  uint64_t src[3] = {0x0011223344556677UL, 0x8899aabbccddeeffUL,
                     0xffeeddccbbaa9988UL};
  uint64_t dst[7] = {0, 0, 0, 0, 0, 0, 0};
  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);

  START();
  __ Mov(x16, src_base);
  __ Mov(x17, dst_base);
  __ Mov(x18, src_base + 24);  // One past the end of src.
  __ Mov(x19, dst_base + 56);  // One past the end of dst.
  __ Ldnp(w0, w1, MemOperand(x16));
  __ Ldnp(w2, w3, MemOperand(x16, 4));
  __ Ldnp(x4, x5, MemOperand(x16, 8));
  __ Ldnp(w6, w7, MemOperand(x18, -12));
  __ Ldnp(x8, x9, MemOperand(x18, -16));
  __ Stnp(w0, w1, MemOperand(x17));
  __ Stnp(w2, w3, MemOperand(x17, 8));
  __ Stnp(x4, x5, MemOperand(x17, 16));
  __ Stnp(w6, w7, MemOperand(x19, -24));
  __ Stnp(x8, x9, MemOperand(x19, -16));
  END();

  RUN();

  ASSERT_EQUAL_64(0x44556677, x0);
  ASSERT_EQUAL_64(0x00112233, x1);
  ASSERT_EQUAL_64(0x0011223344556677UL, dst[0]);
  ASSERT_EQUAL_64(0x00112233, x2);
  ASSERT_EQUAL_64(0xccddeeff, x3);
  ASSERT_EQUAL_64(0xccddeeff00112233UL, dst[1]);
  ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x4);
  ASSERT_EQUAL_64(0x8899aabbccddeeffUL, dst[2]);
  ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, x5);
  ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, dst[3]);
  ASSERT_EQUAL_64(0x8899aabb, x6);
  ASSERT_EQUAL_64(0xbbaa9988, x7);
  ASSERT_EQUAL_64(0xbbaa99888899aabbUL, dst[4]);
  ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x8);
  ASSERT_EQUAL_64(0x8899aabbccddeeffUL, dst[5]);
  ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, x9);
  ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, dst[6]);
  // No writeback: base registers are unchanged.
  ASSERT_EQUAL_64(src_base, x16);
  ASSERT_EQUAL_64(dst_base, x17);
  ASSERT_EQUAL_64(src_base + 24, x18);
  ASSERT_EQUAL_64(dst_base + 56, x19);

  TEARDOWN();
}
2888
2889
// Test Ldp/Stp with pre-index writeback. After each writeback the updated
// base is copied into a scratch register (x19-x22) so the intermediate
// addresses can be verified as well as the final ones.
TEST(ldp_stp_preindex) {
  INIT_V8();
  SETUP();

  uint64_t src[3] = {0x0011223344556677UL, 0x8899aabbccddeeffUL,
                     0xffeeddccbbaa9988UL};
  uint64_t dst[5] = {0, 0, 0, 0, 0};
  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);

  START();
  __ Mov(x16, src_base);
  __ Mov(x17, dst_base);
  __ Mov(x18, dst_base + 16);
  __ Ldp(w0, w1, MemOperand(x16, 4, PreIndex));
  __ Mov(x19, x16);  // Capture x16 after the first writeback.
  __ Ldp(w2, w3, MemOperand(x16, -4, PreIndex));
  __ Stp(w2, w3, MemOperand(x17, 4, PreIndex));
  __ Mov(x20, x17);  // Capture x17 after the first writeback.
  __ Stp(w0, w1, MemOperand(x17, -4, PreIndex));
  __ Ldp(x4, x5, MemOperand(x16, 8, PreIndex));
  __ Mov(x21, x16);  // Capture x16 after the 64-bit writeback.
  __ Ldp(x6, x7, MemOperand(x16, -8, PreIndex));
  __ Stp(x7, x6, MemOperand(x18, 8, PreIndex));
  __ Mov(x22, x18);  // Capture x18 after the first 64-bit store writeback.
  __ Stp(x5, x4, MemOperand(x18, -8, PreIndex));
  END();

  RUN();

  ASSERT_EQUAL_64(0x00112233, x0);
  ASSERT_EQUAL_64(0xccddeeff, x1);
  ASSERT_EQUAL_64(0x44556677, x2);
  ASSERT_EQUAL_64(0x00112233, x3);
  ASSERT_EQUAL_64(0xccddeeff00112233UL, dst[0]);
  ASSERT_EQUAL_64(0x0000000000112233UL, dst[1]);
  ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x4);
  ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, x5);
  ASSERT_EQUAL_64(0x0011223344556677UL, x6);
  ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x7);
  ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, dst[2]);
  ASSERT_EQUAL_64(0x8899aabbccddeeffUL, dst[3]);
  ASSERT_EQUAL_64(0x0011223344556677UL, dst[4]);
  // Final and captured intermediate base values.
  ASSERT_EQUAL_64(src_base, x16);
  ASSERT_EQUAL_64(dst_base, x17);
  ASSERT_EQUAL_64(dst_base + 16, x18);
  ASSERT_EQUAL_64(src_base + 4, x19);
  ASSERT_EQUAL_64(dst_base + 4, x20);
  ASSERT_EQUAL_64(src_base + 8, x21);
  ASSERT_EQUAL_64(dst_base + 24, x22);

  TEARDOWN();
}
2943
2944
// Test Ldp/Stp with post-index writeback; structured like ldp_stp_preindex,
// with scratch registers capturing intermediate base values. src has a
// fourth element so the post-index accesses stay inside the array.
TEST(ldp_stp_postindex) {
  INIT_V8();
  SETUP();

  uint64_t src[4] = {0x0011223344556677UL, 0x8899aabbccddeeffUL,
                     0xffeeddccbbaa9988UL, 0x7766554433221100UL};
  uint64_t dst[5] = {0, 0, 0, 0, 0};
  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);

  START();
  __ Mov(x16, src_base);
  __ Mov(x17, dst_base);
  __ Mov(x18, dst_base + 16);
  __ Ldp(w0, w1, MemOperand(x16, 4, PostIndex));
  __ Mov(x19, x16);  // Capture x16 after the first writeback.
  __ Ldp(w2, w3, MemOperand(x16, -4, PostIndex));
  __ Stp(w2, w3, MemOperand(x17, 4, PostIndex));
  __ Mov(x20, x17);  // Capture x17 after the first writeback.
  __ Stp(w0, w1, MemOperand(x17, -4, PostIndex));
  __ Ldp(x4, x5, MemOperand(x16, 8, PostIndex));
  __ Mov(x21, x16);  // Capture x16 after the 64-bit writeback.
  __ Ldp(x6, x7, MemOperand(x16, -8, PostIndex));
  __ Stp(x7, x6, MemOperand(x18, 8, PostIndex));
  __ Mov(x22, x18);  // Capture x18 after the first 64-bit store writeback.
  __ Stp(x5, x4, MemOperand(x18, -8, PostIndex));
  END();

  RUN();

  ASSERT_EQUAL_64(0x44556677, x0);
  ASSERT_EQUAL_64(0x00112233, x1);
  ASSERT_EQUAL_64(0x00112233, x2);
  ASSERT_EQUAL_64(0xccddeeff, x3);
  ASSERT_EQUAL_64(0x4455667700112233UL, dst[0]);
  ASSERT_EQUAL_64(0x0000000000112233UL, dst[1]);
  ASSERT_EQUAL_64(0x0011223344556677UL, x4);
  ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x5);
  ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x6);
  ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, x7);
  ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, dst[2]);
  ASSERT_EQUAL_64(0x8899aabbccddeeffUL, dst[3]);
  ASSERT_EQUAL_64(0x0011223344556677UL, dst[4]);
  // Final and captured intermediate base values.
  ASSERT_EQUAL_64(src_base, x16);
  ASSERT_EQUAL_64(dst_base, x17);
  ASSERT_EQUAL_64(dst_base + 16, x18);
  ASSERT_EQUAL_64(src_base + 4, x19);
  ASSERT_EQUAL_64(dst_base + 4, x20);
  ASSERT_EQUAL_64(src_base + 8, x21);
  ASSERT_EQUAL_64(dst_base + 24, x22);

  TEARDOWN();
}
2998
2999
// Test Ldpsw (load pair of words, sign-extended to 64 bits): one word with
// the sign bit set and one without, so both extension polarities are
// covered.
TEST(ldp_sign_extend) {
  INIT_V8();
  SETUP();

  uint32_t src[2] = {0x80000000, 0x7fffffff};
  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);

  START();
  __ Mov(x24, src_base);
  __ Ldpsw(x0, x1, MemOperand(x24));
  END();

  RUN();

  ASSERT_EQUAL_64(0xffffffff80000000UL, x0);
  ASSERT_EQUAL_64(0x000000007fffffffUL, x1);

  TEARDOWN();
}
3019
3020
3021 TEST(ldur_stur) {
3022 INIT_V8();
3023 SETUP();
3024
3025 int64_t src[2] = {0x0123456789abcdefUL, 0x0123456789abcdefUL};
3026 int64_t dst[5] = {0, 0, 0, 0, 0};
3027 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
3028 uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
3029
3030 START();
3031 __ Mov(x17, src_base);
3032 __ Mov(x18, dst_base);
3033 __ Mov(x19, src_base + 16);
3034 __ Mov(x20, dst_base + 32);
3035 __ Mov(x21, dst_base + 40);
3036 __ Ldr(w0, MemOperand(x17, 1));
3037 __ Str(w0, MemOperand(x18, 2));
3038 __ Ldr(x1, MemOperand(x17, 3));
3039 __ Str(x1, MemOperand(x18, 9));
3040 __ Ldr(w2, MemOperand(x19, -9));
3041 __ Str(w2, MemOperand(x20, -5));
3042 __ Ldrb(w3, MemOperand(x19, -1));
3043 __ Strb(w3, MemOperand(x21, -1));
3044 END();
3045
3046 RUN();
3047
3048 ASSERT_EQUAL_64(0x6789abcd, x0);
3049 ASSERT_EQUAL_64(0x6789abcd0000L, dst[0]);
3050 ASSERT_EQUAL_64(0xabcdef0123456789L, x1);
3051 ASSERT_EQUAL_64(0xcdef012345678900L, dst[1]);
3052 ASSERT_EQUAL_64(0x000000ab, dst[2]);
3053 ASSERT_EQUAL_64(0xabcdef01, x2);
3054 ASSERT_EQUAL_64(0x00abcdef01000000L, dst[3]);
3055 ASSERT_EQUAL_64(0x00000001, x3);
3056 ASSERT_EQUAL_64(0x0100000000000000L, dst[4]);
3057 ASSERT_EQUAL_64(src_base, x17);
3058 ASSERT_EQUAL_64(dst_base, x18);
3059 ASSERT_EQUAL_64(src_base + 16, x19);
3060 ASSERT_EQUAL_64(dst_base + 32, x20);
3061
3062 TEARDOWN();
3063 }
3064
3065
3066 #if 0 // TODO(all) enable.
3067 // TODO(rodolph): Adapt w16 Literal tests for RelocInfo.
// Test Ldr of X/W/D/S literals from the literal pool. NOTE(review): this
// test is inside an `#if 0` block (disabled pending RelocInfo support for
// literals — see the TODO above the guard).
TEST(ldr_literal) {
  INIT_V8();
  SETUP();

  START();
  __ Ldr(x2, 0x1234567890abcdefUL);
  __ Ldr(w3, 0xfedcba09);
  __ Ldr(d13, 1.234);
  __ Ldr(s25, 2.5);
  END();

  RUN();

  ASSERT_EQUAL_64(0x1234567890abcdefUL, x2);
  ASSERT_EQUAL_64(0xfedcba09, x3);
  ASSERT_EQUAL_FP64(1.234, d13);
  ASSERT_EQUAL_FP32(2.5, s25);

  TEARDOWN();
}
3088
3089
// Helper for the (currently disabled) ldr_literal_range_* tests. Emits four
// literal loads, pads the code with NOPs up to exactly `range_` bytes, then
// calls CheckLiteralPool(option) and verifies whether the pool was dumped
// (`expect_dump`). Finally it checks that a second pool works after a
// forced flush.
//
// range_      - code size (in bytes) at which CheckLiteralPool is invoked.
// option      - NoJumpRequired: a branch over the pool is emitted here;
//               JumpRequired: CheckLiteralPool must emit its own branch.
// expect_dump - whether the pool should have been emitted at that point.
static void LdrLiteralRangeHelper(ptrdiff_t range_,
                                  LiteralPoolEmitOption option,
                                  bool expect_dump) {
  ASSERT(range_ > 0);
  SETUP_SIZE(range_ + 1024);

  Label label_1, label_2;

  size_t range = static_cast<size_t>(range_);
  size_t code_size = 0;
  size_t pool_guard_size;

  if (option == NoJumpRequired) {
    // Space for an explicit branch.
    pool_guard_size = sizeof(Instr);
  } else {
    pool_guard_size = 0;
  }

  START();
  // Force a pool dump so the pool starts off empty.
  __ EmitLiteralPool(JumpRequired);
  ASSERT_LITERAL_POOL_SIZE(0);

  __ Ldr(x0, 0x1234567890abcdefUL);
  __ Ldr(w1, 0xfedcba09);
  __ Ldr(d0, 1.234);
  __ Ldr(s1, 2.5);
  ASSERT_LITERAL_POOL_SIZE(4);

  code_size += 4 * sizeof(Instr);

  // Check that the requested range (allowing space for a branch over the pool)
  // can be handled by this test.
  ASSERT((code_size + pool_guard_size) <= range);

  // Emit NOPs up to 'range', leaving space for the pool guard.
  while ((code_size + pool_guard_size) < range) {
    __ Nop();
    code_size += sizeof(Instr);
  }

  // Emit the guard sequence before the literal pool.
  if (option == NoJumpRequired) {
    __ B(&label_1);
    code_size += sizeof(Instr);
  }

  ASSERT(code_size == range);
  ASSERT_LITERAL_POOL_SIZE(4);

  // Possibly generate a literal pool.
  __ CheckLiteralPool(option);
  __ Bind(&label_1);
  if (expect_dump) {
    ASSERT_LITERAL_POOL_SIZE(0);
  } else {
    ASSERT_LITERAL_POOL_SIZE(4);
  }

  // Force a pool flush to check that a second pool functions correctly.
  __ EmitLiteralPool(JumpRequired);
  ASSERT_LITERAL_POOL_SIZE(0);

  // These loads should be after the pool (and will require a new one).
  __ Ldr(x4, 0x34567890abcdef12UL);
  __ Ldr(w5, 0xdcba09fe);
  __ Ldr(d4, 123.4);
  __ Ldr(s5, 250.0);
  ASSERT_LITERAL_POOL_SIZE(4);
  END();

  RUN();

  // Check that the literals loaded correctly.
  ASSERT_EQUAL_64(0x1234567890abcdefUL, x0);
  ASSERT_EQUAL_64(0xfedcba09, x1);
  ASSERT_EQUAL_FP64(1.234, d0);
  ASSERT_EQUAL_FP32(2.5, s1);
  ASSERT_EQUAL_64(0x34567890abcdef12UL, x4);
  ASSERT_EQUAL_64(0xdcba09fe, x5);
  ASSERT_EQUAL_FP64(123.4, d4);
  ASSERT_EQUAL_FP32(250.0, s5);

  TEARDOWN();
}
3176
3177
// Disabled (inside `#if 0`) literal-pool range tests: each one drives
// LdrLiteralRangeHelper at a boundary of the recommended pool range or the
// pool check interval, and states whether a pool dump is expected there.
TEST(ldr_literal_range_1) {
  INIT_V8();
  // Exactly at the recommended range: pool should be dumped.
  LdrLiteralRangeHelper(kRecommendedLiteralPoolRange,
                        NoJumpRequired,
                        true);
}


TEST(ldr_literal_range_2) {
  INIT_V8();
  // One instruction short of the recommended range: no dump yet.
  LdrLiteralRangeHelper(kRecommendedLiteralPoolRange-sizeof(Instr),
                        NoJumpRequired,
                        false);
}


TEST(ldr_literal_range_3) {
  INIT_V8();
  // Well past the recommended range: pool must be dumped (with a jump).
  LdrLiteralRangeHelper(2 * kRecommendedLiteralPoolRange,
                        JumpRequired,
                        true);
}


TEST(ldr_literal_range_4) {
  INIT_V8();
  // Just short of twice the recommended range: no dump expected.
  LdrLiteralRangeHelper(2 * kRecommendedLiteralPoolRange-sizeof(Instr),
                        JumpRequired,
                        false);
}


TEST(ldr_literal_range_5) {
  INIT_V8();
  // At the pool check interval: no dump expected.
  LdrLiteralRangeHelper(kLiteralPoolCheckInterval,
                        JumpRequired,
                        false);
}


TEST(ldr_literal_range_6) {
  INIT_V8();
  // One instruction short of the check interval: no dump expected.
  LdrLiteralRangeHelper(kLiteralPoolCheckInterval-sizeof(Instr),
                        JumpRequired,
                        false);
}
3224 #endif
3225
// Test Add/Sub with immediate operands, in both 64-bit (x) and 32-bit (w)
// forms, including 12-bit-shifted immediates (imm << 12) and wrap-around at
// the register width.
TEST(add_sub_imm) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x0, 0x0);
  __ Mov(x1, 0x1111);
  __ Mov(x2, 0xffffffffffffffffL);
  __ Mov(x3, 0x8000000000000000L);

  __ Add(x10, x0, Operand(0x123));
  __ Add(x11, x1, Operand(0x122000));
  __ Add(x12, x0, Operand(0xabc << 12));
  __ Add(x13, x2, Operand(1));  // -1 + 1 wraps to 0.

  __ Add(w14, w0, Operand(0x123));
  __ Add(w15, w1, Operand(0x122000));
  __ Add(w16, w0, Operand(0xabc << 12));
  __ Add(w17, w2, Operand(1));  // 32-bit -1 + 1 wraps to 0.

  __ Sub(x20, x0, Operand(0x1));
  __ Sub(x21, x1, Operand(0x111));
  __ Sub(x22, x1, Operand(0x1 << 12));
  __ Sub(x23, x3, Operand(1));

  __ Sub(w24, w0, Operand(0x1));
  __ Sub(w25, w1, Operand(0x111));
  __ Sub(w26, w1, Operand(0x1 << 12));
  __ Sub(w27, w3, Operand(1));
  END();

  RUN();

  ASSERT_EQUAL_64(0x123, x10);
  ASSERT_EQUAL_64(0x123111, x11);
  ASSERT_EQUAL_64(0xabc000, x12);
  ASSERT_EQUAL_64(0x0, x13);

  ASSERT_EQUAL_32(0x123, w14);
  ASSERT_EQUAL_32(0x123111, w15);
  ASSERT_EQUAL_32(0xabc000, w16);
  ASSERT_EQUAL_32(0x0, w17);

  ASSERT_EQUAL_64(0xffffffffffffffffL, x20);
  ASSERT_EQUAL_64(0x1000, x21);
  ASSERT_EQUAL_64(0x111, x22);
  ASSERT_EQUAL_64(0x7fffffffffffffffL, x23);

  ASSERT_EQUAL_32(0xffffffff, w24);
  ASSERT_EQUAL_32(0x1000, w25);
  ASSERT_EQUAL_32(0x111, w26);
  ASSERT_EQUAL_32(0xffffffff, w27);

  TEARDOWN();
}
3281
3282
// Test Add/Sub with immediates too wide for the add/sub-immediate encoding;
// the macro assembler must materialize them via a scratch register.
TEST(add_sub_wide_imm) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x0, 0x0);
  __ Mov(x1, 0x1);

  __ Add(x10, x0, Operand(0x1234567890abcdefUL));
  __ Add(x11, x1, Operand(0xffffffff));

  __ Add(w12, w0, Operand(0x12345678));
  __ Add(w13, w1, Operand(0xffffffff));  // 32-bit: 1 + (-1) == 0.

  __ Sub(x20, x0, Operand(0x1234567890abcdefUL));

  __ Sub(w21, w0, Operand(0x12345678));
  END();

  RUN();

  ASSERT_EQUAL_64(0x1234567890abcdefUL, x10);
  ASSERT_EQUAL_64(0x100000000UL, x11);

  ASSERT_EQUAL_32(0x12345678, w12);
  ASSERT_EQUAL_64(0x0, x13);

  ASSERT_EQUAL_64(-0x1234567890abcdefUL, x20);

  ASSERT_EQUAL_32(-0x12345678, w21);

  TEARDOWN();
}
3316
3317
// Test Add/Sub with shifted-register operands (LSL, LSR, ASR, ROR), in both
// 64-bit and 32-bit forms.
TEST(add_sub_shifted) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x0, 0);
  __ Mov(x1, 0x0123456789abcdefL);
  __ Mov(x2, 0xfedcba9876543210L);
  __ Mov(x3, 0xffffffffffffffffL);

  __ Add(x10, x1, Operand(x2));
  __ Add(x11, x0, Operand(x1, LSL, 8));
  __ Add(x12, x0, Operand(x1, LSR, 8));
  __ Add(x13, x0, Operand(x1, ASR, 8));
  __ Add(x14, x0, Operand(x2, ASR, 8));
  __ Add(w15, w0, Operand(w1, ASR, 8));
  __ Add(w18, w3, Operand(w1, ROR, 8));
  __ Add(x19, x3, Operand(x1, ROR, 8));

  __ Sub(x20, x3, Operand(x2));
  __ Sub(x21, x3, Operand(x1, LSL, 8));
  __ Sub(x22, x3, Operand(x1, LSR, 8));
  __ Sub(x23, x3, Operand(x1, ASR, 8));
  __ Sub(x24, x3, Operand(x2, ASR, 8));
  __ Sub(w25, w3, Operand(w1, ASR, 8));
  __ Sub(w26, w3, Operand(w1, ROR, 8));
  __ Sub(x27, x3, Operand(x1, ROR, 8));
  END();

  RUN();

  ASSERT_EQUAL_64(0xffffffffffffffffL, x10);
  ASSERT_EQUAL_64(0x23456789abcdef00L, x11);
  ASSERT_EQUAL_64(0x000123456789abcdL, x12);
  ASSERT_EQUAL_64(0x000123456789abcdL, x13);  // ASR of positive == LSR.
  ASSERT_EQUAL_64(0xfffedcba98765432L, x14);  // ASR shifts the sign bit in.
  ASSERT_EQUAL_64(0xff89abcd, x15);
  ASSERT_EQUAL_64(0xef89abcc, x18);
  ASSERT_EQUAL_64(0xef0123456789abccL, x19);

  ASSERT_EQUAL_64(0x0123456789abcdefL, x20);
  ASSERT_EQUAL_64(0xdcba9876543210ffL, x21);
  ASSERT_EQUAL_64(0xfffedcba98765432L, x22);
  ASSERT_EQUAL_64(0xfffedcba98765432L, x23);
  ASSERT_EQUAL_64(0x000123456789abcdL, x24);
  ASSERT_EQUAL_64(0x00765432, x25);
  ASSERT_EQUAL_64(0x10765432, x26);
  ASSERT_EQUAL_64(0x10fedcba98765432L, x27);

  TEARDOWN();
}
3369
3370
// Test Add/Sub with extended-register operands (UXTB/UXTH/UXTW and the
// signed SXTB/SXTH/SXTW variants), each optionally shifted left by the
// extend's shift amount (0-4).
TEST(add_sub_extended) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x0, 0);
  __ Mov(x1, 0x0123456789abcdefL);
  __ Mov(x2, 0xfedcba9876543210L);
  __ Mov(w3, 0x80);  // SXTB of 0x80 is -128.

  __ Add(x10, x0, Operand(x1, UXTB, 0));
  __ Add(x11, x0, Operand(x1, UXTB, 1));
  __ Add(x12, x0, Operand(x1, UXTH, 2));
  __ Add(x13, x0, Operand(x1, UXTW, 4));

  __ Add(x14, x0, Operand(x1, SXTB, 0));
  __ Add(x15, x0, Operand(x1, SXTB, 1));
  __ Add(x16, x0, Operand(x1, SXTH, 2));
  __ Add(x17, x0, Operand(x1, SXTW, 3));
  __ Add(x18, x0, Operand(x2, SXTB, 0));
  __ Add(x19, x0, Operand(x2, SXTB, 1));
  __ Add(x20, x0, Operand(x2, SXTH, 2));
  __ Add(x21, x0, Operand(x2, SXTW, 3));

  __ Add(x22, x1, Operand(x2, SXTB, 1));
  __ Sub(x23, x1, Operand(x2, SXTB, 1));

  __ Add(w24, w1, Operand(w2, UXTB, 2));
  __ Add(w25, w0, Operand(w1, SXTB, 0));
  __ Add(w26, w0, Operand(w1, SXTB, 1));
  __ Add(w27, w2, Operand(w1, SXTW, 3));

  __ Add(w28, w0, Operand(w1, SXTW, 3));
  __ Add(x29, x0, Operand(w1, SXTW, 3));

  __ Sub(x30, x0, Operand(w3, SXTB, 1));  // 0 - (-128 << 1) == 256.
  END();

  RUN();

  ASSERT_EQUAL_64(0xefL, x10);
  ASSERT_EQUAL_64(0x1deL, x11);
  ASSERT_EQUAL_64(0x337bcL, x12);
  ASSERT_EQUAL_64(0x89abcdef0L, x13);

  ASSERT_EQUAL_64(0xffffffffffffffefL, x14);
  ASSERT_EQUAL_64(0xffffffffffffffdeL, x15);
  ASSERT_EQUAL_64(0xffffffffffff37bcL, x16);
  ASSERT_EQUAL_64(0xfffffffc4d5e6f78L, x17);
  ASSERT_EQUAL_64(0x10L, x18);
  ASSERT_EQUAL_64(0x20L, x19);
  ASSERT_EQUAL_64(0xc840L, x20);
  ASSERT_EQUAL_64(0x3b2a19080L, x21);

  ASSERT_EQUAL_64(0x0123456789abce0fL, x22);
  ASSERT_EQUAL_64(0x0123456789abcdcfL, x23);

  ASSERT_EQUAL_32(0x89abce2f, w24);
  ASSERT_EQUAL_32(0xffffffef, w25);
  ASSERT_EQUAL_32(0xffffffde, w26);
  ASSERT_EQUAL_32(0xc3b2a188, w27);

  ASSERT_EQUAL_32(0x4d5e6f78, w28);
  ASSERT_EQUAL_64(0xfffffffc4d5e6f78L, x29);

  ASSERT_EQUAL_64(256, x30);

  TEARDOWN();
}
3440
3441
// Test Add/Sub with negative immediates: the macro assembler is expected to
// accept them (e.g. by swapping Add for Sub of the magnitude or
// materializing the constant).
TEST(add_sub_negative) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x0, 0);
  __ Mov(x1, 4687);
  __ Mov(x2, 0x1122334455667788);
  __ Mov(w3, 0x11223344);
  __ Mov(w4, 400000);

  __ Add(x10, x0, -42);
  __ Add(x11, x1, -687);
  __ Add(x12, x2, -0x88);

  __ Sub(x13, x0, -600);
  __ Sub(x14, x1, -313);
  __ Sub(x15, x2, -0x555);

  __ Add(w19, w3, -0x344);
  __ Add(w20, w4, -2000);

  __ Sub(w21, w3, -0xbc);
  __ Sub(w22, w4, -2000);
  END();

  RUN();

  ASSERT_EQUAL_64(-42, x10);
  ASSERT_EQUAL_64(4000, x11);
  ASSERT_EQUAL_64(0x1122334455667700, x12);

  ASSERT_EQUAL_64(600, x13);
  ASSERT_EQUAL_64(5000, x14);
  ASSERT_EQUAL_64(0x1122334455667cdd, x15);

  ASSERT_EQUAL_32(0x11223000, w19);
  ASSERT_EQUAL_32(398000, w20);

  ASSERT_EQUAL_32(0x11223400, w21);
  ASSERT_EQUAL_32(402000, w22);

  TEARDOWN();
}
3486
3487
// Test that adding/subtracting zero is elided where it is a true no-op.
// The 64-bit forms generate no code, but the 32-bit forms must still be
// emitted because a 32-bit operation zeroes the upper half of the X
// register, so they are not no-ops.
TEST(add_sub_zero) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x0, 0);
  __ Mov(x1, 0);
  __ Mov(x2, 0);

  Label blob1;
  __ Bind(&blob1);
  __ Add(x0, x0, 0);
  __ Sub(x1, x1, 0);
  __ Sub(x2, x2, xzr);
  CHECK_EQ(0, __ SizeOfCodeGeneratedSince(&blob1));

  Label blob2;
  __ Bind(&blob2);
  __ Add(w3, w3, 0);
  CHECK_NE(0, __ SizeOfCodeGeneratedSince(&blob2));

  Label blob3;
  __ Bind(&blob3);
  __ Sub(w3, w3, wzr);
  CHECK_NE(0, __ SizeOfCodeGeneratedSince(&blob3));

  END();

  RUN();

  ASSERT_EQUAL_64(0, x0);
  ASSERT_EQUAL_64(0, x1);
  ASSERT_EQUAL_64(0, x2);

  TEARDOWN();
}
3524
3525
// Test that zero-sized stack Claim/Drop operations (zero count, zero unit
// size, or xzr as the count register) generate no code at all.
TEST(claim_drop_zero) {
  INIT_V8();
  SETUP();

  START();

  Label start;
  __ Bind(&start);
  __ Claim(0);
  __ Drop(0);
  __ Claim(xzr, 8);
  __ Drop(xzr, 8);
  __ Claim(xzr, 0);
  __ Drop(xzr, 0);
  __ Claim(x7, 0);
  __ Drop(x7, 0);
  __ ClaimBySMI(xzr, 8);
  __ DropBySMI(xzr, 8);
  __ ClaimBySMI(xzr, 0);
  __ DropBySMI(xzr, 0);
  CHECK_EQ(0, __ SizeOfCodeGeneratedSince(&start));

  END();

  RUN();

  TEARDOWN();
}
3554
3555
// Test the Neg macro with immediate, shifted-register and extended-register
// operands, in both 64-bit (x) and 32-bit (w) forms.
3556 TEST(neg) {
3557 INIT_V8();
3558 SETUP();
3559
3560 START();
3561 __ Mov(x0, 0xf123456789abcdefL);
3562
3563 // Immediate.
3564 __ Neg(x1, 0x123);
3565 __ Neg(w2, 0x123);
3566
3567 // Shifted.
3568 __ Neg(x3, Operand(x0, LSL, 1));
3569 __ Neg(w4, Operand(w0, LSL, 2));
3570 __ Neg(x5, Operand(x0, LSR, 3));
3571 __ Neg(w6, Operand(w0, LSR, 4));
3572 __ Neg(x7, Operand(x0, ASR, 5));
3573 __ Neg(w8, Operand(w0, ASR, 6));
3574
3575 // Extended.
3576 __ Neg(w9, Operand(w0, UXTB));
3577 __ Neg(x10, Operand(x0, SXTB, 1));
3578 __ Neg(w11, Operand(w0, UXTH, 2));
3579 __ Neg(x12, Operand(x0, SXTH, 3));
3580 __ Neg(w13, Operand(w0, UXTW, 4));
3581 __ Neg(x14, Operand(x0, SXTW, 4));
3582 END();
3583
3584 RUN();
3585
3586 ASSERT_EQUAL_64(0xfffffffffffffeddUL, x1);
3587 ASSERT_EQUAL_64(0xfffffedd, x2);
3588 ASSERT_EQUAL_64(0x1db97530eca86422UL, x3);
3589 ASSERT_EQUAL_64(0xd950c844, x4);
3590 ASSERT_EQUAL_64(0xe1db97530eca8643UL, x5);
3591 ASSERT_EQUAL_64(0xf7654322, x6);
3592 ASSERT_EQUAL_64(0x0076e5d4c3b2a191UL, x7);
3593 ASSERT_EQUAL_64(0x01d950c9, x8);
3594 ASSERT_EQUAL_64(0xffffff11, x9);
3595 ASSERT_EQUAL_64(0x0000000000000022UL, x10);
3596 ASSERT_EQUAL_64(0xfffcc844, x11);
3597 ASSERT_EQUAL_64(0x0000000000019088UL, x12);
3598 ASSERT_EQUAL_64(0x65432110, x13);
3599 ASSERT_EQUAL_64(0x0000000765432110UL, x14);
3600
3601 TEARDOWN();
3602 }
3603
3604
// Test Adc/Sbc with shifted-register operands, with the C flag both clear
// and set, then verify the NZCV flags produced by the flag-setting variants
// (Adcs, Sbcs, Ngcs) on carry, overflow and zero-result cases.
//
// Fix: three START() invocations below were missing the trailing semicolon
// used by every other START()/END()/RUN() call in this file. The macro body
// is self-terminating so this compiled, but it is now consistent.
3605 TEST(adc_sbc_shift) {
3606 INIT_V8();
3607 SETUP();
3608
3609 START();
3610 __ Mov(x0, 0);
3611 __ Mov(x1, 1);
3612 __ Mov(x2, 0x0123456789abcdefL);
3613 __ Mov(x3, 0xfedcba9876543210L);
3614 __ Mov(x4, 0xffffffffffffffffL);
3615
3616 // Clear the C flag.
3617 __ Adds(x0, x0, Operand(0));
3618
3619 __ Adc(x5, x2, Operand(x3));
3620 __ Adc(x6, x0, Operand(x1, LSL, 60));
3621 __ Sbc(x7, x4, Operand(x3, LSR, 4));
3622 __ Adc(x8, x2, Operand(x3, ASR, 4));
3623 __ Adc(x9, x2, Operand(x3, ROR, 8));
3624
3625 __ Adc(w10, w2, Operand(w3));
3626 __ Adc(w11, w0, Operand(w1, LSL, 30));
3627 __ Sbc(w12, w4, Operand(w3, LSR, 4));
3628 __ Adc(w13, w2, Operand(w3, ASR, 4));
3629 __ Adc(w14, w2, Operand(w3, ROR, 8));
3630
3631 // Set the C flag.
3632 __ Cmp(w0, Operand(w0));
3633
// Same operations as above; with C set each result should be one greater.
3634 __ Adc(x18, x2, Operand(x3));
3635 __ Adc(x19, x0, Operand(x1, LSL, 60));
3636 __ Sbc(x20, x4, Operand(x3, LSR, 4));
3637 __ Adc(x21, x2, Operand(x3, ASR, 4));
3638 __ Adc(x22, x2, Operand(x3, ROR, 8));
3639
3640 __ Adc(w23, w2, Operand(w3));
3641 __ Adc(w24, w0, Operand(w1, LSL, 30));
3642 __ Sbc(w25, w4, Operand(w3, LSR, 4));
3643 __ Adc(w26, w2, Operand(w3, ASR, 4));
3644 __ Adc(w27, w2, Operand(w3, ROR, 8));
3645 END();
3646
3647 RUN();
3648
3649 ASSERT_EQUAL_64(0xffffffffffffffffL, x5);
3650 ASSERT_EQUAL_64(1L << 60, x6);
3651 ASSERT_EQUAL_64(0xf0123456789abcddL, x7);
3652 ASSERT_EQUAL_64(0x0111111111111110L, x8);
3653 ASSERT_EQUAL_64(0x1222222222222221L, x9);
3654
3655 ASSERT_EQUAL_32(0xffffffff, w10);
3656 ASSERT_EQUAL_32(1 << 30, w11);
3657 ASSERT_EQUAL_32(0xf89abcdd, w12);
3658 ASSERT_EQUAL_32(0x91111110, w13);
3659 ASSERT_EQUAL_32(0x9a222221, w14);
3660
3661 ASSERT_EQUAL_64(0xffffffffffffffffL + 1, x18);
3662 ASSERT_EQUAL_64((1L << 60) + 1, x19);
3663 ASSERT_EQUAL_64(0xf0123456789abcddL + 1, x20);
3664 ASSERT_EQUAL_64(0x0111111111111110L + 1, x21);
3665 ASSERT_EQUAL_64(0x1222222222222221L + 1, x22);
3666
3667 ASSERT_EQUAL_32(0xffffffff + 1, w23);
3668 ASSERT_EQUAL_32((1 << 30) + 1, w24);
3669 ASSERT_EQUAL_32(0xf89abcdd + 1, w25);
3670 ASSERT_EQUAL_32(0x91111110 + 1, w26);
3671 ASSERT_EQUAL_32(0x9a222221 + 1, w27);
3672
3673 // Check that adc correctly sets the condition flags.
3674 START();
3675 __ Mov(x0, 1);
3676 __ Mov(x1, 0xffffffffffffffffL);
3677 // Clear the C flag.
3678 __ Adds(x0, x0, Operand(0));
3679 __ Adcs(x10, x0, Operand(x1));
3680 END();
3681
3682 RUN();
3683
3684 ASSERT_EQUAL_NZCV(ZCFlag);
3685 ASSERT_EQUAL_64(0, x10);
3686
3687 START();
3688 __ Mov(x0, 1);
3689 __ Mov(x1, 0x8000000000000000L);
3690 // Clear the C flag.
3691 __ Adds(x0, x0, Operand(0));
3692 __ Adcs(x10, x0, Operand(x1, ASR, 63));
3693 END();
3694
3695 RUN();
3696
3697 ASSERT_EQUAL_NZCV(ZCFlag);
3698 ASSERT_EQUAL_64(0, x10);
3699
3700 START();
3701 __ Mov(x0, 0x10);
3702 __ Mov(x1, 0x07ffffffffffffffL);
3703 // Clear the C flag.
3704 __ Adds(x0, x0, Operand(0));
3705 __ Adcs(x10, x0, Operand(x1, LSL, 4));
3706 END();
3707
3708 RUN();
3709
3710 ASSERT_EQUAL_NZCV(NVFlag);
3711 ASSERT_EQUAL_64(0x8000000000000000L, x10);
3712
3713 // Check that sbc correctly sets the condition flags.
3714 START();
3715 __ Mov(x0, 0);
3716 __ Mov(x1, 0xffffffffffffffffL);
3717 // Clear the C flag.
3718 __ Adds(x0, x0, Operand(0));
3719 __ Sbcs(x10, x0, Operand(x1));
3720 END();
3721
3722 RUN();
3723
3724 ASSERT_EQUAL_NZCV(ZFlag);
3725 ASSERT_EQUAL_64(0, x10);
3726
3727 START();
3728 __ Mov(x0, 1);
3729 __ Mov(x1, 0xffffffffffffffffL);
3730 // Clear the C flag.
3731 __ Adds(x0, x0, Operand(0));
3732 __ Sbcs(x10, x0, Operand(x1, LSR, 1));
3733 END();
3734
3735 RUN();
3736
3737 ASSERT_EQUAL_NZCV(NFlag);
3738 ASSERT_EQUAL_64(0x8000000000000001L, x10);
3739
3740 START();
3741 __ Mov(x0, 0);
3742 // Clear the C flag.
3743 __ Adds(x0, x0, Operand(0));
3744 __ Sbcs(x10, x0, Operand(0xffffffffffffffffL));
3745 END();
3746
3747 RUN();
3748
3749 ASSERT_EQUAL_NZCV(ZFlag);
3750 ASSERT_EQUAL_64(0, x10);
3751
3752 START();
3753 __ Mov(w0, 0x7fffffff);
3754 // Clear the C flag.
3755 __ Adds(x0, x0, Operand(0));
3756 __ Ngcs(w10, w0);
3757 END();
3758
3759 RUN();
3760
3761 ASSERT_EQUAL_NZCV(NFlag);
3762 ASSERT_EQUAL_64(0x80000000, x10);
3763
3764 START();
3765 // Clear the C flag.
3766 __ Adds(x0, x0, Operand(0));
3767 __ Ngcs(x10, 0x7fffffffffffffffL);
3768 END();
3769
3770 RUN();
3771
3772 ASSERT_EQUAL_NZCV(NFlag);
3773 ASSERT_EQUAL_64(0x8000000000000000L, x10);
3774
3775 START();
3776 __ Mov(x0, 0);
3777 // Set the C flag.
3778 __ Cmp(x0, Operand(x0));
3779 __ Sbcs(x10, x0, Operand(1));
3780 END();
3781
3782 RUN();
3783
3784 ASSERT_EQUAL_NZCV(NFlag);
3785 ASSERT_EQUAL_64(0xffffffffffffffffL, x10);
3786
3787 START();
3788 __ Mov(x0, 0);
3789 // Set the C flag.
3790 __ Cmp(x0, Operand(x0));
3791 __ Ngcs(x10, 0x7fffffffffffffffL);
3792 END();
3793
3794 RUN();
3795
3796 ASSERT_EQUAL_NZCV(NFlag);
3797 ASSERT_EQUAL_64(0x8000000000000001L, x10);
3798
3799 TEARDOWN();
3800 }
3801
3802
// Test Adc/Sbc with extended-register operands (UXTB/SXTH/UXTW/SXTW/UXTX,
// with shift amounts), with C clear and C set, then verify NZCV for carry
// and overflow cases of Adcs.
3803 TEST(adc_sbc_extend) {
3804 INIT_V8();
3805 SETUP();
3806
3807 START();
3808 // Clear the C flag.
3809 __ Adds(x0, x0, Operand(0));
3810
3811 __ Mov(x0, 0);
3812 __ Mov(x1, 1);
3813 __ Mov(x2, 0x0123456789abcdefL);
3814
3815 __ Adc(x10, x1, Operand(w2, UXTB, 1));
3816 __ Adc(x11, x1, Operand(x2, SXTH, 2));
3817 __ Sbc(x12, x1, Operand(w2, UXTW, 4));
3818 __ Adc(x13, x1, Operand(x2, UXTX, 4));
3819
3820 __ Adc(w14, w1, Operand(w2, UXTB, 1));
3821 __ Adc(w15, w1, Operand(w2, SXTH, 2));
3822 __ Adc(w9, w1, Operand(w2, UXTW, 4));
3823
3824 // Set the C flag.
3825 __ Cmp(w0, Operand(w0));
3826
// Same operations; with C set, each result should be one greater.
3827 __ Adc(x20, x1, Operand(w2, UXTB, 1));
3828 __ Adc(x21, x1, Operand(x2, SXTH, 2));
3829 __ Sbc(x22, x1, Operand(w2, UXTW, 4));
3830 __ Adc(x23, x1, Operand(x2, UXTX, 4));
3831
3832 __ Adc(w24, w1, Operand(w2, UXTB, 1));
3833 __ Adc(w25, w1, Operand(w2, SXTH, 2));
3834 __ Adc(w26, w1, Operand(w2, UXTW, 4));
3835 END();
3836
3837 RUN();
3838
3839 ASSERT_EQUAL_64(0x1df, x10);
3840 ASSERT_EQUAL_64(0xffffffffffff37bdL, x11);
3841 ASSERT_EQUAL_64(0xfffffff765432110L, x12);
3842 ASSERT_EQUAL_64(0x123456789abcdef1L, x13);
3843
3844 ASSERT_EQUAL_32(0x1df, w14);
3845 ASSERT_EQUAL_32(0xffff37bd, w15);
3846 ASSERT_EQUAL_32(0x9abcdef1, w9);
3847
3848 ASSERT_EQUAL_64(0x1df + 1, x20);
3849 ASSERT_EQUAL_64(0xffffffffffff37bdL + 1, x21);
3850 ASSERT_EQUAL_64(0xfffffff765432110L + 1, x22);
3851 ASSERT_EQUAL_64(0x123456789abcdef1L + 1, x23);
3852
3853 ASSERT_EQUAL_32(0x1df + 1, w24);
3854 ASSERT_EQUAL_32(0xffff37bd + 1, w25);
3855 ASSERT_EQUAL_32(0x9abcdef1 + 1, w26);
3856
3857 // Check that adc correctly sets the condition flags.
3858 START();
3859 __ Mov(x0, 0xff);
3860 __ Mov(x1, 0xffffffffffffffffL);
3861 // Clear the C flag.
3862 __ Adds(x0, x0, Operand(0));
3863 __ Adcs(x10, x0, Operand(x1, SXTX, 1));
3864 END();
3865
3866 RUN();
3867
3868 ASSERT_EQUAL_NZCV(CFlag);
3869
3870 START();
3871 __ Mov(x0, 0x7fffffffffffffffL);
3872 __ Mov(x1, 1);
3873 // Clear the C flag.
3874 __ Adds(x0, x0, Operand(0));
3875 __ Adcs(x10, x0, Operand(x1, UXTB, 2));
3876 END();
3877
3878 RUN();
3879
3880 ASSERT_EQUAL_NZCV(NVFlag);
3881
3882 START();
3883 __ Mov(x0, 0x7fffffffffffffffL);
3884 // Clear the C flag.
3885 __ Adds(x0, x0, Operand(0));
3886 __ Adcs(x10, x0, Operand(1));
3887 END();
3888
3889 RUN();
3890
3891 ASSERT_EQUAL_NZCV(NVFlag);
3892
3893 TEARDOWN();
3894 }
3895
3896
// Test Adc/Sbc/Ngc with immediates too wide to encode directly (the macro
// assembler must materialize them), with C clear and then C set.
3897 TEST(adc_sbc_wide_imm) {
3898 INIT_V8();
3899 SETUP();
3900
3901 START();
3902 __ Mov(x0, 0);
3903
3904 // Clear the C flag.
3905 __ Adds(x0, x0, Operand(0));
3906
3907 __ Adc(x7, x0, Operand(0x1234567890abcdefUL));
3908 __ Adc(w8, w0, Operand(0xffffffff));
3909 __ Sbc(x9, x0, Operand(0x1234567890abcdefUL));
3910 __ Sbc(w10, w0, Operand(0xffffffff));
3911 __ Ngc(x11, Operand(0xffffffff00000000UL));
3912 __ Ngc(w12, Operand(0xffff0000));
3913
3914 // Set the C flag.
3915 __ Cmp(w0, Operand(w0));
3916
// Same operations; with C set, results are incremented by one.
3917 __ Adc(x18, x0, Operand(0x1234567890abcdefUL));
3918 __ Adc(w19, w0, Operand(0xffffffff));
3919 __ Sbc(x20, x0, Operand(0x1234567890abcdefUL));
3920 __ Sbc(w21, w0, Operand(0xffffffff));
3921 __ Ngc(x22, Operand(0xffffffff00000000UL));
3922 __ Ngc(w23, Operand(0xffff0000));
3923 END();
3924
3925 RUN();
3926
3927 ASSERT_EQUAL_64(0x1234567890abcdefUL, x7);
3928 ASSERT_EQUAL_64(0xffffffff, x8);
3929 ASSERT_EQUAL_64(0xedcba9876f543210UL, x9);
3930 ASSERT_EQUAL_64(0, x10);
3931 ASSERT_EQUAL_64(0xffffffff, x11);
3932 ASSERT_EQUAL_64(0xffff, x12);
3933
3934 ASSERT_EQUAL_64(0x1234567890abcdefUL + 1, x18);
3935 ASSERT_EQUAL_64(0, x19);
3936 ASSERT_EQUAL_64(0xedcba9876f543211UL, x20);
3937 ASSERT_EQUAL_64(1, x21);
3938 ASSERT_EQUAL_64(0x100000000UL, x22);
3939 ASSERT_EQUAL_64(0x10000, x23);
3940
3941 TEARDOWN();
3942 }
3943
3944
// Test NZCV flag behaviour for Neg/Ngc results and for Cmp/Cmn/Ngcs across
// zero, negative, carry and overflow cases, in 32- and 64-bit forms.
3945 TEST(flags) {
3946 INIT_V8();
3947 SETUP();
3948
3949 START();
3950 __ Mov(x0, 0);
3951 __ Mov(x1, 0x1111111111111111L);
3952 __ Neg(x10, Operand(x0));
3953 __ Neg(x11, Operand(x1));
3954 __ Neg(w12, Operand(w1));
3955 // Clear the C flag.
3956 __ Adds(x0, x0, Operand(0));
3957 __ Ngc(x13, Operand(x0));
3958 // Set the C flag.
3959 __ Cmp(x0, Operand(x0));
3960 __ Ngc(w14, Operand(w0));
3961 END();
3962
3963 RUN();
3964
3965 ASSERT_EQUAL_64(0, x10);
3966 ASSERT_EQUAL_64(-0x1111111111111111L, x11);
3967 ASSERT_EQUAL_32(-0x11111111, w12);
3968 ASSERT_EQUAL_64(-1L, x13);
3969 ASSERT_EQUAL_32(0, w14);
3970
// Comparing equal values sets Z and C.
3971 START();
3972 __ Mov(x0, 0);
3973 __ Cmp(x0, Operand(x0));
3974 END();
3975
3976 RUN();
3977
3978 ASSERT_EQUAL_NZCV(ZCFlag);
3979
3980 START();
3981 __ Mov(w0, 0);
3982 __ Cmp(w0, Operand(w0));
3983 END();
3984
3985 RUN();
3986
3987 ASSERT_EQUAL_NZCV(ZCFlag);
3988
// Comparing a smaller with a larger value produces a negative result.
3989 START();
3990 __ Mov(x0, 0);
3991 __ Mov(x1, 0x1111111111111111L);
3992 __ Cmp(x0, Operand(x1));
3993 END();
3994
3995 RUN();
3996
3997 ASSERT_EQUAL_NZCV(NFlag);
3998
3999 START();
4000 __ Mov(w0, 0);
4001 __ Mov(w1, 0x11111111);
4002 __ Cmp(w0, Operand(w1));
4003 END();
4004
4005 RUN();
4006
4007 ASSERT_EQUAL_NZCV(NFlag);
4008
// Subtracting zero from a positive value sets only C (no borrow).
4009 START();
4010 __ Mov(x1, 0x1111111111111111L);
4011 __ Cmp(x1, Operand(0));
4012 END();
4013
4014 RUN();
4015
4016 ASSERT_EQUAL_NZCV(CFlag);
4017
4018 START();
4019 __ Mov(w1, 0x11111111);
4020 __ Cmp(w1, Operand(0));
4021 END();
4022
4023 RUN();
4024
4025 ASSERT_EQUAL_NZCV(CFlag);
4026
// Cmn overflow: max positive + 1 sets N and V.
4027 START();
4028 __ Mov(x0, 1);
4029 __ Mov(x1, 0x7fffffffffffffffL);
4030 __ Cmn(x1, Operand(x0));
4031 END();
4032
4033 RUN();
4034
4035 ASSERT_EQUAL_NZCV(NVFlag);
4036
4037 START();
4038 __ Mov(w0, 1);
4039 __ Mov(w1, 0x7fffffff);
4040 __ Cmn(w1, Operand(w0));
4041 END();
4042
4043 RUN();
4044
4045 ASSERT_EQUAL_NZCV(NVFlag);
4046
// Cmn wrap-around to zero sets Z and C.
4047 START();
4048 __ Mov(x0, 1);
4049 __ Mov(x1, 0xffffffffffffffffL);
4050 __ Cmn(x1, Operand(x0));
4051 END();
4052
4053 RUN();
4054
4055 ASSERT_EQUAL_NZCV(ZCFlag);
4056
4057 START();
4058 __ Mov(w0, 1);
4059 __ Mov(w1, 0xffffffff);
4060 __ Cmn(w1, Operand(w0));
4061 END();
4062
4063 RUN();
4064
4065 ASSERT_EQUAL_NZCV(ZCFlag);
4066
// Ngcs with C clear and C set.
4067 START();
4068 __ Mov(w0, 0);
4069 __ Mov(w1, 1);
4070 // Clear the C flag.
4071 __ Adds(w0, w0, Operand(0));
4072 __ Ngcs(w0, Operand(w1));
4073 END();
4074
4075 RUN();
4076
4077 ASSERT_EQUAL_NZCV(NFlag);
4078
4079 START();
4080 __ Mov(w0, 0);
4081 __ Mov(w1, 0);
4082 // Set the C flag.
4083 __ Cmp(w0, Operand(w0));
4084 __ Ngcs(w0, Operand(w1));
4085 END();
4086
4087 RUN();
4088
4089 ASSERT_EQUAL_NZCV(ZCFlag);
4090
4091 TEARDOWN();
4092 }
4093
4094
// Test Cmp with shifted-register operands (LSL/LSR/ASR/ROR). Each operand
// pair is constructed so that the shifted value equals the left-hand side,
// so every comparison should report equality (ZCFlag).
4095 TEST(cmp_shift) {
4096 INIT_V8();
4097 SETUP();
4098
4099 START();
4100 __ Mov(x18, 0xf0000000);
4101 __ Mov(x19, 0xf000000010000000UL);
4102 __ Mov(x20, 0xf0000000f0000000UL);
4103 __ Mov(x21, 0x7800000078000000UL);
4104 __ Mov(x22, 0x3c0000003c000000UL);
4105 __ Mov(x23, 0x8000000780000000UL);
4106 __ Mov(x24, 0x0000000f00000000UL);
4107 __ Mov(x25, 0x00000003c0000000UL);
4108 __ Mov(x26, 0x8000000780000000UL);
4109 __ Mov(x27, 0xc0000003);
4110
// Capture NZCV after each comparison via Mrs so all results can be checked.
4111 __ Cmp(w20, Operand(w21, LSL, 1));
4112 __ Mrs(x0, NZCV);
4113
4114 __ Cmp(x20, Operand(x22, LSL, 2));
4115 __ Mrs(x1, NZCV);
4116
4117 __ Cmp(w19, Operand(w23, LSR, 3));
4118 __ Mrs(x2, NZCV);
4119
4120 __ Cmp(x18, Operand(x24, LSR, 4));
4121 __ Mrs(x3, NZCV);
4122
4123 __ Cmp(w20, Operand(w25, ASR, 2));
4124 __ Mrs(x4, NZCV);
4125
4126 __ Cmp(x20, Operand(x26, ASR, 3));
4127 __ Mrs(x5, NZCV);
4128
4129 __ Cmp(w27, Operand(w22, ROR, 28));
4130 __ Mrs(x6, NZCV);
4131
4132 __ Cmp(x20, Operand(x21, ROR, 31));
4133 __ Mrs(x7, NZCV);
4134 END();
4135
4136 RUN();
4137
4138 ASSERT_EQUAL_32(ZCFlag, w0);
4139 ASSERT_EQUAL_32(ZCFlag, w1);
4140 ASSERT_EQUAL_32(ZCFlag, w2);
4141 ASSERT_EQUAL_32(ZCFlag, w3);
4142 ASSERT_EQUAL_32(ZCFlag, w4);
4143 ASSERT_EQUAL_32(ZCFlag, w5);
4144 ASSERT_EQUAL_32(ZCFlag, w6);
4145 ASSERT_EQUAL_32(ZCFlag, w7);
4146
4147 TEARDOWN();
4148 }
4149
4150
// Test Cmp with extended-register operands (SXTB/UXTB/UXTH/SXTH/UXTW/SXTW),
// capturing NZCV after each comparison. Signed extensions of the all-ones
// patterns compare equal (ZCFlag); the unsigned variants do not (NCFlag).
4151 TEST(cmp_extend) {
4152 INIT_V8();
4153 SETUP();
4154
4155 START();
4156 __ Mov(w20, 0x2);
4157 __ Mov(w21, 0x1);
4158 __ Mov(x22, 0xffffffffffffffffUL);
4159 __ Mov(x23, 0xff);
4160 __ Mov(x24, 0xfffffffffffffffeUL);
4161 __ Mov(x25, 0xffff);
4162 __ Mov(x26, 0xffffffff);
4163
4164 __ Cmp(w20, Operand(w21, LSL, 1));
4165 __ Mrs(x0, NZCV);
4166
4167 __ Cmp(x22, Operand(x23, SXTB, 0));
4168 __ Mrs(x1, NZCV);
4169
4170 __ Cmp(x24, Operand(x23, SXTB, 1));
4171 __ Mrs(x2, NZCV);
4172
4173 __ Cmp(x24, Operand(x23, UXTB, 1));
4174 __ Mrs(x3, NZCV);
4175
4176 __ Cmp(w22, Operand(w25, UXTH));
4177 __ Mrs(x4, NZCV);
4178
4179 __ Cmp(x22, Operand(x25, SXTH));
4180 __ Mrs(x5, NZCV);
4181
4182 __ Cmp(x22, Operand(x26, UXTW));
4183 __ Mrs(x6, NZCV);
4184
4185 __ Cmp(x24, Operand(x26, SXTW, 1));
4186 __ Mrs(x7, NZCV);
4187 END();
4188
4189 RUN();
4190
4191 ASSERT_EQUAL_32(ZCFlag, w0);
4192 ASSERT_EQUAL_32(ZCFlag, w1);
4193 ASSERT_EQUAL_32(ZCFlag, w2);
4194 ASSERT_EQUAL_32(NCFlag, w3);
4195 ASSERT_EQUAL_32(NCFlag, w4);
4196 ASSERT_EQUAL_32(ZCFlag, w5);
4197 ASSERT_EQUAL_32(NCFlag, w6);
4198 ASSERT_EQUAL_32(ZCFlag, w7);
4199
4200 TEARDOWN();
4201 }
4202
4203
// Test conditional compare: Ccmp/Ccmn perform the comparison when the
// condition holds, otherwise set NZCV directly to the supplied flag value.
// The raw ccmp with al/nv conditions always performs the comparison.
4204 TEST(ccmp) {
4205 INIT_V8();
4206 SETUP();
4207
4208 START();
4209 __ Mov(w16, 0);
4210 __ Mov(w17, 1);
// Condition true: the comparison (w16 vs w17) is performed.
4211 __ Cmp(w16, w16);
4212 __ Ccmp(w16, w17, NCFlag, eq);
4213 __ Mrs(x0, NZCV);
4214
// Condition false: NZCV is set to the immediate flags (NCFlag).
4215 __ Cmp(w16, w16);
4216 __ Ccmp(w16, w17, NCFlag, ne);
4217 __ Mrs(x1, NZCV);
4218
4219 __ Cmp(x16, x16);
4220 __ Ccmn(x16, 2, NZCVFlag, eq);
4221 __ Mrs(x2, NZCV);
4222
4223 __ Cmp(x16, x16);
4224 __ Ccmn(x16, 2, NZCVFlag, ne);
4225 __ Mrs(x3, NZCV);
4226
// Raw (lowercase) ccmp used directly: al and nv always execute the compare.
4227 __ ccmp(x16, x16, NZCVFlag, al);
4228 __ Mrs(x4, NZCV);
4229
4230 __ ccmp(x16, x16, NZCVFlag, nv);
4231 __ Mrs(x5, NZCV);
4232
4233 END();
4234
4235 RUN();
4236
4237 ASSERT_EQUAL_32(NFlag, w0);
4238 ASSERT_EQUAL_32(NCFlag, w1);
4239 ASSERT_EQUAL_32(NoFlag, w2);
4240 ASSERT_EQUAL_32(NZCVFlag, w3);
4241 ASSERT_EQUAL_32(ZCFlag, w4);
4242 ASSERT_EQUAL_32(ZCFlag, w5);
4243
4244 TEARDOWN();
4245 }
4246
4247
// Test Ccmp with immediates too wide for the 5-bit ccmp encoding; the macro
// assembler must materialize them in a scratch register.
4248 TEST(ccmp_wide_imm) {
4249 INIT_V8();
4250 SETUP();
4251
4252 START();
4253 __ Mov(w20, 0);
4254
4255 __ Cmp(w20, Operand(w20));
4256 __ Ccmp(w20, Operand(0x12345678), NZCVFlag, eq);
4257 __ Mrs(x0, NZCV);
4258
4259 __ Cmp(w20, Operand(w20));
4260 __ Ccmp(x20, Operand(0xffffffffffffffffUL), NZCVFlag, eq);
4261 __ Mrs(x1, NZCV);
4262 END();
4263
4264 RUN();
4265
4266 ASSERT_EQUAL_32(NFlag, w0);
4267 ASSERT_EQUAL_32(NoFlag, w1);
4268
4269 TEARDOWN();
4270 }
4271
4272
// Test Ccmp with shifted- and extended-register operands; such operands are
// not directly encodable, so the macro assembler must compute them first.
4273 TEST(ccmp_shift_extend) {
4274 INIT_V8();
4275 SETUP();
4276
4277 START();
4278 __ Mov(w20, 0x2);
4279 __ Mov(w21, 0x1);
4280 __ Mov(x22, 0xffffffffffffffffUL);
4281 __ Mov(x23, 0xff);
4282 __ Mov(x24, 0xfffffffffffffffeUL);
4283
4284 __ Cmp(w20, Operand(w20));
4285 __ Ccmp(w20, Operand(w21, LSL, 1), NZCVFlag, eq);
4286 __ Mrs(x0, NZCV);
4287
4288 __ Cmp(w20, Operand(w20));
4289 __ Ccmp(x22, Operand(x23, SXTB, 0), NZCVFlag, eq);
4290 __ Mrs(x1, NZCV);
4291
4292 __ Cmp(w20, Operand(w20));
4293 __ Ccmp(x24, Operand(x23, SXTB, 1), NZCVFlag, eq);
4294 __ Mrs(x2, NZCV);
4295
4296 __ Cmp(w20, Operand(w20));
4297 __ Ccmp(x24, Operand(x23, UXTB, 1), NZCVFlag, eq);
4298 __ Mrs(x3, NZCV);
4299
// Condition false: NZCV comes from the immediate flag value.
4300 __ Cmp(w20, Operand(w20));
4301 __ Ccmp(x24, Operand(x23, UXTB, 1), NZCVFlag, ne);
4302 __ Mrs(x4, NZCV);
4303 END();
4304
4305 RUN();
4306
4307 ASSERT_EQUAL_32(ZCFlag, w0);
4308 ASSERT_EQUAL_32(ZCFlag, w1);
4309 ASSERT_EQUAL_32(ZCFlag, w2);
4310 ASSERT_EQUAL_32(NCFlag, w3);
4311 ASSERT_EQUAL_32(NZCVFlag, w4);
4312
4313 TEARDOWN();
4314 }
4315
4316
// Test conditional select and its aliases: Csel/Csinc/Csinv/Csneg,
// Cset/Csetm/Cinc/Cinv/Cneg, plus the CzeroX/CmovX helpers. The raw
// (lowercase) csel is used for the al/nv conditions the macro forbids.
4317 TEST(csel) {
4318 INIT_V8();
4319 SETUP();
4320
4321 START();
4322 __ Mov(x16, 0);
4323 __ Mov(x24, 0x0000000f0000000fUL);
4324 __ Mov(x25, 0x0000001f0000001fUL);
4325 __ Mov(x26, 0);
4326 __ Mov(x27, 0);
4327
// First batch: condition flags from "x16 == 0" (eq holds).
4328 __ Cmp(w16, 0);
4329 __ Csel(w0, w24, w25, eq);
4330 __ Csel(w1, w24, w25, ne);
4331 __ Csinc(w2, w24, w25, mi);
4332 __ Csinc(w3, w24, w25, pl);
4333
4334 __ csel(w13, w24, w25, al);
4335 __ csel(x14, x24, x25, nv);
4336
// Second batch: condition flags from "x16 == 1" (ne holds).
4337 __ Cmp(x16, 1);
4338 __ Csinv(x4, x24, x25, gt);
4339 __ Csinv(x5, x24, x25, le);
4340 __ Csneg(x6, x24, x25, hs);
4341 __ Csneg(x7, x24, x25, lo);
4342
4343 __ Cset(w8, ne);
4344 __ Csetm(w9, ne);
4345 __ Cinc(x10, x25, ne);
4346 __ Cinv(x11, x24, ne);
4347 __ Cneg(x12, x24, ne);
4348
4349 __ csel(w15, w24, w25, al);
4350 __ csel(x18, x24, x25, nv);
4351
4352 __ CzeroX(x24, ne);
4353 __ CzeroX(x25, eq);
4354
4355 __ CmovX(x26, x25, ne);
4356 __ CmovX(x27, x25, eq);
4357 END();
4358
4359 RUN();
4360
4361 ASSERT_EQUAL_64(0x0000000f, x0);
4362 ASSERT_EQUAL_64(0x0000001f, x1);
4363 ASSERT_EQUAL_64(0x00000020, x2);
4364 ASSERT_EQUAL_64(0x0000000f, x3);
4365 ASSERT_EQUAL_64(0xffffffe0ffffffe0UL, x4);
4366 ASSERT_EQUAL_64(0x0000000f0000000fUL, x5);
4367 ASSERT_EQUAL_64(0xffffffe0ffffffe1UL, x6);
4368 ASSERT_EQUAL_64(0x0000000f0000000fUL, x7);
4369 ASSERT_EQUAL_64(0x00000001, x8);
4370 ASSERT_EQUAL_64(0xffffffff, x9);
4371 ASSERT_EQUAL_64(0x0000001f00000020UL, x10);
4372 ASSERT_EQUAL_64(0xfffffff0fffffff0UL, x11);
4373 ASSERT_EQUAL_64(0xfffffff0fffffff1UL, x12);
4374 ASSERT_EQUAL_64(0x0000000f, x13);
4375 ASSERT_EQUAL_64(0x0000000f0000000fUL, x14);
4376 ASSERT_EQUAL_64(0x0000000f, x15);
4377 ASSERT_EQUAL_64(0x0000000f0000000fUL, x18);
4378 ASSERT_EQUAL_64(0, x24);
4379 ASSERT_EQUAL_64(0x0000001f0000001fUL, x25);
4380 ASSERT_EQUAL_64(0x0000001f0000001fUL, x26);
4381 ASSERT_EQUAL_64(0, x27);
4382
4383 TEARDOWN();
4384 }
4385
4386
// Test Csel with immediate second operands. Small immediates (-1, 0, 1) can
// map onto Csinv/wzr, Csel/wzr and Csinc/wzr encodings; the shifted-register
// forms here produce the same values to cross-check that mapping.
4387 TEST(csel_imm) {
4388 INIT_V8();
4389 SETUP();
4390
4391 START();
4392 __ Mov(x18, 0);
4393 __ Mov(x19, 0x80000000);
4394 __ Mov(x20, 0x8000000000000000UL);
4395
// x18 == 0, so ne is false (immediate selected) and eq is true (register).
4396 __ Cmp(x18, Operand(0));
4397 __ Csel(w0, w19, -2, ne);
4398 __ Csel(w1, w19, -1, ne);
4399 __ Csel(w2, w19, 0, ne);
4400 __ Csel(w3, w19, 1, ne);
4401 __ Csel(w4, w19, 2, ne);
4402 __ Csel(w5, w19, Operand(w19, ASR, 31), ne);
4403 __ Csel(w6, w19, Operand(w19, ROR, 1), ne);
4404 __ Csel(w7, w19, 3, eq);
4405
4406 __ Csel(x8, x20, -2, ne);
4407 __ Csel(x9, x20, -1, ne);
4408 __ Csel(x10, x20, 0, ne);
4409 __ Csel(x11, x20, 1, ne);
4410 __ Csel(x12, x20, 2, ne);
4411 __ Csel(x13, x20, Operand(x20, ASR, 63), ne);
4412 __ Csel(x14, x20, Operand(x20, ROR, 1), ne);
4413 __ Csel(x15, x20, 3, eq);
4414
4415 END();
4416
4417 RUN();
4418
4419 ASSERT_EQUAL_32(-2, w0);
4420 ASSERT_EQUAL_32(-1, w1);
4421 ASSERT_EQUAL_32(0, w2);
4422 ASSERT_EQUAL_32(1, w3);
4423 ASSERT_EQUAL_32(2, w4);
4424 ASSERT_EQUAL_32(-1, w5);
4425 ASSERT_EQUAL_32(0x40000000, w6);
4426 ASSERT_EQUAL_32(0x80000000, w7);
4427
4428 ASSERT_EQUAL_64(-2, x8);
4429 ASSERT_EQUAL_64(-1, x9);
4430 ASSERT_EQUAL_64(0, x10);
4431 ASSERT_EQUAL_64(1, x11);
4432 ASSERT_EQUAL_64(2, x12);
4433 ASSERT_EQUAL_64(-1, x13);
4434 ASSERT_EQUAL_64(0x4000000000000000UL, x14);
4435 ASSERT_EQUAL_64(0x8000000000000000UL, x15);
4436
4437 TEARDOWN();
4438 }
4439
4440
// Test variable left shifts (Lsl with a register shift amount) in 64- and
// 32-bit forms. The shift amount is taken modulo the register width, which
// the expected values model with "& 63" / "& 31".
4441 TEST(lslv) {
4442 INIT_V8();
4443 SETUP();
4444
4445 uint64_t value = 0x0123456789abcdefUL;
4446 int shift[] = {1, 3, 5, 9, 17, 33};
4447
4448 START();
4449 __ Mov(x0, value);
4450 __ Mov(w1, shift[0]);
4451 __ Mov(w2, shift[1]);
4452 __ Mov(w3, shift[2]);
4453 __ Mov(w4, shift[3]);
4454 __ Mov(w5, shift[4]);
4455 __ Mov(w6, shift[5]);
4456
// Raw lslv with xzr: shift by zero leaves x0 unchanged.
4457 __ lslv(x0, x0, xzr);
4458
4459 __ Lsl(x16, x0, x1);
4460 __ Lsl(x17, x0, x2);
4461 __ Lsl(x18, x0, x3);
4462 __ Lsl(x19, x0, x4);
4463 __ Lsl(x20, x0, x5);
4464 __ Lsl(x21, x0, x6);
4465
4466 __ Lsl(w22, w0, w1);
4467 __ Lsl(w23, w0, w2);
4468 __ Lsl(w24, w0, w3);
4469 __ Lsl(w25, w0, w4);
4470 __ Lsl(w26, w0, w5);
4471 __ Lsl(w27, w0, w6);
4472 END();
4473
4474 RUN();
4475
4476 ASSERT_EQUAL_64(value, x0);
4477 ASSERT_EQUAL_64(value << (shift[0] & 63), x16);
4478 ASSERT_EQUAL_64(value << (shift[1] & 63), x17);
4479 ASSERT_EQUAL_64(value << (shift[2] & 63), x18);
4480 ASSERT_EQUAL_64(value << (shift[3] & 63), x19);
4481 ASSERT_EQUAL_64(value << (shift[4] & 63), x20);
4482 ASSERT_EQUAL_64(value << (shift[5] & 63), x21);
4483 ASSERT_EQUAL_32(value << (shift[0] & 31), w22);
4484 ASSERT_EQUAL_32(value << (shift[1] & 31), w23);
4485 ASSERT_EQUAL_32(value << (shift[2] & 31), w24);
4486 ASSERT_EQUAL_32(value << (shift[3] & 31), w25);
4487 ASSERT_EQUAL_32(value << (shift[4] & 31), w26);
4488 ASSERT_EQUAL_32(value << (shift[5] & 31), w27);
4489
4490 TEARDOWN();
4491 }
4492
4493
// Test variable logical right shifts (Lsr with a register shift amount) in
// 64- and 32-bit forms. For w-register checks the input is first truncated
// to 32 bits to mirror the hardware behaviour.
4494 TEST(lsrv) {
4495 INIT_V8();
4496 SETUP();
4497
4498 uint64_t value = 0x0123456789abcdefUL;
4499 int shift[] = {1, 3, 5, 9, 17, 33};
4500
4501 START();
4502 __ Mov(x0, value);
4503 __ Mov(w1, shift[0]);
4504 __ Mov(w2, shift[1]);
4505 __ Mov(w3, shift[2]);
4506 __ Mov(w4, shift[3]);
4507 __ Mov(w5, shift[4]);
4508 __ Mov(w6, shift[5]);
4509
// Raw lsrv with xzr: shift by zero leaves x0 unchanged.
4510 __ lsrv(x0, x0, xzr);
4511
4512 __ Lsr(x16, x0, x1);
4513 __ Lsr(x17, x0, x2);
4514 __ Lsr(x18, x0, x3);
4515 __ Lsr(x19, x0, x4);
4516 __ Lsr(x20, x0, x5);
4517 __ Lsr(x21, x0, x6);
4518
4519 __ Lsr(w22, w0, w1);
4520 __ Lsr(w23, w0, w2);
4521 __ Lsr(w24, w0, w3);
4522 __ Lsr(w25, w0, w4);
4523 __ Lsr(w26, w0, w5);
4524 __ Lsr(w27, w0, w6);
4525 END();
4526
4527 RUN();
4528
4529 ASSERT_EQUAL_64(value, x0);
4530 ASSERT_EQUAL_64(value >> (shift[0] & 63), x16);
4531 ASSERT_EQUAL_64(value >> (shift[1] & 63), x17);
4532 ASSERT_EQUAL_64(value >> (shift[2] & 63), x18);
4533 ASSERT_EQUAL_64(value >> (shift[3] & 63), x19);
4534 ASSERT_EQUAL_64(value >> (shift[4] & 63), x20);
4535 ASSERT_EQUAL_64(value >> (shift[5] & 63), x21);
4536
// The w-register forms operate on the low 32 bits only.
4537 value &= 0xffffffffUL;
4538 ASSERT_EQUAL_32(value >> (shift[0] & 31), w22);
4539 ASSERT_EQUAL_32(value >> (shift[1] & 31), w23);
4540 ASSERT_EQUAL_32(value >> (shift[2] & 31), w24);
4541 ASSERT_EQUAL_32(value >> (shift[3] & 31), w25);
4542 ASSERT_EQUAL_32(value >> (shift[4] & 31), w26);
4543 ASSERT_EQUAL_32(value >> (shift[5] & 31), w27);
4544
4545 TEARDOWN();
4546 }
4547
4548
// Test variable arithmetic right shifts (Asr with a register shift amount)
// in 64- and 32-bit forms, using a negative (sign-bit-set) input value so
// the sign extension is exercised.
4549 TEST(asrv) {
4550 INIT_V8();
4551 SETUP();
4552
4553 int64_t value = 0xfedcba98fedcba98UL;
4554 int shift[] = {1, 3, 5, 9, 17, 33};
4555
4556 START();
4557 __ Mov(x0, value);
4558 __ Mov(w1, shift[0]);
4559 __ Mov(w2, shift[1]);
4560 __ Mov(w3, shift[2]);
4561 __ Mov(w4, shift[3]);
4562 __ Mov(w5, shift[4]);
4563 __ Mov(w6, shift[5]);
4564
// Raw asrv with xzr: shift by zero leaves x0 unchanged.
4565 __ asrv(x0, x0, xzr);
4566
4567 __ Asr(x16, x0, x1);
4568 __ Asr(x17, x0, x2);
4569 __ Asr(x18, x0, x3);
4570 __ Asr(x19, x0, x4);
4571 __ Asr(x20, x0, x5);
4572 __ Asr(x21, x0, x6);
4573
4574 __ Asr(w22, w0, w1);
4575 __ Asr(w23, w0, w2);
4576 __ Asr(w24, w0, w3);
4577 __ Asr(w25, w0, w4);
4578 __ Asr(w26, w0, w5);
4579 __ Asr(w27, w0, w6);
4580 END();
4581
4582 RUN();
4583
4584 ASSERT_EQUAL_64(value, x0);
4585 ASSERT_EQUAL_64(value >> (shift[0] & 63), x16);
4586 ASSERT_EQUAL_64(value >> (shift[1] & 63), x17);
4587 ASSERT_EQUAL_64(value >> (shift[2] & 63), x18);
4588 ASSERT_EQUAL_64(value >> (shift[3] & 63), x19);
4589 ASSERT_EQUAL_64(value >> (shift[4] & 63), x20);
4590 ASSERT_EQUAL_64(value >> (shift[5] & 63), x21);
4591
// 32-bit checks use the sign-extended low word of the input.
4592 int32_t value32 = static_cast<int32_t>(value & 0xffffffffUL);
4593 ASSERT_EQUAL_32(value32 >> (shift[0] & 31), w22);
4594 ASSERT_EQUAL_32(value32 >> (shift[1] & 31), w23);
4595 ASSERT_EQUAL_32(value32 >> (shift[2] & 31), w24);
4596 ASSERT_EQUAL_32(value32 >> (shift[3] & 31), w25);
4597 ASSERT_EQUAL_32(value32 >> (shift[4] & 31), w26);
4598 ASSERT_EQUAL_32(value32 >> (shift[5] & 31), w27);
4599
4600 TEARDOWN();
4601 }
4602
4603
// Test variable rotates (Ror with a register rotate amount) in 64- and
// 32-bit forms; expected values are spelled out as rotated hex patterns.
4604 TEST(rorv) {
4605 INIT_V8();
4606 SETUP();
4607
4608 uint64_t value = 0x0123456789abcdefUL;
4609 int shift[] = {4, 8, 12, 16, 24, 36};
4610
4611 START();
4612 __ Mov(x0, value);
4613 __ Mov(w1, shift[0]);
4614 __ Mov(w2, shift[1]);
4615 __ Mov(w3, shift[2]);
4616 __ Mov(w4, shift[3]);
4617 __ Mov(w5, shift[4]);
4618 __ Mov(w6, shift[5]);
4619
// Raw rorv with xzr: rotate by zero leaves x0 unchanged.
4620 __ rorv(x0, x0, xzr);
4621
4622 __ Ror(x16, x0, x1);
4623 __ Ror(x17, x0, x2);
4624 __ Ror(x18, x0, x3);
4625 __ Ror(x19, x0, x4);
4626 __ Ror(x20, x0, x5);
4627 __ Ror(x21, x0, x6);
4628
4629 __ Ror(w22, w0, w1);
4630 __ Ror(w23, w0, w2);
4631 __ Ror(w24, w0, w3);
4632 __ Ror(w25, w0, w4);
4633 __ Ror(w26, w0, w5);
4634 __ Ror(w27, w0, w6);
4635 END();
4636
4637 RUN();
4638
4639 ASSERT_EQUAL_64(value, x0);
4640 ASSERT_EQUAL_64(0xf0123456789abcdeUL, x16);
4641 ASSERT_EQUAL_64(0xef0123456789abcdUL, x17);
4642 ASSERT_EQUAL_64(0xdef0123456789abcUL, x18);
4643 ASSERT_EQUAL_64(0xcdef0123456789abUL, x19);
4644 ASSERT_EQUAL_64(0xabcdef0123456789UL, x20);
4645 ASSERT_EQUAL_64(0x789abcdef0123456UL, x21);
4646 ASSERT_EQUAL_32(0xf89abcde, w22);
4647 ASSERT_EQUAL_32(0xef89abcd, w23);
4648 ASSERT_EQUAL_32(0xdef89abc, w24);
4649 ASSERT_EQUAL_32(0xcdef89ab, w25);
4650 ASSERT_EQUAL_32(0xabcdef89, w26);
4651 ASSERT_EQUAL_32(0xf89abcde, w27);
4652
4653 TEARDOWN();
4654 }
4655
4656
// Test the raw bfm (bitfield move) instruction in 64- and 32-bit forms, and
// its Bfi/Bfxil aliases. Destinations are pre-filled with 0x88... so the
// untouched bits are visible in the expected values.
4657 TEST(bfm) {
4658 INIT_V8();
4659 SETUP();
4660
4661 START();
4662 __ Mov(x1, 0x0123456789abcdefL);
4663
4664 __ Mov(x10, 0x8888888888888888L);
4665 __ Mov(x11, 0x8888888888888888L);
4666 __ Mov(x12, 0x8888888888888888L);
4667 __ Mov(x13, 0x8888888888888888L);
4668 __ Mov(w20, 0x88888888);
4669 __ Mov(w21, 0x88888888);
4670
4671 __ bfm(x10, x1, 16, 31);
4672 __ bfm(x11, x1, 32, 15);
4673
4674 __ bfm(w20, w1, 16, 23);
4675 __ bfm(w21, w1, 24, 15);
4676
4677 // Aliases.
4678 __ Bfi(x12, x1, 16, 8);
4679 __ Bfxil(x13, x1, 16, 8);
4680 END();
4681
4682 RUN();
4683
4684
4685 ASSERT_EQUAL_64(0x88888888888889abL, x10);
4686 ASSERT_EQUAL_64(0x8888cdef88888888L, x11);
4687
4688 ASSERT_EQUAL_32(0x888888ab, w20);
4689 ASSERT_EQUAL_32(0x88cdef88, w21);
4690
4691 ASSERT_EQUAL_64(0x8888888888ef8888L, x12);
4692 ASSERT_EQUAL_64(0x88888888888888abL, x13);
4693
4694 TEARDOWN();
4695 }
4696
4697
// Test the raw sbfm (signed bitfield move) instruction and its aliases:
// Asr (immediate), Sbfiz, Sbfx, Sxtb, Sxth and Sxtw.
4698 TEST(sbfm) {
4699 INIT_V8();
4700 SETUP();
4701
4702 START();
4703 __ Mov(x1, 0x0123456789abcdefL);
4704 __ Mov(x2, 0xfedcba9876543210L);
4705
4706 __ sbfm(x10, x1, 16, 31);
4707 __ sbfm(x11, x1, 32, 15);
4708 __ sbfm(x12, x1, 32, 47);
4709 __ sbfm(x13, x1, 48, 35);
4710
4711 __ sbfm(w14, w1, 16, 23);
4712 __ sbfm(w15, w1, 24, 15);
4713 __ sbfm(w16, w2, 16, 23);
4714 __ sbfm(w17, w2, 24, 15);
4715
4716 // Aliases.
4717 __ Asr(x18, x1, 32);
4718 __ Asr(x19, x2, 32);
4719 __ Sbfiz(x20, x1, 8, 16);
4720 __ Sbfiz(x21, x2, 8, 16);
4721 __ Sbfx(x22, x1, 8, 16);
4722 __ Sbfx(x23, x2, 8, 16);
4723 __ Sxtb(x24, w1);
4724 __ Sxtb(x25, x2);
4725 __ Sxth(x26, w1);
4726 __ Sxth(x27, x2);
4727 __ Sxtw(x28, w1);
4728 __ Sxtw(x29, x2);
4729 END();
4730
4731 RUN();
4732
4733
4734 ASSERT_EQUAL_64(0xffffffffffff89abL, x10);
4735 ASSERT_EQUAL_64(0xffffcdef00000000L, x11);
4736 ASSERT_EQUAL_64(0x4567L, x12);
4737 ASSERT_EQUAL_64(0x789abcdef0000L, x13);
4738
4739 ASSERT_EQUAL_32(0xffffffab, w14);
4740 ASSERT_EQUAL_32(0xffcdef00, w15);
4741 ASSERT_EQUAL_32(0x54, w16);
4742 ASSERT_EQUAL_32(0x00321000, w17);
4743
4744 ASSERT_EQUAL_64(0x01234567L, x18);
4745 ASSERT_EQUAL_64(0xfffffffffedcba98L, x19);
4746 ASSERT_EQUAL_64(0xffffffffffcdef00L, x20);
4747 ASSERT_EQUAL_64(0x321000L, x21);
4748 ASSERT_EQUAL_64(0xffffffffffffabcdL, x22);
4749 ASSERT_EQUAL_64(0x5432L, x23);
4750 ASSERT_EQUAL_64(0xffffffffffffffefL, x24);
4751 ASSERT_EQUAL_64(0x10, x25);
4752 ASSERT_EQUAL_64(0xffffffffffffcdefL, x26);
4753 ASSERT_EQUAL_64(0x3210, x27);
4754 ASSERT_EQUAL_64(0xffffffff89abcdefL, x28);
4755 ASSERT_EQUAL_64(0x76543210, x29);
4756
4757 TEARDOWN();
4758 }
4759
4760
// Test the raw ubfm (unsigned bitfield move) instruction and its aliases:
// Lsl/Lsr (immediate), Ubfiz, Ubfx, Uxtb, Uxth and Uxtw.
4761 TEST(ubfm) {
4762 INIT_V8();
4763 SETUP();
4764
4765 START();
4766 __ Mov(x1, 0x0123456789abcdefL);
4767 __ Mov(x2, 0xfedcba9876543210L);
4768
// Pre-fill destinations so zeroed bits are visible in the results.
4769 __ Mov(x10, 0x8888888888888888L);
4770 __ Mov(x11, 0x8888888888888888L);
4771
4772 __ ubfm(x10, x1, 16, 31);
4773 __ ubfm(x11, x1, 32, 15);
4774 __ ubfm(x12, x1, 32, 47);
4775 __ ubfm(x13, x1, 48, 35);
4776
4777 __ ubfm(w25, w1, 16, 23);
4778 __ ubfm(w26, w1, 24, 15);
4779 __ ubfm(w27, w2, 16, 23);
4780 __ ubfm(w28, w2, 24, 15);
4781
4782 // Aliases
4783 __ Lsl(x15, x1, 63);
4784 __ Lsl(x16, x1, 0);
4785 __ Lsr(x17, x1, 32);
4786 __ Ubfiz(x18, x1, 8, 16);
4787 __ Ubfx(x19, x1, 8, 16);
4788 __ Uxtb(x20, x1);
4789 __ Uxth(x21, x1);
4790 __ Uxtw(x22, x1);
4791 END();
4792
4793 RUN();
4794
4795 ASSERT_EQUAL_64(0x00000000000089abL, x10);
4796 ASSERT_EQUAL_64(0x0000cdef00000000L, x11);
4797 ASSERT_EQUAL_64(0x4567L, x12);
4798 ASSERT_EQUAL_64(0x789abcdef0000L, x13);
4799
4800 ASSERT_EQUAL_32(0x000000ab, w25);
4801 ASSERT_EQUAL_32(0x00cdef00, w26);
4802 ASSERT_EQUAL_32(0x54, w27);
4803 ASSERT_EQUAL_32(0x00321000, w28);
4804
4805 ASSERT_EQUAL_64(0x8000000000000000L, x15);
4806 ASSERT_EQUAL_64(0x0123456789abcdefL, x16);
4807 ASSERT_EQUAL_64(0x01234567L, x17);
4808 ASSERT_EQUAL_64(0xcdef00L, x18);
4809 ASSERT_EQUAL_64(0xabcdL, x19);
4810 ASSERT_EQUAL_64(0xefL, x20);
4811 ASSERT_EQUAL_64(0xcdefL, x21);
4812 ASSERT_EQUAL_64(0x89abcdefL, x22);
4813
4814 TEARDOWN();
4815 }
4816
4817
// Test Extr (extract register pair) and the Ror-immediate alias it backs,
// in both 32- and 64-bit forms.
4818 TEST(extr) {
4819 INIT_V8();
4820 SETUP();
4821
4822 START();
4823 __ Mov(x1, 0x0123456789abcdefL);
4824 __ Mov(x2, 0xfedcba9876543210L);
4825
4826 __ Extr(w10, w1, w2, 0);
4827 __ Extr(w11, w1, w2, 1);
4828 __ Extr(x12, x2, x1, 2);
4829
// Ror with an immediate is an alias of Extr with both sources equal.
4830 __ Ror(w13, w1, 0);
4831 __ Ror(w14, w2, 17);
4832 __ Ror(w15, w1, 31);
4833 __ Ror(x18, x2, 1);
4834 __ Ror(x19, x1, 63);
4835 END();
4836
4837 RUN();
4838
4839 ASSERT_EQUAL_64(0x76543210, x10);
4840 ASSERT_EQUAL_64(0xbb2a1908, x11);
4841 ASSERT_EQUAL_64(0x0048d159e26af37bUL, x12);
4842 ASSERT_EQUAL_64(0x89abcdef, x13);
4843 ASSERT_EQUAL_64(0x19083b2a, x14);
4844 ASSERT_EQUAL_64(0x13579bdf, x15);
4845 ASSERT_EQUAL_64(0x7f6e5d4c3b2a1908UL, x18);
4846 ASSERT_EQUAL_64(0x02468acf13579bdeUL, x19);
4847
4848 TEARDOWN();
4849 }
4850
4851
// Test Fmov with floating-point immediates, including values that are not
// directly encodable (12.34567, 255.0), zero, and infinities, for both
// single (s) and double (d) precision registers.
4852 TEST(fmov_imm) {
4853 INIT_V8();
4854 SETUP();
4855
4856 START();
4857 __ Fmov(s11, 1.0);
4858 __ Fmov(d22, -13.0);
4859 __ Fmov(s1, 255.0);
4860 __ Fmov(d2, 12.34567);
4861 __ Fmov(s3, 0.0);
4862 __ Fmov(d4, 0.0);
4863 __ Fmov(s5, kFP32PositiveInfinity);
4864 __ Fmov(d6, kFP64NegativeInfinity);
4865 END();
4866
4867 RUN();
4868
4869 ASSERT_EQUAL_FP32(1.0, s11);
4870 ASSERT_EQUAL_FP64(-13.0, d22);
4871 ASSERT_EQUAL_FP32(255.0, s1);
4872 ASSERT_EQUAL_FP64(12.34567, d2);
4873 ASSERT_EQUAL_FP32(0.0, s3);
4874 ASSERT_EQUAL_FP64(0.0, d4);
4875 ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s5);
4876 ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d6);
4877
4878 TEARDOWN();
4879 }
4880
4881
// Test register-to-register FMOV: FP->FP moves, and raw-bit transfers
// between FP registers and general-purpose registers in both directions.
TEST(fmov_reg) {
  INIT_V8();
  SETUP();

  START();
  __ Fmov(s20, 1.0);
  __ Fmov(w10, s20);   // FP -> GP bit transfer (32-bit).
  __ Fmov(s30, w10);   // GP -> FP bit transfer (32-bit).
  __ Fmov(s5, s20);
  __ Fmov(d1, -13.0);
  __ Fmov(x1, d1);     // FP -> GP bit transfer (64-bit).
  __ Fmov(d2, x1);     // GP -> FP bit transfer (64-bit).
  __ Fmov(d4, d1);
  __ Fmov(d6, rawbits_to_double(0x0123456789abcdefL));
  // An S-form move writes only 32 bits of the result register; the assert
  // below checks that s6 holds the low word of d6's original bit pattern.
  __ Fmov(s6, s6);
  END();

  RUN();

  ASSERT_EQUAL_32(float_to_rawbits(1.0), w10);
  ASSERT_EQUAL_FP32(1.0, s30);
  ASSERT_EQUAL_FP32(1.0, s5);
  ASSERT_EQUAL_64(double_to_rawbits(-13.0), x1);
  ASSERT_EQUAL_FP64(-13.0, d2);
  ASSERT_EQUAL_FP64(-13.0, d4);
  ASSERT_EQUAL_FP32(rawbits_to_float(0x89abcdef), s6);

  TEARDOWN();
}
4911
4912
// Test FADD for single and double precision, including -0.0, +0.0 and
// infinity operands.
TEST(fadd) {
  INIT_V8();
  SETUP();

  START();
  __ Fmov(s13, -0.0);
  __ Fmov(s14, kFP32PositiveInfinity);
  __ Fmov(s15, kFP32NegativeInfinity);
  __ Fmov(s16, 3.25);
  __ Fmov(s17, 1.0);
  __ Fmov(s18, 0);

  __ Fmov(d26, -0.0);
  __ Fmov(d27, kFP64PositiveInfinity);
  __ Fmov(d28, kFP64NegativeInfinity);
  __ Fmov(d29, 0);
  __ Fmov(d30, -2.0);
  __ Fmov(d31, 2.25);

  __ Fadd(s0, s16, s17);
  __ Fadd(s1, s17, s18);
  __ Fadd(s2, s13, s17);
  __ Fadd(s3, s14, s17);   // +inf + finite = +inf.
  __ Fadd(s4, s15, s17);   // -inf + finite = -inf.

  __ Fadd(d5, d30, d31);
  __ Fadd(d6, d29, d31);
  __ Fadd(d7, d26, d31);
  __ Fadd(d8, d27, d31);
  __ Fadd(d9, d28, d31);
  END();

  RUN();

  ASSERT_EQUAL_FP32(4.25, s0);
  ASSERT_EQUAL_FP32(1.0, s1);
  ASSERT_EQUAL_FP32(1.0, s2);
  ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s3);
  ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s4);
  ASSERT_EQUAL_FP64(0.25, d5);
  ASSERT_EQUAL_FP64(2.25, d6);
  ASSERT_EQUAL_FP64(2.25, d7);
  ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d8);
  ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d9);

  TEARDOWN();
}
4960
4961
// Test FSUB for single and double precision, including -0.0, +0.0 and
// infinity operands.
TEST(fsub) {
  INIT_V8();
  SETUP();

  START();
  __ Fmov(s13, -0.0);
  __ Fmov(s14, kFP32PositiveInfinity);
  __ Fmov(s15, kFP32NegativeInfinity);
  __ Fmov(s16, 3.25);
  __ Fmov(s17, 1.0);
  __ Fmov(s18, 0);

  __ Fmov(d26, -0.0);
  __ Fmov(d27, kFP64PositiveInfinity);
  __ Fmov(d28, kFP64NegativeInfinity);
  __ Fmov(d29, 0);
  __ Fmov(d30, -2.0);
  __ Fmov(d31, 2.25);

  __ Fsub(s0, s16, s17);
  __ Fsub(s1, s17, s18);
  __ Fsub(s2, s13, s17);
  __ Fsub(s3, s17, s14);   // finite - (+inf) = -inf.
  __ Fsub(s4, s17, s15);   // finite - (-inf) = +inf.

  __ Fsub(d5, d30, d31);
  __ Fsub(d6, d29, d31);
  __ Fsub(d7, d26, d31);
  __ Fsub(d8, d31, d27);
  __ Fsub(d9, d31, d28);
  END();

  RUN();

  ASSERT_EQUAL_FP32(2.25, s0);
  ASSERT_EQUAL_FP32(1.0, s1);
  ASSERT_EQUAL_FP32(-1.0, s2);
  ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s3);
  ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s4);
  ASSERT_EQUAL_FP64(-4.25, d5);
  ASSERT_EQUAL_FP64(-2.25, d6);
  ASSERT_EQUAL_FP64(-2.25, d7);
  ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d8);
  ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d9);

  TEARDOWN();
}
5009
5010
// Test FMUL for single and double precision, including signed zeros and
// infinity * finite sign handling.
TEST(fmul) {
  INIT_V8();
  SETUP();

  START();
  __ Fmov(s13, -0.0);
  __ Fmov(s14, kFP32PositiveInfinity);
  __ Fmov(s15, kFP32NegativeInfinity);
  __ Fmov(s16, 3.25);
  __ Fmov(s17, 2.0);
  __ Fmov(s18, 0);
  __ Fmov(s19, -2.0);

  __ Fmov(d26, -0.0);
  __ Fmov(d27, kFP64PositiveInfinity);
  __ Fmov(d28, kFP64NegativeInfinity);
  __ Fmov(d29, 0);
  __ Fmov(d30, -2.0);
  __ Fmov(d31, 2.25);

  __ Fmul(s0, s16, s17);
  __ Fmul(s1, s17, s18);
  __ Fmul(s2, s13, s13);   // (-0) * (-0) = +0.
  __ Fmul(s3, s14, s19);   // +inf * negative = -inf.
  __ Fmul(s4, s15, s19);   // -inf * negative = +inf.

  __ Fmul(d5, d30, d31);
  __ Fmul(d6, d29, d31);
  __ Fmul(d7, d26, d26);
  __ Fmul(d8, d27, d30);
  __ Fmul(d9, d28, d30);
  END();

  RUN();

  ASSERT_EQUAL_FP32(6.5, s0);
  ASSERT_EQUAL_FP32(0.0, s1);
  ASSERT_EQUAL_FP32(0.0, s2);
  ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s3);
  ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s4);
  ASSERT_EQUAL_FP64(-4.5, d5);
  ASSERT_EQUAL_FP64(0.0, d6);
  ASSERT_EQUAL_FP64(0.0, d7);
  ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d8);
  ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d9);

  TEARDOWN();
}
5059
5060
// Run FMADD/FMSUB/FNMADD/FNMSUB on (n, m, a) and check the results against
// the expected fused values:
//   fmadd  =  fma(n, m, a),  fmsub  =  fma(-n, m, a),
//   fnmadd = -fma(n, m, a),  fnmsub = -fma(-n, m, a).
// The caller supplies the expected fmadd/fmsub; the negated forms are
// derived from them.
static void FmaddFmsubDoubleHelper(double n, double m, double a,
                                   double fmadd, double fmsub) {
  SETUP();
  START();

  __ Fmov(d0, n);
  __ Fmov(d1, m);
  __ Fmov(d2, a);
  __ Fmadd(d28, d0, d1, d2);
  __ Fmsub(d29, d0, d1, d2);
  __ Fnmadd(d30, d0, d1, d2);
  __ Fnmsub(d31, d0, d1, d2);

  END();
  RUN();

  ASSERT_EQUAL_FP64(fmadd, d28);
  ASSERT_EQUAL_FP64(fmsub, d29);
  ASSERT_EQUAL_FP64(-fmadd, d30);
  ASSERT_EQUAL_FP64(-fmsub, d31);

  TEARDOWN();
}
5084
5085
// Exhaustively test double-precision fused multiply-add/subtract over every
// combination of a representative input set (normals, zeros, infinities,
// subnormals and NaNs), using the C library fma() as the reference.
TEST(fmadd_fmsub_double) {
  INIT_V8();
  double inputs[] = {
    // Normal numbers, including -0.0.
    DBL_MAX, DBL_MIN, 3.25, 2.0, 0.0,
    -DBL_MAX, -DBL_MIN, -3.25, -2.0, -0.0,
    // Infinities.
    kFP64NegativeInfinity, kFP64PositiveInfinity,
    // Subnormal numbers.
    rawbits_to_double(0x000fffffffffffff),
    rawbits_to_double(0x0000000000000001),
    rawbits_to_double(0x000123456789abcd),
    -rawbits_to_double(0x000fffffffffffff),
    -rawbits_to_double(0x0000000000000001),
    -rawbits_to_double(0x000123456789abcd),
    // NaN.
    kFP64QuietNaN,
    -kFP64QuietNaN,
  };
  const int count = sizeof(inputs) / sizeof(inputs[0]);

  // Iterate over all count^3 (n, m, a) triples.
  for (int in = 0; in < count; in++) {
    double n = inputs[in];
    for (int im = 0; im < count; im++) {
      double m = inputs[im];
      for (int ia = 0; ia < count; ia++) {
        double a = inputs[ia];
        double fmadd = fma(n, m, a);
        double fmsub = fma(-n, m, a);

        FmaddFmsubDoubleHelper(n, m, a, fmadd, fmsub);
      }
    }
  }
}
5121
5122
// Stress double-precision FMADD/FMSUB with random inputs, specifically
// collecting cases where the fused result differs from the separately
// rounded multiply-then-add, until at least `limit` such cases of each kind
// have been checked.
TEST(fmadd_fmsub_double_rounding) {
  INIT_V8();
  // Make sure we run plenty of tests where an intermediate rounding stage would
  // produce an incorrect result.
  const int limit = 1000;
  int count_fmadd = 0;
  int count_fmsub = 0;

  // Fixed seed so failures are reproducible.
  uint16_t seed[3] = {42, 43, 44};
  seed48(seed);

  while ((count_fmadd < limit) || (count_fmsub < limit)) {
    double n, m, a;
    uint32_t r[2];
    ASSERT(sizeof(r) == sizeof(n));

    // Build each double from two random 32-bit halves.
    r[0] = mrand48();
    r[1] = mrand48();
    memcpy(&n, r, sizeof(r));
    r[0] = mrand48();
    r[1] = mrand48();
    memcpy(&m, r, sizeof(r));
    r[0] = mrand48();
    r[1] = mrand48();
    memcpy(&a, r, sizeof(r));

    // Skip NaNs and infinities; only finite inputs are interesting here.
    if (!std::isfinite(a) || !std::isfinite(n) || !std::isfinite(m)) {
      continue;
    }

    // Calculate the expected results.
    double fmadd = fma(n, m, a);
    double fmsub = fma(-n, m, a);

    bool test_fmadd = (fmadd != (a + n * m));
    bool test_fmsub = (fmsub != (a - n * m));

    // If rounding would produce a different result, increment the test count.
    count_fmadd += test_fmadd;
    count_fmsub += test_fmsub;

    if (test_fmadd || test_fmsub) {
      FmaddFmsubDoubleHelper(n, m, a, fmadd, fmsub);
    }
  }
}
5169
5170
// Run single-precision FMADD/FMSUB on (n, m, a) and check against the
// caller-supplied expected fused results:
//   fmadd = fmaf(n, m, a), fmsub = fmaf(-n, m, a).
// Note: unlike the double-precision helper, this does not exercise
// Fnmadd/Fnmsub.
static void FmaddFmsubFloatHelper(float n, float m, float a,
                                  float fmadd, float fmsub) {
  SETUP();
  START();

  __ Fmov(s0, n);
  __ Fmov(s1, m);
  __ Fmov(s2, a);
  __ Fmadd(s30, s0, s1, s2);
  __ Fmsub(s31, s0, s1, s2);

  END();
  RUN();

  ASSERT_EQUAL_FP32(fmadd, s30);
  ASSERT_EQUAL_FP32(fmsub, s31);

  TEARDOWN();
}
5190
5191
// Exhaustively test single-precision fused multiply-add/subtract over every
// combination of a representative input set (normals, zeros, infinities,
// subnormals and NaNs), using the C library fmaf() as the reference.
TEST(fmadd_fmsub_float) {
  INIT_V8();
  float inputs[] = {
    // Normal numbers, including -0.0f.
    FLT_MAX, FLT_MIN, 3.25f, 2.0f, 0.0f,
    -FLT_MAX, -FLT_MIN, -3.25f, -2.0f, -0.0f,
    // Infinities.
    kFP32NegativeInfinity, kFP32PositiveInfinity,
    // Subnormal numbers.
    rawbits_to_float(0x07ffffff),
    rawbits_to_float(0x00000001),
    rawbits_to_float(0x01234567),
    -rawbits_to_float(0x07ffffff),
    -rawbits_to_float(0x00000001),
    -rawbits_to_float(0x01234567),
    // NaN.
    kFP32QuietNaN,
    -kFP32QuietNaN,
  };
  const int count = sizeof(inputs) / sizeof(inputs[0]);

  // Iterate over all count^3 (n, m, a) triples.
  for (int in = 0; in < count; in++) {
    float n = inputs[in];
    for (int im = 0; im < count; im++) {
      float m = inputs[im];
      for (int ia = 0; ia < count; ia++) {
        float a = inputs[ia];
        float fmadd = fmaf(n, m, a);
        float fmsub = fmaf(-n, m, a);

        FmaddFmsubFloatHelper(n, m, a, fmadd, fmsub);
      }
    }
  }
}
5227
5228
// Stress single-precision FMADD/FMSUB with random inputs, specifically
// collecting cases where the fused result differs from the separately
// rounded multiply-then-add, until at least `limit` such cases of each kind
// have been checked.
TEST(fmadd_fmsub_float_rounding) {
  INIT_V8();
  // Make sure we run plenty of tests where an intermediate rounding stage would
  // produce an incorrect result.
  const int limit = 1000;
  int count_fmadd = 0;
  int count_fmsub = 0;

  // Fixed seed so failures are reproducible.
  uint16_t seed[3] = {42, 43, 44};
  seed48(seed);

  while ((count_fmadd < limit) || (count_fmsub < limit)) {
    float n, m, a;
    uint32_t r;
    ASSERT(sizeof(r) == sizeof(n));

    // Build each float from one random 32-bit value.
    r = mrand48();
    memcpy(&n, &r, sizeof(r));
    r = mrand48();
    memcpy(&m, &r, sizeof(r));
    r = mrand48();
    memcpy(&a, &r, sizeof(r));

    // Skip NaNs and infinities; only finite inputs are interesting here.
    if (!std::isfinite(a) || !std::isfinite(n) || !std::isfinite(m)) {
      continue;
    }

    // Calculate the expected results.
    float fmadd = fmaf(n, m, a);
    float fmsub = fmaf(-n, m, a);

    bool test_fmadd = (fmadd != (a + n * m));
    bool test_fmsub = (fmsub != (a - n * m));

    // If rounding would produce a different result, increment the test count.
    count_fmadd += test_fmadd;
    count_fmsub += test_fmsub;

    if (test_fmadd || test_fmsub) {
      FmaddFmsubFloatHelper(n, m, a, fmadd, fmsub);
    }
  }
}
5272
5273
// Test FDIV for single and double precision, including division by and of
// signed zeros and infinities (finite / inf yields a signed zero).
TEST(fdiv) {
  INIT_V8();
  SETUP();

  START();
  __ Fmov(s13, -0.0);
  __ Fmov(s14, kFP32PositiveInfinity);
  __ Fmov(s15, kFP32NegativeInfinity);
  __ Fmov(s16, 3.25);
  __ Fmov(s17, 2.0);
  __ Fmov(s18, 2.0);
  __ Fmov(s19, -2.0);

  __ Fmov(d26, -0.0);
  __ Fmov(d27, kFP64PositiveInfinity);
  __ Fmov(d28, kFP64NegativeInfinity);
  __ Fmov(d29, 0);
  __ Fmov(d30, -2.0);
  __ Fmov(d31, 2.25);

  __ Fdiv(s0, s16, s17);
  __ Fdiv(s1, s17, s18);
  __ Fdiv(s2, s13, s17);   // -0 / finite = -0.
  __ Fdiv(s3, s17, s14);   // finite / +inf = +0.
  __ Fdiv(s4, s17, s15);   // finite / -inf = -0.
  __ Fdiv(d5, d31, d30);
  __ Fdiv(d6, d29, d31);
  __ Fdiv(d7, d26, d31);
  __ Fdiv(d8, d31, d27);
  __ Fdiv(d9, d31, d28);
  END();

  RUN();

  ASSERT_EQUAL_FP32(1.625, s0);
  ASSERT_EQUAL_FP32(1.0, s1);
  ASSERT_EQUAL_FP32(-0.0, s2);
  ASSERT_EQUAL_FP32(0.0, s3);
  ASSERT_EQUAL_FP32(-0.0, s4);
  ASSERT_EQUAL_FP64(-1.125, d5);
  ASSERT_EQUAL_FP64(0.0, d6);
  ASSERT_EQUAL_FP64(-0.0, d7);
  ASSERT_EQUAL_FP64(0.0, d8);
  ASSERT_EQUAL_FP64(-0.0, d9);

  TEARDOWN();
}
5321
5322
5323 static float MinMaxHelper(float n,
5324 float m,
5325 bool min,
5326 float quiet_nan_substitute = 0.0) {
5327 const uint64_t kFP32QuietNaNMask = 0x00400000UL;
5328 uint32_t raw_n = float_to_rawbits(n);
5329 uint32_t raw_m = float_to_rawbits(m);
5330
5331 if (std::isnan(n) && ((raw_n & kFP32QuietNaNMask) == 0)) {
5332 // n is signalling NaN.
5333 return n;
5334 } else if (std::isnan(m) && ((raw_m & kFP32QuietNaNMask) == 0)) {
5335 // m is signalling NaN.
5336 return m;
5337 } else if (quiet_nan_substitute == 0.0) {
5338 if (std::isnan(n)) {
5339 // n is quiet NaN.
5340 return n;
5341 } else if (std::isnan(m)) {
5342 // m is quiet NaN.
5343 return m;
5344 }
5345 } else {
5346 // Substitute n or m if one is quiet, but not both.
5347 if (std::isnan(n) && !std::isnan(m)) {
5348 // n is quiet NaN: replace with substitute.
5349 n = quiet_nan_substitute;
5350 } else if (!std::isnan(n) && std::isnan(m)) {
5351 // m is quiet NaN: replace with substitute.
5352 m = quiet_nan_substitute;
5353 }
5354 }
5355
5356 if ((n == 0.0) && (m == 0.0) &&
5357 (copysign(1.0, n) != copysign(1.0, m))) {
5358 return min ? -0.0 : 0.0;
5359 }
5360
5361 return min ? fminf(n, m) : fmaxf(n, m);
5362 }
5363
5364
// Compute the expected double-precision result of an FP min/max operation,
// modelling the A64 NaN-handling rules:
//  - A signalling NaN operand is always propagated.
//  - With quiet_nan_substitute == 0.0 (Fmin/Fmax behaviour), a quiet NaN
//    operand is propagated too.
//  - Otherwise (Fminnm/Fmaxnm behaviour), a single quiet NaN operand is
//    replaced by the substitute so that the numeric operand wins.
// Zeros of opposite sign are handled explicitly because fmin/fmax do not
// distinguish -0.0 from +0.0.
static double MinMaxHelper(double n,
                           double m,
                           bool min,
                           double quiet_nan_substitute = 0.0) {
  // Bit 51 set marks a double-precision NaN as quiet.
  const uint64_t kFP64QuietNaNMask = 0x0008000000000000UL;
  uint64_t raw_n = double_to_rawbits(n);
  uint64_t raw_m = double_to_rawbits(m);

  if (std::isnan(n) && ((raw_n & kFP64QuietNaNMask) == 0)) {
    // n is signalling NaN.
    return n;
  } else if (std::isnan(m) && ((raw_m & kFP64QuietNaNMask) == 0)) {
    // m is signalling NaN.
    return m;
  } else if (quiet_nan_substitute == 0.0) {
    if (std::isnan(n)) {
      // n is quiet NaN.
      return n;
    } else if (std::isnan(m)) {
      // m is quiet NaN.
      return m;
    }
  } else {
    // Substitute n or m if one is quiet, but not both.
    if (std::isnan(n) && !std::isnan(m)) {
      // n is quiet NaN: replace with substitute.
      n = quiet_nan_substitute;
    } else if (!std::isnan(n) && std::isnan(m)) {
      // m is quiet NaN: replace with substitute.
      m = quiet_nan_substitute;
    }
  }

  // min(-0.0, +0.0) is -0.0 and max(-0.0, +0.0) is +0.0, regardless of
  // operand order.
  if ((n == 0.0) && (m == 0.0) &&
      (copysign(1.0, n) != copysign(1.0, m))) {
    return min ? -0.0 : 0.0;
  }

  return min ? fmin(n, m) : fmax(n, m);
}
5405
5406
// Run Fmin/Fmax/Fminnm/Fmaxnm on the double-precision pair (n, m) and check
// each result against the caller-supplied expected values.
static void FminFmaxDoubleHelper(double n, double m, double min, double max,
                                 double minnm, double maxnm) {
  SETUP();

  START();
  __ Fmov(d0, n);
  __ Fmov(d1, m);
  __ Fmin(d28, d0, d1);
  __ Fmax(d29, d0, d1);
  __ Fminnm(d30, d0, d1);
  __ Fmaxnm(d31, d0, d1);
  END();

  RUN();

  ASSERT_EQUAL_FP64(min, d28);
  ASSERT_EQUAL_FP64(max, d29);
  ASSERT_EQUAL_FP64(minnm, d30);
  ASSERT_EQUAL_FP64(maxnm, d31);

  TEARDOWN();
}
5429
5430
// Test double-precision Fmin/Fmax/Fminnm/Fmaxnm: first a handful of
// hand-written bootstrap cases (which also validate MinMaxHelper itself),
// then all pairs from a representative input set checked against
// MinMaxHelper.
TEST(fmax_fmin_d) {
  INIT_V8();
  // Bootstrap tests.
  FminFmaxDoubleHelper(0, 0, 0, 0, 0, 0);
  FminFmaxDoubleHelper(0, 1, 0, 1, 0, 1);
  FminFmaxDoubleHelper(kFP64PositiveInfinity, kFP64NegativeInfinity,
                       kFP64NegativeInfinity, kFP64PositiveInfinity,
                       kFP64NegativeInfinity, kFP64PositiveInfinity);
  FminFmaxDoubleHelper(kFP64SignallingNaN, 0,
                       kFP64SignallingNaN, kFP64SignallingNaN,
                       kFP64SignallingNaN, kFP64SignallingNaN);
  FminFmaxDoubleHelper(kFP64QuietNaN, 0,
                       kFP64QuietNaN, kFP64QuietNaN,
                       0, 0);
  FminFmaxDoubleHelper(kFP64QuietNaN, kFP64SignallingNaN,
                       kFP64SignallingNaN, kFP64SignallingNaN,
                       kFP64SignallingNaN, kFP64SignallingNaN);

  // Iterate over all combinations of inputs.
  double inputs[] = { DBL_MAX, DBL_MIN, 1.0, 0.0,
                      -DBL_MAX, -DBL_MIN, -1.0, -0.0,
                      kFP64PositiveInfinity, kFP64NegativeInfinity,
                      kFP64QuietNaN, kFP64SignallingNaN };

  const int count = sizeof(inputs) / sizeof(inputs[0]);

  for (int in = 0; in < count; in++) {
    double n = inputs[in];
    for (int im = 0; im < count; im++) {
      double m = inputs[im];
      // The infinity substitutes make Fminnm/Fmaxnm prefer the numeric
      // operand over a quiet NaN.
      FminFmaxDoubleHelper(n, m,
                           MinMaxHelper(n, m, true),
                           MinMaxHelper(n, m, false),
                           MinMaxHelper(n, m, true, kFP64PositiveInfinity),
                           MinMaxHelper(n, m, false, kFP64NegativeInfinity));
    }
  }
}
5469
5470
// Run Fmin/Fmax/Fminnm/Fmaxnm on the single-precision pair (n, m) and check
// each result against the caller-supplied expected values.
static void FminFmaxFloatHelper(float n, float m, float min, float max,
                                float minnm, float maxnm) {
  SETUP();

  START();
  // TODO(all): Signalling NaNs are sometimes converted by the C compiler to
  // quiet NaNs on implicit casts from float to double. Here, we move the raw
  // bits into a W register first, so we get the correct value. Fix Fmov so this
  // additional step is no longer needed.
  __ Mov(w0, float_to_rawbits(n));
  __ Fmov(s0, w0);
  __ Mov(w0, float_to_rawbits(m));
  __ Fmov(s1, w0);
  __ Fmin(s28, s0, s1);
  __ Fmax(s29, s0, s1);
  __ Fminnm(s30, s0, s1);
  __ Fmaxnm(s31, s0, s1);
  END();

  RUN();

  ASSERT_EQUAL_FP32(min, s28);
  ASSERT_EQUAL_FP32(max, s29);
  ASSERT_EQUAL_FP32(minnm, s30);
  ASSERT_EQUAL_FP32(maxnm, s31);

  TEARDOWN();
}
5499
5500
// Test single-precision Fmin/Fmax/Fminnm/Fmaxnm: first a handful of
// hand-written bootstrap cases (which also validate MinMaxHelper itself),
// then all pairs from a representative input set checked against
// MinMaxHelper.
TEST(fmax_fmin_s) {
  INIT_V8();
  // Bootstrap tests.
  FminFmaxFloatHelper(0, 0, 0, 0, 0, 0);
  FminFmaxFloatHelper(0, 1, 0, 1, 0, 1);
  FminFmaxFloatHelper(kFP32PositiveInfinity, kFP32NegativeInfinity,
                      kFP32NegativeInfinity, kFP32PositiveInfinity,
                      kFP32NegativeInfinity, kFP32PositiveInfinity);
  FminFmaxFloatHelper(kFP32SignallingNaN, 0,
                      kFP32SignallingNaN, kFP32SignallingNaN,
                      kFP32SignallingNaN, kFP32SignallingNaN);
  FminFmaxFloatHelper(kFP32QuietNaN, 0,
                      kFP32QuietNaN, kFP32QuietNaN,
                      0, 0);
  FminFmaxFloatHelper(kFP32QuietNaN, kFP32SignallingNaN,
                      kFP32SignallingNaN, kFP32SignallingNaN,
                      kFP32SignallingNaN, kFP32SignallingNaN);

  // Iterate over all combinations of inputs.
  float inputs[] = { FLT_MAX, FLT_MIN, 1.0, 0.0,
                     -FLT_MAX, -FLT_MIN, -1.0, -0.0,
                     kFP32PositiveInfinity, kFP32NegativeInfinity,
                     kFP32QuietNaN, kFP32SignallingNaN };

  const int count = sizeof(inputs) / sizeof(inputs[0]);

  for (int in = 0; in < count; in++) {
    float n = inputs[in];
    for (int im = 0; im < count; im++) {
      float m = inputs[im];
      // The infinity substitutes make Fminnm/Fmaxnm prefer the numeric
      // operand over a quiet NaN.
      FminFmaxFloatHelper(n, m,
                          MinMaxHelper(n, m, true),
                          MinMaxHelper(n, m, false),
                          MinMaxHelper(n, m, true, kFP32PositiveInfinity),
                          MinMaxHelper(n, m, false, kFP32NegativeInfinity));
    }
  }
}
5539
5540
// Test FCCMP (conditional FP compare): when the input condition holds, the
// FP comparison runs and sets NZCV; otherwise NZCV is set directly to the
// immediate flag value. Each case reads the flags back via Mrs(NZCV).
TEST(fccmp) {
  INIT_V8();
  SETUP();

  START();
  __ Fmov(s16, 0.0);
  __ Fmov(s17, 0.5);
  __ Fmov(d18, -0.5);
  __ Fmov(d19, -1.0);
  __ Mov(x20, 0);

  // Condition holds (eq after Cmp(x20, 0)): FP compare result is used.
  __ Cmp(x20, 0);
  __ Fccmp(s16, s16, NoFlag, eq);
  __ Mrs(x0, NZCV);

  // Condition fails (ne): the immediate flags (VFlag) are used instead.
  __ Cmp(x20, 0);
  __ Fccmp(s16, s16, VFlag, ne);
  __ Mrs(x1, NZCV);

  __ Cmp(x20, 0);
  __ Fccmp(s16, s17, CFlag, ge);
  __ Mrs(x2, NZCV);

  __ Cmp(x20, 0);
  __ Fccmp(s16, s17, CVFlag, lt);
  __ Mrs(x3, NZCV);

  __ Cmp(x20, 0);
  __ Fccmp(d18, d18, ZFlag, le);
  __ Mrs(x4, NZCV);

  __ Cmp(x20, 0);
  __ Fccmp(d18, d18, ZVFlag, gt);
  __ Mrs(x5, NZCV);

  __ Cmp(x20, 0);
  __ Fccmp(d18, d19, ZCVFlag, ls);
  __ Mrs(x6, NZCV);

  __ Cmp(x20, 0);
  __ Fccmp(d18, d19, NFlag, hi);
  __ Mrs(x7, NZCV);

  // The raw assembler (lowercase fccmp) is used for the al and nv
  // conditions; both always hold, so the FP compare result is expected.
  __ fccmp(s16, s16, NFlag, al);
  __ Mrs(x8, NZCV);

  __ fccmp(d18, d18, NFlag, nv);
  __ Mrs(x9, NZCV);

  END();

  RUN();

  ASSERT_EQUAL_32(ZCFlag, w0);
  ASSERT_EQUAL_32(VFlag, w1);
  ASSERT_EQUAL_32(NFlag, w2);
  ASSERT_EQUAL_32(CVFlag, w3);
  ASSERT_EQUAL_32(ZCFlag, w4);
  ASSERT_EQUAL_32(ZVFlag, w5);
  ASSERT_EQUAL_32(CFlag, w6);
  ASSERT_EQUAL_32(NFlag, w7);
  ASSERT_EQUAL_32(ZCFlag, w8);
  ASSERT_EQUAL_32(ZCFlag, w9);

  TEARDOWN();
}
5607
5608
// Test FCMP for single and double precision: equal, less-than, greater-than,
// NaN operands (unordered => CV flags), comparison with the #0.0 immediate
// form, and comparison against a non-zero immediate (which requires the
// macro assembler to synthesize the operand in a scratch FP register).
TEST(fcmp) {
  INIT_V8();
  SETUP();

  START();

  // Some of these tests require a floating-point scratch register assigned to
  // the macro assembler, but most do not.
  __ SetFPScratchRegister(NoFPReg);

  __ Fmov(s8, 0.0);
  __ Fmov(s9, 0.5);
  __ Mov(w18, 0x7f800001);  // Single precision NaN.
  __ Fmov(s18, w18);

  __ Fcmp(s8, s8);
  __ Mrs(x0, NZCV);
  __ Fcmp(s8, s9);
  __ Mrs(x1, NZCV);
  __ Fcmp(s9, s8);
  __ Mrs(x2, NZCV);
  __ Fcmp(s8, s18);       // NaN operand => unordered.
  __ Mrs(x3, NZCV);
  __ Fcmp(s18, s18);
  __ Mrs(x4, NZCV);
  __ Fcmp(s8, 0.0);       // Immediate-zero form needs no scratch register.
  __ Mrs(x5, NZCV);
  // A non-zero immediate must be materialized in the FP scratch register.
  __ SetFPScratchRegister(d0);
  __ Fcmp(s8, 255.0);
  __ SetFPScratchRegister(NoFPReg);
  __ Mrs(x6, NZCV);

  __ Fmov(d19, 0.0);
  __ Fmov(d20, 0.5);
  __ Mov(x21, 0x7ff0000000000001UL);  // Double precision NaN.
  __ Fmov(d21, x21);

  __ Fcmp(d19, d19);
  __ Mrs(x10, NZCV);
  __ Fcmp(d19, d20);
  __ Mrs(x11, NZCV);
  __ Fcmp(d20, d19);
  __ Mrs(x12, NZCV);
  __ Fcmp(d19, d21);      // NaN operand => unordered.
  __ Mrs(x13, NZCV);
  __ Fcmp(d21, d21);
  __ Mrs(x14, NZCV);
  __ Fcmp(d19, 0.0);
  __ Mrs(x15, NZCV);
  __ SetFPScratchRegister(d0);
  __ Fcmp(d19, 12.3456);
  __ SetFPScratchRegister(NoFPReg);
  __ Mrs(x16, NZCV);
  END();

  RUN();

  ASSERT_EQUAL_32(ZCFlag, w0);
  ASSERT_EQUAL_32(NFlag, w1);
  ASSERT_EQUAL_32(CFlag, w2);
  ASSERT_EQUAL_32(CVFlag, w3);
  ASSERT_EQUAL_32(CVFlag, w4);
  ASSERT_EQUAL_32(ZCFlag, w5);
  ASSERT_EQUAL_32(NFlag, w6);
  ASSERT_EQUAL_32(ZCFlag, w10);
  ASSERT_EQUAL_32(NFlag, w11);
  ASSERT_EQUAL_32(CFlag, w12);
  ASSERT_EQUAL_32(CVFlag, w13);
  ASSERT_EQUAL_32(CVFlag, w14);
  ASSERT_EQUAL_32(ZCFlag, w15);
  ASSERT_EQUAL_32(NFlag, w16);

  TEARDOWN();
}
5683
5684
// Test FCSEL (conditional FP select): the first operand is chosen when the
// condition holds, the second otherwise. The lowercase (raw assembler)
// calls exercise the always-true al and nv conditions.
TEST(fcsel) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x16, 0);
  __ Fmov(s16, 1.0);
  __ Fmov(s17, 2.0);
  __ Fmov(d18, 3.0);
  __ Fmov(d19, 4.0);

  // Cmp(x16, 0) sets eq, so eq selects the first operand and ne the second.
  __ Cmp(x16, 0);
  __ Fcsel(s0, s16, s17, eq);
  __ Fcsel(s1, s16, s17, ne);
  __ Fcsel(d2, d18, d19, eq);
  __ Fcsel(d3, d18, d19, ne);
  __ fcsel(s4, s16, s17, al);
  __ fcsel(d5, d18, d19, nv);
  END();

  RUN();

  ASSERT_EQUAL_FP32(1.0, s0);
  ASSERT_EQUAL_FP32(2.0, s1);
  ASSERT_EQUAL_FP64(3.0, d2);
  ASSERT_EQUAL_FP64(4.0, d3);
  ASSERT_EQUAL_FP32(1.0, s4);
  ASSERT_EQUAL_FP64(3.0, d5);

  TEARDOWN();
}
5716
5717
// Test FNEG for single and double precision. Each value is negated twice to
// confirm the operation is its own inverse, including for zero (whose sign
// bit flips) and infinity.
TEST(fneg) {
  INIT_V8();
  SETUP();

  START();
  __ Fmov(s16, 1.0);
  __ Fmov(s17, 0.0);
  __ Fmov(s18, kFP32PositiveInfinity);
  __ Fmov(d19, 1.0);
  __ Fmov(d20, 0.0);
  __ Fmov(d21, kFP64PositiveInfinity);

  __ Fneg(s0, s16);
  __ Fneg(s1, s0);
  __ Fneg(s2, s17);
  __ Fneg(s3, s2);
  __ Fneg(s4, s18);
  __ Fneg(s5, s4);
  __ Fneg(d6, d19);
  __ Fneg(d7, d6);
  __ Fneg(d8, d20);
  __ Fneg(d9, d8);
  __ Fneg(d10, d21);
  __ Fneg(d11, d10);
  END();

  RUN();

  ASSERT_EQUAL_FP32(-1.0, s0);
  ASSERT_EQUAL_FP32(1.0, s1);
  ASSERT_EQUAL_FP32(-0.0, s2);
  ASSERT_EQUAL_FP32(0.0, s3);
  ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s4);
  ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s5);
  ASSERT_EQUAL_FP64(-1.0, d6);
  ASSERT_EQUAL_FP64(1.0, d7);
  ASSERT_EQUAL_FP64(-0.0, d8);
  ASSERT_EQUAL_FP64(0.0, d9);
  ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d10);
  ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d11);

  TEARDOWN();
}
5761
5762
// Test FABS for single and double precision: negative values, -0.0 and
// -infinity all become their positive counterparts, and applying Fabs to an
// already-positive value is a no-op.
TEST(fabs) {
  INIT_V8();
  SETUP();

  START();
  __ Fmov(s16, -1.0);
  __ Fmov(s17, -0.0);
  __ Fmov(s18, kFP32NegativeInfinity);
  __ Fmov(d19, -1.0);
  __ Fmov(d20, -0.0);
  __ Fmov(d21, kFP64NegativeInfinity);

  __ Fabs(s0, s16);
  __ Fabs(s1, s0);   // Fabs of an already-positive value.
  __ Fabs(s2, s17);
  __ Fabs(s3, s18);
  __ Fabs(d4, d19);
  __ Fabs(d5, d4);
  __ Fabs(d6, d20);
  __ Fabs(d7, d21);
  END();

  RUN();

  ASSERT_EQUAL_FP32(1.0, s0);
  ASSERT_EQUAL_FP32(1.0, s1);
  ASSERT_EQUAL_FP32(0.0, s2);
  ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s3);
  ASSERT_EQUAL_FP64(1.0, d4);
  ASSERT_EQUAL_FP64(1.0, d5);
  ASSERT_EQUAL_FP64(0.0, d6);
  ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d7);

  TEARDOWN();
}
5798
5799
5800 TEST(fsqrt) {
5801 INIT_V8();
5802 SETUP();
5803
5804 START();
5805 __ Fmov(s16, 0.0);
5806 __ Fmov(s17, 1.0);
5807 __ Fmov(s18, 0.25);
5808 __ Fmov(s19, 65536.0);
5809 __ Fmov(s20, -0.0);
5810 __ Fmov(s21, kFP32PositiveInfinity);
5811 __ Fmov(d22, 0.0);
5812 __ Fmov(d23, 1.0);
5813 __ Fmov(d24, 0.25);
5814 __ Fmov(d25, 4294967296.0);
5815 __ Fmov(d26, -0.0);
5816 __ Fmov(d27, kFP64PositiveInfinity);
5817
5818 __ Fsqrt(s0, s16);
5819 __ Fsqrt(s1, s17);
5820 __ Fsqrt(s2, s18);
5821 __ Fsqrt(s3, s19);
5822 __ Fsqrt(s4, s20);
5823 __ Fsqrt(s5, s21);
5824 __ Fsqrt(d6, d22);
5825 __ Fsqrt(d7, d23);
5826 __ Fsqrt(d8, d24);
5827 __ Fsqrt(d9, d25);
5828 __ Fsqrt(d10, d26);
5829 __ Fsqrt(d11, d27);
5830 END();
5831
5832 RUN();
5833
5834 ASSERT_EQUAL_FP32(0.0, s0);
5835 ASSERT_EQUAL_FP32(1.0, s1);
5836 ASSERT_EQUAL_FP32(0.5, s2);
5837 ASSERT_EQUAL_FP32(256.0, s3);
5838 ASSERT_EQUAL_FP32(-0.0, s4);
5839 ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s5);
5840 ASSERT_EQUAL_FP64(0.0, d6);
5841 ASSERT_EQUAL_FP64(1.0, d7);
5842 ASSERT_EQUAL_FP64(0.5, d8);
5843 ASSERT_EQUAL_FP64(65536.0, d9);
5844 ASSERT_EQUAL_FP64(-0.0, d10);
5845 ASSERT_EQUAL_FP64(kFP32PositiveInfinity, d11);
5846
5847 TEARDOWN();
5848 }
5849
5850
5851 TEST(frinta) {
5852 INIT_V8();
5853 SETUP();
5854
5855 START();
5856 __ Fmov(s16, 1.0);
5857 __ Fmov(s17, 1.1);
5858 __ Fmov(s18, 1.5);
5859 __ Fmov(s19, 1.9);
5860 __ Fmov(s20, 2.5);
5861 __ Fmov(s21, -1.5);
5862 __ Fmov(s22, -2.5);
5863 __ Fmov(s23, kFP32PositiveInfinity);
5864 __ Fmov(s24, kFP32NegativeInfinity);
5865 __ Fmov(s25, 0.0);
5866 __ Fmov(s26, -0.0);
5867
5868 __ Frinta(s0, s16);
5869 __ Frinta(s1, s17);
5870 __ Frinta(s2, s18);
5871 __ Frinta(s3, s19);
5872 __ Frinta(s4, s20);
5873 __ Frinta(s5, s21);
5874 __ Frinta(s6, s22);
5875 __ Frinta(s7, s23);
5876 __ Frinta(s8, s24);
5877 __ Frinta(s9, s25);
5878 __ Frinta(s10, s26);
5879
5880 __ Fmov(d16, 1.0);
5881 __ Fmov(d17, 1.1);
5882 __ Fmov(d18, 1.5);
5883 __ Fmov(d19, 1.9);
5884 __ Fmov(d20, 2.5);
5885 __ Fmov(d21, -1.5);
5886 __ Fmov(d22, -2.5);
5887 __ Fmov(d23, kFP32PositiveInfinity);
5888 __ Fmov(d24, kFP32NegativeInfinity);
5889 __ Fmov(d25, 0.0);
5890 __ Fmov(d26, -0.0);
5891
5892 __ Frinta(d11, d16);
5893 __ Frinta(d12, d17);
5894 __ Frinta(d13, d18);
5895 __ Frinta(d14, d19);
5896 __ Frinta(d15, d20);
5897 __ Frinta(d16, d21);
5898 __ Frinta(d17, d22);
5899 __ Frinta(d18, d23);
5900 __ Frinta(d19, d24);
5901 __ Frinta(d20, d25);
5902 __ Frinta(d21, d26);
5903 END();
5904
5905 RUN();
5906
5907 ASSERT_EQUAL_FP32(1.0, s0);
5908 ASSERT_EQUAL_FP32(1.0, s1);
5909 ASSERT_EQUAL_FP32(2.0, s2);
5910 ASSERT_EQUAL_FP32(2.0, s3);
5911 ASSERT_EQUAL_FP32(3.0, s4);
5912 ASSERT_EQUAL_FP32(-2.0, s5);
5913 ASSERT_EQUAL_FP32(-3.0, s6);
5914 ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s7);
5915 ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s8);
5916 ASSERT_EQUAL_FP32(0.0, s9);
5917 ASSERT_EQUAL_FP32(-0.0, s10);
5918 ASSERT_EQUAL_FP64(1.0, d11);
5919 ASSERT_EQUAL_FP64(1.0, d12);
5920 ASSERT_EQUAL_FP64(2.0, d13);
5921 ASSERT_EQUAL_FP64(2.0, d14);
5922 ASSERT_EQUAL_FP64(3.0, d15);
5923 ASSERT_EQUAL_FP64(-2.0, d16);
5924 ASSERT_EQUAL_FP64(-3.0, d17);
5925 ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d18);
5926 ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d19);
5927 ASSERT_EQUAL_FP64(0.0, d20);
5928 ASSERT_EQUAL_FP64(-0.0, d21);
5929
5930 TEARDOWN();
5931 }
5932
5933
5934 TEST(frintn) {
5935 INIT_V8();
5936 SETUP();
5937
5938 START();
5939 __ Fmov(s16, 1.0);
5940 __ Fmov(s17, 1.1);
5941 __ Fmov(s18, 1.5);
5942 __ Fmov(s19, 1.9);
5943 __ Fmov(s20, 2.5);
5944 __ Fmov(s21, -1.5);
5945 __ Fmov(s22, -2.5);
5946 __ Fmov(s23, kFP32PositiveInfinity);
5947 __ Fmov(s24, kFP32NegativeInfinity);
5948 __ Fmov(s25, 0.0);
5949 __ Fmov(s26, -0.0);
5950
5951 __ Frintn(s0, s16);
5952 __ Frintn(s1, s17);
5953 __ Frintn(s2, s18);
5954 __ Frintn(s3, s19);
5955 __ Frintn(s4, s20);
5956 __ Frintn(s5, s21);
5957 __ Frintn(s6, s22);
5958 __ Frintn(s7, s23);
5959 __ Frintn(s8, s24);
5960 __ Frintn(s9, s25);
5961 __ Frintn(s10, s26);
5962
5963 __ Fmov(d16, 1.0);
5964 __ Fmov(d17, 1.1);
5965 __ Fmov(d18, 1.5);
5966 __ Fmov(d19, 1.9);
5967 __ Fmov(d20, 2.5);
5968 __ Fmov(d21, -1.5);
5969 __ Fmov(d22, -2.5);
5970 __ Fmov(d23, kFP32PositiveInfinity);
5971 __ Fmov(d24, kFP32NegativeInfinity);
5972 __ Fmov(d25, 0.0);
5973 __ Fmov(d26, -0.0);
5974
5975 __ Frintn(d11, d16);
5976 __ Frintn(d12, d17);
5977 __ Frintn(d13, d18);
5978 __ Frintn(d14, d19);
5979 __ Frintn(d15, d20);
5980 __ Frintn(d16, d21);
5981 __ Frintn(d17, d22);
5982 __ Frintn(d18, d23);
5983 __ Frintn(d19, d24);
5984 __ Frintn(d20, d25);
5985 __ Frintn(d21, d26);
5986 END();
5987
5988 RUN();
5989
5990 ASSERT_EQUAL_FP32(1.0, s0);
5991 ASSERT_EQUAL_FP32(1.0, s1);
5992 ASSERT_EQUAL_FP32(2.0, s2);
5993 ASSERT_EQUAL_FP32(2.0, s3);
5994 ASSERT_EQUAL_FP32(2.0, s4);
5995 ASSERT_EQUAL_FP32(-2.0, s5);
5996 ASSERT_EQUAL_FP32(-2.0, s6);
5997 ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s7);
5998 ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s8);
5999 ASSERT_EQUAL_FP32(0.0, s9);
6000 ASSERT_EQUAL_FP32(-0.0, s10);
6001 ASSERT_EQUAL_FP64(1.0, d11);
6002 ASSERT_EQUAL_FP64(1.0, d12);
6003 ASSERT_EQUAL_FP64(2.0, d13);
6004 ASSERT_EQUAL_FP64(2.0, d14);
6005 ASSERT_EQUAL_FP64(2.0, d15);
6006 ASSERT_EQUAL_FP64(-2.0, d16);
6007 ASSERT_EQUAL_FP64(-2.0, d17);
6008 ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d18);
6009 ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d19);
6010 ASSERT_EQUAL_FP64(0.0, d20);
6011 ASSERT_EQUAL_FP64(-0.0, d21);
6012
6013 TEARDOWN();
6014 }
6015
6016
6017 TEST(frintz) {
6018 INIT_V8();
6019 SETUP();
6020
6021 START();
6022 __ Fmov(s16, 1.0);
6023 __ Fmov(s17, 1.1);
6024 __ Fmov(s18, 1.5);
6025 __ Fmov(s19, 1.9);
6026 __ Fmov(s20, 2.5);
6027 __ Fmov(s21, -1.5);
6028 __ Fmov(s22, -2.5);
6029 __ Fmov(s23, kFP32PositiveInfinity);
6030 __ Fmov(s24, kFP32NegativeInfinity);
6031 __ Fmov(s25, 0.0);
6032 __ Fmov(s26, -0.0);
6033
6034 __ Frintz(s0, s16);
6035 __ Frintz(s1, s17);
6036 __ Frintz(s2, s18);
6037 __ Frintz(s3, s19);
6038 __ Frintz(s4, s20);
6039 __ Frintz(s5, s21);
6040 __ Frintz(s6, s22);
6041 __ Frintz(s7, s23);
6042 __ Frintz(s8, s24);
6043 __ Frintz(s9, s25);
6044 __ Frintz(s10, s26);
6045
6046 __ Fmov(d16, 1.0);
6047 __ Fmov(d17, 1.1);
6048 __ Fmov(d18, 1.5);
6049 __ Fmov(d19, 1.9);
6050 __ Fmov(d20, 2.5);
6051 __ Fmov(d21, -1.5);
6052 __ Fmov(d22, -2.5);
6053 __ Fmov(d23, kFP32PositiveInfinity);
6054 __ Fmov(d24, kFP32NegativeInfinity);
6055 __ Fmov(d25, 0.0);
6056 __ Fmov(d26, -0.0);
6057
6058 __ Frintz(d11, d16);
6059 __ Frintz(d12, d17);
6060 __ Frintz(d13, d18);
6061 __ Frintz(d14, d19);
6062 __ Frintz(d15, d20);
6063 __ Frintz(d16, d21);
6064 __ Frintz(d17, d22);
6065 __ Frintz(d18, d23);
6066 __ Frintz(d19, d24);
6067 __ Frintz(d20, d25);
6068 __ Frintz(d21, d26);
6069 END();
6070
6071 RUN();
6072
6073 ASSERT_EQUAL_FP32(1.0, s0);
6074 ASSERT_EQUAL_FP32(1.0, s1);
6075 ASSERT_EQUAL_FP32(1.0, s2);
6076 ASSERT_EQUAL_FP32(1.0, s3);
6077 ASSERT_EQUAL_FP32(2.0, s4);
6078 ASSERT_EQUAL_FP32(-1.0, s5);
6079 ASSERT_EQUAL_FP32(-2.0, s6);
6080 ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s7);
6081 ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s8);
6082 ASSERT_EQUAL_FP32(0.0, s9);
6083 ASSERT_EQUAL_FP32(-0.0, s10);
6084 ASSERT_EQUAL_FP64(1.0, d11);
6085 ASSERT_EQUAL_FP64(1.0, d12);
6086 ASSERT_EQUAL_FP64(1.0, d13);
6087 ASSERT_EQUAL_FP64(1.0, d14);
6088 ASSERT_EQUAL_FP64(2.0, d15);
6089 ASSERT_EQUAL_FP64(-1.0, d16);
6090 ASSERT_EQUAL_FP64(-2.0, d17);
6091 ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d18);
6092 ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d19);
6093 ASSERT_EQUAL_FP64(0.0, d20);
6094 ASSERT_EQUAL_FP64(-0.0, d21);
6095
6096 TEARDOWN();
6097 }
6098
6099
// Test FCVT from single to double precision. Every float converts exactly
// (double has wider range and precision), and NaN payloads must follow the
// A64 conversion rules checked at the end.
TEST(fcvt_ds) {
  INIT_V8();
  SETUP();

  START();
  __ Fmov(s16, 1.0);
  __ Fmov(s17, 1.1);
  __ Fmov(s18, 1.5);
  __ Fmov(s19, 1.9);
  __ Fmov(s20, 2.5);
  __ Fmov(s21, -1.5);
  __ Fmov(s22, -2.5);
  __ Fmov(s23, kFP32PositiveInfinity);
  __ Fmov(s24, kFP32NegativeInfinity);
  __ Fmov(s25, 0.0);
  __ Fmov(s26, -0.0);
  __ Fmov(s27, FLT_MAX);
  __ Fmov(s28, FLT_MIN);
  __ Fmov(s29, rawbits_to_float(0x7fc12345));   // Quiet NaN.
  __ Fmov(s30, rawbits_to_float(0x7f812345));   // Signalling NaN.

  __ Fcvt(d0, s16);
  __ Fcvt(d1, s17);
  __ Fcvt(d2, s18);
  __ Fcvt(d3, s19);
  __ Fcvt(d4, s20);
  __ Fcvt(d5, s21);
  __ Fcvt(d6, s22);
  __ Fcvt(d7, s23);
  __ Fcvt(d8, s24);
  __ Fcvt(d9, s25);
  __ Fcvt(d10, s26);
  __ Fcvt(d11, s27);
  __ Fcvt(d12, s28);
  __ Fcvt(d13, s29);
  __ Fcvt(d14, s30);
  END();

  RUN();

  // The `f` suffixes below make the expected values go through the same
  // float -> double conversion as the test inputs.
  ASSERT_EQUAL_FP64(1.0f, d0);
  ASSERT_EQUAL_FP64(1.1f, d1);
  ASSERT_EQUAL_FP64(1.5f, d2);
  ASSERT_EQUAL_FP64(1.9f, d3);
  ASSERT_EQUAL_FP64(2.5f, d4);
  ASSERT_EQUAL_FP64(-1.5f, d5);
  ASSERT_EQUAL_FP64(-2.5f, d6);
  ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d7);
  ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d8);
  ASSERT_EQUAL_FP64(0.0f, d9);
  ASSERT_EQUAL_FP64(-0.0f, d10);
  ASSERT_EQUAL_FP64(FLT_MAX, d11);
  ASSERT_EQUAL_FP64(FLT_MIN, d12);

  // Check that the NaN payload is preserved according to A64 conversion rules:
  // - The sign bit is preserved.
  // - The top bit of the mantissa is forced to 1 (making it a quiet NaN).
  // - The remaining mantissa bits are copied until they run out.
  // - The low-order bits that haven't already been assigned are set to 0.
  ASSERT_EQUAL_FP64(rawbits_to_double(0x7ff82468a0000000), d13);
  ASSERT_EQUAL_FP64(rawbits_to_double(0x7ff82468a0000000), d14);

  TEARDOWN();
}
6164
6165
// Test Fcvt narrowing conversions (double -> single), driven by a table of
// corner cases: exact values, rounding boundaries, overflow to infinity,
// underflow to (sub)normals, and NaN payload handling.
TEST(fcvt_sd) {
  INIT_V8();
  // There are a huge number of corner-cases to check, so this test iterates
  // through a list. The list is then negated and checked again (since the sign
  // is irrelevant in ties-to-even rounding), so the list shouldn't include any
  // negative values.
  //
  // Note that this test only checks ties-to-even rounding, because that is all
  // that the simulator supports.
  struct {double in; float expected;} test[] = {
    // Check some simple conversions.
    {0.0, 0.0f},
    {1.0, 1.0f},
    {1.5, 1.5f},
    {2.0, 2.0f},
    {FLT_MAX, FLT_MAX},
    //  - The smallest normalized float.
    {pow(2.0, -126), powf(2, -126)},
    //  - Normal floats that need (ties-to-even) rounding.
    //    For normalized numbers:
    //         bit 29 (0x0000000020000000) is the lowest-order bit which will
    //                                     fit in the float's mantissa.
    {rawbits_to_double(0x3ff0000000000000), rawbits_to_float(0x3f800000)},
    {rawbits_to_double(0x3ff0000000000001), rawbits_to_float(0x3f800000)},
    {rawbits_to_double(0x3ff0000010000000), rawbits_to_float(0x3f800000)},
    {rawbits_to_double(0x3ff0000010000001), rawbits_to_float(0x3f800001)},
    {rawbits_to_double(0x3ff0000020000000), rawbits_to_float(0x3f800001)},
    {rawbits_to_double(0x3ff0000020000001), rawbits_to_float(0x3f800001)},
    {rawbits_to_double(0x3ff0000030000000), rawbits_to_float(0x3f800002)},
    {rawbits_to_double(0x3ff0000030000001), rawbits_to_float(0x3f800002)},
    {rawbits_to_double(0x3ff0000040000000), rawbits_to_float(0x3f800002)},
    {rawbits_to_double(0x3ff0000040000001), rawbits_to_float(0x3f800002)},
    {rawbits_to_double(0x3ff0000050000000), rawbits_to_float(0x3f800002)},
    {rawbits_to_double(0x3ff0000050000001), rawbits_to_float(0x3f800003)},
    {rawbits_to_double(0x3ff0000060000000), rawbits_to_float(0x3f800003)},
    //  - A mantissa that overflows into the exponent during rounding.
    {rawbits_to_double(0x3feffffff0000000), rawbits_to_float(0x3f800000)},
    //  - The largest double that rounds to a normal float.
    {rawbits_to_double(0x47efffffefffffff), rawbits_to_float(0x7f7fffff)},

    // Doubles that are too big for a float.
    {kFP64PositiveInfinity, kFP32PositiveInfinity},
    {DBL_MAX, kFP32PositiveInfinity},
    //  - The smallest exponent that's too big for a float.
    {pow(2.0, 128), kFP32PositiveInfinity},
    //  - This exponent is in range, but the value rounds to infinity.
    {rawbits_to_double(0x47effffff0000000), kFP32PositiveInfinity},

    // Doubles that are too small for a float.
    //  - The smallest (subnormal) double.
    {DBL_MIN, 0.0},
    //  - The largest double which is too small for a subnormal float.
    {rawbits_to_double(0x3690000000000000), rawbits_to_float(0x00000000)},

    // Normal doubles that become subnormal floats.
    //  - The largest subnormal float.
    {rawbits_to_double(0x380fffffc0000000), rawbits_to_float(0x007fffff)},
    //  - The smallest subnormal float.
    {rawbits_to_double(0x36a0000000000000), rawbits_to_float(0x00000001)},
    //  - Subnormal floats that need (ties-to-even) rounding.
    //    For these subnormals:
    //         bit 34 (0x0000000400000000) is the lowest-order bit which will
    //                                     fit in the float's mantissa.
    {rawbits_to_double(0x37c159e000000000), rawbits_to_float(0x00045678)},
    {rawbits_to_double(0x37c159e000000001), rawbits_to_float(0x00045678)},
    {rawbits_to_double(0x37c159e200000000), rawbits_to_float(0x00045678)},
    {rawbits_to_double(0x37c159e200000001), rawbits_to_float(0x00045679)},
    {rawbits_to_double(0x37c159e400000000), rawbits_to_float(0x00045679)},
    {rawbits_to_double(0x37c159e400000001), rawbits_to_float(0x00045679)},
    {rawbits_to_double(0x37c159e600000000), rawbits_to_float(0x0004567a)},
    {rawbits_to_double(0x37c159e600000001), rawbits_to_float(0x0004567a)},
    {rawbits_to_double(0x37c159e800000000), rawbits_to_float(0x0004567a)},
    {rawbits_to_double(0x37c159e800000001), rawbits_to_float(0x0004567a)},
    {rawbits_to_double(0x37c159ea00000000), rawbits_to_float(0x0004567a)},
    {rawbits_to_double(0x37c159ea00000001), rawbits_to_float(0x0004567b)},
    {rawbits_to_double(0x37c159ec00000000), rawbits_to_float(0x0004567b)},
    //  - The smallest double which rounds up to become a subnormal float.
    {rawbits_to_double(0x3690000000000001), rawbits_to_float(0x00000001)},

    // Check NaN payload preservation.
    {rawbits_to_double(0x7ff82468a0000000), rawbits_to_float(0x7fc12345)},
    {rawbits_to_double(0x7ff82468bfffffff), rawbits_to_float(0x7fc12345)},
    //  - Signalling NaNs become quiet NaNs.
    {rawbits_to_double(0x7ff02468a0000000), rawbits_to_float(0x7fc12345)},
    {rawbits_to_double(0x7ff02468bfffffff), rawbits_to_float(0x7fc12345)},
    {rawbits_to_double(0x7ff000001fffffff), rawbits_to_float(0x7fc00000)},
  };
  int count = sizeof(test) / sizeof(test[0]);

  for (int i = 0; i < count; i++) {
    double in = test[i].in;
    float expected = test[i].expected;

    // We only expect positive input.
    ASSERT(std::signbit(in) == 0);
    ASSERT(std::signbit(expected) == 0);

    // Each case gets its own code generation pass: convert the value and its
    // negation, then check both results.
    SETUP();
    START();

    __ Fmov(d10, in);
    __ Fcvt(s20, d10);

    __ Fmov(d11, -in);
    __ Fcvt(s21, d11);

    END();
    RUN();
    ASSERT_EQUAL_FP32(expected, s20);
    ASSERT_EQUAL_FP32(-expected, s21);
    TEARDOWN();
  }
}
6279
6280
// Test Fcvtas/Fcvtas (FP -> signed int, round to nearest with ties away from
// zero), for all four register combinations (w/x destinations, s/d sources),
// including saturation at INT32/INT64 bounds.
TEST(fcvtas) {
  INIT_V8();
  SETUP();

  START();
  __ Fmov(s0, 1.0);
  __ Fmov(s1, 1.1);
  __ Fmov(s2, 2.5);
  __ Fmov(s3, -2.5);
  __ Fmov(s4, kFP32PositiveInfinity);
  __ Fmov(s5, kFP32NegativeInfinity);
  __ Fmov(s6, 0x7fffff80);  // Largest float < INT32_MAX.
  __ Fneg(s7, s6);          // Smallest float > INT32_MIN.
  __ Fmov(d8, 1.0);
  __ Fmov(d9, 1.1);
  __ Fmov(d10, 2.5);
  __ Fmov(d11, -2.5);
  __ Fmov(d12, kFP64PositiveInfinity);
  __ Fmov(d13, kFP64NegativeInfinity);
  __ Fmov(d14, kWMaxInt - 1);
  __ Fmov(d15, kWMinInt + 1);
  __ Fmov(s17, 1.1);
  __ Fmov(s18, 2.5);
  __ Fmov(s19, -2.5);
  __ Fmov(s20, kFP32PositiveInfinity);
  __ Fmov(s21, kFP32NegativeInfinity);
  __ Fmov(s22, 0x7fffff8000000000UL);  // Largest float < INT64_MAX.
  __ Fneg(s23, s22);                   // Smallest float > INT64_MIN.
  __ Fmov(d24, 1.1);
  __ Fmov(d25, 2.5);
  __ Fmov(d26, -2.5);
  __ Fmov(d27, kFP64PositiveInfinity);
  __ Fmov(d28, kFP64NegativeInfinity);
  __ Fmov(d29, 0x7ffffffffffffc00UL);  // Largest double < INT64_MAX.
  __ Fneg(d30, d29);                   // Smallest double > INT64_MIN.

  __ Fcvtas(w0, s0);
  __ Fcvtas(w1, s1);
  __ Fcvtas(w2, s2);
  __ Fcvtas(w3, s3);
  __ Fcvtas(w4, s4);
  __ Fcvtas(w5, s5);
  __ Fcvtas(w6, s6);
  __ Fcvtas(w7, s7);
  __ Fcvtas(w8, d8);
  __ Fcvtas(w9, d9);
  __ Fcvtas(w10, d10);
  __ Fcvtas(w11, d11);
  __ Fcvtas(w12, d12);
  __ Fcvtas(w13, d13);
  __ Fcvtas(w14, d14);
  __ Fcvtas(w15, d15);
  __ Fcvtas(x17, s17);
  __ Fcvtas(x18, s18);
  __ Fcvtas(x19, s19);
  __ Fcvtas(x20, s20);
  __ Fcvtas(x21, s21);
  __ Fcvtas(x22, s22);
  __ Fcvtas(x23, s23);
  __ Fcvtas(x24, d24);
  __ Fcvtas(x25, d25);
  __ Fcvtas(x26, d26);
  __ Fcvtas(x27, d27);
  __ Fcvtas(x28, d28);
  __ Fcvtas(x29, d29);
  __ Fcvtas(x30, d30);
  END();

  RUN();

  // Ties (2.5, -2.5) round away from zero; infinities saturate to the
  // destination type's extremes.
  ASSERT_EQUAL_64(1, x0);
  ASSERT_EQUAL_64(1, x1);
  ASSERT_EQUAL_64(3, x2);
  ASSERT_EQUAL_64(0xfffffffd, x3);
  ASSERT_EQUAL_64(0x7fffffff, x4);
  ASSERT_EQUAL_64(0x80000000, x5);
  ASSERT_EQUAL_64(0x7fffff80, x6);
  ASSERT_EQUAL_64(0x80000080, x7);
  ASSERT_EQUAL_64(1, x8);
  ASSERT_EQUAL_64(1, x9);
  ASSERT_EQUAL_64(3, x10);
  ASSERT_EQUAL_64(0xfffffffd, x11);
  ASSERT_EQUAL_64(0x7fffffff, x12);
  ASSERT_EQUAL_64(0x80000000, x13);
  ASSERT_EQUAL_64(0x7ffffffe, x14);
  ASSERT_EQUAL_64(0x80000001, x15);
  ASSERT_EQUAL_64(1, x17);
  ASSERT_EQUAL_64(3, x18);
  ASSERT_EQUAL_64(0xfffffffffffffffdUL, x19);
  ASSERT_EQUAL_64(0x7fffffffffffffffUL, x20);
  ASSERT_EQUAL_64(0x8000000000000000UL, x21);
  ASSERT_EQUAL_64(0x7fffff8000000000UL, x22);
  ASSERT_EQUAL_64(0x8000008000000000UL, x23);
  ASSERT_EQUAL_64(1, x24);
  ASSERT_EQUAL_64(3, x25);
  ASSERT_EQUAL_64(0xfffffffffffffffdUL, x26);
  ASSERT_EQUAL_64(0x7fffffffffffffffUL, x27);
  ASSERT_EQUAL_64(0x8000000000000000UL, x28);
  ASSERT_EQUAL_64(0x7ffffffffffffc00UL, x29);
  ASSERT_EQUAL_64(0x8000000000000400UL, x30);

  TEARDOWN();
}
6384
6385
6386 TEST(fcvtau) {
6387 INIT_V8();
6388 SETUP();
6389
6390 START();
6391 __ Fmov(s0, 1.0);
6392 __ Fmov(s1, 1.1);
6393 __ Fmov(s2, 2.5);
6394 __ Fmov(s3, -2.5);
6395 __ Fmov(s4, kFP32PositiveInfinity);
6396 __ Fmov(s5, kFP32NegativeInfinity);
6397 __ Fmov(s6, 0xffffff00); // Largest float < UINT32_MAX.
6398 __ Fmov(d8, 1.0);
6399 __ Fmov(d9, 1.1);
6400 __ Fmov(d10, 2.5);
6401 __ Fmov(d11, -2.5);
6402 __ Fmov(d12, kFP64PositiveInfinity);
6403 __ Fmov(d13, kFP64NegativeInfinity);
6404 __ Fmov(d14, 0xfffffffe);
6405 __ Fmov(s16, 1.0);
6406 __ Fmov(s17, 1.1);
6407 __ Fmov(s18, 2.5);
6408 __ Fmov(s19, -2.5);
6409 __ Fmov(s20, kFP32PositiveInfinity);
6410 __ Fmov(s21, kFP32NegativeInfinity);
6411 __ Fmov(s22, 0xffffff0000000000UL); // Largest float < UINT64_MAX.
6412 __ Fmov(d24, 1.1);
6413 __ Fmov(d25, 2.5);
6414 __ Fmov(d26, -2.5);
6415 __ Fmov(d27, kFP64PositiveInfinity);
6416 __ Fmov(d28, kFP64NegativeInfinity);
6417 __ Fmov(d29, 0xfffffffffffff800UL); // Largest double < UINT64_MAX.
6418 __ Fmov(s30, 0x100000000UL);
6419
6420 __ Fcvtau(w0, s0);
6421 __ Fcvtau(w1, s1);
6422 __ Fcvtau(w2, s2);
6423 __ Fcvtau(w3, s3);
6424 __ Fcvtau(w4, s4);
6425 __ Fcvtau(w5, s5);
6426 __ Fcvtau(w6, s6);
6427 __ Fcvtau(w8, d8);
6428 __ Fcvtau(w9, d9);
6429 __ Fcvtau(w10, d10);
6430 __ Fcvtau(w11, d11);
6431 __ Fcvtau(w12, d12);
6432 __ Fcvtau(w13, d13);
6433 __ Fcvtau(w14, d14);
6434 __ Fcvtau(w15, d15);
6435 __ Fcvtau(x16, s16);
6436 __ Fcvtau(x17, s17);
6437 __ Fcvtau(x18, s18);
6438 __ Fcvtau(x19, s19);
6439 __ Fcvtau(x20, s20);
6440 __ Fcvtau(x21, s21);
6441 __ Fcvtau(x22, s22);
6442 __ Fcvtau(x24, d24);
6443 __ Fcvtau(x25, d25);
6444 __ Fcvtau(x26, d26);
6445 __ Fcvtau(x27, d27);
6446 __ Fcvtau(x28, d28);
6447 __ Fcvtau(x29, d29);
6448 __ Fcvtau(w30, s30);
6449 END();
6450
6451 RUN();
6452
6453 ASSERT_EQUAL_64(1, x0);
6454 ASSERT_EQUAL_64(1, x1);
6455 ASSERT_EQUAL_64(3, x2);
6456 ASSERT_EQUAL_64(0, x3);
6457 ASSERT_EQUAL_64(0xffffffff, x4);
6458 ASSERT_EQUAL_64(0, x5);
6459 ASSERT_EQUAL_64(0xffffff00, x6);
6460 ASSERT_EQUAL_64(1, x8);
6461 ASSERT_EQUAL_64(1, x9);
6462 ASSERT_EQUAL_64(3, x10);
6463 ASSERT_EQUAL_64(0, x11);
6464 ASSERT_EQUAL_64(0xffffffff, x12);
6465 ASSERT_EQUAL_64(0, x13);
6466 ASSERT_EQUAL_64(0xfffffffe, x14);
6467 ASSERT_EQUAL_64(1, x16);
6468 ASSERT_EQUAL_64(1, x17);
6469 ASSERT_EQUAL_64(3, x18);
6470 ASSERT_EQUAL_64(0, x19);
6471 ASSERT_EQUAL_64(0xffffffffffffffffUL, x20);
6472 ASSERT_EQUAL_64(0, x21);
6473 ASSERT_EQUAL_64(0xffffff0000000000UL, x22);
6474 ASSERT_EQUAL_64(1, x24);
6475 ASSERT_EQUAL_64(3, x25);
6476 ASSERT_EQUAL_64(0, x26);
6477 ASSERT_EQUAL_64(0xffffffffffffffffUL, x27);
6478 ASSERT_EQUAL_64(0, x28);
6479 ASSERT_EQUAL_64(0xfffffffffffff800UL, x29);
6480 ASSERT_EQUAL_64(0xffffffff, x30);
6481
6482 TEARDOWN();
6483 }
6484
6485
// Test Fcvtms (FP -> signed int, round towards minus infinity / floor), for
// w/x destinations and s/d sources, including saturation at INT32/INT64
// bounds.
TEST(fcvtms) {
  INIT_V8();
  SETUP();

  START();
  __ Fmov(s0, 1.0);
  __ Fmov(s1, 1.1);
  __ Fmov(s2, 1.5);
  __ Fmov(s3, -1.5);
  __ Fmov(s4, kFP32PositiveInfinity);
  __ Fmov(s5, kFP32NegativeInfinity);
  __ Fmov(s6, 0x7fffff80);  // Largest float < INT32_MAX.
  __ Fneg(s7, s6);          // Smallest float > INT32_MIN.
  __ Fmov(d8, 1.0);
  __ Fmov(d9, 1.1);
  __ Fmov(d10, 1.5);
  __ Fmov(d11, -1.5);
  __ Fmov(d12, kFP64PositiveInfinity);
  __ Fmov(d13, kFP64NegativeInfinity);
  __ Fmov(d14, kWMaxInt - 1);
  __ Fmov(d15, kWMinInt + 1);
  __ Fmov(s17, 1.1);
  __ Fmov(s18, 1.5);
  __ Fmov(s19, -1.5);
  __ Fmov(s20, kFP32PositiveInfinity);
  __ Fmov(s21, kFP32NegativeInfinity);
  __ Fmov(s22, 0x7fffff8000000000UL);  // Largest float < INT64_MAX.
  __ Fneg(s23, s22);                   // Smallest float > INT64_MIN.
  __ Fmov(d24, 1.1);
  __ Fmov(d25, 1.5);
  __ Fmov(d26, -1.5);
  __ Fmov(d27, kFP64PositiveInfinity);
  __ Fmov(d28, kFP64NegativeInfinity);
  __ Fmov(d29, 0x7ffffffffffffc00UL);  // Largest double < INT64_MAX.
  __ Fneg(d30, d29);                   // Smallest double > INT64_MIN.

  __ Fcvtms(w0, s0);
  __ Fcvtms(w1, s1);
  __ Fcvtms(w2, s2);
  __ Fcvtms(w3, s3);
  __ Fcvtms(w4, s4);
  __ Fcvtms(w5, s5);
  __ Fcvtms(w6, s6);
  __ Fcvtms(w7, s7);
  __ Fcvtms(w8, d8);
  __ Fcvtms(w9, d9);
  __ Fcvtms(w10, d10);
  __ Fcvtms(w11, d11);
  __ Fcvtms(w12, d12);
  __ Fcvtms(w13, d13);
  __ Fcvtms(w14, d14);
  __ Fcvtms(w15, d15);
  __ Fcvtms(x17, s17);
  __ Fcvtms(x18, s18);
  __ Fcvtms(x19, s19);
  __ Fcvtms(x20, s20);
  __ Fcvtms(x21, s21);
  __ Fcvtms(x22, s22);
  __ Fcvtms(x23, s23);
  __ Fcvtms(x24, d24);
  __ Fcvtms(x25, d25);
  __ Fcvtms(x26, d26);
  __ Fcvtms(x27, d27);
  __ Fcvtms(x28, d28);
  __ Fcvtms(x29, d29);
  __ Fcvtms(x30, d30);
  END();

  RUN();

  // Rounding towards -infinity: 1.5 -> 1, -1.5 -> -2 (0xfffffffe).
  ASSERT_EQUAL_64(1, x0);
  ASSERT_EQUAL_64(1, x1);
  ASSERT_EQUAL_64(1, x2);
  ASSERT_EQUAL_64(0xfffffffe, x3);
  ASSERT_EQUAL_64(0x7fffffff, x4);
  ASSERT_EQUAL_64(0x80000000, x5);
  ASSERT_EQUAL_64(0x7fffff80, x6);
  ASSERT_EQUAL_64(0x80000080, x7);
  ASSERT_EQUAL_64(1, x8);
  ASSERT_EQUAL_64(1, x9);
  ASSERT_EQUAL_64(1, x10);
  ASSERT_EQUAL_64(0xfffffffe, x11);
  ASSERT_EQUAL_64(0x7fffffff, x12);
  ASSERT_EQUAL_64(0x80000000, x13);
  ASSERT_EQUAL_64(0x7ffffffe, x14);
  ASSERT_EQUAL_64(0x80000001, x15);
  ASSERT_EQUAL_64(1, x17);
  ASSERT_EQUAL_64(1, x18);
  ASSERT_EQUAL_64(0xfffffffffffffffeUL, x19);
  ASSERT_EQUAL_64(0x7fffffffffffffffUL, x20);
  ASSERT_EQUAL_64(0x8000000000000000UL, x21);
  ASSERT_EQUAL_64(0x7fffff8000000000UL, x22);
  ASSERT_EQUAL_64(0x8000008000000000UL, x23);
  ASSERT_EQUAL_64(1, x24);
  ASSERT_EQUAL_64(1, x25);
  ASSERT_EQUAL_64(0xfffffffffffffffeUL, x26);
  ASSERT_EQUAL_64(0x7fffffffffffffffUL, x27);
  ASSERT_EQUAL_64(0x8000000000000000UL, x28);
  ASSERT_EQUAL_64(0x7ffffffffffffc00UL, x29);
  ASSERT_EQUAL_64(0x8000000000000400UL, x30);

  TEARDOWN();
}
6589
6590
6591 TEST(fcvtmu) {
6592 INIT_V8();
6593 SETUP();
6594
6595 START();
6596 __ Fmov(s0, 1.0);
6597 __ Fmov(s1, 1.1);
6598 __ Fmov(s2, 1.5);
6599 __ Fmov(s3, -1.5);
6600 __ Fmov(s4, kFP32PositiveInfinity);
6601 __ Fmov(s5, kFP32NegativeInfinity);
6602 __ Fmov(s6, 0x7fffff80); // Largest float < INT32_MAX.
6603 __ Fneg(s7, s6); // Smallest float > INT32_MIN.
6604 __ Fmov(d8, 1.0);
6605 __ Fmov(d9, 1.1);
6606 __ Fmov(d10, 1.5);
6607 __ Fmov(d11, -1.5);
6608 __ Fmov(d12, kFP64PositiveInfinity);
6609 __ Fmov(d13, kFP64NegativeInfinity);
6610 __ Fmov(d14, kWMaxInt - 1);
6611 __ Fmov(d15, kWMinInt + 1);
6612 __ Fmov(s17, 1.1);
6613 __ Fmov(s18, 1.5);
6614 __ Fmov(s19, -1.5);
6615 __ Fmov(s20, kFP32PositiveInfinity);
6616 __ Fmov(s21, kFP32NegativeInfinity);
6617 __ Fmov(s22, 0x7fffff8000000000UL); // Largest float < INT64_MAX.
6618 __ Fneg(s23, s22); // Smallest float > INT64_MIN.
6619 __ Fmov(d24, 1.1);
6620 __ Fmov(d25, 1.5);
6621 __ Fmov(d26, -1.5);
6622 __ Fmov(d27, kFP64PositiveInfinity);
6623 __ Fmov(d28, kFP64NegativeInfinity);
6624 __ Fmov(d29, 0x7ffffffffffffc00UL); // Largest double < INT64_MAX.
6625 __ Fneg(d30, d29); // Smallest double > INT64_MIN.
6626
6627 __ Fcvtmu(w0, s0);
6628 __ Fcvtmu(w1, s1);
6629 __ Fcvtmu(w2, s2);
6630 __ Fcvtmu(w3, s3);
6631 __ Fcvtmu(w4, s4);
6632 __ Fcvtmu(w5, s5);
6633 __ Fcvtmu(w6, s6);
6634 __ Fcvtmu(w7, s7);
6635 __ Fcvtmu(w8, d8);
6636 __ Fcvtmu(w9, d9);
6637 __ Fcvtmu(w10, d10);
6638 __ Fcvtmu(w11, d11);
6639 __ Fcvtmu(w12, d12);
6640 __ Fcvtmu(w13, d13);
6641 __ Fcvtmu(w14, d14);
6642 __ Fcvtmu(x17, s17);
6643 __ Fcvtmu(x18, s18);
6644 __ Fcvtmu(x19, s19);
6645 __ Fcvtmu(x20, s20);
6646 __ Fcvtmu(x21, s21);
6647 __ Fcvtmu(x22, s22);
6648 __ Fcvtmu(x23, s23);
6649 __ Fcvtmu(x24, d24);
6650 __ Fcvtmu(x25, d25);
6651 __ Fcvtmu(x26, d26);
6652 __ Fcvtmu(x27, d27);
6653 __ Fcvtmu(x28, d28);
6654 __ Fcvtmu(x29, d29);
6655 __ Fcvtmu(x30, d30);
6656 END();
6657
6658 RUN();
6659
6660 ASSERT_EQUAL_64(1, x0);
6661 ASSERT_EQUAL_64(1, x1);
6662 ASSERT_EQUAL_64(1, x2);
6663 ASSERT_EQUAL_64(0, x3);
6664 ASSERT_EQUAL_64(0xffffffff, x4);
6665 ASSERT_EQUAL_64(0, x5);
6666 ASSERT_EQUAL_64(0x7fffff80, x6);
6667 ASSERT_EQUAL_64(0, x7);
6668 ASSERT_EQUAL_64(1, x8);
6669 ASSERT_EQUAL_64(1, x9);
6670 ASSERT_EQUAL_64(1, x10);
6671 ASSERT_EQUAL_64(0, x11);
6672 ASSERT_EQUAL_64(0xffffffff, x12);
6673 ASSERT_EQUAL_64(0, x13);
6674 ASSERT_EQUAL_64(0x7ffffffe, x14);
6675 ASSERT_EQUAL_64(1, x17);
6676 ASSERT_EQUAL_64(1, x18);
6677 ASSERT_EQUAL_64(0x0UL, x19);
6678 ASSERT_EQUAL_64(0xffffffffffffffffUL, x20);
6679 ASSERT_EQUAL_64(0x0UL, x21);
6680 ASSERT_EQUAL_64(0x7fffff8000000000UL, x22);
6681 ASSERT_EQUAL_64(0x0UL, x23);
6682 ASSERT_EQUAL_64(1, x24);
6683 ASSERT_EQUAL_64(1, x25);
6684 ASSERT_EQUAL_64(0x0UL, x26);
6685 ASSERT_EQUAL_64(0xffffffffffffffffUL, x27);
6686 ASSERT_EQUAL_64(0x0UL, x28);
6687 ASSERT_EQUAL_64(0x7ffffffffffffc00UL, x29);
6688 ASSERT_EQUAL_64(0x0UL, x30);
6689
6690 TEARDOWN();
6691 }
6692
6693
// Test Fcvtns (FP -> signed int, round to nearest with ties to even), for
// w/x destinations and s/d sources, including saturation at INT32/INT64
// bounds.
TEST(fcvtns) {
  INIT_V8();
  SETUP();

  START();
  __ Fmov(s0, 1.0);
  __ Fmov(s1, 1.1);
  __ Fmov(s2, 1.5);
  __ Fmov(s3, -1.5);
  __ Fmov(s4, kFP32PositiveInfinity);
  __ Fmov(s5, kFP32NegativeInfinity);
  __ Fmov(s6, 0x7fffff80);  // Largest float < INT32_MAX.
  __ Fneg(s7, s6);          // Smallest float > INT32_MIN.
  __ Fmov(d8, 1.0);
  __ Fmov(d9, 1.1);
  __ Fmov(d10, 1.5);
  __ Fmov(d11, -1.5);
  __ Fmov(d12, kFP64PositiveInfinity);
  __ Fmov(d13, kFP64NegativeInfinity);
  __ Fmov(d14, kWMaxInt - 1);
  __ Fmov(d15, kWMinInt + 1);
  __ Fmov(s17, 1.1);
  __ Fmov(s18, 1.5);
  __ Fmov(s19, -1.5);
  __ Fmov(s20, kFP32PositiveInfinity);
  __ Fmov(s21, kFP32NegativeInfinity);
  __ Fmov(s22, 0x7fffff8000000000UL);  // Largest float < INT64_MAX.
  __ Fneg(s23, s22);                   // Smallest float > INT64_MIN.
  __ Fmov(d24, 1.1);
  __ Fmov(d25, 1.5);
  __ Fmov(d26, -1.5);
  __ Fmov(d27, kFP64PositiveInfinity);
  __ Fmov(d28, kFP64NegativeInfinity);
  __ Fmov(d29, 0x7ffffffffffffc00UL);  // Largest double < INT64_MAX.
  __ Fneg(d30, d29);                   // Smallest double > INT64_MIN.

  __ Fcvtns(w0, s0);
  __ Fcvtns(w1, s1);
  __ Fcvtns(w2, s2);
  __ Fcvtns(w3, s3);
  __ Fcvtns(w4, s4);
  __ Fcvtns(w5, s5);
  __ Fcvtns(w6, s6);
  __ Fcvtns(w7, s7);
  __ Fcvtns(w8, d8);
  __ Fcvtns(w9, d9);
  __ Fcvtns(w10, d10);
  __ Fcvtns(w11, d11);
  __ Fcvtns(w12, d12);
  __ Fcvtns(w13, d13);
  __ Fcvtns(w14, d14);
  __ Fcvtns(w15, d15);
  __ Fcvtns(x17, s17);
  __ Fcvtns(x18, s18);
  __ Fcvtns(x19, s19);
  __ Fcvtns(x20, s20);
  __ Fcvtns(x21, s21);
  __ Fcvtns(x22, s22);
  __ Fcvtns(x23, s23);
  __ Fcvtns(x24, d24);
  __ Fcvtns(x25, d25);
  __ Fcvtns(x26, d26);
  __ Fcvtns(x27, d27);
  // NOTE(review): the x28 <- d28 (-infinity) conversion below is deliberately
  // disabled, together with its assertion — presumably a known simulator or
  // assembler limitation at the time; confirm before re-enabling.
  // __ Fcvtns(x28, d28);
  __ Fcvtns(x29, d29);
  __ Fcvtns(x30, d30);
  END();

  RUN();

  // Ties round to even: 1.5 -> 2, -1.5 -> -2 (0xfffffffe).
  ASSERT_EQUAL_64(1, x0);
  ASSERT_EQUAL_64(1, x1);
  ASSERT_EQUAL_64(2, x2);
  ASSERT_EQUAL_64(0xfffffffe, x3);
  ASSERT_EQUAL_64(0x7fffffff, x4);
  ASSERT_EQUAL_64(0x80000000, x5);
  ASSERT_EQUAL_64(0x7fffff80, x6);
  ASSERT_EQUAL_64(0x80000080, x7);
  ASSERT_EQUAL_64(1, x8);
  ASSERT_EQUAL_64(1, x9);
  ASSERT_EQUAL_64(2, x10);
  ASSERT_EQUAL_64(0xfffffffe, x11);
  ASSERT_EQUAL_64(0x7fffffff, x12);
  ASSERT_EQUAL_64(0x80000000, x13);
  ASSERT_EQUAL_64(0x7ffffffe, x14);
  ASSERT_EQUAL_64(0x80000001, x15);
  ASSERT_EQUAL_64(1, x17);
  ASSERT_EQUAL_64(2, x18);
  ASSERT_EQUAL_64(0xfffffffffffffffeUL, x19);
  ASSERT_EQUAL_64(0x7fffffffffffffffUL, x20);
  ASSERT_EQUAL_64(0x8000000000000000UL, x21);
  ASSERT_EQUAL_64(0x7fffff8000000000UL, x22);
  ASSERT_EQUAL_64(0x8000008000000000UL, x23);
  ASSERT_EQUAL_64(1, x24);
  ASSERT_EQUAL_64(2, x25);
  ASSERT_EQUAL_64(0xfffffffffffffffeUL, x26);
  ASSERT_EQUAL_64(0x7fffffffffffffffUL, x27);
  // ASSERT_EQUAL_64(0x8000000000000000UL, x28);
  ASSERT_EQUAL_64(0x7ffffffffffffc00UL, x29);
  ASSERT_EQUAL_64(0x8000000000000400UL, x30);

  TEARDOWN();
}
6797
6798
6799 TEST(fcvtnu) {
6800 INIT_V8();
6801 SETUP();
6802
6803 START();
6804 __ Fmov(s0, 1.0);
6805 __ Fmov(s1, 1.1);
6806 __ Fmov(s2, 1.5);
6807 __ Fmov(s3, -1.5);
6808 __ Fmov(s4, kFP32PositiveInfinity);
6809 __ Fmov(s5, kFP32NegativeInfinity);
6810 __ Fmov(s6, 0xffffff00); // Largest float < UINT32_MAX.
6811 __ Fmov(d8, 1.0);
6812 __ Fmov(d9, 1.1);
6813 __ Fmov(d10, 1.5);
6814 __ Fmov(d11, -1.5);
6815 __ Fmov(d12, kFP64PositiveInfinity);
6816 __ Fmov(d13, kFP64NegativeInfinity);
6817 __ Fmov(d14, 0xfffffffe);
6818 __ Fmov(s16, 1.0);
6819 __ Fmov(s17, 1.1);
6820 __ Fmov(s18, 1.5);
6821 __ Fmov(s19, -1.5);
6822 __ Fmov(s20, kFP32PositiveInfinity);
6823 __ Fmov(s21, kFP32NegativeInfinity);
6824 __ Fmov(s22, 0xffffff0000000000UL); // Largest float < UINT64_MAX.
6825 __ Fmov(d24, 1.1);
6826 __ Fmov(d25, 1.5);
6827 __ Fmov(d26, -1.5);
6828 __ Fmov(d27, kFP64PositiveInfinity);
6829 __ Fmov(d28, kFP64NegativeInfinity);
6830 __ Fmov(d29, 0xfffffffffffff800UL); // Largest double < UINT64_MAX.
6831 __ Fmov(s30, 0x100000000UL);
6832
6833 __ Fcvtnu(w0, s0);
6834 __ Fcvtnu(w1, s1);
6835 __ Fcvtnu(w2, s2);
6836 __ Fcvtnu(w3, s3);
6837 __ Fcvtnu(w4, s4);
6838 __ Fcvtnu(w5, s5);
6839 __ Fcvtnu(w6, s6);
6840 __ Fcvtnu(w8, d8);
6841 __ Fcvtnu(w9, d9);
6842 __ Fcvtnu(w10, d10);
6843 __ Fcvtnu(w11, d11);
6844 __ Fcvtnu(w12, d12);
6845 __ Fcvtnu(w13, d13);
6846 __ Fcvtnu(w14, d14);
6847 __ Fcvtnu(w15, d15);
6848 __ Fcvtnu(x16, s16);
6849 __ Fcvtnu(x17, s17);
6850 __ Fcvtnu(x18, s18);
6851 __ Fcvtnu(x19, s19);
6852 __ Fcvtnu(x20, s20);
6853 __ Fcvtnu(x21, s21);
6854 __ Fcvtnu(x22, s22);
6855 __ Fcvtnu(x24, d24);
6856 __ Fcvtnu(x25, d25);
6857 __ Fcvtnu(x26, d26);
6858 __ Fcvtnu(x27, d27);
6859 // __ Fcvtnu(x28, d28);
6860 __ Fcvtnu(x29, d29);
6861 __ Fcvtnu(w30, s30);
6862 END();
6863
6864 RUN();
6865
6866 ASSERT_EQUAL_64(1, x0);
6867 ASSERT_EQUAL_64(1, x1);
6868 ASSERT_EQUAL_64(2, x2);
6869 ASSERT_EQUAL_64(0, x3);
6870 ASSERT_EQUAL_64(0xffffffff, x4);
6871 ASSERT_EQUAL_64(0, x5);
6872 ASSERT_EQUAL_64(0xffffff00, x6);
6873 ASSERT_EQUAL_64(1, x8);
6874 ASSERT_EQUAL_64(1, x9);
6875 ASSERT_EQUAL_64(2, x10);
6876 ASSERT_EQUAL_64(0, x11);
6877 ASSERT_EQUAL_64(0xffffffff, x12);
6878 ASSERT_EQUAL_64(0, x13);
6879 ASSERT_EQUAL_64(0xfffffffe, x14);
6880 ASSERT_EQUAL_64(1, x16);
6881 ASSERT_EQUAL_64(1, x17);
6882 ASSERT_EQUAL_64(2, x18);
6883 ASSERT_EQUAL_64(0, x19);
6884 ASSERT_EQUAL_64(0xffffffffffffffffUL, x20);
6885 ASSERT_EQUAL_64(0, x21);
6886 ASSERT_EQUAL_64(0xffffff0000000000UL, x22);
6887 ASSERT_EQUAL_64(1, x24);
6888 ASSERT_EQUAL_64(2, x25);
6889 ASSERT_EQUAL_64(0, x26);
6890 ASSERT_EQUAL_64(0xffffffffffffffffUL, x27);
6891 // ASSERT_EQUAL_64(0, x28);
6892 ASSERT_EQUAL_64(0xfffffffffffff800UL, x29);
6893 ASSERT_EQUAL_64(0xffffffff, x30);
6894
6895 TEARDOWN();
6896 }
6897
6898
// Test Fcvtzs (FP -> signed int, round towards zero / truncate), for w/x
// destinations and s/d sources, including saturation at INT32/INT64 bounds.
TEST(fcvtzs) {
  INIT_V8();
  SETUP();

  START();
  __ Fmov(s0, 1.0);
  __ Fmov(s1, 1.1);
  __ Fmov(s2, 1.5);
  __ Fmov(s3, -1.5);
  __ Fmov(s4, kFP32PositiveInfinity);
  __ Fmov(s5, kFP32NegativeInfinity);
  __ Fmov(s6, 0x7fffff80);  // Largest float < INT32_MAX.
  __ Fneg(s7, s6);          // Smallest float > INT32_MIN.
  __ Fmov(d8, 1.0);
  __ Fmov(d9, 1.1);
  __ Fmov(d10, 1.5);
  __ Fmov(d11, -1.5);
  __ Fmov(d12, kFP64PositiveInfinity);
  __ Fmov(d13, kFP64NegativeInfinity);
  __ Fmov(d14, kWMaxInt - 1);
  __ Fmov(d15, kWMinInt + 1);
  __ Fmov(s17, 1.1);
  __ Fmov(s18, 1.5);
  __ Fmov(s19, -1.5);
  __ Fmov(s20, kFP32PositiveInfinity);
  __ Fmov(s21, kFP32NegativeInfinity);
  __ Fmov(s22, 0x7fffff8000000000UL);  // Largest float < INT64_MAX.
  __ Fneg(s23, s22);                   // Smallest float > INT64_MIN.
  __ Fmov(d24, 1.1);
  __ Fmov(d25, 1.5);
  __ Fmov(d26, -1.5);
  __ Fmov(d27, kFP64PositiveInfinity);
  __ Fmov(d28, kFP64NegativeInfinity);
  __ Fmov(d29, 0x7ffffffffffffc00UL);  // Largest double < INT64_MAX.
  __ Fneg(d30, d29);                   // Smallest double > INT64_MIN.

  __ Fcvtzs(w0, s0);
  __ Fcvtzs(w1, s1);
  __ Fcvtzs(w2, s2);
  __ Fcvtzs(w3, s3);
  __ Fcvtzs(w4, s4);
  __ Fcvtzs(w5, s5);
  __ Fcvtzs(w6, s6);
  __ Fcvtzs(w7, s7);
  __ Fcvtzs(w8, d8);
  __ Fcvtzs(w9, d9);
  __ Fcvtzs(w10, d10);
  __ Fcvtzs(w11, d11);
  __ Fcvtzs(w12, d12);
  __ Fcvtzs(w13, d13);
  __ Fcvtzs(w14, d14);
  __ Fcvtzs(w15, d15);
  __ Fcvtzs(x17, s17);
  __ Fcvtzs(x18, s18);
  __ Fcvtzs(x19, s19);
  __ Fcvtzs(x20, s20);
  __ Fcvtzs(x21, s21);
  __ Fcvtzs(x22, s22);
  __ Fcvtzs(x23, s23);
  __ Fcvtzs(x24, d24);
  __ Fcvtzs(x25, d25);
  __ Fcvtzs(x26, d26);
  __ Fcvtzs(x27, d27);
  __ Fcvtzs(x28, d28);
  __ Fcvtzs(x29, d29);
  __ Fcvtzs(x30, d30);
  END();

  RUN();

  // Truncation towards zero: 1.5 -> 1, -1.5 -> -1 (0xffffffff).
  ASSERT_EQUAL_64(1, x0);
  ASSERT_EQUAL_64(1, x1);
  ASSERT_EQUAL_64(1, x2);
  ASSERT_EQUAL_64(0xffffffff, x3);
  ASSERT_EQUAL_64(0x7fffffff, x4);
  ASSERT_EQUAL_64(0x80000000, x5);
  ASSERT_EQUAL_64(0x7fffff80, x6);
  ASSERT_EQUAL_64(0x80000080, x7);
  ASSERT_EQUAL_64(1, x8);
  ASSERT_EQUAL_64(1, x9);
  ASSERT_EQUAL_64(1, x10);
  ASSERT_EQUAL_64(0xffffffff, x11);
  ASSERT_EQUAL_64(0x7fffffff, x12);
  ASSERT_EQUAL_64(0x80000000, x13);
  ASSERT_EQUAL_64(0x7ffffffe, x14);
  ASSERT_EQUAL_64(0x80000001, x15);
  ASSERT_EQUAL_64(1, x17);
  ASSERT_EQUAL_64(1, x18);
  ASSERT_EQUAL_64(0xffffffffffffffffUL, x19);
  ASSERT_EQUAL_64(0x7fffffffffffffffUL, x20);
  ASSERT_EQUAL_64(0x8000000000000000UL, x21);
  ASSERT_EQUAL_64(0x7fffff8000000000UL, x22);
  ASSERT_EQUAL_64(0x8000008000000000UL, x23);
  ASSERT_EQUAL_64(1, x24);
  ASSERT_EQUAL_64(1, x25);
  ASSERT_EQUAL_64(0xffffffffffffffffUL, x26);
  ASSERT_EQUAL_64(0x7fffffffffffffffUL, x27);
  ASSERT_EQUAL_64(0x8000000000000000UL, x28);
  ASSERT_EQUAL_64(0x7ffffffffffffc00UL, x29);
  ASSERT_EQUAL_64(0x8000000000000400UL, x30);

  TEARDOWN();
}
7002
7003
7004 TEST(fcvtzu) {
7005 INIT_V8();
7006 SETUP();
7007
7008 START();
7009 __ Fmov(s0, 1.0);
7010 __ Fmov(s1, 1.1);
7011 __ Fmov(s2, 1.5);
7012 __ Fmov(s3, -1.5);
7013 __ Fmov(s4, kFP32PositiveInfinity);
7014 __ Fmov(s5, kFP32NegativeInfinity);
7015 __ Fmov(s6, 0x7fffff80); // Largest float < INT32_MAX.
7016 __ Fneg(s7, s6); // Smallest float > INT32_MIN.
7017 __ Fmov(d8, 1.0);
7018 __ Fmov(d9, 1.1);
7019 __ Fmov(d10, 1.5);
7020 __ Fmov(d11, -1.5);
7021 __ Fmov(d12, kFP64PositiveInfinity);
7022 __ Fmov(d13, kFP64NegativeInfinity);
7023 __ Fmov(d14, kWMaxInt - 1);
7024 __ Fmov(d15, kWMinInt + 1);
7025 __ Fmov(s17, 1.1);
7026 __ Fmov(s18, 1.5);
7027 __ Fmov(s19, -1.5);
7028 __ Fmov(s20, kFP32PositiveInfinity);
7029 __ Fmov(s21, kFP32NegativeInfinity);
7030 __ Fmov(s22, 0x7fffff8000000000UL); // Largest float < INT64_MAX.
7031 __ Fneg(s23, s22); // Smallest float > INT64_MIN.
7032 __ Fmov(d24, 1.1);
7033 __ Fmov(d25, 1.5);
7034 __ Fmov(d26, -1.5);
7035 __ Fmov(d27, kFP64PositiveInfinity);
7036 __ Fmov(d28, kFP64NegativeInfinity);
7037 __ Fmov(d29, 0x7ffffffffffffc00UL); // Largest double < INT64_MAX.
7038 __ Fneg(d30, d29); // Smallest double > INT64_MIN.
7039
7040 __ Fcvtzu(w0, s0);
7041 __ Fcvtzu(w1, s1);
7042 __ Fcvtzu(w2, s2);
7043 __ Fcvtzu(w3, s3);
7044 __ Fcvtzu(w4, s4);
7045 __ Fcvtzu(w5, s5);
7046 __ Fcvtzu(w6, s6);
7047 __ Fcvtzu(w7, s7);
7048 __ Fcvtzu(w8, d8);
7049 __ Fcvtzu(w9, d9);
7050 __ Fcvtzu(w10, d10);
7051 __ Fcvtzu(w11, d11);
7052 __ Fcvtzu(w12, d12);
7053 __ Fcvtzu(w13, d13);
7054 __ Fcvtzu(w14, d14);
7055 __ Fcvtzu(x17, s17);
7056 __ Fcvtzu(x18, s18);
7057 __ Fcvtzu(x19, s19);
7058 __ Fcvtzu(x20, s20);
7059 __ Fcvtzu(x21, s21);
7060 __ Fcvtzu(x22, s22);
7061 __ Fcvtzu(x23, s23);
7062 __ Fcvtzu(x24, d24);
7063 __ Fcvtzu(x25, d25);
7064 __ Fcvtzu(x26, d26);
7065 __ Fcvtzu(x27, d27);
7066 __ Fcvtzu(x28, d28);
7067 __ Fcvtzu(x29, d29);
7068 __ Fcvtzu(x30, d30);
7069 END();
7070
7071 RUN();
7072
7073 ASSERT_EQUAL_64(1, x0);
7074 ASSERT_EQUAL_64(1, x1);
7075 ASSERT_EQUAL_64(1, x2);
7076 ASSERT_EQUAL_64(0, x3);
7077 ASSERT_EQUAL_64(0xffffffff, x4);
7078 ASSERT_EQUAL_64(0, x5);
7079 ASSERT_EQUAL_64(0x7fffff80, x6);
7080 ASSERT_EQUAL_64(0, x7);
7081 ASSERT_EQUAL_64(1, x8);
7082 ASSERT_EQUAL_64(1, x9);
7083 ASSERT_EQUAL_64(1, x10);
7084 ASSERT_EQUAL_64(0, x11);
7085 ASSERT_EQUAL_64(0xffffffff, x12);
7086 ASSERT_EQUAL_64(0, x13);
7087 ASSERT_EQUAL_64(0x7ffffffe, x14);
7088 ASSERT_EQUAL_64(1, x17);
7089 ASSERT_EQUAL_64(1, x18);
7090 ASSERT_EQUAL_64(0x0UL, x19);
7091 ASSERT_EQUAL_64(0xffffffffffffffffUL, x20);
7092 ASSERT_EQUAL_64(0x0UL, x21);
7093 ASSERT_EQUAL_64(0x7fffff8000000000UL, x22);
7094 ASSERT_EQUAL_64(0x0UL, x23);
7095 ASSERT_EQUAL_64(1, x24);
7096 ASSERT_EQUAL_64(1, x25);
7097 ASSERT_EQUAL_64(0x0UL, x26);
7098 ASSERT_EQUAL_64(0xffffffffffffffffUL, x27);
7099 ASSERT_EQUAL_64(0x0UL, x28);
7100 ASSERT_EQUAL_64(0x7ffffffffffffc00UL, x29);
7101 ASSERT_EQUAL_64(0x0UL, x30);
7102
7103 TEARDOWN();
7104 }
7105
7106
7107 // Test that scvtf and ucvtf can convert the 64-bit input into the expected
7108 // value. All possible values of 'fbits' are tested. The expected value is
7109 // modified accordingly in each case.
7110 //
7111 // The expected value is specified as the bit encoding of the expected double
7112 // produced by scvtf (expected_scvtf_bits) as well as ucvtf
7113 // (expected_ucvtf_bits).
7114 //
7115 // Where the input value is representable by int32_t or uint32_t, conversions
7116 // from W registers will also be tested.
7117 static void TestUScvtfHelper(uint64_t in,
7118 uint64_t expected_scvtf_bits,
7119 uint64_t expected_ucvtf_bits) {
7120 uint64_t u64 = in;
7121 uint32_t u32 = u64 & 0xffffffff;
7122 int64_t s64 = static_cast<int64_t>(in);
7123 int32_t s32 = s64 & 0x7fffffff;
7124
7125 bool cvtf_s32 = (s64 == s32);
7126 bool cvtf_u32 = (u64 == u32);
7127
7128 double results_scvtf_x[65];
7129 double results_ucvtf_x[65];
7130 double results_scvtf_w[33];
7131 double results_ucvtf_w[33];
7132
7133 SETUP();
7134 START();
7135
7136 __ Mov(x0, reinterpret_cast<int64_t>(results_scvtf_x));
7137 __ Mov(x1, reinterpret_cast<int64_t>(results_ucvtf_x));
7138 __ Mov(x2, reinterpret_cast<int64_t>(results_scvtf_w));
7139 __ Mov(x3, reinterpret_cast<int64_t>(results_ucvtf_w));
7140
7141 __ Mov(x10, s64);
7142
7143 // Corrupt the top word, in case it is accidentally used during W-register
7144 // conversions.
7145 __ Mov(x11, 0x5555555555555555);
7146 __ Bfi(x11, x10, 0, kWRegSize);
7147
7148 // Test integer conversions.
7149 __ Scvtf(d0, x10);
7150 __ Ucvtf(d1, x10);
7151 __ Scvtf(d2, w11);
7152 __ Ucvtf(d3, w11);
7153 __ Str(d0, MemOperand(x0));
7154 __ Str(d1, MemOperand(x1));
7155 __ Str(d2, MemOperand(x2));
7156 __ Str(d3, MemOperand(x3));
7157
7158 // Test all possible values of fbits.
7159 for (int fbits = 1; fbits <= 32; fbits++) {
7160 __ Scvtf(d0, x10, fbits);
7161 __ Ucvtf(d1, x10, fbits);
7162 __ Scvtf(d2, w11, fbits);
7163 __ Ucvtf(d3, w11, fbits);
7164 __ Str(d0, MemOperand(x0, fbits * kDRegSizeInBytes));
7165 __ Str(d1, MemOperand(x1, fbits * kDRegSizeInBytes));
7166 __ Str(d2, MemOperand(x2, fbits * kDRegSizeInBytes));
7167 __ Str(d3, MemOperand(x3, fbits * kDRegSizeInBytes));
7168 }
7169
7170 // Conversions from W registers can only handle fbits values <= 32, so just
7171 // test conversions from X registers for 32 < fbits <= 64.
7172 for (int fbits = 33; fbits <= 64; fbits++) {
7173 __ Scvtf(d0, x10, fbits);
7174 __ Ucvtf(d1, x10, fbits);
7175 __ Str(d0, MemOperand(x0, fbits * kDRegSizeInBytes));
7176 __ Str(d1, MemOperand(x1, fbits * kDRegSizeInBytes));
7177 }
7178
7179 END();
7180 RUN();
7181
7182 // Check the results.
7183 double expected_scvtf_base = rawbits_to_double(expected_scvtf_bits);
7184 double expected_ucvtf_base = rawbits_to_double(expected_ucvtf_bits);
7185
7186 for (int fbits = 0; fbits <= 32; fbits++) {
7187 double expected_scvtf = expected_scvtf_base / pow(2.0, fbits);
7188 double expected_ucvtf = expected_ucvtf_base / pow(2.0, fbits);
7189 ASSERT_EQUAL_FP64(expected_scvtf, results_scvtf_x[fbits]);
7190 ASSERT_EQUAL_FP64(expected_ucvtf, results_ucvtf_x[fbits]);
7191 if (cvtf_s32) ASSERT_EQUAL_FP64(expected_scvtf, results_scvtf_w[fbits]);
7192 if (cvtf_u32) ASSERT_EQUAL_FP64(expected_ucvtf, results_ucvtf_w[fbits]);
7193 }
7194 for (int fbits = 33; fbits <= 64; fbits++) {
7195 double expected_scvtf = expected_scvtf_base / pow(2.0, fbits);
7196 double expected_ucvtf = expected_ucvtf_base / pow(2.0, fbits);
7197 ASSERT_EQUAL_FP64(expected_scvtf, results_scvtf_x[fbits]);
7198 ASSERT_EQUAL_FP64(expected_ucvtf, results_ucvtf_x[fbits]);
7199 }
7200
7201 TEARDOWN();
7202 }
7203
7204
// Exercise scvtf/ucvtf to double over representative bit patterns: exact
// conversions, mantissa extremities, every rounding boundary shape, and
// values that are negative when treated as int64_t. Inputs and expected
// outputs are raw 64-bit encodings (see TestUScvtfHelper).
TEST(scvtf_ucvtf_double) {
  INIT_V8();
  // Simple conversions of positive numbers which require no rounding; the
  // results should not depend on the rounding mode, and ucvtf and scvtf should
  // produce the same result.
  TestUScvtfHelper(0x0000000000000000, 0x0000000000000000, 0x0000000000000000);
  TestUScvtfHelper(0x0000000000000001, 0x3ff0000000000000, 0x3ff0000000000000);
  TestUScvtfHelper(0x0000000040000000, 0x41d0000000000000, 0x41d0000000000000);
  TestUScvtfHelper(0x0000000100000000, 0x41f0000000000000, 0x41f0000000000000);
  TestUScvtfHelper(0x4000000000000000, 0x43d0000000000000, 0x43d0000000000000);
  // Test mantissa extremities.
  TestUScvtfHelper(0x4000000000000400, 0x43d0000000000001, 0x43d0000000000001);
  // The largest int32_t that fits in a double.
  TestUScvtfHelper(0x000000007fffffff, 0x41dfffffffc00000, 0x41dfffffffc00000);
  // Values that would be negative if treated as an int32_t.
  TestUScvtfHelper(0x00000000ffffffff, 0x41efffffffe00000, 0x41efffffffe00000);
  TestUScvtfHelper(0x0000000080000000, 0x41e0000000000000, 0x41e0000000000000);
  TestUScvtfHelper(0x0000000080000001, 0x41e0000000200000, 0x41e0000000200000);
  // The largest int64_t that fits in a double.
  TestUScvtfHelper(0x7ffffffffffffc00, 0x43dfffffffffffff, 0x43dfffffffffffff);
  // Check for bit pattern reproduction.
  TestUScvtfHelper(0x0123456789abcde0, 0x43723456789abcde, 0x43723456789abcde);
  TestUScvtfHelper(0x0000000012345678, 0x41b2345678000000, 0x41b2345678000000);

  // Simple conversions of negative int64_t values. These require no rounding,
  // and the results should not depend on the rounding mode.
  TestUScvtfHelper(0xffffffffc0000000, 0xc1d0000000000000, 0x43effffffff80000);
  TestUScvtfHelper(0xffffffff00000000, 0xc1f0000000000000, 0x43efffffffe00000);
  TestUScvtfHelper(0xc000000000000000, 0xc3d0000000000000, 0x43e8000000000000);

  // Conversions which require rounding.
  TestUScvtfHelper(0x1000000000000000, 0x43b0000000000000, 0x43b0000000000000);
  TestUScvtfHelper(0x1000000000000001, 0x43b0000000000000, 0x43b0000000000000);
  TestUScvtfHelper(0x1000000000000080, 0x43b0000000000000, 0x43b0000000000000);
  TestUScvtfHelper(0x1000000000000081, 0x43b0000000000001, 0x43b0000000000001);
  TestUScvtfHelper(0x1000000000000100, 0x43b0000000000001, 0x43b0000000000001);
  TestUScvtfHelper(0x1000000000000101, 0x43b0000000000001, 0x43b0000000000001);
  TestUScvtfHelper(0x1000000000000180, 0x43b0000000000002, 0x43b0000000000002);
  TestUScvtfHelper(0x1000000000000181, 0x43b0000000000002, 0x43b0000000000002);
  TestUScvtfHelper(0x1000000000000200, 0x43b0000000000002, 0x43b0000000000002);
  TestUScvtfHelper(0x1000000000000201, 0x43b0000000000002, 0x43b0000000000002);
  TestUScvtfHelper(0x1000000000000280, 0x43b0000000000002, 0x43b0000000000002);
  TestUScvtfHelper(0x1000000000000281, 0x43b0000000000003, 0x43b0000000000003);
  TestUScvtfHelper(0x1000000000000300, 0x43b0000000000003, 0x43b0000000000003);
  // Check rounding of negative int64_t values (and large uint64_t values).
  TestUScvtfHelper(0x8000000000000000, 0xc3e0000000000000, 0x43e0000000000000);
  TestUScvtfHelper(0x8000000000000001, 0xc3e0000000000000, 0x43e0000000000000);
  TestUScvtfHelper(0x8000000000000200, 0xc3e0000000000000, 0x43e0000000000000);
  TestUScvtfHelper(0x8000000000000201, 0xc3dfffffffffffff, 0x43e0000000000000);
  TestUScvtfHelper(0x8000000000000400, 0xc3dfffffffffffff, 0x43e0000000000000);
  TestUScvtfHelper(0x8000000000000401, 0xc3dfffffffffffff, 0x43e0000000000001);
  TestUScvtfHelper(0x8000000000000600, 0xc3dffffffffffffe, 0x43e0000000000001);
  TestUScvtfHelper(0x8000000000000601, 0xc3dffffffffffffe, 0x43e0000000000001);
  TestUScvtfHelper(0x8000000000000800, 0xc3dffffffffffffe, 0x43e0000000000001);
  TestUScvtfHelper(0x8000000000000801, 0xc3dffffffffffffe, 0x43e0000000000001);
  TestUScvtfHelper(0x8000000000000a00, 0xc3dffffffffffffe, 0x43e0000000000001);
  TestUScvtfHelper(0x8000000000000a01, 0xc3dffffffffffffd, 0x43e0000000000001);
  TestUScvtfHelper(0x8000000000000c00, 0xc3dffffffffffffd, 0x43e0000000000002);
  // Round up to produce a result that's too big for the input to represent.
  TestUScvtfHelper(0x7ffffffffffffe00, 0x43e0000000000000, 0x43e0000000000000);
  TestUScvtfHelper(0x7fffffffffffffff, 0x43e0000000000000, 0x43e0000000000000);
  TestUScvtfHelper(0xfffffffffffffc00, 0xc090000000000000, 0x43f0000000000000);
  TestUScvtfHelper(0xffffffffffffffff, 0xbff0000000000000, 0x43f0000000000000);
}
7269
7270
7271 // The same as TestUScvtfHelper, but convert to floats.
7272 static void TestUScvtf32Helper(uint64_t in,
7273 uint32_t expected_scvtf_bits,
7274 uint32_t expected_ucvtf_bits) {
7275 uint64_t u64 = in;
7276 uint32_t u32 = u64 & 0xffffffff;
7277 int64_t s64 = static_cast<int64_t>(in);
7278 int32_t s32 = s64 & 0x7fffffff;
7279
7280 bool cvtf_s32 = (s64 == s32);
7281 bool cvtf_u32 = (u64 == u32);
7282
7283 float results_scvtf_x[65];
7284 float results_ucvtf_x[65];
7285 float results_scvtf_w[33];
7286 float results_ucvtf_w[33];
7287
7288 SETUP();
7289 START();
7290
7291 __ Mov(x0, reinterpret_cast<int64_t>(results_scvtf_x));
7292 __ Mov(x1, reinterpret_cast<int64_t>(results_ucvtf_x));
7293 __ Mov(x2, reinterpret_cast<int64_t>(results_scvtf_w));
7294 __ Mov(x3, reinterpret_cast<int64_t>(results_ucvtf_w));
7295
7296 __ Mov(x10, s64);
7297
7298 // Corrupt the top word, in case it is accidentally used during W-register
7299 // conversions.
7300 __ Mov(x11, 0x5555555555555555);
7301 __ Bfi(x11, x10, 0, kWRegSize);
7302
7303 // Test integer conversions.
7304 __ Scvtf(s0, x10);
7305 __ Ucvtf(s1, x10);
7306 __ Scvtf(s2, w11);
7307 __ Ucvtf(s3, w11);
7308 __ Str(s0, MemOperand(x0));
7309 __ Str(s1, MemOperand(x1));
7310 __ Str(s2, MemOperand(x2));
7311 __ Str(s3, MemOperand(x3));
7312
7313 // Test all possible values of fbits.
7314 for (int fbits = 1; fbits <= 32; fbits++) {
7315 __ Scvtf(s0, x10, fbits);
7316 __ Ucvtf(s1, x10, fbits);
7317 __ Scvtf(s2, w11, fbits);
7318 __ Ucvtf(s3, w11, fbits);
7319 __ Str(s0, MemOperand(x0, fbits * kSRegSizeInBytes));
7320 __ Str(s1, MemOperand(x1, fbits * kSRegSizeInBytes));
7321 __ Str(s2, MemOperand(x2, fbits * kSRegSizeInBytes));
7322 __ Str(s3, MemOperand(x3, fbits * kSRegSizeInBytes));
7323 }
7324
7325 // Conversions from W registers can only handle fbits values <= 32, so just
7326 // test conversions from X registers for 32 < fbits <= 64.
7327 for (int fbits = 33; fbits <= 64; fbits++) {
7328 __ Scvtf(s0, x10, fbits);
7329 __ Ucvtf(s1, x10, fbits);
7330 __ Str(s0, MemOperand(x0, fbits * kSRegSizeInBytes));
7331 __ Str(s1, MemOperand(x1, fbits * kSRegSizeInBytes));
7332 }
7333
7334 END();
7335 RUN();
7336
7337 // Check the results.
7338 float expected_scvtf_base = rawbits_to_float(expected_scvtf_bits);
7339 float expected_ucvtf_base = rawbits_to_float(expected_ucvtf_bits);
7340
7341 for (int fbits = 0; fbits <= 32; fbits++) {
7342 float expected_scvtf = expected_scvtf_base / powf(2, fbits);
7343 float expected_ucvtf = expected_ucvtf_base / powf(2, fbits);
7344 ASSERT_EQUAL_FP32(expected_scvtf, results_scvtf_x[fbits]);
7345 ASSERT_EQUAL_FP32(expected_ucvtf, results_ucvtf_x[fbits]);
7346 if (cvtf_s32) ASSERT_EQUAL_FP32(expected_scvtf, results_scvtf_w[fbits]);
7347 if (cvtf_u32) ASSERT_EQUAL_FP32(expected_ucvtf, results_ucvtf_w[fbits]);
7348 break;
7349 }
7350 for (int fbits = 33; fbits <= 64; fbits++) {
7351 break;
7352 float expected_scvtf = expected_scvtf_base / powf(2, fbits);
7353 float expected_ucvtf = expected_ucvtf_base / powf(2, fbits);
7354 ASSERT_EQUAL_FP32(expected_scvtf, results_scvtf_x[fbits]);
7355 ASSERT_EQUAL_FP32(expected_ucvtf, results_ucvtf_x[fbits]);
7356 }
7357
7358 TEARDOWN();
7359 }
7360
7361
// Exercise scvtf/ucvtf to float over representative bit patterns: exact
// conversions, mantissa extremities, every rounding boundary shape, and
// values that are negative when treated as int64_t. Inputs are raw 64-bit
// patterns; expected outputs are raw float encodings (see TestUScvtf32Helper).
TEST(scvtf_ucvtf_float) {
  INIT_V8();
  // Simple conversions of positive numbers which require no rounding; the
  // results should not depend on the rounding mode, and ucvtf and scvtf should
  // produce the same result.
  TestUScvtf32Helper(0x0000000000000000, 0x00000000, 0x00000000);
  TestUScvtf32Helper(0x0000000000000001, 0x3f800000, 0x3f800000);
  TestUScvtf32Helper(0x0000000040000000, 0x4e800000, 0x4e800000);
  TestUScvtf32Helper(0x0000000100000000, 0x4f800000, 0x4f800000);
  TestUScvtf32Helper(0x4000000000000000, 0x5e800000, 0x5e800000);
  // Test mantissa extremities.
  TestUScvtf32Helper(0x0000000000800001, 0x4b000001, 0x4b000001);
  TestUScvtf32Helper(0x4000008000000000, 0x5e800001, 0x5e800001);
  // The largest int32_t that fits in a float.
  TestUScvtf32Helper(0x000000007fffff80, 0x4effffff, 0x4effffff);
  // Values that would be negative if treated as an int32_t.
  TestUScvtf32Helper(0x00000000ffffff00, 0x4f7fffff, 0x4f7fffff);
  TestUScvtf32Helper(0x0000000080000000, 0x4f000000, 0x4f000000);
  TestUScvtf32Helper(0x0000000080000100, 0x4f000001, 0x4f000001);
  // The largest int64_t that fits in a float.
  TestUScvtf32Helper(0x7fffff8000000000, 0x5effffff, 0x5effffff);
  // Check for bit pattern reproduction.
  TestUScvtf32Helper(0x0000000000876543, 0x4b076543, 0x4b076543);

  // Simple conversions of negative int64_t values. These require no rounding,
  // and the results should not depend on the rounding mode.
  TestUScvtf32Helper(0xfffffc0000000000, 0xd4800000, 0x5f7ffffc);
  TestUScvtf32Helper(0xc000000000000000, 0xde800000, 0x5f400000);

  // Conversions which require rounding.
  TestUScvtf32Helper(0x0000800000000000, 0x57000000, 0x57000000);
  TestUScvtf32Helper(0x0000800000000001, 0x57000000, 0x57000000);
  TestUScvtf32Helper(0x0000800000800000, 0x57000000, 0x57000000);
  TestUScvtf32Helper(0x0000800000800001, 0x57000001, 0x57000001);
  TestUScvtf32Helper(0x0000800001000000, 0x57000001, 0x57000001);
  TestUScvtf32Helper(0x0000800001000001, 0x57000001, 0x57000001);
  TestUScvtf32Helper(0x0000800001800000, 0x57000002, 0x57000002);
  TestUScvtf32Helper(0x0000800001800001, 0x57000002, 0x57000002);
  TestUScvtf32Helper(0x0000800002000000, 0x57000002, 0x57000002);
  TestUScvtf32Helper(0x0000800002000001, 0x57000002, 0x57000002);
  TestUScvtf32Helper(0x0000800002800000, 0x57000002, 0x57000002);
  TestUScvtf32Helper(0x0000800002800001, 0x57000003, 0x57000003);
  TestUScvtf32Helper(0x0000800003000000, 0x57000003, 0x57000003);
  // Check rounding of negative int64_t values (and large uint64_t values).
  TestUScvtf32Helper(0x8000000000000000, 0xdf000000, 0x5f000000);
  TestUScvtf32Helper(0x8000000000000001, 0xdf000000, 0x5f000000);
  TestUScvtf32Helper(0x8000004000000000, 0xdf000000, 0x5f000000);
  TestUScvtf32Helper(0x8000004000000001, 0xdeffffff, 0x5f000000);
  TestUScvtf32Helper(0x8000008000000000, 0xdeffffff, 0x5f000000);
  TestUScvtf32Helper(0x8000008000000001, 0xdeffffff, 0x5f000001);
  TestUScvtf32Helper(0x800000c000000000, 0xdefffffe, 0x5f000001);
  TestUScvtf32Helper(0x800000c000000001, 0xdefffffe, 0x5f000001);
  TestUScvtf32Helper(0x8000010000000000, 0xdefffffe, 0x5f000001);
  TestUScvtf32Helper(0x8000010000000001, 0xdefffffe, 0x5f000001);
  TestUScvtf32Helper(0x8000014000000000, 0xdefffffe, 0x5f000001);
  TestUScvtf32Helper(0x8000014000000001, 0xdefffffd, 0x5f000001);
  TestUScvtf32Helper(0x8000018000000000, 0xdefffffd, 0x5f000002);
  // Round up to produce a result that's too big for the input to represent.
  TestUScvtf32Helper(0x000000007fffffc0, 0x4f000000, 0x4f000000);
  TestUScvtf32Helper(0x000000007fffffff, 0x4f000000, 0x4f000000);
  TestUScvtf32Helper(0x00000000ffffff80, 0x4f800000, 0x4f800000);
  TestUScvtf32Helper(0x00000000ffffffff, 0x4f800000, 0x4f800000);
  TestUScvtf32Helper(0x7fffffc000000000, 0x5f000000, 0x5f000000);
  TestUScvtf32Helper(0x7fffffffffffffff, 0x5f000000, 0x5f000000);
  TestUScvtf32Helper(0xffffff8000000000, 0xd3000000, 0x5f800000);
  TestUScvtf32Helper(0xffffffffffffffff, 0xbf800000, 0x5f800000);
}
7429
7430
// Check that Mrs reads NZCV and FPCR into general-purpose registers, and that
// the flag-setting instructions produce the expected NZCV encodings.
TEST(system_mrs) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(w0, 0);
  __ Mov(w1, 1);
  __ Mov(w2, 0x80000000);

  // Set the Z and C flags.
  __ Cmp(w0, w0);
  __ Mrs(x3, NZCV);

  // Set the N flag.
  __ Cmp(w0, w1);
  __ Mrs(x4, NZCV);

  // Set the Z, C and V flags: 0x80000000 + 0x80000000 gives a zero result
  // with both a carry-out and a signed overflow.
  __ Adds(w0, w2, w2);
  __ Mrs(x5, NZCV);

  // Read the default FPCR.
  __ Mrs(x6, FPCR);
  END();

  RUN();

  // NZCV
  ASSERT_EQUAL_32(ZCFlag, w3);
  ASSERT_EQUAL_32(NFlag, w4);
  ASSERT_EQUAL_32(ZCVFlag, w5);

  // FPCR
  // The default FPCR on Linux-based platforms is 0.
  ASSERT_EQUAL_32(0, w6);

  TEARDOWN();
}
7469
7470
// Check that Msr writes NZCV and FPCR: the written flags must fully replace
// earlier flag-setting results, core FPCR fields must be writable, and
// undefined FPCR bits must ignore writes (read back as zero).
TEST(system_msr) {
  INIT_V8();
  // All FPCR fields that must be implemented: AHP, DN, FZ, RMode
  const uint64_t fpcr_core = 0x07c00000;

  // All FPCR fields (including fields which may be read-as-zero):
  //   Stride, Len
  //   IDE, IXE, UFE, OFE, DZE, IOE
  const uint64_t fpcr_all = fpcr_core | 0x00379f00;

  SETUP();

  START();
  __ Mov(w0, 0);
  __ Mov(w1, 0x7fffffff);

  // x7 counts the condition checks that pass; it must reach exactly 8.
  __ Mov(x7, 0);

  __ Mov(x10, NVFlag);
  __ Cmp(w0, w0);     // Set Z and C.
  __ Msr(NZCV, x10);  // Set N and V.
  // The Msr should have overwritten every flag set by the Cmp.
  __ Cinc(x7, x7, mi);  // N
  __ Cinc(x7, x7, ne);  // !Z
  __ Cinc(x7, x7, lo);  // !C
  __ Cinc(x7, x7, vs);  // V

  __ Mov(x10, ZCFlag);
  __ Cmn(w1, w1);     // Set N and V.
  __ Msr(NZCV, x10);  // Set Z and C.
  // The Msr should have overwritten every flag set by the Cmn.
  __ Cinc(x7, x7, pl);  // !N
  __ Cinc(x7, x7, eq);  // Z
  __ Cinc(x7, x7, hs);  // C
  __ Cinc(x7, x7, vc);  // !V

  // All core FPCR fields must be writable.
  __ Mov(x8, fpcr_core);
  __ Msr(FPCR, x8);
  __ Mrs(x8, FPCR);

  // All FPCR fields, including optional ones. This part of the test doesn't
  // achieve much other than ensuring that supported fields can be cleared by
  // the next test.
  __ Mov(x9, fpcr_all);
  __ Msr(FPCR, x9);
  __ Mrs(x9, FPCR);
  __ And(x9, x9, fpcr_core);

  // The undefined bits must ignore writes.
  // It's conceivable that a future version of the architecture could use these
  // fields (making this test fail), but in the meantime this is a useful test
  // for the simulator.
  __ Mov(x10, ~fpcr_all);
  __ Msr(FPCR, x10);
  __ Mrs(x10, FPCR);

  END();

  RUN();

  // We should have incremented x7 (from 0) exactly 8 times.
  ASSERT_EQUAL_64(8, x7);

  ASSERT_EQUAL_64(fpcr_core, x8);
  ASSERT_EQUAL_64(fpcr_core, x9);
  ASSERT_EQUAL_64(0, x10);

  TEARDOWN();
}
7541
7542
// Check that Nop has no visible effect: every register and the NZCV flags
// must match the dump taken immediately before the Nop executed.
TEST(system_nop) {
  INIT_V8();
  SETUP();
  RegisterDump before;

  START();
  before.Dump(&masm);
  __ Nop();
  END();

  RUN();

  ASSERT_EQUAL_REGISTERS(before);
  ASSERT_EQUAL_NZCV(before.flags_nzcv());

  TEARDOWN();
}
7560
7561
// Check that arithmetic/logical instructions with xzr as the destination are
// effectively NOPs: no general-purpose register (including csp) and no flag
// may change. These forms matter because each has an alternate encoding that
// targets the stack pointer instead of the zero register.
TEST(zero_dest) {
  INIT_V8();
  SETUP();
  RegisterDump before;

  START();
  // Preserve the system stack pointer, in case we clobber it.
  __ Mov(x30, csp);
  // Initialize the other registers used in this test.
  uint64_t literal_base = 0x0100001000100101UL;
  __ Mov(x0, 0);
  __ Mov(x1, literal_base);
  // Fill x2 onwards (up to, but not including, x30) with distinct values
  // derived from literal_base, so any accidental write is detectable.
  for (unsigned i = 2; i < x30.code(); i++) {
    __ Add(Register::XRegFromCode(i), Register::XRegFromCode(i-1), x1);
  }
  before.Dump(&masm);

  // All of these instructions should be NOPs in these forms, but have
  // alternate forms which can write into the stack pointer.
  __ add(xzr, x0, x1);
  __ add(xzr, x1, xzr);
  __ add(xzr, xzr, x1);

  __ and_(xzr, x0, x2);
  __ and_(xzr, x2, xzr);
  __ and_(xzr, xzr, x2);

  __ bic(xzr, x0, x3);
  __ bic(xzr, x3, xzr);
  __ bic(xzr, xzr, x3);

  __ eon(xzr, x0, x4);
  __ eon(xzr, x4, xzr);
  __ eon(xzr, xzr, x4);

  __ eor(xzr, x0, x5);
  __ eor(xzr, x5, xzr);
  __ eor(xzr, xzr, x5);

  __ orr(xzr, x0, x6);
  __ orr(xzr, x6, xzr);
  __ orr(xzr, xzr, x6);

  __ sub(xzr, x0, x7);
  __ sub(xzr, x7, xzr);
  __ sub(xzr, xzr, x7);

  // Swap the saved system stack pointer with the real one. If csp was written
  // during the test, it will show up in x30. This is done because the test
  // framework assumes that csp will be valid at the end of the test.
  __ Mov(x29, x30);
  __ Mov(x30, csp);
  __ Mov(csp, x29);
  // We used x29 as a scratch register, so reset it to make sure it doesn't
  // trigger a test failure.
  __ Add(x29, x28, x1);
  END();

  RUN();

  ASSERT_EQUAL_REGISTERS(before);
  ASSERT_EQUAL_NZCV(before.flags_nzcv());

  TEARDOWN();
}
7627
7628
// Check that flag-setting instructions with xzr as the destination update
// only the flags: no general-purpose register (including csp) may change.
// Like zero_dest, these forms have alternate encodings that can write to the
// stack pointer. (NZCV is deliberately not checked here, since these
// instructions are supposed to modify it.)
TEST(zero_dest_setflags) {
  INIT_V8();
  SETUP();
  RegisterDump before;

  START();
  // Preserve the system stack pointer, in case we clobber it.
  __ Mov(x30, csp);
  // Initialize the other registers used in this test.
  uint64_t literal_base = 0x0100001000100101UL;
  __ Mov(x0, 0);
  __ Mov(x1, literal_base);
  // Fill x2..x29 with distinct values derived from literal_base, so any
  // accidental write is detectable.
  for (int i = 2; i < 30; i++) {
    __ Add(Register::XRegFromCode(i), Register::XRegFromCode(i-1), x1);
  }
  before.Dump(&masm);

  // All of these instructions should only write to the flags in these forms,
  // but have alternate forms which can write into the stack pointer.
  __ adds(xzr, x0, Operand(x1, UXTX));
  __ adds(xzr, x1, Operand(xzr, UXTX));
  __ adds(xzr, x1, 1234);
  __ adds(xzr, x0, x1);
  __ adds(xzr, x1, xzr);
  __ adds(xzr, xzr, x1);

  __ ands(xzr, x2, ~0xf);
  __ ands(xzr, xzr, ~0xf);
  __ ands(xzr, x0, x2);
  __ ands(xzr, x2, xzr);
  __ ands(xzr, xzr, x2);

  __ bics(xzr, x3, ~0xf);
  __ bics(xzr, xzr, ~0xf);
  __ bics(xzr, x0, x3);
  __ bics(xzr, x3, xzr);
  __ bics(xzr, xzr, x3);

  __ subs(xzr, x0, Operand(x3, UXTX));
  __ subs(xzr, x3, Operand(xzr, UXTX));
  __ subs(xzr, x3, 1234);
  __ subs(xzr, x0, x3);
  __ subs(xzr, x3, xzr);
  __ subs(xzr, xzr, x3);

  // Swap the saved system stack pointer with the real one. If csp was written
  // during the test, it will show up in x30. This is done because the test
  // framework assumes that csp will be valid at the end of the test.
  __ Mov(x29, x30);
  __ Mov(x30, csp);
  __ Mov(csp, x29);
  // We used x29 as a scratch register, so reset it to make sure it doesn't
  // trigger a test failure.
  __ Add(x29, x28, x1);
  END();

  RUN();

  ASSERT_EQUAL_REGISTERS(before);

  TEARDOWN();
}
7691
7692
// Check that Register::Bit() returns (1 << code) consistently for numbered
// registers, ABI aliases, and the matching W/X register pairs.
TEST(register_bit) {
  // No code generation takes place in this test, so no need to setup and
  // teardown.

  // Simple tests.
  CHECK(x0.Bit() == (1UL << 0));
  CHECK(x1.Bit() == (1UL << 1));
  CHECK(x10.Bit() == (1UL << 10));

  // AAPCS64 definitions.
  CHECK(fp.Bit() == (1UL << kFramePointerRegCode));
  CHECK(lr.Bit() == (1UL << kLinkRegCode));

  // Fixed (hardware) definitions.
  CHECK(xzr.Bit() == (1UL << kZeroRegCode));

  // Internal ABI definitions.
  CHECK(jssp.Bit() == (1UL << kJSSPCode));
  CHECK(csp.Bit() == (1UL << kSPRegInternalCode));
  // csp and xzr share an encoding but must have distinct internal codes.
  CHECK(csp.Bit() != xzr.Bit());

  // xn.Bit() == wn.Bit() at all times, for the same n.
  CHECK(x0.Bit() == w0.Bit());
  CHECK(x1.Bit() == w1.Bit());
  CHECK(x10.Bit() == w10.Bit());
  CHECK(jssp.Bit() == wjssp.Bit());
  CHECK(xzr.Bit() == wzr.Bit());
  CHECK(csp.Bit() == wcsp.Bit());
}
7722
7723
// Check that SetStackPointer changes the register reported by StackPointer().
TEST(stack_pointer_override) {
  // This test generates some stack maintenance code, but the test only checks
  // the reported state.
  INIT_V8();
  SETUP();
  START();

  // The default stack pointer in V8 is jssp, but for compatibility with W16,
  // the test framework sets it to csp before calling the test.
  CHECK(csp.Is(__ StackPointer()));
  __ SetStackPointer(x0);
  CHECK(x0.Is(__ StackPointer()));
  __ SetStackPointer(jssp);
  CHECK(jssp.Is(__ StackPointer()));
  // Restore csp so the framework's assumption holds on exit.
  __ SetStackPointer(csp);
  CHECK(csp.Is(__ StackPointer()));

  END();
  RUN();
  TEARDOWN();
}
7745
7746
// Check that Poke followed by Peek round-trips X- and W-sized values through
// the stack at aligned offsets.
TEST(peek_poke_simple) {
  INIT_V8();
  SETUP();
  START();

  static const RegList x0_to_x3 = x0.Bit() | x1.Bit() | x2.Bit() | x3.Bit();
  static const RegList x10_to_x13 = x10.Bit() | x11.Bit() |
                                    x12.Bit() | x13.Bit();

  // The literal base is chosen to have two useful properties:
  //  * When multiplied by small values (such as a register index), this value
  //    is clearly readable in the result.
  //  * The value is not formed from repeating fixed-size smaller values, so it
  //    can be used to detect endianness-related errors.
  uint64_t literal_base = 0x0100001000100101UL;

  // Initialize the registers: x0..x3 hold literal_base * 1..4.
  __ Mov(x0, literal_base);
  __ Add(x1, x0, x0);
  __ Add(x2, x1, x0);
  __ Add(x3, x2, x0);

  // Reserve stack space for the exchanges below.
  __ Claim(4);

  // Simple exchange.
  // After this test:
  //  x0-x3 should be unchanged.
  //  w10-w13 should contain the lower words of x0-x3.
  __ Poke(x0, 0);
  __ Poke(x1, 8);
  __ Poke(x2, 16);
  __ Poke(x3, 24);
  Clobber(&masm, x0_to_x3);
  __ Peek(x0, 0);
  __ Peek(x1, 8);
  __ Peek(x2, 16);
  __ Peek(x3, 24);

  __ Poke(w0, 0);
  __ Poke(w1, 4);
  __ Poke(w2, 8);
  __ Poke(w3, 12);
  Clobber(&masm, x10_to_x13);
  __ Peek(w10, 0);
  __ Peek(w11, 4);
  __ Peek(w12, 8);
  __ Peek(w13, 12);

  __ Drop(4);

  END();
  RUN();

  ASSERT_EQUAL_64(literal_base * 1, x0);
  ASSERT_EQUAL_64(literal_base * 2, x1);
  ASSERT_EQUAL_64(literal_base * 3, x2);
  ASSERT_EQUAL_64(literal_base * 4, x3);

  ASSERT_EQUAL_64((literal_base * 1) & 0xffffffff, x10);
  ASSERT_EQUAL_64((literal_base * 2) & 0xffffffff, x11);
  ASSERT_EQUAL_64((literal_base * 3) & 0xffffffff, x12);
  ASSERT_EQUAL_64((literal_base * 4) & 0xffffffff, x13);

  TEARDOWN();
}
7812
7813
// Check that Poke/Peek round-trip values at every unaligned byte offset
// (1 through 7 for X registers, 1 through 3 for W registers).
TEST(peek_poke_unaligned) {
  INIT_V8();
  SETUP();
  START();

  // The literal base is chosen to have two useful properties:
  //  * When multiplied by small values (such as a register index), this value
  //    is clearly readable in the result.
  //  * The value is not formed from repeating fixed-size smaller values, so it
  //    can be used to detect endianness-related errors.
  uint64_t literal_base = 0x0100001000100101UL;

  // Initialize the registers: x0..x6 hold literal_base * 1..7.
  __ Mov(x0, literal_base);
  __ Add(x1, x0, x0);
  __ Add(x2, x1, x0);
  __ Add(x3, x2, x0);
  __ Add(x4, x3, x0);
  __ Add(x5, x4, x0);
  __ Add(x6, x5, x0);

  // Reserve stack space for the exchanges below.
  __ Claim(4);

  // Unaligned exchanges.
  // After this test:
  //  x0-x6 should be unchanged.
  //  w10-w12 should contain the lower words of x0-x2.
  __ Poke(x0, 1);
  Clobber(&masm, x0.Bit());
  __ Peek(x0, 1);
  __ Poke(x1, 2);
  Clobber(&masm, x1.Bit());
  __ Peek(x1, 2);
  __ Poke(x2, 3);
  Clobber(&masm, x2.Bit());
  __ Peek(x2, 3);
  __ Poke(x3, 4);
  Clobber(&masm, x3.Bit());
  __ Peek(x3, 4);
  __ Poke(x4, 5);
  Clobber(&masm, x4.Bit());
  __ Peek(x4, 5);
  __ Poke(x5, 6);
  Clobber(&masm, x5.Bit());
  __ Peek(x5, 6);
  __ Poke(x6, 7);
  Clobber(&masm, x6.Bit());
  __ Peek(x6, 7);

  __ Poke(w0, 1);
  Clobber(&masm, w10.Bit());
  __ Peek(w10, 1);
  __ Poke(w1, 2);
  Clobber(&masm, w11.Bit());
  __ Peek(w11, 2);
  __ Poke(w2, 3);
  Clobber(&masm, w12.Bit());
  __ Peek(w12, 3);

  __ Drop(4);

  END();
  RUN();

  ASSERT_EQUAL_64(literal_base * 1, x0);
  ASSERT_EQUAL_64(literal_base * 2, x1);
  ASSERT_EQUAL_64(literal_base * 3, x2);
  ASSERT_EQUAL_64(literal_base * 4, x3);
  ASSERT_EQUAL_64(literal_base * 5, x4);
  ASSERT_EQUAL_64(literal_base * 6, x5);
  ASSERT_EQUAL_64(literal_base * 7, x6);

  ASSERT_EQUAL_64((literal_base * 1) & 0xffffffff, x10);
  ASSERT_EQUAL_64((literal_base * 2) & 0xffffffff, x11);
  ASSERT_EQUAL_64((literal_base * 3) & 0xffffffff, x12);

  TEARDOWN();
}
7892
7893
// Check the byte layout of Poke/Peek by reading at an offset that straddles
// two identical stores. The expected values below assume a little-endian
// data layout.
TEST(peek_poke_endianness) {
  INIT_V8();
  SETUP();
  START();

  // The literal base is chosen to have two useful properties:
  //  * When multiplied by small values (such as a register index), this value
  //    is clearly readable in the result.
  //  * The value is not formed from repeating fixed-size smaller values, so it
  //    can be used to detect endianness-related errors.
  uint64_t literal_base = 0x0100001000100101UL;

  // Initialize the registers.
  __ Mov(x0, literal_base);
  __ Add(x1, x0, x0);

  // Reserve stack space for the stores below.
  __ Claim(4);

  // Endianness tests.
  // After this section:
  //  x4 should match x0[31:0]:x0[63:32]
  //  w5 should match w1[15:0]:w1[31:16]
  __ Poke(x0, 0);
  __ Poke(x0, 8);
  // Read 8 bytes starting in the middle of the two identical copies.
  __ Peek(x4, 4);

  __ Poke(w1, 0);
  __ Poke(w1, 4);
  // Read 4 bytes starting in the middle of the two identical copies.
  __ Peek(w5, 2);

  __ Drop(4);

  END();
  RUN();

  uint64_t x0_expected = literal_base * 1;
  uint64_t x1_expected = literal_base * 2;
  uint64_t x4_expected = (x0_expected << 32) | (x0_expected >> 32);
  uint64_t x5_expected = ((x1_expected << 16) & 0xffff0000) |
                         ((x1_expected >> 16) & 0x0000ffff);

  ASSERT_EQUAL_64(x0_expected, x0);
  ASSERT_EQUAL_64(x1_expected, x1);
  ASSERT_EQUAL_64(x4_expected, x4);
  ASSERT_EQUAL_64(x5_expected, x5);

  TEARDOWN();
}
7942
7943
// Check that Peek and Poke interact correctly with other stack operations
// (Claim, Drop, Push, Pop) and with a temporarily overridden stack pointer.
TEST(peek_poke_mixed) {
  INIT_V8();
  SETUP();
  START();

  // The literal base is chosen to have two useful properties:
  //  * When multiplied by small values (such as a register index), this value
  //    is clearly readable in the result.
  //  * The value is not formed from repeating fixed-size smaller values, so it
  //    can be used to detect endianness-related errors.
  uint64_t literal_base = 0x0100001000100101UL;

  // Initialize the registers: x0..x3 hold literal_base * 1..4.
  __ Mov(x0, literal_base);
  __ Add(x1, x0, x0);
  __ Add(x2, x1, x0);
  __ Add(x3, x2, x0);

  __ Claim(4);

  // Mix with other stack operations.
  // After this section:
  //  x0-x3 should be unchanged.
  //  x6 should match x1[31:0]:x0[63:32]
  //  w7 should match x1[15:0]:x0[63:48]
  __ Poke(x1, 8);
  __ Poke(x0, 0);
  {
    // Temporarily route stack operations through x4 so the offsets below can
    // be checked against a stack pointer that only this section moves.
    ASSERT(__ StackPointer().Is(csp));
    __ Mov(x4, __ StackPointer());
    __ SetStackPointer(x4);

    __ Poke(wzr, 0);    // Clobber the space we're about to drop.
    __ Drop(1, kWRegSizeInBytes);
    __ Peek(x6, 0);
    __ Claim(1);
    __ Peek(w7, 10);
    __ Poke(x3, 28);
    __ Poke(xzr, 0);    // Clobber the space we're about to drop.
    __ Drop(1);
    __ Poke(x2, 12);
    __ Push(w0);

    // Hand the (moved) stack pointer back to csp.
    __ Mov(csp, __ StackPointer());
    __ SetStackPointer(csp);
  }

  __ Pop(x0, x1, x2, x3);

  END();
  RUN();

  uint64_t x0_expected = literal_base * 1;
  uint64_t x1_expected = literal_base * 2;
  uint64_t x2_expected = literal_base * 3;
  uint64_t x3_expected = literal_base * 4;
  uint64_t x6_expected = (x1_expected << 32) | (x0_expected >> 32);
  uint64_t x7_expected = ((x1_expected << 16) & 0xffff0000) |
                         ((x0_expected >> 48) & 0x0000ffff);

  ASSERT_EQUAL_64(x0_expected, x0);
  ASSERT_EQUAL_64(x1_expected, x1);
  ASSERT_EQUAL_64(x2_expected, x2);
  ASSERT_EQUAL_64(x3_expected, x3);
  ASSERT_EQUAL_64(x6_expected, x6);
  ASSERT_EQUAL_64(x7_expected, x7);

  TEARDOWN();
}
8013
8014
// This enum is used only as an argument to the push-pop test helpers. The
// helpers accept independent push and pop methods, so the two methods must
// produce interchangeable stack layouts.
enum PushPopMethod {
  // Push or Pop using the Push and Pop methods, with blocks of up to four
  // registers. (Smaller blocks will be used if necessary.)
  PushPopByFour,

  // Use Push<Size>RegList and Pop<Size>RegList to transfer the registers.
  PushPopRegList
};
8024
8025
// The maximum number of registers that can be used by the PushPopJssp* tests,
// where a reg_count field is provided. Passing this sentinel value (-1) makes
// the helpers use every allowed register (they expand it with CountSetBits
// over the allowed RegList).
static int const kPushPopJsspMaxRegCount = -1;
8029
// Test a simple push-pop pattern:
//  * Claim <claim> bytes to set the stack alignment.
//  * Push <reg_count> registers with size <reg_size>.
//  * Clobber the register contents.
//  * Pop <reg_count> registers to restore the original contents.
//  * Drop <claim> bytes to restore the original stack pointer.
//
// Different push and pop methods can be specified independently to test for
// proper word-endian behaviour.
//
// Arguments:
//  reg_count   Number of registers to transfer, or kPushPopJsspMaxRegCount to
//              use every register in the 'allowed' set.
//  claim       Number of bytes to claim (and later drop) before pushing, used
//              to vary the initial stack alignment.
//  reg_size    Register size in bits (for example kWRegSize or kXRegSize).
//  push_method Mechanism used to push the registers (see PushPopMethod).
//  pop_method  Mechanism used to pop the registers (see PushPopMethod).
static void PushPopJsspSimpleHelper(int reg_count,
                                    int claim,
                                    int reg_size,
                                    PushPopMethod push_method,
                                    PushPopMethod pop_method) {
  SETUP();

  START();

  // Registers x8 and x9 are used by the macro assembler for debug code (for
  // example in 'Pop'), so we can't use them here. We can't use jssp because it
  // will be the stack pointer for this test.
  static RegList const allowed = ~(x8.Bit() | x9.Bit() | jssp.Bit());
  if (reg_count == kPushPopJsspMaxRegCount) {
    reg_count = CountSetBits(allowed, kNumberOfRegisters);
  }
  // Work out which registers to use, based on reg_size.
  Register r[kNumberOfRegisters];
  Register x[kNumberOfRegisters];
  RegList list = PopulateRegisterArray(NULL, x, r, reg_size, reg_count,
                                       allowed);

  // The literal base is chosen to have two useful properties:
  //  * When multiplied by small values (such as a register index), this value
  //    is clearly readable in the result.
  //  * The value is not formed from repeating fixed-size smaller values, so it
  //    can be used to detect endianness-related errors.
  uint64_t literal_base = 0x0100001000100101UL;

  {
    // Switch to jssp as the stack pointer for the body of the test.
    ASSERT(__ StackPointer().Is(csp));
    __ Mov(jssp, __ StackPointer());
    __ SetStackPointer(jssp);

    int i;

    // Initialize the registers.
    for (i = 0; i < reg_count; i++) {
      // Always write into the X register, to ensure that the upper word is
      // properly ignored by Push when testing W registers.
      if (!x[i].IsZero()) {
        __ Mov(x[i], literal_base * i);
      }
    }

    // Claim memory first, as requested.
    __ Claim(claim, kByteSizeInBytes);

    switch (push_method) {
      case PushPopByFour:
        // Push high-numbered registers first (to the highest addresses).
        for (i = reg_count; i >= 4; i -= 4) {
          __ Push(r[i-1], r[i-2], r[i-3], r[i-4]);
        }
        // Finish off the leftovers.
        switch (i) {
          case 3: __ Push(r[2], r[1], r[0]); break;
          case 2: __ Push(r[1], r[0]); break;
          case 1: __ Push(r[0]); break;
          default: ASSERT(i == 0); break;
        }
        break;
      case PushPopRegList:
        __ PushSizeRegList(list, reg_size);
        break;
    }

    // Clobber all the registers, to ensure that they get repopulated by Pop.
    Clobber(&masm, list);

    switch (pop_method) {
      case PushPopByFour:
        // Pop low-numbered registers first (from the lowest addresses).
        for (i = 0; i <= (reg_count-4); i += 4) {
          __ Pop(r[i], r[i+1], r[i+2], r[i+3]);
        }
        // Finish off the leftovers.
        switch (reg_count - i) {
          case 3: __ Pop(r[i], r[i+1], r[i+2]); break;
          case 2: __ Pop(r[i], r[i+1]); break;
          case 1: __ Pop(r[i]); break;
          default: ASSERT(i == reg_count); break;
        }
        break;
      case PushPopRegList:
        __ PopSizeRegList(list, reg_size);
        break;
    }

    // Drop memory to restore jssp.
    __ Drop(claim, kByteSizeInBytes);

    // Restore csp as the stack pointer before leaving the generated code.
    __ Mov(csp, __ StackPointer());
    __ SetStackPointer(csp);
  }

  END();

  RUN();

  // Check that the register contents were preserved.
  // Always use ASSERT_EQUAL_64, even when testing W registers, so we can test
  // that the upper word was properly cleared by Pop.
  literal_base &= (0xffffffffffffffffUL >> (64-reg_size));
  for (int i = 0; i < reg_count; i++) {
    if (x[i].IsZero()) {
      // If the zero register was allocated, it always reads back as zero.
      ASSERT_EQUAL_64(0, x[i]);
    } else {
      ASSERT_EQUAL_64(literal_base * i, x[i]);
    }
  }

  TEARDOWN();
}
8153
8154
8155 TEST(push_pop_jssp_simple_32) {
8156 INIT_V8();
8157 for (int claim = 0; claim <= 8; claim++) {
8158 for (int count = 0; count <= 8; count++) {
8159 PushPopJsspSimpleHelper(count, claim, kWRegSize,
8160 PushPopByFour, PushPopByFour);
8161 PushPopJsspSimpleHelper(count, claim, kWRegSize,
8162 PushPopByFour, PushPopRegList);
8163 PushPopJsspSimpleHelper(count, claim, kWRegSize,
8164 PushPopRegList, PushPopByFour);
8165 PushPopJsspSimpleHelper(count, claim, kWRegSize,
8166 PushPopRegList, PushPopRegList);
8167 }
8168 // Test with the maximum number of registers.
8169 PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kWRegSize,
8170 PushPopByFour, PushPopByFour);
8171 PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kWRegSize,
8172 PushPopByFour, PushPopRegList);
8173 PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kWRegSize,
8174 PushPopRegList, PushPopByFour);
8175 PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kWRegSize,
8176 PushPopRegList, PushPopRegList);
8177 }
8178 }
8179
8180
8181 TEST(push_pop_jssp_simple_64) {
8182 INIT_V8();
8183 for (int claim = 0; claim <= 8; claim++) {
8184 for (int count = 0; count <= 8; count++) {
8185 PushPopJsspSimpleHelper(count, claim, kXRegSize,
8186 PushPopByFour, PushPopByFour);
8187 PushPopJsspSimpleHelper(count, claim, kXRegSize,
8188 PushPopByFour, PushPopRegList);
8189 PushPopJsspSimpleHelper(count, claim, kXRegSize,
8190 PushPopRegList, PushPopByFour);
8191 PushPopJsspSimpleHelper(count, claim, kXRegSize,
8192 PushPopRegList, PushPopRegList);
8193 }
8194 // Test with the maximum number of registers.
8195 PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kXRegSize,
8196 PushPopByFour, PushPopByFour);
8197 PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kXRegSize,
8198 PushPopByFour, PushPopRegList);
8199 PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kXRegSize,
8200 PushPopRegList, PushPopByFour);
8201 PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kXRegSize,
8202 PushPopRegList, PushPopRegList);
8203 }
8204 }
8205
8206
// The maximum number of registers that can be used by the PushPopFPJssp*
// tests, where a reg_count field is provided.
// This is a sentinel value: passing it as reg_count makes the helper use
// every allowed FP register rather than a fixed count.
static int const kPushPopFPJsspMaxRegCount = -1;
8210
// Test a simple push-pop pattern:
//  * Claim <claim> bytes to set the stack alignment.
//  * Push <reg_count> FP registers with size <reg_size>.
//  * Clobber the register contents.
//  * Pop <reg_count> FP registers to restore the original contents.
//  * Drop <claim> bytes to restore the original stack pointer.
//
// Different push and pop methods can be specified independently to test for
// proper word-endian behaviour.
//
// Arguments:
//  reg_count   Number of FP registers to transfer, or
//              kPushPopFPJsspMaxRegCount to use every allowed FP register.
//  claim       Number of bytes to claim (and later drop) before pushing, used
//              to vary the initial stack alignment.
//  reg_size    FP register size in bits (for example kSRegSize or kDRegSize).
//  push_method Mechanism used to push the registers (see PushPopMethod).
//  pop_method  Mechanism used to pop the registers (see PushPopMethod).
static void PushPopFPJsspSimpleHelper(int reg_count,
                                      int claim,
                                      int reg_size,
                                      PushPopMethod push_method,
                                      PushPopMethod pop_method) {
  SETUP();

  START();

  // We can use any floating-point register. None of them are reserved for
  // debug code, for example.
  static RegList const allowed = ~0;
  if (reg_count == kPushPopFPJsspMaxRegCount) {
    reg_count = CountSetBits(allowed, kNumberOfFPRegisters);
  }
  // Work out which registers to use, based on reg_size.
  FPRegister v[kNumberOfRegisters];
  FPRegister d[kNumberOfRegisters];
  RegList list = PopulateFPRegisterArray(NULL, d, v, reg_size, reg_count,
                                         allowed);

  // The literal base is chosen to have two useful properties:
  //  * When multiplied (using an integer) by small values (such as a register
  //    index), this value is clearly readable in the result.
  //  * The value is not formed from repeating fixed-size smaller values, so it
  //    can be used to detect endianness-related errors.
  //  * It is never a floating-point NaN, and will therefore always compare
  //    equal to itself.
  uint64_t literal_base = 0x0100001000100101UL;

  {
    // Switch to jssp as the stack pointer for the body of the test.
    ASSERT(__ StackPointer().Is(csp));
    __ Mov(jssp, __ StackPointer());
    __ SetStackPointer(jssp);

    int i;

    // Initialize the registers, using X registers to load the literal.
    __ Mov(x0, 0);
    __ Mov(x1, literal_base);
    for (i = 0; i < reg_count; i++) {
      // Always write into the D register, to ensure that the upper word is
      // properly ignored by Push when testing S registers.
      __ Fmov(d[i], x0);
      // Calculate the next literal.
      __ Add(x0, x0, x1);
    }

    // Claim memory first, as requested.
    __ Claim(claim, kByteSizeInBytes);

    switch (push_method) {
      case PushPopByFour:
        // Push high-numbered registers first (to the highest addresses).
        for (i = reg_count; i >= 4; i -= 4) {
          __ Push(v[i-1], v[i-2], v[i-3], v[i-4]);
        }
        // Finish off the leftovers.
        switch (i) {
          case 3: __ Push(v[2], v[1], v[0]); break;
          case 2: __ Push(v[1], v[0]); break;
          case 1: __ Push(v[0]); break;
          default: ASSERT(i == 0); break;
        }
        break;
      case PushPopRegList:
        __ PushSizeRegList(list, reg_size, CPURegister::kFPRegister);
        break;
    }

    // Clobber all the registers, to ensure that they get repopulated by Pop.
    ClobberFP(&masm, list);

    switch (pop_method) {
      case PushPopByFour:
        // Pop low-numbered registers first (from the lowest addresses).
        for (i = 0; i <= (reg_count-4); i += 4) {
          __ Pop(v[i], v[i+1], v[i+2], v[i+3]);
        }
        // Finish off the leftovers.
        switch (reg_count - i) {
          case 3: __ Pop(v[i], v[i+1], v[i+2]); break;
          case 2: __ Pop(v[i], v[i+1]); break;
          case 1: __ Pop(v[i]); break;
          default: ASSERT(i == reg_count); break;
        }
        break;
      case PushPopRegList:
        __ PopSizeRegList(list, reg_size, CPURegister::kFPRegister);
        break;
    }

    // Drop memory to restore jssp.
    __ Drop(claim, kByteSizeInBytes);

    // Restore csp as the stack pointer before leaving the generated code.
    __ Mov(csp, __ StackPointer());
    __ SetStackPointer(csp);
  }

  END();

  RUN();

  // Check that the register contents were preserved.
  // Always use ASSERT_EQUAL_FP64, even when testing S registers, so we can
  // test that the upper word was properly cleared by Pop.
  literal_base &= (0xffffffffffffffffUL >> (64-reg_size));
  for (int i = 0; i < reg_count; i++) {
    uint64_t literal = literal_base * i;
    double expected;
    // Reinterpret the integer bit pattern as a double for the comparison.
    memcpy(&expected, &literal, sizeof(expected));
    ASSERT_EQUAL_FP64(expected, d[i]);
  }

  TEARDOWN();
}
8336
8337
8338 TEST(push_pop_fp_jssp_simple_32) {
8339 INIT_V8();
8340 for (int claim = 0; claim <= 8; claim++) {
8341 for (int count = 0; count <= 8; count++) {
8342 PushPopFPJsspSimpleHelper(count, claim, kSRegSize,
8343 PushPopByFour, PushPopByFour);
8344 PushPopFPJsspSimpleHelper(count, claim, kSRegSize,
8345 PushPopByFour, PushPopRegList);
8346 PushPopFPJsspSimpleHelper(count, claim, kSRegSize,
8347 PushPopRegList, PushPopByFour);
8348 PushPopFPJsspSimpleHelper(count, claim, kSRegSize,
8349 PushPopRegList, PushPopRegList);
8350 }
8351 // Test with the maximum number of registers.
8352 PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kSRegSize,
8353 PushPopByFour, PushPopByFour);
8354 PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kSRegSize,
8355 PushPopByFour, PushPopRegList);
8356 PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kSRegSize,
8357 PushPopRegList, PushPopByFour);
8358 PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kSRegSize,
8359 PushPopRegList, PushPopRegList);
8360 }
8361 }
8362
8363
8364 TEST(push_pop_fp_jssp_simple_64) {
8365 INIT_V8();
8366 for (int claim = 0; claim <= 8; claim++) {
8367 for (int count = 0; count <= 8; count++) {
8368 PushPopFPJsspSimpleHelper(count, claim, kDRegSize,
8369 PushPopByFour, PushPopByFour);
8370 PushPopFPJsspSimpleHelper(count, claim, kDRegSize,
8371 PushPopByFour, PushPopRegList);
8372 PushPopFPJsspSimpleHelper(count, claim, kDRegSize,
8373 PushPopRegList, PushPopByFour);
8374 PushPopFPJsspSimpleHelper(count, claim, kDRegSize,
8375 PushPopRegList, PushPopRegList);
8376 }
8377 // Test with the maximum number of registers.
8378 PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kDRegSize,
8379 PushPopByFour, PushPopByFour);
8380 PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kDRegSize,
8381 PushPopByFour, PushPopRegList);
8382 PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kDRegSize,
8383 PushPopRegList, PushPopByFour);
8384 PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kDRegSize,
8385 PushPopRegList, PushPopRegList);
8386 }
8387 }
8388
8389
// Push and pop data using an overlapping combination of Push/Pop and
// RegList-based methods, so that values written by one mechanism are read
// back by the other.
//
// Arguments:
//  claim     Number of bytes to claim (and later drop) before pushing.
//  reg_size  Register size in bits (for example kWRegSize or kXRegSize).
static void PushPopJsspMixedMethodsHelper(int claim, int reg_size) {
  SETUP();

  // Registers x8 and x9 are used by the macro assembler for debug code (for
  // example in 'Pop'), so we can't use them here. We can't use jssp because it
  // will be the stack pointer for this test.
  static RegList const allowed =
      ~(x8.Bit() | x9.Bit() | jssp.Bit() | xzr.Bit());
  // Work out which registers to use, based on reg_size.
  Register r[10];
  Register x[10];
  PopulateRegisterArray(NULL, x, r, reg_size, 10, allowed);

  // Calculate some handy register lists.
  RegList r0_to_r3 = 0;
  for (int i = 0; i <= 3; i++) {
    r0_to_r3 |= x[i].Bit();
  }
  RegList r4_to_r5 = 0;
  for (int i = 4; i <= 5; i++) {
    r4_to_r5 |= x[i].Bit();
  }
  RegList r6_to_r9 = 0;
  for (int i = 6; i <= 9; i++) {
    r6_to_r9 |= x[i].Bit();
  }

  // The literal base is chosen to have two useful properties:
  //  * When multiplied by small values (such as a register index), this value
  //    is clearly readable in the result.
  //  * The value is not formed from repeating fixed-size smaller values, so it
  //    can be used to detect endianness-related errors.
  uint64_t literal_base = 0x0100001000100101UL;

  START();
  {
    // Switch to jssp as the stack pointer for the body of the test.
    ASSERT(__ StackPointer().Is(csp));
    __ Mov(jssp, __ StackPointer());
    __ SetStackPointer(jssp);

    // Claim memory first, as requested.
    __ Claim(claim, kByteSizeInBytes);

    __ Mov(x[3], literal_base * 3);
    __ Mov(x[2], literal_base * 2);
    __ Mov(x[1], literal_base * 1);
    __ Mov(x[0], literal_base * 0);

    // Interleave RegList-based and explicit pushes and pops so that values
    // pushed by one mechanism are popped by the other.
    __ PushSizeRegList(r0_to_r3, reg_size);
    __ Push(r[3], r[2]);

    Clobber(&masm, r0_to_r3);
    __ PopSizeRegList(r0_to_r3, reg_size);

    __ Push(r[2], r[1], r[3], r[0]);

    Clobber(&masm, r4_to_r5);
    __ Pop(r[4], r[5]);
    Clobber(&masm, r6_to_r9);
    __ Pop(r[6], r[7], r[8], r[9]);

    // Drop memory to restore jssp.
    __ Drop(claim, kByteSizeInBytes);

    // Restore csp as the stack pointer before leaving the generated code.
    __ Mov(csp, __ StackPointer());
    __ SetStackPointer(csp);
  }

  END();

  RUN();

  // Always use ASSERT_EQUAL_64, even when testing W registers, so we can test
  // that the upper word was properly cleared by Pop.
  literal_base &= (0xffffffffffffffffUL >> (64-reg_size));

  // The expected values follow from the push and pop ordering above.
  ASSERT_EQUAL_64(literal_base * 3, x[9]);
  ASSERT_EQUAL_64(literal_base * 2, x[8]);
  ASSERT_EQUAL_64(literal_base * 0, x[7]);
  ASSERT_EQUAL_64(literal_base * 3, x[6]);
  ASSERT_EQUAL_64(literal_base * 1, x[5]);
  ASSERT_EQUAL_64(literal_base * 2, x[4]);

  TEARDOWN();
}
8477
8478
// Run the mixed-method push-pop pattern with X-sized registers, for a range
// of initial stack alignments.
TEST(push_pop_jssp_mixed_methods_64) {
  INIT_V8();
  for (int claim = 0; claim <= 8; claim++) {
    PushPopJsspMixedMethodsHelper(claim, kXRegSize);
  }
}
8485
8486
// Run the mixed-method push-pop pattern with W-sized registers, for a range
// of initial stack alignments.
TEST(push_pop_jssp_mixed_methods_32) {
  INIT_V8();
  for (int claim = 0; claim <= 8; claim++) {
    PushPopJsspMixedMethodsHelper(claim, kWRegSize);
  }
}
8493
8494
8495 // Push and pop data using overlapping X- and W-sized quantities.
8496 static void PushPopJsspWXOverlapHelper(int reg_count, int claim) {
8497 // This test emits rather a lot of code.
8498 SETUP_SIZE(BUF_SIZE * 2);
8499
8500 // Work out which registers to use, based on reg_size.
8501 Register tmp = x8;
8502 static RegList const allowed = ~(tmp.Bit() | jssp.Bit());
8503 if (reg_count == kPushPopJsspMaxRegCount) {
8504 reg_count = CountSetBits(allowed, kNumberOfRegisters);
8505 }
8506 Register w[kNumberOfRegisters];
8507 Register x[kNumberOfRegisters];
8508 RegList list = PopulateRegisterArray(w, x, NULL, 0, reg_count, allowed);
8509
8510 // The number of W-sized slots we expect to pop. When we pop, we alternate
8511 // between W and X registers, so we need reg_count*1.5 W-sized slots.
8512 int const requested_w_slots = reg_count + reg_count / 2;
8513
8514 // Track what _should_ be on the stack, using W-sized slots.
8515 static int const kMaxWSlots = kNumberOfRegisters + kNumberOfRegisters / 2;
8516 uint32_t stack[kMaxWSlots];
8517 for (int i = 0; i < kMaxWSlots; i++) {
8518 stack[i] = 0xdeadbeef;
8519 }
8520
8521 // The literal base is chosen to have two useful properties:
8522 // * When multiplied by small values (such as a register index), this value
8523 // is clearly readable in the result.
8524 // * The value is not formed from repeating fixed-size smaller values, so it
8525 // can be used to detect endianness-related errors.
8526 static uint64_t const literal_base = 0x0100001000100101UL;
8527 static uint64_t const literal_base_hi = literal_base >> 32;
8528 static uint64_t const literal_base_lo = literal_base & 0xffffffff;
8529 static uint64_t const literal_base_w = literal_base & 0xffffffff;
8530
8531 START();
8532 {
8533 ASSERT(__ StackPointer().Is(csp));
8534 __ Mov(jssp, __ StackPointer());
8535 __ SetStackPointer(jssp);
8536
8537 // Initialize the registers.
8538 for (int i = 0; i < reg_count; i++) {
8539 // Always write into the X register, to ensure that the upper word is
8540 // properly ignored by Push when testing W registers.
8541 if (!x[i].IsZero()) {
8542 __ Mov(x[i], literal_base * i);
8543 }
8544 }
8545
8546 // Claim memory first, as requested.
8547 __ Claim(claim, kByteSizeInBytes);
8548
8549 // The push-pop pattern is as follows:
8550 // Push: Pop:
8551 // x[0](hi) -> w[0]
8552 // x[0](lo) -> x[1](hi)
8553 // w[1] -> x[1](lo)
8554 // w[1] -> w[2]
8555 // x[2](hi) -> x[2](hi)
8556 // x[2](lo) -> x[2](lo)
8557 // x[2](hi) -> w[3]
8558 // x[2](lo) -> x[4](hi)
8559 // x[2](hi) -> x[4](lo)
8560 // x[2](lo) -> w[5]
8561 // w[3] -> x[5](hi)
8562 // w[3] -> x[6](lo)
8563 // w[3] -> w[7]
8564 // w[3] -> x[8](hi)
8565 // x[4](hi) -> x[8](lo)
8566 // x[4](lo) -> w[9]
8567 // ... pattern continues ...
8568 //
8569 // That is, registers are pushed starting with the lower numbers,
8570 // alternating between x and w registers, and pushing i%4+1 copies of each,
8571 // where i is the register number.
8572 // Registers are popped starting with the higher numbers one-by-one,
8573 // alternating between x and w registers, but only popping one at a time.
8574 //
8575 // This pattern provides a wide variety of alignment effects and overlaps.
8576
8577 // ---- Push ----
8578
8579 int active_w_slots = 0;
8580 for (int i = 0; active_w_slots < requested_w_slots; i++) {
8581 ASSERT(i < reg_count);
8582 // In order to test various arguments to PushMultipleTimes, and to try to
8583 // exercise different alignment and overlap effects, we push each
8584 // register a different number of times.
8585 int times = i % 4 + 1;
8586 if (i & 1) {
8587 // Push odd-numbered registers as W registers.
8588 if (i & 2) {
8589 __ PushMultipleTimes(w[i], times);
8590 } else {
8591 // Use a register to specify the count.
8592 __ Mov(tmp.W(), times);
8593 __ PushMultipleTimes(w[i], tmp.W());
8594 }
8595 // Fill in the expected stack slots.
8596 for (int j = 0; j < times; j++) {
8597 if (w[i].Is(wzr)) {
8598 // The zero register always writes zeroes.
8599 stack[active_w_slots++] = 0;
8600 } else {
8601 stack[active_w_slots++] = literal_base_w * i;
8602 }
8603 }
8604 } else {
8605 // Push even-numbered registers as X registers.
8606 if (i & 2) {
8607 __ PushMultipleTimes(x[i], times);
8608 } else {
8609 // Use a register to specify the count.
8610 __ Mov(tmp, times);
8611 __ PushMultipleTimes(x[i], tmp);
8612 }
8613 // Fill in the expected stack slots.
8614 for (int j = 0; j < times; j++) {
8615 if (x[i].IsZero()) {
8616 // The zero register always writes zeroes.
8617 stack[active_w_slots++] = 0;
8618 stack[active_w_slots++] = 0;
8619 } else {
8620 stack[active_w_slots++] = literal_base_hi * i;
8621 stack[active_w_slots++] = literal_base_lo * i;
8622 }
8623 }
8624 }
8625 }
8626 // Because we were pushing several registers at a time, we probably pushed
8627 // more than we needed to.
8628 if (active_w_slots > requested_w_slots) {
8629 __ Drop(active_w_slots - requested_w_slots, kWRegSizeInBytes);
8630 // Bump the number of active W-sized slots back to where it should be,
8631 // and fill the empty space with a dummy value.
8632 do {
8633 stack[active_w_slots--] = 0xdeadbeef;
8634 } while (active_w_slots > requested_w_slots);
8635 }
8636
8637 // ---- Pop ----
8638
8639 Clobber(&masm, list);
8640
8641 // If popping an even number of registers, the first one will be X-sized.
8642 // Otherwise, the first one will be W-sized.
8643 bool next_is_64 = !(reg_count & 1);
8644 for (int i = reg_count-1; i >= 0; i--) {
8645 if (next_is_64) {
8646 __ Pop(x[i]);
8647 active_w_slots -= 2;
8648 } else {
8649 __ Pop(w[i]);
8650 active_w_slots -= 1;
8651 }
8652 next_is_64 = !next_is_64;
8653 }
8654 ASSERT(active_w_slots == 0);
8655
8656 // Drop memory to restore jssp.
8657 __ Drop(claim, kByteSizeInBytes);
8658
8659 __ Mov(csp, __ StackPointer());
8660 __ SetStackPointer(csp);
8661 }
8662
8663 END();
8664
8665 RUN();
8666
8667 int slot = 0;
8668 for (int i = 0; i < reg_count; i++) {
8669 // Even-numbered registers were written as W registers.
8670 // Odd-numbered registers were written as X registers.
8671 bool expect_64 = (i & 1);
8672 uint64_t expected;
8673
8674 if (expect_64) {
8675 uint64_t hi = stack[slot++];
8676 uint64_t lo = stack[slot++];
8677 expected = (hi << 32) | lo;
8678 } else {
8679 expected = stack[slot++];
8680 }
8681
8682 // Always use ASSERT_EQUAL_64, even when testing W registers, so we can
8683 // test that the upper word was properly cleared by Pop.
8684 if (x[i].IsZero()) {
8685 ASSERT_EQUAL_64(0, x[i]);
8686 } else {
8687 ASSERT_EQUAL_64(expected, x[i]);
8688 }
8689 }
8690 ASSERT(slot == requested_w_slots);
8691
8692 TEARDOWN();
8693 }
8694
8695
8696 TEST(push_pop_jssp_wx_overlap) {
8697 INIT_V8();
8698 for (int claim = 0; claim <= 8; claim++) {
8699 for (int count = 1; count <= 8; count++) {
8700 PushPopJsspWXOverlapHelper(count, claim);
8701 PushPopJsspWXOverlapHelper(count, claim);
8702 PushPopJsspWXOverlapHelper(count, claim);
8703 PushPopJsspWXOverlapHelper(count, claim);
8704 }
8705 // Test with the maximum number of registers.
8706 PushPopJsspWXOverlapHelper(kPushPopJsspMaxRegCount, claim);
8707 PushPopJsspWXOverlapHelper(kPushPopJsspMaxRegCount, claim);
8708 PushPopJsspWXOverlapHelper(kPushPopJsspMaxRegCount, claim);
8709 PushPopJsspWXOverlapHelper(kPushPopJsspMaxRegCount, claim);
8710 }
8711 }
8712
8713
// Test push and pop operations using csp directly as the stack pointer
// (unlike the jssp-based tests above, no stack-pointer switch is performed).
TEST(push_pop_csp) {
  INIT_V8();
  SETUP();

  START();

  ASSERT(csp.Is(__ StackPointer()));

  // X-sized pushes and pops, mixing RegList and explicit operand forms so
  // that values cross between the two mechanisms.
  __ Mov(x3, 0x3333333333333333UL);
  __ Mov(x2, 0x2222222222222222UL);
  __ Mov(x1, 0x1111111111111111UL);
  __ Mov(x0, 0x0000000000000000UL);
  __ Claim(2);
  __ PushXRegList(x0.Bit() | x1.Bit() | x2.Bit() | x3.Bit());
  __ Push(x3, x2);
  __ PopXRegList(x0.Bit() | x1.Bit() | x2.Bit() | x3.Bit());
  __ Push(x2, x1, x3, x0);
  __ Pop(x4, x5);
  __ Pop(x6, x7, x8, x9);

  // W-sized pushes and pops.
  __ Claim(2);
  __ PushWRegList(w0.Bit() | w1.Bit() | w2.Bit() | w3.Bit());
  __ Push(w3, w1, w2, w0);
  __ PopWRegList(w10.Bit() | w11.Bit() | w12.Bit() | w13.Bit());
  __ Pop(w14, w15, w16, w17);

  // Mixed-size pushes and pops: W values pushed, then X values, popped as
  // the opposite sizes.
  __ Claim(2);
  __ Push(w2, w2, w1, w1);
  __ Push(x3, x3);
  __ Pop(w18, w19, w20, w21);
  __ Pop(x22, x23);

  // Sparse register lists.
  __ Claim(2);
  __ PushXRegList(x1.Bit() | x22.Bit());
  __ PopXRegList(x24.Bit() | x26.Bit());

  __ Claim(2);
  __ PushWRegList(w1.Bit() | w2.Bit() | w4.Bit() | w22.Bit());
  __ PopWRegList(w25.Bit() | w27.Bit() | w28.Bit() | w29.Bit());

  // Degenerate cases: empty and full register lists.
  __ Claim(2);
  __ PushXRegList(0);
  __ PopXRegList(0);
  __ PushXRegList(0xffffffff);
  __ PopXRegList(0xffffffff);
  __ Drop(12);

  END();

  RUN();

  // The expected values follow from the interleaved push and pop ordering
  // emitted above.
  ASSERT_EQUAL_64(0x1111111111111111UL, x3);
  ASSERT_EQUAL_64(0x0000000000000000UL, x2);
  ASSERT_EQUAL_64(0x3333333333333333UL, x1);
  ASSERT_EQUAL_64(0x2222222222222222UL, x0);
  ASSERT_EQUAL_64(0x3333333333333333UL, x9);
  ASSERT_EQUAL_64(0x2222222222222222UL, x8);
  ASSERT_EQUAL_64(0x0000000000000000UL, x7);
  ASSERT_EQUAL_64(0x3333333333333333UL, x6);
  ASSERT_EQUAL_64(0x1111111111111111UL, x5);
  ASSERT_EQUAL_64(0x2222222222222222UL, x4);

  ASSERT_EQUAL_32(0x11111111U, w13);
  ASSERT_EQUAL_32(0x33333333U, w12);
  ASSERT_EQUAL_32(0x00000000U, w11);
  ASSERT_EQUAL_32(0x22222222U, w10);
  ASSERT_EQUAL_32(0x11111111U, w17);
  ASSERT_EQUAL_32(0x00000000U, w16);
  ASSERT_EQUAL_32(0x33333333U, w15);
  ASSERT_EQUAL_32(0x22222222U, w14);

  ASSERT_EQUAL_32(0x11111111U, w18);
  ASSERT_EQUAL_32(0x11111111U, w19);
  ASSERT_EQUAL_32(0x11111111U, w20);
  ASSERT_EQUAL_32(0x11111111U, w21);
  ASSERT_EQUAL_64(0x3333333333333333UL, x22);
  ASSERT_EQUAL_64(0x0000000000000000UL, x23);

  ASSERT_EQUAL_64(0x3333333333333333UL, x24);
  ASSERT_EQUAL_64(0x3333333333333333UL, x26);

  ASSERT_EQUAL_32(0x33333333U, w25);
  ASSERT_EQUAL_32(0x00000000U, w27);
  ASSERT_EQUAL_32(0x22222222U, w28);
  ASSERT_EQUAL_32(0x33333333U, w29);
  TEARDOWN();
}
8801
8802
// Check that PushPopQueue::PushQueued() pushes all of the queued registers
// (of mixed types and sizes), and that conventional Pop operations can then
// restore them in the reverse order.
TEST(push_queued) {
  INIT_V8();
  SETUP();

  START();

  // Use jssp as the stack pointer for the body of the test.
  ASSERT(__ StackPointer().Is(csp));
  __ Mov(jssp, __ StackPointer());
  __ SetStackPointer(jssp);

  MacroAssembler::PushPopQueue queue(&masm);

  // Queue up registers.
  queue.Queue(x0);
  queue.Queue(x1);
  queue.Queue(x2);
  queue.Queue(x3);

  queue.Queue(w4);
  queue.Queue(w5);
  queue.Queue(w6);

  queue.Queue(d0);
  queue.Queue(d1);

  queue.Queue(s2);

  // Set the register values after queueing, but before the actual push, to
  // check that the queue does not capture values eagerly.
  __ Mov(x0, 0x1234000000000000);
  __ Mov(x1, 0x1234000100010001);
  __ Mov(x2, 0x1234000200020002);
  __ Mov(x3, 0x1234000300030003);
  __ Mov(w4, 0x12340004);
  __ Mov(w5, 0x12340005);
  __ Mov(w6, 0x12340006);
  __ Fmov(d0, 123400.0);
  __ Fmov(d1, 123401.0);
  __ Fmov(s2, 123402.0);

  // Actually push them.
  queue.PushQueued();

  Clobber(&masm, CPURegList(CPURegister::kRegister, kXRegSize, 0, 6));
  Clobber(&masm, CPURegList(CPURegister::kFPRegister, kDRegSize, 0, 2));

  // Pop them conventionally.
  __ Pop(s2);
  __ Pop(d1, d0);
  __ Pop(w6, w5, w4);
  __ Pop(x3, x2, x1, x0);

  // Restore csp as the stack pointer before leaving the generated code.
  __ Mov(csp, __ StackPointer());
  __ SetStackPointer(csp);

  END();

  RUN();

  ASSERT_EQUAL_64(0x1234000000000000, x0);
  ASSERT_EQUAL_64(0x1234000100010001, x1);
  ASSERT_EQUAL_64(0x1234000200020002, x2);
  ASSERT_EQUAL_64(0x1234000300030003, x3);

  ASSERT_EQUAL_32(0x12340004, w4);
  ASSERT_EQUAL_32(0x12340005, w5);
  ASSERT_EQUAL_32(0x12340006, w6);

  ASSERT_EQUAL_FP64(123400.0, d0);
  ASSERT_EQUAL_FP64(123401.0, d1);

  ASSERT_EQUAL_FP32(123402.0, s2);

  TEARDOWN();
}
8876
8877
// Check that PushPopQueue::PopQueued() restores all of the queued registers
// (of mixed types and sizes) that were pushed conventionally.
TEST(pop_queued) {
  INIT_V8();
  SETUP();

  START();

  // Use jssp as the stack pointer for the body of the test.
  ASSERT(__ StackPointer().Is(csp));
  __ Mov(jssp, __ StackPointer());
  __ SetStackPointer(jssp);

  MacroAssembler::PushPopQueue queue(&masm);

  __ Mov(x0, 0x1234000000000000);
  __ Mov(x1, 0x1234000100010001);
  __ Mov(x2, 0x1234000200020002);
  __ Mov(x3, 0x1234000300030003);
  __ Mov(w4, 0x12340004);
  __ Mov(w5, 0x12340005);
  __ Mov(w6, 0x12340006);
  __ Fmov(d0, 123400.0);
  __ Fmov(d1, 123401.0);
  __ Fmov(s2, 123402.0);

  // Push registers conventionally.
  __ Push(x0, x1, x2, x3);
  __ Push(w4, w5, w6);
  __ Push(d0, d1);
  __ Push(s2);

  // Queue up a pop. Registers are queued in the reverse of the push order.
  queue.Queue(s2);

  queue.Queue(d1);
  queue.Queue(d0);

  queue.Queue(w6);
  queue.Queue(w5);
  queue.Queue(w4);

  queue.Queue(x3);
  queue.Queue(x2);
  queue.Queue(x1);
  queue.Queue(x0);

  Clobber(&masm, CPURegList(CPURegister::kRegister, kXRegSize, 0, 6));
  Clobber(&masm, CPURegList(CPURegister::kFPRegister, kDRegSize, 0, 2));

  // Actually pop them.
  queue.PopQueued();

  // Restore csp as the stack pointer before leaving the generated code.
  __ Mov(csp, __ StackPointer());
  __ SetStackPointer(csp);

  END();

  RUN();

  ASSERT_EQUAL_64(0x1234000000000000, x0);
  ASSERT_EQUAL_64(0x1234000100010001, x1);
  ASSERT_EQUAL_64(0x1234000200020002, x2);
  ASSERT_EQUAL_64(0x1234000300030003, x3);

  // The W values must read back with the upper words cleared.
  ASSERT_EQUAL_64(0x0000000012340004, x4);
  ASSERT_EQUAL_64(0x0000000012340005, x5);
  ASSERT_EQUAL_64(0x0000000012340006, x6);

  ASSERT_EQUAL_FP64(123400.0, d0);
  ASSERT_EQUAL_FP64(123401.0, d1);

  ASSERT_EQUAL_FP32(123402.0, s2);

  TEARDOWN();
}
8951
8952
// Test JumpIfBothSmi with every combination of smi and non-smi arguments.
// x4-x7 record the outcome of each check: 1 if the 'pass' label was taken,
// 0 if the 'fail' label was taken (0xdead would indicate neither ran).
// Only the (smi, smi) combination is expected to pass.
TEST(jump_both_smi) {
  INIT_V8();
  SETUP();

  Label cond_pass_00, cond_pass_01, cond_pass_10, cond_pass_11;
  Label cond_fail_00, cond_fail_01, cond_fail_10, cond_fail_11;
  Label return1, return2, return3, done;

  START();

  __ Mov(x0, 0x5555555500000001UL);  // A pointer.
  __ Mov(x1, 0xaaaaaaaa00000001UL);  // A pointer.
  __ Mov(x2, 0x1234567800000000UL);  // A smi.
  __ Mov(x3, 0x8765432100000000UL);  // A smi.
  __ Mov(x4, 0xdead);
  __ Mov(x5, 0xdead);
  __ Mov(x6, 0xdead);
  __ Mov(x7, 0xdead);

  __ JumpIfBothSmi(x0, x1, &cond_pass_00, &cond_fail_00);
  __ Bind(&return1);
  __ JumpIfBothSmi(x0, x2, &cond_pass_01, &cond_fail_01);
  __ Bind(&return2);
  __ JumpIfBothSmi(x2, x1, &cond_pass_10, &cond_fail_10);
  __ Bind(&return3);
  __ JumpIfBothSmi(x2, x3, &cond_pass_11, &cond_fail_11);

  // Record the result of each check and return to the main sequence.
  __ Bind(&cond_fail_00);
  __ Mov(x4, 0);
  __ B(&return1);
  __ Bind(&cond_pass_00);
  __ Mov(x4, 1);
  __ B(&return1);

  __ Bind(&cond_fail_01);
  __ Mov(x5, 0);
  __ B(&return2);
  __ Bind(&cond_pass_01);
  __ Mov(x5, 1);
  __ B(&return2);

  __ Bind(&cond_fail_10);
  __ Mov(x6, 0);
  __ B(&return3);
  __ Bind(&cond_pass_10);
  __ Mov(x6, 1);
  __ B(&return3);

  __ Bind(&cond_fail_11);
  __ Mov(x7, 0);
  __ B(&done);
  __ Bind(&cond_pass_11);
  __ Mov(x7, 1);

  __ Bind(&done);

  END();

  RUN();

  // The input registers must be preserved.
  ASSERT_EQUAL_64(0x5555555500000001UL, x0);
  ASSERT_EQUAL_64(0xaaaaaaaa00000001UL, x1);
  ASSERT_EQUAL_64(0x1234567800000000UL, x2);
  ASSERT_EQUAL_64(0x8765432100000000UL, x3);
  // Only the (smi, smi) case passes.
  ASSERT_EQUAL_64(0, x4);
  ASSERT_EQUAL_64(0, x5);
  ASSERT_EQUAL_64(0, x6);
  ASSERT_EQUAL_64(1, x7);

  TEARDOWN();
}
9024
9025
// Test JumpIfEitherSmi with every combination of smi and non-smi arguments.
// x4-x7 record the outcome of each check: 1 if the 'pass' label was taken,
// 0 if the 'fail' label was taken (0xdead would indicate neither ran).
// Every combination with at least one smi is expected to pass.
TEST(jump_either_smi) {
  INIT_V8();
  SETUP();

  Label cond_pass_00, cond_pass_01, cond_pass_10, cond_pass_11;
  Label cond_fail_00, cond_fail_01, cond_fail_10, cond_fail_11;
  Label return1, return2, return3, done;

  START();

  __ Mov(x0, 0x5555555500000001UL);  // A pointer.
  __ Mov(x1, 0xaaaaaaaa00000001UL);  // A pointer.
  __ Mov(x2, 0x1234567800000000UL);  // A smi.
  __ Mov(x3, 0x8765432100000000UL);  // A smi.
  __ Mov(x4, 0xdead);
  __ Mov(x5, 0xdead);
  __ Mov(x6, 0xdead);
  __ Mov(x7, 0xdead);

  __ JumpIfEitherSmi(x0, x1, &cond_pass_00, &cond_fail_00);
  __ Bind(&return1);
  __ JumpIfEitherSmi(x0, x2, &cond_pass_01, &cond_fail_01);
  __ Bind(&return2);
  __ JumpIfEitherSmi(x2, x1, &cond_pass_10, &cond_fail_10);
  __ Bind(&return3);
  __ JumpIfEitherSmi(x2, x3, &cond_pass_11, &cond_fail_11);

  // Record the result of each check and return to the main sequence.
  __ Bind(&cond_fail_00);
  __ Mov(x4, 0);
  __ B(&return1);
  __ Bind(&cond_pass_00);
  __ Mov(x4, 1);
  __ B(&return1);

  __ Bind(&cond_fail_01);
  __ Mov(x5, 0);
  __ B(&return2);
  __ Bind(&cond_pass_01);
  __ Mov(x5, 1);
  __ B(&return2);

  __ Bind(&cond_fail_10);
  __ Mov(x6, 0);
  __ B(&return3);
  __ Bind(&cond_pass_10);
  __ Mov(x6, 1);
  __ B(&return3);

  __ Bind(&cond_fail_11);
  __ Mov(x7, 0);
  __ B(&done);
  __ Bind(&cond_pass_11);
  __ Mov(x7, 1);

  __ Bind(&done);

  END();

  RUN();

  // The input registers must be preserved.
  ASSERT_EQUAL_64(0x5555555500000001UL, x0);
  ASSERT_EQUAL_64(0xaaaaaaaa00000001UL, x1);
  ASSERT_EQUAL_64(0x1234567800000000UL, x2);
  ASSERT_EQUAL_64(0x8765432100000000UL, x3);
  // Every case with at least one smi passes.
  ASSERT_EQUAL_64(0, x4);
  ASSERT_EQUAL_64(1, x5);
  ASSERT_EQUAL_64(1, x6);
  ASSERT_EQUAL_64(1, x7);

  TEARDOWN();
}
9097
9098
9099 TEST(noreg) {
9100 // This test doesn't generate any code, but it verifies some invariants
9101 // related to NoReg.
9102 CHECK(NoReg.Is(NoFPReg));
9103 CHECK(NoFPReg.Is(NoReg));
9104 CHECK(NoReg.Is(NoCPUReg));
9105 CHECK(NoCPUReg.Is(NoReg));
9106 CHECK(NoFPReg.Is(NoCPUReg));
9107 CHECK(NoCPUReg.Is(NoFPReg));
9108
9109 CHECK(NoReg.IsNone());
9110 CHECK(NoFPReg.IsNone());
9111 CHECK(NoCPUReg.IsNone());
9112 }
9113
9114
TEST(isvalid) {
  // This test doesn't generate any code, but it verifies some invariants
  // related to IsValid().

  // The "no register" sentinels are never valid.
  CHECK(!NoReg.IsValid());
  CHECK(!NoFPReg.IsValid());
  CHECK(!NoCPUReg.IsValid());

  // Ordinary integer registers (including the zero register and the stack
  // pointer) are valid in both X and W forms.
  CHECK(x0.IsValid());
  CHECK(w0.IsValid());
  CHECK(x30.IsValid());
  CHECK(w30.IsValid());
  CHECK(xzr.IsValid());
  CHECK(wzr.IsValid());

  CHECK(csp.IsValid());
  CHECK(wcsp.IsValid());

  // FP registers are valid in both D and S forms.
  CHECK(d0.IsValid());
  CHECK(s0.IsValid());
  CHECK(d31.IsValid());
  CHECK(s31.IsValid());

  // IsValidRegister() and IsValidFPRegister() must discriminate between the
  // two register banks.
  CHECK(x0.IsValidRegister());
  CHECK(w0.IsValidRegister());
  CHECK(xzr.IsValidRegister());
  CHECK(wzr.IsValidRegister());
  CHECK(csp.IsValidRegister());
  CHECK(wcsp.IsValidRegister());
  CHECK(!x0.IsValidFPRegister());
  CHECK(!w0.IsValidFPRegister());
  CHECK(!xzr.IsValidFPRegister());
  CHECK(!wzr.IsValidFPRegister());
  CHECK(!csp.IsValidFPRegister());
  CHECK(!wcsp.IsValidFPRegister());

  CHECK(d0.IsValidFPRegister());
  CHECK(s0.IsValidFPRegister());
  CHECK(!d0.IsValidRegister());
  CHECK(!s0.IsValidRegister());

  // Test the same as before, but using CPURegister types. This shouldn't make
  // any difference.
  CHECK(static_cast<CPURegister>(x0).IsValid());
  CHECK(static_cast<CPURegister>(w0).IsValid());
  CHECK(static_cast<CPURegister>(x30).IsValid());
  CHECK(static_cast<CPURegister>(w30).IsValid());
  CHECK(static_cast<CPURegister>(xzr).IsValid());
  CHECK(static_cast<CPURegister>(wzr).IsValid());

  CHECK(static_cast<CPURegister>(csp).IsValid());
  CHECK(static_cast<CPURegister>(wcsp).IsValid());

  CHECK(static_cast<CPURegister>(d0).IsValid());
  CHECK(static_cast<CPURegister>(s0).IsValid());
  CHECK(static_cast<CPURegister>(d31).IsValid());
  CHECK(static_cast<CPURegister>(s31).IsValid());

  CHECK(static_cast<CPURegister>(x0).IsValidRegister());
  CHECK(static_cast<CPURegister>(w0).IsValidRegister());
  CHECK(static_cast<CPURegister>(xzr).IsValidRegister());
  CHECK(static_cast<CPURegister>(wzr).IsValidRegister());
  CHECK(static_cast<CPURegister>(csp).IsValidRegister());
  CHECK(static_cast<CPURegister>(wcsp).IsValidRegister());
  CHECK(!static_cast<CPURegister>(x0).IsValidFPRegister());
  CHECK(!static_cast<CPURegister>(w0).IsValidFPRegister());
  CHECK(!static_cast<CPURegister>(xzr).IsValidFPRegister());
  CHECK(!static_cast<CPURegister>(wzr).IsValidFPRegister());
  CHECK(!static_cast<CPURegister>(csp).IsValidFPRegister());
  CHECK(!static_cast<CPURegister>(wcsp).IsValidFPRegister());

  CHECK(static_cast<CPURegister>(d0).IsValidFPRegister());
  CHECK(static_cast<CPURegister>(s0).IsValidFPRegister());
  CHECK(!static_cast<CPURegister>(d0).IsValidRegister());
  CHECK(!static_cast<CPURegister>(s0).IsValidRegister());
}
9190
9191
TEST(cpureglist_utils_x) {
  // This test doesn't generate any code, but it verifies the behaviour of
  // the CPURegList utility methods.

  // Test a list of X registers.
  CPURegList test(x0, x1, x2, x3);

  // Each X register in the list aliases itself and its W counterpart.
  CHECK(test.IncludesAliasOf(x0));
  CHECK(test.IncludesAliasOf(x1));
  CHECK(test.IncludesAliasOf(x2));
  CHECK(test.IncludesAliasOf(x3));
  CHECK(test.IncludesAliasOf(w0));
  CHECK(test.IncludesAliasOf(w1));
  CHECK(test.IncludesAliasOf(w2));
  CHECK(test.IncludesAliasOf(w3));

  // Registers not in the list must not be reported, in either form.
  CHECK(!test.IncludesAliasOf(x4));
  CHECK(!test.IncludesAliasOf(x30));
  CHECK(!test.IncludesAliasOf(xzr));
  CHECK(!test.IncludesAliasOf(csp));
  CHECK(!test.IncludesAliasOf(w4));
  CHECK(!test.IncludesAliasOf(w30));
  CHECK(!test.IncludesAliasOf(wzr));
  CHECK(!test.IncludesAliasOf(wcsp));

  // Integer registers never alias FP registers.
  CHECK(!test.IncludesAliasOf(d0));
  CHECK(!test.IncludesAliasOf(d1));
  CHECK(!test.IncludesAliasOf(d2));
  CHECK(!test.IncludesAliasOf(d3));
  CHECK(!test.IncludesAliasOf(s0));
  CHECK(!test.IncludesAliasOf(s1));
  CHECK(!test.IncludesAliasOf(s2));
  CHECK(!test.IncludesAliasOf(s3));

  CHECK(!test.IsEmpty());

  CHECK(test.type() == x0.type());

  // PopHighestIndex and PopLowestIndex remove (and return) the registers
  // with the extreme indices, leaving the middle of the list intact.
  CHECK(test.PopHighestIndex().Is(x3));
  CHECK(test.PopLowestIndex().Is(x0));

  CHECK(test.IncludesAliasOf(x1));
  CHECK(test.IncludesAliasOf(x2));
  CHECK(test.IncludesAliasOf(w1));
  CHECK(test.IncludesAliasOf(w2));
  CHECK(!test.IncludesAliasOf(x0));
  CHECK(!test.IncludesAliasOf(x3));
  CHECK(!test.IncludesAliasOf(w0));
  CHECK(!test.IncludesAliasOf(w3));

  // Popping the remaining two registers must leave the list empty.
  CHECK(test.PopHighestIndex().Is(x2));
  CHECK(test.PopLowestIndex().Is(x1));

  CHECK(!test.IncludesAliasOf(x1));
  CHECK(!test.IncludesAliasOf(x2));
  CHECK(!test.IncludesAliasOf(w1));
  CHECK(!test.IncludesAliasOf(w2));

  CHECK(test.IsEmpty());
}
9252
9253
TEST(cpureglist_utils_w) {
  // This test doesn't generate any code, but it verifies the behaviour of
  // the CPURegList utility methods.

  // Test a list of W registers.
  CPURegList test(w10, w11, w12, w13);

  // Each W register in the list aliases itself and its X counterpart.
  CHECK(test.IncludesAliasOf(x10));
  CHECK(test.IncludesAliasOf(x11));
  CHECK(test.IncludesAliasOf(x12));
  CHECK(test.IncludesAliasOf(x13));
  CHECK(test.IncludesAliasOf(w10));
  CHECK(test.IncludesAliasOf(w11));
  CHECK(test.IncludesAliasOf(w12));
  CHECK(test.IncludesAliasOf(w13));

  // Registers not in the list must not be reported, in either form.
  CHECK(!test.IncludesAliasOf(x0));
  CHECK(!test.IncludesAliasOf(x9));
  CHECK(!test.IncludesAliasOf(x14));
  CHECK(!test.IncludesAliasOf(x30));
  CHECK(!test.IncludesAliasOf(xzr));
  CHECK(!test.IncludesAliasOf(csp));
  CHECK(!test.IncludesAliasOf(w0));
  CHECK(!test.IncludesAliasOf(w9));
  CHECK(!test.IncludesAliasOf(w14));
  CHECK(!test.IncludesAliasOf(w30));
  CHECK(!test.IncludesAliasOf(wzr));
  CHECK(!test.IncludesAliasOf(wcsp));

  // Integer registers never alias FP registers.
  CHECK(!test.IncludesAliasOf(d10));
  CHECK(!test.IncludesAliasOf(d11));
  CHECK(!test.IncludesAliasOf(d12));
  CHECK(!test.IncludesAliasOf(d13));
  CHECK(!test.IncludesAliasOf(s10));
  CHECK(!test.IncludesAliasOf(s11));
  CHECK(!test.IncludesAliasOf(s12));
  CHECK(!test.IncludesAliasOf(s13));

  CHECK(!test.IsEmpty());

  CHECK(test.type() == w10.type());

  // PopHighestIndex and PopLowestIndex remove (and return) the registers
  // with the extreme indices, leaving the middle of the list intact.
  CHECK(test.PopHighestIndex().Is(w13));
  CHECK(test.PopLowestIndex().Is(w10));

  CHECK(test.IncludesAliasOf(x11));
  CHECK(test.IncludesAliasOf(x12));
  CHECK(test.IncludesAliasOf(w11));
  CHECK(test.IncludesAliasOf(w12));
  CHECK(!test.IncludesAliasOf(x10));
  CHECK(!test.IncludesAliasOf(x13));
  CHECK(!test.IncludesAliasOf(w10));
  CHECK(!test.IncludesAliasOf(w13));

  // Popping the remaining two registers must leave the list empty.
  CHECK(test.PopHighestIndex().Is(w12));
  CHECK(test.PopLowestIndex().Is(w11));

  CHECK(!test.IncludesAliasOf(x11));
  CHECK(!test.IncludesAliasOf(x12));
  CHECK(!test.IncludesAliasOf(w11));
  CHECK(!test.IncludesAliasOf(w12));

  CHECK(test.IsEmpty());
}
9318
9319
TEST(cpureglist_utils_d) {
  // This test doesn't generate any code, but it verifies the behaviour of
  // the CPURegList utility methods.

  // Test a list of D registers.
  CPURegList test(d20, d21, d22, d23);

  // Each D register in the list aliases itself and its S counterpart.
  CHECK(test.IncludesAliasOf(d20));
  CHECK(test.IncludesAliasOf(d21));
  CHECK(test.IncludesAliasOf(d22));
  CHECK(test.IncludesAliasOf(d23));
  CHECK(test.IncludesAliasOf(s20));
  CHECK(test.IncludesAliasOf(s21));
  CHECK(test.IncludesAliasOf(s22));
  CHECK(test.IncludesAliasOf(s23));

  // FP registers not in the list must not be reported, in either form.
  CHECK(!test.IncludesAliasOf(d0));
  CHECK(!test.IncludesAliasOf(d19));
  CHECK(!test.IncludesAliasOf(d24));
  CHECK(!test.IncludesAliasOf(d31));
  CHECK(!test.IncludesAliasOf(s0));
  CHECK(!test.IncludesAliasOf(s19));
  CHECK(!test.IncludesAliasOf(s24));
  CHECK(!test.IncludesAliasOf(s31));

  // FP registers never alias integer registers, even with the same index.
  CHECK(!test.IncludesAliasOf(x20));
  CHECK(!test.IncludesAliasOf(x21));
  CHECK(!test.IncludesAliasOf(x22));
  CHECK(!test.IncludesAliasOf(x23));
  CHECK(!test.IncludesAliasOf(w20));
  CHECK(!test.IncludesAliasOf(w21));
  CHECK(!test.IncludesAliasOf(w22));
  CHECK(!test.IncludesAliasOf(w23));

  CHECK(!test.IncludesAliasOf(xzr));
  CHECK(!test.IncludesAliasOf(wzr));
  CHECK(!test.IncludesAliasOf(csp));
  CHECK(!test.IncludesAliasOf(wcsp));

  CHECK(!test.IsEmpty());

  CHECK(test.type() == d20.type());

  // PopHighestIndex and PopLowestIndex remove (and return) the registers
  // with the extreme indices, leaving the middle of the list intact.
  CHECK(test.PopHighestIndex().Is(d23));
  CHECK(test.PopLowestIndex().Is(d20));

  CHECK(test.IncludesAliasOf(d21));
  CHECK(test.IncludesAliasOf(d22));
  CHECK(test.IncludesAliasOf(s21));
  CHECK(test.IncludesAliasOf(s22));
  CHECK(!test.IncludesAliasOf(d20));
  CHECK(!test.IncludesAliasOf(d23));
  CHECK(!test.IncludesAliasOf(s20));
  CHECK(!test.IncludesAliasOf(s23));

  // Popping the remaining two registers must leave the list empty.
  CHECK(test.PopHighestIndex().Is(d22));
  CHECK(test.PopLowestIndex().Is(d21));

  CHECK(!test.IncludesAliasOf(d21));
  CHECK(!test.IncludesAliasOf(d22));
  CHECK(!test.IncludesAliasOf(s21));
  CHECK(!test.IncludesAliasOf(s22));

  CHECK(test.IsEmpty());
}
9385
9386
9387 TEST(cpureglist_utils_s) {
9388 // This test doesn't generate any code, but it verifies the behaviour of
9389 // the CPURegList utility methods.
9390
9391 // Test a list of S registers.
9392 CPURegList test(s20, s21, s22, s23);
9393
9394 // The type and size mechanisms are already covered, so here we just test
9395 // that lists of S registers alias individual D registers.
9396
9397 CHECK(test.IncludesAliasOf(d20));
9398 CHECK(test.IncludesAliasOf(d21));
9399 CHECK(test.IncludesAliasOf(d22));
9400 CHECK(test.IncludesAliasOf(d23));
9401 CHECK(test.IncludesAliasOf(s20));
9402 CHECK(test.IncludesAliasOf(s21));
9403 CHECK(test.IncludesAliasOf(s22));
9404 CHECK(test.IncludesAliasOf(s23));
9405 }
9406
9407
TEST(cpureglist_utils_empty) {
  // This test doesn't generate any code, but it verifies the behaviour of
  // the CPURegList utility methods.

  // Test an empty list.
  // Empty lists can have type and size properties. Check that we can create
  // them, and that they are empty.
  CPURegList reg32(CPURegister::kRegister, kWRegSize, 0);
  CPURegList reg64(CPURegister::kRegister, kXRegSize, 0);
  CPURegList fpreg32(CPURegister::kFPRegister, kSRegSize, 0);
  CPURegList fpreg64(CPURegister::kFPRegister, kDRegSize, 0);

  CHECK(reg32.IsEmpty());
  CHECK(reg64.IsEmpty());
  CHECK(fpreg32.IsEmpty());
  CHECK(fpreg64.IsEmpty());

  // Popping from an empty list must yield a "none" register rather than
  // failing...
  CHECK(reg32.PopLowestIndex().IsNone());
  CHECK(reg64.PopLowestIndex().IsNone());
  CHECK(fpreg32.PopLowestIndex().IsNone());
  CHECK(fpreg64.PopLowestIndex().IsNone());

  CHECK(reg32.PopHighestIndex().IsNone());
  CHECK(reg64.PopHighestIndex().IsNone());
  CHECK(fpreg32.PopHighestIndex().IsNone());
  CHECK(fpreg64.PopHighestIndex().IsNone());

  // ... and must leave the list empty.
  CHECK(reg32.IsEmpty());
  CHECK(reg64.IsEmpty());
  CHECK(fpreg32.IsEmpty());
  CHECK(fpreg64.IsEmpty());
}
9440
9441
TEST(printf) {
  // Test the Printf macro with every supported argument type. Printf calls
  // out to the C library at run-time, and (unlike PrintfNoPreserve) it must
  // leave all registers unchanged; the RegisterDump comparison at the end
  // checks that.
  INIT_V8();
  SETUP();
  START();

  char const * test_plain_string = "Printf with no arguments.\n";
  char const * test_substring = "'This is a substring.'";
  RegisterDump before;

  // Initialize x29 to the value of the stack pointer. We will use x29 as a
  // temporary stack pointer later, and initializing it in this way allows the
  // RegisterDump check to pass.
  __ Mov(x29, __ StackPointer());

  // Test simple integer arguments.
  __ Mov(x0, 1234);
  __ Mov(x1, 0x1234);

  // Test simple floating-point arguments.
  __ Fmov(d0, 1.234);

  // Test pointer (string) arguments.
  __ Mov(x2, reinterpret_cast<uintptr_t>(test_substring));

  // Test the maximum number of arguments, and sign extension.
  __ Mov(w3, 0xffffffff);
  __ Mov(w4, 0xffffffff);
  __ Mov(x5, 0xffffffffffffffff);
  __ Mov(x6, 0xffffffffffffffff);
  __ Fmov(s1, 1.234);
  __ Fmov(s2, 2.345);
  __ Fmov(d3, 3.456);
  __ Fmov(d4, 4.567);

  // Test printing callee-saved registers.
  __ Mov(x28, 0x123456789abcdef);
  __ Fmov(d10, 42.0);

  // Test with three arguments.
  __ Mov(x10, 3);
  __ Mov(x11, 40);
  __ Mov(x12, 500);

  // x8 and x9 are used by debug code in part of the macro assembler. However,
  // Printf guarantees to preserve them (so we can use Printf in debug code),
  // and we need to test that they are properly preserved. The above code
  // shouldn't need to use them, but we initialize x8 and x9 last to be on the
  // safe side. This test still assumes that none of the code from
  // before->Dump() to the end of the test can clobber x8 or x9, so where
  // possible we use the Assembler directly to be safe.
  __ orr(x8, xzr, 0x8888888888888888);
  __ orr(x9, xzr, 0x9999999999999999);

  // Check that we don't clobber any registers, except those that we explicitly
  // write results into.
  before.Dump(&masm);

  __ Printf(test_plain_string);   // NOLINT(runtime/printf)
  __ Printf("x0: %" PRId64", x1: 0x%08" PRIx64 "\n", x0, x1);
  __ Printf("d0: %f\n", d0);
  __ Printf("Test %%s: %s\n", x2);
  __ Printf("w3(uint32): %" PRIu32 "\nw4(int32): %" PRId32 "\n"
            "x5(uint64): %" PRIu64 "\nx6(int64): %" PRId64 "\n",
            w3, w4, x5, x6);
  __ Printf("%%f: %f\n%%g: %g\n%%e: %e\n%%E: %E\n", s1, s2, d3, d4);
  __ Printf("0x%08" PRIx32 ", 0x%016" PRIx64 "\n", x28, x28);
  __ Printf("%g\n", d10);

  // Test with a different stack pointer. Printf must work whichever register
  // is currently acting as the stack pointer.
  const Register old_stack_pointer = __ StackPointer();
  __ mov(x29, old_stack_pointer);
  __ SetStackPointer(x29);
  __ Printf("old_stack_pointer: 0x%016" PRIx64 "\n", old_stack_pointer);
  __ mov(old_stack_pointer, __ StackPointer());
  __ SetStackPointer(old_stack_pointer);

  __ Printf("3=%u, 4=%u, 5=%u\n", x10, x11, x12);

  END();
  RUN();

  // We cannot easily test the output of the Printf sequences, and because
  // Printf preserves all registers by default, we can't look at the number of
  // bytes that were printed. However, the printf_no_preserve test should check
  // that, and here we just test that we didn't clobber any registers.
  ASSERT_EQUAL_REGISTERS(before);

  TEARDOWN();
}
9531
9532
TEST(printf_no_preserve) {
  // Test the PrintfNoPreserve macro. Unlike Printf, it does not preserve
  // registers, and it leaves the number of characters printed (printf's
  // return value) in x0; each call's result is saved into a callee-saved
  // register and compared against the expected string length at the end.
  INIT_V8();
  SETUP();
  START();

  char const * test_plain_string = "Printf with no arguments.\n";
  char const * test_substring = "'This is a substring.'";

  __ PrintfNoPreserve(test_plain_string);   // NOLINT(runtime/printf)
  __ Mov(x19, x0);

  // Test simple integer arguments.
  __ Mov(x0, 1234);
  __ Mov(x1, 0x1234);
  __ PrintfNoPreserve("x0: %" PRId64", x1: 0x%08" PRIx64 "\n", x0, x1);
  __ Mov(x20, x0);

  // Test simple floating-point arguments.
  __ Fmov(d0, 1.234);
  __ PrintfNoPreserve("d0: %f\n", d0);
  __ Mov(x21, x0);

  // Test pointer (string) arguments.
  __ Mov(x2, reinterpret_cast<uintptr_t>(test_substring));
  __ PrintfNoPreserve("Test %%s: %s\n", x2);
  __ Mov(x22, x0);

  // Test the maximum number of arguments, and sign extension.
  __ Mov(w3, 0xffffffff);
  __ Mov(w4, 0xffffffff);
  __ Mov(x5, 0xffffffffffffffff);
  __ Mov(x6, 0xffffffffffffffff);
  __ PrintfNoPreserve("w3(uint32): %" PRIu32 "\nw4(int32): %" PRId32 "\n"
                      "x5(uint64): %" PRIu64 "\nx6(int64): %" PRId64 "\n",
                      w3, w4, x5, x6);
  __ Mov(x23, x0);

  __ Fmov(s1, 1.234);
  __ Fmov(s2, 2.345);
  __ Fmov(d3, 3.456);
  __ Fmov(d4, 4.567);
  __ PrintfNoPreserve("%%f: %f\n%%g: %g\n%%e: %e\n%%E: %E\n", s1, s2, d3, d4);
  __ Mov(x24, x0);

  // Test printing callee-saved registers.
  __ Mov(x28, 0x123456789abcdef);
  __ PrintfNoPreserve("0x%08" PRIx32 ", 0x%016" PRIx64 "\n", x28, x28);
  __ Mov(x25, x0);

  __ Fmov(d10, 42.0);
  __ PrintfNoPreserve("%g\n", d10);
  __ Mov(x26, x0);

  // Test with a different stack pointer.
  const Register old_stack_pointer = __ StackPointer();
  __ Mov(x29, old_stack_pointer);
  __ SetStackPointer(x29);

  __ PrintfNoPreserve("old_stack_pointer: 0x%016" PRIx64 "\n",
                      old_stack_pointer);
  __ Mov(x27, x0);

  __ Mov(old_stack_pointer, __ StackPointer());
  __ SetStackPointer(old_stack_pointer);

  // Test with three arguments.
  __ Mov(x3, 3);
  __ Mov(x4, 40);
  __ Mov(x5, 500);
  __ PrintfNoPreserve("3=%u, 4=%u, 5=%u\n", x3, x4, x5);
  __ Mov(x28, x0);

  END();
  RUN();

  // We cannot easily test the exact output of the Printf sequences, but we can
  // use the return code to check that the string length was correct.

  // Printf with no arguments.
  ASSERT_EQUAL_64(strlen(test_plain_string), x19);
  // x0: 1234, x1: 0x00001234
  ASSERT_EQUAL_64(25, x20);
  // d0: 1.234000
  ASSERT_EQUAL_64(13, x21);
  // Test %s: 'This is a substring.'
  ASSERT_EQUAL_64(32, x22);
  // w3(uint32): 4294967295
  // w4(int32): -1
  // x5(uint64): 18446744073709551615
  // x6(int64): -1
  ASSERT_EQUAL_64(23 + 14 + 33 + 14, x23);
  // %f: 1.234000
  // %g: 2.345
  // %e: 3.456000e+00
  // %E: 4.567000E+00
  ASSERT_EQUAL_64(13 + 10 + 17 + 17, x24);
  // 0x89abcdef, 0x0123456789abcdef
  ASSERT_EQUAL_64(31, x25);
  // 42
  ASSERT_EQUAL_64(3, x26);
  // old_stack_pointer: 0x00007fb037ae2370
  // Note: This is an example value, but the field width is fixed here so the
  // string length is still predictable.
  ASSERT_EQUAL_64(38, x27);
  // 3=3, 4=40, 5=500
  ASSERT_EQUAL_64(17, x28);

  TEARDOWN();
}
9642
9643
// This is a V8-specific test.
static void CopyFieldsHelper(CPURegList temps) {
  // Exercise MacroAssembler::CopyFields with the given list of temporary
  // registers, copying between 0 and 11 fields so that as many internal code
  // paths as possible are covered. The copied memory is compared against the
  // source word-by-word afterwards.
  static const uint64_t kLiteralBase = 0x0100001000100101UL;
  static const uint64_t src[] = {kLiteralBase * 1,
                                 kLiteralBase * 2,
                                 kLiteralBase * 3,
                                 kLiteralBase * 4,
                                 kLiteralBase * 5,
                                 kLiteralBase * 6,
                                 kLiteralBase * 7,
                                 kLiteralBase * 8,
                                 kLiteralBase * 9,
                                 kLiteralBase * 10,
                                 kLiteralBase * 11};
  // CopyFields operates on tagged (HeapObject-style) pointers, so bias the
  // raw addresses by kHeapObjectTag.
  static const uint64_t src_tagged =
      reinterpret_cast<uint64_t>(src) + kHeapObjectTag;

  static const unsigned kTestCount = sizeof(src) / sizeof(src[0]) + 1;
  uint64_t* dst[kTestCount];
  uint64_t dst_tagged[kTestCount];

  // The first test will be to copy 0 fields. The destination (and source)
  // should not be accessed in any way.
  dst[0] = NULL;
  dst_tagged[0] = kHeapObjectTag;

  // Allocate memory for each other test. Each test <n> will have <n> fields.
  // This is intended to exercise as many paths in CopyFields as possible.
  for (unsigned i = 1; i < kTestCount; i++) {
    dst[i] = new uint64_t[i];
    memset(dst[i], 0, i * sizeof(kLiteralBase));
    dst_tagged[i] = reinterpret_cast<uint64_t>(dst[i]) + kHeapObjectTag;
  }

  SETUP();
  START();

  // The zero-field copy uses a NULL-tagged destination and a zero source to
  // prove that neither is dereferenced.
  __ Mov(x0, dst_tagged[0]);
  __ Mov(x1, 0);
  __ CopyFields(x0, x1, temps, 0);
  for (unsigned i = 1; i < kTestCount; i++) {
    __ Mov(x0, dst_tagged[i]);
    __ Mov(x1, src_tagged);
    __ CopyFields(x0, x1, temps, i);
  }

  END();
  RUN();
  TEARDOWN();

  // Verify every copied field, then release the destination buffers.
  for (unsigned i = 1; i < kTestCount; i++) {
    for (unsigned j = 0; j < i; j++) {
      CHECK(src[j] == dst[i][j]);
    }
    delete [] dst[i];
  }
}
9701
9702
// This is a V8-specific test.
TEST(copyfields) {
  INIT_V8();
  // Run the CopyFields tests with between one and four temporary registers,
  // to cover the different unrolling strategies CopyFields may choose.
  CopyFieldsHelper(CPURegList(x10));
  CopyFieldsHelper(CPURegList(x10, x11));
  CopyFieldsHelper(CPURegList(x10, x11, x12));
  CopyFieldsHelper(CPURegList(x10, x11, x12, x13));
}
9711
9712
static void DoSmiAbsTest(int32_t value, bool must_fail = false) {
  // Tag 'value' as a smi, take its absolute value with SmiAbs, and check the
  // untagged result. If 'must_fail' is true, SmiAbs is expected to branch to
  // the slow path instead (the absolute value is not representable).
  SETUP();

  START();
  Label end, slow;
  __ Mov(x2, 0xc001c0de);  // Sentinel; overwritten only on the slow path.
  __ Mov(x1, value);
  __ SmiTag(x1);
  __ SmiAbs(x1, &slow);
  __ SmiUntag(x1);
  __ B(&end);

  __ Bind(&slow);
  __ Mov(x2, 0xbad);

  __ Bind(&end);
  END();

  RUN();

  if (must_fail) {
    // We tested an invalid conversion. The code must have jumped to the slow
    // path.
    ASSERT_EQUAL_64(0xbad, x2);
  } else {
    // The conversion is valid, check the result.
    int32_t result = (value >= 0) ? value : -value;
    ASSERT_EQUAL_64(result, x1);

    // Check that we didn't jump to the slow path.
    ASSERT_EQUAL_64(0xc001c0de, x2);
  }

  TEARDOWN();
}
9747
9748
TEST(smi_abs) {
  INIT_V8();
  // Simple and edge cases.
  DoSmiAbsTest(0);
  DoSmiAbsTest(0x12345);
  DoSmiAbsTest(0x40000000);
  DoSmiAbsTest(0x7fffffff);
  DoSmiAbsTest(-1);
  DoSmiAbsTest(-12345);
  // 0x80000001 converts to -2147483647, whose absolute value is still
  // representable.
  DoSmiAbsTest(0x80000001);

  // Check that the most negative SMI is detected.
  DoSmiAbsTest(0x80000000, true);
}
9763
9764
TEST(blr_lr) {
  // A simple test to check that the simulator correctly handles "blr lr":
  // branching to the address in lr while the instruction itself also writes
  // the return address into lr.
  INIT_V8();
  SETUP();

  START();
  Label target;
  Label end;

  __ Mov(x0, 0x0);
  __ Adr(lr, &target);

  __ Blr(lr);
  // This is the return point; it must be skipped, because the code at
  // 'target' branches straight to 'end'.
  __ Mov(x0, 0xdeadbeef);
  __ B(&end);

  __ Bind(&target);
  __ Mov(x0, 0xc001c0de);

  __ Bind(&end);
  END();

  RUN();

  // x0 must show that 'target' was reached and 0xdeadbeef was never written.
  ASSERT_EQUAL_64(0xc001c0de, x0);

  TEARDOWN();
}
9793
9794
TEST(barriers) {
  // Generate all supported barriers. This is just a smoke test: it checks
  // that every (domain, type) combination assembles and executes, not that
  // the barriers have any particular ordering effect.
  INIT_V8();
  SETUP();

  START();

  // DMB: every barrier domain crossed with every barrier type.
  __ Dmb(FullSystem, BarrierAll);
  __ Dmb(FullSystem, BarrierReads);
  __ Dmb(FullSystem, BarrierWrites);
  __ Dmb(FullSystem, BarrierOther);

  __ Dmb(InnerShareable, BarrierAll);
  __ Dmb(InnerShareable, BarrierReads);
  __ Dmb(InnerShareable, BarrierWrites);
  __ Dmb(InnerShareable, BarrierOther);

  __ Dmb(NonShareable, BarrierAll);
  __ Dmb(NonShareable, BarrierReads);
  __ Dmb(NonShareable, BarrierWrites);
  __ Dmb(NonShareable, BarrierOther);

  __ Dmb(OuterShareable, BarrierAll);
  __ Dmb(OuterShareable, BarrierReads);
  __ Dmb(OuterShareable, BarrierWrites);
  __ Dmb(OuterShareable, BarrierOther);

  // DSB: the same combinations.
  __ Dsb(FullSystem, BarrierAll);
  __ Dsb(FullSystem, BarrierReads);
  __ Dsb(FullSystem, BarrierWrites);
  __ Dsb(FullSystem, BarrierOther);

  __ Dsb(InnerShareable, BarrierAll);
  __ Dsb(InnerShareable, BarrierReads);
  __ Dsb(InnerShareable, BarrierWrites);
  __ Dsb(InnerShareable, BarrierOther);

  __ Dsb(NonShareable, BarrierAll);
  __ Dsb(NonShareable, BarrierReads);
  __ Dsb(NonShareable, BarrierWrites);
  __ Dsb(NonShareable, BarrierOther);

  __ Dsb(OuterShareable, BarrierAll);
  __ Dsb(OuterShareable, BarrierReads);
  __ Dsb(OuterShareable, BarrierWrites);
  __ Dsb(OuterShareable, BarrierOther);

  // ISB takes no options.
  __ Isb();

  END();

  RUN();

  TEARDOWN();
}
9853
9854
TEST(call_no_relocation) {
  // Test a non-relocatable call (RelocInfo::NONE64) to a direct address, and
  // check that return_address_from_call_start() can find the return address
  // for such a call sequence.
  Address call_start;
  Address return_address;

  INIT_V8();
  SETUP();

  START();

  Label function;
  Label test;

  __ B(&test);

  // A trivial callee: set x0 to 1 and return.
  __ Bind(&function);
  __ Mov(x0, 0x1);
  __ Ret();

  __ Bind(&test);
  __ Mov(x0, 0x0);
  __ Push(lr, xzr);
  {
    // Block the constant pool so that the distance between call_start and
    // return_address is exactly the call sequence, with nothing interleaved.
    Assembler::BlockConstPoolScope scope(&masm);
    call_start = buf + __ pc_offset();
    __ Call(buf + function.pos(), RelocInfo::NONE64);
    return_address = buf + __ pc_offset();
  }
  __ Pop(xzr, lr);
  END();

  RUN();

  // The callee must have run.
  ASSERT_EQUAL_64(1, x0);

  // The return_address_from_call_start function doesn't currently encounter any
  // non-relocatable sequences, so we check it here to make sure it works.
  // TODO(jbramley): Once Crankshaft is complete, decide if we need to support
  // non-relocatable calls at all.
  CHECK(return_address ==
        Assembler::return_address_from_call_start(call_start));

  TEARDOWN();
}
9898
9899
static void AbsHelperX(int64_t value) {
  // Check the MacroAssembler Abs helper on an X register, exercising every
  // combination of its two optional labels: the first is taken when the
  // absolute value is not representable (input is kXMinInt), the second when
  // it is representable.
  int64_t expected;

  SETUP();
  START();

  Label fail;
  Label done;

  // x0 is a status flag: it stays 0 unless the 'fail' label is (incorrectly)
  // reached.
  __ Mov(x0, 0);
  __ Mov(x1, value);

  if (value != kXMinInt) {
    expected = labs(value);

    Label next;
    // The result is representable, so none of the not-representable labels
    // may be taken.
    __ Abs(x10, x1);
    __ Abs(x11, x1, &fail);
    __ Abs(x12, x1, &fail, &next);
    __ Bind(&next);
    __ Abs(x13, x1, NULL, &done);
  } else {
    // labs is undefined for kXMinInt but our implementation in the
    // MacroAssembler will return kXMinInt in such a case.
    expected = kXMinInt;

    Label next;
    // The result is not representable, so the representable-result labels
    // must not be taken.
    __ Abs(x10, x1);
    __ Abs(x11, x1, NULL, &fail);
    __ Abs(x12, x1, &next, &fail);
    __ Bind(&next);
    __ Abs(x13, x1, &done);
  }

  __ Bind(&fail);
  __ Mov(x0, -1);

  __ Bind(&done);

  END();
  RUN();

  // x0 == 0 proves that 'fail' was never reached.
  ASSERT_EQUAL_64(0, x0);
  ASSERT_EQUAL_64(value, x1);
  ASSERT_EQUAL_64(expected, x10);
  ASSERT_EQUAL_64(expected, x11);
  ASSERT_EQUAL_64(expected, x12);
  ASSERT_EQUAL_64(expected, x13);

  TEARDOWN();
}
9953
9954
static void AbsHelperW(int32_t value) {
  // W-register counterpart of AbsHelperX: check the MacroAssembler Abs
  // helper with every combination of its two optional labels. The first
  // label is taken when the absolute value is not representable (input is
  // kWMinInt), the second when it is representable.
  int32_t expected;

  SETUP();
  START();

  Label fail;
  Label done;

  // w0 is a status flag: it stays 0 unless the 'fail' label is (incorrectly)
  // reached.
  __ Mov(w0, 0);
  // TODO(jbramley): The cast is needed to avoid a sign-extension bug in VIXL.
  // Once it is fixed, we should remove the cast.
  __ Mov(w1, static_cast<uint32_t>(value));

  if (value != kWMinInt) {
    expected = abs(value);

    Label next;
    // The result is representable, so none of the not-representable labels
    // may be taken.
    __ Abs(w10, w1);
    __ Abs(w11, w1, &fail);
    __ Abs(w12, w1, &fail, &next);
    __ Bind(&next);
    __ Abs(w13, w1, NULL, &done);
  } else {
    // abs is undefined for kWMinInt but our implementation in the
    // MacroAssembler will return kWMinInt in such a case.
    expected = kWMinInt;

    Label next;
    // The result is not representable, so the representable-result labels
    // must not be taken.
    __ Abs(w10, w1);
    __ Abs(w11, w1, NULL, &fail);
    __ Abs(w12, w1, &next, &fail);
    __ Bind(&next);
    __ Abs(w13, w1, &done);
  }

  __ Bind(&fail);
  __ Mov(w0, -1);

  __ Bind(&done);

  END();
  RUN();

  // w0 == 0 proves that 'fail' was never reached.
  ASSERT_EQUAL_32(0, w0);
  ASSERT_EQUAL_32(value, w1);
  ASSERT_EQUAL_32(expected, w10);
  ASSERT_EQUAL_32(expected, w11);
  ASSERT_EQUAL_32(expected, w12);
  ASSERT_EQUAL_32(expected, w13);

  TEARDOWN();
}
10010
10011
TEST(abs) {
  INIT_V8();
  // Cover zero, small positive/negative values, and both extremes for each
  // register width; the MinInt cases take the not-representable paths.
  AbsHelperX(0);
  AbsHelperX(42);
  AbsHelperX(-42);
  AbsHelperX(kXMinInt);
  AbsHelperX(kXMaxInt);

  AbsHelperW(0);
  AbsHelperW(42);
  AbsHelperW(-42);
  AbsHelperW(kWMinInt);
  AbsHelperW(kWMaxInt);
}
OLDNEW
« no previous file with comments | « test/cctest/test-api.cc ('k') | test/cctest/test-assembler-mips.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698