1 // Copyright 2013 the V8 project authors. All rights reserved. | |
2 // Redistribution and use in source and binary forms, with or without | |
3 // modification, are permitted provided that the following conditions are | |
4 // met: | |
5 // | |
6 // * Redistributions of source code must retain the above copyright | |
7 // notice, this list of conditions and the following disclaimer. | |
8 // * Redistributions in binary form must reproduce the above | |
9 // copyright notice, this list of conditions and the following | |
10 // disclaimer in the documentation and/or other materials provided | |
11 // with the distribution. | |
12 // * Neither the name of Google Inc. nor the names of its | |
13 // contributors may be used to endorse or promote products derived | |
14 // from this software without specific prior written permission. | |
15 // | |
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | |
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | |
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | |
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | |
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | |
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |
27 | |
28 #include <stdio.h> | |
29 #include <stdlib.h> | |
30 #include <string.h> | |
31 #include <cmath> | |
32 #include <limits> | |
33 | |
34 #include "v8.h" | |
35 | |
36 #include "macro-assembler.h" | |
37 #include "a64/simulator-a64.h" | |
38 #include "a64/decoder-a64-inl.h" | |
39 #include "a64/disasm-a64.h" | |
40 #include "a64/utils-a64.h" | |
41 #include "cctest.h" | |
42 #include "test-utils-a64.h" | |
43 | |
44 using namespace v8::internal; | |
45 | |
46 // Test infrastructure. | |
47 // | |
48 // Tests are functions which accept no parameters and have no return values. | |
49 // The testing code should not perform an explicit return once completed. For | |
50 // example, to test the mov immediate instruction, a very simple test would be: | |
51 // | |
52 // TEST(mov_x0_one) { | |
53 // SETUP(); | |
54 // | |
55 // START(); | |
56 // __ mov(x0, Operand(1)); | |
57 // END(); | |
58 // | |
59 // RUN(); | |
60 // | |
61 // ASSERT_EQUAL_64(1, x0); | |
62 // | |
63 // TEARDOWN(); | |
64 // } | |
65 // | |
66 // Within a START ... END block all registers but csp can be modified. csp | |
67 // has to be explicitly saved/restored. The END() macro replaces the function | |
68 // return, so it may appear multiple times in a test if the test has multiple | |
69 // exit points. | |
70 // | |
71 // Once the test has been run, all integer and floating-point registers, as | |
72 // well as the flags, are accessible through a RegisterDump instance; see | |
73 // test-utils-a64.cc for more info on RegisterDump. | |
74 // | |
75 // We provide some helper asserts to handle common cases: | |
76 // | |
77 // ASSERT_EQUAL_32(int32_t, int32_t) | |
78 // ASSERT_EQUAL_FP32(float, float) | |
79 // ASSERT_EQUAL_32(int32_t, W register) | |
80 // ASSERT_EQUAL_FP32(float, S register) | |
81 // ASSERT_EQUAL_64(int64_t, int64_t) | |
82 // ASSERT_EQUAL_FP64(double, double) | |
83 // ASSERT_EQUAL_64(int64_t, X register) | |
84 // ASSERT_EQUAL_64(X register, X register) | |
85 // ASSERT_EQUAL_FP64(double, D register) | |
86 // | |
87 // e.g. ASSERT_EQUAL_FP64(0.5, d30); | |
88 // | |
89 // If more advanced computation is required before the assert, then access the | |
90 // RegisterDump named core directly: | |
91 // | |
92 // ASSERT_EQUAL_64(0x1234, core.xreg(0) & 0xffff); | |
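// | |
// Flag state captured in the dump can be checked the same way, using the | |
// ASSERT_EQUAL_NZCV helper defined below, e.g.: | |
// | |
//   ASSERT_EQUAL_NZCV(ZFlag); | |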
93 | |
94 | |
95 #if 0 // TODO(all): enable. | |
96 static v8::Persistent<v8::Context> env; | |
97 | |
98 static void InitializeVM() { | |
99 if (env.IsEmpty()) { | |
100 env = v8::Context::New(); | |
101 } | |
102 } | |
103 #endif | |
104 | |
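// The '__' shorthand makes the code generation below read like assembly: | |
// each '__ Op(...)' expands to 'masm.Op(...)'. | |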
105 #define __ masm. | |
106 | |
107 #define BUF_SIZE 8192 | |
108 #define SETUP() SETUP_SIZE(BUF_SIZE) | |
109 | |
110 #define INIT_V8() \ | |
111 CcTest::InitializeVM(); | |
112 | |
113 #ifdef USE_SIMULATOR | |
114 | |
115 // Run tests with the simulator. | |
116 #define SETUP_SIZE(buf_size) \ | |
117 Isolate* isolate = Isolate::Current(); \ | |
118 HandleScope scope(isolate); \ | |
119 ASSERT(isolate != NULL); \ | |
120 byte* buf = new byte[buf_size]; \ | |
121 MacroAssembler masm(isolate, buf, buf_size); \ | |
122 Decoder<DispatchingDecoderVisitor>* decoder = \ | |
123 new Decoder<DispatchingDecoderVisitor>(); \ | |
124 Simulator simulator(decoder); \ | |
125 PrintDisassembler* pdis = NULL; \ | |
126 RegisterDump core; | |
127 | |
128 /* if (CcTest::trace_sim()) { | |
129      pdis = new PrintDisassembler(stdout); | |
130      decoder->PrependVisitor(pdis); | |
131    } | |
132 */ | |
133 | |
134 // Reset the assembler and simulator, so that instructions can be generated, | |
135 // but don't actually emit any code. This can be used by tests that need to | |
136 // emit instructions at the start of the buffer. Note that START_AFTER_RESET | |
137 // must be called before any callee-saved register is modified, and before an | |
138 // END is encountered. | |
139 // | |
140 // Most tests should call START, rather than call RESET directly. | |
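// | |
// For example, a test that needs an instruction at the very start of the | |
// buffer (as the branch_at_start test below does) can use the split macros | |
// directly: | |
// | |
//   RESET(); | |
//   __ B(&target);       // Emitted at the start of the buffer. | |
//   START_AFTER_RESET(); | |
//   ... | |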
141 #define RESET() \ | |
142 __ Reset(); \ | |
143 simulator.ResetState(); | |
144 | |
145 #define START_AFTER_RESET() \ | |
146 __ SetStackPointer(csp); \ | |
147 __ PushCalleeSavedRegisters(); \ | |
148 __ Debug("Start test.", __LINE__, TRACE_ENABLE | LOG_ALL); | |
149 | |
150 #define START() \ | |
151 RESET(); \ | |
152 START_AFTER_RESET(); | |
153 | |
154 #define RUN() \ | |
155 simulator.RunFrom(reinterpret_cast<Instruction*>(buf)) | |
156 | |
157 #define END() \ | |
158 __ Debug("End test.", __LINE__, TRACE_DISABLE | LOG_ALL); \ | |
159 core.Dump(&masm); \ | |
160 __ PopCalleeSavedRegisters(); \ | |
161 __ Ret(); \ | |
162 __ GetCode(NULL); | |
163 | |
164 #define TEARDOWN() \ | |
165 delete pdis; \ | |
166 delete[] buf; | |
167 | |
168 #else // ifdef USE_SIMULATOR. | |
169 // Run the test on real hardware or models. | |
170 #define SETUP_SIZE(buf_size) \ | |
171 Isolate* isolate = Isolate::Current(); \ | |
172 HandleScope scope(isolate); \ | |
173 ASSERT(isolate != NULL); \ | |
174 byte* buf = new byte[buf_size]; \ | |
175 MacroAssembler masm(isolate, buf, buf_size); \ | |
176 RegisterDump core; \ | |
177 CPU::SetUp(); | |
178 | |
179 #define RESET() \ | |
180 __ Reset(); | |
181 | |
182 #define START_AFTER_RESET() \ | |
183 __ SetStackPointer(csp); \ | |
184 __ PushCalleeSavedRegisters(); | |
185 | |
186 #define START() \ | |
187 RESET(); \ | |
188 START_AFTER_RESET(); | |
189 | |
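// Note that the function pointer is extracted with memcpy: ISO C++ does not | |
// allow a data pointer to be cast directly to a function pointer. | |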
190 #define RUN() \ | |
191 CPU::FlushICache(buf, masm.SizeOfGeneratedCode()); \ | |
192 { \ | |
193 void (*test_function)(void); \ | |
194 memcpy(&test_function, &buf, sizeof(buf)); \ | |
195 test_function(); \ | |
196 } | |
197 | |
198 #define END() \ | |
199 core.Dump(&masm); \ | |
200 __ PopCalleeSavedRegisters(); \ | |
201 __ Ret(); \ | |
202 __ GetCode(NULL); | |
203 | |
204 #define TEARDOWN() \ | |
205 delete[] buf; | |
206 | |
207 #endif // ifdef USE_SIMULATOR. | |
208 | |
209 #define ASSERT_EQUAL_NZCV(expected) \ | |
210 CHECK(EqualNzcv(expected, core.flags_nzcv())) | |
211 | |
212 #define ASSERT_EQUAL_REGISTERS(expected) \ | |
213 CHECK(EqualRegisters(&expected, &core)) | |
214 | |
215 #define ASSERT_EQUAL_32(expected, result) \ | |
216 CHECK(Equal32(static_cast<uint32_t>(expected), &core, result)) | |
217 | |
218 #define ASSERT_EQUAL_FP32(expected, result) \ | |
219 CHECK(EqualFP32(expected, &core, result)) | |
220 | |
221 #define ASSERT_EQUAL_64(expected, result) \ | |
222 CHECK(Equal64(expected, &core, result)) | |
223 | |
224 #define ASSERT_EQUAL_FP64(expected, result) \ | |
225 CHECK(EqualFP64(expected, &core, result)) | |
226 | |
227 #ifdef DEBUG | |
228 #define ASSERT_LITERAL_POOL_SIZE(expected) \ | |
229 CHECK((expected) == (__ LiteralPoolSize())) | |
230 #else | |
231 #define ASSERT_LITERAL_POOL_SIZE(expected) \ | |
232 ((void) 0) | |
233 #endif | |
234 | |
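// For example, ASSERT_LITERAL_POOL_SIZE(0) checks that the literal pool is | |
// currently empty; in release builds the check compiles away. | |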
235 | |
236 TEST(stack_ops) { | |
237 INIT_V8(); | |
238 SETUP(); | |
239 | |
240 START(); | |
241 // Save csp. | |
242 __ Mov(x29, csp); | |
243 | |
244 // Set the csp to a known value. | |
245 __ Mov(x16, 0x1000); | |
246 __ Mov(csp, x16); | |
247 __ Mov(x0, csp); | |
248 | |
249 // Add immediate to the csp, and move the result to a normal register. | |
250 __ Add(csp, csp, Operand(0x50)); | |
251 __ Mov(x1, csp); | |
252 | |
253 // Add extended to the csp, and move the result to a normal register. | |
254 __ Mov(x17, 0xfff); | |
255 __ Add(csp, csp, Operand(x17, SXTB)); | |
256 __ Mov(x2, csp); | |
257 | |
258 // Write csp using a logical instruction, and move to a normal register. | |
259 __ Orr(csp, xzr, Operand(0x1fff)); | |
260 __ Mov(x3, csp); | |
261 | |
262 // Write wcsp using a logical instruction. | |
263 __ Orr(wcsp, wzr, Operand(0xfffffff8L)); | |
264 __ Mov(x4, csp); | |
265 | |
266 // Write csp, and read back wcsp. | |
267 __ Orr(csp, xzr, Operand(0xfffffff8L)); | |
268 __ Mov(w5, wcsp); | |
269 | |
270 // Restore csp. | |
271 __ Mov(csp, x29); | |
272 END(); | |
273 | |
274 RUN(); | |
275 | |
276 ASSERT_EQUAL_64(0x1000, x0); | |
277 ASSERT_EQUAL_64(0x1050, x1); | |
278 ASSERT_EQUAL_64(0x104f, x2); | |
279 ASSERT_EQUAL_64(0x1fff, x3); | |
280 ASSERT_EQUAL_64(0xfffffff8, x4); | |
281 ASSERT_EQUAL_64(0xfffffff8, x5); | |
282 | |
283 TEARDOWN(); | |
284 } | |
285 | |
286 | |
287 TEST(mvn) { | |
288 INIT_V8(); | |
289 SETUP(); | |
290 | |
291 START(); | |
292 __ Mvn(w0, 0xfff); | |
293 __ Mvn(x1, 0xfff); | |
294 __ Mvn(w2, Operand(w0, LSL, 1)); | |
295 __ Mvn(x3, Operand(x1, LSL, 2)); | |
296 __ Mvn(w4, Operand(w0, LSR, 3)); | |
297 __ Mvn(x5, Operand(x1, LSR, 4)); | |
298 __ Mvn(w6, Operand(w0, ASR, 11)); | |
299 __ Mvn(x7, Operand(x1, ASR, 12)); | |
300 __ Mvn(w8, Operand(w0, ROR, 13)); | |
301 __ Mvn(x9, Operand(x1, ROR, 14)); | |
302 __ Mvn(w10, Operand(w2, UXTB)); | |
303 __ Mvn(x11, Operand(x2, SXTB, 1)); | |
304 __ Mvn(w12, Operand(w2, UXTH, 2)); | |
305 __ Mvn(x13, Operand(x2, SXTH, 3)); | |
306 __ Mvn(x14, Operand(w2, UXTW, 4)); | |
307 __ Mvn(x15, Operand(w2, SXTW, 4)); | |
308 END(); | |
309 | |
310 RUN(); | |
311 | |
312 ASSERT_EQUAL_64(0xfffff000, x0); | |
313 ASSERT_EQUAL_64(0xfffffffffffff000UL, x1); | |
314 ASSERT_EQUAL_64(0x00001fff, x2); | |
315 ASSERT_EQUAL_64(0x0000000000003fffUL, x3); | |
316 ASSERT_EQUAL_64(0xe00001ff, x4); | |
317 ASSERT_EQUAL_64(0xf0000000000000ffUL, x5); | |
318 ASSERT_EQUAL_64(0x00000001, x6); | |
319 ASSERT_EQUAL_64(0x0, x7); | |
320 ASSERT_EQUAL_64(0x7ff80000, x8); | |
321 ASSERT_EQUAL_64(0x3ffc000000000000UL, x9); | |
322 ASSERT_EQUAL_64(0xffffff00, x10); | |
323 ASSERT_EQUAL_64(0x0000000000000001UL, x11); | |
324 ASSERT_EQUAL_64(0xffff8003, x12); | |
325 ASSERT_EQUAL_64(0xffffffffffff0007UL, x13); | |
326 ASSERT_EQUAL_64(0xfffffffffffe000fUL, x14); | |
327 ASSERT_EQUAL_64(0xfffffffffffe000fUL, x15); | |
328 | |
329 TEARDOWN(); | |
330 } | |
331 | |
332 | |
333 TEST(mov) { | |
334 INIT_V8(); | |
335 SETUP(); | |
336 | |
337 START(); | |
338 __ Mov(x0, 0xffffffffffffffffL); | |
339 __ Mov(x1, 0xffffffffffffffffL); | |
340 __ Mov(x2, 0xffffffffffffffffL); | |
341 __ Mov(x3, 0xffffffffffffffffL); | |
342 | |
343 __ Mov(x0, 0x0123456789abcdefL); | |
344 | |
345 __ movz(x1, 0xabcdL << 16); | |
346 __ movk(x2, 0xabcdL << 32); | |
347 __ movn(x3, 0xabcdL << 48); | |
348 | |
349 __ Mov(x4, 0x0123456789abcdefL); | |
350 __ Mov(x5, x4); | |
351 | |
352 __ Mov(w6, -1); | |
353 | |
354 // Test that moves back to the same register have the desired effect. This | |
355 // is a no-op for X registers, and a truncation for W registers. | |
356 __ Mov(x7, 0x0123456789abcdefL); | |
357 __ Mov(x7, x7); | |
358 __ Mov(x8, 0x0123456789abcdefL); | |
359 __ Mov(w8, w8); | |
360 __ Mov(x9, 0x0123456789abcdefL); | |
361 __ Mov(x9, Operand(x9)); | |
362 __ Mov(x10, 0x0123456789abcdefL); | |
363 __ Mov(w10, Operand(w10)); | |
364 | |
365 __ Mov(w11, 0xfff); | |
366 __ Mov(x12, 0xfff); | |
367 __ Mov(w13, Operand(w11, LSL, 1)); | |
368 __ Mov(x14, Operand(x12, LSL, 2)); | |
369 __ Mov(w15, Operand(w11, LSR, 3)); | |
370 __ Mov(x18, Operand(x12, LSR, 4)); | |
371 __ Mov(w19, Operand(w11, ASR, 11)); | |
372 __ Mov(x20, Operand(x12, ASR, 12)); | |
373 __ Mov(w21, Operand(w11, ROR, 13)); | |
374 __ Mov(x22, Operand(x12, ROR, 14)); | |
375 __ Mov(w23, Operand(w13, UXTB)); | |
376 __ Mov(x24, Operand(x13, SXTB, 1)); | |
377 __ Mov(w25, Operand(w13, UXTH, 2)); | |
378 __ Mov(x26, Operand(x13, SXTH, 3)); | |
379 __ Mov(x27, Operand(w13, UXTW, 4)); | |
380 END(); | |
381 | |
382 RUN(); | |
383 | |
384 ASSERT_EQUAL_64(0x0123456789abcdefL, x0); | |
385 ASSERT_EQUAL_64(0x00000000abcd0000L, x1); | |
386 ASSERT_EQUAL_64(0xffffabcdffffffffL, x2); | |
387 ASSERT_EQUAL_64(0x5432ffffffffffffL, x3); | |
388 ASSERT_EQUAL_64(x4, x5); | |
389 ASSERT_EQUAL_32(-1, w6); | |
390 ASSERT_EQUAL_64(0x0123456789abcdefL, x7); | |
391 ASSERT_EQUAL_32(0x89abcdefL, w8); | |
392 ASSERT_EQUAL_64(0x0123456789abcdefL, x9); | |
393 ASSERT_EQUAL_32(0x89abcdefL, w10); | |
394 ASSERT_EQUAL_64(0x00000fff, x11); | |
395 ASSERT_EQUAL_64(0x0000000000000fffUL, x12); | |
396 ASSERT_EQUAL_64(0x00001ffe, x13); | |
397 ASSERT_EQUAL_64(0x0000000000003ffcUL, x14); | |
398 ASSERT_EQUAL_64(0x000001ff, x15); | |
399 ASSERT_EQUAL_64(0x00000000000000ffUL, x18); | |
400 ASSERT_EQUAL_64(0x00000001, x19); | |
401 ASSERT_EQUAL_64(0x0, x20); | |
402 ASSERT_EQUAL_64(0x7ff80000, x21); | |
403 ASSERT_EQUAL_64(0x3ffc000000000000UL, x22); | |
404 ASSERT_EQUAL_64(0x000000fe, x23); | |
405 ASSERT_EQUAL_64(0xfffffffffffffffcUL, x24); | |
406 ASSERT_EQUAL_64(0x00007ff8, x25); | |
407 ASSERT_EQUAL_64(0x000000000000fff0UL, x26); | |
408 ASSERT_EQUAL_64(0x000000000001ffe0UL, x27); | |
409 | |
410 TEARDOWN(); | |
411 } | |
412 | |
413 | |
414 TEST(mov_imm_w) { | |
415 INIT_V8(); | |
416 SETUP(); | |
417 | |
418 START(); | |
419 __ Mov(w0, 0xffffffffL); | |
420 __ Mov(w1, 0xffff1234L); | |
421 __ Mov(w2, 0x1234ffffL); | |
422 __ Mov(w3, 0x00000000L); | |
423 __ Mov(w4, 0x00001234L); | |
424 __ Mov(w5, 0x12340000L); | |
425 __ Mov(w6, 0x12345678L); | |
426 END(); | |
427 | |
428 RUN(); | |
429 | |
430 ASSERT_EQUAL_64(0xffffffffL, x0); | |
431 ASSERT_EQUAL_64(0xffff1234L, x1); | |
432 ASSERT_EQUAL_64(0x1234ffffL, x2); | |
433 ASSERT_EQUAL_64(0x00000000L, x3); | |
434 ASSERT_EQUAL_64(0x00001234L, x4); | |
435 ASSERT_EQUAL_64(0x12340000L, x5); | |
436 ASSERT_EQUAL_64(0x12345678L, x6); | |
437 | |
438 TEARDOWN(); | |
439 } | |
440 | |
441 | |
442 TEST(mov_imm_x) { | |
443 INIT_V8(); | |
444 SETUP(); | |
445 | |
446 START(); | |
447 __ Mov(x0, 0xffffffffffffffffL); | |
448 __ Mov(x1, 0xffffffffffff1234L); | |
449 __ Mov(x2, 0xffffffff12345678L); | |
450 __ Mov(x3, 0xffff1234ffff5678L); | |
451 __ Mov(x4, 0x1234ffffffff5678L); | |
452 __ Mov(x5, 0x1234ffff5678ffffL); | |
453 __ Mov(x6, 0x12345678ffffffffL); | |
454 __ Mov(x7, 0x1234ffffffffffffL); | |
455 __ Mov(x8, 0x123456789abcffffL); | |
456 __ Mov(x9, 0x12345678ffff9abcL); | |
457 __ Mov(x10, 0x1234ffff56789abcL); | |
458 __ Mov(x11, 0xffff123456789abcL); | |
459 __ Mov(x12, 0x0000000000000000L); | |
460 __ Mov(x13, 0x0000000000001234L); | |
461 __ Mov(x14, 0x0000000012345678L); | |
462 __ Mov(x15, 0x0000123400005678L); | |
463 __ Mov(x18, 0x1234000000005678L); | |
464 __ Mov(x19, 0x1234000056780000L); | |
465 __ Mov(x20, 0x1234567800000000L); | |
466 __ Mov(x21, 0x1234000000000000L); | |
467 __ Mov(x22, 0x123456789abc0000L); | |
468 __ Mov(x23, 0x1234567800009abcL); | |
469 __ Mov(x24, 0x1234000056789abcL); | |
470 __ Mov(x25, 0x0000123456789abcL); | |
471 __ Mov(x26, 0x123456789abcdef0L); | |
472 __ Mov(x27, 0xffff000000000001L); | |
473 __ Mov(x28, 0x8000ffff00000000L); | |
474 END(); | |
475 | |
476 RUN(); | |
477 | |
478 ASSERT_EQUAL_64(0xffffffffffff1234L, x1); | |
479 ASSERT_EQUAL_64(0xffffffff12345678L, x2); | |
480 ASSERT_EQUAL_64(0xffff1234ffff5678L, x3); | |
481 ASSERT_EQUAL_64(0x1234ffffffff5678L, x4); | |
482 ASSERT_EQUAL_64(0x1234ffff5678ffffL, x5); | |
483 ASSERT_EQUAL_64(0x12345678ffffffffL, x6); | |
484 ASSERT_EQUAL_64(0x1234ffffffffffffL, x7); | |
485 ASSERT_EQUAL_64(0x123456789abcffffL, x8); | |
486 ASSERT_EQUAL_64(0x12345678ffff9abcL, x9); | |
487 ASSERT_EQUAL_64(0x1234ffff56789abcL, x10); | |
488 ASSERT_EQUAL_64(0xffff123456789abcL, x11); | |
489 ASSERT_EQUAL_64(0x0000000000000000L, x12); | |
490 ASSERT_EQUAL_64(0x0000000000001234L, x13); | |
491 ASSERT_EQUAL_64(0x0000000012345678L, x14); | |
492 ASSERT_EQUAL_64(0x0000123400005678L, x15); | |
493 ASSERT_EQUAL_64(0x1234000000005678L, x18); | |
494 ASSERT_EQUAL_64(0x1234000056780000L, x19); | |
495 ASSERT_EQUAL_64(0x1234567800000000L, x20); | |
496 ASSERT_EQUAL_64(0x1234000000000000L, x21); | |
497 ASSERT_EQUAL_64(0x123456789abc0000L, x22); | |
498 ASSERT_EQUAL_64(0x1234567800009abcL, x23); | |
499 ASSERT_EQUAL_64(0x1234000056789abcL, x24); | |
500 ASSERT_EQUAL_64(0x0000123456789abcL, x25); | |
501 ASSERT_EQUAL_64(0x123456789abcdef0L, x26); | |
502 ASSERT_EQUAL_64(0xffff000000000001L, x27); | |
503 ASSERT_EQUAL_64(0x8000ffff00000000L, x28); | |
504 | |
505 TEARDOWN(); | |
506 } | |
507 | |
508 | |
509 TEST(orr) { | |
510 INIT_V8(); | |
511 SETUP(); | |
512 | |
513 START(); | |
514 __ Mov(x0, 0xf0f0); | |
515 __ Mov(x1, 0xf00000ff); | |
516 | |
517 __ Orr(x2, x0, Operand(x1)); | |
518 __ Orr(w3, w0, Operand(w1, LSL, 28)); | |
519 __ Orr(x4, x0, Operand(x1, LSL, 32)); | |
520 __ Orr(x5, x0, Operand(x1, LSR, 4)); | |
521 __ Orr(w6, w0, Operand(w1, ASR, 4)); | |
522 __ Orr(x7, x0, Operand(x1, ASR, 4)); | |
523 __ Orr(w8, w0, Operand(w1, ROR, 12)); | |
524 __ Orr(x9, x0, Operand(x1, ROR, 12)); | |
525 __ Orr(w10, w0, Operand(0xf)); | |
526 __ Orr(x11, x0, Operand(0xf0000000f0000000L)); | |
527 END(); | |
528 | |
529 RUN(); | |
530 | |
531 ASSERT_EQUAL_64(0xf000f0ff, x2); | |
532 ASSERT_EQUAL_64(0xf000f0f0, x3); | |
533 ASSERT_EQUAL_64(0xf00000ff0000f0f0L, x4); | |
534 ASSERT_EQUAL_64(0x0f00f0ff, x5); | |
535 ASSERT_EQUAL_64(0xff00f0ff, x6); | |
536 ASSERT_EQUAL_64(0x0f00f0ff, x7); | |
537 ASSERT_EQUAL_64(0x0ffff0f0, x8); | |
538 ASSERT_EQUAL_64(0x0ff00000000ff0f0L, x9); | |
539 ASSERT_EQUAL_64(0xf0ff, x10); | |
540 ASSERT_EQUAL_64(0xf0000000f000f0f0L, x11); | |
541 | |
542 TEARDOWN(); | |
543 } | |
544 | |
545 | |
546 TEST(orr_extend) { | |
547 INIT_V8(); | |
548 SETUP(); | |
549 | |
550 START(); | |
551 __ Mov(x0, 1); | |
552 __ Mov(x1, 0x8000000080008080UL); | |
553 __ Orr(w6, w0, Operand(w1, UXTB)); | |
554 __ Orr(x7, x0, Operand(x1, UXTH, 1)); | |
555 __ Orr(w8, w0, Operand(w1, UXTW, 2)); | |
556 __ Orr(x9, x0, Operand(x1, UXTX, 3)); | |
557 __ Orr(w10, w0, Operand(w1, SXTB)); | |
558 __ Orr(x11, x0, Operand(x1, SXTH, 1)); | |
559 __ Orr(x12, x0, Operand(x1, SXTW, 2)); | |
560 __ Orr(x13, x0, Operand(x1, SXTX, 3)); | |
561 END(); | |
562 | |
563 RUN(); | |
564 | |
565 ASSERT_EQUAL_64(0x00000081, x6); | |
566 ASSERT_EQUAL_64(0x00010101, x7); | |
567 ASSERT_EQUAL_64(0x00020201, x8); | |
568 ASSERT_EQUAL_64(0x0000000400040401UL, x9); | |
569 ASSERT_EQUAL_64(0x00000000ffffff81UL, x10); | |
570 ASSERT_EQUAL_64(0xffffffffffff0101UL, x11); | |
571 ASSERT_EQUAL_64(0xfffffffe00020201UL, x12); | |
572 ASSERT_EQUAL_64(0x0000000400040401UL, x13); | |
573 | |
574 TEARDOWN(); | |
575 } | |
576 | |
577 | |
578 TEST(bitwise_wide_imm) { | |
579 INIT_V8(); | |
580 SETUP(); | |
581 | |
582 START(); | |
583 __ Mov(x0, 0); | |
584 __ Mov(x1, 0xf0f0f0f0f0f0f0f0UL); | |
585 | |
586 __ Orr(x10, x0, Operand(0x1234567890abcdefUL)); | |
587 __ Orr(w11, w1, Operand(0x90abcdef)); | |
588 END(); | |
589 | |
590 RUN(); | |
591 | |
592 ASSERT_EQUAL_64(0, x0); | |
593 ASSERT_EQUAL_64(0xf0f0f0f0f0f0f0f0UL, x1); | |
594 ASSERT_EQUAL_64(0x1234567890abcdefUL, x10); | |
595 ASSERT_EQUAL_64(0xf0fbfdffUL, x11); | |
596 | |
597 TEARDOWN(); | |
598 } | |
599 | |
600 | |
601 TEST(orn) { | |
602 INIT_V8(); | |
603 SETUP(); | |
604 | |
605 START(); | |
606 __ Mov(x0, 0xf0f0); | |
607 __ Mov(x1, 0xf00000ff); | |
608 | |
609 __ Orn(x2, x0, Operand(x1)); | |
610 __ Orn(w3, w0, Operand(w1, LSL, 4)); | |
611 __ Orn(x4, x0, Operand(x1, LSL, 4)); | |
612 __ Orn(x5, x0, Operand(x1, LSR, 1)); | |
613 __ Orn(w6, w0, Operand(w1, ASR, 1)); | |
614 __ Orn(x7, x0, Operand(x1, ASR, 1)); | |
615 __ Orn(w8, w0, Operand(w1, ROR, 16)); | |
616 __ Orn(x9, x0, Operand(x1, ROR, 16)); | |
617 __ Orn(w10, w0, Operand(0xffff)); | |
618 __ Orn(x11, x0, Operand(0xffff0000ffffL)); | |
619 END(); | |
620 | |
621 RUN(); | |
622 | |
623 ASSERT_EQUAL_64(0xffffffff0ffffff0L, x2); | |
624 ASSERT_EQUAL_64(0xfffff0ff, x3); | |
625 ASSERT_EQUAL_64(0xfffffff0fffff0ffL, x4); | |
626 ASSERT_EQUAL_64(0xffffffff87fffff0L, x5); | |
627 ASSERT_EQUAL_64(0x07fffff0, x6); | |
628 ASSERT_EQUAL_64(0xffffffff87fffff0L, x7); | |
629 ASSERT_EQUAL_64(0xff00ffff, x8); | |
630 ASSERT_EQUAL_64(0xff00ffffffffffffL, x9); | |
631 ASSERT_EQUAL_64(0xfffff0f0, x10); | |
632 ASSERT_EQUAL_64(0xffff0000fffff0f0L, x11); | |
633 | |
634 TEARDOWN(); | |
635 } | |
636 | |
637 | |
638 TEST(orn_extend) { | |
639 INIT_V8(); | |
640 SETUP(); | |
641 | |
642 START(); | |
643 __ Mov(x0, 1); | |
644 __ Mov(x1, 0x8000000080008081UL); | |
645 __ Orn(w6, w0, Operand(w1, UXTB)); | |
646 __ Orn(x7, x0, Operand(x1, UXTH, 1)); | |
647 __ Orn(w8, w0, Operand(w1, UXTW, 2)); | |
648 __ Orn(x9, x0, Operand(x1, UXTX, 3)); | |
649 __ Orn(w10, w0, Operand(w1, SXTB)); | |
650 __ Orn(x11, x0, Operand(x1, SXTH, 1)); | |
651 __ Orn(x12, x0, Operand(x1, SXTW, 2)); | |
652 __ Orn(x13, x0, Operand(x1, SXTX, 3)); | |
653 END(); | |
654 | |
655 RUN(); | |
656 | |
657 ASSERT_EQUAL_64(0xffffff7f, x6); | |
658 ASSERT_EQUAL_64(0xfffffffffffefefdUL, x7); | |
659 ASSERT_EQUAL_64(0xfffdfdfb, x8); | |
660 ASSERT_EQUAL_64(0xfffffffbfffbfbf7UL, x9); | |
661 ASSERT_EQUAL_64(0x0000007f, x10); | |
662 ASSERT_EQUAL_64(0x0000fefd, x11); | |
663 ASSERT_EQUAL_64(0x00000001fffdfdfbUL, x12); | |
664 ASSERT_EQUAL_64(0xfffffffbfffbfbf7UL, x13); | |
665 | |
666 TEARDOWN(); | |
667 } | |
668 | |
669 | |
670 TEST(and_) { | |
671 INIT_V8(); | |
672 SETUP(); | |
673 | |
674 START(); | |
675 __ Mov(x0, 0xfff0); | |
676 __ Mov(x1, 0xf00000ff); | |
677 | |
678 __ And(x2, x0, Operand(x1)); | |
679 __ And(w3, w0, Operand(w1, LSL, 4)); | |
680 __ And(x4, x0, Operand(x1, LSL, 4)); | |
681 __ And(x5, x0, Operand(x1, LSR, 1)); | |
682 __ And(w6, w0, Operand(w1, ASR, 20)); | |
683 __ And(x7, x0, Operand(x1, ASR, 20)); | |
684 __ And(w8, w0, Operand(w1, ROR, 28)); | |
685 __ And(x9, x0, Operand(x1, ROR, 28)); | |
686 __ And(w10, w0, Operand(0xff00)); | |
687 __ And(x11, x0, Operand(0xff)); | |
688 END(); | |
689 | |
690 RUN(); | |
691 | |
692 ASSERT_EQUAL_64(0x000000f0, x2); | |
693 ASSERT_EQUAL_64(0x00000ff0, x3); | |
694 ASSERT_EQUAL_64(0x00000ff0, x4); | |
695 ASSERT_EQUAL_64(0x00000070, x5); | |
696 ASSERT_EQUAL_64(0x0000ff00, x6); | |
697 ASSERT_EQUAL_64(0x00000f00, x7); | |
698 ASSERT_EQUAL_64(0x00000ff0, x8); | |
699 ASSERT_EQUAL_64(0x00000000, x9); | |
700 ASSERT_EQUAL_64(0x0000ff00, x10); | |
701 ASSERT_EQUAL_64(0x000000f0, x11); | |
702 | |
703 TEARDOWN(); | |
704 } | |
705 | |
706 | |
707 TEST(and_extend) { | |
708 INIT_V8(); | |
709 SETUP(); | |
710 | |
711 START(); | |
712 __ Mov(x0, 0xffffffffffffffffUL); | |
713 __ Mov(x1, 0x8000000080008081UL); | |
714 __ And(w6, w0, Operand(w1, UXTB)); | |
715 __ And(x7, x0, Operand(x1, UXTH, 1)); | |
716 __ And(w8, w0, Operand(w1, UXTW, 2)); | |
717 __ And(x9, x0, Operand(x1, UXTX, 3)); | |
718 __ And(w10, w0, Operand(w1, SXTB)); | |
719 __ And(x11, x0, Operand(x1, SXTH, 1)); | |
720 __ And(x12, x0, Operand(x1, SXTW, 2)); | |
721 __ And(x13, x0, Operand(x1, SXTX, 3)); | |
722 END(); | |
723 | |
724 RUN(); | |
725 | |
726 ASSERT_EQUAL_64(0x00000081, x6); | |
727 ASSERT_EQUAL_64(0x00010102, x7); | |
728 ASSERT_EQUAL_64(0x00020204, x8); | |
729 ASSERT_EQUAL_64(0x0000000400040408UL, x9); | |
730 ASSERT_EQUAL_64(0xffffff81, x10); | |
731 ASSERT_EQUAL_64(0xffffffffffff0102UL, x11); | |
732 ASSERT_EQUAL_64(0xfffffffe00020204UL, x12); | |
733 ASSERT_EQUAL_64(0x0000000400040408UL, x13); | |
734 | |
735 TEARDOWN(); | |
736 } | |
737 | |
738 | |
739 TEST(ands) { | |
740 INIT_V8(); | |
741 SETUP(); | |
742 | |
743 START(); | |
744 __ Mov(x1, 0xf00000ff); | |
745 __ Ands(w0, w1, Operand(w1)); | |
746 END(); | |
747 | |
748 RUN(); | |
749 | |
750 ASSERT_EQUAL_NZCV(NFlag); | |
751 ASSERT_EQUAL_64(0xf00000ff, x0); | |
752 | |
753 START(); | |
754 __ Mov(x0, 0xfff0); | |
755 __ Mov(x1, 0xf00000ff); | |
756 __ Ands(w0, w0, Operand(w1, LSR, 4)); | |
757 END(); | |
758 | |
759 RUN(); | |
760 | |
761 ASSERT_EQUAL_NZCV(ZFlag); | |
762 ASSERT_EQUAL_64(0x00000000, x0); | |
763 | |
764 START(); | |
765 __ Mov(x0, 0x8000000000000000L); | |
766 __ Mov(x1, 0x00000001); | |
767 __ Ands(x0, x0, Operand(x1, ROR, 1)); | |
768 END(); | |
769 | |
770 RUN(); | |
771 | |
772 ASSERT_EQUAL_NZCV(NFlag); | |
773 ASSERT_EQUAL_64(0x8000000000000000L, x0); | |
774 | |
775 START(); | |
776 __ Mov(x0, 0xfff0); | |
777 __ Ands(w0, w0, Operand(0xf)); | |
778 END(); | |
779 | |
780 RUN(); | |
781 | |
782 ASSERT_EQUAL_NZCV(ZFlag); | |
783 ASSERT_EQUAL_64(0x00000000, x0); | |
784 | |
785 START(); | |
786 __ Mov(x0, 0xff000000); | |
787 __ Ands(w0, w0, Operand(0x80000000)); | |
788 END(); | |
789 | |
790 RUN(); | |
791 | |
792 ASSERT_EQUAL_NZCV(NFlag); | |
793 ASSERT_EQUAL_64(0x80000000, x0); | |
794 | |
795 TEARDOWN(); | |
796 } | |
797 | |
798 | |
799 TEST(bic) { | |
800 INIT_V8(); | |
801 SETUP(); | |
802 | |
803 START(); | |
804 __ Mov(x0, 0xfff0); | |
805 __ Mov(x1, 0xf00000ff); | |
806 | |
807 __ Bic(x2, x0, Operand(x1)); | |
808 __ Bic(w3, w0, Operand(w1, LSL, 4)); | |
809 __ Bic(x4, x0, Operand(x1, LSL, 4)); | |
810 __ Bic(x5, x0, Operand(x1, LSR, 1)); | |
811 __ Bic(w6, w0, Operand(w1, ASR, 20)); | |
812 __ Bic(x7, x0, Operand(x1, ASR, 20)); | |
813 __ Bic(w8, w0, Operand(w1, ROR, 28)); | |
814 __ Bic(x9, x0, Operand(x1, ROR, 24)); | |
815 __ Bic(x10, x0, Operand(0x1f)); | |
816 __ Bic(x11, x0, Operand(0x100)); | |
817 | |
818 // Test bic into csp when the constant cannot be encoded in the immediate | |
819 // field. | |
820 // Use x20 to preserve csp. We check the result via x21 because the | |
821 // test infrastructure requires that csp be restored to its original value. | |
822 __ Mov(x20, csp); | |
823 __ Mov(x0, 0xffffff); | |
824 __ Bic(csp, x0, Operand(0xabcdef)); | |
825 __ Mov(x21, csp); | |
826 __ Mov(csp, x20); | |
827 END(); | |
828 | |
829 RUN(); | |
830 | |
831 ASSERT_EQUAL_64(0x0000ff00, x2); | |
832 ASSERT_EQUAL_64(0x0000f000, x3); | |
833 ASSERT_EQUAL_64(0x0000f000, x4); | |
834 ASSERT_EQUAL_64(0x0000ff80, x5); | |
835 ASSERT_EQUAL_64(0x000000f0, x6); | |
836 ASSERT_EQUAL_64(0x0000f0f0, x7); | |
837 ASSERT_EQUAL_64(0x0000f000, x8); | |
838 ASSERT_EQUAL_64(0x0000ff00, x9); | |
839 ASSERT_EQUAL_64(0x0000ffe0, x10); | |
840 ASSERT_EQUAL_64(0x0000fef0, x11); | |
841 | |
842 ASSERT_EQUAL_64(0x543210, x21); | |
843 | |
844 TEARDOWN(); | |
845 } | |
846 | |
847 | |
848 TEST(bic_extend) { | |
849 INIT_V8(); | |
850 SETUP(); | |
851 | |
852 START(); | |
853 __ Mov(x0, 0xffffffffffffffffUL); | |
854 __ Mov(x1, 0x8000000080008081UL); | |
855 __ Bic(w6, w0, Operand(w1, UXTB)); | |
856 __ Bic(x7, x0, Operand(x1, UXTH, 1)); | |
857 __ Bic(w8, w0, Operand(w1, UXTW, 2)); | |
858 __ Bic(x9, x0, Operand(x1, UXTX, 3)); | |
859 __ Bic(w10, w0, Operand(w1, SXTB)); | |
860 __ Bic(x11, x0, Operand(x1, SXTH, 1)); | |
861 __ Bic(x12, x0, Operand(x1, SXTW, 2)); | |
862 __ Bic(x13, x0, Operand(x1, SXTX, 3)); | |
863 END(); | |
864 | |
865 RUN(); | |
866 | |
867 ASSERT_EQUAL_64(0xffffff7e, x6); | |
868 ASSERT_EQUAL_64(0xfffffffffffefefdUL, x7); | |
869 ASSERT_EQUAL_64(0xfffdfdfb, x8); | |
870 ASSERT_EQUAL_64(0xfffffffbfffbfbf7UL, x9); | |
871 ASSERT_EQUAL_64(0x0000007e, x10); | |
872 ASSERT_EQUAL_64(0x0000fefd, x11); | |
873 ASSERT_EQUAL_64(0x00000001fffdfdfbUL, x12); | |
874 ASSERT_EQUAL_64(0xfffffffbfffbfbf7UL, x13); | |
875 | |
876 TEARDOWN(); | |
877 } | |
878 | |
879 | |
880 TEST(bics) { | |
881 INIT_V8(); | |
882 SETUP(); | |
883 | |
884 START(); | |
885 __ Mov(x1, 0xffff); | |
886 __ Bics(w0, w1, Operand(w1)); | |
887 END(); | |
888 | |
889 RUN(); | |
890 | |
891 ASSERT_EQUAL_NZCV(ZFlag); | |
892 ASSERT_EQUAL_64(0x00000000, x0); | |
893 | |
894 START(); | |
895 __ Mov(x0, 0xffffffff); | |
896 __ Bics(w0, w0, Operand(w0, LSR, 1)); | |
897 END(); | |
898 | |
899 RUN(); | |
900 | |
901 ASSERT_EQUAL_NZCV(NFlag); | |
902 ASSERT_EQUAL_64(0x80000000, x0); | |
903 | |
904 START(); | |
905 __ Mov(x0, 0x8000000000000000L); | |
906 __ Mov(x1, 0x00000001); | |
907 __ Bics(x0, x0, Operand(x1, ROR, 1)); | |
908 END(); | |
909 | |
910 RUN(); | |
911 | |
912 ASSERT_EQUAL_NZCV(ZFlag); | |
913 ASSERT_EQUAL_64(0x00000000, x0); | |
914 | |
915 START(); | |
916 __ Mov(x0, 0xffffffffffffffffL); | |
917 __ Bics(x0, x0, Operand(0x7fffffffffffffffL)); | |
918 END(); | |
919 | |
920 RUN(); | |
921 | |
922 ASSERT_EQUAL_NZCV(NFlag); | |
923 ASSERT_EQUAL_64(0x8000000000000000L, x0); | |
924 | |
925 START(); | |
926 __ Mov(w0, 0xffff0000); | |
927 __ Bics(w0, w0, Operand(0xfffffff0)); | |
928 END(); | |
929 | |
930 RUN(); | |
931 | |
932 ASSERT_EQUAL_NZCV(ZFlag); | |
933 ASSERT_EQUAL_64(0x00000000, x0); | |
934 | |
935 TEARDOWN(); | |
936 } | |
937 | |
938 | |
939 TEST(eor) { | |
940 INIT_V8(); | |
941 SETUP(); | |
942 | |
943 START(); | |
944 __ Mov(x0, 0xfff0); | |
945 __ Mov(x1, 0xf00000ff); | |
946 | |
947 __ Eor(x2, x0, Operand(x1)); | |
948 __ Eor(w3, w0, Operand(w1, LSL, 4)); | |
949 __ Eor(x4, x0, Operand(x1, LSL, 4)); | |
950 __ Eor(x5, x0, Operand(x1, LSR, 1)); | |
951 __ Eor(w6, w0, Operand(w1, ASR, 20)); | |
952 __ Eor(x7, x0, Operand(x1, ASR, 20)); | |
953 __ Eor(w8, w0, Operand(w1, ROR, 28)); | |
954 __ Eor(x9, x0, Operand(x1, ROR, 28)); | |
955 __ Eor(w10, w0, Operand(0xff00ff00)); | |
956 __ Eor(x11, x0, Operand(0xff00ff00ff00ff00L)); | |
957 END(); | |
958 | |
959 RUN(); | |
960 | |
961 ASSERT_EQUAL_64(0xf000ff0f, x2); | |
962 ASSERT_EQUAL_64(0x0000f000, x3); | |
963 ASSERT_EQUAL_64(0x0000000f0000f000L, x4); | |
964 ASSERT_EQUAL_64(0x7800ff8f, x5); | |
965 ASSERT_EQUAL_64(0xffff00f0, x6); | |
966 ASSERT_EQUAL_64(0x0000f0f0, x7); | |
967 ASSERT_EQUAL_64(0x0000f00f, x8); | |
968 ASSERT_EQUAL_64(0x00000ff00000ffffL, x9); | |
969 ASSERT_EQUAL_64(0xff0000f0, x10); | |
970 ASSERT_EQUAL_64(0xff00ff00ff0000f0L, x11); | |
971 | |
972 TEARDOWN(); | |
973 } | |
974 | |
975 | |
976 TEST(eor_extend) { | |
977 INIT_V8(); | |
978 SETUP(); | |
979 | |
980 START(); | |
981 __ Mov(x0, 0x1111111111111111UL); | |
982 __ Mov(x1, 0x8000000080008081UL); | |
983 __ Eor(w6, w0, Operand(w1, UXTB)); | |
984 __ Eor(x7, x0, Operand(x1, UXTH, 1)); | |
985 __ Eor(w8, w0, Operand(w1, UXTW, 2)); | |
986 __ Eor(x9, x0, Operand(x1, UXTX, 3)); | |
987 __ Eor(w10, w0, Operand(w1, SXTB)); | |
988 __ Eor(x11, x0, Operand(x1, SXTH, 1)); | |
989 __ Eor(x12, x0, Operand(x1, SXTW, 2)); | |
990 __ Eor(x13, x0, Operand(x1, SXTX, 3)); | |
991 END(); | |
992 | |
993 RUN(); | |
994 | |
995 ASSERT_EQUAL_64(0x11111190, x6); | |
996 ASSERT_EQUAL_64(0x1111111111101013UL, x7); | |
997 ASSERT_EQUAL_64(0x11131315, x8); | |
998 ASSERT_EQUAL_64(0x1111111511151519UL, x9); | |
999 ASSERT_EQUAL_64(0xeeeeee90, x10); | |
1000 ASSERT_EQUAL_64(0xeeeeeeeeeeee1013UL, x11); | |
1001 ASSERT_EQUAL_64(0xeeeeeeef11131315UL, x12); | |
1002 ASSERT_EQUAL_64(0x1111111511151519UL, x13); | |
1003 | |
1004 TEARDOWN(); | |
1005 } | |
1006 | |
1007 | |
1008 TEST(eon) { | |
1009 INIT_V8(); | |
1010 SETUP(); | |
1011 | |
1012 START(); | |
1013 __ Mov(x0, 0xfff0); | |
1014 __ Mov(x1, 0xf00000ff); | |
1015 | |
1016 __ Eon(x2, x0, Operand(x1)); | |
1017 __ Eon(w3, w0, Operand(w1, LSL, 4)); | |
1018 __ Eon(x4, x0, Operand(x1, LSL, 4)); | |
1019 __ Eon(x5, x0, Operand(x1, LSR, 1)); | |
1020 __ Eon(w6, w0, Operand(w1, ASR, 20)); | |
1021 __ Eon(x7, x0, Operand(x1, ASR, 20)); | |
1022 __ Eon(w8, w0, Operand(w1, ROR, 28)); | |
1023 __ Eon(x9, x0, Operand(x1, ROR, 28)); | |
1024 __ Eon(w10, w0, Operand(0x03c003c0)); | |
1025 __ Eon(x11, x0, Operand(0x0000100000001000L)); | |
1026 END(); | |
1027 | |
1028 RUN(); | |
1029 | |
1030 ASSERT_EQUAL_64(0xffffffff0fff00f0L, x2); | |
1031 ASSERT_EQUAL_64(0xffff0fff, x3); | |
1032 ASSERT_EQUAL_64(0xfffffff0ffff0fffL, x4); | |
1033 ASSERT_EQUAL_64(0xffffffff87ff0070L, x5); | |
1034 ASSERT_EQUAL_64(0x0000ff0f, x6); | |
1035 ASSERT_EQUAL_64(0xffffffffffff0f0fL, x7); | |
1036 ASSERT_EQUAL_64(0xffff0ff0, x8); | |
1037 ASSERT_EQUAL_64(0xfffff00fffff0000L, x9); | |
1038 ASSERT_EQUAL_64(0xfc3f03cf, x10); | |
1039 ASSERT_EQUAL_64(0xffffefffffff100fL, x11); | |
1040 | |
1041 TEARDOWN(); | |
1042 } | |
1043 | |
1044 | |
1045 TEST(eon_extend) { | |
1046 INIT_V8(); | |
1047 SETUP(); | |
1048 | |
1049 START(); | |
1050 __ Mov(x0, 0x1111111111111111UL); | |
1051 __ Mov(x1, 0x8000000080008081UL); | |
1052 __ Eon(w6, w0, Operand(w1, UXTB)); | |
1053 __ Eon(x7, x0, Operand(x1, UXTH, 1)); | |
1054 __ Eon(w8, w0, Operand(w1, UXTW, 2)); | |
1055 __ Eon(x9, x0, Operand(x1, UXTX, 3)); | |
1056 __ Eon(w10, w0, Operand(w1, SXTB)); | |
1057 __ Eon(x11, x0, Operand(x1, SXTH, 1)); | |
1058 __ Eon(x12, x0, Operand(x1, SXTW, 2)); | |
1059 __ Eon(x13, x0, Operand(x1, SXTX, 3)); | |
1060 END(); | |
1061 | |
1062 RUN(); | |
1063 | |
1064 ASSERT_EQUAL_64(0xeeeeee6f, x6); | |
1065 ASSERT_EQUAL_64(0xeeeeeeeeeeefefecUL, x7); | |
1066 ASSERT_EQUAL_64(0xeeececea, x8); | |
1067 ASSERT_EQUAL_64(0xeeeeeeeaeeeaeae6UL, x9); | |
1068 ASSERT_EQUAL_64(0x1111116f, x10); | |
1069 ASSERT_EQUAL_64(0x111111111111efecUL, x11); | |
1070 ASSERT_EQUAL_64(0x11111110eeececeaUL, x12); | |
1071 ASSERT_EQUAL_64(0xeeeeeeeaeeeaeae6UL, x13); | |
1072 | |
1073 TEARDOWN(); | |
1074 } | |
1075 | |
1076 | |
1077 TEST(mul) { | |
1078 INIT_V8(); | |
1079 SETUP(); | |
1080 | |
1081 START(); | |
1082 __ Mov(x16, 0); | |
1083 __ Mov(x17, 1); | |
1084 __ Mov(x18, 0xffffffff); | |
1085 __ Mov(x19, 0xffffffffffffffffUL); | |
1086 | |
1087 __ Mul(w0, w16, w16); | |
1088 __ Mul(w1, w16, w17); | |
1089 __ Mul(w2, w17, w18); | |
1090 __ Mul(w3, w18, w19); | |
1091 __ Mul(x4, x16, x16); | |
1092 __ Mul(x5, x17, x18); | |
1093 __ Mul(x6, x18, x19); | |
1094 __ Mul(x7, x19, x19); | |
1095 __ Smull(x8, w17, w18); | |
1096 __ Smull(x9, w18, w18); | |
1097 __ Smull(x10, w19, w19); | |
1098 __ Mneg(w11, w16, w16); | |
1099 __ Mneg(w12, w16, w17); | |
1100 __ Mneg(w13, w17, w18); | |
1101 __ Mneg(w14, w18, w19); | |
1102 __ Mneg(x20, x16, x16); | |
1103 __ Mneg(x21, x17, x18); | |
1104 __ Mneg(x22, x18, x19); | |
1105 __ Mneg(x23, x19, x19); | |
1106 END(); | |
1107 | |
1108 RUN(); | |
1109 | |
1110 ASSERT_EQUAL_64(0, x0); | |
1111 ASSERT_EQUAL_64(0, x1); | |
1112 ASSERT_EQUAL_64(0xffffffff, x2); | |
1113 ASSERT_EQUAL_64(1, x3); | |
1114 ASSERT_EQUAL_64(0, x4); | |
1115 ASSERT_EQUAL_64(0xffffffff, x5); | |
1116 ASSERT_EQUAL_64(0xffffffff00000001UL, x6); | |
1117 ASSERT_EQUAL_64(1, x7); | |
1118 ASSERT_EQUAL_64(0xffffffffffffffffUL, x8); | |
1119 ASSERT_EQUAL_64(1, x9); | |
1120 ASSERT_EQUAL_64(1, x10); | |
1121 ASSERT_EQUAL_64(0, x11); | |
1122 ASSERT_EQUAL_64(0, x12); | |
1123 ASSERT_EQUAL_64(1, x13); | |
1124 ASSERT_EQUAL_64(0xffffffff, x14); | |
1125 ASSERT_EQUAL_64(0, x20); | |
1126 ASSERT_EQUAL_64(0xffffffff00000001UL, x21); | |
1127 ASSERT_EQUAL_64(0xffffffff, x22); | |
1128 ASSERT_EQUAL_64(0xffffffffffffffffUL, x23); | |
1129 | |
1130 TEARDOWN(); | |
1131 } | |
1132 | |
1133 | |
1134 static void SmullHelper(int64_t expected, int64_t a, int64_t b) { | |
1135 SETUP(); | |
1136 START(); | |
1137 __ Mov(w0, a); | |
1138 __ Mov(w1, b); | |
1139 __ Smull(x2, w0, w1); | |
1140 END(); | |
1141 RUN(); | |
1142 ASSERT_EQUAL_64(expected, x2); | |
1143 TEARDOWN(); | |
1144 } | |
1145 | |
1146 | |
1147 TEST(smull) { | |
1148 INIT_V8(); | |
1149 SmullHelper(0, 0, 0); | |
1150 SmullHelper(1, 1, 1); | |
1151 SmullHelper(-1, -1, 1); | |
1152 SmullHelper(1, -1, -1); | |
1153 SmullHelper(0xffffffff80000000, 0x80000000, 1); | |
1154 SmullHelper(0x0000000080000000, 0x00010000, 0x00008000); | |
1155 } | |
1156 | |
1157 | |
1158 TEST(madd) { | |
1159 INIT_V8(); | |
1160 SETUP(); | |
1161 | |
1162 START(); | |
1163 __ Mov(x16, 0); | |
1164 __ Mov(x17, 1); | |
1165 __ Mov(x18, 0xffffffff); | |
1166 __ Mov(x19, 0xffffffffffffffffUL); | |
1167 | |
1168 __ Madd(w0, w16, w16, w16); | |
1169 __ Madd(w1, w16, w16, w17); | |
1170 __ Madd(w2, w16, w16, w18); | |
1171 __ Madd(w3, w16, w16, w19); | |
1172 __ Madd(w4, w16, w17, w17); | |
1173 __ Madd(w5, w17, w17, w18); | |
1174 __ Madd(w6, w17, w17, w19); | |
1175 __ Madd(w7, w17, w18, w16); | |
1176 __ Madd(w8, w17, w18, w18); | |
1177 __ Madd(w9, w18, w18, w17); | |
1178 __ Madd(w10, w18, w19, w18); | |
1179 __ Madd(w11, w19, w19, w19); | |
1180 | |
1181 __ Madd(x12, x16, x16, x16); | |
1182 __ Madd(x13, x16, x16, x17); | |
1183 __ Madd(x14, x16, x16, x18); | |
1184 __ Madd(x15, x16, x16, x19); | |
1185 __ Madd(x20, x16, x17, x17); | |
1186 __ Madd(x21, x17, x17, x18); | |
1187 __ Madd(x22, x17, x17, x19); | |
1188 __ Madd(x23, x17, x18, x16); | |
1189 __ Madd(x24, x17, x18, x18); | |
1190 __ Madd(x25, x18, x18, x17); | |
1191 __ Madd(x26, x18, x19, x18); | |
1192 __ Madd(x27, x19, x19, x19); | |
1193 | |
1194 END(); | |
1195 | |
1196 RUN(); | |
1197 | |
1198 ASSERT_EQUAL_64(0, x0); | |
1199 ASSERT_EQUAL_64(1, x1); | |
1200 ASSERT_EQUAL_64(0xffffffff, x2); | |
1201 ASSERT_EQUAL_64(0xffffffff, x3); | |
1202 ASSERT_EQUAL_64(1, x4); | |
1203 ASSERT_EQUAL_64(0, x5); | |
1204 ASSERT_EQUAL_64(0, x6); | |
1205 ASSERT_EQUAL_64(0xffffffff, x7); | |
1206 ASSERT_EQUAL_64(0xfffffffe, x8); | |
1207 ASSERT_EQUAL_64(2, x9); | |
1208 ASSERT_EQUAL_64(0, x10); | |
1209 ASSERT_EQUAL_64(0, x11); | |
1210 | |
1211 ASSERT_EQUAL_64(0, x12); | |
1212 ASSERT_EQUAL_64(1, x13); | |
1213 ASSERT_EQUAL_64(0xffffffff, x14); | |
1214 ASSERT_EQUAL_64(0xffffffffffffffffUL, x15); | |
1215 ASSERT_EQUAL_64(1, x20); | |
1216 ASSERT_EQUAL_64(0x100000000UL, x21); | |
1217 ASSERT_EQUAL_64(0, x22); | |
1218 ASSERT_EQUAL_64(0xffffffff, x23); | |
1219 ASSERT_EQUAL_64(0x1fffffffe, x24); | |
1220 ASSERT_EQUAL_64(0xfffffffe00000002UL, x25); | |
1221 ASSERT_EQUAL_64(0, x26); | |
1222 ASSERT_EQUAL_64(0, x27); | |
1223 | |
1224 TEARDOWN(); | |
1225 } | |
1226 | |
1227 | |
1228 TEST(msub) { | |
1229 INIT_V8(); | |
1230 SETUP(); | |
1231 | |
1232 START(); | |
1233 __ Mov(x16, 0); | |
1234 __ Mov(x17, 1); | |
1235 __ Mov(x18, 0xffffffff); | |
1236 __ Mov(x19, 0xffffffffffffffffUL); | |
1237 | |
1238 __ Msub(w0, w16, w16, w16); | |
1239 __ Msub(w1, w16, w16, w17); | |
1240 __ Msub(w2, w16, w16, w18); | |
1241 __ Msub(w3, w16, w16, w19); | |
1242 __ Msub(w4, w16, w17, w17); | |
1243 __ Msub(w5, w17, w17, w18); | |
1244 __ Msub(w6, w17, w17, w19); | |
1245 __ Msub(w7, w17, w18, w16); | |
1246 __ Msub(w8, w17, w18, w18); | |
1247 __ Msub(w9, w18, w18, w17); | |
1248 __ Msub(w10, w18, w19, w18); | |
1249 __ Msub(w11, w19, w19, w19); | |
1250 | |
1251 __ Msub(x12, x16, x16, x16); | |
1252 __ Msub(x13, x16, x16, x17); | |
1253 __ Msub(x14, x16, x16, x18); | |
1254 __ Msub(x15, x16, x16, x19); | |
1255 __ Msub(x20, x16, x17, x17); | |
1256 __ Msub(x21, x17, x17, x18); | |
1257 __ Msub(x22, x17, x17, x19); | |
1258 __ Msub(x23, x17, x18, x16); | |
1259 __ Msub(x24, x17, x18, x18); | |
1260 __ Msub(x25, x18, x18, x17); | |
1261 __ Msub(x26, x18, x19, x18); | |
1262 __ Msub(x27, x19, x19, x19); | |
1263 | |
1264 END(); | |
1265 | |
1266 RUN(); | |
1267 | |
1268 ASSERT_EQUAL_64(0, x0); | |
1269 ASSERT_EQUAL_64(1, x1); | |
1270 ASSERT_EQUAL_64(0xffffffff, x2); | |
1271 ASSERT_EQUAL_64(0xffffffff, x3); | |
1272 ASSERT_EQUAL_64(1, x4); | |
1273 ASSERT_EQUAL_64(0xfffffffe, x5); | |
1274 ASSERT_EQUAL_64(0xfffffffe, x6); | |
1275 ASSERT_EQUAL_64(1, x7); | |
1276 ASSERT_EQUAL_64(0, x8); | |
1277 ASSERT_EQUAL_64(0, x9); | |
1278 ASSERT_EQUAL_64(0xfffffffe, x10); | |
1279 ASSERT_EQUAL_64(0xfffffffe, x11); | |
1280 | |
1281 ASSERT_EQUAL_64(0, x12); | |
1282 ASSERT_EQUAL_64(1, x13); | |
1283 ASSERT_EQUAL_64(0xffffffff, x14); | |
1284 ASSERT_EQUAL_64(0xffffffffffffffffUL, x15); | |
1285 ASSERT_EQUAL_64(1, x20); | |
1286 ASSERT_EQUAL_64(0xfffffffeUL, x21); | |
1287 ASSERT_EQUAL_64(0xfffffffffffffffeUL, x22); | |
1288 ASSERT_EQUAL_64(0xffffffff00000001UL, x23); | |
1289 ASSERT_EQUAL_64(0, x24); | |
1290 ASSERT_EQUAL_64(0x200000000UL, x25); | |
1291 ASSERT_EQUAL_64(0x1fffffffeUL, x26); | |
1292 ASSERT_EQUAL_64(0xfffffffffffffffeUL, x27); | |
1293 | |
1294 TEARDOWN(); | |
1295 } | |
1296 | |
1297 | |
1298 TEST(smulh) { | |
1299 INIT_V8(); | |
1300 SETUP(); | |
1301 | |
1302 START(); | |
1303 __ Mov(x20, 0); | |
1304 __ Mov(x21, 1); | |
1305 __ Mov(x22, 0x0000000100000000L); | |
1306 __ Mov(x23, 0x12345678); | |
1307 __ Mov(x24, 0x0123456789abcdefL); | |
1308 __ Mov(x25, 0x0000000200000000L); | |
1309 __ Mov(x26, 0x8000000000000000UL); | |
1310 __ Mov(x27, 0xffffffffffffffffUL); | |
1311 __ Mov(x28, 0x5555555555555555UL); | |
1312 __ Mov(x29, 0xaaaaaaaaaaaaaaaaUL); | |
1313 | |
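// Smulh writes the top 64 bits of the 128-bit signed product of its operands. | |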
1314 __ Smulh(x0, x20, x24); | |
1315 __ Smulh(x1, x21, x24); | |
1316 __ Smulh(x2, x22, x23); | |
1317 __ Smulh(x3, x22, x24); | |
1318 __ Smulh(x4, x24, x25); | |
1319 __ Smulh(x5, x23, x27); | |
1320 __ Smulh(x6, x26, x26); | |
1321 __ Smulh(x7, x26, x27); | |
1322 __ Smulh(x8, x27, x27); | |
1323 __ Smulh(x9, x28, x28); | |
1324 __ Smulh(x10, x28, x29); | |
1325 __ Smulh(x11, x29, x29); | |
1326 END(); | |
1327 | |
1328 RUN(); | |
1329 | |
1330 ASSERT_EQUAL_64(0, x0); | |
1331 ASSERT_EQUAL_64(0, x1); | |
1332 ASSERT_EQUAL_64(0, x2); | |
1333 ASSERT_EQUAL_64(0x01234567, x3); | |
1334 ASSERT_EQUAL_64(0x02468acf, x4); | |
1335 ASSERT_EQUAL_64(0xffffffffffffffffUL, x5); | |
1336 ASSERT_EQUAL_64(0x4000000000000000UL, x6); | |
1337 ASSERT_EQUAL_64(0, x7); | |
1338 ASSERT_EQUAL_64(0, x8); | |
1339 ASSERT_EQUAL_64(0x1c71c71c71c71c71UL, x9); | |
1340 ASSERT_EQUAL_64(0xe38e38e38e38e38eUL, x10); | |
1341 ASSERT_EQUAL_64(0x1c71c71c71c71c72UL, x11); | |
1342 | |
1343 TEARDOWN(); | |
1344 } | |
1345 | |
1346 | |
1347 TEST(smaddl_umaddl) { | |
1348 INIT_V8(); | |
1349 SETUP(); | |
1350 | |
1351 START(); | |
1352 __ Mov(x17, 1); | |
1353 __ Mov(x18, 0xffffffff); | |
1354 __ Mov(x19, 0xffffffffffffffffUL); | |
1355 __ Mov(x20, 4); | |
1356 __ Mov(x21, 0x200000000UL); | |
1357 | |
1358 __ Smaddl(x9, w17, w18, x20); | |
1359 __ Smaddl(x10, w18, w18, x20); | |
1360 __ Smaddl(x11, w19, w19, x20); | |
1361 __ Smaddl(x12, w19, w19, x21); | |
1362 __ Umaddl(x13, w17, w18, x20); | |
1363 __ Umaddl(x14, w18, w18, x20); | |
1364 __ Umaddl(x15, w19, w19, x20); | |
1365 __ Umaddl(x22, w19, w19, x21); | |
1366 END(); | |
1367 | |
1368 RUN(); | |
1369 | |
1370 ASSERT_EQUAL_64(3, x9); | |
1371 ASSERT_EQUAL_64(5, x10); | |
1372 ASSERT_EQUAL_64(5, x11); | |
1373 ASSERT_EQUAL_64(0x200000001UL, x12); | |
1374 ASSERT_EQUAL_64(0x100000003UL, x13); | |
1375 ASSERT_EQUAL_64(0xfffffffe00000005UL, x14); | |
1376 ASSERT_EQUAL_64(0xfffffffe00000005UL, x15); | |
1377 ASSERT_EQUAL_64(0x1, x22); | |
1378 | |
1379 TEARDOWN(); | |
1380 } | |
1381 | |
1382 | |
1383 TEST(smsubl_umsubl) { | |
1384 INIT_V8(); | |
1385 SETUP(); | |
1386 | |
1387 START(); | |
1388 __ Mov(x17, 1); | |
1389 __ Mov(x18, 0xffffffff); | |
1390 __ Mov(x19, 0xffffffffffffffffUL); | |
1391 __ Mov(x20, 4); | |
1392 __ Mov(x21, 0x200000000UL); | |
1393 | |
1394 __ Smsubl(x9, w17, w18, x20); | |
1395 __ Smsubl(x10, w18, w18, x20); | |
1396 __ Smsubl(x11, w19, w19, x20); | |
1397 __ Smsubl(x12, w19, w19, x21); | |
1398 __ Umsubl(x13, w17, w18, x20); | |
1399 __ Umsubl(x14, w18, w18, x20); | |
1400 __ Umsubl(x15, w19, w19, x20); | |
1401 __ Umsubl(x22, w19, w19, x21); | |
1402 END(); | |
1403 | |
1404 RUN(); | |
1405 | |
1406 ASSERT_EQUAL_64(5, x9); | |
1407 ASSERT_EQUAL_64(3, x10); | |
1408 ASSERT_EQUAL_64(3, x11); | |
1409 ASSERT_EQUAL_64(0x1ffffffffUL, x12); | |
1410 ASSERT_EQUAL_64(0xffffffff00000005UL, x13); | |
1411 ASSERT_EQUAL_64(0x200000003UL, x14); | |
1412 ASSERT_EQUAL_64(0x200000003UL, x15); | |
1413 ASSERT_EQUAL_64(0x3ffffffffUL, x22); | |
1414 | |
1415 TEARDOWN(); | |
1416 } | |
1417 | |
1418 | |
1419 TEST(div) { | |
1420 INIT_V8(); | |
1421 SETUP(); | |
1422 | |
1423 START(); | |
1424 __ Mov(x16, 1); | |
1425 __ Mov(x17, 0xffffffff); | |
1426 __ Mov(x18, 0xffffffffffffffffUL); | |
1427 __ Mov(x19, 0x80000000); | |
1428 __ Mov(x20, 0x8000000000000000UL); | |
1429 __ Mov(x21, 2); | |
1430 | |
1431 __ Udiv(w0, w16, w16); | |
1432 __ Udiv(w1, w17, w16); | |
1433 __ Sdiv(w2, w16, w16); | |
1434 __ Sdiv(w3, w16, w17); | |
1435 __ Sdiv(w4, w17, w18); | |
1436 | |
1437 __ Udiv(x5, x16, x16); | |
1438 __ Udiv(x6, x17, x18); | |
1439 __ Sdiv(x7, x16, x16); | |
1440 __ Sdiv(x8, x16, x17); | |
1441 __ Sdiv(x9, x17, x18); | |
1442 | |
1443 __ Udiv(w10, w19, w21); | |
1444 __ Sdiv(w11, w19, w21); | |
1445 __ Udiv(x12, x19, x21); | |
1446 __ Sdiv(x13, x19, x21); | |
1447 __ Udiv(x14, x20, x21); | |
1448 __ Sdiv(x15, x20, x21); | |
1449 | |
1450 __ Udiv(w22, w19, w17); | |
1451 __ Sdiv(w23, w19, w17); | |
1452 __ Udiv(x24, x20, x18); | |
1453 __ Sdiv(x25, x20, x18); | |
1454 | |
1455 __ Udiv(x26, x16, x21); | |
1456 __ Sdiv(x27, x16, x21); | |
1457 __ Udiv(x28, x18, x21); | |
1458 __ Sdiv(x29, x18, x21); | |
1459 | |
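// Division by zero does not trap on A64: udiv and sdiv return zero. | |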
1460 __ Mov(x17, 0); | |
1461 __ Udiv(w18, w16, w17); | |
1462 __ Sdiv(w19, w16, w17); | |
1463 __ Udiv(x20, x16, x17); | |
1464 __ Sdiv(x21, x16, x17); | |
1465 END(); | |
1466 | |
1467 RUN(); | |
1468 | |
1469 ASSERT_EQUAL_64(1, x0); | |
1470 ASSERT_EQUAL_64(0xffffffff, x1); | |
1471 ASSERT_EQUAL_64(1, x2); | |
1472 ASSERT_EQUAL_64(0xffffffff, x3); | |
1473 ASSERT_EQUAL_64(1, x4); | |
1474 ASSERT_EQUAL_64(1, x5); | |
1475 ASSERT_EQUAL_64(0, x6); | |
1476 ASSERT_EQUAL_64(1, x7); | |
1477 ASSERT_EQUAL_64(0, x8); | |
1478 ASSERT_EQUAL_64(0xffffffff00000001UL, x9); | |
1479 ASSERT_EQUAL_64(0x40000000, x10); | |
1480 ASSERT_EQUAL_64(0xc0000000, x11); | |
1481 ASSERT_EQUAL_64(0x40000000, x12); | |
1482 ASSERT_EQUAL_64(0x40000000, x13); | |
1483 ASSERT_EQUAL_64(0x4000000000000000UL, x14); | |
1484 ASSERT_EQUAL_64(0xc000000000000000UL, x15); | |
1485 ASSERT_EQUAL_64(0, x22); | |
1486 ASSERT_EQUAL_64(0x80000000, x23); | |
1487 ASSERT_EQUAL_64(0, x24); | |
1488 ASSERT_EQUAL_64(0x8000000000000000UL, x25); | |
1489 ASSERT_EQUAL_64(0, x26); | |
1490 ASSERT_EQUAL_64(0, x27); | |
1491 ASSERT_EQUAL_64(0x7fffffffffffffffUL, x28); | |
1492 ASSERT_EQUAL_64(0, x29); | |
1493 ASSERT_EQUAL_64(0, x18); | |
1494 ASSERT_EQUAL_64(0, x19); | |
1495 ASSERT_EQUAL_64(0, x20); | |
1496 ASSERT_EQUAL_64(0, x21); | |
1497 | |
1498 TEARDOWN(); | |
1499 } | |
1500 | |
1501 | |
1502 TEST(rbit_rev) { | |
1503 INIT_V8(); | |
1504 SETUP(); | |
1505 | |
1506 START(); | |
1507 __ Mov(x24, 0xfedcba9876543210UL); | |
1508 __ Rbit(w0, w24); | |
1509 __ Rbit(x1, x24); | |
1510 __ Rev16(w2, w24); | |
1511 __ Rev16(x3, x24); | |
1512 __ Rev(w4, w24); | |
1513 __ Rev32(x5, x24); | |
1514 __ Rev(x6, x24); | |
1515 END(); | |
1516 | |
1517 RUN(); | |
1518 | |
1519 ASSERT_EQUAL_64(0x084c2a6e, x0); | |
1520 ASSERT_EQUAL_64(0x084c2a6e195d3b7fUL, x1); | |
1521 ASSERT_EQUAL_64(0x54761032, x2); | |
1522 ASSERT_EQUAL_64(0xdcfe98ba54761032UL, x3); | |
1523 ASSERT_EQUAL_64(0x10325476, x4); | |
1524 ASSERT_EQUAL_64(0x98badcfe10325476UL, x5); | |
1525 ASSERT_EQUAL_64(0x1032547698badcfeUL, x6); | |
1526 | |
1527 TEARDOWN(); | |
1528 } | |
1529 | |
1530 | |
1531 TEST(clz_cls) { | |
1532 INIT_V8(); | |
1533 SETUP(); | |
1534 | |
1535 START(); | |
1536 __ Mov(x24, 0x0008000000800000UL); | |
1537 __ Mov(x25, 0xff800000fff80000UL); | |
1538 __ Mov(x26, 0); | |
1539 __ Clz(w0, w24); | |
1540 __ Clz(x1, x24); | |
1541 __ Clz(w2, w25); | |
1542 __ Clz(x3, x25); | |
1543 __ Clz(w4, w26); | |
1544 __ Clz(x5, x26); | |
1545 __ Cls(w6, w24); | |
1546 __ Cls(x7, x24); | |
1547 __ Cls(w8, w25); | |
1548 __ Cls(x9, x25); | |
1549 __ Cls(w10, w26); | |
1550 __ Cls(x11, x26); | |
1551 END(); | |
1552 | |
1553 RUN(); | |
1554 | |
1555 ASSERT_EQUAL_64(8, x0); | |
1556 ASSERT_EQUAL_64(12, x1); | |
1557 ASSERT_EQUAL_64(0, x2); | |
1558 ASSERT_EQUAL_64(0, x3); | |
1559 ASSERT_EQUAL_64(32, x4); | |
1560 ASSERT_EQUAL_64(64, x5); | |
1561 ASSERT_EQUAL_64(7, x6); | |
1562 ASSERT_EQUAL_64(11, x7); | |
1563 ASSERT_EQUAL_64(12, x8); | |
1564 ASSERT_EQUAL_64(8, x9); | |
1565 ASSERT_EQUAL_64(31, x10); | |
1566 ASSERT_EQUAL_64(63, x11); | |
1567 | |
1568 TEARDOWN(); | |
1569 } | |
1570 | |
1571 | |
1572 TEST(label) { | |
1573 INIT_V8(); | |
1574 SETUP(); | |
1575 | |
1576 Label label_1, label_2, label_3, label_4; | |
1577 | |
1578 START(); | |
1579 __ Mov(x0, 0x1); | |
1580 __ Mov(x1, 0x0); | |
1581 __ Mov(x22, lr); // Save lr. | |
1582 | |
1583 __ B(&label_1); | |
1584 __ B(&label_1); | |
1585 __ B(&label_1); // Multiple branches to the same label. | |
1586 __ Mov(x0, 0x0); | |
1587 __ Bind(&label_2); | |
1588 __ B(&label_3); // Forward branch. | |
1589 __ Mov(x0, 0x0); | |
1590 __ Bind(&label_1); | |
1591 __ B(&label_2); // Backward branch. | |
1592 __ Mov(x0, 0x0); | |
1593 __ Bind(&label_3); | |
1594 __ Bl(&label_4); | |
1595 END(); | |
1596 | |
1597 __ Bind(&label_4); | |
1598 __ Mov(x1, 0x1); | |
1599 __ Mov(lr, x22); | |
1600 END(); | |
1601 | |
1602 RUN(); | |
1603 | |
1604 ASSERT_EQUAL_64(0x1, x0); | |
1605 ASSERT_EQUAL_64(0x1, x1); | |
1606 | |
1607 TEARDOWN(); | |
1608 } | |
1609 | |
1610 | |
1611 TEST(branch_at_start) { | |
1612 INIT_V8(); | |
1613 SETUP(); | |
1614 | |
1615 Label good, exit; | |
1616 | |
1617 // Test that branches can exist at the start of the buffer. (This is a | |
1618 // boundary condition in the label-handling code.) To achieve this, we have | |
1619 // to work around the code generated by START. | |
1620 RESET(); | |
1621 __ B(&good); | |
1622 | |
1623 START_AFTER_RESET(); | |
1624 __ Mov(x0, 0x0); | |
1625 END(); | |
1626 | |
1627 __ Bind(&exit); | |
1628 START_AFTER_RESET(); | |
1629 __ Mov(x0, 0x1); | |
1630 END(); | |
1631 | |
1632 __ Bind(&good); | |
1633 __ B(&exit); | |
1634 END(); | |
1635 | |
1636 RUN(); | |
1637 | |
1638 ASSERT_EQUAL_64(0x1, x0); | |
1639 TEARDOWN(); | |
1640 } | |
1641 | |
1642 | |
1643 TEST(adr) { | |
1644 INIT_V8(); | |
1645 SETUP(); | |
1646 | |
1647 Label label_1, label_2, label_3, label_4; | |
1648 | |
1649 START(); | |
1650 __ Mov(x0, 0x0); // x0 is set to non-zero to indicate failure. | |
1651 __ Adr(x1, &label_3); // x1 is zeroed at label_3 to indicate success. | |
1652 | |
1653 __ Adr(x2, &label_1); // Multiple forward references to the same label. | |
1654 __ Adr(x3, &label_1); | |
1655 __ Adr(x4, &label_1); | |
1656 | |
1657 __ Bind(&label_2); | |
1658 __ Eor(x5, x2, Operand(x3)); // Ensure that x2,x3 and x4 are identical. | |
1659 __ Eor(x6, x2, Operand(x4)); | |
1660 __ Orr(x0, x0, Operand(x5)); | |
1661 __ Orr(x0, x0, Operand(x6)); | |
1662 __ Br(x2); // label_1, label_3 | |
1663 | |
1664 __ Bind(&label_3); | |
1665 __ Adr(x2, &label_3); // Self-reference (offset 0). | |
1666 __ Eor(x1, x1, Operand(x2)); | |
1667 __ Adr(x2, &label_4); // Simple forward reference. | |
1668 __ Br(x2); // label_4 | |
1669 | |
1670 __ Bind(&label_1); | |
1671 __ Adr(x2, &label_3); // Multiple reverse references to the same label. | |
1672 __ Adr(x3, &label_3); | |
1673 __ Adr(x4, &label_3); | |
1674 __ Adr(x5, &label_2); // Simple reverse reference. | |
1675 __ Br(x5); // label_2 | |
1676 | |
1677 __ Bind(&label_4); | |
1678 END(); | |
1679 | |
1680 RUN(); | |
1681 | |
1682 ASSERT_EQUAL_64(0x0, x0); | |
1683 ASSERT_EQUAL_64(0x0, x1); | |
1684 | |
1685 TEARDOWN(); | |
1686 } | |
1687 | |
1688 | |
1689 TEST(branch_cond) { | |
1690 INIT_V8(); | |
1691 SETUP(); | |
1692 | |
1693 Label wrong; | |
1694 | |
1695 START(); | |
1696 __ Mov(x0, 0x1); | |
1697 __ Mov(x1, 0x1); | |
1698 __ Mov(x2, 0x8000000000000000L); | |
1699 | |
1700 // For each 'cmp' instruction below, the condition codes branching to 'wrong' | |
1701 // must not be taken; any other condition code would be. | |
1702 | |
1703 __ Cmp(x1, 0); | |
1704 __ B(&wrong, eq); | |
1705 __ B(&wrong, lo); | |
1706 __ B(&wrong, mi); | |
1707 __ B(&wrong, vs); | |
1708 __ B(&wrong, ls); | |
1709 __ B(&wrong, lt); | |
1710 __ B(&wrong, le); | |
1711 Label ok_1; | |
1712 __ B(&ok_1, ne); | |
1713 __ Mov(x0, 0x0); | |
1714 __ Bind(&ok_1); | |
1715 | |
1716 __ Cmp(x1, 1); | |
1717 __ B(&wrong, ne); | |
1718 __ B(&wrong, lo); | |
1719 __ B(&wrong, mi); | |
1720 __ B(&wrong, vs); | |
1721 __ B(&wrong, hi); | |
1722 __ B(&wrong, lt); | |
1723 __ B(&wrong, gt); | |
1724 Label ok_2; | |
1725 __ B(&ok_2, pl); | |
1726 __ Mov(x0, 0x0); | |
1727 __ Bind(&ok_2); | |
1728 | |
1729 __ Cmp(x1, 2); | |
1730 __ B(&wrong, eq); | |
1731 __ B(&wrong, hs); | |
1732 __ B(&wrong, pl); | |
1733 __ B(&wrong, vs); | |
1734 __ B(&wrong, hi); | |
1735 __ B(&wrong, ge); | |
1736 __ B(&wrong, gt); | |
1737 Label ok_3; | |
1738 __ B(&ok_3, vc); | |
1739 __ Mov(x0, 0x0); | |
1740 __ Bind(&ok_3); | |
1741 | |
1742 __ Cmp(x2, 1); | |
1743 __ B(&wrong, eq); | |
1744 __ B(&wrong, lo); | |
1745 __ B(&wrong, mi); | |
1746 __ B(&wrong, vc); | |
1747 __ B(&wrong, ls); | |
1748 __ B(&wrong, ge); | |
1749 __ B(&wrong, gt); | |
1750 Label ok_4; | |
1751 __ B(&ok_4, le); | |
1752 __ Mov(x0, 0x0); | |
1753 __ Bind(&ok_4); | |
1754 | |
1755 Label ok_5; | |
1756 __ b(&ok_5, al); | |
1757 __ Mov(x0, 0x0); | |
1758 __ Bind(&ok_5); | |
1759 | |
1760 Label ok_6; | |
1761 __ b(&ok_6, nv); | |
1762 __ Mov(x0, 0x0); | |
1763 __ Bind(&ok_6); | |
1764 | |
1765 END(); | |
1766 | |
1767 __ Bind(&wrong); | |
1768 __ Mov(x0, 0x0); | |
1769 END(); | |
1770 | |
1771 RUN(); | |
1772 | |
1773 ASSERT_EQUAL_64(0x1, x0); | |
1774 | |
1775 TEARDOWN(); | |
1776 } | |
1777 | |
1778 | |
1779 TEST(branch_to_reg) { | |
1780 INIT_V8(); | |
1781 SETUP(); | |
1782 | |
1783 // Test br. | |
1784 Label fn1, after_fn1; | |
1785 | |
1786 START(); | |
1787 __ Mov(x29, lr); | |
1788 | |
1789 __ Mov(x1, 0); | |
1790 __ B(&after_fn1); | |
1791 | |
1792 __ Bind(&fn1); | |
1793 __ Mov(x0, lr); | |
1794 __ Mov(x1, 42); | |
1795 __ Br(x0); | |
1796 | |
1797 __ Bind(&after_fn1); | |
1798 __ Bl(&fn1); | |
1799 | |
1800 // Test blr. | |
1801 Label fn2, after_fn2; | |
1802 | |
1803 __ Mov(x2, 0); | |
1804 __ B(&after_fn2); | |
1805 | |
1806 __ Bind(&fn2); | |
1807 __ Mov(x0, lr); | |
1808 __ Mov(x2, 84); | |
1809 __ Blr(x0); | |
1810 | |
1811 __ Bind(&after_fn2); | |
1812 __ Bl(&fn2); | |
1813 __ Mov(x3, lr); | |
1814 | |
1815 __ Mov(lr, x29); | |
1816 END(); | |
1817 | |
1818 RUN(); | |
1819 | |
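// fn2 captured in x0 the lr produced by 'Bl(&fn2)', i.e. the address of the | |
// 'Mov(x3, lr)' that follows that Bl. 'Blr(x0)' then reset lr to the address | |
// of the Bl itself, which 'Mov(x3, lr)' stored in x3; hence x0 is exactly one | |
// instruction past x3. | |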
1820 ASSERT_EQUAL_64(core.xreg(3) + kInstructionSize, x0); | |
1821 ASSERT_EQUAL_64(42, x1); | |
1822 ASSERT_EQUAL_64(84, x2); | |
1823 | |
1824 TEARDOWN(); | |
1825 } | |
1826 | |
1827 | |
1828 TEST(compare_branch) { | |
1829 INIT_V8(); | |
1830 SETUP(); | |
1831 | |
1832 START(); | |
1833 __ Mov(x0, 0); | |
1834 __ Mov(x1, 0); | |
1835 __ Mov(x2, 0); | |
1836 __ Mov(x3, 0); | |
1837 __ Mov(x4, 0); | |
1838 __ Mov(x5, 0); | |
1839 __ Mov(x16, 0); | |
1840 __ Mov(x17, 42); | |
1841 | |
1842 Label zt, zt_end; | |
1843 __ Cbz(w16, &zt); | |
1844 __ B(&zt_end); | |
1845 __ Bind(&zt); | |
1846 __ Mov(x0, 1); | |
1847 __ Bind(&zt_end); | |
1848 | |
1849 Label zf, zf_end; | |
1850 __ Cbz(x17, &zf); | |
1851 __ B(&zf_end); | |
1852 __ Bind(&zf); | |
1853 __ Mov(x1, 1); | |
1854 __ Bind(&zf_end); | |
1855 | |
1856 Label nzt, nzt_end; | |
1857 __ Cbnz(w17, &nzt); | |
1858 __ B(&nzt_end); | |
1859 __ Bind(&nzt); | |
1860 __ Mov(x2, 1); | |
1861 __ Bind(&nzt_end); | |
1862 | |
1863 Label nzf, nzf_end; | |
1864 __ Cbnz(x16, &nzf); | |
1865 __ B(&nzf_end); | |
1866 __ Bind(&nzf); | |
1867 __ Mov(x3, 1); | |
1868 __ Bind(&nzf_end); | |
1869 | |
1870 __ Mov(x18, 0xffffffff00000000UL); | |
1871 | |
1872 Label a, a_end; | |
1873 __ Cbz(w18, &a); | |
1874 __ B(&a_end); | |
1875 __ Bind(&a); | |
1876 __ Mov(x4, 1); | |
1877 __ Bind(&a_end); | |
1878 | |
1879 Label b, b_end; | |
1880 __ Cbnz(w18, &b); | |
1881 __ B(&b_end); | |
1882 __ Bind(&b); | |
1883 __ Mov(x5, 1); | |
1884 __ Bind(&b_end); | |
1885 | |
1886 END(); | |
1887 | |
1888 RUN(); | |
1889 | |
1890 ASSERT_EQUAL_64(1, x0); | |
1891 ASSERT_EQUAL_64(0, x1); | |
1892 ASSERT_EQUAL_64(1, x2); | |
1893 ASSERT_EQUAL_64(0, x3); | |
1894 ASSERT_EQUAL_64(1, x4); | |
1895 ASSERT_EQUAL_64(0, x5); | |
1896 | |
1897 TEARDOWN(); | |
1898 } | |
1899 | |
1900 | |
1901 TEST(test_branch) { | |
1902 INIT_V8(); | |
1903 SETUP(); | |
1904 | |
1905 START(); | |
1906 __ Mov(x0, 0); | |
1907 __ Mov(x1, 0); | |
1908 __ Mov(x2, 0); | |
1909 __ Mov(x3, 0); | |
1910 __ Mov(x16, 0xaaaaaaaaaaaaaaaaUL); | |
1911 | |
1912 Label bz, bz_end; | |
1913 __ Tbz(w16, 0, &bz); | |
1914 __ B(&bz_end); | |
1915 __ Bind(&bz); | |
1916 __ Mov(x0, 1); | |
1917 __ Bind(&bz_end); | |
1918 | |
1919 Label bo, bo_end; | |
1920 __ Tbz(x16, 63, &bo); | |
1921 __ B(&bo_end); | |
1922 __ Bind(&bo); | |
1923 __ Mov(x1, 1); | |
1924 __ Bind(&bo_end); | |
1925 | |
1926 Label nbz, nbz_end; | |
1927 __ Tbnz(x16, 61, &nbz); | |
1928 __ B(&nbz_end); | |
1929 __ Bind(&nbz); | |
1930 __ Mov(x2, 1); | |
1931 __ Bind(&nbz_end); | |
1932 | |
1933 Label nbo, nbo_end; | |
1934 __ Tbnz(w16, 2, &nbo); | |
1935 __ B(&nbo_end); | |
1936 __ Bind(&nbo); | |
1937 __ Mov(x3, 1); | |
1938 __ Bind(&nbo_end); | |
1939 END(); | |
1940 | |
1941 RUN(); | |
1942 | |
1943 ASSERT_EQUAL_64(1, x0); | |
1944 ASSERT_EQUAL_64(0, x1); | |
1945 ASSERT_EQUAL_64(1, x2); | |
1946 ASSERT_EQUAL_64(0, x3); | |
1947 | |
1948 TEARDOWN(); | |
1949 } | |
1950 | |
1951 | |
1952 TEST(far_branch_backward) { | |
1953 INIT_V8(); | |
1954 | |
1955 // Test that the MacroAssembler correctly resolves backward branches to labels | |
1956 // that are outside the immediate range of branch instructions. | |
1957 int max_range = | |
1958 std::max(Instruction::ImmBranchRange(TestBranchType), | |
1959 std::max(Instruction::ImmBranchRange(CompareBranchType), | |
1960 Instruction::ImmBranchRange(CondBranchType))); | |
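// On A64, tbz/tbnz encode a 14-bit instruction offset (+/-32KB), while
// cbz/cbnz and conditional b encode 19 bits (+/-1MB), so max_range here is
// expected to be the +/-1MB compare-and-branch/conditional-branch range.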
1961 | |
1962 SETUP_SIZE(max_range + 1000 * kInstructionSize); | |
1963 | |
1964 START(); | |
1965 | |
1966 Label done, fail; | |
1967 Label test_tbz, test_cbz, test_bcond; | |
1968 Label success_tbz, success_cbz, success_bcond; | |
1969 | |
1970 __ Mov(x0, 0); | |
1971 __ Mov(x1, 1); | |
1972 __ Mov(x10, 0); | |
1973 | |
1974 __ B(&test_tbz); | |
1975 __ Bind(&success_tbz); | |
1976 __ Orr(x0, x0, 1 << 0); | |
1977 __ B(&test_cbz); | |
1978 __ Bind(&success_cbz); | |
1979 __ Orr(x0, x0, 1 << 1); | |
1980 __ B(&test_bcond); | |
1981 __ Bind(&success_bcond); | |
1982 __ Orr(x0, x0, 1 << 2); | |
1983 | |
1984 __ B(&done); | |
1985 | |
1986 // Generate enough code to overflow the immediate range of the three types of | |
1987 // branches below. | |
1988 for (unsigned i = 0; i < max_range / kInstructionSize + 1; ++i) { | |
1989 if (i % 100 == 0) { | |
1990 // If we do land in this code, we do not want to execute so many nops | |
// before reaching the end of the test (especially if tracing is activated).
1992 __ B(&fail); | |
1993 } else { | |
1994 __ Nop(); | |
1995 } | |
1996 } | |
1997 __ B(&fail); | |
1998 | |
1999 __ Bind(&test_tbz); | |
2000 __ Tbz(x10, 7, &success_tbz); | |
2001 __ Bind(&test_cbz); | |
2002 __ Cbz(x10, &success_cbz); | |
2003 __ Bind(&test_bcond); | |
2004 __ Cmp(x10, 0); | |
2005 __ B(eq, &success_bcond); | |
2006 | |
// Each out-of-range branch should have been rewritten as at most two
// instructions (a branch with the condition inverted, over an unconditional
// branch), so the three branches plus the Cmp take at most seven in total.
2009 CHECK_GE(7 * kInstructionSize, __ SizeOfCodeGeneratedSince(&test_tbz)); | |
2010 | |
2011 __ Bind(&fail); | |
2012 __ Mov(x1, 0); | |
2013 __ Bind(&done); | |
2014 | |
2015 END(); | |
2016 | |
2017 RUN(); | |
2018 | |
2019 ASSERT_EQUAL_64(0x7, x0); | |
2020 ASSERT_EQUAL_64(0x1, x1); | |
2021 | |
2022 TEARDOWN(); | |
2023 } | |
2024 | |
2025 | |
2026 TEST(far_branch_simple_veneer) { | |
2027 INIT_V8(); | |
2028 | |
2029 // Test that the MacroAssembler correctly emits veneers for forward branches | |
2030 // to labels that are outside the immediate range of branch instructions. | |
2031 int max_range = | |
2032 std::max(Instruction::ImmBranchRange(TestBranchType), | |
2033 std::max(Instruction::ImmBranchRange(CompareBranchType), | |
2034 Instruction::ImmBranchRange(CondBranchType))); | |
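// A veneer is an out-of-line unconditional branch emitted within range of the
// short branch, which is then retargeted at the veneer. Roughly:
//     tbz x10, #7, veneer    // now in range
//     ...
//   veneer:
//     b success_tbz          // unconditional, +/-128MB range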
2035 | |
2036 SETUP_SIZE(max_range + 1000 * kInstructionSize); | |
2037 | |
2038 START(); | |
2039 | |
2040 Label done, fail; | |
2041 Label test_tbz, test_cbz, test_bcond; | |
2042 Label success_tbz, success_cbz, success_bcond; | |
2043 | |
2044 __ Mov(x0, 0); | |
2045 __ Mov(x1, 1); | |
2046 __ Mov(x10, 0); | |
2047 | |
2048 __ Bind(&test_tbz); | |
2049 __ Tbz(x10, 7, &success_tbz); | |
2050 __ Bind(&test_cbz); | |
2051 __ Cbz(x10, &success_cbz); | |
2052 __ Bind(&test_bcond); | |
2053 __ Cmp(x10, 0); | |
2054 __ B(eq, &success_bcond); | |
2055 | |
2056 // Generate enough code to overflow the immediate range of the three types of | |
2057 // branches below. | |
2058 for (unsigned i = 0; i < max_range / kInstructionSize + 1; ++i) { | |
2059 if (i % 100 == 0) { | |
2060 // If we do land in this code, we do not want to execute so many nops | |
// before reaching the end of the test (especially if tracing is activated).
2062 // Also, the branches give the MacroAssembler the opportunity to emit the | |
2063 // veneers. | |
2064 __ B(&fail); | |
2065 } else { | |
2066 __ Nop(); | |
2067 } | |
2068 } | |
2069 __ B(&fail); | |
2070 | |
2071 __ Bind(&success_tbz); | |
2072 __ Orr(x0, x0, 1 << 0); | |
2073 __ B(&test_cbz); | |
2074 __ Bind(&success_cbz); | |
2075 __ Orr(x0, x0, 1 << 1); | |
2076 __ B(&test_bcond); | |
2077 __ Bind(&success_bcond); | |
2078 __ Orr(x0, x0, 1 << 2); | |
2079 | |
2080 __ B(&done); | |
2081 __ Bind(&fail); | |
2082 __ Mov(x1, 0); | |
2083 __ Bind(&done); | |
2084 | |
2085 END(); | |
2086 | |
2087 RUN(); | |
2088 | |
2089 ASSERT_EQUAL_64(0x7, x0); | |
2090 ASSERT_EQUAL_64(0x1, x1); | |
2091 | |
2092 TEARDOWN(); | |
2093 } | |
2094 | |
2095 | |
2096 TEST(far_branch_veneer_link_chain) { | |
2097 INIT_V8(); | |
2098 | |
2099 // Test that the MacroAssembler correctly emits veneers for forward branches | |
// that target an out-of-range label and belong to a chain of several
// branches jumping to that label.
2102 // | |
2103 // We test the three situations with the different types of instruction: | |
// (1) When the branch is at the start of the chain, with tbz.
// (2) When the branch is in the middle of the chain, with cbz.
// (3) When the branch is at the end of the chain, with bcond.
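// While a label is unbound, branches to it are threaded into a linked list
// through the emitted instructions; binding the label (or emitting a veneer
// for one link) has to patch that chain without breaking the other links.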
2107 int max_range = | |
2108 std::max(Instruction::ImmBranchRange(TestBranchType), | |
2109 std::max(Instruction::ImmBranchRange(CompareBranchType), | |
2110 Instruction::ImmBranchRange(CondBranchType))); | |
2111 | |
2112 SETUP_SIZE(max_range + 1000 * kInstructionSize); | |
2113 | |
2114 START(); | |
2115 | |
2116 Label skip, fail, done; | |
2117 Label test_tbz, test_cbz, test_bcond; | |
2118 Label success_tbz, success_cbz, success_bcond; | |
2119 | |
2120 __ Mov(x0, 0); | |
2121 __ Mov(x1, 1); | |
2122 __ Mov(x10, 0); | |
2123 | |
2124 __ B(&skip); | |
2125 // Branches at the start of the chain for situations (2) and (3). | |
2126 __ B(&success_cbz); | |
2127 __ B(&success_bcond); | |
2128 __ Nop(); | |
2129 __ B(&success_bcond); | |
2130 __ B(&success_cbz); | |
2131 __ Bind(&skip); | |
2132 | |
2133 __ Bind(&test_tbz); | |
2134 __ Tbz(x10, 7, &success_tbz); | |
2135 __ Bind(&test_cbz); | |
2136 __ Cbz(x10, &success_cbz); | |
2137 __ Bind(&test_bcond); | |
2138 __ Cmp(x10, 0); | |
2139 __ B(eq, &success_bcond); | |
2140 | |
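// skip was already bound above; Unuse() resets the label so it can be bound
// a second time (assumed semantics of Label::Unuse).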
2141 skip.Unuse(); | |
2142 __ B(&skip); | |
2143 // Branches at the end of the chain for situations (1) and (2). | |
2144 __ B(&success_cbz); | |
2145 __ B(&success_tbz); | |
2146 __ Nop(); | |
2147 __ B(&success_tbz); | |
2148 __ B(&success_cbz); | |
2149 __ Bind(&skip); | |
2150 | |
2151 // Generate enough code to overflow the immediate range of the three types of | |
2152 // branches below. | |
2153 for (unsigned i = 0; i < max_range / kInstructionSize + 1; ++i) { | |
2154 if (i % 100 == 0) { | |
2155 // If we do land in this code, we do not want to execute so many nops | |
// before reaching the end of the test (especially if tracing is activated).
2157 // Also, the branches give the MacroAssembler the opportunity to emit the | |
2158 // veneers. | |
2159 __ B(&fail); | |
2160 } else { | |
2161 __ Nop(); | |
2162 } | |
2163 } | |
2164 __ B(&fail); | |
2165 | |
2166 __ Bind(&success_tbz); | |
2167 __ Orr(x0, x0, 1 << 0); | |
2168 __ B(&test_cbz); | |
2169 __ Bind(&success_cbz); | |
2170 __ Orr(x0, x0, 1 << 1); | |
2171 __ B(&test_bcond); | |
2172 __ Bind(&success_bcond); | |
2173 __ Orr(x0, x0, 1 << 2); | |
2174 | |
2175 __ B(&done); | |
2176 __ Bind(&fail); | |
2177 __ Mov(x1, 0); | |
2178 __ Bind(&done); | |
2179 | |
2180 END(); | |
2181 | |
2182 RUN(); | |
2183 | |
2184 ASSERT_EQUAL_64(0x7, x0); | |
2185 ASSERT_EQUAL_64(0x1, x1); | |
2186 | |
2187 TEARDOWN(); | |
2188 } | |
2189 | |
2190 | |
2191 TEST(far_branch_veneer_broken_link_chain) { | |
2192 INIT_V8(); | |
2193 | |
// Check that the MacroAssembler correctly handles the case where a branch is
// removed from the link chain of a label, but the two links on either side
// of the removed branch are too far apart to be linked together directly.
2197 // | |
2198 // We test with tbz because it has a small range. | |
2199 int max_range = Instruction::ImmBranchRange(TestBranchType); | |
2200 int inter_range = max_range / 2 + max_range / 10; | |
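// inter_range is 0.6 * max_range: two adjacent links are within tbz range of
// each other, but links two apart (1.2 * max_range) are not, so removing the
// middle link must break the chain.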
2201 | |
2202 SETUP_SIZE(3 * inter_range + 1000 * kInstructionSize); | |
2203 | |
2204 START(); | |
2205 | |
2206 Label skip, fail, done; | |
2207 Label test_1, test_2, test_3; | |
2208 Label far_target; | |
2209 | |
2210 __ Mov(x0, 0); // Indicates the origin of the branch. | |
2211 __ Mov(x1, 1); | |
2212 __ Mov(x10, 0); | |
2213 | |
2214 // First instruction in the label chain. | |
2215 __ Bind(&test_1); | |
2216 __ Mov(x0, 1); | |
2217 __ B(&far_target); | |
2218 | |
2219 for (unsigned i = 0; i < inter_range / kInstructionSize; ++i) { | |
2220 if (i % 100 == 0) { | |
2221 // Do not allow generating veneers. They should not be needed. | |
2222 __ b(&fail); | |
2223 } else { | |
2224 __ Nop(); | |
2225 } | |
2226 } | |
2227 | |
// Will need a veneer to reach the target.
2229 __ Bind(&test_2); | |
2230 __ Mov(x0, 2); | |
2231 __ Tbz(x10, 7, &far_target); | |
2232 | |
2233 for (unsigned i = 0; i < inter_range / kInstructionSize; ++i) { | |
2234 if (i % 100 == 0) { | |
2235 // Do not allow generating veneers. They should not be needed. | |
2236 __ b(&fail); | |
2237 } else { | |
2238 __ Nop(); | |
2239 } | |
2240 } | |
2241 | |
2242 // Does not need a veneer to reach the target, but the initial branch | |
2243 // instruction is out of range. | |
2244 __ Bind(&test_3); | |
2245 __ Mov(x0, 3); | |
2246 __ Tbz(x10, 7, &far_target); | |
2247 | |
2248 for (unsigned i = 0; i < inter_range / kInstructionSize; ++i) { | |
2249 if (i % 100 == 0) { | |
2250 // Allow generating veneers. | |
2251 __ B(&fail); | |
2252 } else { | |
2253 __ Nop(); | |
2254 } | |
2255 } | |
2256 | |
2257 __ B(&fail); | |
2258 | |
2259 __ Bind(&far_target); | |
2260 __ Cmp(x0, 1); | |
2261 __ B(eq, &test_2); | |
2262 __ Cmp(x0, 2); | |
2263 __ B(eq, &test_3); | |
2264 | |
2265 __ B(&done); | |
2266 __ Bind(&fail); | |
2267 __ Mov(x1, 0); | |
2268 __ Bind(&done); | |
2269 | |
2270 END(); | |
2271 | |
2272 RUN(); | |
2273 | |
2274 ASSERT_EQUAL_64(0x3, x0); | |
2275 ASSERT_EQUAL_64(0x1, x1); | |
2276 | |
2277 TEARDOWN(); | |
2278 } | |
2279 | |
2280 | |
2281 TEST(branch_type) { | |
2282 INIT_V8(); | |
2283 | |
2284 SETUP(); | |
2285 | |
2286 Label fail, done; | |
2287 | |
2288 START(); | |
2289 __ Mov(x0, 0x0); | |
2290 __ Mov(x10, 0x7); | |
2291 __ Mov(x11, 0x0); | |
2292 | |
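// The BranchType variants are presumably dispatched to cbz/cbnz (reg_zero,
// reg_not_zero) and tbz/tbnz (reg_bit_clear, reg_bit_set); always and never
// need no extra operands.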
// Test branches that are not taken.
2294 __ Cmp(x10, 0x7); | |
2295 __ B(&fail, ne); | |
2296 __ B(&fail, never); | |
2297 __ B(&fail, reg_zero, x10); | |
2298 __ B(&fail, reg_not_zero, x11); | |
2299 __ B(&fail, reg_bit_clear, x10, 0); | |
2300 __ B(&fail, reg_bit_set, x10, 3); | |
2301 | |
2302 // Test taken branches. | |
2303 Label l1, l2, l3, l4, l5; | |
2304 __ Cmp(x10, 0x7); | |
2305 __ B(&l1, eq); | |
2306 __ B(&fail); | |
2307 __ Bind(&l1); | |
2308 __ B(&l2, always); | |
2309 __ B(&fail); | |
2310 __ Bind(&l2); | |
2311 __ B(&l3, reg_not_zero, x10); | |
2312 __ B(&fail); | |
2313 __ Bind(&l3); | |
2314 __ B(&l4, reg_bit_clear, x10, 15); | |
2315 __ B(&fail); | |
2316 __ Bind(&l4); | |
2317 __ B(&l5, reg_bit_set, x10, 1); | |
2318 __ B(&fail); | |
2319 __ Bind(&l5); | |
2320 | |
2321 __ B(&done); | |
2322 | |
2323 __ Bind(&fail); | |
2324 __ Mov(x0, 0x1); | |
2325 | |
2326 __ Bind(&done); | |
2327 | |
2328 END(); | |
2329 | |
2330 RUN(); | |
2331 | |
2332 ASSERT_EQUAL_64(0x0, x0); | |
2333 | |
2334 TEARDOWN(); | |
2335 } | |
2336 | |
2337 | |
2338 TEST(ldr_str_offset) { | |
2339 INIT_V8(); | |
2340 SETUP(); | |
2341 | |
2342 uint64_t src[2] = {0xfedcba9876543210UL, 0x0123456789abcdefUL}; | |
2343 uint64_t dst[5] = {0, 0, 0, 0, 0}; | |
2344 uintptr_t src_base = reinterpret_cast<uintptr_t>(src); | |
2345 uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst); | |
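// The stores below scatter the loaded values into dst at byte granularity;
// the expected dst[] values assume a little-endian memory layout.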
2346 | |
2347 START(); | |
2348 __ Mov(x17, src_base); | |
2349 __ Mov(x18, dst_base); | |
2350 __ Ldr(w0, MemOperand(x17)); | |
2351 __ Str(w0, MemOperand(x18)); | |
2352 __ Ldr(w1, MemOperand(x17, 4)); | |
2353 __ Str(w1, MemOperand(x18, 12)); | |
2354 __ Ldr(x2, MemOperand(x17, 8)); | |
2355 __ Str(x2, MemOperand(x18, 16)); | |
2356 __ Ldrb(w3, MemOperand(x17, 1)); | |
2357 __ Strb(w3, MemOperand(x18, 25)); | |
2358 __ Ldrh(w4, MemOperand(x17, 2)); | |
2359 __ Strh(w4, MemOperand(x18, 33)); | |
2360 END(); | |
2361 | |
2362 RUN(); | |
2363 | |
2364 ASSERT_EQUAL_64(0x76543210, x0); | |
2365 ASSERT_EQUAL_64(0x76543210, dst[0]); | |
2366 ASSERT_EQUAL_64(0xfedcba98, x1); | |
2367 ASSERT_EQUAL_64(0xfedcba9800000000UL, dst[1]); | |
2368 ASSERT_EQUAL_64(0x0123456789abcdefUL, x2); | |
2369 ASSERT_EQUAL_64(0x0123456789abcdefUL, dst[2]); | |
2370 ASSERT_EQUAL_64(0x32, x3); | |
2371 ASSERT_EQUAL_64(0x3200, dst[3]); | |
2372 ASSERT_EQUAL_64(0x7654, x4); | |
2373 ASSERT_EQUAL_64(0x765400, dst[4]); | |
2374 ASSERT_EQUAL_64(src_base, x17); | |
2375 ASSERT_EQUAL_64(dst_base, x18); | |
2376 | |
2377 TEARDOWN(); | |
2378 } | |
2379 | |
2380 | |
2381 TEST(ldr_str_wide) { | |
2382 INIT_V8(); | |
2383 SETUP(); | |
2384 | |
2385 uint32_t src[8192]; | |
2386 uint32_t dst[8192]; | |
2387 uintptr_t src_base = reinterpret_cast<uintptr_t>(src); | |
2388 uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst); | |
2389 memset(src, 0xaa, 8192 * sizeof(src[0])); | |
2390 memset(dst, 0xaa, 8192 * sizeof(dst[0])); | |
2391 src[0] = 0; | |
2392 src[6144] = 6144; | |
2393 src[8191] = 8191; | |
2394 | |
2395 START(); | |
2396 __ Mov(x22, src_base); | |
2397 __ Mov(x23, dst_base); | |
2398 __ Mov(x24, src_base); | |
2399 __ Mov(x25, dst_base); | |
2400 __ Mov(x26, src_base); | |
2401 __ Mov(x27, dst_base); | |
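// None of the offsets below fit the addressing modes' immediate fields
// (scaled 12-bit unsigned for plain ldr/str, 9-bit signed for pre-/post-
// index), so the MacroAssembler is expected to materialize them in a
// scratch register.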
2402 | |
2403 __ Ldr(w0, MemOperand(x22, 8191 * sizeof(src[0]))); | |
2404 __ Str(w0, MemOperand(x23, 8191 * sizeof(dst[0]))); | |
2405 __ Ldr(w1, MemOperand(x24, 4096 * sizeof(src[0]), PostIndex)); | |
2406 __ Str(w1, MemOperand(x25, 4096 * sizeof(dst[0]), PostIndex)); | |
2407 __ Ldr(w2, MemOperand(x26, 6144 * sizeof(src[0]), PreIndex)); | |
2408 __ Str(w2, MemOperand(x27, 6144 * sizeof(dst[0]), PreIndex)); | |
2409 END(); | |
2410 | |
2411 RUN(); | |
2412 | |
2413 ASSERT_EQUAL_32(8191, w0); | |
2414 ASSERT_EQUAL_32(8191, dst[8191]); | |
2415 ASSERT_EQUAL_64(src_base, x22); | |
2416 ASSERT_EQUAL_64(dst_base, x23); | |
2417 ASSERT_EQUAL_32(0, w1); | |
2418 ASSERT_EQUAL_32(0, dst[0]); | |
2419 ASSERT_EQUAL_64(src_base + 4096 * sizeof(src[0]), x24); | |
2420 ASSERT_EQUAL_64(dst_base + 4096 * sizeof(dst[0]), x25); | |
2421 ASSERT_EQUAL_32(6144, w2); | |
2422 ASSERT_EQUAL_32(6144, dst[6144]); | |
2423 ASSERT_EQUAL_64(src_base + 6144 * sizeof(src[0]), x26); | |
2424 ASSERT_EQUAL_64(dst_base + 6144 * sizeof(dst[0]), x27); | |
2425 | |
2426 TEARDOWN(); | |
2427 } | |
2428 | |
2429 | |
2430 TEST(ldr_str_preindex) { | |
2431 INIT_V8(); | |
2432 SETUP(); | |
2433 | |
2434 uint64_t src[2] = {0xfedcba9876543210UL, 0x0123456789abcdefUL}; | |
2435 uint64_t dst[6] = {0, 0, 0, 0, 0, 0}; | |
2436 uintptr_t src_base = reinterpret_cast<uintptr_t>(src); | |
2437 uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst); | |
2438 | |
2439 START(); | |
2440 __ Mov(x17, src_base); | |
2441 __ Mov(x18, dst_base); | |
2442 __ Mov(x19, src_base); | |
2443 __ Mov(x20, dst_base); | |
2444 __ Mov(x21, src_base + 16); | |
2445 __ Mov(x22, dst_base + 40); | |
2446 __ Mov(x23, src_base); | |
2447 __ Mov(x24, dst_base); | |
2448 __ Mov(x25, src_base); | |
2449 __ Mov(x26, dst_base); | |
2450 __ Ldr(w0, MemOperand(x17, 4, PreIndex)); | |
2451 __ Str(w0, MemOperand(x18, 12, PreIndex)); | |
2452 __ Ldr(x1, MemOperand(x19, 8, PreIndex)); | |
2453 __ Str(x1, MemOperand(x20, 16, PreIndex)); | |
2454 __ Ldr(w2, MemOperand(x21, -4, PreIndex)); | |
2455 __ Str(w2, MemOperand(x22, -4, PreIndex)); | |
2456 __ Ldrb(w3, MemOperand(x23, 1, PreIndex)); | |
2457 __ Strb(w3, MemOperand(x24, 25, PreIndex)); | |
2458 __ Ldrh(w4, MemOperand(x25, 3, PreIndex)); | |
2459 __ Strh(w4, MemOperand(x26, 41, PreIndex)); | |
2460 END(); | |
2461 | |
2462 RUN(); | |
2463 | |
2464 ASSERT_EQUAL_64(0xfedcba98, x0); | |
2465 ASSERT_EQUAL_64(0xfedcba9800000000UL, dst[1]); | |
2466 ASSERT_EQUAL_64(0x0123456789abcdefUL, x1); | |
2467 ASSERT_EQUAL_64(0x0123456789abcdefUL, dst[2]); | |
2468 ASSERT_EQUAL_64(0x01234567, x2); | |
2469 ASSERT_EQUAL_64(0x0123456700000000UL, dst[4]); | |
2470 ASSERT_EQUAL_64(0x32, x3); | |
2471 ASSERT_EQUAL_64(0x3200, dst[3]); | |
2472 ASSERT_EQUAL_64(0x9876, x4); | |
2473 ASSERT_EQUAL_64(0x987600, dst[5]); | |
2474 ASSERT_EQUAL_64(src_base + 4, x17); | |
2475 ASSERT_EQUAL_64(dst_base + 12, x18); | |
2476 ASSERT_EQUAL_64(src_base + 8, x19); | |
2477 ASSERT_EQUAL_64(dst_base + 16, x20); | |
2478 ASSERT_EQUAL_64(src_base + 12, x21); | |
2479 ASSERT_EQUAL_64(dst_base + 36, x22); | |
2480 ASSERT_EQUAL_64(src_base + 1, x23); | |
2481 ASSERT_EQUAL_64(dst_base + 25, x24); | |
2482 ASSERT_EQUAL_64(src_base + 3, x25); | |
2483 ASSERT_EQUAL_64(dst_base + 41, x26); | |
2484 | |
2485 TEARDOWN(); | |
2486 } | |
2487 | |
2488 | |
2489 TEST(ldr_str_postindex) { | |
2490 INIT_V8(); | |
2491 SETUP(); | |
2492 | |
2493 uint64_t src[2] = {0xfedcba9876543210UL, 0x0123456789abcdefUL}; | |
2494 uint64_t dst[6] = {0, 0, 0, 0, 0, 0}; | |
2495 uintptr_t src_base = reinterpret_cast<uintptr_t>(src); | |
2496 uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst); | |
2497 | |
2498 START(); | |
2499 __ Mov(x17, src_base + 4); | |
2500 __ Mov(x18, dst_base + 12); | |
2501 __ Mov(x19, src_base + 8); | |
2502 __ Mov(x20, dst_base + 16); | |
2503 __ Mov(x21, src_base + 8); | |
2504 __ Mov(x22, dst_base + 32); | |
2505 __ Mov(x23, src_base + 1); | |
2506 __ Mov(x24, dst_base + 25); | |
2507 __ Mov(x25, src_base + 3); | |
2508 __ Mov(x26, dst_base + 41); | |
2509 __ Ldr(w0, MemOperand(x17, 4, PostIndex)); | |
2510 __ Str(w0, MemOperand(x18, 12, PostIndex)); | |
2511 __ Ldr(x1, MemOperand(x19, 8, PostIndex)); | |
2512 __ Str(x1, MemOperand(x20, 16, PostIndex)); | |
2513 __ Ldr(x2, MemOperand(x21, -8, PostIndex)); | |
2514 __ Str(x2, MemOperand(x22, -32, PostIndex)); | |
2515 __ Ldrb(w3, MemOperand(x23, 1, PostIndex)); | |
2516 __ Strb(w3, MemOperand(x24, 5, PostIndex)); | |
2517 __ Ldrh(w4, MemOperand(x25, -3, PostIndex)); | |
2518 __ Strh(w4, MemOperand(x26, -41, PostIndex)); | |
2519 END(); | |
2520 | |
2521 RUN(); | |
2522 | |
2523 ASSERT_EQUAL_64(0xfedcba98, x0); | |
2524 ASSERT_EQUAL_64(0xfedcba9800000000UL, dst[1]); | |
2525 ASSERT_EQUAL_64(0x0123456789abcdefUL, x1); | |
2526 ASSERT_EQUAL_64(0x0123456789abcdefUL, dst[2]); | |
2527 ASSERT_EQUAL_64(0x0123456789abcdefUL, x2); | |
2528 ASSERT_EQUAL_64(0x0123456789abcdefUL, dst[4]); | |
2529 ASSERT_EQUAL_64(0x32, x3); | |
2530 ASSERT_EQUAL_64(0x3200, dst[3]); | |
2531 ASSERT_EQUAL_64(0x9876, x4); | |
2532 ASSERT_EQUAL_64(0x987600, dst[5]); | |
2533 ASSERT_EQUAL_64(src_base + 8, x17); | |
2534 ASSERT_EQUAL_64(dst_base + 24, x18); | |
2535 ASSERT_EQUAL_64(src_base + 16, x19); | |
2536 ASSERT_EQUAL_64(dst_base + 32, x20); | |
2537 ASSERT_EQUAL_64(src_base, x21); | |
2538 ASSERT_EQUAL_64(dst_base, x22); | |
2539 ASSERT_EQUAL_64(src_base + 2, x23); | |
2540 ASSERT_EQUAL_64(dst_base + 30, x24); | |
2541 ASSERT_EQUAL_64(src_base, x25); | |
2542 ASSERT_EQUAL_64(dst_base, x26); | |
2543 | |
2544 TEARDOWN(); | |
2545 } | |
2546 | |
2547 | |
2548 TEST(load_signed) { | |
2549 INIT_V8(); | |
2550 SETUP(); | |
2551 | |
2552 uint32_t src[2] = {0x80008080, 0x7fff7f7f}; | |
2553 uintptr_t src_base = reinterpret_cast<uintptr_t>(src); | |
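// Every byte, half-word and word of src[0] has its sign bit set, while
// src[1] is positive throughout, exercising both directions of the sign
// extensions below.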
2554 | |
2555 START(); | |
2556 __ Mov(x24, src_base); | |
2557 __ Ldrsb(w0, MemOperand(x24)); | |
2558 __ Ldrsb(w1, MemOperand(x24, 4)); | |
2559 __ Ldrsh(w2, MemOperand(x24)); | |
2560 __ Ldrsh(w3, MemOperand(x24, 4)); | |
2561 __ Ldrsb(x4, MemOperand(x24)); | |
2562 __ Ldrsb(x5, MemOperand(x24, 4)); | |
2563 __ Ldrsh(x6, MemOperand(x24)); | |
2564 __ Ldrsh(x7, MemOperand(x24, 4)); | |
2565 __ Ldrsw(x8, MemOperand(x24)); | |
2566 __ Ldrsw(x9, MemOperand(x24, 4)); | |
2567 END(); | |
2568 | |
2569 RUN(); | |
2570 | |
2571 ASSERT_EQUAL_64(0xffffff80, x0); | |
2572 ASSERT_EQUAL_64(0x0000007f, x1); | |
2573 ASSERT_EQUAL_64(0xffff8080, x2); | |
2574 ASSERT_EQUAL_64(0x00007f7f, x3); | |
2575 ASSERT_EQUAL_64(0xffffffffffffff80UL, x4); | |
2576 ASSERT_EQUAL_64(0x000000000000007fUL, x5); | |
2577 ASSERT_EQUAL_64(0xffffffffffff8080UL, x6); | |
2578 ASSERT_EQUAL_64(0x0000000000007f7fUL, x7); | |
2579 ASSERT_EQUAL_64(0xffffffff80008080UL, x8); | |
2580 ASSERT_EQUAL_64(0x000000007fff7f7fUL, x9); | |
2581 | |
2582 TEARDOWN(); | |
2583 } | |
2584 | |
2585 | |
2586 TEST(load_store_regoffset) { | |
2587 INIT_V8(); | |
2588 SETUP(); | |
2589 | |
2590 uint32_t src[3] = {1, 2, 3}; | |
2591 uint32_t dst[4] = {0, 0, 0, 0}; | |
2592 uintptr_t src_base = reinterpret_cast<uintptr_t>(src); | |
2593 uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst); | |
2594 | |
2595 START(); | |
2596 __ Mov(x16, src_base); | |
2597 __ Mov(x17, dst_base); | |
2598 __ Mov(x18, src_base + 3 * sizeof(src[0])); | |
2599 __ Mov(x19, dst_base + 3 * sizeof(dst[0])); | |
2600 __ Mov(x20, dst_base + 4 * sizeof(dst[0])); | |
2601 __ Mov(x24, 0); | |
2602 __ Mov(x25, 4); | |
2603 __ Mov(x26, -4); | |
2604 __ Mov(x27, 0xfffffffc); // 32-bit -4. | |
2605 __ Mov(x28, 0xfffffffe); // 32-bit -2. | |
2606 __ Mov(x29, 0xffffffff); // 32-bit -1. | |
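// With SXTW, only the low 32 bits of the offset register are used, sign-
// extended and optionally scaled by the access size: x28 holds 32-bit -2,
// which shifted left by 2 gives a byte offset of -8.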
2607 | |
2608 __ Ldr(w0, MemOperand(x16, x24)); | |
2609 __ Ldr(x1, MemOperand(x16, x25)); | |
2610 __ Ldr(w2, MemOperand(x18, x26)); | |
2611 __ Ldr(w3, MemOperand(x18, x27, SXTW)); | |
2612 __ Ldr(w4, MemOperand(x18, x28, SXTW, 2)); | |
2613 __ Str(w0, MemOperand(x17, x24)); | |
2614 __ Str(x1, MemOperand(x17, x25)); | |
2615 __ Str(w2, MemOperand(x20, x29, SXTW, 2)); | |
2616 END(); | |
2617 | |
2618 RUN(); | |
2619 | |
2620 ASSERT_EQUAL_64(1, x0); | |
2621 ASSERT_EQUAL_64(0x0000000300000002UL, x1); | |
2622 ASSERT_EQUAL_64(3, x2); | |
2623 ASSERT_EQUAL_64(3, x3); | |
2624 ASSERT_EQUAL_64(2, x4); | |
2625 ASSERT_EQUAL_32(1, dst[0]); | |
2626 ASSERT_EQUAL_32(2, dst[1]); | |
2627 ASSERT_EQUAL_32(3, dst[2]); | |
2628 ASSERT_EQUAL_32(3, dst[3]); | |
2629 | |
2630 TEARDOWN(); | |
2631 } | |
2632 | |
2633 | |
2634 TEST(load_store_float) { | |
2635 INIT_V8(); | |
2636 SETUP(); | |
2637 | |
2638 float src[3] = {1.0, 2.0, 3.0}; | |
2639 float dst[3] = {0.0, 0.0, 0.0}; | |
2640 uintptr_t src_base = reinterpret_cast<uintptr_t>(src); | |
2641 uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst); | |
2642 | |
2643 START(); | |
2644 __ Mov(x17, src_base); | |
2645 __ Mov(x18, dst_base); | |
2646 __ Mov(x19, src_base); | |
2647 __ Mov(x20, dst_base); | |
2648 __ Mov(x21, src_base); | |
2649 __ Mov(x22, dst_base); | |
2650 __ Ldr(s0, MemOperand(x17, sizeof(src[0]))); | |
2651 __ Str(s0, MemOperand(x18, sizeof(dst[0]), PostIndex)); | |
2652 __ Ldr(s1, MemOperand(x19, sizeof(src[0]), PostIndex)); | |
2653 __ Str(s1, MemOperand(x20, 2 * sizeof(dst[0]), PreIndex)); | |
2654 __ Ldr(s2, MemOperand(x21, 2 * sizeof(src[0]), PreIndex)); | |
2655 __ Str(s2, MemOperand(x22, sizeof(dst[0]))); | |
2656 END(); | |
2657 | |
2658 RUN(); | |
2659 | |
2660 ASSERT_EQUAL_FP32(2.0, s0); | |
2661 ASSERT_EQUAL_FP32(2.0, dst[0]); | |
2662 ASSERT_EQUAL_FP32(1.0, s1); | |
2663 ASSERT_EQUAL_FP32(1.0, dst[2]); | |
2664 ASSERT_EQUAL_FP32(3.0, s2); | |
2665 ASSERT_EQUAL_FP32(3.0, dst[1]); | |
2666 ASSERT_EQUAL_64(src_base, x17); | |
2667 ASSERT_EQUAL_64(dst_base + sizeof(dst[0]), x18); | |
2668 ASSERT_EQUAL_64(src_base + sizeof(src[0]), x19); | |
2669 ASSERT_EQUAL_64(dst_base + 2 * sizeof(dst[0]), x20); | |
2670 ASSERT_EQUAL_64(src_base + 2 * sizeof(src[0]), x21); | |
2671 ASSERT_EQUAL_64(dst_base, x22); | |
2672 | |
2673 TEARDOWN(); | |
2674 } | |
2675 | |
2676 | |
2677 TEST(load_store_double) { | |
2678 INIT_V8(); | |
2679 SETUP(); | |
2680 | |
2681 double src[3] = {1.0, 2.0, 3.0}; | |
2682 double dst[3] = {0.0, 0.0, 0.0}; | |
2683 uintptr_t src_base = reinterpret_cast<uintptr_t>(src); | |
2684 uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst); | |
2685 | |
2686 START(); | |
2687 __ Mov(x17, src_base); | |
2688 __ Mov(x18, dst_base); | |
2689 __ Mov(x19, src_base); | |
2690 __ Mov(x20, dst_base); | |
2691 __ Mov(x21, src_base); | |
2692 __ Mov(x22, dst_base); | |
2693 __ Ldr(d0, MemOperand(x17, sizeof(src[0]))); | |
2694 __ Str(d0, MemOperand(x18, sizeof(dst[0]), PostIndex)); | |
2695 __ Ldr(d1, MemOperand(x19, sizeof(src[0]), PostIndex)); | |
2696 __ Str(d1, MemOperand(x20, 2 * sizeof(dst[0]), PreIndex)); | |
2697 __ Ldr(d2, MemOperand(x21, 2 * sizeof(src[0]), PreIndex)); | |
2698 __ Str(d2, MemOperand(x22, sizeof(dst[0]))); | |
2699 END(); | |
2700 | |
2701 RUN(); | |
2702 | |
2703 ASSERT_EQUAL_FP64(2.0, d0); | |
2704 ASSERT_EQUAL_FP64(2.0, dst[0]); | |
2705 ASSERT_EQUAL_FP64(1.0, d1); | |
2706 ASSERT_EQUAL_FP64(1.0, dst[2]); | |
2707 ASSERT_EQUAL_FP64(3.0, d2); | |
2708 ASSERT_EQUAL_FP64(3.0, dst[1]); | |
2709 ASSERT_EQUAL_64(src_base, x17); | |
2710 ASSERT_EQUAL_64(dst_base + sizeof(dst[0]), x18); | |
2711 ASSERT_EQUAL_64(src_base + sizeof(src[0]), x19); | |
2712 ASSERT_EQUAL_64(dst_base + 2 * sizeof(dst[0]), x20); | |
2713 ASSERT_EQUAL_64(src_base + 2 * sizeof(src[0]), x21); | |
2714 ASSERT_EQUAL_64(dst_base, x22); | |
2715 | |
2716 TEARDOWN(); | |
2717 } | |
2718 | |
2719 | |
2720 TEST(ldp_stp_float) { | |
2721 INIT_V8(); | |
2722 SETUP(); | |
2723 | |
2724 float src[2] = {1.0, 2.0}; | |
2725 float dst[3] = {0.0, 0.0, 0.0}; | |
2726 uintptr_t src_base = reinterpret_cast<uintptr_t>(src); | |
2727 uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst); | |
2728 | |
2729 START(); | |
2730 __ Mov(x16, src_base); | |
2731 __ Mov(x17, dst_base); | |
2732 __ Ldp(s31, s0, MemOperand(x16, 2 * sizeof(src[0]), PostIndex)); | |
2733 __ Stp(s0, s31, MemOperand(x17, sizeof(dst[1]), PreIndex)); | |
2734 END(); | |
2735 | |
2736 RUN(); | |
2737 | |
2738 ASSERT_EQUAL_FP32(1.0, s31); | |
2739 ASSERT_EQUAL_FP32(2.0, s0); | |
2740 ASSERT_EQUAL_FP32(0.0, dst[0]); | |
2741 ASSERT_EQUAL_FP32(2.0, dst[1]); | |
2742 ASSERT_EQUAL_FP32(1.0, dst[2]); | |
2743 ASSERT_EQUAL_64(src_base + 2 * sizeof(src[0]), x16); | |
2744 ASSERT_EQUAL_64(dst_base + sizeof(dst[1]), x17); | |
2745 | |
2746 TEARDOWN(); | |
2747 } | |
2748 | |
2749 | |
2750 TEST(ldp_stp_double) { | |
2751 INIT_V8(); | |
2752 SETUP(); | |
2753 | |
2754 double src[2] = {1.0, 2.0}; | |
2755 double dst[3] = {0.0, 0.0, 0.0}; | |
2756 uintptr_t src_base = reinterpret_cast<uintptr_t>(src); | |
2757 uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst); | |
2758 | |
2759 START(); | |
2760 __ Mov(x16, src_base); | |
2761 __ Mov(x17, dst_base); | |
2762 __ Ldp(d31, d0, MemOperand(x16, 2 * sizeof(src[0]), PostIndex)); | |
2763 __ Stp(d0, d31, MemOperand(x17, sizeof(dst[1]), PreIndex)); | |
2764 END(); | |
2765 | |
2766 RUN(); | |
2767 | |
2768 ASSERT_EQUAL_FP64(1.0, d31); | |
2769 ASSERT_EQUAL_FP64(2.0, d0); | |
2770 ASSERT_EQUAL_FP64(0.0, dst[0]); | |
2771 ASSERT_EQUAL_FP64(2.0, dst[1]); | |
2772 ASSERT_EQUAL_FP64(1.0, dst[2]); | |
2773 ASSERT_EQUAL_64(src_base + 2 * sizeof(src[0]), x16); | |
2774 ASSERT_EQUAL_64(dst_base + sizeof(dst[1]), x17); | |
2775 | |
2776 TEARDOWN(); | |
2777 } | |
2778 | |
2779 | |
2780 TEST(ldp_stp_offset) { | |
2781 INIT_V8(); | |
2782 SETUP(); | |
2783 | |
2784 uint64_t src[3] = {0x0011223344556677UL, 0x8899aabbccddeeffUL, | |
2785 0xffeeddccbbaa9988UL}; | |
2786 uint64_t dst[7] = {0, 0, 0, 0, 0, 0, 0}; | |
2787 uintptr_t src_base = reinterpret_cast<uintptr_t>(src); | |
2788 uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst); | |
2789 | |
2790 START(); | |
2791 __ Mov(x16, src_base); | |
2792 __ Mov(x17, dst_base); | |
2793 __ Mov(x18, src_base + 24); | |
2794 __ Mov(x19, dst_base + 56); | |
2795 __ Ldp(w0, w1, MemOperand(x16)); | |
2796 __ Ldp(w2, w3, MemOperand(x16, 4)); | |
2797 __ Ldp(x4, x5, MemOperand(x16, 8)); | |
2798 __ Ldp(w6, w7, MemOperand(x18, -12)); | |
2799 __ Ldp(x8, x9, MemOperand(x18, -16)); | |
2800 __ Stp(w0, w1, MemOperand(x17)); | |
2801 __ Stp(w2, w3, MemOperand(x17, 8)); | |
2802 __ Stp(x4, x5, MemOperand(x17, 16)); | |
2803 __ Stp(w6, w7, MemOperand(x19, -24)); | |
2804 __ Stp(x8, x9, MemOperand(x19, -16)); | |
2805 END(); | |
2806 | |
2807 RUN(); | |
2808 | |
2809 ASSERT_EQUAL_64(0x44556677, x0); | |
2810 ASSERT_EQUAL_64(0x00112233, x1); | |
2811 ASSERT_EQUAL_64(0x0011223344556677UL, dst[0]); | |
2812 ASSERT_EQUAL_64(0x00112233, x2); | |
2813 ASSERT_EQUAL_64(0xccddeeff, x3); | |
2814 ASSERT_EQUAL_64(0xccddeeff00112233UL, dst[1]); | |
2815 ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x4); | |
2816 ASSERT_EQUAL_64(0x8899aabbccddeeffUL, dst[2]); | |
2817 ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, x5); | |
2818 ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, dst[3]); | |
2819 ASSERT_EQUAL_64(0x8899aabb, x6); | |
2820 ASSERT_EQUAL_64(0xbbaa9988, x7); | |
2821 ASSERT_EQUAL_64(0xbbaa99888899aabbUL, dst[4]); | |
2822 ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x8); | |
2823 ASSERT_EQUAL_64(0x8899aabbccddeeffUL, dst[5]); | |
2824 ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, x9); | |
2825 ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, dst[6]); | |
2826 ASSERT_EQUAL_64(src_base, x16); | |
2827 ASSERT_EQUAL_64(dst_base, x17); | |
2828 ASSERT_EQUAL_64(src_base + 24, x18); | |
2829 ASSERT_EQUAL_64(dst_base + 56, x19); | |
2830 | |
2831 TEARDOWN(); | |
2832 } | |
2833 | |
2834 | |
2835 TEST(ldnp_stnp_offset) { | |
2836 INIT_V8(); | |
2837 SETUP(); | |
2838 | |
2839 uint64_t src[3] = {0x0011223344556677UL, 0x8899aabbccddeeffUL, | |
2840 0xffeeddccbbaa9988UL}; | |
2841 uint64_t dst[7] = {0, 0, 0, 0, 0, 0, 0}; | |
2842 uintptr_t src_base = reinterpret_cast<uintptr_t>(src); | |
2843 uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst); | |
2844 | |
2845 START(); | |
2846 __ Mov(x16, src_base); | |
2847 __ Mov(x17, dst_base); | |
2848 __ Mov(x18, src_base + 24); | |
2849 __ Mov(x19, dst_base + 56); | |
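// Ldnp/Stnp are the non-temporal variants of Ldp/Stp: same addressing and
// results, just with a no-allocate cache hint, so the expectations mirror
// ldp_stp_offset.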
2850 __ Ldnp(w0, w1, MemOperand(x16)); | |
2851 __ Ldnp(w2, w3, MemOperand(x16, 4)); | |
2852 __ Ldnp(x4, x5, MemOperand(x16, 8)); | |
2853 __ Ldnp(w6, w7, MemOperand(x18, -12)); | |
2854 __ Ldnp(x8, x9, MemOperand(x18, -16)); | |
2855 __ Stnp(w0, w1, MemOperand(x17)); | |
2856 __ Stnp(w2, w3, MemOperand(x17, 8)); | |
2857 __ Stnp(x4, x5, MemOperand(x17, 16)); | |
2858 __ Stnp(w6, w7, MemOperand(x19, -24)); | |
2859 __ Stnp(x8, x9, MemOperand(x19, -16)); | |
2860 END(); | |
2861 | |
2862 RUN(); | |
2863 | |
2864 ASSERT_EQUAL_64(0x44556677, x0); | |
2865 ASSERT_EQUAL_64(0x00112233, x1); | |
2866 ASSERT_EQUAL_64(0x0011223344556677UL, dst[0]); | |
2867 ASSERT_EQUAL_64(0x00112233, x2); | |
2868 ASSERT_EQUAL_64(0xccddeeff, x3); | |
2869 ASSERT_EQUAL_64(0xccddeeff00112233UL, dst[1]); | |
2870 ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x4); | |
2871 ASSERT_EQUAL_64(0x8899aabbccddeeffUL, dst[2]); | |
2872 ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, x5); | |
2873 ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, dst[3]); | |
2874 ASSERT_EQUAL_64(0x8899aabb, x6); | |
2875 ASSERT_EQUAL_64(0xbbaa9988, x7); | |
2876 ASSERT_EQUAL_64(0xbbaa99888899aabbUL, dst[4]); | |
2877 ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x8); | |
2878 ASSERT_EQUAL_64(0x8899aabbccddeeffUL, dst[5]); | |
2879 ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, x9); | |
2880 ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, dst[6]); | |
2881 ASSERT_EQUAL_64(src_base, x16); | |
2882 ASSERT_EQUAL_64(dst_base, x17); | |
2883 ASSERT_EQUAL_64(src_base + 24, x18); | |
2884 ASSERT_EQUAL_64(dst_base + 56, x19); | |
2885 | |
2886 TEARDOWN(); | |
2887 } | |
2888 | |
2889 | |
2890 TEST(ldp_stp_preindex) { | |
2891 INIT_V8(); | |
2892 SETUP(); | |
2893 | |
2894 uint64_t src[3] = {0x0011223344556677UL, 0x8899aabbccddeeffUL, | |
2895 0xffeeddccbbaa9988UL}; | |
2896 uint64_t dst[5] = {0, 0, 0, 0, 0}; | |
2897 uintptr_t src_base = reinterpret_cast<uintptr_t>(src); | |
2898 uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst); | |
2899 | |
2900 START(); | |
2901 __ Mov(x16, src_base); | |
2902 __ Mov(x17, dst_base); | |
2903 __ Mov(x18, dst_base + 16); | |
2904 __ Ldp(w0, w1, MemOperand(x16, 4, PreIndex)); | |
2905 __ Mov(x19, x16); | |
2906 __ Ldp(w2, w3, MemOperand(x16, -4, PreIndex)); | |
2907 __ Stp(w2, w3, MemOperand(x17, 4, PreIndex)); | |
2908 __ Mov(x20, x17); | |
2909 __ Stp(w0, w1, MemOperand(x17, -4, PreIndex)); | |
2910 __ Ldp(x4, x5, MemOperand(x16, 8, PreIndex)); | |
2911 __ Mov(x21, x16); | |
2912 __ Ldp(x6, x7, MemOperand(x16, -8, PreIndex)); | |
2913 __ Stp(x7, x6, MemOperand(x18, 8, PreIndex)); | |
2914 __ Mov(x22, x18); | |
2915 __ Stp(x5, x4, MemOperand(x18, -8, PreIndex)); | |
2916 END(); | |
2917 | |
2918 RUN(); | |
2919 | |
2920 ASSERT_EQUAL_64(0x00112233, x0); | |
2921 ASSERT_EQUAL_64(0xccddeeff, x1); | |
2922 ASSERT_EQUAL_64(0x44556677, x2); | |
2923 ASSERT_EQUAL_64(0x00112233, x3); | |
2924 ASSERT_EQUAL_64(0xccddeeff00112233UL, dst[0]); | |
2925 ASSERT_EQUAL_64(0x0000000000112233UL, dst[1]); | |
2926 ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x4); | |
2927 ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, x5); | |
2928 ASSERT_EQUAL_64(0x0011223344556677UL, x6); | |
2929 ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x7); | |
2930 ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, dst[2]); | |
2931 ASSERT_EQUAL_64(0x8899aabbccddeeffUL, dst[3]); | |
2932 ASSERT_EQUAL_64(0x0011223344556677UL, dst[4]); | |
2933 ASSERT_EQUAL_64(src_base, x16); | |
2934 ASSERT_EQUAL_64(dst_base, x17); | |
2935 ASSERT_EQUAL_64(dst_base + 16, x18); | |
2936 ASSERT_EQUAL_64(src_base + 4, x19); | |
2937 ASSERT_EQUAL_64(dst_base + 4, x20); | |
2938 ASSERT_EQUAL_64(src_base + 8, x21); | |
2939 ASSERT_EQUAL_64(dst_base + 24, x22); | |
2940 | |
2941 TEARDOWN(); | |
2942 } | |
2943 | |
2944 | |
2945 TEST(ldp_stp_postindex) { | |
2946 INIT_V8(); | |
2947 SETUP(); | |
2948 | |
2949 uint64_t src[4] = {0x0011223344556677UL, 0x8899aabbccddeeffUL, | |
2950 0xffeeddccbbaa9988UL, 0x7766554433221100UL}; | |
2951 uint64_t dst[5] = {0, 0, 0, 0, 0}; | |
2952 uintptr_t src_base = reinterpret_cast<uintptr_t>(src); | |
2953 uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst); | |
2954 | |
2955 START(); | |
2956 __ Mov(x16, src_base); | |
2957 __ Mov(x17, dst_base); | |
2958 __ Mov(x18, dst_base + 16); | |
2959 __ Ldp(w0, w1, MemOperand(x16, 4, PostIndex)); | |
2960 __ Mov(x19, x16); | |
2961 __ Ldp(w2, w3, MemOperand(x16, -4, PostIndex)); | |
2962 __ Stp(w2, w3, MemOperand(x17, 4, PostIndex)); | |
2963 __ Mov(x20, x17); | |
2964 __ Stp(w0, w1, MemOperand(x17, -4, PostIndex)); | |
2965 __ Ldp(x4, x5, MemOperand(x16, 8, PostIndex)); | |
2966 __ Mov(x21, x16); | |
2967 __ Ldp(x6, x7, MemOperand(x16, -8, PostIndex)); | |
2968 __ Stp(x7, x6, MemOperand(x18, 8, PostIndex)); | |
2969 __ Mov(x22, x18); | |
2970 __ Stp(x5, x4, MemOperand(x18, -8, PostIndex)); | |
2971 END(); | |
2972 | |
2973 RUN(); | |
2974 | |
2975 ASSERT_EQUAL_64(0x44556677, x0); | |
2976 ASSERT_EQUAL_64(0x00112233, x1); | |
2977 ASSERT_EQUAL_64(0x00112233, x2); | |
2978 ASSERT_EQUAL_64(0xccddeeff, x3); | |
2979 ASSERT_EQUAL_64(0x4455667700112233UL, dst[0]); | |
2980 ASSERT_EQUAL_64(0x0000000000112233UL, dst[1]); | |
2981 ASSERT_EQUAL_64(0x0011223344556677UL, x4); | |
2982 ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x5); | |
2983 ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x6); | |
2984 ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, x7); | |
2985 ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, dst[2]); | |
2986 ASSERT_EQUAL_64(0x8899aabbccddeeffUL, dst[3]); | |
2987 ASSERT_EQUAL_64(0x0011223344556677UL, dst[4]); | |
2988 ASSERT_EQUAL_64(src_base, x16); | |
2989 ASSERT_EQUAL_64(dst_base, x17); | |
2990 ASSERT_EQUAL_64(dst_base + 16, x18); | |
2991 ASSERT_EQUAL_64(src_base + 4, x19); | |
2992 ASSERT_EQUAL_64(dst_base + 4, x20); | |
2993 ASSERT_EQUAL_64(src_base + 8, x21); | |
2994 ASSERT_EQUAL_64(dst_base + 24, x22); | |
2995 | |
2996 TEARDOWN(); | |
2997 } | |
2998 | |
2999 | |
3000 TEST(ldp_sign_extend) { | |
3001 INIT_V8(); | |
3002 SETUP(); | |
3003 | |
3004 uint32_t src[2] = {0x80000000, 0x7fffffff}; | |
3005 uintptr_t src_base = reinterpret_cast<uintptr_t>(src); | |
3006 | |
3007 START(); | |
3008 __ Mov(x24, src_base); | |
3009 __ Ldpsw(x0, x1, MemOperand(x24)); | |
3010 END(); | |
3011 | |
3012 RUN(); | |
3013 | |
3014 ASSERT_EQUAL_64(0xffffffff80000000UL, x0); | |
3015 ASSERT_EQUAL_64(0x000000007fffffffUL, x1); | |
3016 | |
3017 TEARDOWN(); | |
3018 } | |
3019 | |
3020 | |
3021 TEST(ldur_stur) { | |
3022 INIT_V8(); | |
3023 SETUP(); | |
3024 | |
3025 int64_t src[2] = {0x0123456789abcdefUL, 0x0123456789abcdefUL}; | |
3026 int64_t dst[5] = {0, 0, 0, 0, 0}; | |
3027 uintptr_t src_base = reinterpret_cast<uintptr_t>(src); | |
3028 uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst); | |
3029 | |
3030 START(); | |
3031 __ Mov(x17, src_base); | |
3032 __ Mov(x18, dst_base); | |
3033 __ Mov(x19, src_base + 16); | |
3034 __ Mov(x20, dst_base + 32); | |
3035 __ Mov(x21, dst_base + 40); | |
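// The unaligned offsets below cannot be encoded as scaled immediates, so the
// assembler is expected to pick the unscaled ldur/stur forms, which accept
// any 9-bit signed byte offset.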
3036 __ Ldr(w0, MemOperand(x17, 1)); | |
3037 __ Str(w0, MemOperand(x18, 2)); | |
3038 __ Ldr(x1, MemOperand(x17, 3)); | |
3039 __ Str(x1, MemOperand(x18, 9)); | |
3040 __ Ldr(w2, MemOperand(x19, -9)); | |
3041 __ Str(w2, MemOperand(x20, -5)); | |
3042 __ Ldrb(w3, MemOperand(x19, -1)); | |
3043 __ Strb(w3, MemOperand(x21, -1)); | |
3044 END(); | |
3045 | |
3046 RUN(); | |
3047 | |
3048 ASSERT_EQUAL_64(0x6789abcd, x0); | |
3049 ASSERT_EQUAL_64(0x6789abcd0000L, dst[0]); | |
3050 ASSERT_EQUAL_64(0xabcdef0123456789L, x1); | |
3051 ASSERT_EQUAL_64(0xcdef012345678900L, dst[1]); | |
3052 ASSERT_EQUAL_64(0x000000ab, dst[2]); | |
3053 ASSERT_EQUAL_64(0xabcdef01, x2); | |
3054 ASSERT_EQUAL_64(0x00abcdef01000000L, dst[3]); | |
3055 ASSERT_EQUAL_64(0x00000001, x3); | |
3056 ASSERT_EQUAL_64(0x0100000000000000L, dst[4]); | |
3057 ASSERT_EQUAL_64(src_base, x17); | |
3058 ASSERT_EQUAL_64(dst_base, x18); | |
3059 ASSERT_EQUAL_64(src_base + 16, x19); | |
3060 ASSERT_EQUAL_64(dst_base + 32, x20); | |
3061 | |
3062 TEARDOWN(); | |
3063 } | |
3064 | |
3065 | |
3066 #if 0 // TODO(all) enable. | |
3067 // TODO(rodolph): Adapt w16 Literal tests for RelocInfo. | |
3068 TEST(ldr_literal) { | |
3069 INIT_V8(); | |
3070 SETUP(); | |
3071 | |
3072 START(); | |
3073 __ Ldr(x2, 0x1234567890abcdefUL); | |
3074 __ Ldr(w3, 0xfedcba09); | |
3075 __ Ldr(d13, 1.234); | |
3076 __ Ldr(s25, 2.5); | |
3077 END(); | |
3078 | |
3079 RUN(); | |
3080 | |
3081 ASSERT_EQUAL_64(0x1234567890abcdefUL, x2); | |
3082 ASSERT_EQUAL_64(0xfedcba09, x3); | |
3083 ASSERT_EQUAL_FP64(1.234, d13); | |
3084 ASSERT_EQUAL_FP32(2.5, s25); | |
3085 | |
3086 TEARDOWN(); | |
3087 } | |
3088 | |
3089 | |
3090 static void LdrLiteralRangeHelper(ptrdiff_t range_, | |
3091 LiteralPoolEmitOption option, | |
3092 bool expect_dump) { | |
3093 ASSERT(range_ > 0); | |
3094 SETUP_SIZE(range_ + 1024); | |
3095 | |
3096 Label label_1, label_2; | |
3097 | |
3098 size_t range = static_cast<size_t>(range_); | |
3099 size_t code_size = 0; | |
3100 size_t pool_guard_size; | |
3101 | |
3102 if (option == NoJumpRequired) { | |
3103 // Space for an explicit branch. | |
3104 pool_guard_size = sizeof(Instr); | |
3105 } else { | |
3106 pool_guard_size = 0; | |
3107 } | |
3108 | |
3109 START(); | |
3110 // Force a pool dump so the pool starts off empty. | |
3111 __ EmitLiteralPool(JumpRequired); | |
3112 ASSERT_LITERAL_POOL_SIZE(0); | |
3113 | |
3114 __ Ldr(x0, 0x1234567890abcdefUL); | |
3115 __ Ldr(w1, 0xfedcba09); | |
3116 __ Ldr(d0, 1.234); | |
3117 __ Ldr(s1, 2.5); | |
3118 ASSERT_LITERAL_POOL_SIZE(4); | |
3119 | |
3120 code_size += 4 * sizeof(Instr); | |
3121 | |
3122 // Check that the requested range (allowing space for a branch over the pool) | |
3123 // can be handled by this test. | |
3124 ASSERT((code_size + pool_guard_size) <= range); | |
3125 | |
3126 // Emit NOPs up to 'range', leaving space for the pool guard. | |
3127 while ((code_size + pool_guard_size) < range) { | |
3128 __ Nop(); | |
3129 code_size += sizeof(Instr); | |
3130 } | |
3131 | |
3132 // Emit the guard sequence before the literal pool. | |
3133 if (option == NoJumpRequired) { | |
3134 __ B(&label_1); | |
3135 code_size += sizeof(Instr); | |
3136 } | |
3137 | |
3138 ASSERT(code_size == range); | |
3139 ASSERT_LITERAL_POOL_SIZE(4); | |
3140 | |
3141 // Possibly generate a literal pool. | |
3142 __ CheckLiteralPool(option); | |
3143 __ Bind(&label_1); | |
3144 if (expect_dump) { | |
3145 ASSERT_LITERAL_POOL_SIZE(0); | |
3146 } else { | |
3147 ASSERT_LITERAL_POOL_SIZE(4); | |
3148 } | |
3149 | |
3150 // Force a pool flush to check that a second pool functions correctly. | |
3151 __ EmitLiteralPool(JumpRequired); | |
3152 ASSERT_LITERAL_POOL_SIZE(0); | |
3153 | |
3154 // These loads should be after the pool (and will require a new one). | |
3155 __ Ldr(x4, 0x34567890abcdef12UL); | |
3156 __ Ldr(w5, 0xdcba09fe); | |
3157 __ Ldr(d4, 123.4); | |
3158 __ Ldr(s5, 250.0); | |
3159 ASSERT_LITERAL_POOL_SIZE(4); | |
3160 END(); | |
3161 | |
3162 RUN(); | |
3163 | |
3164 // Check that the literals loaded correctly. | |
3165 ASSERT_EQUAL_64(0x1234567890abcdefUL, x0); | |
3166 ASSERT_EQUAL_64(0xfedcba09, x1); | |
3167 ASSERT_EQUAL_FP64(1.234, d0); | |
3168 ASSERT_EQUAL_FP32(2.5, s1); | |
3169 ASSERT_EQUAL_64(0x34567890abcdef12UL, x4); | |
3170 ASSERT_EQUAL_64(0xdcba09fe, x5); | |
3171 ASSERT_EQUAL_FP64(123.4, d4); | |
3172 ASSERT_EQUAL_FP32(250.0, s5); | |
3173 | |
3174 TEARDOWN(); | |
3175 } | |
3176 | |
3177 | |
3178 TEST(ldr_literal_range_1) { | |
3179 INIT_V8(); | |
3180 LdrLiteralRangeHelper(kRecommendedLiteralPoolRange, | |
3181 NoJumpRequired, | |
3182 true); | |
3183 } | |
3184 | |
3185 | |
3186 TEST(ldr_literal_range_2) { | |
3187 INIT_V8(); | |
3188 LdrLiteralRangeHelper(kRecommendedLiteralPoolRange-sizeof(Instr), | |
3189 NoJumpRequired, | |
3190 false); | |
3191 } | |
3192 | |
3193 | |
3194 TEST(ldr_literal_range_3) { | |
3195 INIT_V8(); | |
3196 LdrLiteralRangeHelper(2 * kRecommendedLiteralPoolRange, | |
3197 JumpRequired, | |
3198 true); | |
3199 } | |
3200 | |
3201 | |
3202 TEST(ldr_literal_range_4) { | |
3203 INIT_V8(); | |
3204 LdrLiteralRangeHelper(2 * kRecommendedLiteralPoolRange-sizeof(Instr), | |
3205 JumpRequired, | |
3206 false); | |
3207 } | |
3208 | |
3209 | |
3210 TEST(ldr_literal_range_5) { | |
3211 INIT_V8(); | |
3212 LdrLiteralRangeHelper(kLiteralPoolCheckInterval, | |
3213 JumpRequired, | |
3214 false); | |
3215 } | |
3216 | |
3217 | |
3218 TEST(ldr_literal_range_6) { | |
3219 INIT_V8(); | |
3220 LdrLiteralRangeHelper(kLiteralPoolCheckInterval-sizeof(Instr), | |
3221 JumpRequired, | |
3222 false); | |
3223 } | |
3224 #endif | |
3225 | |
3226 TEST(add_sub_imm) { | |
3227 INIT_V8(); | |
3228 SETUP(); | |
3229 | |
3230 START(); | |
3231 __ Mov(x0, 0x0); | |
3232 __ Mov(x1, 0x1111); | |
3233 __ Mov(x2, 0xffffffffffffffffL); | |
3234 __ Mov(x3, 0x8000000000000000L); | |
3235 | |
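// Arithmetic immediates encode as a 12-bit value optionally shifted left by
// 12, so 0x123, 0x122000 (0x122 << 12) and 0xabc << 12 are all directly
// encodable.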
3236 __ Add(x10, x0, Operand(0x123)); | |
3237 __ Add(x11, x1, Operand(0x122000)); | |
3238 __ Add(x12, x0, Operand(0xabc << 12)); | |
3239 __ Add(x13, x2, Operand(1)); | |
3240 | |
3241 __ Add(w14, w0, Operand(0x123)); | |
3242 __ Add(w15, w1, Operand(0x122000)); | |
3243 __ Add(w16, w0, Operand(0xabc << 12)); | |
3244 __ Add(w17, w2, Operand(1)); | |
3245 | |
3246 __ Sub(x20, x0, Operand(0x1)); | |
3247 __ Sub(x21, x1, Operand(0x111)); | |
3248 __ Sub(x22, x1, Operand(0x1 << 12)); | |
3249 __ Sub(x23, x3, Operand(1)); | |
3250 | |
3251 __ Sub(w24, w0, Operand(0x1)); | |
3252 __ Sub(w25, w1, Operand(0x111)); | |
3253 __ Sub(w26, w1, Operand(0x1 << 12)); | |
3254 __ Sub(w27, w3, Operand(1)); | |
3255 END(); | |
3256 | |
3257 RUN(); | |
3258 | |
3259 ASSERT_EQUAL_64(0x123, x10); | |
3260 ASSERT_EQUAL_64(0x123111, x11); | |
3261 ASSERT_EQUAL_64(0xabc000, x12); | |
3262 ASSERT_EQUAL_64(0x0, x13); | |
3263 | |
3264 ASSERT_EQUAL_32(0x123, w14); | |
3265 ASSERT_EQUAL_32(0x123111, w15); | |
3266 ASSERT_EQUAL_32(0xabc000, w16); | |
3267 ASSERT_EQUAL_32(0x0, w17); | |
3268 | |
3269 ASSERT_EQUAL_64(0xffffffffffffffffL, x20); | |
3270 ASSERT_EQUAL_64(0x1000, x21); | |
3271 ASSERT_EQUAL_64(0x111, x22); | |
3272 ASSERT_EQUAL_64(0x7fffffffffffffffL, x23); | |
3273 | |
3274 ASSERT_EQUAL_32(0xffffffff, w24); | |
3275 ASSERT_EQUAL_32(0x1000, w25); | |
3276 ASSERT_EQUAL_32(0x111, w26); | |
3277 ASSERT_EQUAL_32(0xffffffff, w27); | |
3278 | |
3279 TEARDOWN(); | |
3280 } | |
3281 | |
3282 | |
3283 TEST(add_sub_wide_imm) { | |
3284 INIT_V8(); | |
3285 SETUP(); | |
3286 | |
3287 START(); | |
3288 __ Mov(x0, 0x0); | |
3289 __ Mov(x1, 0x1); | |
3290 | |
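// These immediates are too wide for the 12-bit (optionally shifted)
// encoding, so the MacroAssembler is expected to move them into a scratch
// register first.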
3291 __ Add(x10, x0, Operand(0x1234567890abcdefUL)); | |
3292 __ Add(x11, x1, Operand(0xffffffff)); | |
3293 | |
3294 __ Add(w12, w0, Operand(0x12345678)); | |
3295 __ Add(w13, w1, Operand(0xffffffff)); | |
3296 | |
3297 __ Sub(x20, x0, Operand(0x1234567890abcdefUL)); | |
3298 | |
3299 __ Sub(w21, w0, Operand(0x12345678)); | |
3300 END(); | |
3301 | |
3302 RUN(); | |
3303 | |
3304 ASSERT_EQUAL_64(0x1234567890abcdefUL, x10); | |
3305 ASSERT_EQUAL_64(0x100000000UL, x11); | |
3306 | |
3307 ASSERT_EQUAL_32(0x12345678, w12); | |
3308 ASSERT_EQUAL_64(0x0, x13); | |
3309 | |
3310 ASSERT_EQUAL_64(-0x1234567890abcdefUL, x20); | |
3311 | |
3312 ASSERT_EQUAL_32(-0x12345678, w21); | |
3313 | |
3314 TEARDOWN(); | |
3315 } | |
3316 | |
3317 | |
3318 TEST(add_sub_shifted) { | |
3319 INIT_V8(); | |
3320 SETUP(); | |
3321 | |
3322 START(); | |
3323 __ Mov(x0, 0); | |
3324 __ Mov(x1, 0x0123456789abcdefL); | |
3325 __ Mov(x2, 0xfedcba9876543210L); | |
3326 __ Mov(x3, 0xffffffffffffffffL); | |
3327 | |
3328 __ Add(x10, x1, Operand(x2)); | |
3329 __ Add(x11, x0, Operand(x1, LSL, 8)); | |
3330 __ Add(x12, x0, Operand(x1, LSR, 8)); | |
3331 __ Add(x13, x0, Operand(x1, ASR, 8)); | |
3332 __ Add(x14, x0, Operand(x2, ASR, 8)); | |
3333 __ Add(w15, w0, Operand(w1, ASR, 8)); | |
3334 __ Add(w18, w3, Operand(w1, ROR, 8)); | |
3335 __ Add(x19, x3, Operand(x1, ROR, 8)); | |
3336 | |
3337 __ Sub(x20, x3, Operand(x2)); | |
3338 __ Sub(x21, x3, Operand(x1, LSL, 8)); | |
3339 __ Sub(x22, x3, Operand(x1, LSR, 8)); | |
3340 __ Sub(x23, x3, Operand(x1, ASR, 8)); | |
3341 __ Sub(x24, x3, Operand(x2, ASR, 8)); | |
3342 __ Sub(w25, w3, Operand(w1, ASR, 8)); | |
3343 __ Sub(w26, w3, Operand(w1, ROR, 8)); | |
3344 __ Sub(x27, x3, Operand(x1, ROR, 8)); | |
3345 END(); | |
3346 | |
3347 RUN(); | |
3348 | |
3349 ASSERT_EQUAL_64(0xffffffffffffffffL, x10); | |
3350 ASSERT_EQUAL_64(0x23456789abcdef00L, x11); | |
3351 ASSERT_EQUAL_64(0x000123456789abcdL, x12); | |
3352 ASSERT_EQUAL_64(0x000123456789abcdL, x13); | |
3353 ASSERT_EQUAL_64(0xfffedcba98765432L, x14); | |
3354 ASSERT_EQUAL_64(0xff89abcd, x15); | |
3355 ASSERT_EQUAL_64(0xef89abcc, x18); | |
3356 ASSERT_EQUAL_64(0xef0123456789abccL, x19); | |
3357 | |
3358 ASSERT_EQUAL_64(0x0123456789abcdefL, x20); | |
3359 ASSERT_EQUAL_64(0xdcba9876543210ffL, x21); | |
3360 ASSERT_EQUAL_64(0xfffedcba98765432L, x22); | |
3361 ASSERT_EQUAL_64(0xfffedcba98765432L, x23); | |
3362 ASSERT_EQUAL_64(0x000123456789abcdL, x24); | |
3363 ASSERT_EQUAL_64(0x00765432, x25); | |
3364 ASSERT_EQUAL_64(0x10765432, x26); | |
3365 ASSERT_EQUAL_64(0x10fedcba98765432L, x27); | |
3366 | |
3367 TEARDOWN(); | |
3368 } | |
3369 | |
3370 | |
3371 TEST(add_sub_extended) { | |
3372 INIT_V8(); | |
3373 SETUP(); | |
3374 | |
3375 START(); | |
3376 __ Mov(x0, 0); | |
3377 __ Mov(x1, 0x0123456789abcdefL); | |
3378 __ Mov(x2, 0xfedcba9876543210L); | |
3379 __ Mov(w3, 0x80); | |
3380 | |
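// An extended operand first narrows and extends the source, then shifts it:
// Operand(x1, UXTB, 1) takes the low byte 0xef, zero-extends it and shifts
// left by 1, giving 0x1de.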
3381 __ Add(x10, x0, Operand(x1, UXTB, 0)); | |
3382 __ Add(x11, x0, Operand(x1, UXTB, 1)); | |
3383 __ Add(x12, x0, Operand(x1, UXTH, 2)); | |
3384 __ Add(x13, x0, Operand(x1, UXTW, 4)); | |
3385 | |
3386 __ Add(x14, x0, Operand(x1, SXTB, 0)); | |
3387 __ Add(x15, x0, Operand(x1, SXTB, 1)); | |
3388 __ Add(x16, x0, Operand(x1, SXTH, 2)); | |
3389 __ Add(x17, x0, Operand(x1, SXTW, 3)); | |
3390 __ Add(x18, x0, Operand(x2, SXTB, 0)); | |
3391 __ Add(x19, x0, Operand(x2, SXTB, 1)); | |
3392 __ Add(x20, x0, Operand(x2, SXTH, 2)); | |
3393 __ Add(x21, x0, Operand(x2, SXTW, 3)); | |
3394 | |
3395 __ Add(x22, x1, Operand(x2, SXTB, 1)); | |
3396 __ Sub(x23, x1, Operand(x2, SXTB, 1)); | |
3397 | |
3398 __ Add(w24, w1, Operand(w2, UXTB, 2)); | |
3399 __ Add(w25, w0, Operand(w1, SXTB, 0)); | |
3400 __ Add(w26, w0, Operand(w1, SXTB, 1)); | |
3401 __ Add(w27, w2, Operand(w1, SXTW, 3)); | |
3402 | |
3403 __ Add(w28, w0, Operand(w1, SXTW, 3)); | |
3404 __ Add(x29, x0, Operand(w1, SXTW, 3)); | |
3405 | |
3406 __ Sub(x30, x0, Operand(w3, SXTB, 1)); | |
3407 END(); | |
3408 | |
3409 RUN(); | |
3410 | |
3411 ASSERT_EQUAL_64(0xefL, x10); | |
3412 ASSERT_EQUAL_64(0x1deL, x11); | |
3413 ASSERT_EQUAL_64(0x337bcL, x12); | |
3414 ASSERT_EQUAL_64(0x89abcdef0L, x13); | |
3415 | |
3416 ASSERT_EQUAL_64(0xffffffffffffffefL, x14); | |
3417 ASSERT_EQUAL_64(0xffffffffffffffdeL, x15); | |
3418 ASSERT_EQUAL_64(0xffffffffffff37bcL, x16); | |
3419 ASSERT_EQUAL_64(0xfffffffc4d5e6f78L, x17); | |
3420 ASSERT_EQUAL_64(0x10L, x18); | |
3421 ASSERT_EQUAL_64(0x20L, x19); | |
3422 ASSERT_EQUAL_64(0xc840L, x20); | |
3423 ASSERT_EQUAL_64(0x3b2a19080L, x21); | |
3424 | |
3425 ASSERT_EQUAL_64(0x0123456789abce0fL, x22); | |
3426 ASSERT_EQUAL_64(0x0123456789abcdcfL, x23); | |
3427 | |
3428 ASSERT_EQUAL_32(0x89abce2f, w24); | |
3429 ASSERT_EQUAL_32(0xffffffef, w25); | |
3430 ASSERT_EQUAL_32(0xffffffde, w26); | |
3431 ASSERT_EQUAL_32(0xc3b2a188, w27); | |
3432 | |
3433 ASSERT_EQUAL_32(0x4d5e6f78, w28); | |
3434 ASSERT_EQUAL_64(0xfffffffc4d5e6f78L, x29); | |
3435 | |
3436 ASSERT_EQUAL_64(256, x30); | |
3437 | |
3438 TEARDOWN(); | |
3439 } | |
3440 | |
3441 | |
3442 TEST(add_sub_negative) { | |
3443 INIT_V8(); | |
3444 SETUP(); | |
3445 | |
3446 START(); | |
3447 __ Mov(x0, 0); | |
3448 __ Mov(x1, 4687); | |
3449 __ Mov(x2, 0x1122334455667788); | |
3450 __ Mov(w3, 0x11223344); | |
3451 __ Mov(w4, 400000); | |
3452 | |
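// Adding a negative immediate should be emitted as the equivalent Sub (and
// vice versa), since the instruction encoding only holds unsigned
// immediates.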
3453 __ Add(x10, x0, -42); | |
3454 __ Add(x11, x1, -687); | |
3455 __ Add(x12, x2, -0x88); | |
3456 | |
3457 __ Sub(x13, x0, -600); | |
3458 __ Sub(x14, x1, -313); | |
3459 __ Sub(x15, x2, -0x555); | |
3460 | |
3461 __ Add(w19, w3, -0x344); | |
3462 __ Add(w20, w4, -2000); | |
3463 | |
3464 __ Sub(w21, w3, -0xbc); | |
3465 __ Sub(w22, w4, -2000); | |
3466 END(); | |
3467 | |
3468 RUN(); | |
3469 | |
3470 ASSERT_EQUAL_64(-42, x10); | |
3471 ASSERT_EQUAL_64(4000, x11); | |
3472 ASSERT_EQUAL_64(0x1122334455667700, x12); | |
3473 | |
3474 ASSERT_EQUAL_64(600, x13); | |
3475 ASSERT_EQUAL_64(5000, x14); | |
3476 ASSERT_EQUAL_64(0x1122334455667cdd, x15); | |
3477 | |
3478 ASSERT_EQUAL_32(0x11223000, w19); | |
3479 ASSERT_EQUAL_32(398000, w20); | |
3480 | |
3481 ASSERT_EQUAL_32(0x11223400, w21); | |
3482 ASSERT_EQUAL_32(402000, w22); | |
3483 | |
3484 TEARDOWN(); | |
3485 } | |
3486 | |
3487 | |
3488 TEST(add_sub_zero) { | |
3489 INIT_V8(); | |
3490 SETUP(); | |
3491 | |
3492 START(); | |
3493 __ Mov(x0, 0); | |
3494 __ Mov(x1, 0); | |
3495 __ Mov(x2, 0); | |
3496 | |
3497 Label blob1; | |
3498 __ Bind(&blob1); | |
3499 __ Add(x0, x0, 0); | |
3500 __ Sub(x1, x1, 0); | |
3501 __ Sub(x2, x2, xzr); | |
3502 CHECK_EQ(0, __ SizeOfCodeGeneratedSince(&blob1)); | |
3503 | |
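// The 32-bit forms below cannot be elided, presumably because writing a W
// register clears the upper 32 bits of the X register, which is an
// architectural effect even when adding zero.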
3504 Label blob2; | |
3505 __ Bind(&blob2); | |
3506 __ Add(w3, w3, 0); | |
3507 CHECK_NE(0, __ SizeOfCodeGeneratedSince(&blob2)); | |
3508 | |
3509 Label blob3; | |
3510 __ Bind(&blob3); | |
3511 __ Sub(w3, w3, wzr); | |
3512 CHECK_NE(0, __ SizeOfCodeGeneratedSince(&blob3)); | |
3513 | |
3514 END(); | |
3515 | |
3516 RUN(); | |
3517 | |
3518 ASSERT_EQUAL_64(0, x0); | |
3519 ASSERT_EQUAL_64(0, x1); | |
3520 ASSERT_EQUAL_64(0, x2); | |
3521 | |
3522 TEARDOWN(); | |
3523 } | |
3524 | |
3525 | |
3526 TEST(claim_drop_zero) { | |
3527 INIT_V8(); | |
3528 SETUP(); | |
3529 | |
3530 START(); | |
3531 | |
3532 Label start; | |
3533 __ Bind(&start); | |
3534 __ Claim(0); | |
3535 __ Drop(0); | |
3536 __ Claim(xzr, 8); | |
3537 __ Drop(xzr, 8); | |
3538 __ Claim(xzr, 0); | |
3539 __ Drop(xzr, 0); | |
3540 __ Claim(x7, 0); | |
3541 __ Drop(x7, 0); | |
3542 __ ClaimBySMI(xzr, 8); | |
3543 __ DropBySMI(xzr, 8); | |
3544 __ ClaimBySMI(xzr, 0); | |
3545 __ DropBySMI(xzr, 0); | |
3546 CHECK_EQ(0, __ SizeOfCodeGeneratedSince(&start)); | |
3547 | |
3548 END(); | |
3549 | |
3550 RUN(); | |
3551 | |
3552 TEARDOWN(); | |
3553 } | |
3554 | |
3555 | |
3556 TEST(neg) { | |
3557 INIT_V8(); | |
3558 SETUP(); | |
3559 | |
3560 START(); | |
3561 __ Mov(x0, 0xf123456789abcdefL); | |
3562 | |
3563 // Immediate. | |
3564 __ Neg(x1, 0x123); | |
3565 __ Neg(w2, 0x123); | |
3566 | |
3567 // Shifted. | |
3568 __ Neg(x3, Operand(x0, LSL, 1)); | |
3569 __ Neg(w4, Operand(w0, LSL, 2)); | |
3570 __ Neg(x5, Operand(x0, LSR, 3)); | |
3571 __ Neg(w6, Operand(w0, LSR, 4)); | |
3572 __ Neg(x7, Operand(x0, ASR, 5)); | |
3573 __ Neg(w8, Operand(w0, ASR, 6)); | |
3574 | |
3575 // Extended. | |
3576 __ Neg(w9, Operand(w0, UXTB)); | |
3577 __ Neg(x10, Operand(x0, SXTB, 1)); | |
3578 __ Neg(w11, Operand(w0, UXTH, 2)); | |
3579 __ Neg(x12, Operand(x0, SXTH, 3)); | |
3580 __ Neg(w13, Operand(w0, UXTW, 4)); | |
3581 __ Neg(x14, Operand(x0, SXTW, 4)); | |
3582 END(); | |
3583 | |
3584 RUN(); | |
3585 | |
3586 ASSERT_EQUAL_64(0xfffffffffffffeddUL, x1); | |
3587 ASSERT_EQUAL_64(0xfffffedd, x2); | |
3588 ASSERT_EQUAL_64(0x1db97530eca86422UL, x3); | |
3589 ASSERT_EQUAL_64(0xd950c844, x4); | |
3590 ASSERT_EQUAL_64(0xe1db97530eca8643UL, x5); | |
3591 ASSERT_EQUAL_64(0xf7654322, x6); | |
3592 ASSERT_EQUAL_64(0x0076e5d4c3b2a191UL, x7); | |
3593 ASSERT_EQUAL_64(0x01d950c9, x8); | |
3594 ASSERT_EQUAL_64(0xffffff11, x9); | |
3595 ASSERT_EQUAL_64(0x0000000000000022UL, x10); | |
3596 ASSERT_EQUAL_64(0xfffcc844, x11); | |
3597 ASSERT_EQUAL_64(0x0000000000019088UL, x12); | |
3598 ASSERT_EQUAL_64(0x65432110, x13); | |
3599 ASSERT_EQUAL_64(0x0000000765432110UL, x14); | |
3600 | |
3601 TEARDOWN(); | |
3602 } | |
3603 | |
3604 | |
3605 TEST(adc_sbc_shift) { | |
3606 INIT_V8(); | |
3607 SETUP(); | |
3608 | |
3609 START(); | |
3610 __ Mov(x0, 0); | |
3611 __ Mov(x1, 1); | |
3612 __ Mov(x2, 0x0123456789abcdefL); | |
3613 __ Mov(x3, 0xfedcba9876543210L); | |
3614 __ Mov(x4, 0xffffffffffffffffL); | |
3615 | |
3616 // Clear the C flag. | |
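// (Adding zero can never produce a carry, so Adds reliably clears C.)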
3617 __ Adds(x0, x0, Operand(0)); | |
3618 | |
3619 __ Adc(x5, x2, Operand(x3)); | |
3620 __ Adc(x6, x0, Operand(x1, LSL, 60)); | |
3621 __ Sbc(x7, x4, Operand(x3, LSR, 4)); | |
3622 __ Adc(x8, x2, Operand(x3, ASR, 4)); | |
3623 __ Adc(x9, x2, Operand(x3, ROR, 8)); | |
3624 | |
3625 __ Adc(w10, w2, Operand(w3)); | |
3626 __ Adc(w11, w0, Operand(w1, LSL, 30)); | |
3627 __ Sbc(w12, w4, Operand(w3, LSR, 4)); | |
3628 __ Adc(w13, w2, Operand(w3, ASR, 4)); | |
3629 __ Adc(w14, w2, Operand(w3, ROR, 8)); | |
3630 | |
3631 // Set the C flag. | |
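// (Subtracting a register from itself produces no borrow, which on ARM
// means C is set.)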
3632 __ Cmp(w0, Operand(w0)); | |
3633 | |
3634 __ Adc(x18, x2, Operand(x3)); | |
3635 __ Adc(x19, x0, Operand(x1, LSL, 60)); | |
3636 __ Sbc(x20, x4, Operand(x3, LSR, 4)); | |
3637 __ Adc(x21, x2, Operand(x3, ASR, 4)); | |
3638 __ Adc(x22, x2, Operand(x3, ROR, 8)); | |
3639 | |
3640 __ Adc(w23, w2, Operand(w3)); | |
3641 __ Adc(w24, w0, Operand(w1, LSL, 30)); | |
3642 __ Sbc(w25, w4, Operand(w3, LSR, 4)); | |
3643 __ Adc(w26, w2, Operand(w3, ASR, 4)); | |
3644 __ Adc(w27, w2, Operand(w3, ROR, 8)); | |
3645 END(); | |
3646 | |
3647 RUN(); | |
3648 | |
3649 ASSERT_EQUAL_64(0xffffffffffffffffL, x5); | |
3650 ASSERT_EQUAL_64(1L << 60, x6); | |
3651 ASSERT_EQUAL_64(0xf0123456789abcddL, x7); | |
3652 ASSERT_EQUAL_64(0x0111111111111110L, x8); | |
3653 ASSERT_EQUAL_64(0x1222222222222221L, x9); | |
3654 | |
3655 ASSERT_EQUAL_32(0xffffffff, w10); | |
3656 ASSERT_EQUAL_32(1 << 30, w11); | |
3657 ASSERT_EQUAL_32(0xf89abcdd, w12); | |
3658 ASSERT_EQUAL_32(0x91111110, w13); | |
3659 ASSERT_EQUAL_32(0x9a222221, w14); | |
3660 | |
3661 ASSERT_EQUAL_64(0xffffffffffffffffL + 1, x18); | |
3662 ASSERT_EQUAL_64((1L << 60) + 1, x19); | |
3663 ASSERT_EQUAL_64(0xf0123456789abcddL + 1, x20); | |
3664 ASSERT_EQUAL_64(0x0111111111111110L + 1, x21); | |
3665 ASSERT_EQUAL_64(0x1222222222222221L + 1, x22); | |
3666 | |
3667 ASSERT_EQUAL_32(0xffffffff + 1, w23); | |
3668 ASSERT_EQUAL_32((1 << 30) + 1, w24); | |
3669 ASSERT_EQUAL_32(0xf89abcdd + 1, w25); | |
3670 ASSERT_EQUAL_32(0x91111110 + 1, w26); | |
3671 ASSERT_EQUAL_32(0x9a222221 + 1, w27); | |
3672 | |
3673 // Check that adc correctly sets the condition flags. | |
3674 START(); | |
3675 __ Mov(x0, 1); | |
3676 __ Mov(x1, 0xffffffffffffffffL); | |
3677 // Clear the C flag. | |
3678 __ Adds(x0, x0, Operand(0)); | |
3679 __ Adcs(x10, x0, Operand(x1)); | |
3680 END(); | |
3681 | |
3682 RUN(); | |
3683 | |
3684 ASSERT_EQUAL_NZCV(ZCFlag); | |
3685 ASSERT_EQUAL_64(0, x10); | |
3686 | |
3687 START(); | |
3688 __ Mov(x0, 1); | |
3689 __ Mov(x1, 0x8000000000000000L); | |
3690 // Clear the C flag. | |
3691 __ Adds(x0, x0, Operand(0)); | |
3692 __ Adcs(x10, x0, Operand(x1, ASR, 63)); | |
3693 END(); | |
3694 | |
3695 RUN(); | |
3696 | |
3697 ASSERT_EQUAL_NZCV(ZCFlag); | |
3698 ASSERT_EQUAL_64(0, x10); | |
3699 | |
3700 START(); | |
3701 __ Mov(x0, 0x10); | |
3702 __ Mov(x1, 0x07ffffffffffffffL); | |
3703 // Clear the C flag. | |
3704 __ Adds(x0, x0, Operand(0)); | |
3705 __ Adcs(x10, x0, Operand(x1, LSL, 4)); | |
3706 END(); | |
3707 | |
3708 RUN(); | |
3709 | |
3710 ASSERT_EQUAL_NZCV(NVFlag); | |
3711 ASSERT_EQUAL_64(0x8000000000000000L, x10); | |
3712 | |
3713 // Check that sbc correctly sets the condition flags. | |
3714 START(); | |
3715 __ Mov(x0, 0); | |
3716 __ Mov(x1, 0xffffffffffffffffL); | |
3717 // Clear the C flag. | |
3718 __ Adds(x0, x0, Operand(0)); | |
3719 __ Sbcs(x10, x0, Operand(x1)); | |
3720 END(); | |
3721 | |
3722 RUN(); | |
3723 | |
3724 ASSERT_EQUAL_NZCV(ZFlag); | |
3725 ASSERT_EQUAL_64(0, x10); | |
3726 | |
3727 START(); | |
3728 __ Mov(x0, 1); | |
3729 __ Mov(x1, 0xffffffffffffffffL); | |
3730 // Clear the C flag. | |
3731 __ Adds(x0, x0, Operand(0)); | |
3732 __ Sbcs(x10, x0, Operand(x1, LSR, 1)); | |
3733 END(); | |
3734 | |
3735 RUN(); | |
3736 | |
3737 ASSERT_EQUAL_NZCV(NFlag); | |
3738 ASSERT_EQUAL_64(0x8000000000000001L, x10); | |
3739 | |
3740 START(); | |
3741 __ Mov(x0, 0); | |
3742 // Clear the C flag. | |
3743 __ Adds(x0, x0, Operand(0)); | |
3744 __ Sbcs(x10, x0, Operand(0xffffffffffffffffL)); | |
3745 END(); | |
3746 | |
3747 RUN(); | |
3748 | |
3749 ASSERT_EQUAL_NZCV(ZFlag); | |
3750 ASSERT_EQUAL_64(0, x10); | |
3751 | |
3752 START(); | |
3753 __ Mov(w0, 0x7fffffff); | |
3754 // Clear the C flag. | |
3755 __ Adds(x0, x0, Operand(0)); | |
3756 __ Ngcs(w10, w0); | |
3757 END(); | |
3758 | |
3759 RUN(); | |
3760 | |
3761 ASSERT_EQUAL_NZCV(NFlag); | |
3762 ASSERT_EQUAL_64(0x80000000, x10); | |
3763 | |
3764 START(); | |
3765 // Clear the C flag. | |
3766 __ Adds(x0, x0, Operand(0)); | |
3767 __ Ngcs(x10, 0x7fffffffffffffffL); | |
3768 END(); | |
3769 | |
3770 RUN(); | |
3771 | |
3772 ASSERT_EQUAL_NZCV(NFlag); | |
3773 ASSERT_EQUAL_64(0x8000000000000000L, x10); | |
3774 | |
3775 START(); | |
3776 __ Mov(x0, 0); | |
3777 // Set the C flag. | |
3778 __ Cmp(x0, Operand(x0)); | |
3779 __ Sbcs(x10, x0, Operand(1)); | |
3780 END(); | |
3781 | |
3782 RUN(); | |
3783 | |
3784 ASSERT_EQUAL_NZCV(NFlag); | |
3785 ASSERT_EQUAL_64(0xffffffffffffffffL, x10); | |
3786 | |
3787 START(); | |
3788 __ Mov(x0, 0); | |
3789 // Set the C flag. | |
3790 __ Cmp(x0, Operand(x0)); | |
3791 __ Ngcs(x10, 0x7fffffffffffffffL); | |
3792 END(); | |
3793 | |
3794 RUN(); | |
3795 | |
3796 ASSERT_EQUAL_NZCV(NFlag); | |
3797 ASSERT_EQUAL_64(0x8000000000000001L, x10); | |
3798 | |
3799 TEARDOWN(); | |
3800 } | |
3801 | |
3802 | |
3803 TEST(adc_sbc_extend) { | |
3804 INIT_V8(); | |
3805 SETUP(); | |
3806 | |
3807 START(); | |
3808 // Clear the C flag. | |
3809 __ Adds(x0, x0, Operand(0)); | |
3810 | |
3811 __ Mov(x0, 0); | |
3812 __ Mov(x1, 1); | |
3813 __ Mov(x2, 0x0123456789abcdefL); | |
3814 | |
3815 __ Adc(x10, x1, Operand(w2, UXTB, 1)); | |
3816 __ Adc(x11, x1, Operand(x2, SXTH, 2)); | |
3817 __ Sbc(x12, x1, Operand(w2, UXTW, 4)); | |
3818 __ Adc(x13, x1, Operand(x2, UXTX, 4)); | |
3819 | |
3820 __ Adc(w14, w1, Operand(w2, UXTB, 1)); | |
3821 __ Adc(w15, w1, Operand(w2, SXTH, 2)); | |
3822 __ Adc(w9, w1, Operand(w2, UXTW, 4)); | |
3823 | |
3824 // Set the C flag. | |
3825 __ Cmp(w0, Operand(w0)); | |
3826 | |
3827 __ Adc(x20, x1, Operand(w2, UXTB, 1)); | |
3828 __ Adc(x21, x1, Operand(x2, SXTH, 2)); | |
3829 __ Sbc(x22, x1, Operand(w2, UXTW, 4)); | |
3830 __ Adc(x23, x1, Operand(x2, UXTX, 4)); | |
3831 | |
3832 __ Adc(w24, w1, Operand(w2, UXTB, 1)); | |
3833 __ Adc(w25, w1, Operand(w2, SXTH, 2)); | |
3834 __ Adc(w26, w1, Operand(w2, UXTW, 4)); | |
3835 END(); | |
3836 | |
3837 RUN(); | |
3838 | |
3839 ASSERT_EQUAL_64(0x1df, x10); | |
3840 ASSERT_EQUAL_64(0xffffffffffff37bdL, x11); | |
3841 ASSERT_EQUAL_64(0xfffffff765432110L, x12); | |
3842 ASSERT_EQUAL_64(0x123456789abcdef1L, x13); | |
3843 | |
3844 ASSERT_EQUAL_32(0x1df, w14); | |
3845 ASSERT_EQUAL_32(0xffff37bd, w15); | |
3846 ASSERT_EQUAL_32(0x9abcdef1, w9); | |
3847 | |
3848 ASSERT_EQUAL_64(0x1df + 1, x20); | |
3849 ASSERT_EQUAL_64(0xffffffffffff37bdL + 1, x21); | |
3850 ASSERT_EQUAL_64(0xfffffff765432110L + 1, x22); | |
3851 ASSERT_EQUAL_64(0x123456789abcdef1L + 1, x23); | |
3852 | |
3853 ASSERT_EQUAL_32(0x1df + 1, w24); | |
3854 ASSERT_EQUAL_32(0xffff37bd + 1, w25); | |
3855 ASSERT_EQUAL_32(0x9abcdef1 + 1, w26); | |
3856 | |
3857 // Check that adc correctly sets the condition flags. | |
3858 START(); | |
3859 __ Mov(x0, 0xff); | |
3860 __ Mov(x1, 0xffffffffffffffffL); | |
3861 // Clear the C flag. | |
3862 __ Adds(x0, x0, Operand(0)); | |
3863 __ Adcs(x10, x0, Operand(x1, SXTX, 1)); | |
3864 END(); | |
3865 | |
3866 RUN(); | |
3867 | |
3868 ASSERT_EQUAL_NZCV(CFlag); | |
3869 | |
3870 START(); | |
3871 __ Mov(x0, 0x7fffffffffffffffL); | |
3872 __ Mov(x1, 1); | |
3873 // Clear the C flag. | |
3874 __ Adds(x0, x0, Operand(0)); | |
3875 __ Adcs(x10, x0, Operand(x1, UXTB, 2)); | |
3876 END(); | |
3877 | |
3878 RUN(); | |
3879 | |
3880 ASSERT_EQUAL_NZCV(NVFlag); | |
3881 | |
3882 START(); | |
3883 __ Mov(x0, 0x7fffffffffffffffL); | |
3884 // Clear the C flag. | |
3885 __ Adds(x0, x0, Operand(0)); | |
3886 __ Adcs(x10, x0, Operand(1)); | |
3887 END(); | |
3888 | |
3889 RUN(); | |
3890 | |
3891 ASSERT_EQUAL_NZCV(NVFlag); | |
3892 | |
3893 TEARDOWN(); | |
3894 } | |
3895 | |
3896 | |
3897 TEST(adc_sbc_wide_imm) { | |
3898 INIT_V8(); | |
3899 SETUP(); | |
3900 | |
3901 START(); | |
3902 __ Mov(x0, 0); | |
3903 | |
3904 // Clear the C flag. | |
3905 __ Adds(x0, x0, Operand(0)); | |
3906 | |
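// The wide immediates below have no adc/sbc encoding, so the macro | |
// assembler is expected to materialise them in a scratch register first. | |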
3907 __ Adc(x7, x0, Operand(0x1234567890abcdefUL)); | |
3908 __ Adc(w8, w0, Operand(0xffffffff)); | |
3909 __ Sbc(x9, x0, Operand(0x1234567890abcdefUL)); | |
3910 __ Sbc(w10, w0, Operand(0xffffffff)); | |
3911 __ Ngc(x11, Operand(0xffffffff00000000UL)); | |
3912 __ Ngc(w12, Operand(0xffff0000)); | |
3913 | |
3914 // Set the C flag. | |
3915 __ Cmp(w0, Operand(w0)); | |
3916 | |
3917 __ Adc(x18, x0, Operand(0x1234567890abcdefUL)); | |
3918 __ Adc(w19, w0, Operand(0xffffffff)); | |
3919 __ Sbc(x20, x0, Operand(0x1234567890abcdefUL)); | |
3920 __ Sbc(w21, w0, Operand(0xffffffff)); | |
3921 __ Ngc(x22, Operand(0xffffffff00000000UL)); | |
3922 __ Ngc(w23, Operand(0xffff0000)); | |
3923 END(); | |
3924 | |
3925 RUN(); | |
3926 | |
3927 ASSERT_EQUAL_64(0x1234567890abcdefUL, x7); | |
3928 ASSERT_EQUAL_64(0xffffffff, x8); | |
3929 ASSERT_EQUAL_64(0xedcba9876f543210UL, x9); | |
3930 ASSERT_EQUAL_64(0, x10); | |
3931 ASSERT_EQUAL_64(0xffffffff, x11); | |
3932 ASSERT_EQUAL_64(0xffff, x12); | |
3933 | |
3934 ASSERT_EQUAL_64(0x1234567890abcdefUL + 1, x18); | |
3935 ASSERT_EQUAL_64(0, x19); | |
3936 ASSERT_EQUAL_64(0xedcba9876f543211UL, x20); | |
3937 ASSERT_EQUAL_64(1, x21); | |
3938 ASSERT_EQUAL_64(0x100000000UL, x22); | |
3939 ASSERT_EQUAL_64(0x10000, x23); | |
3940 | |
3941 TEARDOWN(); | |
3942 } | |
3943 | |
3944 | |
3945 TEST(flags) { | |
3946 INIT_V8(); | |
3947 SETUP(); | |
3948 | |
3949 START(); | |
3950 __ Mov(x0, 0); | |
3951 __ Mov(x1, 0x1111111111111111L); | |
3952 __ Neg(x10, Operand(x0)); | |
3953 __ Neg(x11, Operand(x1)); | |
3954 __ Neg(w12, Operand(w1)); | |
3955 // Clear the C flag. | |
3956 __ Adds(x0, x0, Operand(0)); | |
3957 __ Ngc(x13, Operand(x0)); | |
3958 // Set the C flag. | |
3959 __ Cmp(x0, Operand(x0)); | |
3960 __ Ngc(w14, Operand(w0)); | |
3961 END(); | |
3962 | |
3963 RUN(); | |
3964 | |
3965 ASSERT_EQUAL_64(0, x10); | |
3966 ASSERT_EQUAL_64(-0x1111111111111111L, x11); | |
3967 ASSERT_EQUAL_32(-0x11111111, w12); | |
3968 ASSERT_EQUAL_64(-1L, x13); | |
3969 ASSERT_EQUAL_32(0, w14); | |
3970 | |
3971 START(); | |
3972 __ Mov(x0, 0); | |
3973 __ Cmp(x0, Operand(x0)); | |
3974 END(); | |
3975 | |
3976 RUN(); | |
3977 | |
3978 ASSERT_EQUAL_NZCV(ZCFlag); | |
3979 | |
3980 START(); | |
3981 __ Mov(w0, 0); | |
3982 __ Cmp(w0, Operand(w0)); | |
3983 END(); | |
3984 | |
3985 RUN(); | |
3986 | |
3987 ASSERT_EQUAL_NZCV(ZCFlag); | |
3988 | |
3989 START(); | |
3990 __ Mov(x0, 0); | |
3991 __ Mov(x1, 0x1111111111111111L); | |
3992 __ Cmp(x0, Operand(x1)); | |
3993 END(); | |
3994 | |
3995 RUN(); | |
3996 | |
3997 ASSERT_EQUAL_NZCV(NFlag); | |
3998 | |
3999 START(); | |
4000 __ Mov(w0, 0); | |
4001 __ Mov(w1, 0x11111111); | |
4002 __ Cmp(w0, Operand(w1)); | |
4003 END(); | |
4004 | |
4005 RUN(); | |
4006 | |
4007 ASSERT_EQUAL_NZCV(NFlag); | |
4008 | |
4009 START(); | |
4010 __ Mov(x1, 0x1111111111111111L); | |
4011 __ Cmp(x1, Operand(0)); | |
4012 END(); | |
4013 | |
4014 RUN(); | |
4015 | |
4016 ASSERT_EQUAL_NZCV(CFlag); | |
4017 | |
4018 START(); | |
4019 __ Mov(w1, 0x11111111); | |
4020 __ Cmp(w1, Operand(0)); | |
4021 END(); | |
4022 | |
4023 RUN(); | |
4024 | |
4025 ASSERT_EQUAL_NZCV(CFlag); | |
4026 | |
4027 START(); | |
4028 __ Mov(x0, 1); | |
4029 __ Mov(x1, 0x7fffffffffffffffL); | |
4030 __ Cmn(x1, Operand(x0)); | |
4031 END(); | |
4032 | |
4033 RUN(); | |
4034 | |
4035 ASSERT_EQUAL_NZCV(NVFlag); | |
4036 | |
4037 START(); | |
4038 __ Mov(w0, 1); | |
4039 __ Mov(w1, 0x7fffffff); | |
4040 __ Cmn(w1, Operand(w0)); | |
4041 END(); | |
4042 | |
4043 RUN(); | |
4044 | |
4045 ASSERT_EQUAL_NZCV(NVFlag); | |
4046 | |
4047 START(); | |
4048 __ Mov(x0, 1); | |
4049 __ Mov(x1, 0xffffffffffffffffL); | |
4050 __ Cmn(x1, Operand(x0)); | |
4051 END(); | |
4052 | |
4053 RUN(); | |
4054 | |
4055 ASSERT_EQUAL_NZCV(ZCFlag); | |
4056 | |
4057 START(); | |
4058 __ Mov(w0, 1); | |
4059 __ Mov(w1, 0xffffffff); | |
4060 __ Cmn(w1, Operand(w0)); | |
4061 END(); | |
4062 | |
4063 RUN(); | |
4064 | |
4065 ASSERT_EQUAL_NZCV(ZCFlag); | |
4066 | |
4067 START(); | |
4068 __ Mov(w0, 0); | |
4069 __ Mov(w1, 1); | |
4070 // Clear the C flag. | |
4071 __ Adds(w0, w0, Operand(0)); | |
4072 __ Ngcs(w0, Operand(w1)); | |
4073 END(); | |
4074 | |
4075 RUN(); | |
4076 | |
4077 ASSERT_EQUAL_NZCV(NFlag); | |
4078 | |
4079 START(); | |
4080 __ Mov(w0, 0); | |
4081 __ Mov(w1, 0); | |
4082 // Set the C flag. | |
4083 __ Cmp(w0, Operand(w0)); | |
4084 __ Ngcs(w0, Operand(w1)); | |
4085 END(); | |
4086 | |
4087 RUN(); | |
4088 | |
4089 ASSERT_EQUAL_NZCV(ZCFlag); | |
4090 | |
4091 TEARDOWN(); | |
4092 } | |
4093 | |
4094 | |
4095 TEST(cmp_shift) { | |
4096 INIT_V8(); | |
4097 SETUP(); | |
4098 | |
4099 START(); | |
4100 __ Mov(x18, 0xf0000000); | |
4101 __ Mov(x19, 0xf000000010000000UL); | |
4102 __ Mov(x20, 0xf0000000f0000000UL); | |
4103 __ Mov(x21, 0x7800000078000000UL); | |
4104 __ Mov(x22, 0x3c0000003c000000UL); | |
4105 __ Mov(x23, 0x8000000780000000UL); | |
4106 __ Mov(x24, 0x0000000f00000000UL); | |
4107 __ Mov(x25, 0x00000003c0000000UL); | |
4108 __ Mov(x26, 0x8000000780000000UL); | |
4109 __ Mov(x27, 0xc0000003); | |
4110 | |
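// Each comparison below is constructed so that the shifted operand equals | |
// the first operand, so every result should be ZCFlag. | |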
4111 __ Cmp(w20, Operand(w21, LSL, 1)); | |
4112 __ Mrs(x0, NZCV); | |
4113 | |
4114 __ Cmp(x20, Operand(x22, LSL, 2)); | |
4115 __ Mrs(x1, NZCV); | |
4116 | |
4117 __ Cmp(w19, Operand(w23, LSR, 3)); | |
4118 __ Mrs(x2, NZCV); | |
4119 | |
4120 __ Cmp(x18, Operand(x24, LSR, 4)); | |
4121 __ Mrs(x3, NZCV); | |
4122 | |
4123 __ Cmp(w20, Operand(w25, ASR, 2)); | |
4124 __ Mrs(x4, NZCV); | |
4125 | |
4126 __ Cmp(x20, Operand(x26, ASR, 3)); | |
4127 __ Mrs(x5, NZCV); | |
4128 | |
4129 __ Cmp(w27, Operand(w22, ROR, 28)); | |
4130 __ Mrs(x6, NZCV); | |
4131 | |
4132 __ Cmp(x20, Operand(x21, ROR, 31)); | |
4133 __ Mrs(x7, NZCV); | |
4134 END(); | |
4135 | |
4136 RUN(); | |
4137 | |
4138 ASSERT_EQUAL_32(ZCFlag, w0); | |
4139 ASSERT_EQUAL_32(ZCFlag, w1); | |
4140 ASSERT_EQUAL_32(ZCFlag, w2); | |
4141 ASSERT_EQUAL_32(ZCFlag, w3); | |
4142 ASSERT_EQUAL_32(ZCFlag, w4); | |
4143 ASSERT_EQUAL_32(ZCFlag, w5); | |
4144 ASSERT_EQUAL_32(ZCFlag, w6); | |
4145 ASSERT_EQUAL_32(ZCFlag, w7); | |
4146 | |
4147 TEARDOWN(); | |
4148 } | |
4149 | |
4150 | |
4151 TEST(cmp_extend) { | |
4152 INIT_V8(); | |
4153 SETUP(); | |
4154 | |
4155 START(); | |
4156 __ Mov(w20, 0x2); | |
4157 __ Mov(w21, 0x1); | |
4158 __ Mov(x22, 0xffffffffffffffffUL); | |
4159 __ Mov(x23, 0xff); | |
4160 __ Mov(x24, 0xfffffffffffffffeUL); | |
4161 __ Mov(x25, 0xffff); | |
4162 __ Mov(x26, 0xffffffff); | |
4163 | |
4164 __ Cmp(w20, Operand(w21, LSL, 1)); | |
4165 __ Mrs(x0, NZCV); | |
4166 | |
4167 __ Cmp(x22, Operand(x23, SXTB, 0)); | |
4168 __ Mrs(x1, NZCV); | |
4169 | |
4170 __ Cmp(x24, Operand(x23, SXTB, 1)); | |
4171 __ Mrs(x2, NZCV); | |
4172 | |
4173 __ Cmp(x24, Operand(x23, UXTB, 1)); | |
4174 __ Mrs(x3, NZCV); | |
4175 | |
4176 __ Cmp(w22, Operand(w25, UXTH)); | |
4177 __ Mrs(x4, NZCV); | |
4178 | |
4179 __ Cmp(x22, Operand(x25, SXTH)); | |
4180 __ Mrs(x5, NZCV); | |
4181 | |
4182 __ Cmp(x22, Operand(x26, UXTW)); | |
4183 __ Mrs(x6, NZCV); | |
4184 | |
4185 __ Cmp(x24, Operand(x26, SXTW, 1)); | |
4186 __ Mrs(x7, NZCV); | |
4187 END(); | |
4188 | |
4189 RUN(); | |
4190 | |
4191 ASSERT_EQUAL_32(ZCFlag, w0); | |
4192 ASSERT_EQUAL_32(ZCFlag, w1); | |
4193 ASSERT_EQUAL_32(ZCFlag, w2); | |
4194 ASSERT_EQUAL_32(NCFlag, w3); | |
4195 ASSERT_EQUAL_32(NCFlag, w4); | |
4196 ASSERT_EQUAL_32(ZCFlag, w5); | |
4197 ASSERT_EQUAL_32(NCFlag, w6); | |
4198 ASSERT_EQUAL_32(ZCFlag, w7); | |
4199 | |
4200 TEARDOWN(); | |
4201 } | |
4202 | |
4203 | |
4204 TEST(ccmp) { | |
4205 INIT_V8(); | |
4206 SETUP(); | |
4207 | |
4208 START(); | |
4209 __ Mov(w16, 0); | |
4210 __ Mov(w17, 1); | |
4211 __ Cmp(w16, w16); | |
4212 __ Ccmp(w16, w17, NCFlag, eq); | |
4213 __ Mrs(x0, NZCV); | |
4214 | |
4215 __ Cmp(w16, w16); | |
4216 __ Ccmp(w16, w17, NCFlag, ne); | |
4217 __ Mrs(x1, NZCV); | |
4218 | |
4219 __ Cmp(x16, x16); | |
4220 __ Ccmn(x16, 2, NZCVFlag, eq); | |
4221 __ Mrs(x2, NZCV); | |
4222 | |
4223 __ Cmp(x16, x16); | |
4224 __ Ccmn(x16, 2, NZCVFlag, ne); | |
4225 __ Mrs(x3, NZCV); | |
4226 | |
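// The al and nv conditions are exercised through the raw assembler here, | |
// as the Ccmp macro is not expected to accept them. | |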
4227 __ ccmp(x16, x16, NZCVFlag, al); | |
4228 __ Mrs(x4, NZCV); | |
4229 | |
4230 __ ccmp(x16, x16, NZCVFlag, nv); | |
4231 __ Mrs(x5, NZCV); | |
4232 | |
4233 END(); | |
4234 | |
4235 RUN(); | |
4236 | |
4237 ASSERT_EQUAL_32(NFlag, w0); | |
4238 ASSERT_EQUAL_32(NCFlag, w1); | |
4239 ASSERT_EQUAL_32(NoFlag, w2); | |
4240 ASSERT_EQUAL_32(NZCVFlag, w3); | |
4241 ASSERT_EQUAL_32(ZCFlag, w4); | |
4242 ASSERT_EQUAL_32(ZCFlag, w5); | |
4243 | |
4244 TEARDOWN(); | |
4245 } | |
4246 | |
4247 | |
4248 TEST(ccmp_wide_imm) { | |
4249 INIT_V8(); | |
4250 SETUP(); | |
4251 | |
4252 START(); | |
4253 __ Mov(w20, 0); | |
4254 | |
4255 __ Cmp(w20, Operand(w20)); | |
4256 __ Ccmp(w20, Operand(0x12345678), NZCVFlag, eq); | |
4257 __ Mrs(x0, NZCV); | |
4258 | |
4259 __ Cmp(w20, Operand(w20)); | |
4260 __ Ccmp(x20, Operand(0xffffffffffffffffUL), NZCVFlag, eq); | |
4261 __ Mrs(x1, NZCV); | |
4262 END(); | |
4263 | |
4264 RUN(); | |
4265 | |
4266 ASSERT_EQUAL_32(NFlag, w0); | |
4267 ASSERT_EQUAL_32(NoFlag, w1); | |
4268 | |
4269 TEARDOWN(); | |
4270 } | |
4271 | |
4272 | |
4273 TEST(ccmp_shift_extend) { | |
4274 INIT_V8(); | |
4275 SETUP(); | |
4276 | |
4277 START(); | |
4278 __ Mov(w20, 0x2); | |
4279 __ Mov(w21, 0x1); | |
4280 __ Mov(x22, 0xffffffffffffffffUL); | |
4281 __ Mov(x23, 0xff); | |
4282 __ Mov(x24, 0xfffffffffffffffeUL); | |
4283 | |
4284 __ Cmp(w20, Operand(w20)); | |
4285 __ Ccmp(w20, Operand(w21, LSL, 1), NZCVFlag, eq); | |
4286 __ Mrs(x0, NZCV); | |
4287 | |
4288 __ Cmp(w20, Operand(w20)); | |
4289 __ Ccmp(x22, Operand(x23, SXTB, 0), NZCVFlag, eq); | |
4290 __ Mrs(x1, NZCV); | |
4291 | |
4292 __ Cmp(w20, Operand(w20)); | |
4293 __ Ccmp(x24, Operand(x23, SXTB, 1), NZCVFlag, eq); | |
4294 __ Mrs(x2, NZCV); | |
4295 | |
4296 __ Cmp(w20, Operand(w20)); | |
4297 __ Ccmp(x24, Operand(x23, UXTB, 1), NZCVFlag, eq); | |
4298 __ Mrs(x3, NZCV); | |
4299 | |
4300 __ Cmp(w20, Operand(w20)); | |
4301 __ Ccmp(x24, Operand(x23, UXTB, 1), NZCVFlag, ne); | |
4302 __ Mrs(x4, NZCV); | |
4303 END(); | |
4304 | |
4305 RUN(); | |
4306 | |
4307 ASSERT_EQUAL_32(ZCFlag, w0); | |
4308 ASSERT_EQUAL_32(ZCFlag, w1); | |
4309 ASSERT_EQUAL_32(ZCFlag, w2); | |
4310 ASSERT_EQUAL_32(NCFlag, w3); | |
4311 ASSERT_EQUAL_32(NZCVFlag, w4); | |
4312 | |
4313 TEARDOWN(); | |
4314 } | |
4315 | |
4316 | |
4317 TEST(csel) { | |
4318 INIT_V8(); | |
4319 SETUP(); | |
4320 | |
4321 START(); | |
4322 __ Mov(x16, 0); | |
4323 __ Mov(x24, 0x0000000f0000000fUL); | |
4324 __ Mov(x25, 0x0000001f0000001fUL); | |
4325 __ Mov(x26, 0); | |
4326 __ Mov(x27, 0); | |
4327 | |
4328 __ Cmp(w16, 0); | |
4329 __ Csel(w0, w24, w25, eq); | |
4330 __ Csel(w1, w24, w25, ne); | |
4331 __ Csinc(w2, w24, w25, mi); | |
4332 __ Csinc(w3, w24, w25, pl); | |
4333 | |
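// As with ccmp, al and nv go through the raw assembler; the Csel macro is | |
// not expected to accept these conditions. | |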
4334 __ csel(w13, w24, w25, al); | |
4335 __ csel(x14, x24, x25, nv); | |
4336 | |
4337 __ Cmp(x16, 1); | |
4338 __ Csinv(x4, x24, x25, gt); | |
4339 __ Csinv(x5, x24, x25, le); | |
4340 __ Csneg(x6, x24, x25, hs); | |
4341 __ Csneg(x7, x24, x25, lo); | |
4342 | |
4343 __ Cset(w8, ne); | |
4344 __ Csetm(w9, ne); | |
4345 __ Cinc(x10, x25, ne); | |
4346 __ Cinv(x11, x24, ne); | |
4347 __ Cneg(x12, x24, ne); | |
4348 | |
4349 __ csel(w15, w24, w25, al); | |
4350 __ csel(x18, x24, x25, nv); | |
4351 | |
4352 __ CzeroX(x24, ne); | |
4353 __ CzeroX(x25, eq); | |
4354 | |
4355 __ CmovX(x26, x25, ne); | |
4356 __ CmovX(x27, x25, eq); | |
4357 END(); | |
4358 | |
4359 RUN(); | |
4360 | |
4361 ASSERT_EQUAL_64(0x0000000f, x0); | |
4362 ASSERT_EQUAL_64(0x0000001f, x1); | |
4363 ASSERT_EQUAL_64(0x00000020, x2); | |
4364 ASSERT_EQUAL_64(0x0000000f, x3); | |
4365 ASSERT_EQUAL_64(0xffffffe0ffffffe0UL, x4); | |
4366 ASSERT_EQUAL_64(0x0000000f0000000fUL, x5); | |
4367 ASSERT_EQUAL_64(0xffffffe0ffffffe1UL, x6); | |
4368 ASSERT_EQUAL_64(0x0000000f0000000fUL, x7); | |
4369 ASSERT_EQUAL_64(0x00000001, x8); | |
4370 ASSERT_EQUAL_64(0xffffffff, x9); | |
4371 ASSERT_EQUAL_64(0x0000001f00000020UL, x10); | |
4372 ASSERT_EQUAL_64(0xfffffff0fffffff0UL, x11); | |
4373 ASSERT_EQUAL_64(0xfffffff0fffffff1UL, x12); | |
4374 ASSERT_EQUAL_64(0x0000000f, x13); | |
4375 ASSERT_EQUAL_64(0x0000000f0000000fUL, x14); | |
4376 ASSERT_EQUAL_64(0x0000000f, x15); | |
4377 ASSERT_EQUAL_64(0x0000000f0000000fUL, x18); | |
4378 ASSERT_EQUAL_64(0, x24); | |
4379 ASSERT_EQUAL_64(0x0000001f0000001fUL, x25); | |
4380 ASSERT_EQUAL_64(0x0000001f0000001fUL, x26); | |
4381 ASSERT_EQUAL_64(0, x27); | |
4382 | |
4383 TEARDOWN(); | |
4384 } | |
4385 | |
4386 | |
4387 TEST(csel_imm) { | |
4388 INIT_V8(); | |
4389 SETUP(); | |
4390 | |
4391 START(); | |
4392 __ Mov(x18, 0); | |
4393 __ Mov(x19, 0x80000000); | |
4394 __ Mov(x20, 0x8000000000000000UL); | |
4395 | |
4396 __ Cmp(x18, Operand(0)); | |
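// Most of the immediate operands below cannot be encoded in csel directly; | |
// the macro is expected to synthesise them, e.g. via csinc/csinv with zr. | |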
4397 __ Csel(w0, w19, -2, ne); | |
4398 __ Csel(w1, w19, -1, ne); | |
4399 __ Csel(w2, w19, 0, ne); | |
4400 __ Csel(w3, w19, 1, ne); | |
4401 __ Csel(w4, w19, 2, ne); | |
4402 __ Csel(w5, w19, Operand(w19, ASR, 31), ne); | |
4403 __ Csel(w6, w19, Operand(w19, ROR, 1), ne); | |
4404 __ Csel(w7, w19, 3, eq); | |
4405 | |
4406 __ Csel(x8, x20, -2, ne); | |
4407 __ Csel(x9, x20, -1, ne); | |
4408 __ Csel(x10, x20, 0, ne); | |
4409 __ Csel(x11, x20, 1, ne); | |
4410 __ Csel(x12, x20, 2, ne); | |
4411 __ Csel(x13, x20, Operand(x20, ASR, 63), ne); | |
4412 __ Csel(x14, x20, Operand(x20, ROR, 1), ne); | |
4413 __ Csel(x15, x20, 3, eq); | |
4414 | |
4415 END(); | |
4416 | |
4417 RUN(); | |
4418 | |
4419 ASSERT_EQUAL_32(-2, w0); | |
4420 ASSERT_EQUAL_32(-1, w1); | |
4421 ASSERT_EQUAL_32(0, w2); | |
4422 ASSERT_EQUAL_32(1, w3); | |
4423 ASSERT_EQUAL_32(2, w4); | |
4424 ASSERT_EQUAL_32(-1, w5); | |
4425 ASSERT_EQUAL_32(0x40000000, w6); | |
4426 ASSERT_EQUAL_32(0x80000000, w7); | |
4427 | |
4428 ASSERT_EQUAL_64(-2, x8); | |
4429 ASSERT_EQUAL_64(-1, x9); | |
4430 ASSERT_EQUAL_64(0, x10); | |
4431 ASSERT_EQUAL_64(1, x11); | |
4432 ASSERT_EQUAL_64(2, x12); | |
4433 ASSERT_EQUAL_64(-1, x13); | |
4434 ASSERT_EQUAL_64(0x4000000000000000UL, x14); | |
4435 ASSERT_EQUAL_64(0x8000000000000000UL, x15); | |
4436 | |
4437 TEARDOWN(); | |
4438 } | |
4439 | |
4440 | |
4441 TEST(lslv) { | |
4442 INIT_V8(); | |
4443 SETUP(); | |
4444 | |
4445 uint64_t value = 0x0123456789abcdefUL; | |
4446 int shift[] = {1, 3, 5, 9, 17, 33}; | |
4447 | |
4448 START(); | |
4449 __ Mov(x0, value); | |
4450 __ Mov(w1, shift[0]); | |
4451 __ Mov(w2, shift[1]); | |
4452 __ Mov(w3, shift[2]); | |
4453 __ Mov(w4, shift[3]); | |
4454 __ Mov(w5, shift[4]); | |
4455 __ Mov(w6, shift[5]); | |
4456 | |
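// A shift by xzr is a shift by zero, so x0 should pass through unchanged. | |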
4457 __ lslv(x0, x0, xzr); | |
4458 | |
4459 __ Lsl(x16, x0, x1); | |
4460 __ Lsl(x17, x0, x2); | |
4461 __ Lsl(x18, x0, x3); | |
4462 __ Lsl(x19, x0, x4); | |
4463 __ Lsl(x20, x0, x5); | |
4464 __ Lsl(x21, x0, x6); | |
4465 | |
4466 __ Lsl(w22, w0, w1); | |
4467 __ Lsl(w23, w0, w2); | |
4468 __ Lsl(w24, w0, w3); | |
4469 __ Lsl(w25, w0, w4); | |
4470 __ Lsl(w26, w0, w5); | |
4471 __ Lsl(w27, w0, w6); | |
4472 END(); | |
4473 | |
4474 RUN(); | |
4475 | |
4476 ASSERT_EQUAL_64(value, x0); | |
4477 ASSERT_EQUAL_64(value << (shift[0] & 63), x16); | |
4478 ASSERT_EQUAL_64(value << (shift[1] & 63), x17); | |
4479 ASSERT_EQUAL_64(value << (shift[2] & 63), x18); | |
4480 ASSERT_EQUAL_64(value << (shift[3] & 63), x19); | |
4481 ASSERT_EQUAL_64(value << (shift[4] & 63), x20); | |
4482 ASSERT_EQUAL_64(value << (shift[5] & 63), x21); | |
4483 ASSERT_EQUAL_32(value << (shift[0] & 31), w22); | |
4484 ASSERT_EQUAL_32(value << (shift[1] & 31), w23); | |
4485 ASSERT_EQUAL_32(value << (shift[2] & 31), w24); | |
4486 ASSERT_EQUAL_32(value << (shift[3] & 31), w25); | |
4487 ASSERT_EQUAL_32(value << (shift[4] & 31), w26); | |
4488 ASSERT_EQUAL_32(value << (shift[5] & 31), w27); | |
4489 | |
4490 TEARDOWN(); | |
4491 } | |
4492 | |
4493 | |
4494 TEST(lsrv) { | |
4495 INIT_V8(); | |
4496 SETUP(); | |
4497 | |
4498 uint64_t value = 0x0123456789abcdefUL; | |
4499 int shift[] = {1, 3, 5, 9, 17, 33}; | |
4500 | |
4501 START(); | |
4502 __ Mov(x0, value); | |
4503 __ Mov(w1, shift[0]); | |
4504 __ Mov(w2, shift[1]); | |
4505 __ Mov(w3, shift[2]); | |
4506 __ Mov(w4, shift[3]); | |
4507 __ Mov(w5, shift[4]); | |
4508 __ Mov(w6, shift[5]); | |
4509 | |
4510 __ lsrv(x0, x0, xzr); | |
4511 | |
4512 __ Lsr(x16, x0, x1); | |
4513 __ Lsr(x17, x0, x2); | |
4514 __ Lsr(x18, x0, x3); | |
4515 __ Lsr(x19, x0, x4); | |
4516 __ Lsr(x20, x0, x5); | |
4517 __ Lsr(x21, x0, x6); | |
4518 | |
4519 __ Lsr(w22, w0, w1); | |
4520 __ Lsr(w23, w0, w2); | |
4521 __ Lsr(w24, w0, w3); | |
4522 __ Lsr(w25, w0, w4); | |
4523 __ Lsr(w26, w0, w5); | |
4524 __ Lsr(w27, w0, w6); | |
4525 END(); | |
4526 | |
4527 RUN(); | |
4528 | |
4529 ASSERT_EQUAL_64(value, x0); | |
4530 ASSERT_EQUAL_64(value >> (shift[0] & 63), x16); | |
4531 ASSERT_EQUAL_64(value >> (shift[1] & 63), x17); | |
4532 ASSERT_EQUAL_64(value >> (shift[2] & 63), x18); | |
4533 ASSERT_EQUAL_64(value >> (shift[3] & 63), x19); | |
4534 ASSERT_EQUAL_64(value >> (shift[4] & 63), x20); | |
4535 ASSERT_EQUAL_64(value >> (shift[5] & 63), x21); | |
4536 | |
4537 value &= 0xffffffffUL; | |
4538 ASSERT_EQUAL_32(value >> (shift[0] & 31), w22); | |
4539 ASSERT_EQUAL_32(value >> (shift[1] & 31), w23); | |
4540 ASSERT_EQUAL_32(value >> (shift[2] & 31), w24); | |
4541 ASSERT_EQUAL_32(value >> (shift[3] & 31), w25); | |
4542 ASSERT_EQUAL_32(value >> (shift[4] & 31), w26); | |
4543 ASSERT_EQUAL_32(value >> (shift[5] & 31), w27); | |
4544 | |
4545 TEARDOWN(); | |
4546 } | |
4547 | |
4548 | |
4549 TEST(asrv) { | |
4550 INIT_V8(); | |
4551 SETUP(); | |
4552 | |
4553 int64_t value = 0xfedcba98fedcba98UL; | |
4554 int shift[] = {1, 3, 5, 9, 17, 33}; | |
4555 | |
4556 START(); | |
4557 __ Mov(x0, value); | |
4558 __ Mov(w1, shift[0]); | |
4559 __ Mov(w2, shift[1]); | |
4560 __ Mov(w3, shift[2]); | |
4561 __ Mov(w4, shift[3]); | |
4562 __ Mov(w5, shift[4]); | |
4563 __ Mov(w6, shift[5]); | |
4564 | |
4565 __ asrv(x0, x0, xzr); | |
4566 | |
4567 __ Asr(x16, x0, x1); | |
4568 __ Asr(x17, x0, x2); | |
4569 __ Asr(x18, x0, x3); | |
4570 __ Asr(x19, x0, x4); | |
4571 __ Asr(x20, x0, x5); | |
4572 __ Asr(x21, x0, x6); | |
4573 | |
4574 __ Asr(w22, w0, w1); | |
4575 __ Asr(w23, w0, w2); | |
4576 __ Asr(w24, w0, w3); | |
4577 __ Asr(w25, w0, w4); | |
4578 __ Asr(w26, w0, w5); | |
4579 __ Asr(w27, w0, w6); | |
4580 END(); | |
4581 | |
4582 RUN(); | |
4583 | |
4584 ASSERT_EQUAL_64(value, x0); | |
4585 ASSERT_EQUAL_64(value >> (shift[0] & 63), x16); | |
4586 ASSERT_EQUAL_64(value >> (shift[1] & 63), x17); | |
4587 ASSERT_EQUAL_64(value >> (shift[2] & 63), x18); | |
4588 ASSERT_EQUAL_64(value >> (shift[3] & 63), x19); | |
4589 ASSERT_EQUAL_64(value >> (shift[4] & 63), x20); | |
4590 ASSERT_EQUAL_64(value >> (shift[5] & 63), x21); | |
4591 | |
4592 int32_t value32 = static_cast<int32_t>(value & 0xffffffffUL); | |
4593 ASSERT_EQUAL_32(value32 >> (shift[0] & 31), w22); | |
4594 ASSERT_EQUAL_32(value32 >> (shift[1] & 31), w23); | |
4595 ASSERT_EQUAL_32(value32 >> (shift[2] & 31), w24); | |
4596 ASSERT_EQUAL_32(value32 >> (shift[3] & 31), w25); | |
4597 ASSERT_EQUAL_32(value32 >> (shift[4] & 31), w26); | |
4598 ASSERT_EQUAL_32(value32 >> (shift[5] & 31), w27); | |
4599 | |
4600 TEARDOWN(); | |
4601 } | |
4602 | |
4603 | |
4604 TEST(rorv) { | |
4605 INIT_V8(); | |
4606 SETUP(); | |
4607 | |
4608 uint64_t value = 0x0123456789abcdefUL; | |
4609 int shift[] = {4, 8, 12, 16, 24, 36}; | |
4610 | |
4611 START(); | |
4612 __ Mov(x0, value); | |
4613 __ Mov(w1, shift[0]); | |
4614 __ Mov(w2, shift[1]); | |
4615 __ Mov(w3, shift[2]); | |
4616 __ Mov(w4, shift[3]); | |
4617 __ Mov(w5, shift[4]); | |
4618 __ Mov(w6, shift[5]); | |
4619 | |
4620 __ rorv(x0, x0, xzr); | |
4621 | |
4622 __ Ror(x16, x0, x1); | |
4623 __ Ror(x17, x0, x2); | |
4624 __ Ror(x18, x0, x3); | |
4625 __ Ror(x19, x0, x4); | |
4626 __ Ror(x20, x0, x5); | |
4627 __ Ror(x21, x0, x6); | |
4628 | |
4629 __ Ror(w22, w0, w1); | |
4630 __ Ror(w23, w0, w2); | |
4631 __ Ror(w24, w0, w3); | |
4632 __ Ror(w25, w0, w4); | |
4633 __ Ror(w26, w0, w5); | |
4634 __ Ror(w27, w0, w6); | |
4635 END(); | |
4636 | |
4637 RUN(); | |
4638 | |
4639 ASSERT_EQUAL_64(value, x0); | |
4640 ASSERT_EQUAL_64(0xf0123456789abcdeUL, x16); | |
4641 ASSERT_EQUAL_64(0xef0123456789abcdUL, x17); | |
4642 ASSERT_EQUAL_64(0xdef0123456789abcUL, x18); | |
4643 ASSERT_EQUAL_64(0xcdef0123456789abUL, x19); | |
4644 ASSERT_EQUAL_64(0xabcdef0123456789UL, x20); | |
4645 ASSERT_EQUAL_64(0x789abcdef0123456UL, x21); | |
4646 ASSERT_EQUAL_32(0xf89abcde, w22); | |
4647 ASSERT_EQUAL_32(0xef89abcd, w23); | |
4648 ASSERT_EQUAL_32(0xdef89abc, w24); | |
4649 ASSERT_EQUAL_32(0xcdef89ab, w25); | |
4650 ASSERT_EQUAL_32(0xabcdef89, w26); | |
4651 ASSERT_EQUAL_32(0xf89abcde, w27); | |
4652 | |
4653 TEARDOWN(); | |
4654 } | |
4655 | |
4656 | |
4657 TEST(bfm) { | |
4658 INIT_V8(); | |
4659 SETUP(); | |
4660 | |
4661 START(); | |
4662 __ Mov(x1, 0x0123456789abcdefL); | |
4663 | |
4664 __ Mov(x10, 0x8888888888888888L); | |
4665 __ Mov(x11, 0x8888888888888888L); | |
4666 __ Mov(x12, 0x8888888888888888L); | |
4667 __ Mov(x13, 0x8888888888888888L); | |
4668 __ Mov(w20, 0x88888888); | |
4669 __ Mov(w21, 0x88888888); | |
4670 | |
4671 __ bfm(x10, x1, 16, 31); | |
4672 __ bfm(x11, x1, 32, 15); | |
4673 | |
4674 __ bfm(w20, w1, 16, 23); | |
4675 __ bfm(w21, w1, 24, 15); | |
4676 | |
4677 // Aliases. | |
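// Bfi inserts the low 8 bits of x1 (0xef) at bit 16 of x12; Bfxil extracts | |
// the 8 bits at bit 16 of x1 (0xab) into the low bits of x13. | |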
4678 __ Bfi(x12, x1, 16, 8); | |
4679 __ Bfxil(x13, x1, 16, 8); | |
4680 END(); | |
4681 | |
4682 RUN(); | |
4683 | |
4684 | |
4685 ASSERT_EQUAL_64(0x88888888888889abL, x10); | |
4686 ASSERT_EQUAL_64(0x8888cdef88888888L, x11); | |
4687 | |
4688 ASSERT_EQUAL_32(0x888888ab, w20); | |
4689 ASSERT_EQUAL_32(0x88cdef88, w21); | |
4690 | |
4691 ASSERT_EQUAL_64(0x8888888888ef8888L, x12); | |
4692 ASSERT_EQUAL_64(0x88888888888888abL, x13); | |
4693 | |
4694 TEARDOWN(); | |
4695 } | |
4696 | |
4697 | |
4698 TEST(sbfm) { | |
4699 INIT_V8(); | |
4700 SETUP(); | |
4701 | |
4702 START(); | |
4703 __ Mov(x1, 0x0123456789abcdefL); | |
4704 __ Mov(x2, 0xfedcba9876543210L); | |
4705 | |
4706 __ sbfm(x10, x1, 16, 31); | |
4707 __ sbfm(x11, x1, 32, 15); | |
4708 __ sbfm(x12, x1, 32, 47); | |
4709 __ sbfm(x13, x1, 48, 35); | |
4710 | |
4711 __ sbfm(w14, w1, 16, 23); | |
4712 __ sbfm(w15, w1, 24, 15); | |
4713 __ sbfm(w16, w2, 16, 23); | |
4714 __ sbfm(w17, w2, 24, 15); | |
4715 | |
4716 // Aliases. | |
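// For example, Sbfx(x22, x1, 8, 16) extracts bits 23:8 of x1 (0xabcd) and | |
// sign-extends the result. | |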
4717 __ Asr(x18, x1, 32); | |
4718 __ Asr(x19, x2, 32); | |
4719 __ Sbfiz(x20, x1, 8, 16); | |
4720 __ Sbfiz(x21, x2, 8, 16); | |
4721 __ Sbfx(x22, x1, 8, 16); | |
4722 __ Sbfx(x23, x2, 8, 16); | |
4723 __ Sxtb(x24, w1); | |
4724 __ Sxtb(x25, x2); | |
4725 __ Sxth(x26, w1); | |
4726 __ Sxth(x27, x2); | |
4727 __ Sxtw(x28, w1); | |
4728 __ Sxtw(x29, x2); | |
4729 END(); | |
4730 | |
4731 RUN(); | |
4732 | |
4733 | |
4734 ASSERT_EQUAL_64(0xffffffffffff89abL, x10); | |
4735 ASSERT_EQUAL_64(0xffffcdef00000000L, x11); | |
4736 ASSERT_EQUAL_64(0x4567L, x12); | |
4737 ASSERT_EQUAL_64(0x789abcdef0000L, x13); | |
4738 | |
4739 ASSERT_EQUAL_32(0xffffffab, w14); | |
4740 ASSERT_EQUAL_32(0xffcdef00, w15); | |
4741 ASSERT_EQUAL_32(0x54, w16); | |
4742 ASSERT_EQUAL_32(0x00321000, w17); | |
4743 | |
4744 ASSERT_EQUAL_64(0x01234567L, x18); | |
4745 ASSERT_EQUAL_64(0xfffffffffedcba98L, x19); | |
4746 ASSERT_EQUAL_64(0xffffffffffcdef00L, x20); | |
4747 ASSERT_EQUAL_64(0x321000L, x21); | |
4748 ASSERT_EQUAL_64(0xffffffffffffabcdL, x22); | |
4749 ASSERT_EQUAL_64(0x5432L, x23); | |
4750 ASSERT_EQUAL_64(0xffffffffffffffefL, x24); | |
4751 ASSERT_EQUAL_64(0x10, x25); | |
4752 ASSERT_EQUAL_64(0xffffffffffffcdefL, x26); | |
4753 ASSERT_EQUAL_64(0x3210, x27); | |
4754 ASSERT_EQUAL_64(0xffffffff89abcdefL, x28); | |
4755 ASSERT_EQUAL_64(0x76543210, x29); | |
4756 | |
4757 TEARDOWN(); | |
4758 } | |
4759 | |
4760 | |
4761 TEST(ubfm) { | |
4762 INIT_V8(); | |
4763 SETUP(); | |
4764 | |
4765 START(); | |
4766 __ Mov(x1, 0x0123456789abcdefL); | |
4767 __ Mov(x2, 0xfedcba9876543210L); | |
4768 | |
4769 __ Mov(x10, 0x8888888888888888L); | |
4770 __ Mov(x11, 0x8888888888888888L); | |
4771 | |
4772 __ ubfm(x10, x1, 16, 31); | |
4773 __ ubfm(x11, x1, 32, 15); | |
4774 __ ubfm(x12, x1, 32, 47); | |
4775 __ ubfm(x13, x1, 48, 35); | |
4776 | |
4777 __ ubfm(w25, w1, 16, 23); | |
4778 __ ubfm(w26, w1, 24, 15); | |
4779 __ ubfm(w27, w2, 16, 23); | |
4780 __ ubfm(w28, w2, 24, 15); | |
4781 | |
4782 // Aliases. | |
4783 __ Lsl(x15, x1, 63); | |
4784 __ Lsl(x16, x1, 0); | |
4785 __ Lsr(x17, x1, 32); | |
4786 __ Ubfiz(x18, x1, 8, 16); | |
4787 __ Ubfx(x19, x1, 8, 16); | |
4788 __ Uxtb(x20, x1); | |
4789 __ Uxth(x21, x1); | |
4790 __ Uxtw(x22, x1); | |
4791 END(); | |
4792 | |
4793 RUN(); | |
4794 | |
4795 ASSERT_EQUAL_64(0x00000000000089abL, x10); | |
4796 ASSERT_EQUAL_64(0x0000cdef00000000L, x11); | |
4797 ASSERT_EQUAL_64(0x4567L, x12); | |
4798 ASSERT_EQUAL_64(0x789abcdef0000L, x13); | |
4799 | |
4800 ASSERT_EQUAL_32(0x000000ab, w25); | |
4801 ASSERT_EQUAL_32(0x00cdef00, w26); | |
4802 ASSERT_EQUAL_32(0x54, w27); | |
4803 ASSERT_EQUAL_32(0x00321000, w28); | |
4804 | |
4805 ASSERT_EQUAL_64(0x8000000000000000L, x15); | |
4806 ASSERT_EQUAL_64(0x0123456789abcdefL, x16); | |
4807 ASSERT_EQUAL_64(0x01234567L, x17); | |
4808 ASSERT_EQUAL_64(0xcdef00L, x18); | |
4809 ASSERT_EQUAL_64(0xabcdL, x19); | |
4810 ASSERT_EQUAL_64(0xefL, x20); | |
4811 ASSERT_EQUAL_64(0xcdefL, x21); | |
4812 ASSERT_EQUAL_64(0x89abcdefL, x22); | |
4813 | |
4814 TEARDOWN(); | |
4815 } | |
4816 | |
4817 | |
4818 TEST(extr) { | |
4819 INIT_V8(); | |
4820 SETUP(); | |
4821 | |
4822 START(); | |
4823 __ Mov(x1, 0x0123456789abcdefL); | |
4824 __ Mov(x2, 0xfedcba9876543210L); | |
4825 | |
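// Extr extracts a register-width field from the concatenation of the two | |
// sources; e.g. Extr(w11, w1, w2, 1) yields bits 32:1 of w1:w2, 0xbb2a1908. | |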
4826 __ Extr(w10, w1, w2, 0); | |
4827 __ Extr(w11, w1, w2, 1); | |
4828 __ Extr(x12, x2, x1, 2); | |
4829 | |
4830 __ Ror(w13, w1, 0); | |
4831 __ Ror(w14, w2, 17); | |
4832 __ Ror(w15, w1, 31); | |
4833 __ Ror(x18, x2, 1); | |
4834 __ Ror(x19, x1, 63); | |
4835 END(); | |
4836 | |
4837 RUN(); | |
4838 | |
4839 ASSERT_EQUAL_64(0x76543210, x10); | |
4840 ASSERT_EQUAL_64(0xbb2a1908, x11); | |
4841 ASSERT_EQUAL_64(0x0048d159e26af37bUL, x12); | |
4842 ASSERT_EQUAL_64(0x89abcdef, x13); | |
4843 ASSERT_EQUAL_64(0x19083b2a, x14); | |
4844 ASSERT_EQUAL_64(0x13579bdf, x15); | |
4845 ASSERT_EQUAL_64(0x7f6e5d4c3b2a1908UL, x18); | |
4846 ASSERT_EQUAL_64(0x02468acf13579bdeUL, x19); | |
4847 | |
4848 TEARDOWN(); | |
4849 } | |
4850 | |
4851 | |
4852 TEST(fmov_imm) { | |
4853 INIT_V8(); | |
4854 SETUP(); | |
4855 | |
4856 START(); | |
4857 __ Fmov(s11, 1.0); | |
4858 __ Fmov(d22, -13.0); | |
4859 __ Fmov(s1, 255.0); | |
4860 __ Fmov(d2, 12.34567); | |
4861 __ Fmov(s3, 0.0); | |
4862 __ Fmov(d4, 0.0); | |
4863 __ Fmov(s5, kFP32PositiveInfinity); | |
4864 __ Fmov(d6, kFP64NegativeInfinity); | |
4865 END(); | |
4866 | |
4867 RUN(); | |
4868 | |
4869 ASSERT_EQUAL_FP32(1.0, s11); | |
4870 ASSERT_EQUAL_FP64(-13.0, d22); | |
4871 ASSERT_EQUAL_FP32(255.0, s1); | |
4872 ASSERT_EQUAL_FP64(12.34567, d2); | |
4873 ASSERT_EQUAL_FP32(0.0, s3); | |
4874 ASSERT_EQUAL_FP64(0.0, d4); | |
4875 ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s5); | |
4876 ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d6); | |
4877 | |
4878 TEARDOWN(); | |
4879 } | |
4880 | |
4881 | |
4882 TEST(fmov_reg) { | |
4883 INIT_V8(); | |
4884 SETUP(); | |
4885 | |
4886 START(); | |
4887 __ Fmov(s20, 1.0); | |
4888 __ Fmov(w10, s20); | |
4889 __ Fmov(s30, w10); | |
4890 __ Fmov(s5, s20); | |
4891 __ Fmov(d1, -13.0); | |
4892 __ Fmov(x1, d1); | |
4893 __ Fmov(d2, x1); | |
4894 __ Fmov(d4, d1); | |
4895 __ Fmov(d6, rawbits_to_double(0x0123456789abcdefL)); | |
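// Fmov(s6, s6) reads and writes only the low 32 bits of d6 (0x89abcdef). | |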
4896 __ Fmov(s6, s6); | |
4897 END(); | |
4898 | |
4899 RUN(); | |
4900 | |
4901 ASSERT_EQUAL_32(float_to_rawbits(1.0), w10); | |
4902 ASSERT_EQUAL_FP32(1.0, s30); | |
4903 ASSERT_EQUAL_FP32(1.0, s5); | |
4904 ASSERT_EQUAL_64(double_to_rawbits(-13.0), x1); | |
4905 ASSERT_EQUAL_FP64(-13.0, d2); | |
4906 ASSERT_EQUAL_FP64(-13.0, d4); | |
4907 ASSERT_EQUAL_FP32(rawbits_to_float(0x89abcdef), s6); | |
4908 | |
4909 TEARDOWN(); | |
4910 } | |
4911 | |
4912 | |
4913 TEST(fadd) { | |
4914 INIT_V8(); | |
4915 SETUP(); | |
4916 | |
4917 START(); | |
4918 __ Fmov(s14, -0.0f); | |
4919 __ Fmov(s15, kFP32PositiveInfinity); | |
4920 __ Fmov(s16, kFP32NegativeInfinity); | |
4921 __ Fmov(s17, 3.25f); | |
4922 __ Fmov(s18, 1.0f); | |
4923 __ Fmov(s19, 0.0f); | |
4924 | |
4925 __ Fmov(d26, -0.0); | |
4926 __ Fmov(d27, kFP64PositiveInfinity); | |
4927 __ Fmov(d28, kFP64NegativeInfinity); | |
4928 __ Fmov(d29, 0.0); | |
4929 __ Fmov(d30, -2.0); | |
4930 __ Fmov(d31, 2.25); | |
4931 | |
4932 __ Fadd(s0, s17, s18); | |
4933 __ Fadd(s1, s18, s19); | |
4934 __ Fadd(s2, s14, s18); | |
4935 __ Fadd(s3, s15, s18); | |
4936 __ Fadd(s4, s16, s18); | |
4937 __ Fadd(s5, s15, s16); | |
4938 __ Fadd(s6, s16, s15); | |
4939 | |
4940 __ Fadd(d7, d30, d31); | |
4941 __ Fadd(d8, d29, d31); | |
4942 __ Fadd(d9, d26, d31); | |
4943 __ Fadd(d10, d27, d31); | |
4944 __ Fadd(d11, d28, d31); | |
4945 __ Fadd(d12, d27, d28); | |
4946 __ Fadd(d13, d28, d27); | |
4947 END(); | |
4948 | |
4949 RUN(); | |
4950 | |
4951 ASSERT_EQUAL_FP32(4.25, s0); | |
4952 ASSERT_EQUAL_FP32(1.0, s1); | |
4953 ASSERT_EQUAL_FP32(1.0, s2); | |
4954 ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s3); | |
4955 ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s4); | |
4956 ASSERT_EQUAL_FP32(kFP32DefaultNaN, s5); | |
4957 ASSERT_EQUAL_FP32(kFP32DefaultNaN, s6); | |
4958 ASSERT_EQUAL_FP64(0.25, d7); | |
4959 ASSERT_EQUAL_FP64(2.25, d8); | |
4960 ASSERT_EQUAL_FP64(2.25, d9); | |
4961 ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d10); | |
4962 ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d11); | |
4963 ASSERT_EQUAL_FP64(kFP64DefaultNaN, d12); | |
4964 ASSERT_EQUAL_FP64(kFP64DefaultNaN, d13); | |
4965 | |
4966 TEARDOWN(); | |
4967 } | |
4968 | |
4969 | |
4970 TEST(fsub) { | |
4971 INIT_V8(); | |
4972 SETUP(); | |
4973 | |
4974 START(); | |
4975 __ Fmov(s14, -0.0f); | |
4976 __ Fmov(s15, kFP32PositiveInfinity); | |
4977 __ Fmov(s16, kFP32NegativeInfinity); | |
4978 __ Fmov(s17, 3.25f); | |
4979 __ Fmov(s18, 1.0f); | |
4980 __ Fmov(s19, 0.0f); | |
4981 | |
4982 __ Fmov(d26, -0.0); | |
4983 __ Fmov(d27, kFP64PositiveInfinity); | |
4984 __ Fmov(d28, kFP64NegativeInfinity); | |
4985 __ Fmov(d29, 0.0); | |
4986 __ Fmov(d30, -2.0); | |
4987 __ Fmov(d31, 2.25); | |
4988 | |
4989 __ Fsub(s0, s17, s18); | |
4990 __ Fsub(s1, s18, s19); | |
4991 __ Fsub(s2, s14, s18); | |
4992 __ Fsub(s3, s18, s15); | |
4993 __ Fsub(s4, s18, s16); | |
4994 __ Fsub(s5, s15, s15); | |
4995 __ Fsub(s6, s16, s16); | |
4996 | |
4997 __ Fsub(d7, d30, d31); | |
4998 __ Fsub(d8, d29, d31); | |
4999 __ Fsub(d9, d26, d31); | |
5000 __ Fsub(d10, d31, d27); | |
5001 __ Fsub(d11, d31, d28); | |
5002 __ Fsub(d12, d27, d27); | |
5003 __ Fsub(d13, d28, d28); | |
5004 END(); | |
5005 | |
5006 RUN(); | |
5007 | |
5008 ASSERT_EQUAL_FP32(2.25, s0); | |
5009 ASSERT_EQUAL_FP32(1.0, s1); | |
5010 ASSERT_EQUAL_FP32(-1.0, s2); | |
5011 ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s3); | |
5012 ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s4); | |
5013 ASSERT_EQUAL_FP32(kFP32DefaultNaN, s5); | |
5014 ASSERT_EQUAL_FP32(kFP32DefaultNaN, s6); | |
5015 ASSERT_EQUAL_FP64(-4.25, d7); | |
5016 ASSERT_EQUAL_FP64(-2.25, d8); | |
5017 ASSERT_EQUAL_FP64(-2.25, d9); | |
5018 ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d10); | |
5019 ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d11); | |
5020 ASSERT_EQUAL_FP64(kFP64DefaultNaN, d12); | |
5021 ASSERT_EQUAL_FP64(kFP64DefaultNaN, d13); | |
5022 | |
5023 TEARDOWN(); | |
5024 } | |
5025 | |
5026 | |
5027 TEST(fmul) { | |
5028 INIT_V8(); | |
5029 SETUP(); | |
5030 | |
5031 START(); | |
5032 __ Fmov(s14, -0.0f); | |
5033 __ Fmov(s15, kFP32PositiveInfinity); | |
5034 __ Fmov(s16, kFP32NegativeInfinity); | |
5035 __ Fmov(s17, 3.25f); | |
5036 __ Fmov(s18, 2.0f); | |
5037 __ Fmov(s19, 0.0f); | |
5038 __ Fmov(s20, -2.0f); | |
5039 | |
5040 __ Fmov(d26, -0.0); | |
5041 __ Fmov(d27, kFP64PositiveInfinity); | |
5042 __ Fmov(d28, kFP64NegativeInfinity); | |
5043 __ Fmov(d29, 0.0); | |
5044 __ Fmov(d30, -2.0); | |
5045 __ Fmov(d31, 2.25); | |
5046 | |
5047 __ Fmul(s0, s17, s18); | |
5048 __ Fmul(s1, s18, s19); | |
5049 __ Fmul(s2, s14, s14); | |
5050 __ Fmul(s3, s15, s20); | |
5051 __ Fmul(s4, s16, s20); | |
5052 __ Fmul(s5, s15, s19); | |
5053 __ Fmul(s6, s19, s16); | |
5054 | |
5055 __ Fmul(d7, d30, d31); | |
5056 __ Fmul(d8, d29, d31); | |
5057 __ Fmul(d9, d26, d26); | |
5058 __ Fmul(d10, d27, d30); | |
5059 __ Fmul(d11, d28, d30); | |
5060 __ Fmul(d12, d27, d29); | |
5061 __ Fmul(d13, d29, d28); | |
5062 END(); | |
5063 | |
5064 RUN(); | |
5065 | |
5066 ASSERT_EQUAL_FP32(6.5, s0); | |
5067 ASSERT_EQUAL_FP32(0.0, s1); | |
5068 ASSERT_EQUAL_FP32(0.0, s2); | |
5069 ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s3); | |
5070 ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s4); | |
5071 ASSERT_EQUAL_FP32(kFP32DefaultNaN, s5); | |
5072 ASSERT_EQUAL_FP32(kFP32DefaultNaN, s6); | |
5073 ASSERT_EQUAL_FP64(-4.5, d7); | |
5074 ASSERT_EQUAL_FP64(0.0, d8); | |
5075 ASSERT_EQUAL_FP64(0.0, d9); | |
5076 ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d10); | |
5077 ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d11); | |
5078 ASSERT_EQUAL_FP64(kFP64DefaultNaN, d12); | |
5079 ASSERT_EQUAL_FP64(kFP64DefaultNaN, d13); | |
5080 | |
5081 TEARDOWN(); | |
5082 } | |
5083 | |
5084 | |
5085 static void FmaddFmsubHelper(double n, double m, double a, | |
5086 double fmadd, double fmsub, | |
5087 double fnmadd, double fnmsub) { | |
5088 SETUP(); | |
5089 START(); | |
5090 | |
5091 __ Fmov(d0, n); | |
5092 __ Fmov(d1, m); | |
5093 __ Fmov(d2, a); | |
5094 __ Fmadd(d28, d0, d1, d2); | |
5095 __ Fmsub(d29, d0, d1, d2); | |
5096 __ Fnmadd(d30, d0, d1, d2); | |
5097 __ Fnmsub(d31, d0, d1, d2); | |
5098 | |
5099 END(); | |
5100 RUN(); | |
5101 | |
5102 ASSERT_EQUAL_FP64(fmadd, d28); | |
5103 ASSERT_EQUAL_FP64(fmsub, d29); | |
5104 ASSERT_EQUAL_FP64(fnmadd, d30); | |
5105 ASSERT_EQUAL_FP64(fnmsub, d31); | |
5106 | |
5107 TEARDOWN(); | |
5108 } | |
5109 | |
5110 | |
5111 TEST(fmadd_fmsub_double) { | |
5112 INIT_V8(); | |
5113 | |
5114 // It's hard to check the result of fused operations because the only way to | |
5115 // calculate the expected result is to use fma, which is what the simulator uses anyway. | |
5116 // TODO(jbramley): Add tests to check behaviour against a hardware trace. | |
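// For reference: fmadd = a + n * m, fmsub = a - n * m, | |
// fnmadd = -a - n * m and fnmsub = -a + n * m. | |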
5117 | |
5118 // Basic operation. | |
5119 FmaddFmsubHelper(1.0, 2.0, 3.0, 5.0, 1.0, -5.0, -1.0); | |
5120 FmaddFmsubHelper(-1.0, 2.0, 3.0, 1.0, 5.0, -1.0, -5.0); | |
5121 | |
5122 // Check the sign of exact zeroes. | |
5123 // n m a fmadd fmsub fnmadd fnmsub | |
5124 FmaddFmsubHelper(-0.0, +0.0, -0.0, -0.0, +0.0, +0.0, +0.0); | |
5125 FmaddFmsubHelper(+0.0, +0.0, -0.0, +0.0, -0.0, +0.0, +0.0); | |
5126 FmaddFmsubHelper(+0.0, +0.0, +0.0, +0.0, +0.0, -0.0, +0.0); | |
5127 FmaddFmsubHelper(-0.0, +0.0, +0.0, +0.0, +0.0, +0.0, -0.0); | |
5128 FmaddFmsubHelper(+0.0, -0.0, -0.0, -0.0, +0.0, +0.0, +0.0); | |
5129 FmaddFmsubHelper(-0.0, -0.0, -0.0, +0.0, -0.0, +0.0, +0.0); | |
5130 FmaddFmsubHelper(-0.0, -0.0, +0.0, +0.0, +0.0, -0.0, +0.0); | |
5131 FmaddFmsubHelper(+0.0, -0.0, +0.0, +0.0, +0.0, +0.0, -0.0); | |
5132 | |
5133 // Check NaN generation. | |
5134 FmaddFmsubHelper(kFP64PositiveInfinity, 0.0, 42.0, | |
5135 kFP64DefaultNaN, kFP64DefaultNaN, | |
5136 kFP64DefaultNaN, kFP64DefaultNaN); | |
5137 FmaddFmsubHelper(0.0, kFP64PositiveInfinity, 42.0, | |
5138 kFP64DefaultNaN, kFP64DefaultNaN, | |
5139 kFP64DefaultNaN, kFP64DefaultNaN); | |
5140 FmaddFmsubHelper(kFP64PositiveInfinity, 1.0, kFP64PositiveInfinity, | |
5141 kFP64PositiveInfinity, // inf + ( inf * 1) = inf | |
5142 kFP64DefaultNaN, // inf + (-inf * 1) = NaN | |
5143 kFP64NegativeInfinity, // -inf + (-inf * 1) = -inf | |
5144 kFP64DefaultNaN); // -inf + ( inf * 1) = NaN | |
5145 FmaddFmsubHelper(kFP64NegativeInfinity, 1.0, kFP64PositiveInfinity, | |
5146 kFP64DefaultNaN, // inf + (-inf * 1) = NaN | |
5147 kFP64PositiveInfinity, // inf + ( inf * 1) = inf | |
5148 kFP64DefaultNaN, // -inf + ( inf * 1) = NaN | |
5149 kFP64NegativeInfinity); // -inf + (-inf * 1) = -inf | |
5150 } | |
5151 | |
5152 | |
5153 static void FmaddFmsubHelper(float n, float m, float a, | |
5154 float fmadd, float fmsub, | |
5155 float fnmadd, float fnmsub) { | |
5156 SETUP(); | |
5157 START(); | |
5158 | |
5159 __ Fmov(s0, n); | |
5160 __ Fmov(s1, m); | |
5161 __ Fmov(s2, a); | |
5162 __ Fmadd(s28, s0, s1, s2); | |
5163 __ Fmsub(s29, s0, s1, s2); | |
5164 __ Fnmadd(s30, s0, s1, s2); | |
5165 __ Fnmsub(s31, s0, s1, s2); | |
5166 | |
5167 END(); | |
5168 RUN(); | |
5169 | |
5170 ASSERT_EQUAL_FP32(fmadd, s28); | |
5171 ASSERT_EQUAL_FP32(fmsub, s29); | |
5172 ASSERT_EQUAL_FP32(fnmadd, s30); | |
5173 ASSERT_EQUAL_FP32(fnmsub, s31); | |
5174 | |
5175 TEARDOWN(); | |
5176 } | |
5177 | |
5178 | |
5179 TEST(fmadd_fmsub_float) { | |
5180 INIT_V8(); | |
5181 // It's hard to check the result of fused operations because the only way to | |
5182 // calculate the expected result is to use fma, which is what the simulator uses anyway. | |
5183 // TODO(jbramley): Add tests to check behaviour against a hardware trace. | |
5184 | |
5185 // Basic operation. | |
5186 FmaddFmsubHelper(1.0f, 2.0f, 3.0f, 5.0f, 1.0f, -5.0f, -1.0f); | |
5187 FmaddFmsubHelper(-1.0f, 2.0f, 3.0f, 1.0f, 5.0f, -1.0f, -5.0f); | |
5188 | |
5189 // Check the sign of exact zeroes. | |
5190 // n m a fmadd fmsub fnmadd fnmsub | |
5191 FmaddFmsubHelper(-0.0f, +0.0f, -0.0f, -0.0f, +0.0f, +0.0f, +0.0f); | |
5192 FmaddFmsubHelper(+0.0f, +0.0f, -0.0f, +0.0f, -0.0f, +0.0f, +0.0f); | |
5193 FmaddFmsubHelper(+0.0f, +0.0f, +0.0f, +0.0f, +0.0f, -0.0f, +0.0f); | |
5194 FmaddFmsubHelper(-0.0f, +0.0f, +0.0f, +0.0f, +0.0f, +0.0f, -0.0f); | |
5195 FmaddFmsubHelper(+0.0f, -0.0f, -0.0f, -0.0f, +0.0f, +0.0f, +0.0f); | |
5196 FmaddFmsubHelper(-0.0f, -0.0f, -0.0f, +0.0f, -0.0f, +0.0f, +0.0f); | |
5197 FmaddFmsubHelper(-0.0f, -0.0f, +0.0f, +0.0f, +0.0f, -0.0f, +0.0f); | |
5198 FmaddFmsubHelper(+0.0f, -0.0f, +0.0f, +0.0f, +0.0f, +0.0f, -0.0f); | |
5199 | |
5200 // Check NaN generation. | |
5201 FmaddFmsubHelper(kFP32PositiveInfinity, 0.0f, 42.0f, | |
5202 kFP32DefaultNaN, kFP32DefaultNaN, | |
5203 kFP32DefaultNaN, kFP32DefaultNaN); | |
5204 FmaddFmsubHelper(0.0f, kFP32PositiveInfinity, 42.0f, | |
5205 kFP32DefaultNaN, kFP32DefaultNaN, | |
5206 kFP32DefaultNaN, kFP32DefaultNaN); | |
5207 FmaddFmsubHelper(kFP32PositiveInfinity, 1.0f, kFP32PositiveInfinity, | |
5208 kFP32PositiveInfinity, // inf + ( inf * 1) = inf | |
5209 kFP32DefaultNaN, // inf + (-inf * 1) = NaN | |
5210 kFP32NegativeInfinity, // -inf + (-inf * 1) = -inf | |
5211 kFP32DefaultNaN); // -inf + ( inf * 1) = NaN | |
5212 FmaddFmsubHelper(kFP32NegativeInfinity, 1.0f, kFP32PositiveInfinity, | |
5213 kFP32DefaultNaN, // inf + (-inf * 1) = NaN | |
5214 kFP32PositiveInfinity, // inf + ( inf * 1) = inf | |
5215 kFP32DefaultNaN, // -inf + ( inf * 1) = NaN | |
5216 kFP32NegativeInfinity); // -inf + (-inf * 1) = -inf | |
5217 } | |
5218 | |
5219 | |
5220 TEST(fmadd_fmsub_double_nans) { | |
5221 INIT_V8(); | |
5222 // Make sure that NaN propagation works correctly. | |
5223 double s1 = rawbits_to_double(0x7ff5555511111111); | |
5224 double s2 = rawbits_to_double(0x7ff5555522222222); | |
5225 double sa = rawbits_to_double(0x7ff55555aaaaaaaa); | |
5226 double q1 = rawbits_to_double(0x7ffaaaaa11111111); | |
5227 double q2 = rawbits_to_double(0x7ffaaaaa22222222); | |
5228 double qa = rawbits_to_double(0x7ffaaaaaaaaaaaaa); | |
5229 ASSERT(IsSignallingNaN(s1)); | |
5230 ASSERT(IsSignallingNaN(s2)); | |
5231 ASSERT(IsSignallingNaN(sa)); | |
5232 ASSERT(IsQuietNaN(q1)); | |
5233 ASSERT(IsQuietNaN(q2)); | |
5234 ASSERT(IsQuietNaN(qa)); | |
5235 | |
5236 // The input NaNs after passing through ProcessNaN. | |
5237 double s1_proc = rawbits_to_double(0x7ffd555511111111); | |
5238 double s2_proc = rawbits_to_double(0x7ffd555522222222); | |
5239 double sa_proc = rawbits_to_double(0x7ffd5555aaaaaaaa); | |
5240 double q1_proc = q1; | |
5241 double q2_proc = q2; | |
5242 double qa_proc = qa; | |
5243 ASSERT(IsQuietNaN(s1_proc)); | |
5244 ASSERT(IsQuietNaN(s2_proc)); | |
5245 ASSERT(IsQuietNaN(sa_proc)); | |
5246 ASSERT(IsQuietNaN(q1_proc)); | |
5247 ASSERT(IsQuietNaN(q2_proc)); | |
5248 ASSERT(IsQuietNaN(qa_proc)); | |
5249 | |
5250 // Quiet NaNs are propagated. | |
5251 FmaddFmsubHelper(q1, 0, 0, q1_proc, -q1_proc, -q1_proc, q1_proc); | |
5252 FmaddFmsubHelper(0, q2, 0, q2_proc, q2_proc, q2_proc, q2_proc); | |
5253 FmaddFmsubHelper(0, 0, qa, qa_proc, qa_proc, -qa_proc, -qa_proc); | |
5254 FmaddFmsubHelper(q1, q2, 0, q1_proc, -q1_proc, -q1_proc, q1_proc); | |
5255 FmaddFmsubHelper(0, q2, qa, qa_proc, qa_proc, -qa_proc, -qa_proc); | |
5256 FmaddFmsubHelper(q1, 0, qa, qa_proc, qa_proc, -qa_proc, -qa_proc); | |
5257 FmaddFmsubHelper(q1, q2, qa, qa_proc, qa_proc, -qa_proc, -qa_proc); | |
5258 | |
5259 // Signalling NaNs are propagated, and made quiet. | |
5260 FmaddFmsubHelper(s1, 0, 0, s1_proc, -s1_proc, -s1_proc, s1_proc); | |
5261 FmaddFmsubHelper(0, s2, 0, s2_proc, s2_proc, s2_proc, s2_proc); | |
5262 FmaddFmsubHelper(0, 0, sa, sa_proc, sa_proc, -sa_proc, -sa_proc); | |
5263 FmaddFmsubHelper(s1, s2, 0, s1_proc, -s1_proc, -s1_proc, s1_proc); | |
5264 FmaddFmsubHelper(0, s2, sa, sa_proc, sa_proc, -sa_proc, -sa_proc); | |
5265 FmaddFmsubHelper(s1, 0, sa, sa_proc, sa_proc, -sa_proc, -sa_proc); | |
5266 FmaddFmsubHelper(s1, s2, sa, sa_proc, sa_proc, -sa_proc, -sa_proc); | |
5267 | |
5268 // Signalling NaNs take precedence over quiet NaNs. | |
5269 FmaddFmsubHelper(s1, q2, qa, s1_proc, -s1_proc, -s1_proc, s1_proc); | |
5270 FmaddFmsubHelper(q1, s2, qa, s2_proc, s2_proc, s2_proc, s2_proc); | |
5271 FmaddFmsubHelper(q1, q2, sa, sa_proc, sa_proc, -sa_proc, -sa_proc); | |
5272 FmaddFmsubHelper(s1, s2, qa, s1_proc, -s1_proc, -s1_proc, s1_proc); | |
5273 FmaddFmsubHelper(q1, s2, sa, sa_proc, sa_proc, -sa_proc, -sa_proc); | |
5274 FmaddFmsubHelper(s1, q2, sa, sa_proc, sa_proc, -sa_proc, -sa_proc); | |
5275 FmaddFmsubHelper(s1, s2, sa, sa_proc, sa_proc, -sa_proc, -sa_proc); | |
5276 | |
5277 // A NaN generated by the intermediate op1 * op2 overrides a quiet NaN in a. | |
5278 FmaddFmsubHelper(0, kFP64PositiveInfinity, qa, | |
5279 kFP64DefaultNaN, kFP64DefaultNaN, | |
5280 kFP64DefaultNaN, kFP64DefaultNaN); | |
5281 FmaddFmsubHelper(kFP64PositiveInfinity, 0, qa, | |
5282 kFP64DefaultNaN, kFP64DefaultNaN, | |
5283 kFP64DefaultNaN, kFP64DefaultNaN); | |
5284 FmaddFmsubHelper(0, kFP64NegativeInfinity, qa, | |
5285 kFP64DefaultNaN, kFP64DefaultNaN, | |
5286 kFP64DefaultNaN, kFP64DefaultNaN); | |
5287 FmaddFmsubHelper(kFP64NegativeInfinity, 0, qa, | |
5288 kFP64DefaultNaN, kFP64DefaultNaN, | |
5289 kFP64DefaultNaN, kFP64DefaultNaN); | |
5290 } | |
5291 | |
5292 | |
5293 TEST(fmadd_fmsub_float_nans) { | |
5294 INIT_V8(); | |
5295 // Make sure that NaN propagation works correctly. | |
5296 float s1 = rawbits_to_float(0x7f951111); | |
5297 float s2 = rawbits_to_float(0x7f952222); | |
5298 float sa = rawbits_to_float(0x7f95aaaa); | |
5299 float q1 = rawbits_to_float(0x7fea1111); | |
5300 float q2 = rawbits_to_float(0x7fea2222); | |
5301 float qa = rawbits_to_float(0x7feaaaaa); | |
5302 ASSERT(IsSignallingNaN(s1)); | |
5303 ASSERT(IsSignallingNaN(s2)); | |
5304 ASSERT(IsSignallingNaN(sa)); | |
5305 ASSERT(IsQuietNaN(q1)); | |
5306 ASSERT(IsQuietNaN(q2)); | |
5307 ASSERT(IsQuietNaN(qa)); | |
5308 | |
5309 // The input NaNs after passing through ProcessNaN. | |
5310 float s1_proc = rawbits_to_float(0x7fd51111); | |
5311 float s2_proc = rawbits_to_float(0x7fd52222); | |
5312 float sa_proc = rawbits_to_float(0x7fd5aaaa); | |
5313 float q1_proc = q1; | |
5314 float q2_proc = q2; | |
5315 float qa_proc = qa; | |
5316 ASSERT(IsQuietNaN(s1_proc)); | |
5317 ASSERT(IsQuietNaN(s2_proc)); | |
5318 ASSERT(IsQuietNaN(sa_proc)); | |
5319 ASSERT(IsQuietNaN(q1_proc)); | |
5320 ASSERT(IsQuietNaN(q2_proc)); | |
5321 ASSERT(IsQuietNaN(qa_proc)); | |
5322 | |
5323 // Quiet NaNs are propagated. | |
5324 FmaddFmsubHelper(q1, 0, 0, q1_proc, -q1_proc, -q1_proc, q1_proc); | |
5325 FmaddFmsubHelper(0, q2, 0, q2_proc, q2_proc, q2_proc, q2_proc); | |
5326 FmaddFmsubHelper(0, 0, qa, qa_proc, qa_proc, -qa_proc, -qa_proc); | |
5327 FmaddFmsubHelper(q1, q2, 0, q1_proc, -q1_proc, -q1_proc, q1_proc); | |
5328 FmaddFmsubHelper(0, q2, qa, qa_proc, qa_proc, -qa_proc, -qa_proc); | |
5329 FmaddFmsubHelper(q1, 0, qa, qa_proc, qa_proc, -qa_proc, -qa_proc); | |
5330 FmaddFmsubHelper(q1, q2, qa, qa_proc, qa_proc, -qa_proc, -qa_proc); | |
5331 | |
5332 // Signalling NaNs are propagated, and made quiet. | |
5333 FmaddFmsubHelper(s1, 0, 0, s1_proc, -s1_proc, -s1_proc, s1_proc); | |
5334 FmaddFmsubHelper(0, s2, 0, s2_proc, s2_proc, s2_proc, s2_proc); | |
5335 FmaddFmsubHelper(0, 0, sa, sa_proc, sa_proc, -sa_proc, -sa_proc); | |
5336 FmaddFmsubHelper(s1, s2, 0, s1_proc, -s1_proc, -s1_proc, s1_proc); | |
5337 FmaddFmsubHelper(0, s2, sa, sa_proc, sa_proc, -sa_proc, -sa_proc); | |
5338 FmaddFmsubHelper(s1, 0, sa, sa_proc, sa_proc, -sa_proc, -sa_proc); | |
5339 FmaddFmsubHelper(s1, s2, sa, sa_proc, sa_proc, -sa_proc, -sa_proc); | |
5340 | |
5341 // Signalling NaNs take precedence over quiet NaNs. | |
5342 FmaddFmsubHelper(s1, q2, qa, s1_proc, -s1_proc, -s1_proc, s1_proc); | |
5343 FmaddFmsubHelper(q1, s2, qa, s2_proc, s2_proc, s2_proc, s2_proc); | |
5344 FmaddFmsubHelper(q1, q2, sa, sa_proc, sa_proc, -sa_proc, -sa_proc); | |
5345 FmaddFmsubHelper(s1, s2, qa, s1_proc, -s1_proc, -s1_proc, s1_proc); | |
5346 FmaddFmsubHelper(q1, s2, sa, sa_proc, sa_proc, -sa_proc, -sa_proc); | |
5347 FmaddFmsubHelper(s1, q2, sa, sa_proc, sa_proc, -sa_proc, -sa_proc); | |
5348 FmaddFmsubHelper(s1, s2, sa, sa_proc, sa_proc, -sa_proc, -sa_proc); | |
5349 | |
5350 // A NaN generated by the intermediate op1 * op2 overrides a quiet NaN in a. | |
5351 FmaddFmsubHelper(0, kFP32PositiveInfinity, qa, | |
5352 kFP32DefaultNaN, kFP32DefaultNaN, | |
5353 kFP32DefaultNaN, kFP32DefaultNaN); | |
5354 FmaddFmsubHelper(kFP32PositiveInfinity, 0, qa, | |
5355 kFP32DefaultNaN, kFP32DefaultNaN, | |
5356 kFP32DefaultNaN, kFP32DefaultNaN); | |
5357 FmaddFmsubHelper(0, kFP32NegativeInfinity, qa, | |
5358 kFP32DefaultNaN, kFP32DefaultNaN, | |
5359 kFP32DefaultNaN, kFP32DefaultNaN); | |
5360 FmaddFmsubHelper(kFP32NegativeInfinity, 0, qa, | |
5361 kFP32DefaultNaN, kFP32DefaultNaN, | |
5362 kFP32DefaultNaN, kFP32DefaultNaN); | |
5363 } | |
5364 | |
5365 | |
5366 TEST(fdiv) { | |
5367 INIT_V8(); | |
5368 SETUP(); | |
5369 | |
5370 START(); | |
5371 __ Fmov(s14, -0.0f); | |
5372 __ Fmov(s15, kFP32PositiveInfinity); | |
5373 __ Fmov(s16, kFP32NegativeInfinity); | |
5374 __ Fmov(s17, 3.25f); | |
5375 __ Fmov(s18, 2.0f); | |
5376 __ Fmov(s19, 2.0f); | |
5377 __ Fmov(s20, -2.0f); | |
5378 | |
5379 __ Fmov(d26, -0.0); | |
5380 __ Fmov(d27, kFP64PositiveInfinity); | |
5381 __ Fmov(d28, kFP64NegativeInfinity); | |
5382 __ Fmov(d29, 0.0); | |
5383 __ Fmov(d30, -2.0); | |
5384 __ Fmov(d31, 2.25); | |
5385 | |
5386 __ Fdiv(s0, s17, s18); | |
5387 __ Fdiv(s1, s18, s19); | |
5388 __ Fdiv(s2, s14, s18); | |
5389 __ Fdiv(s3, s18, s15); | |
5390 __ Fdiv(s4, s18, s16); | |
5391 __ Fdiv(s5, s15, s16); | |
5392 __ Fdiv(s6, s14, s14); | |
5393 | |
5394 __ Fdiv(d7, d31, d30); | |
5395 __ Fdiv(d8, d29, d31); | |
5396 __ Fdiv(d9, d26, d31); | |
5397 __ Fdiv(d10, d31, d27); | |
5398 __ Fdiv(d11, d31, d28); | |
5399 __ Fdiv(d12, d28, d27); | |
5400 __ Fdiv(d13, d29, d29); | |
5401 END(); | |
5402 | |
5403 RUN(); | |
5404 | |
5405 ASSERT_EQUAL_FP32(1.625f, s0); | |
5406 ASSERT_EQUAL_FP32(1.0f, s1); | |
5407 ASSERT_EQUAL_FP32(-0.0f, s2); | |
5408 ASSERT_EQUAL_FP32(0.0f, s3); | |
5409 ASSERT_EQUAL_FP32(-0.0f, s4); | |
5410 ASSERT_EQUAL_FP32(kFP32DefaultNaN, s5); | |
5411 ASSERT_EQUAL_FP32(kFP32DefaultNaN, s6); | |
5412 ASSERT_EQUAL_FP64(-1.125, d7); | |
5413 ASSERT_EQUAL_FP64(0.0, d8); | |
5414 ASSERT_EQUAL_FP64(-0.0, d9); | |
5415 ASSERT_EQUAL_FP64(0.0, d10); | |
5416 ASSERT_EQUAL_FP64(-0.0, d11); | |
5417 ASSERT_EQUAL_FP64(kFP64DefaultNaN, d12); | |
5418 ASSERT_EQUAL_FP64(kFP64DefaultNaN, d13); | |
5419 | |
5420 TEARDOWN(); | |
5421 } | |
5422 | |
5423 | |
5424 static float MinMaxHelper(float n, | |
5425 float m, | |
5426 bool min, | |
5427 float quiet_nan_substitute = 0.0) { | |
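// Compute the expected result of an fmin/fmax operation. Passing | |
// +/-infinity as quiet_nan_substitute models fminnm/fmaxnm, where a lone | |
// quiet NaN is replaced by the other operand. | |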
5428 uint32_t raw_n = float_to_rawbits(n); | |
5429 uint32_t raw_m = float_to_rawbits(m); | |
5430 | |
5431 if (std::isnan(n) && ((raw_n & kSQuietNanMask) == 0)) { | |
5432 // n is signalling NaN. | |
5433 return rawbits_to_float(raw_n | kSQuietNanMask); | |
5434 } else if (std::isnan(m) && ((raw_m & kSQuietNanMask) == 0)) { | |
5435 // m is signalling NaN. | |
5436 return rawbits_to_float(raw_m | kSQuietNanMask); | |
5437 } else if (quiet_nan_substitute == 0.0) { | |
5438 if (std::isnan(n)) { | |
5439 // n is quiet NaN. | |
5440 return n; | |
5441 } else if (std::isnan(m)) { | |
5442 // m is quiet NaN. | |
5443 return m; | |
5444 } | |
5445 } else { | |
5446 // Substitute n or m if exactly one of them is a quiet NaN. | |
5447 if (std::isnan(n) && !std::isnan(m)) { | |
5448 // n is quiet NaN: replace with substitute. | |
5449 n = quiet_nan_substitute; | |
5450 } else if (!std::isnan(n) && std::isnan(m)) { | |
5451 // m is quiet NaN: replace with substitute. | |
5452 m = quiet_nan_substitute; | |
5453 } | |
5454 } | |
5455 | |
5456 if ((n == 0.0) && (m == 0.0) && | |
5457 (copysignf(1.0f, n) != copysignf(1.0f, m))) { | |
5458 return min ? -0.0f : 0.0f; | |
5459 } | |
5460 | |
5461 return min ? fminf(n, m) : fmaxf(n, m); | |
5462 } | |
5463 | |
5464 | |
5465 static double MinMaxHelper(double n, | |
5466 double m, | |
5467 bool min, | |
5468 double quiet_nan_substitute = 0.0) { | |
5469 uint64_t raw_n = double_to_rawbits(n); | |
5470 uint64_t raw_m = double_to_rawbits(m); | |
5471 | |
5472 if (std::isnan(n) && ((raw_n & kDQuietNanMask) == 0)) { | |
5473 // n is signalling NaN. | |
5474 return rawbits_to_double(raw_n | kDQuietNanMask); | |
5475 } else if (std::isnan(m) && ((raw_m & kDQuietNanMask) == 0)) { | |
5476 // m is signalling NaN. | |
5477 return rawbits_to_double(raw_m | kDQuietNanMask); | |
5478 } else if (quiet_nan_substitute == 0.0) { | |
5479 if (std::isnan(n)) { | |
5480 // n is quiet NaN. | |
5481 return n; | |
5482 } else if (std::isnan(m)) { | |
5483 // m is quiet NaN. | |
5484 return m; | |
5485 } | |
5486 } else { | |
5487 // Substitute n or m if exactly one of them is a quiet NaN. | |
5488 if (std::isnan(n) && !std::isnan(m)) { | |
5489 // n is quiet NaN: replace with substitute. | |
5490 n = quiet_nan_substitute; | |
5491 } else if (!std::isnan(n) && std::isnan(m)) { | |
5492 // m is quiet NaN: replace with substitute. | |
5493 m = quiet_nan_substitute; | |
5494 } | |
5495 } | |
5496 | |
5497 if ((n == 0.0) && (m == 0.0) && | |
5498 (copysign(1.0, n) != copysign(1.0, m))) { | |
5499 return min ? -0.0 : 0.0; | |
5500 } | |
5501 | |
5502 return min ? fmin(n, m) : fmax(n, m); | |
5503 } | |
5504 | |
5505 | |
static void FminFmaxDoubleHelper(double n, double m, double min, double max,
                                 double minnm, double maxnm) {
  SETUP();

  START();
  __ Fmov(d0, n);
  __ Fmov(d1, m);
  __ Fmin(d28, d0, d1);
  __ Fmax(d29, d0, d1);
  __ Fminnm(d30, d0, d1);
  __ Fmaxnm(d31, d0, d1);
  END();

  RUN();

  ASSERT_EQUAL_FP64(min, d28);
  ASSERT_EQUAL_FP64(max, d29);
  ASSERT_EQUAL_FP64(minnm, d30);
  ASSERT_EQUAL_FP64(maxnm, d31);

  TEARDOWN();
}


TEST(fmax_fmin_d) {
  INIT_V8();
  // Use non-standard NaNs to check that the payload bits are preserved.
  double snan = rawbits_to_double(0x7ff5555512345678);
  double qnan = rawbits_to_double(0x7ffaaaaa87654321);

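  // Quieting a double NaN sets the top bit of the mantissa (the mask
  // 0x0008000000000000), so processing snan yields
  // 0x7ff5555512345678 | 0x0008000000000000 = 0x7ffd555512345678, while a
  // quiet NaN passes through unchanged.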
  double snan_processed = rawbits_to_double(0x7ffd555512345678);
  double qnan_processed = qnan;

  ASSERT(IsSignallingNaN(snan));
  ASSERT(IsQuietNaN(qnan));
  ASSERT(IsQuietNaN(snan_processed));
  ASSERT(IsQuietNaN(qnan_processed));

  // Bootstrap tests.
  FminFmaxDoubleHelper(0, 0, 0, 0, 0, 0);
  FminFmaxDoubleHelper(0, 1, 0, 1, 0, 1);
  FminFmaxDoubleHelper(kFP64PositiveInfinity, kFP64NegativeInfinity,
                       kFP64NegativeInfinity, kFP64PositiveInfinity,
                       kFP64NegativeInfinity, kFP64PositiveInfinity);
  FminFmaxDoubleHelper(snan, 0,
                       snan_processed, snan_processed,
                       snan_processed, snan_processed);
  FminFmaxDoubleHelper(0, snan,
                       snan_processed, snan_processed,
                       snan_processed, snan_processed);
  FminFmaxDoubleHelper(qnan, 0,
                       qnan_processed, qnan_processed,
                       0, 0);
  FminFmaxDoubleHelper(0, qnan,
                       qnan_processed, qnan_processed,
                       0, 0);
  FminFmaxDoubleHelper(qnan, snan,
                       snan_processed, snan_processed,
                       snan_processed, snan_processed);
  FminFmaxDoubleHelper(snan, qnan,
                       snan_processed, snan_processed,
                       snan_processed, snan_processed);

  // Iterate over all combinations of inputs.
  double inputs[] = { DBL_MAX, DBL_MIN, 1.0, 0.0,
                      -DBL_MAX, -DBL_MIN, -1.0, -0.0,
                      kFP64PositiveInfinity, kFP64NegativeInfinity,
                      kFP64QuietNaN, kFP64SignallingNaN };

  const int count = sizeof(inputs) / sizeof(inputs[0]);

  for (int in = 0; in < count; in++) {
    double n = inputs[in];
    for (int im = 0; im < count; im++) {
      double m = inputs[im];
      FminFmaxDoubleHelper(n, m,
                           MinMaxHelper(n, m, true),
                           MinMaxHelper(n, m, false),
                           MinMaxHelper(n, m, true, kFP64PositiveInfinity),
                           MinMaxHelper(n, m, false, kFP64NegativeInfinity));
    }
  }
}


static void FminFmaxFloatHelper(float n, float m, float min, float max,
                                float minnm, float maxnm) {
  SETUP();

  START();
  __ Fmov(s0, n);
  __ Fmov(s1, m);
  __ Fmin(s28, s0, s1);
  __ Fmax(s29, s0, s1);
  __ Fminnm(s30, s0, s1);
  __ Fmaxnm(s31, s0, s1);
  END();

  RUN();

  ASSERT_EQUAL_FP32(min, s28);
  ASSERT_EQUAL_FP32(max, s29);
  ASSERT_EQUAL_FP32(minnm, s30);
  ASSERT_EQUAL_FP32(maxnm, s31);

  TEARDOWN();
}


TEST(fmax_fmin_s) {
  INIT_V8();
  // Use non-standard NaNs to check that the payload bits are preserved.
  float snan = rawbits_to_float(0x7f951234);
  float qnan = rawbits_to_float(0x7fea8765);

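  // For floats the quiet bit is 0x00400000, so snan is processed to
  // 0x7f951234 | 0x00400000 = 0x7fd51234.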
  float snan_processed = rawbits_to_float(0x7fd51234);
  float qnan_processed = qnan;

  ASSERT(IsSignallingNaN(snan));
  ASSERT(IsQuietNaN(qnan));
  ASSERT(IsQuietNaN(snan_processed));
  ASSERT(IsQuietNaN(qnan_processed));

  // Bootstrap tests.
  FminFmaxFloatHelper(0, 0, 0, 0, 0, 0);
  FminFmaxFloatHelper(0, 1, 0, 1, 0, 1);
  FminFmaxFloatHelper(kFP32PositiveInfinity, kFP32NegativeInfinity,
                      kFP32NegativeInfinity, kFP32PositiveInfinity,
                      kFP32NegativeInfinity, kFP32PositiveInfinity);
  FminFmaxFloatHelper(snan, 0,
                      snan_processed, snan_processed,
                      snan_processed, snan_processed);
  FminFmaxFloatHelper(0, snan,
                      snan_processed, snan_processed,
                      snan_processed, snan_processed);
  FminFmaxFloatHelper(qnan, 0,
                      qnan_processed, qnan_processed,
                      0, 0);
  FminFmaxFloatHelper(0, qnan,
                      qnan_processed, qnan_processed,
                      0, 0);
  FminFmaxFloatHelper(qnan, snan,
                      snan_processed, snan_processed,
                      snan_processed, snan_processed);
  FminFmaxFloatHelper(snan, qnan,
                      snan_processed, snan_processed,
                      snan_processed, snan_processed);

  // Iterate over all combinations of inputs.
  float inputs[] = { FLT_MAX, FLT_MIN, 1.0, 0.0,
                     -FLT_MAX, -FLT_MIN, -1.0, -0.0,
                     kFP32PositiveInfinity, kFP32NegativeInfinity,
                     kFP32QuietNaN, kFP32SignallingNaN };

  const int count = sizeof(inputs) / sizeof(inputs[0]);

  for (int in = 0; in < count; in++) {
    float n = inputs[in];
    for (int im = 0; im < count; im++) {
      float m = inputs[im];
      FminFmaxFloatHelper(n, m,
                          MinMaxHelper(n, m, true),
                          MinMaxHelper(n, m, false),
                          MinMaxHelper(n, m, true, kFP32PositiveInfinity),
                          MinMaxHelper(n, m, false, kFP32NegativeInfinity));
    }
  }
}


TEST(fccmp) {
  INIT_V8();
  SETUP();

  START();
  __ Fmov(s16, 0.0);
  __ Fmov(s17, 0.5);
  __ Fmov(d18, -0.5);
  __ Fmov(d19, -1.0);
  __ Mov(x20, 0);

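  // Fccmp performs the floating-point comparison if the condition holds for
  // the current flags, and otherwise writes the immediate nzcv value to the
  // status flags. Cmp(x20, 0) with x20 == 0 sets Z and C, so conditions such
  // as eq, ge, le and ls below trigger the comparison, while the others take
  // the immediate.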
  __ Cmp(x20, 0);
  __ Fccmp(s16, s16, NoFlag, eq);
  __ Mrs(x0, NZCV);

  __ Cmp(x20, 0);
  __ Fccmp(s16, s16, VFlag, ne);
  __ Mrs(x1, NZCV);

  __ Cmp(x20, 0);
  __ Fccmp(s16, s17, CFlag, ge);
  __ Mrs(x2, NZCV);

  __ Cmp(x20, 0);
  __ Fccmp(s16, s17, CVFlag, lt);
  __ Mrs(x3, NZCV);

  __ Cmp(x20, 0);
  __ Fccmp(d18, d18, ZFlag, le);
  __ Mrs(x4, NZCV);

  __ Cmp(x20, 0);
  __ Fccmp(d18, d18, ZVFlag, gt);
  __ Mrs(x5, NZCV);

  __ Cmp(x20, 0);
  __ Fccmp(d18, d19, ZCVFlag, ls);
  __ Mrs(x6, NZCV);

  __ Cmp(x20, 0);
  __ Fccmp(d18, d19, NFlag, hi);
  __ Mrs(x7, NZCV);

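  // The al and nv conditions are not accepted by the Fccmp macro, so use the
  // raw assembler for these cases; both conditions always hold, so the
  // comparison is always performed.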
  __ fccmp(s16, s16, NFlag, al);
  __ Mrs(x8, NZCV);

  __ fccmp(d18, d18, NFlag, nv);
  __ Mrs(x9, NZCV);

  END();

  RUN();

  ASSERT_EQUAL_32(ZCFlag, w0);
  ASSERT_EQUAL_32(VFlag, w1);
  ASSERT_EQUAL_32(NFlag, w2);
  ASSERT_EQUAL_32(CVFlag, w3);
  ASSERT_EQUAL_32(ZCFlag, w4);
  ASSERT_EQUAL_32(ZVFlag, w5);
  ASSERT_EQUAL_32(CFlag, w6);
  ASSERT_EQUAL_32(NFlag, w7);
  ASSERT_EQUAL_32(ZCFlag, w8);
  ASSERT_EQUAL_32(ZCFlag, w9);

  TEARDOWN();
}


TEST(fcmp) {
  INIT_V8();
  SETUP();

  START();

  // Some of these tests require a floating-point scratch register assigned to
  // the macro assembler, but most do not.
  {
    // We're going to mess around with the available scratch registers in this
    // test. A UseScratchRegisterScope will make sure that they are restored to
    // the default values once we're finished.
    UseScratchRegisterScope temps(&masm);
    masm.FPTmpList()->set_list(0);

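    // With the FP scratch list empty, Fcmp can only compare two registers, or
    // a register against 0.0 (the compare-with-zero encoding). Comparing
    // against any other immediate needs a scratch register to hold the
    // materialized constant, so d0 is temporarily lent to the macro assembler
    // around those comparisons.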
    __ Fmov(s8, 0.0);
    __ Fmov(s9, 0.5);
    __ Mov(w18, 0x7f800001);  // Single precision NaN.
    __ Fmov(s18, w18);

    __ Fcmp(s8, s8);
    __ Mrs(x0, NZCV);
    __ Fcmp(s8, s9);
    __ Mrs(x1, NZCV);
    __ Fcmp(s9, s8);
    __ Mrs(x2, NZCV);
    __ Fcmp(s8, s18);
    __ Mrs(x3, NZCV);
    __ Fcmp(s18, s18);
    __ Mrs(x4, NZCV);
    __ Fcmp(s8, 0.0);
    __ Mrs(x5, NZCV);
    masm.FPTmpList()->set_list(d0.Bit());
    __ Fcmp(s8, 255.0);
    masm.FPTmpList()->set_list(0);
    __ Mrs(x6, NZCV);

    __ Fmov(d19, 0.0);
    __ Fmov(d20, 0.5);
    __ Mov(x21, 0x7ff0000000000001UL);  // Double precision NaN.
    __ Fmov(d21, x21);

    __ Fcmp(d19, d19);
    __ Mrs(x10, NZCV);
    __ Fcmp(d19, d20);
    __ Mrs(x11, NZCV);
    __ Fcmp(d20, d19);
    __ Mrs(x12, NZCV);
    __ Fcmp(d19, d21);
    __ Mrs(x13, NZCV);
    __ Fcmp(d21, d21);
    __ Mrs(x14, NZCV);
    __ Fcmp(d19, 0.0);
    __ Mrs(x15, NZCV);
    masm.FPTmpList()->set_list(d0.Bit());
    __ Fcmp(d19, 12.3456);
    masm.FPTmpList()->set_list(0);
    __ Mrs(x16, NZCV);
  }

  END();

  RUN();

  ASSERT_EQUAL_32(ZCFlag, w0);
  ASSERT_EQUAL_32(NFlag, w1);
  ASSERT_EQUAL_32(CFlag, w2);
  ASSERT_EQUAL_32(CVFlag, w3);
  ASSERT_EQUAL_32(CVFlag, w4);
  ASSERT_EQUAL_32(ZCFlag, w5);
  ASSERT_EQUAL_32(NFlag, w6);
  ASSERT_EQUAL_32(ZCFlag, w10);
  ASSERT_EQUAL_32(NFlag, w11);
  ASSERT_EQUAL_32(CFlag, w12);
  ASSERT_EQUAL_32(CVFlag, w13);
  ASSERT_EQUAL_32(CVFlag, w14);
  ASSERT_EQUAL_32(ZCFlag, w15);
  ASSERT_EQUAL_32(NFlag, w16);

  TEARDOWN();
}


TEST(fcsel) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x16, 0);
  __ Fmov(s16, 1.0);
  __ Fmov(s17, 2.0);
  __ Fmov(d18, 3.0);
  __ Fmov(d19, 4.0);

  __ Cmp(x16, 0);
  __ Fcsel(s0, s16, s17, eq);
  __ Fcsel(s1, s16, s17, ne);
  __ Fcsel(d2, d18, d19, eq);
  __ Fcsel(d3, d18, d19, ne);
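  // As with Fccmp, al and nv are rejected by the Fcsel macro, so use the raw
  // assembler; both conditions always select the first operand.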
  __ fcsel(s4, s16, s17, al);
  __ fcsel(d5, d18, d19, nv);
  END();

  RUN();

  ASSERT_EQUAL_FP32(1.0, s0);
  ASSERT_EQUAL_FP32(2.0, s1);
  ASSERT_EQUAL_FP64(3.0, d2);
  ASSERT_EQUAL_FP64(4.0, d3);
  ASSERT_EQUAL_FP32(1.0, s4);
  ASSERT_EQUAL_FP64(3.0, d5);

  TEARDOWN();
}


TEST(fneg) {
  INIT_V8();
  SETUP();

  START();
  __ Fmov(s16, 1.0);
  __ Fmov(s17, 0.0);
  __ Fmov(s18, kFP32PositiveInfinity);
  __ Fmov(d19, 1.0);
  __ Fmov(d20, 0.0);
  __ Fmov(d21, kFP64PositiveInfinity);

  __ Fneg(s0, s16);
  __ Fneg(s1, s0);
  __ Fneg(s2, s17);
  __ Fneg(s3, s2);
  __ Fneg(s4, s18);
  __ Fneg(s5, s4);
  __ Fneg(d6, d19);
  __ Fneg(d7, d6);
  __ Fneg(d8, d20);
  __ Fneg(d9, d8);
  __ Fneg(d10, d21);
  __ Fneg(d11, d10);
  END();

  RUN();

  ASSERT_EQUAL_FP32(-1.0, s0);
  ASSERT_EQUAL_FP32(1.0, s1);
  ASSERT_EQUAL_FP32(-0.0, s2);
  ASSERT_EQUAL_FP32(0.0, s3);
  ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s4);
  ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s5);
  ASSERT_EQUAL_FP64(-1.0, d6);
  ASSERT_EQUAL_FP64(1.0, d7);
  ASSERT_EQUAL_FP64(-0.0, d8);
  ASSERT_EQUAL_FP64(0.0, d9);
  ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d10);
  ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d11);

  TEARDOWN();
}


TEST(fabs) {
  INIT_V8();
  SETUP();

  START();
  __ Fmov(s16, -1.0);
  __ Fmov(s17, -0.0);
  __ Fmov(s18, kFP32NegativeInfinity);
  __ Fmov(d19, -1.0);
  __ Fmov(d20, -0.0);
  __ Fmov(d21, kFP64NegativeInfinity);

  __ Fabs(s0, s16);
  __ Fabs(s1, s0);
  __ Fabs(s2, s17);
  __ Fabs(s3, s18);
  __ Fabs(d4, d19);
  __ Fabs(d5, d4);
  __ Fabs(d6, d20);
  __ Fabs(d7, d21);
  END();

  RUN();

  ASSERT_EQUAL_FP32(1.0, s0);
  ASSERT_EQUAL_FP32(1.0, s1);
  ASSERT_EQUAL_FP32(0.0, s2);
  ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s3);
  ASSERT_EQUAL_FP64(1.0, d4);
  ASSERT_EQUAL_FP64(1.0, d5);
  ASSERT_EQUAL_FP64(0.0, d6);
  ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d7);

  TEARDOWN();
}


TEST(fsqrt) {
  INIT_V8();
  SETUP();

  START();
  __ Fmov(s16, 0.0);
  __ Fmov(s17, 1.0);
  __ Fmov(s18, 0.25);
  __ Fmov(s19, 65536.0);
  __ Fmov(s20, -0.0);
  __ Fmov(s21, kFP32PositiveInfinity);
  __ Fmov(s22, -1.0);
  __ Fmov(d23, 0.0);
  __ Fmov(d24, 1.0);
  __ Fmov(d25, 0.25);
  __ Fmov(d26, 4294967296.0);
  __ Fmov(d27, -0.0);
  __ Fmov(d28, kFP64PositiveInfinity);
  __ Fmov(d29, -1.0);

  __ Fsqrt(s0, s16);
  __ Fsqrt(s1, s17);
  __ Fsqrt(s2, s18);
  __ Fsqrt(s3, s19);
  __ Fsqrt(s4, s20);
  __ Fsqrt(s5, s21);
  __ Fsqrt(s6, s22);
  __ Fsqrt(d7, d23);
  __ Fsqrt(d8, d24);
  __ Fsqrt(d9, d25);
  __ Fsqrt(d10, d26);
  __ Fsqrt(d11, d27);
  __ Fsqrt(d12, d28);
  __ Fsqrt(d13, d29);
  END();

  RUN();

  ASSERT_EQUAL_FP32(0.0, s0);
  ASSERT_EQUAL_FP32(1.0, s1);
  ASSERT_EQUAL_FP32(0.5, s2);
  ASSERT_EQUAL_FP32(256.0, s3);
  ASSERT_EQUAL_FP32(-0.0, s4);
  ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s5);
  ASSERT_EQUAL_FP32(kFP32DefaultNaN, s6);
  ASSERT_EQUAL_FP64(0.0, d7);
  ASSERT_EQUAL_FP64(1.0, d8);
  ASSERT_EQUAL_FP64(0.5, d9);
  ASSERT_EQUAL_FP64(65536.0, d10);
  ASSERT_EQUAL_FP64(-0.0, d11);
  ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d12);
  ASSERT_EQUAL_FP64(kFP64DefaultNaN, d13);

  TEARDOWN();
}


TEST(frinta) {
  INIT_V8();
  SETUP();

  START();
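  // Frinta rounds to nearest with ties away from zero: 1.5 -> 2.0,
  // 2.5 -> 3.0 and -2.5 -> -3.0.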
  __ Fmov(s16, 1.0);
  __ Fmov(s17, 1.1);
  __ Fmov(s18, 1.5);
  __ Fmov(s19, 1.9);
  __ Fmov(s20, 2.5);
  __ Fmov(s21, -1.5);
  __ Fmov(s22, -2.5);
  __ Fmov(s23, kFP32PositiveInfinity);
  __ Fmov(s24, kFP32NegativeInfinity);
  __ Fmov(s25, 0.0);
  __ Fmov(s26, -0.0);

  __ Frinta(s0, s16);
  __ Frinta(s1, s17);
  __ Frinta(s2, s18);
  __ Frinta(s3, s19);
  __ Frinta(s4, s20);
  __ Frinta(s5, s21);
  __ Frinta(s6, s22);
  __ Frinta(s7, s23);
  __ Frinta(s8, s24);
  __ Frinta(s9, s25);
  __ Frinta(s10, s26);

  __ Fmov(d16, 1.0);
  __ Fmov(d17, 1.1);
  __ Fmov(d18, 1.5);
  __ Fmov(d19, 1.9);
  __ Fmov(d20, 2.5);
  __ Fmov(d21, -1.5);
  __ Fmov(d22, -2.5);
  __ Fmov(d23, kFP64PositiveInfinity);
  __ Fmov(d24, kFP64NegativeInfinity);
  __ Fmov(d25, 0.0);
  __ Fmov(d26, -0.0);

  __ Frinta(d11, d16);
  __ Frinta(d12, d17);
  __ Frinta(d13, d18);
  __ Frinta(d14, d19);
  __ Frinta(d15, d20);
  __ Frinta(d16, d21);
  __ Frinta(d17, d22);
  __ Frinta(d18, d23);
  __ Frinta(d19, d24);
  __ Frinta(d20, d25);
  __ Frinta(d21, d26);
  END();

  RUN();

  ASSERT_EQUAL_FP32(1.0, s0);
  ASSERT_EQUAL_FP32(1.0, s1);
  ASSERT_EQUAL_FP32(2.0, s2);
  ASSERT_EQUAL_FP32(2.0, s3);
  ASSERT_EQUAL_FP32(3.0, s4);
  ASSERT_EQUAL_FP32(-2.0, s5);
  ASSERT_EQUAL_FP32(-3.0, s6);
  ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s7);
  ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s8);
  ASSERT_EQUAL_FP32(0.0, s9);
  ASSERT_EQUAL_FP32(-0.0, s10);
  ASSERT_EQUAL_FP64(1.0, d11);
  ASSERT_EQUAL_FP64(1.0, d12);
  ASSERT_EQUAL_FP64(2.0, d13);
  ASSERT_EQUAL_FP64(2.0, d14);
  ASSERT_EQUAL_FP64(3.0, d15);
  ASSERT_EQUAL_FP64(-2.0, d16);
  ASSERT_EQUAL_FP64(-3.0, d17);
  ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d18);
  ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d19);
  ASSERT_EQUAL_FP64(0.0, d20);
  ASSERT_EQUAL_FP64(-0.0, d21);

  TEARDOWN();
}


TEST(frintn) {
  INIT_V8();
  SETUP();

  START();
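  // Frintn rounds to nearest with ties to even: 1.5 -> 2.0, but 2.5 -> 2.0
  // and -2.5 -> -2.0.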
  __ Fmov(s16, 1.0);
  __ Fmov(s17, 1.1);
  __ Fmov(s18, 1.5);
  __ Fmov(s19, 1.9);
  __ Fmov(s20, 2.5);
  __ Fmov(s21, -1.5);
  __ Fmov(s22, -2.5);
  __ Fmov(s23, kFP32PositiveInfinity);
  __ Fmov(s24, kFP32NegativeInfinity);
  __ Fmov(s25, 0.0);
  __ Fmov(s26, -0.0);

  __ Frintn(s0, s16);
  __ Frintn(s1, s17);
  __ Frintn(s2, s18);
  __ Frintn(s3, s19);
  __ Frintn(s4, s20);
  __ Frintn(s5, s21);
  __ Frintn(s6, s22);
  __ Frintn(s7, s23);
  __ Frintn(s8, s24);
  __ Frintn(s9, s25);
  __ Frintn(s10, s26);

  __ Fmov(d16, 1.0);
  __ Fmov(d17, 1.1);
  __ Fmov(d18, 1.5);
  __ Fmov(d19, 1.9);
  __ Fmov(d20, 2.5);
  __ Fmov(d21, -1.5);
  __ Fmov(d22, -2.5);
  __ Fmov(d23, kFP64PositiveInfinity);
  __ Fmov(d24, kFP64NegativeInfinity);
  __ Fmov(d25, 0.0);
  __ Fmov(d26, -0.0);

  __ Frintn(d11, d16);
  __ Frintn(d12, d17);
  __ Frintn(d13, d18);
  __ Frintn(d14, d19);
  __ Frintn(d15, d20);
  __ Frintn(d16, d21);
  __ Frintn(d17, d22);
  __ Frintn(d18, d23);
  __ Frintn(d19, d24);
  __ Frintn(d20, d25);
  __ Frintn(d21, d26);
  END();

  RUN();

  ASSERT_EQUAL_FP32(1.0, s0);
  ASSERT_EQUAL_FP32(1.0, s1);
  ASSERT_EQUAL_FP32(2.0, s2);
  ASSERT_EQUAL_FP32(2.0, s3);
  ASSERT_EQUAL_FP32(2.0, s4);
  ASSERT_EQUAL_FP32(-2.0, s5);
  ASSERT_EQUAL_FP32(-2.0, s6);
  ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s7);
  ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s8);
  ASSERT_EQUAL_FP32(0.0, s9);
  ASSERT_EQUAL_FP32(-0.0, s10);
  ASSERT_EQUAL_FP64(1.0, d11);
  ASSERT_EQUAL_FP64(1.0, d12);
  ASSERT_EQUAL_FP64(2.0, d13);
  ASSERT_EQUAL_FP64(2.0, d14);
  ASSERT_EQUAL_FP64(2.0, d15);
  ASSERT_EQUAL_FP64(-2.0, d16);
  ASSERT_EQUAL_FP64(-2.0, d17);
  ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d18);
  ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d19);
  ASSERT_EQUAL_FP64(0.0, d20);
  ASSERT_EQUAL_FP64(-0.0, d21);

  TEARDOWN();
}


TEST(frintz) {
  INIT_V8();
  SETUP();

  START();
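  // Frintz rounds toward zero (truncation): 1.9 -> 1.0 and -1.5 -> -1.0.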
  __ Fmov(s16, 1.0);
  __ Fmov(s17, 1.1);
  __ Fmov(s18, 1.5);
  __ Fmov(s19, 1.9);
  __ Fmov(s20, 2.5);
  __ Fmov(s21, -1.5);
  __ Fmov(s22, -2.5);
  __ Fmov(s23, kFP32PositiveInfinity);
  __ Fmov(s24, kFP32NegativeInfinity);
  __ Fmov(s25, 0.0);
  __ Fmov(s26, -0.0);

  __ Frintz(s0, s16);
  __ Frintz(s1, s17);
  __ Frintz(s2, s18);
  __ Frintz(s3, s19);
  __ Frintz(s4, s20);
  __ Frintz(s5, s21);
  __ Frintz(s6, s22);
  __ Frintz(s7, s23);
  __ Frintz(s8, s24);
  __ Frintz(s9, s25);
  __ Frintz(s10, s26);

  __ Fmov(d16, 1.0);
  __ Fmov(d17, 1.1);
  __ Fmov(d18, 1.5);
  __ Fmov(d19, 1.9);
  __ Fmov(d20, 2.5);
  __ Fmov(d21, -1.5);
  __ Fmov(d22, -2.5);
  __ Fmov(d23, kFP64PositiveInfinity);
  __ Fmov(d24, kFP64NegativeInfinity);
  __ Fmov(d25, 0.0);
  __ Fmov(d26, -0.0);

  __ Frintz(d11, d16);
  __ Frintz(d12, d17);
  __ Frintz(d13, d18);
  __ Frintz(d14, d19);
  __ Frintz(d15, d20);
  __ Frintz(d16, d21);
  __ Frintz(d17, d22);
  __ Frintz(d18, d23);
  __ Frintz(d19, d24);
  __ Frintz(d20, d25);
  __ Frintz(d21, d26);
  END();

  RUN();

  ASSERT_EQUAL_FP32(1.0, s0);
  ASSERT_EQUAL_FP32(1.0, s1);
  ASSERT_EQUAL_FP32(1.0, s2);
  ASSERT_EQUAL_FP32(1.0, s3);
  ASSERT_EQUAL_FP32(2.0, s4);
  ASSERT_EQUAL_FP32(-1.0, s5);
  ASSERT_EQUAL_FP32(-2.0, s6);
  ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s7);
  ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s8);
  ASSERT_EQUAL_FP32(0.0, s9);
  ASSERT_EQUAL_FP32(-0.0, s10);
  ASSERT_EQUAL_FP64(1.0, d11);
  ASSERT_EQUAL_FP64(1.0, d12);
  ASSERT_EQUAL_FP64(1.0, d13);
  ASSERT_EQUAL_FP64(1.0, d14);
  ASSERT_EQUAL_FP64(2.0, d15);
  ASSERT_EQUAL_FP64(-1.0, d16);
  ASSERT_EQUAL_FP64(-2.0, d17);
  ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d18);
  ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d19);
  ASSERT_EQUAL_FP64(0.0, d20);
  ASSERT_EQUAL_FP64(-0.0, d21);

  TEARDOWN();
}


TEST(fcvt_ds) {
  INIT_V8();
  SETUP();

  START();
  __ Fmov(s16, 1.0);
  __ Fmov(s17, 1.1);
  __ Fmov(s18, 1.5);
  __ Fmov(s19, 1.9);
  __ Fmov(s20, 2.5);
  __ Fmov(s21, -1.5);
  __ Fmov(s22, -2.5);
  __ Fmov(s23, kFP32PositiveInfinity);
  __ Fmov(s24, kFP32NegativeInfinity);
  __ Fmov(s25, 0.0);
  __ Fmov(s26, -0.0);
  __ Fmov(s27, FLT_MAX);
  __ Fmov(s28, FLT_MIN);
  __ Fmov(s29, rawbits_to_float(0x7fc12345));  // Quiet NaN.
  __ Fmov(s30, rawbits_to_float(0x7f812345));  // Signalling NaN.

  __ Fcvt(d0, s16);
  __ Fcvt(d1, s17);
  __ Fcvt(d2, s18);
  __ Fcvt(d3, s19);
  __ Fcvt(d4, s20);
  __ Fcvt(d5, s21);
  __ Fcvt(d6, s22);
  __ Fcvt(d7, s23);
  __ Fcvt(d8, s24);
  __ Fcvt(d9, s25);
  __ Fcvt(d10, s26);
  __ Fcvt(d11, s27);
  __ Fcvt(d12, s28);
  __ Fcvt(d13, s29);
  __ Fcvt(d14, s30);
  END();

  RUN();

  ASSERT_EQUAL_FP64(1.0f, d0);
  ASSERT_EQUAL_FP64(1.1f, d1);
  ASSERT_EQUAL_FP64(1.5f, d2);
  ASSERT_EQUAL_FP64(1.9f, d3);
  ASSERT_EQUAL_FP64(2.5f, d4);
  ASSERT_EQUAL_FP64(-1.5f, d5);
  ASSERT_EQUAL_FP64(-2.5f, d6);
  ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d7);
  ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d8);
  ASSERT_EQUAL_FP64(0.0f, d9);
  ASSERT_EQUAL_FP64(-0.0f, d10);
  ASSERT_EQUAL_FP64(FLT_MAX, d11);
  ASSERT_EQUAL_FP64(FLT_MIN, d12);

  // Check that the NaN payload is preserved according to A64 conversion
  // rules:
  //  - The sign bit is preserved.
  //  - The top bit of the mantissa is forced to 1 (making it a quiet NaN).
  //  - The remaining mantissa bits are copied until they run out.
  //  - The low-order bits that haven't already been assigned are set to 0.
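  // For example, s29 = 0x7fc12345 carries mantissa 0x412345; placing those
  // 23 bits at the top of the double's 52-bit mantissa gives
  // 0x7ff0000000000000 | (0x412345UL << 29) = 0x7ff82468a0000000. The
  // signalling NaN in s30 (0x7f812345) maps to the same double once its
  // quiet bit has been forced to 1.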
  ASSERT_EQUAL_FP64(rawbits_to_double(0x7ff82468a0000000), d13);
  ASSERT_EQUAL_FP64(rawbits_to_double(0x7ff82468a0000000), d14);

  TEARDOWN();
}


TEST(fcvt_sd) {
  INIT_V8();
  // There are a huge number of corner-cases to check, so this test iterates
  // through a list. The list is then negated and checked again (since the
  // sign is irrelevant in ties-to-even rounding), so the list shouldn't
  // include any negative values.
  //
  // Note that this test only checks ties-to-even rounding, because that is
  // all that the simulator supports.
  struct {double in; float expected;} test[] = {
    // Check some simple conversions.
    {0.0, 0.0f},
    {1.0, 1.0f},
    {1.5, 1.5f},
    {2.0, 2.0f},
    {FLT_MAX, FLT_MAX},
    //  - The smallest normalized float.
    {pow(2.0, -126), powf(2, -126)},
    //  - Normal floats that need (ties-to-even) rounding.
    //    For normalized numbers:
    //    bit 29 (0x0000000020000000) is the lowest-order bit which will
    //    fit in the float's mantissa.
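    //    For example, 0x3ff0000010000000 sits exactly halfway between the
    //    representable floats 0x3f800000 and 0x3f800001, so it rounds to the
    //    one with an even (zero) low mantissa bit: 0x3f800000. Entries just
    //    above or below a halfway point round to the nearest float.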
    {rawbits_to_double(0x3ff0000000000000), rawbits_to_float(0x3f800000)},
    {rawbits_to_double(0x3ff0000000000001), rawbits_to_float(0x3f800000)},
    {rawbits_to_double(0x3ff0000010000000), rawbits_to_float(0x3f800000)},
    {rawbits_to_double(0x3ff0000010000001), rawbits_to_float(0x3f800001)},
    {rawbits_to_double(0x3ff0000020000000), rawbits_to_float(0x3f800001)},
    {rawbits_to_double(0x3ff0000020000001), rawbits_to_float(0x3f800001)},
    {rawbits_to_double(0x3ff0000030000000), rawbits_to_float(0x3f800002)},
    {rawbits_to_double(0x3ff0000030000001), rawbits_to_float(0x3f800002)},
    {rawbits_to_double(0x3ff0000040000000), rawbits_to_float(0x3f800002)},
    {rawbits_to_double(0x3ff0000040000001), rawbits_to_float(0x3f800002)},
    {rawbits_to_double(0x3ff0000050000000), rawbits_to_float(0x3f800002)},
    {rawbits_to_double(0x3ff0000050000001), rawbits_to_float(0x3f800003)},
    {rawbits_to_double(0x3ff0000060000000), rawbits_to_float(0x3f800003)},
    //  - A mantissa that overflows into the exponent during rounding.
    {rawbits_to_double(0x3feffffff0000000), rawbits_to_float(0x3f800000)},
    //  - The largest double that rounds to a normal float.
    {rawbits_to_double(0x47efffffefffffff), rawbits_to_float(0x7f7fffff)},

    // Doubles that are too big for a float.
    {kFP64PositiveInfinity, kFP32PositiveInfinity},
    {DBL_MAX, kFP32PositiveInfinity},
    //  - The smallest exponent that's too big for a float.
    {pow(2.0, 128), kFP32PositiveInfinity},
    //  - This exponent is in range, but the value rounds to infinity.
    {rawbits_to_double(0x47effffff0000000), kFP32PositiveInfinity},

    // Doubles that are too small for a float.
    //  - The smallest (subnormal) double.
    {DBL_MIN, 0.0},
    //  - The largest double which is too small for a subnormal float.
    {rawbits_to_double(0x3690000000000000), rawbits_to_float(0x00000000)},

    // Normal doubles that become subnormal floats.
    //  - The largest subnormal float.
    {rawbits_to_double(0x380fffffc0000000), rawbits_to_float(0x007fffff)},
    //  - The smallest subnormal float.
    {rawbits_to_double(0x36a0000000000000), rawbits_to_float(0x00000001)},
    //  - Subnormal floats that need (ties-to-even) rounding.
    //    For these subnormals:
    //    bit 34 (0x0000000400000000) is the lowest-order bit which will
    //    fit in the float's mantissa.
    {rawbits_to_double(0x37c159e000000000), rawbits_to_float(0x00045678)},
    {rawbits_to_double(0x37c159e000000001), rawbits_to_float(0x00045678)},
    {rawbits_to_double(0x37c159e200000000), rawbits_to_float(0x00045678)},
    {rawbits_to_double(0x37c159e200000001), rawbits_to_float(0x00045679)},
    {rawbits_to_double(0x37c159e400000000), rawbits_to_float(0x00045679)},
    {rawbits_to_double(0x37c159e400000001), rawbits_to_float(0x00045679)},
    {rawbits_to_double(0x37c159e600000000), rawbits_to_float(0x0004567a)},
    {rawbits_to_double(0x37c159e600000001), rawbits_to_float(0x0004567a)},
    {rawbits_to_double(0x37c159e800000000), rawbits_to_float(0x0004567a)},
    {rawbits_to_double(0x37c159e800000001), rawbits_to_float(0x0004567a)},
    {rawbits_to_double(0x37c159ea00000000), rawbits_to_float(0x0004567a)},
    {rawbits_to_double(0x37c159ea00000001), rawbits_to_float(0x0004567b)},
    {rawbits_to_double(0x37c159ec00000000), rawbits_to_float(0x0004567b)},
    //  - The smallest double which rounds up to become a subnormal float.
    {rawbits_to_double(0x3690000000000001), rawbits_to_float(0x00000001)},

    // Check NaN payload preservation.
    {rawbits_to_double(0x7ff82468a0000000), rawbits_to_float(0x7fc12345)},
    {rawbits_to_double(0x7ff82468bfffffff), rawbits_to_float(0x7fc12345)},
    //  - Signalling NaNs become quiet NaNs.
    {rawbits_to_double(0x7ff02468a0000000), rawbits_to_float(0x7fc12345)},
    {rawbits_to_double(0x7ff02468bfffffff), rawbits_to_float(0x7fc12345)},
    {rawbits_to_double(0x7ff000001fffffff), rawbits_to_float(0x7fc00000)},
  };
  int count = sizeof(test) / sizeof(test[0]);

  for (int i = 0; i < count; i++) {
    double in = test[i].in;
    float expected = test[i].expected;

    // We only expect positive input.
    ASSERT(std::signbit(in) == 0);
    ASSERT(std::signbit(expected) == 0);

    SETUP();
    START();

    __ Fmov(d10, in);
    __ Fcvt(s20, d10);

    __ Fmov(d11, -in);
    __ Fcvt(s21, d11);

    END();
    RUN();
    ASSERT_EQUAL_FP32(expected, s20);
    ASSERT_EQUAL_FP32(-expected, s21);
    TEARDOWN();
  }
}


TEST(fcvtas) {
  INIT_V8();
  SETUP();

  START();
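  // Fcvtas converts to a signed integer, rounding to nearest with ties away
  // from zero: 2.5 -> 3 and -2.5 -> -3. Out-of-range values saturate to the
  // INT_MAX / INT_MIN of the destination width.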
  __ Fmov(s0, 1.0);
  __ Fmov(s1, 1.1);
  __ Fmov(s2, 2.5);
  __ Fmov(s3, -2.5);
  __ Fmov(s4, kFP32PositiveInfinity);
  __ Fmov(s5, kFP32NegativeInfinity);
  __ Fmov(s6, 0x7fffff80);  // Largest float < INT32_MAX.
  __ Fneg(s7, s6);  // Smallest float > INT32_MIN.
  __ Fmov(d8, 1.0);
  __ Fmov(d9, 1.1);
  __ Fmov(d10, 2.5);
  __ Fmov(d11, -2.5);
  __ Fmov(d12, kFP64PositiveInfinity);
  __ Fmov(d13, kFP64NegativeInfinity);
  __ Fmov(d14, kWMaxInt - 1);
  __ Fmov(d15, kWMinInt + 1);
  __ Fmov(s17, 1.1);
  __ Fmov(s18, 2.5);
  __ Fmov(s19, -2.5);
  __ Fmov(s20, kFP32PositiveInfinity);
  __ Fmov(s21, kFP32NegativeInfinity);
  __ Fmov(s22, 0x7fffff8000000000UL);  // Largest float < INT64_MAX.
  __ Fneg(s23, s22);  // Smallest float > INT64_MIN.
  __ Fmov(d24, 1.1);
  __ Fmov(d25, 2.5);
  __ Fmov(d26, -2.5);
  __ Fmov(d27, kFP64PositiveInfinity);
  __ Fmov(d28, kFP64NegativeInfinity);
  __ Fmov(d29, 0x7ffffffffffffc00UL);  // Largest double < INT64_MAX.
  __ Fneg(d30, d29);  // Smallest double > INT64_MIN.

  __ Fcvtas(w0, s0);
  __ Fcvtas(w1, s1);
  __ Fcvtas(w2, s2);
  __ Fcvtas(w3, s3);
  __ Fcvtas(w4, s4);
  __ Fcvtas(w5, s5);
  __ Fcvtas(w6, s6);
  __ Fcvtas(w7, s7);
  __ Fcvtas(w8, d8);
  __ Fcvtas(w9, d9);
  __ Fcvtas(w10, d10);
  __ Fcvtas(w11, d11);
  __ Fcvtas(w12, d12);
  __ Fcvtas(w13, d13);
  __ Fcvtas(w14, d14);
  __ Fcvtas(w15, d15);
  __ Fcvtas(x17, s17);
  __ Fcvtas(x18, s18);
  __ Fcvtas(x19, s19);
  __ Fcvtas(x20, s20);
  __ Fcvtas(x21, s21);
  __ Fcvtas(x22, s22);
  __ Fcvtas(x23, s23);
  __ Fcvtas(x24, d24);
  __ Fcvtas(x25, d25);
  __ Fcvtas(x26, d26);
  __ Fcvtas(x27, d27);
  __ Fcvtas(x28, d28);
  __ Fcvtas(x29, d29);
  __ Fcvtas(x30, d30);
  END();

  RUN();

  ASSERT_EQUAL_64(1, x0);
  ASSERT_EQUAL_64(1, x1);
  ASSERT_EQUAL_64(3, x2);
  ASSERT_EQUAL_64(0xfffffffd, x3);
  ASSERT_EQUAL_64(0x7fffffff, x4);
  ASSERT_EQUAL_64(0x80000000, x5);
  ASSERT_EQUAL_64(0x7fffff80, x6);
  ASSERT_EQUAL_64(0x80000080, x7);
  ASSERT_EQUAL_64(1, x8);
  ASSERT_EQUAL_64(1, x9);
  ASSERT_EQUAL_64(3, x10);
  ASSERT_EQUAL_64(0xfffffffd, x11);
  ASSERT_EQUAL_64(0x7fffffff, x12);
  ASSERT_EQUAL_64(0x80000000, x13);
  ASSERT_EQUAL_64(0x7ffffffe, x14);
  ASSERT_EQUAL_64(0x80000001, x15);
  ASSERT_EQUAL_64(1, x17);
  ASSERT_EQUAL_64(3, x18);
  ASSERT_EQUAL_64(0xfffffffffffffffdUL, x19);
  ASSERT_EQUAL_64(0x7fffffffffffffffUL, x20);
  ASSERT_EQUAL_64(0x8000000000000000UL, x21);
  ASSERT_EQUAL_64(0x7fffff8000000000UL, x22);
  ASSERT_EQUAL_64(0x8000008000000000UL, x23);
  ASSERT_EQUAL_64(1, x24);
  ASSERT_EQUAL_64(3, x25);
  ASSERT_EQUAL_64(0xfffffffffffffffdUL, x26);
  ASSERT_EQUAL_64(0x7fffffffffffffffUL, x27);
  ASSERT_EQUAL_64(0x8000000000000000UL, x28);
  ASSERT_EQUAL_64(0x7ffffffffffffc00UL, x29);
  ASSERT_EQUAL_64(0x8000000000000400UL, x30);

  TEARDOWN();
}


TEST(fcvtau) {
  INIT_V8();
  SETUP();

  START();
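  // Fcvtau is the unsigned counterpart: ties still round away from zero, but
  // negative inputs saturate to 0 and large values clamp to the UINT_MAX of
  // the destination width.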
  __ Fmov(s0, 1.0);
  __ Fmov(s1, 1.1);
  __ Fmov(s2, 2.5);
  __ Fmov(s3, -2.5);
  __ Fmov(s4, kFP32PositiveInfinity);
  __ Fmov(s5, kFP32NegativeInfinity);
  __ Fmov(s6, 0xffffff00);  // Largest float < UINT32_MAX.
  __ Fmov(d8, 1.0);
  __ Fmov(d9, 1.1);
  __ Fmov(d10, 2.5);
  __ Fmov(d11, -2.5);
  __ Fmov(d12, kFP64PositiveInfinity);
  __ Fmov(d13, kFP64NegativeInfinity);
  __ Fmov(d14, 0xfffffffe);
  __ Fmov(s16, 1.0);
  __ Fmov(s17, 1.1);
  __ Fmov(s18, 2.5);
  __ Fmov(s19, -2.5);
  __ Fmov(s20, kFP32PositiveInfinity);
  __ Fmov(s21, kFP32NegativeInfinity);
  __ Fmov(s22, 0xffffff0000000000UL);  // Largest float < UINT64_MAX.
  __ Fmov(d24, 1.1);
  __ Fmov(d25, 2.5);
  __ Fmov(d26, -2.5);
  __ Fmov(d27, kFP64PositiveInfinity);
  __ Fmov(d28, kFP64NegativeInfinity);
  __ Fmov(d29, 0xfffffffffffff800UL);  // Largest double < UINT64_MAX.
  __ Fmov(s30, 0x100000000UL);

  __ Fcvtau(w0, s0);
  __ Fcvtau(w1, s1);
  __ Fcvtau(w2, s2);
  __ Fcvtau(w3, s3);
  __ Fcvtau(w4, s4);
  __ Fcvtau(w5, s5);
  __ Fcvtau(w6, s6);
  __ Fcvtau(w8, d8);
  __ Fcvtau(w9, d9);
  __ Fcvtau(w10, d10);
  __ Fcvtau(w11, d11);
  __ Fcvtau(w12, d12);
  __ Fcvtau(w13, d13);
  __ Fcvtau(w14, d14);
  __ Fcvtau(w15, d15);
  __ Fcvtau(x16, s16);
  __ Fcvtau(x17, s17);
  __ Fcvtau(x18, s18);
  __ Fcvtau(x19, s19);
  __ Fcvtau(x20, s20);
  __ Fcvtau(x21, s21);
  __ Fcvtau(x22, s22);
  __ Fcvtau(x24, d24);
  __ Fcvtau(x25, d25);
  __ Fcvtau(x26, d26);
  __ Fcvtau(x27, d27);
  __ Fcvtau(x28, d28);
  __ Fcvtau(x29, d29);
  __ Fcvtau(w30, s30);
  END();

  RUN();

  ASSERT_EQUAL_64(1, x0);
  ASSERT_EQUAL_64(1, x1);
  ASSERT_EQUAL_64(3, x2);
  ASSERT_EQUAL_64(0, x3);
  ASSERT_EQUAL_64(0xffffffff, x4);
  ASSERT_EQUAL_64(0, x5);
  ASSERT_EQUAL_64(0xffffff00, x6);
  ASSERT_EQUAL_64(1, x8);
  ASSERT_EQUAL_64(1, x9);
  ASSERT_EQUAL_64(3, x10);
  ASSERT_EQUAL_64(0, x11);
  ASSERT_EQUAL_64(0xffffffff, x12);
  ASSERT_EQUAL_64(0, x13);
  ASSERT_EQUAL_64(0xfffffffe, x14);
  ASSERT_EQUAL_64(1, x16);
  ASSERT_EQUAL_64(1, x17);
  ASSERT_EQUAL_64(3, x18);
  ASSERT_EQUAL_64(0, x19);
  ASSERT_EQUAL_64(0xffffffffffffffffUL, x20);
  ASSERT_EQUAL_64(0, x21);
  ASSERT_EQUAL_64(0xffffff0000000000UL, x22);
  ASSERT_EQUAL_64(1, x24);
  ASSERT_EQUAL_64(3, x25);
  ASSERT_EQUAL_64(0, x26);
  ASSERT_EQUAL_64(0xffffffffffffffffUL, x27);
  ASSERT_EQUAL_64(0, x28);
  ASSERT_EQUAL_64(0xfffffffffffff800UL, x29);
  ASSERT_EQUAL_64(0xffffffff, x30);

  TEARDOWN();
}


TEST(fcvtms) {
  INIT_V8();
  SETUP();

  START();
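  // Fcvtms rounds toward minus infinity (floor): 1.5 -> 1 and -1.5 -> -2.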
  __ Fmov(s0, 1.0);
  __ Fmov(s1, 1.1);
  __ Fmov(s2, 1.5);
  __ Fmov(s3, -1.5);
  __ Fmov(s4, kFP32PositiveInfinity);
  __ Fmov(s5, kFP32NegativeInfinity);
  __ Fmov(s6, 0x7fffff80);  // Largest float < INT32_MAX.
  __ Fneg(s7, s6);  // Smallest float > INT32_MIN.
  __ Fmov(d8, 1.0);
  __ Fmov(d9, 1.1);
  __ Fmov(d10, 1.5);
  __ Fmov(d11, -1.5);
  __ Fmov(d12, kFP64PositiveInfinity);
  __ Fmov(d13, kFP64NegativeInfinity);
  __ Fmov(d14, kWMaxInt - 1);
  __ Fmov(d15, kWMinInt + 1);
  __ Fmov(s17, 1.1);
  __ Fmov(s18, 1.5);
  __ Fmov(s19, -1.5);
  __ Fmov(s20, kFP32PositiveInfinity);
  __ Fmov(s21, kFP32NegativeInfinity);
  __ Fmov(s22, 0x7fffff8000000000UL);  // Largest float < INT64_MAX.
  __ Fneg(s23, s22);  // Smallest float > INT64_MIN.
  __ Fmov(d24, 1.1);
  __ Fmov(d25, 1.5);
  __ Fmov(d26, -1.5);
  __ Fmov(d27, kFP64PositiveInfinity);
  __ Fmov(d28, kFP64NegativeInfinity);
  __ Fmov(d29, 0x7ffffffffffffc00UL);  // Largest double < INT64_MAX.
  __ Fneg(d30, d29);  // Smallest double > INT64_MIN.

  __ Fcvtms(w0, s0);
  __ Fcvtms(w1, s1);
  __ Fcvtms(w2, s2);
  __ Fcvtms(w3, s3);
  __ Fcvtms(w4, s4);
  __ Fcvtms(w5, s5);
  __ Fcvtms(w6, s6);
  __ Fcvtms(w7, s7);
  __ Fcvtms(w8, d8);
  __ Fcvtms(w9, d9);
  __ Fcvtms(w10, d10);
  __ Fcvtms(w11, d11);
  __ Fcvtms(w12, d12);
  __ Fcvtms(w13, d13);
  __ Fcvtms(w14, d14);
  __ Fcvtms(w15, d15);
  __ Fcvtms(x17, s17);
  __ Fcvtms(x18, s18);
  __ Fcvtms(x19, s19);
  __ Fcvtms(x20, s20);
  __ Fcvtms(x21, s21);
  __ Fcvtms(x22, s22);
  __ Fcvtms(x23, s23);
  __ Fcvtms(x24, d24);
  __ Fcvtms(x25, d25);
  __ Fcvtms(x26, d26);
  __ Fcvtms(x27, d27);
  __ Fcvtms(x28, d28);
  __ Fcvtms(x29, d29);
  __ Fcvtms(x30, d30);
  END();

  RUN();

  ASSERT_EQUAL_64(1, x0);
  ASSERT_EQUAL_64(1, x1);
  ASSERT_EQUAL_64(1, x2);
  ASSERT_EQUAL_64(0xfffffffe, x3);
  ASSERT_EQUAL_64(0x7fffffff, x4);
  ASSERT_EQUAL_64(0x80000000, x5);
  ASSERT_EQUAL_64(0x7fffff80, x6);
  ASSERT_EQUAL_64(0x80000080, x7);
  ASSERT_EQUAL_64(1, x8);
  ASSERT_EQUAL_64(1, x9);
  ASSERT_EQUAL_64(1, x10);
  ASSERT_EQUAL_64(0xfffffffe, x11);
  ASSERT_EQUAL_64(0x7fffffff, x12);
  ASSERT_EQUAL_64(0x80000000, x13);
  ASSERT_EQUAL_64(0x7ffffffe, x14);
  ASSERT_EQUAL_64(0x80000001, x15);
  ASSERT_EQUAL_64(1, x17);
  ASSERT_EQUAL_64(1, x18);
  ASSERT_EQUAL_64(0xfffffffffffffffeUL, x19);
  ASSERT_EQUAL_64(0x7fffffffffffffffUL, x20);
  ASSERT_EQUAL_64(0x8000000000000000UL, x21);
  ASSERT_EQUAL_64(0x7fffff8000000000UL, x22);
  ASSERT_EQUAL_64(0x8000008000000000UL, x23);
  ASSERT_EQUAL_64(1, x24);
  ASSERT_EQUAL_64(1, x25);
  ASSERT_EQUAL_64(0xfffffffffffffffeUL, x26);
  ASSERT_EQUAL_64(0x7fffffffffffffffUL, x27);
  ASSERT_EQUAL_64(0x8000000000000000UL, x28);
  ASSERT_EQUAL_64(0x7ffffffffffffc00UL, x29);
  ASSERT_EQUAL_64(0x8000000000000400UL, x30);

  TEARDOWN();
}


TEST(fcvtmu) {
  INIT_V8();
  SETUP();

  START();
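  // Fcvtmu also rounds toward minus infinity, so every negative input
  // (including -infinity) floors to a value that saturates to 0 in the
  // unsigned destination.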
  __ Fmov(s0, 1.0);
  __ Fmov(s1, 1.1);
  __ Fmov(s2, 1.5);
  __ Fmov(s3, -1.5);
  __ Fmov(s4, kFP32PositiveInfinity);
  __ Fmov(s5, kFP32NegativeInfinity);
  __ Fmov(s6, 0x7fffff80);  // Largest float < INT32_MAX.
  __ Fneg(s7, s6);  // Smallest float > INT32_MIN.
  __ Fmov(d8, 1.0);
  __ Fmov(d9, 1.1);
  __ Fmov(d10, 1.5);
  __ Fmov(d11, -1.5);
  __ Fmov(d12, kFP64PositiveInfinity);
  __ Fmov(d13, kFP64NegativeInfinity);
  __ Fmov(d14, kWMaxInt - 1);
  __ Fmov(d15, kWMinInt + 1);
  __ Fmov(s17, 1.1);
  __ Fmov(s18, 1.5);
  __ Fmov(s19, -1.5);
  __ Fmov(s20, kFP32PositiveInfinity);
  __ Fmov(s21, kFP32NegativeInfinity);
  __ Fmov(s22, 0x7fffff8000000000UL);  // Largest float < INT64_MAX.
  __ Fneg(s23, s22);  // Smallest float > INT64_MIN.
  __ Fmov(d24, 1.1);
  __ Fmov(d25, 1.5);
  __ Fmov(d26, -1.5);
  __ Fmov(d27, kFP64PositiveInfinity);
  __ Fmov(d28, kFP64NegativeInfinity);
  __ Fmov(d29, 0x7ffffffffffffc00UL);  // Largest double < INT64_MAX.
  __ Fneg(d30, d29);  // Smallest double > INT64_MIN.

  __ Fcvtmu(w0, s0);
  __ Fcvtmu(w1, s1);
  __ Fcvtmu(w2, s2);
  __ Fcvtmu(w3, s3);
  __ Fcvtmu(w4, s4);
  __ Fcvtmu(w5, s5);
  __ Fcvtmu(w6, s6);
  __ Fcvtmu(w7, s7);
  __ Fcvtmu(w8, d8);
  __ Fcvtmu(w9, d9);
  __ Fcvtmu(w10, d10);
  __ Fcvtmu(w11, d11);
  __ Fcvtmu(w12, d12);
  __ Fcvtmu(w13, d13);
  __ Fcvtmu(w14, d14);
  __ Fcvtmu(x17, s17);
  __ Fcvtmu(x18, s18);
  __ Fcvtmu(x19, s19);
  __ Fcvtmu(x20, s20);
  __ Fcvtmu(x21, s21);
  __ Fcvtmu(x22, s22);
  __ Fcvtmu(x23, s23);
  __ Fcvtmu(x24, d24);
  __ Fcvtmu(x25, d25);
  __ Fcvtmu(x26, d26);
  __ Fcvtmu(x27, d27);
  __ Fcvtmu(x28, d28);
  __ Fcvtmu(x29, d29);
  __ Fcvtmu(x30, d30);
  END();

  RUN();

  ASSERT_EQUAL_64(1, x0);
  ASSERT_EQUAL_64(1, x1);
  ASSERT_EQUAL_64(1, x2);
  ASSERT_EQUAL_64(0, x3);
  ASSERT_EQUAL_64(0xffffffff, x4);
  ASSERT_EQUAL_64(0, x5);
  ASSERT_EQUAL_64(0x7fffff80, x6);
  ASSERT_EQUAL_64(0, x7);
  ASSERT_EQUAL_64(1, x8);
  ASSERT_EQUAL_64(1, x9);
  ASSERT_EQUAL_64(1, x10);
  ASSERT_EQUAL_64(0, x11);
  ASSERT_EQUAL_64(0xffffffff, x12);
  ASSERT_EQUAL_64(0, x13);
  ASSERT_EQUAL_64(0x7ffffffe, x14);
  ASSERT_EQUAL_64(1, x17);
  ASSERT_EQUAL_64(1, x18);
  ASSERT_EQUAL_64(0x0UL, x19);
  ASSERT_EQUAL_64(0xffffffffffffffffUL, x20);
  ASSERT_EQUAL_64(0x0UL, x21);
  ASSERT_EQUAL_64(0x7fffff8000000000UL, x22);
  ASSERT_EQUAL_64(0x0UL, x23);
  ASSERT_EQUAL_64(1, x24);
  ASSERT_EQUAL_64(1, x25);
  ASSERT_EQUAL_64(0x0UL, x26);
  ASSERT_EQUAL_64(0xffffffffffffffffUL, x27);
  ASSERT_EQUAL_64(0x0UL, x28);
  ASSERT_EQUAL_64(0x7ffffffffffffc00UL, x29);
  ASSERT_EQUAL_64(0x0UL, x30);

  TEARDOWN();
}


TEST(fcvtns) {
  INIT_V8();
  SETUP();

  START();
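  // Fcvtns rounds to nearest with ties to even: 1.5 -> 2 and -1.5 -> -2.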
  __ Fmov(s0, 1.0);
  __ Fmov(s1, 1.1);
  __ Fmov(s2, 1.5);
  __ Fmov(s3, -1.5);
  __ Fmov(s4, kFP32PositiveInfinity);
  __ Fmov(s5, kFP32NegativeInfinity);
  __ Fmov(s6, 0x7fffff80);  // Largest float < INT32_MAX.
  __ Fneg(s7, s6);  // Smallest float > INT32_MIN.
  __ Fmov(d8, 1.0);
  __ Fmov(d9, 1.1);
  __ Fmov(d10, 1.5);
  __ Fmov(d11, -1.5);
  __ Fmov(d12, kFP64PositiveInfinity);
  __ Fmov(d13, kFP64NegativeInfinity);
  __ Fmov(d14, kWMaxInt - 1);
  __ Fmov(d15, kWMinInt + 1);
  __ Fmov(s17, 1.1);
  __ Fmov(s18, 1.5);
  __ Fmov(s19, -1.5);
  __ Fmov(s20, kFP32PositiveInfinity);
  __ Fmov(s21, kFP32NegativeInfinity);
  __ Fmov(s22, 0x7fffff8000000000UL);  // Largest float < INT64_MAX.
  __ Fneg(s23, s22);  // Smallest float > INT64_MIN.
  __ Fmov(d24, 1.1);
  __ Fmov(d25, 1.5);
  __ Fmov(d26, -1.5);
  __ Fmov(d27, kFP64PositiveInfinity);
  __ Fmov(d28, kFP64NegativeInfinity);
  __ Fmov(d29, 0x7ffffffffffffc00UL);  // Largest double < INT64_MAX.
  __ Fneg(d30, d29);  // Smallest double > INT64_MIN.

  __ Fcvtns(w0, s0);
  __ Fcvtns(w1, s1);
  __ Fcvtns(w2, s2);
  __ Fcvtns(w3, s3);
  __ Fcvtns(w4, s4);
  __ Fcvtns(w5, s5);
  __ Fcvtns(w6, s6);
  __ Fcvtns(w7, s7);
  __ Fcvtns(w8, d8);
  __ Fcvtns(w9, d9);
  __ Fcvtns(w10, d10);
  __ Fcvtns(w11, d11);
  __ Fcvtns(w12, d12);
  __ Fcvtns(w13, d13);
  __ Fcvtns(w14, d14);
  __ Fcvtns(w15, d15);
  __ Fcvtns(x17, s17);
  __ Fcvtns(x18, s18);
  __ Fcvtns(x19, s19);
  __ Fcvtns(x20, s20);
  __ Fcvtns(x21, s21);
  __ Fcvtns(x22, s22);
  __ Fcvtns(x23, s23);
  __ Fcvtns(x24, d24);
  __ Fcvtns(x25, d25);
  __ Fcvtns(x26, d26);
  __ Fcvtns(x27, d27);
  // __ Fcvtns(x28, d28);
  __ Fcvtns(x29, d29);
  __ Fcvtns(x30, d30);
  END();

  RUN();

  ASSERT_EQUAL_64(1, x0);
  ASSERT_EQUAL_64(1, x1);
  ASSERT_EQUAL_64(2, x2);
  ASSERT_EQUAL_64(0xfffffffe, x3);
  ASSERT_EQUAL_64(0x7fffffff, x4);
  ASSERT_EQUAL_64(0x80000000, x5);
  ASSERT_EQUAL_64(0x7fffff80, x6);
  ASSERT_EQUAL_64(0x80000080, x7);
  ASSERT_EQUAL_64(1, x8);
  ASSERT_EQUAL_64(1, x9);
  ASSERT_EQUAL_64(2, x10);
  ASSERT_EQUAL_64(0xfffffffe, x11);
  ASSERT_EQUAL_64(0x7fffffff, x12);
  ASSERT_EQUAL_64(0x80000000, x13);
  ASSERT_EQUAL_64(0x7ffffffe, x14);
  ASSERT_EQUAL_64(0x80000001, x15);
  ASSERT_EQUAL_64(1, x17);
  ASSERT_EQUAL_64(2, x18);
  ASSERT_EQUAL_64(0xfffffffffffffffeUL, x19);
  ASSERT_EQUAL_64(0x7fffffffffffffffUL, x20);
  ASSERT_EQUAL_64(0x8000000000000000UL, x21);
  ASSERT_EQUAL_64(0x7fffff8000000000UL, x22);
  ASSERT_EQUAL_64(0x8000008000000000UL, x23);
  ASSERT_EQUAL_64(1, x24);
  ASSERT_EQUAL_64(2, x25);
  ASSERT_EQUAL_64(0xfffffffffffffffeUL, x26);
  ASSERT_EQUAL_64(0x7fffffffffffffffUL, x27);
  // ASSERT_EQUAL_64(0x8000000000000000UL, x28);
  ASSERT_EQUAL_64(0x7ffffffffffffc00UL, x29);
  ASSERT_EQUAL_64(0x8000000000000400UL, x30);

  TEARDOWN();
}


TEST(fcvtnu) {
  INIT_V8();
  SETUP();

  START();
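  // Fcvtnu rounds to nearest with ties to even, with negative inputs
  // saturating to 0 in the unsigned destination.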
  __ Fmov(s0, 1.0);
  __ Fmov(s1, 1.1);
  __ Fmov(s2, 1.5);
  __ Fmov(s3, -1.5);
  __ Fmov(s4, kFP32PositiveInfinity);
  __ Fmov(s5, kFP32NegativeInfinity);
  __ Fmov(s6, 0xffffff00);  // Largest float < UINT32_MAX.
  __ Fmov(d8, 1.0);
  __ Fmov(d9, 1.1);
  __ Fmov(d10, 1.5);
  __ Fmov(d11, -1.5);
  __ Fmov(d12, kFP64PositiveInfinity);
  __ Fmov(d13, kFP64NegativeInfinity);
  __ Fmov(d14, 0xfffffffe);
  __ Fmov(s16, 1.0);
  __ Fmov(s17, 1.1);
  __ Fmov(s18, 1.5);
  __ Fmov(s19, -1.5);
  __ Fmov(s20, kFP32PositiveInfinity);
  __ Fmov(s21, kFP32NegativeInfinity);
  __ Fmov(s22, 0xffffff0000000000UL);  // Largest float < UINT64_MAX.
  __ Fmov(d24, 1.1);
  __ Fmov(d25, 1.5);
  __ Fmov(d26, -1.5);
  __ Fmov(d27, kFP64PositiveInfinity);
  __ Fmov(d28, kFP64NegativeInfinity);
  __ Fmov(d29, 0xfffffffffffff800UL);  // Largest double < UINT64_MAX.
  __ Fmov(s30, 0x100000000UL);

  __ Fcvtnu(w0, s0);
  __ Fcvtnu(w1, s1);
  __ Fcvtnu(w2, s2);
  __ Fcvtnu(w3, s3);
  __ Fcvtnu(w4, s4);
  __ Fcvtnu(w5, s5);
  __ Fcvtnu(w6, s6);
  __ Fcvtnu(w8, d8);
  __ Fcvtnu(w9, d9);
  __ Fcvtnu(w10, d10);
  __ Fcvtnu(w11, d11);
  __ Fcvtnu(w12, d12);
  __ Fcvtnu(w13, d13);
  __ Fcvtnu(w14, d14);
  __ Fcvtnu(w15, d15);
  __ Fcvtnu(x16, s16);
  __ Fcvtnu(x17, s17);
  __ Fcvtnu(x18, s18);
  __ Fcvtnu(x19, s19);
  __ Fcvtnu(x20, s20);
  __ Fcvtnu(x21, s21);
  __ Fcvtnu(x22, s22);
  __ Fcvtnu(x24, d24);
  __ Fcvtnu(x25, d25);
  __ Fcvtnu(x26, d26);
  __ Fcvtnu(x27, d27);
  // __ Fcvtnu(x28, d28);
  __ Fcvtnu(x29, d29);
  __ Fcvtnu(w30, s30);
  END();

  RUN();

  ASSERT_EQUAL_64(1, x0);
  ASSERT_EQUAL_64(1, x1);
  ASSERT_EQUAL_64(2, x2);
  ASSERT_EQUAL_64(0, x3);
  ASSERT_EQUAL_64(0xffffffff, x4);
  ASSERT_EQUAL_64(0, x5);
  ASSERT_EQUAL_64(0xffffff00, x6);
  ASSERT_EQUAL_64(1, x8);
  ASSERT_EQUAL_64(1, x9);
  ASSERT_EQUAL_64(2, x10);
  ASSERT_EQUAL_64(0, x11);
  ASSERT_EQUAL_64(0xffffffff, x12);
  ASSERT_EQUAL_64(0, x13);
  ASSERT_EQUAL_64(0xfffffffe, x14);
  ASSERT_EQUAL_64(1, x16);
  ASSERT_EQUAL_64(1, x17);
  ASSERT_EQUAL_64(2, x18);
  ASSERT_EQUAL_64(0, x19);
  ASSERT_EQUAL_64(0xffffffffffffffffUL, x20);
  ASSERT_EQUAL_64(0, x21);
  ASSERT_EQUAL_64(0xffffff0000000000UL, x22);
  ASSERT_EQUAL_64(1, x24);
  ASSERT_EQUAL_64(2, x25);
  ASSERT_EQUAL_64(0, x26);
  ASSERT_EQUAL_64(0xffffffffffffffffUL, x27);
  // ASSERT_EQUAL_64(0, x28);
  ASSERT_EQUAL_64(0xfffffffffffff800UL, x29);
  ASSERT_EQUAL_64(0xffffffff, x30);

  TEARDOWN();
}


TEST(fcvtzs) {
  INIT_V8();
  SETUP();

  START();
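  // Fcvtzs rounds toward zero (truncation): 1.5 -> 1 and -1.5 -> -1.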
  __ Fmov(s0, 1.0);
  __ Fmov(s1, 1.1);
  __ Fmov(s2, 1.5);
  __ Fmov(s3, -1.5);
  __ Fmov(s4, kFP32PositiveInfinity);
  __ Fmov(s5, kFP32NegativeInfinity);
  __ Fmov(s6, 0x7fffff80);  // Largest float < INT32_MAX.
  __ Fneg(s7, s6);  // Smallest float > INT32_MIN.
  __ Fmov(d8, 1.0);
  __ Fmov(d9, 1.1);
  __ Fmov(d10, 1.5);
  __ Fmov(d11, -1.5);
  __ Fmov(d12, kFP64PositiveInfinity);
  __ Fmov(d13, kFP64NegativeInfinity);
  __ Fmov(d14, kWMaxInt - 1);
  __ Fmov(d15, kWMinInt + 1);
  __ Fmov(s17, 1.1);
  __ Fmov(s18, 1.5);
  __ Fmov(s19, -1.5);
  __ Fmov(s20, kFP32PositiveInfinity);
  __ Fmov(s21, kFP32NegativeInfinity);
  __ Fmov(s22, 0x7fffff8000000000UL);  // Largest float < INT64_MAX.
  __ Fneg(s23, s22);  // Smallest float > INT64_MIN.
  __ Fmov(d24, 1.1);
  __ Fmov(d25, 1.5);
  __ Fmov(d26, -1.5);
  __ Fmov(d27, kFP64PositiveInfinity);
  __ Fmov(d28, kFP64NegativeInfinity);
  __ Fmov(d29, 0x7ffffffffffffc00UL);  // Largest double < INT64_MAX.
  __ Fneg(d30, d29);  // Smallest double > INT64_MIN.

  __ Fcvtzs(w0, s0);
  __ Fcvtzs(w1, s1);
  __ Fcvtzs(w2, s2);
  __ Fcvtzs(w3, s3);
  __ Fcvtzs(w4, s4);
  __ Fcvtzs(w5, s5);
  __ Fcvtzs(w6, s6);
  __ Fcvtzs(w7, s7);
  __ Fcvtzs(w8, d8);
  __ Fcvtzs(w9, d9);
  __ Fcvtzs(w10, d10);
  __ Fcvtzs(w11, d11);
  __ Fcvtzs(w12, d12);
  __ Fcvtzs(w13, d13);
  __ Fcvtzs(w14, d14);
  __ Fcvtzs(w15, d15);
  __ Fcvtzs(x17, s17);
  __ Fcvtzs(x18, s18);
  __ Fcvtzs(x19, s19);
  __ Fcvtzs(x20, s20);
  __ Fcvtzs(x21, s21);
  __ Fcvtzs(x22, s22);
  __ Fcvtzs(x23, s23);
  __ Fcvtzs(x24, d24);
  __ Fcvtzs(x25, d25);
  __ Fcvtzs(x26, d26);
  __ Fcvtzs(x27, d27);
  __ Fcvtzs(x28, d28);
  __ Fcvtzs(x29, d29);
  __ Fcvtzs(x30, d30);
  END();

  RUN();

  ASSERT_EQUAL_64(1, x0);
  ASSERT_EQUAL_64(1, x1);
  ASSERT_EQUAL_64(1, x2);
  ASSERT_EQUAL_64(0xffffffff, x3);
  ASSERT_EQUAL_64(0x7fffffff, x4);
  ASSERT_EQUAL_64(0x80000000, x5);
  ASSERT_EQUAL_64(0x7fffff80, x6);
  ASSERT_EQUAL_64(0x80000080, x7);
  ASSERT_EQUAL_64(1, x8);
  ASSERT_EQUAL_64(1, x9);
  ASSERT_EQUAL_64(1, x10);
  ASSERT_EQUAL_64(0xffffffff, x11);
  ASSERT_EQUAL_64(0x7fffffff, x12);
  ASSERT_EQUAL_64(0x80000000, x13);
  ASSERT_EQUAL_64(0x7ffffffe, x14);
  ASSERT_EQUAL_64(0x80000001, x15);
  ASSERT_EQUAL_64(1, x17);
  ASSERT_EQUAL_64(1, x18);
  ASSERT_EQUAL_64(0xffffffffffffffffUL, x19);
  ASSERT_EQUAL_64(0x7fffffffffffffffUL, x20);
  ASSERT_EQUAL_64(0x8000000000000000UL, x21);
  ASSERT_EQUAL_64(0x7fffff8000000000UL, x22);
  ASSERT_EQUAL_64(0x8000008000000000UL, x23);
  ASSERT_EQUAL_64(1, x24);
  ASSERT_EQUAL_64(1, x25);
  ASSERT_EQUAL_64(0xffffffffffffffffUL, x26);
  ASSERT_EQUAL_64(0x7fffffffffffffffUL, x27);
  ASSERT_EQUAL_64(0x8000000000000000UL, x28);
  ASSERT_EQUAL_64(0x7ffffffffffffc00UL, x29);
  ASSERT_EQUAL_64(0x8000000000000400UL, x30);

  TEARDOWN();
}


TEST(fcvtzu) {
  INIT_V8();
  SETUP();

  START();
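  // Fcvtzu truncates toward zero; all negative inputs saturate to 0.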
  __ Fmov(s0, 1.0);
  __ Fmov(s1, 1.1);
  __ Fmov(s2, 1.5);
  __ Fmov(s3, -1.5);
  __ Fmov(s4, kFP32PositiveInfinity);
  __ Fmov(s5, kFP32NegativeInfinity);
  __ Fmov(s6, 0x7fffff80);  // Largest float < INT32_MAX.
  __ Fneg(s7, s6);  // Smallest float > INT32_MIN.
  __ Fmov(d8, 1.0);
  __ Fmov(d9, 1.1);
  __ Fmov(d10, 1.5);
  __ Fmov(d11, -1.5);
  __ Fmov(d12, kFP64PositiveInfinity);
  __ Fmov(d13, kFP64NegativeInfinity);
  __ Fmov(d14, kWMaxInt - 1);
  __ Fmov(d15, kWMinInt + 1);
  __ Fmov(s17, 1.1);
  __ Fmov(s18, 1.5);
  __ Fmov(s19, -1.5);
  __ Fmov(s20, kFP32PositiveInfinity);
  __ Fmov(s21, kFP32NegativeInfinity);
  __ Fmov(s22, 0x7fffff8000000000UL);  // Largest float < INT64_MAX.
  __ Fneg(s23, s22);  // Smallest float > INT64_MIN.
  __ Fmov(d24, 1.1);
  __ Fmov(d25, 1.5);
  __ Fmov(d26, -1.5);
  __ Fmov(d27, kFP64PositiveInfinity);
  __ Fmov(d28, kFP64NegativeInfinity);
  __ Fmov(d29, 0x7ffffffffffffc00UL);  // Largest double < INT64_MAX.
  __ Fneg(d30, d29);  // Smallest double > INT64_MIN.

  __ Fcvtzu(w0, s0);
  __ Fcvtzu(w1, s1);
  __ Fcvtzu(w2, s2);
  __ Fcvtzu(w3, s3);
  __ Fcvtzu(w4, s4);
  __ Fcvtzu(w5, s5);
  __ Fcvtzu(w6, s6);
  __ Fcvtzu(w7, s7);
  __ Fcvtzu(w8, d8);
  __ Fcvtzu(w9, d9);
  __ Fcvtzu(w10, d10);
  __ Fcvtzu(w11, d11);
  __ Fcvtzu(w12, d12);
  __ Fcvtzu(w13, d13);
  __ Fcvtzu(w14, d14);
  __ Fcvtzu(x17, s17);
  __ Fcvtzu(x18, s18);
  __ Fcvtzu(x19, s19);
  __ Fcvtzu(x20, s20);
  __ Fcvtzu(x21, s21);
  __ Fcvtzu(x22, s22);
  __ Fcvtzu(x23, s23);
  __ Fcvtzu(x24, d24);
  __ Fcvtzu(x25, d25);
  __ Fcvtzu(x26, d26);
  __ Fcvtzu(x27, d27);
  __ Fcvtzu(x28, d28);
  __ Fcvtzu(x29, d29);
  __ Fcvtzu(x30, d30);
  END();

  RUN();

  ASSERT_EQUAL_64(1, x0);
  ASSERT_EQUAL_64(1, x1);
  ASSERT_EQUAL_64(1, x2);
  ASSERT_EQUAL_64(0, x3);
  ASSERT_EQUAL_64(0xffffffff, x4);
  ASSERT_EQUAL_64(0, x5);
  ASSERT_EQUAL_64(0x7fffff80, x6);
  ASSERT_EQUAL_64(0, x7);
  ASSERT_EQUAL_64(1, x8);
  ASSERT_EQUAL_64(1, x9);
  ASSERT_EQUAL_64(1, x10);
  ASSERT_EQUAL_64(0, x11);
  ASSERT_EQUAL_64(0xffffffff, x12);
  ASSERT_EQUAL_64(0, x13);
  ASSERT_EQUAL_64(0x7ffffffe, x14);
  ASSERT_EQUAL_64(1, x17);
  ASSERT_EQUAL_64(1, x18);
  ASSERT_EQUAL_64(0x0UL, x19);
  ASSERT_EQUAL_64(0xffffffffffffffffUL, x20);
  ASSERT_EQUAL_64(0x0UL, x21);
  ASSERT_EQUAL_64(0x7fffff8000000000UL, x22);
  ASSERT_EQUAL_64(0x0UL, x23);
  ASSERT_EQUAL_64(1, x24);
  ASSERT_EQUAL_64(1, x25);
  ASSERT_EQUAL_64(0x0UL, x26);
  ASSERT_EQUAL_64(0xffffffffffffffffUL, x27);
  ASSERT_EQUAL_64(0x0UL, x28);
  ASSERT_EQUAL_64(0x7ffffffffffffc00UL, x29);
  ASSERT_EQUAL_64(0x0UL, x30);

  TEARDOWN();
}


// Test that scvtf and ucvtf can convert the 64-bit input into the expected
// value. All possible values of 'fbits' are tested. The expected value is
// modified accordingly in each case.
//
// The expected value is specified as the bit encoding of the expected double
// produced by scvtf (expected_scvtf_bits) as well as ucvtf
// (expected_ucvtf_bits).
//
// Where the input value is representable by int32_t or uint32_t, conversions
// from W registers will also be tested.
7265 static void TestUScvtfHelper(uint64_t in, | |
7266 uint64_t expected_scvtf_bits, | |
7267 uint64_t expected_ucvtf_bits) { | |
7268 uint64_t u64 = in; | |
7269 uint32_t u32 = u64 & 0xffffffff; | |
7270 int64_t s64 = static_cast<int64_t>(in); | |
7271 int32_t s32 = s64 & 0x7fffffff; | |
7272 | |
7273 bool cvtf_s32 = (s64 == s32); | |
7274 bool cvtf_u32 = (u64 == u32); | |
7275 | |
7276 double results_scvtf_x[65]; | |
7277 double results_ucvtf_x[65]; | |
7278 double results_scvtf_w[33]; | |
7279 double results_ucvtf_w[33]; | |
7280 | |
7281 SETUP(); | |
7282 START(); | |
7283 | |
7284 __ Mov(x0, reinterpret_cast<int64_t>(results_scvtf_x)); | |
7285 __ Mov(x1, reinterpret_cast<int64_t>(results_ucvtf_x)); | |
7286 __ Mov(x2, reinterpret_cast<int64_t>(results_scvtf_w)); | |
7287 __ Mov(x3, reinterpret_cast<int64_t>(results_ucvtf_w)); | |
7288 | |
7289 __ Mov(x10, s64); | |
7290 | |
7291 // Corrupt the top word, in case it is accidentally used during W-register | |
7292 // conversions. | |
7293 __ Mov(x11, 0x5555555555555555); | |
7294 __ Bfi(x11, x10, 0, kWRegSizeInBits); | |
7295 | |
7296 // Test integer conversions. | |
7297 __ Scvtf(d0, x10); | |
7298 __ Ucvtf(d1, x10); | |
7299 __ Scvtf(d2, w11); | |
7300 __ Ucvtf(d3, w11); | |
7301 __ Str(d0, MemOperand(x0)); | |
7302 __ Str(d1, MemOperand(x1)); | |
7303 __ Str(d2, MemOperand(x2)); | |
7304 __ Str(d3, MemOperand(x3)); | |
7305 | |
7306 // Test all possible values of fbits. | |
7307 for (int fbits = 1; fbits <= 32; fbits++) { | |
7308 __ Scvtf(d0, x10, fbits); | |
7309 __ Ucvtf(d1, x10, fbits); | |
7310 __ Scvtf(d2, w11, fbits); | |
7311 __ Ucvtf(d3, w11, fbits); | |
7312 __ Str(d0, MemOperand(x0, fbits * kDRegSize)); | |
7313 __ Str(d1, MemOperand(x1, fbits * kDRegSize)); | |
7314 __ Str(d2, MemOperand(x2, fbits * kDRegSize)); | |
7315 __ Str(d3, MemOperand(x3, fbits * kDRegSize)); | |
7316 } | |
7317 | |
7318 // Conversions from W registers can only handle fbits values <= 32, so just | |
7319 // test conversions from X registers for 32 < fbits <= 64. | |
7320 for (int fbits = 33; fbits <= 64; fbits++) { | |
7321 __ Scvtf(d0, x10, fbits); | |
7322 __ Ucvtf(d1, x10, fbits); | |
7323 __ Str(d0, MemOperand(x0, fbits * kDRegSize)); | |
7324 __ Str(d1, MemOperand(x1, fbits * kDRegSize)); | |
7325 } | |
7326 | |
7327 END(); | |
7328 RUN(); | |
7329 | |
7330 // Check the results. | |
7331 double expected_scvtf_base = rawbits_to_double(expected_scvtf_bits); | |
7332 double expected_ucvtf_base = rawbits_to_double(expected_ucvtf_bits); | |
7333 | |
7334 for (int fbits = 0; fbits <= 32; fbits++) { | |
7335 double expected_scvtf = expected_scvtf_base / pow(2.0, fbits); | |
7336 double expected_ucvtf = expected_ucvtf_base / pow(2.0, fbits); | |
7337 ASSERT_EQUAL_FP64(expected_scvtf, results_scvtf_x[fbits]); | |
7338 ASSERT_EQUAL_FP64(expected_ucvtf, results_ucvtf_x[fbits]); | |
7339 if (cvtf_s32) ASSERT_EQUAL_FP64(expected_scvtf, results_scvtf_w[fbits]); | |
7340 if (cvtf_u32) ASSERT_EQUAL_FP64(expected_ucvtf, results_ucvtf_w[fbits]); | |
7341 } | |
7342 for (int fbits = 33; fbits <= 64; fbits++) { | |
7343 double expected_scvtf = expected_scvtf_base / pow(2.0, fbits); | |
7344 double expected_ucvtf = expected_ucvtf_base / pow(2.0, fbits); | |
7345 ASSERT_EQUAL_FP64(expected_scvtf, results_scvtf_x[fbits]); | |
7346 ASSERT_EQUAL_FP64(expected_ucvtf, results_ucvtf_x[fbits]); | |
7347 } | |
7348 | |
7349 TEARDOWN(); | |
7350 } | |
7351 | |
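// For reference, the fixed-point semantics exercised by TestUScvtfHelper can
// be modelled directly: with 'fbits' fraction bits, the integer input is
// scaled down by 2^fbits, and only the int-to-double conversion rounds
// (division by a power of two is exact for these operand ranges). A minimal
// sketch, using hypothetical helper names that are not part of the tests:
static double ScvtfReference(int64_t in, int fbits) {
  return static_cast<double>(in) / std::pow(2.0, fbits);
}
static double UcvtfReference(uint64_t in, int fbits) {
  return static_cast<double>(in) / std::pow(2.0, fbits);
}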
7352 | |
7353 TEST(scvtf_ucvtf_double) { | |
7354 INIT_V8(); | |
7355 // Simple conversions of positive numbers which require no rounding; the | |
7356 // results should not depend on the rounding mode, and ucvtf and scvtf should | |
7357 // produce the same result. | |
7358 TestUScvtfHelper(0x0000000000000000, 0x0000000000000000, 0x0000000000000000); | |
7359 TestUScvtfHelper(0x0000000000000001, 0x3ff0000000000000, 0x3ff0000000000000); | |
7360 TestUScvtfHelper(0x0000000040000000, 0x41d0000000000000, 0x41d0000000000000); | |
7361 TestUScvtfHelper(0x0000000100000000, 0x41f0000000000000, 0x41f0000000000000); | |
7362 TestUScvtfHelper(0x4000000000000000, 0x43d0000000000000, 0x43d0000000000000); | |
7363 // Test mantissa extremities. | |
7364 TestUScvtfHelper(0x4000000000000400, 0x43d0000000000001, 0x43d0000000000001); | |
7365 // The largest int32_t that fits in a double. | |
7366 TestUScvtfHelper(0x000000007fffffff, 0x41dfffffffc00000, 0x41dfffffffc00000); | |
7367 // Values that would be negative if treated as an int32_t. | |
7368 TestUScvtfHelper(0x00000000ffffffff, 0x41efffffffe00000, 0x41efffffffe00000); | |
7369 TestUScvtfHelper(0x0000000080000000, 0x41e0000000000000, 0x41e0000000000000); | |
7370 TestUScvtfHelper(0x0000000080000001, 0x41e0000000200000, 0x41e0000000200000); | |
7371 // The largest int64_t that fits in a double. | |
7372 TestUScvtfHelper(0x7ffffffffffffc00, 0x43dfffffffffffff, 0x43dfffffffffffff); | |
7373 // Check for bit pattern reproduction. | |
7374 TestUScvtfHelper(0x0123456789abcde0, 0x43723456789abcde, 0x43723456789abcde); | |
7375 TestUScvtfHelper(0x0000000012345678, 0x41b2345678000000, 0x41b2345678000000); | |
7376 | |
7377 // Simple conversions of negative int64_t values. These require no rounding, | |
7378 // and the results should not depend on the rounding mode. | |
7379 TestUScvtfHelper(0xffffffffc0000000, 0xc1d0000000000000, 0x43effffffff80000); | |
7380 TestUScvtfHelper(0xffffffff00000000, 0xc1f0000000000000, 0x43efffffffe00000); | |
7381 TestUScvtfHelper(0xc000000000000000, 0xc3d0000000000000, 0x43e8000000000000); | |
7382 | |
7383 // Conversions which require rounding. | |
7384 TestUScvtfHelper(0x1000000000000000, 0x43b0000000000000, 0x43b0000000000000); | |
7385 TestUScvtfHelper(0x1000000000000001, 0x43b0000000000000, 0x43b0000000000000); | |
7386 TestUScvtfHelper(0x1000000000000080, 0x43b0000000000000, 0x43b0000000000000); | |
7387 TestUScvtfHelper(0x1000000000000081, 0x43b0000000000001, 0x43b0000000000001); | |
7388 TestUScvtfHelper(0x1000000000000100, 0x43b0000000000001, 0x43b0000000000001); | |
7389 TestUScvtfHelper(0x1000000000000101, 0x43b0000000000001, 0x43b0000000000001); | |
7390 TestUScvtfHelper(0x1000000000000180, 0x43b0000000000002, 0x43b0000000000002); | |
7391 TestUScvtfHelper(0x1000000000000181, 0x43b0000000000002, 0x43b0000000000002); | |
7392 TestUScvtfHelper(0x1000000000000200, 0x43b0000000000002, 0x43b0000000000002); | |
7393 TestUScvtfHelper(0x1000000000000201, 0x43b0000000000002, 0x43b0000000000002); | |
7394 TestUScvtfHelper(0x1000000000000280, 0x43b0000000000002, 0x43b0000000000002); | |
7395 TestUScvtfHelper(0x1000000000000281, 0x43b0000000000003, 0x43b0000000000003); | |
7396 TestUScvtfHelper(0x1000000000000300, 0x43b0000000000003, 0x43b0000000000003); | |
7397 // Check rounding of negative int64_t values (and large uint64_t values). | |
7398 TestUScvtfHelper(0x8000000000000000, 0xc3e0000000000000, 0x43e0000000000000); | |
7399 TestUScvtfHelper(0x8000000000000001, 0xc3e0000000000000, 0x43e0000000000000); | |
7400 TestUScvtfHelper(0x8000000000000200, 0xc3e0000000000000, 0x43e0000000000000); | |
7401 TestUScvtfHelper(0x8000000000000201, 0xc3dfffffffffffff, 0x43e0000000000000); | |
7402 TestUScvtfHelper(0x8000000000000400, 0xc3dfffffffffffff, 0x43e0000000000000); | |
7403 TestUScvtfHelper(0x8000000000000401, 0xc3dfffffffffffff, 0x43e0000000000001); | |
7404 TestUScvtfHelper(0x8000000000000600, 0xc3dffffffffffffe, 0x43e0000000000001); | |
7405 TestUScvtfHelper(0x8000000000000601, 0xc3dffffffffffffe, 0x43e0000000000001); | |
7406 TestUScvtfHelper(0x8000000000000800, 0xc3dffffffffffffe, 0x43e0000000000001); | |
7407 TestUScvtfHelper(0x8000000000000801, 0xc3dffffffffffffe, 0x43e0000000000001); | |
7408 TestUScvtfHelper(0x8000000000000a00, 0xc3dffffffffffffe, 0x43e0000000000001); | |
7409 TestUScvtfHelper(0x8000000000000a01, 0xc3dffffffffffffd, 0x43e0000000000001); | |
7410 TestUScvtfHelper(0x8000000000000c00, 0xc3dffffffffffffd, 0x43e0000000000002); | |
7411 // Round up to produce a result that's too big for the input to represent. | |
7412 TestUScvtfHelper(0x7ffffffffffffe00, 0x43e0000000000000, 0x43e0000000000000); | |
7413 TestUScvtfHelper(0x7fffffffffffffff, 0x43e0000000000000, 0x43e0000000000000); | |
7414 TestUScvtfHelper(0xfffffffffffffc00, 0xc090000000000000, 0x43f0000000000000); | |
7415 TestUScvtfHelper(0xffffffffffffffff, 0xbff0000000000000, 0x43f0000000000000); | |
7416 } | |
7417 | |
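// The expected values above are raw IEEE-754 bit patterns. For example,
// 0x41d0000000000000 has a zero mantissa and a biased exponent of 0x41d, so
// it encodes 2^(0x41d - 0x3ff) = 2^30, matching the 0x0000000040000000
// input. A minimal sketch of the decoding (a hypothetical helper; the tests
// themselves use rawbits_to_double() from the utils-a64 support code):
static double DecodeDoubleBits(uint64_t bits) {
  double result;
  memcpy(&result, &bits, sizeof(result));  // Type-pun without aliasing UB.
  return result;
}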
7418 | |
7419 // The same as TestUScvtfHelper, but converting to floats. | |
7420 static void TestUScvtf32Helper(uint64_t in, | |
7421 uint32_t expected_scvtf_bits, | |
7422 uint32_t expected_ucvtf_bits) { | |
7423 uint64_t u64 = in; | |
7424 uint32_t u32 = u64 & 0xffffffff; | |
7425 int64_t s64 = static_cast<int64_t>(in); | |
7426 int32_t s32 = s64 & 0x7fffffff; | |
7427 | |
7428 bool cvtf_s32 = (s64 == s32); | |
7429 bool cvtf_u32 = (u64 == u32); | |
7430 | |
7431 float results_scvtf_x[65]; | |
7432 float results_ucvtf_x[65]; | |
7433 float results_scvtf_w[33]; | |
7434 float results_ucvtf_w[33]; | |
7435 | |
7436 SETUP(); | |
7437 START(); | |
7438 | |
7439 __ Mov(x0, reinterpret_cast<int64_t>(results_scvtf_x)); | |
7440 __ Mov(x1, reinterpret_cast<int64_t>(results_ucvtf_x)); | |
7441 __ Mov(x2, reinterpret_cast<int64_t>(results_scvtf_w)); | |
7442 __ Mov(x3, reinterpret_cast<int64_t>(results_ucvtf_w)); | |
7443 | |
7444 __ Mov(x10, s64); | |
7445 | |
7446 // Corrupt the top word, in case it is accidentally used during W-register | |
7447 // conversions. | |
7448 __ Mov(x11, 0x5555555555555555); | |
7449 __ Bfi(x11, x10, 0, kWRegSizeInBits); | |
7450 | |
7451 // Test integer conversions. | |
7452 __ Scvtf(s0, x10); | |
7453 __ Ucvtf(s1, x10); | |
7454 __ Scvtf(s2, w11); | |
7455 __ Ucvtf(s3, w11); | |
7456 __ Str(s0, MemOperand(x0)); | |
7457 __ Str(s1, MemOperand(x1)); | |
7458 __ Str(s2, MemOperand(x2)); | |
7459 __ Str(s3, MemOperand(x3)); | |
7460 | |
7461 // Test all possible values of fbits. | |
7462 for (int fbits = 1; fbits <= 32; fbits++) { | |
7463 __ Scvtf(s0, x10, fbits); | |
7464 __ Ucvtf(s1, x10, fbits); | |
7465 __ Scvtf(s2, w11, fbits); | |
7466 __ Ucvtf(s3, w11, fbits); | |
7467 __ Str(s0, MemOperand(x0, fbits * kSRegSize)); | |
7468 __ Str(s1, MemOperand(x1, fbits * kSRegSize)); | |
7469 __ Str(s2, MemOperand(x2, fbits * kSRegSize)); | |
7470 __ Str(s3, MemOperand(x3, fbits * kSRegSize)); | |
7471 } | |
7472 | |
7473 // Conversions from W registers can only handle fbits values <= 32, so just | |
7474 // test conversions from X registers for 32 < fbits <= 64. | |
7475 for (int fbits = 33; fbits <= 64; fbits++) { | |
7476 __ Scvtf(s0, x10, fbits); | |
7477 __ Ucvtf(s1, x10, fbits); | |
7478 __ Str(s0, MemOperand(x0, fbits * kSRegSize)); | |
7479 __ Str(s1, MemOperand(x1, fbits * kSRegSize)); | |
7480 } | |
7481 | |
7482 END(); | |
7483 RUN(); | |
7484 | |
7485 // Check the results. | |
7486 float expected_scvtf_base = rawbits_to_float(expected_scvtf_bits); | |
7487 float expected_ucvtf_base = rawbits_to_float(expected_ucvtf_bits); | |
7488 | |
7489 for (int fbits = 0; fbits <= 32; fbits++) { | |
7490 float expected_scvtf = expected_scvtf_base / powf(2, fbits); | |
7491 float expected_ucvtf = expected_ucvtf_base / powf(2, fbits); | |
7492 ASSERT_EQUAL_FP32(expected_scvtf, results_scvtf_x[fbits]); | |
7493 ASSERT_EQUAL_FP32(expected_ucvtf, results_ucvtf_x[fbits]); | |
7494 if (cvtf_s32) ASSERT_EQUAL_FP32(expected_scvtf, results_scvtf_w[fbits]); | |
7495 if (cvtf_u32) ASSERT_EQUAL_FP32(expected_ucvtf, results_ucvtf_w[fbits]); | |
7497 } | |
7498 for (int fbits = 33; fbits <= 64; fbits++) { | |
7500 float expected_scvtf = expected_scvtf_base / powf(2, fbits); | |
7501 float expected_ucvtf = expected_ucvtf_base / powf(2, fbits); | |
7502 ASSERT_EQUAL_FP32(expected_scvtf, results_scvtf_x[fbits]); | |
7503 ASSERT_EQUAL_FP32(expected_ucvtf, results_ucvtf_x[fbits]); | |
7504 } | |
7505 | |
7506 TEARDOWN(); | |
7507 } | |
7508 | |
7509 | |
7510 TEST(scvtf_ucvtf_float) { | |
7511 INIT_V8(); | |
7512 // Simple conversions of positive numbers which require no rounding; the | |
7513 // results should not depend on the rounding mode, and ucvtf and scvtf should | |
7514 // produce the same result. | |
7515 TestUScvtf32Helper(0x0000000000000000, 0x00000000, 0x00000000); | |
7516 TestUScvtf32Helper(0x0000000000000001, 0x3f800000, 0x3f800000); | |
7517 TestUScvtf32Helper(0x0000000040000000, 0x4e800000, 0x4e800000); | |
7518 TestUScvtf32Helper(0x0000000100000000, 0x4f800000, 0x4f800000); | |
7519 TestUScvtf32Helper(0x4000000000000000, 0x5e800000, 0x5e800000); | |
7520 // Test mantissa extremities. | |
7521 TestUScvtf32Helper(0x0000000000800001, 0x4b000001, 0x4b000001); | |
7522 TestUScvtf32Helper(0x4000008000000000, 0x5e800001, 0x5e800001); | |
7523 // The largest int32_t that fits in a float. | |
7524 TestUScvtf32Helper(0x000000007fffff80, 0x4effffff, 0x4effffff); | |
7525 // Values that would be negative if treated as an int32_t. | |
7526 TestUScvtf32Helper(0x00000000ffffff00, 0x4f7fffff, 0x4f7fffff); | |
7527 TestUScvtf32Helper(0x0000000080000000, 0x4f000000, 0x4f000000); | |
7528 TestUScvtf32Helper(0x0000000080000100, 0x4f000001, 0x4f000001); | |
7529 // The largest int64_t that fits in a float. | |
7530 TestUScvtf32Helper(0x7fffff8000000000, 0x5effffff, 0x5effffff); | |
7531 // Check for bit pattern reproduction. | |
7532 TestUScvtf32Helper(0x0000000000876543, 0x4b076543, 0x4b076543); | |
7533 | |
7534 // Simple conversions of negative int64_t values. These require no rounding, | |
7535 // and the results should not depend on the rounding mode. | |
7536 TestUScvtf32Helper(0xfffffc0000000000, 0xd4800000, 0x5f7ffffc); | |
7537 TestUScvtf32Helper(0xc000000000000000, 0xde800000, 0x5f400000); | |
7538 | |
7539 // Conversions which require rounding. | |
7540 TestUScvtf32Helper(0x0000800000000000, 0x57000000, 0x57000000); | |
7541 TestUScvtf32Helper(0x0000800000000001, 0x57000000, 0x57000000); | |
7542 TestUScvtf32Helper(0x0000800000800000, 0x57000000, 0x57000000); | |
7543 TestUScvtf32Helper(0x0000800000800001, 0x57000001, 0x57000001); | |
7544 TestUScvtf32Helper(0x0000800001000000, 0x57000001, 0x57000001); | |
7545 TestUScvtf32Helper(0x0000800001000001, 0x57000001, 0x57000001); | |
7546 TestUScvtf32Helper(0x0000800001800000, 0x57000002, 0x57000002); | |
7547 TestUScvtf32Helper(0x0000800001800001, 0x57000002, 0x57000002); | |
7548 TestUScvtf32Helper(0x0000800002000000, 0x57000002, 0x57000002); | |
7549 TestUScvtf32Helper(0x0000800002000001, 0x57000002, 0x57000002); | |
7550 TestUScvtf32Helper(0x0000800002800000, 0x57000002, 0x57000002); | |
7551 TestUScvtf32Helper(0x0000800002800001, 0x57000003, 0x57000003); | |
7552 TestUScvtf32Helper(0x0000800003000000, 0x57000003, 0x57000003); | |
7553 // Check rounding of negative int64_t values (and large uint64_t values). | |
7554 TestUScvtf32Helper(0x8000000000000000, 0xdf000000, 0x5f000000); | |
7555 TestUScvtf32Helper(0x8000000000000001, 0xdf000000, 0x5f000000); | |
7556 TestUScvtf32Helper(0x8000004000000000, 0xdf000000, 0x5f000000); | |
7557 TestUScvtf32Helper(0x8000004000000001, 0xdeffffff, 0x5f000000); | |
7558 TestUScvtf32Helper(0x8000008000000000, 0xdeffffff, 0x5f000000); | |
7559 TestUScvtf32Helper(0x8000008000000001, 0xdeffffff, 0x5f000001); | |
7560 TestUScvtf32Helper(0x800000c000000000, 0xdefffffe, 0x5f000001); | |
7561 TestUScvtf32Helper(0x800000c000000001, 0xdefffffe, 0x5f000001); | |
7562 TestUScvtf32Helper(0x8000010000000000, 0xdefffffe, 0x5f000001); | |
7563 TestUScvtf32Helper(0x8000010000000001, 0xdefffffe, 0x5f000001); | |
7564 TestUScvtf32Helper(0x8000014000000000, 0xdefffffe, 0x5f000001); | |
7565 TestUScvtf32Helper(0x8000014000000001, 0xdefffffd, 0x5f000001); | |
7566 TestUScvtf32Helper(0x8000018000000000, 0xdefffffd, 0x5f000002); | |
7567 // Round up to produce a result that's too big for the input to represent. | |
7568 TestUScvtf32Helper(0x000000007fffffc0, 0x4f000000, 0x4f000000); | |
7569 TestUScvtf32Helper(0x000000007fffffff, 0x4f000000, 0x4f000000); | |
7570 TestUScvtf32Helper(0x00000000ffffff80, 0x4f800000, 0x4f800000); | |
7571 TestUScvtf32Helper(0x00000000ffffffff, 0x4f800000, 0x4f800000); | |
7572 TestUScvtf32Helper(0x7fffffc000000000, 0x5f000000, 0x5f000000); | |
7573 TestUScvtf32Helper(0x7fffffffffffffff, 0x5f000000, 0x5f000000); | |
7574 TestUScvtf32Helper(0xffffff8000000000, 0xd3000000, 0x5f800000); | |
7575 TestUScvtf32Helper(0xffffffffffffffff, 0xbf800000, 0x5f800000); | |
7576 } | |
7577 | |
7578 | |
7579 TEST(system_mrs) { | |
7580 INIT_V8(); | |
7581 SETUP(); | |
7582 | |
7583 START(); | |
7584 __ Mov(w0, 0); | |
7585 __ Mov(w1, 1); | |
7586 __ Mov(w2, 0x80000000); | |
7587 | |
7588 // Set the Z and C flags. | |
7589 __ Cmp(w0, w0); | |
7590 __ Mrs(x3, NZCV); | |
7591 | |
7592 // Set the N flag. | |
7593 __ Cmp(w0, w1); | |
7594 __ Mrs(x4, NZCV); | |
7595 | |
7596 // Set the Z, C and V flags. | |
7597 __ Adds(w0, w2, w2); | |
7598 __ Mrs(x5, NZCV); | |
7599 | |
7600 // Read the default FPCR. | |
7601 __ Mrs(x6, FPCR); | |
7602 END(); | |
7603 | |
7604 RUN(); | |
7605 | |
7606 // NZCV | |
7607 ASSERT_EQUAL_32(ZCFlag, w3); | |
7608 ASSERT_EQUAL_32(NFlag, w4); | |
7609 ASSERT_EQUAL_32(ZCVFlag, w5); | |
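// (The NZCV condition flags live in bits 31 down to 28 of the system
// register: N is bit 31, Z bit 30, C bit 29 and V bit 28. ZCFlag is
// therefore 0x60000000, NFlag 0x80000000 and ZCVFlag 0x70000000.)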
7610 | |
7611 // FPCR | |
7612 // The default FPCR on Linux-based platforms is 0. | |
7613 ASSERT_EQUAL_32(0, w6); | |
7614 | |
7615 TEARDOWN(); | |
7616 } | |
7617 | |
7618 | |
7619 TEST(system_msr) { | |
7620 INIT_V8(); | |
7621 // All FPCR fields that must be implemented: AHP, DN, FZ, RMode | |
7622 const uint64_t fpcr_core = 0x07c00000; | |
7623 | |
7624 // All FPCR fields (including fields which may be read-as-zero): | |
7625 // Stride, Len | |
7626 // IDE, IXE, UFE, OFE, DZE, IOE | |
7627 const uint64_t fpcr_all = fpcr_core | 0x00379f00; | |
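// (For reference: AHP is FPCR bit 26, DN bit 25, FZ bit 24 and RMode bits
// 23-22, so fpcr_core is (1 << 26) | (1 << 25) | (1 << 24) | (3 << 22).
// Stride occupies bits 21-20, Len bits 18-16, and the trap enables IDE, IXE,
// UFE, OFE, DZE and IOE are bits 15, 12, 11, 10, 9 and 8, which together
// give the extra 0x00379f00.)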
7628 | |
7629 SETUP(); | |
7630 | |
7631 START(); | |
7632 __ Mov(w0, 0); | |
7633 __ Mov(w1, 0x7fffffff); | |
7634 | |
7635 __ Mov(x7, 0); | |
7636 | |
7637 __ Mov(x10, NVFlag); | |
7638 __ Cmp(w0, w0); // Set Z and C. | |
7639 __ Msr(NZCV, x10); // Set N and V. | |
7640 // The Msr should have overwritten every flag set by the Cmp. | |
7641 __ Cinc(x7, x7, mi); // N | |
7642 __ Cinc(x7, x7, ne); // !Z | |
7643 __ Cinc(x7, x7, lo); // !C | |
7644 __ Cinc(x7, x7, vs); // V | |
7645 | |
7646 __ Mov(x10, ZCFlag); | |
7647 __ Cmn(w1, w1); // Set N and V. | |
7648 __ Msr(NZCV, x10); // Set Z and C. | |
7649 // The Msr should have overwritten every flag set by the Cmn. | |
7650 __ Cinc(x7, x7, pl); // !N | |
7651 __ Cinc(x7, x7, eq); // Z | |
7652 __ Cinc(x7, x7, hs); // C | |
7653 __ Cinc(x7, x7, vc); // !V | |
7654 | |
7655 // All core FPCR fields must be writable. | |
7656 __ Mov(x8, fpcr_core); | |
7657 __ Msr(FPCR, x8); | |
7658 __ Mrs(x8, FPCR); | |
7659 | |
7660 // All FPCR fields, including optional ones. This part of the test doesn't | |
7661 // achieve much other than ensuring that supported fields can be cleared by | |
7662 // the next test. | |
7663 __ Mov(x9, fpcr_all); | |
7664 __ Msr(FPCR, x9); | |
7665 __ Mrs(x9, FPCR); | |
7666 __ And(x9, x9, fpcr_core); | |
7667 | |
7668 // The undefined bits must ignore writes. | |
7669 // It's conceivable that a future version of the architecture could use these | |
7670 // fields (making this test fail), but in the meantime this is a useful test | |
7671 // for the simulator. | |
7672 __ Mov(x10, ~fpcr_all); | |
7673 __ Msr(FPCR, x10); | |
7674 __ Mrs(x10, FPCR); | |
7675 | |
7676 END(); | |
7677 | |
7678 RUN(); | |
7679 | |
7680 // We should have incremented x7 (from 0) exactly 8 times. | |
7681 ASSERT_EQUAL_64(8, x7); | |
7682 | |
7683 ASSERT_EQUAL_64(fpcr_core, x8); | |
7684 ASSERT_EQUAL_64(fpcr_core, x9); | |
7685 ASSERT_EQUAL_64(0, x10); | |
7686 | |
7687 TEARDOWN(); | |
7688 } | |
7689 | |
7690 | |
7691 TEST(system_nop) { | |
7692 INIT_V8(); | |
7693 SETUP(); | |
7694 RegisterDump before; | |
7695 | |
7696 START(); | |
7697 before.Dump(&masm); | |
7698 __ Nop(); | |
7699 END(); | |
7700 | |
7701 RUN(); | |
7702 | |
7703 ASSERT_EQUAL_REGISTERS(before); | |
7704 ASSERT_EQUAL_NZCV(before.flags_nzcv()); | |
7705 | |
7706 TEARDOWN(); | |
7707 } | |
7708 | |
7709 | |
7710 TEST(zero_dest) { | |
7711 INIT_V8(); | |
7712 SETUP(); | |
7713 RegisterDump before; | |
7714 | |
7715 START(); | |
7716 // Preserve the system stack pointer, in case we clobber it. | |
7717 __ Mov(x30, csp); | |
7718 // Initialize the other registers used in this test. | |
7719 uint64_t literal_base = 0x0100001000100101UL; | |
7720 __ Mov(x0, 0); | |
7721 __ Mov(x1, literal_base); | |
7722 for (unsigned i = 2; i < x30.code(); i++) { | |
7723 __ Add(Register::XRegFromCode(i), Register::XRegFromCode(i-1), x1); | |
7724 } | |
7725 before.Dump(&masm); | |
7726 | |
7727 // All of these instructions should be NOPs in these forms, but have | |
7728 // alternate forms which can write into the stack pointer. | |
7729 __ add(xzr, x0, x1); | |
7730 __ add(xzr, x1, xzr); | |
7731 __ add(xzr, xzr, x1); | |
7732 | |
7733 __ and_(xzr, x0, x2); | |
7734 __ and_(xzr, x2, xzr); | |
7735 __ and_(xzr, xzr, x2); | |
7736 | |
7737 __ bic(xzr, x0, x3); | |
7738 __ bic(xzr, x3, xzr); | |
7739 __ bic(xzr, xzr, x3); | |
7740 | |
7741 __ eon(xzr, x0, x4); | |
7742 __ eon(xzr, x4, xzr); | |
7743 __ eon(xzr, xzr, x4); | |
7744 | |
7745 __ eor(xzr, x0, x5); | |
7746 __ eor(xzr, x5, xzr); | |
7747 __ eor(xzr, xzr, x5); | |
7748 | |
7749 __ orr(xzr, x0, x6); | |
7750 __ orr(xzr, x6, xzr); | |
7751 __ orr(xzr, xzr, x6); | |
7752 | |
7753 __ sub(xzr, x0, x7); | |
7754 __ sub(xzr, x7, xzr); | |
7755 __ sub(xzr, xzr, x7); | |
7756 | |
7757 // Swap the saved system stack pointer with the real one. If csp was written | |
7758 // during the test, it will show up in x30. This is done because the test | |
7759 // framework assumes that csp will be valid at the end of the test. | |
7760 __ Mov(x29, x30); | |
7761 __ Mov(x30, csp); | |
7762 __ Mov(csp, x29); | |
7763 // We used x29 as a scratch register, so reset it to make sure it doesn't | |
7764 // trigger a test failure. | |
7765 __ Add(x29, x28, x1); | |
7766 END(); | |
7767 | |
7768 RUN(); | |
7769 | |
7770 ASSERT_EQUAL_REGISTERS(before); | |
7771 ASSERT_EQUAL_NZCV(before.flags_nzcv()); | |
7772 | |
7773 TEARDOWN(); | |
7774 } | |
7775 | |
7776 | |
7777 TEST(zero_dest_setflags) { | |
7778 INIT_V8(); | |
7779 SETUP(); | |
7780 RegisterDump before; | |
7781 | |
7782 START(); | |
7783 // Preserve the system stack pointer, in case we clobber it. | |
7784 __ Mov(x30, csp); | |
7785 // Initialize the other registers used in this test. | |
7786 uint64_t literal_base = 0x0100001000100101UL; | |
7787 __ Mov(x0, 0); | |
7788 __ Mov(x1, literal_base); | |
7789 for (int i = 2; i < 30; i++) { | |
7790 __ Add(Register::XRegFromCode(i), Register::XRegFromCode(i-1), x1); | |
7791 } | |
7792 before.Dump(&masm); | |
7793 | |
7794 // All of these instructions should only write to the flags in these forms, | |
7795 // but have alternate forms which can write into the stack pointer. | |
7796 __ adds(xzr, x0, Operand(x1, UXTX)); | |
7797 __ adds(xzr, x1, Operand(xzr, UXTX)); | |
7798 __ adds(xzr, x1, 1234); | |
7799 __ adds(xzr, x0, x1); | |
7800 __ adds(xzr, x1, xzr); | |
7801 __ adds(xzr, xzr, x1); | |
7802 | |
7803 __ ands(xzr, x2, ~0xf); | |
7804 __ ands(xzr, xzr, ~0xf); | |
7805 __ ands(xzr, x0, x2); | |
7806 __ ands(xzr, x2, xzr); | |
7807 __ ands(xzr, xzr, x2); | |
7808 | |
7809 __ bics(xzr, x3, ~0xf); | |
7810 __ bics(xzr, xzr, ~0xf); | |
7811 __ bics(xzr, x0, x3); | |
7812 __ bics(xzr, x3, xzr); | |
7813 __ bics(xzr, xzr, x3); | |
7814 | |
7815 __ subs(xzr, x0, Operand(x3, UXTX)); | |
7816 __ subs(xzr, x3, Operand(xzr, UXTX)); | |
7817 __ subs(xzr, x3, 1234); | |
7818 __ subs(xzr, x0, x3); | |
7819 __ subs(xzr, x3, xzr); | |
7820 __ subs(xzr, xzr, x3); | |
7821 | |
7822 // Swap the saved system stack pointer with the real one. If csp was written | |
7823 // during the test, it will show up in x30. This is done because the test | |
7824 // framework assumes that csp will be valid at the end of the test. | |
7825 __ Mov(x29, x30); | |
7826 __ Mov(x30, csp); | |
7827 __ Mov(csp, x29); | |
7828 // We used x29 as a scratch register, so reset it to make sure it doesn't | |
7829 // trigger a test failure. | |
7830 __ Add(x29, x28, x1); | |
7831 END(); | |
7832 | |
7833 RUN(); | |
7834 | |
7835 ASSERT_EQUAL_REGISTERS(before); | |
7836 | |
7837 TEARDOWN(); | |
7838 } | |
7839 | |
7840 | |
7841 TEST(register_bit) { | |
7842 // No code generation takes place in this test, so there is no need to set | |
7843 // up and tear down. | |
7844 | |
7845 // Simple tests. | |
7846 CHECK(x0.Bit() == (1UL << 0)); | |
7847 CHECK(x1.Bit() == (1UL << 1)); | |
7848 CHECK(x10.Bit() == (1UL << 10)); | |
7849 | |
7850 // AAPCS64 definitions. | |
7851 CHECK(fp.Bit() == (1UL << kFramePointerRegCode)); | |
7852 CHECK(lr.Bit() == (1UL << kLinkRegCode)); | |
7853 | |
7854 // Fixed (hardware) definitions. | |
7855 CHECK(xzr.Bit() == (1UL << kZeroRegCode)); | |
7856 | |
7857 // Internal ABI definitions. | |
7858 CHECK(jssp.Bit() == (1UL << kJSSPCode)); | |
7859 CHECK(csp.Bit() == (1UL << kSPRegInternalCode)); | |
7860 CHECK(csp.Bit() != xzr.Bit()); | |
7861 | |
7862 // xn.Bit() == wn.Bit() at all times, for the same n. | |
7863 CHECK(x0.Bit() == w0.Bit()); | |
7864 CHECK(x1.Bit() == w1.Bit()); | |
7865 CHECK(x10.Bit() == w10.Bit()); | |
7866 CHECK(jssp.Bit() == wjssp.Bit()); | |
7867 CHECK(xzr.Bit() == wzr.Bit()); | |
7868 CHECK(csp.Bit() == wcsp.Bit()); | |
7869 } | |
7870 | |
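// Since Bit() produces a single-bit mask indexed by the register code, a
// RegList is an ordinary 64-bit bit field and register-list manipulation
// reduces to bitwise arithmetic. A minimal sketch (hypothetical helper, not
// used by the tests):
static inline bool RegListsOverlap(RegList a, RegList b) {
  return (a & b) != 0;
}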
7871 | |
7872 TEST(stack_pointer_override) { | |
7873 // This test generates some stack maintenance code, but the test only checks | |
7874 // the reported state. | |
7875 INIT_V8(); | |
7876 SETUP(); | |
7877 START(); | |
7878 | |
7879 // The default stack pointer in V8 is jssp, but the test framework sets it | |
7880 // to csp before calling the test. | |
7881 CHECK(csp.Is(__ StackPointer())); | |
7882 __ SetStackPointer(x0); | |
7883 CHECK(x0.Is(__ StackPointer())); | |
7884 __ SetStackPointer(jssp); | |
7885 CHECK(jssp.Is(__ StackPointer())); | |
7886 __ SetStackPointer(csp); | |
7887 CHECK(csp.Is(__ StackPointer())); | |
7888 | |
7889 END(); | |
7890 RUN(); | |
7891 TEARDOWN(); | |
7892 } | |
7893 | |
7894 | |
7895 TEST(peek_poke_simple) { | |
7896 INIT_V8(); | |
7897 SETUP(); | |
7898 START(); | |
7899 | |
7900 static const RegList x0_to_x3 = x0.Bit() | x1.Bit() | x2.Bit() | x3.Bit(); | |
7901 static const RegList x10_to_x13 = x10.Bit() | x11.Bit() | | |
7902 x12.Bit() | x13.Bit(); | |
7903 | |
7904 // The literal base is chosen to have two useful properties: | |
7905 // * When multiplied by small values (such as a register index), this value | |
7906 // is clearly readable in the result. | |
7907 // * The value is not formed from repeating fixed-size smaller values, so it | |
7908 // can be used to detect endianness-related errors. | |
7909 uint64_t literal_base = 0x0100001000100101UL; | |
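// (For example, literal_base * 2 is 0x0200002000200202 and literal_base * 3
// is 0x0300003000300303, so the multiplier remains legible in the result.)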
7910 | |
7911 // Initialize the registers. | |
7912 __ Mov(x0, literal_base); | |
7913 __ Add(x1, x0, x0); | |
7914 __ Add(x2, x1, x0); | |
7915 __ Add(x3, x2, x0); | |
7916 | |
7917 __ Claim(4); | |
7918 | |
7919 // Simple exchange. | |
7920 // After this test: | |
7921 // x0-x3 should be unchanged. | |
7922 // w10-w13 should contain the lower words of x0-x3. | |
7923 __ Poke(x0, 0); | |
7924 __ Poke(x1, 8); | |
7925 __ Poke(x2, 16); | |
7926 __ Poke(x3, 24); | |
7927 Clobber(&masm, x0_to_x3); | |
7928 __ Peek(x0, 0); | |
7929 __ Peek(x1, 8); | |
7930 __ Peek(x2, 16); | |
7931 __ Peek(x3, 24); | |
7932 | |
7933 __ Poke(w0, 0); | |
7934 __ Poke(w1, 4); | |
7935 __ Poke(w2, 8); | |
7936 __ Poke(w3, 12); | |
7937 Clobber(&masm, x10_to_x13); | |
7938 __ Peek(w10, 0); | |
7939 __ Peek(w11, 4); | |
7940 __ Peek(w12, 8); | |
7941 __ Peek(w13, 12); | |
7942 | |
7943 __ Drop(4); | |
7944 | |
7945 END(); | |
7946 RUN(); | |
7947 | |
7948 ASSERT_EQUAL_64(literal_base * 1, x0); | |
7949 ASSERT_EQUAL_64(literal_base * 2, x1); | |
7950 ASSERT_EQUAL_64(literal_base * 3, x2); | |
7951 ASSERT_EQUAL_64(literal_base * 4, x3); | |
7952 | |
7953 ASSERT_EQUAL_64((literal_base * 1) & 0xffffffff, x10); | |
7954 ASSERT_EQUAL_64((literal_base * 2) & 0xffffffff, x11); | |
7955 ASSERT_EQUAL_64((literal_base * 3) & 0xffffffff, x12); | |
7956 ASSERT_EQUAL_64((literal_base * 4) & 0xffffffff, x13); | |
7957 | |
7958 TEARDOWN(); | |
7959 } | |
7960 | |
7961 | |
7962 TEST(peek_poke_unaligned) { | |
7963 INIT_V8(); | |
7964 SETUP(); | |
7965 START(); | |
7966 | |
7967 // The literal base is chosen to have two useful properties: | |
7968 // * When multiplied by small values (such as a register index), this value | |
7969 // is clearly readable in the result. | |
7970 // * The value is not formed from repeating fixed-size smaller values, so it | |
7971 // can be used to detect endianness-related errors. | |
7972 uint64_t literal_base = 0x0100001000100101UL; | |
7973 | |
7974 // Initialize the registers. | |
7975 __ Mov(x0, literal_base); | |
7976 __ Add(x1, x0, x0); | |
7977 __ Add(x2, x1, x0); | |
7978 __ Add(x3, x2, x0); | |
7979 __ Add(x4, x3, x0); | |
7980 __ Add(x5, x4, x0); | |
7981 __ Add(x6, x5, x0); | |
7982 | |
7983 __ Claim(4); | |
7984 | |
7985 // Unaligned exchanges. | |
7986 // After this test: | |
7987 // x0-x6 should be unchanged. | |
7988 // w10-w12 should contain the lower words of x0-x2. | |
7989 __ Poke(x0, 1); | |
7990 Clobber(&masm, x0.Bit()); | |
7991 __ Peek(x0, 1); | |
7992 __ Poke(x1, 2); | |
7993 Clobber(&masm, x1.Bit()); | |
7994 __ Peek(x1, 2); | |
7995 __ Poke(x2, 3); | |
7996 Clobber(&masm, x2.Bit()); | |
7997 __ Peek(x2, 3); | |
7998 __ Poke(x3, 4); | |
7999 Clobber(&masm, x3.Bit()); | |
8000 __ Peek(x3, 4); | |
8001 __ Poke(x4, 5); | |
8002 Clobber(&masm, x4.Bit()); | |
8003 __ Peek(x4, 5); | |
8004 __ Poke(x5, 6); | |
8005 Clobber(&masm, x5.Bit()); | |
8006 __ Peek(x5, 6); | |
8007 __ Poke(x6, 7); | |
8008 Clobber(&masm, x6.Bit()); | |
8009 __ Peek(x6, 7); | |
8010 | |
8011 __ Poke(w0, 1); | |
8012 Clobber(&masm, w10.Bit()); | |
8013 __ Peek(w10, 1); | |
8014 __ Poke(w1, 2); | |
8015 Clobber(&masm, w11.Bit()); | |
8016 __ Peek(w11, 2); | |
8017 __ Poke(w2, 3); | |
8018 Clobber(&masm, w12.Bit()); | |
8019 __ Peek(w12, 3); | |
8020 | |
8021 __ Drop(4); | |
8022 | |
8023 END(); | |
8024 RUN(); | |
8025 | |
8026 ASSERT_EQUAL_64(literal_base * 1, x0); | |
8027 ASSERT_EQUAL_64(literal_base * 2, x1); | |
8028 ASSERT_EQUAL_64(literal_base * 3, x2); | |
8029 ASSERT_EQUAL_64(literal_base * 4, x3); | |
8030 ASSERT_EQUAL_64(literal_base * 5, x4); | |
8031 ASSERT_EQUAL_64(literal_base * 6, x5); | |
8032 ASSERT_EQUAL_64(literal_base * 7, x6); | |
8033 | |
8034 ASSERT_EQUAL_64((literal_base * 1) & 0xffffffff, x10); | |
8035 ASSERT_EQUAL_64((literal_base * 2) & 0xffffffff, x11); | |
8036 ASSERT_EQUAL_64((literal_base * 3) & 0xffffffff, x12); | |
8037 | |
8038 TEARDOWN(); | |
8039 } | |
8040 | |
8041 | |
8042 TEST(peek_poke_endianness) { | |
8043 INIT_V8(); | |
8044 SETUP(); | |
8045 START(); | |
8046 | |
8047 // The literal base is chosen to have two useful properties: | |
8048 // * When multiplied by small values (such as a register index), this value | |
8049 // is clearly readable in the result. | |
8050 // * The value is not formed from repeating fixed-size smaller values, so it | |
8051 // can be used to detect endianness-related errors. | |
8052 uint64_t literal_base = 0x0100001000100101UL; | |
8053 | |
8054 // Initialize the registers. | |
8055 __ Mov(x0, literal_base); | |
8056 __ Add(x1, x0, x0); | |
8057 | |
8058 __ Claim(4); | |
8059 | |
8060 // Endianness tests. | |
8061 // After this section: | |
8062 // x4 should match x0[31:0]:x0[63:32] | |
8063 // w5 should match w1[15:0]:w1[31:16] | |
8064 __ Poke(x0, 0); | |
8065 __ Poke(x0, 8); | |
8066 __ Peek(x4, 4); | |
8067 | |
8068 __ Poke(w1, 0); | |
8069 __ Poke(w1, 4); | |
8070 __ Peek(w5, 2); | |
8071 | |
8072 __ Drop(4); | |
8073 | |
8074 END(); | |
8075 RUN(); | |
8076 | |
8077 uint64_t x0_expected = literal_base * 1; | |
8078 uint64_t x1_expected = literal_base * 2; | |
8079 uint64_t x4_expected = (x0_expected << 32) | (x0_expected >> 32); | |
8080 uint64_t x5_expected = ((x1_expected << 16) & 0xffff0000) | | |
8081 ((x1_expected >> 16) & 0x0000ffff); | |
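// (Concretely: x0_expected is 0x0100001000100101, so x4_expected is
// 0x0010010101000010, the two 32-bit words of x0 exchanged, and x5_expected
// is 0x02020020, the two 16-bit halves of w1 exchanged.)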
8082 | |
8083 ASSERT_EQUAL_64(x0_expected, x0); | |
8084 ASSERT_EQUAL_64(x1_expected, x1); | |
8085 ASSERT_EQUAL_64(x4_expected, x4); | |
8086 ASSERT_EQUAL_64(x5_expected, x5); | |
8087 | |
8088 TEARDOWN(); | |
8089 } | |
8090 | |
8091 | |
8092 TEST(peek_poke_mixed) { | |
8093 INIT_V8(); | |
8094 SETUP(); | |
8095 START(); | |
8096 | |
8097 // The literal base is chosen to have two useful properties: | |
8098 // * When multiplied by small values (such as a register index), this value | |
8099 // is clearly readable in the result. | |
8100 // * The value is not formed from repeating fixed-size smaller values, so it | |
8101 // can be used to detect endianness-related errors. | |
8102 uint64_t literal_base = 0x0100001000100101UL; | |
8103 | |
8104 // Initialize the registers. | |
8105 __ Mov(x0, literal_base); | |
8106 __ Add(x1, x0, x0); | |
8107 __ Add(x2, x1, x0); | |
8108 __ Add(x3, x2, x0); | |
8109 | |
8110 __ Claim(4); | |
8111 | |
8112 // Mix with other stack operations. | |
8113 // After this section: | |
8114 // x0-x3 should be unchanged. | |
8115 // x6 should match x1[31:0]:x0[63:32] | |
8116 // w7 should match x1[15:0]:x0[63:48] | |
8117 __ Poke(x1, 8); | |
8118 __ Poke(x0, 0); | |
8119 { | |
8120 ASSERT(__ StackPointer().Is(csp)); | |
8121 __ Mov(x4, __ StackPointer()); | |
8122 __ SetStackPointer(x4); | |
8123 | |
8124 __ Poke(wzr, 0); // Clobber the space we're about to drop. | |
8125 __ Drop(1, kWRegSize); | |
8126 __ Peek(x6, 0); | |
8127 __ Claim(1); | |
8128 __ Peek(w7, 10); | |
8129 __ Poke(x3, 28); | |
8130 __ Poke(xzr, 0); // Clobber the space we're about to drop. | |
8131 __ Drop(1); | |
8132 __ Poke(x2, 12); | |
8133 __ Push(w0); | |
8134 | |
8135 __ Mov(csp, __ StackPointer()); | |
8136 __ SetStackPointer(csp); | |
8137 } | |
8138 | |
8139 __ Pop(x0, x1, x2, x3); | |
8140 | |
8141 END(); | |
8142 RUN(); | |
8143 | |
8144 uint64_t x0_expected = literal_base * 1; | |
8145 uint64_t x1_expected = literal_base * 2; | |
8146 uint64_t x2_expected = literal_base * 3; | |
8147 uint64_t x3_expected = literal_base * 4; | |
8148 uint64_t x6_expected = (x1_expected << 32) | (x0_expected >> 32); | |
8149 uint64_t x7_expected = ((x1_expected << 16) & 0xffff0000) | | |
8150 ((x0_expected >> 48) & 0x0000ffff); | |
8151 | |
8152 ASSERT_EQUAL_64(x0_expected, x0); | |
8153 ASSERT_EQUAL_64(x1_expected, x1); | |
8154 ASSERT_EQUAL_64(x2_expected, x2); | |
8155 ASSERT_EQUAL_64(x3_expected, x3); | |
8156 ASSERT_EQUAL_64(x6_expected, x6); | |
8157 ASSERT_EQUAL_64(x7_expected, x7); | |
8158 | |
8159 TEARDOWN(); | |
8160 } | |
8161 | |
8162 | |
8163 // This enum is used only as an argument to the push-pop test helpers. | |
8164 enum PushPopMethod { | |
8165 // Push or Pop using the Push and Pop methods, with blocks of up to four | |
8166 // registers. (Smaller blocks will be used if necessary.) | |
8167 PushPopByFour, | |
8168 | |
8169 // Use Push<Size>RegList and Pop<Size>RegList to transfer the registers. | |
8170 PushPopRegList | |
8171 }; | |
8172 | |
8173 | |
8174 // The maximum number of registers that can be used by the PushPopJssp* tests, | |
8175 // where a reg_count field is provided. (-1 is a sentinel: use all allowed.) | |
8176 static int const kPushPopJsspMaxRegCount = -1; | |
8177 | |
8178 // Test a simple push-pop pattern: | |
8179 // * Claim <claim> bytes to set the stack alignment. | |
8180 // * Push <reg_count> registers with size <reg_size>. | |
8181 // * Clobber the register contents. | |
8182 // * Pop <reg_count> registers to restore the original contents. | |
8183 // * Drop <claim> bytes to restore the original stack pointer. | |
8184 // | |
8185 // Different push and pop methods can be specified independently to test for | |
8186 // proper word-endian behaviour. | |
8187 static void PushPopJsspSimpleHelper(int reg_count, | |
8188 int claim, | |
8189 int reg_size, | |
8190 PushPopMethod push_method, | |
8191 PushPopMethod pop_method) { | |
8192 SETUP(); | |
8193 | |
8194 START(); | |
8195 | |
8196 // Registers x8 and x9 are used by the macro assembler for debug code (for | |
8197 // example in 'Pop'), so we can't use them here. We can't use jssp because it | |
8198 // will be the stack pointer for this test. | |
8199 static RegList const allowed = ~(x8.Bit() | x9.Bit() | jssp.Bit()); | |
8200 if (reg_count == kPushPopJsspMaxRegCount) { | |
8201 reg_count = CountSetBits(allowed, kNumberOfRegisters); | |
8202 } | |
8203 // Work out which registers to use, based on reg_size. | |
8204 Register r[kNumberOfRegisters]; | |
8205 Register x[kNumberOfRegisters]; | |
8206 RegList list = PopulateRegisterArray(NULL, x, r, reg_size, reg_count, | |
8207 allowed); | |
8208 | |
8209 // The literal base is chosen to have two useful properties: | |
8210 // * When multiplied by small values (such as a register index), this value | |
8211 // is clearly readable in the result. | |
8212 // * The value is not formed from repeating fixed-size smaller values, so it | |
8213 // can be used to detect endianness-related errors. | |
8214 uint64_t literal_base = 0x0100001000100101UL; | |
8215 | |
8216 { | |
8217 ASSERT(__ StackPointer().Is(csp)); | |
8218 __ Mov(jssp, __ StackPointer()); | |
8219 __ SetStackPointer(jssp); | |
8220 | |
8221 int i; | |
8222 | |
8223 // Initialize the registers. | |
8224 for (i = 0; i < reg_count; i++) { | |
8225 // Always write into the X register, to ensure that the upper word is | |
8226 // properly ignored by Push when testing W registers. | |
8227 if (!x[i].IsZero()) { | |
8228 __ Mov(x[i], literal_base * i); | |
8229 } | |
8230 } | |
8231 | |
8232 // Claim memory first, as requested. | |
8233 __ Claim(claim, kByteSizeInBytes); | |
8234 | |
8235 switch (push_method) { | |
8236 case PushPopByFour: | |
8237 // Push high-numbered registers first (to the highest addresses). | |
8238 for (i = reg_count; i >= 4; i -= 4) { | |
8239 __ Push(r[i-1], r[i-2], r[i-3], r[i-4]); | |
8240 } | |
8241 // Finish off the leftovers. | |
8242 switch (i) { | |
8243 case 3: __ Push(r[2], r[1], r[0]); break; | |
8244 case 2: __ Push(r[1], r[0]); break; | |
8245 case 1: __ Push(r[0]); break; | |
8246 default: ASSERT(i == 0); break; | |
8247 } | |
8248 break; | |
8249 case PushPopRegList: | |
8250 __ PushSizeRegList(list, reg_size); | |
8251 break; | |
8252 } | |
8253 | |
8254 // Clobber all the registers, to ensure that they get repopulated by Pop. | |
8255 Clobber(&masm, list); | |
8256 | |
8257 switch (pop_method) { | |
8258 case PushPopByFour: | |
8259 // Pop low-numbered registers first (from the lowest addresses). | |
8260 for (i = 0; i <= (reg_count-4); i += 4) { | |
8261 __ Pop(r[i], r[i+1], r[i+2], r[i+3]); | |
8262 } | |
8263 // Finish off the leftovers. | |
8264 switch (reg_count - i) { | |
8265 case 3: __ Pop(r[i], r[i+1], r[i+2]); break; | |
8266 case 2: __ Pop(r[i], r[i+1]); break; | |
8267 case 1: __ Pop(r[i]); break; | |
8268 default: ASSERT(i == reg_count); break; | |
8269 } | |
8270 break; | |
8271 case PushPopRegList: | |
8272 __ PopSizeRegList(list, reg_size); | |
8273 break; | |
8274 } | |
8275 | |
8276 // Drop memory to restore jssp. | |
8277 __ Drop(claim, kByteSizeInBytes); | |
8278 | |
8279 __ Mov(csp, __ StackPointer()); | |
8280 __ SetStackPointer(csp); | |
8281 } | |
8282 | |
8283 END(); | |
8284 | |
8285 RUN(); | |
8286 | |
8287 // Check that the register contents were preserved. | |
8288 // Always use ASSERT_EQUAL_64, even when testing W registers, so we can test | |
8289 // that the upper word was properly cleared by Pop. | |
8290 literal_base &= (0xffffffffffffffffUL >> (64-reg_size)); | |
8291 for (int i = 0; i < reg_count; i++) { | |
8292 if (x[i].IsZero()) { | |
8293 ASSERT_EQUAL_64(0, x[i]); | |
8294 } else { | |
8295 ASSERT_EQUAL_64(literal_base * i, x[i]); | |
8296 } | |
8297 } | |
8298 | |
8299 TEARDOWN(); | |
8300 } | |
8301 | |
8302 | |
8303 TEST(push_pop_jssp_simple_32) { | |
8304 INIT_V8(); | |
8305 for (int claim = 0; claim <= 8; claim++) { | |
8306 for (int count = 0; count <= 8; count++) { | |
8307 PushPopJsspSimpleHelper(count, claim, kWRegSizeInBits, | |
8308 PushPopByFour, PushPopByFour); | |
8309 PushPopJsspSimpleHelper(count, claim, kWRegSizeInBits, | |
8310 PushPopByFour, PushPopRegList); | |
8311 PushPopJsspSimpleHelper(count, claim, kWRegSizeInBits, | |
8312 PushPopRegList, PushPopByFour); | |
8313 PushPopJsspSimpleHelper(count, claim, kWRegSizeInBits, | |
8314 PushPopRegList, PushPopRegList); | |
8315 } | |
8316 // Test with the maximum number of registers. | |
8317 PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kWRegSizeInBits, | |
8318 PushPopByFour, PushPopByFour); | |
8319 PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kWRegSizeInBits, | |
8320 PushPopByFour, PushPopRegList); | |
8321 PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kWRegSizeInBits, | |
8322 PushPopRegList, PushPopByFour); | |
8323 PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kWRegSizeInBits, | |
8324 PushPopRegList, PushPopRegList); | |
8325 } | |
8326 } | |
8327 | |
8328 | |
8329 TEST(push_pop_jssp_simple_64) { | |
8330 INIT_V8(); | |
8331 for (int claim = 0; claim <= 8; claim++) { | |
8332 for (int count = 0; count <= 8; count++) { | |
8333 PushPopJsspSimpleHelper(count, claim, kXRegSizeInBits, | |
8334 PushPopByFour, PushPopByFour); | |
8335 PushPopJsspSimpleHelper(count, claim, kXRegSizeInBits, | |
8336 PushPopByFour, PushPopRegList); | |
8337 PushPopJsspSimpleHelper(count, claim, kXRegSizeInBits, | |
8338 PushPopRegList, PushPopByFour); | |
8339 PushPopJsspSimpleHelper(count, claim, kXRegSizeInBits, | |
8340 PushPopRegList, PushPopRegList); | |
8341 } | |
8342 // Test with the maximum number of registers. | |
8343 PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kXRegSizeInBits, | |
8344 PushPopByFour, PushPopByFour); | |
8345 PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kXRegSizeInBits, | |
8346 PushPopByFour, PushPopRegList); | |
8347 PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kXRegSizeInBits, | |
8348 PushPopRegList, PushPopByFour); | |
8349 PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kXRegSizeInBits, | |
8350 PushPopRegList, PushPopRegList); | |
8351 } | |
8352 } | |
8353 | |
8354 | |
8355 // The maximum number of registers that can be used by the PushPopFPJssp* tests, | |
8356 // where a reg_count field is provided. (-1 is a sentinel: use all allowed.) | |
8357 static int const kPushPopFPJsspMaxRegCount = -1; | |
8358 | |
8359 // Test a simple push-pop pattern: | |
8360 // * Claim <claim> bytes to set the stack alignment. | |
8361 // * Push <reg_count> FP registers with size <reg_size>. | |
8362 // * Clobber the register contents. | |
8363 // * Pop <reg_count> FP registers to restore the original contents. | |
8364 // * Drop <claim> bytes to restore the original stack pointer. | |
8365 // | |
8366 // Different push and pop methods can be specified independently to test for | |
8367 // proper word-endian behaviour. | |
8368 static void PushPopFPJsspSimpleHelper(int reg_count, | |
8369 int claim, | |
8370 int reg_size, | |
8371 PushPopMethod push_method, | |
8372 PushPopMethod pop_method) { | |
8373 SETUP(); | |
8374 | |
8375 START(); | |
8376 | |
8377 // We can use any floating-point register. None of them are reserved for | |
8378 // debug code, for example. | |
8379 static RegList const allowed = ~0; | |
8380 if (reg_count == kPushPopFPJsspMaxRegCount) { | |
8381 reg_count = CountSetBits(allowed, kNumberOfFPRegisters); | |
8382 } | |
8383 // Work out which registers to use, based on reg_size. | |
8384 FPRegister v[kNumberOfRegisters]; | |
8385 FPRegister d[kNumberOfRegisters]; | |
8386 RegList list = PopulateFPRegisterArray(NULL, d, v, reg_size, reg_count, | |
8387 allowed); | |
8388 | |
8389 // The literal base is chosen to have two useful properties: | |
8390 // * When multiplied (using an integer) by small values (such as a register | |
8391 // index), this value is clearly readable in the result. | |
8392 // * The value is not formed from repeating fixed-size smaller values, so it | |
8393 // can be used to detect endianness-related errors. | |
8394 // * It is never a floating-point NaN, and will therefore always compare | |
8395 // equal to itself. | |
8396 uint64_t literal_base = 0x0100001000100101UL; | |
8397 | |
8398 { | |
8399 ASSERT(__ StackPointer().Is(csp)); | |
8400 __ Mov(jssp, __ StackPointer()); | |
8401 __ SetStackPointer(jssp); | |
8402 | |
8403 int i; | |
8404 | |
8405 // Initialize the registers, using X registers to load the literal. | |
8406 __ Mov(x0, 0); | |
8407 __ Mov(x1, literal_base); | |
8408 for (i = 0; i < reg_count; i++) { | |
8409 // Always write into the D register, to ensure that the upper word is | |
8410 // properly ignored by Push when testing S registers. | |
8411 __ Fmov(d[i], x0); | |
8412 // Calculate the next literal. | |
8413 __ Add(x0, x0, x1); | |
8414 } | |
8415 | |
8416 // Claim memory first, as requested. | |
8417 __ Claim(claim, kByteSizeInBytes); | |
8418 | |
8419 switch (push_method) { | |
8420 case PushPopByFour: | |
8421 // Push high-numbered registers first (to the highest addresses). | |
8422 for (i = reg_count; i >= 4; i -= 4) { | |
8423 __ Push(v[i-1], v[i-2], v[i-3], v[i-4]); | |
8424 } | |
8425 // Finish off the leftovers. | |
8426 switch (i) { | |
8427 case 3: __ Push(v[2], v[1], v[0]); break; | |
8428 case 2: __ Push(v[1], v[0]); break; | |
8429 case 1: __ Push(v[0]); break; | |
8430 default: ASSERT(i == 0); break; | |
8431 } | |
8432 break; | |
8433 case PushPopRegList: | |
8434 __ PushSizeRegList(list, reg_size, CPURegister::kFPRegister); | |
8435 break; | |
8436 } | |
8437 | |
8438 // Clobber all the registers, to ensure that they get repopulated by Pop. | |
8439 ClobberFP(&masm, list); | |
8440 | |
8441 switch (pop_method) { | |
8442 case PushPopByFour: | |
8443 // Pop low-numbered registers first (from the lowest addresses). | |
8444 for (i = 0; i <= (reg_count-4); i += 4) { | |
8445 __ Pop(v[i], v[i+1], v[i+2], v[i+3]); | |
8446 } | |
8447 // Finish off the leftovers. | |
8448 switch (reg_count - i) { | |
8449 case 3: __ Pop(v[i], v[i+1], v[i+2]); break; | |
8450 case 2: __ Pop(v[i], v[i+1]); break; | |
8451 case 1: __ Pop(v[i]); break; | |
8452 default: ASSERT(i == reg_count); break; | |
8453 } | |
8454 break; | |
8455 case PushPopRegList: | |
8456 __ PopSizeRegList(list, reg_size, CPURegister::kFPRegister); | |
8457 break; | |
8458 } | |
8459 | |
8460 // Drop memory to restore jssp. | |
8461 __ Drop(claim, kByteSizeInBytes); | |
8462 | |
8463 __ Mov(csp, __ StackPointer()); | |
8464 __ SetStackPointer(csp); | |
8465 } | |
8466 | |
8467 END(); | |
8468 | |
8469 RUN(); | |
8470 | |
8471 // Check that the register contents were preserved. | |
8472 // Always use ASSERT_EQUAL_FP64, even when testing S registers, so we can | |
8473 // test that the upper word was properly cleared by Pop. | |
8474 literal_base &= (0xffffffffffffffffUL >> (64-reg_size)); | |
8475 for (int i = 0; i < reg_count; i++) { | |
8476 uint64_t literal = literal_base * i; | |
8477 double expected; | |
8478 memcpy(&expected, &literal, sizeof(expected)); | |
8479 ASSERT_EQUAL_FP64(expected, d[i]); | |
8480 } | |
8481 | |
8482 TEARDOWN(); | |
8483 } | |
8484 | |
8485 | |
8486 TEST(push_pop_fp_jssp_simple_32) { | |
8487 INIT_V8(); | |
8488 for (int claim = 0; claim <= 8; claim++) { | |
8489 for (int count = 0; count <= 8; count++) { | |
8490 PushPopFPJsspSimpleHelper(count, claim, kSRegSizeInBits, | |
8491 PushPopByFour, PushPopByFour); | |
8492 PushPopFPJsspSimpleHelper(count, claim, kSRegSizeInBits, | |
8493 PushPopByFour, PushPopRegList); | |
8494 PushPopFPJsspSimpleHelper(count, claim, kSRegSizeInBits, | |
8495 PushPopRegList, PushPopByFour); | |
8496 PushPopFPJsspSimpleHelper(count, claim, kSRegSizeInBits, | |
8497 PushPopRegList, PushPopRegList); | |
8498 } | |
8499 // Test with the maximum number of registers. | |
8500 PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kSRegSizeInBits, | |
8501 PushPopByFour, PushPopByFour); | |
8502 PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kSRegSizeInBits, | |
8503 PushPopByFour, PushPopRegList); | |
8504 PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kSRegSizeInBits, | |
8505 PushPopRegList, PushPopByFour); | |
8506 PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kSRegSizeInBits, | |
8507 PushPopRegList, PushPopRegList); | |
8508 } | |
8509 } | |
8510 | |
8511 | |
8512 TEST(push_pop_fp_jssp_simple_64) { | |
8513 INIT_V8(); | |
8514 for (int claim = 0; claim <= 8; claim++) { | |
8515 for (int count = 0; count <= 8; count++) { | |
8516 PushPopFPJsspSimpleHelper(count, claim, kDRegSizeInBits, | |
8517 PushPopByFour, PushPopByFour); | |
8518 PushPopFPJsspSimpleHelper(count, claim, kDRegSizeInBits, | |
8519 PushPopByFour, PushPopRegList); | |
8520 PushPopFPJsspSimpleHelper(count, claim, kDRegSizeInBits, | |
8521 PushPopRegList, PushPopByFour); | |
8522 PushPopFPJsspSimpleHelper(count, claim, kDRegSizeInBits, | |
8523 PushPopRegList, PushPopRegList); | |
8524 } | |
8525 // Test with the maximum number of registers. | |
8526 PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kDRegSizeInBits, | |
8527 PushPopByFour, PushPopByFour); | |
8528 PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kDRegSizeInBits, | |
8529 PushPopByFour, PushPopRegList); | |
8530 PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kDRegSizeInBits, | |
8531 PushPopRegList, PushPopByFour); | |
8532 PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kDRegSizeInBits, | |
8533 PushPopRegList, PushPopRegList); | |
8534 } | |
8535 } | |
8536 | |
8537 | |
8538 // Push and pop data using an overlapping combination of Push/Pop and | |
8539 // RegList-based methods. | |
8540 static void PushPopJsspMixedMethodsHelper(int claim, int reg_size) { | |
8541 SETUP(); | |
8542 | |
8543 // Registers x8 and x9 are used by the macro assembler for debug code (for | |
8544 // example in 'Pop'), so we can't use them here. We can't use jssp because it | |
8545 // will be the stack pointer for this test. | |
8546 static RegList const allowed = | |
8547 ~(x8.Bit() | x9.Bit() | jssp.Bit() | xzr.Bit()); | |
8548 // Work out which registers to use, based on reg_size. | |
8549 Register r[10]; | |
8550 Register x[10]; | |
8551 PopulateRegisterArray(NULL, x, r, reg_size, 10, allowed); | |
8552 | |
8553 // Calculate some handy register lists. | |
8554 RegList r0_to_r3 = 0; | |
8555 for (int i = 0; i <= 3; i++) { | |
8556 r0_to_r3 |= x[i].Bit(); | |
8557 } | |
8558 RegList r4_to_r5 = 0; | |
8559 for (int i = 4; i <= 5; i++) { | |
8560 r4_to_r5 |= x[i].Bit(); | |
8561 } | |
8562 RegList r6_to_r9 = 0; | |
8563 for (int i = 6; i <= 9; i++) { | |
8564 r6_to_r9 |= x[i].Bit(); | |
8565 } | |
8566 | |
8567 // The literal base is chosen to have two useful properties: | |
8568 // * When multiplied by small values (such as a register index), this value | |
8569 // is clearly readable in the result. | |
8570 // * The value is not formed from repeating fixed-size smaller values, so it | |
8571 // can be used to detect endianness-related errors. | |
8572 uint64_t literal_base = 0x0100001000100101UL; | |
8573 | |
8574 START(); | |
8575 { | |
8576 ASSERT(__ StackPointer().Is(csp)); | |
8577 __ Mov(jssp, __ StackPointer()); | |
8578 __ SetStackPointer(jssp); | |
8579 | |
8580 // Claim memory first, as requested. | |
8581 __ Claim(claim, kByteSizeInBytes); | |
8582 | |
8583 __ Mov(x[3], literal_base * 3); | |
8584 __ Mov(x[2], literal_base * 2); | |
8585 __ Mov(x[1], literal_base * 1); | |
8586 __ Mov(x[0], literal_base * 0); | |
8587 | |
8588 __ PushSizeRegList(r0_to_r3, reg_size); | |
8589 __ Push(r[3], r[2]); | |
8590 | |
8591 Clobber(&masm, r0_to_r3); | |
8592 __ PopSizeRegList(r0_to_r3, reg_size); | |
8593 | |
8594 __ Push(r[2], r[1], r[3], r[0]); | |
8595 | |
8596 Clobber(&masm, r4_to_r5); | |
8597 __ Pop(r[4], r[5]); | |
8598 Clobber(&masm, r6_to_r9); | |
8599 __ Pop(r[6], r[7], r[8], r[9]); | |
8600 | |
8601 // Drop memory to restore jssp. | |
8602 __ Drop(claim, kByteSizeInBytes); | |
8603 | |
8604 __ Mov(csp, __ StackPointer()); | |
8605 __ SetStackPointer(csp); | |
8606 } | |
8607 | |
8608 END(); | |
8609 | |
8610 RUN(); | |
8611 | |
8612 // Always use ASSERT_EQUAL_64, even when testing W registers, so we can test | |
8613 // that the upper word was properly cleared by Pop. | |
8614 literal_base &= (0xffffffffffffffffUL >> (64-reg_size)); | |
8615 | |
8616 ASSERT_EQUAL_64(literal_base * 3, x[9]); | |
8617 ASSERT_EQUAL_64(literal_base * 2, x[8]); | |
8618 ASSERT_EQUAL_64(literal_base * 0, x[7]); | |
8619 ASSERT_EQUAL_64(literal_base * 3, x[6]); | |
8620 ASSERT_EQUAL_64(literal_base * 1, x[5]); | |
8621 ASSERT_EQUAL_64(literal_base * 2, x[4]); | |
8622 | |
8623 TEARDOWN(); | |
8624 } | |
8625 | |
8626 | |
8627 TEST(push_pop_jssp_mixed_methods_64) { | |
8628 INIT_V8(); | |
8629 for (int claim = 0; claim <= 8; claim++) { | |
8630 PushPopJsspMixedMethodsHelper(claim, kXRegSizeInBits); | |
8631 } | |
8632 } | |
8633 | |
8634 | |
8635 TEST(push_pop_jssp_mixed_methods_32) { | |
8636 INIT_V8(); | |
8637 for (int claim = 0; claim <= 8; claim++) { | |
8638 PushPopJsspMixedMethodsHelper(claim, kWRegSizeInBits); | |
8639 } | |
8640 } | |
8641 | |
8642 | |
8643 // Push and pop data using overlapping X- and W-sized quantities. | |
8644 static void PushPopJsspWXOverlapHelper(int reg_count, int claim) { | |
8645 // This test emits rather a lot of code. | |
8646 SETUP_SIZE(BUF_SIZE * 2); | |
8647 | |
8648 // Work out which registers to use, based on reg_size. | |
8649 Register tmp = x8; | |
8650 static RegList const allowed = ~(tmp.Bit() | jssp.Bit()); | |
8651 if (reg_count == kPushPopJsspMaxRegCount) { | |
8652 reg_count = CountSetBits(allowed, kNumberOfRegisters); | |
8653 } | |
8654 Register w[kNumberOfRegisters]; | |
8655 Register x[kNumberOfRegisters]; | |
8656 RegList list = PopulateRegisterArray(w, x, NULL, 0, reg_count, allowed); | |
8657 | |
8658 // The number of W-sized slots we expect to pop. When we pop, we alternate | |
8659 // between W and X registers, so we need reg_count*1.5 W-sized slots. | |
8660 int const requested_w_slots = reg_count + reg_count / 2; | |
8661 | |
8662 // Track what _should_ be on the stack, using W-sized slots. | |
8663 static int const kMaxWSlots = kNumberOfRegisters + kNumberOfRegisters / 2; | |
8664 uint32_t stack[kMaxWSlots]; | |
8665 for (int i = 0; i < kMaxWSlots; i++) { | |
8666 stack[i] = 0xdeadbeef; | |
8667 } | |
8668 | |
8669 // The literal base is chosen to have two useful properties: | |
8670 // * When multiplied by small values (such as a register index), this value | |
8671 // is clearly readable in the result. | |
8672 // * The value is not formed from repeating fixed-size smaller values, so it | |
8673 // can be used to detect endianness-related errors. | |
8674 static uint64_t const literal_base = 0x0100001000100101UL; | |
8675 static uint64_t const literal_base_hi = literal_base >> 32; | |
8676 static uint64_t const literal_base_lo = literal_base & 0xffffffff; | |
8677 static uint64_t const literal_base_w = literal_base & 0xffffffff; | |
8678 | |
8679 START(); | |
8680 { | |
8681 ASSERT(__ StackPointer().Is(csp)); | |
8682 __ Mov(jssp, __ StackPointer()); | |
8683 __ SetStackPointer(jssp); | |
8684 | |
8685 // Initialize the registers. | |
8686 for (int i = 0; i < reg_count; i++) { | |
8687 // Always write into the X register, to ensure that the upper word is | |
8688 // properly ignored by Push when testing W registers. | |
8689 if (!x[i].IsZero()) { | |
8690 __ Mov(x[i], literal_base * i); | |
8691 } | |
8692 } | |
8693 | |
8694 // Claim memory first, as requested. | |
8695 __ Claim(claim, kByteSizeInBytes); | |
8696 | |
8697 // The push-pop pattern is as follows: | |
8698 // Push: Pop: | |
8699 // x[0](hi) -> w[0] | |
8700 // x[0](lo) -> x[1](hi) | |
8701 // w[1] -> x[1](lo) | |
8702 // w[1] -> w[2] | |
8703 // x[2](hi) -> x[2](hi) | |
8704 // x[2](lo) -> x[2](lo) | |
8705 // x[2](hi) -> w[3] | |
8706 // x[2](lo) -> x[4](hi) | |
8707 // x[2](hi) -> x[4](lo) | |
8708 // x[2](lo) -> w[5] | |
8709 // w[3] -> x[5](hi) | |
8710 // w[3] -> x[6](lo) | |
8711 // w[3] -> w[7] | |
8712 // w[3] -> x[8](hi) | |
8713 // x[4](hi) -> x[8](lo) | |
8714 // x[4](lo) -> w[9] | |
8715 // ... pattern continues ... | |
8716 // | |
8717 // That is, registers are pushed starting with the lower numbers, | |
8718 // alternating between x and w registers, and pushing i%4+1 copies of each, | |
8719 // where i is the register number. | |
8720 // Registers are popped starting with the higher numbers, one at a time, | |
8721 // alternating between x and w registers. | |
8722 // | |
8723 // This pattern provides a wide variety of alignment effects and overlaps. | |
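// For example, the first three registers deposit these W-sized slot values
// (a sketch; the actual registers are chosen by PopulateRegisterArray):
//   i=0: X pushed once     -> 0, 0 (hi and lo of literal_base * 0)
//   i=1: W pushed twice    -> literal_base_w * 1, twice
//   i=2: X pushed 3 times  -> hi and lo of literal_base * 2, three times each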
8724 | |
8725 // ---- Push ---- | |
8726 | |
8727 int active_w_slots = 0; | |
8728 for (int i = 0; active_w_slots < requested_w_slots; i++) { | |
8729 ASSERT(i < reg_count); | |
8730 // In order to test various arguments to PushMultipleTimes, and to try to | |
8731 // exercise different alignment and overlap effects, we push each | |
8732 // register a different number of times. | |
8733 int times = i % 4 + 1; | |
8734 if (i & 1) { | |
8735 // Push odd-numbered registers as W registers. | |
8736 if (i & 2) { | |
8737 __ PushMultipleTimes(w[i], times); | |
8738 } else { | |
8739 // Use a register to specify the count. | |
8740 __ Mov(tmp.W(), times); | |
8741 __ PushMultipleTimes(w[i], tmp.W()); | |
8742 } | |
8743 // Fill in the expected stack slots. | |
8744 for (int j = 0; j < times; j++) { | |
8745 if (w[i].Is(wzr)) { | |
8746 // The zero register always writes zeroes. | |
8747 stack[active_w_slots++] = 0; | |
8748 } else { | |
8749 stack[active_w_slots++] = literal_base_w * i; | |
8750 } | |
8751 } | |
8752 } else { | |
8753 // Push even-numbered registers as X registers. | |
8754 if (i & 2) { | |
8755 __ PushMultipleTimes(x[i], times); | |
8756 } else { | |
8757 // Use a register to specify the count. | |
8758 __ Mov(tmp, times); | |
8759 __ PushMultipleTimes(x[i], tmp); | |
8760 } | |
8761 // Fill in the expected stack slots. | |
8762 for (int j = 0; j < times; j++) { | |
8763 if (x[i].IsZero()) { | |
8764 // The zero register always writes zeroes. | |
8765 stack[active_w_slots++] = 0; | |
8766 stack[active_w_slots++] = 0; | |
8767 } else { | |
8768 stack[active_w_slots++] = literal_base_hi * i; | |
8769 stack[active_w_slots++] = literal_base_lo * i; | |
8770 } | |
8771 } | |
8772 } | |
8773 } | |
8774 // Because we were pushing several registers at a time, we probably pushed | |
8775 // more than we needed to. | |
8776 if (active_w_slots > requested_w_slots) { | |
8777 __ Drop(active_w_slots - requested_w_slots, kWRegSize); | |
8778 // Bump the number of active W-sized slots back to where it should be, | |
8779 // and fill the empty space with a dummy value. | |
8780 do { | |
8781 stack[--active_w_slots] = 0xdeadbeef; | |
8782 } while (active_w_slots > requested_w_slots); | |
8783 } | |
8784 | |
8785 // ---- Pop ---- | |
8786 | |
8787 Clobber(&masm, list); | |
8788 | |
8789 // If popping an even number of registers, the first one will be X-sized. | |
8790 // Otherwise, the first one will be W-sized. | |
8791 bool next_is_64 = !(reg_count & 1); | |
8792 for (int i = reg_count-1; i >= 0; i--) { | |
8793 if (next_is_64) { | |
8794 __ Pop(x[i]); | |
8795 active_w_slots -= 2; | |
8796 } else { | |
8797 __ Pop(w[i]); | |
8798 active_w_slots -= 1; | |
8799 } | |
8800 next_is_64 = !next_is_64; | |
8801 } | |
8802 ASSERT(active_w_slots == 0); | |
8803 | |
8804 // Drop memory to restore jssp. | |
8805 __ Drop(claim, kByteSizeInBytes); | |
8806 | |
8807 __ Mov(csp, __ StackPointer()); | |
8808 __ SetStackPointer(csp); | |
8809 } | |
8810 | |
8811 END(); | |
8812 | |
8813 RUN(); | |
8814 | |
8815 int slot = 0; | |
8816 for (int i = 0; i < reg_count; i++) { | |
8817 // Even-numbered registers were written as W registers. | |
8818 // Odd-numbered registers were written as X registers. | |
8819 bool expect_64 = (i & 1); | |
8820 uint64_t expected; | |
8821 | |
8822 if (expect_64) { | |
8823 uint64_t hi = stack[slot++]; | |
8824 uint64_t lo = stack[slot++]; | |
8825 expected = (hi << 32) | lo; | |
8826 } else { | |
8827 expected = stack[slot++]; | |
8828 } | |
8829 | |
8830 // Always use ASSERT_EQUAL_64, even when testing W registers, so we can | |
8831 // test that the upper word was properly cleared by Pop. | |
8832 if (x[i].IsZero()) { | |
8833 ASSERT_EQUAL_64(0, x[i]); | |
8834 } else { | |
8835 ASSERT_EQUAL_64(expected, x[i]); | |
8836 } | |
8837 } | |
8838 ASSERT(slot == requested_w_slots); | |
8839 | |
8840 TEARDOWN(); | |
8841 } | |
8842 | |
8843 | |
8844 TEST(push_pop_jssp_wx_overlap) { | |
8845 INIT_V8(); | |
8846 for (int claim = 0; claim <= 8; claim++) { | |
8847 for (int count = 1; count <= 8; count++) { | |
8848 PushPopJsspWXOverlapHelper(count, claim); | |
8849 PushPopJsspWXOverlapHelper(count, claim); | |
8850 PushPopJsspWXOverlapHelper(count, claim); | |
8851 PushPopJsspWXOverlapHelper(count, claim); | |
8852 } | |
8853 // Test with the maximum number of registers. | |
8854 PushPopJsspWXOverlapHelper(kPushPopJsspMaxRegCount, claim); | |
8855 PushPopJsspWXOverlapHelper(kPushPopJsspMaxRegCount, claim); | |
8856 PushPopJsspWXOverlapHelper(kPushPopJsspMaxRegCount, claim); | |
8857 PushPopJsspWXOverlapHelper(kPushPopJsspMaxRegCount, claim); | |
8858 } | |
8859 } | |
8860 | |
8861 | |
8862 TEST(push_pop_csp) { | |
8863 INIT_V8(); | |
8864 SETUP(); | |
8865 | |
8866 START(); | |
8867 | |
8868 ASSERT(csp.Is(__ StackPointer())); | |
8869 | |
8870 __ Mov(x3, 0x3333333333333333UL); | |
8871 __ Mov(x2, 0x2222222222222222UL); | |
8872 __ Mov(x1, 0x1111111111111111UL); | |
8873 __ Mov(x0, 0x0000000000000000UL); | |
8874 __ Claim(2); | |
8875 __ PushXRegList(x0.Bit() | x1.Bit() | x2.Bit() | x3.Bit()); | |
8876 __ Push(x3, x2); | |
8877 __ PopXRegList(x0.Bit() | x1.Bit() | x2.Bit() | x3.Bit()); | |
8878 __ Push(x2, x1, x3, x0); | |
8879 __ Pop(x4, x5); | |
8880 __ Pop(x6, x7, x8, x9); | |
8881 | |
8882 __ Claim(2); | |
8883 __ PushWRegList(w0.Bit() | w1.Bit() | w2.Bit() | w3.Bit()); | |
8884 __ Push(w3, w1, w2, w0); | |
8885 __ PopWRegList(w10.Bit() | w11.Bit() | w12.Bit() | w13.Bit()); | |
8886 __ Pop(w14, w15, w16, w17); | |
8887 | |
8888 __ Claim(2); | |
8889 __ Push(w2, w2, w1, w1); | |
8890 __ Push(x3, x3); | |
8891 __ Pop(w18, w19, w20, w21); | |
8892 __ Pop(x22, x23); | |
8893 | |
8894 __ Claim(2); | |
8895 __ PushXRegList(x1.Bit() | x22.Bit()); | |
8896 __ PopXRegList(x24.Bit() | x26.Bit()); | |
8897 | |
8898 __ Claim(2); | |
8899 __ PushWRegList(w1.Bit() | w2.Bit() | w4.Bit() | w22.Bit()); | |
8900 __ PopWRegList(w25.Bit() | w27.Bit() | w28.Bit() | w29.Bit()); | |
8901 | |
8902 __ Claim(2); | |
8903 __ PushXRegList(0); | |
8904 __ PopXRegList(0); | |
8905 __ PushXRegList(0xffffffff); | |
8906 __ PopXRegList(0xffffffff); | |
8907 __ Drop(12); | |
8908 | |
8909 END(); | |
8910 | |
8911 RUN(); | |
8912 | |
8913 ASSERT_EQUAL_64(0x1111111111111111UL, x3); | |
8914 ASSERT_EQUAL_64(0x0000000000000000UL, x2); | |
8915 ASSERT_EQUAL_64(0x3333333333333333UL, x1); | |
8916 ASSERT_EQUAL_64(0x2222222222222222UL, x0); | |
8917 ASSERT_EQUAL_64(0x3333333333333333UL, x9); | |
8918 ASSERT_EQUAL_64(0x2222222222222222UL, x8); | |
8919 ASSERT_EQUAL_64(0x0000000000000000UL, x7); | |
8920 ASSERT_EQUAL_64(0x3333333333333333UL, x6); | |
8921 ASSERT_EQUAL_64(0x1111111111111111UL, x5); | |
8922 ASSERT_EQUAL_64(0x2222222222222222UL, x4); | |
8923 | |
8924 ASSERT_EQUAL_32(0x11111111U, w13); | |
8925 ASSERT_EQUAL_32(0x33333333U, w12); | |
8926 ASSERT_EQUAL_32(0x00000000U, w11); | |
8927 ASSERT_EQUAL_32(0x22222222U, w10); | |
8928 ASSERT_EQUAL_32(0x11111111U, w17); | |
8929 ASSERT_EQUAL_32(0x00000000U, w16); | |
8930 ASSERT_EQUAL_32(0x33333333U, w15); | |
8931 ASSERT_EQUAL_32(0x22222222U, w14); | |
8932 | |
8933 ASSERT_EQUAL_32(0x11111111U, w18); | |
8934 ASSERT_EQUAL_32(0x11111111U, w19); | |
8935 ASSERT_EQUAL_32(0x11111111U, w20); | |
8936 ASSERT_EQUAL_32(0x11111111U, w21); | |
8937 ASSERT_EQUAL_64(0x3333333333333333UL, x22); | |
8938 ASSERT_EQUAL_64(0x0000000000000000UL, x23); | |
8939 | |
8940 ASSERT_EQUAL_64(0x3333333333333333UL, x24); | |
8941 ASSERT_EQUAL_64(0x3333333333333333UL, x26); | |
8942 | |
8943 ASSERT_EQUAL_32(0x33333333U, w25); | |
8944 ASSERT_EQUAL_32(0x00000000U, w27); | |
8945 ASSERT_EQUAL_32(0x22222222U, w28); | |
8946 ASSERT_EQUAL_32(0x33333333U, w29); | |
8947 TEARDOWN(); | |
8948 } | |
8949 | |
8950 | |
8951 TEST(push_queued) { | |
8952 INIT_V8(); | |
8953 SETUP(); | |
8954 | |
8955 START(); | |
8956 | |
8957 ASSERT(__ StackPointer().Is(csp)); | |
8958 __ Mov(jssp, __ StackPointer()); | |
8959 __ SetStackPointer(jssp); | |
8960 | |
8961 MacroAssembler::PushPopQueue queue(&masm); | |
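// Queue() only records the registers; no code is emitted until PushQueued()
// below, which is why the queued registers can be initialized afterwards.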
8962 | |
8963 // Queue up registers. | |
8964 queue.Queue(x0); | |
8965 queue.Queue(x1); | |
8966 queue.Queue(x2); | |
8967 queue.Queue(x3); | |
8968 | |
8969 queue.Queue(w4); | |
8970 queue.Queue(w5); | |
8971 queue.Queue(w6); | |
8972 | |
8973 queue.Queue(d0); | |
8974 queue.Queue(d1); | |
8975 | |
8976 queue.Queue(s2); | |
8977 | |
8978 __ Mov(x0, 0x1234000000000000); | |
8979 __ Mov(x1, 0x1234000100010001); | |
8980 __ Mov(x2, 0x1234000200020002); | |
8981 __ Mov(x3, 0x1234000300030003); | |
8982 __ Mov(w4, 0x12340004); | |
8983 __ Mov(w5, 0x12340005); | |
8984 __ Mov(w6, 0x12340006); | |
8985 __ Fmov(d0, 123400.0); | |
8986 __ Fmov(d1, 123401.0); | |
8987 __ Fmov(s2, 123402.0); | |
8988 | |
8989 // Actually push them. | |
8990 queue.PushQueued(); | |
8991 | |
8992 Clobber(&masm, CPURegList(CPURegister::kRegister, kXRegSizeInBits, 0, 6)); | |
8993 Clobber(&masm, CPURegList(CPURegister::kFPRegister, kDRegSizeInBits, 0, 2)); | |
8994 | |
8995 // Pop them conventionally. | |
8996 __ Pop(s2); | |
8997 __ Pop(d1, d0); | |
8998 __ Pop(w6, w5, w4); | |
8999 __ Pop(x3, x2, x1, x0); | |
9000 | |
9001 __ Mov(csp, __ StackPointer()); | |
9002 __ SetStackPointer(csp); | |
9003 | |
9004 END(); | |
9005 | |
9006 RUN(); | |
9007 | |
9008 ASSERT_EQUAL_64(0x1234000000000000, x0); | |
9009 ASSERT_EQUAL_64(0x1234000100010001, x1); | |
9010 ASSERT_EQUAL_64(0x1234000200020002, x2); | |
9011 ASSERT_EQUAL_64(0x1234000300030003, x3); | |
9012 | |
9013 ASSERT_EQUAL_32(0x12340004, w4); | |
9014 ASSERT_EQUAL_32(0x12340005, w5); | |
9015 ASSERT_EQUAL_32(0x12340006, w6); | |
9016 | |
9017 ASSERT_EQUAL_FP64(123400.0, d0); | |
9018 ASSERT_EQUAL_FP64(123401.0, d1); | |
9019 | |
9020 ASSERT_EQUAL_FP32(123402.0, s2); | |
9021 | |
9022 TEARDOWN(); | |
9023 } | |
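// For reference, the minimal PushPopQueue pattern exercised above is (a
// sketch, assuming the jssp set-up shown in the test):
//
//   MacroAssembler::PushPopQueue queue(&masm);
//   queue.Queue(x0);
//   queue.Queue(d0);
//   ...initialize x0 and d0...
//   queue.PushQueued();   // The queued pushes are emitted here, in order.
//   ...
//   __ Pop(d0);           // Most recently pushed, so popped first.
//   __ Pop(x0);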
9024 | |
9025 | |
9026 TEST(pop_queued) { | |
9027 INIT_V8(); | |
9028 SETUP(); | |
9029 | |
9030 START(); | |
9031 | |
9032 ASSERT(__ StackPointer().Is(csp)); | |
9033 __ Mov(jssp, __ StackPointer()); | |
9034 __ SetStackPointer(jssp); | |
9035 | |
9036 MacroAssembler::PushPopQueue queue(&masm); | |
9037 | |
9038 __ Mov(x0, 0x1234000000000000); | |
9039 __ Mov(x1, 0x1234000100010001); | |
9040 __ Mov(x2, 0x1234000200020002); | |
9041 __ Mov(x3, 0x1234000300030003); | |
9042 __ Mov(w4, 0x12340004); | |
9043 __ Mov(w5, 0x12340005); | |
9044 __ Mov(w6, 0x12340006); | |
9045 __ Fmov(d0, 123400.0); | |
9046 __ Fmov(d1, 123401.0); | |
9047 __ Fmov(s2, 123402.0); | |
9048 | |
9049 // Push registers conventionally. | |
9050 __ Push(x0, x1, x2, x3); | |
9051 __ Push(w4, w5, w6); | |
9052 __ Push(d0, d1); | |
9053 __ Push(s2); | |
9054 | |
9055 // Queue up a pop. | |
9056 queue.Queue(s2); | |
9057 | |
9058 queue.Queue(d1); | |
9059 queue.Queue(d0); | |
9060 | |
9061 queue.Queue(w6); | |
9062 queue.Queue(w5); | |
9063 queue.Queue(w4); | |
9064 | |
9065 queue.Queue(x3); | |
9066 queue.Queue(x2); | |
9067 queue.Queue(x1); | |
9068 queue.Queue(x0); | |
9069 | |
9070 Clobber(&masm, CPURegList(CPURegister::kRegister, kXRegSizeInBits, 0, 6)); | |
9071 Clobber(&masm, CPURegList(CPURegister::kFPRegister, kDRegSizeInBits, 0, 2)); | |
9072 | |
9073 // Actually pop them. | |
9074 queue.PopQueued(); | |
9075 | |
9076 __ Mov(csp, __ StackPointer()); | |
9077 __ SetStackPointer(csp); | |
9078 | |
9079 END(); | |
9080 | |
9081 RUN(); | |
9082 | |
9083 ASSERT_EQUAL_64(0x1234000000000000, x0); | |
9084 ASSERT_EQUAL_64(0x1234000100010001, x1); | |
9085 ASSERT_EQUAL_64(0x1234000200020002, x2); | |
9086 ASSERT_EQUAL_64(0x1234000300030003, x3); | |
9087 | |
9088 ASSERT_EQUAL_64(0x0000000012340004, x4); | |
9089 ASSERT_EQUAL_64(0x0000000012340005, x5); | |
9090 ASSERT_EQUAL_64(0x0000000012340006, x6); | |
9091 | |
9092 ASSERT_EQUAL_FP64(123400.0, d0); | |
9093 ASSERT_EQUAL_FP64(123401.0, d1); | |
9094 | |
9095 ASSERT_EQUAL_FP32(123402.0, s2); | |
9096 | |
9097 TEARDOWN(); | |
9098 } | |
9099 | |
9100 | |
9101 TEST(jump_both_smi) { | |
9102 INIT_V8(); | |
9103 SETUP(); | |
9104 | |
9105 Label cond_pass_00, cond_pass_01, cond_pass_10, cond_pass_11; | |
9106 Label cond_fail_00, cond_fail_01, cond_fail_10, cond_fail_11; | |
9107 Label return1, return2, return3, done; | |
9108 | |
9109 START(); | |
9110 | |
9111 __ Mov(x0, 0x5555555500000001UL); // A pointer. | |
9112 __ Mov(x1, 0xaaaaaaaa00000001UL); // A pointer. | |
9113 __ Mov(x2, 0x1234567800000000UL); // A smi. | |
9114 __ Mov(x3, 0x8765432100000000UL); // A smi. | |
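// In this port's Smi encoding, the 32-bit payload lives in the upper word
// and the lower 32 bits stay clear, so the values ending in 00000000 above
// are Smis, while the pointers have the low tag bit set and must fail.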
9115 __ Mov(x4, 0xdead); | |
9116 __ Mov(x5, 0xdead); | |
9117 __ Mov(x6, 0xdead); | |
9118 __ Mov(x7, 0xdead); | |
9119 | |
9120 __ JumpIfBothSmi(x0, x1, &cond_pass_00, &cond_fail_00); | |
9121 __ Bind(&return1); | |
9122 __ JumpIfBothSmi(x0, x2, &cond_pass_01, &cond_fail_01); | |
9123 __ Bind(&return2); | |
9124 __ JumpIfBothSmi(x2, x1, &cond_pass_10, &cond_fail_10); | |
9125 __ Bind(&return3); | |
9126 __ JumpIfBothSmi(x2, x3, &cond_pass_11, &cond_fail_11); | |
9127 | |
9128 __ Bind(&cond_fail_00); | |
9129 __ Mov(x4, 0); | |
9130 __ B(&return1); | |
9131 __ Bind(&cond_pass_00); | |
9132 __ Mov(x4, 1); | |
9133 __ B(&return1); | |
9134 | |
9135 __ Bind(&cond_fail_01); | |
9136 __ Mov(x5, 0); | |
9137 __ B(&return2); | |
9138 __ Bind(&cond_pass_01); | |
9139 __ Mov(x5, 1); | |
9140 __ B(&return2); | |
9141 | |
9142 __ Bind(&cond_fail_10); | |
9143 __ Mov(x6, 0); | |
9144 __ B(&return3); | |
9145 __ Bind(&cond_pass_10); | |
9146 __ Mov(x6, 1); | |
9147 __ B(&return3); | |
9148 | |
9149 __ Bind(&cond_fail_11); | |
9150 __ Mov(x7, 0); | |
9151 __ B(&done); | |
9152 __ Bind(&cond_pass_11); | |
9153 __ Mov(x7, 1); | |
9154 | |
9155 __ Bind(&done); | |
9156 | |
9157 END(); | |
9158 | |
9159 RUN(); | |
9160 | |
9161 ASSERT_EQUAL_64(0x5555555500000001UL, x0); | |
9162 ASSERT_EQUAL_64(0xaaaaaaaa00000001UL, x1); | |
9163 ASSERT_EQUAL_64(0x1234567800000000UL, x2); | |
9164 ASSERT_EQUAL_64(0x8765432100000000UL, x3); | |
9165 ASSERT_EQUAL_64(0, x4); | |
9166 ASSERT_EQUAL_64(0, x5); | |
9167 ASSERT_EQUAL_64(0, x6); | |
9168 ASSERT_EQUAL_64(1, x7); | |
9169 | |
9170 TEARDOWN(); | |
9171 } | |
9172 | |
9173 | |
9174 TEST(jump_either_smi) { | |
9175 INIT_V8(); | |
9176 SETUP(); | |
9177 | |
9178 Label cond_pass_00, cond_pass_01, cond_pass_10, cond_pass_11; | |
9179 Label cond_fail_00, cond_fail_01, cond_fail_10, cond_fail_11; | |
9180 Label return1, return2, return3, done; | |
9181 | |
9182 START(); | |
9183 | |
9184 __ Mov(x0, 0x5555555500000001UL); // A pointer. | |
9185 __ Mov(x1, 0xaaaaaaaa00000001UL); // A pointer. | |
9186 __ Mov(x2, 0x1234567800000000UL); // A smi. | |
9187 __ Mov(x3, 0x8765432100000000UL); // A smi. | |
9188 __ Mov(x4, 0xdead); | |
9189 __ Mov(x5, 0xdead); | |
9190 __ Mov(x6, 0xdead); | |
9191 __ Mov(x7, 0xdead); | |
9192 | |
9193 __ JumpIfEitherSmi(x0, x1, &cond_pass_00, &cond_fail_00); | |
9194 __ Bind(&return1); | |
9195 __ JumpIfEitherSmi(x0, x2, &cond_pass_01, &cond_fail_01); | |
9196 __ Bind(&return2); | |
9197 __ JumpIfEitherSmi(x2, x1, &cond_pass_10, &cond_fail_10); | |
9198 __ Bind(&return3); | |
9199 __ JumpIfEitherSmi(x2, x3, &cond_pass_11, &cond_fail_11); | |
9200 | |
9201 __ Bind(&cond_fail_00); | |
9202 __ Mov(x4, 0); | |
9203 __ B(&return1); | |
9204 __ Bind(&cond_pass_00); | |
9205 __ Mov(x4, 1); | |
9206 __ B(&return1); | |
9207 | |
9208 __ Bind(&cond_fail_01); | |
9209 __ Mov(x5, 0); | |
9210 __ B(&return2); | |
9211 __ Bind(&cond_pass_01); | |
9212 __ Mov(x5, 1); | |
9213 __ B(&return2); | |
9214 | |
9215 __ Bind(&cond_fail_10); | |
9216 __ Mov(x6, 0); | |
9217 __ B(&return3); | |
9218 __ Bind(&cond_pass_10); | |
9219 __ Mov(x6, 1); | |
9220 __ B(&return3); | |
9221 | |
9222 __ Bind(&cond_fail_11); | |
9223 __ Mov(x7, 0); | |
9224 __ B(&done); | |
9225 __ Bind(&cond_pass_11); | |
9226 __ Mov(x7, 1); | |
9227 | |
9228 __ Bind(&done); | |
9229 | |
9230 END(); | |
9231 | |
9232 RUN(); | |
9233 | |
9234 ASSERT_EQUAL_64(0x5555555500000001UL, x0); | |
9235 ASSERT_EQUAL_64(0xaaaaaaaa00000001UL, x1); | |
9236 ASSERT_EQUAL_64(0x1234567800000000UL, x2); | |
9237 ASSERT_EQUAL_64(0x8765432100000000UL, x3); | |
9238 ASSERT_EQUAL_64(0, x4); | |
9239 ASSERT_EQUAL_64(1, x5); | |
9240 ASSERT_EQUAL_64(1, x6); | |
9241 ASSERT_EQUAL_64(1, x7); | |
9242 | |
9243 TEARDOWN(); | |
9244 } | |
9245 | |
9246 | |
9247 TEST(noreg) { | |
9248 // This test doesn't generate any code, but it verifies some invariants | |
9249 // related to NoReg. | |
9250 CHECK(NoReg.Is(NoFPReg)); | |
9251 CHECK(NoFPReg.Is(NoReg)); | |
9252 CHECK(NoReg.Is(NoCPUReg)); | |
9253 CHECK(NoCPUReg.Is(NoReg)); | |
9254 CHECK(NoFPReg.Is(NoCPUReg)); | |
9255 CHECK(NoCPUReg.Is(NoFPReg)); | |
9256 | |
9257 CHECK(NoReg.IsNone()); | |
9258 CHECK(NoFPReg.IsNone()); | |
9259 CHECK(NoCPUReg.IsNone()); | |
9260 } | |
9261 | |
9262 | |
9263 TEST(isvalid) { | |
9264 // This test doesn't generate any code, but it verifies some invariants | |
9265 // related to IsValid(). | |
9266 CHECK(!NoReg.IsValid()); | |
9267 CHECK(!NoFPReg.IsValid()); | |
9268 CHECK(!NoCPUReg.IsValid()); | |
9269 | |
9270 CHECK(x0.IsValid()); | |
9271 CHECK(w0.IsValid()); | |
9272 CHECK(x30.IsValid()); | |
9273 CHECK(w30.IsValid()); | |
9274 CHECK(xzr.IsValid()); | |
9275 CHECK(wzr.IsValid()); | |
9276 | |
9277 CHECK(csp.IsValid()); | |
9278 CHECK(wcsp.IsValid()); | |
9279 | |
9280 CHECK(d0.IsValid()); | |
9281 CHECK(s0.IsValid()); | |
9282 CHECK(d31.IsValid()); | |
9283 CHECK(s31.IsValid()); | |
9284 | |
9285 CHECK(x0.IsValidRegister()); | |
9286 CHECK(w0.IsValidRegister()); | |
9287 CHECK(xzr.IsValidRegister()); | |
9288 CHECK(wzr.IsValidRegister()); | |
9289 CHECK(csp.IsValidRegister()); | |
9290 CHECK(wcsp.IsValidRegister()); | |
9291 CHECK(!x0.IsValidFPRegister()); | |
9292 CHECK(!w0.IsValidFPRegister()); | |
9293 CHECK(!xzr.IsValidFPRegister()); | |
9294 CHECK(!wzr.IsValidFPRegister()); | |
9295 CHECK(!csp.IsValidFPRegister()); | |
9296 CHECK(!wcsp.IsValidFPRegister()); | |
9297 | |
9298 CHECK(d0.IsValidFPRegister()); | |
9299 CHECK(s0.IsValidFPRegister()); | |
9300 CHECK(!d0.IsValidRegister()); | |
9301 CHECK(!s0.IsValidRegister()); | |
9302 | |
9303 // Test the same as before, but using CPURegister types. This shouldn't make | |
9304 // any difference. | |
9305 CHECK(static_cast<CPURegister>(x0).IsValid()); | |
9306 CHECK(static_cast<CPURegister>(w0).IsValid()); | |
9307 CHECK(static_cast<CPURegister>(x30).IsValid()); | |
9308 CHECK(static_cast<CPURegister>(w30).IsValid()); | |
9309 CHECK(static_cast<CPURegister>(xzr).IsValid()); | |
9310 CHECK(static_cast<CPURegister>(wzr).IsValid()); | |
9311 | |
9312 CHECK(static_cast<CPURegister>(csp).IsValid()); | |
9313 CHECK(static_cast<CPURegister>(wcsp).IsValid()); | |
9314 | |
9315 CHECK(static_cast<CPURegister>(d0).IsValid()); | |
9316 CHECK(static_cast<CPURegister>(s0).IsValid()); | |
9317 CHECK(static_cast<CPURegister>(d31).IsValid()); | |
9318 CHECK(static_cast<CPURegister>(s31).IsValid()); | |
9319 | |
9320 CHECK(static_cast<CPURegister>(x0).IsValidRegister()); | |
9321 CHECK(static_cast<CPURegister>(w0).IsValidRegister()); | |
9322 CHECK(static_cast<CPURegister>(xzr).IsValidRegister()); | |
9323 CHECK(static_cast<CPURegister>(wzr).IsValidRegister()); | |
9324 CHECK(static_cast<CPURegister>(csp).IsValidRegister()); | |
9325 CHECK(static_cast<CPURegister>(wcsp).IsValidRegister()); | |
9326 CHECK(!static_cast<CPURegister>(x0).IsValidFPRegister()); | |
9327 CHECK(!static_cast<CPURegister>(w0).IsValidFPRegister()); | |
9328 CHECK(!static_cast<CPURegister>(xzr).IsValidFPRegister()); | |
9329 CHECK(!static_cast<CPURegister>(wzr).IsValidFPRegister()); | |
9330 CHECK(!static_cast<CPURegister>(csp).IsValidFPRegister()); | |
9331 CHECK(!static_cast<CPURegister>(wcsp).IsValidFPRegister()); | |
9332 | |
9333 CHECK(static_cast<CPURegister>(d0).IsValidFPRegister()); | |
9334 CHECK(static_cast<CPURegister>(s0).IsValidFPRegister()); | |
9335 CHECK(!static_cast<CPURegister>(d0).IsValidRegister()); | |
9336 CHECK(!static_cast<CPURegister>(s0).IsValidRegister()); | |
9337 } | |
9338 | |
9339 | |
9340 TEST(cpureglist_utils_x) { | |
9341 // This test doesn't generate any code, but it verifies the behaviour of | |
9342 // the CPURegList utility methods. | |
9343 | |
9344 // Test a list of X registers. | |
9345 CPURegList test(x0, x1, x2, x3); | |
9346 | |
9347 CHECK(test.IncludesAliasOf(x0)); | |
9348 CHECK(test.IncludesAliasOf(x1)); | |
9349 CHECK(test.IncludesAliasOf(x2)); | |
9350 CHECK(test.IncludesAliasOf(x3)); | |
9351 CHECK(test.IncludesAliasOf(w0)); | |
9352 CHECK(test.IncludesAliasOf(w1)); | |
9353 CHECK(test.IncludesAliasOf(w2)); | |
9354 CHECK(test.IncludesAliasOf(w3)); | |
9355 | |
9356 CHECK(!test.IncludesAliasOf(x4)); | |
9357 CHECK(!test.IncludesAliasOf(x30)); | |
9358 CHECK(!test.IncludesAliasOf(xzr)); | |
9359 CHECK(!test.IncludesAliasOf(csp)); | |
9360 CHECK(!test.IncludesAliasOf(w4)); | |
9361 CHECK(!test.IncludesAliasOf(w30)); | |
9362 CHECK(!test.IncludesAliasOf(wzr)); | |
9363 CHECK(!test.IncludesAliasOf(wcsp)); | |
9364 | |
9365 CHECK(!test.IncludesAliasOf(d0)); | |
9366 CHECK(!test.IncludesAliasOf(d1)); | |
9367 CHECK(!test.IncludesAliasOf(d2)); | |
9368 CHECK(!test.IncludesAliasOf(d3)); | |
9369 CHECK(!test.IncludesAliasOf(s0)); | |
9370 CHECK(!test.IncludesAliasOf(s1)); | |
9371 CHECK(!test.IncludesAliasOf(s2)); | |
9372 CHECK(!test.IncludesAliasOf(s3)); | |
9373 | |
9374 CHECK(!test.IsEmpty()); | |
9375 | |
9376 CHECK(test.type() == x0.type()); | |
9377 | |
9378 CHECK(test.PopHighestIndex().Is(x3)); | |
9379 CHECK(test.PopLowestIndex().Is(x0)); | |
9380 | |
9381 CHECK(test.IncludesAliasOf(x1)); | |
9382 CHECK(test.IncludesAliasOf(x2)); | |
9383 CHECK(test.IncludesAliasOf(w1)); | |
9384 CHECK(test.IncludesAliasOf(w2)); | |
9385 CHECK(!test.IncludesAliasOf(x0)); | |
9386 CHECK(!test.IncludesAliasOf(x3)); | |
9387 CHECK(!test.IncludesAliasOf(w0)); | |
9388 CHECK(!test.IncludesAliasOf(w3)); | |
9389 | |
9390 CHECK(test.PopHighestIndex().Is(x2)); | |
9391 CHECK(test.PopLowestIndex().Is(x1)); | |
9392 | |
9393 CHECK(!test.IncludesAliasOf(x1)); | |
9394 CHECK(!test.IncludesAliasOf(x2)); | |
9395 CHECK(!test.IncludesAliasOf(w1)); | |
9396 CHECK(!test.IncludesAliasOf(w2)); | |
9397 | |
9398 CHECK(test.IsEmpty()); | |
9399 } | |
9400 | |
9401 | |
9402 TEST(cpureglist_utils_w) { | |
9403 // This test doesn't generate any code, but it verifies the behaviour of | |
9404 // the CPURegList utility methods. | |
9405 | |
9406 // Test a list of W registers. | |
9407 CPURegList test(w10, w11, w12, w13); | |
9408 | |
9409 CHECK(test.IncludesAliasOf(x10)); | |
9410 CHECK(test.IncludesAliasOf(x11)); | |
9411 CHECK(test.IncludesAliasOf(x12)); | |
9412 CHECK(test.IncludesAliasOf(x13)); | |
9413 CHECK(test.IncludesAliasOf(w10)); | |
9414 CHECK(test.IncludesAliasOf(w11)); | |
9415 CHECK(test.IncludesAliasOf(w12)); | |
9416 CHECK(test.IncludesAliasOf(w13)); | |
9417 | |
9418 CHECK(!test.IncludesAliasOf(x0)); | |
9419 CHECK(!test.IncludesAliasOf(x9)); | |
9420 CHECK(!test.IncludesAliasOf(x14)); | |
9421 CHECK(!test.IncludesAliasOf(x30)); | |
9422 CHECK(!test.IncludesAliasOf(xzr)); | |
9423 CHECK(!test.IncludesAliasOf(csp)); | |
9424 CHECK(!test.IncludesAliasOf(w0)); | |
9425 CHECK(!test.IncludesAliasOf(w9)); | |
9426 CHECK(!test.IncludesAliasOf(w14)); | |
9427 CHECK(!test.IncludesAliasOf(w30)); | |
9428 CHECK(!test.IncludesAliasOf(wzr)); | |
9429 CHECK(!test.IncludesAliasOf(wcsp)); | |
9430 | |
9431 CHECK(!test.IncludesAliasOf(d10)); | |
9432 CHECK(!test.IncludesAliasOf(d11)); | |
9433 CHECK(!test.IncludesAliasOf(d12)); | |
9434 CHECK(!test.IncludesAliasOf(d13)); | |
9435 CHECK(!test.IncludesAliasOf(s10)); | |
9436 CHECK(!test.IncludesAliasOf(s11)); | |
9437 CHECK(!test.IncludesAliasOf(s12)); | |
9438 CHECK(!test.IncludesAliasOf(s13)); | |
9439 | |
9440 CHECK(!test.IsEmpty()); | |
9441 | |
9442 CHECK(test.type() == w10.type()); | |
9443 | |
9444 CHECK(test.PopHighestIndex().Is(w13)); | |
9445 CHECK(test.PopLowestIndex().Is(w10)); | |
9446 | |
9447 CHECK(test.IncludesAliasOf(x11)); | |
9448 CHECK(test.IncludesAliasOf(x12)); | |
9449 CHECK(test.IncludesAliasOf(w11)); | |
9450 CHECK(test.IncludesAliasOf(w12)); | |
9451 CHECK(!test.IncludesAliasOf(x10)); | |
9452 CHECK(!test.IncludesAliasOf(x13)); | |
9453 CHECK(!test.IncludesAliasOf(w10)); | |
9454 CHECK(!test.IncludesAliasOf(w13)); | |
9455 | |
9456 CHECK(test.PopHighestIndex().Is(w12)); | |
9457 CHECK(test.PopLowestIndex().Is(w11)); | |
9458 | |
9459 CHECK(!test.IncludesAliasOf(x11)); | |
9460 CHECK(!test.IncludesAliasOf(x12)); | |
9461 CHECK(!test.IncludesAliasOf(w11)); | |
9462 CHECK(!test.IncludesAliasOf(w12)); | |
9463 | |
9464 CHECK(test.IsEmpty()); | |
9465 } | |
9466 | |
9467 | |
9468 TEST(cpureglist_utils_d) { | |
9469 // This test doesn't generate any code, but it verifies the behaviour of | |
9470 // the CPURegList utility methods. | |
9471 | |
9472 // Test a list of D registers. | |
9473 CPURegList test(d20, d21, d22, d23); | |
9474 | |
9475 CHECK(test.IncludesAliasOf(d20)); | |
9476 CHECK(test.IncludesAliasOf(d21)); | |
9477 CHECK(test.IncludesAliasOf(d22)); | |
9478 CHECK(test.IncludesAliasOf(d23)); | |
9479 CHECK(test.IncludesAliasOf(s20)); | |
9480 CHECK(test.IncludesAliasOf(s21)); | |
9481 CHECK(test.IncludesAliasOf(s22)); | |
9482 CHECK(test.IncludesAliasOf(s23)); | |
9483 | |
9484 CHECK(!test.IncludesAliasOf(d0)); | |
9485 CHECK(!test.IncludesAliasOf(d19)); | |
9486 CHECK(!test.IncludesAliasOf(d24)); | |
9487 CHECK(!test.IncludesAliasOf(d31)); | |
9488 CHECK(!test.IncludesAliasOf(s0)); | |
9489 CHECK(!test.IncludesAliasOf(s19)); | |
9490 CHECK(!test.IncludesAliasOf(s24)); | |
9491 CHECK(!test.IncludesAliasOf(s31)); | |
9492 | |
9493 CHECK(!test.IncludesAliasOf(x20)); | |
9494 CHECK(!test.IncludesAliasOf(x21)); | |
9495 CHECK(!test.IncludesAliasOf(x22)); | |
9496 CHECK(!test.IncludesAliasOf(x23)); | |
9497 CHECK(!test.IncludesAliasOf(w20)); | |
9498 CHECK(!test.IncludesAliasOf(w21)); | |
9499 CHECK(!test.IncludesAliasOf(w22)); | |
9500 CHECK(!test.IncludesAliasOf(w23)); | |
9501 | |
9502 CHECK(!test.IncludesAliasOf(xzr)); | |
9503 CHECK(!test.IncludesAliasOf(wzr)); | |
9504 CHECK(!test.IncludesAliasOf(csp)); | |
9505 CHECK(!test.IncludesAliasOf(wcsp)); | |
9506 | |
9507 CHECK(!test.IsEmpty()); | |
9508 | |
9509 CHECK(test.type() == d20.type()); | |
9510 | |
9511 CHECK(test.PopHighestIndex().Is(d23)); | |
9512 CHECK(test.PopLowestIndex().Is(d20)); | |
9513 | |
9514 CHECK(test.IncludesAliasOf(d21)); | |
9515 CHECK(test.IncludesAliasOf(d22)); | |
9516 CHECK(test.IncludesAliasOf(s21)); | |
9517 CHECK(test.IncludesAliasOf(s22)); | |
9518 CHECK(!test.IncludesAliasOf(d20)); | |
9519 CHECK(!test.IncludesAliasOf(d23)); | |
9520 CHECK(!test.IncludesAliasOf(s20)); | |
9521 CHECK(!test.IncludesAliasOf(s23)); | |
9522 | |
9523 CHECK(test.PopHighestIndex().Is(d22)); | |
9524 CHECK(test.PopLowestIndex().Is(d21)); | |
9525 | |
9526 CHECK(!test.IncludesAliasOf(d21)); | |
9527 CHECK(!test.IncludesAliasOf(d22)); | |
9528 CHECK(!test.IncludesAliasOf(s21)); | |
9529 CHECK(!test.IncludesAliasOf(s22)); | |
9530 | |
9531 CHECK(test.IsEmpty()); | |
9532 } | |
9533 | |
9534 | |
9535 TEST(cpureglist_utils_s) { | |
9536 // This test doesn't generate any code, but it verifies the behaviour of | |
9537 // the CPURegList utility methods. | |
9538 | |
9539 // Test a list of S registers. | |
9540 CPURegList test(s20, s21, s22, s23); | |
9541 | |
9542 // The type and size mechanisms are already covered, so here we just test | |
9543 // that lists of S registers alias individual D registers. | |
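// (Each S register occupies the low half of the D register with the same
// index, so the alias checks below must all pass.)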
9544 | |
9545 CHECK(test.IncludesAliasOf(d20)); | |
9546 CHECK(test.IncludesAliasOf(d21)); | |
9547 CHECK(test.IncludesAliasOf(d22)); | |
9548 CHECK(test.IncludesAliasOf(d23)); | |
9549 CHECK(test.IncludesAliasOf(s20)); | |
9550 CHECK(test.IncludesAliasOf(s21)); | |
9551 CHECK(test.IncludesAliasOf(s22)); | |
9552 CHECK(test.IncludesAliasOf(s23)); | |
9553 } | |
9554 | |
9555 | |
9556 TEST(cpureglist_utils_empty) { | |
9557 // This test doesn't generate any code, but it verifies the behaviour of | |
9558 // the CPURegList utility methods. | |
9559 | |
9560 // Test an empty list. | |
9561 // Empty lists can have type and size properties. Check that we can create | |
9562 // them, and that they are empty. | |
9563 CPURegList reg32(CPURegister::kRegister, kWRegSizeInBits, 0); | |
9564 CPURegList reg64(CPURegister::kRegister, kXRegSizeInBits, 0); | |
9565 CPURegList fpreg32(CPURegister::kFPRegister, kSRegSizeInBits, 0); | |
9566 CPURegList fpreg64(CPURegister::kFPRegister, kDRegSizeInBits, 0); | |
9567 | |
9568 CHECK(reg32.IsEmpty()); | |
9569 CHECK(reg64.IsEmpty()); | |
9570 CHECK(fpreg32.IsEmpty()); | |
9571 CHECK(fpreg64.IsEmpty()); | |
9572 | |
9573 CHECK(reg32.PopLowestIndex().IsNone()); | |
9574 CHECK(reg64.PopLowestIndex().IsNone()); | |
9575 CHECK(fpreg32.PopLowestIndex().IsNone()); | |
9576 CHECK(fpreg64.PopLowestIndex().IsNone()); | |
9577 | |
9578 CHECK(reg32.PopHighestIndex().IsNone()); | |
9579 CHECK(reg64.PopHighestIndex().IsNone()); | |
9580 CHECK(fpreg32.PopHighestIndex().IsNone()); | |
9581 CHECK(fpreg64.PopHighestIndex().IsNone()); | |
9582 | |
9583 CHECK(reg32.IsEmpty()); | |
9584 CHECK(reg64.IsEmpty()); | |
9585 CHECK(fpreg32.IsEmpty()); | |
9586 CHECK(fpreg64.IsEmpty()); | |
9587 } | |
9588 | |
9589 | |
9590 TEST(printf) { | |
9591 INIT_V8(); | |
9592 SETUP(); | |
9593 START(); | |
9594 | |
9595 char const * test_plain_string = "Printf with no arguments.\n"; | |
9596 char const * test_substring = "'This is a substring.'"; | |
9597 RegisterDump before; | |
9598 | |
9599 // Initialize x29 to the value of the stack pointer. We will use x29 as a | |
9600 // temporary stack pointer later, and initializing it in this way allows the | |
9601 // RegisterDump check to pass. | |
9602 __ Mov(x29, __ StackPointer()); | |
9603 | |
9604 // Test simple integer arguments. | |
9605 __ Mov(x0, 1234); | |
9606 __ Mov(x1, 0x1234); | |
9607 | |
9608 // Test simple floating-point arguments. | |
9609 __ Fmov(d0, 1.234); | |
9610 | |
9611 // Test pointer (string) arguments. | |
9612 __ Mov(x2, reinterpret_cast<uintptr_t>(test_substring)); | |
9613 | |
9614 // Test the maximum number of arguments, and sign extension. | |
9615 __ Mov(w3, 0xffffffff); | |
9616 __ Mov(w4, 0xffffffff); | |
9617 __ Mov(x5, 0xffffffffffffffff); | |
9618 __ Mov(x6, 0xffffffffffffffff); | |
9619 __ Fmov(s1, 1.234); | |
9620 __ Fmov(s2, 2.345); | |
9621 __ Fmov(d3, 3.456); | |
9622 __ Fmov(d4, 4.567); | |
9623 | |
9624 // Test printing callee-saved registers. | |
9625 __ Mov(x28, 0x123456789abcdef); | |
9626 __ Fmov(d10, 42.0); | |
9627 | |
9628 // Test with three arguments. | |
9629 __ Mov(x10, 3); | |
9630 __ Mov(x11, 40); | |
9631 __ Mov(x12, 500); | |
9632 | |
9633 // x8 and x9 are used by debug code in part of the macro assembler. However, | |
9634 // Printf guarantees to preserve them (so we can use Printf in debug code), | |
9635 // and we need to test that they are properly preserved. The above code | |
9636 // shouldn't need to use them, but we initialize x8 and x9 last to be on the | |
9637 // safe side. This test still assumes that none of the code from | |
9638 // before->Dump() to the end of the test can clobber x8 or x9, so where | |
9639 // possible we use the Assembler directly to be safe. | |
9640 __ orr(x8, xzr, 0x8888888888888888); | |
9641 __ orr(x9, xzr, 0x9999999999999999); | |
9642 | |
9643 // Check that we don't clobber any registers, except those that we explicitly | |
9644 // write results into. | |
9645 before.Dump(&masm); | |
9646 | |
9647 __ Printf(test_plain_string); // NOLINT(runtime/printf) | |
9648 __ Printf("x0: %" PRId64 ", x1: 0x%08" PRIx64 "\n", x0, x1); | |
9649 __ Printf("d0: %f\n", d0); | |
9650 __ Printf("Test %%s: %s\n", x2); | |
9651 __ Printf("w3(uint32): %" PRIu32 "\nw4(int32): %" PRId32 "\n" | |
9652 "x5(uint64): %" PRIu64 "\nx6(int64): %" PRId64 "\n", | |
9653 w3, w4, x5, x6); | |
9654 __ Printf("%%f: %f\n%%g: %g\n%%e: %e\n%%E: %E\n", s1, s2, d3, d4); | |
9655 __ Printf("0x%08" PRIx32 ", 0x%016" PRIx64 "\n", x28, x28); | |
9656 __ Printf("%g\n", d10); | |
9657 | |
9658 // Test with a different stack pointer. | |
9659 const Register old_stack_pointer = __ StackPointer(); | |
9660 __ mov(x29, old_stack_pointer); | |
9661 __ SetStackPointer(x29); | |
9662 __ Printf("old_stack_pointer: 0x%016" PRIx64 "\n", old_stack_pointer); | |
9663 __ mov(old_stack_pointer, __ StackPointer()); | |
9664 __ SetStackPointer(old_stack_pointer); | |
9665 | |
9666 __ Printf("3=%u, 4=%u, 5=%u\n", x10, x11, x12); | |
9667 | |
9668 END(); | |
9669 RUN(); | |
9670 | |
9671 // We cannot easily test the output of the Printf sequences, and because | |
9672 // Printf preserves all registers by default, we can't look at the number of | |
9673 // bytes that were printed. The printf_no_preserve test checks the byte | |
9674 // counts instead; here we just test that we didn't clobber any registers. | |
9675 ASSERT_EQUAL_REGISTERS(before); | |
9676 | |
9677 TEARDOWN(); | |
9678 } | |
9679 | |
9680 | |
9681 TEST(printf_no_preserve) { | |
9682 INIT_V8(); | |
9683 SETUP(); | |
9684 START(); | |
9685 | |
9686 char const * test_plain_string = "Printf with no arguments.\n"; | |
9687 char const * test_substring = "'This is a substring.'"; | |
9688 | |
9689 __ PrintfNoPreserve(test_plain_string); // NOLINT(runtime/printf) | |
9690 __ Mov(x19, x0); | |
9691 | |
9692 // Test simple integer arguments. | |
9693 __ Mov(x0, 1234); | |
9694 __ Mov(x1, 0x1234); | |
9695 __ PrintfNoPreserve("x0: %" PRId64 ", x1: 0x%08" PRIx64 "\n", x0, x1); | |
9696 __ Mov(x20, x0); | |
9697 | |
9698 // Test simple floating-point arguments. | |
9699 __ Fmov(d0, 1.234); | |
9700 __ PrintfNoPreserve("d0: %f\n", d0); | |
9701 __ Mov(x21, x0); | |
9702 | |
9703 // Test pointer (string) arguments. | |
9704 __ Mov(x2, reinterpret_cast<uintptr_t>(test_substring)); | |
9705 __ PrintfNoPreserve("Test %%s: %s\n", x2); | |
9706 __ Mov(x22, x0); | |
9707 | |
9708 // Test the maximum number of arguments, and sign extension. | |
9709 __ Mov(w3, 0xffffffff); | |
9710 __ Mov(w4, 0xffffffff); | |
9711 __ Mov(x5, 0xffffffffffffffff); | |
9712 __ Mov(x6, 0xffffffffffffffff); | |
9713 __ PrintfNoPreserve("w3(uint32): %" PRIu32 "\nw4(int32): %" PRId32 "\n" | |
9714 "x5(uint64): %" PRIu64 "\nx6(int64): %" PRId64 "\n", | |
9715 w3, w4, x5, x6); | |
9716 __ Mov(x23, x0); | |
9717 | |
9718 __ Fmov(s1, 1.234); | |
9719 __ Fmov(s2, 2.345); | |
9720 __ Fmov(d3, 3.456); | |
9721 __ Fmov(d4, 4.567); | |
9722 __ PrintfNoPreserve("%%f: %f\n%%g: %g\n%%e: %e\n%%E: %E\n", s1, s2, d3, d4); | |
9723 __ Mov(x24, x0); | |
9724 | |
9725 // Test printing callee-saved registers. | |
9726 __ Mov(x28, 0x123456789abcdef); | |
9727 __ PrintfNoPreserve("0x%08" PRIx32 ", 0x%016" PRIx64 "\n", x28, x28); | |
9728 __ Mov(x25, x0); | |
9729 | |
9730 __ Fmov(d10, 42.0); | |
9731 __ PrintfNoPreserve("%g\n", d10); | |
9732 __ Mov(x26, x0); | |
9733 | |
9734 // Test with a different stack pointer. | |
9735 const Register old_stack_pointer = __ StackPointer(); | |
9736 __ Mov(x29, old_stack_pointer); | |
9737 __ SetStackPointer(x29); | |
9738 | |
9739 __ PrintfNoPreserve("old_stack_pointer: 0x%016" PRIx64 "\n", | |
9740 old_stack_pointer); | |
9741 __ Mov(x27, x0); | |
9742 | |
9743 __ Mov(old_stack_pointer, __ StackPointer()); | |
9744 __ SetStackPointer(old_stack_pointer); | |
9745 | |
9746 // Test with three arguments. | |
9747 __ Mov(x3, 3); | |
9748 __ Mov(x4, 40); | |
9749 __ Mov(x5, 500); | |
9750 __ PrintfNoPreserve("3=%u, 4=%u, 5=%u\n", x3, x4, x5); | |
9751 __ Mov(x28, x0); | |
9752 | |
9753 END(); | |
9754 RUN(); | |
9755 | |
9756 // We cannot easily test the exact output of the Printf sequences, but we can | |
9757 // use the return code to check that the string length was correct. | |
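// PrintfNoPreserve leaves printf's return value -- the number of characters
// written, including the trailing '\n' -- in x0, which the code above copied
// into x19-x28 after each call. For example, "x0: 1234, x1: 0x00001234\n" is
// 25 characters, matching the x20 check below.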
9758 | |
9759 // Printf with no arguments. | |
9760 ASSERT_EQUAL_64(strlen(test_plain_string), x19); | |
9761 // x0: 1234, x1: 0x00001234 | |
9762 ASSERT_EQUAL_64(25, x20); | |
9763 // d0: 1.234000 | |
9764 ASSERT_EQUAL_64(13, x21); | |
9765 // Test %s: 'This is a substring.' | |
9766 ASSERT_EQUAL_64(32, x22); | |
9767 // w3(uint32): 4294967295 | |
9768 // w4(int32): -1 | |
9769 // x5(uint64): 18446744073709551615 | |
9770 // x6(int64): -1 | |
9771 ASSERT_EQUAL_64(23 + 14 + 33 + 14, x23); | |
9772 // %f: 1.234000 | |
9773 // %g: 2.345 | |
9774 // %e: 3.456000e+00 | |
9775 // %E: 4.567000E+00 | |
9776 ASSERT_EQUAL_64(13 + 10 + 17 + 17, x24); | |
9777 // 0x89abcdef, 0x0123456789abcdef | |
9778 ASSERT_EQUAL_64(31, x25); | |
9779 // 42 | |
9780 ASSERT_EQUAL_64(3, x26); | |
9781 // old_stack_pointer: 0x00007fb037ae2370 | |
9782 // Note: This is an example value, but the field width is fixed here so the | |
9783 // string length is still predictable. | |
9784 ASSERT_EQUAL_64(38, x27); | |
9785 // 3=3, 4=40, 5=500 | |
9786 ASSERT_EQUAL_64(17, x28); | |
9787 | |
9788 TEARDOWN(); | |
9789 } | |
9790 | |
9791 | |
9792 // This is a V8-specific test. | |
9793 static void CopyFieldsHelper(CPURegList temps) { | |
9794 static const uint64_t kLiteralBase = 0x0100001000100101UL; | |
9795 static const uint64_t src[] = {kLiteralBase * 1, | |
9796 kLiteralBase * 2, | |
9797 kLiteralBase * 3, | |
9798 kLiteralBase * 4, | |
9799 kLiteralBase * 5, | |
9800 kLiteralBase * 6, | |
9801 kLiteralBase * 7, | |
9802 kLiteralBase * 8, | |
9803 kLiteralBase * 9, | |
9804 kLiteralBase * 10, | |
9805 kLiteralBase * 11}; | |
9806 static const uint64_t src_tagged = | |
9807 reinterpret_cast<uint64_t>(src) + kHeapObjectTag; | |
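// The source and destination addresses are tagged like heap object pointers;
// CopyFields is expected to compensate for kHeapObjectTag when accessing the
// fields, which copying between these raw arrays verifies.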
9808 | |
9809 static const unsigned kTestCount = sizeof(src) / sizeof(src[0]) + 1; | |
9810 uint64_t* dst[kTestCount]; | |
9811 uint64_t dst_tagged[kTestCount]; | |
9812 | |
9813 // The first test will be to copy 0 fields. The destination (and source) | |
9814 // should not be accessed in any way. | |
9815 dst[0] = NULL; | |
9816 dst_tagged[0] = kHeapObjectTag; | |
9817 | |
9818 // Allocate memory for each of the other tests. Each test <n> will have <n> fields. | |
9819 // This is intended to exercise as many paths in CopyFields as possible. | |
9820 for (unsigned i = 1; i < kTestCount; i++) { | |
9821 dst[i] = new uint64_t[i]; | |
9822 memset(dst[i], 0, i * sizeof(kLiteralBase)); | |
9823 dst_tagged[i] = reinterpret_cast<uint64_t>(dst[i]) + kHeapObjectTag; | |
9824 } | |
9825 | |
9826 SETUP(); | |
9827 START(); | |
9828 | |
9829 __ Mov(x0, dst_tagged[0]); | |
9830 __ Mov(x1, 0); | |
9831 __ CopyFields(x0, x1, temps, 0); | |
9832 for (unsigned i = 1; i < kTestCount; i++) { | |
9833 __ Mov(x0, dst_tagged[i]); | |
9834 __ Mov(x1, src_tagged); | |
9835 __ CopyFields(x0, x1, temps, i); | |
9836 } | |
9837 | |
9838 END(); | |
9839 RUN(); | |
9840 TEARDOWN(); | |
9841 | |
9842 for (unsigned i = 1; i < kTestCount; i++) { | |
9843 for (unsigned j = 0; j < i; j++) { | |
9844 CHECK(src[j] == dst[i][j]); | |
9845 } | |
9846 delete [] dst[i]; | |
9847 } | |
9848 } | |
9849 | |
9850 | |
9851 // This is a V8-specific test. | |
9852 TEST(copyfields) { | |
9853 INIT_V8(); | |
9854 CopyFieldsHelper(CPURegList(x10)); | |
9855 CopyFieldsHelper(CPURegList(x10, x11)); | |
9856 CopyFieldsHelper(CPURegList(x10, x11, x12)); | |
9857 CopyFieldsHelper(CPURegList(x10, x11, x12, x13)); | |
9858 } | |
9859 | |
9860 | |
9861 static void DoSmiAbsTest(int32_t value, bool must_fail = false) { | |
9862 SETUP(); | |
9863 | |
9864 START(); | |
9865 Label end, slow; | |
9866 __ Mov(x2, 0xc001c0de); | |
9867 __ Mov(x1, value); | |
9868 __ SmiTag(x1); | |
9869 __ SmiAbs(x1, &slow); | |
9870 __ SmiUntag(x1); | |
9871 __ B(&end); | |
9872 | |
9873 __ Bind(&slow); | |
9874 __ Mov(x2, 0xbad); | |
9875 | |
9876 __ Bind(&end); | |
9877 END(); | |
9878 | |
9879 RUN(); | |
9880 | |
9881 if (must_fail) { | |
9882 // We tested an invalid conversion. The code must have jumped to slow. | |
9883 ASSERT_EQUAL_64(0xbad, x2); | |
9884 } else { | |
9885 // The conversion is valid; check the result. | |
9886 int32_t result = (value >= 0) ? value : -value; | |
9887 ASSERT_EQUAL_64(result, x1); | |
9888 | |
9889 // Check that we didn't jump on slow. | |
9890 ASSERT_EQUAL_64(0xc001c0de, x2); | |
9891 } | |
9892 | |
9893 TEARDOWN(); | |
9894 } | |
9895 | |
9896 | |
9897 TEST(smi_abs) { | |
9898 INIT_V8(); | |
9899 // Simple and edge cases. | |
9900 DoSmiAbsTest(0); | |
9901 DoSmiAbsTest(0x12345); | |
9902 DoSmiAbsTest(0x40000000); | |
9903 DoSmiAbsTest(0x7fffffff); | |
9904 DoSmiAbsTest(-1); | |
9905 DoSmiAbsTest(-12345); | |
9906 DoSmiAbsTest(0x80000001); | |
9907 | |
9908 // Check that the most negative SMI is detected: negating it overflows int32_t. | |
9909 DoSmiAbsTest(0x80000000, true); | |
9910 } | |
9911 | |
9912 | |
9913 TEST(blr_lr) { | |
9914 // A simple test to check that the simulator correctly handles "blr lr". | |
9915 INIT_V8(); | |
9916 SETUP(); | |
9917 | |
9918 START(); | |
9919 Label target; | |
9920 Label end; | |
9921 | |
9922 __ Mov(x0, 0x0); | |
9923 __ Adr(lr, &target); | |
9924 | |
9925 __ Blr(lr); | |
9926 __ Mov(x0, 0xdeadbeef); | |
9927 __ B(&end); | |
9928 | |
9929 __ Bind(&target); | |
9930 __ Mov(x0, 0xc001c0de); | |
9931 | |
9932 __ Bind(&end); | |
9933 END(); | |
9934 | |
9935 RUN(); | |
9936 | |
9937 ASSERT_EQUAL_64(0xc001c0de, x0); | |
9938 | |
9939 TEARDOWN(); | |
9940 } | |
9941 | |
9942 | |
9943 TEST(barriers) { | |
9944 // Generate all supported barriers; this is just a smoke test. | |
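// Roughly: DMB orders memory accesses, DSB additionally waits for them to
// complete, and ISB flushes the pipeline. The arguments select the
// shareability domain (FullSystem, InnerShareable, ...) and the access
// types ordered (BarrierAll, BarrierReads, BarrierWrites, BarrierOther).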
9945 INIT_V8(); | |
9946 SETUP(); | |
9947 | |
9948 START(); | |
9949 | |
9950 // DMB | |
9951 __ Dmb(FullSystem, BarrierAll); | |
9952 __ Dmb(FullSystem, BarrierReads); | |
9953 __ Dmb(FullSystem, BarrierWrites); | |
9954 __ Dmb(FullSystem, BarrierOther); | |
9955 | |
9956 __ Dmb(InnerShareable, BarrierAll); | |
9957 __ Dmb(InnerShareable, BarrierReads); | |
9958 __ Dmb(InnerShareable, BarrierWrites); | |
9959 __ Dmb(InnerShareable, BarrierOther); | |
9960 | |
9961 __ Dmb(NonShareable, BarrierAll); | |
9962 __ Dmb(NonShareable, BarrierReads); | |
9963 __ Dmb(NonShareable, BarrierWrites); | |
9964 __ Dmb(NonShareable, BarrierOther); | |
9965 | |
9966 __ Dmb(OuterShareable, BarrierAll); | |
9967 __ Dmb(OuterShareable, BarrierReads); | |
9968 __ Dmb(OuterShareable, BarrierWrites); | |
9969 __ Dmb(OuterShareable, BarrierOther); | |
9970 | |
9971 // DSB | |
9972 __ Dsb(FullSystem, BarrierAll); | |
9973 __ Dsb(FullSystem, BarrierReads); | |
9974 __ Dsb(FullSystem, BarrierWrites); | |
9975 __ Dsb(FullSystem, BarrierOther); | |
9976 | |
9977 __ Dsb(InnerShareable, BarrierAll); | |
9978 __ Dsb(InnerShareable, BarrierReads); | |
9979 __ Dsb(InnerShareable, BarrierWrites); | |
9980 __ Dsb(InnerShareable, BarrierOther); | |
9981 | |
9982 __ Dsb(NonShareable, BarrierAll); | |
9983 __ Dsb(NonShareable, BarrierReads); | |
9984 __ Dsb(NonShareable, BarrierWrites); | |
9985 __ Dsb(NonShareable, BarrierOther); | |
9986 | |
9987 __ Dsb(OuterShareable, BarrierAll); | |
9988 __ Dsb(OuterShareable, BarrierReads); | |
9989 __ Dsb(OuterShareable, BarrierWrites); | |
9990 __ Dsb(OuterShareable, BarrierOther); | |
9991 | |
9992 // ISB | |
9993 __ Isb(); | |
9994 | |
9995 END(); | |
9996 | |
9997 RUN(); | |
9998 | |
9999 TEARDOWN(); | |
10000 } | |
10001 | |
10002 | |
10003 TEST(process_nan_double) { | |
10004 INIT_V8(); | |
10005 // Make sure that NaN propagation works correctly. | |
10006 double sn = rawbits_to_double(0x7ff5555511111111); | |
10007 double qn = rawbits_to_double(0x7ffaaaaa11111111); | |
10008 ASSERT(IsSignallingNaN(sn)); | |
10009 ASSERT(IsQuietNaN(qn)); | |
10010 | |
10011 // The input NaNs after passing through ProcessNaN. | |
10012 double sn_proc = rawbits_to_double(0x7ffd555511111111); | |
10013 double qn_proc = qn; | |
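// ProcessNaN quiets a signalling NaN by setting the top fraction bit
// (bit 51 of a double), so 0x7ff5... becomes 0x7ffd...; a quiet NaN
// already has that bit set and passes through unchanged.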
10014 ASSERT(IsQuietNaN(sn_proc)); | |
10015 ASSERT(IsQuietNaN(qn_proc)); | |
10016 | |
10017 SETUP(); | |
10018 START(); | |
10019 | |
10020 // Execute a number of instructions which all use ProcessNaN, and check that | |
10021 // they all handle the NaN correctly. | |
10022 __ Fmov(d0, sn); | |
10023 __ Fmov(d10, qn); | |
10024 | |
10025 // Operations that always propagate NaNs unchanged, even signalling NaNs. | |
10026 // - Signalling NaN | |
10027 __ Fmov(d1, d0); | |
10028 __ Fabs(d2, d0); | |
10029 __ Fneg(d3, d0); | |
10030 // - Quiet NaN | |
10031 __ Fmov(d11, d10); | |
10032 __ Fabs(d12, d10); | |
10033 __ Fneg(d13, d10); | |
10034 | |
10035 // Operations that use ProcessNaN. | |
10036 // - Signalling NaN | |
10037 __ Fsqrt(d4, d0); | |
10038 __ Frinta(d5, d0); | |
10039 __ Frintn(d6, d0); | |
10040 __ Frintz(d7, d0); | |
10041 // - Quiet NaN | |
10042 __ Fsqrt(d14, d10); | |
10043 __ Frinta(d15, d10); | |
10044 __ Frintn(d16, d10); | |
10045 __ Frintz(d17, d10); | |
10046 | |
10047 // The behaviour of fcvt is checked in TEST(fcvt_sd). | |
10048 | |
10049 END(); | |
10050 RUN(); | |
10051 | |
10052 uint64_t qn_raw = double_to_rawbits(qn); | |
10053 uint64_t sn_raw = double_to_rawbits(sn); | |
10054 | |
10055 // - Signalling NaN | |
10056 ASSERT_EQUAL_FP64(sn, d1); | |
10057 ASSERT_EQUAL_FP64(rawbits_to_double(sn_raw & ~kDSignMask), d2); | |
10058 ASSERT_EQUAL_FP64(rawbits_to_double(sn_raw ^ kDSignMask), d3); | |
10059 // - Quiet NaN | |
10060 ASSERT_EQUAL_FP64(qn, d11); | |
10061 ASSERT_EQUAL_FP64(rawbits_to_double(qn_raw & ~kDSignMask), d12); | |
10062 ASSERT_EQUAL_FP64(rawbits_to_double(qn_raw ^ kDSignMask), d13); | |
10063 | |
10064 // - Signalling NaN | |
10065 ASSERT_EQUAL_FP64(sn_proc, d4); | |
10066 ASSERT_EQUAL_FP64(sn_proc, d5); | |
10067 ASSERT_EQUAL_FP64(sn_proc, d6); | |
10068 ASSERT_EQUAL_FP64(sn_proc, d7); | |
10069 // - Quiet NaN | |
10070 ASSERT_EQUAL_FP64(qn_proc, d14); | |
10071 ASSERT_EQUAL_FP64(qn_proc, d15); | |
10072 ASSERT_EQUAL_FP64(qn_proc, d16); | |
10073 ASSERT_EQUAL_FP64(qn_proc, d17); | |
10074 | |
10075 TEARDOWN(); | |
10076 } | |
10077 | |
10078 | |
10079 TEST(process_nan_float) { | |
10080 INIT_V8(); | |
10081 // Make sure that NaN propagation works correctly. | |
10082 float sn = rawbits_to_float(0x7f951111); | |
10083 float qn = rawbits_to_float(0x7fea1111); | |
10084 ASSERT(IsSignallingNaN(sn)); | |
10085 ASSERT(IsQuietNaN(qn)); | |
10086 | |
10087 // The input NaNs after passing through ProcessNaN. | |
10088 float sn_proc = rawbits_to_float(0x7fd51111); | |
10089 float qn_proc = qn; | |
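// As for doubles, quieting sets the top fraction bit (bit 22 of a float),
// turning 0x7f951111 into 0x7fd51111.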
10090 ASSERT(IsQuietNaN(sn_proc)); | |
10091 ASSERT(IsQuietNaN(qn_proc)); | |
10092 | |
10093 SETUP(); | |
10094 START(); | |
10095 | |
10096 // Execute a number of instructions which all use ProcessNaN, and check that | |
10097 // they all handle the NaN correctly. | |
10098 __ Fmov(s0, sn); | |
10099 __ Fmov(s10, qn); | |
10100 | |
10101 // Operations that always propagate NaNs unchanged, even signalling NaNs. | |
10102 // - Signalling NaN | |
10103 __ Fmov(s1, s0); | |
10104 __ Fabs(s2, s0); | |
10105 __ Fneg(s3, s0); | |
10106 // - Quiet NaN | |
10107 __ Fmov(s11, s10); | |
10108 __ Fabs(s12, s10); | |
10109 __ Fneg(s13, s10); | |
10110 | |
10111 // Operations that use ProcessNaN. | |
10112 // - Signalling NaN | |
10113 __ Fsqrt(s4, s0); | |
10114 __ Frinta(s5, s0); | |
10115 __ Frintn(s6, s0); | |
10116 __ Frintz(s7, s0); | |
10117 // - Quiet NaN | |
10118 __ Fsqrt(s14, s10); | |
10119 __ Frinta(s15, s10); | |
10120 __ Frintn(s16, s10); | |
10121 __ Frintz(s17, s10); | |
10122 | |
10123 // The behaviour of fcvt is checked in TEST(fcvt_sd). | |
10124 | |
10125 END(); | |
10126 RUN(); | |
10127 | |
10128 uint32_t qn_raw = float_to_rawbits(qn); | |
10129 uint32_t sn_raw = float_to_rawbits(sn); | |
10130 | |
10131 // - Signalling NaN | |
10132 ASSERT_EQUAL_FP32(sn, s1); | |
10133 ASSERT_EQUAL_FP32(rawbits_to_float(sn_raw & ~kSSignMask), s2); | |
10134 ASSERT_EQUAL_FP32(rawbits_to_float(sn_raw ^ kSSignMask), s3); | |
10135 // - Quiet NaN | |
10136 ASSERT_EQUAL_FP32(qn, s11); | |
10137 ASSERT_EQUAL_FP32(rawbits_to_float(qn_raw & ~kSSignMask), s12); | |
10138 ASSERT_EQUAL_FP32(rawbits_to_float(qn_raw ^ kSSignMask), s13); | |
10139 | |
10140 // - Signalling NaN | |
10141 ASSERT_EQUAL_FP32(sn_proc, s4); | |
10142 ASSERT_EQUAL_FP32(sn_proc, s5); | |
10143 ASSERT_EQUAL_FP32(sn_proc, s6); | |
10144 ASSERT_EQUAL_FP32(sn_proc, s7); | |
10145 // - Quiet NaN | |
10146 ASSERT_EQUAL_FP32(qn_proc, s14); | |
10147 ASSERT_EQUAL_FP32(qn_proc, s15); | |
10148 ASSERT_EQUAL_FP32(qn_proc, s16); | |
10149 ASSERT_EQUAL_FP32(qn_proc, s17); | |
10150 | |
10151 TEARDOWN(); | |
10152 } | |
10153 | |
10154 | |
10155 static void ProcessNaNsHelper(double n, double m, double expected) { | |
10156 ASSERT(isnan(n) || isnan(m)); | |
10157 ASSERT(isnan(expected)); | |
10158 | |
10159 SETUP(); | |
10160 START(); | |
10161 | |
10162 // Execute a number of instructions which all use ProcessNaNs, and check that | |
10163 // they all propagate NaNs correctly. | |
10164 __ Fmov(d0, n); | |
10165 __ Fmov(d1, m); | |
10166 | |
10167 __ Fadd(d2, d0, d1); | |
10168 __ Fsub(d3, d0, d1); | |
10169 __ Fmul(d4, d0, d1); | |
10170 __ Fdiv(d5, d0, d1); | |
10171 __ Fmax(d6, d0, d1); | |
10172 __ Fmin(d7, d0, d1); | |
10173 | |
10174 END(); | |
10175 RUN(); | |
10176 | |
10177 ASSERT_EQUAL_FP64(expected, d2); | |
10178 ASSERT_EQUAL_FP64(expected, d3); | |
10179 ASSERT_EQUAL_FP64(expected, d4); | |
10180 ASSERT_EQUAL_FP64(expected, d5); | |
10181 ASSERT_EQUAL_FP64(expected, d6); | |
10182 ASSERT_EQUAL_FP64(expected, d7); | |
10183 | |
10184 TEARDOWN(); | |
10185 } | |
10186 | |
10187 | |
10188 TEST(process_nans_double) { | |
10189 INIT_V8(); | |
10190 // Make sure that NaN propagation works correctly. | |
10191 double sn = rawbits_to_double(0x7ff5555511111111); | |
10192 double sm = rawbits_to_double(0x7ff5555522222222); | |
10193 double qn = rawbits_to_double(0x7ffaaaaa11111111); | |
10194 double qm = rawbits_to_double(0x7ffaaaaa22222222); | |
10195 ASSERT(IsSignallingNaN(sn)); | |
10196 ASSERT(IsSignallingNaN(sm)); | |
10197 ASSERT(IsQuietNaN(qn)); | |
10198 ASSERT(IsQuietNaN(qm)); | |
10199 | |
10200 // The input NaNs after passing through ProcessNaN. | |
10201 double sn_proc = rawbits_to_double(0x7ffd555511111111); | |
10202 double sm_proc = rawbits_to_double(0x7ffd555522222222); | |
10203 double qn_proc = qn; | |
10204 double qm_proc = qm; | |
10205 ASSERT(IsQuietNaN(sn_proc)); | |
10206 ASSERT(IsQuietNaN(sm_proc)); | |
10207 ASSERT(IsQuietNaN(qn_proc)); | |
10208 ASSERT(IsQuietNaN(qm_proc)); | |
10209 | |
10210 // Quiet NaNs are propagated. | |
10211 ProcessNaNsHelper(qn, 0, qn_proc); | |
10212 ProcessNaNsHelper(0, qm, qm_proc); | |
10213 ProcessNaNsHelper(qn, qm, qn_proc); | |
10214 | |
10215 // Signalling NaNs are propagated, and made quiet. | |
10216 ProcessNaNsHelper(sn, 0, sn_proc); | |
10217 ProcessNaNsHelper(0, sm, sm_proc); | |
10218 ProcessNaNsHelper(sn, sm, sn_proc); | |
10219 | |
10220 // Signalling NaNs take precedence over quiet NaNs. | |
10221 ProcessNaNsHelper(sn, qm, sn_proc); | |
10222 ProcessNaNsHelper(qn, sm, sm_proc); | |
10223 ProcessNaNsHelper(sn, sm, sn_proc); | |
10224 } | |
10225 | |
10226 | |
10227 static void ProcessNaNsHelper(float n, float m, float expected) { | |
10228 ASSERT(isnan(n) || isnan(m)); | |
10229 ASSERT(isnan(expected)); | |
10230 | |
10231 SETUP(); | |
10232 START(); | |
10233 | |
10234 // Execute a number of instructions which all use ProcessNaNs, and check that | |
10235 // they all propagate NaNs correctly. | |
10236 __ Fmov(s0, n); | |
10237 __ Fmov(s1, m); | |
10238 | |
10239 __ Fadd(s2, s0, s1); | |
10240 __ Fsub(s3, s0, s1); | |
10241 __ Fmul(s4, s0, s1); | |
10242 __ Fdiv(s5, s0, s1); | |
10243 __ Fmax(s6, s0, s1); | |
10244 __ Fmin(s7, s0, s1); | |
10245 | |
10246 END(); | |
10247 RUN(); | |
10248 | |
10249 ASSERT_EQUAL_FP32(expected, s2); | |
10250 ASSERT_EQUAL_FP32(expected, s3); | |
10251 ASSERT_EQUAL_FP32(expected, s4); | |
10252 ASSERT_EQUAL_FP32(expected, s5); | |
10253 ASSERT_EQUAL_FP32(expected, s6); | |
10254 ASSERT_EQUAL_FP32(expected, s7); | |
10255 | |
10256 TEARDOWN(); | |
10257 } | |
10258 | |
10259 | |
10260 TEST(process_nans_float) { | |
10261 INIT_V8(); | |
10262 // Make sure that NaN propagation works correctly. | |
10263 float sn = rawbits_to_float(0x7f951111); | |
10264 float sm = rawbits_to_float(0x7f952222); | |
10265 float qn = rawbits_to_float(0x7fea1111); | |
10266 float qm = rawbits_to_float(0x7fea2222); | |
10267 ASSERT(IsSignallingNaN(sn)); | |
10268 ASSERT(IsSignallingNaN(sm)); | |
10269 ASSERT(IsQuietNaN(qn)); | |
10270 ASSERT(IsQuietNaN(qm)); | |
10271 | |
10272 // The input NaNs after passing through ProcessNaN. | |
10273 float sn_proc = rawbits_to_float(0x7fd51111); | |
10274 float sm_proc = rawbits_to_float(0x7fd52222); | |
10275 float qn_proc = qn; | |
10276 float qm_proc = qm; | |
10277 ASSERT(IsQuietNaN(sn_proc)); | |
10278 ASSERT(IsQuietNaN(sm_proc)); | |
10279 ASSERT(IsQuietNaN(qn_proc)); | |
10280 ASSERT(IsQuietNaN(qm_proc)); | |
10281 | |
10282 // Quiet NaNs are propagated. | |
10283 ProcessNaNsHelper(qn, 0, qn_proc); | |
10284 ProcessNaNsHelper(0, qm, qm_proc); | |
10285 ProcessNaNsHelper(qn, qm, qn_proc); | |
10286 | |
10287 // Signalling NaNs are propagated, and made quiet. | |
10288 ProcessNaNsHelper(sn, 0, sn_proc); | |
10289 ProcessNaNsHelper(0, sm, sm_proc); | |
10290 ProcessNaNsHelper(sn, sm, sn_proc); | |
10291 | |
10292 // Signalling NaNs take precedence over quiet NaNs. | |
10293 ProcessNaNsHelper(sn, qm, sn_proc); | |
10294 ProcessNaNsHelper(qn, sm, sm_proc); | |
10295 ProcessNaNsHelper(sn, sm, sn_proc); | |
10296 } | |
10297 | |
10298 | |
10299 static void DefaultNaNHelper(float n, float m, float a) { | |
10300 ASSERT(isnan(n) || isnan(m) || isnan(a)); | |
10301 | |
10302 bool test_1op = isnan(n); | |
10303 bool test_2op = isnan(n) || isnan(m); | |
10304 | |
10305 SETUP(); | |
10306 START(); | |
10307 | |
10308 // Enable Default-NaN mode in the FPCR. | |
10309 __ Mrs(x0, FPCR); | |
10310 __ Orr(x1, x0, DN_mask); | |
10311 __ Msr(FPCR, x1); | |
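// With FPCR.DN set, any operation that would produce a NaN returns the
// default NaN instead: sign clear, exponent all ones, top fraction bit set
// and the rest zero (0x7fc00000 for floats, 0x7ff8000000000000 for doubles).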
10312 | |
10313 // Execute a number of instructions which all use ProcessNaNs, and check that | |
10314 // they all produce the default NaN. | |
10315 __ Fmov(s0, n); | |
10316 __ Fmov(s1, m); | |
10317 __ Fmov(s2, a); | |
10318 | |
10319 if (test_1op) { | |
10320 // Operations that always propagate NaNs unchanged, even signalling NaNs. | |
10321 __ Fmov(s10, s0); | |
10322 __ Fabs(s11, s0); | |
10323 __ Fneg(s12, s0); | |
10324 | |
10325 // Operations that use ProcessNaN. | |
10326 __ Fsqrt(s13, s0); | |
10327 __ Frinta(s14, s0); | |
10328 __ Frintn(s15, s0); | |
10329 __ Frintz(s16, s0); | |
10330 | |
10331 // Fcvt usually has special NaN handling, but it respects default-NaN mode. | |
10332 __ Fcvt(d17, s0); | |
10333 } | |
10334 | |
10335 if (test_2op) { | |
10336 __ Fadd(s18, s0, s1); | |
10337 __ Fsub(s19, s0, s1); | |
10338 __ Fmul(s20, s0, s1); | |
10339 __ Fdiv(s21, s0, s1); | |
10340 __ Fmax(s22, s0, s1); | |
10341 __ Fmin(s23, s0, s1); | |
10342 } | |
10343 | |
10344 __ Fmadd(s24, s0, s1, s2); | |
10345 __ Fmsub(s25, s0, s1, s2); | |
10346 __ Fnmadd(s26, s0, s1, s2); | |
10347 __ Fnmsub(s27, s0, s1, s2); | |
10348 | |
10349 // Restore FPCR. | |
10350 __ Msr(FPCR, x0); | |
10351 | |
10352 END(); | |
10353 RUN(); | |
10354 | |
10355 if (test_1op) { | |
10356 uint32_t n_raw = float_to_rawbits(n); | |
10357 ASSERT_EQUAL_FP32(n, s10); | |
10358 ASSERT_EQUAL_FP32(rawbits_to_float(n_raw & ~kSSignMask), s11); | |
10359 ASSERT_EQUAL_FP32(rawbits_to_float(n_raw ^ kSSignMask), s12); | |
10360 ASSERT_EQUAL_FP32(kFP32DefaultNaN, s13); | |
10361 ASSERT_EQUAL_FP32(kFP32DefaultNaN, s14); | |
10362 ASSERT_EQUAL_FP32(kFP32DefaultNaN, s15); | |
10363 ASSERT_EQUAL_FP32(kFP32DefaultNaN, s16); | |
10364 ASSERT_EQUAL_FP64(kFP64DefaultNaN, d17); | |
10365 } | |
10366 | |
10367 if (test_2op) { | |
10368 ASSERT_EQUAL_FP32(kFP32DefaultNaN, s18); | |
10369 ASSERT_EQUAL_FP32(kFP32DefaultNaN, s19); | |
10370 ASSERT_EQUAL_FP32(kFP32DefaultNaN, s20); | |
10371 ASSERT_EQUAL_FP32(kFP32DefaultNaN, s21); | |
10372 ASSERT_EQUAL_FP32(kFP32DefaultNaN, s22); | |
10373 ASSERT_EQUAL_FP32(kFP32DefaultNaN, s23); | |
10374 } | |
10375 | |
10376 ASSERT_EQUAL_FP32(kFP32DefaultNaN, s24); | |
10377 ASSERT_EQUAL_FP32(kFP32DefaultNaN, s25); | |
10378 ASSERT_EQUAL_FP32(kFP32DefaultNaN, s26); | |
10379 ASSERT_EQUAL_FP32(kFP32DefaultNaN, s27); | |
10380 | |
10381 TEARDOWN(); | |
10382 } | |
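
// Note on the expected values: in default-NaN mode (FPCR.DN == 1), every
// NaN-producing FP operation returns the default quiet NaN instead of
// propagating its input. For reference, the default NaN has the sign bit
// clear, all exponent bits set, and only the top fraction bit set:
//
//   kFP32DefaultNaN: rawbits 0x7fc00000
//   kFP64DefaultNaN: rawbits 0x7ff8000000000000
//
// Fmov, Fabs and Fneg are exempt because they only move the value or rewrite
// its sign bit; they never go through ProcessNaN, so they must preserve the
// input payload even for signalling NaNs.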


TEST(default_nan_float) {
  INIT_V8();
  float sn = rawbits_to_float(0x7f951111);
  float sm = rawbits_to_float(0x7f952222);
  float sa = rawbits_to_float(0x7f95aaaa);
  float qn = rawbits_to_float(0x7fea1111);
  float qm = rawbits_to_float(0x7fea2222);
  float qa = rawbits_to_float(0x7feaaaaa);
  ASSERT(IsSignallingNaN(sn));
  ASSERT(IsSignallingNaN(sm));
  ASSERT(IsSignallingNaN(sa));
  ASSERT(IsQuietNaN(qn));
  ASSERT(IsQuietNaN(qm));
  ASSERT(IsQuietNaN(qa));

  // - Signalling NaNs
  DefaultNaNHelper(sn, 0.0f, 0.0f);
  DefaultNaNHelper(0.0f, sm, 0.0f);
  DefaultNaNHelper(0.0f, 0.0f, sa);
  DefaultNaNHelper(sn, sm, 0.0f);
  DefaultNaNHelper(0.0f, sm, sa);
  DefaultNaNHelper(sn, 0.0f, sa);
  DefaultNaNHelper(sn, sm, sa);
  // - Quiet NaNs
  DefaultNaNHelper(qn, 0.0f, 0.0f);
  DefaultNaNHelper(0.0f, qm, 0.0f);
  DefaultNaNHelper(0.0f, 0.0f, qa);
  DefaultNaNHelper(qn, qm, 0.0f);
  DefaultNaNHelper(0.0f, qm, qa);
  DefaultNaNHelper(qn, 0.0f, qa);
  DefaultNaNHelper(qn, qm, qa);
  // - Mixed NaNs
  DefaultNaNHelper(qn, sm, sa);
  DefaultNaNHelper(sn, qm, sa);
  DefaultNaNHelper(sn, sm, qa);
  DefaultNaNHelper(qn, qm, sa);
  DefaultNaNHelper(sn, qm, qa);
  DefaultNaNHelper(qn, sm, qa);
  DefaultNaNHelper(qn, qm, qa);
}
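
// Each call above only exercises the helper paths whose inputs are NaN: a NaN
// in n enables the one-operand checks, a NaN in n or m enables the two-operand
// checks, and the Fmadd-family checks always run. A NaN only in a is therefore
// detected solely through the fused multiply-add results.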


static void DefaultNaNHelper(double n, double m, double a) {
  // As in the float variant, use std::isnan from <cmath>.
  ASSERT(std::isnan(n) || std::isnan(m) || std::isnan(a));

  bool test_1op = std::isnan(n);
  bool test_2op = std::isnan(n) || std::isnan(m);

  SETUP();
  START();

  // Enable Default-NaN mode in the FPCR.
  __ Mrs(x0, FPCR);
  __ Orr(x1, x0, DN_mask);
  __ Msr(FPCR, x1);

  // Execute a number of instructions which all use ProcessNaNs, and check that
  // they all produce the default NaN.
  __ Fmov(d0, n);
  __ Fmov(d1, m);
  __ Fmov(d2, a);

  if (test_1op) {
    // Operations that always propagate NaNs unchanged, even signalling NaNs.
    __ Fmov(d10, d0);
    __ Fabs(d11, d0);
    __ Fneg(d12, d0);

    // Operations that use ProcessNaN.
    __ Fsqrt(d13, d0);
    __ Frinta(d14, d0);
    __ Frintn(d15, d0);
    __ Frintz(d16, d0);

    // Fcvt usually has special NaN handling, but it respects default-NaN mode.
    __ Fcvt(s17, d0);
  }

  if (test_2op) {
    __ Fadd(d18, d0, d1);
    __ Fsub(d19, d0, d1);
    __ Fmul(d20, d0, d1);
    __ Fdiv(d21, d0, d1);
    __ Fmax(d22, d0, d1);
    __ Fmin(d23, d0, d1);
  }

  __ Fmadd(d24, d0, d1, d2);
  __ Fmsub(d25, d0, d1, d2);
  __ Fnmadd(d26, d0, d1, d2);
  __ Fnmsub(d27, d0, d1, d2);

  // Restore FPCR.
  __ Msr(FPCR, x0);

  END();
  RUN();

  if (test_1op) {
    uint64_t n_raw = double_to_rawbits(n);
    ASSERT_EQUAL_FP64(n, d10);
    ASSERT_EQUAL_FP64(rawbits_to_double(n_raw & ~kDSignMask), d11);
    ASSERT_EQUAL_FP64(rawbits_to_double(n_raw ^ kDSignMask), d12);
    ASSERT_EQUAL_FP64(kFP64DefaultNaN, d13);
    ASSERT_EQUAL_FP64(kFP64DefaultNaN, d14);
    ASSERT_EQUAL_FP64(kFP64DefaultNaN, d15);
    ASSERT_EQUAL_FP64(kFP64DefaultNaN, d16);
    ASSERT_EQUAL_FP32(kFP32DefaultNaN, s17);
  }

  if (test_2op) {
    ASSERT_EQUAL_FP64(kFP64DefaultNaN, d18);
    ASSERT_EQUAL_FP64(kFP64DefaultNaN, d19);
    ASSERT_EQUAL_FP64(kFP64DefaultNaN, d20);
    ASSERT_EQUAL_FP64(kFP64DefaultNaN, d21);
    ASSERT_EQUAL_FP64(kFP64DefaultNaN, d22);
    ASSERT_EQUAL_FP64(kFP64DefaultNaN, d23);
  }

  ASSERT_EQUAL_FP64(kFP64DefaultNaN, d24);
  ASSERT_EQUAL_FP64(kFP64DefaultNaN, d25);
  ASSERT_EQUAL_FP64(kFP64DefaultNaN, d26);
  ASSERT_EQUAL_FP64(kFP64DefaultNaN, d27);

  TEARDOWN();
}
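
// The double-precision variant mirrors the float one. Here the quiet bit is
// the top bit of the 52-bit fraction field (bit 51), so a signalling NaN such
// as 0x7ff5555511111111 would be quietened to 0x7ffd555511111111.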


TEST(default_nan_double) {
  INIT_V8();
  double sn = rawbits_to_double(0x7ff5555511111111);
  double sm = rawbits_to_double(0x7ff5555522222222);
  double sa = rawbits_to_double(0x7ff55555aaaaaaaa);
  double qn = rawbits_to_double(0x7ffaaaaa11111111);
  double qm = rawbits_to_double(0x7ffaaaaa22222222);
  double qa = rawbits_to_double(0x7ffaaaaaaaaaaaaa);
  ASSERT(IsSignallingNaN(sn));
  ASSERT(IsSignallingNaN(sm));
  ASSERT(IsSignallingNaN(sa));
  ASSERT(IsQuietNaN(qn));
  ASSERT(IsQuietNaN(qm));
  ASSERT(IsQuietNaN(qa));

  // - Signalling NaNs
  DefaultNaNHelper(sn, 0.0, 0.0);
  DefaultNaNHelper(0.0, sm, 0.0);
  DefaultNaNHelper(0.0, 0.0, sa);
  DefaultNaNHelper(sn, sm, 0.0);
  DefaultNaNHelper(0.0, sm, sa);
  DefaultNaNHelper(sn, 0.0, sa);
  DefaultNaNHelper(sn, sm, sa);
  // - Quiet NaNs
  DefaultNaNHelper(qn, 0.0, 0.0);
  DefaultNaNHelper(0.0, qm, 0.0);
  DefaultNaNHelper(0.0, 0.0, qa);
  DefaultNaNHelper(qn, qm, 0.0);
  DefaultNaNHelper(0.0, qm, qa);
  DefaultNaNHelper(qn, 0.0, qa);
  DefaultNaNHelper(qn, qm, qa);
  // - Mixed NaNs
  DefaultNaNHelper(qn, sm, sa);
  DefaultNaNHelper(sn, qm, sa);
  DefaultNaNHelper(sn, sm, qa);
  DefaultNaNHelper(qn, qm, sa);
  DefaultNaNHelper(sn, qm, qa);
  DefaultNaNHelper(qn, sm, qa);
  DefaultNaNHelper(qn, qm, qa);
}


TEST(call_no_relocation) {
  Address call_start;
  Address return_address;

  INIT_V8();
  SETUP();

  START();

  Label function;
  Label test;

  __ B(&test);

  __ Bind(&function);
  __ Mov(x0, 0x1);
  __ Ret();

  __ Bind(&test);
  __ Mov(x0, 0x0);
  __ Push(lr, xzr);
  {
    Assembler::BlockConstPoolScope scope(&masm);
    call_start = buf + __ pc_offset();
    __ Call(buf + function.pos(), RelocInfo::NONE64);
    return_address = buf + __ pc_offset();
  }
  __ Pop(xzr, lr);
  END();

  RUN();

  ASSERT_EQUAL_64(1, x0);

  // Elsewhere in V8, return_address_from_call_start does not currently
  // encounter any non-relocatable call sequences, so exercise it here to make
  // sure it handles them.
  // TODO(jbramley): Once Crankshaft is complete, decide if we need to support
  // non-relocatable calls at all.
  CHECK(return_address ==
        Assembler::return_address_from_call_start(call_start));

  TEARDOWN();
}
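
// A sketch of what the test records, assuming the non-relocatable Call expands
// to a fixed-length instruction sequence ending in a branch-and-link:
//
//   call_start      -> address of the first instruction of the call sequence
//   return_address  -> address of the first instruction after the sequence
//
// return_address_from_call_start must recognise every call sequence the macro
// assembler can emit and return the address at which execution resumes, which
// is exactly what the CHECK above verifies for the non-relocatable form.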


static void AbsHelperX(int64_t value) {
  int64_t expected;

  SETUP();
  START();

  Label fail;
  Label done;

  __ Mov(x0, 0);
  __ Mov(x1, value);

  if (value != kXMinInt) {
    // Compute the expected result directly; labs takes a long, which is only
    // 32 bits wide on some platforms and so could truncate an int64_t.
    expected = (value < 0) ? -value : value;

    Label next;
    // The result is representable.
    __ Abs(x10, x1);
    __ Abs(x11, x1, &fail);
    __ Abs(x12, x1, &fail, &next);
    __ Bind(&next);
    __ Abs(x13, x1, NULL, &done);
  } else {
    // The absolute value of kXMinInt is not representable as an int64_t; the
    // MacroAssembler's Abs returns kXMinInt in that case.
    expected = kXMinInt;

    Label next;
    // The result is not representable.
    __ Abs(x10, x1);
    __ Abs(x11, x1, NULL, &fail);
    __ Abs(x12, x1, &next, &fail);
    __ Bind(&next);
    __ Abs(x13, x1, &done);
  }

  __ Bind(&fail);
  __ Mov(x0, -1);

  __ Bind(&done);

  END();
  RUN();

  ASSERT_EQUAL_64(0, x0);
  ASSERT_EQUAL_64(value, x1);
  ASSERT_EQUAL_64(expected, x10);
  ASSERT_EQUAL_64(expected, x11);
  ASSERT_EQUAL_64(expected, x12);
  ASSERT_EQUAL_64(expected, x13);

  TEARDOWN();
}
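
// As exercised above, the MacroAssembler's Abs takes two optional labels, the
// first for the not-representable case and the second for the representable
// case:
//
//   __ Abs(rd, rm);                       // rd = |rm|, no branching.
//   __ Abs(rd, rm, &not_representable);   // Taken only if rm == INT_MIN.
//   __ Abs(rd, rm, &not_representable, &is_representable);
//
// At most one label is taken per call, depending on whether |rm| fits in the
// register; for the INT_MIN input, two's-complement negation wraps back to
// INT_MIN itself.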


static void AbsHelperW(int32_t value) {
  int32_t expected;

  SETUP();
  START();

  Label fail;
  Label done;

  __ Mov(w0, 0);
  // TODO(jbramley): The cast is needed to avoid a sign-extension bug in VIXL.
  // Once it is fixed, we should remove the cast.
  __ Mov(w1, static_cast<uint32_t>(value));

  if (value != kWMinInt) {
    expected = abs(value);

    Label next;
    // The result is representable.
    __ Abs(w10, w1);
    __ Abs(w11, w1, &fail);
    __ Abs(w12, w1, &fail, &next);
    __ Bind(&next);
    __ Abs(w13, w1, NULL, &done);
  } else {
    // abs is undefined for kWMinInt, whose absolute value is not representable
    // as an int32_t; the MacroAssembler's Abs returns kWMinInt in that case.
    expected = kWMinInt;

    Label next;
    // The result is not representable.
    __ Abs(w10, w1);
    __ Abs(w11, w1, NULL, &fail);
    __ Abs(w12, w1, &next, &fail);
    __ Bind(&next);
    __ Abs(w13, w1, &done);
  }

  __ Bind(&fail);
  __ Mov(w0, -1);

  __ Bind(&done);

  END();
  RUN();

  ASSERT_EQUAL_32(0, w0);
  ASSERT_EQUAL_32(value, w1);
  ASSERT_EQUAL_32(expected, w10);
  ASSERT_EQUAL_32(expected, w11);
  ASSERT_EQUAL_32(expected, w12);
  ASSERT_EQUAL_32(expected, w13);

  TEARDOWN();
}


TEST(abs) {
  INIT_V8();
  AbsHelperX(0);
  AbsHelperX(42);
  AbsHelperX(-42);
  AbsHelperX(kXMinInt);
  AbsHelperX(kXMaxInt);

  AbsHelperW(0);
  AbsHelperW(42);
  AbsHelperW(-42);
  AbsHelperW(kWMinInt);
  AbsHelperW(kWMaxInt);
}
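
// kXMinInt and kWMinInt are the interesting boundary cases: in two's
// complement, negating the minimum value overflows (for example, negating the
// 32-bit value 0x80000000 yields 0x80000000 again), which is why the helpers
// treat these inputs as "not representable".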


TEST(pool_size) {
  INIT_V8();
  SETUP();

  // This test does not execute any code. It only tests that the size of the
  // pools is read correctly from the RelocInfo.

  Label exit;
  __ b(&exit);

  const unsigned constant_pool_size = 312;
  const unsigned veneer_pool_size = 184;

  __ RecordConstPool(constant_pool_size);
  for (unsigned i = 0; i < constant_pool_size / 4; ++i) {
    __ dc32(0);
  }

  __ RecordVeneerPool(masm.pc_offset(), veneer_pool_size);
  for (unsigned i = 0; i < veneer_pool_size / kInstructionSize; ++i) {
    __ nop();
  }

  __ bind(&exit);

  Heap* heap = isolate->heap();
  CodeDesc desc;
  Object* code_object = NULL;
  Code* code;
  masm.GetCode(&desc);
  MaybeObject* maybe_code = heap->CreateCode(desc, 0, masm.CodeObject());
  maybe_code->ToObject(&code_object);
  code = Code::cast(code_object);

  unsigned pool_count = 0;
  int pool_mask = RelocInfo::ModeMask(RelocInfo::CONST_POOL) |
                  RelocInfo::ModeMask(RelocInfo::VENEER_POOL);
  for (RelocIterator it(code, pool_mask); !it.done(); it.next()) {
    RelocInfo* info = it.rinfo();
    if (RelocInfo::IsConstPool(info->rmode())) {
      ASSERT(info->data() == constant_pool_size);
      ++pool_count;
    }
    if (RelocInfo::IsVeneerPool(info->rmode())) {
      ASSERT(info->data() == veneer_pool_size);
      ++pool_count;
    }
  }

  ASSERT(pool_count == 2);

  TEARDOWN();
}
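
// The RelocInfo entry for a pool marker carries the pool's size in bytes in
// its data() payload; consumers such as the disassembler use it to skip over
// the pool rather than decode its contents as instructions. The 312-byte
// constant pool above is emitted as 78 dc32 words, and the 184-byte veneer
// pool as 46 nops (184 / kInstructionSize, with 4-byte A64 instructions).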