Chromium Code Reviews

Side by Side Diff: test/cctest/test-assembler-mips.cc

Issue 2751973002: MIPS: Move ldc1/sdc1 to macro-assembler. (Closed)
Patch Set: Created 3 years, 9 months ago
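
This patch set updates the MIPS assembler tests to call the new Ldc1/Sdc1 macro-assembler helpers (added to src/mips/macro-assembler-mips.cc in this CL) instead of emitting the raw ldc1/sdc1 instructions directly. The sketch below only illustrates the general shape of such a wrapper and is not the code from this CL; the use of IsFp32Mode(), Register::kMantissaOffset/kExponentOffset and FPURegister::from_code() is an assumption standing in for whatever the real helper does, and the real helper also handles offsets that do not fit in a 16-bit immediate.

// Illustrative sketch only -- not the implementation added by this CL.
// Assumes the V8 MIPS macro-assembler context (FPURegister, MemOperand,
// IsFp32Mode(), Register::kMantissaOffset / kExponentOffset).
void MacroAssembler::Ldc1(FPURegister fd, const MemOperand& src) {
  if (IsFp32Mode()) {
    // In FP32 mode a double lives in an even/odd single-precision register
    // pair, so split the 64-bit load into two 32-bit word loads.
    lwc1(fd, MemOperand(src.rm(), src.offset() + Register::kMantissaOffset));
    lwc1(FPURegister::from_code(fd.code() + 1),
         MemOperand(src.rm(), src.offset() + Register::kExponentOffset));
  } else {
    // FP64/FPXX mode: the full 64-bit register can be loaded directly.
    ldc1(fd, src);
  }
}

A matching Sdc1 store helper would mirror this with swc1/sdc1.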
OLD | NEW
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 261 matching lines...)
272 float fg; 272 float fg;
273 } T; 273 } T;
274 T t; 274 T t;
275 275
276 // Create a function that accepts &t, and loads, manipulates, and stores 276 // Create a function that accepts &t, and loads, manipulates, and stores
277 // the doubles t.a ... t.f. 277 // the doubles t.a ... t.f.
278 MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes); 278 MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
279 Label L, C; 279 Label L, C;
280 280
281 // Double precision floating point instructions. 281 // Double precision floating point instructions.
282 __ ldc1(f4, MemOperand(a0, offsetof(T, a)) ); 282 __ Ldc1(f4, MemOperand(a0, offsetof(T, a)));
283 __ ldc1(f6, MemOperand(a0, offsetof(T, b)) ); 283 __ Ldc1(f6, MemOperand(a0, offsetof(T, b)));
284 __ add_d(f8, f4, f6); 284 __ add_d(f8, f4, f6);
285 __ sdc1(f8, MemOperand(a0, offsetof(T, c)) ); // c = a + b. 285 __ Sdc1(f8, MemOperand(a0, offsetof(T, c))); // c = a + b.
286 286
287 __ mov_d(f10, f8); // c 287 __ mov_d(f10, f8); // c
288 __ neg_d(f12, f6); // -b 288 __ neg_d(f12, f6); // -b
289 __ sub_d(f10, f10, f12); 289 __ sub_d(f10, f10, f12);
290 __ sdc1(f10, MemOperand(a0, offsetof(T, d)) ); // d = c - (-b). 290 __ Sdc1(f10, MemOperand(a0, offsetof(T, d))); // d = c - (-b).
291 291
292 __ sdc1(f4, MemOperand(a0, offsetof(T, b)) ); // b = a. 292 __ Sdc1(f4, MemOperand(a0, offsetof(T, b))); // b = a.
293 293
294 __ li(t0, 120); 294 __ li(t0, 120);
295 __ mtc1(t0, f14); 295 __ mtc1(t0, f14);
296 __ cvt_d_w(f14, f14); // f14 = 120.0. 296 __ cvt_d_w(f14, f14); // f14 = 120.0.
297 __ mul_d(f10, f10, f14); 297 __ mul_d(f10, f10, f14);
298 __ sdc1(f10, MemOperand(a0, offsetof(T, e)) ); // e = d * 120 = 1.8066e16. 298 __ Sdc1(f10, MemOperand(a0, offsetof(T, e))); // e = d * 120 = 1.8066e16.
299 299
300 __ div_d(f12, f10, f4); 300 __ div_d(f12, f10, f4);
301 __ sdc1(f12, MemOperand(a0, offsetof(T, f)) ); // f = e / a = 120.44. 301 __ Sdc1(f12, MemOperand(a0, offsetof(T, f))); // f = e / a = 120.44.
302 302
303 __ sqrt_d(f14, f12); 303 __ sqrt_d(f14, f12);
304 __ sdc1(f14, MemOperand(a0, offsetof(T, g)) ); 304 __ Sdc1(f14, MemOperand(a0, offsetof(T, g)));
305 // g = sqrt(f) = 10.97451593465515908537 305 // g = sqrt(f) = 10.97451593465515908537
306 306
307 if (IsMipsArchVariant(kMips32r2)) { 307 if (IsMipsArchVariant(kMips32r2)) {
308 __ ldc1(f4, MemOperand(a0, offsetof(T, h)) ); 308 __ Ldc1(f4, MemOperand(a0, offsetof(T, h)));
309 __ ldc1(f6, MemOperand(a0, offsetof(T, i)) ); 309 __ Ldc1(f6, MemOperand(a0, offsetof(T, i)));
310 __ madd_d(f14, f6, f4, f6); 310 __ madd_d(f14, f6, f4, f6);
311 __ sdc1(f14, MemOperand(a0, offsetof(T, h)) ); 311 __ Sdc1(f14, MemOperand(a0, offsetof(T, h)));
312 } 312 }
313 313
314 // Single precision floating point instructions. 314 // Single precision floating point instructions.
315 __ lwc1(f4, MemOperand(a0, offsetof(T, fa)) ); 315 __ lwc1(f4, MemOperand(a0, offsetof(T, fa)) );
316 __ lwc1(f6, MemOperand(a0, offsetof(T, fb)) ); 316 __ lwc1(f6, MemOperand(a0, offsetof(T, fb)) );
317 __ add_s(f8, f4, f6); 317 __ add_s(f8, f4, f6);
318 __ swc1(f8, MemOperand(a0, offsetof(T, fc)) ); // fc = fa + fb. 318 __ swc1(f8, MemOperand(a0, offsetof(T, fc)) ); // fc = fa + fb.
319 319
320 __ neg_s(f10, f6); // -fb 320 __ neg_s(f10, f6); // -fb
321 __ sub_s(f10, f8, f10); 321 __ sub_s(f10, f8, f10);
(...skipping 75 matching lines...)
397 Isolate* isolate = CcTest::i_isolate(); 397 Isolate* isolate = CcTest::i_isolate();
398 HandleScope scope(isolate); 398 HandleScope scope(isolate);
399 399
400 typedef struct { 400 typedef struct {
401 double a; 401 double a;
402 double b; 402 double b;
403 double c; 403 double c;
404 } T; 404 } T;
405 T t; 405 T t;
406 406
407 Assembler assm(isolate, NULL, 0); 407 MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
408 Label L, C; 408 Label L, C;
409 409
410 __ ldc1(f4, MemOperand(a0, offsetof(T, a)) ); 410 __ Ldc1(f4, MemOperand(a0, offsetof(T, a)));
411 __ ldc1(f6, MemOperand(a0, offsetof(T, b)) ); 411 __ Ldc1(f6, MemOperand(a0, offsetof(T, b)));
412 412
413 // Swap f4 and f6, by using four integer registers, t0-t3. 413 // Swap f4 and f6, by using four integer registers, t0-t3.
414 if (IsFp32Mode()) { 414 if (IsFp32Mode()) {
415 __ mfc1(t0, f4); 415 __ mfc1(t0, f4);
416 __ mfc1(t1, f5); 416 __ mfc1(t1, f5);
417 __ mfc1(t2, f6); 417 __ mfc1(t2, f6);
418 __ mfc1(t3, f7); 418 __ mfc1(t3, f7);
419 419
420 __ mtc1(t0, f6); 420 __ mtc1(t0, f6);
421 __ mtc1(t1, f7); 421 __ mtc1(t1, f7);
422 __ mtc1(t2, f4); 422 __ mtc1(t2, f4);
423 __ mtc1(t3, f5); 423 __ mtc1(t3, f5);
424 } else { 424 } else {
425 CHECK(!IsMipsArchVariant(kMips32r1) && !IsMipsArchVariant(kLoongson)); 425 CHECK(!IsMipsArchVariant(kMips32r1) && !IsMipsArchVariant(kLoongson));
426 DCHECK(IsFp64Mode() || IsFpxxMode()); 426 DCHECK(IsFp64Mode() || IsFpxxMode());
427 __ mfc1(t0, f4); 427 __ mfc1(t0, f4);
428 __ mfhc1(t1, f4); 428 __ mfhc1(t1, f4);
429 __ mfc1(t2, f6); 429 __ mfc1(t2, f6);
430 __ mfhc1(t3, f6); 430 __ mfhc1(t3, f6);
431 431
432 __ mtc1(t0, f6); 432 __ mtc1(t0, f6);
433 __ mthc1(t1, f6); 433 __ mthc1(t1, f6);
434 __ mtc1(t2, f4); 434 __ mtc1(t2, f4);
435 __ mthc1(t3, f4); 435 __ mthc1(t3, f4);
436 } 436 }
437 437
438 // Store the swapped f4 and f5 back to memory. 438 // Store the swapped f4 and f5 back to memory.
439 __ sdc1(f4, MemOperand(a0, offsetof(T, a)) ); 439 __ Sdc1(f4, MemOperand(a0, offsetof(T, a)));
440 __ sdc1(f6, MemOperand(a0, offsetof(T, c)) ); 440 __ Sdc1(f6, MemOperand(a0, offsetof(T, c)));
441 441
442 __ jr(ra); 442 __ jr(ra);
443 __ nop(); 443 __ nop();
444 444
445 CodeDesc desc; 445 CodeDesc desc;
446 assm.GetCode(&desc); 446 assm.GetCode(&desc);
447 Handle<Code> code = isolate->factory()->NewCode( 447 Handle<Code> code = isolate->factory()->NewCode(
448 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); 448 desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
449 F3 f = FUNCTION_CAST<F3>(code->entry()); 449 F3 f = FUNCTION_CAST<F3>(code->entry());
450 t.a = 1.5e22; 450 t.a = 1.5e22;
(...skipping 15 matching lines...)
466 HandleScope scope(isolate); 466 HandleScope scope(isolate);
467 467
468 typedef struct { 468 typedef struct {
469 double a; 469 double a;
470 double b; 470 double b;
471 int i; 471 int i;
472 int j; 472 int j;
473 } T; 473 } T;
474 T t; 474 T t;
475 475
476 Assembler assm(isolate, NULL, 0); 476 MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
477 Label L, C; 477 Label L, C;
478 478
479 // Load all structure elements to registers. 479 // Load all structure elements to registers.
480 __ ldc1(f4, MemOperand(a0, offsetof(T, a)) ); 480 __ Ldc1(f4, MemOperand(a0, offsetof(T, a)));
481 __ ldc1(f6, MemOperand(a0, offsetof(T, b)) ); 481 __ Ldc1(f6, MemOperand(a0, offsetof(T, b)));
482 __ lw(t0, MemOperand(a0, offsetof(T, i)) ); 482 __ lw(t0, MemOperand(a0, offsetof(T, i)) );
483 __ lw(t1, MemOperand(a0, offsetof(T, j)) ); 483 __ lw(t1, MemOperand(a0, offsetof(T, j)) );
484 484
485 // Convert double in f4 to int in element i. 485 // Convert double in f4 to int in element i.
486 __ cvt_w_d(f8, f4); 486 __ cvt_w_d(f8, f4);
487 __ mfc1(t2, f8); 487 __ mfc1(t2, f8);
488 __ sw(t2, MemOperand(a0, offsetof(T, i)) ); 488 __ sw(t2, MemOperand(a0, offsetof(T, i)) );
489 489
490 // Convert double in f6 to int in element j. 490 // Convert double in f6 to int in element j.
491 __ cvt_w_d(f10, f6); 491 __ cvt_w_d(f10, f6);
492 __ mfc1(t3, f10); 492 __ mfc1(t3, f10);
493 __ sw(t3, MemOperand(a0, offsetof(T, j)) ); 493 __ sw(t3, MemOperand(a0, offsetof(T, j)) );
494 494
495 // Convert int in original i (t0) to double in a. 495 // Convert int in original i (t0) to double in a.
496 __ mtc1(t0, f12); 496 __ mtc1(t0, f12);
497 __ cvt_d_w(f0, f12); 497 __ cvt_d_w(f0, f12);
498 __ sdc1(f0, MemOperand(a0, offsetof(T, a)) ); 498 __ Sdc1(f0, MemOperand(a0, offsetof(T, a)));
499 499
500 // Convert int in original j (t1) to double in b. 500 // Convert int in original j (t1) to double in b.
501 __ mtc1(t1, f14); 501 __ mtc1(t1, f14);
502 __ cvt_d_w(f2, f14); 502 __ cvt_d_w(f2, f14);
503 __ sdc1(f2, MemOperand(a0, offsetof(T, b)) ); 503 __ Sdc1(f2, MemOperand(a0, offsetof(T, b)));
504 504
505 __ jr(ra); 505 __ jr(ra);
506 __ nop(); 506 __ nop();
507 507
508 CodeDesc desc; 508 CodeDesc desc;
509 assm.GetCode(&desc); 509 assm.GetCode(&desc);
510 Handle<Code> code = isolate->factory()->NewCode( 510 Handle<Code> code = isolate->factory()->NewCode(
511 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); 511 desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
512 F3 f = FUNCTION_CAST<F3>(code->entry()); 512 F3 f = FUNCTION_CAST<F3>(code->entry());
513 t.a = 1.5e4; 513 t.a = 1.5e4;
(...skipping 105 matching lines...)
619 double f; 619 double f;
620 int32_t result; 620 int32_t result;
621 } T; 621 } T;
622 T t; 622 T t;
623 623
624 // Create a function that accepts &t, and loads, manipulates, and stores 624 // Create a function that accepts &t, and loads, manipulates, and stores
625 // the doubles t.a ... t.f. 625 // the doubles t.a ... t.f.
626 MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes); 626 MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
627 Label neither_is_nan, less_than, outa_here; 627 Label neither_is_nan, less_than, outa_here;
628 628
629 __ ldc1(f4, MemOperand(a0, offsetof(T, a)) ); 629 __ Ldc1(f4, MemOperand(a0, offsetof(T, a)));
630 __ ldc1(f6, MemOperand(a0, offsetof(T, b)) ); 630 __ Ldc1(f6, MemOperand(a0, offsetof(T, b)));
631 if (!IsMipsArchVariant(kMips32r6)) { 631 if (!IsMipsArchVariant(kMips32r6)) {
632 __ c(UN, D, f4, f6); 632 __ c(UN, D, f4, f6);
633 __ bc1f(&neither_is_nan); 633 __ bc1f(&neither_is_nan);
634 } else { 634 } else {
635 __ cmp(UN, L, f2, f4, f6); 635 __ cmp(UN, L, f2, f4, f6);
636 __ bc1eqz(&neither_is_nan, f2); 636 __ bc1eqz(&neither_is_nan, f2);
637 } 637 }
638 __ nop(); 638 __ nop();
639 __ sw(zero_reg, MemOperand(a0, offsetof(T, result)) ); 639 __ sw(zero_reg, MemOperand(a0, offsetof(T, result)) );
640 __ Branch(&outa_here); 640 __ Branch(&outa_here);
(...skipping 187 matching lines...)
828 typedef struct { 828 typedef struct {
829 double a; 829 double a;
830 double b; 830 double b;
831 int32_t dbl_mant; 831 int32_t dbl_mant;
832 int32_t dbl_exp; 832 int32_t dbl_exp;
833 int32_t word; 833 int32_t word;
834 int32_t b_word; 834 int32_t b_word;
835 } T; 835 } T;
836 T t; 836 T t;
837 837
838 Assembler assm(isolate, NULL, 0); 838 MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
839 Label L, C; 839 Label L, C;
840 840
841 if (IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kLoongson)) return; 841 if (IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kLoongson)) return;
842 842
843 // Load all structure elements to registers. 843 // Load all structure elements to registers.
844 // (f0, f1) = a (fp32), f0 = a (fp64) 844 // (f0, f1) = a (fp32), f0 = a (fp64)
845 __ ldc1(f0, MemOperand(a0, offsetof(T, a))); 845 __ Ldc1(f0, MemOperand(a0, offsetof(T, a)));
846 846
847 __ mfc1(t0, f0); // t0 = f0(31..0) 847 __ mfc1(t0, f0); // t0 = f0(31..0)
848 __ mfhc1(t1, f0); // t1 = sign_extend(f0(63..32)) 848 __ mfhc1(t1, f0); // t1 = sign_extend(f0(63..32))
849 __ sw(t0, MemOperand(a0, offsetof(T, dbl_mant))); // dbl_mant = t0 849 __ sw(t0, MemOperand(a0, offsetof(T, dbl_mant))); // dbl_mant = t0
850 __ sw(t1, MemOperand(a0, offsetof(T, dbl_exp))); // dbl_exp = t1 850 __ sw(t1, MemOperand(a0, offsetof(T, dbl_exp))); // dbl_exp = t1
851 851
852 // Convert double in f0 to word, save hi/lo parts. 852 // Convert double in f0 to word, save hi/lo parts.
853 __ cvt_w_d(f0, f0); // a_word = (word)a 853 __ cvt_w_d(f0, f0); // a_word = (word)a
854 __ mfc1(t0, f0); // f0 has a 32-bits word. t0 = a_word 854 __ mfc1(t0, f0); // f0 has a 32-bits word. t0 = a_word
855 __ sw(t0, MemOperand(a0, offsetof(T, word))); // word = a_word 855 __ sw(t0, MemOperand(a0, offsetof(T, word))); // word = a_word
856 856
857 // Convert the b word to double b. 857 // Convert the b word to double b.
858 __ lw(t0, MemOperand(a0, offsetof(T, b_word))); 858 __ lw(t0, MemOperand(a0, offsetof(T, b_word)));
859 __ mtc1(t0, f8); // f8 has a 32-bits word. 859 __ mtc1(t0, f8); // f8 has a 32-bits word.
860 __ cvt_d_w(f10, f8); 860 __ cvt_d_w(f10, f8);
861 __ sdc1(f10, MemOperand(a0, offsetof(T, b))); 861 __ Sdc1(f10, MemOperand(a0, offsetof(T, b)));
862 862
863 __ jr(ra); 863 __ jr(ra);
864 __ nop(); 864 __ nop();
865 865
866 CodeDesc desc; 866 CodeDesc desc;
867 assm.GetCode(&desc); 867 assm.GetCode(&desc);
868 Handle<Code> code = isolate->factory()->NewCode( 868 Handle<Code> code = isolate->factory()->NewCode(
869 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); 869 desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
870 F3 f = FUNCTION_CAST<F3>(code->entry()); 870 F3 f = FUNCTION_CAST<F3>(code->entry());
871 t.a = 2.147483646e+09; // 0x7FFFFFFE -> 0xFF80000041DFFFFF as double. 871 t.a = 2.147483646e+09; // 0x7FFFFFFE -> 0xFF80000041DFFFFF as double.
(...skipping 277 matching lines...)
1149 uint32_t trunc_small_out; 1149 uint32_t trunc_small_out;
1150 uint32_t cvt_big_in; 1150 uint32_t cvt_big_in;
1151 uint32_t cvt_small_in; 1151 uint32_t cvt_small_in;
1152 } T; 1152 } T;
1153 T t; 1153 T t;
1154 1154
1155 MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes); 1155 MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
1156 1156
1157 __ sw(t0, MemOperand(a0, offsetof(T, cvt_small_in))); 1157 __ sw(t0, MemOperand(a0, offsetof(T, cvt_small_in)));
1158 __ Cvt_d_uw(f10, t0, f4); 1158 __ Cvt_d_uw(f10, t0, f4);
1159 __ sdc1(f10, MemOperand(a0, offsetof(T, cvt_small_out))); 1159 __ Sdc1(f10, MemOperand(a0, offsetof(T, cvt_small_out)));
1160 1160
1161 __ Trunc_uw_d(f10, f10, f4); 1161 __ Trunc_uw_d(f10, f10, f4);
1162 __ swc1(f10, MemOperand(a0, offsetof(T, trunc_small_out))); 1162 __ swc1(f10, MemOperand(a0, offsetof(T, trunc_small_out)));
1163 1163
1164 __ sw(t0, MemOperand(a0, offsetof(T, cvt_big_in))); 1164 __ sw(t0, MemOperand(a0, offsetof(T, cvt_big_in)));
1165 __ Cvt_d_uw(f8, t0, f4); 1165 __ Cvt_d_uw(f8, t0, f4);
1166 __ sdc1(f8, MemOperand(a0, offsetof(T, cvt_big_out))); 1166 __ Sdc1(f8, MemOperand(a0, offsetof(T, cvt_big_out)));
1167 1167
1168 __ Trunc_uw_d(f8, f8, f4); 1168 __ Trunc_uw_d(f8, f8, f4);
1169 __ swc1(f8, MemOperand(a0, offsetof(T, trunc_big_out))); 1169 __ swc1(f8, MemOperand(a0, offsetof(T, trunc_big_out)));
1170 1170
1171 __ jr(ra); 1171 __ jr(ra);
1172 __ nop(); 1172 __ nop();
1173 1173
1174 CodeDesc desc; 1174 CodeDesc desc;
1175 assm.GetCode(&desc); 1175 assm.GetCode(&desc);
1176 Handle<Code> code = isolate->factory()->NewCode( 1176 Handle<Code> code = isolate->factory()->NewCode(
(...skipping 52 matching lines...)
1229 T t; 1229 T t;
1230 1230
1231 #undef ROUND_STRUCT_ELEMENT 1231 #undef ROUND_STRUCT_ELEMENT
1232 1232
1233 MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes); 1233 MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
1234 1234
1235 // Save FCSR. 1235 // Save FCSR.
1236 __ cfc1(a1, FCSR); 1236 __ cfc1(a1, FCSR);
1237 // Disable FPU exceptions. 1237 // Disable FPU exceptions.
1238 __ ctc1(zero_reg, FCSR); 1238 __ ctc1(zero_reg, FCSR);
1239 #define RUN_ROUND_TEST(x) \ 1239 #define RUN_ROUND_TEST(x) \
1240 __ cfc1(t0, FCSR);\ 1240 __ cfc1(t0, FCSR); \
1241 __ sw(t0, MemOperand(a0, offsetof(T, x##_isNaN2008))); \ 1241 __ sw(t0, MemOperand(a0, offsetof(T, x##_isNaN2008))); \
1242 __ ldc1(f0, MemOperand(a0, offsetof(T, round_up_in))); \ 1242 __ Ldc1(f0, MemOperand(a0, offsetof(T, round_up_in))); \
1243 __ x##_w_d(f0, f0); \ 1243 __ x##_w_d(f0, f0); \
1244 __ swc1(f0, MemOperand(a0, offsetof(T, x##_up_out))); \ 1244 __ swc1(f0, MemOperand(a0, offsetof(T, x##_up_out))); \
1245 \ 1245 \
1246 __ ldc1(f0, MemOperand(a0, offsetof(T, round_down_in))); \ 1246 __ Ldc1(f0, MemOperand(a0, offsetof(T, round_down_in))); \
1247 __ x##_w_d(f0, f0); \ 1247 __ x##_w_d(f0, f0); \
1248 __ swc1(f0, MemOperand(a0, offsetof(T, x##_down_out))); \ 1248 __ swc1(f0, MemOperand(a0, offsetof(T, x##_down_out))); \
1249 \ 1249 \
1250 __ ldc1(f0, MemOperand(a0, offsetof(T, neg_round_up_in))); \ 1250 __ Ldc1(f0, MemOperand(a0, offsetof(T, neg_round_up_in))); \
1251 __ x##_w_d(f0, f0); \ 1251 __ x##_w_d(f0, f0); \
1252 __ swc1(f0, MemOperand(a0, offsetof(T, neg_##x##_up_out))); \ 1252 __ swc1(f0, MemOperand(a0, offsetof(T, neg_##x##_up_out))); \
1253 \ 1253 \
1254 __ ldc1(f0, MemOperand(a0, offsetof(T, neg_round_down_in))); \ 1254 __ Ldc1(f0, MemOperand(a0, offsetof(T, neg_round_down_in))); \
1255 __ x##_w_d(f0, f0); \ 1255 __ x##_w_d(f0, f0); \
1256 __ swc1(f0, MemOperand(a0, offsetof(T, neg_##x##_down_out))); \ 1256 __ swc1(f0, MemOperand(a0, offsetof(T, neg_##x##_down_out))); \
1257 \ 1257 \
1258 __ ldc1(f0, MemOperand(a0, offsetof(T, err1_in))); \ 1258 __ Ldc1(f0, MemOperand(a0, offsetof(T, err1_in))); \
1259 __ ctc1(zero_reg, FCSR); \ 1259 __ ctc1(zero_reg, FCSR); \
1260 __ x##_w_d(f0, f0); \ 1260 __ x##_w_d(f0, f0); \
1261 __ cfc1(a2, FCSR); \ 1261 __ cfc1(a2, FCSR); \
1262 __ sw(a2, MemOperand(a0, offsetof(T, x##_err1_out))); \ 1262 __ sw(a2, MemOperand(a0, offsetof(T, x##_err1_out))); \
1263 \ 1263 \
1264 __ ldc1(f0, MemOperand(a0, offsetof(T, err2_in))); \ 1264 __ Ldc1(f0, MemOperand(a0, offsetof(T, err2_in))); \
1265 __ ctc1(zero_reg, FCSR); \ 1265 __ ctc1(zero_reg, FCSR); \
1266 __ x##_w_d(f0, f0); \ 1266 __ x##_w_d(f0, f0); \
1267 __ cfc1(a2, FCSR); \ 1267 __ cfc1(a2, FCSR); \
1268 __ sw(a2, MemOperand(a0, offsetof(T, x##_err2_out))); \ 1268 __ sw(a2, MemOperand(a0, offsetof(T, x##_err2_out))); \
1269 \ 1269 \
1270 __ ldc1(f0, MemOperand(a0, offsetof(T, err3_in))); \ 1270 __ Ldc1(f0, MemOperand(a0, offsetof(T, err3_in))); \
1271 __ ctc1(zero_reg, FCSR); \ 1271 __ ctc1(zero_reg, FCSR); \
1272 __ x##_w_d(f0, f0); \ 1272 __ x##_w_d(f0, f0); \
1273 __ cfc1(a2, FCSR); \ 1273 __ cfc1(a2, FCSR); \
1274 __ sw(a2, MemOperand(a0, offsetof(T, x##_err3_out))); \ 1274 __ sw(a2, MemOperand(a0, offsetof(T, x##_err3_out))); \
1275 \ 1275 \
1276 __ ldc1(f0, MemOperand(a0, offsetof(T, err4_in))); \ 1276 __ Ldc1(f0, MemOperand(a0, offsetof(T, err4_in))); \
1277 __ ctc1(zero_reg, FCSR); \ 1277 __ ctc1(zero_reg, FCSR); \
1278 __ x##_w_d(f0, f0); \ 1278 __ x##_w_d(f0, f0); \
1279 __ cfc1(a2, FCSR); \ 1279 __ cfc1(a2, FCSR); \
1280 __ sw(a2, MemOperand(a0, offsetof(T, x##_err4_out))); \ 1280 __ sw(a2, MemOperand(a0, offsetof(T, x##_err4_out))); \
1281 __ swc1(f0, MemOperand(a0, offsetof(T, x##_invalid_result))); 1281 __ swc1(f0, MemOperand(a0, offsetof(T, x##_invalid_result)));
1282 1282
1283 RUN_ROUND_TEST(round) 1283 RUN_ROUND_TEST(round)
1284 RUN_ROUND_TEST(floor) 1284 RUN_ROUND_TEST(floor)
1285 RUN_ROUND_TEST(ceil) 1285 RUN_ROUND_TEST(ceil)
1286 RUN_ROUND_TEST(trunc) 1286 RUN_ROUND_TEST(trunc)
1287 RUN_ROUND_TEST(cvt) 1287 RUN_ROUND_TEST(cvt)
1288 1288
1289 // Restore FCSR. 1289 // Restore FCSR.
1290 __ ctc1(a1, FCSR); 1290 __ ctc1(a1, FCSR);
(...skipping 86 matching lines...) Expand 10 before | Expand all | Expand 10 after
1377 __ addiu(t1, zero_reg, 1); // t1 = 1 1377 __ addiu(t1, zero_reg, 1); // t1 = 1
1378 __ seleqz(t3, t1, zero_reg); // t3 = 1 1378 __ seleqz(t3, t1, zero_reg); // t3 = 1
1379 __ sw(t3, MemOperand(a0, offsetof(Test, a))); // a = 1 1379 __ sw(t3, MemOperand(a0, offsetof(Test, a))); // a = 1
1380 __ seleqz(t2, t1, t1); // t2 = 0 1380 __ seleqz(t2, t1, t1); // t2 = 0
1381 __ sw(t2, MemOperand(a0, offsetof(Test, b))); // b = 0 1381 __ sw(t2, MemOperand(a0, offsetof(Test, b))); // b = 0
1382 __ selnez(t3, t1, zero_reg); // t3 = 1; 1382 __ selnez(t3, t1, zero_reg); // t3 = 1;
1383 __ sw(t3, MemOperand(a0, offsetof(Test, c))); // c = 0 1383 __ sw(t3, MemOperand(a0, offsetof(Test, c))); // c = 0
1384 __ selnez(t3, t1, t1); // t3 = 1 1384 __ selnez(t3, t1, t1); // t3 = 1
1385 __ sw(t3, MemOperand(a0, offsetof(Test, d))); // d = 1 1385 __ sw(t3, MemOperand(a0, offsetof(Test, d))); // d = 1
1386 // Floating point part of test. 1386 // Floating point part of test.
1387 __ ldc1(f0, MemOperand(a0, offsetof(Test, e)) ); // src 1387 __ Ldc1(f0, MemOperand(a0, offsetof(Test, e))); // src
1388 __ ldc1(f2, MemOperand(a0, offsetof(Test, f)) ); // test 1388 __ Ldc1(f2, MemOperand(a0, offsetof(Test, f))); // test
1389 __ lwc1(f8, MemOperand(a0, offsetof(Test, i)) ); // src 1389 __ lwc1(f8, MemOperand(a0, offsetof(Test, i)) ); // src
1390 __ lwc1(f10, MemOperand(a0, offsetof(Test, j)) ); // test 1390 __ lwc1(f10, MemOperand(a0, offsetof(Test, j)) ); // test
1391 __ seleqz_d(f4, f0, f2); 1391 __ seleqz_d(f4, f0, f2);
1392 __ selnez_d(f6, f0, f2); 1392 __ selnez_d(f6, f0, f2);
1393 __ seleqz_s(f12, f8, f10); 1393 __ seleqz_s(f12, f8, f10);
1394 __ selnez_s(f14, f8, f10); 1394 __ selnez_s(f14, f8, f10);
1395 __ sdc1(f4, MemOperand(a0, offsetof(Test, g)) ); // src 1395 __ Sdc1(f4, MemOperand(a0, offsetof(Test, g))); // src
1396 __ sdc1(f6, MemOperand(a0, offsetof(Test, h)) ); // src 1396 __ Sdc1(f6, MemOperand(a0, offsetof(Test, h))); // src
1397 __ swc1(f12, MemOperand(a0, offsetof(Test, k)) ); // src 1397 __ swc1(f12, MemOperand(a0, offsetof(Test, k)) ); // src
1398 __ swc1(f14, MemOperand(a0, offsetof(Test, l)) ); // src 1398 __ swc1(f14, MemOperand(a0, offsetof(Test, l)) ); // src
1399 __ jr(ra); 1399 __ jr(ra);
1400 __ nop(); 1400 __ nop();
1401 CodeDesc desc; 1401 CodeDesc desc;
1402 assm.GetCode(&desc); 1402 assm.GetCode(&desc);
1403 Handle<Code> code = isolate->factory()->NewCode( 1403 Handle<Code> code = isolate->factory()->NewCode(
1404 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); 1404 desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
1405 F3 f = FUNCTION_CAST<F3>(code->entry()); 1405 F3 f = FUNCTION_CAST<F3>(code->entry());
1406 1406
(...skipping 84 matching lines...)
1491 float inputse[kTableLength] = {2.0, 3.0, fnan, 3.0, -0.0, 0.0, finf, 1491 float inputse[kTableLength] = {2.0, 3.0, fnan, 3.0, -0.0, 0.0, finf,
1492 fnan, 42.0, finf, fminf, finf, fnan}; 1492 fnan, 42.0, finf, fminf, finf, fnan};
1493 float inputsf[kTableLength] = {3.0, 2.0, 3.0, fnan, 0.0, -0.0, fnan, 1493 float inputsf[kTableLength] = {3.0, 2.0, 3.0, fnan, 0.0, -0.0, fnan,
1494 finf, finf, 42.0, finf, fminf, fnan}; 1494 finf, finf, 42.0, finf, fminf, fnan};
1495 float outputsfmin[kTableLength] = {2.0, 2.0, 3.0, 3.0, -0.0, 1495 float outputsfmin[kTableLength] = {2.0, 2.0, 3.0, 3.0, -0.0,
1496 -0.0, finf, finf, 42.0, 42.0, 1496 -0.0, finf, finf, 42.0, 42.0,
1497 fminf, fminf, fnan}; 1497 fminf, fminf, fnan};
1498 float outputsfmax[kTableLength] = {3.0, 3.0, 3.0, 3.0, 0.0, 0.0, finf, 1498 float outputsfmax[kTableLength] = {3.0, 3.0, 3.0, 3.0, 0.0, 0.0, finf,
1499 finf, finf, finf, finf, finf, fnan}; 1499 finf, finf, finf, finf, finf, fnan};
1500 1500
1501 __ ldc1(f4, MemOperand(a0, offsetof(TestFloat, a))); 1501 __ Ldc1(f4, MemOperand(a0, offsetof(TestFloat, a)));
1502 __ ldc1(f8, MemOperand(a0, offsetof(TestFloat, b))); 1502 __ Ldc1(f8, MemOperand(a0, offsetof(TestFloat, b)));
1503 __ lwc1(f2, MemOperand(a0, offsetof(TestFloat, e))); 1503 __ lwc1(f2, MemOperand(a0, offsetof(TestFloat, e)));
1504 __ lwc1(f6, MemOperand(a0, offsetof(TestFloat, f))); 1504 __ lwc1(f6, MemOperand(a0, offsetof(TestFloat, f)));
1505 __ min_d(f10, f4, f8); 1505 __ min_d(f10, f4, f8);
1506 __ max_d(f12, f4, f8); 1506 __ max_d(f12, f4, f8);
1507 __ min_s(f14, f2, f6); 1507 __ min_s(f14, f2, f6);
1508 __ max_s(f16, f2, f6); 1508 __ max_s(f16, f2, f6);
1509 __ sdc1(f10, MemOperand(a0, offsetof(TestFloat, c))); 1509 __ Sdc1(f10, MemOperand(a0, offsetof(TestFloat, c)));
1510 __ sdc1(f12, MemOperand(a0, offsetof(TestFloat, d))); 1510 __ Sdc1(f12, MemOperand(a0, offsetof(TestFloat, d)));
1511 __ swc1(f14, MemOperand(a0, offsetof(TestFloat, g))); 1511 __ swc1(f14, MemOperand(a0, offsetof(TestFloat, g)));
1512 __ swc1(f16, MemOperand(a0, offsetof(TestFloat, h))); 1512 __ swc1(f16, MemOperand(a0, offsetof(TestFloat, h)));
1513 __ jr(ra); 1513 __ jr(ra);
1514 __ nop(); 1514 __ nop();
1515 1515
1516 CodeDesc desc; 1516 CodeDesc desc;
1517 assm.GetCode(&desc); 1517 assm.GetCode(&desc);
1518 Handle<Code> code = isolate->factory()->NewCode( 1518 Handle<Code> code = isolate->factory()->NewCode(
1519 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); 1519 desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
1520 F3 f = FUNCTION_CAST<F3>(code->entry()); 1520 F3 f = FUNCTION_CAST<F3>(code->entry());
(...skipping 86 matching lines...)
1607 2.0, 2.0, 2.0, 3.0, 3.0, 3.0, 1607 2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
1608 -3.0, -3.0, -3.0, -4.0, -4.0, -4.0, 1608 -3.0, -3.0, -3.0, -4.0, -4.0, -4.0,
1609 37778931862957161709568.0, 37778931862957161709569.0, 1609 37778931862957161709568.0, 37778931862957161709569.0,
1610 37778931862957161709580.0, 37778931862957161709581.0, 1610 37778931862957161709580.0, 37778931862957161709581.0,
1611 37778931862957161709582.0, 37778931862957161709583.0, 1611 37778931862957161709582.0, 37778931862957161709583.0,
1612 37778931862957161709584.0, 37778931862957161709585.0, 1612 37778931862957161709584.0, 37778931862957161709585.0,
1613 37778931862957161709586.0, 37778931862957161709587.0}; 1613 37778931862957161709586.0, 37778931862957161709587.0};
1614 int fcsr_inputs[4] = 1614 int fcsr_inputs[4] =
1615 {kRoundToNearest, kRoundToZero, kRoundToPlusInf, kRoundToMinusInf}; 1615 {kRoundToNearest, kRoundToZero, kRoundToPlusInf, kRoundToMinusInf};
1616 double* outputs[4] = {outputs_RN, outputs_RZ, outputs_RP, outputs_RM}; 1616 double* outputs[4] = {outputs_RN, outputs_RZ, outputs_RP, outputs_RM};
1617 __ ldc1(f4, MemOperand(a0, offsetof(TestFloat, a)) ); 1617 __ Ldc1(f4, MemOperand(a0, offsetof(TestFloat, a)));
1618 __ lw(t0, MemOperand(a0, offsetof(TestFloat, fcsr)) ); 1618 __ lw(t0, MemOperand(a0, offsetof(TestFloat, fcsr)) );
1619 __ cfc1(t1, FCSR); 1619 __ cfc1(t1, FCSR);
1620 __ ctc1(t0, FCSR); 1620 __ ctc1(t0, FCSR);
1621 __ rint_d(f8, f4); 1621 __ rint_d(f8, f4);
1622 __ sdc1(f8, MemOperand(a0, offsetof(TestFloat, b)) ); 1622 __ Sdc1(f8, MemOperand(a0, offsetof(TestFloat, b)));
1623 __ ctc1(t1, FCSR); 1623 __ ctc1(t1, FCSR);
1624 __ jr(ra); 1624 __ jr(ra);
1625 __ nop(); 1625 __ nop();
1626 1626
1627 CodeDesc desc; 1627 CodeDesc desc;
1628 assm.GetCode(&desc); 1628 assm.GetCode(&desc);
1629 Handle<Code> code = isolate->factory()->NewCode( 1629 Handle<Code> code = isolate->factory()->NewCode(
1630 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); 1630 desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
1631 F3 f = FUNCTION_CAST<F3>(code->entry()); 1631 F3 f = FUNCTION_CAST<F3>(code->entry());
1632 1632
(...skipping 20 matching lines...)
1653 typedef struct test { 1653 typedef struct test {
1654 double dd; 1654 double dd;
1655 double ds; 1655 double ds;
1656 double dt; 1656 double dt;
1657 float fd; 1657 float fd;
1658 float fs; 1658 float fs;
1659 float ft; 1659 float ft;
1660 } Test; 1660 } Test;
1661 1661
1662 Test test; 1662 Test test;
1663 __ ldc1(f0, MemOperand(a0, offsetof(Test, dd)) ); // test 1663 __ Ldc1(f0, MemOperand(a0, offsetof(Test, dd))); // test
1664 __ ldc1(f2, MemOperand(a0, offsetof(Test, ds)) ); // src1 1664 __ Ldc1(f2, MemOperand(a0, offsetof(Test, ds))); // src1
1665 __ ldc1(f4, MemOperand(a0, offsetof(Test, dt)) ); // src2 1665 __ Ldc1(f4, MemOperand(a0, offsetof(Test, dt))); // src2
1666 __ lwc1(f6, MemOperand(a0, offsetof(Test, fd)) ); // test 1666 __ lwc1(f6, MemOperand(a0, offsetof(Test, fd)) ); // test
1667 __ lwc1(f8, MemOperand(a0, offsetof(Test, fs)) ); // src1 1667 __ lwc1(f8, MemOperand(a0, offsetof(Test, fs)) ); // src1
1668 __ lwc1(f10, MemOperand(a0, offsetof(Test, ft)) ); // src2 1668 __ lwc1(f10, MemOperand(a0, offsetof(Test, ft)) ); // src2
1669 __ sel_d(f0, f2, f4); 1669 __ sel_d(f0, f2, f4);
1670 __ sel_s(f6, f8, f10); 1670 __ sel_s(f6, f8, f10);
1671 __ sdc1(f0, MemOperand(a0, offsetof(Test, dd)) ); 1671 __ Sdc1(f0, MemOperand(a0, offsetof(Test, dd)));
1672 __ swc1(f6, MemOperand(a0, offsetof(Test, fd)) ); 1672 __ swc1(f6, MemOperand(a0, offsetof(Test, fd)) );
1673 __ jr(ra); 1673 __ jr(ra);
1674 __ nop(); 1674 __ nop();
1675 CodeDesc desc; 1675 CodeDesc desc;
1676 assm.GetCode(&desc); 1676 assm.GetCode(&desc);
1677 Handle<Code> code = isolate->factory()->NewCode( 1677 Handle<Code> code = isolate->factory()->NewCode(
1678 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); 1678 desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
1679 F3 f = FUNCTION_CAST<F3>(code->entry()); 1679 F3 f = FUNCTION_CAST<F3>(code->entry());
1680 1680
1681 const int test_size = 3; 1681 const int test_size = 3;
(...skipping 161 matching lines...)
1843 0x0, 0x41efffffffe00000, 1843 0x0, 0x41efffffffe00000,
1844 0x41e0000000000000, 0x41dfffffffc00000 1844 0x41e0000000000000, 0x41dfffffffc00000
1845 }; 1845 };
1846 1846
1847 int kTableLength = sizeof(inputs)/sizeof(inputs[0]); 1847 int kTableLength = sizeof(inputs)/sizeof(inputs[0]);
1848 1848
1849 TestStruct test; 1849 TestStruct test;
1850 1850
1851 __ lw(t1, MemOperand(a0, offsetof(TestStruct, input))); 1851 __ lw(t1, MemOperand(a0, offsetof(TestStruct, input)));
1852 __ Cvt_d_uw(f4, t1, f6); 1852 __ Cvt_d_uw(f4, t1, f6);
1853 __ sdc1(f4, MemOperand(a0, offsetof(TestStruct, output))); 1853 __ Sdc1(f4, MemOperand(a0, offsetof(TestStruct, output)));
1854 __ jr(ra); 1854 __ jr(ra);
1855 __ nop(); 1855 __ nop();
1856 1856
1857 CodeDesc desc; 1857 CodeDesc desc;
1858 assm.GetCode(&desc); 1858 assm.GetCode(&desc);
1859 Handle<Code> code = isolate->factory()->NewCode( 1859 Handle<Code> code = isolate->factory()->NewCode(
1860 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); 1860 desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
1861 F3 f = FUNCTION_CAST<F3>(code->entry()); 1861 F3 f = FUNCTION_CAST<F3>(code->entry());
1862 for (int i = 0; i < kTableLength; i++) { 1862 for (int i = 0; i < kTableLength; i++) {
1863 test.input = inputs[i]; 1863 test.input = inputs[i];
(...skipping 50 matching lines...)
1914 9.8, 9.8, 9.8, -9.8, -11.2, -9.8, 1914 9.8, 9.8, 9.8, -9.8, -11.2, -9.8,
1915 3.0, fnan, -0.0, 0.0, fnan, finf, 1915 3.0, fnan, -0.0, 0.0, fnan, finf,
1916 finf, 42.0, finf, fminf, fnan}; 1916 finf, 42.0, finf, fminf, fnan};
1917 float resf[kTableLength] = { 1917 float resf[kTableLength] = {
1918 4.8, 4.8, 6.1, 9.8, -8.9, -9.8, 9.8, -8.9, -9.8, -9.8, -8.9, -9.8, 1918 4.8, 4.8, 6.1, 9.8, -8.9, -9.8, 9.8, -8.9, -9.8, -9.8, -8.9, -9.8,
1919 3.0, 3.0, -0.0, -0.0, finf, finf, 42.0, 42.0, fminf, fminf, fnan}; 1919 3.0, 3.0, -0.0, -0.0, finf, finf, 42.0, 42.0, fminf, fminf, fnan};
1920 float resf1[kTableLength] = { 1920 float resf1[kTableLength] = {
1921 5.3, 5.3, 6.1, -10.0, 9.8, 9.8, -10.0, 9.8, 9.8, -10.0, -11.2, -9.8, 1921 5.3, 5.3, 6.1, -10.0, 9.8, 9.8, -10.0, 9.8, 9.8, -10.0, -11.2, -9.8,
1922 3.0, 3.0, 0.0, 0.0, finf, finf, finf, finf, finf, finf, fnan}; 1922 3.0, 3.0, 0.0, 0.0, finf, finf, finf, finf, finf, finf, fnan};
1923 1923
1924 __ ldc1(f2, MemOperand(a0, offsetof(TestFloat, a)) ); 1924 __ Ldc1(f2, MemOperand(a0, offsetof(TestFloat, a)));
1925 __ ldc1(f4, MemOperand(a0, offsetof(TestFloat, b)) ); 1925 __ Ldc1(f4, MemOperand(a0, offsetof(TestFloat, b)));
1926 __ lwc1(f8, MemOperand(a0, offsetof(TestFloat, c)) ); 1926 __ lwc1(f8, MemOperand(a0, offsetof(TestFloat, c)) );
1927 __ lwc1(f10, MemOperand(a0, offsetof(TestFloat, d)) ); 1927 __ lwc1(f10, MemOperand(a0, offsetof(TestFloat, d)) );
1928 __ mina_d(f6, f2, f4); 1928 __ mina_d(f6, f2, f4);
1929 __ mina_s(f12, f8, f10); 1929 __ mina_s(f12, f8, f10);
1930 __ maxa_d(f14, f2, f4); 1930 __ maxa_d(f14, f2, f4);
1931 __ maxa_s(f16, f8, f10); 1931 __ maxa_s(f16, f8, f10);
1932 __ swc1(f12, MemOperand(a0, offsetof(TestFloat, resf)) ); 1932 __ swc1(f12, MemOperand(a0, offsetof(TestFloat, resf)) );
1933 __ sdc1(f6, MemOperand(a0, offsetof(TestFloat, resd)) ); 1933 __ Sdc1(f6, MemOperand(a0, offsetof(TestFloat, resd)));
1934 __ swc1(f16, MemOperand(a0, offsetof(TestFloat, resf1)) ); 1934 __ swc1(f16, MemOperand(a0, offsetof(TestFloat, resf1)) );
1935 __ sdc1(f14, MemOperand(a0, offsetof(TestFloat, resd1)) ); 1935 __ Sdc1(f14, MemOperand(a0, offsetof(TestFloat, resd1)));
1936 __ jr(ra); 1936 __ jr(ra);
1937 __ nop(); 1937 __ nop();
1938 1938
1939 CodeDesc desc; 1939 CodeDesc desc;
1940 assm.GetCode(&desc); 1940 assm.GetCode(&desc);
1941 Handle<Code> code = isolate->factory()->NewCode( 1941 Handle<Code> code = isolate->factory()->NewCode(
1942 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); 1942 desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
1943 F3 f = FUNCTION_CAST<F3>(code->entry()); 1943 F3 f = FUNCTION_CAST<F3>(code->entry());
1944 for (int i = 0; i < kTableLength; i++) { 1944 for (int i = 0; i < kTableLength; i++) {
1945 test.a = inputsa[i]; 1945 test.a = inputsa[i];
(...skipping 55 matching lines...)
2001 dFPU64InvalidResult}; 2001 dFPU64InvalidResult};
2002 double outputsNaN2008[kTableLength] = { 2002 double outputsNaN2008[kTableLength] = {
2003 2.0, 2.0, 2.0, 3.0, 3.0, 3.0, 2003 2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
2004 -2.0, -2.0, -2.0, -3.0, -3.0, -3.0, 2004 -2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
2005 2147483648.0, 2005 2147483648.0,
2006 0, 2006 0,
2007 dFPU64InvalidResult}; 2007 dFPU64InvalidResult};
2008 2008
2009 __ cfc1(t1, FCSR); 2009 __ cfc1(t1, FCSR);
2010 __ sw(t1, MemOperand(a0, offsetof(Test, isNaN2008))); 2010 __ sw(t1, MemOperand(a0, offsetof(Test, isNaN2008)));
2011 __ ldc1(f4, MemOperand(a0, offsetof(Test, a)) ); 2011 __ Ldc1(f4, MemOperand(a0, offsetof(Test, a)));
2012 __ lwc1(f6, MemOperand(a0, offsetof(Test, b)) ); 2012 __ lwc1(f6, MemOperand(a0, offsetof(Test, b)) );
2013 __ trunc_l_d(f8, f4); 2013 __ trunc_l_d(f8, f4);
2014 __ trunc_l_s(f10, f6); 2014 __ trunc_l_s(f10, f6);
2015 __ sdc1(f8, MemOperand(a0, offsetof(Test, c)) ); 2015 __ Sdc1(f8, MemOperand(a0, offsetof(Test, c)));
2016 __ sdc1(f10, MemOperand(a0, offsetof(Test, d)) ); 2016 __ Sdc1(f10, MemOperand(a0, offsetof(Test, d)));
2017 __ jr(ra); 2017 __ jr(ra);
2018 __ nop(); 2018 __ nop();
2019 Test test; 2019 Test test;
2020 CodeDesc desc; 2020 CodeDesc desc;
2021 assm.GetCode(&desc); 2021 assm.GetCode(&desc);
2022 Handle<Code> code = isolate->factory()->NewCode( 2022 Handle<Code> code = isolate->factory()->NewCode(
2023 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); 2023 desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
2024 F3 f = FUNCTION_CAST<F3>(code->entry()); 2024 F3 f = FUNCTION_CAST<F3>(code->entry());
2025 for (int i = 0; i < kTableLength; i++) { 2025 for (int i = 0; i < kTableLength; i++) {
2026 test.a = inputs_D[i]; 2026 test.a = inputs_D[i];
(...skipping 42 matching lines...)
2069 4.8, 4.8, -4.8, -0.29 2069 4.8, 4.8, -4.8, -0.29
2070 }; 2070 };
2071 2071
2072 float outputs_S[kTableLength] = { 2072 float outputs_S[kTableLength] = {
2073 4.8, 4.8, -4.8, -0.29 2073 4.8, 4.8, -4.8, -0.29
2074 }; 2074 };
2075 double outputs_D[kTableLength] = { 2075 double outputs_D[kTableLength] = {
2076 5.3, -5.3, 5.3, -2.9 2076 5.3, -5.3, 5.3, -2.9
2077 }; 2077 };
2078 2078
2079 __ ldc1(f2, MemOperand(a0, offsetof(TestFloat, a)) ); 2079 __ Ldc1(f2, MemOperand(a0, offsetof(TestFloat, a)));
2080 __ lwc1(f6, MemOperand(a0, offsetof(TestFloat, c)) ); 2080 __ lwc1(f6, MemOperand(a0, offsetof(TestFloat, c)) );
2081 __ lw(t0, MemOperand(a0, offsetof(TestFloat, rt)) ); 2081 __ lw(t0, MemOperand(a0, offsetof(TestFloat, rt)) );
2082 __ Move(f12, 0.0); 2082 __ Move(f12, 0.0);
2083 __ Move(f10, 0.0); 2083 __ Move(f10, 0.0);
2084 __ Move(f16, 0.0); 2084 __ Move(f16, 0.0);
2085 __ Move(f14, 0.0); 2085 __ Move(f14, 0.0);
2086 __ sdc1(f12, MemOperand(a0, offsetof(TestFloat, bold)) ); 2086 __ Sdc1(f12, MemOperand(a0, offsetof(TestFloat, bold)));
2087 __ swc1(f10, MemOperand(a0, offsetof(TestFloat, dold)) ); 2087 __ swc1(f10, MemOperand(a0, offsetof(TestFloat, dold)) );
2088 __ sdc1(f16, MemOperand(a0, offsetof(TestFloat, bold1)) ); 2088 __ Sdc1(f16, MemOperand(a0, offsetof(TestFloat, bold1)));
2089 __ swc1(f14, MemOperand(a0, offsetof(TestFloat, dold1)) ); 2089 __ swc1(f14, MemOperand(a0, offsetof(TestFloat, dold1)) );
2090 __ movz_s(f10, f6, t0); 2090 __ movz_s(f10, f6, t0);
2091 __ movz_d(f12, f2, t0); 2091 __ movz_d(f12, f2, t0);
2092 __ movn_s(f14, f6, t0); 2092 __ movn_s(f14, f6, t0);
2093 __ movn_d(f16, f2, t0); 2093 __ movn_d(f16, f2, t0);
2094 __ swc1(f10, MemOperand(a0, offsetof(TestFloat, d)) ); 2094 __ swc1(f10, MemOperand(a0, offsetof(TestFloat, d)) );
2095 __ sdc1(f12, MemOperand(a0, offsetof(TestFloat, b)) ); 2095 __ Sdc1(f12, MemOperand(a0, offsetof(TestFloat, b)));
2096 __ swc1(f14, MemOperand(a0, offsetof(TestFloat, d1)) ); 2096 __ swc1(f14, MemOperand(a0, offsetof(TestFloat, d1)) );
2097 __ sdc1(f16, MemOperand(a0, offsetof(TestFloat, b1)) ); 2097 __ Sdc1(f16, MemOperand(a0, offsetof(TestFloat, b1)));
2098 __ jr(ra); 2098 __ jr(ra);
2099 __ nop(); 2099 __ nop();
2100 2100
2101 CodeDesc desc; 2101 CodeDesc desc;
2102 assm.GetCode(&desc); 2102 assm.GetCode(&desc);
2103 Handle<Code> code = isolate->factory()->NewCode( 2103 Handle<Code> code = isolate->factory()->NewCode(
2104 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); 2104 desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
2105 F3 f = FUNCTION_CAST<F3>(code->entry()); 2105 F3 f = FUNCTION_CAST<F3>(code->entry());
2106 for (int i = 0; i < kTableLength; i++) { 2106 for (int i = 0; i < kTableLength; i++) {
2107 test.a = inputs_D[i]; 2107 test.a = inputs_D[i];
(...skipping 61 matching lines...)
2169 for (int j = 0; j< 8; j++) { 2169 for (int j = 0; j< 8; j++) {
2170 test.cc = condition_flags[j]; 2170 test.cc = condition_flags[j];
2171 if (test.cc == 0) { 2171 if (test.cc == 0) {
2172 test.fcsr = 1 << 23; 2172 test.fcsr = 1 << 23;
2173 } else { 2173 } else {
2174 test.fcsr = 1 << (24+condition_flags[j]); 2174 test.fcsr = 1 << (24+condition_flags[j]);
2175 } 2175 }
2176 HandleScope scope(isolate); 2176 HandleScope scope(isolate);
2177 MacroAssembler assm(isolate, NULL, 0, 2177 MacroAssembler assm(isolate, NULL, 0,
2178 v8::internal::CodeObjectRequired::kYes); 2178 v8::internal::CodeObjectRequired::kYes);
2179 __ ldc1(f2, MemOperand(a0, offsetof(TestFloat, srcd)) ); 2179 __ Ldc1(f2, MemOperand(a0, offsetof(TestFloat, srcd)));
2180 __ lwc1(f4, MemOperand(a0, offsetof(TestFloat, srcf)) ); 2180 __ lwc1(f4, MemOperand(a0, offsetof(TestFloat, srcf)) );
2181 __ lw(t1, MemOperand(a0, offsetof(TestFloat, fcsr)) ); 2181 __ lw(t1, MemOperand(a0, offsetof(TestFloat, fcsr)) );
2182 __ cfc1(t0, FCSR); 2182 __ cfc1(t0, FCSR);
2183 __ ctc1(t1, FCSR); 2183 __ ctc1(t1, FCSR);
2184 __ li(t2, 0x0); 2184 __ li(t2, 0x0);
2185 __ mtc1(t2, f12); 2185 __ mtc1(t2, f12);
2186 __ mtc1(t2, f10); 2186 __ mtc1(t2, f10);
2187 __ sdc1(f10, MemOperand(a0, offsetof(TestFloat, dstdold)) ); 2187 __ Sdc1(f10, MemOperand(a0, offsetof(TestFloat, dstdold)));
2188 __ swc1(f12, MemOperand(a0, offsetof(TestFloat, dstfold)) ); 2188 __ swc1(f12, MemOperand(a0, offsetof(TestFloat, dstfold)) );
2189 __ movt_s(f12, f4, test.cc); 2189 __ movt_s(f12, f4, test.cc);
2190 __ movt_d(f10, f2, test.cc); 2190 __ movt_d(f10, f2, test.cc);
2191 __ swc1(f12, MemOperand(a0, offsetof(TestFloat, dstf)) ); 2191 __ swc1(f12, MemOperand(a0, offsetof(TestFloat, dstf)) );
2192 __ sdc1(f10, MemOperand(a0, offsetof(TestFloat, dstd)) ); 2192 __ Sdc1(f10, MemOperand(a0, offsetof(TestFloat, dstd)));
2193 __ sdc1(f10, MemOperand(a0, offsetof(TestFloat, dstdold1)) ); 2193 __ Sdc1(f10, MemOperand(a0, offsetof(TestFloat, dstdold1)));
2194 __ swc1(f12, MemOperand(a0, offsetof(TestFloat, dstfold1)) ); 2194 __ swc1(f12, MemOperand(a0, offsetof(TestFloat, dstfold1)) );
2195 __ movf_s(f12, f4, test.cc); 2195 __ movf_s(f12, f4, test.cc);
2196 __ movf_d(f10, f2, test.cc); 2196 __ movf_d(f10, f2, test.cc);
2197 __ swc1(f12, MemOperand(a0, offsetof(TestFloat, dstf1)) ); 2197 __ swc1(f12, MemOperand(a0, offsetof(TestFloat, dstf1)) );
2198 __ sdc1(f10, MemOperand(a0, offsetof(TestFloat, dstd1)) ); 2198 __ Sdc1(f10, MemOperand(a0, offsetof(TestFloat, dstd1)));
2199 __ ctc1(t0, FCSR); 2199 __ ctc1(t0, FCSR);
2200 __ jr(ra); 2200 __ jr(ra);
2201 __ nop(); 2201 __ nop();
2202 2202
2203 CodeDesc desc; 2203 CodeDesc desc;
2204 assm.GetCode(&desc); 2204 assm.GetCode(&desc);
2205 Handle<Code> code = isolate->factory()->NewCode( 2205 Handle<Code> code = isolate->factory()->NewCode(
2206 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); 2206 desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
2207 F3 f = FUNCTION_CAST<F3>(code->entry()); 2207 F3 f = FUNCTION_CAST<F3>(code->entry());
2208 2208
(...skipping 59 matching lines...)
2268 double outputs_RM[kTableLength] = { 2268 double outputs_RM[kTableLength] = {
2269 2.0, 2.0, 2.0, 3.0, 3.0, 3.0, 2269 2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
2270 -3.0, -3.0, -3.0, -4.0, -4.0, -4.0, 2270 -3.0, -3.0, -3.0, -4.0, -4.0, -4.0,
2271 2147483637.0, 2147483638.0, 2147483639.0, 2271 2147483637.0, 2147483638.0, 2147483639.0,
2272 2147483640.0, 2147483641.0, 2147483642.0, 2272 2147483640.0, 2147483641.0, 2147483642.0,
2273 2147483643.0, 2147483644.0, 2147483645.0, 2273 2147483643.0, 2147483644.0, 2147483645.0,
2274 2147483646.0, 2147483647.0, kFPUInvalidResult}; 2274 2147483646.0, 2147483647.0, kFPUInvalidResult};
2275 int fcsr_inputs[4] = 2275 int fcsr_inputs[4] =
2276 {kRoundToNearest, kRoundToZero, kRoundToPlusInf, kRoundToMinusInf}; 2276 {kRoundToNearest, kRoundToZero, kRoundToPlusInf, kRoundToMinusInf};
2277 double* outputs[4] = {outputs_RN, outputs_RZ, outputs_RP, outputs_RM}; 2277 double* outputs[4] = {outputs_RN, outputs_RZ, outputs_RP, outputs_RM};
2278 __ ldc1(f4, MemOperand(a0, offsetof(Test, a)) ); 2278 __ Ldc1(f4, MemOperand(a0, offsetof(Test, a)));
2279 __ lw(t0, MemOperand(a0, offsetof(Test, fcsr)) ); 2279 __ lw(t0, MemOperand(a0, offsetof(Test, fcsr)) );
2280 __ cfc1(t1, FCSR); 2280 __ cfc1(t1, FCSR);
2281 __ ctc1(t0, FCSR); 2281 __ ctc1(t0, FCSR);
2282 __ cvt_w_d(f8, f4); 2282 __ cvt_w_d(f8, f4);
2283 __ swc1(f8, MemOperand(a0, offsetof(Test, b)) ); 2283 __ swc1(f8, MemOperand(a0, offsetof(Test, b)) );
2284 __ ctc1(t1, FCSR); 2284 __ ctc1(t1, FCSR);
2285 __ jr(ra); 2285 __ jr(ra);
2286 __ nop(); 2286 __ nop();
2287 Test test; 2287 Test test;
2288 CodeDesc desc; 2288 CodeDesc desc;
(...skipping 47 matching lines...)
2336 kFPUInvalidResult}; 2336 kFPUInvalidResult};
2337 double outputsNaN2008[kTableLength] = { 2337 double outputsNaN2008[kTableLength] = {
2338 2.0, 2.0, 2.0, 3.0, 3.0, 3.0, 2338 2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
2339 -2.0, -2.0, -2.0, -3.0, -3.0, -3.0, 2339 -2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
2340 kFPUInvalidResult, 2340 kFPUInvalidResult,
2341 0, 2341 0,
2342 kFPUInvalidResult}; 2342 kFPUInvalidResult};
2343 2343
2344 __ cfc1(t1, FCSR); 2344 __ cfc1(t1, FCSR);
2345 __ sw(t1, MemOperand(a0, offsetof(Test, isNaN2008))); 2345 __ sw(t1, MemOperand(a0, offsetof(Test, isNaN2008)));
2346 __ ldc1(f4, MemOperand(a0, offsetof(Test, a)) ); 2346 __ Ldc1(f4, MemOperand(a0, offsetof(Test, a)));
2347 __ lwc1(f6, MemOperand(a0, offsetof(Test, b)) ); 2347 __ lwc1(f6, MemOperand(a0, offsetof(Test, b)) );
2348 __ trunc_w_d(f8, f4); 2348 __ trunc_w_d(f8, f4);
2349 __ trunc_w_s(f10, f6); 2349 __ trunc_w_s(f10, f6);
2350 __ swc1(f8, MemOperand(a0, offsetof(Test, c)) ); 2350 __ swc1(f8, MemOperand(a0, offsetof(Test, c)) );
2351 __ swc1(f10, MemOperand(a0, offsetof(Test, d)) ); 2351 __ swc1(f10, MemOperand(a0, offsetof(Test, d)) );
2352 __ jr(ra); 2352 __ jr(ra);
2353 __ nop(); 2353 __ nop();
2354 Test test; 2354 Test test;
2355 CodeDesc desc; 2355 CodeDesc desc;
2356 assm.GetCode(&desc); 2356 assm.GetCode(&desc);
(...skipping 48 matching lines...)
2405 kFPUInvalidResult, kFPUInvalidResult, 2405 kFPUInvalidResult, kFPUInvalidResult,
2406 kFPUInvalidResult}; 2406 kFPUInvalidResult};
2407 double outputsNaN2008[kTableLength] = { 2407 double outputsNaN2008[kTableLength] = {
2408 2.0, 3.0, 2.0, 3.0, 4.0, 4.0, 2408 2.0, 3.0, 2.0, 3.0, 4.0, 4.0,
2409 -2.0, -3.0, -2.0, -3.0, -4.0, -4.0, 2409 -2.0, -3.0, -2.0, -3.0, -4.0, -4.0,
2410 kFPUInvalidResult, 0, 2410 kFPUInvalidResult, 0,
2411 kFPUInvalidResult}; 2411 kFPUInvalidResult};
2412 2412
2413 __ cfc1(t1, FCSR); 2413 __ cfc1(t1, FCSR);
2414 __ sw(t1, MemOperand(a0, offsetof(Test, isNaN2008))); 2414 __ sw(t1, MemOperand(a0, offsetof(Test, isNaN2008)));
2415 __ ldc1(f4, MemOperand(a0, offsetof(Test, a)) ); 2415 __ Ldc1(f4, MemOperand(a0, offsetof(Test, a)));
2416 __ lwc1(f6, MemOperand(a0, offsetof(Test, b)) ); 2416 __ lwc1(f6, MemOperand(a0, offsetof(Test, b)) );
2417 __ round_w_d(f8, f4); 2417 __ round_w_d(f8, f4);
2418 __ round_w_s(f10, f6); 2418 __ round_w_s(f10, f6);
2419 __ swc1(f8, MemOperand(a0, offsetof(Test, c)) ); 2419 __ swc1(f8, MemOperand(a0, offsetof(Test, c)) );
2420 __ swc1(f10, MemOperand(a0, offsetof(Test, d)) ); 2420 __ swc1(f10, MemOperand(a0, offsetof(Test, d)) );
2421 __ jr(ra); 2421 __ jr(ra);
2422 __ nop(); 2422 __ nop();
2423 Test test; 2423 Test test;
2424 CodeDesc desc; 2424 CodeDesc desc;
2425 assm.GetCode(&desc); 2425 assm.GetCode(&desc);
(...skipping 51 matching lines...)
2477 dFPU64InvalidResult}; 2477 dFPU64InvalidResult};
2478 double outputsNaN2008[kTableLength] = { 2478 double outputsNaN2008[kTableLength] = {
2479 2.0, 3.0, 2.0, 3.0, 4.0, 4.0, 2479 2.0, 3.0, 2.0, 3.0, 4.0, 4.0,
2480 -2.0, -3.0, -2.0, -3.0, -4.0, -4.0, 2480 -2.0, -3.0, -2.0, -3.0, -4.0, -4.0,
2481 2147483648.0, 2481 2147483648.0,
2482 0, 2482 0,
2483 dFPU64InvalidResult}; 2483 dFPU64InvalidResult};
2484 2484
2485 __ cfc1(t1, FCSR); 2485 __ cfc1(t1, FCSR);
2486 __ sw(t1, MemOperand(a0, offsetof(Test, isNaN2008))); 2486 __ sw(t1, MemOperand(a0, offsetof(Test, isNaN2008)));
2487 __ ldc1(f4, MemOperand(a0, offsetof(Test, a)) ); 2487 __ Ldc1(f4, MemOperand(a0, offsetof(Test, a)));
2488 __ lwc1(f6, MemOperand(a0, offsetof(Test, b)) ); 2488 __ lwc1(f6, MemOperand(a0, offsetof(Test, b)) );
2489 __ round_l_d(f8, f4); 2489 __ round_l_d(f8, f4);
2490 __ round_l_s(f10, f6); 2490 __ round_l_s(f10, f6);
2491 __ sdc1(f8, MemOperand(a0, offsetof(Test, c)) ); 2491 __ Sdc1(f8, MemOperand(a0, offsetof(Test, c)));
2492 __ sdc1(f10, MemOperand(a0, offsetof(Test, d)) ); 2492 __ Sdc1(f10, MemOperand(a0, offsetof(Test, d)));
2493 __ jr(ra); 2493 __ jr(ra);
2494 __ nop(); 2494 __ nop();
2495 Test test; 2495 Test test;
2496 CodeDesc desc; 2496 CodeDesc desc;
2497 assm.GetCode(&desc); 2497 assm.GetCode(&desc);
2498 Handle<Code> code = isolate->factory()->NewCode( 2498 Handle<Code> code = isolate->factory()->NewCode(
2499 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); 2499 desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
2500 F3 f = FUNCTION_CAST<F3>(code->entry()); 2500 F3 f = FUNCTION_CAST<F3>(code->entry());
2501 for (int i = 0; i < kTableLength; i++) { 2501 for (int i = 0; i < kTableLength; i++) {
2502 test.a = inputs_D[i]; 2502 test.a = inputs_D[i];
(...skipping 47 matching lines...)
2550 float inputft_S[kTableLength] = { 2550 float inputft_S[kTableLength] = {
2551 4.8, 5.3, 2.9, 4.8, 5.3, 2.9, 2551 4.8, 5.3, 2.9, 4.8, 5.3, 2.9,
2552 -4.8, -5.3, -2.9, -4.8, -5.3, -2.9 2552 -4.8, -5.3, -2.9, -4.8, -5.3, -2.9
2553 }; 2553 };
2554 float outputs_S[kTableLength] = { 2554 float outputs_S[kTableLength] = {
2555 0.5, -0.5, 0.0, -10.1, -10.1, -5.8, 2555 0.5, -0.5, 0.0, -10.1, -10.1, -5.8,
2556 10.1, 10.1, 5.8, -0.5, 0.5, 0.0 2556 10.1, 10.1, 5.8, -0.5, 0.5, 0.0
2557 }; 2557 };
2558 __ lwc1(f2, MemOperand(a0, offsetof(TestFloat, a)) ); 2558 __ lwc1(f2, MemOperand(a0, offsetof(TestFloat, a)) );
2559 __ lwc1(f4, MemOperand(a0, offsetof(TestFloat, b)) ); 2559 __ lwc1(f4, MemOperand(a0, offsetof(TestFloat, b)) );
2560 __ ldc1(f8, MemOperand(a0, offsetof(TestFloat, c)) ); 2560 __ Ldc1(f8, MemOperand(a0, offsetof(TestFloat, c)));
2561 __ ldc1(f10, MemOperand(a0, offsetof(TestFloat, d)) ); 2561 __ Ldc1(f10, MemOperand(a0, offsetof(TestFloat, d)));
2562 __ sub_s(f6, f2, f4); 2562 __ sub_s(f6, f2, f4);
2563 __ sub_d(f12, f8, f10); 2563 __ sub_d(f12, f8, f10);
2564 __ swc1(f6, MemOperand(a0, offsetof(TestFloat, resultS)) ); 2564 __ swc1(f6, MemOperand(a0, offsetof(TestFloat, resultS)) );
2565 __ sdc1(f12, MemOperand(a0, offsetof(TestFloat, resultD)) ); 2565 __ Sdc1(f12, MemOperand(a0, offsetof(TestFloat, resultD)));
2566 __ jr(ra); 2566 __ jr(ra);
2567 __ nop(); 2567 __ nop();
2568 2568
2569 CodeDesc desc; 2569 CodeDesc desc;
2570 assm.GetCode(&desc); 2570 assm.GetCode(&desc);
2571 Handle<Code> code = isolate->factory()->NewCode( 2571 Handle<Code> code = isolate->factory()->NewCode(
2572 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); 2572 desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
2573 F3 f = FUNCTION_CAST<F3>(code->entry()); 2573 F3 f = FUNCTION_CAST<F3>(code->entry());
2574 for (int i = 0; i < kTableLength; i++) { 2574 for (int i = 0; i < kTableLength; i++) {
2575 test.a = inputfs_S[i]; 2575 test.a = inputfs_S[i];
(...skipping 40 matching lines...)
2616 float inputs_S[kTableLength] = { 2616 float inputs_S[kTableLength] = {
2617 0.0, 4.0, 2.0, 4e-28 2617 0.0, 4.0, 2.0, 4e-28
2618 }; 2618 };
2619 2619
2620 float outputs_S[kTableLength] = { 2620 float outputs_S[kTableLength] = {
2621 0.0, 2.0, sqrt2_s, 2e-14 2621 0.0, 2.0, sqrt2_s, 2e-14
2622 }; 2622 };
2623 2623
2624 2624
2625 __ lwc1(f2, MemOperand(a0, offsetof(TestFloat, a)) ); 2625 __ lwc1(f2, MemOperand(a0, offsetof(TestFloat, a)) );
2626 __ ldc1(f8, MemOperand(a0, offsetof(TestFloat, c)) ); 2626 __ Ldc1(f8, MemOperand(a0, offsetof(TestFloat, c)));
2627 __ sqrt_s(f6, f2); 2627 __ sqrt_s(f6, f2);
2628 __ sqrt_d(f12, f8); 2628 __ sqrt_d(f12, f8);
2629 2629
2630 if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) { 2630 if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
2631 __ rsqrt_d(f14, f8); 2631 __ rsqrt_d(f14, f8);
2632 __ rsqrt_s(f16, f2); 2632 __ rsqrt_s(f16, f2);
2633 __ recip_d(f18, f8); 2633 __ recip_d(f18, f8);
2634 __ recip_s(f4, f2); 2634 __ recip_s(f4, f2);
2635 } 2635 }
2636 __ swc1(f6, MemOperand(a0, offsetof(TestFloat, resultS)) ); 2636 __ swc1(f6, MemOperand(a0, offsetof(TestFloat, resultS)) );
2637 __ sdc1(f12, MemOperand(a0, offsetof(TestFloat, resultD)) ); 2637 __ Sdc1(f12, MemOperand(a0, offsetof(TestFloat, resultD)));
2638 2638
2639 if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) { 2639 if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
2640 __ swc1(f16, MemOperand(a0, offsetof(TestFloat, resultS1)) ); 2640 __ swc1(f16, MemOperand(a0, offsetof(TestFloat, resultS1)) );
2641 __ sdc1(f14, MemOperand(a0, offsetof(TestFloat, resultD1)) ); 2641 __ Sdc1(f14, MemOperand(a0, offsetof(TestFloat, resultD1)));
2642 __ swc1(f4, MemOperand(a0, offsetof(TestFloat, resultS2)) ); 2642 __ swc1(f4, MemOperand(a0, offsetof(TestFloat, resultS2)) );
2643 __ sdc1(f18, MemOperand(a0, offsetof(TestFloat, resultD2)) ); 2643 __ Sdc1(f18, MemOperand(a0, offsetof(TestFloat, resultD2)));
2644 } 2644 }
2645 __ jr(ra); 2645 __ jr(ra);
2646 __ nop(); 2646 __ nop();
2647 2647
2648 CodeDesc desc; 2648 CodeDesc desc;
2649 assm.GetCode(&desc); 2649 assm.GetCode(&desc);
2650 Handle<Code> code = isolate->factory()->NewCode( 2650 Handle<Code> code = isolate->factory()->NewCode(
2651 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); 2651 desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
2652 F3 f = FUNCTION_CAST<F3>(code->entry()); 2652 F3 f = FUNCTION_CAST<F3>(code->entry());
2653 2653
(...skipping 56 matching lines...)
2710 0.0, -4.0, 2.0 2710 0.0, -4.0, 2.0
2711 }; 2711 };
2712 float inputs_S[kTableLength] = { 2712 float inputs_S[kTableLength] = {
2713 0.0, 4.0, -2.0 2713 0.0, 4.0, -2.0
2714 }; 2714 };
2715 2715
2716 float outputs_S[kTableLength] = { 2716 float outputs_S[kTableLength] = {
2717 0.0, -4.0, 2.0 2717 0.0, -4.0, 2.0
2718 }; 2718 };
2719 __ lwc1(f2, MemOperand(a0, offsetof(TestFloat, a)) ); 2719 __ lwc1(f2, MemOperand(a0, offsetof(TestFloat, a)) );
2720 __ ldc1(f8, MemOperand(a0, offsetof(TestFloat, c)) ); 2720 __ Ldc1(f8, MemOperand(a0, offsetof(TestFloat, c)));
2721 __ neg_s(f6, f2); 2721 __ neg_s(f6, f2);
2722 __ neg_d(f12, f8); 2722 __ neg_d(f12, f8);
2723 __ swc1(f6, MemOperand(a0, offsetof(TestFloat, resultS)) ); 2723 __ swc1(f6, MemOperand(a0, offsetof(TestFloat, resultS)) );
2724 __ sdc1(f12, MemOperand(a0, offsetof(TestFloat, resultD)) ); 2724 __ Sdc1(f12, MemOperand(a0, offsetof(TestFloat, resultD)));
2725 __ jr(ra); 2725 __ jr(ra);
2726 __ nop(); 2726 __ nop();
2727 2727
2728 CodeDesc desc; 2728 CodeDesc desc;
2729 assm.GetCode(&desc); 2729 assm.GetCode(&desc);
2730 Handle<Code> code = isolate->factory()->NewCode( 2730 Handle<Code> code = isolate->factory()->NewCode(
2731 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); 2731 desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
2732 F3 f = FUNCTION_CAST<F3>(code->entry()); 2732 F3 f = FUNCTION_CAST<F3>(code->entry());
2733 for (int i = 0; i < kTableLength; i++) { 2733 for (int i = 0; i < kTableLength; i++) {
2734 test.a = inputs_S[i]; 2734 test.a = inputs_S[i];
(...skipping 31 matching lines...)
2766 2766
2767 float inputfs_S[kTableLength] = { 2767 float inputfs_S[kTableLength] = {
2768 5.3, -5.3, 5.3, -2.9 2768 5.3, -5.3, 5.3, -2.9
2769 }; 2769 };
2770 float inputft_S[kTableLength] = { 2770 float inputft_S[kTableLength] = {
2771 4.8, 4.8, -4.8, -0.29 2771 4.8, 4.8, -4.8, -0.29
2772 }; 2772 };
2773 2773
2774 __ lwc1(f2, MemOperand(a0, offsetof(TestFloat, a)) ); 2774 __ lwc1(f2, MemOperand(a0, offsetof(TestFloat, a)) );
2775 __ lwc1(f4, MemOperand(a0, offsetof(TestFloat, b)) ); 2775 __ lwc1(f4, MemOperand(a0, offsetof(TestFloat, b)) );
2776 __ ldc1(f6, MemOperand(a0, offsetof(TestFloat, c)) ); 2776 __ Ldc1(f6, MemOperand(a0, offsetof(TestFloat, c)));
2777 __ ldc1(f8, MemOperand(a0, offsetof(TestFloat, d)) ); 2777 __ Ldc1(f8, MemOperand(a0, offsetof(TestFloat, d)));
2778 __ mul_s(f10, f2, f4); 2778 __ mul_s(f10, f2, f4);
2779 __ mul_d(f12, f6, f8); 2779 __ mul_d(f12, f6, f8);
2780 __ swc1(f10, MemOperand(a0, offsetof(TestFloat, resultS)) ); 2780 __ swc1(f10, MemOperand(a0, offsetof(TestFloat, resultS)) );
2781 __ sdc1(f12, MemOperand(a0, offsetof(TestFloat, resultD)) ); 2781 __ Sdc1(f12, MemOperand(a0, offsetof(TestFloat, resultD)));
2782 __ jr(ra); 2782 __ jr(ra);
2783 __ nop(); 2783 __ nop();
2784 2784
2785 CodeDesc desc; 2785 CodeDesc desc;
2786 assm.GetCode(&desc); 2786 assm.GetCode(&desc);
2787 Handle<Code> code = isolate->factory()->NewCode( 2787 Handle<Code> code = isolate->factory()->NewCode(
2788 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); 2788 desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
2789 F3 f = FUNCTION_CAST<F3>(code->entry()); 2789 F3 f = FUNCTION_CAST<F3>(code->entry());
2790 for (int i = 0; i < kTableLength; i++) { 2790 for (int i = 0; i < kTableLength; i++) {
2791 test.a = inputfs_S[i]; 2791 test.a = inputfs_S[i];
(...skipping 29 matching lines...)
2821 4.8, 4.8, -4.8, -0.29 2821 4.8, 4.8, -4.8, -0.29
2822 }; 2822 };
2823 2823
2824 float outputs_S[kTableLength] = { 2824 float outputs_S[kTableLength] = {
2825 4.8, 4.8, -4.8, -0.29 2825 4.8, 4.8, -4.8, -0.29
2826 }; 2826 };
2827 double outputs_D[kTableLength] = { 2827 double outputs_D[kTableLength] = {
2828 5.3, -5.3, 5.3, -2.9 2828 5.3, -5.3, 5.3, -2.9
2829 }; 2829 };
2830 2830
2831 __ ldc1(f4, MemOperand(a0, offsetof(TestFloat, a)) ); 2831 __ Ldc1(f4, MemOperand(a0, offsetof(TestFloat, a)));
2832 __ lwc1(f6, MemOperand(a0, offsetof(TestFloat, c)) ); 2832 __ lwc1(f6, MemOperand(a0, offsetof(TestFloat, c)) );
2833 __ mov_s(f8, f6); 2833 __ mov_s(f8, f6);
2834 __ mov_d(f10, f4); 2834 __ mov_d(f10, f4);
2835 __ swc1(f8, MemOperand(a0, offsetof(TestFloat, d)) ); 2835 __ swc1(f8, MemOperand(a0, offsetof(TestFloat, d)) );
2836 __ sdc1(f10, MemOperand(a0, offsetof(TestFloat, b)) ); 2836 __ Sdc1(f10, MemOperand(a0, offsetof(TestFloat, b)));
2837 2837
2838 __ jr(ra); 2838 __ jr(ra);
2839 __ nop(); 2839 __ nop();
2840 2840
2841 CodeDesc desc; 2841 CodeDesc desc;
2842 assm.GetCode(&desc); 2842 assm.GetCode(&desc);
2843 Handle<Code> code = isolate->factory()->NewCode( 2843 Handle<Code> code = isolate->factory()->NewCode(
2844 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); 2844 desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
2845 F3 f = FUNCTION_CAST<F3>(code->entry()); 2845 F3 f = FUNCTION_CAST<F3>(code->entry());
2846 for (int i = 0; i < kTableLength; i++) { 2846 for (int i = 0; i < kTableLength; i++) {
(...skipping 42 matching lines...)
2889 kFPUInvalidResult}; 2889 kFPUInvalidResult};
2890 double outputsNaN2008[kTableLength] = { 2890 double outputsNaN2008[kTableLength] = {
2891 2.0, 2.0, 2.0, 3.0, 3.0, 3.0, 2891 2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
2892 -3.0, -3.0, -3.0, -4.0, -4.0, -4.0, 2892 -3.0, -3.0, -3.0, -4.0, -4.0, -4.0,
2893 kFPUInvalidResult, 2893 kFPUInvalidResult,
2894 0, 2894 0,
2895 kFPUInvalidResult}; 2895 kFPUInvalidResult};
2896 2896
2897 __ cfc1(t1, FCSR); 2897 __ cfc1(t1, FCSR);
2898 __ sw(t1, MemOperand(a0, offsetof(Test, isNaN2008))); 2898 __ sw(t1, MemOperand(a0, offsetof(Test, isNaN2008)));
2899 __ ldc1(f4, MemOperand(a0, offsetof(Test, a)) ); 2899 __ Ldc1(f4, MemOperand(a0, offsetof(Test, a)));
2900 __ lwc1(f6, MemOperand(a0, offsetof(Test, b)) ); 2900 __ lwc1(f6, MemOperand(a0, offsetof(Test, b)) );
2901 __ floor_w_d(f8, f4); 2901 __ floor_w_d(f8, f4);
2902 __ floor_w_s(f10, f6); 2902 __ floor_w_s(f10, f6);
2903 __ swc1(f8, MemOperand(a0, offsetof(Test, c)) ); 2903 __ swc1(f8, MemOperand(a0, offsetof(Test, c)) );
2904 __ swc1(f10, MemOperand(a0, offsetof(Test, d)) ); 2904 __ swc1(f10, MemOperand(a0, offsetof(Test, d)) );
2905 __ jr(ra); 2905 __ jr(ra);
2906 __ nop(); 2906 __ nop();
2907 Test test; 2907 Test test;
2908 CodeDesc desc; 2908 CodeDesc desc;
2909 assm.GetCode(&desc); 2909 assm.GetCode(&desc);
(...skipping 51 matching lines...)
2961 dFPU64InvalidResult}; 2961 dFPU64InvalidResult};
2962 double outputsNaN2008[kTableLength] = { 2962 double outputsNaN2008[kTableLength] = {
2963 2.0, 2.0, 2.0, 3.0, 3.0, 3.0, 2963 2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
2964 -3.0, -3.0, -3.0, -4.0, -4.0, -4.0, 2964 -3.0, -3.0, -3.0, -4.0, -4.0, -4.0,
2965 2147483648.0, 2965 2147483648.0,
2966 0, 2966 0,
2967 dFPU64InvalidResult}; 2967 dFPU64InvalidResult};
2968 2968
2969 __ cfc1(t1, FCSR); 2969 __ cfc1(t1, FCSR);
2970 __ sw(t1, MemOperand(a0, offsetof(Test, isNaN2008))); 2970 __ sw(t1, MemOperand(a0, offsetof(Test, isNaN2008)));
2971 __ ldc1(f4, MemOperand(a0, offsetof(Test, a)) ); 2971 __ Ldc1(f4, MemOperand(a0, offsetof(Test, a)));
2972 __ lwc1(f6, MemOperand(a0, offsetof(Test, b)) ); 2972 __ lwc1(f6, MemOperand(a0, offsetof(Test, b)) );
2973 __ floor_l_d(f8, f4); 2973 __ floor_l_d(f8, f4);
2974 __ floor_l_s(f10, f6); 2974 __ floor_l_s(f10, f6);
2975 __ sdc1(f8, MemOperand(a0, offsetof(Test, c)) ); 2975 __ Sdc1(f8, MemOperand(a0, offsetof(Test, c)));
2976 __ sdc1(f10, MemOperand(a0, offsetof(Test, d)) ); 2976 __ Sdc1(f10, MemOperand(a0, offsetof(Test, d)));
2977 __ jr(ra); 2977 __ jr(ra);
2978 __ nop(); 2978 __ nop();
2979 Test test; 2979 Test test;
2980 CodeDesc desc; 2980 CodeDesc desc;
2981 assm.GetCode(&desc); 2981 assm.GetCode(&desc);
2982 Handle<Code> code = isolate->factory()->NewCode( 2982 Handle<Code> code = isolate->factory()->NewCode(
2983 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); 2983 desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
2984 F3 f = FUNCTION_CAST<F3>(code->entry()); 2984 F3 f = FUNCTION_CAST<F3>(code->entry());
2985 for (int i = 0; i < kTableLength; i++) { 2985 for (int i = 0; i < kTableLength; i++) {
2986 test.a = inputs_D[i]; 2986 test.a = inputs_D[i];
(...skipping 46 matching lines...)
3033 kFPUInvalidResult}; 3033 kFPUInvalidResult};
3034 double outputsNaN2008[kTableLength] = { 3034 double outputsNaN2008[kTableLength] = {
3035 3.0, 3.0, 3.0, 4.0, 4.0, 4.0, 3035 3.0, 3.0, 3.0, 4.0, 4.0, 4.0,
3036 -2.0, -2.0, -2.0, -3.0, -3.0, -3.0, 3036 -2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
3037 kFPUInvalidResult, 3037 kFPUInvalidResult,
3038 0, 3038 0,
3039 kFPUInvalidResult}; 3039 kFPUInvalidResult};
3040 3040
3041 __ cfc1(t1, FCSR); 3041 __ cfc1(t1, FCSR);
3042 __ sw(t1, MemOperand(a0, offsetof(Test, isNaN2008))); 3042 __ sw(t1, MemOperand(a0, offsetof(Test, isNaN2008)));
3043 __ ldc1(f4, MemOperand(a0, offsetof(Test, a)) ); 3043 __ Ldc1(f4, MemOperand(a0, offsetof(Test, a)));
3044 __ lwc1(f6, MemOperand(a0, offsetof(Test, b)) ); 3044 __ lwc1(f6, MemOperand(a0, offsetof(Test, b)) );
3045 __ ceil_w_d(f8, f4); 3045 __ ceil_w_d(f8, f4);
3046 __ ceil_w_s(f10, f6); 3046 __ ceil_w_s(f10, f6);
3047 __ swc1(f8, MemOperand(a0, offsetof(Test, c)) ); 3047 __ swc1(f8, MemOperand(a0, offsetof(Test, c)) );
3048 __ swc1(f10, MemOperand(a0, offsetof(Test, d)) ); 3048 __ swc1(f10, MemOperand(a0, offsetof(Test, d)) );
3049 __ jr(ra); 3049 __ jr(ra);
3050 __ nop(); 3050 __ nop();
3051 Test test; 3051 Test test;
3052 CodeDesc desc; 3052 CodeDesc desc;
3053 assm.GetCode(&desc); 3053 assm.GetCode(&desc);
(...skipping 51 matching lines...)
3105 dFPU64InvalidResult}; 3105 dFPU64InvalidResult};
3106 double outputsNaN2008[kTableLength] = { 3106 double outputsNaN2008[kTableLength] = {
3107 3.0, 3.0, 3.0, 4.0, 4.0, 4.0, 3107 3.0, 3.0, 3.0, 4.0, 4.0, 4.0,
3108 -2.0, -2.0, -2.0, -3.0, -3.0, -3.0, 3108 -2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
3109 2147483648.0, 3109 2147483648.0,
3110 0, 3110 0,
3111 dFPU64InvalidResult}; 3111 dFPU64InvalidResult};
3112 3112
3113 __ cfc1(t1, FCSR); 3113 __ cfc1(t1, FCSR);
3114 __ sw(t1, MemOperand(a0, offsetof(Test, isNaN2008))); 3114 __ sw(t1, MemOperand(a0, offsetof(Test, isNaN2008)));
3115 __ ldc1(f4, MemOperand(a0, offsetof(Test, a)) ); 3115 __ Ldc1(f4, MemOperand(a0, offsetof(Test, a)));
3116 __ lwc1(f6, MemOperand(a0, offsetof(Test, b)) ); 3116 __ lwc1(f6, MemOperand(a0, offsetof(Test, b)) );
3117 __ ceil_l_d(f8, f4); 3117 __ ceil_l_d(f8, f4);
3118 __ ceil_l_s(f10, f6); 3118 __ ceil_l_s(f10, f6);
3119 __ sdc1(f8, MemOperand(a0, offsetof(Test, c)) ); 3119 __ Sdc1(f8, MemOperand(a0, offsetof(Test, c)));
3120 __ sdc1(f10, MemOperand(a0, offsetof(Test, d)) ); 3120 __ Sdc1(f10, MemOperand(a0, offsetof(Test, d)));
3121 __ jr(ra); 3121 __ jr(ra);
3122 __ nop(); 3122 __ nop();
3123 Test test; 3123 Test test;
3124 CodeDesc desc; 3124 CodeDesc desc;
3125 assm.GetCode(&desc); 3125 assm.GetCode(&desc);
3126 Handle<Code> code = isolate->factory()->NewCode( 3126 Handle<Code> code = isolate->factory()->NewCode(
3127 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); 3127 desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
3128 F3 f = FUNCTION_CAST<F3>(code->entry()); 3128 F3 f = FUNCTION_CAST<F3>(code->entry());
3129 for (int i = 0; i < kTableLength; i++) { 3129 for (int i = 0; i < kTableLength; i++) {
3130 test.a = inputs_D[i]; 3130 test.a = inputs_D[i];
(...skipping 304 matching lines...)
3435 float fPosNorm; 3435 float fPosNorm;
3436 float fPosSubnorm; 3436 float fPosSubnorm;
3437 float fPosZero; } T; 3437 float fPosZero; } T;
3438 T t; 3438 T t;
3439 3439
3440 // Create a function that accepts &t, and loads, manipulates, and stores 3440 // Create a function that accepts &t, and loads, manipulates, and stores
3441 // the doubles t.a ... t.f. 3441 // the doubles t.a ... t.f.
3442 MacroAssembler assm(isolate, NULL, 0, 3442 MacroAssembler assm(isolate, NULL, 0,
3443 v8::internal::CodeObjectRequired::kYes); 3443 v8::internal::CodeObjectRequired::kYes);
3444 3444
3445 __ ldc1(f4, MemOperand(a0, offsetof(T, dSignalingNan))); 3445 __ Ldc1(f4, MemOperand(a0, offsetof(T, dSignalingNan)));
3446 __ class_d(f6, f4); 3446 __ class_d(f6, f4);
3447 __ sdc1(f6, MemOperand(a0, offsetof(T, dSignalingNan))); 3447 __ Sdc1(f6, MemOperand(a0, offsetof(T, dSignalingNan)));
3448 3448
3449 __ ldc1(f4, MemOperand(a0, offsetof(T, dQuietNan))); 3449 __ Ldc1(f4, MemOperand(a0, offsetof(T, dQuietNan)));
3450 __ class_d(f6, f4); 3450 __ class_d(f6, f4);
3451 __ sdc1(f6, MemOperand(a0, offsetof(T, dQuietNan))); 3451 __ Sdc1(f6, MemOperand(a0, offsetof(T, dQuietNan)));
3452 3452
3453 __ ldc1(f4, MemOperand(a0, offsetof(T, dNegInf))); 3453 __ Ldc1(f4, MemOperand(a0, offsetof(T, dNegInf)));
3454 __ class_d(f6, f4); 3454 __ class_d(f6, f4);
3455 __ sdc1(f6, MemOperand(a0, offsetof(T, dNegInf))); 3455 __ Sdc1(f6, MemOperand(a0, offsetof(T, dNegInf)));
3456 3456
3457 __ ldc1(f4, MemOperand(a0, offsetof(T, dNegNorm))); 3457 __ Ldc1(f4, MemOperand(a0, offsetof(T, dNegNorm)));
3458 __ class_d(f6, f4); 3458 __ class_d(f6, f4);
3459 __ sdc1(f6, MemOperand(a0, offsetof(T, dNegNorm))); 3459 __ Sdc1(f6, MemOperand(a0, offsetof(T, dNegNorm)));
3460 3460
3461 __ ldc1(f4, MemOperand(a0, offsetof(T, dNegSubnorm))); 3461 __ Ldc1(f4, MemOperand(a0, offsetof(T, dNegSubnorm)));
3462 __ class_d(f6, f4); 3462 __ class_d(f6, f4);
3463 __ sdc1(f6, MemOperand(a0, offsetof(T, dNegSubnorm))); 3463 __ Sdc1(f6, MemOperand(a0, offsetof(T, dNegSubnorm)));
3464 3464
3465 __ ldc1(f4, MemOperand(a0, offsetof(T, dNegZero))); 3465 __ Ldc1(f4, MemOperand(a0, offsetof(T, dNegZero)));
3466 __ class_d(f6, f4); 3466 __ class_d(f6, f4);
3467 __ sdc1(f6, MemOperand(a0, offsetof(T, dNegZero))); 3467 __ Sdc1(f6, MemOperand(a0, offsetof(T, dNegZero)));
3468 3468
3469 __ ldc1(f4, MemOperand(a0, offsetof(T, dPosInf))); 3469 __ Ldc1(f4, MemOperand(a0, offsetof(T, dPosInf)));
3470 __ class_d(f6, f4); 3470 __ class_d(f6, f4);
3471 __ sdc1(f6, MemOperand(a0, offsetof(T, dPosInf))); 3471 __ Sdc1(f6, MemOperand(a0, offsetof(T, dPosInf)));
3472 3472
3473 __ ldc1(f4, MemOperand(a0, offsetof(T, dPosNorm))); 3473 __ Ldc1(f4, MemOperand(a0, offsetof(T, dPosNorm)));
3474 __ class_d(f6, f4); 3474 __ class_d(f6, f4);
3475 __ sdc1(f6, MemOperand(a0, offsetof(T, dPosNorm))); 3475 __ Sdc1(f6, MemOperand(a0, offsetof(T, dPosNorm)));
3476 3476
3477 __ ldc1(f4, MemOperand(a0, offsetof(T, dPosSubnorm))); 3477 __ Ldc1(f4, MemOperand(a0, offsetof(T, dPosSubnorm)));
3478 __ class_d(f6, f4); 3478 __ class_d(f6, f4);
3479 __ sdc1(f6, MemOperand(a0, offsetof(T, dPosSubnorm))); 3479 __ Sdc1(f6, MemOperand(a0, offsetof(T, dPosSubnorm)));
3480 3480
3481 __ ldc1(f4, MemOperand(a0, offsetof(T, dPosZero))); 3481 __ Ldc1(f4, MemOperand(a0, offsetof(T, dPosZero)));
3482 __ class_d(f6, f4); 3482 __ class_d(f6, f4);
3483 __ sdc1(f6, MemOperand(a0, offsetof(T, dPosZero))); 3483 __ Sdc1(f6, MemOperand(a0, offsetof(T, dPosZero)));
3484 3484
3485 // Testing instruction CLASS.S 3485 // Testing instruction CLASS.S
3486 __ lwc1(f4, MemOperand(a0, offsetof(T, fSignalingNan))); 3486 __ lwc1(f4, MemOperand(a0, offsetof(T, fSignalingNan)));
3487 __ class_s(f6, f4); 3487 __ class_s(f6, f4);
3488 __ swc1(f6, MemOperand(a0, offsetof(T, fSignalingNan))); 3488 __ swc1(f6, MemOperand(a0, offsetof(T, fSignalingNan)));
3489 3489
3490 __ lwc1(f4, MemOperand(a0, offsetof(T, fQuietNan))); 3490 __ lwc1(f4, MemOperand(a0, offsetof(T, fQuietNan)));
3491 __ class_s(f6, f4); 3491 __ class_s(f6, f4);
3492 __ swc1(f6, MemOperand(a0, offsetof(T, fQuietNan))); 3492 __ swc1(f6, MemOperand(a0, offsetof(T, fQuietNan)));
3493 3493
(...skipping 103 matching lines...)
3597 double fcsr; 3597 double fcsr;
3598 } TestFloat; 3598 } TestFloat;
3599 3599
3600 TestFloat test; 3600 TestFloat test;
3601 3601
3602 // Save FIR. 3602 // Save FIR.
3603 __ cfc1(a1, FCSR); 3603 __ cfc1(a1, FCSR);
3604 // Disable FPU exceptions. 3604 // Disable FPU exceptions.
3605 __ ctc1(zero_reg, FCSR); 3605 __ ctc1(zero_reg, FCSR);
3606 3606
3607 __ ldc1(f4, MemOperand(a0, offsetof(TestFloat, a))); 3607 __ Ldc1(f4, MemOperand(a0, offsetof(TestFloat, a)));
3608 __ abs_d(f10, f4); 3608 __ abs_d(f10, f4);
3609 __ sdc1(f10, MemOperand(a0, offsetof(TestFloat, a))); 3609 __ Sdc1(f10, MemOperand(a0, offsetof(TestFloat, a)));
3610 3610
3611 __ lwc1(f4, MemOperand(a0, offsetof(TestFloat, b))); 3611 __ lwc1(f4, MemOperand(a0, offsetof(TestFloat, b)));
3612 __ abs_s(f10, f4); 3612 __ abs_s(f10, f4);
3613 __ swc1(f10, MemOperand(a0, offsetof(TestFloat, b))); 3613 __ swc1(f10, MemOperand(a0, offsetof(TestFloat, b)));
3614 3614
3615 // Restore FCSR. 3615 // Restore FCSR.
3616 __ ctc1(a1, FCSR); 3616 __ ctc1(a1, FCSR);
3617 3617
3618 __ jr(ra); 3618 __ jr(ra);
3619 __ nop(); 3619 __ nop();
(...skipping 71 matching lines...)
3691 double a; 3691 double a;
3692 double b; 3692 double b;
3693 double c; 3693 double c;
3694 float fa; 3694 float fa;
3695 float fb; 3695 float fb;
3696 float fc; 3696 float fc;
3697 } TestFloat; 3697 } TestFloat;
3698 3698
3699 TestFloat test; 3699 TestFloat test;
3700 3700
3701 __ ldc1(f4, MemOperand(a0, offsetof(TestFloat, a))); 3701 __ Ldc1(f4, MemOperand(a0, offsetof(TestFloat, a)));
3702 __ ldc1(f8, MemOperand(a0, offsetof(TestFloat, b))); 3702 __ Ldc1(f8, MemOperand(a0, offsetof(TestFloat, b)));
3703 __ add_d(f10, f8, f4); 3703 __ add_d(f10, f8, f4);
3704 __ sdc1(f10, MemOperand(a0, offsetof(TestFloat, c))); 3704 __ Sdc1(f10, MemOperand(a0, offsetof(TestFloat, c)));
3705 3705
3706 __ lwc1(f4, MemOperand(a0, offsetof(TestFloat, fa))); 3706 __ lwc1(f4, MemOperand(a0, offsetof(TestFloat, fa)));
3707 __ lwc1(f8, MemOperand(a0, offsetof(TestFloat, fb))); 3707 __ lwc1(f8, MemOperand(a0, offsetof(TestFloat, fb)));
3708 __ add_s(f10, f8, f4); 3708 __ add_s(f10, f8, f4);
3709 __ swc1(f10, MemOperand(a0, offsetof(TestFloat, fc))); 3709 __ swc1(f10, MemOperand(a0, offsetof(TestFloat, fc)));
3710 3710
3711 __ jr(ra); 3711 __ jr(ra);
3712 __ nop(); 3712 __ nop();
3713 3713
3714 CodeDesc desc; 3714 CodeDesc desc;
(...skipping 63 matching lines...)
3778 uint32_t fOlt; 3778 uint32_t fOlt;
3779 uint32_t fUlt; 3779 uint32_t fUlt;
3780 uint32_t fOle; 3780 uint32_t fOle;
3781 uint32_t fUle; 3781 uint32_t fUle;
3782 } TestFloat; 3782 } TestFloat;
3783 3783
3784 TestFloat test; 3784 TestFloat test;
3785 3785
3786 __ li(t1, 1); 3786 __ li(t1, 1);
3787 3787
3788 __ ldc1(f4, MemOperand(a0, offsetof(TestFloat, dOp1))); 3788 __ Ldc1(f4, MemOperand(a0, offsetof(TestFloat, dOp1)));
3789 __ ldc1(f6, MemOperand(a0, offsetof(TestFloat, dOp2))); 3789 __ Ldc1(f6, MemOperand(a0, offsetof(TestFloat, dOp2)));
3790 3790
3791 __ lwc1(f14, MemOperand(a0, offsetof(TestFloat, fOp1))); 3791 __ lwc1(f14, MemOperand(a0, offsetof(TestFloat, fOp1)));
3792 __ lwc1(f16, MemOperand(a0, offsetof(TestFloat, fOp2))); 3792 __ lwc1(f16, MemOperand(a0, offsetof(TestFloat, fOp2)));
3793 3793
3794 __ mov(t2, zero_reg); 3794 __ mov(t2, zero_reg);
3795 __ mov(t3, zero_reg); 3795 __ mov(t3, zero_reg);
3796 __ c_d(F, f4, f6, 0); 3796 __ c_d(F, f4, f6, 0);
3797 __ c_s(F, f14, f16, 2); 3797 __ c_s(F, f14, f16, 2);
3798 __ movt(t2, t1, 0); 3798 __ movt(t2, t1, 0);
3799 __ movt(t3, t1, 2); 3799 __ movt(t3, t1, 2);
(...skipping 196 matching lines...)
3996 float fUle; 3996 float fUle;
3997 float fOr; 3997 float fOr;
3998 float fUne; 3998 float fUne;
3999 float fNe; 3999 float fNe;
4000 } TestFloat; 4000 } TestFloat;
4001 4001
4002 TestFloat test; 4002 TestFloat test;
4003 4003
4004 __ li(t1, 1); 4004 __ li(t1, 1);
4005 4005
4006 __ ldc1(f4, MemOperand(a0, offsetof(TestFloat, dOp1))); 4006 __ Ldc1(f4, MemOperand(a0, offsetof(TestFloat, dOp1)));
4007 __ ldc1(f6, MemOperand(a0, offsetof(TestFloat, dOp2))); 4007 __ Ldc1(f6, MemOperand(a0, offsetof(TestFloat, dOp2)));
4008 4008
4009 __ lwc1(f14, MemOperand(a0, offsetof(TestFloat, fOp1))); 4009 __ lwc1(f14, MemOperand(a0, offsetof(TestFloat, fOp1)));
4010 __ lwc1(f16, MemOperand(a0, offsetof(TestFloat, fOp2))); 4010 __ lwc1(f16, MemOperand(a0, offsetof(TestFloat, fOp2)));
4011 4011
4012 __ cmp_d(F, f2, f4, f6); 4012 __ cmp_d(F, f2, f4, f6);
4013 __ cmp_s(F, f12, f14, f16); 4013 __ cmp_s(F, f12, f14, f16);
4014 __ sdc1(f2, MemOperand(a0, offsetof(TestFloat, dF)) ); 4014 __ Sdc1(f2, MemOperand(a0, offsetof(TestFloat, dF)));
4015 __ swc1(f12, MemOperand(a0, offsetof(TestFloat, fF)) ); 4015 __ swc1(f12, MemOperand(a0, offsetof(TestFloat, fF)) );
4016 4016
4017 __ cmp_d(UN, f2, f4, f6); 4017 __ cmp_d(UN, f2, f4, f6);
4018 __ cmp_s(UN, f12, f14, f16); 4018 __ cmp_s(UN, f12, f14, f16);
4019 __ sdc1(f2, MemOperand(a0, offsetof(TestFloat, dUn)) ); 4019 __ Sdc1(f2, MemOperand(a0, offsetof(TestFloat, dUn)));
4020 __ swc1(f12, MemOperand(a0, offsetof(TestFloat, fUn)) ); 4020 __ swc1(f12, MemOperand(a0, offsetof(TestFloat, fUn)) );
4021 4021
4022 __ cmp_d(EQ, f2, f4, f6); 4022 __ cmp_d(EQ, f2, f4, f6);
4023 __ cmp_s(EQ, f12, f14, f16); 4023 __ cmp_s(EQ, f12, f14, f16);
4024 __ sdc1(f2, MemOperand(a0, offsetof(TestFloat, dEq)) ); 4024 __ Sdc1(f2, MemOperand(a0, offsetof(TestFloat, dEq)));
4025 __ swc1(f12, MemOperand(a0, offsetof(TestFloat, fEq)) ); 4025 __ swc1(f12, MemOperand(a0, offsetof(TestFloat, fEq)) );
4026 4026
4027 __ cmp_d(UEQ, f2, f4, f6); 4027 __ cmp_d(UEQ, f2, f4, f6);
4028 __ cmp_s(UEQ, f12, f14, f16); 4028 __ cmp_s(UEQ, f12, f14, f16);
4029 __ sdc1(f2, MemOperand(a0, offsetof(TestFloat, dUeq)) ); 4029 __ Sdc1(f2, MemOperand(a0, offsetof(TestFloat, dUeq)));
4030 __ swc1(f12, MemOperand(a0, offsetof(TestFloat, fUeq)) ); 4030 __ swc1(f12, MemOperand(a0, offsetof(TestFloat, fUeq)) );
4031 4031
4032 __ cmp_d(LT, f2, f4, f6); 4032 __ cmp_d(LT, f2, f4, f6);
4033 __ cmp_s(LT, f12, f14, f16); 4033 __ cmp_s(LT, f12, f14, f16);
4034 __ sdc1(f2, MemOperand(a0, offsetof(TestFloat, dOlt)) ); 4034 __ Sdc1(f2, MemOperand(a0, offsetof(TestFloat, dOlt)));
4035 __ swc1(f12, MemOperand(a0, offsetof(TestFloat, fOlt)) ); 4035 __ swc1(f12, MemOperand(a0, offsetof(TestFloat, fOlt)) );
4036 4036
4037 __ cmp_d(ULT, f2, f4, f6); 4037 __ cmp_d(ULT, f2, f4, f6);
4038 __ cmp_s(ULT, f12, f14, f16); 4038 __ cmp_s(ULT, f12, f14, f16);
4039 __ sdc1(f2, MemOperand(a0, offsetof(TestFloat, dUlt)) ); 4039 __ Sdc1(f2, MemOperand(a0, offsetof(TestFloat, dUlt)));
4040 __ swc1(f12, MemOperand(a0, offsetof(TestFloat, fUlt)) ); 4040 __ swc1(f12, MemOperand(a0, offsetof(TestFloat, fUlt)) );
4041 4041
4042 __ cmp_d(LE, f2, f4, f6); 4042 __ cmp_d(LE, f2, f4, f6);
4043 __ cmp_s(LE, f12, f14, f16); 4043 __ cmp_s(LE, f12, f14, f16);
4044 __ sdc1(f2, MemOperand(a0, offsetof(TestFloat, dOle)) ); 4044 __ Sdc1(f2, MemOperand(a0, offsetof(TestFloat, dOle)));
4045 __ swc1(f12, MemOperand(a0, offsetof(TestFloat, fOle)) ); 4045 __ swc1(f12, MemOperand(a0, offsetof(TestFloat, fOle)) );
4046 4046
4047 __ cmp_d(ULE, f2, f4, f6); 4047 __ cmp_d(ULE, f2, f4, f6);
4048 __ cmp_s(ULE, f12, f14, f16); 4048 __ cmp_s(ULE, f12, f14, f16);
4049 __ sdc1(f2, MemOperand(a0, offsetof(TestFloat, dUle)) ); 4049 __ Sdc1(f2, MemOperand(a0, offsetof(TestFloat, dUle)));
4050 __ swc1(f12, MemOperand(a0, offsetof(TestFloat, fUle)) ); 4050 __ swc1(f12, MemOperand(a0, offsetof(TestFloat, fUle)) );
4051 4051
4052 __ cmp_d(ORD, f2, f4, f6); 4052 __ cmp_d(ORD, f2, f4, f6);
4053 __ cmp_s(ORD, f12, f14, f16); 4053 __ cmp_s(ORD, f12, f14, f16);
4054 __ sdc1(f2, MemOperand(a0, offsetof(TestFloat, dOr)) ); 4054 __ Sdc1(f2, MemOperand(a0, offsetof(TestFloat, dOr)));
4055 __ swc1(f12, MemOperand(a0, offsetof(TestFloat, fOr)) ); 4055 __ swc1(f12, MemOperand(a0, offsetof(TestFloat, fOr)) );
4056 4056
4057 __ cmp_d(UNE, f2, f4, f6); 4057 __ cmp_d(UNE, f2, f4, f6);
4058 __ cmp_s(UNE, f12, f14, f16); 4058 __ cmp_s(UNE, f12, f14, f16);
4059 __ sdc1(f2, MemOperand(a0, offsetof(TestFloat, dUne)) ); 4059 __ Sdc1(f2, MemOperand(a0, offsetof(TestFloat, dUne)));
4060 __ swc1(f12, MemOperand(a0, offsetof(TestFloat, fUne)) ); 4060 __ swc1(f12, MemOperand(a0, offsetof(TestFloat, fUne)) );
4061 4061
4062 __ cmp_d(NE, f2, f4, f6); 4062 __ cmp_d(NE, f2, f4, f6);
4063 __ cmp_s(NE, f12, f14, f16); 4063 __ cmp_s(NE, f12, f14, f16);
4064 __ sdc1(f2, MemOperand(a0, offsetof(TestFloat, dNe)) ); 4064 __ Sdc1(f2, MemOperand(a0, offsetof(TestFloat, dNe)));
4065 __ swc1(f12, MemOperand(a0, offsetof(TestFloat, fNe)) ); 4065 __ swc1(f12, MemOperand(a0, offsetof(TestFloat, fNe)) );
4066 4066
4067 __ jr(ra); 4067 __ jr(ra);
4068 __ nop(); 4068 __ nop();
4069 4069
4070 CodeDesc desc; 4070 CodeDesc desc;
4071 assm.GetCode(&desc); 4071 assm.GetCode(&desc);
4072 Handle<Code> code = isolate->factory()->NewCode( 4072 Handle<Code> code = isolate->factory()->NewCode(
4073 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); 4073 desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
4074 F3 f = FUNCTION_CAST<F3>(code->entry()); 4074 F3 f = FUNCTION_CAST<F3>(code->entry());
(...skipping 143 matching lines...)
4218 __ cfc1(a1, FCSR); 4218 __ cfc1(a1, FCSR);
4219 // Disable FPU exceptions. 4219 // Disable FPU exceptions.
4220 __ ctc1(zero_reg, FCSR); 4220 __ ctc1(zero_reg, FCSR);
4221 4221
4222 #define GENERATE_CVT_TEST(x, y, z) \ 4222 #define GENERATE_CVT_TEST(x, y, z) \
4223 __ y##c1(f0, MemOperand(a0, offsetof(TestFloat, x##_in))); \ 4223 __ y##c1(f0, MemOperand(a0, offsetof(TestFloat, x##_in))); \
4224 __ x(f0, f0); \ 4224 __ x(f0, f0); \
4225 __ nop(); \ 4225 __ nop(); \
4226 __ z##c1(f0, MemOperand(a0, offsetof(TestFloat, x##_out))); 4226 __ z##c1(f0, MemOperand(a0, offsetof(TestFloat, x##_out)));
4227 4227
4228 GENERATE_CVT_TEST(cvt_d_s, lw, sd) 4228 GENERATE_CVT_TEST(cvt_d_s, lw, Sd)
4229 GENERATE_CVT_TEST(cvt_d_w, lw, sd) 4229 GENERATE_CVT_TEST(cvt_d_w, lw, Sd)
4230 if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) && 4230 if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
4231 IsFp64Mode()) { 4231 IsFp64Mode()) {
4232 GENERATE_CVT_TEST(cvt_d_l, ld, sd) 4232 GENERATE_CVT_TEST(cvt_d_l, Ld, Sd)
4233 } 4233 }
4234 4234
4235 if (IsFp64Mode()) { 4235 if (IsFp64Mode()) {
4236 GENERATE_CVT_TEST(cvt_l_s, lw, sd) 4236 GENERATE_CVT_TEST(cvt_l_s, lw, Sd)
4237 GENERATE_CVT_TEST(cvt_l_d, ld, sd) 4237 GENERATE_CVT_TEST(cvt_l_d, Ld, Sd)
4238 } 4238 }
4239 4239
4240 GENERATE_CVT_TEST(cvt_s_d, ld, sw) 4240 GENERATE_CVT_TEST(cvt_s_d, Ld, sw)
4241 GENERATE_CVT_TEST(cvt_s_w, lw, sw) 4241 GENERATE_CVT_TEST(cvt_s_w, lw, sw)
4242 if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) && 4242 if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
4243 IsFp64Mode()) { 4243 IsFp64Mode()) {
4244 GENERATE_CVT_TEST(cvt_s_l, ld, sw) 4244 GENERATE_CVT_TEST(cvt_s_l, Ld, sw)
4245 } 4245 }
4246 4246
4247 GENERATE_CVT_TEST(cvt_w_s, lw, sw) 4247 GENERATE_CVT_TEST(cvt_w_s, lw, sw)
4248 GENERATE_CVT_TEST(cvt_w_d, ld, sw) 4248 GENERATE_CVT_TEST(cvt_w_d, Ld, sw)
4249 4249
4250 // Restore FCSR. 4250 // Restore FCSR.
4251 __ ctc1(a1, FCSR); 4251 __ ctc1(a1, FCSR);
4252 4252
4253 __ jr(ra); 4253 __ jr(ra);
4254 __ nop(); 4254 __ nop();
4255 4255
4256 CodeDesc desc; 4256 CodeDesc desc;
4257 assm.GetCode(&desc); 4257 assm.GetCode(&desc);
4258 Handle<Code> code = isolate->factory()->NewCode( 4258 Handle<Code> code = isolate->factory()->NewCode(
(...skipping 181 matching lines...)
4440 float fRes; 4440 float fRes;
4441 } Test; 4441 } Test;
4442 4442
4443 Test test; 4443 Test test;
4444 4444
4445 // Save FCSR. 4445 // Save FCSR.
4446 __ cfc1(a1, FCSR); 4446 __ cfc1(a1, FCSR);
4447 // Disable FPU exceptions. 4447 // Disable FPU exceptions.
4448 __ ctc1(zero_reg, FCSR); 4448 __ ctc1(zero_reg, FCSR);
4449 4449
4450 __ ldc1(f4, MemOperand(a0, offsetof(Test, dOp1)) ); 4450 __ Ldc1(f4, MemOperand(a0, offsetof(Test, dOp1)));
4451 __ ldc1(f2, MemOperand(a0, offsetof(Test, dOp2)) ); 4451 __ Ldc1(f2, MemOperand(a0, offsetof(Test, dOp2)));
4452 __ nop(); 4452 __ nop();
4453 __ div_d(f6, f4, f2); 4453 __ div_d(f6, f4, f2);
4454 __ sdc1(f6, MemOperand(a0, offsetof(Test, dRes)) ); 4454 __ Sdc1(f6, MemOperand(a0, offsetof(Test, dRes)));
4455 4455
4456 __ lwc1(f4, MemOperand(a0, offsetof(Test, fOp1)) ); 4456 __ lwc1(f4, MemOperand(a0, offsetof(Test, fOp1)) );
4457 __ lwc1(f2, MemOperand(a0, offsetof(Test, fOp2)) ); 4457 __ lwc1(f2, MemOperand(a0, offsetof(Test, fOp2)) );
4458 __ nop(); 4458 __ nop();
4459 __ div_s(f6, f4, f2); 4459 __ div_s(f6, f4, f2);
4460 __ swc1(f6, MemOperand(a0, offsetof(Test, fRes)) ); 4460 __ swc1(f6, MemOperand(a0, offsetof(Test, fRes)) );
4461 4461
4462 // Restore FCSR. 4462 // Restore FCSR.
4463 __ ctc1(a1, FCSR); 4463 __ ctc1(a1, FCSR);
4464 4464
(...skipping 956 matching lines...)
5421 {-x2, -y2, z2, 0.0, 0.0}, 5421 {-x2, -y2, z2, 0.0, 0.0},
5422 {-x2, -y2, -z2, 0.0, 0.0}, 5422 {-x2, -y2, -z2, 0.0, 0.0},
5423 }; 5423 };
5424 5424
5425 if (std::is_same<T, float>::value) { 5425 if (std::is_same<T, float>::value) {
5426 __ lwc1(f4, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, fr))); 5426 __ lwc1(f4, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, fr)));
5427 __ lwc1(f6, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, fs))); 5427 __ lwc1(f6, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, fs)));
5428 __ lwc1(f8, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, ft))); 5428 __ lwc1(f8, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, ft)));
5429 __ lwc1(f16, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, fr))); 5429 __ lwc1(f16, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, fr)));
5430 } else if (std::is_same<T, double>::value) { 5430 } else if (std::is_same<T, double>::value) {
5431 __ ldc1(f4, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, fr))); 5431 __ Ldc1(f4, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, fr)));
5432 __ ldc1(f6, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, fs))); 5432 __ Ldc1(f6, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, fs)));
5433 __ ldc1(f8, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, ft))); 5433 __ Ldc1(f8, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, ft)));
5434 __ ldc1(f16, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, fr))); 5434 __ Ldc1(f16, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, fr)));
5435 } else { 5435 } else {
5436 UNREACHABLE(); 5436 UNREACHABLE();
5437 } 5437 }
5438 5438
5439 func(assm); 5439 func(assm);
5440 5440
5441 __ jr(ra); 5441 __ jr(ra);
5442 __ nop(); 5442 __ nop();
5443 5443
5444 CodeDesc desc; 5444 CodeDesc desc;
(...skipping 35 matching lines...)
5480 __ swc1(f10, MemOperand(a0, offsetof(TestCaseMaddMsub<float>, fd_add))); 5480 __ swc1(f10, MemOperand(a0, offsetof(TestCaseMaddMsub<float>, fd_add)));
5481 __ msub_s(f16, f4, f6, f8); 5481 __ msub_s(f16, f4, f6, f8);
5482 __ swc1(f16, MemOperand(a0, offsetof(TestCaseMaddMsub<float>, fd_sub))); 5482 __ swc1(f16, MemOperand(a0, offsetof(TestCaseMaddMsub<float>, fd_sub)));
5483 }); 5483 });
5484 } 5484 }
5485 5485
5486 TEST(madd_msub_d) { 5486 TEST(madd_msub_d) {
5487 if (!IsMipsArchVariant(kMips32r2)) return; 5487 if (!IsMipsArchVariant(kMips32r2)) return;
5488 helper_madd_msub_maddf_msubf<double>([](MacroAssembler& assm) { 5488 helper_madd_msub_maddf_msubf<double>([](MacroAssembler& assm) {
5489 __ madd_d(f10, f4, f6, f8); 5489 __ madd_d(f10, f4, f6, f8);
5490 __ sdc1(f10, MemOperand(a0, offsetof(TestCaseMaddMsub<double>, fd_add))); 5490 __ Sdc1(f10, MemOperand(a0, offsetof(TestCaseMaddMsub<double>, fd_add)));
5491 __ msub_d(f16, f4, f6, f8); 5491 __ msub_d(f16, f4, f6, f8);
5492 __ sdc1(f16, MemOperand(a0, offsetof(TestCaseMaddMsub<double>, fd_sub))); 5492 __ Sdc1(f16, MemOperand(a0, offsetof(TestCaseMaddMsub<double>, fd_sub)));
5493 }); 5493 });
5494 } 5494 }
5495 5495
5496 TEST(maddf_msubf_s) { 5496 TEST(maddf_msubf_s) {
5497 if (!IsMipsArchVariant(kMips32r6)) return; 5497 if (!IsMipsArchVariant(kMips32r6)) return;
5498 helper_madd_msub_maddf_msubf<float>([](MacroAssembler& assm) { 5498 helper_madd_msub_maddf_msubf<float>([](MacroAssembler& assm) {
5499 __ maddf_s(f4, f6, f8); 5499 __ maddf_s(f4, f6, f8);
5500 __ swc1(f4, MemOperand(a0, offsetof(TestCaseMaddMsub<float>, fd_add))); 5500 __ swc1(f4, MemOperand(a0, offsetof(TestCaseMaddMsub<float>, fd_add)));
5501 __ msubf_s(f16, f6, f8); 5501 __ msubf_s(f16, f6, f8);
5502 __ swc1(f16, MemOperand(a0, offsetof(TestCaseMaddMsub<float>, fd_sub))); 5502 __ swc1(f16, MemOperand(a0, offsetof(TestCaseMaddMsub<float>, fd_sub)));
5503 }); 5503 });
5504 } 5504 }
5505 5505
5506 TEST(maddf_msubf_d) { 5506 TEST(maddf_msubf_d) {
5507 if (!IsMipsArchVariant(kMips32r6)) return; 5507 if (!IsMipsArchVariant(kMips32r6)) return;
5508 helper_madd_msub_maddf_msubf<double>([](MacroAssembler& assm) { 5508 helper_madd_msub_maddf_msubf<double>([](MacroAssembler& assm) {
5509 __ maddf_d(f4, f6, f8); 5509 __ maddf_d(f4, f6, f8);
5510 __ sdc1(f4, MemOperand(a0, offsetof(TestCaseMaddMsub<double>, fd_add))); 5510 __ Sdc1(f4, MemOperand(a0, offsetof(TestCaseMaddMsub<double>, fd_add)));
5511 __ msubf_d(f16, f6, f8); 5511 __ msubf_d(f16, f6, f8);
5512 __ sdc1(f16, MemOperand(a0, offsetof(TestCaseMaddMsub<double>, fd_sub))); 5512 __ Sdc1(f16, MemOperand(a0, offsetof(TestCaseMaddMsub<double>, fd_sub)));
5513 }); 5513 });
5514 } 5514 }
5515 5515
5516 #undef __ 5516 #undef __
