| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 270 matching lines...) |
| 281 float fg; | 281 float fg; |
| 282 } T; | 282 } T; |
| 283 T t; | 283 T t; |
| 284 | 284 |
| 285 // Create a function that accepts &t, and loads, manipulates, and stores | 285 // Create a function that accepts &t, and loads, manipulates, and stores |
| 286 // the doubles t.a ... t.f. | 286 // the doubles t.a ... t.f. |
| 287 MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes); | 287 MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes); |
| 288 Label L, C; | 288 Label L, C; |
| 289 | 289 |
| 290 // Double precision floating point instructions. | 290 // Double precision floating point instructions. |
| 291 __ ldc1(f4, MemOperand(a0, offsetof(T, a)) ); | 291 __ Ldc1(f4, MemOperand(a0, offsetof(T, a))); |
| 292 __ ldc1(f6, MemOperand(a0, offsetof(T, b)) ); | 292 __ Ldc1(f6, MemOperand(a0, offsetof(T, b))); |
| 293 __ add_d(f8, f4, f6); | 293 __ add_d(f8, f4, f6); |
| 294 __ sdc1(f8, MemOperand(a0, offsetof(T, c)) ); // c = a + b. | 294 __ Sdc1(f8, MemOperand(a0, offsetof(T, c))); // c = a + b. |
| 295 | 295 |
| 296 __ mov_d(f10, f8); // c | 296 __ mov_d(f10, f8); // c |
| 297 __ neg_d(f12, f6); // -b | 297 __ neg_d(f12, f6); // -b |
| 298 __ sub_d(f10, f10, f12); | 298 __ sub_d(f10, f10, f12); |
| 299 __ sdc1(f10, MemOperand(a0, offsetof(T, d)) ); // d = c - (-b). | 299 __ Sdc1(f10, MemOperand(a0, offsetof(T, d))); // d = c - (-b). |
| 300 | 300 |
| 301 __ sdc1(f4, MemOperand(a0, offsetof(T, b)) ); // b = a. | 301 __ Sdc1(f4, MemOperand(a0, offsetof(T, b))); // b = a. |
| 302 | 302 |
| 303 __ li(a4, 120); | 303 __ li(a4, 120); |
| 304 __ mtc1(a4, f14); | 304 __ mtc1(a4, f14); |
| 305 __ cvt_d_w(f14, f14); // f14 = 120.0. | 305 __ cvt_d_w(f14, f14); // f14 = 120.0. |
| 306 __ mul_d(f10, f10, f14); | 306 __ mul_d(f10, f10, f14); |
| 307 __ sdc1(f10, MemOperand(a0, offsetof(T, e)) ); // e = d * 120 = 1.8066e16. | 307 __ Sdc1(f10, MemOperand(a0, offsetof(T, e))); // e = d * 120 = 1.8066e16. |
| 308 | 308 |
| 309 __ div_d(f12, f10, f4); | 309 __ div_d(f12, f10, f4); |
| 310 __ sdc1(f12, MemOperand(a0, offsetof(T, f)) ); // f = e / a = 120.44. | 310 __ Sdc1(f12, MemOperand(a0, offsetof(T, f))); // f = e / a = 120.44. |
| 311 | 311 |
| 312 __ sqrt_d(f14, f12); | 312 __ sqrt_d(f14, f12); |
| 313 __ sdc1(f14, MemOperand(a0, offsetof(T, g)) ); | 313 __ Sdc1(f14, MemOperand(a0, offsetof(T, g))); |
| 314 // g = sqrt(f) = 10.97451593465515908537 | 314 // g = sqrt(f) = 10.97451593465515908537 |
| 315 | 315 |
| 316 if (kArchVariant == kMips64r2) { | 316 if (kArchVariant == kMips64r2) { |
| 317 __ ldc1(f4, MemOperand(a0, offsetof(T, h)) ); | 317 __ Ldc1(f4, MemOperand(a0, offsetof(T, h))); |
| 318 __ ldc1(f6, MemOperand(a0, offsetof(T, i)) ); | 318 __ Ldc1(f6, MemOperand(a0, offsetof(T, i))); |
| 319 __ madd_d(f14, f6, f4, f6); | 319 __ madd_d(f14, f6, f4, f6); |
| 320 __ sdc1(f14, MemOperand(a0, offsetof(T, h)) ); | 320 __ Sdc1(f14, MemOperand(a0, offsetof(T, h))); |
| 321 } | 321 } |
| 322 | 322 |
| 323 // Single precision floating point instructions. | 323 // Single precision floating point instructions. |
| 324 __ lwc1(f4, MemOperand(a0, offsetof(T, fa)) ); | 324 __ Lwc1(f4, MemOperand(a0, offsetof(T, fa))); |
| 325 __ lwc1(f6, MemOperand(a0, offsetof(T, fb)) ); | 325 __ Lwc1(f6, MemOperand(a0, offsetof(T, fb))); |
| 326 __ add_s(f8, f4, f6); | 326 __ add_s(f8, f4, f6); |
| 327 __ swc1(f8, MemOperand(a0, offsetof(T, fc)) ); // fc = fa + fb. | 327 __ Swc1(f8, MemOperand(a0, offsetof(T, fc))); // fc = fa + fb. |
| 328 | 328 |
| 329 __ neg_s(f10, f6); // -fb | 329 __ neg_s(f10, f6); // -fb |
| 330 __ sub_s(f10, f8, f10); | 330 __ sub_s(f10, f8, f10); |
| 331 __ swc1(f10, MemOperand(a0, offsetof(T, fd)) ); // fd = fc - (-fb). | 331 __ Swc1(f10, MemOperand(a0, offsetof(T, fd))); // fd = fc - (-fb). |
| 332 | 332 |
| 333 __ swc1(f4, MemOperand(a0, offsetof(T, fb)) ); // fb = fa. | 333 __ Swc1(f4, MemOperand(a0, offsetof(T, fb))); // fb = fa. |
| 334 | 334 |
| 335 __ li(t0, 120); | 335 __ li(t0, 120); |
| 336 __ mtc1(t0, f14); | 336 __ mtc1(t0, f14); |
| 337 __ cvt_s_w(f14, f14); // f14 = 120.0. | 337 __ cvt_s_w(f14, f14); // f14 = 120.0. |
| 338 __ mul_s(f10, f10, f14); | 338 __ mul_s(f10, f10, f14); |
| 339 __ swc1(f10, MemOperand(a0, offsetof(T, fe)) ); // fe = fd * 120 | 339 __ Swc1(f10, MemOperand(a0, offsetof(T, fe))); // fe = fd * 120 |
| 340 | 340 |
| 341 __ div_s(f12, f10, f4); | 341 __ div_s(f12, f10, f4); |
| 342 __ swc1(f12, MemOperand(a0, offsetof(T, ff)) ); // ff = fe / fa | 342 __ Swc1(f12, MemOperand(a0, offsetof(T, ff))); // ff = fe / fa |
| 343 | 343 |
| 344 __ sqrt_s(f14, f12); | 344 __ sqrt_s(f14, f12); |
| 345 __ swc1(f14, MemOperand(a0, offsetof(T, fg)) ); | 345 __ Swc1(f14, MemOperand(a0, offsetof(T, fg))); |
| 346 | 346 |
| 347 __ jr(ra); | 347 __ jr(ra); |
| 348 __ nop(); | 348 __ nop(); |
| 349 | 349 |
| 350 CodeDesc desc; | 350 CodeDesc desc; |
| 351 assm.GetCode(&desc); | 351 assm.GetCode(&desc); |
| 352 Handle<Code> code = isolate->factory()->NewCode( | 352 Handle<Code> code = isolate->factory()->NewCode( |
| 353 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); | 353 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); |
| 354 F3 f = FUNCTION_CAST<F3>(code->entry()); | 354 F3 f = FUNCTION_CAST<F3>(code->entry()); |
| 355 // Double test values. | 355 // Double test values. |
| (...skipping 45 matching lines...) |
| 401 typedef struct { | 401 typedef struct { |
| 402 double a; | 402 double a; |
| 403 double b; | 403 double b; |
| 404 double c; | 404 double c; |
| 405 double d; | 405 double d; |
| 406 int64_t high; | 406 int64_t high; |
| 407 int64_t low; | 407 int64_t low; |
| 408 } T; | 408 } T; |
| 409 T t; | 409 T t; |
| 410 | 410 |
| 411 Assembler assm(isolate, NULL, 0); | 411 MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes); |
| 412 Label L, C; | 412 Label L, C; |
| 413 | 413 |
| 414 __ ldc1(f4, MemOperand(a0, offsetof(T, a))); | 414 __ Ldc1(f4, MemOperand(a0, offsetof(T, a))); |
| 415 __ ldc1(f5, MemOperand(a0, offsetof(T, b))); | 415 __ Ldc1(f5, MemOperand(a0, offsetof(T, b))); |
| 416 | 416 |
| 417 // Swap f4 and f5, by using 3 integer registers, a4-a6, | 417 // Swap f4 and f5, by using 3 integer registers, a4-a6, |
| 418 // both two 32-bit chunks, and one 64-bit chunk. | 418 // both two 32-bit chunks, and one 64-bit chunk. |
| 419 // mXhc1 is mips32/64-r2 only, not r1, | 419 // mXhc1 is mips32/64-r2 only, not r1, |
| 420 // but we will not support r1 in practice. | 420 // but we will not support r1 in practice. |
| 421 __ mfc1(a4, f4); | 421 __ mfc1(a4, f4); |
| 422 __ mfhc1(a5, f4); | 422 __ mfhc1(a5, f4); |
| 423 __ dmfc1(a6, f5); | 423 __ dmfc1(a6, f5); |
| 424 | 424 |
| 425 __ mtc1(a4, f5); | 425 __ mtc1(a4, f5); |
| 426 __ mthc1(a5, f5); | 426 __ mthc1(a5, f5); |
| 427 __ dmtc1(a6, f4); | 427 __ dmtc1(a6, f4); |
| 428 | 428 |
| 429 // Store the swapped f4 and f5 back to memory. | 429 // Store the swapped f4 and f5 back to memory. |
| 430 __ sdc1(f4, MemOperand(a0, offsetof(T, a))); | 430 __ Sdc1(f4, MemOperand(a0, offsetof(T, a))); |
| 431 __ sdc1(f5, MemOperand(a0, offsetof(T, c))); | 431 __ Sdc1(f5, MemOperand(a0, offsetof(T, c))); |
| 432 | 432 |
| 433 // Test sign extension of move operations from coprocessor. | 433 // Test sign extension of move operations from coprocessor. |
| 434 __ ldc1(f4, MemOperand(a0, offsetof(T, d))); | 434 __ Ldc1(f4, MemOperand(a0, offsetof(T, d))); |
| 435 __ mfhc1(a4, f4); | 435 __ mfhc1(a4, f4); |
| 436 __ mfc1(a5, f4); | 436 __ mfc1(a5, f4); |
| 437 | 437 |
| 438 __ sd(a4, MemOperand(a0, offsetof(T, high))); | 438 __ Sd(a4, MemOperand(a0, offsetof(T, high))); |
| 439 __ sd(a5, MemOperand(a0, offsetof(T, low))); | 439 __ Sd(a5, MemOperand(a0, offsetof(T, low))); |
| 440 | 440 |
| 441 __ jr(ra); | 441 __ jr(ra); |
| 442 __ nop(); | 442 __ nop(); |
| 443 | 443 |
| 444 CodeDesc desc; | 444 CodeDesc desc; |
| 445 assm.GetCode(&desc); | 445 assm.GetCode(&desc); |
| 446 Handle<Code> code = isolate->factory()->NewCode( | 446 Handle<Code> code = isolate->factory()->NewCode( |
| 447 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); | 447 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); |
| 448 F3 f = FUNCTION_CAST<F3>(code->entry()); | 448 F3 f = FUNCTION_CAST<F3>(code->entry()); |
| 449 t.a = 1.5e22; | 449 t.a = 1.5e22; |
| (...skipping 18 matching lines...) |
| 468 HandleScope scope(isolate); | 468 HandleScope scope(isolate); |
| 469 | 469 |
| 470 typedef struct { | 470 typedef struct { |
| 471 double a; | 471 double a; |
| 472 double b; | 472 double b; |
| 473 int i; | 473 int i; |
| 474 int j; | 474 int j; |
| 475 } T; | 475 } T; |
| 476 T t; | 476 T t; |
| 477 | 477 |
| 478 Assembler assm(isolate, NULL, 0); | 478 MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes); |
| 479 Label L, C; | 479 Label L, C; |
| 480 | 480 |
| 481 // Load all structure elements to registers. | 481 // Load all structure elements to registers. |
| 482 __ ldc1(f4, MemOperand(a0, offsetof(T, a)) ); | 482 __ Ldc1(f4, MemOperand(a0, offsetof(T, a))); |
| 483 __ ldc1(f6, MemOperand(a0, offsetof(T, b)) ); | 483 __ Ldc1(f6, MemOperand(a0, offsetof(T, b))); |
| 484 __ lw(a4, MemOperand(a0, offsetof(T, i)) ); | 484 __ Lw(a4, MemOperand(a0, offsetof(T, i))); |
| 485 __ lw(a5, MemOperand(a0, offsetof(T, j)) ); | 485 __ Lw(a5, MemOperand(a0, offsetof(T, j))); |
| 486 | 486 |
| 487 // Convert double in f4 to int in element i. | 487 // Convert double in f4 to int in element i. |
| 488 __ cvt_w_d(f8, f4); | 488 __ cvt_w_d(f8, f4); |
| 489 __ mfc1(a6, f8); | 489 __ mfc1(a6, f8); |
| 490 __ sw(a6, MemOperand(a0, offsetof(T, i)) ); | 490 __ Sw(a6, MemOperand(a0, offsetof(T, i))); |
| 491 | 491 |
| 492 // Convert double in f6 to int in element j. | 492 // Convert double in f6 to int in element j. |
| 493 __ cvt_w_d(f10, f6); | 493 __ cvt_w_d(f10, f6); |
| 494 __ mfc1(a7, f10); | 494 __ mfc1(a7, f10); |
| 495 __ sw(a7, MemOperand(a0, offsetof(T, j)) ); | 495 __ Sw(a7, MemOperand(a0, offsetof(T, j))); |
| 496 | 496 |
| 497 // Convert int in original i (a4) to double in a. | 497 // Convert int in original i (a4) to double in a. |
| 498 __ mtc1(a4, f12); | 498 __ mtc1(a4, f12); |
| 499 __ cvt_d_w(f0, f12); | 499 __ cvt_d_w(f0, f12); |
| 500 __ sdc1(f0, MemOperand(a0, offsetof(T, a)) ); | 500 __ Sdc1(f0, MemOperand(a0, offsetof(T, a))); |
| 501 | 501 |
| 502 // Convert int in original j (a5) to double in b. | 502 // Convert int in original j (a5) to double in b. |
| 503 __ mtc1(a5, f14); | 503 __ mtc1(a5, f14); |
| 504 __ cvt_d_w(f2, f14); | 504 __ cvt_d_w(f2, f14); |
| 505 __ sdc1(f2, MemOperand(a0, offsetof(T, b)) ); | 505 __ Sdc1(f2, MemOperand(a0, offsetof(T, b))); |
| 506 | 506 |
| 507 __ jr(ra); | 507 __ jr(ra); |
| 508 __ nop(); | 508 __ nop(); |
| 509 | 509 |
| 510 CodeDesc desc; | 510 CodeDesc desc; |
| 511 assm.GetCode(&desc); | 511 assm.GetCode(&desc); |
| 512 Handle<Code> code = isolate->factory()->NewCode( | 512 Handle<Code> code = isolate->factory()->NewCode( |
| 513 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); | 513 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); |
| 514 F3 f = FUNCTION_CAST<F3>(code->entry()); | 514 F3 f = FUNCTION_CAST<F3>(code->entry()); |
| 515 t.a = 1.5e4; | 515 t.a = 1.5e4; |
| (...skipping 21 matching lines...) |
| 537 int32_t si; | 537 int32_t si; |
| 538 int32_t r1; | 538 int32_t r1; |
| 539 int32_t r2; | 539 int32_t r2; |
| 540 int32_t r3; | 540 int32_t r3; |
| 541 int32_t r4; | 541 int32_t r4; |
| 542 int32_t r5; | 542 int32_t r5; |
| 543 int32_t r6; | 543 int32_t r6; |
| 544 } T; | 544 } T; |
| 545 T t; | 545 T t; |
| 546 | 546 |
| 547 Assembler assm(isolate, NULL, 0); | 547 MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes); |
| 548 Label L, C; | 548 Label L, C; |
| 549 | 549 |
| 550 // Basic word load/store. | 550 // Basic word load/store. |
| 551 __ lw(a4, MemOperand(a0, offsetof(T, ui)) ); | 551 __ Lw(a4, MemOperand(a0, offsetof(T, ui))); |
| 552 __ sw(a4, MemOperand(a0, offsetof(T, r1)) ); | 552 __ Sw(a4, MemOperand(a0, offsetof(T, r1))); |
| 553 | 553 |
| 554 // lh with positive data. | 554 // lh with positive data. |
| 555 __ lh(a5, MemOperand(a0, offsetof(T, ui)) ); | 555 __ Lh(a5, MemOperand(a0, offsetof(T, ui))); |
| 556 __ sw(a5, MemOperand(a0, offsetof(T, r2)) ); | 556 __ Sw(a5, MemOperand(a0, offsetof(T, r2))); |
| 557 | 557 |
| 558 // lh with negative data. | 558 // lh with negative data. |
| 559 __ lh(a6, MemOperand(a0, offsetof(T, si)) ); | 559 __ Lh(a6, MemOperand(a0, offsetof(T, si))); |
| 560 __ sw(a6, MemOperand(a0, offsetof(T, r3)) ); | 560 __ Sw(a6, MemOperand(a0, offsetof(T, r3))); |
| 561 | 561 |
| 562 // lhu with negative data. | 562 // lhu with negative data. |
| 563 __ lhu(a7, MemOperand(a0, offsetof(T, si)) ); | 563 __ Lhu(a7, MemOperand(a0, offsetof(T, si))); |
| 564 __ sw(a7, MemOperand(a0, offsetof(T, r4)) ); | 564 __ Sw(a7, MemOperand(a0, offsetof(T, r4))); |
| 565 | 565 |
| 566 // lb with negative data. | 566 // Lb with negative data. |
| 567 __ lb(t0, MemOperand(a0, offsetof(T, si)) ); | 567 __ Lb(t0, MemOperand(a0, offsetof(T, si))); |
| 568 __ sw(t0, MemOperand(a0, offsetof(T, r5)) ); | 568 __ Sw(t0, MemOperand(a0, offsetof(T, r5))); |
| 569 | 569 |
| 570 // sh writes only 1/2 of word. | 570 // sh writes only 1/2 of word. |
| 571 __ lui(t1, 0x3333); | 571 __ lui(t1, 0x3333); |
| 572 __ ori(t1, t1, 0x3333); | 572 __ ori(t1, t1, 0x3333); |
| 573 __ sw(t1, MemOperand(a0, offsetof(T, r6)) ); | 573 __ Sw(t1, MemOperand(a0, offsetof(T, r6))); |
| 574 __ lhu(t1, MemOperand(a0, offsetof(T, si)) ); | 574 __ Lhu(t1, MemOperand(a0, offsetof(T, si))); |
| 575 __ sh(t1, MemOperand(a0, offsetof(T, r6)) ); | 575 __ Sh(t1, MemOperand(a0, offsetof(T, r6))); |
| 576 | 576 |
| 577 __ jr(ra); | 577 __ jr(ra); |
| 578 __ nop(); | 578 __ nop(); |
| 579 | 579 |
| 580 CodeDesc desc; | 580 CodeDesc desc; |
| 581 assm.GetCode(&desc); | 581 assm.GetCode(&desc); |
| 582 Handle<Code> code = isolate->factory()->NewCode( | 582 Handle<Code> code = isolate->factory()->NewCode( |
| 583 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); | 583 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); |
| 584 F3 f = FUNCTION_CAST<F3>(code->entry()); | 584 F3 f = FUNCTION_CAST<F3>(code->entry()); |
| 585 t.ui = 0x11223344; | 585 t.ui = 0x11223344; |
| (...skipping 33 matching lines...) |
| 619 double f; | 619 double f; |
| 620 int32_t result; | 620 int32_t result; |
| 621 } T; | 621 } T; |
| 622 T t; | 622 T t; |
| 623 | 623 |
| 624 // Create a function that accepts &t, and loads, manipulates, and stores | 624 // Create a function that accepts &t, and loads, manipulates, and stores |
| 625 // the doubles t.a ... t.f. | 625 // the doubles t.a ... t.f. |
| 626 MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes); | 626 MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes); |
| 627 Label neither_is_nan, less_than, outa_here; | 627 Label neither_is_nan, less_than, outa_here; |
| 628 | 628 |
| 629 __ ldc1(f4, MemOperand(a0, offsetof(T, a)) ); | 629 __ Ldc1(f4, MemOperand(a0, offsetof(T, a))); |
| 630 __ ldc1(f6, MemOperand(a0, offsetof(T, b)) ); | 630 __ Ldc1(f6, MemOperand(a0, offsetof(T, b))); |
| 631 if (kArchVariant != kMips64r6) { | 631 if (kArchVariant != kMips64r6) { |
| 632 __ c(UN, D, f4, f6); | 632 __ c(UN, D, f4, f6); |
| 633 __ bc1f(&neither_is_nan); | 633 __ bc1f(&neither_is_nan); |
| 634 } else { | 634 } else { |
| 635 __ cmp(UN, L, f2, f4, f6); | 635 __ cmp(UN, L, f2, f4, f6); |
| 636 __ bc1eqz(&neither_is_nan, f2); | 636 __ bc1eqz(&neither_is_nan, f2); |
| 637 } | 637 } |
| 638 __ nop(); | 638 __ nop(); |
| 639 __ sw(zero_reg, MemOperand(a0, offsetof(T, result)) ); | 639 __ Sw(zero_reg, MemOperand(a0, offsetof(T, result))); |
| 640 __ Branch(&outa_here); | 640 __ Branch(&outa_here); |
| 641 | 641 |
| 642 __ bind(&neither_is_nan); | 642 __ bind(&neither_is_nan); |
| 643 | 643 |
| 644 if (kArchVariant == kMips64r6) { | 644 if (kArchVariant == kMips64r6) { |
| 645 __ cmp(OLT, L, f2, f6, f4); | 645 __ cmp(OLT, L, f2, f6, f4); |
| 646 __ bc1nez(&less_than, f2); | 646 __ bc1nez(&less_than, f2); |
| 647 } else { | 647 } else { |
| 648 __ c(OLT, D, f6, f4, 2); | 648 __ c(OLT, D, f6, f4, 2); |
| 649 __ bc1t(&less_than, 2); | 649 __ bc1t(&less_than, 2); |
| 650 } | 650 } |
| 651 | 651 |
| 652 __ nop(); | 652 __ nop(); |
| 653 __ sw(zero_reg, MemOperand(a0, offsetof(T, result)) ); | 653 __ Sw(zero_reg, MemOperand(a0, offsetof(T, result))); |
| 654 __ Branch(&outa_here); | 654 __ Branch(&outa_here); |
| 655 | 655 |
| 656 __ bind(&less_than); | 656 __ bind(&less_than); |
| 657 __ Addu(a4, zero_reg, Operand(1)); | 657 __ Addu(a4, zero_reg, Operand(1)); |
| 658 __ sw(a4, MemOperand(a0, offsetof(T, result)) ); // Set true. | 658 __ Sw(a4, MemOperand(a0, offsetof(T, result))); // Set true. |
| 659 | |
| 660 | 659 |
| 661 // This test-case should have additional tests. | 660 // This test-case should have additional tests. |
| 662 | 661 |
| 663 __ bind(&outa_here); | 662 __ bind(&outa_here); |
| 664 | 663 |
| 665 __ jr(ra); | 664 __ jr(ra); |
| 666 __ nop(); | 665 __ nop(); |
| 667 | 666 |
| 668 CodeDesc desc; | 667 CodeDesc desc; |
| 669 assm.GetCode(&desc); | 668 assm.GetCode(&desc); |
| (...skipping 38 matching lines...) |
| 708 int32_t result_rotrv_20; | 707 int32_t result_rotrv_20; |
| 709 int32_t result_rotrv_24; | 708 int32_t result_rotrv_24; |
| 710 int32_t result_rotrv_28; | 709 int32_t result_rotrv_28; |
| 711 } T; | 710 } T; |
| 712 T t; | 711 T t; |
| 713 | 712 |
| 714 MacroAssembler assm(isolate, NULL, 0, | 713 MacroAssembler assm(isolate, NULL, 0, |
| 715 v8::internal::CodeObjectRequired::kYes); | 714 v8::internal::CodeObjectRequired::kYes); |
| 716 | 715 |
| 717 // Basic word load. | 716 // Basic word load. |
| 718 __ lw(a4, MemOperand(a0, offsetof(T, input)) ); | 717 __ Lw(a4, MemOperand(a0, offsetof(T, input))); |
| 719 | 718 |
| 720 // ROTR instruction (called through the Ror macro). | 719 // ROTR instruction (called through the Ror macro). |
| 721 __ Ror(a5, a4, 0x0004); | 720 __ Ror(a5, a4, 0x0004); |
| 722 __ Ror(a6, a4, 0x0008); | 721 __ Ror(a6, a4, 0x0008); |
| 723 __ Ror(a7, a4, 0x000c); | 722 __ Ror(a7, a4, 0x000c); |
| 724 __ Ror(t0, a4, 0x0010); | 723 __ Ror(t0, a4, 0x0010); |
| 725 __ Ror(t1, a4, 0x0014); | 724 __ Ror(t1, a4, 0x0014); |
| 726 __ Ror(t2, a4, 0x0018); | 725 __ Ror(t2, a4, 0x0018); |
| 727 __ Ror(t3, a4, 0x001c); | 726 __ Ror(t3, a4, 0x001c); |
| 728 | 727 |
| 729 // Basic word store. | 728 // Basic word store. |
| 730 __ sw(a5, MemOperand(a0, offsetof(T, result_rotr_4)) ); | 729 __ Sw(a5, MemOperand(a0, offsetof(T, result_rotr_4))); |
| 731 __ sw(a6, MemOperand(a0, offsetof(T, result_rotr_8)) ); | 730 __ Sw(a6, MemOperand(a0, offsetof(T, result_rotr_8))); |
| 732 __ sw(a7, MemOperand(a0, offsetof(T, result_rotr_12)) ); | 731 __ Sw(a7, MemOperand(a0, offsetof(T, result_rotr_12))); |
| 733 __ sw(t0, MemOperand(a0, offsetof(T, result_rotr_16)) ); | 732 __ Sw(t0, MemOperand(a0, offsetof(T, result_rotr_16))); |
| 734 __ sw(t1, MemOperand(a0, offsetof(T, result_rotr_20)) ); | 733 __ Sw(t1, MemOperand(a0, offsetof(T, result_rotr_20))); |
| 735 __ sw(t2, MemOperand(a0, offsetof(T, result_rotr_24)) ); | 734 __ Sw(t2, MemOperand(a0, offsetof(T, result_rotr_24))); |
| 736 __ sw(t3, MemOperand(a0, offsetof(T, result_rotr_28)) ); | 735 __ Sw(t3, MemOperand(a0, offsetof(T, result_rotr_28))); |
| 737 | 736 |
| 738 // ROTRV instruction (called through the Ror macro). | 737 // ROTRV instruction (called through the Ror macro). |
| 739 __ li(t3, 0x0004); | 738 __ li(t3, 0x0004); |
| 740 __ Ror(a5, a4, t3); | 739 __ Ror(a5, a4, t3); |
| 741 __ li(t3, 0x0008); | 740 __ li(t3, 0x0008); |
| 742 __ Ror(a6, a4, t3); | 741 __ Ror(a6, a4, t3); |
| 743 __ li(t3, 0x000C); | 742 __ li(t3, 0x000C); |
| 744 __ Ror(a7, a4, t3); | 743 __ Ror(a7, a4, t3); |
| 745 __ li(t3, 0x0010); | 744 __ li(t3, 0x0010); |
| 746 __ Ror(t0, a4, t3); | 745 __ Ror(t0, a4, t3); |
| 747 __ li(t3, 0x0014); | 746 __ li(t3, 0x0014); |
| 748 __ Ror(t1, a4, t3); | 747 __ Ror(t1, a4, t3); |
| 749 __ li(t3, 0x0018); | 748 __ li(t3, 0x0018); |
| 750 __ Ror(t2, a4, t3); | 749 __ Ror(t2, a4, t3); |
| 751 __ li(t3, 0x001C); | 750 __ li(t3, 0x001C); |
| 752 __ Ror(t3, a4, t3); | 751 __ Ror(t3, a4, t3); |
| 753 | 752 |
| 754 // Basic word store. | 753 // Basic word store. |
| 755 __ sw(a5, MemOperand(a0, offsetof(T, result_rotrv_4)) ); | 754 __ Sw(a5, MemOperand(a0, offsetof(T, result_rotrv_4))); |
| 756 __ sw(a6, MemOperand(a0, offsetof(T, result_rotrv_8)) ); | 755 __ Sw(a6, MemOperand(a0, offsetof(T, result_rotrv_8))); |
| 757 __ sw(a7, MemOperand(a0, offsetof(T, result_rotrv_12)) ); | 756 __ Sw(a7, MemOperand(a0, offsetof(T, result_rotrv_12))); |
| 758 __ sw(t0, MemOperand(a0, offsetof(T, result_rotrv_16)) ); | 757 __ Sw(t0, MemOperand(a0, offsetof(T, result_rotrv_16))); |
| 759 __ sw(t1, MemOperand(a0, offsetof(T, result_rotrv_20)) ); | 758 __ Sw(t1, MemOperand(a0, offsetof(T, result_rotrv_20))); |
| 760 __ sw(t2, MemOperand(a0, offsetof(T, result_rotrv_24)) ); | 759 __ Sw(t2, MemOperand(a0, offsetof(T, result_rotrv_24))); |
| 761 __ sw(t3, MemOperand(a0, offsetof(T, result_rotrv_28)) ); | 760 __ Sw(t3, MemOperand(a0, offsetof(T, result_rotrv_28))); |
| 762 | 761 |
| 763 __ jr(ra); | 762 __ jr(ra); |
| 764 __ nop(); | 763 __ nop(); |
| 765 | 764 |
| 766 CodeDesc desc; | 765 CodeDesc desc; |
| 767 assm.GetCode(&desc); | 766 assm.GetCode(&desc); |
| 768 Handle<Code> code = isolate->factory()->NewCode( | 767 Handle<Code> code = isolate->factory()->NewCode( |
| 769 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); | 768 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); |
| 770 F3 f = FUNCTION_CAST<F3>(code->entry()); | 769 F3 f = FUNCTION_CAST<F3>(code->entry()); |
| 771 t.input = 0x12345678; | 770 t.input = 0x12345678; |
| (...skipping 59 matching lines...) |
| 831 int32_t dbl_exp; | 830 int32_t dbl_exp; |
| 832 int32_t long_hi; | 831 int32_t long_hi; |
| 833 int32_t long_lo; | 832 int32_t long_lo; |
| 834 int64_t long_as_int64; | 833 int64_t long_as_int64; |
| 835 int32_t b_long_hi; | 834 int32_t b_long_hi; |
| 836 int32_t b_long_lo; | 835 int32_t b_long_lo; |
| 837 int64_t b_long_as_int64; | 836 int64_t b_long_as_int64; |
| 838 } T; | 837 } T; |
| 839 T t; | 838 T t; |
| 840 | 839 |
| 841 Assembler assm(isolate, NULL, 0); | 840 MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes); |
| 842 Label L, C; | 841 Label L, C; |
| 843 | 842 |
| 844 if (kArchVariant == kMips64r2) { | 843 if (kArchVariant == kMips64r2) { |
| 845 // Rewritten for FR=1 FPU mode: | 844 // Rewritten for FR=1 FPU mode: |
| 846 // - 32 FP regs of 64-bits each, no odd/even pairs. | 845 // - 32 FP regs of 64-bits each, no odd/even pairs. |
| 847 // - Note that cvt_l_d/cvt_d_l ARE legal in FR=1 mode. | 846 // - Note that cvt_l_d/cvt_d_l ARE legal in FR=1 mode. |
| 848 // Load all structure elements to registers. | 847 // Load all structure elements to registers. |
| 849 __ ldc1(f0, MemOperand(a0, offsetof(T, a))); | 848 __ Ldc1(f0, MemOperand(a0, offsetof(T, a))); |
| 850 | 849 |
| 851 // Save the raw bits of the double. | 850 // Save the raw bits of the double. |
| 852 __ mfc1(a4, f0); | 851 __ mfc1(a4, f0); |
| 853 __ mfhc1(a5, f0); | 852 __ mfhc1(a5, f0); |
| 854 __ sw(a4, MemOperand(a0, offsetof(T, dbl_mant))); | 853 __ Sw(a4, MemOperand(a0, offsetof(T, dbl_mant))); |
| 855 __ sw(a5, MemOperand(a0, offsetof(T, dbl_exp))); | 854 __ Sw(a5, MemOperand(a0, offsetof(T, dbl_exp))); |
| 856 | 855 |
| 857 // Convert double in f0 to long, save hi/lo parts. | 856 // Convert double in f0 to long, save hi/lo parts. |
| 858 __ cvt_l_d(f0, f0); | 857 __ cvt_l_d(f0, f0); |
| 859 __ mfc1(a4, f0); // f0 LS 32 bits of long. | 858 __ mfc1(a4, f0); // f0 LS 32 bits of long. |
| 860 __ mfhc1(a5, f0); // f0 MS 32 bits of long. | 859 __ mfhc1(a5, f0); // f0 MS 32 bits of long. |
| 861 __ sw(a4, MemOperand(a0, offsetof(T, long_lo))); | 860 __ Sw(a4, MemOperand(a0, offsetof(T, long_lo))); |
| 862 __ sw(a5, MemOperand(a0, offsetof(T, long_hi))); | 861 __ Sw(a5, MemOperand(a0, offsetof(T, long_hi))); |
| 863 | 862 |
| 864 // Combine the high/low ints, convert back to double. | 863 // Combine the high/low ints, convert back to double. |
| 865 __ dsll32(a6, a5, 0); // Move a5 to high bits of a6. | 864 __ dsll32(a6, a5, 0); // Move a5 to high bits of a6. |
| 866 __ or_(a6, a6, a4); | 865 __ or_(a6, a6, a4); |
| 867 __ dmtc1(a6, f1); | 866 __ dmtc1(a6, f1); |
| 868 __ cvt_d_l(f1, f1); | 867 __ cvt_d_l(f1, f1); |
| 869 __ sdc1(f1, MemOperand(a0, offsetof(T, a_converted))); | 868 __ Sdc1(f1, MemOperand(a0, offsetof(T, a_converted))); |
| 870 | |
| 871 | 869 |
| 872 // Convert the b long integers to double b. | 870 // Convert the b long integers to double b. |
| 873 __ lw(a4, MemOperand(a0, offsetof(T, b_long_lo))); | 871 __ Lw(a4, MemOperand(a0, offsetof(T, b_long_lo))); |
| 874 __ lw(a5, MemOperand(a0, offsetof(T, b_long_hi))); | 872 __ Lw(a5, MemOperand(a0, offsetof(T, b_long_hi))); |
| 875 __ mtc1(a4, f8); // f8 LS 32-bits. | 873 __ mtc1(a4, f8); // f8 LS 32-bits. |
| 876 __ mthc1(a5, f8); // f8 MS 32-bits. | 874 __ mthc1(a5, f8); // f8 MS 32-bits. |
| 877 __ cvt_d_l(f10, f8); | 875 __ cvt_d_l(f10, f8); |
| 878 __ sdc1(f10, MemOperand(a0, offsetof(T, b))); | 876 __ Sdc1(f10, MemOperand(a0, offsetof(T, b))); |
| 879 | 877 |
| 880 // Convert double b back to long-int. | 878 // Convert double b back to long-int. |
| 881 __ ldc1(f31, MemOperand(a0, offsetof(T, b))); | 879 __ Ldc1(f31, MemOperand(a0, offsetof(T, b))); |
| 882 __ cvt_l_d(f31, f31); | 880 __ cvt_l_d(f31, f31); |
| 883 __ dmfc1(a7, f31); | 881 __ dmfc1(a7, f31); |
| 884 __ sd(a7, MemOperand(a0, offsetof(T, b_long_as_int64))); | 882 __ Sd(a7, MemOperand(a0, offsetof(T, b_long_as_int64))); |
| 885 | |
| 886 | 883 |
| 887 __ jr(ra); | 884 __ jr(ra); |
| 888 __ nop(); | 885 __ nop(); |
| 889 | 886 |
| 890 CodeDesc desc; | 887 CodeDesc desc; |
| 891 assm.GetCode(&desc); | 888 assm.GetCode(&desc); |
| 892 Handle<Code> code = isolate->factory()->NewCode( | 889 Handle<Code> code = isolate->factory()->NewCode( |
| 893 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); | 890 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); |
| 894 F3 f = FUNCTION_CAST<F3>(code->entry()); | 891 F3 f = FUNCTION_CAST<F3>(code->entry()); |
| 895 t.a = 2.147483647e9; // 0x7fffffff -> 0x41DFFFFFFFC00000 as double. | 892 t.a = 2.147483647e9; // 0x7fffffff -> 0x41DFFFFFFFC00000 as double. |
| (...skipping 38 matching lines...) |
| 934 int32_t swl_1; | 931 int32_t swl_1; |
| 935 int32_t swl_2; | 932 int32_t swl_2; |
| 936 int32_t swl_3; | 933 int32_t swl_3; |
| 937 int32_t swr_0; | 934 int32_t swr_0; |
| 938 int32_t swr_1; | 935 int32_t swr_1; |
| 939 int32_t swr_2; | 936 int32_t swr_2; |
| 940 int32_t swr_3; | 937 int32_t swr_3; |
| 941 } T; | 938 } T; |
| 942 T t; | 939 T t; |
| 943 | 940 |
| 944 Assembler assm(isolate, NULL, 0); | 941 MacroAssembler assm(isolate, NULL, 0, |
| | 942 v8::internal::CodeObjectRequired::kYes); |
| 945 | 943 |
| 946 // Test all combinations of LWL and vAddr. | 944 // Test all combinations of LWL and vAddr. |
| 947 __ lw(a4, MemOperand(a0, offsetof(T, reg_init))); | 945 __ Lw(a4, MemOperand(a0, offsetof(T, reg_init))); |
| 948 __ lwl(a4, MemOperand(a0, offsetof(T, mem_init))); | 946 __ lwl(a4, MemOperand(a0, offsetof(T, mem_init))); |
| 949 __ sw(a4, MemOperand(a0, offsetof(T, lwl_0))); | 947 __ Sw(a4, MemOperand(a0, offsetof(T, lwl_0))); |
| 950 | 948 |
| 951 __ lw(a5, MemOperand(a0, offsetof(T, reg_init))); | 949 __ Lw(a5, MemOperand(a0, offsetof(T, reg_init))); |
| 952 __ lwl(a5, MemOperand(a0, offsetof(T, mem_init) + 1)); | 950 __ lwl(a5, MemOperand(a0, offsetof(T, mem_init) + 1)); |
| 953 __ sw(a5, MemOperand(a0, offsetof(T, lwl_1))); | 951 __ Sw(a5, MemOperand(a0, offsetof(T, lwl_1))); |
| 954 | 952 |
| 955 __ lw(a6, MemOperand(a0, offsetof(T, reg_init))); | 953 __ Lw(a6, MemOperand(a0, offsetof(T, reg_init))); |
| 956 __ lwl(a6, MemOperand(a0, offsetof(T, mem_init) + 2)); | 954 __ lwl(a6, MemOperand(a0, offsetof(T, mem_init) + 2)); |
| 957 __ sw(a6, MemOperand(a0, offsetof(T, lwl_2))); | 955 __ Sw(a6, MemOperand(a0, offsetof(T, lwl_2))); |
| 958 | 956 |
| 959 __ lw(a7, MemOperand(a0, offsetof(T, reg_init))); | 957 __ Lw(a7, MemOperand(a0, offsetof(T, reg_init))); |
| 960 __ lwl(a7, MemOperand(a0, offsetof(T, mem_init) + 3)); | 958 __ lwl(a7, MemOperand(a0, offsetof(T, mem_init) + 3)); |
| 961 __ sw(a7, MemOperand(a0, offsetof(T, lwl_3))); | 959 __ Sw(a7, MemOperand(a0, offsetof(T, lwl_3))); |
| 962 | 960 |
| 963 // Test all combinations of LWR and vAddr. | 961 // Test all combinations of LWR and vAddr. |
| 964 __ lw(a4, MemOperand(a0, offsetof(T, reg_init))); | 962 __ Lw(a4, MemOperand(a0, offsetof(T, reg_init))); |
| 965 __ lwr(a4, MemOperand(a0, offsetof(T, mem_init))); | 963 __ lwr(a4, MemOperand(a0, offsetof(T, mem_init))); |
| 966 __ sw(a4, MemOperand(a0, offsetof(T, lwr_0))); | 964 __ Sw(a4, MemOperand(a0, offsetof(T, lwr_0))); |
| 967 | 965 |
| 968 __ lw(a5, MemOperand(a0, offsetof(T, reg_init))); | 966 __ Lw(a5, MemOperand(a0, offsetof(T, reg_init))); |
| 969 __ lwr(a5, MemOperand(a0, offsetof(T, mem_init) + 1)); | 967 __ lwr(a5, MemOperand(a0, offsetof(T, mem_init) + 1)); |
| 970 __ sw(a5, MemOperand(a0, offsetof(T, lwr_1))); | 968 __ Sw(a5, MemOperand(a0, offsetof(T, lwr_1))); |
| 971 | 969 |
| 972 __ lw(a6, MemOperand(a0, offsetof(T, reg_init))); | 970 __ Lw(a6, MemOperand(a0, offsetof(T, reg_init))); |
| 973 __ lwr(a6, MemOperand(a0, offsetof(T, mem_init) + 2)); | 971 __ lwr(a6, MemOperand(a0, offsetof(T, mem_init) + 2)); |
| 974 __ sw(a6, MemOperand(a0, offsetof(T, lwr_2)) ); | 972 __ Sw(a6, MemOperand(a0, offsetof(T, lwr_2))); |
| 975 | 973 |
| 976 __ lw(a7, MemOperand(a0, offsetof(T, reg_init))); | 974 __ Lw(a7, MemOperand(a0, offsetof(T, reg_init))); |
| 977 __ lwr(a7, MemOperand(a0, offsetof(T, mem_init) + 3)); | 975 __ lwr(a7, MemOperand(a0, offsetof(T, mem_init) + 3)); |
| 978 __ sw(a7, MemOperand(a0, offsetof(T, lwr_3)) ); | 976 __ Sw(a7, MemOperand(a0, offsetof(T, lwr_3))); |
| 979 | 977 |
| 980 // Test all combinations of SWL and vAddr. | 978 // Test all combinations of SWL and vAddr. |
| 981 __ lw(a4, MemOperand(a0, offsetof(T, mem_init))); | 979 __ Lw(a4, MemOperand(a0, offsetof(T, mem_init))); |
| 982 __ sw(a4, MemOperand(a0, offsetof(T, swl_0))); | 980 __ Sw(a4, MemOperand(a0, offsetof(T, swl_0))); |
| 983 __ lw(a4, MemOperand(a0, offsetof(T, reg_init))); | 981 __ Lw(a4, MemOperand(a0, offsetof(T, reg_init))); |
| 984 __ swl(a4, MemOperand(a0, offsetof(T, swl_0))); | 982 __ swl(a4, MemOperand(a0, offsetof(T, swl_0))); |
| 985 | 983 |
| 986 __ lw(a5, MemOperand(a0, offsetof(T, mem_init))); | 984 __ Lw(a5, MemOperand(a0, offsetof(T, mem_init))); |
| 987 __ sw(a5, MemOperand(a0, offsetof(T, swl_1))); | 985 __ Sw(a5, MemOperand(a0, offsetof(T, swl_1))); |
| 988 __ lw(a5, MemOperand(a0, offsetof(T, reg_init))); | 986 __ Lw(a5, MemOperand(a0, offsetof(T, reg_init))); |
| 989 __ swl(a5, MemOperand(a0, offsetof(T, swl_1) + 1)); | 987 __ swl(a5, MemOperand(a0, offsetof(T, swl_1) + 1)); |
| 990 | 988 |
| 991 __ lw(a6, MemOperand(a0, offsetof(T, mem_init))); | 989 __ Lw(a6, MemOperand(a0, offsetof(T, mem_init))); |
| 992 __ sw(a6, MemOperand(a0, offsetof(T, swl_2))); | 990 __ Sw(a6, MemOperand(a0, offsetof(T, swl_2))); |
| 993 __ lw(a6, MemOperand(a0, offsetof(T, reg_init))); | 991 __ Lw(a6, MemOperand(a0, offsetof(T, reg_init))); |
| 994 __ swl(a6, MemOperand(a0, offsetof(T, swl_2) + 2)); | 992 __ swl(a6, MemOperand(a0, offsetof(T, swl_2) + 2)); |
| 995 | 993 |
| 996 __ lw(a7, MemOperand(a0, offsetof(T, mem_init))); | 994 __ Lw(a7, MemOperand(a0, offsetof(T, mem_init))); |
| 997 __ sw(a7, MemOperand(a0, offsetof(T, swl_3))); | 995 __ Sw(a7, MemOperand(a0, offsetof(T, swl_3))); |
| 998 __ lw(a7, MemOperand(a0, offsetof(T, reg_init))); | 996 __ Lw(a7, MemOperand(a0, offsetof(T, reg_init))); |
| 999 __ swl(a7, MemOperand(a0, offsetof(T, swl_3) + 3)); | 997 __ swl(a7, MemOperand(a0, offsetof(T, swl_3) + 3)); |
| 1000 | 998 |
| 1001 // Test all combinations of SWR and vAddr. | 999 // Test all combinations of SWR and vAddr. |
| 1002 __ lw(a4, MemOperand(a0, offsetof(T, mem_init))); | 1000 __ Lw(a4, MemOperand(a0, offsetof(T, mem_init))); |
| 1003 __ sw(a4, MemOperand(a0, offsetof(T, swr_0))); | 1001 __ Sw(a4, MemOperand(a0, offsetof(T, swr_0))); |
| 1004 __ lw(a4, MemOperand(a0, offsetof(T, reg_init))); | 1002 __ Lw(a4, MemOperand(a0, offsetof(T, reg_init))); |
| 1005 __ swr(a4, MemOperand(a0, offsetof(T, swr_0))); | 1003 __ swr(a4, MemOperand(a0, offsetof(T, swr_0))); |
| 1006 | 1004 |
| 1007 __ lw(a5, MemOperand(a0, offsetof(T, mem_init))); | 1005 __ Lw(a5, MemOperand(a0, offsetof(T, mem_init))); |
| 1008 __ sw(a5, MemOperand(a0, offsetof(T, swr_1))); | 1006 __ Sw(a5, MemOperand(a0, offsetof(T, swr_1))); |
| 1009 __ lw(a5, MemOperand(a0, offsetof(T, reg_init))); | 1007 __ Lw(a5, MemOperand(a0, offsetof(T, reg_init))); |
| 1010 __ swr(a5, MemOperand(a0, offsetof(T, swr_1) + 1)); | 1008 __ swr(a5, MemOperand(a0, offsetof(T, swr_1) + 1)); |
| 1011 | 1009 |
| 1012 __ lw(a6, MemOperand(a0, offsetof(T, mem_init))); | 1010 __ Lw(a6, MemOperand(a0, offsetof(T, mem_init))); |
| 1013 __ sw(a6, MemOperand(a0, offsetof(T, swr_2))); | 1011 __ Sw(a6, MemOperand(a0, offsetof(T, swr_2))); |
| 1014 __ lw(a6, MemOperand(a0, offsetof(T, reg_init))); | 1012 __ Lw(a6, MemOperand(a0, offsetof(T, reg_init))); |
| 1015 __ swr(a6, MemOperand(a0, offsetof(T, swr_2) + 2)); | 1013 __ swr(a6, MemOperand(a0, offsetof(T, swr_2) + 2)); |
| 1016 | 1014 |
| 1017 __ lw(a7, MemOperand(a0, offsetof(T, mem_init))); | 1015 __ Lw(a7, MemOperand(a0, offsetof(T, mem_init))); |
| 1018 __ sw(a7, MemOperand(a0, offsetof(T, swr_3))); | 1016 __ Sw(a7, MemOperand(a0, offsetof(T, swr_3))); |
| 1019 __ lw(a7, MemOperand(a0, offsetof(T, reg_init))); | 1017 __ Lw(a7, MemOperand(a0, offsetof(T, reg_init))); |
| 1020 __ swr(a7, MemOperand(a0, offsetof(T, swr_3) + 3)); | 1018 __ swr(a7, MemOperand(a0, offsetof(T, swr_3) + 3)); |
| 1021 | 1019 |
| 1022 __ jr(ra); | 1020 __ jr(ra); |
| 1023 __ nop(); | 1021 __ nop(); |
| 1024 | 1022 |
| 1025 CodeDesc desc; | 1023 CodeDesc desc; |
| 1026 assm.GetCode(&desc); | 1024 assm.GetCode(&desc); |
| 1027 Handle<Code> code = isolate->factory()->NewCode( | 1025 Handle<Code> code = isolate->factory()->NewCode( |
| 1028 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); | 1026 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); |
| 1029 F3 f = FUNCTION_CAST<F3>(code->entry()); | 1027 F3 f = FUNCTION_CAST<F3>(code->entry()); |
| (...skipping 60 matching lines...) |
| 1090 int32_t y2; | 1088 int32_t y2; |
| 1091 int32_t y3; | 1089 int32_t y3; |
| 1092 int32_t y4; | 1090 int32_t y4; |
| 1093 } T; | 1091 } T; |
| 1094 T t; | 1092 T t; |
| 1095 | 1093 |
| 1096 MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes); | 1094 MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes); |
| 1097 | 1095 |
| 1098 __ mov(t2, fp); // Save frame pointer. | 1096 __ mov(t2, fp); // Save frame pointer. |
| 1099 __ mov(fp, a0); // Access struct T by fp. | 1097 __ mov(fp, a0); // Access struct T by fp. |
| 1100 __ lw(a4, MemOperand(a0, offsetof(T, y))); | 1098 __ Lw(a4, MemOperand(a0, offsetof(T, y))); |
| 1101 __ lw(a7, MemOperand(a0, offsetof(T, y4))); | 1099 __ Lw(a7, MemOperand(a0, offsetof(T, y4))); |
| 1102 | 1100 |
| 1103 __ addu(a5, a4, a7); | 1101 __ addu(a5, a4, a7); |
| 1104 __ subu(t0, a4, a7); | 1102 __ subu(t0, a4, a7); |
| 1105 __ nop(); | 1103 __ nop(); |
| 1106 __ push(a4); // These instructions disappear after opt. | 1104 __ push(a4); // These instructions disappear after opt. |
| 1107 __ Pop(); | 1105 __ Pop(); |
| 1108 __ addu(a4, a4, a4); | 1106 __ addu(a4, a4, a4); |
| 1109 __ nop(); | 1107 __ nop(); |
| 1110 __ Pop(); // These instructions disappear after opt. | 1108 __ Pop(); // These instructions disappear after opt. |
| 1111 __ push(a7); | 1109 __ push(a7); |
| 1112 __ nop(); | 1110 __ nop(); |
| 1113 __ push(a7); // These instructions disappear after opt. | 1111 __ push(a7); // These instructions disappear after opt. |
| 1114 __ pop(a7); | 1112 __ pop(a7); |
| 1115 __ nop(); | 1113 __ nop(); |
| 1116 __ push(a7); | 1114 __ push(a7); |
| 1117 __ pop(t0); | 1115 __ pop(t0); |
| 1118 __ nop(); | 1116 __ nop(); |
| 1119 __ sw(a4, MemOperand(fp, offsetof(T, y))); | 1117 __ Sw(a4, MemOperand(fp, offsetof(T, y))); |
| 1120 __ lw(a4, MemOperand(fp, offsetof(T, y))); | 1118 __ Lw(a4, MemOperand(fp, offsetof(T, y))); |
| 1121 __ nop(); | 1119 __ nop(); |
| 1122 __ sw(a4, MemOperand(fp, offsetof(T, y))); | 1120 __ Sw(a4, MemOperand(fp, offsetof(T, y))); |
| 1123 __ lw(a5, MemOperand(fp, offsetof(T, y))); | 1121 __ Lw(a5, MemOperand(fp, offsetof(T, y))); |
| 1124 __ nop(); | 1122 __ nop(); |
| 1125 __ push(a5); | 1123 __ push(a5); |
| 1126 __ lw(a5, MemOperand(fp, offsetof(T, y))); | 1124 __ Lw(a5, MemOperand(fp, offsetof(T, y))); |
| 1127 __ pop(a5); | 1125 __ pop(a5); |
| 1128 __ nop(); | 1126 __ nop(); |
| 1129 __ push(a5); | 1127 __ push(a5); |
| 1130 __ lw(a6, MemOperand(fp, offsetof(T, y))); | 1128 __ Lw(a6, MemOperand(fp, offsetof(T, y))); |
| 1131 __ pop(a5); | 1129 __ pop(a5); |
| 1132 __ nop(); | 1130 __ nop(); |
| 1133 __ push(a5); | 1131 __ push(a5); |
| 1134 __ lw(a6, MemOperand(fp, offsetof(T, y))); | 1132 __ Lw(a6, MemOperand(fp, offsetof(T, y))); |
| 1135 __ pop(a6); | 1133 __ pop(a6); |
| 1136 __ nop(); | 1134 __ nop(); |
| 1137 __ push(a6); | 1135 __ push(a6); |
| 1138 __ lw(a6, MemOperand(fp, offsetof(T, y))); | 1136 __ Lw(a6, MemOperand(fp, offsetof(T, y))); |
| 1139 __ pop(a5); | 1137 __ pop(a5); |
| 1140 __ nop(); | 1138 __ nop(); |
| 1141 __ push(a5); | 1139 __ push(a5); |
| 1142 __ lw(a6, MemOperand(fp, offsetof(T, y))); | 1140 __ Lw(a6, MemOperand(fp, offsetof(T, y))); |
| 1143 __ pop(a7); | 1141 __ pop(a7); |
| 1144 __ nop(); | 1142 __ nop(); |
| 1145 | 1143 |
| 1146 __ mov(fp, t2); | 1144 __ mov(fp, t2); |
| 1147 __ jr(ra); | 1145 __ jr(ra); |
| 1148 __ nop(); | 1146 __ nop(); |
| 1149 | 1147 |
| 1150 CodeDesc desc; | 1148 CodeDesc desc; |
| 1151 assm.GetCode(&desc); | 1149 assm.GetCode(&desc); |
| 1152 Handle<Code> code = isolate->factory()->NewCode( | 1150 Handle<Code> code = isolate->factory()->NewCode( |
| (...skipping 24 matching lines...) |
| 1177 double cvt_small_out; | 1175 double cvt_small_out; |
| 1178 uint32_t trunc_big_out; | 1176 uint32_t trunc_big_out; |
| 1179 uint32_t trunc_small_out; | 1177 uint32_t trunc_small_out; |
| 1180 uint32_t cvt_big_in; | 1178 uint32_t cvt_big_in; |
| 1181 uint32_t cvt_small_in; | 1179 uint32_t cvt_small_in; |
| 1182 } T; | 1180 } T; |
| 1183 T t; | 1181 T t; |
| 1184 | 1182 |
| 1185 MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes); | 1183 MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes); |
| 1186 | 1184 |
| 1187 __ sw(a4, MemOperand(a0, offsetof(T, cvt_small_in))); | 1185 __ Sw(a4, MemOperand(a0, offsetof(T, cvt_small_in))); |
| 1188 __ Cvt_d_uw(f10, a4); | 1186 __ Cvt_d_uw(f10, a4); |
| 1189 __ sdc1(f10, MemOperand(a0, offsetof(T, cvt_small_out))); | 1187 __ Sdc1(f10, MemOperand(a0, offsetof(T, cvt_small_out))); |
| 1190 | 1188 |
| 1191 __ Trunc_uw_d(f10, f10, f4); | 1189 __ Trunc_uw_d(f10, f10, f4); |
| 1192 __ swc1(f10, MemOperand(a0, offsetof(T, trunc_small_out))); | 1190 __ Swc1(f10, MemOperand(a0, offsetof(T, trunc_small_out))); |
| 1193 | 1191 |
| 1194 __ sw(a4, MemOperand(a0, offsetof(T, cvt_big_in))); | 1192 __ Sw(a4, MemOperand(a0, offsetof(T, cvt_big_in))); |
| 1195 __ Cvt_d_uw(f8, a4); | 1193 __ Cvt_d_uw(f8, a4); |
| 1196 __ sdc1(f8, MemOperand(a0, offsetof(T, cvt_big_out))); | 1194 __ Sdc1(f8, MemOperand(a0, offsetof(T, cvt_big_out))); |
| 1197 | 1195 |
| 1198 __ Trunc_uw_d(f8, f8, f4); | 1196 __ Trunc_uw_d(f8, f8, f4); |
| 1199 __ swc1(f8, MemOperand(a0, offsetof(T, trunc_big_out))); | 1197 __ Swc1(f8, MemOperand(a0, offsetof(T, trunc_big_out))); |
| 1200 | 1198 |
| 1201 __ jr(ra); | 1199 __ jr(ra); |
| 1202 __ nop(); | 1200 __ nop(); |
| 1203 | 1201 |
| 1204 CodeDesc desc; | 1202 CodeDesc desc; |
| 1205 assm.GetCode(&desc); | 1203 assm.GetCode(&desc); |
| 1206 Handle<Code> code = isolate->factory()->NewCode( | 1204 Handle<Code> code = isolate->factory()->NewCode( |
| 1207 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); | 1205 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); |
| 1208 F3 f = FUNCTION_CAST<F3>(code->entry()); | 1206 F3 f = FUNCTION_CAST<F3>(code->entry()); |
| 1209 | 1207 |
| (...skipping 49 matching lines...) |
| 1259 T t; | 1257 T t; |
| 1260 | 1258 |
| 1261 #undef ROUND_STRUCT_ELEMENT | 1259 #undef ROUND_STRUCT_ELEMENT |
| 1262 | 1260 |
| 1263 MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes); | 1261 MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes); |
| 1264 | 1262 |
| 1265 // Save FCSR. | 1263 // Save FCSR. |
| 1266 __ cfc1(a1, FCSR); | 1264 __ cfc1(a1, FCSR); |
| 1267 // Disable FPU exceptions. | 1265 // Disable FPU exceptions. |
| 1268 __ ctc1(zero_reg, FCSR); | 1266 __ ctc1(zero_reg, FCSR); |
| 1269 #define RUN_ROUND_TEST(x) \ | 1267 #define RUN_ROUND_TEST(x) \ |
| 1270 __ cfc1(t0, FCSR);\ | 1268 __ cfc1(t0, FCSR); \ |
| 1271 __ sw(t0, MemOperand(a0, offsetof(T, x##_isNaN2008))); \ | 1269 __ Sw(t0, MemOperand(a0, offsetof(T, x##_isNaN2008))); \ |
| 1272 __ ldc1(f0, MemOperand(a0, offsetof(T, round_up_in))); \ | 1270 __ Ldc1(f0, MemOperand(a0, offsetof(T, round_up_in))); \ |
| 1273 __ x##_w_d(f0, f0); \ | 1271 __ x##_w_d(f0, f0); \ |
| 1274 __ swc1(f0, MemOperand(a0, offsetof(T, x##_up_out))); \ | 1272 __ Swc1(f0, MemOperand(a0, offsetof(T, x##_up_out))); \ |
| 1275 \ | 1273 \ |
| 1276 __ ldc1(f0, MemOperand(a0, offsetof(T, round_down_in))); \ | 1274 __ Ldc1(f0, MemOperand(a0, offsetof(T, round_down_in))); \ |
| 1277 __ x##_w_d(f0, f0); \ | 1275 __ x##_w_d(f0, f0); \ |
| 1278 __ swc1(f0, MemOperand(a0, offsetof(T, x##_down_out))); \ | 1276 __ Swc1(f0, MemOperand(a0, offsetof(T, x##_down_out))); \ |
| 1279 \ | 1277 \ |
| 1280 __ ldc1(f0, MemOperand(a0, offsetof(T, neg_round_up_in))); \ | 1278 __ Ldc1(f0, MemOperand(a0, offsetof(T, neg_round_up_in))); \ |
| 1281 __ x##_w_d(f0, f0); \ | 1279 __ x##_w_d(f0, f0); \ |
| 1282 __ swc1(f0, MemOperand(a0, offsetof(T, neg_##x##_up_out))); \ | 1280 __ Swc1(f0, MemOperand(a0, offsetof(T, neg_##x##_up_out))); \ |
| 1283 \ | 1281 \ |
| 1284 __ ldc1(f0, MemOperand(a0, offsetof(T, neg_round_down_in))); \ | 1282 __ Ldc1(f0, MemOperand(a0, offsetof(T, neg_round_down_in))); \ |
| 1285 __ x##_w_d(f0, f0); \ | 1283 __ x##_w_d(f0, f0); \ |
| 1286 __ swc1(f0, MemOperand(a0, offsetof(T, neg_##x##_down_out))); \ | 1284 __ Swc1(f0, MemOperand(a0, offsetof(T, neg_##x##_down_out))); \ |
| 1287 \ | 1285 \ |
| 1288 __ ldc1(f0, MemOperand(a0, offsetof(T, err1_in))); \ | 1286 __ Ldc1(f0, MemOperand(a0, offsetof(T, err1_in))); \ |
| 1289 __ ctc1(zero_reg, FCSR); \ | 1287 __ ctc1(zero_reg, FCSR); \ |
| 1290 __ x##_w_d(f0, f0); \ | 1288 __ x##_w_d(f0, f0); \ |
| 1291 __ cfc1(a2, FCSR); \ | 1289 __ cfc1(a2, FCSR); \ |
| 1292 __ sw(a2, MemOperand(a0, offsetof(T, x##_err1_out))); \ | 1290 __ Sw(a2, MemOperand(a0, offsetof(T, x##_err1_out))); \ |
| 1293 \ | 1291 \ |
| 1294 __ ldc1(f0, MemOperand(a0, offsetof(T, err2_in))); \ | 1292 __ Ldc1(f0, MemOperand(a0, offsetof(T, err2_in))); \ |
| 1295 __ ctc1(zero_reg, FCSR); \ | 1293 __ ctc1(zero_reg, FCSR); \ |
| 1296 __ x##_w_d(f0, f0); \ | 1294 __ x##_w_d(f0, f0); \ |
| 1297 __ cfc1(a2, FCSR); \ | 1295 __ cfc1(a2, FCSR); \ |
| 1298 __ sw(a2, MemOperand(a0, offsetof(T, x##_err2_out))); \ | 1296 __ Sw(a2, MemOperand(a0, offsetof(T, x##_err2_out))); \ |
| 1299 \ | 1297 \ |
| 1300 __ ldc1(f0, MemOperand(a0, offsetof(T, err3_in))); \ | 1298 __ Ldc1(f0, MemOperand(a0, offsetof(T, err3_in))); \ |
| 1301 __ ctc1(zero_reg, FCSR); \ | 1299 __ ctc1(zero_reg, FCSR); \ |
| 1302 __ x##_w_d(f0, f0); \ | 1300 __ x##_w_d(f0, f0); \ |
| 1303 __ cfc1(a2, FCSR); \ | 1301 __ cfc1(a2, FCSR); \ |
| 1304 __ sw(a2, MemOperand(a0, offsetof(T, x##_err3_out))); \ | 1302 __ Sw(a2, MemOperand(a0, offsetof(T, x##_err3_out))); \ |
| 1305 \ | 1303 \ |
| 1306 __ ldc1(f0, MemOperand(a0, offsetof(T, err4_in))); \ | 1304 __ Ldc1(f0, MemOperand(a0, offsetof(T, err4_in))); \ |
| 1307 __ ctc1(zero_reg, FCSR); \ | 1305 __ ctc1(zero_reg, FCSR); \ |
| 1308 __ x##_w_d(f0, f0); \ | 1306 __ x##_w_d(f0, f0); \ |
| 1309 __ cfc1(a2, FCSR); \ | 1307 __ cfc1(a2, FCSR); \ |
| 1310 __ sw(a2, MemOperand(a0, offsetof(T, x##_err4_out))); \ | 1308 __ Sw(a2, MemOperand(a0, offsetof(T, x##_err4_out))); \ |
| 1311 __ swc1(f0, MemOperand(a0, offsetof(T, x##_invalid_result))); | 1309 __ Swc1(f0, MemOperand(a0, offsetof(T, x##_invalid_result))); |
| 1312 | 1310 |
| 1313 RUN_ROUND_TEST(round) | 1311 RUN_ROUND_TEST(round) |
| 1314 RUN_ROUND_TEST(floor) | 1312 RUN_ROUND_TEST(floor) |
| 1315 RUN_ROUND_TEST(ceil) | 1313 RUN_ROUND_TEST(ceil) |
| 1316 RUN_ROUND_TEST(trunc) | 1314 RUN_ROUND_TEST(trunc) |
| 1317 RUN_ROUND_TEST(cvt) | 1315 RUN_ROUND_TEST(cvt) |
| 1318 | 1316 |
| 1319 // Restore FCSR. | 1317 // Restore FCSR. |
| 1320 __ ctc1(a1, FCSR); | 1318 __ ctc1(a1, FCSR); |
| 1321 | 1319 |
| (...skipping 74 matching lines...) |
| 1396 int64_t r8; | 1394 int64_t r8; |
| 1397 int64_t r9; | 1395 int64_t r9; |
| 1398 int64_t r10; | 1396 int64_t r10; |
| 1399 int64_t r11; | 1397 int64_t r11; |
| 1400 int64_t r12; | 1398 int64_t r12; |
| 1401 uint32_t ui; | 1399 uint32_t ui; |
| 1402 int32_t si; | 1400 int32_t si; |
| 1403 }; | 1401 }; |
| 1404 T t; | 1402 T t; |
| 1405 | 1403 |
| 1406 Assembler assm(isolate, NULL, 0); | 1404 MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes); |
| 1407 Label L, C; | 1405 Label L, C; |
| 1408 | 1406 |
| 1409 // Basic 32-bit word load/store, with un-signed data. | 1407 // Basic 32-bit word load/store, with un-signed data. |
| 1410 __ lw(a4, MemOperand(a0, offsetof(T, ui))); | 1408 __ Lw(a4, MemOperand(a0, offsetof(T, ui))); |
| 1411 __ sw(a4, MemOperand(a0, offsetof(T, r1))); | 1409 __ Sw(a4, MemOperand(a0, offsetof(T, r1))); |
| 1412 | 1410 |
| 1413 // Check that the data got zero-extended into 64-bit a4. | 1411 // Check that the data got zero-extended into 64-bit a4. |
| 1414 __ sd(a4, MemOperand(a0, offsetof(T, r2))); | 1412 __ Sd(a4, MemOperand(a0, offsetof(T, r2))); |
| 1415 | 1413 |
| 1416 // Basic 32-bit word load/store, with SIGNED data. | 1414 // Basic 32-bit word load/store, with SIGNED data. |
| 1417 __ lw(a5, MemOperand(a0, offsetof(T, si))); | 1415 __ Lw(a5, MemOperand(a0, offsetof(T, si))); |
| 1418 __ sw(a5, MemOperand(a0, offsetof(T, r3))); | 1416 __ Sw(a5, MemOperand(a0, offsetof(T, r3))); |
| 1419 | 1417 |
| 1420 // Check that the data got sign-extended into 64-bit a4. | 1418 // Check that the data got sign-extended into 64-bit a4. |
| 1421 __ sd(a5, MemOperand(a0, offsetof(T, r4))); | 1419 __ Sd(a5, MemOperand(a0, offsetof(T, r4))); |
| 1422 | 1420 |
| 1423 // 32-bit UNSIGNED word load/store, with SIGNED data. | 1421 // 32-bit UNSIGNED word load/store, with SIGNED data. |
| 1424 __ lwu(a6, MemOperand(a0, offsetof(T, si))); | 1422 __ Lwu(a6, MemOperand(a0, offsetof(T, si))); |
| 1425 __ sw(a6, MemOperand(a0, offsetof(T, r5))); | 1423 __ Sw(a6, MemOperand(a0, offsetof(T, r5))); |
| 1426 | 1424 |
| 1427 // Check that the data got zero-extended into 64-bit a4. | 1425 // Check that the data got zero-extended into 64-bit a4. |
| 1428 __ sd(a6, MemOperand(a0, offsetof(T, r6))); | 1426 __ Sd(a6, MemOperand(a0, offsetof(T, r6))); |
| 1429 | 1427 |
| 1430 // lh with positive data. | 1428 // lh with positive data. |
| 1431 __ lh(a5, MemOperand(a0, offsetof(T, ui))); | 1429 __ Lh(a5, MemOperand(a0, offsetof(T, ui))); |
| 1432 __ sw(a5, MemOperand(a0, offsetof(T, r7))); | 1430 __ Sw(a5, MemOperand(a0, offsetof(T, r7))); |
| 1433 | 1431 |
| 1434 // lh with negative data. | 1432 // lh with negative data. |
| 1435 __ lh(a6, MemOperand(a0, offsetof(T, si))); | 1433 __ Lh(a6, MemOperand(a0, offsetof(T, si))); |
| 1436 __ sw(a6, MemOperand(a0, offsetof(T, r8))); | 1434 __ Sw(a6, MemOperand(a0, offsetof(T, r8))); |
| 1437 | 1435 |
| 1438 // lhu with negative data. | 1436 // lhu with negative data. |
| 1439 __ lhu(a7, MemOperand(a0, offsetof(T, si))); | 1437 __ Lhu(a7, MemOperand(a0, offsetof(T, si))); |
| 1440 __ sw(a7, MemOperand(a0, offsetof(T, r9))); | 1438 __ Sw(a7, MemOperand(a0, offsetof(T, r9))); |
| 1441 | 1439 |
| 1442 // lb with negative data. | 1440 // Lb with negative data. |
| 1443 __ lb(t0, MemOperand(a0, offsetof(T, si))); | 1441 __ Lb(t0, MemOperand(a0, offsetof(T, si))); |
| 1444 __ sw(t0, MemOperand(a0, offsetof(T, r10))); | 1442 __ Sw(t0, MemOperand(a0, offsetof(T, r10))); |
| 1445 | 1443 |
| 1446 // sh writes only 1/2 of word. | 1444 // sh writes only 1/2 of word. |
| 1447 __ lw(a4, MemOperand(a0, offsetof(T, ui))); | 1445 __ Lw(a4, MemOperand(a0, offsetof(T, ui))); |
| 1448 __ sh(a4, MemOperand(a0, offsetof(T, r11))); | 1446 __ Sh(a4, MemOperand(a0, offsetof(T, r11))); |
| 1449 __ lw(a4, MemOperand(a0, offsetof(T, si))); | 1447 __ Lw(a4, MemOperand(a0, offsetof(T, si))); |
| 1450 __ sh(a4, MemOperand(a0, offsetof(T, r12))); | 1448 __ Sh(a4, MemOperand(a0, offsetof(T, r12))); |
| 1451 | 1449 |
| 1452 __ jr(ra); | 1450 __ jr(ra); |
| 1453 __ nop(); | 1451 __ nop(); |
| 1454 | 1452 |
| 1455 CodeDesc desc; | 1453 CodeDesc desc; |
| 1456 assm.GetCode(&desc); | 1454 assm.GetCode(&desc); |
| 1457 Handle<Code> code = isolate->factory()->NewCode( | 1455 Handle<Code> code = isolate->factory()->NewCode( |
| 1458 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); | 1456 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); |
| 1459 F3 f = FUNCTION_CAST<F3>(code->entry()); | 1457 F3 f = FUNCTION_CAST<F3>(code->entry()); |
| 1460 t.ui = 0x44332211; | 1458 t.ui = 0x44332211; |
| (...skipping 91 matching lines...) |
| 1552 float i; | 1550 float i; |
| 1553 float j; | 1551 float j; |
| 1554 float k; | 1552 float k; |
| 1555 float l; | 1553 float l; |
| 1556 } Test; | 1554 } Test; |
| 1557 | 1555 |
| 1558 Test test; | 1556 Test test; |
| 1559 // Integer part of test. | 1557 // Integer part of test. |
| 1560 __ addiu(t1, zero_reg, 1); // t1 = 1 | 1558 __ addiu(t1, zero_reg, 1); // t1 = 1 |
| 1561 __ seleqz(t3, t1, zero_reg); // t3 = 1 | 1559 __ seleqz(t3, t1, zero_reg); // t3 = 1 |
| 1562 __ sw(t3, MemOperand(a0, offsetof(Test, a))); // a = 1 | 1560 __ Sw(t3, MemOperand(a0, offsetof(Test, a))); // a = 1 |
| 1563 __ seleqz(t2, t1, t1); // t2 = 0 | 1561 __ seleqz(t2, t1, t1); // t2 = 0 |
| 1564 __ sw(t2, MemOperand(a0, offsetof(Test, b))); // b = 0 | 1562 __ Sw(t2, MemOperand(a0, offsetof(Test, b))); // b = 0 |
| 1565 __ selnez(t3, t1, zero_reg); // t3 = 1; | 1563 __ selnez(t3, t1, zero_reg); // t3 = 1; |
| 1566 __ sw(t3, MemOperand(a0, offsetof(Test, c))); // c = 0 | 1564 __ Sw(t3, MemOperand(a0, offsetof(Test, c))); // c = 0 |
| 1567 __ selnez(t3, t1, t1); // t3 = 1 | 1565 __ selnez(t3, t1, t1); // t3 = 1 |
| 1568 __ sw(t3, MemOperand(a0, offsetof(Test, d))); // d = 1 | 1566 __ Sw(t3, MemOperand(a0, offsetof(Test, d))); // d = 1 |
| 1569 // Floating point part of test. | 1567 // Floating point part of test. |
| 1570 __ ldc1(f0, MemOperand(a0, offsetof(Test, e)) ); // src | 1568 __ Ldc1(f0, MemOperand(a0, offsetof(Test, e))); // src |
| 1571 __ ldc1(f2, MemOperand(a0, offsetof(Test, f)) ); // test | 1569 __ Ldc1(f2, MemOperand(a0, offsetof(Test, f))); // test |
| 1572 __ lwc1(f8, MemOperand(a0, offsetof(Test, i)) ); // src | 1570 __ Lwc1(f8, MemOperand(a0, offsetof(Test, i))); // src |
| 1573 __ lwc1(f10, MemOperand(a0, offsetof(Test, j)) ); // test | 1571 __ Lwc1(f10, MemOperand(a0, offsetof(Test, j))); // test |
| 1574 __ seleqz_d(f4, f0, f2); | 1572 __ seleqz_d(f4, f0, f2); |
| 1575 __ selnez_d(f6, f0, f2); | 1573 __ selnez_d(f6, f0, f2); |
| 1576 __ seleqz_s(f12, f8, f10); | 1574 __ seleqz_s(f12, f8, f10); |
| 1577 __ selnez_s(f14, f8, f10); | 1575 __ selnez_s(f14, f8, f10); |
| 1578 __ sdc1(f4, MemOperand(a0, offsetof(Test, g)) ); // src | 1576 __ Sdc1(f4, MemOperand(a0, offsetof(Test, g))); // src |
| 1579 __ sdc1(f6, MemOperand(a0, offsetof(Test, h)) ); // src | 1577 __ Sdc1(f6, MemOperand(a0, offsetof(Test, h))); // src |
| 1580 __ swc1(f12, MemOperand(a0, offsetof(Test, k)) ); // src | 1578 __ Swc1(f12, MemOperand(a0, offsetof(Test, k))); // src |
| 1581 __ swc1(f14, MemOperand(a0, offsetof(Test, l)) ); // src | 1579 __ Swc1(f14, MemOperand(a0, offsetof(Test, l))); // src |
| 1582 __ jr(ra); | 1580 __ jr(ra); |
| 1583 __ nop(); | 1581 __ nop(); |
| 1584 CodeDesc desc; | 1582 CodeDesc desc; |
| 1585 assm.GetCode(&desc); | 1583 assm.GetCode(&desc); |
| 1586 Handle<Code> code = isolate->factory()->NewCode( | 1584 Handle<Code> code = isolate->factory()->NewCode( |
| 1587 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); | 1585 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); |
| 1588 F3 f = FUNCTION_CAST<F3>(code->entry()); | 1586 F3 f = FUNCTION_CAST<F3>(code->entry()); |
| 1589 | 1587 |
| 1590 (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0)); | 1588 (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0)); |
| 1591 | 1589 |
| (...skipping 83 matching lines...) |
| 1675 float inputse[kTableLength] = {2.0, 3.0, fnan, 3.0, -0.0, 0.0, finf, | 1673 float inputse[kTableLength] = {2.0, 3.0, fnan, 3.0, -0.0, 0.0, finf, |
| 1676 fnan, 42.0, finf, fminf, finf, fnan}; | 1674 fnan, 42.0, finf, fminf, finf, fnan}; |
| 1677 float inputsf[kTableLength] = {3.0, 2.0, 3.0, fnan, 0.0, -0.0, fnan, | 1675 float inputsf[kTableLength] = {3.0, 2.0, 3.0, fnan, 0.0, -0.0, fnan, |
| 1678 finf, finf, 42.0, finf, fminf, fnan}; | 1676 finf, finf, 42.0, finf, fminf, fnan}; |
| 1679 float outputsfmin[kTableLength] = {2.0, 2.0, 3.0, 3.0, -0.0, | 1677 float outputsfmin[kTableLength] = {2.0, 2.0, 3.0, 3.0, -0.0, |
| 1680 -0.0, finf, finf, 42.0, 42.0, | 1678 -0.0, finf, finf, 42.0, 42.0, |
| 1681 fminf, fminf, fnan}; | 1679 fminf, fminf, fnan}; |
| 1682 float outputsfmax[kTableLength] = {3.0, 3.0, 3.0, 3.0, 0.0, 0.0, finf, | 1680 float outputsfmax[kTableLength] = {3.0, 3.0, 3.0, 3.0, 0.0, 0.0, finf, |
| 1683 finf, finf, finf, finf, finf, fnan}; | 1681 finf, finf, finf, finf, finf, fnan}; |
| 1684 | 1682 |
| 1685 __ ldc1(f4, MemOperand(a0, offsetof(TestFloat, a))); | 1683 __ Ldc1(f4, MemOperand(a0, offsetof(TestFloat, a))); |
| 1686 __ ldc1(f8, MemOperand(a0, offsetof(TestFloat, b))); | 1684 __ Ldc1(f8, MemOperand(a0, offsetof(TestFloat, b))); |
| 1687 __ lwc1(f2, MemOperand(a0, offsetof(TestFloat, e))); | 1685 __ Lwc1(f2, MemOperand(a0, offsetof(TestFloat, e))); |
| 1688 __ lwc1(f6, MemOperand(a0, offsetof(TestFloat, f))); | 1686 __ Lwc1(f6, MemOperand(a0, offsetof(TestFloat, f))); |
| 1689 __ min_d(f10, f4, f8); | 1687 __ min_d(f10, f4, f8); |
| 1690 __ max_d(f12, f4, f8); | 1688 __ max_d(f12, f4, f8); |
| 1691 __ min_s(f14, f2, f6); | 1689 __ min_s(f14, f2, f6); |
| 1692 __ max_s(f16, f2, f6); | 1690 __ max_s(f16, f2, f6); |
| 1693 __ sdc1(f10, MemOperand(a0, offsetof(TestFloat, c))); | 1691 __ Sdc1(f10, MemOperand(a0, offsetof(TestFloat, c))); |
| 1694 __ sdc1(f12, MemOperand(a0, offsetof(TestFloat, d))); | 1692 __ Sdc1(f12, MemOperand(a0, offsetof(TestFloat, d))); |
| 1695 __ swc1(f14, MemOperand(a0, offsetof(TestFloat, g))); | 1693 __ Swc1(f14, MemOperand(a0, offsetof(TestFloat, g))); |
| 1696 __ swc1(f16, MemOperand(a0, offsetof(TestFloat, h))); | 1694 __ Swc1(f16, MemOperand(a0, offsetof(TestFloat, h))); |
| 1697 __ jr(ra); | 1695 __ jr(ra); |
| 1698 __ nop(); | 1696 __ nop(); |
| 1699 | 1697 |
| 1700 CodeDesc desc; | 1698 CodeDesc desc; |
| 1701 assm.GetCode(&desc); | 1699 assm.GetCode(&desc); |
| 1702 Handle<Code> code = isolate->factory()->NewCode( | 1700 Handle<Code> code = isolate->factory()->NewCode( |
| 1703 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); | 1701 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); |
| 1704 F3 f = FUNCTION_CAST<F3>(code->entry()); | 1702 F3 f = FUNCTION_CAST<F3>(code->entry()); |
| 1705 for (int i = 4; i < kTableLength; i++) { | 1703 for (int i = 4; i < kTableLength; i++) { |
| 1706 test.a = inputsa[i]; | 1704 test.a = inputsa[i]; |
| (...skipping 84 matching lines...) |
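Note: the outputsfmin/outputsfmax tables above follow minNum/maxNum-style NaN handling: a lone NaN operand is ignored in favour of the numeric one, and NaN only propagates when both operands are NaN. A hedged reference model of min_d as the expected values use it (hypothetical helper; signed zeros handled as in the -0.0/0.0 rows):

    #include <cmath>

    double MinD(double fs, double ft) {
      if (std::isnan(fs)) return ft;                     // NaN wins only if both are NaN
      if (std::isnan(ft)) return fs;
      if (fs == ft) return std::signbit(fs) ? fs : ft;   // min prefers -0.0 over +0.0
      return fs < ft ? fs : ft;                          // max_d mirrors this with >
    }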
| 1791 2.0, 2.0, 2.0, 3.0, 3.0, 3.0, | 1789 2.0, 2.0, 2.0, 3.0, 3.0, 3.0, |
| 1792 -3.0, -3.0, -3.0, -4.0, -4.0, -4.0, | 1790 -3.0, -3.0, -3.0, -4.0, -4.0, -4.0, |
| 1793 37778931862957161709568.0, 37778931862957161709569.0, | 1791 37778931862957161709568.0, 37778931862957161709569.0, |
| 1794 37778931862957161709580.0, 37778931862957161709581.0, | 1792 37778931862957161709580.0, 37778931862957161709581.0, |
| 1795 37778931862957161709582.0, 37778931862957161709583.0, | 1793 37778931862957161709582.0, 37778931862957161709583.0, |
| 1796 37778931862957161709584.0, 37778931862957161709585.0, | 1794 37778931862957161709584.0, 37778931862957161709585.0, |
| 1797 37778931862957161709586.0, 37778931862957161709587.0}; | 1795 37778931862957161709586.0, 37778931862957161709587.0}; |
| 1798 int fcsr_inputs[4] = | 1796 int fcsr_inputs[4] = |
| 1799 {kRoundToNearest, kRoundToZero, kRoundToPlusInf, kRoundToMinusInf}; | 1797 {kRoundToNearest, kRoundToZero, kRoundToPlusInf, kRoundToMinusInf}; |
| 1800 double* outputs[4] = {outputs_RN, outputs_RZ, outputs_RP, outputs_RM}; | 1798 double* outputs[4] = {outputs_RN, outputs_RZ, outputs_RP, outputs_RM}; |
| 1801 __ ldc1(f4, MemOperand(a0, offsetof(TestFloat, a)) ); | 1799 __ Ldc1(f4, MemOperand(a0, offsetof(TestFloat, a))); |
| 1802 __ lw(t0, MemOperand(a0, offsetof(TestFloat, fcsr)) ); | 1800 __ Lw(t0, MemOperand(a0, offsetof(TestFloat, fcsr))); |
| 1803 __ ctc1(t0, FCSR); | 1801 __ ctc1(t0, FCSR); |
| 1804 __ rint_d(f8, f4); | 1802 __ rint_d(f8, f4); |
| 1805 __ sdc1(f8, MemOperand(a0, offsetof(TestFloat, b)) ); | 1803 __ Sdc1(f8, MemOperand(a0, offsetof(TestFloat, b))); |
| 1806 __ jr(ra); | 1804 __ jr(ra); |
| 1807 __ nop(); | 1805 __ nop(); |
| 1808 | 1806 |
| 1809 CodeDesc desc; | 1807 CodeDesc desc; |
| 1810 assm.GetCode(&desc); | 1808 assm.GetCode(&desc); |
| 1811 Handle<Code> code = isolate->factory()->NewCode( | 1809 Handle<Code> code = isolate->factory()->NewCode( |
| 1812 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); | 1810 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); |
| 1813 F3 f = FUNCTION_CAST<F3>(code->entry()); | 1811 F3 f = FUNCTION_CAST<F3>(code->entry()); |
| 1814 | 1812 |
| 1815 for (int j = 0; j < 4; j++) { | 1813 for (int j = 0; j < 4; j++) { |
| (...skipping 19 matching lines...) |
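Note: the ctc1 write above installs one of the four FCSR rounding modes before rint_d runs, so the same input is rounded differently on each pass of the outer loop (some of the later tests also save and restore the original FCSR with cfc1). A rough host-side analogue, assuming the C runtime modes map onto kRoundToNearest/Zero/PlusInf/MinusInf:

    #include <cfenv>
    #include <cmath>

    // Round x to an integral value under an explicitly selected rounding mode,
    // restoring the previous mode afterwards.
    double RintWithMode(double x, int mode /* FE_TONEAREST, FE_TOWARDZERO, ... */) {
      int saved = std::fegetround();
      std::fesetround(mode);
      double r = std::nearbyint(x);
      std::fesetround(saved);
      return r;
    }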
| 1835 typedef struct test { | 1833 typedef struct test { |
| 1836 double dd; | 1834 double dd; |
| 1837 double ds; | 1835 double ds; |
| 1838 double dt; | 1836 double dt; |
| 1839 float fd; | 1837 float fd; |
| 1840 float fs; | 1838 float fs; |
| 1841 float ft; | 1839 float ft; |
| 1842 } Test; | 1840 } Test; |
| 1843 | 1841 |
| 1844 Test test; | 1842 Test test; |
| 1845 __ ldc1(f0, MemOperand(a0, offsetof(Test, dd)) ); // test | 1843 __ Ldc1(f0, MemOperand(a0, offsetof(Test, dd))); // test |
| 1846 __ ldc1(f2, MemOperand(a0, offsetof(Test, ds)) ); // src1 | 1844 __ Ldc1(f2, MemOperand(a0, offsetof(Test, ds))); // src1 |
| 1847 __ ldc1(f4, MemOperand(a0, offsetof(Test, dt)) ); // src2 | 1845 __ Ldc1(f4, MemOperand(a0, offsetof(Test, dt))); // src2 |
| 1848 __ lwc1(f6, MemOperand(a0, offsetof(Test, fd)) ); // test | 1846 __ Lwc1(f6, MemOperand(a0, offsetof(Test, fd))); // test |
| 1849 __ lwc1(f8, MemOperand(a0, offsetof(Test, fs)) ); // src1 | 1847 __ Lwc1(f8, MemOperand(a0, offsetof(Test, fs))); // src1 |
| 1850 __ lwc1(f10, MemOperand(a0, offsetof(Test, ft)) ); // src2 | 1848 __ Lwc1(f10, MemOperand(a0, offsetof(Test, ft))); // src2 |
| 1851 __ sel_d(f0, f2, f4); | 1849 __ sel_d(f0, f2, f4); |
| 1852 __ sel_s(f6, f8, f10); | 1850 __ sel_s(f6, f8, f10); |
| 1853 __ sdc1(f0, MemOperand(a0, offsetof(Test, dd)) ); | 1851 __ Sdc1(f0, MemOperand(a0, offsetof(Test, dd))); |
| 1854 __ swc1(f6, MemOperand(a0, offsetof(Test, fd)) ); | 1852 __ Swc1(f6, MemOperand(a0, offsetof(Test, fd))); |
| 1855 __ jr(ra); | 1853 __ jr(ra); |
| 1856 __ nop(); | 1854 __ nop(); |
| 1857 CodeDesc desc; | 1855 CodeDesc desc; |
| 1858 assm.GetCode(&desc); | 1856 assm.GetCode(&desc); |
| 1859 Handle<Code> code = isolate->factory()->NewCode( | 1857 Handle<Code> code = isolate->factory()->NewCode( |
| 1860 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); | 1858 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); |
| 1861 F3 f = FUNCTION_CAST<F3>(code->entry()); | 1859 F3 f = FUNCTION_CAST<F3>(code->entry()); |
| 1862 | 1860 |
| 1863 const int test_size = 3; | 1861 const int test_size = 3; |
| 1864 const int input_size = 5; | 1862 const int input_size = 5; |
| (...skipping 105 matching lines...) |
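Note: sel_d/sel_s above are the r6 three-operand selects, where bit 0 of the destination register's current contents picks one of the two sources. A minimal model (hypothetical helper, double flavour only):

    #include <cstdint>
    #include <cstring>

    // Sketch of sel.d: fd is both an input (its low bit is the condition)
    // and the output (a copy of fs or ft).
    double SelD(double fd_in, double fs, double ft) {
      uint64_t bits;
      std::memcpy(&bits, &fd_in, sizeof(bits));
      return (bits & 1) != 0 ? ft : fs;
    }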
| 1970 2.0, 2.0, 2.0, 3.0, 3.0, 3.0, | 1968 2.0, 2.0, 2.0, 3.0, 3.0, 3.0, |
| 1971 -3.0, -3.0, -3.0, -4.0, -4.0, -4.0, | 1969 -3.0, -3.0, -3.0, -4.0, -4.0, -4.0, |
| 1972 37778931862957161709568.0, 37778931862957161709569.0, | 1970 37778931862957161709568.0, 37778931862957161709569.0, |
| 1973 37778931862957161709580.0, 37778931862957161709581.0, | 1971 37778931862957161709580.0, 37778931862957161709581.0, |
| 1974 37778931862957161709582.0, 37778931862957161709583.0, | 1972 37778931862957161709582.0, 37778931862957161709583.0, |
| 1975 37778931862957161709584.0, 37778931862957161709585.0, | 1973 37778931862957161709584.0, 37778931862957161709585.0, |
| 1976 37778931862957161709586.0, 37778931862957161709587.0}; | 1974 37778931862957161709586.0, 37778931862957161709587.0}; |
| 1977 int fcsr_inputs[4] = | 1975 int fcsr_inputs[4] = |
| 1978 {kRoundToNearest, kRoundToZero, kRoundToPlusInf, kRoundToMinusInf}; | 1976 {kRoundToNearest, kRoundToZero, kRoundToPlusInf, kRoundToMinusInf}; |
| 1979 float* outputs[4] = {outputs_RN, outputs_RZ, outputs_RP, outputs_RM}; | 1977 float* outputs[4] = {outputs_RN, outputs_RZ, outputs_RP, outputs_RM}; |
| 1980 __ lwc1(f4, MemOperand(a0, offsetof(TestFloat, a)) ); | 1978 __ Lwc1(f4, MemOperand(a0, offsetof(TestFloat, a))); |
| 1981 __ lw(t0, MemOperand(a0, offsetof(TestFloat, fcsr)) ); | 1979 __ Lw(t0, MemOperand(a0, offsetof(TestFloat, fcsr))); |
| 1982 __ cfc1(t1, FCSR); | 1980 __ cfc1(t1, FCSR); |
| 1983 __ ctc1(t0, FCSR); | 1981 __ ctc1(t0, FCSR); |
| 1984 __ rint_s(f8, f4); | 1982 __ rint_s(f8, f4); |
| 1985 __ swc1(f8, MemOperand(a0, offsetof(TestFloat, b)) ); | 1983 __ Swc1(f8, MemOperand(a0, offsetof(TestFloat, b))); |
| 1986 __ ctc1(t1, FCSR); | 1984 __ ctc1(t1, FCSR); |
| 1987 __ jr(ra); | 1985 __ jr(ra); |
| 1988 __ nop(); | 1986 __ nop(); |
| 1989 | 1987 |
| 1990 CodeDesc desc; | 1988 CodeDesc desc; |
| 1991 assm.GetCode(&desc); | 1989 assm.GetCode(&desc); |
| 1992 Handle<Code> code = isolate->factory()->NewCode( | 1990 Handle<Code> code = isolate->factory()->NewCode( |
| 1993 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); | 1991 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); |
| 1994 F3 f = FUNCTION_CAST<F3>(code->entry()); | 1992 F3 f = FUNCTION_CAST<F3>(code->entry()); |
| 1995 | 1993 |
| (...skipping 55 matching lines...) |
| 2051 9.8, 9.8, 9.8, -9.8, -11.2, -9.8, | 2049 9.8, 9.8, 9.8, -9.8, -11.2, -9.8, |
| 2052 3.0, fnan, -0.0, 0.0, fnan, finf, | 2050 3.0, fnan, -0.0, 0.0, fnan, finf, |
| 2053 finf, 42.0, finf, fminf, fnan}; | 2051 finf, 42.0, finf, fminf, fnan}; |
| 2054 float resf[kTableLength] = { | 2052 float resf[kTableLength] = { |
| 2055 4.8, 4.8, 6.1, 9.8, -8.9, -9.8, 9.8, -8.9, -9.8, -9.8, -8.9, -9.8, | 2053 4.8, 4.8, 6.1, 9.8, -8.9, -9.8, 9.8, -8.9, -9.8, -9.8, -8.9, -9.8, |
| 2056 3.0, 3.0, -0.0, -0.0, finf, finf, 42.0, 42.0, fminf, fminf, fnan}; | 2054 3.0, 3.0, -0.0, -0.0, finf, finf, 42.0, 42.0, fminf, fminf, fnan}; |
| 2057 float resf1[kTableLength] = { | 2055 float resf1[kTableLength] = { |
| 2058 5.3, 5.3, 6.1, -10.0, 9.8, 9.8, -10.0, 9.8, 9.8, -10.0, -11.2, -9.8, | 2056 5.3, 5.3, 6.1, -10.0, 9.8, 9.8, -10.0, 9.8, 9.8, -10.0, -11.2, -9.8, |
| 2059 3.0, 3.0, 0.0, 0.0, finf, finf, finf, finf, finf, finf, fnan}; | 2057 3.0, 3.0, 0.0, 0.0, finf, finf, finf, finf, finf, finf, fnan}; |
| 2060 | 2058 |
| 2061 __ ldc1(f2, MemOperand(a0, offsetof(TestFloat, a)) ); | 2059 __ Ldc1(f2, MemOperand(a0, offsetof(TestFloat, a))); |
| 2062 __ ldc1(f4, MemOperand(a0, offsetof(TestFloat, b)) ); | 2060 __ Ldc1(f4, MemOperand(a0, offsetof(TestFloat, b))); |
| 2063 __ lwc1(f8, MemOperand(a0, offsetof(TestFloat, c)) ); | 2061 __ Lwc1(f8, MemOperand(a0, offsetof(TestFloat, c))); |
| 2064 __ lwc1(f10, MemOperand(a0, offsetof(TestFloat, d)) ); | 2062 __ Lwc1(f10, MemOperand(a0, offsetof(TestFloat, d))); |
| 2065 __ mina_d(f6, f2, f4); | 2063 __ mina_d(f6, f2, f4); |
| 2066 __ mina_s(f12, f8, f10); | 2064 __ mina_s(f12, f8, f10); |
| 2067 __ maxa_d(f14, f2, f4); | 2065 __ maxa_d(f14, f2, f4); |
| 2068 __ maxa_s(f16, f8, f10); | 2066 __ maxa_s(f16, f8, f10); |
| 2069 __ swc1(f12, MemOperand(a0, offsetof(TestFloat, resf)) ); | 2067 __ Swc1(f12, MemOperand(a0, offsetof(TestFloat, resf))); |
| 2070 __ sdc1(f6, MemOperand(a0, offsetof(TestFloat, resd)) ); | 2068 __ Sdc1(f6, MemOperand(a0, offsetof(TestFloat, resd))); |
| 2071 __ swc1(f16, MemOperand(a0, offsetof(TestFloat, resf1)) ); | 2069 __ Swc1(f16, MemOperand(a0, offsetof(TestFloat, resf1))); |
| 2072 __ sdc1(f14, MemOperand(a0, offsetof(TestFloat, resd1)) ); | 2070 __ Sdc1(f14, MemOperand(a0, offsetof(TestFloat, resd1))); |
| 2073 __ jr(ra); | 2071 __ jr(ra); |
| 2074 __ nop(); | 2072 __ nop(); |
| 2075 | 2073 |
| 2076 CodeDesc desc; | 2074 CodeDesc desc; |
| 2077 assm.GetCode(&desc); | 2075 assm.GetCode(&desc); |
| 2078 Handle<Code> code = isolate->factory()->NewCode( | 2076 Handle<Code> code = isolate->factory()->NewCode( |
| 2079 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); | 2077 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); |
| 2080 F3 f = FUNCTION_CAST<F3>(code->entry()); | 2078 F3 f = FUNCTION_CAST<F3>(code->entry()); |
| 2081 for (int i = 0; i < kTableLength; i++) { | 2079 for (int i = 0; i < kTableLength; i++) { |
| 2082 test.a = inputsa[i]; | 2080 test.a = inputsa[i]; |
| (...skipping 55 matching lines...) |
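Note: mina/maxa compare by magnitude rather than by value, with the same lone-NaN-is-ignored rule as min/max; tie-breaking between equal magnitudes is glossed over in this sketch (hypothetical helper):

    #include <cmath>

    // Sketch of mina.d: return the operand with the smaller absolute value;
    // maxa.d is the magnitude-based counterpart of max.d.
    double MinAbsD(double fs, double ft) {
      if (std::isnan(fs)) return ft;
      if (std::isnan(ft)) return fs;
      return std::fabs(fs) <= std::fabs(ft) ? fs : ft;
    }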
| 2138 -2.0, -2.0, -2.0, -3.0, -3.0, -3.0, | 2136 -2.0, -2.0, -2.0, -3.0, -3.0, -3.0, |
| 2139 2147483648.0, dFPU64InvalidResult, | 2137 2147483648.0, dFPU64InvalidResult, |
| 2140 dFPU64InvalidResult}; | 2138 dFPU64InvalidResult}; |
| 2141 double outputsNaN2008[kTableLength] = { | 2139 double outputsNaN2008[kTableLength] = { |
| 2142 2.0, 2.0, 2.0, 3.0, 3.0, 3.0, | 2140 2.0, 2.0, 2.0, 3.0, 3.0, 3.0, |
| 2143 -2.0, -2.0, -2.0, -3.0, -3.0, -3.0, | 2141 -2.0, -2.0, -2.0, -3.0, -3.0, -3.0, |
| 2144 2147483648.0, dFPU64InvalidResult, | 2142 2147483648.0, dFPU64InvalidResult, |
| 2145 dFPU64InvalidResult}; | 2143 dFPU64InvalidResult}; |
| 2146 | 2144 |
| 2147 __ cfc1(t1, FCSR); | 2145 __ cfc1(t1, FCSR); |
| 2148 __ sw(t1, MemOperand(a0, offsetof(Test, isNaN2008))); | 2146 __ Sw(t1, MemOperand(a0, offsetof(Test, isNaN2008))); |
| 2149 __ ldc1(f4, MemOperand(a0, offsetof(Test, a)) ); | 2147 __ Ldc1(f4, MemOperand(a0, offsetof(Test, a))); |
| 2150 __ lwc1(f6, MemOperand(a0, offsetof(Test, b)) ); | 2148 __ Lwc1(f6, MemOperand(a0, offsetof(Test, b))); |
| 2151 __ trunc_l_d(f8, f4); | 2149 __ trunc_l_d(f8, f4); |
| 2152 __ trunc_l_s(f10, f6); | 2150 __ trunc_l_s(f10, f6); |
| 2153 __ sdc1(f8, MemOperand(a0, offsetof(Test, c)) ); | 2151 __ Sdc1(f8, MemOperand(a0, offsetof(Test, c))); |
| 2154 __ sdc1(f10, MemOperand(a0, offsetof(Test, d)) ); | 2152 __ Sdc1(f10, MemOperand(a0, offsetof(Test, d))); |
| 2155 __ jr(ra); | 2153 __ jr(ra); |
| 2156 __ nop(); | 2154 __ nop(); |
| 2157 Test test; | 2155 Test test; |
| 2158 CodeDesc desc; | 2156 CodeDesc desc; |
| 2159 assm.GetCode(&desc); | 2157 assm.GetCode(&desc); |
| 2160 Handle<Code> code = isolate->factory()->NewCode( | 2158 Handle<Code> code = isolate->factory()->NewCode( |
| 2161 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); | 2159 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); |
| 2162 F3 f = FUNCTION_CAST<F3>(code->entry()); | 2160 F3 f = FUNCTION_CAST<F3>(code->entry()); |
| 2163 for (int i = 0; i < kTableLength; i++) { | 2161 for (int i = 0; i < kTableLength; i++) { |
| 2164 test.a = inputs_D[i]; | 2162 test.a = inputs_D[i]; |
| (...skipping 42 matching lines...) |
| 2207 4.8, 4.8, -4.8, -0.29 | 2205 4.8, 4.8, -4.8, -0.29 |
| 2208 }; | 2206 }; |
| 2209 | 2207 |
| 2210 float outputs_S[kTableLength] = { | 2208 float outputs_S[kTableLength] = { |
| 2211 4.8, 4.8, -4.8, -0.29 | 2209 4.8, 4.8, -4.8, -0.29 |
| 2212 }; | 2210 }; |
| 2213 double outputs_D[kTableLength] = { | 2211 double outputs_D[kTableLength] = { |
| 2214 5.3, -5.3, 5.3, -2.9 | 2212 5.3, -5.3, 5.3, -2.9 |
| 2215 }; | 2213 }; |
| 2216 | 2214 |
| 2217 __ ldc1(f2, MemOperand(a0, offsetof(TestFloat, a)) ); | 2215 __ Ldc1(f2, MemOperand(a0, offsetof(TestFloat, a))); |
| 2218 __ lwc1(f6, MemOperand(a0, offsetof(TestFloat, c)) ); | 2216 __ Lwc1(f6, MemOperand(a0, offsetof(TestFloat, c))); |
| 2219 __ ld(t0, MemOperand(a0, offsetof(TestFloat, rt))); | 2217 __ Ld(t0, MemOperand(a0, offsetof(TestFloat, rt))); |
| 2220 __ Move(f12, 0.0); | 2218 __ Move(f12, 0.0); |
| 2221 __ Move(f10, 0.0); | 2219 __ Move(f10, 0.0); |
| 2222 __ Move(f16, 0.0); | 2220 __ Move(f16, 0.0); |
| 2223 __ Move(f14, 0.0); | 2221 __ Move(f14, 0.0); |
| 2224 __ sdc1(f12, MemOperand(a0, offsetof(TestFloat, bold)) ); | 2222 __ Sdc1(f12, MemOperand(a0, offsetof(TestFloat, bold))); |
| 2225 __ swc1(f10, MemOperand(a0, offsetof(TestFloat, dold)) ); | 2223 __ Swc1(f10, MemOperand(a0, offsetof(TestFloat, dold))); |
| 2226 __ sdc1(f16, MemOperand(a0, offsetof(TestFloat, bold1)) ); | 2224 __ Sdc1(f16, MemOperand(a0, offsetof(TestFloat, bold1))); |
| 2227 __ swc1(f14, MemOperand(a0, offsetof(TestFloat, dold1)) ); | 2225 __ Swc1(f14, MemOperand(a0, offsetof(TestFloat, dold1))); |
| 2228 __ movz_s(f10, f6, t0); | 2226 __ movz_s(f10, f6, t0); |
| 2229 __ movz_d(f12, f2, t0); | 2227 __ movz_d(f12, f2, t0); |
| 2230 __ movn_s(f14, f6, t0); | 2228 __ movn_s(f14, f6, t0); |
| 2231 __ movn_d(f16, f2, t0); | 2229 __ movn_d(f16, f2, t0); |
| 2232 __ swc1(f10, MemOperand(a0, offsetof(TestFloat, d)) ); | 2230 __ Swc1(f10, MemOperand(a0, offsetof(TestFloat, d))); |
| 2233 __ sdc1(f12, MemOperand(a0, offsetof(TestFloat, b)) ); | 2231 __ Sdc1(f12, MemOperand(a0, offsetof(TestFloat, b))); |
| 2234 __ swc1(f14, MemOperand(a0, offsetof(TestFloat, d1)) ); | 2232 __ Swc1(f14, MemOperand(a0, offsetof(TestFloat, d1))); |
| 2235 __ sdc1(f16, MemOperand(a0, offsetof(TestFloat, b1)) ); | 2233 __ Sdc1(f16, MemOperand(a0, offsetof(TestFloat, b1))); |
| 2236 __ jr(ra); | 2234 __ jr(ra); |
| 2237 __ nop(); | 2235 __ nop(); |
| 2238 | 2236 |
| 2239 CodeDesc desc; | 2237 CodeDesc desc; |
| 2240 assm.GetCode(&desc); | 2238 assm.GetCode(&desc); |
| 2241 Handle<Code> code = isolate->factory()->NewCode( | 2239 Handle<Code> code = isolate->factory()->NewCode( |
| 2242 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); | 2240 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); |
| 2243 F3 f = FUNCTION_CAST<F3>(code->entry()); | 2241 F3 f = FUNCTION_CAST<F3>(code->entry()); |
| 2244 for (int i = 0; i < kTableLength; i++) { | 2242 for (int i = 0; i < kTableLength; i++) { |
| 2245 test.a = inputs_D[i]; | 2243 test.a = inputs_D[i]; |
| (...skipping 60 matching lines...) |
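Note: movz/movn are the pre-r6 conditional moves driven by a GPR, which is why the test first stores the untouched "old" destination values and later checks which ones were overwritten. A minimal model (hypothetical helpers):

    #include <cstdint>

    // movz.d copies fs into the destination only when rt == 0; movn.d only
    // when rt != 0; otherwise the destination keeps its previous contents.
    double MovzD(double fd_old, double fs, int64_t rt) {
      return rt == 0 ? fs : fd_old;
    }
    double MovnD(double fd_old, double fs, int64_t rt) {
      return rt != 0 ? fs : fd_old;
    }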
| 2306 for (int j = 0; j< 8; j++) { | 2304 for (int j = 0; j< 8; j++) { |
| 2307 test.cc = condition_flags[j]; | 2305 test.cc = condition_flags[j]; |
| 2308 if (test.cc == 0) { | 2306 if (test.cc == 0) { |
| 2309 test.fcsr = 1 << 23; | 2307 test.fcsr = 1 << 23; |
| 2310 } else { | 2308 } else { |
| 2311 test.fcsr = 1 << (24+condition_flags[j]); | 2309 test.fcsr = 1 << (24+condition_flags[j]); |
| 2312 } | 2310 } |
| 2313 HandleScope scope(isolate); | 2311 HandleScope scope(isolate); |
| 2314 MacroAssembler assm(isolate, NULL, 0, | 2312 MacroAssembler assm(isolate, NULL, 0, |
| 2315 v8::internal::CodeObjectRequired::kYes); | 2313 v8::internal::CodeObjectRequired::kYes); |
| 2316 __ ldc1(f2, MemOperand(a0, offsetof(TestFloat, srcd)) ); | 2314 __ Ldc1(f2, MemOperand(a0, offsetof(TestFloat, srcd))); |
| 2317 __ lwc1(f4, MemOperand(a0, offsetof(TestFloat, srcf)) ); | 2315 __ Lwc1(f4, MemOperand(a0, offsetof(TestFloat, srcf))); |
| 2318 __ lw(t1, MemOperand(a0, offsetof(TestFloat, fcsr)) ); | 2316 __ Lw(t1, MemOperand(a0, offsetof(TestFloat, fcsr))); |
| 2319 __ cfc1(t0, FCSR); | 2317 __ cfc1(t0, FCSR); |
| 2320 __ ctc1(t1, FCSR); | 2318 __ ctc1(t1, FCSR); |
| 2321 __ li(t2, 0x0); | 2319 __ li(t2, 0x0); |
| 2322 __ mtc1(t2, f12); | 2320 __ mtc1(t2, f12); |
| 2323 __ mtc1(t2, f10); | 2321 __ mtc1(t2, f10); |
| 2324 __ sdc1(f10, MemOperand(a0, offsetof(TestFloat, dstdold)) ); | 2322 __ Sdc1(f10, MemOperand(a0, offsetof(TestFloat, dstdold))); |
| 2325 __ swc1(f12, MemOperand(a0, offsetof(TestFloat, dstfold)) ); | 2323 __ Swc1(f12, MemOperand(a0, offsetof(TestFloat, dstfold))); |
| 2326 __ movt_s(f12, f4, test.cc); | 2324 __ movt_s(f12, f4, test.cc); |
| 2327 __ movt_d(f10, f2, test.cc); | 2325 __ movt_d(f10, f2, test.cc); |
| 2328 __ swc1(f12, MemOperand(a0, offsetof(TestFloat, dstf)) ); | 2326 __ Swc1(f12, MemOperand(a0, offsetof(TestFloat, dstf))); |
| 2329 __ sdc1(f10, MemOperand(a0, offsetof(TestFloat, dstd)) ); | 2327 __ Sdc1(f10, MemOperand(a0, offsetof(TestFloat, dstd))); |
| 2330 __ sdc1(f10, MemOperand(a0, offsetof(TestFloat, dstdold1)) ); | 2328 __ Sdc1(f10, MemOperand(a0, offsetof(TestFloat, dstdold1))); |
| 2331 __ swc1(f12, MemOperand(a0, offsetof(TestFloat, dstfold1)) ); | 2329 __ Swc1(f12, MemOperand(a0, offsetof(TestFloat, dstfold1))); |
| 2332 __ movf_s(f12, f4, test.cc); | 2330 __ movf_s(f12, f4, test.cc); |
| 2333 __ movf_d(f10, f2, test.cc); | 2331 __ movf_d(f10, f2, test.cc); |
| 2334 __ swc1(f12, MemOperand(a0, offsetof(TestFloat, dstf1)) ); | 2332 __ Swc1(f12, MemOperand(a0, offsetof(TestFloat, dstf1))); |
| 2335 __ sdc1(f10, MemOperand(a0, offsetof(TestFloat, dstd1)) ); | 2333 __ Sdc1(f10, MemOperand(a0, offsetof(TestFloat, dstd1))); |
| 2336 __ ctc1(t0, FCSR); | 2334 __ ctc1(t0, FCSR); |
| 2337 __ jr(ra); | 2335 __ jr(ra); |
| 2338 __ nop(); | 2336 __ nop(); |
| 2339 | 2337 |
| 2340 CodeDesc desc; | 2338 CodeDesc desc; |
| 2341 assm.GetCode(&desc); | 2339 assm.GetCode(&desc); |
| 2342 Handle<Code> code = isolate->factory()->NewCode( | 2340 Handle<Code> code = isolate->factory()->NewCode( |
| 2343 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); | 2341 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); |
| 2344 F3 f = FUNCTION_CAST<F3>(code->entry()); | 2342 F3 f = FUNCTION_CAST<F3>(code->entry()); |
| 2345 | 2343 |
| (...skipping 60 matching lines...) |
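Note: movt/movf above follow the same pattern, except the condition is one of the eight FCSR condition flags (cc) that the surrounding loop programs through TestFloat::fcsr. A minimal model (hypothetical helpers):

    // movt.s copies fs only when the selected condition flag is set;
    // movf.s copies it only when the flag is clear.
    float MovtS(float fd_old, float fs, bool cc_flag) {
      return cc_flag ? fs : fd_old;
    }
    float MovfS(float fd_old, float fs, bool cc_flag) {
      return !cc_flag ? fs : fd_old;
    }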
| 2406 double outputs_RM[kTableLength] = { | 2404 double outputs_RM[kTableLength] = { |
| 2407 2.0, 2.0, 2.0, 3.0, 3.0, 3.0, | 2405 2.0, 2.0, 2.0, 3.0, 3.0, 3.0, |
| 2408 -3.0, -3.0, -3.0, -4.0, -4.0, -4.0, | 2406 -3.0, -3.0, -3.0, -4.0, -4.0, -4.0, |
| 2409 2147483637.0, 2147483638.0, 2147483639.0, | 2407 2147483637.0, 2147483638.0, 2147483639.0, |
| 2410 2147483640.0, 2147483641.0, 2147483642.0, | 2408 2147483640.0, 2147483641.0, 2147483642.0, |
| 2411 2147483643.0, 2147483644.0, 2147483645.0, | 2409 2147483643.0, 2147483644.0, 2147483645.0, |
| 2412 2147483646.0, 2147483647.0, kFPUInvalidResult}; | 2410 2147483646.0, 2147483647.0, kFPUInvalidResult}; |
| 2413 int fcsr_inputs[4] = | 2411 int fcsr_inputs[4] = |
| 2414 {kRoundToNearest, kRoundToZero, kRoundToPlusInf, kRoundToMinusInf}; | 2412 {kRoundToNearest, kRoundToZero, kRoundToPlusInf, kRoundToMinusInf}; |
| 2415 double* outputs[4] = {outputs_RN, outputs_RZ, outputs_RP, outputs_RM}; | 2413 double* outputs[4] = {outputs_RN, outputs_RZ, outputs_RP, outputs_RM}; |
| 2416 __ ldc1(f4, MemOperand(a0, offsetof(Test, a)) ); | 2414 __ Ldc1(f4, MemOperand(a0, offsetof(Test, a))); |
| 2417 __ lw(t0, MemOperand(a0, offsetof(Test, fcsr)) ); | 2415 __ Lw(t0, MemOperand(a0, offsetof(Test, fcsr))); |
| 2418 __ cfc1(t1, FCSR); | 2416 __ cfc1(t1, FCSR); |
| 2419 __ ctc1(t0, FCSR); | 2417 __ ctc1(t0, FCSR); |
| 2420 __ cvt_w_d(f8, f4); | 2418 __ cvt_w_d(f8, f4); |
| 2421 __ swc1(f8, MemOperand(a0, offsetof(Test, b)) ); | 2419 __ Swc1(f8, MemOperand(a0, offsetof(Test, b))); |
| 2422 __ ctc1(t1, FCSR); | 2420 __ ctc1(t1, FCSR); |
| 2423 __ jr(ra); | 2421 __ jr(ra); |
| 2424 __ nop(); | 2422 __ nop(); |
| 2425 Test test; | 2423 Test test; |
| 2426 CodeDesc desc; | 2424 CodeDesc desc; |
| 2427 assm.GetCode(&desc); | 2425 assm.GetCode(&desc); |
| 2428 Handle<Code> code = isolate->factory()->NewCode( | 2426 Handle<Code> code = isolate->factory()->NewCode( |
| 2429 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); | 2427 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); |
| 2430 F3 f = FUNCTION_CAST<F3>(code->entry()); | 2428 F3 f = FUNCTION_CAST<F3>(code->entry()); |
| 2431 for (int j = 0; j < 4; j++) { | 2429 for (int j = 0; j < 4; j++) { |
| (...skipping 41 matching lines...) |
| 2473 kFPUInvalidResult, kFPUInvalidResult, | 2471 kFPUInvalidResult, kFPUInvalidResult, |
| 2474 kFPUInvalidResult}; | 2472 kFPUInvalidResult}; |
| 2475 double outputsNaN2008[kTableLength] = { | 2473 double outputsNaN2008[kTableLength] = { |
| 2476 2.0, 2.0, 2.0, 3.0, 3.0, 3.0, | 2474 2.0, 2.0, 2.0, 3.0, 3.0, 3.0, |
| 2477 -2.0, -2.0, -2.0, -3.0, -3.0, -3.0, | 2475 -2.0, -2.0, -2.0, -3.0, -3.0, -3.0, |
| 2478 kFPUInvalidResult, | 2476 kFPUInvalidResult, |
| 2479 0, | 2477 0, |
| 2480 kFPUInvalidResult}; | 2478 kFPUInvalidResult}; |
| 2481 | 2479 |
| 2482 __ cfc1(t1, FCSR); | 2480 __ cfc1(t1, FCSR); |
| 2483 __ sw(t1, MemOperand(a0, offsetof(Test, isNaN2008))); | 2481 __ Sw(t1, MemOperand(a0, offsetof(Test, isNaN2008))); |
| 2484 __ ldc1(f4, MemOperand(a0, offsetof(Test, a)) ); | 2482 __ Ldc1(f4, MemOperand(a0, offsetof(Test, a))); |
| 2485 __ lwc1(f6, MemOperand(a0, offsetof(Test, b)) ); | 2483 __ Lwc1(f6, MemOperand(a0, offsetof(Test, b))); |
| 2486 __ trunc_w_d(f8, f4); | 2484 __ trunc_w_d(f8, f4); |
| 2487 __ trunc_w_s(f10, f6); | 2485 __ trunc_w_s(f10, f6); |
| 2488 __ swc1(f8, MemOperand(a0, offsetof(Test, c)) ); | 2486 __ Swc1(f8, MemOperand(a0, offsetof(Test, c))); |
| 2489 __ swc1(f10, MemOperand(a0, offsetof(Test, d)) ); | 2487 __ Swc1(f10, MemOperand(a0, offsetof(Test, d))); |
| 2490 __ jr(ra); | 2488 __ jr(ra); |
| 2491 __ nop(); | 2489 __ nop(); |
| 2492 Test test; | 2490 Test test; |
| 2493 CodeDesc desc; | 2491 CodeDesc desc; |
| 2494 assm.GetCode(&desc); | 2492 assm.GetCode(&desc); |
| 2495 Handle<Code> code = isolate->factory()->NewCode( | 2493 Handle<Code> code = isolate->factory()->NewCode( |
| 2496 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); | 2494 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); |
| 2497 F3 f = FUNCTION_CAST<F3>(code->entry()); | 2495 F3 f = FUNCTION_CAST<F3>(code->entry()); |
| 2498 for (int i = 0; i < kTableLength; i++) { | 2496 for (int i = 0; i < kTableLength; i++) { |
| 2499 test.a = inputs_D[i]; | 2497 test.a = inputs_D[i]; |
| (...skipping 42 matching lines...) |
| 2542 -2.0, -3.0, -2.0, -3.0, -4.0, -4.0, | 2540 -2.0, -3.0, -2.0, -3.0, -4.0, -4.0, |
| 2543 kFPUInvalidResult, kFPUInvalidResult, | 2541 kFPUInvalidResult, kFPUInvalidResult, |
| 2544 kFPUInvalidResult}; | 2542 kFPUInvalidResult}; |
| 2545 double outputsNaN2008[kTableLength] = { | 2543 double outputsNaN2008[kTableLength] = { |
| 2546 2.0, 3.0, 2.0, 3.0, 4.0, 4.0, | 2544 2.0, 3.0, 2.0, 3.0, 4.0, 4.0, |
| 2547 -2.0, -3.0, -2.0, -3.0, -4.0, -4.0, | 2545 -2.0, -3.0, -2.0, -3.0, -4.0, -4.0, |
| 2548 kFPUInvalidResult, 0, | 2546 kFPUInvalidResult, 0, |
| 2549 kFPUInvalidResult}; | 2547 kFPUInvalidResult}; |
| 2550 | 2548 |
| 2551 __ cfc1(t1, FCSR); | 2549 __ cfc1(t1, FCSR); |
| 2552 __ sw(t1, MemOperand(a0, offsetof(Test, isNaN2008))); | 2550 __ Sw(t1, MemOperand(a0, offsetof(Test, isNaN2008))); |
| 2553 __ ldc1(f4, MemOperand(a0, offsetof(Test, a)) ); | 2551 __ Ldc1(f4, MemOperand(a0, offsetof(Test, a))); |
| 2554 __ lwc1(f6, MemOperand(a0, offsetof(Test, b)) ); | 2552 __ Lwc1(f6, MemOperand(a0, offsetof(Test, b))); |
| 2555 __ round_w_d(f8, f4); | 2553 __ round_w_d(f8, f4); |
| 2556 __ round_w_s(f10, f6); | 2554 __ round_w_s(f10, f6); |
| 2557 __ swc1(f8, MemOperand(a0, offsetof(Test, c)) ); | 2555 __ Swc1(f8, MemOperand(a0, offsetof(Test, c))); |
| 2558 __ swc1(f10, MemOperand(a0, offsetof(Test, d)) ); | 2556 __ Swc1(f10, MemOperand(a0, offsetof(Test, d))); |
| 2559 __ jr(ra); | 2557 __ jr(ra); |
| 2560 __ nop(); | 2558 __ nop(); |
| 2561 Test test; | 2559 Test test; |
| 2562 CodeDesc desc; | 2560 CodeDesc desc; |
| 2563 assm.GetCode(&desc); | 2561 assm.GetCode(&desc); |
| 2564 Handle<Code> code = isolate->factory()->NewCode( | 2562 Handle<Code> code = isolate->factory()->NewCode( |
| 2565 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); | 2563 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); |
| 2566 F3 f = FUNCTION_CAST<F3>(code->entry()); | 2564 F3 f = FUNCTION_CAST<F3>(code->entry()); |
| 2567 for (int i = 0; i < kTableLength; i++) { | 2565 for (int i = 0; i < kTableLength; i++) { |
| 2568 test.a = inputs_D[i]; | 2566 test.a = inputs_D[i]; |
| (...skipping 44 matching lines...) |
| 2613 2147483648.0, dFPU64InvalidResult, | 2611 2147483648.0, dFPU64InvalidResult, |
| 2614 dFPU64InvalidResult}; | 2612 dFPU64InvalidResult}; |
| 2615 double outputsNaN2008[kTableLength] = { | 2613 double outputsNaN2008[kTableLength] = { |
| 2616 2.0, 3.0, 2.0, 3.0, 4.0, 4.0, | 2614 2.0, 3.0, 2.0, 3.0, 4.0, 4.0, |
| 2617 -2.0, -3.0, -2.0, -3.0, -4.0, -4.0, | 2615 -2.0, -3.0, -2.0, -3.0, -4.0, -4.0, |
| 2618 2147483648.0, | 2616 2147483648.0, |
| 2619 0, | 2617 0, |
| 2620 dFPU64InvalidResult}; | 2618 dFPU64InvalidResult}; |
| 2621 | 2619 |
| 2622 __ cfc1(t1, FCSR); | 2620 __ cfc1(t1, FCSR); |
| 2623 __ sw(t1, MemOperand(a0, offsetof(Test, isNaN2008))); | 2621 __ Sw(t1, MemOperand(a0, offsetof(Test, isNaN2008))); |
| 2624 __ ldc1(f4, MemOperand(a0, offsetof(Test, a)) ); | 2622 __ Ldc1(f4, MemOperand(a0, offsetof(Test, a))); |
| 2625 __ lwc1(f6, MemOperand(a0, offsetof(Test, b)) ); | 2623 __ Lwc1(f6, MemOperand(a0, offsetof(Test, b))); |
| 2626 __ round_l_d(f8, f4); | 2624 __ round_l_d(f8, f4); |
| 2627 __ round_l_s(f10, f6); | 2625 __ round_l_s(f10, f6); |
| 2628 __ sdc1(f8, MemOperand(a0, offsetof(Test, c)) ); | 2626 __ Sdc1(f8, MemOperand(a0, offsetof(Test, c))); |
| 2629 __ sdc1(f10, MemOperand(a0, offsetof(Test, d)) ); | 2627 __ Sdc1(f10, MemOperand(a0, offsetof(Test, d))); |
| 2630 __ jr(ra); | 2628 __ jr(ra); |
| 2631 __ nop(); | 2629 __ nop(); |
| 2632 Test test; | 2630 Test test; |
| 2633 CodeDesc desc; | 2631 CodeDesc desc; |
| 2634 assm.GetCode(&desc); | 2632 assm.GetCode(&desc); |
| 2635 Handle<Code> code = isolate->factory()->NewCode( | 2633 Handle<Code> code = isolate->factory()->NewCode( |
| 2636 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); | 2634 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); |
| 2637 F3 f = FUNCTION_CAST<F3>(code->entry()); | 2635 F3 f = FUNCTION_CAST<F3>(code->entry()); |
| 2638 for (int i = 0; i < kTableLength; i++) { | 2636 for (int i = 0; i < kTableLength; i++) { |
| 2639 test.a = inputs_D[i]; | 2637 test.a = inputs_D[i]; |
| (...skipping 44 matching lines...) |
| 2684 5.3, 4.8, 2.9, -5.3, -4.8, -2.9 | 2682 5.3, 4.8, 2.9, -5.3, -4.8, -2.9 |
| 2685 }; | 2683 }; |
| 2686 float inputft_S[kTableLength] = { | 2684 float inputft_S[kTableLength] = { |
| 2687 4.8, 5.3, 2.9, 4.8, 5.3, 2.9, | 2685 4.8, 5.3, 2.9, 4.8, 5.3, 2.9, |
| 2688 -4.8, -5.3, -2.9, -4.8, -5.3, -2.9 | 2686 -4.8, -5.3, -2.9, -4.8, -5.3, -2.9 |
| 2689 }; | 2687 }; |
| 2690 float outputs_S[kTableLength] = { | 2688 float outputs_S[kTableLength] = { |
| 2691 0.5, -0.5, 0.0, -10.1, -10.1, -5.8, | 2689 0.5, -0.5, 0.0, -10.1, -10.1, -5.8, |
| 2692 10.1, 10.1, 5.8, -0.5, 0.5, 0.0 | 2690 10.1, 10.1, 5.8, -0.5, 0.5, 0.0 |
| 2693 }; | 2691 }; |
| 2694 __ lwc1(f2, MemOperand(a0, offsetof(TestFloat, a)) ); | 2692 __ Lwc1(f2, MemOperand(a0, offsetof(TestFloat, a))); |
| 2695 __ lwc1(f4, MemOperand(a0, offsetof(TestFloat, b)) ); | 2693 __ Lwc1(f4, MemOperand(a0, offsetof(TestFloat, b))); |
| 2696 __ ldc1(f8, MemOperand(a0, offsetof(TestFloat, c)) ); | 2694 __ Ldc1(f8, MemOperand(a0, offsetof(TestFloat, c))); |
| 2697 __ ldc1(f10, MemOperand(a0, offsetof(TestFloat, d)) ); | 2695 __ Ldc1(f10, MemOperand(a0, offsetof(TestFloat, d))); |
| 2698 __ sub_s(f6, f2, f4); | 2696 __ sub_s(f6, f2, f4); |
| 2699 __ sub_d(f12, f8, f10); | 2697 __ sub_d(f12, f8, f10); |
| 2700 __ swc1(f6, MemOperand(a0, offsetof(TestFloat, resultS)) ); | 2698 __ Swc1(f6, MemOperand(a0, offsetof(TestFloat, resultS))); |
| 2701 __ sdc1(f12, MemOperand(a0, offsetof(TestFloat, resultD)) ); | 2699 __ Sdc1(f12, MemOperand(a0, offsetof(TestFloat, resultD))); |
| 2702 __ jr(ra); | 2700 __ jr(ra); |
| 2703 __ nop(); | 2701 __ nop(); |
| 2704 | 2702 |
| 2705 CodeDesc desc; | 2703 CodeDesc desc; |
| 2706 assm.GetCode(&desc); | 2704 assm.GetCode(&desc); |
| 2707 Handle<Code> code = isolate->factory()->NewCode( | 2705 Handle<Code> code = isolate->factory()->NewCode( |
| 2708 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); | 2706 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); |
| 2709 F3 f = FUNCTION_CAST<F3>(code->entry()); | 2707 F3 f = FUNCTION_CAST<F3>(code->entry()); |
| 2710 for (int i = 0; i < kTableLength; i++) { | 2708 for (int i = 0; i < kTableLength; i++) { |
| 2711 test.a = inputfs_S[i]; | 2709 test.a = inputfs_S[i]; |
| (...skipping 38 matching lines...) |
| 2750 0.0L, 2.0L, sqrt2_d, 2e-14L | 2748 0.0L, 2.0L, sqrt2_d, 2e-14L |
| 2751 }; | 2749 }; |
| 2752 float inputs_S[kTableLength] = { | 2750 float inputs_S[kTableLength] = { |
| 2753 0.0, 4.0, 2.0, 4e-28 | 2751 0.0, 4.0, 2.0, 4e-28 |
| 2754 }; | 2752 }; |
| 2755 | 2753 |
| 2756 float outputs_S[kTableLength] = { | 2754 float outputs_S[kTableLength] = { |
| 2757 0.0, 2.0, sqrt2_s, 2e-14 | 2755 0.0, 2.0, sqrt2_s, 2e-14 |
| 2758 }; | 2756 }; |
| 2759 | 2757 |
| 2760 | 2758 __ Lwc1(f2, MemOperand(a0, offsetof(TestFloat, a))); |
| 2761 __ lwc1(f2, MemOperand(a0, offsetof(TestFloat, a)) ); | 2759 __ Ldc1(f8, MemOperand(a0, offsetof(TestFloat, c))); |
| 2762 __ ldc1(f8, MemOperand(a0, offsetof(TestFloat, c)) ); | |
| 2763 __ sqrt_s(f6, f2); | 2760 __ sqrt_s(f6, f2); |
| 2764 __ sqrt_d(f12, f8); | 2761 __ sqrt_d(f12, f8); |
| 2765 __ rsqrt_d(f14, f8); | 2762 __ rsqrt_d(f14, f8); |
| 2766 __ rsqrt_s(f16, f2); | 2763 __ rsqrt_s(f16, f2); |
| 2767 __ recip_d(f18, f8); | 2764 __ recip_d(f18, f8); |
| 2768 __ recip_s(f4, f2); | 2765 __ recip_s(f4, f2); |
| 2769 __ swc1(f6, MemOperand(a0, offsetof(TestFloat, resultS)) ); | 2766 __ Swc1(f6, MemOperand(a0, offsetof(TestFloat, resultS))); |
| 2770 __ sdc1(f12, MemOperand(a0, offsetof(TestFloat, resultD)) ); | 2767 __ Sdc1(f12, MemOperand(a0, offsetof(TestFloat, resultD))); |
| 2771 __ swc1(f16, MemOperand(a0, offsetof(TestFloat, resultS1)) ); | 2768 __ Swc1(f16, MemOperand(a0, offsetof(TestFloat, resultS1))); |
| 2772 __ sdc1(f14, MemOperand(a0, offsetof(TestFloat, resultD1)) ); | 2769 __ Sdc1(f14, MemOperand(a0, offsetof(TestFloat, resultD1))); |
| 2773 __ swc1(f4, MemOperand(a0, offsetof(TestFloat, resultS2)) ); | 2770 __ Swc1(f4, MemOperand(a0, offsetof(TestFloat, resultS2))); |
| 2774 __ sdc1(f18, MemOperand(a0, offsetof(TestFloat, resultD2)) ); | 2771 __ Sdc1(f18, MemOperand(a0, offsetof(TestFloat, resultD2))); |
| 2775 __ jr(ra); | 2772 __ jr(ra); |
| 2776 __ nop(); | 2773 __ nop(); |
| 2777 | 2774 |
| 2778 CodeDesc desc; | 2775 CodeDesc desc; |
| 2779 assm.GetCode(&desc); | 2776 assm.GetCode(&desc); |
| 2780 Handle<Code> code = isolate->factory()->NewCode( | 2777 Handle<Code> code = isolate->factory()->NewCode( |
| 2781 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); | 2778 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); |
| 2782 F3 f = FUNCTION_CAST<F3>(code->entry()); | 2779 F3 f = FUNCTION_CAST<F3>(code->entry()); |
| 2783 | 2780 |
| 2784 for (int i = 0; i < kTableLength; i++) { | 2781 for (int i = 0; i < kTableLength; i++) { |
| (...skipping 52 matching lines...) |
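Note: rsqrt and recip are reciprocal(-square-root) instructions, so the natural reference values for the three result pairs stored above are sqrt(x), 1/sqrt(x) and 1/x (hardware may implement the last two as approximations). Host-side reference values:

    #include <cmath>

    double SqrtD(double x)  { return std::sqrt(x); }
    double RsqrtD(double x) { return 1.0 / std::sqrt(x); }   // rsqrt_d
    double RecipD(double x) { return 1.0 / x; }              // recip_d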
| 2837 double outputs_D[kTableLength] = { | 2834 double outputs_D[kTableLength] = { |
| 2838 -4.0, 2.0 | 2835 -4.0, 2.0 |
| 2839 }; | 2836 }; |
| 2840 float inputs_S[kTableLength] = { | 2837 float inputs_S[kTableLength] = { |
| 2841 4.0, -2.0 | 2838 4.0, -2.0 |
| 2842 }; | 2839 }; |
| 2843 | 2840 |
| 2844 float outputs_S[kTableLength] = { | 2841 float outputs_S[kTableLength] = { |
| 2845 -4.0, 2.0 | 2842 -4.0, 2.0 |
| 2846 }; | 2843 }; |
| 2847 __ lwc1(f2, MemOperand(a0, offsetof(TestFloat, a)) ); | 2844 __ Lwc1(f2, MemOperand(a0, offsetof(TestFloat, a))); |
| 2848 __ ldc1(f8, MemOperand(a0, offsetof(TestFloat, c)) ); | 2845 __ Ldc1(f8, MemOperand(a0, offsetof(TestFloat, c))); |
| 2849 __ neg_s(f6, f2); | 2846 __ neg_s(f6, f2); |
| 2850 __ neg_d(f12, f8); | 2847 __ neg_d(f12, f8); |
| 2851 __ swc1(f6, MemOperand(a0, offsetof(TestFloat, resultS)) ); | 2848 __ Swc1(f6, MemOperand(a0, offsetof(TestFloat, resultS))); |
| 2852 __ sdc1(f12, MemOperand(a0, offsetof(TestFloat, resultD)) ); | 2849 __ Sdc1(f12, MemOperand(a0, offsetof(TestFloat, resultD))); |
| 2853 __ jr(ra); | 2850 __ jr(ra); |
| 2854 __ nop(); | 2851 __ nop(); |
| 2855 | 2852 |
| 2856 CodeDesc desc; | 2853 CodeDesc desc; |
| 2857 assm.GetCode(&desc); | 2854 assm.GetCode(&desc); |
| 2858 Handle<Code> code = isolate->factory()->NewCode( | 2855 Handle<Code> code = isolate->factory()->NewCode( |
| 2859 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); | 2856 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); |
| 2860 F3 f = FUNCTION_CAST<F3>(code->entry()); | 2857 F3 f = FUNCTION_CAST<F3>(code->entry()); |
| 2861 for (int i = 0; i < kTableLength; i++) { | 2858 for (int i = 0; i < kTableLength; i++) { |
| 2862 test.a = inputs_S[i]; | 2859 test.a = inputs_S[i]; |
| (...skipping 30 matching lines...) |
| 2893 4.8, 4.8, -4.8, -0.29 | 2890 4.8, 4.8, -4.8, -0.29 |
| 2894 }; | 2891 }; |
| 2895 | 2892 |
| 2896 float inputfs_S[kTableLength] = { | 2893 float inputfs_S[kTableLength] = { |
| 2897 5.3, -5.3, 5.3, -2.9 | 2894 5.3, -5.3, 5.3, -2.9 |
| 2898 }; | 2895 }; |
| 2899 float inputft_S[kTableLength] = { | 2896 float inputft_S[kTableLength] = { |
| 2900 4.8, 4.8, -4.8, -0.29 | 2897 4.8, 4.8, -4.8, -0.29 |
| 2901 }; | 2898 }; |
| 2902 | 2899 |
| 2903 __ lwc1(f2, MemOperand(a0, offsetof(TestFloat, a)) ); | 2900 __ Lwc1(f2, MemOperand(a0, offsetof(TestFloat, a))); |
| 2904 __ lwc1(f4, MemOperand(a0, offsetof(TestFloat, b)) ); | 2901 __ Lwc1(f4, MemOperand(a0, offsetof(TestFloat, b))); |
| 2905 __ ldc1(f6, MemOperand(a0, offsetof(TestFloat, c)) ); | 2902 __ Ldc1(f6, MemOperand(a0, offsetof(TestFloat, c))); |
| 2906 __ ldc1(f8, MemOperand(a0, offsetof(TestFloat, d)) ); | 2903 __ Ldc1(f8, MemOperand(a0, offsetof(TestFloat, d))); |
| 2907 __ mul_s(f10, f2, f4); | 2904 __ mul_s(f10, f2, f4); |
| 2908 __ mul_d(f12, f6, f8); | 2905 __ mul_d(f12, f6, f8); |
| 2909 __ swc1(f10, MemOperand(a0, offsetof(TestFloat, resultS)) ); | 2906 __ Swc1(f10, MemOperand(a0, offsetof(TestFloat, resultS))); |
| 2910 __ sdc1(f12, MemOperand(a0, offsetof(TestFloat, resultD)) ); | 2907 __ Sdc1(f12, MemOperand(a0, offsetof(TestFloat, resultD))); |
| 2911 __ jr(ra); | 2908 __ jr(ra); |
| 2912 __ nop(); | 2909 __ nop(); |
| 2913 | 2910 |
| 2914 CodeDesc desc; | 2911 CodeDesc desc; |
| 2915 assm.GetCode(&desc); | 2912 assm.GetCode(&desc); |
| 2916 Handle<Code> code = isolate->factory()->NewCode( | 2913 Handle<Code> code = isolate->factory()->NewCode( |
| 2917 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); | 2914 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); |
| 2918 F3 f = FUNCTION_CAST<F3>(code->entry()); | 2915 F3 f = FUNCTION_CAST<F3>(code->entry()); |
| 2919 for (int i = 0; i < kTableLength; i++) { | 2916 for (int i = 0; i < kTableLength; i++) { |
| 2920 test.a = inputfs_S[i]; | 2917 test.a = inputfs_S[i]; |
| (...skipping 29 matching lines...) |
| 2950 4.8, 4.8, -4.8, -0.29 | 2947 4.8, 4.8, -4.8, -0.29 |
| 2951 }; | 2948 }; |
| 2952 | 2949 |
| 2953 float outputs_S[kTableLength] = { | 2950 float outputs_S[kTableLength] = { |
| 2954 4.8, 4.8, -4.8, -0.29 | 2951 4.8, 4.8, -4.8, -0.29 |
| 2955 }; | 2952 }; |
| 2956 double outputs_D[kTableLength] = { | 2953 double outputs_D[kTableLength] = { |
| 2957 5.3, -5.3, 5.3, -2.9 | 2954 5.3, -5.3, 5.3, -2.9 |
| 2958 }; | 2955 }; |
| 2959 | 2956 |
| 2960 __ ldc1(f4, MemOperand(a0, offsetof(TestFloat, a)) ); | 2957 __ Ldc1(f4, MemOperand(a0, offsetof(TestFloat, a))); |
| 2961 __ lwc1(f6, MemOperand(a0, offsetof(TestFloat, c)) ); | 2958 __ Lwc1(f6, MemOperand(a0, offsetof(TestFloat, c))); |
| 2962 __ mov_s(f8, f6); | 2959 __ mov_s(f8, f6); |
| 2963 __ mov_d(f10, f4); | 2960 __ mov_d(f10, f4); |
| 2964 __ swc1(f8, MemOperand(a0, offsetof(TestFloat, d)) ); | 2961 __ Swc1(f8, MemOperand(a0, offsetof(TestFloat, d))); |
| 2965 __ sdc1(f10, MemOperand(a0, offsetof(TestFloat, b)) ); | 2962 __ Sdc1(f10, MemOperand(a0, offsetof(TestFloat, b))); |
| 2966 __ jr(ra); | 2963 __ jr(ra); |
| 2967 __ nop(); | 2964 __ nop(); |
| 2968 | 2965 |
| 2969 CodeDesc desc; | 2966 CodeDesc desc; |
| 2970 assm.GetCode(&desc); | 2967 assm.GetCode(&desc); |
| 2971 Handle<Code> code = isolate->factory()->NewCode( | 2968 Handle<Code> code = isolate->factory()->NewCode( |
| 2972 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); | 2969 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); |
| 2973 F3 f = FUNCTION_CAST<F3>(code->entry()); | 2970 F3 f = FUNCTION_CAST<F3>(code->entry()); |
| 2974 for (int i = 0; i < kTableLength; i++) { | 2971 for (int i = 0; i < kTableLength; i++) { |
| 2975 test.a = inputs_D[i]; | 2972 test.a = inputs_D[i]; |
| (...skipping 40 matching lines...) |
| 3016 kFPUInvalidResult, kFPUInvalidResult, | 3013 kFPUInvalidResult, kFPUInvalidResult, |
| 3017 kFPUInvalidResult}; | 3014 kFPUInvalidResult}; |
| 3018 double outputsNaN2008[kTableLength] = { | 3015 double outputsNaN2008[kTableLength] = { |
| 3019 2.0, 2.0, 2.0, 3.0, 3.0, 3.0, | 3016 2.0, 2.0, 2.0, 3.0, 3.0, 3.0, |
| 3020 -3.0, -3.0, -3.0, -4.0, -4.0, -4.0, | 3017 -3.0, -3.0, -3.0, -4.0, -4.0, -4.0, |
| 3021 kFPUInvalidResult, | 3018 kFPUInvalidResult, |
| 3022 0, | 3019 0, |
| 3023 kFPUInvalidResult}; | 3020 kFPUInvalidResult}; |
| 3024 | 3021 |
| 3025 __ cfc1(t1, FCSR); | 3022 __ cfc1(t1, FCSR); |
| 3026 __ sw(t1, MemOperand(a0, offsetof(Test, isNaN2008))); | 3023 __ Sw(t1, MemOperand(a0, offsetof(Test, isNaN2008))); |
| 3027 __ ldc1(f4, MemOperand(a0, offsetof(Test, a)) ); | 3024 __ Ldc1(f4, MemOperand(a0, offsetof(Test, a))); |
| 3028 __ lwc1(f6, MemOperand(a0, offsetof(Test, b)) ); | 3025 __ Lwc1(f6, MemOperand(a0, offsetof(Test, b))); |
| 3029 __ floor_w_d(f8, f4); | 3026 __ floor_w_d(f8, f4); |
| 3030 __ floor_w_s(f10, f6); | 3027 __ floor_w_s(f10, f6); |
| 3031 __ swc1(f8, MemOperand(a0, offsetof(Test, c)) ); | 3028 __ Swc1(f8, MemOperand(a0, offsetof(Test, c))); |
| 3032 __ swc1(f10, MemOperand(a0, offsetof(Test, d)) ); | 3029 __ Swc1(f10, MemOperand(a0, offsetof(Test, d))); |
| 3033 __ jr(ra); | 3030 __ jr(ra); |
| 3034 __ nop(); | 3031 __ nop(); |
| 3035 Test test; | 3032 Test test; |
| 3036 CodeDesc desc; | 3033 CodeDesc desc; |
| 3037 assm.GetCode(&desc); | 3034 assm.GetCode(&desc); |
| 3038 Handle<Code> code = isolate->factory()->NewCode( | 3035 Handle<Code> code = isolate->factory()->NewCode( |
| 3039 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); | 3036 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); |
| 3040 F3 f = FUNCTION_CAST<F3>(code->entry()); | 3037 F3 f = FUNCTION_CAST<F3>(code->entry()); |
| 3041 for (int i = 0; i < kTableLength; i++) { | 3038 for (int i = 0; i < kTableLength; i++) { |
| 3042 test.a = inputs_D[i]; | 3039 test.a = inputs_D[i]; |
| (...skipping 44 matching lines...) |
| 3087 2147483648.0, dFPU64InvalidResult, | 3084 2147483648.0, dFPU64InvalidResult, |
| 3088 dFPU64InvalidResult}; | 3085 dFPU64InvalidResult}; |
| 3089 double outputsNaN2008[kTableLength] = { | 3086 double outputsNaN2008[kTableLength] = { |
| 3090 2.0, 2.0, 2.0, 3.0, 3.0, 3.0, | 3087 2.0, 2.0, 2.0, 3.0, 3.0, 3.0, |
| 3091 -3.0, -3.0, -3.0, -4.0, -4.0, -4.0, | 3088 -3.0, -3.0, -3.0, -4.0, -4.0, -4.0, |
| 3092 2147483648.0, | 3089 2147483648.0, |
| 3093 0, | 3090 0, |
| 3094 dFPU64InvalidResult}; | 3091 dFPU64InvalidResult}; |
| 3095 | 3092 |
| 3096 __ cfc1(t1, FCSR); | 3093 __ cfc1(t1, FCSR); |
| 3097 __ sw(t1, MemOperand(a0, offsetof(Test, isNaN2008))); | 3094 __ Sw(t1, MemOperand(a0, offsetof(Test, isNaN2008))); |
| 3098 __ ldc1(f4, MemOperand(a0, offsetof(Test, a)) ); | 3095 __ Ldc1(f4, MemOperand(a0, offsetof(Test, a))); |
| 3099 __ lwc1(f6, MemOperand(a0, offsetof(Test, b)) ); | 3096 __ Lwc1(f6, MemOperand(a0, offsetof(Test, b))); |
| 3100 __ floor_l_d(f8, f4); | 3097 __ floor_l_d(f8, f4); |
| 3101 __ floor_l_s(f10, f6); | 3098 __ floor_l_s(f10, f6); |
| 3102 __ sdc1(f8, MemOperand(a0, offsetof(Test, c)) ); | 3099 __ Sdc1(f8, MemOperand(a0, offsetof(Test, c))); |
| 3103 __ sdc1(f10, MemOperand(a0, offsetof(Test, d)) ); | 3100 __ Sdc1(f10, MemOperand(a0, offsetof(Test, d))); |
| 3104 __ jr(ra); | 3101 __ jr(ra); |
| 3105 __ nop(); | 3102 __ nop(); |
| 3106 Test test; | 3103 Test test; |
| 3107 CodeDesc desc; | 3104 CodeDesc desc; |
| 3108 assm.GetCode(&desc); | 3105 assm.GetCode(&desc); |
| 3109 Handle<Code> code = isolate->factory()->NewCode( | 3106 Handle<Code> code = isolate->factory()->NewCode( |
| 3110 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); | 3107 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); |
| 3111 F3 f = FUNCTION_CAST<F3>(code->entry()); | 3108 F3 f = FUNCTION_CAST<F3>(code->entry()); |
| 3112 for (int i = 0; i < kTableLength; i++) { | 3109 for (int i = 0; i < kTableLength; i++) { |
| 3113 test.a = inputs_D[i]; | 3110 test.a = inputs_D[i]; |
| (...skipping 44 matching lines...) |
| 3158 kFPUInvalidResult, kFPUInvalidResult, | 3155 kFPUInvalidResult, kFPUInvalidResult, |
| 3159 kFPUInvalidResult}; | 3156 kFPUInvalidResult}; |
| 3160 double outputsNaN2008[kTableLength] = { | 3157 double outputsNaN2008[kTableLength] = { |
| 3161 3.0, 3.0, 3.0, 4.0, 4.0, 4.0, | 3158 3.0, 3.0, 3.0, 4.0, 4.0, 4.0, |
| 3162 -2.0, -2.0, -2.0, -3.0, -3.0, -3.0, | 3159 -2.0, -2.0, -2.0, -3.0, -3.0, -3.0, |
| 3163 kFPUInvalidResult, | 3160 kFPUInvalidResult, |
| 3164 0, | 3161 0, |
| 3165 kFPUInvalidResult}; | 3162 kFPUInvalidResult}; |
| 3166 | 3163 |
| 3167 __ cfc1(t1, FCSR); | 3164 __ cfc1(t1, FCSR); |
| 3168 __ sw(t1, MemOperand(a0, offsetof(Test, isNaN2008))); | 3165 __ Sw(t1, MemOperand(a0, offsetof(Test, isNaN2008))); |
| 3169 __ ldc1(f4, MemOperand(a0, offsetof(Test, a)) ); | 3166 __ Ldc1(f4, MemOperand(a0, offsetof(Test, a))); |
| 3170 __ lwc1(f6, MemOperand(a0, offsetof(Test, b)) ); | 3167 __ Lwc1(f6, MemOperand(a0, offsetof(Test, b))); |
| 3171 __ ceil_w_d(f8, f4); | 3168 __ ceil_w_d(f8, f4); |
| 3172 __ ceil_w_s(f10, f6); | 3169 __ ceil_w_s(f10, f6); |
| 3173 __ swc1(f8, MemOperand(a0, offsetof(Test, c)) ); | 3170 __ Swc1(f8, MemOperand(a0, offsetof(Test, c))); |
| 3174 __ swc1(f10, MemOperand(a0, offsetof(Test, d)) ); | 3171 __ Swc1(f10, MemOperand(a0, offsetof(Test, d))); |
| 3175 __ jr(ra); | 3172 __ jr(ra); |
| 3176 __ nop(); | 3173 __ nop(); |
| 3177 Test test; | 3174 Test test; |
| 3178 CodeDesc desc; | 3175 CodeDesc desc; |
| 3179 assm.GetCode(&desc); | 3176 assm.GetCode(&desc); |
| 3180 Handle<Code> code = isolate->factory()->NewCode( | 3177 Handle<Code> code = isolate->factory()->NewCode( |
| 3181 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); | 3178 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); |
| 3182 F3 f = FUNCTION_CAST<F3>(code->entry()); | 3179 F3 f = FUNCTION_CAST<F3>(code->entry()); |
| 3183 for (int i = 0; i < kTableLength; i++) { | 3180 for (int i = 0; i < kTableLength; i++) { |
| 3184 test.a = inputs_D[i]; | 3181 test.a = inputs_D[i]; |
| (...skipping 44 matching lines...) |
| 3229 2147483648.0, dFPU64InvalidResult, | 3226 2147483648.0, dFPU64InvalidResult, |
| 3230 dFPU64InvalidResult}; | 3227 dFPU64InvalidResult}; |
| 3231 double outputsNaN2008[kTableLength] = { | 3228 double outputsNaN2008[kTableLength] = { |
| 3232 3.0, 3.0, 3.0, 4.0, 4.0, 4.0, | 3229 3.0, 3.0, 3.0, 4.0, 4.0, 4.0, |
| 3233 -2.0, -2.0, -2.0, -3.0, -3.0, -3.0, | 3230 -2.0, -2.0, -2.0, -3.0, -3.0, -3.0, |
| 3234 2147483648.0, | 3231 2147483648.0, |
| 3235 0, | 3232 0, |
| 3236 dFPU64InvalidResult}; | 3233 dFPU64InvalidResult}; |
| 3237 | 3234 |
| 3238 __ cfc1(t1, FCSR); | 3235 __ cfc1(t1, FCSR); |
| 3239 __ sw(t1, MemOperand(a0, offsetof(Test, isNaN2008))); | 3236 __ Sw(t1, MemOperand(a0, offsetof(Test, isNaN2008))); |
| 3240 __ ldc1(f4, MemOperand(a0, offsetof(Test, a)) ); | 3237 __ Ldc1(f4, MemOperand(a0, offsetof(Test, a))); |
| 3241 __ lwc1(f6, MemOperand(a0, offsetof(Test, b)) ); | 3238 __ Lwc1(f6, MemOperand(a0, offsetof(Test, b))); |
| 3242 __ ceil_l_d(f8, f4); | 3239 __ ceil_l_d(f8, f4); |
| 3243 __ ceil_l_s(f10, f6); | 3240 __ ceil_l_s(f10, f6); |
| 3244 __ sdc1(f8, MemOperand(a0, offsetof(Test, c)) ); | 3241 __ Sdc1(f8, MemOperand(a0, offsetof(Test, c))); |
| 3245 __ sdc1(f10, MemOperand(a0, offsetof(Test, d)) ); | 3242 __ Sdc1(f10, MemOperand(a0, offsetof(Test, d))); |
| 3246 __ jr(ra); | 3243 __ jr(ra); |
| 3247 __ nop(); | 3244 __ nop(); |
| 3248 Test test; | 3245 Test test; |
| 3249 CodeDesc desc; | 3246 CodeDesc desc; |
| 3250 assm.GetCode(&desc); | 3247 assm.GetCode(&desc); |
| 3251 Handle<Code> code = isolate->factory()->NewCode( | 3248 Handle<Code> code = isolate->factory()->NewCode( |
| 3252 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); | 3249 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); |
| 3253 F3 f = FUNCTION_CAST<F3>(code->entry()); | 3250 F3 f = FUNCTION_CAST<F3>(code->entry()); |
| 3254 for (int i = 0; i < kTableLength; i++) { | 3251 for (int i = 0; i < kTableLength; i++) { |
| 3255 test.a = inputs_D[i]; | 3252 test.a = inputs_D[i]; |
| 3256 test.b = inputs_S[i]; | 3253 test.b = inputs_S[i]; |
| 3257 (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0)); | 3254 (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0)); |
| 3258 if ((test.isNaN2008 & kFCSRNaN2008FlagMask) && | 3255 if ((test.isNaN2008 & kFCSRNaN2008FlagMask) && |
| 3259 kArchVariant == kMips64r6) { | 3256 kArchVariant == kMips64r6) { |
| 3260 CHECK_EQ(test.c, outputsNaN2008[i]); | 3257 CHECK_EQ(test.c, outputsNaN2008[i]); |
| 3261 } else { | 3258 } else { |
| 3262 CHECK_EQ(test.c, outputs[i]); | 3259 CHECK_EQ(test.c, outputs[i]); |
| 3263 } | 3260 } |
| 3264 CHECK_EQ(test.d, test.c); | 3261 CHECK_EQ(test.d, test.c); |
| 3265 } | 3262 } |
| 3266 } | 3263 } |
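Note: this check pattern (shared by the trunc/round/floor/ceil tests) selects the expected table from the FCSR NAN2008 bit captured with cfc1 before the conversion. A small sketch of that selection, assuming the flag sits at the usual bit 18:

    #include <cstdint>

    // Pick the expected-output table the same way the CHECKs do: the
    // NaN-2008 table only applies on r6 when FCSR reports NAN2008 mode.
    const double* ExpectedTable(uint32_t fcsr, bool is_mips64r6,
                                const double* legacy, const double* nan2008) {
      const uint32_t kNaN2008Mask = 1u << 18;  // assumed value of kFCSRNaN2008FlagMask
      return ((fcsr & kNaN2008Mask) && is_mips64r6) ? nan2008 : legacy;
    }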
| 3267 | 3264 |
| 3268 | 3265 |
| 3269 TEST(jump_tables1) { | 3266 TEST(jump_tables1) { |
| 3270 // Test jump tables with forward jumps. | 3267 // Test jump tables with forward jumps. |
| 3271 CcTest::InitializeVM(); | 3268 CcTest::InitializeVM(); |
| 3272 Isolate* isolate = CcTest::i_isolate(); | 3269 Isolate* isolate = CcTest::i_isolate(); |
| 3273 HandleScope scope(isolate); | 3270 HandleScope scope(isolate); |
| 3274 Assembler assm(isolate, nullptr, 0); | 3271 MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes); |
| 3275 | 3272 |
| 3276 const int kNumCases = 512; | 3273 const int kNumCases = 512; |
| 3277 int values[kNumCases]; | 3274 int values[kNumCases]; |
| 3278 isolate->random_number_generator()->NextBytes(values, sizeof(values)); | 3275 isolate->random_number_generator()->NextBytes(values, sizeof(values)); |
| 3279 Label labels[kNumCases]; | 3276 Label labels[kNumCases]; |
| 3280 | 3277 |
| 3281 __ daddiu(sp, sp, -8); | 3278 __ daddiu(sp, sp, -8); |
| 3282 __ sd(ra, MemOperand(sp)); | 3279 __ Sd(ra, MemOperand(sp)); |
| 3283 __ Align(8); | 3280 __ Align(8); |
| 3284 | 3281 |
| 3285 Label done; | 3282 Label done; |
| 3286 { | 3283 { |
| 3287 __ BlockTrampolinePoolFor(kNumCases * 2 + 6); | 3284 __ BlockTrampolinePoolFor(kNumCases * 2 + 6); |
| 3288 PredictableCodeSizeScope predictable( | 3285 PredictableCodeSizeScope predictable( |
| 3289 &assm, (kNumCases * 2 + 6) * Assembler::kInstrSize); | 3286 &assm, (kNumCases * 2 + 6) * Assembler::kInstrSize); |
| 3290 Label here; | 3287 Label here; |
| 3291 | 3288 |
| 3292 __ bal(&here); | 3289 __ bal(&here); |
| 3293 __ dsll(at, a0, 3); // In delay slot. | 3290 __ dsll(at, a0, 3); // In delay slot. |
| 3294 __ bind(&here); | 3291 __ bind(&here); |
| 3295 __ daddu(at, at, ra); | 3292 __ daddu(at, at, ra); |
| 3296 __ ld(at, MemOperand(at, 4 * Assembler::kInstrSize)); | 3293 __ Ld(at, MemOperand(at, 4 * Assembler::kInstrSize)); |
| 3297 __ jr(at); | 3294 __ jr(at); |
| 3298 __ nop(); | 3295 __ nop(); |
| 3299 for (int i = 0; i < kNumCases; ++i) { | 3296 for (int i = 0; i < kNumCases; ++i) { |
| 3300 __ dd(&labels[i]); | 3297 __ dd(&labels[i]); |
| 3301 } | 3298 } |
| 3302 } | 3299 } |
| 3303 | 3300 |
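Note: the dispatch block above builds a PC-relative jump table: bal leaves the address of the instruction after its delay slot (the daddu) in ra, dsll scales the case index by 8, and the Ld offset of 4 * kInstrSize skips the daddu/Ld/jr/nop that sit between ra and the dd() table. A sketch of the address arithmetic, assuming 4-byte instructions:

    #include <cstdint>

    // Address of the 8-byte table entry for `index`, given the ra value
    // produced by bal.
    uint64_t JumpTableEntryAddress(uint64_t ra, int index) {
      const uint64_t kInstrSize = 4;              // assumed MIPS64 instruction width
      uint64_t table_base = ra + 4 * kInstrSize;  // past daddu, Ld, jr, nop
      return table_base + static_cast<uint64_t>(index) * 8;
    }

jump_tables2 and jump_tables3 below reuse the same dispatch sequence with backward jumps.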
| 3304 for (int i = 0; i < kNumCases; ++i) { | 3301 for (int i = 0; i < kNumCases; ++i) { |
| 3305 __ bind(&labels[i]); | 3302 __ bind(&labels[i]); |
| 3306 __ lui(v0, (values[i] >> 16) & 0xffff); | 3303 __ lui(v0, (values[i] >> 16) & 0xffff); |
| 3307 __ ori(v0, v0, values[i] & 0xffff); | 3304 __ ori(v0, v0, values[i] & 0xffff); |
| 3308 __ b(&done); | 3305 __ b(&done); |
| 3309 __ nop(); | 3306 __ nop(); |
| 3310 } | 3307 } |
| 3311 | 3308 |
| 3312 __ bind(&done); | 3309 __ bind(&done); |
| 3313 __ ld(ra, MemOperand(sp)); | 3310 __ Ld(ra, MemOperand(sp)); |
| 3314 __ daddiu(sp, sp, 8); | 3311 __ daddiu(sp, sp, 8); |
| 3315 __ jr(ra); | 3312 __ jr(ra); |
| 3316 __ nop(); | 3313 __ nop(); |
| 3317 | 3314 |
| 3318 CHECK_EQ(0, assm.UnboundLabelsCount()); | 3315 CHECK_EQ(0, assm.UnboundLabelsCount()); |
| 3319 | 3316 |
| 3320 CodeDesc desc; | 3317 CodeDesc desc; |
| 3321 assm.GetCode(&desc); | 3318 assm.GetCode(&desc); |
| 3322 Handle<Code> code = isolate->factory()->NewCode( | 3319 Handle<Code> code = isolate->factory()->NewCode( |
| 3323 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); | 3320 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); |
| 3324 #ifdef OBJECT_PRINT | 3321 #ifdef OBJECT_PRINT |
| 3325 code->Print(std::cout); | 3322 code->Print(std::cout); |
| 3326 #endif | 3323 #endif |
| 3327 F1 f = FUNCTION_CAST<F1>(code->entry()); | 3324 F1 f = FUNCTION_CAST<F1>(code->entry()); |
| 3328 for (int i = 0; i < kNumCases; ++i) { | 3325 for (int i = 0; i < kNumCases; ++i) { |
| 3329 int64_t res = reinterpret_cast<int64_t>( | 3326 int64_t res = reinterpret_cast<int64_t>( |
| 3330 CALL_GENERATED_CODE(isolate, f, i, 0, 0, 0, 0)); | 3327 CALL_GENERATED_CODE(isolate, f, i, 0, 0, 0, 0)); |
| 3331 ::printf("f(%d) = %" PRId64 "\n", i, res); | 3328 ::printf("f(%d) = %" PRId64 "\n", i, res); |
| 3332 CHECK_EQ(values[i], static_cast<int>(res)); | 3329 CHECK_EQ(values[i], static_cast<int>(res)); |
| 3333 } | 3330 } |
| 3334 } | 3331 } |
| 3335 | 3332 |
| 3336 | 3333 |
| 3337 TEST(jump_tables2) { | 3334 TEST(jump_tables2) { |
| 3338 // Test jump tables with backward jumps. | 3335 // Test jump tables with backward jumps. |
| 3339 CcTest::InitializeVM(); | 3336 CcTest::InitializeVM(); |
| 3340 Isolate* isolate = CcTest::i_isolate(); | 3337 Isolate* isolate = CcTest::i_isolate(); |
| 3341 HandleScope scope(isolate); | 3338 HandleScope scope(isolate); |
| 3342 Assembler assm(isolate, nullptr, 0); | 3339 MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes); |
| 3343 | 3340 |
| 3344 const int kNumCases = 512; | 3341 const int kNumCases = 512; |
| 3345 int values[kNumCases]; | 3342 int values[kNumCases]; |
| 3346 isolate->random_number_generator()->NextBytes(values, sizeof(values)); | 3343 isolate->random_number_generator()->NextBytes(values, sizeof(values)); |
| 3347 Label labels[kNumCases]; | 3344 Label labels[kNumCases]; |
| 3348 | 3345 |
| 3349 __ daddiu(sp, sp, -8); | 3346 __ daddiu(sp, sp, -8); |
| 3350 __ sd(ra, MemOperand(sp)); | 3347 __ Sd(ra, MemOperand(sp)); |
| 3351 | 3348 |
| 3352 Label done, dispatch; | 3349 Label done, dispatch; |
| 3353 __ b(&dispatch); | 3350 __ b(&dispatch); |
| 3354 __ nop(); | 3351 __ nop(); |
| 3355 | 3352 |
| 3356 for (int i = 0; i < kNumCases; ++i) { | 3353 for (int i = 0; i < kNumCases; ++i) { |
| 3357 __ bind(&labels[i]); | 3354 __ bind(&labels[i]); |
| 3358 __ lui(v0, (values[i] >> 16) & 0xffff); | 3355 __ lui(v0, (values[i] >> 16) & 0xffff); |
| 3359 __ ori(v0, v0, values[i] & 0xffff); | 3356 __ ori(v0, v0, values[i] & 0xffff); |
| 3360 __ b(&done); | 3357 __ b(&done); |
| 3361 __ nop(); | 3358 __ nop(); |
| 3362 } | 3359 } |
| 3363 | 3360 |
| 3364 __ Align(8); | 3361 __ Align(8); |
| 3365 __ bind(&dispatch); | 3362 __ bind(&dispatch); |
| 3366 { | 3363 { |
| 3367 __ BlockTrampolinePoolFor(kNumCases * 2 + 6); | 3364 __ BlockTrampolinePoolFor(kNumCases * 2 + 6); |
| 3368 PredictableCodeSizeScope predictable( | 3365 PredictableCodeSizeScope predictable( |
| 3369 &assm, (kNumCases * 2 + 6) * Assembler::kInstrSize); | 3366 &assm, (kNumCases * 2 + 6) * Assembler::kInstrSize); |
| 3370 Label here; | 3367 Label here; |
| 3371 | 3368 |
| 3372 __ bal(&here); | 3369 __ bal(&here); |
| 3373 __ dsll(at, a0, 3); // In delay slot. | 3370 __ dsll(at, a0, 3); // In delay slot. |
| 3374 __ bind(&here); | 3371 __ bind(&here); |
| 3375 __ daddu(at, at, ra); | 3372 __ daddu(at, at, ra); |
| 3376 __ ld(at, MemOperand(at, 4 * Assembler::kInstrSize)); | 3373 __ Ld(at, MemOperand(at, 4 * Assembler::kInstrSize)); |
| 3377 __ jr(at); | 3374 __ jr(at); |
| 3378 __ nop(); | 3375 __ nop(); |
| 3379 for (int i = 0; i < kNumCases; ++i) { | 3376 for (int i = 0; i < kNumCases; ++i) { |
| 3380 __ dd(&labels[i]); | 3377 __ dd(&labels[i]); |
| 3381 } | 3378 } |
| 3382 } | 3379 } |
| 3383 | 3380 |
| 3384 __ bind(&done); | 3381 __ bind(&done); |
| 3385 __ ld(ra, MemOperand(sp)); | 3382 __ Ld(ra, MemOperand(sp)); |
| 3386 __ daddiu(sp, sp, 8); | 3383 __ daddiu(sp, sp, 8); |
| 3387 __ jr(ra); | 3384 __ jr(ra); |
| 3388 __ nop(); | 3385 __ nop(); |
| 3389 | 3386 |
| 3390 CodeDesc desc; | 3387 CodeDesc desc; |
| 3391 assm.GetCode(&desc); | 3388 assm.GetCode(&desc); |
| 3392 Handle<Code> code = isolate->factory()->NewCode( | 3389 Handle<Code> code = isolate->factory()->NewCode( |
| 3393 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); | 3390 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); |
| 3394 #ifdef OBJECT_PRINT | 3391 #ifdef OBJECT_PRINT |
| 3395 code->Print(std::cout); | 3392 code->Print(std::cout); |
| 3396 #endif | 3393 #endif |
| 3397 F1 f = FUNCTION_CAST<F1>(code->entry()); | 3394 F1 f = FUNCTION_CAST<F1>(code->entry()); |
| 3398 for (int i = 0; i < kNumCases; ++i) { | 3395 for (int i = 0; i < kNumCases; ++i) { |
| 3399 int64_t res = reinterpret_cast<int64_t>( | 3396 int64_t res = reinterpret_cast<int64_t>( |
| 3400 CALL_GENERATED_CODE(isolate, f, i, 0, 0, 0, 0)); | 3397 CALL_GENERATED_CODE(isolate, f, i, 0, 0, 0, 0)); |
| 3401 ::printf("f(%d) = %" PRId64 "\n", i, res); | 3398 ::printf("f(%d) = %" PRId64 "\n", i, res); |
| 3402 CHECK_EQ(values[i], res); | 3399 CHECK_EQ(values[i], res); |
| 3403 } | 3400 } |
| 3404 } | 3401 } |
| 3405 | 3402 |
| 3406 | 3403 |
| 3407 TEST(jump_tables3) { | 3404 TEST(jump_tables3) { |
| 3408 // Test jump tables with backward jumps and embedded heap objects. | 3405 // Test jump tables with backward jumps and embedded heap objects. |
| 3409 CcTest::InitializeVM(); | 3406 CcTest::InitializeVM(); |
| 3410 Isolate* isolate = CcTest::i_isolate(); | 3407 Isolate* isolate = CcTest::i_isolate(); |
| 3411 HandleScope scope(isolate); | 3408 HandleScope scope(isolate); |
| 3412 Assembler assm(isolate, nullptr, 0); | 3409 MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes); |
| 3413 | 3410 |
| 3414 const int kNumCases = 512; | 3411 const int kNumCases = 512; |
| 3415 Handle<Object> values[kNumCases]; | 3412 Handle<Object> values[kNumCases]; |
| 3416 for (int i = 0; i < kNumCases; ++i) { | 3413 for (int i = 0; i < kNumCases; ++i) { |
| 3417 double value = isolate->random_number_generator()->NextDouble(); | 3414 double value = isolate->random_number_generator()->NextDouble(); |
| 3418 values[i] = isolate->factory()->NewHeapNumber(value, IMMUTABLE, TENURED); | 3415 values[i] = isolate->factory()->NewHeapNumber(value, IMMUTABLE, TENURED); |
| 3419 } | 3416 } |
| 3420 Label labels[kNumCases]; | 3417 Label labels[kNumCases]; |
| 3421 Object* obj; | 3418 Object* obj; |
| 3422 int64_t imm64; | 3419 int64_t imm64; |
| 3423 | 3420 |
| 3424 __ daddiu(sp, sp, -8); | 3421 __ daddiu(sp, sp, -8); |
| 3425 __ sd(ra, MemOperand(sp)); | 3422 __ Sd(ra, MemOperand(sp)); |
| 3426 | 3423 |
| 3427 Label done, dispatch; | 3424 Label done, dispatch; |
| 3428 __ b(&dispatch); | 3425 __ b(&dispatch); |
| 3429 __ nop(); | 3426 __ nop(); |
| 3430 | 3427 |
| 3431 | 3428 |
| 3432 for (int i = 0; i < kNumCases; ++i) { | 3429 for (int i = 0; i < kNumCases; ++i) { |
| 3433 __ bind(&labels[i]); | 3430 __ bind(&labels[i]); |
| 3434 obj = *values[i]; | 3431 obj = *values[i]; |
| 3435 imm64 = reinterpret_cast<intptr_t>(obj); | 3432 imm64 = reinterpret_cast<intptr_t>(obj); |
| (...skipping 10 matching lines...) |
| 3446 { | 3443 { |
| 3447 __ BlockTrampolinePoolFor(kNumCases * 2 + 6); | 3444 __ BlockTrampolinePoolFor(kNumCases * 2 + 6); |
| 3448 PredictableCodeSizeScope predictable( | 3445 PredictableCodeSizeScope predictable( |
| 3449 &assm, (kNumCases * 2 + 6) * Assembler::kInstrSize); | 3446 &assm, (kNumCases * 2 + 6) * Assembler::kInstrSize); |
| 3450 Label here; | 3447 Label here; |
| 3451 | 3448 |
| 3452 __ bal(&here); | 3449 __ bal(&here); |
| 3453 __ dsll(at, a0, 3); // In delay slot. | 3450 __ dsll(at, a0, 3); // In delay slot. |
| 3454 __ bind(&here); | 3451 __ bind(&here); |
| 3455 __ daddu(at, at, ra); | 3452 __ daddu(at, at, ra); |
| 3456 __ ld(at, MemOperand(at, 4 * Assembler::kInstrSize)); | 3453 __ Ld(at, MemOperand(at, 4 * Assembler::kInstrSize)); |
| 3457 __ jr(at); | 3454 __ jr(at); |
| 3458 __ nop(); | 3455 __ nop(); |
| 3459 for (int i = 0; i < kNumCases; ++i) { | 3456 for (int i = 0; i < kNumCases; ++i) { |
| 3460 __ dd(&labels[i]); | 3457 __ dd(&labels[i]); |
| 3461 } | 3458 } |
| 3462 } | 3459 } |
| 3463 | 3460 |
| 3464 __ bind(&done); | 3461 __ bind(&done); |
| 3465 __ ld(ra, MemOperand(sp)); | 3462 __ Ld(ra, MemOperand(sp)); |
| 3466 __ daddiu(sp, sp, 8); | 3463 __ daddiu(sp, sp, 8); |
| 3467 __ jr(ra); | 3464 __ jr(ra); |
| 3468 __ nop(); | 3465 __ nop(); |
| 3469 | 3466 |
| 3470 CodeDesc desc; | 3467 CodeDesc desc; |
| 3471 assm.GetCode(&desc); | 3468 assm.GetCode(&desc); |
| 3472 Handle<Code> code = isolate->factory()->NewCode( | 3469 Handle<Code> code = isolate->factory()->NewCode( |
| 3473 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); | 3470 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); |
| 3474 #ifdef OBJECT_PRINT | 3471 #ifdef OBJECT_PRINT |
| 3475 code->Print(std::cout); | 3472 code->Print(std::cout); |
| (...skipping 22 matching lines...) |
| 3498 typedef struct { | 3495 typedef struct { |
| 3499 int64_t r1; | 3496 int64_t r1; |
| 3500 int64_t r2; | 3497 int64_t r2; |
| 3501 int64_t r3; | 3498 int64_t r3; |
| 3502 int64_t r4; | 3499 int64_t r4; |
| 3503 int64_t r5; | 3500 int64_t r5; |
| 3504 int64_t r6; | 3501 int64_t r6; |
| 3505 } T; | 3502 } T; |
| 3506 T t; | 3503 T t; |
| 3507 | 3504 |
| 3508 Assembler assm(isolate, NULL, 0); | 3505 MacroAssembler assm(isolate, NULL, 0, |
| | 3506 v8::internal::CodeObjectRequired::kYes); |
| 3509 | 3507 |
| 3510 __ ld(a4, MemOperand(a0, offsetof(T, r1))); | 3508 __ Ld(a4, MemOperand(a0, offsetof(T, r1))); |
| 3511 __ nop(); | 3509 __ nop(); |
| 3512 __ bitswap(a6, a4); | 3510 __ bitswap(a6, a4); |
| 3513 __ sd(a6, MemOperand(a0, offsetof(T, r1))); | 3511 __ Sd(a6, MemOperand(a0, offsetof(T, r1))); |
| 3514 | 3512 |
| 3515 __ ld(a4, MemOperand(a0, offsetof(T, r2))); | 3513 __ Ld(a4, MemOperand(a0, offsetof(T, r2))); |
| 3516 __ nop(); | 3514 __ nop(); |
| 3517 __ bitswap(a6, a4); | 3515 __ bitswap(a6, a4); |
| 3518 __ sd(a6, MemOperand(a0, offsetof(T, r2))); | 3516 __ Sd(a6, MemOperand(a0, offsetof(T, r2))); |
| 3519 | 3517 |
| 3520 __ ld(a4, MemOperand(a0, offsetof(T, r3))); | 3518 __ Ld(a4, MemOperand(a0, offsetof(T, r3))); |
| 3521 __ nop(); | 3519 __ nop(); |
| 3522 __ bitswap(a6, a4); | 3520 __ bitswap(a6, a4); |
| 3523 __ sd(a6, MemOperand(a0, offsetof(T, r3))); | 3521 __ Sd(a6, MemOperand(a0, offsetof(T, r3))); |
| 3524 | 3522 |
| 3525 __ ld(a4, MemOperand(a0, offsetof(T, r4))); | 3523 __ Ld(a4, MemOperand(a0, offsetof(T, r4))); |
| 3526 __ nop(); | 3524 __ nop(); |
| 3527 __ bitswap(a6, a4); | 3525 __ bitswap(a6, a4); |
| 3528 __ sd(a6, MemOperand(a0, offsetof(T, r4))); | 3526 __ Sd(a6, MemOperand(a0, offsetof(T, r4))); |
| 3529 | 3527 |
| 3530 __ ld(a4, MemOperand(a0, offsetof(T, r5))); | 3528 __ Ld(a4, MemOperand(a0, offsetof(T, r5))); |
| 3531 __ nop(); | 3529 __ nop(); |
| 3532 __ dbitswap(a6, a4); | 3530 __ dbitswap(a6, a4); |
| 3533 __ sd(a6, MemOperand(a0, offsetof(T, r5))); | 3531 __ Sd(a6, MemOperand(a0, offsetof(T, r5))); |
| 3534 | 3532 |
| 3535 __ ld(a4, MemOperand(a0, offsetof(T, r6))); | 3533 __ Ld(a4, MemOperand(a0, offsetof(T, r6))); |
| 3536 __ nop(); | 3534 __ nop(); |
| 3537 __ dbitswap(a6, a4); | 3535 __ dbitswap(a6, a4); |
| 3538 __ sd(a6, MemOperand(a0, offsetof(T, r6))); | 3536 __ Sd(a6, MemOperand(a0, offsetof(T, r6))); |
| 3539 | 3537 |
| 3540 __ jr(ra); | 3538 __ jr(ra); |
| 3541 __ nop(); | 3539 __ nop(); |
| 3542 | 3540 |
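For the six cases above, `bitswap` reverses the bit order within each byte of the low 32-bit word (with the usual MIPS64 sign extension of a 32-bit result), and `dbitswap` does the same for all eight bytes of a doubleword; byte positions do not move. A hedged reference model of the per-byte reversal the CHECKs below rely on (helper names are illustrative, not from the CL):

    #include <cstdint>

    // Reverse the bits inside one byte.
    uint8_t ReverseBitsInByte(uint8_t b) {
      b = static_cast<uint8_t>((b & 0xF0) >> 4 | (b & 0x0F) << 4);
      b = static_cast<uint8_t>((b & 0xCC) >> 2 | (b & 0x33) << 2);
      b = static_cast<uint8_t>((b & 0xAA) >> 1 | (b & 0x55) << 1);
      return b;
    }

    // Apply the reversal to each byte of a doubleword (dbitswap-style).
    uint64_t DBitswapModel(uint64_t x) {
      uint64_t r = 0;
      for (int i = 0; i < 8; ++i) {
        uint8_t byte = ReverseBitsInByte(static_cast<uint8_t>(x >> (8 * i)));
        r |= static_cast<uint64_t>(byte) << (8 * i);
      }
      return r;
    }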
| 3543 CodeDesc desc; | 3541 CodeDesc desc; |
| 3544 assm.GetCode(&desc); | 3542 assm.GetCode(&desc); |
| 3545 Handle<Code> code = isolate->factory()->NewCode( | 3543 Handle<Code> code = isolate->factory()->NewCode( |
| 3546 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); | 3544 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); |
| 3547 F3 f = FUNCTION_CAST<F3>(code->entry()); | 3545 F3 f = FUNCTION_CAST<F3>(code->entry()); |
| 3548 t.r1 = 0x00102100781A15C3; | 3546 t.r1 = 0x00102100781A15C3; |
| (...skipping 43 matching lines...) |
| 3592 float fPosNorm; | 3590 float fPosNorm; |
| 3593 float fPosSubnorm; | 3591 float fPosSubnorm; |
| 3594 float fPosZero; } T; | 3592 float fPosZero; } T; |
| 3595 T t; | 3593 T t; |
| 3596 | 3594 |
| 3597 // Create a function that accepts &t, and loads, manipulates, and stores | 3595 // Create a function that accepts &t, and loads, manipulates, and stores |
| 3598 // the doubles t.a ... t.f. | 3596 // the doubles t.a ... t.f. |
| 3599 MacroAssembler assm(isolate, NULL, 0, | 3597 MacroAssembler assm(isolate, NULL, 0, |
| 3600 v8::internal::CodeObjectRequired::kYes); | 3598 v8::internal::CodeObjectRequired::kYes); |
| 3601 | 3599 |
| 3602 __ ldc1(f4, MemOperand(a0, offsetof(T, dSignalingNan))); | 3600 __ Ldc1(f4, MemOperand(a0, offsetof(T, dSignalingNan))); |
| 3603 __ class_d(f6, f4); | 3601 __ class_d(f6, f4); |
| 3604 __ sdc1(f6, MemOperand(a0, offsetof(T, dSignalingNan))); | 3602 __ Sdc1(f6, MemOperand(a0, offsetof(T, dSignalingNan))); |
| 3605 | 3603 |
| 3606 __ ldc1(f4, MemOperand(a0, offsetof(T, dQuietNan))); | 3604 __ Ldc1(f4, MemOperand(a0, offsetof(T, dQuietNan))); |
| 3607 __ class_d(f6, f4); | 3605 __ class_d(f6, f4); |
| 3608 __ sdc1(f6, MemOperand(a0, offsetof(T, dQuietNan))); | 3606 __ Sdc1(f6, MemOperand(a0, offsetof(T, dQuietNan))); |
| 3609 | 3607 |
| 3610 __ ldc1(f4, MemOperand(a0, offsetof(T, dNegInf))); | 3608 __ Ldc1(f4, MemOperand(a0, offsetof(T, dNegInf))); |
| 3611 __ class_d(f6, f4); | 3609 __ class_d(f6, f4); |
| 3612 __ sdc1(f6, MemOperand(a0, offsetof(T, dNegInf))); | 3610 __ Sdc1(f6, MemOperand(a0, offsetof(T, dNegInf))); |
| 3613 | 3611 |
| 3614 __ ldc1(f4, MemOperand(a0, offsetof(T, dNegNorm))); | 3612 __ Ldc1(f4, MemOperand(a0, offsetof(T, dNegNorm))); |
| 3615 __ class_d(f6, f4); | 3613 __ class_d(f6, f4); |
| 3616 __ sdc1(f6, MemOperand(a0, offsetof(T, dNegNorm))); | 3614 __ Sdc1(f6, MemOperand(a0, offsetof(T, dNegNorm))); |
| 3617 | 3615 |
| 3618 __ ldc1(f4, MemOperand(a0, offsetof(T, dNegSubnorm))); | 3616 __ Ldc1(f4, MemOperand(a0, offsetof(T, dNegSubnorm))); |
| 3619 __ class_d(f6, f4); | 3617 __ class_d(f6, f4); |
| 3620 __ sdc1(f6, MemOperand(a0, offsetof(T, dNegSubnorm))); | 3618 __ Sdc1(f6, MemOperand(a0, offsetof(T, dNegSubnorm))); |
| 3621 | 3619 |
| 3622 __ ldc1(f4, MemOperand(a0, offsetof(T, dNegZero))); | 3620 __ Ldc1(f4, MemOperand(a0, offsetof(T, dNegZero))); |
| 3623 __ class_d(f6, f4); | 3621 __ class_d(f6, f4); |
| 3624 __ sdc1(f6, MemOperand(a0, offsetof(T, dNegZero))); | 3622 __ Sdc1(f6, MemOperand(a0, offsetof(T, dNegZero))); |
| 3625 | 3623 |
| 3626 __ ldc1(f4, MemOperand(a0, offsetof(T, dPosInf))); | 3624 __ Ldc1(f4, MemOperand(a0, offsetof(T, dPosInf))); |
| 3627 __ class_d(f6, f4); | 3625 __ class_d(f6, f4); |
| 3628 __ sdc1(f6, MemOperand(a0, offsetof(T, dPosInf))); | 3626 __ Sdc1(f6, MemOperand(a0, offsetof(T, dPosInf))); |
| 3629 | 3627 |
| 3630 __ ldc1(f4, MemOperand(a0, offsetof(T, dPosNorm))); | 3628 __ Ldc1(f4, MemOperand(a0, offsetof(T, dPosNorm))); |
| 3631 __ class_d(f6, f4); | 3629 __ class_d(f6, f4); |
| 3632 __ sdc1(f6, MemOperand(a0, offsetof(T, dPosNorm))); | 3630 __ Sdc1(f6, MemOperand(a0, offsetof(T, dPosNorm))); |
| 3633 | 3631 |
| 3634 __ ldc1(f4, MemOperand(a0, offsetof(T, dPosSubnorm))); | 3632 __ Ldc1(f4, MemOperand(a0, offsetof(T, dPosSubnorm))); |
| 3635 __ class_d(f6, f4); | 3633 __ class_d(f6, f4); |
| 3636 __ sdc1(f6, MemOperand(a0, offsetof(T, dPosSubnorm))); | 3634 __ Sdc1(f6, MemOperand(a0, offsetof(T, dPosSubnorm))); |
| 3637 | 3635 |
| 3638 __ ldc1(f4, MemOperand(a0, offsetof(T, dPosZero))); | 3636 __ Ldc1(f4, MemOperand(a0, offsetof(T, dPosZero))); |
| 3639 __ class_d(f6, f4); | 3637 __ class_d(f6, f4); |
| 3640 __ sdc1(f6, MemOperand(a0, offsetof(T, dPosZero))); | 3638 __ Sdc1(f6, MemOperand(a0, offsetof(T, dPosZero))); |
| 3641 | 3639 |
| 3642 // Testing instruction CLASS.S | 3640 // Testing instruction CLASS.S |
| 3643 __ lwc1(f4, MemOperand(a0, offsetof(T, fSignalingNan))); | 3641 __ Lwc1(f4, MemOperand(a0, offsetof(T, fSignalingNan))); |
| 3644 __ class_s(f6, f4); | 3642 __ class_s(f6, f4); |
| 3645 __ swc1(f6, MemOperand(a0, offsetof(T, fSignalingNan))); | 3643 __ Swc1(f6, MemOperand(a0, offsetof(T, fSignalingNan))); |
| 3646 | 3644 |
| 3647 __ lwc1(f4, MemOperand(a0, offsetof(T, fQuietNan))); | 3645 __ Lwc1(f4, MemOperand(a0, offsetof(T, fQuietNan))); |
| 3648 __ class_s(f6, f4); | 3646 __ class_s(f6, f4); |
| 3649 __ swc1(f6, MemOperand(a0, offsetof(T, fQuietNan))); | 3647 __ Swc1(f6, MemOperand(a0, offsetof(T, fQuietNan))); |
| 3650 | 3648 |
| 3651 __ lwc1(f4, MemOperand(a0, offsetof(T, fNegInf))); | 3649 __ Lwc1(f4, MemOperand(a0, offsetof(T, fNegInf))); |
| 3652 __ class_s(f6, f4); | 3650 __ class_s(f6, f4); |
| 3653 __ swc1(f6, MemOperand(a0, offsetof(T, fNegInf))); | 3651 __ Swc1(f6, MemOperand(a0, offsetof(T, fNegInf))); |
| 3654 | 3652 |
| 3655 __ lwc1(f4, MemOperand(a0, offsetof(T, fNegNorm))); | 3653 __ Lwc1(f4, MemOperand(a0, offsetof(T, fNegNorm))); |
| 3656 __ class_s(f6, f4); | 3654 __ class_s(f6, f4); |
| 3657 __ swc1(f6, MemOperand(a0, offsetof(T, fNegNorm))); | 3655 __ Swc1(f6, MemOperand(a0, offsetof(T, fNegNorm))); |
| 3658 | 3656 |
| 3659 __ lwc1(f4, MemOperand(a0, offsetof(T, fNegSubnorm))); | 3657 __ Lwc1(f4, MemOperand(a0, offsetof(T, fNegSubnorm))); |
| 3660 __ class_s(f6, f4); | 3658 __ class_s(f6, f4); |
| 3661 __ swc1(f6, MemOperand(a0, offsetof(T, fNegSubnorm))); | 3659 __ Swc1(f6, MemOperand(a0, offsetof(T, fNegSubnorm))); |
| 3662 | 3660 |
| 3663 __ lwc1(f4, MemOperand(a0, offsetof(T, fNegZero))); | 3661 __ Lwc1(f4, MemOperand(a0, offsetof(T, fNegZero))); |
| 3664 __ class_s(f6, f4); | 3662 __ class_s(f6, f4); |
| 3665 __ swc1(f6, MemOperand(a0, offsetof(T, fNegZero))); | 3663 __ Swc1(f6, MemOperand(a0, offsetof(T, fNegZero))); |
| 3666 | 3664 |
| 3667 __ lwc1(f4, MemOperand(a0, offsetof(T, fPosInf))); | 3665 __ Lwc1(f4, MemOperand(a0, offsetof(T, fPosInf))); |
| 3668 __ class_s(f6, f4); | 3666 __ class_s(f6, f4); |
| 3669 __ swc1(f6, MemOperand(a0, offsetof(T, fPosInf))); | 3667 __ Swc1(f6, MemOperand(a0, offsetof(T, fPosInf))); |
| 3670 | 3668 |
| 3671 __ lwc1(f4, MemOperand(a0, offsetof(T, fPosNorm))); | 3669 __ Lwc1(f4, MemOperand(a0, offsetof(T, fPosNorm))); |
| 3672 __ class_s(f6, f4); | 3670 __ class_s(f6, f4); |
| 3673 __ swc1(f6, MemOperand(a0, offsetof(T, fPosNorm))); | 3671 __ Swc1(f6, MemOperand(a0, offsetof(T, fPosNorm))); |
| 3674 | 3672 |
| 3675 __ lwc1(f4, MemOperand(a0, offsetof(T, fPosSubnorm))); | 3673 __ Lwc1(f4, MemOperand(a0, offsetof(T, fPosSubnorm))); |
| 3676 __ class_s(f6, f4); | 3674 __ class_s(f6, f4); |
| 3677 __ swc1(f6, MemOperand(a0, offsetof(T, fPosSubnorm))); | 3675 __ Swc1(f6, MemOperand(a0, offsetof(T, fPosSubnorm))); |
| 3678 | 3676 |
| 3679 __ lwc1(f4, MemOperand(a0, offsetof(T, fPosZero))); | 3677 __ Lwc1(f4, MemOperand(a0, offsetof(T, fPosZero))); |
| 3680 __ class_s(f6, f4); | 3678 __ class_s(f6, f4); |
| 3681 __ swc1(f6, MemOperand(a0, offsetof(T, fPosZero))); | 3679 __ Swc1(f6, MemOperand(a0, offsetof(T, fPosZero))); |
| 3682 | 3680 |
| 3683 __ jr(ra); | 3681 __ jr(ra); |
| 3684 __ nop(); | 3682 __ nop(); |
| 3685 | 3683 |
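`class_d`/`class_s` write a one-hot bit mask into the destination FPR identifying the class of the operand, which is what the (elided) expectations below compare against. A hedged reading of the CLASS.fmt mask from the MIPS64r6 manual, expressed as an illustrative enum (bit assignments recalled from the spec, not taken from this CL):

    // One bit per input class, as reported by CLASS.fmt.
    enum ClassMask : int {
      kSignalingNan = 1 << 0,
      kQuietNan     = 1 << 1,
      kNegInfinity  = 1 << 2,
      kNegNormal    = 1 << 3,
      kNegSubnormal = 1 << 4,
      kNegZero      = 1 << 5,
      kPosInfinity  = 1 << 6,
      kPosNormal    = 1 << 7,
      kPosSubnormal = 1 << 8,
      kPosZero      = 1 << 9,
    };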
| 3686 CodeDesc desc; | 3684 CodeDesc desc; |
| 3687 assm.GetCode(&desc); | 3685 assm.GetCode(&desc); |
| 3688 Handle<Code> code = isolate->factory()->NewCode( | 3686 Handle<Code> code = isolate->factory()->NewCode( |
| 3689 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); | 3687 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); |
| 3690 F3 f = FUNCTION_CAST<F3>(code->entry()); | 3688 F3 f = FUNCTION_CAST<F3>(code->entry()); |
| 3691 | 3689 |
| (...skipping 60 matching lines...) |
| 3752 int64_t fir; | 3750 int64_t fir; |
| 3753 double a; | 3751 double a; |
| 3754 float b; | 3752 float b; |
| 3755 double fcsr; | 3753 double fcsr; |
| 3756 } TestFloat; | 3754 } TestFloat; |
| 3757 | 3755 |
| 3758 TestFloat test; | 3756 TestFloat test; |
| 3759 | 3757 |
| 3760 // Save FCSR. | 3758 // Save FCSR. |
| 3761 __ cfc1(a1, FCSR); | 3759 __ cfc1(a1, FCSR); |
| 3762 __ sd(a1, MemOperand(a0, offsetof(TestFloat, fcsr))); | 3760 __ Sd(a1, MemOperand(a0, offsetof(TestFloat, fcsr))); |
| 3763 // Disable FPU exceptions. | 3761 // Disable FPU exceptions. |
| 3764 __ ctc1(zero_reg, FCSR); | 3762 __ ctc1(zero_reg, FCSR); |
| 3765 | 3763 |
| 3766 __ ldc1(f4, MemOperand(a0, offsetof(TestFloat, a))); | 3764 __ Ldc1(f4, MemOperand(a0, offsetof(TestFloat, a))); |
| 3767 __ abs_d(f10, f4); | 3765 __ abs_d(f10, f4); |
| 3768 __ sdc1(f10, MemOperand(a0, offsetof(TestFloat, a))); | 3766 __ Sdc1(f10, MemOperand(a0, offsetof(TestFloat, a))); |
| 3769 | 3767 |
| 3770 __ lwc1(f4, MemOperand(a0, offsetof(TestFloat, b))); | 3768 __ Lwc1(f4, MemOperand(a0, offsetof(TestFloat, b))); |
| 3771 __ abs_s(f10, f4); | 3769 __ abs_s(f10, f4); |
| 3772 __ swc1(f10, MemOperand(a0, offsetof(TestFloat, b))); | 3770 __ Swc1(f10, MemOperand(a0, offsetof(TestFloat, b))); |
| 3773 | 3771 |
| 3774 // Restore FCSR. | 3772 // Restore FCSR. |
| 3775 __ ctc1(a1, FCSR); | 3773 __ ctc1(a1, FCSR); |
| 3776 | 3774 |
| 3777 __ jr(ra); | 3775 __ jr(ra); |
| 3778 __ nop(); | 3776 __ nop(); |
| 3779 | 3777 |
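FCSR is saved and its enable bits cleared before the two abs operations so that a NaN operand cannot raise an FPU exception; under ABS2008 semantics the operation then reduces to clearing the sign bit, NaN or not. A minimal sketch of that expectation, assuming ABS2008 behaviour on the tested FPU (the helper name is illustrative):

    #include <cstdint>
    #include <cstring>

    // abs.d modelled as a pure sign-bit clear, which also applies to NaNs.
    double AbsDModel(double x) {
      uint64_t bits;
      std::memcpy(&bits, &x, sizeof(bits));
      bits &= ~(UINT64_C(1) << 63);  // clear the sign bit only
      std::memcpy(&x, &bits, sizeof(x));
      return x;
    }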
| 3780 CodeDesc desc; | 3778 CodeDesc desc; |
| 3781 assm.GetCode(&desc); | 3779 assm.GetCode(&desc); |
| 3782 Handle<Code> code = isolate->factory()->NewCode( | 3780 Handle<Code> code = isolate->factory()->NewCode( |
| (...skipping 67 matching lines...) |
| 3850 double a; | 3848 double a; |
| 3851 double b; | 3849 double b; |
| 3852 double c; | 3850 double c; |
| 3853 float fa; | 3851 float fa; |
| 3854 float fb; | 3852 float fb; |
| 3855 float fc; | 3853 float fc; |
| 3856 } TestFloat; | 3854 } TestFloat; |
| 3857 | 3855 |
| 3858 TestFloat test; | 3856 TestFloat test; |
| 3859 | 3857 |
| 3860 __ ldc1(f4, MemOperand(a0, offsetof(TestFloat, a))); | 3858 __ Ldc1(f4, MemOperand(a0, offsetof(TestFloat, a))); |
| 3861 __ ldc1(f8, MemOperand(a0, offsetof(TestFloat, b))); | 3859 __ Ldc1(f8, MemOperand(a0, offsetof(TestFloat, b))); |
| 3862 __ add_d(f10, f8, f4); | 3860 __ add_d(f10, f8, f4); |
| 3863 __ sdc1(f10, MemOperand(a0, offsetof(TestFloat, c))); | 3861 __ Sdc1(f10, MemOperand(a0, offsetof(TestFloat, c))); |
| 3864 | 3862 |
| 3865 __ lwc1(f4, MemOperand(a0, offsetof(TestFloat, fa))); | 3863 __ Lwc1(f4, MemOperand(a0, offsetof(TestFloat, fa))); |
| 3866 __ lwc1(f8, MemOperand(a0, offsetof(TestFloat, fb))); | 3864 __ Lwc1(f8, MemOperand(a0, offsetof(TestFloat, fb))); |
| 3867 __ add_s(f10, f8, f4); | 3865 __ add_s(f10, f8, f4); |
| 3868 __ swc1(f10, MemOperand(a0, offsetof(TestFloat, fc))); | 3866 __ Swc1(f10, MemOperand(a0, offsetof(TestFloat, fc))); |
| 3869 | 3867 |
| 3870 __ jr(ra); | 3868 __ jr(ra); |
| 3871 __ nop(); | 3869 __ nop(); |
| 3872 | 3870 |
| 3873 CodeDesc desc; | 3871 CodeDesc desc; |
| 3874 assm.GetCode(&desc); | 3872 assm.GetCode(&desc); |
| 3875 Handle<Code> code = isolate->factory()->NewCode( | 3873 Handle<Code> code = isolate->factory()->NewCode( |
| 3876 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); | 3874 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); |
| 3877 F3 f = FUNCTION_CAST<F3>(code->entry()); | 3875 F3 f = FUNCTION_CAST<F3>(code->entry()); |
| 3878 test.a = 2.0; | 3876 test.a = 2.0; |
| (...skipping 58 matching lines...) |
| 3937 uint32_t fOlt; | 3935 uint32_t fOlt; |
| 3938 uint32_t fUlt; | 3936 uint32_t fUlt; |
| 3939 uint32_t fOle; | 3937 uint32_t fOle; |
| 3940 uint32_t fUle; | 3938 uint32_t fUle; |
| 3941 } TestFloat; | 3939 } TestFloat; |
| 3942 | 3940 |
| 3943 TestFloat test; | 3941 TestFloat test; |
| 3944 | 3942 |
| 3945 __ li(t1, 1); | 3943 __ li(t1, 1); |
| 3946 | 3944 |
| 3947 __ ldc1(f4, MemOperand(a0, offsetof(TestFloat, dOp1))); | 3945 __ Ldc1(f4, MemOperand(a0, offsetof(TestFloat, dOp1))); |
| 3948 __ ldc1(f6, MemOperand(a0, offsetof(TestFloat, dOp2))); | 3946 __ Ldc1(f6, MemOperand(a0, offsetof(TestFloat, dOp2))); |
| 3949 | 3947 |
| 3950 __ lwc1(f14, MemOperand(a0, offsetof(TestFloat, fOp1))); | 3948 __ Lwc1(f14, MemOperand(a0, offsetof(TestFloat, fOp1))); |
| 3951 __ lwc1(f16, MemOperand(a0, offsetof(TestFloat, fOp2))); | 3949 __ Lwc1(f16, MemOperand(a0, offsetof(TestFloat, fOp2))); |
| 3952 | 3950 |
| 3953 __ mov(t2, zero_reg); | 3951 __ mov(t2, zero_reg); |
| 3954 __ mov(t3, zero_reg); | 3952 __ mov(t3, zero_reg); |
| 3955 __ c_d(F, f4, f6, 0); | 3953 __ c_d(F, f4, f6, 0); |
| 3956 __ c_s(F, f14, f16, 2); | 3954 __ c_s(F, f14, f16, 2); |
| 3957 __ movt(t2, t1, 0); | 3955 __ movt(t2, t1, 0); |
| 3958 __ movt(t3, t1, 2); | 3956 __ movt(t3, t1, 2); |
| 3959 __ sw(t2, MemOperand(a0, offsetof(TestFloat, dF)) ); | 3957 __ Sw(t2, MemOperand(a0, offsetof(TestFloat, dF))); |
| 3960 __ sw(t3, MemOperand(a0, offsetof(TestFloat, fF)) ); | 3958 __ Sw(t3, MemOperand(a0, offsetof(TestFloat, fF))); |
| 3961 | 3959 |
| 3962 __ mov(t2, zero_reg); | 3960 __ mov(t2, zero_reg); |
| 3963 __ mov(t3, zero_reg); | 3961 __ mov(t3, zero_reg); |
| 3964 __ c_d(UN, f4, f6, 2); | 3962 __ c_d(UN, f4, f6, 2); |
| 3965 __ c_s(UN, f14, f16, 4); | 3963 __ c_s(UN, f14, f16, 4); |
| 3966 __ movt(t2, t1, 2); | 3964 __ movt(t2, t1, 2); |
| 3967 __ movt(t3, t1, 4); | 3965 __ movt(t3, t1, 4); |
| 3968 __ sw(t2, MemOperand(a0, offsetof(TestFloat, dUn)) ); | 3966 __ Sw(t2, MemOperand(a0, offsetof(TestFloat, dUn))); |
| 3969 __ sw(t3, MemOperand(a0, offsetof(TestFloat, fUn)) ); | 3967 __ Sw(t3, MemOperand(a0, offsetof(TestFloat, fUn))); |
| 3970 | 3968 |
| 3971 __ mov(t2, zero_reg); | 3969 __ mov(t2, zero_reg); |
| 3972 __ mov(t3, zero_reg); | 3970 __ mov(t3, zero_reg); |
| 3973 __ c_d(EQ, f4, f6, 4); | 3971 __ c_d(EQ, f4, f6, 4); |
| 3974 __ c_s(EQ, f14, f16, 6); | 3972 __ c_s(EQ, f14, f16, 6); |
| 3975 __ movt(t2, t1, 4); | 3973 __ movt(t2, t1, 4); |
| 3976 __ movt(t3, t1, 6); | 3974 __ movt(t3, t1, 6); |
| 3977 __ sw(t2, MemOperand(a0, offsetof(TestFloat, dEq)) ); | 3975 __ Sw(t2, MemOperand(a0, offsetof(TestFloat, dEq))); |
| 3978 __ sw(t3, MemOperand(a0, offsetof(TestFloat, fEq)) ); | 3976 __ Sw(t3, MemOperand(a0, offsetof(TestFloat, fEq))); |
| 3979 | 3977 |
| 3980 __ mov(t2, zero_reg); | 3978 __ mov(t2, zero_reg); |
| 3981 __ mov(t3, zero_reg); | 3979 __ mov(t3, zero_reg); |
| 3982 __ c_d(UEQ, f4, f6, 6); | 3980 __ c_d(UEQ, f4, f6, 6); |
| 3983 __ c_s(UEQ, f14, f16, 0); | 3981 __ c_s(UEQ, f14, f16, 0); |
| 3984 __ movt(t2, t1, 6); | 3982 __ movt(t2, t1, 6); |
| 3985 __ movt(t3, t1, 0); | 3983 __ movt(t3, t1, 0); |
| 3986 __ sw(t2, MemOperand(a0, offsetof(TestFloat, dUeq)) ); | 3984 __ Sw(t2, MemOperand(a0, offsetof(TestFloat, dUeq))); |
| 3987 __ sw(t3, MemOperand(a0, offsetof(TestFloat, fUeq)) ); | 3985 __ Sw(t3, MemOperand(a0, offsetof(TestFloat, fUeq))); |
| 3988 | 3986 |
| 3989 __ mov(t2, zero_reg); | 3987 __ mov(t2, zero_reg); |
| 3990 __ mov(t3, zero_reg); | 3988 __ mov(t3, zero_reg); |
| 3991 __ c_d(OLT, f4, f6, 0); | 3989 __ c_d(OLT, f4, f6, 0); |
| 3992 __ c_s(OLT, f14, f16, 2); | 3990 __ c_s(OLT, f14, f16, 2); |
| 3993 __ movt(t2, t1, 0); | 3991 __ movt(t2, t1, 0); |
| 3994 __ movt(t3, t1, 2); | 3992 __ movt(t3, t1, 2); |
| 3995 __ sw(t2, MemOperand(a0, offsetof(TestFloat, dOlt)) ); | 3993 __ Sw(t2, MemOperand(a0, offsetof(TestFloat, dOlt))); |
| 3996 __ sw(t3, MemOperand(a0, offsetof(TestFloat, fOlt)) ); | 3994 __ Sw(t3, MemOperand(a0, offsetof(TestFloat, fOlt))); |
| 3997 | 3995 |
| 3998 __ mov(t2, zero_reg); | 3996 __ mov(t2, zero_reg); |
| 3999 __ mov(t3, zero_reg); | 3997 __ mov(t3, zero_reg); |
| 4000 __ c_d(ULT, f4, f6, 2); | 3998 __ c_d(ULT, f4, f6, 2); |
| 4001 __ c_s(ULT, f14, f16, 4); | 3999 __ c_s(ULT, f14, f16, 4); |
| 4002 __ movt(t2, t1, 2); | 4000 __ movt(t2, t1, 2); |
| 4003 __ movt(t3, t1, 4); | 4001 __ movt(t3, t1, 4); |
| 4004 __ sw(t2, MemOperand(a0, offsetof(TestFloat, dUlt)) ); | 4002 __ Sw(t2, MemOperand(a0, offsetof(TestFloat, dUlt))); |
| 4005 __ sw(t3, MemOperand(a0, offsetof(TestFloat, fUlt)) ); | 4003 __ Sw(t3, MemOperand(a0, offsetof(TestFloat, fUlt))); |
| 4006 | 4004 |
| 4007 __ mov(t2, zero_reg); | 4005 __ mov(t2, zero_reg); |
| 4008 __ mov(t3, zero_reg); | 4006 __ mov(t3, zero_reg); |
| 4009 __ c_d(OLE, f4, f6, 4); | 4007 __ c_d(OLE, f4, f6, 4); |
| 4010 __ c_s(OLE, f14, f16, 6); | 4008 __ c_s(OLE, f14, f16, 6); |
| 4011 __ movt(t2, t1, 4); | 4009 __ movt(t2, t1, 4); |
| 4012 __ movt(t3, t1, 6); | 4010 __ movt(t3, t1, 6); |
| 4013 __ sw(t2, MemOperand(a0, offsetof(TestFloat, dOle)) ); | 4011 __ Sw(t2, MemOperand(a0, offsetof(TestFloat, dOle))); |
| 4014 __ sw(t3, MemOperand(a0, offsetof(TestFloat, fOle)) ); | 4012 __ Sw(t3, MemOperand(a0, offsetof(TestFloat, fOle))); |
| 4015 | 4013 |
| 4016 __ mov(t2, zero_reg); | 4014 __ mov(t2, zero_reg); |
| 4017 __ mov(t3, zero_reg); | 4015 __ mov(t3, zero_reg); |
| 4018 __ c_d(ULE, f4, f6, 6); | 4016 __ c_d(ULE, f4, f6, 6); |
| 4019 __ c_s(ULE, f14, f16, 0); | 4017 __ c_s(ULE, f14, f16, 0); |
| 4020 __ movt(t2, t1, 6); | 4018 __ movt(t2, t1, 6); |
| 4021 __ movt(t3, t1, 0); | 4019 __ movt(t3, t1, 0); |
| 4022 __ sw(t2, MemOperand(a0, offsetof(TestFloat, dUle)) ); | 4020 __ Sw(t2, MemOperand(a0, offsetof(TestFloat, dUle))); |
| 4023 __ sw(t3, MemOperand(a0, offsetof(TestFloat, fUle)) ); | 4021 __ Sw(t3, MemOperand(a0, offsetof(TestFloat, fUle))); |
| 4024 | 4022 |
| 4025 __ jr(ra); | 4023 __ jr(ra); |
| 4026 __ nop(); | 4024 __ nop(); |
| 4027 | 4025 |
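Each `c_d`/`c_s` above sets one FCC condition bit (0, 2, 4 or 6 here), and the paired `movt` copies the 1 from `t1` only when that bit is set, so the words stored into the struct are plain 0/1 flags. A hedged reference for the eight IEEE predicates being exercised (the U-variants are the ordered predicate OR "unordered"; names are illustrative):

    #include <cmath>

    struct CCond {
      bool F, UN, EQ, UEQ, OLT, ULT, OLE, ULE;
    };

    // Evaluate the c.cond.fmt predicates for a pair of operands.
    CCond CCondModel(double a, double b) {
      const bool un = std::isnan(a) || std::isnan(b);
      CCond r;
      r.F   = false;            // always false
      r.UN  = un;               // unordered
      r.EQ  = !un && a == b;    // ordered and equal
      r.UEQ = un || a == b;     // unordered or equal
      r.OLT = !un && a < b;     // ordered and less than
      r.ULT = un || a < b;      // unordered or less than
      r.OLE = !un && a <= b;    // ordered and less or equal
      r.ULE = un || a <= b;     // unordered or less or equal
      return r;
    }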
| 4028 CodeDesc desc; | 4026 CodeDesc desc; |
| 4029 assm.GetCode(&desc); | 4027 assm.GetCode(&desc); |
| 4030 Handle<Code> code = isolate->factory()->NewCode( | 4028 Handle<Code> code = isolate->factory()->NewCode( |
| 4031 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); | 4029 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); |
| 4032 F3 f = FUNCTION_CAST<F3>(code->entry()); | 4030 F3 f = FUNCTION_CAST<F3>(code->entry()); |
| 4033 test.dOp1 = 2.0; | 4031 test.dOp1 = 2.0; |
| (...skipping 121 matching lines...) |
| 4155 float fUle; | 4153 float fUle; |
| 4156 float fOr; | 4154 float fOr; |
| 4157 float fUne; | 4155 float fUne; |
| 4158 float fNe; | 4156 float fNe; |
| 4159 } TestFloat; | 4157 } TestFloat; |
| 4160 | 4158 |
| 4161 TestFloat test; | 4159 TestFloat test; |
| 4162 | 4160 |
| 4163 __ li(t1, 1); | 4161 __ li(t1, 1); |
| 4164 | 4162 |
| 4165 __ ldc1(f4, MemOperand(a0, offsetof(TestFloat, dOp1))); | 4163 __ Ldc1(f4, MemOperand(a0, offsetof(TestFloat, dOp1))); |
| 4166 __ ldc1(f6, MemOperand(a0, offsetof(TestFloat, dOp2))); | 4164 __ Ldc1(f6, MemOperand(a0, offsetof(TestFloat, dOp2))); |
| 4167 | 4165 |
| 4168 __ lwc1(f14, MemOperand(a0, offsetof(TestFloat, fOp1))); | 4166 __ Lwc1(f14, MemOperand(a0, offsetof(TestFloat, fOp1))); |
| 4169 __ lwc1(f16, MemOperand(a0, offsetof(TestFloat, fOp2))); | 4167 __ Lwc1(f16, MemOperand(a0, offsetof(TestFloat, fOp2))); |
| 4170 | 4168 |
| 4171 __ cmp_d(F, f2, f4, f6); | 4169 __ cmp_d(F, f2, f4, f6); |
| 4172 __ cmp_s(F, f12, f14, f16); | 4170 __ cmp_s(F, f12, f14, f16); |
| 4173 __ sdc1(f2, MemOperand(a0, offsetof(TestFloat, dF)) ); | 4171 __ Sdc1(f2, MemOperand(a0, offsetof(TestFloat, dF))); |
| 4174 __ swc1(f12, MemOperand(a0, offsetof(TestFloat, fF)) ); | 4172 __ Swc1(f12, MemOperand(a0, offsetof(TestFloat, fF))); |
| 4175 | 4173 |
| 4176 __ cmp_d(UN, f2, f4, f6); | 4174 __ cmp_d(UN, f2, f4, f6); |
| 4177 __ cmp_s(UN, f12, f14, f16); | 4175 __ cmp_s(UN, f12, f14, f16); |
| 4178 __ sdc1(f2, MemOperand(a0, offsetof(TestFloat, dUn)) ); | 4176 __ Sdc1(f2, MemOperand(a0, offsetof(TestFloat, dUn))); |
| 4179 __ swc1(f12, MemOperand(a0, offsetof(TestFloat, fUn)) ); | 4177 __ Swc1(f12, MemOperand(a0, offsetof(TestFloat, fUn))); |
| 4180 | 4178 |
| 4181 __ cmp_d(EQ, f2, f4, f6); | 4179 __ cmp_d(EQ, f2, f4, f6); |
| 4182 __ cmp_s(EQ, f12, f14, f16); | 4180 __ cmp_s(EQ, f12, f14, f16); |
| 4183 __ sdc1(f2, MemOperand(a0, offsetof(TestFloat, dEq)) ); | 4181 __ Sdc1(f2, MemOperand(a0, offsetof(TestFloat, dEq))); |
| 4184 __ swc1(f12, MemOperand(a0, offsetof(TestFloat, fEq)) ); | 4182 __ Swc1(f12, MemOperand(a0, offsetof(TestFloat, fEq))); |
| 4185 | 4183 |
| 4186 __ cmp_d(UEQ, f2, f4, f6); | 4184 __ cmp_d(UEQ, f2, f4, f6); |
| 4187 __ cmp_s(UEQ, f12, f14, f16); | 4185 __ cmp_s(UEQ, f12, f14, f16); |
| 4188 __ sdc1(f2, MemOperand(a0, offsetof(TestFloat, dUeq)) ); | 4186 __ Sdc1(f2, MemOperand(a0, offsetof(TestFloat, dUeq))); |
| 4189 __ swc1(f12, MemOperand(a0, offsetof(TestFloat, fUeq)) ); | 4187 __ Swc1(f12, MemOperand(a0, offsetof(TestFloat, fUeq))); |
| 4190 | 4188 |
| 4191 __ cmp_d(LT, f2, f4, f6); | 4189 __ cmp_d(LT, f2, f4, f6); |
| 4192 __ cmp_s(LT, f12, f14, f16); | 4190 __ cmp_s(LT, f12, f14, f16); |
| 4193 __ sdc1(f2, MemOperand(a0, offsetof(TestFloat, dOlt)) ); | 4191 __ Sdc1(f2, MemOperand(a0, offsetof(TestFloat, dOlt))); |
| 4194 __ swc1(f12, MemOperand(a0, offsetof(TestFloat, fOlt)) ); | 4192 __ Swc1(f12, MemOperand(a0, offsetof(TestFloat, fOlt))); |
| 4195 | 4193 |
| 4196 __ cmp_d(ULT, f2, f4, f6); | 4194 __ cmp_d(ULT, f2, f4, f6); |
| 4197 __ cmp_s(ULT, f12, f14, f16); | 4195 __ cmp_s(ULT, f12, f14, f16); |
| 4198 __ sdc1(f2, MemOperand(a0, offsetof(TestFloat, dUlt)) ); | 4196 __ Sdc1(f2, MemOperand(a0, offsetof(TestFloat, dUlt))); |
| 4199 __ swc1(f12, MemOperand(a0, offsetof(TestFloat, fUlt)) ); | 4197 __ Swc1(f12, MemOperand(a0, offsetof(TestFloat, fUlt))); |
| 4200 | 4198 |
| 4201 __ cmp_d(LE, f2, f4, f6); | 4199 __ cmp_d(LE, f2, f4, f6); |
| 4202 __ cmp_s(LE, f12, f14, f16); | 4200 __ cmp_s(LE, f12, f14, f16); |
| 4203 __ sdc1(f2, MemOperand(a0, offsetof(TestFloat, dOle)) ); | 4201 __ Sdc1(f2, MemOperand(a0, offsetof(TestFloat, dOle))); |
| 4204 __ swc1(f12, MemOperand(a0, offsetof(TestFloat, fOle)) ); | 4202 __ Swc1(f12, MemOperand(a0, offsetof(TestFloat, fOle))); |
| 4205 | 4203 |
| 4206 __ cmp_d(ULE, f2, f4, f6); | 4204 __ cmp_d(ULE, f2, f4, f6); |
| 4207 __ cmp_s(ULE, f12, f14, f16); | 4205 __ cmp_s(ULE, f12, f14, f16); |
| 4208 __ sdc1(f2, MemOperand(a0, offsetof(TestFloat, dUle)) ); | 4206 __ Sdc1(f2, MemOperand(a0, offsetof(TestFloat, dUle))); |
| 4209 __ swc1(f12, MemOperand(a0, offsetof(TestFloat, fUle)) ); | 4207 __ Swc1(f12, MemOperand(a0, offsetof(TestFloat, fUle))); |
| 4210 | 4208 |
| 4211 __ cmp_d(ORD, f2, f4, f6); | 4209 __ cmp_d(ORD, f2, f4, f6); |
| 4212 __ cmp_s(ORD, f12, f14, f16); | 4210 __ cmp_s(ORD, f12, f14, f16); |
| 4213 __ sdc1(f2, MemOperand(a0, offsetof(TestFloat, dOr)) ); | 4211 __ Sdc1(f2, MemOperand(a0, offsetof(TestFloat, dOr))); |
| 4214 __ swc1(f12, MemOperand(a0, offsetof(TestFloat, fOr)) ); | 4212 __ Swc1(f12, MemOperand(a0, offsetof(TestFloat, fOr))); |
| 4215 | 4213 |
| 4216 __ cmp_d(UNE, f2, f4, f6); | 4214 __ cmp_d(UNE, f2, f4, f6); |
| 4217 __ cmp_s(UNE, f12, f14, f16); | 4215 __ cmp_s(UNE, f12, f14, f16); |
| 4218 __ sdc1(f2, MemOperand(a0, offsetof(TestFloat, dUne)) ); | 4216 __ Sdc1(f2, MemOperand(a0, offsetof(TestFloat, dUne))); |
| 4219 __ swc1(f12, MemOperand(a0, offsetof(TestFloat, fUne)) ); | 4217 __ Swc1(f12, MemOperand(a0, offsetof(TestFloat, fUne))); |
| 4220 | 4218 |
| 4221 __ cmp_d(NE, f2, f4, f6); | 4219 __ cmp_d(NE, f2, f4, f6); |
| 4222 __ cmp_s(NE, f12, f14, f16); | 4220 __ cmp_s(NE, f12, f14, f16); |
| 4223 __ sdc1(f2, MemOperand(a0, offsetof(TestFloat, dNe)) ); | 4221 __ Sdc1(f2, MemOperand(a0, offsetof(TestFloat, dNe))); |
| 4224 __ swc1(f12, MemOperand(a0, offsetof(TestFloat, fNe)) ); | 4222 __ Swc1(f12, MemOperand(a0, offsetof(TestFloat, fNe))); |
| 4225 | 4223 |
| 4226 __ jr(ra); | 4224 __ jr(ra); |
| 4227 __ nop(); | 4225 __ nop(); |
| 4228 | 4226 |
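Unlike the pre-r6 `c.cond.fmt`/`movt` pairing in the previous test, the r6 `cmp_d`/`cmp_s` write their boolean result straight into the destination FPR as an all-ones or all-zeros bit pattern, which is what the `dTrue`/`dFalse` constants below encode. A one-line model of that encoding (sketch, illustrative name):

    #include <cstdint>

    // CMP.cond.fmt result: all ones if the predicate holds, otherwise zero.
    uint64_t CmpResultBits(bool predicate_holds) {
      return predicate_holds ? ~UINT64_C(0) : UINT64_C(0);
    }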
| 4229 CodeDesc desc; | 4227 CodeDesc desc; |
| 4230 assm.GetCode(&desc); | 4228 assm.GetCode(&desc); |
| 4231 Handle<Code> code = isolate->factory()->NewCode( | 4229 Handle<Code> code = isolate->factory()->NewCode( |
| 4232 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); | 4230 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); |
| 4233 F3 f = FUNCTION_CAST<F3>(code->entry()); | 4231 F3 f = FUNCTION_CAST<F3>(code->entry()); |
| 4234 uint64_t dTrue = 0xFFFFFFFFFFFFFFFF; | 4232 uint64_t dTrue = 0xFFFFFFFFFFFFFFFF; |
| (...skipping 316 matching lines...) |
| 4551 float fRes; | 4549 float fRes; |
| 4552 } Test; | 4550 } Test; |
| 4553 | 4551 |
| 4554 Test test; | 4552 Test test; |
| 4555 | 4553 |
| 4556 // Save FCSR. | 4554 // Save FCSR. |
| 4557 __ cfc1(a1, FCSR); | 4555 __ cfc1(a1, FCSR); |
| 4558 // Disable FPU exceptions. | 4556 // Disable FPU exceptions. |
| 4559 __ ctc1(zero_reg, FCSR); | 4557 __ ctc1(zero_reg, FCSR); |
| 4560 | 4558 |
| 4561 __ ldc1(f4, MemOperand(a0, offsetof(Test, dOp1)) ); | 4559 __ Ldc1(f4, MemOperand(a0, offsetof(Test, dOp1))); |
| 4562 __ ldc1(f2, MemOperand(a0, offsetof(Test, dOp2)) ); | 4560 __ Ldc1(f2, MemOperand(a0, offsetof(Test, dOp2))); |
| 4563 __ nop(); | 4561 __ nop(); |
| 4564 __ div_d(f6, f4, f2); | 4562 __ div_d(f6, f4, f2); |
| 4565 __ sdc1(f6, MemOperand(a0, offsetof(Test, dRes)) ); | 4563 __ Sdc1(f6, MemOperand(a0, offsetof(Test, dRes))); |
| 4566 | 4564 |
| 4567 __ lwc1(f4, MemOperand(a0, offsetof(Test, fOp1)) ); | 4565 __ Lwc1(f4, MemOperand(a0, offsetof(Test, fOp1))); |
| 4568 __ lwc1(f2, MemOperand(a0, offsetof(Test, fOp2)) ); | 4566 __ Lwc1(f2, MemOperand(a0, offsetof(Test, fOp2))); |
| 4569 __ nop(); | 4567 __ nop(); |
| 4570 __ div_s(f6, f4, f2); | 4568 __ div_s(f6, f4, f2); |
| 4571 __ swc1(f6, MemOperand(a0, offsetof(Test, fRes)) ); | 4569 __ Swc1(f6, MemOperand(a0, offsetof(Test, fRes))); |
| 4572 | 4570 |
| 4573 // Restore FCSR. | 4571 // Restore FCSR. |
| 4574 __ ctc1(a1, FCSR); | 4572 __ ctc1(a1, FCSR); |
| 4575 | 4573 |
| 4576 __ jr(ra); | 4574 __ jr(ra); |
| 4577 __ nop(); | 4575 __ nop(); |
| 4578 CodeDesc desc; | 4576 CodeDesc desc; |
| 4579 assm.GetCode(&desc); | 4577 assm.GetCode(&desc); |
| 4580 Handle<Code> code = isolate->factory()->NewCode( | 4578 Handle<Code> code = isolate->factory()->NewCode( |
| 4581 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); | 4579 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); |
| 4582 F3 f = FUNCTION_CAST<F3>(code->entry()); | 4580 F3 f = FUNCTION_CAST<F3>(code->entry()); |
| 4583 | 4581 |
| (...skipping 1380 matching lines...) |
| 5964 {x2, y2, -z2, 0.0, 0.0}, | 5962 {x2, y2, -z2, 0.0, 0.0}, |
| 5965 {x2, -y2, z2, 0.0, 0.0}, | 5963 {x2, -y2, z2, 0.0, 0.0}, |
| 5966 {x2, -y2, -z2, 0.0, 0.0}, | 5964 {x2, -y2, -z2, 0.0, 0.0}, |
| 5967 {-x2, y2, z2, 0.0, 0.0}, | 5965 {-x2, y2, z2, 0.0, 0.0}, |
| 5968 {-x2, y2, -z2, 0.0, 0.0}, | 5966 {-x2, y2, -z2, 0.0, 0.0}, |
| 5969 {-x2, -y2, z2, 0.0, 0.0}, | 5967 {-x2, -y2, z2, 0.0, 0.0}, |
| 5970 {-x2, -y2, -z2, 0.0, 0.0}, | 5968 {-x2, -y2, -z2, 0.0, 0.0}, |
| 5971 }; | 5969 }; |
| 5972 | 5970 |
| 5973 if (std::is_same<T, float>::value) { | 5971 if (std::is_same<T, float>::value) { |
| 5974 __ lwc1(f4, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, fr))); | 5972 __ Lwc1(f4, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, fr))); |
| 5975 __ lwc1(f6, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, fs))); | 5973 __ Lwc1(f6, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, fs))); |
| 5976 __ lwc1(f8, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, ft))); | 5974 __ Lwc1(f8, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, ft))); |
| 5977 __ lwc1(f16, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, fr))); | 5975 __ Lwc1(f16, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, fr))); |
| 5978 } else if (std::is_same<T, double>::value) { | 5976 } else if (std::is_same<T, double>::value) { |
| 5979 __ ldc1(f4, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, fr))); | 5977 __ Ldc1(f4, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, fr))); |
| 5980 __ ldc1(f6, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, fs))); | 5978 __ Ldc1(f6, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, fs))); |
| 5981 __ ldc1(f8, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, ft))); | 5979 __ Ldc1(f8, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, ft))); |
| 5982 __ ldc1(f16, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, fr))); | 5980 __ Ldc1(f16, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, fr))); |
| 5983 } else { | 5981 } else { |
| 5984 UNREACHABLE(); | 5982 UNREACHABLE(); |
| 5985 } | 5983 } |
| 5986 | 5984 |
| 5987 func(assm); | 5985 func(assm); |
| 5988 | 5986 |
| 5989 __ jr(ra); | 5987 __ jr(ra); |
| 5990 __ nop(); | 5988 __ nop(); |
| 5991 | 5989 |
| 5992 CodeDesc desc; | 5990 CodeDesc desc; |
| (...skipping 23 matching lines...) |
| 6016 | 6014 |
| 6017 CHECK_EQ(tc.fd_add, res_add); | 6015 CHECK_EQ(tc.fd_add, res_add); |
| 6018 CHECK_EQ(tc.fd_sub, res_sub); | 6016 CHECK_EQ(tc.fd_sub, res_sub); |
| 6019 } | 6017 } |
| 6020 } | 6018 } |
| 6021 | 6019 |
| 6022 TEST(madd_msub_s) { | 6020 TEST(madd_msub_s) { |
| 6023 if (kArchVariant == kMips64r6) return; | 6021 if (kArchVariant == kMips64r6) return; |
| 6024 helper_madd_msub_maddf_msubf<float>([](MacroAssembler& assm) { | 6022 helper_madd_msub_maddf_msubf<float>([](MacroAssembler& assm) { |
| 6025 __ madd_s(f10, f4, f6, f8); | 6023 __ madd_s(f10, f4, f6, f8); |
| 6026 __ swc1(f10, MemOperand(a0, offsetof(TestCaseMaddMsub<float>, fd_add))); | 6024 __ Swc1(f10, MemOperand(a0, offsetof(TestCaseMaddMsub<float>, fd_add))); |
| 6027 __ msub_s(f16, f4, f6, f8); | 6025 __ msub_s(f16, f4, f6, f8); |
| 6028 __ swc1(f16, MemOperand(a0, offsetof(TestCaseMaddMsub<float>, fd_sub))); | 6026 __ Swc1(f16, MemOperand(a0, offsetof(TestCaseMaddMsub<float>, fd_sub))); |
| 6029 }); | 6027 }); |
| 6030 } | 6028 } |
| 6031 | 6029 |
| 6032 TEST(madd_msub_d) { | 6030 TEST(madd_msub_d) { |
| 6033 if (kArchVariant == kMips64r6) return; | 6031 if (kArchVariant == kMips64r6) return; |
| 6034 helper_madd_msub_maddf_msubf<double>([](MacroAssembler& assm) { | 6032 helper_madd_msub_maddf_msubf<double>([](MacroAssembler& assm) { |
| 6035 __ madd_d(f10, f4, f6, f8); | 6033 __ madd_d(f10, f4, f6, f8); |
| 6036 __ sdc1(f10, MemOperand(a0, offsetof(TestCaseMaddMsub<double>, fd_add))); | 6034 __ Sdc1(f10, MemOperand(a0, offsetof(TestCaseMaddMsub<double>, fd_add))); |
| 6037 __ msub_d(f16, f4, f6, f8); | 6035 __ msub_d(f16, f4, f6, f8); |
| 6038 __ sdc1(f16, MemOperand(a0, offsetof(TestCaseMaddMsub<double>, fd_sub))); | 6036 __ Sdc1(f16, MemOperand(a0, offsetof(TestCaseMaddMsub<double>, fd_sub))); |
| 6039 }); | 6037 }); |
| 6040 } | 6038 } |
| 6041 | 6039 |
| 6042 TEST(maddf_msubf_s) { | 6040 TEST(maddf_msubf_s) { |
| 6043 if (kArchVariant != kMips64r6) return; | 6041 if (kArchVariant != kMips64r6) return; |
| 6044 helper_madd_msub_maddf_msubf<float>([](MacroAssembler& assm) { | 6042 helper_madd_msub_maddf_msubf<float>([](MacroAssembler& assm) { |
| 6045 __ maddf_s(f4, f6, f8); | 6043 __ maddf_s(f4, f6, f8); |
| 6046 __ swc1(f4, MemOperand(a0, offsetof(TestCaseMaddMsub<float>, fd_add))); | 6044 __ Swc1(f4, MemOperand(a0, offsetof(TestCaseMaddMsub<float>, fd_add))); |
| 6047 __ msubf_s(f16, f6, f8); | 6045 __ msubf_s(f16, f6, f8); |
| 6048 __ swc1(f16, MemOperand(a0, offsetof(TestCaseMaddMsub<float>, fd_sub))); | 6046 __ Swc1(f16, MemOperand(a0, offsetof(TestCaseMaddMsub<float>, fd_sub))); |
| 6049 }); | 6047 }); |
| 6050 } | 6048 } |
| 6051 | 6049 |
| 6052 TEST(maddf_msubf_d) { | 6050 TEST(maddf_msubf_d) { |
| 6053 if (kArchVariant != kMips64r6) return; | 6051 if (kArchVariant != kMips64r6) return; |
| 6054 helper_madd_msub_maddf_msubf<double>([](MacroAssembler& assm) { | 6052 helper_madd_msub_maddf_msubf<double>([](MacroAssembler& assm) { |
| 6055 __ maddf_d(f4, f6, f8); | 6053 __ maddf_d(f4, f6, f8); |
| 6056 __ sdc1(f4, MemOperand(a0, offsetof(TestCaseMaddMsub<double>, fd_add))); | 6054 __ Sdc1(f4, MemOperand(a0, offsetof(TestCaseMaddMsub<double>, fd_add))); |
| 6057 __ msubf_d(f16, f6, f8); | 6055 __ msubf_d(f16, f6, f8); |
| 6058 __ sdc1(f16, MemOperand(a0, offsetof(TestCaseMaddMsub<double>, fd_sub))); | 6056 __ Sdc1(f16, MemOperand(a0, offsetof(TestCaseMaddMsub<double>, fd_sub))); |
| 6059 }); | 6057 }); |
| 6060 } | 6058 } |
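The four tests above share one helper but exercise two instruction families: the pre-r6 madd/msub take four operands and compute fs*ft plus or minus fr, while the r6 maddf/msubf accumulate into the destination register with a single rounding, which is why f4 and f16 are preloaded with fr before the helper's callback runs. A hedged reference for the expected values, assuming the pre-r6 forms round the product separately and using std::fma for the fused forms (function names are illustrative):

    #include <cmath>

    // Pre-r6 madd.fmt / msub.fmt (separate rounding of the product assumed).
    template <typename T>
    void ExpectedMaddMsub(T fr, T fs, T ft, T* fd_add, T* fd_sub) {
      *fd_add = fs * ft + fr;  // madd.fmt: fd = fs*ft + fr
      *fd_sub = fs * ft - fr;  // msub.fmt: fd = fs*ft - fr
    }

    // r6 maddf.fmt / msubf.fmt accumulate into fd with one rounding.
    template <typename T>
    void ExpectedMaddfMsubf(T fd, T fs, T ft, T* fd_add, T* fd_sub) {
      *fd_add = std::fma(fs, ft, fd);   // maddf.fmt: fd = fd + fs*ft
      *fd_sub = std::fma(-fs, ft, fd);  // msubf.fmt: fd = fd - fs*ft
    }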
| 6061 | 6059 |
| 6062 #undef __ | 6060 #undef __ |