OLD | NEW |
1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file | 1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file |
2 // for details. All rights reserved. Use of this source code is governed by a | 2 // for details. All rights reserved. Use of this source code is governed by a |
3 // BSD-style license that can be found in the LICENSE file. | 3 // BSD-style license that can be found in the LICENSE file. |
4 | 4 |
5 #include "vm/globals.h" | 5 #include "vm/globals.h" |
6 #if defined(TARGET_ARCH_MIPS) | 6 #if defined(TARGET_ARCH_MIPS) |
7 | 7 |
8 #include "vm/assembler.h" | 8 #include "vm/assembler.h" |
9 #include "vm/code_generator.h" | 9 #include "vm/code_generator.h" |
10 #include "vm/compiler.h" | 10 #include "vm/compiler.h" |
(...skipping 426 matching lines...) |
437 | 437 |
438 __ SetPrologueOffset(); | 438 __ SetPrologueOffset(); |
439 __ Comment("GenerateDeoptimizationSequence"); | 439 __ Comment("GenerateDeoptimizationSequence"); |
440 // DeoptimizeCopyFrame expects a Dart frame. | 440 // DeoptimizeCopyFrame expects a Dart frame. |
441 __ EnterStubFrame(kPushedRegistersSize); | 441 __ EnterStubFrame(kPushedRegistersSize); |
442 | 442 |
443 // The code in this frame may not cause GC. kDeoptimizeCopyFrameRuntimeEntry | 443 // The code in this frame may not cause GC. kDeoptimizeCopyFrameRuntimeEntry |
444 // and kDeoptimizeFillFrameRuntimeEntry are leaf runtime calls. | 444 // and kDeoptimizeFillFrameRuntimeEntry are leaf runtime calls. |
445 const intptr_t saved_result_slot_from_fp = | 445 const intptr_t saved_result_slot_from_fp = |
446 kFirstLocalSlotFromFp + 1 - (kNumberOfCpuRegisters - V0); | 446 kFirstLocalSlotFromFp + 1 - (kNumberOfCpuRegisters - V0); |
| 447 const intptr_t saved_exception_slot_from_fp = |
| 448 kFirstLocalSlotFromFp + 1 - (kNumberOfCpuRegisters - V0); |
| 449 const intptr_t saved_stacktrace_slot_from_fp = |
| 450 kFirstLocalSlotFromFp + 1 - (kNumberOfCpuRegisters - V1); |
447 // Result in V0 is preserved as part of pushing all registers below. | 451 // Result in V0 is preserved as part of pushing all registers below. |
448 | 452 |
449 // Push registers in their enumeration order: lowest register number at | 453 // Push registers in their enumeration order: lowest register number at |
450 // lowest address. | 454 // lowest address. |
451 for (int i = 0; i < kNumberOfCpuRegisters; i++) { | 455 for (int i = 0; i < kNumberOfCpuRegisters; i++) { |
452 const int slot = kNumberOfCpuRegisters - i; | 456 const int slot = kNumberOfCpuRegisters - i; |
453 Register reg = static_cast<Register>(i); | 457 Register reg = static_cast<Register>(i); |
454 if (reg == CODE_REG) { | 458 if (reg == CODE_REG) { |
455 // Save the original value of CODE_REG pushed before invoking this stub | 459 // Save the original value of CODE_REG pushed before invoking this stub |
456 // instead of the value used to call this stub. | 460 // instead of the value used to call this stub. |
457 COMPILE_ASSERT(TMP < CODE_REG); // Assert TMP is pushed first. | 461 COMPILE_ASSERT(TMP < CODE_REG); // Assert TMP is pushed first. |
458 __ lw(TMP, Address(FP, kCallerSpSlotFromFp * kWordSize)); | 462 __ lw(TMP, Address(FP, kCallerSpSlotFromFp * kWordSize)); |
459 __ sw(TMP, Address(SP, kPushedRegistersSize - slot * kWordSize)); | 463 __ sw(TMP, Address(SP, kPushedRegistersSize - slot * kWordSize)); |
460 } else { | 464 } else { |
461 __ sw(reg, Address(SP, kPushedRegistersSize - slot * kWordSize)); | 465 __ sw(reg, Address(SP, kPushedRegistersSize - slot * kWordSize)); |
462 } | 466 } |
463 } | 467 } |
464 for (int i = 0; i < kNumberOfFRegisters; i++) { | 468 for (int i = 0; i < kNumberOfFRegisters; i++) { |
465 // These go below the CPU registers. | 469 // These go below the CPU registers. |
466 const int slot = kNumberOfCpuRegisters + kNumberOfFRegisters - i; | 470 const int slot = kNumberOfCpuRegisters + kNumberOfFRegisters - i; |
467 FRegister reg = static_cast<FRegister>(i); | 471 FRegister reg = static_cast<FRegister>(i); |
468 __ swc1(reg, Address(SP, kPushedRegistersSize - slot * kWordSize)); | 472 __ swc1(reg, Address(SP, kPushedRegistersSize - slot * kWordSize)); |
469 } | 473 } |
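// A sketch of the block the two loops above produce, assuming
// kPushedRegistersSize covers exactly the 32 CPU plus 32 FPU registers (the
// actual constant is defined earlier in this file and may differ):
//   SP + kPushedRegistersSize -  1 * kWordSize : r31 (RA)   <- highest address
//   ...
//   SP + kPushedRegistersSize - 32 * kWordSize : r0  (ZERO)
//   SP + kPushedRegistersSize - 33 * kWordSize : f31
//   ...
//   SP + kPushedRegistersSize - 64 * kWordSize : f0         <- lowest address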
470 | 474 |
471 __ mov(A0, SP); // Pass address of saved registers block. | 475 __ mov(A0, SP); // Pass address of saved registers block. |
472 __ LoadImmediate(A1, (kind == kLazyDeopt) ? 1 : 0); | 476 const bool is_lazy = (kind == kLazyDeoptFromReturn) || |
| 477 (kind == kLazyDeoptFromThrow); |
| 478 __ LoadImmediate(A1, is_lazy ? 1 : 0); |
473 __ ReserveAlignedFrameSpace(1 * kWordSize); | 479 __ ReserveAlignedFrameSpace(1 * kWordSize); |
474 __ CallRuntime(kDeoptimizeCopyFrameRuntimeEntry, 2); | 480 __ CallRuntime(kDeoptimizeCopyFrameRuntimeEntry, 2); |
475 // Result (V0) is stack-size (FP - SP) in bytes, incl. the return address. | 481 // Result (V0) is stack-size (FP - SP) in bytes, incl. the return address. |
476 | 482 |
477 const bool preserve_result = (kind == kLazyDeopt); | 483 if (kind == kLazyDeoptFromReturn) { |
478 if (preserve_result) { | |
479 // Restore result into T1 temporarily. | 484 // Restore result into T1 temporarily. |
480 __ lw(T1, Address(FP, saved_result_slot_from_fp * kWordSize)); | 485 __ lw(T1, Address(FP, saved_result_slot_from_fp * kWordSize)); |
| 486 } else if (kind == kLazyDeoptFromThrow) { |
| 487 // Restore exception into T1 and stacktrace into T2 temporarily. |
| 488 __ lw(T1, Address(FP, saved_exception_slot_from_fp * kWordSize)); |
| 489 __ lw(T2, Address(FP, saved_stacktrace_slot_from_fp * kWordSize)); |
481 } | 490 } |
482 | 491 |
483 __ RestoreCodePointer(); | 492 __ RestoreCodePointer(); |
484 __ LeaveDartFrame(); | 493 __ LeaveDartFrame(); |
485 __ subu(SP, FP, V0); | 494 __ subu(SP, FP, V0); |
486 | 495 |
487 // DeoptimizeFillFrame expects a Dart frame, i.e. EnterDartFrame(0), but there | 496 // DeoptimizeFillFrame expects a Dart frame, i.e. EnterDartFrame(0), but there |
488 // is no need to set the correct PC marker or load PP, since they get patched. | 497 // is no need to set the correct PC marker or load PP, since they get patched. |
489 __ EnterStubFrame(); | 498 __ EnterStubFrame(); |
490 | 499 |
491 __ mov(A0, FP); // Get last FP address. | 500 __ mov(A0, FP); // Get last FP address. |
492 if (preserve_result) { | 501 if (kind == kLazyDeoptFromReturn) { |
493 __ Push(T1); // Preserve result as first local. | 502 __ Push(T1); // Preserve result as first local. |
| 503 } else if (kind == kLazyDeoptFromThrow) { |
| 504 __ Push(T1); // Preserve exception as first local. |
| 505 __ Push(T2); // Preserve stacktrace as second local. |
494 } | 506 } |
495 __ ReserveAlignedFrameSpace(1 * kWordSize); | 507 __ ReserveAlignedFrameSpace(1 * kWordSize); |
496 __ CallRuntime(kDeoptimizeFillFrameRuntimeEntry, 1); // Pass last FP in A0. | 508 __ CallRuntime(kDeoptimizeFillFrameRuntimeEntry, 1); // Pass last FP in A0. |
497 if (preserve_result) { | 509 if (kind == kLazyDeoptFromReturn) { |
498 // Restore result into T1. | 510 // Restore result into T1. |
499 __ lw(T1, Address(FP, kFirstLocalSlotFromFp * kWordSize)); | 511 __ lw(T1, Address(FP, kFirstLocalSlotFromFp * kWordSize)); |
| 512 } else if (kind == kLazyDeoptFromThrow) { |
| 513 // Restore exception into T1 and stacktrace into T2. |
| 514 __ lw(T1, Address(FP, kFirstLocalSlotFromFp * kWordSize)); |
| 515 __ lw(T2, Address(FP, (kFirstLocalSlotFromFp - 1) * kWordSize)); |
500 } | 516 } |
501 // Code above cannot cause GC. | 517 // Code above cannot cause GC. |
502 __ RestoreCodePointer(); | 518 __ RestoreCodePointer(); |
503 __ LeaveStubFrame(); | 519 __ LeaveStubFrame(); |
504 | 520 |
505 // Frame is fully rewritten at this point and it is safe to perform a GC. | 521 // Frame is fully rewritten at this point and it is safe to perform a GC. |
506 // Materialize any objects that were deferred by FillFrame because they | 522 // Materialize any objects that were deferred by FillFrame because they |
507 // require allocation. | 523 // require allocation. |
508 // Enter stub frame, loading PP. The caller's PP is not materialized yet. | 524 // Enter stub frame, loading PP. The caller's PP is not materialized yet. |
509 __ EnterStubFrame(); | 525 __ EnterStubFrame(); |
510 if (preserve_result) { | 526 if (kind == kLazyDeoptFromReturn) { |
511 __ Push(T1); // Preserve result; it will be GC-d here. | 527 __ Push(T1); // Preserve result; it will be GC-d here. |
| 528 } else if (kind == kLazyDeoptFromThrow) { |
| 529 __ Push(T1); // Preserve exception; it will be GC-d here. |
| 530 __ Push(T2); // Preserve stacktrace; it will be GC-d here. |
512 } | 531 } |
513 __ PushObject(Smi::ZoneHandle()); // Space for the result. | 532 __ PushObject(Smi::ZoneHandle()); // Space for the result. |
514 __ CallRuntime(kDeoptimizeMaterializeRuntimeEntry, 0); | 533 __ CallRuntime(kDeoptimizeMaterializeRuntimeEntry, 0); |
515 // Result tells the stub how many bytes to remove from the expression stack | 534 // Result tells the stub how many bytes to remove from the expression stack |
516 // of the bottom-most frame. They were used as materialization arguments. | 535 // of the bottom-most frame. They were used as materialization arguments. |
517 __ Pop(T1); | 536 __ Pop(T1); |
518 if (preserve_result) { | 537 if (kind == kLazyDeoptFromReturn) { |
519 __ Pop(V0); // Restore result. | 538 __ Pop(V0); // Restore result. |
| 539 } else if (kind == kLazyDeoptFromThrow) { |
| 540 __ Pop(V1); // Restore stacktrace. |
| 541 __ Pop(V0); // Restore exception. |
520 } | 542 } |
521 __ LeaveStubFrame(); | 543 __ LeaveStubFrame(); |
522 // Remove materialization arguments. | 544 // Remove materialization arguments. |
523 __ SmiUntag(T1); | 545 __ SmiUntag(T1); |
524 __ addu(SP, SP, T1); | 546 __ addu(SP, SP, T1); |
525 __ Ret(); | 547 __ Ret(); |
526 } | 548 } |
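The saved_*_slot_from_fp constants near the top of the sequence locate a pushed register relative to FP: after EnterStubFrame(kPushedRegistersSize), the topmost pushed word occupies the first local slot, and register r sits (kNumberOfCpuRegisters - r) words below the top of the block. A minimal standalone check of that arithmetic, assuming kFirstLocalSlotFromFp is -2 (an illustrative value; the real constant comes from the VM's frame-layout definitions) and using the MIPS o32 register numbers V0 = 2 and V1 = 3:

#include <cassert>

int main() {
  const int kNumberOfCpuRegisters = 32;
  const int V0 = 2, V1 = 3;              // MIPS o32 register numbers.
  const int kFirstLocalSlotFromFp = -2;  // Assumed illustrative value.

  // Register r is (kNumberOfCpuRegisters - r) words below the top of the
  // pushed block, and the top word occupies the first local slot.
  auto slot_from_fp = [&](int r) {
    return kFirstLocalSlotFromFp + 1 - (kNumberOfCpuRegisters - r);
  };

  assert(slot_from_fp(V0) == -31);  // saved result / saved exception slot.
  assert(slot_from_fp(V1) == -30);  // saved stacktrace slot.
  return 0;
}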
527 | 549 |
528 | 550 // RA: return address + call-instruction-size |
529 void StubCode::GenerateDeoptimizeLazyStub(Assembler* assembler) { | 551 // V0: result, must be preserved |
| 552 void StubCode::GenerateDeoptimizeLazyFromReturnStub(Assembler* assembler) { |
530 // Correct return address to point just after the call that is being | 553 // Correct return address to point just after the call that is being |
531 // deoptimized. | 554 // deoptimized. |
532 __ AddImmediate(RA, -CallPattern::kDeoptCallLengthInBytes); | 555 __ AddImmediate(RA, -CallPattern::kDeoptCallLengthInBytes); |
533 // Push zap value instead of CODE_REG for lazy deopt. | 556 // Push zap value instead of CODE_REG for lazy deopt. |
534 __ LoadImmediate(TMP, 0xf1f1f1f1); | 557 __ LoadImmediate(TMP, 0xf1f1f1f1); |
535 __ Push(TMP); | 558 __ Push(TMP); |
536 GenerateDeoptimizationSequence(assembler, kLazyDeopt); | 559 GenerateDeoptimizationSequence(assembler, kLazyDeoptFromReturn); |
537 } | 560 } |
538 | 561 |
539 | 562 |
| 563 // RA: return address + call-instruction-size |
| 564 // V0: exception, must be preserved |
| 565 // V1: stacktrace, must be preserved |
| 566 void StubCode::GenerateDeoptimizeLazyFromThrowStub(Assembler* assembler) { |
| 567 // Correct return address to point just after the call that is being |
| 568 // deoptimized. |
| 569 __ AddImmediate(RA, -CallPattern::kDeoptCallLengthInBytes); |
| 570 // Push zap value instead of CODE_REG for lazy deopt. |
| 571 __ LoadImmediate(TMP, 0xf1f1f1f1); |
| 572 __ Push(TMP); |
| 573 GenerateDeoptimizationSequence(assembler, kLazyDeoptFromThrow); |
| 574 } |
| 575 |
| 576 |
540 void StubCode::GenerateDeoptimizeStub(Assembler* assembler) { | 577 void StubCode::GenerateDeoptimizeStub(Assembler* assembler) { |
541 GenerateDeoptimizationSequence(assembler, kEagerDeopt); | 578 GenerateDeoptimizationSequence(assembler, kEagerDeopt); |
542 } | 579 } |
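The three deopt stubs above all funnel into GenerateDeoptimizationSequence with a kind tag whose declaration sits in the portion of the file skipped earlier. Based on the call sites in this diff it would look roughly like the sketch below; the enumerator names appear verbatim in the diff, but the enum name itself is an assumption:

// Sketch of the kind tag consumed by GenerateDeoptimizationSequence.
enum DeoptStubKind {
  kLazyDeoptFromReturn,  // V0 holds a result that must be preserved.
  kLazyDeoptFromThrow,   // V0 and V1 hold an exception/stacktrace pair.
  kEagerDeopt            // No live values to preserve.
};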
543 | 580 |
544 | 581 |
545 static void GenerateDispatcherCode(Assembler* assembler, | 582 static void GenerateDispatcherCode(Assembler* assembler, |
546 Label* call_target_function) { | 583 Label* call_target_function) { |
547 __ Comment("NoSuchMethodDispatch"); | 584 __ Comment("NoSuchMethodDispatch"); |
548 // When lazily generated invocation dispatchers are disabled, the | 585 // When lazily generated invocation dispatchers are disabled, the |
549 // miss-handler may return null. | 586 // miss-handler may return null. |
(...skipping 1804 matching lines...) |
2354 } | 2391 } |
2355 | 2392 |
2356 | 2393 |
2357 void StubCode::GenerateFrameAwaitingMaterializationStub(Assembler* assembler) { | 2394 void StubCode::GenerateFrameAwaitingMaterializationStub(Assembler* assembler) { |
2358 __ break_(0); | 2395 __ break_(0); |
2359 } | 2396 } |
2360 | 2397 |
2361 } // namespace dart | 2398 } // namespace dart |
2362 | 2399 |
2363 #endif // defined TARGET_ARCH_MIPS | 2400 #endif // defined TARGET_ARCH_MIPS |