OLD | NEW |
1 // Copyright (c) 1994-2006 Sun Microsystems Inc. | 1 // Copyright (c) 1994-2006 Sun Microsystems Inc. |
2 // All Rights Reserved. | 2 // All Rights Reserved. |
3 // | 3 // |
4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
6 // met: | 6 // met: |
7 // | 7 // |
8 // - Redistributions of source code must retain the above copyright notice, | 8 // - Redistributions of source code must retain the above copyright notice, |
9 // this list of conditions and the following disclaimer. | 9 // this list of conditions and the following disclaimer. |
10 // | 10 // |
(...skipping 122 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
133 s8_fp, | 133 s8_fp, |
134 ra | 134 ra |
135 }; | 135 }; |
136 return kRegisters[num]; | 136 return kRegisters[num]; |
137 } | 137 } |
138 | 138 |
139 | 139 |
140 // ----------------------------------------------------------------------------- | 140 // ----------------------------------------------------------------------------- |
141 // Implementation of RelocInfo. | 141 // Implementation of RelocInfo. |
142 | 142 |
143 const int RelocInfo::kApplyMask = 0; | 143 const int RelocInfo::kApplyMask = 1 << RelocInfo::INTERNAL_REFERENCE; |
144 | 144 |
145 | 145 |
146 bool RelocInfo::IsCodedSpecially() { | 146 bool RelocInfo::IsCodedSpecially() { |
147 // The deserializer needs to know whether a pointer is specially coded. Being | 147 // The deserializer needs to know whether a pointer is specially coded. Being |
148 // specially coded on MIPS means that it is a lui/ori instruction, and that is | 148 // specially coded on MIPS means that it is a lui/ori instruction, and that is |
149 // always the case inside code objects. | 149 // always the case inside code objects. |
150 return true; | 150 return true; |
151 } | 151 } |
152 | 152 |
153 | 153 |
(...skipping 114 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
268 } | 268 } |
269 | 269 |
270 // Setup buffer pointers. | 270 // Setup buffer pointers. |
271 ASSERT(buffer_ != NULL); | 271 ASSERT(buffer_ != NULL); |
272 pc_ = buffer_; | 272 pc_ = buffer_; |
273 reloc_info_writer.Reposition(buffer_ + buffer_size, pc_); | 273 reloc_info_writer.Reposition(buffer_ + buffer_size, pc_); |
274 | 274 |
275 last_trampoline_pool_end_ = 0; | 275 last_trampoline_pool_end_ = 0; |
276 no_trampoline_pool_before_ = 0; | 276 no_trampoline_pool_before_ = 0; |
277 trampoline_pool_blocked_nesting_ = 0; | 277 trampoline_pool_blocked_nesting_ = 0; |
278 next_buffer_check_ = kMaxBranchOffset - kTrampolineSize; | 278 // We leave space (16 * kTrampolineSlotsSize) |
| 279 // for BlockTrampolinePoolScope buffer. |
| 280 next_buffer_check_ = kMaxBranchOffset - kTrampolineSlotsSize * 16; |
279 internal_trampoline_exception_ = false; | 281 internal_trampoline_exception_ = false; |
280 last_bound_pos_ = 0; | 282 last_bound_pos_ = 0; |
281 | 283 |
| 284 trampoline_emitted_ = false; |
| 285 unbound_labels_count_ = 0; |
| 286 block_buffer_growth_ = false; |
| 287 |
282 ast_id_for_reloc_info_ = kNoASTId; | 288 ast_id_for_reloc_info_ = kNoASTId; |
283 } | 289 } |
284 | 290 |
285 | 291 |
286 Assembler::~Assembler() { | 292 Assembler::~Assembler() { |
287 if (own_buffer_) { | 293 if (own_buffer_) { |
288 if (isolate()->assembler_spare_buffer() == NULL && | 294 if (isolate()->assembler_spare_buffer() == NULL && |
289 buffer_size_ == kMinimalBufferSize) { | 295 buffer_size_ == kMinimalBufferSize) { |
290 isolate()->set_assembler_spare_buffer(buffer_); | 296 isolate()->set_assembler_spare_buffer(buffer_); |
291 } else { | 297 } else { |
(...skipping 87 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
379 uint32_t Assembler::GetSaField(Instr instr) { | 385 uint32_t Assembler::GetSaField(Instr instr) { |
380 return instr & kSaFieldMask; | 386 return instr & kSaFieldMask; |
381 } | 387 } |
382 | 388 |
383 | 389 |
384 uint32_t Assembler::GetOpcodeField(Instr instr) { | 390 uint32_t Assembler::GetOpcodeField(Instr instr) { |
385 return instr & kOpcodeMask; | 391 return instr & kOpcodeMask; |
386 } | 392 } |
387 | 393 |
388 | 394 |
| 395 uint32_t Assembler::GetFunction(Instr instr) { |
| 396 return (instr & kFunctionFieldMask) >> kFunctionShift; |
| 397 } |
| 398 |
| 399 |
| 400 uint32_t Assembler::GetFunctionField(Instr instr) { |
| 401 return instr & kFunctionFieldMask; |
| 402 } |
| 403 |
| 404 |
389 uint32_t Assembler::GetImmediate16(Instr instr) { | 405 uint32_t Assembler::GetImmediate16(Instr instr) { |
390 return instr & kImm16Mask; | 406 return instr & kImm16Mask; |
391 } | 407 } |
392 | 408 |
393 | 409 |
394 uint32_t Assembler::GetLabelConst(Instr instr) { | 410 uint32_t Assembler::GetLabelConst(Instr instr) { |
395 return instr & ~kImm16Mask; | 411 return instr & ~kImm16Mask; |
396 } | 412 } |
397 | 413 |
398 | 414 |
(...skipping 38 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
437 // Linked labels refer to unknown positions in the code | 453 // Linked labels refer to unknown positions in the code |
438 // to be generated; pos() is the position of the last | 454 // to be generated; pos() is the position of the last |
439 // instruction using the label. | 455 // instruction using the label. |
440 | 456 |
441 // The link chain is terminated by a value in the instruction of -1, | 457 // The link chain is terminated by a value in the instruction of -1, |
442 // which is an otherwise illegal value (branch -1 is inf loop). | 458 // which is an otherwise illegal value (branch -1 is inf loop). |
443 // The instruction 16-bit offset field addresses 32-bit words, but in | 459 // The instruction 16-bit offset field addresses 32-bit words, but in |
444 // code is converted to an 18-bit value addressing bytes, hence the -4 value. | 460 // code is converted to an 18-bit value addressing bytes, hence the -4 value. |
445 | 461 |
446 const int kEndOfChain = -4; | 462 const int kEndOfChain = -4; |
| 463 // Determines the end of the Jump chain (a subset of the label link chain). |
| 464 const int kEndOfJumpChain = 0; |
447 | 465 |
448 | 466 |
449 bool Assembler::IsBranch(Instr instr) { | 467 bool Assembler::IsBranch(Instr instr) { |
450 uint32_t opcode = GetOpcodeField(instr); | 468 uint32_t opcode = GetOpcodeField(instr); |
451 uint32_t rt_field = GetRtField(instr); | 469 uint32_t rt_field = GetRtField(instr); |
452 uint32_t rs_field = GetRsField(instr); | 470 uint32_t rs_field = GetRsField(instr); |
453 uint32_t label_constant = GetLabelConst(instr); | 471 uint32_t label_constant = GetLabelConst(instr); |
454 // Checks if the instruction is a branch. | 472 // Checks if the instruction is a branch. |
455 return opcode == BEQ || | 473 return opcode == BEQ || |
456 opcode == BNE || | 474 opcode == BNE || |
(...skipping 13 matching lines...) Expand all Loading... |
470 bool Assembler::IsBeq(Instr instr) { | 488 bool Assembler::IsBeq(Instr instr) { |
471 return GetOpcodeField(instr) == BEQ; | 489 return GetOpcodeField(instr) == BEQ; |
472 } | 490 } |
473 | 491 |
474 | 492 |
475 bool Assembler::IsBne(Instr instr) { | 493 bool Assembler::IsBne(Instr instr) { |
476 return GetOpcodeField(instr) == BNE; | 494 return GetOpcodeField(instr) == BNE; |
477 } | 495 } |
478 | 496 |
479 | 497 |
| 498 bool Assembler::IsJump(Instr instr) { |
| 499 uint32_t opcode = GetOpcodeField(instr); |
| 500 uint32_t rt_field = GetRtField(instr); |
| 501 uint32_t rd_field = GetRdField(instr); |
| 502 uint32_t function_field = GetFunctionField(instr); |
| 503 // Checks if the instruction is a jump. |
| 504 return opcode == J || opcode == JAL || |
| 505 (opcode == SPECIAL && rt_field == 0 && |
| 506 ((function_field == JALR) || (rd_field == 0 && (function_field == JR)))); |
| 507 } |
| 508 |
| 509 |
| 510 bool Assembler::IsJ(Instr instr) { |
| 511 uint32_t opcode = GetOpcodeField(instr); |
| 512 // Checks if the instruction is a jump. |
| 513 return opcode == J; |
| 514 } |
| 515 |
| 516 |
| 517 bool Assembler::IsLui(Instr instr) { |
| 518 uint32_t opcode = GetOpcodeField(instr); |
| 519 // Checks if the instruction is a load upper immediate. |
| 520 return opcode == LUI; |
| 521 } |
| 522 |
| 523 |
| 524 bool Assembler::IsOri(Instr instr) { |
| 525 uint32_t opcode = GetOpcodeField(instr); |
| 526 // Checks if the instruction is an ori (or immediate) instruction. |
| 527 return opcode == ORI; |
| 528 } |
| 529 |
| 530 |
480 bool Assembler::IsNop(Instr instr, unsigned int type) { | 531 bool Assembler::IsNop(Instr instr, unsigned int type) { |
481 // See Assembler::nop(type). | 532 // See Assembler::nop(type). |
482 ASSERT(type < 32); | 533 ASSERT(type < 32); |
483 uint32_t opcode = GetOpcodeField(instr); | 534 uint32_t opcode = GetOpcodeField(instr); |
484 uint32_t rt = GetRt(instr); | 535 uint32_t rt = GetRt(instr); |
485 uint32_t rs = GetRs(instr); | 536 uint32_t rs = GetRs(instr); |
486 uint32_t sa = GetSa(instr); | 537 uint32_t sa = GetSa(instr); |
487 | 538 |
488 // nop(type) == sll(zero_reg, zero_reg, type); | 539 // nop(type) == sll(zero_reg, zero_reg, type); |
489 // Technically all these values will be 0 but | 540 // Technically all these values will be 0 but |
(...skipping 67 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
557 Instr instr = instr_at(pos); | 608 Instr instr = instr_at(pos); |
558 if ((instr & ~kImm16Mask) == 0) { | 609 if ((instr & ~kImm16Mask) == 0) { |
559 // Emitted label constant, not part of a branch. | 610 // Emitted label constant, not part of a branch. |
560 if (instr == 0) { | 611 if (instr == 0) { |
561 return kEndOfChain; | 612 return kEndOfChain; |
562 } else { | 613 } else { |
563 int32_t imm18 =((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14; | 614 int32_t imm18 =((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14; |
564 return (imm18 + pos); | 615 return (imm18 + pos); |
565 } | 616 } |
566 } | 617 } |
567 // Check we have a branch instruction. | 618 // Check we have a branch or jump instruction. |
568 ASSERT(IsBranch(instr)); | 619 ASSERT(IsBranch(instr) || IsJ(instr) || IsLui(instr)); |
569 // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming | 620 // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming |
570 // the compiler uses arithmetic shifts for signed integers. | 621 // the compiler uses arithmetic shifts for signed integers. |
571 int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14; | 622 if (IsBranch(instr)) { |
| 623 int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14; |
572 | 624 |
573 if (imm18 == kEndOfChain) { | 625 if (imm18 == kEndOfChain) { |
574 // EndOfChain sentinel is returned directly, not relative to pc or pos. | 626 // EndOfChain sentinel is returned directly, not relative to pc or pos. |
575 return kEndOfChain; | 627 return kEndOfChain; |
| 628 } else { |
| 629 return pos + kBranchPCOffset + imm18; |
| 630 } |
| 631 } else if (IsLui(instr)) { |
| 632 Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize); |
| 633 Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize); |
| 634 ASSERT(IsOri(instr_ori)); |
| 635 int32_t imm = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift; |
| 636 imm |= (instr_ori & static_cast<int32_t>(kImm16Mask)); |
| 637 |
| 638 if (imm == kEndOfJumpChain) { |
| 639 // EndOfChain sentinel is returned directly, not relative to pc or pos. |
| 640 return kEndOfChain; |
| 641 } else { |
| 642 uint32_t instr_address = reinterpret_cast<int32_t>(buffer_ + pos); |
| 643 int32_t delta = instr_address - imm; |
| 644 ASSERT(pos > delta); |
| 645 return pos - delta; |
| 646 } |
576 } else { | 647 } else { |
577 return pos + kBranchPCOffset + imm18; | 648 int32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2; |
| 649 if (imm28 == kEndOfJumpChain) { |
| 650 // EndOfChain sentinel is returned directly, not relative to pc or pos. |
| 651 return kEndOfChain; |
| 652 } else { |
| 653 uint32_t instr_address = reinterpret_cast<int32_t>(buffer_ + pos); |
| 654 instr_address &= kImm28Mask; |
| 655 int32_t delta = instr_address - imm28; |
| 656 ASSERT(pos > delta); |
| 657 return pos - delta; |
| 658 } |
578 } | 659 } |
579 } | 660 } |
580 | 661 |
581 | 662 |
582 void Assembler::target_at_put(int32_t pos, int32_t target_pos) { | 663 void Assembler::target_at_put(int32_t pos, int32_t target_pos) { |
583 Instr instr = instr_at(pos); | 664 Instr instr = instr_at(pos); |
584 if ((instr & ~kImm16Mask) == 0) { | 665 if ((instr & ~kImm16Mask) == 0) { |
585 ASSERT(target_pos == kEndOfChain || target_pos >= 0); | 666 ASSERT(target_pos == kEndOfChain || target_pos >= 0); |
586 // Emitted label constant, not part of a branch. | 667 // Emitted label constant, not part of a branch. |
587 // Make label relative to Code* of generated Code object. | 668 // Make label relative to Code* of generated Code object. |
588 instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag)); | 669 instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag)); |
589 return; | 670 return; |
590 } | 671 } |
591 | 672 |
592 ASSERT(IsBranch(instr)); | 673 ASSERT(IsBranch(instr) || IsJ(instr) || IsLui(instr)); |
593 int32_t imm18 = target_pos - (pos + kBranchPCOffset); | 674 if (IsBranch(instr)) { |
594 ASSERT((imm18 & 3) == 0); | 675 int32_t imm18 = target_pos - (pos + kBranchPCOffset); |
| 676 ASSERT((imm18 & 3) == 0); |
595 | 677 |
596 instr &= ~kImm16Mask; | 678 instr &= ~kImm16Mask; |
597 int32_t imm16 = imm18 >> 2; | 679 int32_t imm16 = imm18 >> 2; |
598 ASSERT(is_int16(imm16)); | 680 ASSERT(is_int16(imm16)); |
599 | 681 |
600 instr_at_put(pos, instr | (imm16 & kImm16Mask)); | 682 instr_at_put(pos, instr | (imm16 & kImm16Mask)); |
| 683 } else if (IsLui(instr)) { |
| 684 Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize); |
| 685 Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize); |
| 686 ASSERT(IsOri(instr_ori)); |
| 687 uint32_t imm = (uint32_t)buffer_ + target_pos; |
| 688 ASSERT((imm & 3) == 0); |
| 689 |
| 690 instr_lui &= ~kImm16Mask; |
| 691 instr_ori &= ~kImm16Mask; |
| 692 |
| 693 instr_at_put(pos + 0 * Assembler::kInstrSize, |
| 694 instr_lui | ((imm & kHiMask) >> kLuiShift)); |
| 695 instr_at_put(pos + 1 * Assembler::kInstrSize, |
| 696 instr_ori | (imm & kImm16Mask)); |
| 697 } else { |
| 698 uint32_t imm28 = (uint32_t)buffer_ + target_pos; |
| 699 imm28 &= kImm28Mask; |
| 700 ASSERT((imm28 & 3) == 0); |
| 701 |
| 702 instr &= ~kImm26Mask; |
| 703 uint32_t imm26 = imm28 >> 2; |
| 704 ASSERT(is_uint26(imm26)); |
| 705 |
| 706 instr_at_put(pos, instr | (imm26 & kImm26Mask)); |
| 707 } |
601 } | 708 } |
602 | 709 |
603 | 710 |
604 void Assembler::print(Label* L) { | 711 void Assembler::print(Label* L) { |
605 if (L->is_unused()) { | 712 if (L->is_unused()) { |
606 PrintF("unused label\n"); | 713 PrintF("unused label\n"); |
607 } else if (L->is_bound()) { | 714 } else if (L->is_bound()) { |
608 PrintF("bound label to %d\n", L->pos()); | 715 PrintF("bound label to %d\n", L->pos()); |
609 } else if (L->is_linked()) { | 716 } else if (L->is_linked()) { |
610 Label l = *L; | 717 Label l = *L; |
611 PrintF("unbound label"); | 718 PrintF("unbound label"); |
612 while (l.is_linked()) { | 719 while (l.is_linked()) { |
613 PrintF("@ %d ", l.pos()); | 720 PrintF("@ %d ", l.pos()); |
614 Instr instr = instr_at(l.pos()); | 721 Instr instr = instr_at(l.pos()); |
615 if ((instr & ~kImm16Mask) == 0) { | 722 if ((instr & ~kImm16Mask) == 0) { |
616 PrintF("value\n"); | 723 PrintF("value\n"); |
617 } else { | 724 } else { |
618 PrintF("%d\n", instr); | 725 PrintF("%d\n", instr); |
619 } | 726 } |
620 next(&l); | 727 next(&l); |
621 } | 728 } |
622 } else { | 729 } else { |
623 PrintF("label in inconsistent state (pos = %d)\n", L->pos_); | 730 PrintF("label in inconsistent state (pos = %d)\n", L->pos_); |
624 } | 731 } |
625 } | 732 } |
626 | 733 |
627 | 734 |
628 void Assembler::bind_to(Label* L, int pos) { | 735 void Assembler::bind_to(Label* L, int pos) { |
629 ASSERT(0 <= pos && pos <= pc_offset()); // Must have valid binding position. | 736 ASSERT(0 <= pos && pos <= pc_offset()); // Must have valid binding position. |
| 737 int32_t trampoline_pos = kInvalidSlotPos; |
| 738 if (L->is_linked() && !trampoline_emitted_) { |
| 739 unbound_labels_count_--; |
| 740 next_buffer_check_ += kTrampolineSlotsSize; |
| 741 } |
| 742 |
630 while (L->is_linked()) { | 743 while (L->is_linked()) { |
631 int32_t fixup_pos = L->pos(); | 744 int32_t fixup_pos = L->pos(); |
632 int32_t dist = pos - fixup_pos; | 745 int32_t dist = pos - fixup_pos; |
633 next(L); // Call next before overwriting link with target at fixup_pos. | 746 next(L); // Call next before overwriting link with target at fixup_pos. |
634 if (dist > kMaxBranchOffset) { | 747 Instr instr = instr_at(fixup_pos); |
635 do { | 748 if (IsBranch(instr)) { |
636 int32_t trampoline_pos = get_trampoline_entry(fixup_pos); | 749 if (dist > kMaxBranchOffset) { |
637 if (kInvalidSlotPos == trampoline_pos) { | 750 if (trampoline_pos == kInvalidSlotPos) { |
638 // Internal error. | 751 trampoline_pos = get_trampoline_entry(fixup_pos); |
639 return; | 752 CHECK(trampoline_pos != kInvalidSlotPos); |
640 } | 753 } |
641 ASSERT((trampoline_pos - fixup_pos) <= kMaxBranchOffset); | 754 ASSERT((trampoline_pos - fixup_pos) <= kMaxBranchOffset); |
642 target_at_put(fixup_pos, trampoline_pos); | 755 target_at_put(fixup_pos, trampoline_pos); |
643 fixup_pos = trampoline_pos; | 756 fixup_pos = trampoline_pos; |
644 dist = pos - fixup_pos; | 757 dist = pos - fixup_pos; |
645 } while (dist > kMaxBranchOffset); | 758 } |
646 } else if (dist < -kMaxBranchOffset) { | 759 target_at_put(fixup_pos, pos); |
647 do { | 760 } else { |
648 int32_t trampoline_pos = get_trampoline_entry(fixup_pos, false); | 761 ASSERT(IsJ(instr) || IsLui(instr)); |
649 if (kInvalidSlotPos == trampoline_pos) { | 762 target_at_put(fixup_pos, pos); |
650 // Internal error. | 763 } |
651 return; | |
652 } | |
653 ASSERT((trampoline_pos - fixup_pos) >= -kMaxBranchOffset); | |
654 target_at_put(fixup_pos, trampoline_pos); | |
655 fixup_pos = trampoline_pos; | |
656 dist = pos - fixup_pos; | |
657 } while (dist < -kMaxBranchOffset); | |
658 }; | |
659 target_at_put(fixup_pos, pos); | |
660 } | 764 } |
661 L->bind_to(pos); | 765 L->bind_to(pos); |
662 | 766 |
663 // Keep track of the last bound label so we don't eliminate any instructions | 767 // Keep track of the last bound label so we don't eliminate any instructions |
664 // before a bound label. | 768 // before a bound label. |
665 if (pos > last_bound_pos_) | 769 if (pos > last_bound_pos_) |
666 last_bound_pos_ = pos; | 770 last_bound_pos_ = pos; |
667 } | 771 } |
668 | 772 |
669 | 773 |
670 void Assembler::link_to(Label* L, Label* appendix) { | |
671 if (appendix->is_linked()) { | |
672 if (L->is_linked()) { | |
673 // Append appendix to L's list. | |
674 int fixup_pos; | |
675 int link = L->pos(); | |
676 do { | |
677 fixup_pos = link; | |
678 link = target_at(fixup_pos); | |
679 } while (link > 0); | |
680 ASSERT(link == kEndOfChain); | |
681 target_at_put(fixup_pos, appendix->pos()); | |
682 } else { | |
683 // L is empty, simply use appendix. | |
684 *L = *appendix; | |
685 } | |
686 } | |
687 appendix->Unuse(); // Appendix should not be used anymore. | |
688 } | |
689 | |
690 | |
691 void Assembler::bind(Label* L) { | 774 void Assembler::bind(Label* L) { |
692 ASSERT(!L->is_bound()); // Label can only be bound once. | 775 ASSERT(!L->is_bound()); // Label can only be bound once. |
693 bind_to(L, pc_offset()); | 776 bind_to(L, pc_offset()); |
694 } | 777 } |
695 | 778 |
696 | 779 |
697 void Assembler::next(Label* L) { | 780 void Assembler::next(Label* L) { |
698 ASSERT(L->is_linked()); | 781 ASSERT(L->is_linked()); |
699 int link = target_at(L->pos()); | 782 int link = target_at(L->pos()); |
700 ASSERT(link > 0 || link == kEndOfChain); | 783 ASSERT(link > 0 || link == kEndOfChain); |
701 if (link == kEndOfChain) { | 784 if (link == kEndOfChain) { |
702 L->Unuse(); | 785 L->Unuse(); |
703 } else if (link > 0) { | 786 } else if (link > 0) { |
704 L->link_to(link); | 787 L->link_to(link); |
705 } | 788 } |
706 } | 789 } |
707 | 790 |
| 791 bool Assembler::is_near(Label* L) { |
| 792 if (L->is_bound()) { |
| 793 return ((pc_offset() - L->pos()) < kMaxBranchOffset - 4 * kInstrSize); |
| 794 } |
| 795 return false; |
| 796 } |
708 | 797 |
709 // We have to use a temporary register for things that can be relocated even | 798 // We have to use a temporary register for things that can be relocated even |
710 // if they can be encoded in the MIPS's 16 bits of immediate-offset instruction | 799 // if they can be encoded in the MIPS's 16 bits of immediate-offset instruction |
711 // space. There is no guarantee that the relocated location can be similarly | 800 // space. There is no guarantee that the relocated location can be similarly |
712 // encoded. | 801 // encoded. |
713 bool Assembler::MustUseReg(RelocInfo::Mode rmode) { | 802 bool Assembler::MustUseReg(RelocInfo::Mode rmode) { |
714 return rmode != RelocInfo::NONE; | 803 return rmode != RelocInfo::NONE; |
715 } | 804 } |
716 | 805 |
717 | 806 |
(...skipping 92 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
810 FPURegister ft, | 899 FPURegister ft, |
811 int32_t j) { | 900 int32_t j) { |
812 ASSERT(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j))); | 901 ASSERT(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j))); |
813 ASSERT(CpuFeatures::IsEnabled(FPU)); | 902 ASSERT(CpuFeatures::IsEnabled(FPU)); |
814 Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift) | 903 Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift) |
815 | (j & kImm16Mask); | 904 | (j & kImm16Mask); |
816 emit(instr); | 905 emit(instr); |
817 } | 906 } |
818 | 907 |
819 | 908 |
820 // Registers are in the order of the instruction encoding, from left to right. | |
821 void Assembler::GenInstrJump(Opcode opcode, | 909 void Assembler::GenInstrJump(Opcode opcode, |
822 uint32_t address) { | 910 uint32_t address) { |
823 BlockTrampolinePoolScope block_trampoline_pool(this); | 911 BlockTrampolinePoolScope block_trampoline_pool(this); |
824 ASSERT(is_uint26(address)); | 912 ASSERT(is_uint26(address)); |
825 Instr instr = opcode | address; | 913 Instr instr = opcode | address; |
826 emit(instr); | 914 emit(instr); |
827 BlockTrampolinePoolFor(1); // For associated delay slot. | 915 BlockTrampolinePoolFor(1); // For associated delay slot. |
828 } | 916 } |
829 | 917 |
830 | 918 |
831 // Returns the next free label entry from the next trampoline pool. | 919 // Returns the next free trampoline entry. |
832 int32_t Assembler::get_label_entry(int32_t pos, bool next_pool) { | 920 int32_t Assembler::get_trampoline_entry(int32_t pos) { |
833 int trampoline_count = trampolines_.length(); | |
834 int32_t label_entry = 0; | |
835 ASSERT(trampoline_count > 0); | |
836 | |
837 if (next_pool) { | |
838 for (int i = 0; i < trampoline_count; i++) { | |
839 if (trampolines_[i].start() > pos) { | |
840 label_entry = trampolines_[i].take_label(); | |
841 break; | |
842 } | |
843 } | |
844 } else { // Caller needs a label entry from the previous pool. | |
845 for (int i = trampoline_count-1; i >= 0; i--) { | |
846 if (trampolines_[i].end() < pos) { | |
847 label_entry = trampolines_[i].take_label(); | |
848 break; | |
849 } | |
850 } | |
851 } | |
852 return label_entry; | |
853 } | |
854 | |
855 | |
856 // Returns the next free trampoline entry from the next trampoline pool. | |
857 int32_t Assembler::get_trampoline_entry(int32_t pos, bool next_pool) { | |
858 int trampoline_count = trampolines_.length(); | |
859 int32_t trampoline_entry = kInvalidSlotPos; | 921 int32_t trampoline_entry = kInvalidSlotPos; |
860 ASSERT(trampoline_count > 0); | |
861 | 922 |
862 if (!internal_trampoline_exception_) { | 923 if (!internal_trampoline_exception_) { |
863 if (next_pool) { | 924 if (trampoline_.start() > pos) { |
864 for (int i = 0; i < trampoline_count; i++) { | 925 trampoline_entry = trampoline_.take_slot(); |
865 if (trampolines_[i].start() > pos) { | |
866 trampoline_entry = trampolines_[i].take_slot(); | |
867 break; | |
868 } | |
869 } | |
870 } else { // Caller needs a trampoline entry from the previous pool. | |
871 for (int i = trampoline_count-1; i >= 0; i--) { | |
872 if (trampolines_[i].end() < pos) { | |
873 trampoline_entry = trampolines_[i].take_slot(); | |
874 break; | |
875 } | |
876 } | |
877 } | 926 } |
| 927 |
878 if (kInvalidSlotPos == trampoline_entry) { | 928 if (kInvalidSlotPos == trampoline_entry) { |
879 internal_trampoline_exception_ = true; | 929 internal_trampoline_exception_ = true; |
880 } | 930 } |
881 } | 931 } |
882 return trampoline_entry; | 932 return trampoline_entry; |
883 } | 933 } |
884 | 934 |
885 | 935 |
886 int32_t Assembler::branch_offset(Label* L, bool jump_elimination_allowed) { | 936 uint32_t Assembler::jump_address(Label* L) { |
887 int32_t target_pos; | 937 int32_t target_pos; |
888 int32_t pc_offset_v = pc_offset(); | |
889 | 938 |
890 if (L->is_bound()) { | 939 if (L->is_bound()) { |
891 target_pos = L->pos(); | 940 target_pos = L->pos(); |
892 int32_t dist = pc_offset_v - target_pos; | |
893 if (dist > kMaxBranchOffset) { | |
894 do { | |
895 int32_t trampoline_pos = get_trampoline_entry(target_pos); | |
896 if (kInvalidSlotPos == trampoline_pos) { | |
897 // Internal error. | |
898 return 0; | |
899 } | |
900 ASSERT((trampoline_pos - target_pos) > 0); | |
901 ASSERT((trampoline_pos - target_pos) <= kMaxBranchOffset); | |
902 target_at_put(trampoline_pos, target_pos); | |
903 target_pos = trampoline_pos; | |
904 dist = pc_offset_v - target_pos; | |
905 } while (dist > kMaxBranchOffset); | |
906 } else if (dist < -kMaxBranchOffset) { | |
907 do { | |
908 int32_t trampoline_pos = get_trampoline_entry(target_pos, false); | |
909 if (kInvalidSlotPos == trampoline_pos) { | |
910 // Internal error. | |
911 return 0; | |
912 } | |
913 ASSERT((target_pos - trampoline_pos) > 0); | |
914 ASSERT((target_pos - trampoline_pos) <= kMaxBranchOffset); | |
915 target_at_put(trampoline_pos, target_pos); | |
916 target_pos = trampoline_pos; | |
917 dist = pc_offset_v - target_pos; | |
918 } while (dist < -kMaxBranchOffset); | |
919 } | |
920 } else { | 941 } else { |
921 if (L->is_linked()) { | 942 if (L->is_linked()) { |
922 target_pos = L->pos(); // L's link. | 943 target_pos = L->pos(); // L's link. |
923 int32_t dist = pc_offset_v - target_pos; | |
924 if (dist > kMaxBranchOffset) { | |
925 do { | |
926 int32_t label_pos = get_label_entry(target_pos); | |
927 ASSERT((label_pos - target_pos) < kMaxBranchOffset); | |
928 label_at_put(L, label_pos); | |
929 target_pos = label_pos; | |
930 dist = pc_offset_v - target_pos; | |
931 } while (dist > kMaxBranchOffset); | |
932 } else if (dist < -kMaxBranchOffset) { | |
933 do { | |
934 int32_t label_pos = get_label_entry(target_pos, false); | |
935 ASSERT((label_pos - target_pos) > -kMaxBranchOffset); | |
936 label_at_put(L, label_pos); | |
937 target_pos = label_pos; | |
938 dist = pc_offset_v - target_pos; | |
939 } while (dist < -kMaxBranchOffset); | |
940 } | |
941 L->link_to(pc_offset()); | 944 L->link_to(pc_offset()); |
942 } else { | 945 } else { |
943 L->link_to(pc_offset()); | 946 L->link_to(pc_offset()); |
| 947 return kEndOfJumpChain; |
| 948 } |
| 949 } |
| 950 |
| 951 uint32_t imm = (uint32_t)buffer_ + target_pos; |
| 952 ASSERT((imm & 3) == 0); |
| 953 |
| 954 return imm; |
| 955 } |
| 956 |
| 957 |
| 958 int32_t Assembler::branch_offset(Label* L, bool jump_elimination_allowed) { |
| 959 int32_t target_pos; |
| 960 |
| 961 if (L->is_bound()) { |
| 962 target_pos = L->pos(); |
| 963 } else { |
| 964 if (L->is_linked()) { |
| 965 target_pos = L->pos(); |
| 966 L->link_to(pc_offset()); |
| 967 } else { |
| 968 L->link_to(pc_offset()); |
| 969 if (!trampoline_emitted_) { |
| 970 unbound_labels_count_++; |
| 971 next_buffer_check_ -= kTrampolineSlotsSize; |
| 972 } |
944 return kEndOfChain; | 973 return kEndOfChain; |
945 } | 974 } |
946 } | 975 } |
947 | 976 |
948 int32_t offset = target_pos - (pc_offset() + kBranchPCOffset); | 977 int32_t offset = target_pos - (pc_offset() + kBranchPCOffset); |
949 ASSERT((offset & 3) == 0); | 978 ASSERT((offset & 3) == 0); |
950 ASSERT(is_int16(offset >> 2)); | 979 ASSERT(is_int16(offset >> 2)); |
951 | 980 |
952 return offset; | 981 return offset; |
953 } | 982 } |
954 | 983 |
955 | 984 |
956 void Assembler::label_at_put(Label* L, int at_offset) { | 985 void Assembler::label_at_put(Label* L, int at_offset) { |
957 int target_pos; | 986 int target_pos; |
958 if (L->is_bound()) { | 987 if (L->is_bound()) { |
959 target_pos = L->pos(); | 988 target_pos = L->pos(); |
960 instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag)); | 989 instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag)); |
961 } else { | 990 } else { |
962 if (L->is_linked()) { | 991 if (L->is_linked()) { |
963 target_pos = L->pos(); // L's link. | 992 target_pos = L->pos(); // L's link. |
964 int32_t imm18 = target_pos - at_offset; | 993 int32_t imm18 = target_pos - at_offset; |
965 ASSERT((imm18 & 3) == 0); | 994 ASSERT((imm18 & 3) == 0); |
966 int32_t imm16 = imm18 >> 2; | 995 int32_t imm16 = imm18 >> 2; |
967 ASSERT(is_int16(imm16)); | 996 ASSERT(is_int16(imm16)); |
968 instr_at_put(at_offset, (imm16 & kImm16Mask)); | 997 instr_at_put(at_offset, (imm16 & kImm16Mask)); |
969 } else { | 998 } else { |
970 target_pos = kEndOfChain; | 999 target_pos = kEndOfChain; |
971 instr_at_put(at_offset, 0); | 1000 instr_at_put(at_offset, 0); |
| 1001 if (!trampoline_emitted_) { |
| 1002 unbound_labels_count_++; |
| 1003 next_buffer_check_ -= kTrampolineSlotsSize; |
| 1004 } |
972 } | 1005 } |
973 L->link_to(at_offset); | 1006 L->link_to(at_offset); |
974 } | 1007 } |
975 } | 1008 } |
976 | 1009 |
977 | 1010 |
978 //------- Branch and jump instructions -------- | 1011 //------- Branch and jump instructions -------- |
979 | 1012 |
980 void Assembler::b(int16_t offset) { | 1013 void Assembler::b(int16_t offset) { |
981 beq(zero_reg, zero_reg, offset); | 1014 beq(zero_reg, zero_reg, offset); |
(...skipping 802 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1784 | 1817 |
1785 | 1818 |
1786 void Assembler::RecordComment(const char* msg) { | 1819 void Assembler::RecordComment(const char* msg) { |
1787 if (FLAG_code_comments) { | 1820 if (FLAG_code_comments) { |
1788 CheckBuffer(); | 1821 CheckBuffer(); |
1789 RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg)); | 1822 RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg)); |
1790 } | 1823 } |
1791 } | 1824 } |
1792 | 1825 |
1793 | 1826 |
| 1827 int Assembler::RelocateInternalReference(byte* pc, intptr_t pc_delta) { |
| 1828 Instr instr = instr_at(pc); |
| 1829 ASSERT(IsJ(instr) || IsLui(instr)); |
| 1830 if (IsLui(instr)) { |
| 1831 Instr instr_lui = instr_at(pc + 0 * Assembler::kInstrSize); |
| 1832 Instr instr_ori = instr_at(pc + 1 * Assembler::kInstrSize); |
| 1833 ASSERT(IsOri(instr_ori)); |
| 1834 int32_t imm = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift; |
| 1835 imm |= (instr_ori & static_cast<int32_t>(kImm16Mask)); |
| 1836 if (imm == kEndOfJumpChain) { |
| 1837 return 0; // Number of instructions patched. |
| 1838 } |
| 1839 imm += pc_delta; |
| 1840 ASSERT((imm & 3) == 0); |
| 1841 |
| 1842 instr_lui &= ~kImm16Mask; |
| 1843 instr_ori &= ~kImm16Mask; |
| 1844 |
| 1845 instr_at_put(pc + 0 * Assembler::kInstrSize, |
| 1846 instr_lui | ((imm >> kLuiShift) & kImm16Mask)); |
| 1847 instr_at_put(pc + 1 * Assembler::kInstrSize, |
| 1848 instr_ori | (imm & kImm16Mask)); |
| 1849 return 2; // Number of instructions patched. |
| 1850 } else { |
| 1851 uint32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2; |
| 1852 if ((int32_t)imm28 == kEndOfJumpChain) { |
| 1853 return 0; // Number of instructions patched. |
| 1854 } |
| 1855 imm28 += pc_delta; |
| 1856 imm28 &= kImm28Mask; |
| 1857 ASSERT((imm28 & 3) == 0); |
| 1858 |
| 1859 instr &= ~kImm26Mask; |
| 1860 uint32_t imm26 = imm28 >> 2; |
| 1861 ASSERT(is_uint26(imm26)); |
| 1862 |
| 1863 instr_at_put(pc, instr | (imm26 & kImm26Mask)); |
| 1864 return 1; // Number of instructions patched. |
| 1865 } |
| 1866 } |
| 1867 |
| 1868 |
1794 void Assembler::GrowBuffer() { | 1869 void Assembler::GrowBuffer() { |
1795 if (!own_buffer_) FATAL("external code buffer is too small"); | 1870 if (!own_buffer_) FATAL("external code buffer is too small"); |
1796 | 1871 |
1797 // Compute new buffer size. | 1872 // Compute new buffer size. |
1798 CodeDesc desc; // The new buffer. | 1873 CodeDesc desc; // The new buffer. |
1799 if (buffer_size_ < 4*KB) { | 1874 if (buffer_size_ < 4*KB) { |
1800 desc.buffer_size = 4*KB; | 1875 desc.buffer_size = 4*KB; |
1801 } else if (buffer_size_ < 1*MB) { | 1876 } else if (buffer_size_ < 1*MB) { |
1802 desc.buffer_size = 2*buffer_size_; | 1877 desc.buffer_size = 2*buffer_size_; |
1803 } else { | 1878 } else { |
(...skipping 15 matching lines...) Expand all Loading... |
1819 reloc_info_writer.pos(), desc.reloc_size); | 1894 reloc_info_writer.pos(), desc.reloc_size); |
1820 | 1895 |
1821 // Switch buffers. | 1896 // Switch buffers. |
1822 DeleteArray(buffer_); | 1897 DeleteArray(buffer_); |
1823 buffer_ = desc.buffer; | 1898 buffer_ = desc.buffer; |
1824 buffer_size_ = desc.buffer_size; | 1899 buffer_size_ = desc.buffer_size; |
1825 pc_ += pc_delta; | 1900 pc_ += pc_delta; |
1826 reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta, | 1901 reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta, |
1827 reloc_info_writer.last_pc() + pc_delta); | 1902 reloc_info_writer.last_pc() + pc_delta); |
1828 | 1903 |
1829 // On ia32 and ARM pc relative addressing is used, and we thus need to apply a | 1904 // Relocate runtime entries. |
1830 // shift by pc_delta. But on MIPS the target address it directly loaded, so | 1905 for (RelocIterator it(desc); !it.done(); it.next()) { |
1831 // we do not need to relocate here. | 1906 RelocInfo::Mode rmode = it.rinfo()->rmode(); |
| 1907 if (rmode == RelocInfo::INTERNAL_REFERENCE) { |
| 1908 byte* p = reinterpret_cast<byte*>(it.rinfo()->pc()); |
| 1909 RelocateInternalReference(p, pc_delta); |
| 1910 } |
| 1911 } |
1832 | 1912 |
1833 ASSERT(!overflow()); | 1913 ASSERT(!overflow()); |
1834 } | 1914 } |
1835 | 1915 |
1836 | 1916 |
1837 void Assembler::db(uint8_t data) { | 1917 void Assembler::db(uint8_t data) { |
1838 CheckBuffer(); | 1918 CheckBuffer(); |
1839 *reinterpret_cast<uint8_t*>(pc_) = data; | 1919 *reinterpret_cast<uint8_t*>(pc_) = data; |
1840 pc_ += sizeof(uint8_t); | 1920 pc_ += sizeof(uint8_t); |
1841 } | 1921 } |
(...skipping 34 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1876 } | 1956 } |
1877 } | 1957 } |
1878 } | 1958 } |
1879 | 1959 |
1880 | 1960 |
1881 void Assembler::BlockTrampolinePoolFor(int instructions) { | 1961 void Assembler::BlockTrampolinePoolFor(int instructions) { |
1882 BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize); | 1962 BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize); |
1883 } | 1963 } |
1884 | 1964 |
1885 | 1965 |
1886 void Assembler::CheckTrampolinePool(bool force_emit) { | 1966 void Assembler::CheckTrampolinePool() { |
1887 // Calculate the offset of the next check. | |
1888 next_buffer_check_ = pc_offset() + kCheckConstInterval; | |
1889 | |
1890 int dist = pc_offset() - last_trampoline_pool_end_; | |
1891 | |
1892 if (dist <= kMaxDistBetweenPools && !force_emit) { | |
1893 return; | |
1894 } | |
1895 | |
1896 // Some small sequences of instructions must not be broken up by the | 1967 // Some small sequences of instructions must not be broken up by the |
1897 // insertion of a trampoline pool; such sequences are protected by setting | 1968 // insertion of a trampoline pool; such sequences are protected by setting |
1898 // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_, | 1969 // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_, |
1899 // which are both checked here. Also, recursive calls to CheckTrampolinePool | 1970 // which are both checked here. Also, recursive calls to CheckTrampolinePool |
1900 // are blocked by trampoline_pool_blocked_nesting_. | 1971 // are blocked by trampoline_pool_blocked_nesting_. |
1901 if ((trampoline_pool_blocked_nesting_ > 0) || | 1972 if ((trampoline_pool_blocked_nesting_ > 0) || |
1902 (pc_offset() < no_trampoline_pool_before_)) { | 1973 (pc_offset() < no_trampoline_pool_before_)) { |
1903 // Emission is currently blocked; make sure we try again as soon as | 1974 // Emission is currently blocked; make sure we try again as soon as |
1904 // possible. | 1975 // possible. |
1905 if (trampoline_pool_blocked_nesting_ > 0) { | 1976 if (trampoline_pool_blocked_nesting_ > 0) { |
1906 next_buffer_check_ = pc_offset() + kInstrSize; | 1977 next_buffer_check_ = pc_offset() + kInstrSize; |
1907 } else { | 1978 } else { |
1908 next_buffer_check_ = no_trampoline_pool_before_; | 1979 next_buffer_check_ = no_trampoline_pool_before_; |
1909 } | 1980 } |
1910 return; | 1981 return; |
1911 } | 1982 } |
1912 | 1983 |
1913 // First we emit jump (2 instructions), then we emit trampoline pool. | 1984 ASSERT(!trampoline_emitted_); |
1914 { BlockTrampolinePoolScope block_trampoline_pool(this); | 1985 ASSERT(unbound_labels_count_ >= 0); |
1915 Label after_pool; | 1986 if (unbound_labels_count_ > 0) { |
1916 b(&after_pool); | 1987 // First we emit jump (2 instructions), then we emit trampoline pool. |
1917 nop(); | 1988 { BlockTrampolinePoolScope block_trampoline_pool(this); |
1918 | 1989 Label after_pool; |
1919 int pool_start = pc_offset(); | |
1920 for (int i = 0; i < kSlotsPerTrampoline; i++) { | |
1921 b(&after_pool); | 1990 b(&after_pool); |
1922 nop(); | 1991 nop(); |
| 1992 |
| 1993 int pool_start = pc_offset(); |
| 1994 for (int i = 0; i < unbound_labels_count_; i++) { |
| 1995 uint32_t imm32; |
| 1996 imm32 = jump_address(&after_pool); |
| 1997 { BlockGrowBufferScope block_buf_growth(this); |
| 1998 // Buffer growth (and relocation) must be blocked for internal |
| 1999 // references until associated instructions are emitted and available |
| 2000 // to be patched. |
| 2001 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE); |
| 2002 lui(at, (imm32 & kHiMask) >> kLuiShift); |
| 2003 ori(at, at, (imm32 & kImm16Mask)); |
| 2004 } |
| 2005 jr(at); |
| 2006 nop(); |
| 2007 } |
| 2008 bind(&after_pool); |
| 2009 trampoline_ = Trampoline(pool_start, unbound_labels_count_); |
| 2010 |
| 2011 trampoline_emitted_ = true; |
| 2012 // As we are only going to emit trampoline once, we need to prevent any |
| 2013 // further emission. |
| 2014 next_buffer_check_ = kMaxInt; |
1923 } | 2015 } |
1924 for (int i = 0; i < kLabelsPerTrampoline; i++) { | 2016 } else { |
1925 emit(0); | 2017 // Number of branches to unbound label at this point is zero, so we can |
1926 } | 2018 // move next buffer check to maximum. |
1927 last_trampoline_pool_end_ = pc_offset() - kInstrSize; | 2019 next_buffer_check_ = pc_offset() + |
1928 bind(&after_pool); | 2020 kMaxBranchOffset - kTrampolineSlotsSize * 16; |
1929 trampolines_.Add(Trampoline(pool_start, | |
1930 kSlotsPerTrampoline, | |
1931 kLabelsPerTrampoline)); | |
1932 | |
1933 // Since a trampoline pool was just emitted, | |
1934 // move the check offset forward by the standard interval. | |
1935 next_buffer_check_ = last_trampoline_pool_end_ + kMaxDistBetweenPools; | |
1936 } | 2021 } |
1937 return; | 2022 return; |
1938 } | 2023 } |
1939 | 2024 |
1940 | 2025 |
1941 Address Assembler::target_address_at(Address pc) { | 2026 Address Assembler::target_address_at(Address pc) { |
1942 Instr instr1 = instr_at(pc); | 2027 Instr instr1 = instr_at(pc); |
1943 Instr instr2 = instr_at(pc + kInstrSize); | 2028 Instr instr2 = instr_at(pc + kInstrSize); |
1944 // Interpret 2 instructions generated by li: lui/ori | 2029 // Interpret 2 instructions generated by li: lui/ori |
1945 if ((GetOpcodeField(instr1) == LUI) && (GetOpcodeField(instr2) == ORI)) { | 2030 if ((GetOpcodeField(instr1) == LUI) && (GetOpcodeField(instr2) == ORI)) { |
(...skipping 29 matching lines...) Expand all Loading... |
1975 *p = LUI | rt_code | ((itarget & kHiMask) >> kLuiShift); | 2060 *p = LUI | rt_code | ((itarget & kHiMask) >> kLuiShift); |
1976 *(p+1) = ORI | rt_code | (rt_code << 5) | (itarget & kImm16Mask); | 2061 *(p+1) = ORI | rt_code | (rt_code << 5) | (itarget & kImm16Mask); |
1977 | 2062 |
1978 CPU::FlushICache(pc, 2 * sizeof(int32_t)); | 2063 CPU::FlushICache(pc, 2 * sizeof(int32_t)); |
1979 } | 2064 } |
1980 | 2065 |
1981 | 2066 |
1982 } } // namespace v8::internal | 2067 } } // namespace v8::internal |
1983 | 2068 |
1984 #endif // V8_TARGET_ARCH_MIPS | 2069 #endif // V8_TARGET_ARCH_MIPS |
OLD | NEW |