OLD | NEW |
---|---|
1 // Copyright (c) 1994-2006 Sun Microsystems Inc. | 1 // Copyright (c) 1994-2006 Sun Microsystems Inc. |
2 // All Rights Reserved. | 2 // All Rights Reserved. |
3 // | 3 // |
4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
6 // met: | 6 // met: |
7 // | 7 // |
8 // - Redistributions of source code must retain the above copyright notice, | 8 // - Redistributions of source code must retain the above copyright notice, |
9 // this list of conditions and the following disclaimer. | 9 // this list of conditions and the following disclaimer. |
10 // | 10 // |
(...skipping 122 matching lines...) | |
133 s8_fp, | 133 s8_fp, |
134 ra | 134 ra |
135 }; | 135 }; |
136 return kRegisters[num]; | 136 return kRegisters[num]; |
137 } | 137 } |
138 | 138 |
139 | 139 |
140 // ----------------------------------------------------------------------------- | 140 // ----------------------------------------------------------------------------- |
141 // Implementation of RelocInfo. | 141 // Implementation of RelocInfo. |
142 | 142 |
143 const int RelocInfo::kApplyMask = 0; | 143 const int RelocInfo::kApplyMask = 1 << RelocInfo::INTERNAL_REFERENCE; |
144 | 144 |
145 | 145 |
146 bool RelocInfo::IsCodedSpecially() { | 146 bool RelocInfo::IsCodedSpecially() { |
147 // The deserializer needs to know whether a pointer is specially coded. Being | 147 // The deserializer needs to know whether a pointer is specially coded. Being |
148 // specially coded on MIPS means that it is a lui/ori instruction, and that is | 148 // specially coded on MIPS means that it is a lui/ori instruction, and that is |
149 // always the case inside code objects. | 149 // always the case inside code objects. |
150 return true; | 150 return true; |
151 } | 151 } |
152 | 152 |
153 | 153 |
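A note on "specially coded": a 32-bit pointer cannot fit in one fixed-width MIPS instruction, so the assembler materializes it as a lui/ori pair. A minimal standalone sketch of that split (names here are illustrative, not the assembler's own helpers):

    #include <assert.h>
    #include <stdint.h>

    int main() {
      uint32_t addr = 0x4002abcd;        // some code-object pointer
      uint32_t lui_imm = addr >> 16;     // upper half, loaded by lui
      uint32_t ori_imm = addr & 0xffff;  // lower half, OR'd in by ori
      // lui clears the low 16 bits; ori fills them back in.
      assert(((lui_imm << 16) | ori_imm) == addr);
      return 0;
    }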
(...skipping 114 matching lines...) | |
268 } | 268 } |
269 | 269 |
270 // Set up buffer pointers. | 270 // Set up buffer pointers. |
271 ASSERT(buffer_ != NULL); | 271 ASSERT(buffer_ != NULL); |
272 pc_ = buffer_; | 272 pc_ = buffer_; |
273 reloc_info_writer.Reposition(buffer_ + buffer_size, pc_); | 273 reloc_info_writer.Reposition(buffer_ + buffer_size, pc_); |
274 | 274 |
275 last_trampoline_pool_end_ = 0; | 275 last_trampoline_pool_end_ = 0; |
276 no_trampoline_pool_before_ = 0; | 276 no_trampoline_pool_before_ = 0; |
277 trampoline_pool_blocked_nesting_ = 0; | 277 trampoline_pool_blocked_nesting_ = 0; |
278 next_buffer_check_ = kMaxBranchOffset - kTrampolineSize; | 278 // We leave space (16 * kTrampolineSlotsSize) |
279 // for BlockTrampolinePoolScope buffer. | |
280 next_buffer_check_ = kMaxBranchOffset - kTrampolineSlotsSize * 16; | |
279 internal_trampoline_exception_ = false; | 281 internal_trampoline_exception_ = false; |
280 last_bound_pos_ = 0; | 282 last_bound_pos_ = 0; |
281 | 283 |
284 trampoline_emitted_ = false; | |
285 unbound_labels_count = 0; | |
286 | |
282 ast_id_for_reloc_info_ = kNoASTId; | 287 ast_id_for_reloc_info_ = kNoASTId; |
283 } | 288 } |
284 | 289 |
285 | 290 |
286 Assembler::~Assembler() { | 291 Assembler::~Assembler() { |
287 if (own_buffer_) { | 292 if (own_buffer_) { |
288 if (isolate()->assembler_spare_buffer() == NULL && | 293 if (isolate()->assembler_spare_buffer() == NULL && |
289 buffer_size_ == kMinimalBufferSize) { | 294 buffer_size_ == kMinimalBufferSize) { |
290 isolate()->set_assembler_spare_buffer(buffer_); | 295 isolate()->set_assembler_spare_buffer(buffer_); |
291 } else { | 296 } else { |
(...skipping 87 matching lines...) | |
379 uint32_t Assembler::GetSaField(Instr instr) { | 384 uint32_t Assembler::GetSaField(Instr instr) { |
380 return instr & kSaFieldMask; | 385 return instr & kSaFieldMask; |
381 } | 386 } |
382 | 387 |
383 | 388 |
384 uint32_t Assembler::GetOpcodeField(Instr instr) { | 389 uint32_t Assembler::GetOpcodeField(Instr instr) { |
385 return instr & kOpcodeMask; | 390 return instr & kOpcodeMask; |
386 } | 391 } |
387 | 392 |
388 | 393 |
394 uint32_t Assembler::GetFunction(Instr instr) { | |
395 return (instr & kFunctionFieldMask) >> kFunctionShift; | |
396 } | |
397 | |
398 | |
399 uint32_t Assembler::GetFunctionField(Instr instr) { | |
400 return instr & kFunctionFieldMask; | |
401 } | |
402 | |
403 | |
389 uint32_t Assembler::GetImmediate16(Instr instr) { | 404 uint32_t Assembler::GetImmediate16(Instr instr) { |
390 return instr & kImm16Mask; | 405 return instr & kImm16Mask; |
391 } | 406 } |
392 | 407 |
393 | 408 |
394 uint32_t Assembler::GetLabelConst(Instr instr) { | 409 uint32_t Assembler::GetLabelConst(Instr instr) { |
395 return instr & ~kImm16Mask; | 410 return instr & ~kImm16Mask; |
396 } | 411 } |
397 | 412 |
398 | 413 |
(...skipping 38 matching lines...) | |
437 // Linked labels refer to unknown positions in the code | 452 // Linked labels refer to unknown positions in the code |
438 // to be generated; pos() is the position of the last | 453 // to be generated; pos() is the position of the last |
439 // instruction using the label. | 454 // instruction using the label. |
440 | 455 |
441 // The link chain is terminated by a -1 offset value in the instruction, | 456 // The link chain is terminated by a -1 offset value in the instruction, |
442 // which is an otherwise illegal value (branch -1 is inf loop). | 457 // which is an otherwise illegal value (branch -1 is inf loop). |
443 // The instruction 16-bit offset field addresses 32-bit words, but in | 458 // The instruction 16-bit offset field addresses 32-bit words, but in |
444 // code is converted to an 18-bit value addressing bytes, hence the -4 value. | 459 // code is converted to an 18-bit value addressing bytes, hence the -4 value. |
445 | 460 |
446 const int kEndOfChain = -4; | 461 const int kEndOfChain = -4; |
462 // Determines the end of the Jump chain (a subset of the label link chain). | |
463 const int kEndOfJumpChain = 0; | |
447 | 464 |
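Where the -4 comes from: the chain terminator stored in the 16-bit offset field is -1 (an illegal branch, as noted above); branch offsets count 32-bit words, so converting to bytes multiplies by 4. A worked check that mirrors the shift expression used in target_at below:

    #include <assert.h>
    #include <stdint.h>

    int main() {
      int32_t instr = 0x0000ffff;  // offset field holds -1, rest zero
      // Sign-extend the 16-bit field and convert words to bytes in one
      // step, exactly as target_at does (arithmetic right shift assumed).
      int32_t imm18 = ((instr & 0xffff) << 16) >> 14;
      assert(imm18 == -4);  // == kEndOfChain
      return 0;
    }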
448 | 465 |
449 bool Assembler::IsBranch(Instr instr) { | 466 bool Assembler::IsBranch(Instr instr) { |
450 uint32_t opcode = GetOpcodeField(instr); | 467 uint32_t opcode = GetOpcodeField(instr); |
451 uint32_t rt_field = GetRtField(instr); | 468 uint32_t rt_field = GetRtField(instr); |
452 uint32_t rs_field = GetRsField(instr); | 469 uint32_t rs_field = GetRsField(instr); |
453 uint32_t label_constant = GetLabelConst(instr); | 470 uint32_t label_constant = GetLabelConst(instr); |
454 // Checks if the instruction is a branch. | 471 // Checks if the instruction is a branch. |
455 return opcode == BEQ || | 472 return opcode == BEQ || |
456 opcode == BNE || | 473 opcode == BNE || |
(...skipping 13 matching lines...) | |
470 bool Assembler::IsBeq(Instr instr) { | 487 bool Assembler::IsBeq(Instr instr) { |
471 return GetOpcodeField(instr) == BEQ; | 488 return GetOpcodeField(instr) == BEQ; |
472 } | 489 } |
473 | 490 |
474 | 491 |
475 bool Assembler::IsBne(Instr instr) { | 492 bool Assembler::IsBne(Instr instr) { |
476 return GetOpcodeField(instr) == BNE; | 493 return GetOpcodeField(instr) == BNE; |
477 } | 494 } |
478 | 495 |
479 | 496 |
497 bool Assembler::IsJump(Instr instr) { | |
498 uint32_t opcode = GetOpcodeField(instr); | |
499 uint32_t rt_field = GetRtField(instr); | |
500 uint32_t rd_field = GetRdField(instr); | |
501 uint32_t function_field = GetFunctionField(instr); | |
502 // Checks if the instruction is a jump. | |
503 return opcode == J || opcode == JAL || | |
504 (opcode == SPECIAL && rt_field == 0 && | |
505 ((function_field == JALR) || (rd_field == 0 && (function_field == JR)))); | |
506 } | |
507 | |
508 | |
509 bool Assembler::IsJ(Instr instr) { | |
510 uint32_t opcode = GetOpcodeField(instr); | |
511 // Checks if the instruction is a jump. | |
512 return opcode == J; | |
513 } | |
514 | |
515 | |
516 bool Assembler::IsLui(Instr instr) { | |
517 uint32_t opcode = GetOpcodeField(instr); | |
518 // Checks if the instruction is a load upper immediate. | |
519 return opcode == LUI; | |
520 } | |
521 | |
522 | |
523 bool Assembler::IsOri(Instr instr) { | |
524 uint32_t opcode = GetOpcodeField(instr); | |
525 // Checks if the instruction is an or immediate. | |
526 return opcode == ORI; | |
527 } | |
528 | |
529 | |
480 bool Assembler::IsNop(Instr instr, unsigned int type) { | 530 bool Assembler::IsNop(Instr instr, unsigned int type) { |
481 // See Assembler::nop(type). | 531 // See Assembler::nop(type). |
482 ASSERT(type < 32); | 532 ASSERT(type < 32); |
483 uint32_t opcode = GetOpcodeField(instr); | 533 uint32_t opcode = GetOpcodeField(instr); |
484 uint32_t rt = GetRt(instr); | 534 uint32_t rt = GetRt(instr); |
485 uint32_t rs = GetRs(instr); | 535 uint32_t rs = GetRs(instr); |
486 uint32_t sa = GetSa(instr); | 536 uint32_t sa = GetSa(instr); |
487 | 537 |
488 // nop(type) == sll(zero_reg, zero_reg, type); | 538 // nop(type) == sll(zero_reg, zero_reg, type); |
489 // Technically all these values will be 0 but | 539 // Technically all these values will be 0 but |
(...skipping 67 matching lines...) | |
557 Instr instr = instr_at(pos); | 607 Instr instr = instr_at(pos); |
558 if ((instr & ~kImm16Mask) == 0) { | 608 if ((instr & ~kImm16Mask) == 0) { |
559 // Emitted label constant, not part of a branch. | 609 // Emitted label constant, not part of a branch. |
560 if (instr == 0) { | 610 if (instr == 0) { |
561 return kEndOfChain; | 611 return kEndOfChain; |
562 } else { | 612 } else { |
563 int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14; | 613 int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14; |
564 return (imm18 + pos); | 614 return (imm18 + pos); |
565 } | 615 } |
566 } | 616 } |
567 // Check we have a branch instruction. | 617 // Check we have a branch or jump instruction. |
568 ASSERT(IsBranch(instr)); | 618 ASSERT(IsBranch(instr) || IsJ(instr) || IsLui(instr)); |
569 // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming | 619 // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming |
570 // the compiler uses arithmetic shifts for signed integers. | 620 // the compiler uses arithmetic shifts for signed integers. |
571 int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14; | 621 if (IsBranch(instr)) { |
622 int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14; | |
572 | 623 |
573 if (imm18 == kEndOfChain) { | 624 if (imm18 == kEndOfChain) { |
574 // EndOfChain sentinel is returned directly, not relative to pc or pos. | 625 // EndOfChain sentinel is returned directly, not relative to pc or pos. |
575 return kEndOfChain; | 626 return kEndOfChain; |
627 } else { | |
628 return pos + kBranchPCOffset + imm18; | |
629 } | |
630 } else if (IsLui(instr)) { | |
631 Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize); | |
632 Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize); | |
633 ASSERT(IsLui(instr_lui) && IsOri(instr_ori)); | |
Søren Thygesen Gjesse
2011/06/24 13:05:15
IsLui(instr_lui) is not needed in the ASSERT (already checked).
Paul Lind
2011/06/28 06:53:14
Done.
| |
634 int32_t imm = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift; | |
635 imm |= (instr_ori & static_cast<int32_t>(kImm16Mask)); | |
636 | |
637 if (imm == kEndOfJumpChain) { | |
638 // EndOfChain sentinel is returned directly, not relative to pc or pos. | |
639 return kEndOfChain; | |
640 } else { | |
641 uint32_t instr_address = reinterpret_cast<uint32_t>(buffer_ + pos); | |
642 int32_t delta = instr_address - imm; | |
643 ASSERT(pos > delta); | |
644 return pos - delta; | |
645 } | |
576 } else { | 646 } else { |
577 return pos + kBranchPCOffset + imm18; | 647 int32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2; |
648 if (imm28 == kEndOfJumpChain) { | |
649 // EndOfChain sentinel is returned directly, not relative to pc or pos. | |
650 return kEndOfChain; | |
651 } else { | |
652 uint32_t instr_address = reinterpret_cast<uint32_t>(buffer_ + pos); | |
653 instr_address &= kImm28Mask; | |
654 int32_t delta = instr_address - imm28; | |
655 ASSERT(pos > delta); | |
656 return pos - delta; | |
657 } | |
578 } | 658 } |
579 } | 659 } |
580 | 660 |
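The lui/ori case above undoes what target_at_put encodes: the two 16-bit immediates hold the absolute address of the previous link in the chain, and subtracting it from the instruction's own absolute address recovers the linked position. A condensed sketch of that round trip (standalone names; an assumed buffer base stands in for buffer_):

    #include <assert.h>
    #include <stdint.h>

    int main() {
      uint32_t buffer = 0x40000000;  // assumed code buffer base
      int32_t link_pos = 0x120;      // earlier position in the chain
      // Encode: the lui/ori pair stores the absolute address of link_pos.
      uint32_t imm = buffer + link_pos;
      uint32_t lui_imm = imm >> 16, ori_imm = imm & 0xffff;
      // Decode, as target_at does for the instruction at position pos.
      int32_t pos = 0x200;
      uint32_t decoded = (lui_imm << 16) | ori_imm;
      int32_t delta = (buffer + pos) - decoded;
      assert(pos - delta == link_pos);  // the linked position is recovered
      return 0;
    }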
581 | 661 |
582 void Assembler::target_at_put(int32_t pos, int32_t target_pos) { | 662 void Assembler::target_at_put(int32_t pos, int32_t target_pos) { |
583 Instr instr = instr_at(pos); | 663 Instr instr = instr_at(pos); |
584 if ((instr & ~kImm16Mask) == 0) { | 664 if ((instr & ~kImm16Mask) == 0) { |
585 ASSERT(target_pos == kEndOfChain || target_pos >= 0); | 665 ASSERT(target_pos == kEndOfChain || target_pos >= 0); |
586 // Emitted label constant, not part of a branch. | 666 // Emitted label constant, not part of a branch. |
587 // Make label relative to Code* of generated Code object. | 667 // Make label relative to Code* of generated Code object. |
588 instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag)); | 668 instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag)); |
589 return; | 669 return; |
590 } | 670 } |
591 | 671 |
592 ASSERT(IsBranch(instr)); | 672 ASSERT(IsBranch(instr) || IsJ(instr) || IsLui(instr)); |
593 int32_t imm18 = target_pos - (pos + kBranchPCOffset); | 673 if (IsBranch(instr)) { |
594 ASSERT((imm18 & 3) == 0); | 674 int32_t imm18 = target_pos - (pos + kBranchPCOffset); |
675 ASSERT((imm18 & 3) == 0); | |
595 | 676 |
596 instr &= ~kImm16Mask; | 677 instr &= ~kImm16Mask; |
597 int32_t imm16 = imm18 >> 2; | 678 int32_t imm16 = imm18 >> 2; |
598 ASSERT(is_int16(imm16)); | 679 ASSERT(is_int16(imm16)); |
599 | 680 |
600 instr_at_put(pos, instr | (imm16 & kImm16Mask)); | 681 instr_at_put(pos, instr | (imm16 & kImm16Mask)); |
682 } else if (IsLui(instr)) { | |
683 Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize); | |
684 Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize); | |
685 ASSERT(IsLui(instr_lui) && IsOri(instr_ori)); | |
Søren Thygesen Gjesse
2011/06/24 13:05:15
Ditto.
Paul Lind
2011/06/28 06:53:14
Done.
| |
686 uint32_t imm = reinterpret_cast<uint32_t>(buffer_) + target_pos; | |
687 ASSERT((imm & 3) == 0); | |
688 | |
689 instr_lui &= ~kImm16Mask; | |
690 instr_ori &= ~kImm16Mask; | |
691 | |
692 instr_at_put(pos + 0 * Assembler::kInstrSize, | |
693 instr_lui | ((imm & kHiMask) >> kLuiShift)); | |
694 instr_at_put(pos + 1 * Assembler::kInstrSize, | |
695 instr_ori | (imm & kImm16Mask)); | |
696 } else { | |
697 uint32_t imm28 = reinterpret_cast<uint32_t>(buffer_) + target_pos; | |
698 imm28 &= kImm28Mask; | |
699 ASSERT((imm28 & 3) == 0); | |
700 | |
701 instr &= ~kImm26Mask; | |
702 uint32_t imm26 = imm28 >> 2; | |
703 ASSERT(is_uint26(imm26)); | |
704 | |
705 instr_at_put(pos, instr | (imm26 & kImm26Mask)); | |
706 } | |
601 } | 707 } |
602 | 708 |
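For the J case, only the low 28 bits of the target are encodable: a J instruction keeps the top 4 bits of the current PC and substitutes imm26 << 2 for the rest, so jump and target must share a 256 MB region. A quick check of the field arithmetic (mask values assumed: kImm26Mask == 0x03ffffff, kImm28Mask == 0x0fffffff):

    #include <assert.h>
    #include <stdint.h>

    int main() {
      uint32_t target = 0x40001230;          // absolute byte address
      uint32_t imm28 = target & 0x0fffffff;  // low 28 bits, word aligned
      assert((imm28 & 3) == 0);
      uint32_t imm26 = imm28 >> 2;           // stored in the J instruction
      // A jump executing anywhere in the same 256 MB region reconstructs it.
      uint32_t pc = 0x40000000;
      uint32_t dest = (pc & 0xf0000000) | (imm26 << 2);
      assert(dest == target);
      return 0;
    }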
603 | 709 |
604 void Assembler::print(Label* L) { | 710 void Assembler::print(Label* L) { |
605 if (L->is_unused()) { | 711 if (L->is_unused()) { |
606 PrintF("unused label\n"); | 712 PrintF("unused label\n"); |
607 } else if (L->is_bound()) { | 713 } else if (L->is_bound()) { |
608 PrintF("bound label to %d\n", L->pos()); | 714 PrintF("bound label to %d\n", L->pos()); |
609 } else if (L->is_linked()) { | 715 } else if (L->is_linked()) { |
610 Label l = *L; | 716 Label l = *L; |
611 PrintF("unbound label"); | 717 PrintF("unbound label"); |
612 while (l.is_linked()) { | 718 while (l.is_linked()) { |
613 PrintF("@ %d ", l.pos()); | 719 PrintF("@ %d ", l.pos()); |
614 Instr instr = instr_at(l.pos()); | 720 Instr instr = instr_at(l.pos()); |
615 if ((instr & ~kImm16Mask) == 0) { | 721 if ((instr & ~kImm16Mask) == 0) { |
616 PrintF("value\n"); | 722 PrintF("value\n"); |
617 } else { | 723 } else { |
618 PrintF("%d\n", instr); | 724 PrintF("%d\n", instr); |
619 } | 725 } |
620 next(&l); | 726 next(&l); |
621 } | 727 } |
622 } else { | 728 } else { |
623 PrintF("label in inconsistent state (pos = %d)\n", L->pos_); | 729 PrintF("label in inconsistent state (pos = %d)\n", L->pos_); |
624 } | 730 } |
625 } | 731 } |
626 | 732 |
627 | 733 |
628 void Assembler::bind_to(Label* L, int pos) { | 734 void Assembler::bind_to(Label* L, int pos) { |
629 ASSERT(0 <= pos && pos <= pc_offset()); // Must have valid binding position. | 735 ASSERT(0 <= pos && pos <= pc_offset()); // Must have valid binding position. |
736 int32_t trampoline_pos = kInvalidSlotPos; | |
737 if (L->is_linked() && !trampoline_emitted_) { | |
738 unbound_labels_count--; | |
739 next_buffer_check_ += kTrampolineSlotsSize; | |
740 } | |
741 | |
630 while (L->is_linked()) { | 742 while (L->is_linked()) { |
631 int32_t fixup_pos = L->pos(); | 743 int32_t fixup_pos = L->pos(); |
632 int32_t dist = pos - fixup_pos; | 744 int32_t dist = pos - fixup_pos; |
633 next(L); // Call next before overwriting link with target at fixup_pos. | 745 next(L); // Call next before overwriting link with target at fixup_pos. |
634 if (dist > kMaxBranchOffset) { | 746 Instr instr = instr_at(fixup_pos); |
635 do { | 747 if (IsBranch(instr)) { |
636 int32_t trampoline_pos = get_trampoline_entry(fixup_pos); | 748 if (dist > kMaxBranchOffset) { |
637 if (kInvalidSlotPos == trampoline_pos) { | 749 if (trampoline_pos == kInvalidSlotPos) { |
638 // Internal error. | 750 trampoline_pos = get_trampoline_entry(fixup_pos); |
639 return; | 751 CHECK(trampoline_pos != kInvalidSlotPos); |
640 } | 752 } |
641 ASSERT((trampoline_pos - fixup_pos) <= kMaxBranchOffset); | 753 ASSERT((trampoline_pos - fixup_pos) <= kMaxBranchOffset); |
642 target_at_put(fixup_pos, trampoline_pos); | 754 target_at_put(fixup_pos, trampoline_pos); |
643 fixup_pos = trampoline_pos; | 755 fixup_pos = trampoline_pos; |
644 dist = pos - fixup_pos; | 756 dist = pos - fixup_pos; |
645 } while (dist > kMaxBranchOffset); | 757 } |
646 } else if (dist < -kMaxBranchOffset) { | 758 target_at_put(fixup_pos, pos); |
647 do { | 759 } else { |
648 int32_t trampoline_pos = get_trampoline_entry(fixup_pos, false); | 760 ASSERT(IsJ(instr) || IsLui(instr)); |
649 if (kInvalidSlotPos == trampoline_pos) { | 761 target_at_put(fixup_pos, pos); |
650 // Internal error. | 762 } |
651 return; | |
652 } | |
653 ASSERT((trampoline_pos - fixup_pos) >= -kMaxBranchOffset); | |
654 target_at_put(fixup_pos, trampoline_pos); | |
655 fixup_pos = trampoline_pos; | |
656 dist = pos - fixup_pos; | |
657 } while (dist < -kMaxBranchOffset); | |
658 }; | |
659 target_at_put(fixup_pos, pos); | |
660 } | 763 } |
661 L->bind_to(pos); | 764 L->bind_to(pos); |
662 | 765 |
663 // Keep track of the last bound label so we don't eliminate any instructions | 766 // Keep track of the last bound label so we don't eliminate any instructions |
664 // before a bound label. | 767 // before a bound label. |
665 if (pos > last_bound_pos_) | 768 if (pos > last_bound_pos_) |
666 last_bound_pos_ = pos; | 769 last_bound_pos_ = pos; |
667 } | 770 } |
668 | 771 |
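The trampoline rerouting above only triggers for forward branches whose displacement exceeds what the sign-extended 16-bit word offset can reach. The reach test in isolation (a hypothetical helper; the constant's value is an assumption derived from the 18-bit byte offset):

    #include <stdint.h>

    const int32_t kMaxBranchOffsetGuess = (1 << 17) - 1;  // assumed reach

    // Sketch: true when a branch at fixup_pos cannot hop directly to pos
    // and must be pointed at a trampoline slot instead (see bind_to).
    bool NeedsTrampoline(int32_t fixup_pos, int32_t pos) {
      return (pos - fixup_pos) > kMaxBranchOffsetGuess;
    }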
669 | 772 |
670 void Assembler::link_to(Label* L, Label* appendix) { | |
671 if (appendix->is_linked()) { | |
672 if (L->is_linked()) { | |
673 // Append appendix to L's list. | |
674 int fixup_pos; | |
675 int link = L->pos(); | |
676 do { | |
677 fixup_pos = link; | |
678 link = target_at(fixup_pos); | |
679 } while (link > 0); | |
680 ASSERT(link == kEndOfChain); | |
681 target_at_put(fixup_pos, appendix->pos()); | |
682 } else { | |
683 // L is empty, simply use appendix. | |
684 *L = *appendix; | |
685 } | |
686 } | |
687 appendix->Unuse(); // Appendix should not be used anymore. | |
688 } | |
689 | |
690 | |
691 void Assembler::bind(Label* L) { | 773 void Assembler::bind(Label* L) { |
692 ASSERT(!L->is_bound()); // Label can only be bound once. | 774 ASSERT(!L->is_bound()); // Label can only be bound once. |
693 bind_to(L, pc_offset()); | 775 bind_to(L, pc_offset()); |
694 } | 776 } |
695 | 777 |
696 | 778 |
697 void Assembler::next(Label* L) { | 779 void Assembler::next(Label* L) { |
698 ASSERT(L->is_linked()); | 780 ASSERT(L->is_linked()); |
699 int link = target_at(L->pos()); | 781 int link = target_at(L->pos()); |
700 ASSERT(link > 0 || link == kEndOfChain); | 782 ASSERT(link > 0 || link == kEndOfChain); |
701 if (link == kEndOfChain) { | 783 if (link == kEndOfChain) { |
702 L->Unuse(); | 784 L->Unuse(); |
703 } else if (link > 0) { | 785 } else if (link > 0) { |
704 L->link_to(link); | 786 L->link_to(link); |
705 } | 787 } |
706 } | 788 } |
707 | 789 |
790 bool Assembler::is_near(Label* L) { | |
791 if (L->is_bound()) { | |
792 return ((pc_offset() - L->pos()) < kMaxBranchOffset - 4 * kInstrSize); | |
793 } | |
794 return false; | |
795 } | |
708 | 796 |
709 // We have to use a temporary register for things that can be relocated even | 797 // We have to use a temporary register for things that can be relocated even |
710 // if they can be encoded in the MIPS's 16 bits of immediate-offset instruction | 798 // if they can be encoded in the MIPS's 16 bits of immediate-offset instruction |
711 // space. There is no guarantee that the relocated location can be similarly | 799 // space. There is no guarantee that the relocated location can be similarly |
712 // encoded. | 800 // encoded. |
713 bool Assembler::MustUseReg(RelocInfo::Mode rmode) { | 801 bool Assembler::MustUseReg(RelocInfo::Mode rmode) { |
714 return rmode != RelocInfo::NONE; | 802 return rmode != RelocInfo::NONE; |
715 } | 803 } |
716 | 804 |
717 | 805 |
(...skipping 62 matching lines...) | |
780 opcode | fmt | (rt.code() << kRtShift) | (fs.code() << kFsShift) | func; | 868 opcode | fmt | (rt.code() << kRtShift) | (fs.code() << kFsShift) | func; |
781 emit(instr); | 869 emit(instr); |
782 } | 870 } |
783 | 871 |
784 | 872 |
785 // Instructions with immediate value. | 873 // Instructions with immediate value. |
786 // Registers are in the order of the instruction encoding, from left to right. | 874 // Registers are in the order of the instruction encoding, from left to right. |
787 void Assembler::GenInstrImmediate(Opcode opcode, | 875 void Assembler::GenInstrImmediate(Opcode opcode, |
788 Register rs, | 876 Register rs, |
789 Register rt, | 877 Register rt, |
790 int32_t j) { | 878 int32_t j, |
879 bool check_buffer) { | |
791 ASSERT(rs.is_valid() && rt.is_valid() && (is_int16(j) || is_uint16(j))); | 880 ASSERT(rs.is_valid() && rt.is_valid() && (is_int16(j) || is_uint16(j))); |
792 Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift) | 881 Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift) |
793 | (j & kImm16Mask); | 882 | (j & kImm16Mask); |
794 emit(instr); | 883 emit(instr, check_buffer); |
795 } | 884 } |
796 | 885 |
797 | 886 |
798 void Assembler::GenInstrImmediate(Opcode opcode, | 887 void Assembler::GenInstrImmediate(Opcode opcode, |
799 Register rs, | 888 Register rs, |
800 SecondaryField SF, | 889 SecondaryField SF, |
801 int32_t j) { | 890 int32_t j) { |
802 ASSERT(rs.is_valid() && (is_int16(j) || is_uint16(j))); | 891 ASSERT(rs.is_valid() && (is_int16(j) || is_uint16(j))); |
803 Instr instr = opcode | (rs.code() << kRsShift) | SF | (j & kImm16Mask); | 892 Instr instr = opcode | (rs.code() << kRsShift) | SF | (j & kImm16Mask); |
804 emit(instr); | 893 emit(instr); |
(...skipping 11 matching lines...) | |
816 emit(instr); | 905 emit(instr); |
817 } | 906 } |
818 | 907 |
819 | 908 |
820 // Registers are in the order of the instruction encoding, from left to right. | 909 // Registers are in the order of the instruction encoding, from left to right. |
821 void Assembler::GenInstrJump(Opcode opcode, | 910 void Assembler::GenInstrJump(Opcode opcode, |
822 uint32_t address) { | 911 uint32_t address) { |
823 BlockTrampolinePoolScope block_trampoline_pool(this); | 912 BlockTrampolinePoolScope block_trampoline_pool(this); |
824 ASSERT(is_uint26(address)); | 913 ASSERT(is_uint26(address)); |
825 Instr instr = opcode | address; | 914 Instr instr = opcode | address; |
826 emit(instr); | 915 // Prevent buffer growth as internal reference in relocation info is linked |
916 // to the instruction that has not been emitted yet. | |
917 emit(instr, false); | |
827 BlockTrampolinePoolFor(1); // For associated delay slot. | 918 BlockTrampolinePoolFor(1); // For associated delay slot. |
828 } | 919 } |
829 | 920 |
830 | 921 |
831 // Returns the next free label entry from the next trampoline pool. | 922 // Returns the next free trampoline entry. |
832 int32_t Assembler::get_label_entry(int32_t pos, bool next_pool) { | 923 int32_t Assembler::get_trampoline_entry(int32_t pos) { |
833 int trampoline_count = trampolines_.length(); | |
834 int32_t label_entry = 0; | |
835 ASSERT(trampoline_count > 0); | |
836 | |
837 if (next_pool) { | |
838 for (int i = 0; i < trampoline_count; i++) { | |
839 if (trampolines_[i].start() > pos) { | |
840 label_entry = trampolines_[i].take_label(); | |
841 break; | |
842 } | |
843 } | |
844 } else { // Caller needs a label entry from the previous pool. | |
845 for (int i = trampoline_count-1; i >= 0; i--) { | |
846 if (trampolines_[i].end() < pos) { | |
847 label_entry = trampolines_[i].take_label(); | |
848 break; | |
849 } | |
850 } | |
851 } | |
852 return label_entry; | |
853 } | |
854 | |
855 | |
856 // Returns the next free trampoline entry from the next trampoline pool. | |
857 int32_t Assembler::get_trampoline_entry(int32_t pos, bool next_pool) { | |
858 int trampoline_count = trampolines_.length(); | |
859 int32_t trampoline_entry = kInvalidSlotPos; | 924 int32_t trampoline_entry = kInvalidSlotPos; |
860 ASSERT(trampoline_count > 0); | |
861 | 925 |
862 if (!internal_trampoline_exception_) { | 926 if (!internal_trampoline_exception_) { |
863 if (next_pool) { | 927 if (trampoline_.start() > pos) { |
864 for (int i = 0; i < trampoline_count; i++) { | 928 trampoline_entry = trampoline_.take_slot(); |
865 if (trampolines_[i].start() > pos) { | |
866 trampoline_entry = trampolines_[i].take_slot(); | |
867 break; | |
868 } | |
869 } | |
870 } else { // Caller needs a trampoline entry from the previous pool. | |
871 for (int i = trampoline_count-1; i >= 0; i--) { | |
872 if (trampolines_[i].end() < pos) { | |
873 trampoline_entry = trampolines_[i].take_slot(); | |
874 break; | |
875 } | |
876 } | |
877 } | 929 } |
930 | |
878 if (kInvalidSlotPos == trampoline_entry) { | 931 if (kInvalidSlotPos == trampoline_entry) { |
879 internal_trampoline_exception_ = true; | 932 internal_trampoline_exception_ = true; |
880 } | 933 } |
881 } | 934 } |
882 return trampoline_entry; | 935 return trampoline_entry; |
883 } | 936 } |
884 | 937 |
885 | 938 |
886 int32_t Assembler::branch_offset(Label* L, bool jump_elimination_allowed) { | 939 uint32_t Assembler::jump_address(Label* L) { |
887 int32_t target_pos; | 940 int32_t target_pos; |
888 int32_t pc_offset_v = pc_offset(); | |
889 | 941 |
890 if (L->is_bound()) { | 942 if (L->is_bound()) { |
891 target_pos = L->pos(); | 943 target_pos = L->pos(); |
892 int32_t dist = pc_offset_v - target_pos; | |
893 if (dist > kMaxBranchOffset) { | |
894 do { | |
895 int32_t trampoline_pos = get_trampoline_entry(target_pos); | |
896 if (kInvalidSlotPos == trampoline_pos) { | |
897 // Internal error. | |
898 return 0; | |
899 } | |
900 ASSERT((trampoline_pos - target_pos) > 0); | |
901 ASSERT((trampoline_pos - target_pos) <= kMaxBranchOffset); | |
902 target_at_put(trampoline_pos, target_pos); | |
903 target_pos = trampoline_pos; | |
904 dist = pc_offset_v - target_pos; | |
905 } while (dist > kMaxBranchOffset); | |
906 } else if (dist < -kMaxBranchOffset) { | |
907 do { | |
908 int32_t trampoline_pos = get_trampoline_entry(target_pos, false); | |
909 if (kInvalidSlotPos == trampoline_pos) { | |
910 // Internal error. | |
911 return 0; | |
912 } | |
913 ASSERT((target_pos - trampoline_pos) > 0); | |
914 ASSERT((target_pos - trampoline_pos) <= kMaxBranchOffset); | |
915 target_at_put(trampoline_pos, target_pos); | |
916 target_pos = trampoline_pos; | |
917 dist = pc_offset_v - target_pos; | |
918 } while (dist < -kMaxBranchOffset); | |
919 } | |
920 } else { | 944 } else { |
921 if (L->is_linked()) { | 945 if (L->is_linked()) { |
922 target_pos = L->pos(); // L's link. | 946 target_pos = L->pos(); // L's link. |
923 int32_t dist = pc_offset_v - target_pos; | |
924 if (dist > kMaxBranchOffset) { | |
925 do { | |
926 int32_t label_pos = get_label_entry(target_pos); | |
927 ASSERT((label_pos - target_pos) < kMaxBranchOffset); | |
928 label_at_put(L, label_pos); | |
929 target_pos = label_pos; | |
930 dist = pc_offset_v - target_pos; | |
931 } while (dist > kMaxBranchOffset); | |
932 } else if (dist < -kMaxBranchOffset) { | |
933 do { | |
934 int32_t label_pos = get_label_entry(target_pos, false); | |
935 ASSERT((label_pos - target_pos) > -kMaxBranchOffset); | |
936 label_at_put(L, label_pos); | |
937 target_pos = label_pos; | |
938 dist = pc_offset_v - target_pos; | |
939 } while (dist < -kMaxBranchOffset); | |
940 } | |
941 L->link_to(pc_offset()); | 947 L->link_to(pc_offset()); |
942 } else { | 948 } else { |
943 L->link_to(pc_offset()); | 949 L->link_to(pc_offset()); |
950 return kEndOfJumpChain; | |
951 } | |
952 } | |
953 | |
954 uint32_t imm = reinterpret_cast<uint32_t>(buffer_) + target_pos; | |
955 ASSERT((imm & 3) == 0); | |
956 | |
957 return imm; | |
958 } | |
959 | |
960 | |
961 int32_t Assembler::branch_offset(Label* L, bool jump_elimination_allowed) { | |
962 int32_t target_pos; | |
963 | |
964 if (L->is_bound()) { | |
965 target_pos = L->pos(); | |
966 } else { | |
967 if (L->is_linked()) { | |
968 target_pos = L->pos(); | |
969 L->link_to(pc_offset()); | |
970 } else { | |
971 L->link_to(pc_offset()); | |
972 if (!trampoline_emitted_) { | |
973 unbound_labels_count++; | |
974 next_buffer_check_ -= kTrampolineSlotsSize; | |
975 } | |
944 return kEndOfChain; | 976 return kEndOfChain; |
945 } | 977 } |
946 } | 978 } |
947 | 979 |
948 int32_t offset = target_pos - (pc_offset() + kBranchPCOffset); | 980 int32_t offset = target_pos - (pc_offset() + kBranchPCOffset); |
949 ASSERT((offset & 3) == 0); | 981 ASSERT((offset & 3) == 0); |
950 ASSERT(is_int16(offset >> 2)); | 982 ASSERT(is_int16(offset >> 2)); |
951 | 983 |
952 return offset; | 984 return offset; |
953 } | 985 } |
954 | 986 |
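branch_offset and label_at_put each reserve a future trampoline slot when they link a branch to a still-unbound label, pulling next_buffer_check_ earlier by one slot; bind_to releases the reservation when the label is bound before the pool goes out. The bookkeeping in isolation (a sketch; the slot size of four 4-byte instructions is an assumption matching the lui/ori/jr/nop sequence emitted in CheckTrampolinePool):

    #include <stdint.h>

    // Models next_buffer_check_ / unbound_labels_count from this file.
    struct PoolBudget {
      int32_t next_check;
      int unbound;
      static const int32_t kSlot = 4 * 4;  // assumed kTrampolineSlotsSize

      void LinkUnbound() { ++unbound; next_check -= kSlot; }  // branch_offset
      void BindLabel()   { --unbound; next_check += kSlot; }  // bind_to
    };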
955 | 987 |
956 void Assembler::label_at_put(Label* L, int at_offset) { | 988 void Assembler::label_at_put(Label* L, int at_offset) { |
957 int target_pos; | 989 int target_pos; |
958 if (L->is_bound()) { | 990 if (L->is_bound()) { |
959 target_pos = L->pos(); | 991 target_pos = L->pos(); |
960 instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag)); | 992 instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag)); |
961 } else { | 993 } else { |
962 if (L->is_linked()) { | 994 if (L->is_linked()) { |
963 target_pos = L->pos(); // L's link. | 995 target_pos = L->pos(); // L's link. |
964 int32_t imm18 = target_pos - at_offset; | 996 int32_t imm18 = target_pos - at_offset; |
965 ASSERT((imm18 & 3) == 0); | 997 ASSERT((imm18 & 3) == 0); |
966 int32_t imm16 = imm18 >> 2; | 998 int32_t imm16 = imm18 >> 2; |
967 ASSERT(is_int16(imm16)); | 999 ASSERT(is_int16(imm16)); |
968 instr_at_put(at_offset, (imm16 & kImm16Mask)); | 1000 instr_at_put(at_offset, (imm16 & kImm16Mask)); |
969 } else { | 1001 } else { |
970 target_pos = kEndOfChain; | 1002 target_pos = kEndOfChain; |
971 instr_at_put(at_offset, 0); | 1003 instr_at_put(at_offset, 0); |
1004 if (!trampoline_emitted_) { | |
1005 unbound_labels_count++; | |
1006 next_buffer_check_ -= kTrampolineSlotsSize; | |
1007 } | |
972 } | 1008 } |
973 L->link_to(at_offset); | 1009 L->link_to(at_offset); |
974 } | 1010 } |
975 } | 1011 } |
976 | 1012 |
977 | 1013 |
978 //------- Branch and jump instructions -------- | 1014 //------- Branch and jump instructions -------- |
979 | 1015 |
980 void Assembler::b(int16_t offset) { | 1016 void Assembler::b(int16_t offset) { |
981 beq(zero_reg, zero_reg, offset); | 1017 beq(zero_reg, zero_reg, offset); |
(...skipping 149 matching lines...) | |
1131 void Assembler::andi(Register rt, Register rs, int32_t j) { | 1167 void Assembler::andi(Register rt, Register rs, int32_t j) { |
1132 GenInstrImmediate(ANDI, rs, rt, j); | 1168 GenInstrImmediate(ANDI, rs, rt, j); |
1133 } | 1169 } |
1134 | 1170 |
1135 | 1171 |
1136 void Assembler::or_(Register rd, Register rs, Register rt) { | 1172 void Assembler::or_(Register rd, Register rs, Register rt) { |
1137 GenInstrRegister(SPECIAL, rs, rt, rd, 0, OR); | 1173 GenInstrRegister(SPECIAL, rs, rt, rd, 0, OR); |
1138 } | 1174 } |
1139 | 1175 |
1140 | 1176 |
1141 void Assembler::ori(Register rt, Register rs, int32_t j) { | 1177 void Assembler::ori(Register rt, Register rs, int32_t j, bool check_buffer) { |
1142 GenInstrImmediate(ORI, rs, rt, j); | 1178 GenInstrImmediate(ORI, rs, rt, j, check_buffer); |
1143 } | 1179 } |
1144 | 1180 |
1145 | 1181 |
1146 void Assembler::xor_(Register rd, Register rs, Register rt) { | 1182 void Assembler::xor_(Register rd, Register rs, Register rt) { |
1147 GenInstrRegister(SPECIAL, rs, rt, rd, 0, XOR); | 1183 GenInstrRegister(SPECIAL, rs, rt, rd, 0, XOR); |
1148 } | 1184 } |
1149 | 1185 |
1150 | 1186 |
1151 void Assembler::xori(Register rt, Register rs, int32_t j) { | 1187 void Assembler::xori(Register rt, Register rs, int32_t j) { |
1152 GenInstrImmediate(XORI, rs, rt, j); | 1188 GenInstrImmediate(XORI, rs, rt, j); |
(...skipping 168 matching lines...) | |
1321 void Assembler::swl(Register rd, const MemOperand& rs) { | 1357 void Assembler::swl(Register rd, const MemOperand& rs) { |
1322 GenInstrImmediate(SWL, rs.rm(), rd, rs.offset_); | 1358 GenInstrImmediate(SWL, rs.rm(), rd, rs.offset_); |
1323 } | 1359 } |
1324 | 1360 |
1325 | 1361 |
1326 void Assembler::swr(Register rd, const MemOperand& rs) { | 1362 void Assembler::swr(Register rd, const MemOperand& rs) { |
1327 GenInstrImmediate(SWR, rs.rm(), rd, rs.offset_); | 1363 GenInstrImmediate(SWR, rs.rm(), rd, rs.offset_); |
1328 } | 1364 } |
1329 | 1365 |
1330 | 1366 |
1331 void Assembler::lui(Register rd, int32_t j) { | 1367 void Assembler::lui(Register rd, int32_t j, bool check_buffer) { |
1332 GenInstrImmediate(LUI, zero_reg, rd, j); | 1368 GenInstrImmediate(LUI, zero_reg, rd, j, check_buffer); |
1333 } | 1369 } |
1334 | 1370 |
1335 | 1371 |
1336 //-------------Misc-instructions-------------- | 1372 //-------------Misc-instructions-------------- |
1337 | 1373 |
1338 // Break / Trap instructions. | 1374 // Break / Trap instructions. |
1339 void Assembler::break_(uint32_t code, bool break_as_stop) { | 1375 void Assembler::break_(uint32_t code, bool break_as_stop) { |
1340 ASSERT((code & ~0xfffff) == 0); | 1376 ASSERT((code & ~0xfffff) == 0); |
1341 // We need to invalidate breaks that could be stops as well because the | 1377 // We need to invalidate breaks that could be stops as well because the |
1342 // simulator expects a char pointer after the stop instruction. | 1378 // simulator expects a char pointer after the stop instruction. |
(...skipping 441 matching lines...) | |
1784 | 1820 |
1785 | 1821 |
1786 void Assembler::RecordComment(const char* msg) { | 1822 void Assembler::RecordComment(const char* msg) { |
1787 if (FLAG_code_comments) { | 1823 if (FLAG_code_comments) { |
1788 CheckBuffer(); | 1824 CheckBuffer(); |
1789 RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg)); | 1825 RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg)); |
1790 } | 1826 } |
1791 } | 1827 } |
1792 | 1828 |
1793 | 1829 |
1830 void Assembler::RelocateInternalReference(byte* pc, intptr_t pc_delta) { | |
1831 Instr instr = instr_at(pc); | |
1832 ASSERT(IsJ(instr) || IsLui(instr)); | |
1833 if (IsLui(instr)) { | |
1834 Instr instr_lui = instr_at(pc + 0 * Assembler::kInstrSize); | |
1835 Instr instr_ori = instr_at(pc + 1 * Assembler::kInstrSize); | |
1836 ASSERT(IsLui(instr_lui) && IsOri(instr_ori)); | |
Søren Thygesen Gjesse
2011/06/24 13:05:15
Ditto.
Paul Lind
2011/06/28 06:53:14
Done.
| |
1837 int32_t imm = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift; | |
1838 imm |= (instr_ori & static_cast<int32_t>(kImm16Mask)); | |
1839 if (imm == kEndOfJumpChain) { | |
1840 return; | |
1841 } | |
1842 imm += pc_delta; | |
1843 ASSERT((imm & 3) == 0); | |
1844 | |
1845 instr_lui &= ~kImm16Mask; | |
1846 instr_ori &= ~kImm16Mask; | |
1847 | |
1848 instr_at_put(pc + 0 * Assembler::kInstrSize, | |
1849 instr_lui | ((imm >> kLuiShift) & kImm16Mask)); | |
1850 instr_at_put(pc + 1 * Assembler::kInstrSize, | |
1851 instr_ori | (imm & kImm16Mask)); | |
1852 } else { | |
1853 uint32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2; | |
1854 if (static_cast<int32_t>(imm28) == kEndOfJumpChain) { | |
1855 return; | |
1856 } | |
1857 imm28 += pc_delta; | |
1858 imm28 &= kImm28Mask; | |
1859 ASSERT((imm28 & 3) == 0); | |
1860 | |
1861 instr &= ~kImm26Mask; | |
1862 uint32_t imm26 = imm28 >> 2; | |
1863 ASSERT(is_uint26(imm26)); | |
1864 | |
1865 instr_at_put(pc, instr | (imm26 & kImm26Mask)); | |
1866 } | |
1867 } | |
1868 | |
1869 | |
1794 void Assembler::GrowBuffer() { | 1870 void Assembler::GrowBuffer() { |
1795 if (!own_buffer_) FATAL("external code buffer is too small"); | 1871 if (!own_buffer_) FATAL("external code buffer is too small"); |
1796 | 1872 |
1797 // Compute new buffer size. | 1873 // Compute new buffer size. |
1798 CodeDesc desc; // The new buffer. | 1874 CodeDesc desc; // The new buffer. |
1799 if (buffer_size_ < 4*KB) { | 1875 if (buffer_size_ < 4*KB) { |
1800 desc.buffer_size = 4*KB; | 1876 desc.buffer_size = 4*KB; |
1801 } else if (buffer_size_ < 1*MB) { | 1877 } else if (buffer_size_ < 1*MB) { |
1802 desc.buffer_size = 2*buffer_size_; | 1878 desc.buffer_size = 2*buffer_size_; |
1803 } else { | 1879 } else { |
(...skipping 15 matching lines...) | |
1819 reloc_info_writer.pos(), desc.reloc_size); | 1895 reloc_info_writer.pos(), desc.reloc_size); |
1820 | 1896 |
1821 // Switch buffers. | 1897 // Switch buffers. |
1822 DeleteArray(buffer_); | 1898 DeleteArray(buffer_); |
1823 buffer_ = desc.buffer; | 1899 buffer_ = desc.buffer; |
1824 buffer_size_ = desc.buffer_size; | 1900 buffer_size_ = desc.buffer_size; |
1825 pc_ += pc_delta; | 1901 pc_ += pc_delta; |
1826 reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta, | 1902 reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta, |
1827 reloc_info_writer.last_pc() + pc_delta); | 1903 reloc_info_writer.last_pc() + pc_delta); |
1828 | 1904 |
1829 // On ia32 and ARM pc relative addressing is used, and we thus need to apply a | 1905 // Relocate internal references. |
1830 // shift by pc_delta. But on MIPS the target address is directly loaded, so | 1906 for (RelocIterator it(desc); !it.done(); it.next()) { |
1831 // we do not need to relocate here. | 1907 RelocInfo::Mode rmode = it.rinfo()->rmode(); |
1908 if (rmode == RelocInfo::INTERNAL_REFERENCE) { | |
1909 byte* p = reinterpret_cast<byte*>(it.rinfo()->pc()); | |
1910 RelocateInternalReference(p, pc_delta); | |
1911 } | |
1912 } | |
1832 | 1913 |
1833 ASSERT(!overflow()); | 1914 ASSERT(!overflow()); |
1834 } | 1915 } |
1835 | 1916 |
1836 | 1917 |
1837 void Assembler::db(uint8_t data) { | 1918 void Assembler::db(uint8_t data) { |
1838 CheckBuffer(); | 1919 CheckBuffer(); |
1839 *reinterpret_cast<uint8_t*>(pc_) = data; | 1920 *reinterpret_cast<uint8_t*>(pc_) = data; |
1840 pc_ += sizeof(uint8_t); | 1921 pc_ += sizeof(uint8_t); |
1841 } | 1922 } |
(...skipping 34 matching lines...) | |
1876 } | 1957 } |
1877 } | 1958 } |
1878 } | 1959 } |
1879 | 1960 |
1880 | 1961 |
1881 void Assembler::BlockTrampolinePoolFor(int instructions) { | 1962 void Assembler::BlockTrampolinePoolFor(int instructions) { |
1882 BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize); | 1963 BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize); |
1883 } | 1964 } |
1884 | 1965 |
1885 | 1966 |
1886 void Assembler::CheckTrampolinePool(bool force_emit) { | 1967 void Assembler::CheckTrampolinePool() { |
1887 // Calculate the offset of the next check. | |
1888 next_buffer_check_ = pc_offset() + kCheckConstInterval; | |
1889 | |
1890 int dist = pc_offset() - last_trampoline_pool_end_; | |
1891 | |
1892 if (dist <= kMaxDistBetweenPools && !force_emit) { | |
1893 return; | |
1894 } | |
1895 | |
1896 // Some small sequences of instructions must not be broken up by the | 1968 // Some small sequences of instructions must not be broken up by the |
1897 // insertion of a trampoline pool; such sequences are protected by setting | 1969 // insertion of a trampoline pool; such sequences are protected by setting |
1898 // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_, | 1970 // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_, |
1899 // which are both checked here. Also, recursive calls to CheckTrampolinePool | 1971 // which are both checked here. Also, recursive calls to CheckTrampolinePool |
1900 // are blocked by trampoline_pool_blocked_nesting_. | 1972 // are blocked by trampoline_pool_blocked_nesting_. |
1901 if ((trampoline_pool_blocked_nesting_ > 0) || | 1973 if ((trampoline_pool_blocked_nesting_ > 0) || |
1902 (pc_offset() < no_trampoline_pool_before_)) { | 1974 (pc_offset() < no_trampoline_pool_before_)) { |
1903 // Emission is currently blocked; make sure we try again as soon as | 1975 // Emission is currently blocked; make sure we try again as soon as |
1904 // possible. | 1976 // possible. |
1905 if (trampoline_pool_blocked_nesting_ > 0) { | 1977 if (trampoline_pool_blocked_nesting_ > 0) { |
1906 next_buffer_check_ = pc_offset() + kInstrSize; | 1978 next_buffer_check_ = pc_offset() + kInstrSize; |
1907 } else { | 1979 } else { |
1908 next_buffer_check_ = no_trampoline_pool_before_; | 1980 next_buffer_check_ = no_trampoline_pool_before_; |
1909 } | 1981 } |
1910 return; | 1982 return; |
1911 } | 1983 } |
1912 | 1984 |
1913 // First we emit jump (2 instructions), then we emit trampoline pool. | 1985 ASSERT(!trampoline_emitted_); |
1914 { BlockTrampolinePoolScope block_trampoline_pool(this); | 1986 ASSERT(unbound_labels_count >= 0); |
1915 Label after_pool; | 1987 if (unbound_labels_count > 0) { |
1916 b(&after_pool); | 1988 // First we emit jump (2 instructions), then we emit trampoline pool. |
1917 nop(); | 1989 { BlockTrampolinePoolScope block_trampoline_pool(this); |
1918 | 1990 Label after_pool; |
1919 int pool_start = pc_offset(); | |
1920 for (int i = 0; i < kSlotsPerTrampoline; i++) { | |
1921 b(&after_pool); | 1991 b(&after_pool); |
1922 nop(); | 1992 nop(); |
1993 | |
1994 int pool_start = pc_offset(); | |
1995 for (int i = 0; i < unbound_labels_count; i++) { | |
1996 uint32_t imm32; | |
1997 imm32 = jump_address(&after_pool); | |
1998 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE); | |
1999 lui(at, (imm32 & kHiMask) >> kLuiShift, false); | |
2000 ori(at, at, (imm32 & kImm16Mask), false); | |
Søren Thygesen Gjesse
2011/06/24 13:05:15
Can't the jr go between the lui and the ori and the ori be in the delay slot?
Paul Lind
2011/06/28 06:53:14
Unfortunately, it cannot. This is jr (jump) to 'at', so 'at' must already hold the complete target address when the jr executes.
| |
2001 jr(at); | |
2002 nop(); | |
2003 } | |
2004 bind(&after_pool); | |
2005 trampoline_ = Trampoline(pool_start, unbound_labels_count); | |
2006 | |
2007 trampoline_emitted_ = true; | |
2008 // As we are only going to emit trampoline once, we need to prevent any | |
2009 // further emission. | |
2010 next_buffer_check_ = kMaxInt; | |
1923 } | 2011 } |
1924 for (int i = 0; i < kLabelsPerTrampoline; i++) { | 2012 } else { |
1925 emit(0); | 2013 // Number of branches to unbound label at this point is zero, so we can |
1926 } | 2014 // move next buffer check to maximum. |
1927 last_trampoline_pool_end_ = pc_offset() - kInstrSize; | 2015 next_buffer_check_ = pc_offset() + kMaxBranchOffset |
1928 bind(&after_pool); | 2016 - kTrampolineSlotsSize * 16; |
1929 trampolines_.Add(Trampoline(pool_start, | |
1930 kSlotsPerTrampoline, | |
1931 kLabelsPerTrampoline)); | |
1932 | |
1933 // Since a trampoline pool was just emitted, | |
1934 // move the check offset forward by the standard interval. | |
1935 next_buffer_check_ = last_trampoline_pool_end_ + kMaxDistBetweenPools; | |
1936 } | 2017 } |
1937 return; | 2018 return; |
1938 } | 2019 } |
1939 | 2020 |
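The threshold kMaxBranchOffset - kTrampolineSlotsSize * 16 leaves headroom so that even if a BlockTrampolinePoolScope briefly postpones emission, the oldest pending branch can still reach the pool. With the constants this file appears to use (assumed values below), the margin works out as follows:

    #include <assert.h>
    #include <stdint.h>

    int main() {
      const int32_t kInstrSize = 4;                    // MIPS32 fixed width
      // One slot is the lui/ori/jr/nop sequence emitted above (assumed).
      const int32_t kTrampolineSlotsSize = 4 * kInstrSize;
      const int32_t kMaxBranchOffset = (1 << 17) - 1;  // assumed 18-bit reach
      int32_t next_buffer_check = kMaxBranchOffset - kTrampolineSlotsSize * 16;
      // Headroom between the check point and the hard branch limit:
      assert(kMaxBranchOffset - next_buffer_check == 256);
      return 0;
    }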
1940 | 2021 |
1941 Address Assembler::target_address_at(Address pc) { | 2022 Address Assembler::target_address_at(Address pc) { |
1942 Instr instr1 = instr_at(pc); | 2023 Instr instr1 = instr_at(pc); |
1943 Instr instr2 = instr_at(pc + kInstrSize); | 2024 Instr instr2 = instr_at(pc + kInstrSize); |
1944 // Interpret 2 instructions generated by li: lui/ori | 2025 // Interpret 2 instructions generated by li: lui/ori |
1945 if ((GetOpcodeField(instr1) == LUI) && (GetOpcodeField(instr2) == ORI)) { | 2026 if ((GetOpcodeField(instr1) == LUI) && (GetOpcodeField(instr2) == ORI)) { |
(...skipping 29 matching lines...) | |
1975 *p = LUI | rt_code | ((itarget & kHiMask) >> kLuiShift); | 2056 *p = LUI | rt_code | ((itarget & kHiMask) >> kLuiShift); |
1976 *(p+1) = ORI | rt_code | (rt_code << 5) | (itarget & kImm16Mask); | 2057 *(p+1) = ORI | rt_code | (rt_code << 5) | (itarget & kImm16Mask); |
1977 | 2058 |
1978 CPU::FlushICache(pc, 2 * sizeof(int32_t)); | 2059 CPU::FlushICache(pc, 2 * sizeof(int32_t)); |
1979 } | 2060 } |
1980 | 2061 |
1981 | 2062 |
1982 } } // namespace v8::internal | 2063 } } // namespace v8::internal |
1983 | 2064 |
1984 #endif // V8_TARGET_ARCH_MIPS | 2065 #endif // V8_TARGET_ARCH_MIPS |