Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright (c) 1994-2006 Sun Microsystems Inc. | 1 // Copyright (c) 1994-2006 Sun Microsystems Inc. |
| 2 // All Rights Reserved. | 2 // All Rights Reserved. |
| 3 // | 3 // |
| 4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
| 5 // modification, are permitted provided that the following conditions | 5 // modification, are permitted provided that the following conditions |
| 6 // are met: | 6 // are met: |
| 7 // | 7 // |
| 8 // - Redistributions of source code must retain the above copyright notice, | 8 // - Redistributions of source code must retain the above copyright notice, |
| 9 // this list of conditions and the following disclaimer. | 9 // this list of conditions and the following disclaimer. |
| 10 // | 10 // |
| (...skipping 261 matching lines...) | |
| 272 // mov lr, pc | 272 // mov lr, pc |
| 273 const Instr kMovLrPc = al | 13*B21 | pc.code() | lr.code() * B12; | 273 const Instr kMovLrPc = al | 13*B21 | pc.code() | lr.code() * B12; |
| 274 // ldr rd, [pc, #offset] | 274 // ldr rd, [pc, #offset] |
| 275 const Instr kLdrPCMask = CondMask | 15 * B24 | 7 * B20 | 15 * B16; | 275 const Instr kLdrPCMask = CondMask | 15 * B24 | 7 * B20 | 15 * B16; |
| 276 const Instr kLdrPCPattern = al | 5 * B24 | L | pc.code() * B16; | 276 const Instr kLdrPCPattern = al | 5 * B24 | L | pc.code() * B16; |
| 277 // blxcc rm | 277 // blxcc rm |
| 278 const Instr kBlxRegMask = | 278 const Instr kBlxRegMask = |
| 279 15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4; | 279 15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4; |
| 280 const Instr kBlxRegPattern = | 280 const Instr kBlxRegPattern = |
| 281 B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | 3 * B4; | 281 B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | 3 * B4; |
| | 282 const Instr kMovMvnMask = 0x6d * B21 | 0xf * B16; |
| | 283 const Instr kMovMvnPattern = 0x1d * B21; |
| | 284 const Instr kMovMvnFlip = B22; |
| | 285 const Instr kCmpCmnMask = 0xdd * B20 | 0xf * B12; |
| | 286 const Instr kCmpCmnPattern = 0x15 * B20; |
| | 287 const Instr kCmpCmnFlip = B21; |
| | 288 const Instr kALUMask = 0x6f * B21; |
| | 289 const Instr kAddPattern = 0x4 * B21; |
| | 290 const Instr kSubPattern = 0x2 * B21; |
| | 291 const Instr kBicPattern = 0xe * B21; |
| | 292 const Instr kAndPattern = 0x0 * B21; |
| | 293 const Instr kAddSubFlip = 0x6 * B21; |
| | 294 const Instr kAndBicFlip = 0xe * B21; |
| | 295 |
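The new constants come in mask/pattern/flip triples: AND an instruction with the mask and compare against the pattern to recognise its opcode, then XOR with the flip constant to turn it into the complementary opcode (mov↔mvn, cmp↔cmn, add↔sub, and↔bic). A minimal standalone sketch of the idiom, assuming B16/B21/B22 are the usual `1 << n` single-bit constants from the ARM assembler headers:

```cpp
// Standalone sketch (not V8 code) of how a mask/pattern/flip triple is used.
// Assumption: B16/B21/B22 are the single-bit constants 1 << 16, 1 << 21,
// 1 << 22, as in the ARM assembler headers.
#include <cstdint>

typedef uint32_t Instr;

const Instr B16 = 1u << 16;
const Instr B21 = 1u << 21;
const Instr B22 = 1u << 22;

const Instr kMovMvnMask = 0x6d * B21 | 0xf * B16;
const Instr kMovMvnPattern = 0x1d * B21;
const Instr kMovMvnFlip = B22;  // mov and mvn differ only in this opcode bit.

// If instr is a mov/mvn with an immediate operand, return the complementary
// instruction (mov <-> mvn); otherwise return it unchanged.
Instr FlipMovMvn(Instr instr) {
  if ((instr & kMovMvnMask) == kMovMvnPattern) {
    return instr ^ kMovMvnFlip;
  }
  return instr;
}
```

fits_shifter below applies exactly this transformation when the complemented (or negated) immediate is encodable but the original one is not.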
| 282 // A mask for the Rd register for push, pop, ldr, str instructions. | 296 // A mask for the Rd register for push, pop, ldr, str instructions. |
| 283 const Instr kRdMask = 0x0000f000; | 297 const Instr kRdMask = 0x0000f000; |
| 284 static const int kRdShift = 12; | 298 static const int kRdShift = 12; |
| 285 static const Instr kLdrRegFpOffsetPattern = | 299 static const Instr kLdrRegFpOffsetPattern = |
| 286 al | B26 | L | Offset | fp.code() * B16; | 300 al | B26 | L | Offset | fp.code() * B16; |
| 287 static const Instr kStrRegFpOffsetPattern = | 301 static const Instr kStrRegFpOffsetPattern = |
| 288 al | B26 | Offset | fp.code() * B16; | 302 al | B26 | Offset | fp.code() * B16; |
| 289 static const Instr kLdrRegFpNegOffsetPattern = | 303 static const Instr kLdrRegFpNegOffsetPattern = |
| 290 al | B26 | L | NegOffset | fp.code() * B16; | 304 al | B26 | L | NegOffset | fp.code() * B16; |
| 291 static const Instr kStrRegFpNegOffsetPattern = | 305 static const Instr kStrRegFpNegOffsetPattern = |
| (...skipping 328 matching lines...) | |
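The kRdMask/kRdShift pair above simply isolates bits 12–15, where ldr, str, push and pop keep the Rd register. A hypothetical helper (not part of the Assembler API) showing the extraction:

```cpp
// Hypothetical helper, not in assembler-arm.cc: extract the Rd register
// number (bits 12-15) from an ldr/str/push/pop instruction word using the
// kRdMask/kRdShift constants defined above.
static int RdCodeOf(Instr instr) {
  return static_cast<int>((instr & kRdMask) >> kRdShift);
}
```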
| 620 if (link > 0) { | 634 if (link > 0) { |
| 621 L->link_to(link); | 635 L->link_to(link); |
| 622 } else { | 636 } else { |
| 623 ASSERT(link == kEndOfChain); | 637 ASSERT(link == kEndOfChain); |
| 624 L->Unuse(); | 638 L->Unuse(); |
| 625 } | 639 } |
| 626 } | 640 } |
| 627 | 641 |
| 628 | 642 |
| 629 // Low-level code emission routines depending on the addressing mode. | 643 // Low-level code emission routines depending on the addressing mode. |
| | 644 // If this returns true then you have to use the rotate_imm and immed_8 |
| | 645 // that it returns, because it may have already changed the instruction |
| | 646 // to match them! |
| 630 static bool fits_shifter(uint32_t imm32, | 647 static bool fits_shifter(uint32_t imm32, |
| 631 uint32_t* rotate_imm, | 648 uint32_t* rotate_imm, |
| 632 uint32_t* immed_8, | 649 uint32_t* immed_8, |
| 633 Instr* instr) { | 650 Instr* instr) { |
| 634 // imm32 must be unsigned. | 651 // imm32 must be unsigned. |
| 635 for (int rot = 0; rot < 16; rot++) { | 652 for (int rot = 0; rot < 16; rot++) { |
| 636 uint32_t imm8 = (imm32 << 2*rot) | (imm32 >> (32 - 2*rot)); | 653 uint32_t imm8 = (imm32 << 2*rot) | (imm32 >> (32 - 2*rot)); |
| 637 if ((imm8 <= 0xff)) { | 654 if ((imm8 <= 0xff)) { |
| 638 *rotate_imm = rot; | 655 *rotate_imm = rot; |
| 639 *immed_8 = imm8; | 656 *immed_8 = imm8; |
| 640 return true; | 657 return true; |
| 641 } | 658 } |
| 642 } | 659 } |
| 643 // If the opcode is mov or mvn and if ~imm32 fits, change the opcode. | 660 // If the opcode is one with a complementary version and the complementary |
Søren Thygesen Gjesse (2010/06/14 07:56:47): Shouldn't we have some test cases for this instruction…
Erik Corry (2010/06/14 21:05:46): Done.
| 644 if (instr != NULL && (*instr & 0xd*B21) == 0xd*B21) { | 661 // immediate fits, change the opcode. |
| 645 if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) { | 662 if (instr != NULL) { |
| 646 *instr ^= 0x2*B21; | 663 if ((*instr & kMovMvnMask) == kMovMvnPattern) { |
| 647 return true; | 664 if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) { |
| | 665 *instr ^= kMovMvnFlip; |
| | 666 return true; |
| | 667 } |
| | 668 } else if ((*instr & kCmpCmnMask) == kCmpCmnPattern) { |
| | 669 if (fits_shifter(-imm32, rotate_imm, immed_8, NULL)) { |
| | 670 *instr ^= kCmpCmnFlip; |
| | 671 return true; |
| | 672 } |
| | 673 } else { |
| | 674 Instr alu_insn = (*instr & kALUMask); |
| | 675 if (alu_insn == kAddPattern || |
| | 676 alu_insn == kSubPattern) { |
| | 677 if (fits_shifter(-imm32, rotate_imm, immed_8, NULL)) { |
| | 678 *instr ^= kAddSubFlip; |
| | 679 return true; |
| | 680 } |
| | 681 } else if (alu_insn == kAndPattern || |
| | 682 alu_insn == kBicPattern) { |
| | 683 if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) { |
| | 684 *instr ^= kAndBicFlip; |
| | 685 return true; |
| | 686 } |
| | 687 } |
| 648 } | 688 } |
| 649 } | 689 } |
| 650 return false; | 690 return false; |
| 651 } | 691 } |
| 652 | 692 |
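The loop in fits_shifter is the standard ARM operand-2 rule: an immediate is encodable if it equals some 8-bit value rotated right by an even amount. A standalone restatement of just that check (the rot == 0 case is split out so the shift count stays below 32):

```cpp
#include <cstdint>

// Restatement of the operand-2 immediate rule that fits_shifter tests:
// imm32 is encodable iff it equals some 8-bit value rotated right by an even
// amount (2 * rot).  Rotating imm32 left by 2 * rot undoes that rotation, so
// a result <= 0xff means the value fits.
static bool EncodableAsImmediate(uint32_t imm32) {
  for (int rot = 0; rot < 16; rot++) {
    uint32_t undone = (rot == 0)
        ? imm32
        : (imm32 << 2 * rot) | (imm32 >> (32 - 2 * rot));
    if (undone <= 0xff) return true;
  }
  return false;
}

// Examples: 0x000000ff and 0x0000ff00 fit (rot = 0 and rot = 12);
// 0x00000101 does not, so the assembler either flips to a complementary
// opcode (when fits_shifter is given the instruction) or falls back to a
// constant-pool load into ip.
```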
| 653 | 693 |
| 654 // We have to use the temporary register for things that can be relocated even | 694 // We have to use the temporary register for things that can be relocated even |
| 655 // if they can be encoded in the ARM's 12 bits of immediate-offset instruction | 695 // if they can be encoded in the ARM's 12 bits of immediate-offset instruction |
| 656 // space. There is no guarantee that the relocated location can be similarly | 696 // space. There is no guarantee that the relocated location can be similarly |
| 657 // encoded. | 697 // encoded. |
| 658 static bool MustUseIp(RelocInfo::Mode rmode) { | 698 static bool MustUseIp(RelocInfo::Mode rmode) { |
| 659 if (rmode == RelocInfo::EXTERNAL_REFERENCE) { | 699 if (rmode == RelocInfo::EXTERNAL_REFERENCE) { |
| 660 #ifdef DEBUG | 700 #ifdef DEBUG |
| 661 if (!Serializer::enabled()) { | 701 if (!Serializer::enabled()) { |
| 662 Serializer::TooLateToEnableNow(); | 702 Serializer::TooLateToEnableNow(); |
| 663 } | 703 } |
| 664 #endif // def DEBUG | 704 #endif // def DEBUG |
| 665 return Serializer::enabled(); | 705 return Serializer::enabled(); |
| 666 } else if (rmode == RelocInfo::NONE) { | 706 } else if (rmode == RelocInfo::NONE) { |
| 667 return false; | 707 return false; |
| 668 } | 708 } |
| 669 return true; | 709 return true; |
| 670 } | 710 } |
| 671 | 711 |
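Putting MustUseIp and fits_shifter together: an immediate operand either encodes directly into the instruction, or it forces a constant-pool load into the scratch register ip followed by the register form of the operation. A rough summary predicate, offered as an illustration rather than real Assembler code; it is essentially what the new Operand::is_single_instruction just below computes:

```cpp
// Illustration only (not the Assembler API): an immediate needs the
// "ldr ip, [pc, #<constant pool offset>]" fallback if it is relocatable, or
// if no plain shifter encoding exists (opcode flipping is only attempted
// when fits_shifter is handed the actual instruction).
static bool NeedsConstantPoolLoad(uint32_t imm32, RelocInfo::Mode rmode) {
  uint32_t rotate_imm, immed_8;
  if (MustUseIp(rmode)) return true;  // Value may change after relocation.
  return !fits_shifter(imm32, &rotate_imm, &immed_8, NULL);
}
```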
| 672 | 712 |
| | 713 bool Operand::is_single_instruction() const { |
| | 714 if (rm_.is_valid()) return true; |
| | 715 if (MustUseIp(rmode_)) return false; |
| | 716 uint32_t dummy1, dummy2; |
| | 717 return fits_shifter(imm32_, &dummy1, &dummy2, NULL); |
| | 718 } |
| | 719 |
| | 720 |
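A hypothetical caller, only to show what the new predicate answers; the function name is made up and nothing like it is added in this change:

```cpp
// Hypothetical caller: code that must know its size up front (for example a
// sequence that is later patched in place) can ask whether an immediate
// operand costs one instruction or a constant-pool load plus the operation.
// Assumption: the int32_t Operand constructor defaults rmode to
// RelocInfo::NONE, as declared in assembler-arm.h.
static int InstructionCountForAddImmediate(int32_t imm) {
  Operand operand(imm);
  return operand.is_single_instruction() ? 1 : 2;
}
```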
| 673 void Assembler::addrmod1(Instr instr, | 721 void Assembler::addrmod1(Instr instr, |
| 674 Register rn, | 722 Register rn, |
| 675 Register rd, | 723 Register rd, |
| 676 const Operand& x) { | 724 const Operand& x) { |
| 677 CheckBuffer(); | 725 CheckBuffer(); |
| 678 ASSERT((instr & ~(CondMask | OpCodeMask | S)) == 0); | 726 ASSERT((instr & ~(CondMask | OpCodeMask | S)) == 0); |
| 679 if (!x.rm_.is_valid()) { | 727 if (!x.rm_.is_valid()) { |
| 680 // Immediate. | 728 // Immediate. |
| 681 uint32_t rotate_imm; | 729 uint32_t rotate_imm; |
| 682 uint32_t immed_8; | 730 uint32_t immed_8; |
| (...skipping 1602 matching lines...) | |
| 2285 | 2333 |
| 2286 // Since a constant pool was just emitted, move the check offset forward by | 2334 // Since a constant pool was just emitted, move the check offset forward by |
| 2287 // the standard interval. | 2335 // the standard interval. |
| 2288 next_buffer_check_ = pc_offset() + kCheckConstInterval; | 2336 next_buffer_check_ = pc_offset() + kCheckConstInterval; |
| 2289 } | 2337 } |
| 2290 | 2338 |
| 2291 | 2339 |
| 2292 } } // namespace v8::internal | 2340 } } // namespace v8::internal |
| 2293 | 2341 |
| 2294 #endif // V8_TARGET_ARCH_ARM | 2342 #endif // V8_TARGET_ARCH_ARM |