Chromium Code Reviews

Diff: src/arm/assembler-arm.cc

Issue 2821014: Add movw and movt support for ARMv7. This includes some code from... (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: Created 10 years, 6 months ago
// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// - Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
(...skipping 264 matching lines...)
const Instr kLdrPCMask = CondMask | 15 * B24 | 7 * B20 | 15 * B16;
const Instr kLdrPCPattern = al | 5 * B24 | L | pc.code() * B16;
// blxcc rm
const Instr kBlxRegMask =
    15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4;
const Instr kBlxRegPattern =
    B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | 3 * B4;
const Instr kMovMvnMask = 0x6d * B21 | 0xf * B16;
const Instr kMovMvnPattern = 0xd * B21;
const Instr kMovMvnFlip = B22;
+const Instr kMovLeaveCCMask = 0xdff * B16;
+const Instr kMovLeaveCCPattern = 0x1a0 * B16;
+const Instr kMovwMask = 0xff * B20;
+const Instr kMovwPattern = 0x30 * B20;
+const Instr kMovwLeaveCCFlip = 0x5 * B21;
const Instr kCmpCmnMask = 0xdd * B20 | 0xf * B12;
const Instr kCmpCmnPattern = 0x15 * B20;
const Instr kCmpCmnFlip = B21;
const Instr kALUMask = 0x6f * B21;
const Instr kAddPattern = 0x4 * B21;
const Instr kSubPattern = 0x2 * B21;
const Instr kBicPattern = 0xe * B21;
const Instr kAndPattern = 0x0 * B21;
const Instr kAddSubFlip = 0x6 * B21;
const Instr kAndBicFlip = 0xe * B21;
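
[Reviewer note, not part of the patch] Each mask/pattern pair identifies an instruction class, and the matching flip constant XORs one encoding into a related one. For the new movw constants: a 'mov rd, #imm' that leaves the condition codes has bits 27:20 equal to 0x3a, and XOR-ing with kMovwLeaveCCFlip (0x5 * B21) turns that into 0x30, which is exactly kMovwPattern. A minimal standalone check (B20/B21 redefined locally):

    #include <cassert>
    #include <cstdint>
    int main() {
      const uint32_t B20 = 1u << 20, B21 = 1u << 21;
      uint32_t mov_leave_cc = 0x3a * B20;          // bits 27:20 of 'mov rd, #imm', S clear
      uint32_t movw = mov_leave_cc ^ (0x5 * B21);  // apply kMovwLeaveCCFlip
      assert(movw == 0x30 * B20);                  // now matches kMovwPattern
      return 0;
    }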
(...skipping 338 matching lines...)
  int link = target_at(L->pos());
  if (link > 0) {
    L->link_to(link);
  } else {
    ASSERT(link == kEndOfChain);
    L->Unuse();
  }
}


+static Instr EncodeMovwImmediate(uint32_t immediate) {
+  ASSERT(immediate < 0x10000);
+  return ((immediate & 0xf000) << 4) | (immediate & 0xfff);
+}


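[Reviewer note, not part of the patch] The split here mirrors the ARMv7 movw/movt immediate format: the top nibble of the 16-bit value lands in bits 19:16 (imm4) and the low 12 bits in bits 11:0 (imm12). A hypothetical standalone recreation of the arithmetic:

    #include <cassert>
    #include <cstdint>
    // Same arithmetic as EncodeMovwImmediate above.
    static uint32_t Encode(uint32_t imm) {
      assert(imm < 0x10000);
      return ((imm & 0xf000) << 4) | (imm & 0xfff);
    }
    int main() {
      // 0xabcd -> imm4 = 0xa at bits 19:16, imm12 = 0xbcd at bits 11:0.
      assert(Encode(0xabcd) == 0xa0bcd);
      return 0;
    }
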
// Low-level code emission routines depending on the addressing mode.
// If this returns true then you have to use the rotate_imm and immed_8
// that it returns, because it may have already changed the instruction
// to match them!
static bool fits_shifter(uint32_t imm32,
                         uint32_t* rotate_imm,
                         uint32_t* immed_8,
                         Instr* instr) {
  // imm32 must be unsigned.
  for (int rot = 0; rot < 16; rot++) {
    uint32_t imm8 = (imm32 << 2*rot) | (imm32 >> (32 - 2*rot));
    if ((imm8 <= 0xff)) {
      *rotate_imm = rot;
      *immed_8 = imm8;
      return true;
    }
  }
  // If the opcode is one with a complementary version and the complementary
  // immediate fits, change the opcode.
  if (instr != NULL) {
    if ((*instr & kMovMvnMask) == kMovMvnPattern) {
      if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
        *instr ^= kMovMvnFlip;
        return true;
+      } else if ((*instr & kMovLeaveCCMask) == kMovLeaveCCPattern) {
+        if (CpuFeatures::IsSupported(ARMv7)) {
+          if (imm32 < 0x10000) {
+            *instr ^= kMovwLeaveCCFlip;
+            *instr |= EncodeMovwImmediate(imm32);
+            *rotate_imm = *immed_8 = 0;  // Not used for movw.
+            return true;
+          }
+        }
      }
    } else if ((*instr & kCmpCmnMask) == kCmpCmnPattern) {
      if (fits_shifter(-imm32, rotate_imm, immed_8, NULL)) {
        *instr ^= kCmpCmnFlip;
        return true;
      }
    } else {
      Instr alu_insn = (*instr & kALUMask);
      if (alu_insn == kAddPattern ||
          alu_insn == kSubPattern) {
(...skipping 11 matching lines...)
    }
  }
  return false;
}


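[Reviewer note, not part of the patch] Background on the rotation loop in fits_shifter: an ARM data-processing immediate is an 8-bit value rotated right by 2*rot, so rotating imm32 left by 2*rot recovers the 8-bit payload when an encoding exists. For example, 0xff000000 fits with imm8 = 0xff and rot = 4, while 0x12345678 has no such encoding and falls through to the opcode-flipping cases. A standalone sketch (with an added guard for rot == 0, where a 32-bit shift would be undefined behavior in C++):

    #include <cassert>
    #include <cstdint>
    static bool Fits(uint32_t imm32, uint32_t* rot_out, uint32_t* imm8_out) {
      for (uint32_t rot = 0; rot < 16; rot++) {
        // Rotate left by 2*rot bits.
        uint32_t imm8 = (rot == 0)
            ? imm32
            : (imm32 << 2*rot) | (imm32 >> (32 - 2*rot));
        if (imm8 <= 0xff) {
          *rot_out = rot;
          *imm8_out = imm8;
          return true;
        }
      }
      return false;
    }
    int main() {
      uint32_t rot, imm8;
      assert(Fits(0xff000000u, &rot, &imm8) && rot == 4 && imm8 == 0xff);
      assert(!Fits(0x12345678u, &rot, &imm8));
      return 0;
    }
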
// We have to use the temporary register for things that can be relocated even
// if they can be encoded in the ARM's 12 bits of immediate-offset instruction
// space. There is no guarantee that the relocated location can be similarly
// encoded.
-static bool MustUseIp(RelocInfo::Mode rmode) {
+static bool MustUseConstantPool(RelocInfo::Mode rmode) {
  if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
#ifdef DEBUG
    if (!Serializer::enabled()) {
      Serializer::TooLateToEnableNow();
    }
#endif  // def DEBUG
    return Serializer::enabled();
  } else if (rmode == RelocInfo::NONE) {
    return false;
  }
  return true;
}


bool Operand::is_single_instruction() const {
  if (rm_.is_valid()) return true;
-  if (MustUseIp(rmode_)) return false;
+  if (MustUseConstantPool(rmode_)) return false;
  uint32_t dummy1, dummy2;
  return fits_shifter(imm32_, &dummy1, &dummy2, NULL);
}


void Assembler::addrmod1(Instr instr,
                         Register rn,
                         Register rd,
                         const Operand& x) {
  CheckBuffer();
  ASSERT((instr & ~(CondMask | OpCodeMask | S)) == 0);
  if (!x.rm_.is_valid()) {
    // Immediate.
    uint32_t rotate_imm;
    uint32_t immed_8;
-    if (MustUseIp(x.rmode_) ||
+    if (MustUseConstantPool(x.rmode_) ||
        !fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) {
      // The immediate operand cannot be encoded as a shifter operand, so load
      // it first to register ip and change the original instruction to use ip.
      // However, if the original instruction is a 'mov rd, x' (not setting the
      // condition code), then replace it with a 'ldr rd, [pc]'.
-      RecordRelocInfo(x.rmode_, x.imm32_);
      CHECK(!rn.is(ip));  // rn should never be ip, or will be trashed
      Condition cond = static_cast<Condition>(instr & CondMask);
      if ((instr & ~CondMask) == 13*B21) {  // mov, S not set
-        ldr(rd, MemOperand(pc, 0), cond);
+        if (MustUseConstantPool(x.rmode_) ||
+            !CpuFeatures::IsSupported(ARMv7)) {
+          RecordRelocInfo(x.rmode_, x.imm32_);
+          ldr(rd, MemOperand(pc, 0), cond);
+        } else {
+          // Will probably use movw, will certainly not use constant pool.
+          mov(rd, Operand(x.imm32_ & 0xffff), LeaveCC, cond);
+          movt(rd, static_cast<uint32_t>(x.imm32_) >> 16, cond);
+        }
      } else {
-        ldr(ip, MemOperand(pc, 0), cond);
+        // If this is not a mov or mvn instruction we may still be able to avoid
+        // a constant pool entry by using mvn or movw.
+        if (!MustUseConstantPool(x.rmode_) &&
+            (instr & kMovMvnMask) != kMovMvnPattern) {
+          mov(ip, x, LeaveCC, cond);
+        } else {
+          RecordRelocInfo(x.rmode_, x.imm32_);
+          ldr(ip, MemOperand(pc, 0), cond);
+        }
        addrmod1(instr, rn, rd, Operand(ip));
      }
      return;
    }
    instr |= I | rotate_imm*B8 | immed_8;
  } else if (!x.rs_.is_valid()) {
    // Immediate shift.
    instr |= x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
  } else {
    // Register shift.
(...skipping 290 matching lines...)
    WriteRecordedPositions();
  }
  // Don't allow nop instructions in the form mov rn, rn to be generated using
  // the mov instruction. They must be generated using nop(int)
  // pseudo instructions.
  ASSERT(!(src.is_reg() && src.rm().is(dst) && s == LeaveCC && cond == al));
  addrmod1(cond | 13*B21 | s, r0, dst, src);
}


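[Reviewer note, not part of the patch] Concretely, with this change a mov of a 32-bit constant that neither fits a rotated 8-bit immediate nor must live in the constant pool is synthesized from a movw/movt pair on ARMv7 (the movw itself comes from the kMovwLeaveCCFlip path in fits_shifter):

    // Assuming ARMv7 support and rmode == RelocInfo::NONE:
    //   mov(r0, Operand(0xdeadbeef));
    // now assembles as
    //   movw r0, #0xbeef   ; low 16 bits
    //   movt r0, #0xdead   ; high 16 bits
    // instead of "ldr r0, [pc, #offset]" plus a constant pool entry.
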
+void Assembler::movt(Register reg, uint32_t immediate, Condition cond) {
+  emit(cond | 0x34*B20 | reg.code()*B12 | EncodeMovwImmediate(immediate));
+}


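[Reviewer note, not part of the patch] Sanity check of the emitted bit pattern: with cond = al (0xe << 28), movt(r0, 0x1234) should produce 0xe3410234, which decodes as "movt r0, #0x1234" (imm4 = 0x1, Rd = 0, imm12 = 0x234). The arithmetic, standalone:

    #include <cassert>
    #include <cstdint>
    int main() {
      const uint32_t al = 0xeu << 28, B12 = 1u << 12, B20 = 1u << 20;
      uint32_t imm = ((0x1234u & 0xf000) << 4) | (0x1234u & 0xfff);  // 0x10234
      uint32_t insn = al | 0x34*B20 | 0*B12 | imm;  // Rd = r0
      assert(insn == 0xe3410234u);
      return 0;
    }
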
void Assembler::bic(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | 14*B21 | s, src1, dst, src2);
}


void Assembler::mvn(Register dst, const Operand& src, SBit s, Condition cond) {
  addrmod1(cond | 15*B21 | s, r0, dst, src);
}

(...skipping 160 matching lines...)


void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
                    Condition cond) {
  ASSERT(fields >= B16 && fields < B20);  // at least one field set
  Instr instr;
  if (!src.rm_.is_valid()) {
    // Immediate.
    uint32_t rotate_imm;
    uint32_t immed_8;
-    if (MustUseIp(src.rmode_) ||
+    if (MustUseConstantPool(src.rmode_) ||
        !fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
      // Immediate operand cannot be encoded, load it first to register ip.
      RecordRelocInfo(src.rmode_, src.imm32_);
      ldr(ip, MemOperand(pc, 0), cond);
      msr(fields, Operand(ip), cond);
      return;
    }
    instr = I | rotate_imm*B8 | immed_8;
  } else {
    ASSERT(!src.rs_.is_valid() && src.shift_imm_ == 0);  // only rm allowed
(...skipping 1088 matching lines...)

  // Since a constant pool was just emitted, move the check offset forward by
  // the standard interval.
  next_buffer_check_ = pc_offset() + kCheckConstInterval;
}


} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_ARM