OLD | NEW |
1 // Copyright (c) 1994-2006 Sun Microsystems Inc. | 1 // Copyright (c) 1994-2006 Sun Microsystems Inc. |
2 // All Rights Reserved. | 2 // All Rights Reserved. |
3 // | 3 // |
4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
5 // modification, are permitted provided that the following conditions | 5 // modification, are permitted provided that the following conditions |
6 // are met: | 6 // are met: |
7 // | 7 // |
8 // - Redistributions of source code must retain the above copyright notice, | 8 // - Redistributions of source code must retain the above copyright notice, |
9 // this list of conditions and the following disclaimer. | 9 // this list of conditions and the following disclaimer. |
10 // | 10 // |
(...skipping 99 matching lines...)
110 supported_ |= 1u << VFP3 | 1u << ARMv7 | 1u << VFP2; | 110 supported_ |= 1u << VFP3 | 1u << ARMv7 | 1u << VFP2; |
111 } | 111 } |
112 // For the simulator=arm build, use ARMv7 when FLAG_enable_armv7 is enabled | 112 // For the simulator=arm build, use ARMv7 when FLAG_enable_armv7 is enabled |
113 if (FLAG_enable_armv7) { | 113 if (FLAG_enable_armv7) { |
114 supported_ |= 1u << ARMv7; | 114 supported_ |= 1u << ARMv7; |
115 } | 115 } |
116 | 116 |
117 if (FLAG_enable_sudiv) { | 117 if (FLAG_enable_sudiv) { |
118 supported_ |= 1u << SUDIV; | 118 supported_ |= 1u << SUDIV; |
119 } | 119 } |
| 120 |
| 121 if (FLAG_enable_movw_movt) { |
| 122 supported_ |= 1u << MOVW_MOVT_IMMEDIATE_LOADS; |
| 123 } |
120 #else // __arm__ | 124 #else // __arm__ |
121 // Probe for additional features not already known to be available. | 125 // Probe for additional features not already known to be available. |
122 if (!IsSupported(VFP3) && OS::ArmCpuHasFeature(VFP3)) { | 126 if (!IsSupported(VFP3) && OS::ArmCpuHasFeature(VFP3)) { |
123 // This implementation also sets the VFP flags if runtime | 127 // This implementation also sets the VFP flags if runtime |
124 // detection of VFP returns true. VFPv3 implies ARMv7 and VFP2, see ARM DDI | 128 // detection of VFP returns true. VFPv3 implies ARMv7 and VFP2, see ARM DDI |
125 // 0406B, page A1-6. | 129 // 0406B, page A1-6. |
126 found_by_runtime_probing_ |= 1u << VFP3 | 1u << ARMv7 | 1u << VFP2; | 130 found_by_runtime_probing_ |= 1u << VFP3 | 1u << ARMv7 | 1u << VFP2; |
127 } else if (!IsSupported(VFP2) && OS::ArmCpuHasFeature(VFP2)) { | 131 } else if (!IsSupported(VFP2) && OS::ArmCpuHasFeature(VFP2)) { |
128 found_by_runtime_probing_ |= 1u << VFP2; | 132 found_by_runtime_probing_ |= 1u << VFP2; |
129 } | 133 } |
130 | 134 |
131 if (!IsSupported(ARMv7) && OS::ArmCpuHasFeature(ARMv7)) { | 135 if (!IsSupported(ARMv7) && OS::ArmCpuHasFeature(ARMv7)) { |
132 found_by_runtime_probing_ |= 1u << ARMv7; | 136 found_by_runtime_probing_ |= 1u << ARMv7; |
133 } | 137 } |
134 | 138 |
135 if (!IsSupported(SUDIV) && OS::ArmCpuHasFeature(SUDIV)) { | 139 if (!IsSupported(SUDIV) && OS::ArmCpuHasFeature(SUDIV)) { |
136 found_by_runtime_probing_ |= 1u << SUDIV; | 140 found_by_runtime_probing_ |= 1u << SUDIV; |
137 } | 141 } |
138 | 142 |
139 if (!IsSupported(UNALIGNED_ACCESSES) && OS::ArmCpuHasFeature(ARMv7)) { | 143 if (!IsSupported(UNALIGNED_ACCESSES) && OS::ArmCpuHasFeature(ARMv7)) { |
140 found_by_runtime_probing_ |= 1u << UNALIGNED_ACCESSES; | 144 found_by_runtime_probing_ |= 1u << UNALIGNED_ACCESSES; |
141 } | 145 } |
142 | 146 |
| 147 if (OS::GetCpuImplementer() == QUALCOMM_IMPLEMENTER && |
| 148 OS::ArmCpuHasFeature(ARMv7)) { |
| 149 found_by_runtime_probing_ |= 1u << MOVW_MOVT_IMMEDIATE_LOADS; |
| 150 } |
| 151 |
143 supported_ |= found_by_runtime_probing_; | 152 supported_ |= found_by_runtime_probing_; |
144 #endif | 153 #endif |
145 | 154 |
146 // Assert that VFP3 implies VFP2 and ARMv7. | 155 // Assert that VFP3 implies VFP2 and ARMv7. |
147 ASSERT(!IsSupported(VFP3) || (IsSupported(VFP2) && IsSupported(ARMv7))); | 156 ASSERT(!IsSupported(VFP3) || (IsSupported(VFP2) && IsSupported(ARMv7))); |
148 } | 157 } |
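
For reference, supported_ and found_by_runtime_probing_ above are plain per-feature bitmasks. A minimal self-contained sketch of the pattern, assuming IsSupported is the usual bit test (the enum values below are hypothetical stand-ins, not V8's real ones):

    #include <cassert>
    #include <cstdint>

    enum Feature { VFP2 = 0, VFP3 = 1, ARMv7 = 2, SUDIV = 3 };  // stand-ins

    static bool IsSupportedSketch(uint32_t supported, Feature f) {
      return (supported & (1u << f)) != 0;  // one bit per feature
    }

    int main() {
      uint32_t supported = 0;
      supported |= 1u << VFP3 | 1u << ARMv7 | 1u << VFP2;  // as on line 110
      assert(IsSupportedSketch(supported, VFP3));
      assert(!IsSupportedSketch(supported, SUDIV));
      return 0;
    }
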
149 | 158 |
150 | 159 |
151 // ----------------------------------------------------------------------------- | 160 // ----------------------------------------------------------------------------- |
152 // Implementation of RelocInfo | 161 // Implementation of RelocInfo |
(...skipping 570 matching lines...)
723 int link = target_at(L->pos()); | 732 int link = target_at(L->pos()); |
724 if (link == kEndOfChain) { | 733 if (link == kEndOfChain) { |
725 L->Unuse(); | 734 L->Unuse(); |
726 } else { | 735 } else { |
727 ASSERT(link >= 0); | 736 ASSERT(link >= 0); |
728 L->link_to(link); | 737 L->link_to(link); |
729 } | 738 } |
730 } | 739 } |
731 | 740 |
732 | 741 |
733 static Instr EncodeMovwImmediate(uint32_t immediate) { | |
734 ASSERT(immediate < 0x10000); | |
735 return ((immediate & 0xf000) << 4) | (immediate & 0xfff); | |
736 } | |
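
EncodeMovwImmediate is deleted here, but movt below (and the new IsMovT/IsMovW) still call it, so it presumably just moved out of this file. The helper splits a 16-bit value into movw/movt's imm4:imm12 fields; a worked example:

    #include <cassert>
    #include <cstdint>

    // Same body as the helper deleted above: bits 15:12 of the immediate go
    // to instruction bits 19:16 (imm4); bits 11:0 stay in place (imm12).
    static uint32_t EncodeMovwImmediate(uint32_t immediate) {
      assert(immediate < 0x10000);
      return ((immediate & 0xf000) << 4) | (immediate & 0xfff);
    }

    int main() {
      // 0xABCD -> imm4 = 0xA at bits 19:16, imm12 = 0xBCD at bits 11:0.
      assert(EncodeMovwImmediate(0xABCD) == 0xA0BCD);
      return 0;
    }
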
737 | |
738 | |
739 // Low-level code emission routines depending on the addressing mode. | 742 // Low-level code emission routines depending on the addressing mode. |
740 // If this returns true then you have to use the rotate_imm and immed_8 | 743 // If this returns true then you have to use the rotate_imm and immed_8 |
741 // that it returns, because it may have already changed the instruction | 744 // that it returns, because it may have already changed the instruction |
742 // to match them! | 745 // to match them! |
743 static bool fits_shifter(uint32_t imm32, | 746 static bool fits_shifter(uint32_t imm32, |
744 uint32_t* rotate_imm, | 747 uint32_t* rotate_imm, |
745 uint32_t* immed_8, | 748 uint32_t* immed_8, |
746 Instr* instr) { | 749 Instr* instr) { |
747 // imm32 must be unsigned. | 750 // imm32 must be unsigned. |
748 for (int rot = 0; rot < 16; rot++) { | 751 for (int rot = 0; rot < 16; rot++) { |
(...skipping 44 matching lines...)
793 } | 796 } |
794 } | 797 } |
795 return false; | 798 return false; |
796 } | 799 } |
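
The skipped body of fits_shifter tests the ARM "rotated immediate" form: imm32 is encodable iff it equals an 8-bit value rotated right by 2*rotate_imm. A minimal sketch of just that test, assuming the skipped lines follow the ARM ARM definition (the real function additionally tries rewriting the instruction via the instr parameter, per the comment above):

    #include <cstdint>

    static bool EncodesAsShifterOperand(uint32_t imm32,
                                        uint32_t* rotate_imm,
                                        uint32_t* immed_8) {
      for (uint32_t rot = 0; rot < 16; rot++) {
        // Rotating imm32 left by 2*rot undoes a right rotation by the same
        // amount; the rot == 0 case avoids an undefined shift by 32.
        uint32_t imm8 =
            rot == 0 ? imm32
                     : (imm32 << (2 * rot)) | (imm32 >> (32 - 2 * rot));
        if (imm8 <= 0xff) {
          *rotate_imm = rot;
          *immed_8 = imm8;
          return true;  // e.g. 0x12000 -> immed_8 = 0x12, rotate_imm = 10
        }
      }
      return false;  // caller falls back to mvn/movw tricks or a pool load
    }
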
797 | 800 |
798 | 801 |
799 // We have to use the temporary register for things that can be relocated even | 802 // We have to use the temporary register for things that can be relocated even |
800 // if they can be encoded in the ARM's 12 bits of immediate-offset instruction | 803 // if they can be encoded in the ARM's 12 bits of immediate-offset instruction |
801 // space. There is no guarantee that the relocated location can be similarly | 804 // space. There is no guarantee that the relocated location can be similarly |
802 // encoded. | 805 // encoded. |
803 bool Operand::must_use_constant_pool(const Assembler* assembler) const { | 806 bool Operand::must_output_reloc_info(const Assembler* assembler) const { |
804 if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) { | 807 if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) { |
805 #ifdef DEBUG | 808 #ifdef DEBUG |
806 if (!Serializer::enabled()) { | 809 if (!Serializer::enabled()) { |
807 Serializer::TooLateToEnableNow(); | 810 Serializer::TooLateToEnableNow(); |
808 } | 811 } |
809 #endif // def DEBUG | 812 #endif // def DEBUG |
810 if (assembler != NULL && assembler->predictable_code_size()) return true; | 813 if (assembler != NULL && assembler->predictable_code_size()) return true; |
811 return Serializer::enabled(); | 814 return Serializer::enabled(); |
812 } else if (rmode_ == RelocInfo::NONE) { | 815 } else if (rmode_ == RelocInfo::NONE) { |
813 return false; | 816 return false; |
814 } | 817 } |
815 return true; | 818 return true; |
816 } | 819 } |
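
An illustration of the comment above this function, with made-up numbers: a value that encodes as a rotated immediate today may not encode after relocation, so a relocatable operand always needs a patchable 32-bit slot:

    #include <cassert>
    #include <cstdint>

    // Rotated-immediate test from the fits_shifter sketch above, repeated
    // so this example stays self-contained.
    static bool Encodes(uint32_t v) {
      for (uint32_t rot = 0; rot < 16; rot++) {
        uint32_t imm8 =
            rot == 0 ? v : (v << (2 * rot)) | (v >> (32 - 2 * rot));
        if (imm8 <= 0xff) return true;
      }
      return false;
    }

    int main() {
      assert(Encodes(0x00012000));   // fits: 0x12 rotated right by 20
      assert(!Encodes(0x00012344));  // hypothetical relocated value: no fit
      return 0;
    }
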
817 | 820 |
818 | 821 |
819 bool Operand::is_single_instruction(const Assembler* assembler, | 822 bool Operand::is_single_instruction(const Assembler* assembler, |
820 Instr instr) const { | 823 Instr instr) const { |
821 if (rm_.is_valid()) return true; | 824 if (rm_.is_valid()) return true; |
822 uint32_t dummy1, dummy2; | 825 uint32_t dummy1, dummy2; |
823 if (must_use_constant_pool(assembler) || | 826 if (must_output_reloc_info(assembler) || |
824 !fits_shifter(imm32_, &dummy1, &dummy2, &instr)) { | 827 !fits_shifter(imm32_, &dummy1, &dummy2, &instr)) { |
825 // The immediate operand cannot be encoded as a shifter operand, or use of | 828 // The immediate operand cannot be encoded as a shifter operand, or use of |
826 // the constant pool is required. For a mov instruction not setting the | 829 // the constant pool is required. For a mov instruction not setting the |
827 // condition code, additional instruction conventions can be used. | 830 // condition code, additional instruction conventions can be used. |
828 if ((instr & ~kCondMask) == 13*B21) { // mov, S not set | 831 if ((instr & ~kCondMask) == 13*B21) { // mov, S not set |
829 if (must_use_constant_pool(assembler) || | 832 #ifdef USE_BLX |
830 !CpuFeatures::IsSupported(ARMv7)) { | 833 // When using BLX, there are two things that must be true for the address |
831 // mov instruction will be an ldr from constant pool (one instruction). | 834 // load to be longer than a single instruction. First, immediate loads |
832 return true; | 835 // using movw/movt must be supported (and fast) on the target ARM |
833 } else { | 836 // architecture. Second, the reloc mode must be something other than NONE, |
834 // mov instruction will be a mov or movw followed by movt (two | 837 // since NONE is used whenever the constant pool cannot be used for |
835 // instructions). | 838 // technical reasons, e.g. back-patching call sites in optimized code with |
836 return false; | 839 // a call to a lazy deopt routine. |
837 } | 840 return !Assembler::allow_immediate_constant_pool_loads(assembler) && |
| 841 rmode_ != RelocInfo::NONE; |
| 842 #else |
| 843 // It's not possible to use immediate loads to the pc to do a call (the |
| 844 // pc would be inconsistent half-way through the load), so loading the |
| 845 // destination address without USE_BLX is always a single instruction of |
| 846 // the form ldr pc, [pc + #xxx]. |
| 847 return true; |
| 848 #endif |
838 } else { | 849 } else { |
839 // If this is not a mov or mvn instruction there will always be an | 850 // If this is not a mov or mvn instruction there will always be an |
840 // additional instruction - either mov or ldr. The mov might actually be | 851 // additional instruction - either mov or ldr. The mov might actually be |
841 // two instructions (mov or movw followed by movt), so including the | 852 // two instructions (mov or movw followed by movt), so including the |
842 // actual instruction, two or three instructions will be generated. | 853 // actual instruction, two or three instructions will be generated. |
843 return false; | 854 return false; |
844 } | 855 } |
845 } else { | 856 } else { |
846 // No use of constant pool and the immediate operand can be encoded as a | 857 // No use of constant pool and the immediate operand can be encoded as a |
847 // shifter operand. | 858 // shifter operand. |
848 return true; | 859 return true; |
849 } | 860 } |
850 } | 861 } |
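
Summarizing the branches above, these are the shapes the assembler can end up emitting for "mov rd, #imm32" (an illustration assembled from this function's logic, not text from the CL):

    //   shifter operand fits:  mov  rd, #imm8, rot          ; 1 instruction
    //   constant pool load:    ldr  rd, [pc, #offset]       ; 1 instruction
    //   immediate pair:        movw rd, #(imm32 & 0xffff)   ; 2 instructions
    //                          movt rd, #(imm32 >> 16)
    //   non-mov/mvn ops:       one of the above into ip, then the op itself
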
851 | 862 |
852 | 863 |
| 864 void Assembler::move_32_bit_immediate(Condition cond, |
| 865 Register rd, |
| 866 SBit s, |
| 867 const Operand& x) { |
| 868 if (rd.code() != pc.code() && s == LeaveCC) { |
| 869 // Candidate for immediate load. |
| 870 if (x.must_output_reloc_info(this)) { |
| 871 if (!Assembler::allow_immediate_constant_pool_loads(this)) { |
| 872 RecordRelocInfo(x.rmode_, x.imm32_, USE_CONSTANT_POOL); |
| 873 ldr(rd, MemOperand(pc, 0), cond); |
| 874 return; |
| 875 } |
| 876 RecordRelocInfo(x.rmode_, x.imm32_, DONT_USE_CONSTANT_POOL); |
| 877 // Make sure the movw/movt doesn't get separated. |
| 878 BlockConstPoolFor(2); |
| 879 } |
| 880 |
| 881 // Emit a real movw/movt pair. |
| 882 emit(cond | 0x30*B20 | rd.code()*B12 | |
| 883 EncodeMovwImmediate(x.imm32_ & 0xffff)); |
| 884 movt(rd, static_cast<uint32_t>(x.imm32_) >> 16, cond); |
| 885 } else { |
| 886 RecordRelocInfo(x.rmode_, x.imm32_, USE_CONSTANT_POOL); |
| 887 ldr(rd, MemOperand(pc, 0), cond); |
| 888 } |
| 889 } |
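
For orientation, what each path above leaves in the instruction stream (assumed from the code, offsets hypothetical):

    //   pool path:  ldr  rd, [pc, #0]         ; offset patched later in
    //                                         ; CheckConstPool
    //   movw path:  movw rd, #(imm & 0xffff)  ; if reloc info is needed it
    //               movt rd, #(imm >> 16)     ; is recorded with
    //                                         ; DONT_USE_CONSTANT_POOL, and
    //                                         ; BlockConstPoolFor(2) keeps
    //                                         ; the pair from being split
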
| 890 |
| 891 |
853 void Assembler::addrmod1(Instr instr, | 892 void Assembler::addrmod1(Instr instr, |
854 Register rn, | 893 Register rn, |
855 Register rd, | 894 Register rd, |
856 const Operand& x) { | 895 const Operand& x) { |
857 CheckBuffer(); | 896 CheckBuffer(); |
858 ASSERT((instr & ~(kCondMask | kOpCodeMask | S)) == 0); | 897 ASSERT((instr & ~(kCondMask | kOpCodeMask | S)) == 0); |
859 if (!x.rm_.is_valid()) { | 898 if (!x.rm_.is_valid()) { |
860 // Immediate. | 899 // Immediate. |
861 uint32_t rotate_imm; | 900 uint32_t rotate_imm; |
862 uint32_t immed_8; | 901 uint32_t immed_8; |
863 if (x.must_use_constant_pool(this) || | 902 if (x.must_output_reloc_info(this) || |
864 !fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) { | 903 !fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) { |
865 // The immediate operand cannot be encoded as a shifter operand, so load | 904 // The immediate operand cannot be encoded as a shifter operand, so load |
866 // it first to register ip and change the original instruction to use ip. | 905 // it first to register ip and change the original instruction to use ip. |
867 // However, if the original instruction is a 'mov rd, x' (not setting the | 906 // However, if the original instruction is a 'mov rd, x' (not setting the |
868 // condition code), then replace it with a 'ldr rd, [pc]'. | 907 // condition code), then replace it with a 'ldr rd, [pc]'. |
869 CHECK(!rn.is(ip)); // rn should never be ip, or it will be trashed | 908 CHECK(!rn.is(ip)); // rn should never be ip, or it will be trashed |
870 Condition cond = Instruction::ConditionField(instr); | 909 Condition cond = Instruction::ConditionField(instr); |
871 if ((instr & ~kCondMask) == 13*B21) { // mov, S not set | 910 if ((instr & ~kCondMask) == 13*B21) { // mov, S not set |
872 if (x.must_use_constant_pool(this) || | 911 move_32_bit_immediate(cond, rd, LeaveCC, x); |
873 !CpuFeatures::IsSupported(ARMv7)) { | |
874 RecordRelocInfo(x.rmode_, x.imm32_); | |
875 ldr(rd, MemOperand(pc, 0), cond); | |
876 } else { | |
877 // Will probably use movw, will certainly not use constant pool. | |
878 mov(rd, Operand(x.imm32_ & 0xffff), LeaveCC, cond); | |
879 movt(rd, static_cast<uint32_t>(x.imm32_) >> 16, cond); | |
880 } | |
881 } else { | 912 } else { |
882 // If this is not a mov or mvn instruction we may still be able to avoid | 913 // If this is not a mov or mvn instruction we may still be able to avoid |
883 // a constant pool entry by using mvn or movw. | 914 // a constant pool entry by using mvn or movw. |
884 if (!x.must_use_constant_pool(this) && | 915 if (!x.must_output_reloc_info(this) && |
885 (instr & kMovMvnMask) != kMovMvnPattern) { | 916 (instr & kMovMvnMask) != kMovMvnPattern) { |
886 mov(ip, x, LeaveCC, cond); | 917 mov(ip, x, LeaveCC, cond); |
887 } else { | 918 } else { |
888 RecordRelocInfo(x.rmode_, x.imm32_); | 919 move_32_bit_immediate(cond, ip, |
889 ldr(ip, MemOperand(pc, 0), cond); | 920 static_cast<SBit>(instr & (1 << 20)), x); |
890 } | 921 } |
891 addrmod1(instr, rn, rd, Operand(ip)); | 922 addrmod1(instr, rn, rd, Operand(ip)); |
892 } | 923 } |
893 return; | 924 return; |
894 } | 925 } |
895 instr |= I | rotate_imm*B8 | immed_8; | 926 instr |= I | rotate_imm*B8 | immed_8; |
896 } else if (!x.rs_.is_valid()) { | 927 } else if (!x.rs_.is_valid()) { |
897 // Immediate shift. | 928 // Immediate shift. |
898 instr |= x.shift_imm_*B7 | x.shift_op_ | x.rm_.code(); | 929 instr |= x.shift_imm_*B7 | x.shift_op_ | x.rm_.code(); |
899 } else { | 930 } else { |
(...skipping 286 matching lines...)
1186 // Don't allow nop instructions in the form mov rn, rn to be generated using | 1217 // Don't allow nop instructions in the form mov rn, rn to be generated using |
1187 // the mov instruction. They must be generated using nop(int/NopMarkerTypes) | 1218 // the mov instruction. They must be generated using nop(int/NopMarkerTypes) |
1188 // or MarkCode(int/NopMarkerTypes) pseudo instructions. | 1219 // or MarkCode(int/NopMarkerTypes) pseudo instructions. |
1189 ASSERT(!(src.is_reg() && src.rm().is(dst) && s == LeaveCC && cond == al)); | 1220 ASSERT(!(src.is_reg() && src.rm().is(dst) && s == LeaveCC && cond == al)); |
1190 addrmod1(cond | MOV | s, r0, dst, src); | 1221 addrmod1(cond | MOV | s, r0, dst, src); |
1191 } | 1222 } |
1192 | 1223 |
1193 | 1224 |
1194 void Assembler::movw(Register reg, uint32_t immediate, Condition cond) { | 1225 void Assembler::movw(Register reg, uint32_t immediate, Condition cond) { |
1195 ASSERT(immediate < 0x10000); | 1226 ASSERT(immediate < 0x10000); |
| 1227 // May use movw if supported, but on unsupported platforms will try to use |
| 1228 // an equivalent rotated immed_8 value and other tricks before falling back |
| 1229 // to a constant pool load. |
1196 mov(reg, Operand(immediate), LeaveCC, cond); | 1230 mov(reg, Operand(immediate), LeaveCC, cond); |
1197 } | 1231 } |
1198 | 1232 |
1199 | 1233 |
1200 void Assembler::movt(Register reg, uint32_t immediate, Condition cond) { | 1234 void Assembler::movt(Register reg, uint32_t immediate, Condition cond) { |
1201 emit(cond | 0x34*B20 | reg.code()*B12 | EncodeMovwImmediate(immediate)); | 1235 emit(cond | 0x34*B20 | reg.code()*B12 | EncodeMovwImmediate(immediate)); |
1202 } | 1236 } |
1203 | 1237 |
1204 | 1238 |
1205 void Assembler::bic(Register dst, Register src1, const Operand& src2, | 1239 void Assembler::bic(Register dst, Register src1, const Operand& src2, |
(...skipping 209 matching lines...)
1415 | 1449 |
1416 | 1450 |
1417 void Assembler::msr(SRegisterFieldMask fields, const Operand& src, | 1451 void Assembler::msr(SRegisterFieldMask fields, const Operand& src, |
1418 Condition cond) { | 1452 Condition cond) { |
1419 ASSERT(fields >= B16 && fields < B20); // at least one field set | 1453 ASSERT(fields >= B16 && fields < B20); // at least one field set |
1420 Instr instr; | 1454 Instr instr; |
1421 if (!src.rm_.is_valid()) { | 1455 if (!src.rm_.is_valid()) { |
1422 // Immediate. | 1456 // Immediate. |
1423 uint32_t rotate_imm; | 1457 uint32_t rotate_imm; |
1424 uint32_t immed_8; | 1458 uint32_t immed_8; |
1425 if (src.must_use_constant_pool(this) || | 1459 if (src.must_output_reloc_info(this) || |
1426 !fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) { | 1460 !fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) { |
1427 // Immediate operand cannot be encoded, load it first to register ip. | 1461 // Immediate operand cannot be encoded, load it first to register ip. |
1428 RecordRelocInfo(src.rmode_, src.imm32_); | 1462 RecordRelocInfo(src.rmode_, src.imm32_); |
1429 ldr(ip, MemOperand(pc, 0), cond); | 1463 ldr(ip, MemOperand(pc, 0), cond); |
1430 msr(fields, Operand(ip), cond); | 1464 msr(fields, Operand(ip), cond); |
1431 return; | 1465 return; |
1432 } | 1466 } |
1433 instr = I | rotate_imm*B8 | immed_8; | 1467 instr = I | rotate_imm*B8 | immed_8; |
1434 } else { | 1468 } else { |
1435 ASSERT(!src.rs_.is_valid() && src.shift_imm_ == 0); // only rm allowed | 1469 ASSERT(!src.rs_.is_valid() && src.shift_imm_ == 0); // only rm allowed |
(...skipping 1007 matching lines...)
2443 // ARMv6{K/T2} and v7 have an actual NOP instruction but it serializes | 2477 // ARMv6{K/T2} and v7 have an actual NOP instruction but it serializes |
2444 // some of the CPU's pipeline and has to issue. Older ARM chips simply used | 2478 // some of the CPU's pipeline and has to issue. Older ARM chips simply used |
2445 // MOV Rx, Rx as NOP and it performs better even in newer CPUs. | 2479 // MOV Rx, Rx as NOP and it performs better even in newer CPUs. |
2446 // We therefore use MOV Rx, Rx, even on newer CPUs, and use Rx to encode | 2480 // We therefore use MOV Rx, Rx, even on newer CPUs, and use Rx to encode |
2447 // a type. | 2481 // a type. |
2448 ASSERT(0 <= type && type <= 14); // mov pc, pc isn't a nop. | 2482 ASSERT(0 <= type && type <= 14); // mov pc, pc isn't a nop. |
2449 emit(al | 13*B21 | type*B12 | type); | 2483 emit(al | 13*B21 | type*B12 | type); |
2450 } | 2484 } |
2451 | 2485 |
2452 | 2486 |
| 2487 bool Assembler::IsMovT(Instr instr) { |
| 2488 instr &= ~(((kNumberOfConditions - 1) << 28) | // mask out condition |
| 2489 ((kNumRegisters - 1)*B12) | // mask out destination register |
| 2490 EncodeMovwImmediate(0xFFFF)); // mask out immediate value |
| 2491 return instr == 0x34*B20; |
| 2492 } |
| 2493 |
| 2494 |
| 2495 bool Assembler::IsMovW(Instr instr) { |
| 2496 instr &= ~(((kNumberOfConditions - 1) << 28) | // mask out condition |
| 2497 ((kNumRegisters - 1)*B12) | // mask out destination register |
| 2498 EncodeMovwImmediate(0xFFFF)); // mask out immediate value |
| 2499 return instr == 0x30*B20; |
| 2500 } |
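
A self-contained check that a movw emitted the way move_32_bit_immediate does above (0x30*B20) survives the mask IsMovW applies; it assumes B12/B20 are the usual 1 << 12 and 1 << 20 bit constants and that kNumberOfConditions and kNumRegisters are both 16:

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint32_t B12 = 1u << 12, B20 = 1u << 20;
      const uint32_t al = 0xEu << 28;  // 'always' condition field
      uint32_t rd = 7;                 // destination register r7
      uint32_t imm = 0xABCD;
      uint32_t movw_bits = al | 0x30 * B20 | rd * B12 |
                           (((imm & 0xf000) << 4) | (imm & 0xfff));
      // IsMovW's mask: drop the condition (bits 31:28), the destination
      // (bits 15:12) and the split immediate (EncodeMovwImmediate(0xFFFF)
      // == 0xF0FFF), leaving only the opcode bits.
      uint32_t masked = movw_bits & ~((0xFu << 28) | (0xFu * B12) | 0xF0FFFu);
      assert(masked == 0x30 * B20);
      return 0;
    }
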
| 2501 |
| 2502 |
2453 bool Assembler::IsNop(Instr instr, int type) { | 2503 bool Assembler::IsNop(Instr instr, int type) { |
2454 ASSERT(0 <= type && type <= 14); // mov pc, pc isn't a nop. | 2504 ASSERT(0 <= type && type <= 14); // mov pc, pc isn't a nop. |
2455 // Check for mov rx, rx where x = type. | 2505 // Check for mov rx, rx where x = type. |
2456 return instr == (al | 13*B21 | type*B12 | type); | 2506 return instr == (al | 13*B21 | type*B12 | type); |
2457 } | 2507 } |
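
A small check of the marker encoding shared by nop and IsNop above, assuming B12/B21 are 1 << 12 and 1 << 21: the type value rides in both register fields of "mov rx, rx", so it can be recovered from the instruction:

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint32_t B12 = 1u << 12, B21 = 1u << 21;
      const uint32_t al = 0xEu << 28;
      uint32_t type = 5;                                      // marker type
      uint32_t nop_bits = al | 13 * B21 | type * B12 | type;  // mov r5, r5
      uint32_t rd = (nop_bits >> 12) & 0xF;  // destination register field
      uint32_t rm = nop_bits & 0xF;          // source register field
      assert(rd == type && rm == type);
      return 0;
    }
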
2458 | 2508 |
2459 | 2509 |
2460 bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) { | 2510 bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) { |
2461 uint32_t dummy1; | 2511 uint32_t dummy1; |
2462 uint32_t dummy2; | 2512 uint32_t dummy2; |
(...skipping 98 matching lines...)
2561 // No relocation info should be pending while using dd. dd is used | 2611 // No relocation info should be pending while using dd. dd is used |
2562 // to write pure data with no pointers and the constant pool should | 2612 // to write pure data with no pointers and the constant pool should |
2563 // be emitted before using dd. | 2613 // be emitted before using dd. |
2564 ASSERT(num_pending_reloc_info_ == 0); | 2614 ASSERT(num_pending_reloc_info_ == 0); |
2565 CheckBuffer(); | 2615 CheckBuffer(); |
2566 *reinterpret_cast<uint32_t*>(pc_) = data; | 2616 *reinterpret_cast<uint32_t*>(pc_) = data; |
2567 pc_ += sizeof(uint32_t); | 2617 pc_ += sizeof(uint32_t); |
2568 } | 2618 } |
2569 | 2619 |
2570 | 2620 |
2571 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) { | 2621 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data, |
| 2622 UseConstantPoolMode mode) { |
2572 // We do not try to reuse pool constants. | 2623 // We do not try to reuse pool constants. |
2573 RelocInfo rinfo(pc_, rmode, data, NULL); | 2624 RelocInfo rinfo(pc_, rmode, data, NULL); |
2574 if (((rmode >= RelocInfo::JS_RETURN) && | 2625 if (((rmode >= RelocInfo::JS_RETURN) && |
2575 (rmode <= RelocInfo::DEBUG_BREAK_SLOT)) || | 2626 (rmode <= RelocInfo::DEBUG_BREAK_SLOT)) || |
2576 (rmode == RelocInfo::CONST_POOL)) { | 2627 (rmode == RelocInfo::CONST_POOL) || |
| 2628 mode == DONT_USE_CONSTANT_POOL) { |
2577 // Adjust code for new modes. | 2629 // Adjust code for new modes. |
2578 ASSERT(RelocInfo::IsDebugBreakSlot(rmode) | 2630 ASSERT(RelocInfo::IsDebugBreakSlot(rmode) |
2579 || RelocInfo::IsJSReturn(rmode) | 2631 || RelocInfo::IsJSReturn(rmode) |
2580 || RelocInfo::IsComment(rmode) | 2632 || RelocInfo::IsComment(rmode) |
2581 || RelocInfo::IsPosition(rmode) | 2633 || RelocInfo::IsPosition(rmode) |
2582 || RelocInfo::IsConstPool(rmode)); | 2634 || RelocInfo::IsConstPool(rmode) |
| 2635 || mode == DONT_USE_CONSTANT_POOL); |
2583 // These modes do not need an entry in the constant pool. | 2636 // These modes do not need an entry in the constant pool. |
2584 } else { | 2637 } else { |
2585 ASSERT(num_pending_reloc_info_ < kMaxNumPendingRelocInfo); | 2638 ASSERT(num_pending_reloc_info_ < kMaxNumPendingRelocInfo); |
2586 if (num_pending_reloc_info_ == 0) { | 2639 if (num_pending_reloc_info_ == 0) { |
2587 first_const_pool_use_ = pc_offset(); | 2640 first_const_pool_use_ = pc_offset(); |
2588 } | 2641 } |
2589 pending_reloc_info_[num_pending_reloc_info_++] = rinfo; | 2642 pending_reloc_info_[num_pending_reloc_info_++] = rinfo; |
2590 // Make sure the constant pool is not emitted in place of the next | 2643 // Make sure the constant pool is not emitted in place of the next |
2591 // instruction for which we just recorded relocation info. | 2644 // instruction for which we just recorded relocation info. |
2592 BlockConstPoolFor(1); | 2645 BlockConstPoolFor(1); |
(...skipping 98 matching lines...)
2691 // Emit constant pool entries. | 2744 // Emit constant pool entries. |
2692 for (int i = 0; i < num_pending_reloc_info_; i++) { | 2745 for (int i = 0; i < num_pending_reloc_info_; i++) { |
2693 RelocInfo& rinfo = pending_reloc_info_[i]; | 2746 RelocInfo& rinfo = pending_reloc_info_[i]; |
2694 ASSERT(rinfo.rmode() != RelocInfo::COMMENT && | 2747 ASSERT(rinfo.rmode() != RelocInfo::COMMENT && |
2695 rinfo.rmode() != RelocInfo::POSITION && | 2748 rinfo.rmode() != RelocInfo::POSITION && |
2696 rinfo.rmode() != RelocInfo::STATEMENT_POSITION && | 2749 rinfo.rmode() != RelocInfo::STATEMENT_POSITION && |
2697 rinfo.rmode() != RelocInfo::CONST_POOL); | 2750 rinfo.rmode() != RelocInfo::CONST_POOL); |
2698 | 2751 |
2699 Instr instr = instr_at(rinfo.pc()); | 2752 Instr instr = instr_at(rinfo.pc()); |
2700 // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0. | 2753 // Patch an 'ldr rd, [pc, #0]' load; anything else must be a movw. |
2701 ASSERT(IsLdrPcImmediateOffset(instr) && | 2754 if (IsLdrPcImmediateOffset(instr) && |
2702 GetLdrRegisterImmediateOffset(instr) == 0); | 2755 GetLdrRegisterImmediateOffset(instr) == 0) { |
| 2756 int delta = pc_ - rinfo.pc() - kPcLoadDelta; |
| 2757 // 0 is the smallest delta: |
| 2758 // ldr rd, [pc, #0] |
| 2759 // constant pool marker |
| 2760 // data |
| 2761 ASSERT(is_uint12(delta)); |
2703 | 2762 |
2704 int delta = pc_ - rinfo.pc() - kPcLoadDelta; | 2763 instr_at_put(rinfo.pc(), SetLdrRegisterImmediateOffset(instr, delta)); |
2705 // 0 is the smallest delta: | 2764 } else { |
2706 // ldr rd, [pc, #0] | 2765 ASSERT(IsMovW(instr)); |
2707 // constant pool marker | 2766 } |
2708 // data | |
2709 ASSERT(is_uint12(delta)); | |
2710 | |
2711 instr_at_put(rinfo.pc(), SetLdrRegisterImmediateOffset(instr, delta)); | |
2712 emit(rinfo.data()); | 2767 emit(rinfo.data()); |
2713 } | 2768 } |
2714 | 2769 |
2715 num_pending_reloc_info_ = 0; | 2770 num_pending_reloc_info_ = 0; |
2716 first_const_pool_use_ = -1; | 2771 first_const_pool_use_ = -1; |
2717 | 2772 |
2718 RecordComment("]"); | 2773 RecordComment("]"); |
2719 | 2774 |
2720 if (after_pool.is_linked()) { | 2775 if (after_pool.is_linked()) { |
2721 bind(&after_pool); | 2776 bind(&after_pool); |
2722 } | 2777 } |
2723 } | 2778 } |
2724 | 2779 |
2725 // Since a constant pool was just emitted, move the check offset forward by | 2780 // Since a constant pool was just emitted, move the check offset forward by |
2726 // the standard interval. | 2781 // the standard interval. |
2727 next_buffer_check_ = pc_offset() + kCheckPoolInterval; | 2782 next_buffer_check_ = pc_offset() + kCheckPoolInterval; |
2728 } | 2783 } |
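
A worked example of the ldr back-patch in the loop above, assuming kPcLoadDelta is 8 (the ARM pipeline's pc read-ahead). If the "ldr rd, [pc, #0]" was emitted at offset 0x100 and its pool slot lands at offset 0x120:

    //   delta = 0x120 - 0x100 - 8 = 0x18
    //   patched instruction: ldr rd, [pc, #0x18]
    //   is_uint12(0x18) holds, so the 12-bit offset field can hold it
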
2729 | 2784 |
2730 | 2785 |
2731 } } // namespace v8::internal | 2786 } } // namespace v8::internal |
2732 | 2787 |
2733 #endif // V8_TARGET_ARCH_ARM | 2788 #endif // V8_TARGET_ARCH_ARM |