Chromium Code Reviews

Diff: src/arm/assembler-arm.cc

Issue 11037023: Use movw/movt instead of constant pool on ARMv7 (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Set flag properly. Created 8 years, 2 months ago.
 // Copyright (c) 1994-2006 Sun Microsystems Inc.
 // All Rights Reserved.
 //
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions
 // are met:
 //
 // - Redistributions of source code must retain the above copyright notice,
 // this list of conditions and the following disclaimer.
 //
(...skipping 99 matching lines...)
     supported_ |= 1u << VFP3 | 1u << ARMv7 | 1u << VFP2;
   }
   // For the simulator=arm build, use ARMv7 when FLAG_enable_armv7 is enabled
   if (FLAG_enable_armv7) {
     supported_ |= 1u << ARMv7;
   }

   if (FLAG_enable_sudiv) {
     supported_ |= 1u << SUDIV;
   }

+  if (FLAG_enable_movw_movt) {
+    supported_ |= 1u << MOVW_MOVT_IMMEDIATE_LOADS;
+  }
 #else  // __arm__
   // Probe for additional features not already known to be available.
   if (!IsSupported(VFP3) && OS::ArmCpuHasFeature(VFP3)) {
     // This implementation also sets the VFP flags if runtime
     // detection of VFP returns true. VFPv3 implies ARMv7 and VFP2, see ARM DDI
     // 0406B, page A1-6.
     found_by_runtime_probing_ |= 1u << VFP3 | 1u << ARMv7 | 1u << VFP2;
   } else if (!IsSupported(VFP2) && OS::ArmCpuHasFeature(VFP2)) {
     found_by_runtime_probing_ |= 1u << VFP2;
   }

   if (!IsSupported(ARMv7) && OS::ArmCpuHasFeature(ARMv7)) {
     found_by_runtime_probing_ |= 1u << ARMv7;
   }

   if (!IsSupported(SUDIV) && OS::ArmCpuHasFeature(SUDIV)) {
     found_by_runtime_probing_ |= 1u << SUDIV;
   }

   if (!IsSupported(UNALIGNED_ACCESSES) && OS::ArmCpuHasFeature(ARMv7)) {
     found_by_runtime_probing_ |= 1u << UNALIGNED_ACCESSES;
   }

+  if (OS::GetCpuImplementer() == QualcommImplementer &&
+      OS::ArmCpuHasFeature(ARMv7)) {
+    found_by_runtime_probing_ |= 1u << MOVW_MOVT_IMMEDIATE_LOADS;
+  }
+
   supported_ |= found_by_runtime_probing_;
 #endif

   // Assert that VFP3 implies VFP2 and ARMv7.
   ASSERT(!IsSupported(VFP3) || (IsSupported(VFP2) && IsSupported(ARMv7)));
 }


 // -----------------------------------------------------------------------------
 // Implementation of RelocInfo
(...skipping 570 matching lines...)
   int link = target_at(L->pos());
   if (link == kEndOfChain) {
     L->Unuse();
   } else {
     ASSERT(link >= 0);
     L->link_to(link);
   }
 }


-static Instr EncodeMovwImmediate(uint32_t immediate) {
-  ASSERT(immediate < 0x10000);
-  return ((immediate & 0xf000) << 4) | (immediate & 0xfff);
-}
-
-
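For readers new to the encoding: EncodeMovwImmediate packs a 16-bit value into
the split imm4:imm12 field layout that ARMv7 movw/movt use, where bits [15:12]
of the value move to instruction bits [19:16] and bits [11:0] stay in place. A
minimal standalone sketch of the round trip (hypothetical names, not part of
the patch):

  #include <cassert>
  #include <cstdint>

  static uint32_t EncodeMovwImmediateSketch(uint32_t immediate) {
    assert(immediate < 0x10000);
    // Value bits [15:12] move up to instruction bits [19:16].
    return ((immediate & 0xf000) << 4) | (immediate & 0xfff);
  }

  static uint32_t DecodeMovwImmediateSketch(uint32_t instr) {
    // Inverse of the encoding above.
    return ((instr >> 4) & 0xf000) | (instr & 0xfff);
  }

  int main() {
    uint32_t imm = 0xbeef;
    uint32_t fields = EncodeMovwImmediateSketch(imm);  // 0x000b0eef
    assert(DecodeMovwImmediateSketch(fields) == imm);  // round-trips
    return 0;
  }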
 // Low-level code emission routines depending on the addressing mode.
 // If this returns true then you have to use the rotate_imm and immed_8
 // that it returns, because it may have already changed the instruction
 // to match them!
 static bool fits_shifter(uint32_t imm32,
                          uint32_t* rotate_imm,
                          uint32_t* immed_8,
                          Instr* instr) {
   // imm32 must be unsigned.
   for (int rot = 0; rot < 16; rot++) {
(...skipping 44 matching lines...)
     }
   }
   return false;
 }
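The test fits_shifter performs is the standard ARM Operand2 rule: an immediate
is encodable if it is an 8-bit value rotated right by an even amount (2 * rot,
with rot in [0, 15]). fits_shifter additionally tries instruction rewrites
(e.g. turning mov into mvn) that the sketch below omits. A minimal version of
just the rotation test (hypothetical helper, not V8 code):

  #include <cstdint>

  static bool FitsRotatedImm8(uint32_t imm32, uint32_t* rot, uint32_t* imm8) {
    for (uint32_t r = 0; r < 16; r++) {
      // Rotating left by 2*r undoes a right rotation by 2*r.
      uint32_t s = 2 * r;
      uint32_t rotated = (s == 0) ? imm32 : (imm32 << s) | (imm32 >> (32 - s));
      if (rotated <= 0xff) {
        *rot = r;
        *imm8 = rotated;
        return true;  // e.g. 0xff000000 fits: 0xff rotated right by 8 (r = 4)
      }
    }
    return false;  // e.g. 0x12345678 needs a register or a multi-instr load
  }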


 // We have to use the temporary register for things that can be relocated even
 // if they can be encoded in the ARM's 12 bits of immediate-offset instruction
 // space. There is no guarantee that the relocated location can be similarly
 // encoded.
-bool Operand::must_use_constant_pool(const Assembler* assembler) const {
+bool Operand::must_output_reloc_info(const Assembler* assembler) const {
   if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) {
 #ifdef DEBUG
     if (!Serializer::enabled()) {
       Serializer::TooLateToEnableNow();
     }
 #endif  // def DEBUG
     if (assembler != NULL && assembler->predictable_code_size()) return true;
     return Serializer::enabled();
   } else if (rmode_ == RelocInfo::NONE) {
     return false;
   }
   return true;
 }


 bool Operand::is_single_instruction(const Assembler* assembler,
                                     Instr instr) const {
   if (rm_.is_valid()) return true;
   uint32_t dummy1, dummy2;
-  if (must_use_constant_pool(assembler) ||
+  if (must_output_reloc_info(assembler) ||
       !fits_shifter(imm32_, &dummy1, &dummy2, &instr)) {
     // The immediate operand cannot be encoded as a shifter operand, or use of
     // constant pool is required. For a mov instruction not setting the
     // condition code additional instruction conventions can be used.
     if ((instr & ~kCondMask) == 13*B21) {  // mov, S not set
-      if (must_use_constant_pool(assembler) ||
-          !CpuFeatures::IsSupported(ARMv7)) {
-        // mov instruction will be an ldr from constant pool (one instruction).
-        return true;
-      } else {
-        // mov instruction will be a mov or movw followed by movt (two
-        // instructions).
-        return false;
-      }
+#ifdef USE_BLX
+      // When using BLX, two things must be true for the address load to be
+      // longer than a single instruction. First, immediate loads using
+      // movw/movt must be supported (and fast) on the target ARM
+      // architecture. Second, the reloc mode must be something other than
+      // NONE, since NONE is used whenever the constant pool cannot be used
+      // for technical reasons, e.g. when back-patching call sites in
+      // optimized code with a call to a lazy deopt routine.
+      return !Assembler::allow_immediate_constant_pool_loads(assembler) &&
+             rmode_ != RelocInfo::NONE;
+#else
+      return true;
ulan 2012/10/18 09:07:53  Why don't we check for !CpuFeatures::IsSupported(ARMv7) here?
danno 2012/10/18 12:22:10  It's not possible to use immediate loads to the pc.
+#endif
     } else {
       // If this is not a mov or mvn instruction there will always be an
       // additional instruction - either mov or ldr. The mov might itself be
       // two instructions (movw followed by movt), so in total two or three
       // instructions will be generated.
       return false;
     }
   } else {
     // No use of constant pool and the immediate operand can be encoded as a
     // shifter operand.
     return true;
   }
 }


+void Assembler::move_32_bit_immediate(Condition cond,
+                                      Register rd,
+                                      SBit s,
+                                      const Operand& x) {
+  if (rd.code() != pc.code() && s == LeaveCC) {
+    // Candidate for immediate load.
+    if (x.must_output_reloc_info(this)) {
+      if (!Assembler::allow_immediate_constant_pool_loads(this)) {
+        RecordRelocInfo(x.rmode_, x.imm32_, USE_CONSTANT_POOL);
+        ldr(rd, MemOperand(pc, 0), cond);
+        return;
+      }
+      RecordRelocInfo(x.rmode_, x.imm32_, DONT_USE_CONSTANT_POOL);
+      // Make sure the movw/movt doesn't get separated.
+      BlockConstPoolFor(2);
+    }
+
+    // Emit a real movw/movt pair.
+    emit(cond | 0x30*B20 | rd.code()*B12 |
+         EncodeMovwImmediate(x.imm32_ & 0xffff));
+    movt(rd, static_cast<uint32_t>(x.imm32_) >> 16, cond);
+  } else {
+    RecordRelocInfo(x.rmode_, x.imm32_, USE_CONSTANT_POOL);
+    ldr(rd, MemOperand(pc, 0), cond);
+  }
+}
+
+
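The effect of the fast path above is a fixed two-instruction sequence in the
instruction stream instead of an ldr plus a constant pool entry, which is also
why BlockConstPoolFor(2) is needed: a pool emitted between the two halves
would leave rd only partially loaded. A sketch of the split for a sample
constant (plain C++, not V8 code):

  #include <cstdint>
  #include <cstdio>

  int main() {
    // The emitted pair for 0xdeadbeef amounts to:
    //   movw rd, #0xbeef   ; rd = 0x0000beef (movw zero-extends the low half)
    //   movt rd, #0xdead   ; rd = 0xdeadbeef (movt writes the high half)
    uint32_t imm32 = 0xdeadbeef;
    unsigned lo = imm32 & 0xffff;  // low half, goes into the movw
    unsigned hi = imm32 >> 16;     // high half, goes into the movt
    printf("movw #0x%04x, movt #0x%04x\n", lo, hi);
    return 0;
  }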
 void Assembler::addrmod1(Instr instr,
                          Register rn,
                          Register rd,
                          const Operand& x) {
   CheckBuffer();
   ASSERT((instr & ~(kCondMask | kOpCodeMask | S)) == 0);
   if (!x.rm_.is_valid()) {
     // Immediate.
     uint32_t rotate_imm;
     uint32_t immed_8;
-    if (x.must_use_constant_pool(this) ||
+    if (x.must_output_reloc_info(this) ||
         !fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) {
       // The immediate operand cannot be encoded as a shifter operand, so load
       // it first to register ip and change the original instruction to use ip.
       // However, if the original instruction is a 'mov rd, x' (not setting the
       // condition code), then replace it with a 'ldr rd, [pc]'.
       CHECK(!rn.is(ip));  // rn should never be ip, or will be trashed
       Condition cond = Instruction::ConditionField(instr);
       if ((instr & ~kCondMask) == 13*B21) {  // mov, S not set
-        if (x.must_use_constant_pool(this) ||
-            !CpuFeatures::IsSupported(ARMv7)) {
-          RecordRelocInfo(x.rmode_, x.imm32_);
-          ldr(rd, MemOperand(pc, 0), cond);
-        } else {
-          // Will probably use movw, will certainly not use constant pool.
-          mov(rd, Operand(x.imm32_ & 0xffff), LeaveCC, cond);
-          movt(rd, static_cast<uint32_t>(x.imm32_) >> 16, cond);
-        }
+        move_32_bit_immediate(cond, rd, LeaveCC, x);
       } else {
         // If this is not a mov or mvn instruction we may still be able to avoid
         // a constant pool entry by using mvn or movw.
-        if (!x.must_use_constant_pool(this) &&
+        if (!x.must_output_reloc_info(this) &&
             (instr & kMovMvnMask) != kMovMvnPattern) {
           mov(ip, x, LeaveCC, cond);
         } else {
-          RecordRelocInfo(x.rmode_, x.imm32_);
-          ldr(ip, MemOperand(pc, 0), cond);
+          move_32_bit_immediate(cond, ip,
+                                static_cast<SBit>(instr & (1 << 20)), x);
         }
         addrmod1(instr, rn, rd, Operand(ip));
       }
       return;
     }
     instr |= I | rotate_imm*B8 | immed_8;
   } else if (!x.rs_.is_valid()) {
     // Immediate shift.
     instr |= x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
   } else {
(...skipping 286 matching lines...)
   // Don't allow nop instructions in the form mov rn, rn to be generated using
   // the mov instruction. They must be generated using nop(int/NopMarkerTypes)
   // or MarkCode(int/NopMarkerTypes) pseudo instructions.
   ASSERT(!(src.is_reg() && src.rm().is(dst) && s == LeaveCC && cond == al));
   addrmod1(cond | MOV | s, r0, dst, src);
 }


 void Assembler::movw(Register reg, uint32_t immediate, Condition cond) {
   ASSERT(immediate < 0x10000);
+  // May use movw if supported, but on unsupported platforms will try to use an
+  // equivalent rotated immed_8 value and other tricks before falling back to a
+  // constant pool load.
   mov(reg, Operand(immediate), LeaveCC, cond);
 }


 void Assembler::movt(Register reg, uint32_t immediate, Condition cond) {
   emit(cond | 0x34*B20 | reg.code()*B12 | EncodeMovwImmediate(immediate));
 }


 void Assembler::bic(Register dst, Register src1, const Operand& src2,
(...skipping 209 matching lines...)


 void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
                     Condition cond) {
   ASSERT(fields >= B16 && fields < B20);  // at least one field set
   Instr instr;
   if (!src.rm_.is_valid()) {
     // Immediate.
     uint32_t rotate_imm;
     uint32_t immed_8;
-    if (src.must_use_constant_pool(this) ||
+    if (src.must_output_reloc_info(this) ||
         !fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
       // Immediate operand cannot be encoded, load it first to register ip.
       RecordRelocInfo(src.rmode_, src.imm32_);
       ldr(ip, MemOperand(pc, 0), cond);
       msr(fields, Operand(ip), cond);
       return;
     }
     instr = I | rotate_imm*B8 | immed_8;
   } else {
     ASSERT(!src.rs_.is_valid() && src.shift_imm_ == 0);  // only rm allowed
(...skipping 1007 matching lines...)
   // ARMv6{K/T2} and v7 have an actual NOP instruction but it serializes
   // some of the CPU's pipeline and has to issue. Older ARM chips simply used
   // MOV Rx, Rx as NOP and it performs better even in newer CPUs.
   // We therefore use MOV Rx, Rx, even on newer CPUs, and use Rx to encode
   // a type.
   ASSERT(0 <= type && type <= 14);  // mov pc, pc isn't a nop.
   emit(al | 13*B21 | type*B12 | type);
 }


+bool Assembler::IsMovT(Instr instr) {
+  instr &= ~(((kNumberOfConditions - 1) << 28) |  // Mask off conditions
+             ((kNumRegisters-1)*B12) |            // mask out register
+             EncodeMovwImmediate(0xFFFF));        // mask out immediate value
+  return instr == 0x34*B20;
+}
+
+
+bool Assembler::IsMovW(Instr instr) {
+  instr &= ~(((kNumberOfConditions - 1) << 28) |  // Mask off conditions
+             ((kNumRegisters-1)*B12) |            // mask out destination
+             EncodeMovwImmediate(0xFFFF));        // mask out immediate value
+  return instr == 0x30*B20;
+}
+
+
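Both recognizers work the same way: clear every field that varies between
instances (condition, destination register, split immediate) and compare the
remainder against the fixed opcode bits, 0x30 << 20 for movw and 0x34 << 20
for movt. A standalone sketch with the field masks written out (constants
taken from the ARM encoding, not from V8 headers):

  #include <cstdint>

  static const uint32_t kCondFieldMask = 0xFu << 28;             // bits 31:28
  static const uint32_t kRdFieldMask   = 0xFu << 12;             // bits 15:12
  static const uint32_t kImmFieldMask  = (0xFu << 16) | 0xFFFu;  // imm4:imm12

  static bool IsMovWSketch(uint32_t instr) {
    return (instr & ~(kCondFieldMask | kRdFieldMask | kImmFieldMask)) ==
           (0x30u << 20);
  }

  // E.g. 0xE30B0EEF ("movw r0, #0xbeef", cond = al) satisfies IsMovWSketch.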
 bool Assembler::IsNop(Instr instr, int type) {
   ASSERT(0 <= type && type <= 14);  // mov pc, pc isn't a nop.
   // Check for mov rx, rx where x = type.
   return instr == (al | 13*B21 | type*B12 | type);
 }


 bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) {
   uint32_t dummy1;
   uint32_t dummy2;
(...skipping 98 matching lines...)
   // No relocation info should be pending while using dd. dd is used
   // to write pure data with no pointers and the constant pool should
   // be emitted before using dd.
   ASSERT(num_pending_reloc_info_ == 0);
   CheckBuffer();
   *reinterpret_cast<uint32_t*>(pc_) = data;
   pc_ += sizeof(uint32_t);
 }


-void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
+void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data,
+                                UseConstantPoolMode mode) {
   // We do not try to reuse pool constants.
   RelocInfo rinfo(pc_, rmode, data, NULL);
   if (((rmode >= RelocInfo::JS_RETURN) &&
       (rmode <= RelocInfo::DEBUG_BREAK_SLOT)) ||
-      (rmode == RelocInfo::CONST_POOL)) {
+      (rmode == RelocInfo::CONST_POOL) ||
+      mode == DONT_USE_CONSTANT_POOL) {
     // Adjust code for new modes.
     ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
            || RelocInfo::IsJSReturn(rmode)
            || RelocInfo::IsComment(rmode)
            || RelocInfo::IsPosition(rmode)
-           || RelocInfo::IsConstPool(rmode));
+           || RelocInfo::IsConstPool(rmode)
+           || mode == DONT_USE_CONSTANT_POOL);
     // These modes do not need an entry in the constant pool.
   } else {
     ASSERT(num_pending_reloc_info_ < kMaxNumPendingRelocInfo);
     if (num_pending_reloc_info_ == 0) {
       first_const_pool_use_ = pc_offset();
     }
     pending_reloc_info_[num_pending_reloc_info_++] = rinfo;
     // Make sure the constant pool is not emitted in place of the next
     // instruction for which we just recorded relocation info.
     BlockConstPoolFor(1);
(...skipping 98 matching lines...)
   // Emit constant pool entries.
   for (int i = 0; i < num_pending_reloc_info_; i++) {
     RelocInfo& rinfo = pending_reloc_info_[i];
     ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
            rinfo.rmode() != RelocInfo::POSITION &&
            rinfo.rmode() != RelocInfo::STATEMENT_POSITION &&
            rinfo.rmode() != RelocInfo::CONST_POOL);

     Instr instr = instr_at(rinfo.pc());
     // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
-    ASSERT(IsLdrPcImmediateOffset(instr) &&
-           GetLdrRegisterImmediateOffset(instr) == 0);
-
-    int delta = pc_ - rinfo.pc() - kPcLoadDelta;
-    // 0 is the smallest delta:
-    //   ldr rd, [pc, #0]
-    //   constant pool marker
-    //   data
-    ASSERT(is_uint12(delta));
-
-    instr_at_put(rinfo.pc(), SetLdrRegisterImmediateOffset(instr, delta));
+    if (IsLdrPcImmediateOffset(instr) &&
+        GetLdrRegisterImmediateOffset(instr) == 0) {
+      int delta = pc_ - rinfo.pc() - kPcLoadDelta;
+      // 0 is the smallest delta:
+      //   ldr rd, [pc, #0]
+      //   constant pool marker
+      //   data
+      ASSERT(is_uint12(delta));
+
+      instr_at_put(rinfo.pc(), SetLdrRegisterImmediateOffset(instr, delta));
+    } else {
+      ASSERT(IsMovW(instr));
+    }
     emit(rinfo.data());
   }

   num_pending_reloc_info_ = 0;
   first_const_pool_use_ = -1;

   RecordComment("]");

   if (after_pool.is_linked()) {
     bind(&after_pool);
   }
 }

   // Since a constant pool was just emitted, move the check offset forward by
   // the standard interval.
   next_buffer_check_ = pc_offset() + kCheckPoolInterval;
 }
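The ldr patching in this loop relies on the ARM convention that a pc read
observes the address of the current instruction plus 8 bytes (kPcLoadDelta),
and the is_uint12 check enforces ldr's 12-bit immediate-offset limit. A worked
sketch of the delta computation with made-up addresses (not V8 code):

  #include <cassert>
  #include <cstdint>

  int main() {
    // Hypothetical layout: the ldr being patched sits at 0x1000 and its
    // constant pool entry is written at 0x1010.
    uint32_t ldr_pc = 0x1000;    // rinfo.pc()
    uint32_t entry_pc = 0x1010;  // pc_ when the pool entry is emitted
    const int kPcLoadDeltaSketch = 8;  // pc reads as instruction address + 8
    int delta = entry_pc - ldr_pc - kPcLoadDeltaSketch;  // 8
    assert(0 <= delta && delta < 4096);  // must fit ldr's 12-bit offset
    // The instruction is rewritten to: ldr rd, [pc, #8]
    return 0;
  }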


 } }  // namespace v8::internal

 #endif  // V8_TARGET_ARCH_ARM