OLD | NEW |
1 // Copyright (c) 1994-2006 Sun Microsystems Inc. | 1 // Copyright (c) 1994-2006 Sun Microsystems Inc. |
2 // All Rights Reserved. | 2 // All Rights Reserved. |
3 // | 3 // |
4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
5 // modification, are permitted provided that the following conditions | 5 // modification, are permitted provided that the following conditions |
6 // are met: | 6 // are met: |
7 // | 7 // |
8 // - Redistributions of source code must retain the above copyright notice, | 8 // - Redistributions of source code must retain the above copyright notice, |
9 // this list of conditions and the following disclaimer. | 9 // this list of conditions and the following disclaimer. |
10 // | 10 // |
(...skipping 1036 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1047 #endif // def DEBUG | 1047 #endif // def DEBUG |
1048 if (assembler != NULL && assembler->predictable_code_size()) return true; | 1048 if (assembler != NULL && assembler->predictable_code_size()) return true; |
1049 return Serializer::enabled(); | 1049 return Serializer::enabled(); |
1050 } else if (RelocInfo::IsNone(rmode_)) { | 1050 } else if (RelocInfo::IsNone(rmode_)) { |
1051 return false; | 1051 return false; |
1052 } | 1052 } |
1053 return true; | 1053 return true; |
1054 } | 1054 } |
1055 | 1055 |
1056 | 1056 |
1057 static bool use_movw_movt(const Operand& x, const Assembler* assembler) { | 1057 static bool use_mov_immediate_load(const Operand& x, |
1058 if (Assembler::use_immediate_embedded_pointer_loads(assembler)) { | 1058 const Assembler* assembler) { |
| 1059 if (CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) && |
| 1060 (assembler == NULL || !assembler->predictable_code_size())) { |
| 1061 // Prefer movw / movt to constant pool if it is more efficient on the CPU. |
1059 return true; | 1062 return true; |
| 1063 } else if (x.must_output_reloc_info(assembler)) { |
| 1064 // Prefer constant pool if data is likely to be patched. |
| 1065 return false; |
| 1066 } else { |
| 1067 // Otherwise, use immediate load if movw / movt is available. |
| 1068 return CpuFeatures::IsSupported(ARMv7); |
1060 } | 1069 } |
1061 if (x.must_output_reloc_info(assembler)) { | |
1062 return false; | |
1063 } | |
1064 return CpuFeatures::IsSupported(ARMv7); | |
1065 } | 1070 } |
1066 | 1071 |
1067 | 1072 |
1068 bool Operand::is_single_instruction(const Assembler* assembler, | 1073 bool Operand::is_single_instruction(const Assembler* assembler, |
1069 Instr instr) const { | 1074 Instr instr) const { |
1070 if (rm_.is_valid()) return true; | 1075 if (rm_.is_valid()) return true; |
1071 uint32_t dummy1, dummy2; | 1076 uint32_t dummy1, dummy2; |
1072 if (must_output_reloc_info(assembler) || | 1077 if (must_output_reloc_info(assembler) || |
1073 !fits_shifter(imm32_, &dummy1, &dummy2, &instr)) { | 1078 !fits_shifter(imm32_, &dummy1, &dummy2, &instr)) { |
1074 // The immediate operand cannot be encoded as a shifter operand, or use of | 1079 // The immediate operand cannot be encoded as a shifter operand, or use of |
1075 // constant pool is required. For a mov instruction not setting the | 1080 // constant pool is required. For a mov instruction not setting the |
1076 // condition code additional instruction conventions can be used. | 1081 // condition code additional instruction conventions can be used. |
1077 if ((instr & ~kCondMask) == 13*B21) { // mov, S not set | 1082 if ((instr & ~kCondMask) == 13*B21) { // mov, S not set |
1078 return !use_movw_movt(*this, assembler); | 1083 return !use_mov_immediate_load(*this, assembler); |
1079 } else { | 1084 } else { |
1080 // If this is not a mov or mvn instruction there will always be an | 1085 // If this is not a mov or mvn instruction there will always be an |
1081 // additional instruction - either mov or ldr. The mov might actually be | 1086 // additional instruction - either mov or ldr. The mov might actually be |
1082 // instructions mov or movw followed by movt so including the actual | 1087 // instructions mov or movw followed by movt so including the actual |
1083 // instruction two or three instructions will be generated. | 1088 // instruction two or three instructions will be generated. |
1084 return false; | 1089 return false; |
1085 } | 1090 } |
1086 } else { | 1091 } else { |
1087 // No use of constant pool and the immediate operand can be encoded as a | 1092 // No use of constant pool and the immediate operand can be encoded as a |
1088 // shifter operand. | 1093 // shifter operand. |
1089 return true; | 1094 return true; |
1090 } | 1095 } |
1091 } | 1096 } |
1092 | 1097 |
1093 | 1098 |
1094 void Assembler::move_32_bit_immediate(Condition cond, | 1099 void Assembler::move_32_bit_immediate(Register rd, |
1095 Register rd, | 1100 const Operand& x, |
1096 SBit s, | 1101 Condition cond) { |
1097 const Operand& x) { | 1102 if (rd.code() != pc.code()) { |
1098 if (rd.code() != pc.code() && s == LeaveCC) { | 1103 if (use_mov_immediate_load(x, this)) { |
1099 if (use_movw_movt(x, this)) { | |
1100 if (x.must_output_reloc_info(this)) { | 1104 if (x.must_output_reloc_info(this)) { |
1101 RecordRelocInfo(x.rmode_, x.imm32_, DONT_USE_CONSTANT_POOL); | 1105 RecordRelocInfo(x.rmode_, x.imm32_, DONT_USE_CONSTANT_POOL); |
1102 // Make sure the movw/movt doesn't get separated. | 1106 // Make sure the movw/movt doesn't get separated. |
1103 BlockConstPoolFor(2); | 1107 BlockConstPoolFor(2); |
1104 } | 1108 } |
1105 emit(cond | 0x30*B20 | rd.code()*B12 | | 1109 emit(cond | 0x30*B20 | rd.code()*B12 | |
1106 EncodeMovwImmediate(x.imm32_ & 0xffff)); | 1110 EncodeMovwImmediate(x.imm32_ & 0xffff)); |
1107 movt(rd, static_cast<uint32_t>(x.imm32_) >> 16, cond); | 1111 movt(rd, static_cast<uint32_t>(x.imm32_) >> 16, cond); |
1108 return; | 1112 return; |
1109 } | 1113 } |
(...skipping 16 matching lines...) Expand all Loading... |
1126 uint32_t immed_8; | 1130 uint32_t immed_8; |
1127 if (x.must_output_reloc_info(this) || | 1131 if (x.must_output_reloc_info(this) || |
1128 !fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) { | 1132 !fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) { |
1129 // The immediate operand cannot be encoded as a shifter operand, so load | 1133 // The immediate operand cannot be encoded as a shifter operand, so load |
1130 // it first to register ip and change the original instruction to use ip. | 1134 // it first to register ip and change the original instruction to use ip. |
1131 // However, if the original instruction is a 'mov rd, x' (not setting the | 1135 // However, if the original instruction is a 'mov rd, x' (not setting the |
1132 // condition code), then replace it with a 'ldr rd, [pc]'. | 1136 // condition code), then replace it with a 'ldr rd, [pc]'. |
1133 CHECK(!rn.is(ip)); // rn should never be ip, or will be trashed | 1137 CHECK(!rn.is(ip)); // rn should never be ip, or will be trashed |
1134 Condition cond = Instruction::ConditionField(instr); | 1138 Condition cond = Instruction::ConditionField(instr); |
1135 if ((instr & ~kCondMask) == 13*B21) { // mov, S not set | 1139 if ((instr & ~kCondMask) == 13*B21) { // mov, S not set |
1136 move_32_bit_immediate(cond, rd, LeaveCC, x); | 1140 move_32_bit_immediate(rd, x, cond); |
1137 } else { | 1141 } else { |
1138 if ((instr & kMovMvnMask) == kMovMvnPattern) { | 1142 mov(ip, x, LeaveCC, cond); |
1139 // Moves need to use a constant pool entry. | |
1140 RecordRelocInfo(x.rmode_, x.imm32_, USE_CONSTANT_POOL); | |
1141 ldr(ip, MemOperand(pc, 0), cond); | |
1142 } else if (x.must_output_reloc_info(this)) { | |
1143 // Otherwise, use most efficient form of fetching from constant pool. | |
1144 move_32_bit_immediate(cond, ip, LeaveCC, x); | |
1145 } else { | |
1146 // If this is not a mov or mvn instruction we may still be able to | |
1147 // avoid a constant pool entry by using mvn or movw. | |
1148 mov(ip, x, LeaveCC, cond); | |
1149 } | |
1150 addrmod1(instr, rn, rd, Operand(ip)); | 1143 addrmod1(instr, rn, rd, Operand(ip)); |
1151 } | 1144 } |
1152 return; | 1145 return; |
1153 } | 1146 } |
1154 instr |= I | rotate_imm*B8 | immed_8; | 1147 instr |= I | rotate_imm*B8 | immed_8; |
1155 } else if (!x.rs_.is_valid()) { | 1148 } else if (!x.rs_.is_valid()) { |
1156 // Immediate shift. | 1149 // Immediate shift. |
1157 instr |= x.shift_imm_*B7 | x.shift_op_ | x.rm_.code(); | 1150 instr |= x.shift_imm_*B7 | x.shift_op_ | x.rm_.code(); |
1158 } else { | 1151 } else { |
1159 // Register shift. | 1152 // Register shift. |
(...skipping 647 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1807 Condition cond) { | 1800 Condition cond) { |
1808 ASSERT(fields >= B16 && fields < B20); // at least one field set | 1801 ASSERT(fields >= B16 && fields < B20); // at least one field set |
1809 Instr instr; | 1802 Instr instr; |
1810 if (!src.rm_.is_valid()) { | 1803 if (!src.rm_.is_valid()) { |
1811 // Immediate. | 1804 // Immediate. |
1812 uint32_t rotate_imm; | 1805 uint32_t rotate_imm; |
1813 uint32_t immed_8; | 1806 uint32_t immed_8; |
1814 if (src.must_output_reloc_info(this) || | 1807 if (src.must_output_reloc_info(this) || |
1815 !fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) { | 1808 !fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) { |
1816 // Immediate operand cannot be encoded, load it first to register ip. | 1809 // Immediate operand cannot be encoded, load it first to register ip. |
1817 RecordRelocInfo(src.rmode_, src.imm32_); | 1810 move_32_bit_immediate(ip, src); |
1818 ldr(ip, MemOperand(pc, 0), cond); | |
1819 msr(fields, Operand(ip), cond); | 1811 msr(fields, Operand(ip), cond); |
1820 return; | 1812 return; |
1821 } | 1813 } |
1822 instr = I | rotate_imm*B8 | immed_8; | 1814 instr = I | rotate_imm*B8 | immed_8; |
1823 } else { | 1815 } else { |
1824 ASSERT(!src.rs_.is_valid() && src.shift_imm_ == 0); // only rm allowed | 1816 ASSERT(!src.rs_.is_valid() && src.shift_imm_ == 0); // only rm allowed |
1825 instr = src.rm_.code(); | 1817 instr = src.rm_.code(); |
1826 } | 1818 } |
1827 emit(cond | instr | B24 | B21 | fields | 15*B12); | 1819 emit(cond | instr | B24 | B21 | fields | 15*B12); |
1828 } | 1820 } |
(...skipping 1663 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3492 | 3484 |
3493 // Since a constant pool was just emitted, move the check offset forward by | 3485 // Since a constant pool was just emitted, move the check offset forward by |
3494 // the standard interval. | 3486 // the standard interval. |
3495 next_buffer_check_ = pc_offset() + kCheckPoolInterval; | 3487 next_buffer_check_ = pc_offset() + kCheckPoolInterval; |
3496 } | 3488 } |
3497 | 3489 |
3498 | 3490 |
3499 } } // namespace v8::internal | 3491 } } // namespace v8::internal |
3500 | 3492 |
3501 #endif // V8_TARGET_ARCH_ARM | 3493 #endif // V8_TARGET_ARCH_ARM |
OLD | NEW |