| OLD | NEW |
| 1 // Copyright (c) 1994-2006 Sun Microsystems Inc. | 1 // Copyright (c) 1994-2006 Sun Microsystems Inc. |
| 2 // All Rights Reserved. | 2 // All Rights Reserved. |
| 3 // | 3 // |
| 4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
| 5 // modification, are permitted provided that the following conditions | 5 // modification, are permitted provided that the following conditions |
| 6 // are met: | 6 // are met: |
| 7 // | 7 // |
| 8 // - Redistributions of source code must retain the above copyright notice, | 8 // - Redistributions of source code must retain the above copyright notice, |
| 9 // this list of conditions and the following disclaimer. | 9 // this list of conditions and the following disclaimer. |
| 10 // | 10 // |
| (...skipping 189 matching lines...) |
| 200 // Only use statically determined features for cross compile (snapshot). | 200 // Only use statically determined features for cross compile (snapshot). |
| 201 if (cross_compile) { | 201 if (cross_compile) { |
| 202 supported_ |= command_line & CpuFeaturesFromCompiler(); | 202 supported_ |= command_line & CpuFeaturesFromCompiler(); |
| 203 return; | 203 return; |
| 204 } | 204 } |
| 205 | 205 |
| 206 #ifndef __arm__ | 206 #ifndef __arm__ |
| 207 // For the simulator build, use whatever the flags specify. | 207 // For the simulator build, use whatever the flags specify. |
| 208 supported_ |= command_line; | 208 supported_ |= command_line; |
| 209 | 209 |
| 210 if (FLAG_enable_movw_movt && ((supported_ & kArmv7) == kArmv7)) { | |
| 211 supported_ |= 1u << MOVW_MOVT_IMMEDIATE_LOADS; | |
| 212 } | |
| 213 | |
| 214 #else // __arm__ | 210 #else // __arm__ |
| 215 // Probe for additional features at runtime. | 211 // Probe for additional features at runtime. |
| 216 base::CPU cpu; | 212 base::CPU cpu; |
| 217 // Runtime detection is slightly fuzzy, and some inferences are necessary. | 213 // Runtime detection is slightly fuzzy, and some inferences are necessary. |
| 218 unsigned runtime = kArmv6; | 214 unsigned runtime = kArmv6; |
| 219 // NEON and VFPv3 imply at least ARMv7-A. | 215 // NEON and VFPv3 imply at least ARMv7-A. |
| 220 if (cpu.has_neon() && cpu.has_vfp3_d32()) { | 216 if (cpu.has_neon() && cpu.has_vfp3_d32()) { |
| 221 DCHECK(cpu.has_vfp3()); | 217 DCHECK(cpu.has_vfp3()); |
| 222 runtime |= kArmv7; | 218 runtime |= kArmv7; |
| 223 if (cpu.has_idiva()) { | 219 if (cpu.has_idiva()) { |
| 224 runtime |= kArmv7WithSudiv; | 220 runtime |= kArmv7WithSudiv; |
| 225 if (cpu.architecture() >= 8) { | 221 if (cpu.architecture() >= 8) { |
| 226 runtime |= kArmv8; | 222 runtime |= kArmv8; |
| 227 } | 223 } |
| 228 } | 224 } |
| 229 } | 225 } |
| 230 | 226 |
| 231 // Use the best of the features found by CPU detection and those inferred from | 227 // Use the best of the features found by CPU detection and those inferred from |
| 232 // the build system. In both cases, restrict available features using the | 228 // the build system. In both cases, restrict available features using the |
| 233 // command-line. Note that the command-line flags are very permissive (kArmv8) | 229 // command-line. Note that the command-line flags are very permissive (kArmv8) |
| 234 // by default. | 230 // by default. |
| 235 supported_ |= command_line & CpuFeaturesFromCompiler(); | 231 supported_ |= command_line & CpuFeaturesFromCompiler(); |
| 236 supported_ |= command_line & runtime; | 232 supported_ |= command_line & runtime; |
| 237 | 233 |
| 238 // Additional tuning options. | 234 // Additional tuning options. |
| 239 | 235 |
| 240 // Prefer to use movw/movt for QUALCOMM ARMv7 cores. | |
| 241 if (FLAG_enable_movw_movt && ((supported_ & kArmv7) == kArmv7) && | |
| 242 (cpu.implementer() == base::CPU::QUALCOMM)) { | |
| 243 supported_ |= 1u << MOVW_MOVT_IMMEDIATE_LOADS; | |
| 244 } | |
| 245 | |
| 246 // ARM Cortex-A9 and Cortex-A5 have 32 byte cachelines. | 236 // ARM Cortex-A9 and Cortex-A5 have 32 byte cachelines. |
| 247 if (cpu.implementer() == base::CPU::ARM && | 237 if (cpu.implementer() == base::CPU::ARM && |
| 248 (cpu.part() == base::CPU::ARM_CORTEX_A5 || | 238 (cpu.part() == base::CPU::ARM_CORTEX_A5 || |
| 249 cpu.part() == base::CPU::ARM_CORTEX_A9)) { | 239 cpu.part() == base::CPU::ARM_CORTEX_A9)) { |
| 250 dcache_line_size_ = 32; | 240 dcache_line_size_ = 32; |
| 251 } | 241 } |
| 252 #endif | 242 #endif |
| 253 | 243 |
| 254 DCHECK_IMPLIES(IsSupported(ARMv7_SUDIV), IsSupported(ARMv7)); | 244 DCHECK_IMPLIES(IsSupported(ARMv7_SUDIV), IsSupported(ARMv7)); |
| 255 DCHECK_IMPLIES(IsSupported(ARMv8), IsSupported(ARMv7_SUDIV)); | 245 DCHECK_IMPLIES(IsSupported(ARMv8), IsSupported(ARMv7_SUDIV)); |
| 256 DCHECK_IMPLIES(IsSupported(MOVW_MOVT_IMMEDIATE_LOADS), IsSupported(ARMv7)); | |
| 257 } | 246 } |
| 258 | 247 |
| 259 | 248 |
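The probe above folds three feature sources into one bitmask: features implied by the compiler configuration, features detected at runtime, and both of them gated by the command-line mask (which is permissive by default). Below is a minimal standalone sketch of that pattern, not the V8 sources; the feature bits and detection functions are illustrative stand-ins for kArmv7-style constants and CpuFeaturesFromCompiler().

```cpp
// Sketch only: combine build-time and runtime feature bits, both gated by the
// command-line mask, the same way ProbeImpl() above does.
#include <cstdint>
#include <cstdio>

namespace {
// Hypothetical feature bits (not the real V8 constants).
constexpr uint32_t kVfp3  = 1u << 0;
constexpr uint32_t kNeon  = 1u << 1;
constexpr uint32_t kSudiv = 1u << 2;

uint32_t FeaturesFromCompiler() {
  return kVfp3;                    // pretend the build targeted VFPv3 only
}

uint32_t FeaturesFromRuntimeProbe() {
  return kVfp3 | kNeon | kSudiv;   // pretend the CPU also reports NEON and SDIV/UDIV
}
}  // namespace

int main() {
  const uint32_t command_line = kVfp3 | kNeon | kSudiv;      // permissive default
  uint32_t supported = 0;
  supported |= command_line & FeaturesFromCompiler();        // build-time features
  supported |= command_line & FeaturesFromRuntimeProbe();    // runtime features
  std::printf("supported mask: 0x%x\n", static_cast<unsigned>(supported));
  return 0;
}
```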
| 260 void CpuFeatures::PrintTarget() { | 249 void CpuFeatures::PrintTarget() { |
| 261 const char* arm_arch = NULL; | 250 const char* arm_arch = NULL; |
| 262 const char* arm_target_type = ""; | 251 const char* arm_target_type = ""; |
| 263 const char* arm_no_probe = ""; | 252 const char* arm_no_probe = ""; |
| 264 const char* arm_fpu = ""; | 253 const char* arm_fpu = ""; |
| 265 const char* arm_thumb = ""; | 254 const char* arm_thumb = ""; |
| 266 const char* arm_float_abi = NULL; | 255 const char* arm_float_abi = NULL; |
| (...skipping 38 matching lines...) |
| 305 arm_thumb = " thumb"; | 294 arm_thumb = " thumb"; |
| 306 #endif | 295 #endif |
| 307 | 296 |
| 308 printf("target%s%s %s%s%s %s\n", | 297 printf("target%s%s %s%s%s %s\n", |
| 309 arm_target_type, arm_no_probe, arm_arch, arm_fpu, arm_thumb, | 298 arm_target_type, arm_no_probe, arm_arch, arm_fpu, arm_thumb, |
| 310 arm_float_abi); | 299 arm_float_abi); |
| 311 } | 300 } |
| 312 | 301 |
| 313 | 302 |
| 314 void CpuFeatures::PrintFeatures() { | 303 void CpuFeatures::PrintFeatures() { |
| 315 printf( | 304 printf("ARMv8=%d ARMv7=%d VFPv3=%d VFP32DREGS=%d NEON=%d SUDIV=%d", |
| 316 "ARMv8=%d ARMv7=%d VFPv3=%d VFP32DREGS=%d NEON=%d SUDIV=%d " | 305 CpuFeatures::IsSupported(ARMv8), CpuFeatures::IsSupported(ARMv7), |
| 317 "MOVW_MOVT_IMMEDIATE_LOADS=%d", | 306 CpuFeatures::IsSupported(VFPv3), CpuFeatures::IsSupported(VFP32DREGS), |
| 318 CpuFeatures::IsSupported(ARMv8), CpuFeatures::IsSupported(ARMv7), | 307 CpuFeatures::IsSupported(NEON), CpuFeatures::IsSupported(SUDIV)); |
| 319 CpuFeatures::IsSupported(VFPv3), CpuFeatures::IsSupported(VFP32DREGS), | |
| 320 CpuFeatures::IsSupported(NEON), CpuFeatures::IsSupported(SUDIV), | |
| 321 CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS)); | |
| 322 #ifdef __arm__ | 308 #ifdef __arm__ |
| 323 bool eabi_hardfloat = base::OS::ArmUsingHardFloat(); | 309 bool eabi_hardfloat = base::OS::ArmUsingHardFloat(); |
| 324 #elif USE_EABI_HARDFLOAT | 310 #elif USE_EABI_HARDFLOAT |
| 325 bool eabi_hardfloat = true; | 311 bool eabi_hardfloat = true; |
| 326 #else | 312 #else |
| 327 bool eabi_hardfloat = false; | 313 bool eabi_hardfloat = false; |
| 328 #endif | 314 #endif |
| 329 printf(" USE_EABI_HARDFLOAT=%d\n", eabi_hardfloat); | 315 printf(" USE_EABI_HARDFLOAT=%d\n", eabi_hardfloat); |
| 330 } | 316 } |
| 331 | 317 |
| (...skipping 861 matching lines...) |
| 1193 return assembler->serializer_enabled(); | 1179 return assembler->serializer_enabled(); |
| 1194 } else if (RelocInfo::IsNone(rmode_)) { | 1180 } else if (RelocInfo::IsNone(rmode_)) { |
| 1195 return false; | 1181 return false; |
| 1196 } | 1182 } |
| 1197 return true; | 1183 return true; |
| 1198 } | 1184 } |
| 1199 | 1185 |
| 1200 | 1186 |
| 1201 static bool use_mov_immediate_load(const Operand& x, | 1187 static bool use_mov_immediate_load(const Operand& x, |
| 1202 const Assembler* assembler) { | 1188 const Assembler* assembler) { |
| 1203 if (FLAG_enable_embedded_constant_pool && assembler != NULL && | 1189 DCHECK(assembler != nullptr); |
| 1190 if (FLAG_enable_embedded_constant_pool && |
| 1204 !assembler->is_constant_pool_available()) { | 1191 !assembler->is_constant_pool_available()) { |
| 1205 return true; | 1192 return true; |
| 1206 } else if (CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) && | |
| 1207 (assembler == NULL || !assembler->predictable_code_size())) { | |
| 1208 // Prefer movw / movt to constant pool if it is more efficient on the CPU. | |
| 1209 return true; | |
| 1210 } else if (x.must_output_reloc_info(assembler)) { | 1193 } else if (x.must_output_reloc_info(assembler)) { |
| 1211 // Prefer constant pool if data is likely to be patched. | 1194 // Prefer constant pool if data is likely to be patched. |
| 1212 return false; | 1195 return false; |
| 1213 } else { | 1196 } else { |
| 1214 // Otherwise, use immediate load if movw / movt is available. | 1197 // Otherwise, use immediate load if movw / movt is available. |
| 1215 return CpuFeatures::IsSupported(ARMv7); | 1198 return CpuFeatures::IsSupported(ARMv7); |
| 1216 } | 1199 } |
| 1217 } | 1200 } |
| 1218 | 1201 |
| 1219 | 1202 |
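With the MOVW_MOVT_IMMEDIATE_LOADS tuning bit gone, use_mov_immediate_load reduces to three checks in order: use movw/movt when no embedded constant pool is available, prefer the constant pool for data that is likely to be patched, and otherwise use movw/movt only if ARMv7 is supported. The following is a hedged standalone sketch of that decision order with stand-in boolean parameters, not the real Operand/Assembler interfaces.

```cpp
// Sketch only: the decision order of use_mov_immediate_load() after this change.
#include <cstdio>

bool UseMovImmediateLoad(bool embedded_pool_unavailable,
                         bool needs_reloc_info,
                         bool have_armv7) {
  if (embedded_pool_unavailable) return true;  // no constant pool to load from
  if (needs_reloc_info) return false;          // patchable data -> constant pool
  return have_armv7;                           // movw/movt only exists on ARMv7+
}

int main() {
  std::printf("%d %d %d\n",
              UseMovImmediateLoad(false, false, true),    // 1: movw/movt
              UseMovImmediateLoad(false, true, true),     // 0: constant pool
              UseMovImmediateLoad(false, false, false));  // 0: pre-ARMv7
  return 0;
}
```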
| 1220 int Operand::instructions_required(const Assembler* assembler, | 1203 int Operand::instructions_required(const Assembler* assembler, |
| 1221 Instr instr) const { | 1204 Instr instr) const { |
| 1205 DCHECK(assembler != nullptr); |
| 1222 if (rm_.is_valid()) return 1; | 1206 if (rm_.is_valid()) return 1; |
| 1223 uint32_t dummy1, dummy2; | 1207 uint32_t dummy1, dummy2; |
| 1224 if (must_output_reloc_info(assembler) || | 1208 if (must_output_reloc_info(assembler) || |
| 1225 !fits_shifter(imm32_, &dummy1, &dummy2, &instr)) { | 1209 !fits_shifter(imm32_, &dummy1, &dummy2, &instr)) { |
| 1226 // The immediate operand cannot be encoded as a shifter operand, or use of | 1210 // The immediate operand cannot be encoded as a shifter operand, or use of |
| 1227 // constant pool is required. First account for the instructions required | 1211 // constant pool is required. First account for the instructions required |
| 1228 // for the constant pool or immediate load | 1212 // for the constant pool or immediate load |
| 1229 int instructions; | 1213 int instructions; |
| 1230 if (use_mov_immediate_load(*this, assembler)) { | 1214 if (use_mov_immediate_load(*this, assembler)) { |
| 1231 // A movw / movt or mov / orr immediate load. | 1215 // A movw / movt or mov / orr immediate load. |
| 1232 instructions = CpuFeatures::IsSupported(ARMv7) ? 2 : 4; | 1216 instructions = CpuFeatures::IsSupported(ARMv7) ? 2 : 4; |
| 1233 } else if (assembler != NULL && | 1217 } else if (assembler->ConstantPoolAccessIsInOverflow()) { |
| 1234 assembler->ConstantPoolAccessIsInOverflow()) { | |
| 1235 // An overflowed constant pool load. | 1218 // An overflowed constant pool load. |
| 1236 instructions = CpuFeatures::IsSupported(ARMv7) ? 3 : 5; | 1219 instructions = CpuFeatures::IsSupported(ARMv7) ? 3 : 5; |
| 1237 } else { | 1220 } else { |
| 1238 // A small constant pool load. | 1221 // A small constant pool load. |
| 1239 instructions = 1; | 1222 instructions = 1; |
| 1240 } | 1223 } |
| 1241 | 1224 |
| 1242 if ((instr & ~kCondMask) != 13 * B21) { // mov, S not set | 1225 if ((instr & ~kCondMask) != 13 * B21) { // mov, S not set |
| 1243 // For a mov or mvn instruction which doesn't set the condition | 1226 // For a mov or mvn instruction which doesn't set the condition |
| 1244 // code, the constant pool or immediate load is enough, otherwise we need | 1227 // code, the constant pool or immediate load is enough, otherwise we need |
| (...skipping 3245 matching lines...) |
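The visible part of instructions_required prices only the immediate load itself: 2 instructions for a movw/movt pair on ARMv7 versus 4 for a mov/orr chain otherwise, 3 or 5 for an overflowed constant pool access, and 1 for a small constant pool load (the remainder of the function is elided in this diff). A small sketch of just that cost table, with stand-in booleans for the checks shown above:

```cpp
// Sketch only: the immediate-load costs counted in the visible branch of
// instructions_required(); the elided tail of the function is not modeled here.
#include <cstdio>

int ImmediateLoadCost(bool use_mov_load, bool pool_overflowed, bool have_armv7) {
  if (use_mov_load) return have_armv7 ? 2 : 4;     // movw/movt vs mov/orr chain
  if (pool_overflowed) return have_armv7 ? 3 : 5;  // overflowed constant pool load
  return 1;                                        // small constant pool load
}

int main() {
  std::printf("ARMv7 mov load: %d\n", ImmediateLoadCost(true, false, true));   // 2
  std::printf("ARMv6 mov load: %d\n", ImmediateLoadCost(true, false, false));  // 4
  std::printf("small pool:     %d\n", ImmediateLoadCost(false, false, true));  // 1
  return 0;
}
```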
| 4490 DCHECK(is_uint12(offset)); | 4473 DCHECK(is_uint12(offset)); |
| 4491 instr_at_put(pc, SetLdrRegisterImmediateOffset(instr, offset)); | 4474 instr_at_put(pc, SetLdrRegisterImmediateOffset(instr, offset)); |
| 4492 } | 4475 } |
| 4493 } | 4476 } |
| 4494 | 4477 |
| 4495 | 4478 |
| 4496 } // namespace internal | 4479 } // namespace internal |
| 4497 } // namespace v8 | 4480 } // namespace v8 |
| 4498 | 4481 |
| 4499 #endif // V8_TARGET_ARCH_ARM | 4482 #endif // V8_TARGET_ARCH_ARM |