Index: src/arm/assembler-arm.cc
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index 35279e557c8a99d8daccf713b4c3be4e1892d8a9..c82eef20a27b1f911c08874bfd00dd06a898c992 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -1054,14 +1054,19 @@ bool Operand::must_output_reloc_info(const Assembler* assembler) const {
 }
 
 
-static bool use_movw_movt(const Operand& x, const Assembler* assembler) {
-  if (Assembler::use_immediate_embedded_pointer_loads(assembler)) {
+static bool use_mov_immediate_load(const Operand& x,
+                                   const Assembler* assembler) {
+  if (CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) &&
+      (assembler == NULL || !assembler->predictable_code_size())) {
+    // Prefer movw / movt to constant pool if it is more efficient on the CPU.
     return true;
-  }
-  if (x.must_output_reloc_info(assembler)) {
+  } else if (x.must_output_reloc_info(assembler)) {
+    // Prefer constant pool if data is likely to be patched.
     return false;
+  } else {
+    // Otherwise, use immediate load if movw / movt is available.
+    return CpuFeatures::IsSupported(ARMv7);
   }
-  return CpuFeatures::IsSupported(ARMv7);
 }
 
 
@@ -1075,7 +1080,7 @@ bool Operand::is_single_instruction(const Assembler* assembler,
     // constant pool is required. For a mov instruction not setting the
     // condition code additional instruction conventions can be used.
     if ((instr & ~kCondMask) == 13*B21) {  // mov, S not set
-      return !use_movw_movt(*this, assembler);
+      return !use_mov_immediate_load(*this, assembler);
     } else {
       // If this is not a mov or mvn instruction there will always an additional
       // instructions - either mov or ldr. The mov might actually be two
@@ -1091,12 +1096,11 @@ bool Operand::is_single_instruction(const Assembler* assembler,
 }
 
 
-void Assembler::move_32_bit_immediate(Condition cond,
-                                      Register rd,
-                                      SBit s,
-                                      const Operand& x) {
-  if (rd.code() != pc.code() && s == LeaveCC) {
-    if (use_movw_movt(x, this)) {
+void Assembler::move_32_bit_immediate(Register rd,
+                                      const Operand& x,
+                                      Condition cond) {
+  if (rd.code() != pc.code()) {
+    if (use_mov_immediate_load(x, this)) {
       if (x.must_output_reloc_info(this)) {
         RecordRelocInfo(x.rmode_, x.imm32_, DONT_USE_CONSTANT_POOL);
         // Make sure the movw/movt doesn't get separated.
@@ -1133,20 +1137,9 @@ void Assembler::addrmod1(Instr instr,
       CHECK(!rn.is(ip));  // rn should never be ip, or will be trashed
       Condition cond = Instruction::ConditionField(instr);
       if ((instr & ~kCondMask) == 13*B21) {  // mov, S not set
-        move_32_bit_immediate(cond, rd, LeaveCC, x);
+        move_32_bit_immediate(rd, x, cond);
       } else {
-        if ((instr & kMovMvnMask) == kMovMvnPattern) {
-          // Moves need to use a constant pool entry.
-          RecordRelocInfo(x.rmode_, x.imm32_, USE_CONSTANT_POOL);
-          ldr(ip, MemOperand(pc, 0), cond);
-        } else if (x.must_output_reloc_info(this)) {
-          // Otherwise, use most efficient form of fetching from constant pool.
-          move_32_bit_immediate(cond, ip, LeaveCC, x);
-        } else {
-          // If this is not a mov or mvn instruction we may still be able to
-          // avoid a constant pool entry by using mvn or movw.
-          mov(ip, x, LeaveCC, cond);
-        }
+        mov(ip, x, LeaveCC, cond);
         addrmod1(instr, rn, rd, Operand(ip));
       }
       return;
@@ -1814,8 +1807,7 @@ void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
     if (src.must_output_reloc_info(this) ||
         !fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
       // Immediate operand cannot be encoded, load it first to register ip.
-      RecordRelocInfo(src.rmode_, src.imm32_);
-      ldr(ip, MemOperand(pc, 0), cond);
+      move_32_bit_immediate(ip, src);
       msr(fields, Operand(ip), cond);
       return;
     }
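
For reference, the decision order introduced by use_mov_immediate_load() above can be summarized outside the assembler. The following is a minimal standalone sketch, not part of the patch: UseMovImmediateLoad and the CpuInfo struct are illustrative stand-ins for V8's CpuFeatures flags and Assembler state, not the real APIs.

// Standalone C++ sketch of the three-way choice made by the patched
// use_mov_immediate_load(); names here are hypothetical stand-ins.
#include <iostream>

struct CpuInfo {
  bool movw_movt_immediate_loads;  // stand-in for MOVW_MOVT_IMMEDIATE_LOADS
  bool armv7;                      // stand-in for ARMv7
};

// 1. movw / movt when the CPU prefers it and predictable code size is not
//    required,
// 2. the constant pool when the value may be patched (needs reloc info),
// 3. otherwise movw / movt only if ARMv7 is available at all.
bool UseMovImmediateLoad(const CpuInfo& cpu,
                         bool predictable_code_size,
                         bool must_output_reloc_info) {
  if (cpu.movw_movt_immediate_loads && !predictable_code_size) {
    return true;       // prefer movw / movt over a constant pool load
  } else if (must_output_reloc_info) {
    return false;      // prefer the constant pool for patchable data
  } else {
    return cpu.armv7;  // immediate load only if movw / movt exists
  }
}

int main() {
  CpuInfo cpu{true, true};
  std::cout << UseMovImmediateLoad(cpu, false, false) << "\n";  // 1: movw / movt
  std::cout << UseMovImmediateLoad(cpu, true, true) << "\n";    // 0: constant pool
  return 0;
}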