| Index: src/arm64/macro-assembler-arm64.cc
|
| diff --git a/src/arm64/macro-assembler-arm64.cc b/src/arm64/macro-assembler-arm64.cc
|
| index 46626308091f6402aa8e07c25fc11808e2f43255..ff5efd531bdd90a3dda059d39fa9139a4a1bc23d 100644
|
| --- a/src/arm64/macro-assembler-arm64.cc
|
| +++ b/src/arm64/macro-assembler-arm64.cc
|
| @@ -124,15 +124,15 @@ void MacroAssembler::LogicalMacro(const Register& rd,
|
| } else {
|
| // Immediate can't be encoded: synthesize using move immediate.
|
| Register temp = temps.AcquireSameSizeAs(rn);
|
| - Mov(temp, immediate);
|
| + Operand imm_operand = MoveImmediateForShiftedOp(temp, immediate);
|
| if (rd.Is(csp)) {
|
| // If rd is the stack pointer we cannot use it as the destination
|
| // register so we use the temp register as an intermediate again.
|
| - Logical(temp, rn, temp, op);
|
| + Logical(temp, rn, imm_operand, op);
|
| Mov(csp, temp);
|
| AssertStackConsistency();
|
| } else {
|
| - Logical(rd, rn, temp, op);
|
| + Logical(rd, rn, imm_operand, op);
|
| }
|
| }
|
|
|
| @@ -178,20 +178,11 @@ void MacroAssembler::Mov(const Register& rd, uint64_t imm) {
|
| // applying move-keep operations to move-zero and move-inverted initial
|
| // values.
|
|
|
| - unsigned reg_size = rd.SizeInBits();
|
| - unsigned n, imm_s, imm_r;
|
| - if (IsImmMovz(imm, reg_size) && !rd.IsSP()) {
|
| - // Immediate can be represented in a move zero instruction. Movz can't
|
| - // write to the stack pointer.
|
| - movz(rd, imm);
|
| - } else if (IsImmMovn(imm, reg_size) && !rd.IsSP()) {
|
| - // Immediate can be represented in a move inverted instruction. Movn can't
|
| - // write to the stack pointer.
|
| - movn(rd, rd.Is64Bits() ? ~imm : (~imm & kWRegMask));
|
| - } else if (IsImmLogical(imm, reg_size, &n, &imm_s, &imm_r)) {
|
| - // Immediate can be represented in a logical orr instruction.
|
| - LogicalImmediate(rd, AppropriateZeroRegFor(rd), n, imm_s, imm_r, ORR);
|
| - } else {
|
| + // Try to move the immediate in one instruction, and if that fails, switch to
|
| + // using multiple instructions.
|
| + if (!TryOneInstrMoveImmediate(rd, imm)) {
|
| + unsigned reg_size = rd.SizeInBits();
|
| +
|
| // Generic immediate case. Imm will be represented by
|
| // [imm3, imm2, imm1, imm0], where each imm is 16 bits.
|
| // A move-zero or move-inverted is generated for the first non-zero or
|
| @@ -419,6 +410,66 @@ void MacroAssembler::Csel(const Register& rd,
|
| }
|
|
|
|
|
| +bool MacroAssembler::TryOneInstrMoveImmediate(const Register& dst,
|
| + int64_t imm) {
|
| + unsigned n, imm_s, imm_r;
|
| + int reg_size = dst.SizeInBits();
|
| + if (IsImmMovz(imm, reg_size) && !dst.IsSP()) {
|
| + // Immediate can be represented in a move zero instruction. Movz can't write
|
| + // to the stack pointer.
|
| + movz(dst, imm);
|
| + return true;
|
| + } else if (IsImmMovn(imm, reg_size) && !dst.IsSP()) {
|
| + // Immediate can be represented in a move inverted (movn) instruction. Movn
|
| + // can't write to the stack pointer.
|
| + movn(dst, dst.Is64Bits() ? ~imm : (~imm & kWRegMask));
|
| + return true;
|
| + } else if (IsImmLogical(imm, reg_size, &n, &imm_s, &imm_r)) {
|
| + // Immediate can be represented in a logical orr instruction.
|
| + LogicalImmediate(dst, AppropriateZeroRegFor(dst), n, imm_s, imm_r, ORR);
|
| + return true;
|
| + }
|
| + return false;
|
| +}
|
| +
|
| +
|
| +Operand MacroAssembler::MoveImmediateForShiftedOp(const Register& dst,
|
| + int64_t imm) {
|
| + int reg_size = dst.SizeInBits();
|
| +
|
| + // Encode the immediate in a single move instruction, if possible.
|
| + if (TryOneInstrMoveImmediate(dst, imm)) {
|
| + // The move was successful; nothing to do here.
|
| + } else {
|
| + // Pre-shift the immediate to the least-significant bits of the register.
|
| + int shift_low = CountTrailingZeros(imm, reg_size);
|
| + int64_t imm_low = imm >> shift_low;
|
| +
|
| + // Pre-shift the immediate to the most-significant bits of the register. We
|
| + // insert set bits in the least-significant bits, as this creates a
|
| + // different immediate that may be encodable using movn or orr-immediate.
|
| + // If this new immediate is encodable, the set bits will be eliminated by
|
| + // the post shift on the following instruction. The mask must be built from
|
| + // a 64-bit one, since shift_high can exceed 31 (int-literal shift is UB).
|
| + int shift_high = CountLeadingZeros(imm, reg_size);
|
| + int64_t imm_high = (imm << shift_high) | ((INT64_C(1) << shift_high) - 1);
|
| +
|
| + if (TryOneInstrMoveImmediate(dst, imm_low)) {
|
| + // The new immediate has been moved into the destination's low bits:
|
| + // return a new leftward-shifting operand.
|
| + return Operand(dst, LSL, shift_low);
|
| + } else if (TryOneInstrMoveImmediate(dst, imm_high)) {
|
| + // The new immediate has been moved into the destination's high bits:
|
| + // return a new rightward-shifting operand.
|
| + return Operand(dst, LSR, shift_high);
|
| + } else {
|
| + // Use the generic move operation to set up the immediate.
|
| + Mov(dst, imm);
|
| + }
|
| + }
|
| + return Operand(dst);
|
| +}
|
| +
|
| +
|
| void MacroAssembler::AddSubMacro(const Register& rd,
|
| const Register& rn,
|
| const Operand& operand,
|
| @@ -441,8 +492,14 @@ void MacroAssembler::AddSubMacro(const Register& rd,
|
| (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
|
| UseScratchRegisterScope temps(this);
|
| Register temp = temps.AcquireSameSizeAs(rn);
|
| - Mov(temp, operand);
|
| - AddSub(rd, rn, temp, S, op);
|
| + if (operand.IsImmediate()) {
|
| + Operand imm_operand =
|
| + MoveImmediateForShiftedOp(temp, operand.ImmediateValue());
|
| + AddSub(rd, rn, imm_operand, S, op);
|
| + } else {
|
| + Mov(temp, operand);
|
| + AddSub(rd, rn, temp, S, op);
|
| + }
|
| } else {
|
| AddSub(rd, rn, operand, S, op);
|
| }
|
|
|