| Index: src/arm64/macro-assembler-arm64.h
| diff --git a/src/arm64/macro-assembler-arm64.h b/src/arm64/macro-assembler-arm64.h
| index bc2d9b39ac056e2333d8c32631e1bdc70ab154f2..6444530ebbf1b17d5f13ce59394d59d7c97d4130 100644
| --- a/src/arm64/macro-assembler-arm64.h
| +++ b/src/arm64/macro-assembler-arm64.h
| @@ -162,6 +162,21 @@ enum CopyHint { kCopyUnknown, kCopyShort, kCopyLong };
|  enum DiscardMoveMode { kDontDiscardForSameWReg, kDiscardForSameWReg };
|  enum SeqStringSetCharCheckIndexType { kIndexIsSmi, kIndexIsInteger32 };
| 
| +// The macro assembler supports moving automatically pre-shifted immediates for
| +// arithmetic and logical instructions, and then applying a post shift in the
| +// instruction to undo the modification, in order to reduce the code emitted for
| +// an operation. For example:
| +//
| +//  Add(x0, x0, 0x1f7de) => movz x16, 0xfbef; add x0, x0, x16, lsl #1.
| +//
| +// This optimisation can be only partially applied when the stack pointer is an
| +// operand or destination, so this enumeration is used to control the shift.
| +enum PreShiftImmMode {
| +  kNoShift,          // Don't pre-shift.
| +  kLimitShiftForSP,  // Limit pre-shift for add/sub extend use.
| +  kAnyShift          // Allow any pre-shift.
| +};
| +
|  class MacroAssembler : public Assembler {
|   public:
|    MacroAssembler(Isolate* isolate, byte* buffer, unsigned buffer_size,
| @@ -276,7 +291,8 @@ class MacroAssembler : public Assembler {
|    // dst is not necessarily equal to imm; it may have had a shifting operation
|    // applied to it that will be subsequently undone by the shift applied in the
|    // Operand.
| -  Operand MoveImmediateForShiftedOp(const Register& dst, int64_t imm);
| +  Operand MoveImmediateForShiftedOp(const Register& dst, int64_t imm,
| +                                    PreShiftImmMode mode);
| 
|    // Conditional macros.
|    inline void Ccmp(const Register& rn,
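To make the comment's example concrete: the macro assembler strips low zero bits from the immediate until what remains fits a single 16-bit movz, then re-applies the stripped amount as the shift on the add/sub that consumes the scratch register. The sketch below is a minimal, standalone illustration of that idea and of why a mode like kLimitShiftForSP is needed; the helper name PreShiftAmount and its exact behaviour are assumptions for illustration, not code from the patch. The cap of 4 corresponds to the A64 add/sub (extended register) encoding, which is the only add/sub register form that can address the stack pointer and which permits a left shift of at most 4.

```cpp
// Minimal sketch (not V8 code) of deriving a pre-shift for an add/sub immediate.
#include <algorithm>
#include <cstdint>
#include <cstdio>

// Strip trailing zero bits while the value is still too large for a 16-bit
// movz. When limit_for_sp is set, cap the shift at 4, mirroring the intent of
// kLimitShiftForSP: add/sub against the stack pointer must use the
// extended-register form, whose shift amount is at most 4.
static int PreShiftAmount(uint64_t imm, bool limit_for_sp) {
  int shift = 0;
  while ((imm & 1) == 0 && imm > 0xffff) {
    imm >>= 1;
    ++shift;
  }
  if (limit_for_sp) shift = std::min(shift, 4);
  return shift;
}

int main() {
  // The example from the comment in the patch: Add(x0, x0, 0x1f7de).
  uint64_t imm = 0x1f7de;
  int shift = PreShiftAmount(imm, /*limit_for_sp=*/false);
  // Prints: movz x16, #0xfbef ; add x0, x0, x16, lsl #1
  std::printf("movz x16, #0x%llx ; add x0, x0, x16, lsl #%d\n",
              static_cast<unsigned long long>(imm >> shift), shift);
  return 0;
}
```

If limiting the shift leaves a value that still does not fit a single movz, the macro assembler can fall back to materialising the full immediate into the scratch register as usual; the new enum only controls how aggressively the pre-shift optimisation may be applied when the stack pointer is involved.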