Index: runtime/vm/assembler_arm64.h |
=================================================================== |
--- runtime/vm/assembler_arm64.h (revision 35439) |
+++ runtime/vm/assembler_arm64.h (working copy) |
@@ -155,17 +155,24 @@ |
return addr; |
} |
+ enum Scaling { |
+ Unscaled, |
+ Scaled, |
+ }; |
+ |
// Base register rn with offset rm. rm is sign-extended according to ext. |
// If ext is UXTX, rm may be optionally scaled by the |
// Log2OperandSize (specified by the instruction). |
- Address(Register rn, Register rm, Extend ext = UXTX, bool scaled = false) { |
+ Address(Register rn, Register rm, |
+ Extend ext = UXTX, Scaling scale = Unscaled) { |
ASSERT((rn != R31) && (rn != ZR)); |
ASSERT((rm != R31) && (rm != SP)); |
- ASSERT(!scaled || (ext == UXTX)); // Can only scale when ext = UXTX. |
+ // Can only scale when ext = UXTX. |
+ ASSERT((scale != Scaled) || (ext == UXTX)); |
ASSERT((ext == UXTW) || (ext == UXTX) || (ext == SXTW) || (ext == SXTX)); |
const Register crn = ConcreteRegister(rn); |
const Register crm = ConcreteRegister(rm); |
- const int32_t s = scaled ? B12 : 0; |
+ const int32_t s = (scale == Scaled) ? B12 : 0; |
encoding_ = |
B21 | B11 | s | |
(static_cast<int32_t>(crn) << kRnShift) | |
@@ -543,6 +550,11 @@ |
EmitLoadStoreReg(STR, rt, a, sz); |
} |
+ // Conditional select. |
+ void csel(Register rd, Register rn, Register rm, Condition cond) { |
+ EmitCoditionalSelect(CSEL, rd, rn, rm, cond, kDoubleWord); |
+ } |
+ |
// Comparison. |
// rn cmp o. |
// For add and sub, to use SP for rn, o must be of type Operand::Extend. |
@@ -619,8 +631,8 @@ |
} |
void TagAndPushPP() { |
// Add the heap object tag back to PP before putting it on the stack. |
- add(PP, PP, Operand(kHeapObjectTag)); |
- str(PP, Address(SP, -1 * kWordSize, Address::PreIndex)); |
+ add(TMP, PP, Operand(kHeapObjectTag)); |
+ str(TMP, Address(SP, -1 * kWordSize, Address::PreIndex)); |
} |
void PopAndUntagPP() { |
ldr(PP, Address(SP, 1 * kWordSize, Address::PostIndex)); |
@@ -646,6 +658,9 @@ |
void SmiUntag(Register reg) { |
Asr(reg, reg, kSmiTagSize); |
} |
+ void SmiTag(Register reg) { |
+ Lsl(reg, reg, kSmiTagSize); |
+ } |
// Branching to ExternalLabels. |
void BranchPatchable(const ExternalLabel* label, Register pp) { |
@@ -713,6 +728,9 @@ |
// Object pool, loading from pool, etc. |
void LoadPoolPointer(Register pp); |
+ // Index of the constant pool entry that points to the debugger stub. |
+ static const int kBreakpointRuntimeCPIndex = 5; |
+ |
enum Patchability { |
kPatchable, |
kNotPatchable, |
@@ -737,6 +755,11 @@ |
Push(TMP); |
} |
+ void LoadClassId(Register result, Register object); |
+ void LoadClassById(Register result, Register class_id); |
+ void LoadClass(Register result, Register object); |
+ void CompareClassId(Register object, intptr_t class_id); |
+ |
void EnterFrame(intptr_t frame_size); |
void LeaveFrame(); |
@@ -845,7 +868,7 @@ |
void EmitAddSubImmOp(AddSubImmOp op, Register rd, Register rn, |
Operand o, OperandSize sz, bool set_flags) { |
- ASSERT((sz == kDoubleWord) || (sz == kWord)); |
+ ASSERT((sz == kDoubleWord) || (sz == kWord) || (sz == kUnsignedWord)); |
const int32_t size = (sz == kDoubleWord) ? B31 : 0; |
const int32_t s = set_flags ? B29 : 0; |
const int32_t encoding = |
@@ -858,7 +881,7 @@ |
void EmitLogicalImmOp(LogicalImmOp op, Register rd, Register rn, |
Operand o, OperandSize sz) { |
- ASSERT((sz == kDoubleWord) || (sz == kWord)); |
+ ASSERT((sz == kDoubleWord) || (sz == kWord) || (sz == kUnsignedWord)); |
ASSERT((rd != R31) && (rn != R31)); |
ASSERT(rn != SP); |
ASSERT((op == ANDIS) || (rd != ZR)); // op != ANDIS => rd != ZR. |
@@ -877,7 +900,7 @@ |
void EmitLogicalShiftOp(LogicalShiftOp op, |
Register rd, Register rn, Operand o, OperandSize sz) { |
- ASSERT((sz == kDoubleWord) || (sz == kWord)); |
+ ASSERT((sz == kDoubleWord) || (sz == kWord) || (sz == kUnsignedWord)); |
ASSERT((rd != R31) && (rn != R31)); |
ASSERT((rd != SP) && (rn != SP)); |
ASSERT(o.type() == Operand::Shifted); |
@@ -895,7 +918,7 @@ |
void EmitAddSubShiftExtOp(AddSubShiftExtOp op, |
Register rd, Register rn, Operand o, |
OperandSize sz, bool set_flags) { |
- ASSERT((sz == kDoubleWord) || (sz == kWord)); |
+ ASSERT((sz == kDoubleWord) || (sz == kWord) || (sz == kUnsignedWord)); |
const int32_t size = (sz == kDoubleWord) ? B31 : 0; |
const int32_t s = set_flags ? B29 : 0; |
const int32_t encoding = |
@@ -913,13 +936,13 @@ |
} |
int64_t DecodeImm19BranchOffset(int32_t instr) { |
- const int32_t off = (((instr >> kImm19Shift) & kImm19Shift) << 13) >> 13; |
+ const int32_t off = (((instr & kImm19Mask) >> kImm19Shift) << 13) >> 11; |
return static_cast<int64_t>(off); |
} |
void EmitCompareAndBranch(CompareAndBranchOp op, Register rt, int64_t imm, |
OperandSize sz) { |
- ASSERT((sz == kDoubleWord) || (sz == kWord)); |
+ ASSERT((sz == kDoubleWord) || (sz == kWord) || (sz == kUnsignedWord)); |
ASSERT(Utils::IsInt(21, imm) && ((imm & 0x3) == 0)); |
ASSERT((rt != SP) && (rt != R31)); |
const Register crt = ConcreteRegister(rt); |
@@ -944,7 +967,7 @@ |
bool CanEncodeImm19BranchOffset(int64_t offset) { |
ASSERT(Utils::IsAligned(offset, 4)); |
- return Utils::IsInt(19, offset); |
+ return Utils::IsInt(21, offset); |
} |
// TODO(zra): Implement far branches. Requires loading large immediates. |
@@ -978,7 +1001,7 @@ |
void EmitMoveWideOp(MoveWideOp op, Register rd, uint16_t imm, int hw_idx, |
OperandSize sz) { |
ASSERT((hw_idx >= 0) && (hw_idx <= 3)); |
- ASSERT((sz == kDoubleWord) || (sz == kWord)); |
+ ASSERT((sz == kDoubleWord) || (sz == kWord) || (sz == kUnsignedWord)); |
const int32_t size = (sz == kDoubleWord) ? B31 : 0; |
const int32_t encoding = |
op | size | |
@@ -1001,7 +1024,7 @@ |
void EmitLoadRegLiteral(LoadRegLiteralOp op, Register rt, Address a, |
OperandSize sz) { |
- ASSERT((sz == kDoubleWord) || (sz == kWord)); |
+ ASSERT((sz == kDoubleWord) || (sz == kWord) || (sz == kUnsignedWord)); |
ASSERT((rt != SP) && (rt != R31)); |
const Register crt = ConcreteRegister(rt); |
const int32_t size = (sz == kDoubleWord) ? B30 : 0; |
@@ -1028,6 +1051,7 @@ |
Register rd, Register rn, Register rm, |
OperandSize sz) { |
ASSERT((rd != SP) && (rn != SP) && (rm != SP)); |
+ ASSERT((sz == kDoubleWord) || (sz == kWord) || (sz == kUnsignedWord)); |
const Register crd = ConcreteRegister(rd); |
const Register crn = ConcreteRegister(rn); |
const Register crm = ConcreteRegister(rm); |
@@ -1044,6 +1068,7 @@ |
Register rd, Register rn, Register rm, Register ra, |
OperandSize sz) { |
ASSERT((rd != SP) && (rn != SP) && (rm != SP) && (ra != SP)); |
+ ASSERT((sz == kDoubleWord) || (sz == kWord) || (sz == kUnsignedWord)); |
const Register crd = ConcreteRegister(rd); |
const Register crn = ConcreteRegister(rn); |
const Register crm = ConcreteRegister(rm); |
@@ -1058,6 +1083,24 @@ |
Emit(encoding); |
} |
+ void EmitCoditionalSelect(ConditionalSelectOp op, |
+ Register rd, Register rn, Register rm, |
+ Condition cond, OperandSize sz) { |
+ ASSERT((rd != SP) && (rn != SP) && (rm != SP)); |
+ ASSERT((sz == kDoubleWord) || (sz == kWord) || (sz == kUnsignedWord)); |
+ const Register crd = ConcreteRegister(rd); |
+ const Register crn = ConcreteRegister(rn); |
+ const Register crm = ConcreteRegister(rm); |
+ const int32_t size = (sz == kDoubleWord) ? B31 : 0; |
+ const int32_t encoding = |
+ op | size | |
+ (static_cast<int32_t>(crd) << kRdShift) | |
+ (static_cast<int32_t>(crn) << kRnShift) | |
+ (static_cast<int32_t>(crm) << kRmShift) | |
+ (static_cast<int32_t>(cond) << kSelCondShift); |
+ Emit(encoding); |
+ } |
+ |
void StoreIntoObjectFilter(Register object, Register value, Label* no_update); |
// Shorter filtering sequence that assumes that value is not a smi. |