OLD | NEW |
1 // Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file | 1 // Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file |
2 // for details. All rights reserved. Use of this source code is governed by a | 2 // for details. All rights reserved. Use of this source code is governed by a |
3 // BSD-style license that can be found in the LICENSE file. | 3 // BSD-style license that can be found in the LICENSE file. |
4 | 4 |
5 #ifndef RUNTIME_VM_ASSEMBLER_ARM64_H_ | 5 #ifndef RUNTIME_VM_ASSEMBLER_ARM64_H_ |
6 #define RUNTIME_VM_ASSEMBLER_ARM64_H_ | 6 #define RUNTIME_VM_ASSEMBLER_ARM64_H_ |
7 | 7 |
8 #ifndef RUNTIME_VM_ASSEMBLER_H_ | 8 #ifndef RUNTIME_VM_ASSEMBLER_H_ |
9 #error Do not include assembler_arm64.h directly; use assembler.h instead. | 9 #error Do not include assembler_arm64.h directly; use assembler.h instead. |
10 #endif | 10 #endif |
11 | 11 |
12 #include "platform/assert.h" | 12 #include "platform/assert.h" |
13 #include "platform/utils.h" | 13 #include "platform/utils.h" |
14 #include "vm/constants_arm64.h" | 14 #include "vm/constants_arm64.h" |
15 #include "vm/hash_map.h" | 15 #include "vm/hash_map.h" |
16 #include "vm/longjump.h" | 16 #include "vm/longjump.h" |
17 #include "vm/object.h" | 17 #include "vm/object.h" |
18 #include "vm/simulator.h" | 18 #include "vm/simulator.h" |
19 | 19 |
20 namespace dart { | 20 namespace dart { |
21 | 21 |
22 // Forward declarations. | 22 // Forward declarations. |
23 class RuntimeEntry; | 23 class RuntimeEntry; |
24 class StubEntry; | 24 class StubEntry; |
25 | 25 |
26 class Immediate : public ValueObject { | 26 class Immediate : public ValueObject { |
27 public: | 27 public: |
28 explicit Immediate(int64_t value) : value_(value) { } | 28 explicit Immediate(int64_t value) : value_(value) {} |
29 | 29 |
30 Immediate(const Immediate& other) : ValueObject(), value_(other.value_) { } | 30 Immediate(const Immediate& other) : ValueObject(), value_(other.value_) {} |
31 Immediate& operator=(const Immediate& other) { | 31 Immediate& operator=(const Immediate& other) { |
32 value_ = other.value_; | 32 value_ = other.value_; |
33 return *this; | 33 return *this; |
34 } | 34 } |
35 | 35 |
36 private: | 36 private: |
37 int64_t value_; | 37 int64_t value_; |
38 | 38 |
39 int64_t value() const { return value_; } | 39 int64_t value() const { return value_; } |
40 | 40 |
41 friend class Assembler; | 41 friend class Assembler; |
42 }; | 42 }; |
43 | 43 |
44 | 44 |
45 class Label : public ValueObject { | 45 class Label : public ValueObject { |
46 public: | 46 public: |
47 Label() : position_(0) { } | 47 Label() : position_(0) {} |
48 | 48 |
49 ~Label() { | 49 ~Label() { |
50 // Assert if label is being destroyed with unresolved branches pending. | 50 // Assert if label is being destroyed with unresolved branches pending. |
51 ASSERT(!IsLinked()); | 51 ASSERT(!IsLinked()); |
52 } | 52 } |
53 | 53 |
54 // Returns the position for bound and linked labels. Cannot be used | 54 // Returns the position for bound and linked labels. Cannot be used |
55 // for unused labels. | 55 // for unused labels. |
56 intptr_t Position() const { | 56 intptr_t Position() const { |
57 ASSERT(!IsUnused()); | 57 ASSERT(!IsUnused()); |
58 return IsBound() ? -position_ - kWordSize : position_ - kWordSize; | 58 return IsBound() ? -position_ - kWordSize : position_ - kWordSize; |
59 } | 59 } |
60 | 60 |
61 bool IsBound() const { return position_ < 0; } | 61 bool IsBound() const { return position_ < 0; } |
62 bool IsUnused() const { return position_ == 0; } | 62 bool IsUnused() const { return position_ == 0; } |
63 bool IsLinked() const { return position_ > 0; } | 63 bool IsLinked() const { return position_ > 0; } |
64 | 64 |
65 private: | 65 private: |
66 intptr_t position_; | 66 intptr_t position_; |
67 | 67 |
68 void Reinitialize() { | 68 void Reinitialize() { position_ = 0; } |
69 position_ = 0; | |
70 } | |
71 | 69 |
72 void BindTo(intptr_t position) { | 70 void BindTo(intptr_t position) { |
73 ASSERT(!IsBound()); | 71 ASSERT(!IsBound()); |
74 position_ = -position - kWordSize; | 72 position_ = -position - kWordSize; |
75 ASSERT(IsBound()); | 73 ASSERT(IsBound()); |
76 } | 74 } |
77 | 75 |
78 void LinkTo(intptr_t position) { | 76 void LinkTo(intptr_t position) { |
79 ASSERT(!IsBound()); | 77 ASSERT(!IsBound()); |
80 position_ = position + kWordSize; | 78 position_ = position + kWordSize; |
81 ASSERT(IsLinked()); | 79 ASSERT(IsLinked()); |
82 } | 80 } |
83 | 81 |
84 friend class Assembler; | 82 friend class Assembler; |
85 DISALLOW_COPY_AND_ASSIGN(Label); | 83 DISALLOW_COPY_AND_ASSIGN(Label); |
86 }; | 84 }; |
87 | 85 |
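The Label above multiplexes three states into one field: position_ == 0 means unused, a positive value means linked (the offset of the pending branch plus kWordSize), and a negative value means bound (the negated bound offset minus kWordSize), which is exactly what Position() undoes. A minimal standalone sketch of that encoding, assuming kWordSize == 8 as on ARM64; the variable names are illustrative, not part of this file.

#include <cassert>
#include <cstdint>

int main() {
  const intptr_t kWordSize = 8;
  intptr_t position_ = 0;                     // unused: position_ == 0
  const intptr_t link_at = 32;                // branch emitted at byte offset 32
  position_ = link_at + kWordSize;            // LinkTo(): linked, position_ > 0
  assert(position_ - kWordSize == link_at);   // Position() for a linked label
  const intptr_t bind_at = 64;                // label bound at byte offset 64
  position_ = -bind_at - kWordSize;           // BindTo(): bound, position_ < 0
  assert(-position_ - kWordSize == bind_at);  // Position() for a bound label
  return 0;
}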
88 | 86 |
89 class Address : public ValueObject { | 87 class Address : public ValueObject { |
90 public: | 88 public: |
91 Address(const Address& other) | 89 Address(const Address& other) |
92 : ValueObject(), | 90 : ValueObject(), |
93 encoding_(other.encoding_), | 91 encoding_(other.encoding_), |
94 type_(other.type_), | 92 type_(other.type_), |
95 base_(other.base_) { | 93 base_(other.base_) {} |
96 } | |
97 | 94 |
98 Address& operator=(const Address& other) { | 95 Address& operator=(const Address& other) { |
99 encoding_ = other.encoding_; | 96 encoding_ = other.encoding_; |
100 type_ = other.type_; | 97 type_ = other.type_; |
101 base_ = other.base_; | 98 base_ = other.base_; |
102 return *this; | 99 return *this; |
103 } | 100 } |
104 | 101 |
105 enum AddressType { | 102 enum AddressType { |
106 Offset, | 103 Offset, |
107 PreIndex, | 104 PreIndex, |
108 PostIndex, | 105 PostIndex, |
109 PairOffset, | 106 PairOffset, |
110 PairPreIndex, | 107 PairPreIndex, |
111 PairPostIndex, | 108 PairPostIndex, |
112 Reg, | 109 Reg, |
113 PCOffset, | 110 PCOffset, |
114 Unknown, | 111 Unknown, |
115 }; | 112 }; |
116 | 113 |
117 // Offset is in bytes. For the unsigned imm12 case, we unscale based on the | 114 // Offset is in bytes. For the unsigned imm12 case, we unscale based on the |
118 // operand size, and assert that offset is aligned accordingly. | 115 // operand size, and assert that offset is aligned accordingly. |
119 // For the smaller signed imm9 case, the offset is the number of bytes, but | 116 // For the smaller signed imm9 case, the offset is the number of bytes, but |
120 // is unscaled. | 117 // is unscaled. |
121 Address(Register rn, int32_t offset = 0, AddressType at = Offset, | 118 Address(Register rn, |
| 119 int32_t offset = 0, |
| 120 AddressType at = Offset, |
122 OperandSize sz = kDoubleWord) { | 121 OperandSize sz = kDoubleWord) { |
123 ASSERT((rn != kNoRegister) && (rn != R31) && (rn != ZR)); | 122 ASSERT((rn != kNoRegister) && (rn != R31) && (rn != ZR)); |
124 ASSERT(CanHoldOffset(offset, at, sz)); | 123 ASSERT(CanHoldOffset(offset, at, sz)); |
125 const Register crn = ConcreteRegister(rn); | 124 const Register crn = ConcreteRegister(rn); |
126 const int32_t scale = Log2OperandSizeBytes(sz); | 125 const int32_t scale = Log2OperandSizeBytes(sz); |
127 if ((at == Offset) && | 126 if ((at == Offset) && Utils::IsUint(12 + scale, offset) && |
128 Utils::IsUint(12 + scale, offset) && | |
129 (offset == ((offset >> scale) << scale))) { | 127 (offset == ((offset >> scale) << scale))) { |
130 encoding_ = | 128 encoding_ = B24 | ((offset >> scale) << kImm12Shift) | |
131 B24 | | 129 (static_cast<int32_t>(crn) << kRnShift); |
132 ((offset >> scale) << kImm12Shift) | | 130 } else if ((at == Offset) && Utils::IsInt(9, offset)) { |
133 (static_cast<int32_t>(crn) << kRnShift); | 131 encoding_ = ((offset & 0x1ff) << kImm9Shift) | |
134 } else if ((at == Offset) && | 132 (static_cast<int32_t>(crn) << kRnShift); |
135 Utils::IsInt(9, offset)) { | |
136 encoding_ = | |
137 ((offset & 0x1ff) << kImm9Shift) | | |
138 (static_cast<int32_t>(crn) << kRnShift); | |
139 } else if ((at == PreIndex) || (at == PostIndex)) { | 133 } else if ((at == PreIndex) || (at == PostIndex)) { |
140 ASSERT(Utils::IsInt(9, offset)); | 134 ASSERT(Utils::IsInt(9, offset)); |
141 int32_t idx = (at == PostIndex) ? B10 : (B11 | B10); | 135 int32_t idx = (at == PostIndex) ? B10 : (B11 | B10); |
142 encoding_ = | 136 encoding_ = idx | ((offset & 0x1ff) << kImm9Shift) | |
143 idx | | 137 (static_cast<int32_t>(crn) << kRnShift); |
144 ((offset & 0x1ff) << kImm9Shift) | | |
145 (static_cast<int32_t>(crn) << kRnShift); | |
146 } else { | 138 } else { |
147 ASSERT((at == PairOffset) || (at == PairPreIndex) || | 139 ASSERT((at == PairOffset) || (at == PairPreIndex) || |
148 (at == PairPostIndex)); | 140 (at == PairPostIndex)); |
149 ASSERT(Utils::IsInt(7 + scale, offset) && | 141 ASSERT(Utils::IsInt(7 + scale, offset) && |
150 (offset == ((offset >> scale) << scale))); | 142 (offset == ((offset >> scale) << scale))); |
151 int32_t idx = 0; | 143 int32_t idx = 0; |
152 switch (at) { | 144 switch (at) { |
153 case PairPostIndex: idx = B23; break; | 145 case PairPostIndex: |
154 case PairPreIndex: idx = B24 | B23; break; | 146 idx = B23; |
155 case PairOffset: idx = B24; break; | 147 break; |
156 default: UNREACHABLE(); break; | 148 case PairPreIndex: |
| 149 idx = B24 | B23; |
| 150 break; |
| 151 case PairOffset: |
| 152 idx = B24; |
| 153 break; |
| 154 default: |
| 155 UNREACHABLE(); |
| 156 break; |
157 } | 157 } |
158 encoding_ = | 158 encoding_ = idx | (((offset >> scale) << kImm7Shift) & kImm7Mask) | |
159 idx | | 159 (static_cast<int32_t>(crn) << kRnShift); |
160 (((offset >> scale) << kImm7Shift) & kImm7Mask) | | |
161 (static_cast<int32_t>(crn) << kRnShift); | |
162 } | 160 } |
163 type_ = at; | 161 type_ = at; |
164 base_ = crn; | 162 base_ = crn; |
165 } | 163 } |
166 | 164 |
167 // This addressing mode does not exist. | 165 // This addressing mode does not exist. |
168 Address(Register rn, Register offset, AddressType at, | 166 Address(Register rn, |
| 167 Register offset, |
| 168 AddressType at, |
169 OperandSize sz = kDoubleWord); | 169 OperandSize sz = kDoubleWord); |
170 | 170 |
171 static bool CanHoldOffset(int32_t offset, AddressType at = Offset, | 171 static bool CanHoldOffset(int32_t offset, |
| 172 AddressType at = Offset, |
172 OperandSize sz = kDoubleWord) { | 173 OperandSize sz = kDoubleWord) { |
173 if (at == Offset) { | 174 if (at == Offset) { |
174 // Offset fits in 12 bit unsigned and has right alignment for sz, | 175 // Offset fits in 12 bit unsigned and has right alignment for sz, |
175 // or fits in 9 bit signed offset with no alignment restriction. | 176 // or fits in 9 bit signed offset with no alignment restriction. |
176 const int32_t scale = Log2OperandSizeBytes(sz); | 177 const int32_t scale = Log2OperandSizeBytes(sz); |
177 return (Utils::IsUint(12 + scale, offset) && | 178 return (Utils::IsUint(12 + scale, offset) && |
178 (offset == ((offset >> scale) << scale))) || | 179 (offset == ((offset >> scale) << scale))) || |
179 (Utils::IsInt(9, offset)); | 180 (Utils::IsInt(9, offset)); |
180 } else if (at == PCOffset) { | 181 } else if (at == PCOffset) { |
181 return Utils::IsInt(21, offset) && | 182 return Utils::IsInt(21, offset) && (offset == ((offset >> 2) << 2)); |
182 (offset == ((offset >> 2) << 2)); | |
183 } else if ((at == PreIndex) || (at == PostIndex)) { | 183 } else if ((at == PreIndex) || (at == PostIndex)) { |
184 return Utils::IsInt(9, offset); | 184 return Utils::IsInt(9, offset); |
185 } else { | 185 } else { |
186 ASSERT((at == PairOffset) || (at == PairPreIndex) || | 186 ASSERT((at == PairOffset) || (at == PairPreIndex) || |
187 (at == PairPostIndex)); | 187 (at == PairPostIndex)); |
188 const int32_t scale = Log2OperandSizeBytes(sz); | 188 const int32_t scale = Log2OperandSizeBytes(sz); |
189 return (Utils::IsInt(7 + scale, offset) && | 189 return (Utils::IsInt(7 + scale, offset) && |
190 (offset == ((offset >> scale) << scale))); | 190 (offset == ((offset >> scale) << scale))); |
191 } | 191 } |
192 } | 192 } |
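For the plain Offset mode, CanHoldOffset() above accepts either an operand-size-aligned unsigned offset fitting in 12 scaled bits, or any signed offset in [-256, 255]. A standalone sketch of that rule for kDoubleWord (8-byte) accesses; the helper name is illustrative, not part of the VM.

#include <cassert>
#include <cstdint>

static bool FitsDoubleWordOffset(int32_t offset) {
  const int32_t scale = 3;  // Log2OperandSizeBytes(kDoubleWord) == 3
  const bool imm12 = (offset >= 0) && (offset < (4096 << scale)) &&
                     ((offset & ((1 << scale) - 1)) == 0);  // scaled, unsigned
  const bool imm9 = (offset >= -256) && (offset <= 255);    // unscaled, signed
  return imm12 || imm9;
}

int main() {
  assert(FitsDoubleWordOffset(32760));   // 4095 * 8: largest scaled imm12
  assert(FitsDoubleWordOffset(4));       // unaligned but small: uses imm9
  assert(FitsDoubleWordOffset(-256));    // smallest signed imm9
  assert(!FitsDoubleWordOffset(32761));  // unaligned and too large for imm9
  assert(!FitsDoubleWordOffset(-264));   // negative offsets must fit in imm9
  return 0;
}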
(...skipping 19 matching lines...) |
212 static Address PC(Register r); | 212 static Address PC(Register r); |
213 | 213 |
214 enum Scaling { | 214 enum Scaling { |
215 Unscaled, | 215 Unscaled, |
216 Scaled, | 216 Scaled, |
217 }; | 217 }; |
218 | 218 |
219 // Base register rn with offset rm. rm is sign-extended according to ext. | 219 // Base register rn with offset rm. rm is sign-extended according to ext. |
220 // If ext is UXTX, rm may be optionally scaled by the | 220 // If ext is UXTX, rm may be optionally scaled by the |
221 // Log2OperandSize (specified by the instruction). | 221 // Log2OperandSize (specified by the instruction). |
222 Address(Register rn, Register rm, | 222 Address(Register rn, |
223 Extend ext = UXTX, Scaling scale = Unscaled) { | 223 Register rm, |
| 224 Extend ext = UXTX, |
| 225 Scaling scale = Unscaled) { |
224 ASSERT((rn != R31) && (rn != ZR)); | 226 ASSERT((rn != R31) && (rn != ZR)); |
225 ASSERT((rm != R31) && (rm != CSP)); | 227 ASSERT((rm != R31) && (rm != CSP)); |
226 // Can only scale when ext = UXTX. | 228 // Can only scale when ext = UXTX. |
227 ASSERT((scale != Scaled) || (ext == UXTX)); | 229 ASSERT((scale != Scaled) || (ext == UXTX)); |
228 ASSERT((ext == UXTW) || (ext == UXTX) || (ext == SXTW) || (ext == SXTX)); | 230 ASSERT((ext == UXTW) || (ext == UXTX) || (ext == SXTW) || (ext == SXTX)); |
229 const Register crn = ConcreteRegister(rn); | 231 const Register crn = ConcreteRegister(rn); |
230 const Register crm = ConcreteRegister(rm); | 232 const Register crm = ConcreteRegister(rm); |
231 const int32_t s = (scale == Scaled) ? B12 : 0; | 233 const int32_t s = (scale == Scaled) ? B12 : 0; |
232 encoding_ = | 234 encoding_ = B21 | B11 | s | (static_cast<int32_t>(crn) << kRnShift) | |
233 B21 | B11 | s | | 235 (static_cast<int32_t>(crm) << kRmShift) | |
234 (static_cast<int32_t>(crn) << kRnShift) | | 236 (static_cast<int32_t>(ext) << kExtendTypeShift); |
235 (static_cast<int32_t>(crm) << kRmShift) | | |
236 (static_cast<int32_t>(ext) << kExtendTypeShift); | |
237 type_ = Reg; | 237 type_ = Reg; |
238 base_ = crn; | 238 base_ = crn; |
239 } | 239 } |
240 | 240 |
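Usage sketch for the register-offset constructor above, assuming an Assembler instance named assm (illustrative, not part of this file): with UXTX and Scaled, the index register is shifted left by Log2OperandSizeBytes of the access, which is the usual way to address 8-byte array elements.

assm.ldr(R0, Address(R1, R2, UXTX, Address::Scaled));  // R0 = *(R1 + (R2 << 3))
assm.ldr(R3, Address(R1, R2));                         // R3 = *(R1 + R2), unscaled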
241 static OperandSize OperandSizeFor(intptr_t cid) { | 241 static OperandSize OperandSizeFor(intptr_t cid) { |
242 switch (cid) { | 242 switch (cid) { |
243 case kArrayCid: | 243 case kArrayCid: |
244 case kImmutableArrayCid: | 244 case kImmutableArrayCid: |
245 return kWord; | 245 return kWord; |
246 case kOneByteStringCid: | 246 case kOneByteStringCid: |
(...skipping 49 matching lines...) |
296 AddressType type_; | 296 AddressType type_; |
297 Register base_; | 297 Register base_; |
298 | 298 |
299 friend class Assembler; | 299 friend class Assembler; |
300 }; | 300 }; |
301 | 301 |
302 | 302 |
303 class FieldAddress : public Address { | 303 class FieldAddress : public Address { |
304 public: | 304 public: |
305 FieldAddress(Register base, int32_t disp, OperandSize sz = kDoubleWord) | 305 FieldAddress(Register base, int32_t disp, OperandSize sz = kDoubleWord) |
306 : Address(base, disp - kHeapObjectTag, Offset, sz) { } | 306 : Address(base, disp - kHeapObjectTag, Offset, sz) {} |
307 | 307 |
308 // This addressing mode does not exist. | 308 // This addressing mode does not exist. |
309 FieldAddress(Register base, Register disp, OperandSize sz = kDoubleWord); | 309 FieldAddress(Register base, Register disp, OperandSize sz = kDoubleWord); |
310 | 310 |
311 FieldAddress(const FieldAddress& other) : Address(other) { } | 311 FieldAddress(const FieldAddress& other) : Address(other) {} |
312 | 312 |
313 FieldAddress& operator=(const FieldAddress& other) { | 313 FieldAddress& operator=(const FieldAddress& other) { |
314 Address::operator=(other); | 314 Address::operator=(other); |
315 return *this; | 315 return *this; |
316 } | 316 } |
317 }; | 317 }; |
318 | 318 |
319 | 319 |
320 class Operand : public ValueObject { | 320 class Operand : public ValueObject { |
321 public: | 321 public: |
322 enum OperandType { | 322 enum OperandType { |
323 Shifted, | 323 Shifted, |
324 Extended, | 324 Extended, |
325 Immediate, | 325 Immediate, |
326 BitfieldImm, | 326 BitfieldImm, |
327 Unknown, | 327 Unknown, |
328 }; | 328 }; |
329 | 329 |
330 // Data-processing operand - Uninitialized. | 330 // Data-processing operand - Uninitialized. |
331 Operand() : encoding_(-1), type_(Unknown) { } | 331 Operand() : encoding_(-1), type_(Unknown) {} |
332 | 332 |
333 // Data-processing operands - Copy constructor. | 333 // Data-processing operands - Copy constructor. |
334 Operand(const Operand& other) | 334 Operand(const Operand& other) |
335 : ValueObject(), encoding_(other.encoding_), type_(other.type_) { } | 335 : ValueObject(), encoding_(other.encoding_), type_(other.type_) {} |
336 | 336 |
337 Operand& operator=(const Operand& other) { | 337 Operand& operator=(const Operand& other) { |
338 type_ = other.type_; | 338 type_ = other.type_; |
339 encoding_ = other.encoding_; | 339 encoding_ = other.encoding_; |
340 return *this; | 340 return *this; |
341 } | 341 } |
342 | 342 |
343 explicit Operand(Register rm) { | 343 explicit Operand(Register rm) { |
344 ASSERT((rm != R31) && (rm != CSP)); | 344 ASSERT((rm != R31) && (rm != CSP)); |
345 const Register crm = ConcreteRegister(rm); | 345 const Register crm = ConcreteRegister(rm); |
346 encoding_ = (static_cast<int32_t>(crm) << kRmShift); | 346 encoding_ = (static_cast<int32_t>(crm) << kRmShift); |
347 type_ = Shifted; | 347 type_ = Shifted; |
348 } | 348 } |
349 | 349 |
350 Operand(Register rm, Shift shift, int32_t imm) { | 350 Operand(Register rm, Shift shift, int32_t imm) { |
351 ASSERT(Utils::IsUint(6, imm)); | 351 ASSERT(Utils::IsUint(6, imm)); |
352 ASSERT((rm != R31) && (rm != CSP)); | 352 ASSERT((rm != R31) && (rm != CSP)); |
353 const Register crm = ConcreteRegister(rm); | 353 const Register crm = ConcreteRegister(rm); |
354 encoding_ = | 354 encoding_ = (imm << kImm6Shift) | (static_cast<int32_t>(crm) << kRmShift) | |
355 (imm << kImm6Shift) | | 355 (static_cast<int32_t>(shift) << kShiftTypeShift); |
356 (static_cast<int32_t>(crm) << kRmShift) | | |
357 (static_cast<int32_t>(shift) << kShiftTypeShift); | |
358 type_ = Shifted; | 356 type_ = Shifted; |
359 } | 357 } |
360 | 358 |
361 // This operand type does not exist. | 359 // This operand type does not exist. |
362 Operand(Register rm, Shift shift, Register r); | 360 Operand(Register rm, Shift shift, Register r); |
363 | 361 |
364 Operand(Register rm, Extend extend, int32_t imm) { | 362 Operand(Register rm, Extend extend, int32_t imm) { |
365 ASSERT(Utils::IsUint(3, imm)); | 363 ASSERT(Utils::IsUint(3, imm)); |
366 ASSERT((rm != R31) && (rm != CSP)); | 364 ASSERT((rm != R31) && (rm != CSP)); |
367 const Register crm = ConcreteRegister(rm); | 365 const Register crm = ConcreteRegister(rm); |
368 encoding_ = | 366 encoding_ = B21 | (static_cast<int32_t>(crm) << kRmShift) | |
369 B21 | | 367 (static_cast<int32_t>(extend) << kExtendTypeShift) | |
370 (static_cast<int32_t>(crm) << kRmShift) | | 368 ((imm & 0x7) << kImm3Shift); |
371 (static_cast<int32_t>(extend) << kExtendTypeShift) | | |
372 ((imm & 0x7) << kImm3Shift); | |
373 type_ = Extended; | 369 type_ = Extended; |
374 } | 370 } |
375 | 371 |
376 // This operand type does not exist. | 372 // This operand type does not exist. |
377 Operand(Register rm, Extend extend, Register r); | 373 Operand(Register rm, Extend extend, Register r); |
378 | 374 |
379 explicit Operand(int32_t imm) { | 375 explicit Operand(int32_t imm) { |
380 if (Utils::IsUint(12, imm)) { | 376 if (Utils::IsUint(12, imm)) { |
381 encoding_ = imm << kImm12Shift; | 377 encoding_ = imm << kImm12Shift; |
382 } else { | 378 } else { |
383 // imm only has bits in [12, 24) set. | 379 // imm only has bits in [12, 24) set. |
384 ASSERT(((imm & 0xfff) == 0) && (Utils::IsUint(12, imm >> 12))); | 380 ASSERT(((imm & 0xfff) == 0) && (Utils::IsUint(12, imm >> 12))); |
385 encoding_ = B22 | ((imm >> 12) << kImm12Shift); | 381 encoding_ = B22 | ((imm >> 12) << kImm12Shift); |
386 } | 382 } |
387 type_ = Immediate; | 383 type_ = Immediate; |
388 } | 384 } |
389 | 385 |
390 // Encodes the value of an immediate for a logical operation. | 386 // Encodes the value of an immediate for a logical operation. |
391 // Since these values are difficult to craft by hand, instead pass the | 387 // Since these values are difficult to craft by hand, instead pass the |
392 // logical mask to the function IsImmLogical to get n, imm_s, and | 388 // logical mask to the function IsImmLogical to get n, imm_s, and |
393 // imm_r. | 389 // imm_r. |
394 Operand(uint8_t n, int8_t imm_s, int8_t imm_r) { | 390 Operand(uint8_t n, int8_t imm_s, int8_t imm_r) { |
395 ASSERT((n == 1) || (n == 0)); | 391 ASSERT((n == 1) || (n == 0)); |
396 ASSERT(Utils::IsUint(6, imm_s) && Utils::IsUint(6, imm_r)); | 392 ASSERT(Utils::IsUint(6, imm_s) && Utils::IsUint(6, imm_r)); |
397 type_ = BitfieldImm; | 393 type_ = BitfieldImm; |
398 encoding_ = | 394 encoding_ = (static_cast<int32_t>(n) << kNShift) | |
399 (static_cast<int32_t>(n) << kNShift) | | 395 (static_cast<int32_t>(imm_s) << kImmSShift) | |
400 (static_cast<int32_t>(imm_s) << kImmSShift) | | 396 (static_cast<int32_t>(imm_r) << kImmRShift); |
401 (static_cast<int32_t>(imm_r) << kImmRShift); | |
402 } | 397 } |
403 | 398 |
404 // Test if a given value can be encoded in the immediate field of a logical | 399 // Test if a given value can be encoded in the immediate field of a logical |
405 // instruction. | 400 // instruction. |
406 // If it can be encoded, the function returns true, and values pointed to by | 401 // If it can be encoded, the function returns true, and values pointed to by |
407 // n, imm_s and imm_r are updated with immediates encoded in the format | 402 // n, imm_s and imm_r are updated with immediates encoded in the format |
408 // required by the corresponding fields in the logical instruction. | 403 // required by the corresponding fields in the logical instruction. |
409 // If it can't be encoded, the function returns false, and the operand is | 404 // If it can't be encoded, the function returns false, and the operand is |
410 // undefined. | 405 // undefined. |
411 static bool IsImmLogical(uint64_t value, uint8_t width, Operand* imm_op); | 406 static bool IsImmLogical(uint64_t value, uint8_t width, Operand* imm_op); |
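Logical immediates are the bit patterns ARM64 can describe as a rotated run of ones replicated across the register, so masks such as 0xffffffff or 0x5555555555555555 encode directly while arbitrary constants do not. A usage sketch, assuming an Assembler instance named assm (illustrative):

Operand imm_op;
ASSERT(Operand::IsImmLogical(0x00000000ffffffffULL, kXRegSizeInBits, &imm_op));
ASSERT(Operand::IsImmLogical(0x5555555555555555ULL, kXRegSizeInBits, &imm_op));
ASSERT(!Operand::IsImmLogical(0x0000000123456789ULL, kXRegSizeInBits, &imm_op));
assm.andi(R0, R1, Immediate(0x00000000ffffffffLL));  // AND with an encodable mask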
(...skipping 15 matching lines...) |
427 } else if (IsImmLogical(imm, sz, op)) { | 422 } else if (IsImmLogical(imm, sz, op)) { |
428 op->type_ = BitfieldImm; | 423 op->type_ = BitfieldImm; |
429 } else { | 424 } else { |
430 op->encoding_ = 0; | 425 op->encoding_ = 0; |
431 op->type_ = Unknown; | 426 op->type_ = Unknown; |
432 } | 427 } |
433 return op->type_; | 428 return op->type_; |
434 } | 429 } |
435 | 430 |
436 private: | 431 private: |
437 uint32_t encoding() const { | 432 uint32_t encoding() const { return encoding_; } |
438 return encoding_; | 433 OperandType type() const { return type_; } |
439 } | |
440 OperandType type() const { | |
441 return type_; | |
442 } | |
443 | 434 |
444 uint32_t encoding_; | 435 uint32_t encoding_; |
445 OperandType type_; | 436 OperandType type_; |
446 | 437 |
447 friend class Assembler; | 438 friend class Assembler; |
448 }; | 439 }; |
449 | 440 |
450 | 441 |
451 class Assembler : public ValueObject { | 442 class Assembler : public ValueObject { |
452 public: | 443 public: |
453 explicit Assembler(bool use_far_branches = false); | 444 explicit Assembler(bool use_far_branches = false); |
454 ~Assembler() { } | 445 ~Assembler() {} |
455 | 446 |
456 void PopRegister(Register r) { | 447 void PopRegister(Register r) { Pop(r); } |
457 Pop(r); | |
458 } | |
459 | 448 |
460 void Drop(intptr_t stack_elements) { | 449 void Drop(intptr_t stack_elements) { |
461 add(SP, SP, Operand(stack_elements * kWordSize)); | 450 add(SP, SP, Operand(stack_elements * kWordSize)); |
462 } | 451 } |
463 | 452 |
464 void Bind(Label* label); | 453 void Bind(Label* label); |
465 void Jump(Label* label) { b(label); } | 454 void Jump(Label* label) { b(label); } |
466 | 455 |
467 // Misc. functionality | 456 // Misc. functionality |
468 intptr_t CodeSize() const { return buffer_.Size(); } | 457 intptr_t CodeSize() const { return buffer_.Size(); } |
(...skipping 12 matching lines...) |
481 ObjectPoolWrapper& object_pool_wrapper() { return object_pool_wrapper_; } | 470 ObjectPoolWrapper& object_pool_wrapper() { return object_pool_wrapper_; } |
482 | 471 |
483 RawObjectPool* MakeObjectPool() { | 472 RawObjectPool* MakeObjectPool() { |
484 return object_pool_wrapper_.MakeObjectPool(); | 473 return object_pool_wrapper_.MakeObjectPool(); |
485 } | 474 } |
486 | 475 |
487 bool use_far_branches() const { | 476 bool use_far_branches() const { |
488 return FLAG_use_far_branches || use_far_branches_; | 477 return FLAG_use_far_branches || use_far_branches_; |
489 } | 478 } |
490 | 479 |
491 void set_use_far_branches(bool b) { | 480 void set_use_far_branches(bool b) { use_far_branches_ = b; } |
492 use_far_branches_ = b; | |
493 } | |
494 | 481 |
495 void FinalizeInstructions(const MemoryRegion& region) { | 482 void FinalizeInstructions(const MemoryRegion& region) { |
496 buffer_.FinalizeInstructions(region); | 483 buffer_.FinalizeInstructions(region); |
497 } | 484 } |
498 | 485 |
499 // Debugging and bringup support. | 486 // Debugging and bringup support. |
500 void Stop(const char* message); | 487 void Stop(const char* message); |
501 void Unimplemented(const char* message); | 488 void Unimplemented(const char* message); |
502 void Untested(const char* message); | 489 void Untested(const char* message); |
503 void Unreachable(const char* message); | 490 void Unreachable(const char* message); |
(...skipping 81 matching lines...) |
585 AddSubWithCarryHelper(kDoubleWord, true, true, rd, rn, rm); | 572 AddSubWithCarryHelper(kDoubleWord, true, true, rd, rn, rm); |
586 } | 573 } |
587 void sbcw(Register rd, Register rn, Register rm) { | 574 void sbcw(Register rd, Register rn, Register rm) { |
588 AddSubWithCarryHelper(kWord, false, true, rd, rn, rm); | 575 AddSubWithCarryHelper(kWord, false, true, rd, rn, rm); |
589 } | 576 } |
590 void sbcsw(Register rd, Register rn, Register rm) { | 577 void sbcsw(Register rd, Register rn, Register rm) { |
591 AddSubWithCarryHelper(kWord, true, true, rd, rn, rm); | 578 AddSubWithCarryHelper(kWord, true, true, rd, rn, rm); |
592 } | 579 } |
593 | 580 |
594 // PC relative immediate add. imm is in bytes. | 581 // PC relative immediate add. imm is in bytes. |
595 void adr(Register rd, const Immediate& imm) { | 582 void adr(Register rd, const Immediate& imm) { EmitPCRelOp(ADR, rd, imm); } |
596 EmitPCRelOp(ADR, rd, imm); | |
597 } | |
598 | 583 |
599 // Logical immediate operations. | 584 // Logical immediate operations. |
600 void andi(Register rd, Register rn, const Immediate& imm) { | 585 void andi(Register rd, Register rn, const Immediate& imm) { |
601 Operand imm_op; | 586 Operand imm_op; |
602 const bool immok = | 587 const bool immok = |
603 Operand::IsImmLogical(imm.value(), kXRegSizeInBits, &imm_op); | 588 Operand::IsImmLogical(imm.value(), kXRegSizeInBits, &imm_op); |
604 ASSERT(immok); | 589 ASSERT(immok); |
605 EmitLogicalImmOp(ANDI, rd, rn, imm_op, kDoubleWord); | 590 EmitLogicalImmOp(ANDI, rd, rn, imm_op, kDoubleWord); |
606 } | 591 } |
607 void orri(Register rd, Register rn, const Immediate& imm) { | 592 void orri(Register rd, Register rn, const Immediate& imm) { |
(...skipping 185 matching lines...) |
793 csinv(rd, rn, rn, InvertCondition(cond)); | 778 csinv(rd, rn, rn, InvertCondition(cond)); |
794 } | 779 } |
795 void csetm(Register rd, Condition cond) { | 780 void csetm(Register rd, Condition cond) { |
796 csinv(rd, ZR, ZR, InvertCondition(cond)); | 781 csinv(rd, ZR, ZR, InvertCondition(cond)); |
797 } | 782 } |
798 | 783 |
799 // Comparison. | 784 // Comparison. |
800 // rn cmp o. | 785 // rn cmp o. |
801 // For add and sub, to use CSP for rn, o must be of type Operand::Extend. | 786 // For add and sub, to use CSP for rn, o must be of type Operand::Extend. |
802 // For an unmodified rm in this case, use Operand(rm, UXTX, 0); | 787 // For an unmodified rm in this case, use Operand(rm, UXTX, 0); |
803 void cmp(Register rn, Operand o) { | 788 void cmp(Register rn, Operand o) { subs(ZR, rn, o); } |
804 subs(ZR, rn, o); | |
805 } | |
806 // rn cmp -o. | 789 // rn cmp -o. |
807 void cmn(Register rn, Operand o) { | 790 void cmn(Register rn, Operand o) { adds(ZR, rn, o); } |
808 adds(ZR, rn, o); | |
809 } | |
810 | 791 |
811 void CompareRegisters(Register rn, Register rm) { | 792 void CompareRegisters(Register rn, Register rm) { |
812 if (rn == CSP) { | 793 if (rn == CSP) { |
813 // UXTX 0 on a 64-bit register (rm) is a nop, but forces R31 to be | 794 // UXTX 0 on a 64-bit register (rm) is a nop, but forces R31 to be |
814 // interpreted as CSP. | 795 // interpreted as CSP. |
815 cmp(CSP, Operand(rm, UXTX, 0)); | 796 cmp(CSP, Operand(rm, UXTX, 0)); |
816 } else { | 797 } else { |
817 cmp(rn, Operand(rm)); | 798 cmp(rn, Operand(rm)); |
818 } | 799 } |
819 } | 800 } |
820 | 801 |
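On ARM64, register number 31 reads as ZR in the shifted-register forms of add/sub but as the stack pointer in the extended-register forms, which is why the CSP path above goes through Operand(rm, UXTX, 0). Usage sketch, assuming an Assembler instance named assm (illustrative):

assm.CompareRegisters(R0, R1);        // subs ZR, R0, Operand(R1): shifted form
assm.CompareRegisters(CSP, R0);       // subs ZR, CSP, Operand(R0, UXTX, 0)
assm.cmp(CSP, Operand(R0, UXTX, 0));  // what the CSP branch expands to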
821 // Conditional branch. | 802 // Conditional branch. |
822 void b(Label* label, Condition cond = AL) { | 803 void b(Label* label, Condition cond = AL) { |
823 EmitConditionalBranch(BCOND, cond, label); | 804 EmitConditionalBranch(BCOND, cond, label); |
824 } | 805 } |
825 | 806 |
826 void b(int32_t offset) { | 807 void b(int32_t offset) { EmitUnconditionalBranchOp(B, offset); } |
827 EmitUnconditionalBranchOp(B, offset); | 808 void bl(int32_t offset) { EmitUnconditionalBranchOp(BL, offset); } |
828 } | |
829 void bl(int32_t offset) { | |
830 EmitUnconditionalBranchOp(BL, offset); | |
831 } | |
832 | 809 |
833 void cbz(Label* label, Register rt, OperandSize sz = kDoubleWord) { | 810 void cbz(Label* label, Register rt, OperandSize sz = kDoubleWord) { |
834 EmitCompareAndBranch(CBZ, rt, label, sz); | 811 EmitCompareAndBranch(CBZ, rt, label, sz); |
835 } | 812 } |
836 | 813 |
837 void cbnz(Label* label, Register rt, OperandSize sz = kDoubleWord) { | 814 void cbnz(Label* label, Register rt, OperandSize sz = kDoubleWord) { |
838 EmitCompareAndBranch(CBNZ, rt, label, sz); | 815 EmitCompareAndBranch(CBNZ, rt, label, sz); |
839 } | 816 } |
840 | 817 |
841 // Branch, link, return. | 818 // Branch, link, return. |
842 void br(Register rn) { | 819 void br(Register rn) { EmitUnconditionalBranchRegOp(BR, rn); } |
843 EmitUnconditionalBranchRegOp(BR, rn); | 820 void blr(Register rn) { EmitUnconditionalBranchRegOp(BLR, rn); } |
844 } | 821 void ret(Register rn = R30) { EmitUnconditionalBranchRegOp(RET, rn); } |
845 void blr(Register rn) { | |
846 EmitUnconditionalBranchRegOp(BLR, rn); | |
847 } | |
848 void ret(Register rn = R30) { | |
849 EmitUnconditionalBranchRegOp(RET, rn); | |
850 } | |
851 | 822 |
852 // Breakpoint. | 823 // Breakpoint. |
853 void brk(uint16_t imm) { | 824 void brk(uint16_t imm) { EmitExceptionGenOp(BRK, imm); } |
854 EmitExceptionGenOp(BRK, imm); | |
855 } | |
856 | 825 |
857 static uword GetBreakInstructionFiller() { | 826 static uword GetBreakInstructionFiller() { |
858 const intptr_t encoding = ExceptionGenOpEncoding(BRK, 0); | 827 const intptr_t encoding = ExceptionGenOpEncoding(BRK, 0); |
859 return encoding << 32 | encoding; | 828 return encoding << 32 | encoding; |
860 } | 829 } |
861 | 830 |
862 // Double floating point. | 831 // Double floating point. |
863 bool fmovdi(VRegister vd, double immd) { | 832 bool fmovdi(VRegister vd, double immd) { |
864 int64_t imm64 = bit_cast<int64_t, double>(immd); | 833 int64_t imm64 = bit_cast<int64_t, double>(immd); |
865 const uint8_t bit7 = imm64 >> 63; | 834 const uint8_t bit7 = imm64 >> 63; |
(...skipping 31 matching lines...) |
897 ASSERT(rn != CSP); | 866 ASSERT(rn != CSP); |
898 const Register crn = ConcreteRegister(rn); | 867 const Register crn = ConcreteRegister(rn); |
899 EmitFPIntCvtOp(SCVTFD, static_cast<Register>(vd), crn, kWord); | 868 EmitFPIntCvtOp(SCVTFD, static_cast<Register>(vd), crn, kWord); |
900 } | 869 } |
901 void fcvtzds(Register rd, VRegister vn) { | 870 void fcvtzds(Register rd, VRegister vn) { |
902 ASSERT(rd != R31); | 871 ASSERT(rd != R31); |
903 ASSERT(rd != CSP); | 872 ASSERT(rd != CSP); |
904 const Register crd = ConcreteRegister(rd); | 873 const Register crd = ConcreteRegister(rd); |
905 EmitFPIntCvtOp(FCVTZDS, crd, static_cast<Register>(vn)); | 874 EmitFPIntCvtOp(FCVTZDS, crd, static_cast<Register>(vn)); |
906 } | 875 } |
907 void fmovdd(VRegister vd, VRegister vn) { | 876 void fmovdd(VRegister vd, VRegister vn) { EmitFPOneSourceOp(FMOVDD, vd, vn); } |
908 EmitFPOneSourceOp(FMOVDD, vd, vn); | 877 void fabsd(VRegister vd, VRegister vn) { EmitFPOneSourceOp(FABSD, vd, vn); } |
909 } | 878 void fnegd(VRegister vd, VRegister vn) { EmitFPOneSourceOp(FNEGD, vd, vn); } |
910 void fabsd(VRegister vd, VRegister vn) { | 879 void fsqrtd(VRegister vd, VRegister vn) { EmitFPOneSourceOp(FSQRTD, vd, vn); } |
911 EmitFPOneSourceOp(FABSD, vd, vn); | 880 void fcvtsd(VRegister vd, VRegister vn) { EmitFPOneSourceOp(FCVTSD, vd, vn); } |
912 } | 881 void fcvtds(VRegister vd, VRegister vn) { EmitFPOneSourceOp(FCVTDS, vd, vn); } |
913 void fnegd(VRegister vd, VRegister vn) { | |
914 EmitFPOneSourceOp(FNEGD, vd, vn); | |
915 } | |
916 void fsqrtd(VRegister vd, VRegister vn) { | |
917 EmitFPOneSourceOp(FSQRTD, vd, vn); | |
918 } | |
919 void fcvtsd(VRegister vd, VRegister vn) { | |
920 EmitFPOneSourceOp(FCVTSD, vd, vn); | |
921 } | |
922 void fcvtds(VRegister vd, VRegister vn) { | |
923 EmitFPOneSourceOp(FCVTDS, vd, vn); | |
924 } | |
925 void fldrq(VRegister vt, Address a) { | 882 void fldrq(VRegister vt, Address a) { |
926 ASSERT(a.type() != Address::PCOffset); | 883 ASSERT(a.type() != Address::PCOffset); |
927 EmitLoadStoreReg(FLDRQ, static_cast<Register>(vt), a, kByte); | 884 EmitLoadStoreReg(FLDRQ, static_cast<Register>(vt), a, kByte); |
928 } | 885 } |
929 void fstrq(VRegister vt, Address a) { | 886 void fstrq(VRegister vt, Address a) { |
930 ASSERT(a.type() != Address::PCOffset); | 887 ASSERT(a.type() != Address::PCOffset); |
931 EmitLoadStoreReg(FSTRQ, static_cast<Register>(vt), a, kByte); | 888 EmitLoadStoreReg(FSTRQ, static_cast<Register>(vt), a, kByte); |
932 } | 889 } |
933 void fldrd(VRegister vt, Address a) { | 890 void fldrd(VRegister vt, Address a) { |
934 ASSERT(a.type() != Address::PCOffset); | 891 ASSERT(a.type() != Address::PCOffset); |
935 EmitLoadStoreReg(FLDR, static_cast<Register>(vt), a, kDWord); | 892 EmitLoadStoreReg(FLDR, static_cast<Register>(vt), a, kDWord); |
936 } | 893 } |
937 void fstrd(VRegister vt, Address a) { | 894 void fstrd(VRegister vt, Address a) { |
938 ASSERT(a.type() != Address::PCOffset); | 895 ASSERT(a.type() != Address::PCOffset); |
939 EmitLoadStoreReg(FSTR, static_cast<Register>(vt), a, kDWord); | 896 EmitLoadStoreReg(FSTR, static_cast<Register>(vt), a, kDWord); |
940 } | 897 } |
941 void fldrs(VRegister vt, Address a) { | 898 void fldrs(VRegister vt, Address a) { |
942 ASSERT(a.type() != Address::PCOffset); | 899 ASSERT(a.type() != Address::PCOffset); |
943 EmitLoadStoreReg(FLDR, static_cast<Register>(vt), a, kSWord); | 900 EmitLoadStoreReg(FLDR, static_cast<Register>(vt), a, kSWord); |
944 } | 901 } |
945 void fstrs(VRegister vt, Address a) { | 902 void fstrs(VRegister vt, Address a) { |
946 ASSERT(a.type() != Address::PCOffset); | 903 ASSERT(a.type() != Address::PCOffset); |
947 EmitLoadStoreReg(FSTR, static_cast<Register>(vt), a, kSWord); | 904 EmitLoadStoreReg(FSTR, static_cast<Register>(vt), a, kSWord); |
948 } | 905 } |
949 void fcmpd(VRegister vn, VRegister vm) { | 906 void fcmpd(VRegister vn, VRegister vm) { EmitFPCompareOp(FCMPD, vn, vm); } |
950 EmitFPCompareOp(FCMPD, vn, vm); | 907 void fcmpdz(VRegister vn) { EmitFPCompareOp(FCMPZD, vn, V0); } |
951 } | |
952 void fcmpdz(VRegister vn) { | |
953 EmitFPCompareOp(FCMPZD, vn, V0); | |
954 } | |
955 void fmuld(VRegister vd, VRegister vn, VRegister vm) { | 908 void fmuld(VRegister vd, VRegister vn, VRegister vm) { |
956 EmitFPTwoSourceOp(FMULD, vd, vn, vm); | 909 EmitFPTwoSourceOp(FMULD, vd, vn, vm); |
957 } | 910 } |
958 void fdivd(VRegister vd, VRegister vn, VRegister vm) { | 911 void fdivd(VRegister vd, VRegister vn, VRegister vm) { |
959 EmitFPTwoSourceOp(FDIVD, vd, vn, vm); | 912 EmitFPTwoSourceOp(FDIVD, vd, vn, vm); |
960 } | 913 } |
961 void faddd(VRegister vd, VRegister vn, VRegister vm) { | 914 void faddd(VRegister vd, VRegister vn, VRegister vm) { |
962 EmitFPTwoSourceOp(FADDD, vd, vn, vm); | 915 EmitFPTwoSourceOp(FADDD, vd, vn, vm); |
963 } | 916 } |
964 void fsubd(VRegister vd, VRegister vn, VRegister vm) { | 917 void fsubd(VRegister vd, VRegister vn, VRegister vm) { |
(...skipping 75 matching lines...) |
1040 } | 993 } |
1041 void vmaxd(VRegister vd, VRegister vn, VRegister vm) { | 994 void vmaxd(VRegister vd, VRegister vn, VRegister vm) { |
1042 EmitSIMDThreeSameOp(VMAXD, vd, vn, vm); | 995 EmitSIMDThreeSameOp(VMAXD, vd, vn, vm); |
1043 } | 996 } |
1044 void vrecpss(VRegister vd, VRegister vn, VRegister vm) { | 997 void vrecpss(VRegister vd, VRegister vn, VRegister vm) { |
1045 EmitSIMDThreeSameOp(VRECPSS, vd, vn, vm); | 998 EmitSIMDThreeSameOp(VRECPSS, vd, vn, vm); |
1046 } | 999 } |
1047 void vrsqrtss(VRegister vd, VRegister vn, VRegister vm) { | 1000 void vrsqrtss(VRegister vd, VRegister vn, VRegister vm) { |
1048 EmitSIMDThreeSameOp(VRSQRTSS, vd, vn, vm); | 1001 EmitSIMDThreeSameOp(VRSQRTSS, vd, vn, vm); |
1049 } | 1002 } |
1050 void vnot(VRegister vd, VRegister vn) { | 1003 void vnot(VRegister vd, VRegister vn) { EmitSIMDTwoRegOp(VNOT, vd, vn); } |
1051 EmitSIMDTwoRegOp(VNOT, vd, vn); | 1004 void vabss(VRegister vd, VRegister vn) { EmitSIMDTwoRegOp(VABSS, vd, vn); } |
1052 } | 1005 void vabsd(VRegister vd, VRegister vn) { EmitSIMDTwoRegOp(VABSD, vd, vn); } |
1053 void vabss(VRegister vd, VRegister vn) { | 1006 void vnegs(VRegister vd, VRegister vn) { EmitSIMDTwoRegOp(VNEGS, vd, vn); } |
1054 EmitSIMDTwoRegOp(VABSS, vd, vn); | 1007 void vnegd(VRegister vd, VRegister vn) { EmitSIMDTwoRegOp(VNEGD, vd, vn); } |
1055 } | 1008 void vsqrts(VRegister vd, VRegister vn) { EmitSIMDTwoRegOp(VSQRTS, vd, vn); } |
1056 void vabsd(VRegister vd, VRegister vn) { | 1009 void vsqrtd(VRegister vd, VRegister vn) { EmitSIMDTwoRegOp(VSQRTD, vd, vn); } |
1057 EmitSIMDTwoRegOp(VABSD, vd, vn); | |
1058 } | |
1059 void vnegs(VRegister vd, VRegister vn) { | |
1060 EmitSIMDTwoRegOp(VNEGS, vd, vn); | |
1061 } | |
1062 void vnegd(VRegister vd, VRegister vn) { | |
1063 EmitSIMDTwoRegOp(VNEGD, vd, vn); | |
1064 } | |
1065 void vsqrts(VRegister vd, VRegister vn) { | |
1066 EmitSIMDTwoRegOp(VSQRTS, vd, vn); | |
1067 } | |
1068 void vsqrtd(VRegister vd, VRegister vn) { | |
1069 EmitSIMDTwoRegOp(VSQRTD, vd, vn); | |
1070 } | |
1071 void vrecpes(VRegister vd, VRegister vn) { | 1010 void vrecpes(VRegister vd, VRegister vn) { |
1072 EmitSIMDTwoRegOp(VRECPES, vd, vn); | 1011 EmitSIMDTwoRegOp(VRECPES, vd, vn); |
1073 } | 1012 } |
1074 void vrsqrtes(VRegister vd, VRegister vn) { | 1013 void vrsqrtes(VRegister vd, VRegister vn) { |
1075 EmitSIMDTwoRegOp(VRSQRTES, vd, vn); | 1014 EmitSIMDTwoRegOp(VRSQRTES, vd, vn); |
1076 } | 1015 } |
1077 void vdupw(VRegister vd, Register rn) { | 1016 void vdupw(VRegister vd, Register rn) { |
1078 const VRegister vn = static_cast<VRegister>(rn); | 1017 const VRegister vn = static_cast<VRegister>(rn); |
1079 EmitSIMDCopyOp(VDUPI, vd, vn, kWord, 0, 0); | 1018 EmitSIMDCopyOp(VDUPI, vd, vn, kWord, 0, 0); |
1080 } | 1019 } |
(...skipping 31 matching lines...) |
1112 } | 1051 } |
1113 | 1052 |
1114 // Aliases. | 1053 // Aliases. |
1115 void mov(Register rd, Register rn) { | 1054 void mov(Register rd, Register rn) { |
1116 if ((rd == CSP) || (rn == CSP)) { | 1055 if ((rd == CSP) || (rn == CSP)) { |
1117 add(rd, rn, Operand(0)); | 1056 add(rd, rn, Operand(0)); |
1118 } else { | 1057 } else { |
1119 orr(rd, ZR, Operand(rn)); | 1058 orr(rd, ZR, Operand(rn)); |
1120 } | 1059 } |
1121 } | 1060 } |
1122 void vmov(VRegister vd, VRegister vn) { | 1061 void vmov(VRegister vd, VRegister vn) { vorr(vd, vn, vn); } |
1123 vorr(vd, vn, vn); | 1062 void mvn(Register rd, Register rm) { orn(rd, ZR, Operand(rm)); } |
1124 } | 1063 void neg(Register rd, Register rm) { sub(rd, ZR, Operand(rm)); } |
1125 void mvn(Register rd, Register rm) { | 1064 void negs(Register rd, Register rm) { subs(rd, ZR, Operand(rm)); } |
1126 orn(rd, ZR, Operand(rm)); | 1065 void mul(Register rd, Register rn, Register rm) { madd(rd, rn, rm, ZR); } |
1127 } | |
1128 void neg(Register rd, Register rm) { | |
1129 sub(rd, ZR, Operand(rm)); | |
1130 } | |
1131 void negs(Register rd, Register rm) { | |
1132 subs(rd, ZR, Operand(rm)); | |
1133 } | |
1134 void mul(Register rd, Register rn, Register rm) { | |
1135 madd(rd, rn, rm, ZR); | |
1136 } | |
1137 void Push(Register reg) { | 1066 void Push(Register reg) { |
1138 ASSERT(reg != PP); // Only push PP with TagAndPushPP(). | 1067 ASSERT(reg != PP); // Only push PP with TagAndPushPP(). |
1139 str(reg, Address(SP, -1 * kWordSize, Address::PreIndex)); | 1068 str(reg, Address(SP, -1 * kWordSize, Address::PreIndex)); |
1140 } | 1069 } |
1141 void Pop(Register reg) { | 1070 void Pop(Register reg) { |
1142 ASSERT(reg != PP); // Only pop PP with PopAndUntagPP(). | 1071 ASSERT(reg != PP); // Only pop PP with PopAndUntagPP(). |
1143 ldr(reg, Address(SP, 1 * kWordSize, Address::PostIndex)); | 1072 ldr(reg, Address(SP, 1 * kWordSize, Address::PostIndex)); |
1144 } | 1073 } |
1145 void PushPair(Register first, Register second) { | 1074 void PushPair(Register first, Register second) { |
1146 ASSERT((first != PP) && (second != PP)); | 1075 ASSERT((first != PP) && (second != PP)); |
(...skipping 23 matching lines...) |
1170 } | 1099 } |
1171 void TagAndPushPP() { | 1100 void TagAndPushPP() { |
1172 // Add the heap object tag back to PP before putting it on the stack. | 1101 // Add the heap object tag back to PP before putting it on the stack. |
1173 add(TMP, PP, Operand(kHeapObjectTag)); | 1102 add(TMP, PP, Operand(kHeapObjectTag)); |
1174 str(TMP, Address(SP, -1 * kWordSize, Address::PreIndex)); | 1103 str(TMP, Address(SP, -1 * kWordSize, Address::PreIndex)); |
1175 } | 1104 } |
1176 void TagAndPushPPAndPcMarker() { | 1105 void TagAndPushPPAndPcMarker() { |
1177 COMPILE_ASSERT(CODE_REG != TMP2); | 1106 COMPILE_ASSERT(CODE_REG != TMP2); |
1178 // Add the heap object tag back to PP before putting it on the stack. | 1107 // Add the heap object tag back to PP before putting it on the stack. |
1179 add(TMP2, PP, Operand(kHeapObjectTag)); | 1108 add(TMP2, PP, Operand(kHeapObjectTag)); |
1180 stp(TMP2, CODE_REG, | 1109 stp(TMP2, CODE_REG, Address(SP, -2 * kWordSize, Address::PairPreIndex)); |
1181 Address(SP, -2 * kWordSize, Address::PairPreIndex)); | |
1182 } | 1110 } |
1183 void PopAndUntagPP() { | 1111 void PopAndUntagPP() { |
1184 ldr(PP, Address(SP, 1 * kWordSize, Address::PostIndex)); | 1112 ldr(PP, Address(SP, 1 * kWordSize, Address::PostIndex)); |
1185 sub(PP, PP, Operand(kHeapObjectTag)); | 1113 sub(PP, PP, Operand(kHeapObjectTag)); |
1186 // The caller of PopAndUntagPP() must explicitly allow use of popped PP. | 1114 // The caller of PopAndUntagPP() must explicitly allow use of popped PP. |
1187 set_constant_pool_allowed(false); | 1115 set_constant_pool_allowed(false); |
1188 } | 1116 } |
1189 void tst(Register rn, Operand o) { | 1117 void tst(Register rn, Operand o) { ands(ZR, rn, o); } |
1190 ands(ZR, rn, o); | 1118 void tsti(Register rn, const Immediate& imm) { andis(ZR, rn, imm); } |
1191 } | |
1192 void tsti(Register rn, const Immediate& imm) { | |
1193 andis(ZR, rn, imm); | |
1194 } | |
1195 | 1119 |
1196 void LslImmediate(Register rd, Register rn, int shift) { | 1120 void LslImmediate(Register rd, Register rn, int shift) { |
1197 add(rd, ZR, Operand(rn, LSL, shift)); | 1121 add(rd, ZR, Operand(rn, LSL, shift)); |
1198 } | 1122 } |
1199 void LsrImmediate(Register rd, Register rn, int shift) { | 1123 void LsrImmediate(Register rd, Register rn, int shift) { |
1200 add(rd, ZR, Operand(rn, LSR, shift)); | 1124 add(rd, ZR, Operand(rn, LSR, shift)); |
1201 } | 1125 } |
1202 void AsrImmediate(Register rd, Register rn, int shift) { | 1126 void AsrImmediate(Register rd, Register rn, int shift) { |
1203 add(rd, ZR, Operand(rn, ASR, shift)); | 1127 add(rd, ZR, Operand(rn, ASR, shift)); |
1204 } | 1128 } |
1205 | 1129 |
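The shift-immediate helpers above are aliases for an add of ZR with a shifted source operand, i.e. a move of the shifted value. Usage sketch, assuming an Assembler instance named assm (illustrative):

assm.LslImmediate(R0, R1, 4);  // add R0, ZR, Operand(R1, LSL, 4): R0 = R1 << 4
assm.AsrImmediate(R2, R1, 1);  // add R2, ZR, Operand(R1, ASR, 1): arithmetic shift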
1206 void VRecps(VRegister vd, VRegister vn); | 1130 void VRecps(VRegister vd, VRegister vn); |
1207 void VRSqrts(VRegister vd, VRegister vn); | 1131 void VRSqrts(VRegister vd, VRegister vn); |
1208 | 1132 |
1209 void SmiUntag(Register reg) { | 1133 void SmiUntag(Register reg) { AsrImmediate(reg, reg, kSmiTagSize); } |
1210 AsrImmediate(reg, reg, kSmiTagSize); | |
1211 } | |
1212 void SmiUntag(Register dst, Register src) { | 1134 void SmiUntag(Register dst, Register src) { |
1213 AsrImmediate(dst, src, kSmiTagSize); | 1135 AsrImmediate(dst, src, kSmiTagSize); |
1214 } | 1136 } |
1215 void SmiTag(Register reg) { | 1137 void SmiTag(Register reg) { LslImmediate(reg, reg, kSmiTagSize); } |
1216 LslImmediate(reg, reg, kSmiTagSize); | |
1217 } | |
1218 void SmiTag(Register dst, Register src) { | 1138 void SmiTag(Register dst, Register src) { |
1219 LslImmediate(dst, src, kSmiTagSize); | 1139 LslImmediate(dst, src, kSmiTagSize); |
1220 } | 1140 } |
1221 | 1141 |
1222 void BranchIfNotSmi(Register reg, Label* label) { | 1142 void BranchIfNotSmi(Register reg, Label* label) { |
1223 tsti(reg, Immediate(kSmiTagMask)); | 1143 tsti(reg, Immediate(kSmiTagMask)); |
1224 b(label, NE); | 1144 b(label, NE); |
1225 } | 1145 } |
1226 | 1146 |
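SmiTag/SmiUntag and BranchIfNotSmi above implement the usual Dart Smi scheme: a one-bit tag of value 0, so tagging is a left shift by kSmiTagSize and untagging is an arithmetic right shift that preserves the sign. A standalone worked example, assuming kSmiTagSize == 1 and kSmiTagMask == 1 as in the other Dart VM ports:

#include <cassert>
#include <cstdint>

int main() {
  const int64_t value = -42;
  const int64_t tagged =
      static_cast<int64_t>(static_cast<uint64_t>(value) << 1);  // SmiTag
  assert((tagged & 1) == 0);       // kSmiTagMask test: low bit clear means Smi
  assert((tagged >> 1) == value);  // SmiUntag: ASR recovers the signed value
  return 0;
}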
1227 void Branch(const StubEntry& stub_entry, | 1147 void Branch(const StubEntry& stub_entry, |
(...skipping 18 matching lines...) |
1246 // PP should be passed for pp. | 1166 // PP should be passed for pp. |
1247 void AddImmediate(Register dest, Register rn, int64_t imm); | 1167 void AddImmediate(Register dest, Register rn, int64_t imm); |
1248 void AddImmediateSetFlags(Register dest, Register rn, int64_t imm); | 1168 void AddImmediateSetFlags(Register dest, Register rn, int64_t imm); |
1249 void SubImmediateSetFlags(Register dest, Register rn, int64_t imm); | 1169 void SubImmediateSetFlags(Register dest, Register rn, int64_t imm); |
1250 void AndImmediate(Register rd, Register rn, int64_t imm); | 1170 void AndImmediate(Register rd, Register rn, int64_t imm); |
1251 void OrImmediate(Register rd, Register rn, int64_t imm); | 1171 void OrImmediate(Register rd, Register rn, int64_t imm); |
1252 void XorImmediate(Register rd, Register rn, int64_t imm); | 1172 void XorImmediate(Register rd, Register rn, int64_t imm); |
1253 void TestImmediate(Register rn, int64_t imm); | 1173 void TestImmediate(Register rn, int64_t imm); |
1254 void CompareImmediate(Register rn, int64_t imm); | 1174 void CompareImmediate(Register rn, int64_t imm); |
1255 | 1175 |
1256 void LoadFromOffset(Register dest, Register base, int32_t offset, | 1176 void LoadFromOffset(Register dest, |
| 1177 Register base, |
| 1178 int32_t offset, |
1257 OperandSize sz = kDoubleWord); | 1179 OperandSize sz = kDoubleWord); |
1258 void LoadFieldFromOffset(Register dest, Register base, int32_t offset, | 1180 void LoadFieldFromOffset(Register dest, |
| 1181 Register base, |
| 1182 int32_t offset, |
1259 OperandSize sz = kDoubleWord) { | 1183 OperandSize sz = kDoubleWord) { |
1260 LoadFromOffset(dest, base, offset - kHeapObjectTag, sz); | 1184 LoadFromOffset(dest, base, offset - kHeapObjectTag, sz); |
1261 } | 1185 } |
1262 void LoadDFromOffset(VRegister dest, Register base, int32_t offset); | 1186 void LoadDFromOffset(VRegister dest, Register base, int32_t offset); |
1263 void LoadDFieldFromOffset(VRegister dest, Register base, int32_t offset) { | 1187 void LoadDFieldFromOffset(VRegister dest, Register base, int32_t offset) { |
1264 LoadDFromOffset(dest, base, offset - kHeapObjectTag); | 1188 LoadDFromOffset(dest, base, offset - kHeapObjectTag); |
1265 } | 1189 } |
1266 void LoadQFromOffset(VRegister dest, Register base, int32_t offset); | 1190 void LoadQFromOffset(VRegister dest, Register base, int32_t offset); |
1267 void LoadQFieldFromOffset(VRegister dest, Register base, int32_t offset) { | 1191 void LoadQFieldFromOffset(VRegister dest, Register base, int32_t offset) { |
1268 LoadQFromOffset(dest, base, offset - kHeapObjectTag); | 1192 LoadQFromOffset(dest, base, offset - kHeapObjectTag); |
1269 } | 1193 } |
1270 | 1194 |
1271 void StoreToOffset(Register src, Register base, int32_t offset, | 1195 void StoreToOffset(Register src, |
| 1196 Register base, |
| 1197 int32_t offset, |
1272 OperandSize sz = kDoubleWord); | 1198 OperandSize sz = kDoubleWord); |
1273 void StoreFieldToOffset(Register src, Register base, int32_t offset, | 1199 void StoreFieldToOffset(Register src, |
| 1200 Register base, |
| 1201 int32_t offset, |
1274 OperandSize sz = kDoubleWord) { | 1202 OperandSize sz = kDoubleWord) { |
1275 StoreToOffset(src, base, offset - kHeapObjectTag, sz); | 1203 StoreToOffset(src, base, offset - kHeapObjectTag, sz); |
1276 } | 1204 } |
1277 void StoreDToOffset(VRegister src, Register base, int32_t offset); | 1205 void StoreDToOffset(VRegister src, Register base, int32_t offset); |
1278 void StoreDFieldToOffset(VRegister src, Register base, int32_t offset) { | 1206 void StoreDFieldToOffset(VRegister src, Register base, int32_t offset) { |
1279 StoreDToOffset(src, base, offset - kHeapObjectTag); | 1207 StoreDToOffset(src, base, offset - kHeapObjectTag); |
1280 } | 1208 } |
1281 void StoreQToOffset(VRegister src, Register base, int32_t offset); | 1209 void StoreQToOffset(VRegister src, Register base, int32_t offset); |
1282 void StoreQFieldToOffset(VRegister src, Register base, int32_t offset) { | 1210 void StoreQFieldToOffset(VRegister src, Register base, int32_t offset) { |
1283 StoreQToOffset(src, base, offset - kHeapObjectTag); | 1211 StoreQToOffset(src, base, offset - kHeapObjectTag); |
(...skipping 17 matching lines...) |
1301 void StoreIntoObjectNoBarrier(Register object, | 1229 void StoreIntoObjectNoBarrier(Register object, |
1302 const Address& dest, | 1230 const Address& dest, |
1303 const Object& value); | 1231 const Object& value); |
1304 void StoreIntoObjectOffsetNoBarrier(Register object, | 1232 void StoreIntoObjectOffsetNoBarrier(Register object, |
1305 int32_t offset, | 1233 int32_t offset, |
1306 const Object& value); | 1234 const Object& value); |
1307 | 1235 |
1308 // Object pool, loading from pool, etc. | 1236 // Object pool, loading from pool, etc. |
1309 void LoadPoolPointer(Register pp = PP); | 1237 void LoadPoolPointer(Register pp = PP); |
1310 | 1238 |
1311 bool constant_pool_allowed() const { | 1239 bool constant_pool_allowed() const { return constant_pool_allowed_; } |
1312 return constant_pool_allowed_; | 1240 void set_constant_pool_allowed(bool b) { constant_pool_allowed_ = b; } |
1313 } | |
1314 void set_constant_pool_allowed(bool b) { | |
1315 constant_pool_allowed_ = b; | |
1316 } | |
1317 | 1241 |
1318 intptr_t FindImmediate(int64_t imm); | 1242 intptr_t FindImmediate(int64_t imm); |
1319 bool CanLoadFromObjectPool(const Object& object) const; | 1243 bool CanLoadFromObjectPool(const Object& object) const; |
1320 void LoadNativeEntry(Register dst, const ExternalLabel* label); | 1244 void LoadNativeEntry(Register dst, const ExternalLabel* label); |
1321 void LoadFunctionFromCalleePool(Register dst, | 1245 void LoadFunctionFromCalleePool(Register dst, |
1322 const Function& function, | 1246 const Function& function, |
1323 Register new_pp); | 1247 Register new_pp); |
1324 void LoadIsolate(Register dst); | 1248 void LoadIsolate(Register dst); |
1325 void LoadObject(Register dst, const Object& obj); | 1249 void LoadObject(Register dst, const Object& obj); |
1326 void LoadUniqueObject(Register dst, const Object& obj); | 1250 void LoadUniqueObject(Register dst, const Object& obj); |
(...skipping 32 matching lines...) |
1359 void LeaveCallRuntimeFrame(); | 1283 void LeaveCallRuntimeFrame(); |
1360 void CallRuntime(const RuntimeEntry& entry, intptr_t argument_count); | 1284 void CallRuntime(const RuntimeEntry& entry, intptr_t argument_count); |
1361 | 1285 |
1362 // Set up a stub frame so that the stack traversal code can easily identify | 1286 // Set up a stub frame so that the stack traversal code can easily identify |
1363 // a stub frame. | 1287 // a stub frame. |
1364 void EnterStubFrame(); | 1288 void EnterStubFrame(); |
1365 void LeaveStubFrame(); | 1289 void LeaveStubFrame(); |
1366 | 1290 |
1367 void MonomorphicCheckedEntry(); | 1291 void MonomorphicCheckedEntry(); |
1368 | 1292 |
1369 void UpdateAllocationStats(intptr_t cid, | 1293 void UpdateAllocationStats(intptr_t cid, Heap::Space space); |
1370 Heap::Space space); | |
1371 | 1294 |
1372 void UpdateAllocationStatsWithSize(intptr_t cid, | 1295 void UpdateAllocationStatsWithSize(intptr_t cid, |
1373 Register size_reg, | 1296 Register size_reg, |
1374 Heap::Space space); | 1297 Heap::Space space); |
1375 | 1298 |
1376 // If allocation tracing for |cid| is enabled, will jump to |trace| label, | 1299 // If allocation tracing for |cid| is enabled, will jump to |trace| label, |
1377 // which will allocate in the runtime where tracing occurs. | 1300 // which will allocate in the runtime where tracing occurs. |
1378 void MaybeTraceAllocation(intptr_t cid, | 1301 void MaybeTraceAllocation(intptr_t cid, Register temp_reg, Label* trace); |
1379 Register temp_reg, | |
1380 Label* trace); | |
1381 | 1302 |
1382 // Inlined allocation of an instance of class 'cls', code has no runtime | 1303 // Inlined allocation of an instance of class 'cls', code has no runtime |
1383 // calls. Jump to 'failure' if the instance cannot be allocated here. | 1304 // calls. Jump to 'failure' if the instance cannot be allocated here. |
1384 // Allocated instance is returned in 'instance_reg'. | 1305 // Allocated instance is returned in 'instance_reg'. |
1385 // Only the tags field of the object is initialized. | 1306 // Only the tags field of the object is initialized. |
1386 void TryAllocate(const Class& cls, | 1307 void TryAllocate(const Class& cls, |
1387 Label* failure, | 1308 Label* failure, |
1388 Register instance_reg, | 1309 Register instance_reg, |
1389 Register temp_reg); | 1310 Register temp_reg); |
1390 | 1311 |
(...skipping 23 matching lines...) |
1414 Register array, | 1335 Register array, |
1415 Register index); | 1336 Register index); |
1416 void LoadElementAddressForRegIndex(Register address, | 1337 void LoadElementAddressForRegIndex(Register address, |
1417 bool is_load, | 1338 bool is_load, |
1418 bool is_external, | 1339 bool is_external, |
1419 intptr_t cid, | 1340 intptr_t cid, |
1420 intptr_t index_scale, | 1341 intptr_t index_scale, |
1421 Register array, | 1342 Register array, |
1422 Register index); | 1343 Register index); |
1423 | 1344 |
1424 void LoadUnaligned(Register dst, Register addr, Register tmp, | 1345 void LoadUnaligned(Register dst, Register addr, Register tmp, OperandSize sz); |
1425 OperandSize sz); | 1346 void StoreUnaligned(Register src, |
1426 void StoreUnaligned(Register src, Register addr, Register tmp, | 1347 Register addr, |
| 1348 Register tmp, |
1427 OperandSize sz); | 1349 OperandSize sz); |
1428 | 1350 |
1429 private: | 1351 private: |
1430 AssemblerBuffer buffer_; // Contains position independent code. | 1352 AssemblerBuffer buffer_; // Contains position independent code. |
1431 ObjectPoolWrapper object_pool_wrapper_; | 1353 ObjectPoolWrapper object_pool_wrapper_; |
1432 int32_t prologue_offset_; | 1354 int32_t prologue_offset_; |
1433 bool has_single_entry_point_; | 1355 bool has_single_entry_point_; |
1434 bool use_far_branches_; | 1356 bool use_far_branches_; |
1435 | 1357 |
1436 class CodeComment : public ZoneAllocated { | 1358 class CodeComment : public ZoneAllocated { |
1437 public: | 1359 public: |
1438 CodeComment(intptr_t pc_offset, const String& comment) | 1360 CodeComment(intptr_t pc_offset, const String& comment) |
1439 : pc_offset_(pc_offset), comment_(comment) { } | 1361 : pc_offset_(pc_offset), comment_(comment) {} |
1440 | 1362 |
1441 intptr_t pc_offset() const { return pc_offset_; } | 1363 intptr_t pc_offset() const { return pc_offset_; } |
1442 const String& comment() const { return comment_; } | 1364 const String& comment() const { return comment_; } |
1443 | 1365 |
1444 private: | 1366 private: |
1445 intptr_t pc_offset_; | 1367 intptr_t pc_offset_; |
1446 const String& comment_; | 1368 const String& comment_; |
1447 | 1369 |
1448 DISALLOW_COPY_AND_ASSIGN(CodeComment); | 1370 DISALLOW_COPY_AND_ASSIGN(CodeComment); |
1449 }; | 1371 }; |
1450 | 1372 |
1451 GrowableArray<CodeComment*> comments_; | 1373 GrowableArray<CodeComment*> comments_; |
1452 | 1374 |
1453 bool constant_pool_allowed_; | 1375 bool constant_pool_allowed_; |
1454 | 1376 |
1455 void LoadWordFromPoolOffset(Register dst, uint32_t offset, Register pp = PP); | 1377 void LoadWordFromPoolOffset(Register dst, uint32_t offset, Register pp = PP); |
1456 void LoadWordFromPoolOffsetFixed(Register dst, uint32_t offset); | 1378 void LoadWordFromPoolOffsetFixed(Register dst, uint32_t offset); |
1457 | 1379 |
1458 void LoadObjectHelper(Register dst, const Object& obj, bool is_unique); | 1380 void LoadObjectHelper(Register dst, const Object& obj, bool is_unique); |
1459 | 1381 |
1460 void AddSubHelper(OperandSize os, bool set_flags, bool subtract, | 1382 void AddSubHelper(OperandSize os, |
1461 Register rd, Register rn, Operand o) { | 1383 bool set_flags, |
| 1384 bool subtract, |
| 1385 Register rd, |
| 1386 Register rn, |
| 1387 Operand o) { |
1462 ASSERT((rd != R31) && (rn != R31)); | 1388 ASSERT((rd != R31) && (rn != R31)); |
1463 const Register crd = ConcreteRegister(rd); | 1389 const Register crd = ConcreteRegister(rd); |
1464 const Register crn = ConcreteRegister(rn); | 1390 const Register crn = ConcreteRegister(rn); |
1465 if (o.type() == Operand::Immediate) { | 1391 if (o.type() == Operand::Immediate) { |
1466 ASSERT(rn != ZR); | 1392 ASSERT(rn != ZR); |
1467 EmitAddSubImmOp(subtract ? SUBI : ADDI, crd, crn, o, os, set_flags); | 1393 EmitAddSubImmOp(subtract ? SUBI : ADDI, crd, crn, o, os, set_flags); |
1468 } else if (o.type() == Operand::Shifted) { | 1394 } else if (o.type() == Operand::Shifted) { |
1469 ASSERT((rd != CSP) && (rn != CSP)); | 1395 ASSERT((rd != CSP) && (rn != CSP)); |
1470 EmitAddSubShiftExtOp(subtract ? SUB : ADD, crd, crn, o, os, set_flags); | 1396 EmitAddSubShiftExtOp(subtract ? SUB : ADD, crd, crn, o, os, set_flags); |
1471 } else { | 1397 } else { |
1472 ASSERT(o.type() == Operand::Extended); | 1398 ASSERT(o.type() == Operand::Extended); |
1473 ASSERT((rd != CSP) && (rn != ZR)); | 1399 ASSERT((rd != CSP) && (rn != ZR)); |
1474 EmitAddSubShiftExtOp(subtract ? SUB : ADD, crd, crn, o, os, set_flags); | 1400 EmitAddSubShiftExtOp(subtract ? SUB : ADD, crd, crn, o, os, set_flags); |
1475 } | 1401 } |
1476 } | 1402 } |
1477 | 1403 |
1478 void AddSubWithCarryHelper(OperandSize sz, bool set_flags, bool subtract, | 1404 void AddSubWithCarryHelper(OperandSize sz, |
1479 Register rd, Register rn, Register rm) { | 1405 bool set_flags, |
| 1406 bool subtract, |
| 1407 Register rd, |
| 1408 Register rn, |
| 1409 Register rm) { |
1480 ASSERT((rd != R31) && (rn != R31) && (rm != R31)); | 1410 ASSERT((rd != R31) && (rn != R31) && (rm != R31)); |
1481 ASSERT((rd != CSP) && (rn != CSP) && (rm != CSP)); | 1411 ASSERT((rd != CSP) && (rn != CSP) && (rm != CSP)); |
1482 const Register crd = ConcreteRegister(rd); | 1412 const Register crd = ConcreteRegister(rd); |
1483 const Register crn = ConcreteRegister(rn); | 1413 const Register crn = ConcreteRegister(rn); |
1484 const Register crm = ConcreteRegister(rm); | 1414 const Register crm = ConcreteRegister(rm); |
1485 const int32_t size = (sz == kDoubleWord) ? B31 : 0; | 1415 const int32_t size = (sz == kDoubleWord) ? B31 : 0; |
1486 const int32_t s = set_flags ? B29 : 0; | 1416 const int32_t s = set_flags ? B29 : 0; |
1487 const int32_t op = subtract ? SBC : ADC; | 1417 const int32_t op = subtract ? SBC : ADC; |
1488 const int32_t encoding = | 1418 const int32_t encoding = op | size | s | |
1489 op | size | s | | 1419 (static_cast<int32_t>(crd) << kRdShift) | |
1490 (static_cast<int32_t>(crd) << kRdShift) | | 1420 (static_cast<int32_t>(crn) << kRnShift) | |
1491 (static_cast<int32_t>(crn) << kRnShift) | | 1421 (static_cast<int32_t>(crm) << kRmShift); |
1492 (static_cast<int32_t>(crm) << kRmShift); | |
1493 Emit(encoding); | 1422 Emit(encoding); |
1494 } | 1423 } |
1495 | 1424 |
1496 void EmitAddSubImmOp(AddSubImmOp op, Register rd, Register rn, | 1425 void EmitAddSubImmOp(AddSubImmOp op, |
1497 Operand o, OperandSize sz, bool set_flags) { | 1426 Register rd, |
| 1427 Register rn, |
| 1428 Operand o, |
| 1429 OperandSize sz, |
| 1430 bool set_flags) { |
1498 ASSERT((sz == kDoubleWord) || (sz == kWord) || (sz == kUnsignedWord)); | 1431 ASSERT((sz == kDoubleWord) || (sz == kWord) || (sz == kUnsignedWord)); |
1499 const int32_t size = (sz == kDoubleWord) ? B31 : 0; | 1432 const int32_t size = (sz == kDoubleWord) ? B31 : 0; |
1500 const int32_t s = set_flags ? B29 : 0; | 1433 const int32_t s = set_flags ? B29 : 0; |
1501 const int32_t encoding = | 1434 const int32_t encoding = |
1502 op | size | s | | 1435 op | size | s | (static_cast<int32_t>(rd) << kRdShift) | |
1503 (static_cast<int32_t>(rd) << kRdShift) | | 1436 (static_cast<int32_t>(rn) << kRnShift) | o.encoding(); |
1504 (static_cast<int32_t>(rn) << kRnShift) | | |
1505 o.encoding(); | |
1506 Emit(encoding); | 1437 Emit(encoding); |
1507 } | 1438 } |
1508 | 1439 |
1509 void EmitLogicalImmOp(LogicalImmOp op, Register rd, Register rn, | 1440 void EmitLogicalImmOp(LogicalImmOp op, |
1510 Operand o, OperandSize sz) { | 1441 Register rd, |
| 1442 Register rn, |
| 1443 Operand o, |
| 1444 OperandSize sz) { |
1511 ASSERT((sz == kDoubleWord) || (sz == kWord) || (sz == kUnsignedWord)); | 1445 ASSERT((sz == kDoubleWord) || (sz == kWord) || (sz == kUnsignedWord)); |
1512 ASSERT((rd != R31) && (rn != R31)); | 1446 ASSERT((rd != R31) && (rn != R31)); |
1513 ASSERT(rn != CSP); | 1447 ASSERT(rn != CSP); |
1514 ASSERT((op == ANDIS) || (rd != ZR)); // op != ANDIS => rd != ZR. | 1448 ASSERT((op == ANDIS) || (rd != ZR)); // op != ANDIS => rd != ZR. |
1515 ASSERT((op != ANDIS) || (rd != CSP)); // op == ANDIS => rd != CSP. | 1449 ASSERT((op != ANDIS) || (rd != CSP)); // op == ANDIS => rd != CSP. |
1516 ASSERT(o.type() == Operand::BitfieldImm); | 1450 ASSERT(o.type() == Operand::BitfieldImm); |
1517 const int32_t size = (sz == kDoubleWord) ? B31 : 0; | 1451 const int32_t size = (sz == kDoubleWord) ? B31 : 0; |
1518 const Register crd = ConcreteRegister(rd); | 1452 const Register crd = ConcreteRegister(rd); |
1519 const Register crn = ConcreteRegister(rn); | 1453 const Register crn = ConcreteRegister(rn); |
1520 const int32_t encoding = | 1454 const int32_t encoding = |
1521 op | size | | 1455 op | size | (static_cast<int32_t>(crd) << kRdShift) | |
1522 (static_cast<int32_t>(crd) << kRdShift) | | 1456 (static_cast<int32_t>(crn) << kRnShift) | o.encoding(); |
1523 (static_cast<int32_t>(crn) << kRnShift) | | |
1524 o.encoding(); | |
1525 Emit(encoding); | 1457 Emit(encoding); |
1526 } | 1458 } |
1527 | 1459 |
1528 void EmitLogicalShiftOp(LogicalShiftOp op, | 1460 void EmitLogicalShiftOp(LogicalShiftOp op, |
1529 Register rd, Register rn, Operand o, OperandSize sz) { | 1461 Register rd, |
| 1462 Register rn, |
| 1463 Operand o, |
| 1464 OperandSize sz) { |
1530 ASSERT((sz == kDoubleWord) || (sz == kWord) || (sz == kUnsignedWord)); | 1465 ASSERT((sz == kDoubleWord) || (sz == kWord) || (sz == kUnsignedWord)); |
1531 ASSERT((rd != R31) && (rn != R31)); | 1466 ASSERT((rd != R31) && (rn != R31)); |
1532 ASSERT((rd != CSP) && (rn != CSP)); | 1467 ASSERT((rd != CSP) && (rn != CSP)); |
1533 ASSERT(o.type() == Operand::Shifted); | 1468 ASSERT(o.type() == Operand::Shifted); |
1534 const int32_t size = (sz == kDoubleWord) ? B31 : 0; | 1469 const int32_t size = (sz == kDoubleWord) ? B31 : 0; |
1535 const Register crd = ConcreteRegister(rd); | 1470 const Register crd = ConcreteRegister(rd); |
1536 const Register crn = ConcreteRegister(rn); | 1471 const Register crn = ConcreteRegister(rn); |
1537 const int32_t encoding = | 1472 const int32_t encoding = |
1538 op | size | | 1473 op | size | (static_cast<int32_t>(crd) << kRdShift) | |
1539 (static_cast<int32_t>(crd) << kRdShift) | | 1474 (static_cast<int32_t>(crn) << kRnShift) | o.encoding(); |
1540 (static_cast<int32_t>(crn) << kRnShift) | | |
1541 o.encoding(); | |
1542 Emit(encoding); | 1475 Emit(encoding); |
1543 } | 1476 } |
1544 | 1477 |
1545 void EmitAddSubShiftExtOp(AddSubShiftExtOp op, | 1478 void EmitAddSubShiftExtOp(AddSubShiftExtOp op, |
1546 Register rd, Register rn, Operand o, | 1479 Register rd, |
1547 OperandSize sz, bool set_flags) { | 1480 Register rn, |
| 1481 Operand o, |
| 1482 OperandSize sz, |
| 1483 bool set_flags) { |
1548 ASSERT((sz == kDoubleWord) || (sz == kWord) || (sz == kUnsignedWord)); | 1484 ASSERT((sz == kDoubleWord) || (sz == kWord) || (sz == kUnsignedWord)); |
1549 const int32_t size = (sz == kDoubleWord) ? B31 : 0; | 1485 const int32_t size = (sz == kDoubleWord) ? B31 : 0; |
1550 const int32_t s = set_flags ? B29 : 0; | 1486 const int32_t s = set_flags ? B29 : 0; |
1551 const int32_t encoding = | 1487 const int32_t encoding = |
1552 op | size | s | | 1488 op | size | s | (static_cast<int32_t>(rd) << kRdShift) | |
1553 (static_cast<int32_t>(rd) << kRdShift) | | 1489 (static_cast<int32_t>(rn) << kRnShift) | o.encoding(); |
1554 (static_cast<int32_t>(rn) << kRnShift) | | |
1555 o.encoding(); | |
1556 Emit(encoding); | 1490 Emit(encoding); |
1557 } | 1491 } |
1558 | 1492 |
1559 int32_t EncodeImm19BranchOffset(int64_t imm, int32_t instr) { | 1493 int32_t EncodeImm19BranchOffset(int64_t imm, int32_t instr) { |
1560 if (!CanEncodeImm19BranchOffset(imm)) { | 1494 if (!CanEncodeImm19BranchOffset(imm)) { |
1561 ASSERT(!use_far_branches()); | 1495 ASSERT(!use_far_branches()); |
1562 Thread::Current()->long_jump_base()->Jump( | 1496 Thread::Current()->long_jump_base()->Jump(1, |
1563 1, Object::branch_offset_error()); | 1497 Object::branch_offset_error()); |
1564 } | 1498 } |
1565 const int32_t imm32 = static_cast<int32_t>(imm); | 1499 const int32_t imm32 = static_cast<int32_t>(imm); |
1566 const int32_t off = (((imm32 >> 2) << kImm19Shift) & kImm19Mask); | 1500 const int32_t off = (((imm32 >> 2) << kImm19Shift) & kImm19Mask); |
1567 return (instr & ~kImm19Mask) | off; | 1501 return (instr & ~kImm19Mask) | off; |
1568 } | 1502 } |
1569 | 1503 |
1570 int64_t DecodeImm19BranchOffset(int32_t instr) { | 1504 int64_t DecodeImm19BranchOffset(int32_t instr) { |
1571 const int32_t off = (((instr & kImm19Mask) >> kImm19Shift) << 13) >> 11; | 1505 const int32_t off = (((instr & kImm19Mask) >> kImm19Shift) << 13) >> 11; |
1572 return static_cast<int64_t>(off); | 1506 return static_cast<int64_t>(off); |
1573 } | 1507 } |
(...skipping 29 matching lines...) |
1603 const int32_t imm32 = static_cast<int32_t>(imm); | 1537 const int32_t imm32 = static_cast<int32_t>(imm); |
1604 const int32_t off = (((imm32 >> 2) << kImm26Shift) & kImm26Mask); | 1538 const int32_t off = (((imm32 >> 2) << kImm26Shift) & kImm26Mask); |
1605 return (instr & ~kImm26Mask) | off; | 1539 return (instr & ~kImm26Mask) | off; |
1606 } | 1540 } |
1607 | 1541 |
1608 int64_t DecodeImm26BranchOffset(int32_t instr) { | 1542 int64_t DecodeImm26BranchOffset(int32_t instr) { |
1609 const int32_t off = (((instr & kImm26Mask) >> kImm26Shift) << 6) >> 4; | 1543 const int32_t off = (((instr & kImm26Mask) >> kImm26Shift) << 6) >> 4; |
1610 return static_cast<int64_t>(off); | 1544 return static_cast<int64_t>(off); |
1611 } | 1545 } |
1612 | 1546 |
1613 void EmitCompareAndBranchOp(CompareAndBranchOp op, Register rt, int64_t imm, | 1547 void EmitCompareAndBranchOp(CompareAndBranchOp op, |
| 1548 Register rt, |
| 1549 int64_t imm, |
1614 OperandSize sz) { | 1550 OperandSize sz) { |
1615 ASSERT((sz == kDoubleWord) || (sz == kWord) || (sz == kUnsignedWord)); | 1551 ASSERT((sz == kDoubleWord) || (sz == kWord) || (sz == kUnsignedWord)); |
1616 ASSERT(Utils::IsInt(21, imm) && ((imm & 0x3) == 0)); | 1552 ASSERT(Utils::IsInt(21, imm) && ((imm & 0x3) == 0)); |
1617 ASSERT((rt != CSP) && (rt != R31)); | 1553 ASSERT((rt != CSP) && (rt != R31)); |
1618 const Register crt = ConcreteRegister(rt); | 1554 const Register crt = ConcreteRegister(rt); |
1619 const int32_t size = (sz == kDoubleWord) ? B31 : 0; | 1555 const int32_t size = (sz == kDoubleWord) ? B31 : 0; |
1620 const int32_t encoded_offset = EncodeImm19BranchOffset(imm, 0); | 1556 const int32_t encoded_offset = EncodeImm19BranchOffset(imm, 0); |
1621 const int32_t encoding = | 1557 const int32_t encoding = |
1622 op | size | | 1558 op | size | (static_cast<int32_t>(crt) << kRtShift) | encoded_offset; |
1623 (static_cast<int32_t>(crt) << kRtShift) | | |
1624 encoded_offset; | |
1625 Emit(encoding); | 1559 Emit(encoding); |
1626 } | 1560 } |
1627 | 1561 |
1628 void EmitConditionalBranchOp(ConditionalBranchOp op, Condition cond, | 1562 void EmitConditionalBranchOp(ConditionalBranchOp op, |
| 1563 Condition cond, |
1629 int64_t imm) { | 1564 int64_t imm) { |
1630 const int32_t off = EncodeImm19BranchOffset(imm, 0); | 1565 const int32_t off = EncodeImm19BranchOffset(imm, 0); |
1631 const int32_t encoding = | 1566 const int32_t encoding = |
1632 op | | 1567 op | (static_cast<int32_t>(cond) << kCondShift) | off; |
1633 (static_cast<int32_t>(cond) << kCondShift) | | |
1634 off; | |
1635 Emit(encoding); | 1568 Emit(encoding); |
1636 } | 1569 } |
1637 | 1570 |
1638 bool CanEncodeImm19BranchOffset(int64_t offset) { | 1571 bool CanEncodeImm19BranchOffset(int64_t offset) { |
1639 ASSERT(Utils::IsAligned(offset, 4)); | 1572 ASSERT(Utils::IsAligned(offset, 4)); |
1640 return Utils::IsInt(21, offset); | 1573 return Utils::IsInt(21, offset); |
1641 } | 1574 } |
1642 | 1575 |
1643 void EmitConditionalBranch(ConditionalBranchOp op, Condition cond, | 1576 void EmitConditionalBranch(ConditionalBranchOp op, |
| 1577 Condition cond, |
1644 Label* label) { | 1578 Label* label) { |
1645 if (label->IsBound()) { | 1579 if (label->IsBound()) { |
1646 const int64_t dest = label->Position() - buffer_.Size(); | 1580 const int64_t dest = label->Position() - buffer_.Size(); |
1647 if (use_far_branches() && !CanEncodeImm19BranchOffset(dest)) { | 1581 if (use_far_branches() && !CanEncodeImm19BranchOffset(dest)) { |
1648 if (cond == AL) { | 1582 if (cond == AL) { |
1649 // If the condition is AL, we must always branch to dest. There is | 1583 // If the condition is AL, we must always branch to dest. There is |
1650 // no need for a guard branch. | 1584 // no need for a guard branch. |
1651 b(dest); | 1585 b(dest); |
1652 } else { | 1586 } else { |
1653 EmitConditionalBranchOp( | 1587 EmitConditionalBranchOp(op, InvertCondition(cond), |
1654 op, InvertCondition(cond), 2 * Instr::kInstrSize); | 1588 2 * Instr::kInstrSize); |
1655 b(dest); | 1589 b(dest); |
1656 } | 1590 } |
1657 } else { | 1591 } else { |
1658 EmitConditionalBranchOp(op, cond, dest); | 1592 EmitConditionalBranchOp(op, cond, dest); |
1659 } | 1593 } |
1660 } else { | 1594 } else { |
1661 const int64_t position = buffer_.Size(); | 1595 const int64_t position = buffer_.Size(); |
1662 if (use_far_branches()) { | 1596 if (use_far_branches()) { |
1663 // When cond is AL, this guard branch will be rewritten as a nop when | 1597 // When cond is AL, this guard branch will be rewritten as a nop when |
1664 // the label is bound. We don't write it as a nop initially because it | 1598 // the label is bound. We don't write it as a nop initially because it |
1665 // makes the decoding code in Bind simpler. | 1599 // makes the decoding code in Bind simpler. |
1666 EmitConditionalBranchOp( | 1600 EmitConditionalBranchOp(op, InvertCondition(cond), |
1667 op, InvertCondition(cond), 2 * Instr::kInstrSize); | 1601 2 * Instr::kInstrSize); |
1668 b(label->position_); | 1602 b(label->position_); |
1669 } else { | 1603 } else { |
1670 EmitConditionalBranchOp(op, cond, label->position_); | 1604 EmitConditionalBranchOp(op, cond, label->position_); |
1671 } | 1605 } |
1672 label->LinkTo(position); | 1606 label->LinkTo(position); |
1673 } | 1607 } |
1674 } | 1608 } |
1675 | 1609 |
1676 void EmitCompareAndBranch(CompareAndBranchOp op, Register rt, | 1610 void EmitCompareAndBranch(CompareAndBranchOp op, |
1677 Label* label, OperandSize sz) { | 1611 Register rt, |
| 1612 Label* label, |
| 1613 OperandSize sz) { |
1678 if (label->IsBound()) { | 1614 if (label->IsBound()) { |
1679 const int64_t dest = label->Position() - buffer_.Size(); | 1615 const int64_t dest = label->Position() - buffer_.Size(); |
1680 if (use_far_branches() && !CanEncodeImm19BranchOffset(dest)) { | 1616 if (use_far_branches() && !CanEncodeImm19BranchOffset(dest)) { |
1681 EmitCompareAndBranchOp( | 1617 EmitCompareAndBranchOp(op == CBZ ? CBNZ : CBZ, rt, |
1682 op == CBZ ? CBNZ : CBZ, rt, 2 * Instr::kInstrSize, sz); | 1618 2 * Instr::kInstrSize, sz); |
1683 b(dest); | 1619 b(dest); |
1684 } else { | 1620 } else { |
1685 EmitCompareAndBranchOp(op, rt, dest, sz); | 1621 EmitCompareAndBranchOp(op, rt, dest, sz); |
1686 } | 1622 } |
1687 } else { | 1623 } else { |
1688 const int64_t position = buffer_.Size(); | 1624 const int64_t position = buffer_.Size(); |
1689 if (use_far_branches()) { | 1625 if (use_far_branches()) { |
1690 EmitCompareAndBranchOp( | 1626 EmitCompareAndBranchOp(op == CBZ ? CBNZ : CBZ, rt, |
1691 op == CBZ ? CBNZ : CBZ, rt, 2 * Instr::kInstrSize, sz); | 1627 2 * Instr::kInstrSize, sz); |
1692 b(label->position_); | 1628 b(label->position_); |
1693 } else { | 1629 } else { |
1694 EmitCompareAndBranchOp(op, rt, label->position_, sz); | 1630 EmitCompareAndBranchOp(op, rt, label->position_, sz); |
1695 } | 1631 } |
1696 label->LinkTo(position); | 1632 label->LinkTo(position); |
1697 } | 1633 } |
1698 } | 1634 } |
1699 | 1635 |
1700 bool CanEncodeImm26BranchOffset(int64_t offset) { | 1636 bool CanEncodeImm26BranchOffset(int64_t offset) { |
1701 ASSERT(Utils::IsAligned(offset, 4)); | 1637 ASSERT(Utils::IsAligned(offset, 4)); |
1702 return Utils::IsInt(26, offset); | 1638 return Utils::IsInt(26, offset); |
1703 } | 1639 } |
1704 | 1640 |
1705 void EmitUnconditionalBranchOp(UnconditionalBranchOp op, int64_t offset) { | 1641 void EmitUnconditionalBranchOp(UnconditionalBranchOp op, int64_t offset) { |
1706 ASSERT(CanEncodeImm26BranchOffset(offset)); | 1642 ASSERT(CanEncodeImm26BranchOffset(offset)); |
1707 const int32_t off = ((offset >> 2) << kImm26Shift) & kImm26Mask; | 1643 const int32_t off = ((offset >> 2) << kImm26Shift) & kImm26Mask; |
1708 const int32_t encoding = op | off; | 1644 const int32_t encoding = op | off; |
1709 Emit(encoding); | 1645 Emit(encoding); |
1710 } | 1646 } |
1711 | 1647 |
1712 void EmitUnconditionalBranchRegOp(UnconditionalBranchRegOp op, Register rn) { | 1648 void EmitUnconditionalBranchRegOp(UnconditionalBranchRegOp op, Register rn) { |
1713 ASSERT((rn != CSP) && (rn != R31)); | 1649 ASSERT((rn != CSP) && (rn != R31)); |
1714 const Register crn = ConcreteRegister(rn); | 1650 const Register crn = ConcreteRegister(rn); |
1715 const int32_t encoding = | 1651 const int32_t encoding = op | (static_cast<int32_t>(crn) << kRnShift); |
1716 op | (static_cast<int32_t>(crn) << kRnShift); | |
1717 Emit(encoding); | 1652 Emit(encoding); |
1718 } | 1653 } |
1719 | 1654 |
1720 static int32_t ExceptionGenOpEncoding(ExceptionGenOp op, uint16_t imm) { | 1655 static int32_t ExceptionGenOpEncoding(ExceptionGenOp op, uint16_t imm) { |
1721 return op | (static_cast<int32_t>(imm) << kImm16Shift); | 1656 return op | (static_cast<int32_t>(imm) << kImm16Shift); |
1722 } | 1657 } |
1723 | 1658 |
1724 void EmitExceptionGenOp(ExceptionGenOp op, uint16_t imm) { | 1659 void EmitExceptionGenOp(ExceptionGenOp op, uint16_t imm) { |
1725 Emit(ExceptionGenOpEncoding(op, imm)); | 1660 Emit(ExceptionGenOpEncoding(op, imm)); |
1726 } | 1661 } |
1727 | 1662 |
1728 void EmitMoveWideOp(MoveWideOp op, Register rd, const Immediate& imm, | 1663 void EmitMoveWideOp(MoveWideOp op, |
1729 int hw_idx, OperandSize sz) { | 1664 Register rd, |
| 1665 const Immediate& imm, |
| 1666 int hw_idx, |
| 1667 OperandSize sz) { |
1730 ASSERT((hw_idx >= 0) && (hw_idx <= 3)); | 1668 ASSERT((hw_idx >= 0) && (hw_idx <= 3)); |
1731 ASSERT((sz == kDoubleWord) || (sz == kWord) || (sz == kUnsignedWord)); | 1669 ASSERT((sz == kDoubleWord) || (sz == kWord) || (sz == kUnsignedWord)); |
1732 const int32_t size = (sz == kDoubleWord) ? B31 : 0; | 1670 const int32_t size = (sz == kDoubleWord) ? B31 : 0; |
1733 const int32_t encoding = | 1671 const int32_t encoding = |
1734 op | size | | 1672 op | size | (static_cast<int32_t>(rd) << kRdShift) | |
1735 (static_cast<int32_t>(rd) << kRdShift) | | |
1736 (static_cast<int32_t>(hw_idx) << kHWShift) | | 1673 (static_cast<int32_t>(hw_idx) << kHWShift) | |
1737 (static_cast<int32_t>(imm.value() & 0xffff) << kImm16Shift); | 1674 (static_cast<int32_t>(imm.value() & 0xffff) << kImm16Shift); |
1738 Emit(encoding); | 1675 Emit(encoding); |
1739 } | 1676 } |
1740 | 1677 |
1741 void EmitLoadStoreExclusive(LoadStoreExclusiveOp op, Register rs, Register rn, | 1678 void EmitLoadStoreExclusive(LoadStoreExclusiveOp op, |
1742 Register rt, OperandSize sz = kDoubleWord) { | 1679 Register rs, |
| 1680 Register rn, |
| 1681 Register rt, |
| 1682 OperandSize sz = kDoubleWord) { |
1743 ASSERT(sz == kDoubleWord); | 1683 ASSERT(sz == kDoubleWord); |
1744 const int32_t size = B31 | B30; | 1684 const int32_t size = B31 | B30; |
1745 | 1685 |
1746 ASSERT((rs != kNoRegister) && (rs != ZR)); | 1686 ASSERT((rs != kNoRegister) && (rs != ZR)); |
1747 ASSERT((rn != kNoRegister) && (rn != ZR)); | 1687 ASSERT((rn != kNoRegister) && (rn != ZR)); |
1748 ASSERT((rt != kNoRegister) && (rt != ZR)); | 1688 ASSERT((rt != kNoRegister) && (rt != ZR)); |
1749 | 1689 |
1750 const int32_t encoding = | 1690 const int32_t encoding = |
1751 op | size | | 1691 op | size | (static_cast<int32_t>(ConcreteRegister(rs)) << kRsShift) | |
1752 (static_cast<int32_t>(ConcreteRegister(rs)) << kRsShift) | | |
1753 (static_cast<int32_t>(ConcreteRegister(rn)) << kRnShift) | | 1692 (static_cast<int32_t>(ConcreteRegister(rn)) << kRnShift) | |
1754 (static_cast<int32_t>(ConcreteRegister(rt)) << kRtShift); | 1693 (static_cast<int32_t>(ConcreteRegister(rt)) << kRtShift); |
1755 | 1694 |
1756 Emit(encoding); | 1695 Emit(encoding); |
1757 } | 1696 } |
1758 | 1697 |
1759 void EmitLoadStoreReg(LoadStoreRegOp op, Register rt, Address a, | 1698 void EmitLoadStoreReg(LoadStoreRegOp op, |
| 1699 Register rt, |
| 1700 Address a, |
1760 OperandSize sz) { | 1701 OperandSize sz) { |
1761 const Register crt = ConcreteRegister(rt); | 1702 const Register crt = ConcreteRegister(rt); |
1762 const int32_t size = Log2OperandSizeBytes(sz); | 1703 const int32_t size = Log2OperandSizeBytes(sz); |
1763 const int32_t encoding = | 1704 const int32_t encoding = op | ((size & 0x3) << kSzShift) | |
1764 op | ((size & 0x3) << kSzShift) | | 1705 (static_cast<int32_t>(crt) << kRtShift) | |
1765 (static_cast<int32_t>(crt) << kRtShift) | | 1706 a.encoding(); |
1766 a.encoding(); | |
1767 Emit(encoding); | 1707 Emit(encoding); |
1768 } | 1708 } |
1769 | 1709 |
1770 void EmitLoadRegLiteral(LoadRegLiteralOp op, Register rt, Address a, | 1710 void EmitLoadRegLiteral(LoadRegLiteralOp op, |
| 1711 Register rt, |
| 1712 Address a, |
1771 OperandSize sz) { | 1713 OperandSize sz) { |
1772 ASSERT((sz == kDoubleWord) || (sz == kWord) || (sz == kUnsignedWord)); | 1714 ASSERT((sz == kDoubleWord) || (sz == kWord) || (sz == kUnsignedWord)); |
1773 ASSERT((rt != CSP) && (rt != R31)); | 1715 ASSERT((rt != CSP) && (rt != R31)); |
1774 const Register crt = ConcreteRegister(rt); | 1716 const Register crt = ConcreteRegister(rt); |
1775 const int32_t size = (sz == kDoubleWord) ? B30 : 0; | 1717 const int32_t size = (sz == kDoubleWord) ? B30 : 0; |
1776 const int32_t encoding = | 1718 const int32_t encoding = |
1777 op | size | | 1719 op | size | (static_cast<int32_t>(crt) << kRtShift) | a.encoding(); |
1778 (static_cast<int32_t>(crt) << kRtShift) | | |
1779 a.encoding(); | |
1780 Emit(encoding); | 1720 Emit(encoding); |
1781 } | 1721 } |
1782 | 1722 |
1783 void EmitLoadStoreRegPair(LoadStoreRegPairOp op, | 1723 void EmitLoadStoreRegPair(LoadStoreRegPairOp op, |
1784 Register rt, Register rt2, Address a, | 1724 Register rt, |
| 1725 Register rt2, |
| 1726 Address a, |
1785 OperandSize sz) { | 1727 OperandSize sz) { |
1786 ASSERT((sz == kDoubleWord) || (sz == kWord) || (sz == kUnsignedWord)); | 1728 ASSERT((sz == kDoubleWord) || (sz == kWord) || (sz == kUnsignedWord)); |
1787 ASSERT((rt != CSP) && (rt != R31)); | 1729 ASSERT((rt != CSP) && (rt != R31)); |
1788 ASSERT((rt2 != CSP) && (rt2 != R31)); | 1730 ASSERT((rt2 != CSP) && (rt2 != R31)); |
1789 const Register crt = ConcreteRegister(rt); | 1731 const Register crt = ConcreteRegister(rt); |
1790 const Register crt2 = ConcreteRegister(rt2); | 1732 const Register crt2 = ConcreteRegister(rt2); |
1791 int32_t opc = 0; | 1733 int32_t opc = 0; |
1792 switch (sz) { | 1734 switch (sz) { |
1793 case kDoubleWord: opc = B31; break; | 1735 case kDoubleWord: |
1794 case kWord: opc = B30; break; | 1736 opc = B31; |
1795 case kUnsignedWord: opc = 0; break; | 1737 break; |
1796 default: UNREACHABLE(); break; | 1738 case kWord: |
| 1739 opc = B30; |
| 1740 break; |
| 1741 case kUnsignedWord: |
| 1742 opc = 0; |
| 1743 break; |
| 1744 default: |
| 1745 UNREACHABLE(); |
| 1746 break; |
1797 } | 1747 } |
1798 const int32_t encoding = | 1748 const int32_t encoding = |
1799 opc | op | | 1749 opc | op | (static_cast<int32_t>(crt) << kRtShift) | |
1800 (static_cast<int32_t>(crt) << kRtShift) | | 1750 (static_cast<int32_t>(crt2) << kRt2Shift) | a.encoding(); |
1801 (static_cast<int32_t>(crt2) << kRt2Shift) | | |
1802 a.encoding(); | |
1803 Emit(encoding); | 1751 Emit(encoding); |
1804 } | 1752 } |
1805 | 1753 |
1806 void EmitPCRelOp(PCRelOp op, Register rd, const Immediate& imm) { | 1754 void EmitPCRelOp(PCRelOp op, Register rd, const Immediate& imm) { |
1807 ASSERT(Utils::IsInt(21, imm.value())); | 1755 ASSERT(Utils::IsInt(21, imm.value())); |
1808 ASSERT((rd != R31) && (rd != CSP)); | 1756 ASSERT((rd != R31) && (rd != CSP)); |
1809 const Register crd = ConcreteRegister(rd); | 1757 const Register crd = ConcreteRegister(rd); |
1810 const int32_t loimm = (imm.value() & 0x3) << 29; | 1758 const int32_t loimm = (imm.value() & 0x3) << 29; |
1811 const int32_t hiimm = ((imm.value() >> 2) << kImm19Shift) & kImm19Mask; | 1759 const int32_t hiimm = ((imm.value() >> 2) << kImm19Shift) & kImm19Mask; |
1812 const int32_t encoding = | 1760 const int32_t encoding = |
1813 op | loimm | hiimm | | 1761 op | loimm | hiimm | (static_cast<int32_t>(crd) << kRdShift); |
1814 (static_cast<int32_t>(crd) << kRdShift); | |
1815 Emit(encoding); | 1762 Emit(encoding); |
1816 } | 1763 } |
1817 | 1764 |
1818 void EmitMiscDP1Source(MiscDP1SourceOp op, | 1765 void EmitMiscDP1Source(MiscDP1SourceOp op, |
1819 Register rd, Register rn, | 1766 Register rd, |
| 1767 Register rn, |
1820 OperandSize sz) { | 1768 OperandSize sz) { |
1821 ASSERT((rd != CSP) && (rn != CSP)); | 1769 ASSERT((rd != CSP) && (rn != CSP)); |
1822 ASSERT((sz == kDoubleWord) || (sz == kWord) || (sz == kUnsignedWord)); | 1770 ASSERT((sz == kDoubleWord) || (sz == kWord) || (sz == kUnsignedWord)); |
1823 const Register crd = ConcreteRegister(rd); | 1771 const Register crd = ConcreteRegister(rd); |
1824 const Register crn = ConcreteRegister(rn); | 1772 const Register crn = ConcreteRegister(rn); |
1825 const int32_t size = (sz == kDoubleWord) ? B31 : 0; | 1773 const int32_t size = (sz == kDoubleWord) ? B31 : 0; |
1826 const int32_t encoding = | 1774 const int32_t encoding = op | size | |
1827 op | size | | 1775 (static_cast<int32_t>(crd) << kRdShift) | |
1828 (static_cast<int32_t>(crd) << kRdShift) | | 1776 (static_cast<int32_t>(crn) << kRnShift); |
1829 (static_cast<int32_t>(crn) << kRnShift); | |
1830 Emit(encoding); | 1777 Emit(encoding); |
1831 } | 1778 } |
1832 | 1779 |
1833 void EmitMiscDP2Source(MiscDP2SourceOp op, | 1780 void EmitMiscDP2Source(MiscDP2SourceOp op, |
1834 Register rd, Register rn, Register rm, | 1781 Register rd, |
| 1782 Register rn, |
| 1783 Register rm, |
1835 OperandSize sz) { | 1784 OperandSize sz) { |
1836 ASSERT((rd != CSP) && (rn != CSP) && (rm != CSP)); | 1785 ASSERT((rd != CSP) && (rn != CSP) && (rm != CSP)); |
1837 ASSERT((sz == kDoubleWord) || (sz == kWord) || (sz == kUnsignedWord)); | 1786 ASSERT((sz == kDoubleWord) || (sz == kWord) || (sz == kUnsignedWord)); |
1838 const Register crd = ConcreteRegister(rd); | 1787 const Register crd = ConcreteRegister(rd); |
1839 const Register crn = ConcreteRegister(rn); | 1788 const Register crn = ConcreteRegister(rn); |
1840 const Register crm = ConcreteRegister(rm); | 1789 const Register crm = ConcreteRegister(rm); |
1841 const int32_t size = (sz == kDoubleWord) ? B31 : 0; | 1790 const int32_t size = (sz == kDoubleWord) ? B31 : 0; |
1842 const int32_t encoding = | 1791 const int32_t encoding = op | size | |
1843 op | size | | 1792 (static_cast<int32_t>(crd) << kRdShift) | |
1844 (static_cast<int32_t>(crd) << kRdShift) | | 1793 (static_cast<int32_t>(crn) << kRnShift) | |
1845 (static_cast<int32_t>(crn) << kRnShift) | | 1794 (static_cast<int32_t>(crm) << kRmShift); |
1846 (static_cast<int32_t>(crm) << kRmShift); | |
1847 Emit(encoding); | 1795 Emit(encoding); |
1848 } | 1796 } |
1849 | 1797 |
1850 void EmitMiscDP3Source(MiscDP3SourceOp op, | 1798 void EmitMiscDP3Source(MiscDP3SourceOp op, |
1851 Register rd, Register rn, Register rm, Register ra, | 1799 Register rd, |
| 1800 Register rn, |
| 1801 Register rm, |
| 1802 Register ra, |
1852 OperandSize sz) { | 1803 OperandSize sz) { |
1853 ASSERT((rd != CSP) && (rn != CSP) && (rm != CSP) && (ra != CSP)); | 1804 ASSERT((rd != CSP) && (rn != CSP) && (rm != CSP) && (ra != CSP)); |
1854 ASSERT((sz == kDoubleWord) || (sz == kWord) || (sz == kUnsignedWord)); | 1805 ASSERT((sz == kDoubleWord) || (sz == kWord) || (sz == kUnsignedWord)); |
1855 const Register crd = ConcreteRegister(rd); | 1806 const Register crd = ConcreteRegister(rd); |
1856 const Register crn = ConcreteRegister(rn); | 1807 const Register crn = ConcreteRegister(rn); |
1857 const Register crm = ConcreteRegister(rm); | 1808 const Register crm = ConcreteRegister(rm); |
1858 const Register cra = ConcreteRegister(ra); | 1809 const Register cra = ConcreteRegister(ra); |
1859 const int32_t size = (sz == kDoubleWord) ? B31 : 0; | 1810 const int32_t size = (sz == kDoubleWord) ? B31 : 0; |
1860 const int32_t encoding = | 1811 const int32_t encoding = op | size | |
1861 op | size | | 1812 (static_cast<int32_t>(crd) << kRdShift) | |
1862 (static_cast<int32_t>(crd) << kRdShift) | | 1813 (static_cast<int32_t>(crn) << kRnShift) | |
1863 (static_cast<int32_t>(crn) << kRnShift) | | 1814 (static_cast<int32_t>(crm) << kRmShift) | |
1864 (static_cast<int32_t>(crm) << kRmShift) | | 1815 (static_cast<int32_t>(cra) << kRaShift); |
1865 (static_cast<int32_t>(cra) << kRaShift); | |
1866 Emit(encoding); | 1816 Emit(encoding); |
1867 } | 1817 } |
1868 | 1818 |
1869 void EmitConditionalSelect(ConditionalSelectOp op, | 1819 void EmitConditionalSelect(ConditionalSelectOp op, |
1870 Register rd, Register rn, Register rm, | 1820 Register rd, |
1871 Condition cond, OperandSize sz) { | 1821 Register rn, |
| 1822 Register rm, |
| 1823 Condition cond, |
| 1824 OperandSize sz) { |
1872 ASSERT((rd != CSP) && (rn != CSP) && (rm != CSP)); | 1825 ASSERT((rd != CSP) && (rn != CSP) && (rm != CSP)); |
1873 ASSERT((sz == kDoubleWord) || (sz == kWord) || (sz == kUnsignedWord)); | 1826 ASSERT((sz == kDoubleWord) || (sz == kWord) || (sz == kUnsignedWord)); |
1874 const Register crd = ConcreteRegister(rd); | 1827 const Register crd = ConcreteRegister(rd); |
1875 const Register crn = ConcreteRegister(rn); | 1828 const Register crn = ConcreteRegister(rn); |
1876 const Register crm = ConcreteRegister(rm); | 1829 const Register crm = ConcreteRegister(rm); |
1877 const int32_t size = (sz == kDoubleWord) ? B31 : 0; | 1830 const int32_t size = (sz == kDoubleWord) ? B31 : 0; |
1878 const int32_t encoding = | 1831 const int32_t encoding = op | size | |
1879 op | size | | 1832 (static_cast<int32_t>(crd) << kRdShift) | |
1880 (static_cast<int32_t>(crd) << kRdShift) | | 1833 (static_cast<int32_t>(crn) << kRnShift) | |
1881 (static_cast<int32_t>(crn) << kRnShift) | | 1834 (static_cast<int32_t>(crm) << kRmShift) | |
1882 (static_cast<int32_t>(crm) << kRmShift) | | 1835 (static_cast<int32_t>(cond) << kSelCondShift); |
1883 (static_cast<int32_t>(cond) << kSelCondShift); | |
1884 Emit(encoding); | 1836 Emit(encoding); |
1885 } | 1837 } |
1886 | 1838 |
1887 void EmitFPImm(FPImmOp op, VRegister vd, uint8_t imm8) { | 1839 void EmitFPImm(FPImmOp op, VRegister vd, uint8_t imm8) { |
1888 const int32_t encoding = | 1840 const int32_t encoding = |
1889 op | | 1841 op | (static_cast<int32_t>(vd) << kVdShift) | (imm8 << kImm8Shift); |
1890 (static_cast<int32_t>(vd) << kVdShift) | | |
1891 (imm8 << kImm8Shift); | |
1892 Emit(encoding); | 1842 Emit(encoding); |
1893 } | 1843 } |
1894 | 1844 |
1895 void EmitFPIntCvtOp(FPIntCvtOp op, Register rd, Register rn, | 1845 void EmitFPIntCvtOp(FPIntCvtOp op, |
| 1846 Register rd, |
| 1847 Register rn, |
1896 OperandSize sz = kDoubleWord) { | 1848 OperandSize sz = kDoubleWord) { |
1897 ASSERT((sz == kDoubleWord) || (sz == kWord)); | 1849 ASSERT((sz == kDoubleWord) || (sz == kWord)); |
1898 const int32_t sfield = (sz == kDoubleWord) ? B31 : 0; | 1850 const int32_t sfield = (sz == kDoubleWord) ? B31 : 0; |
1899 const int32_t encoding = | 1851 const int32_t encoding = op | (static_cast<int32_t>(rd) << kRdShift) | |
1900 op | | 1852 (static_cast<int32_t>(rn) << kRnShift) | sfield; |
1901 (static_cast<int32_t>(rd) << kRdShift) | | |
1902 (static_cast<int32_t>(rn) << kRnShift) | | |
1903 sfield; | |
1904 Emit(encoding); | 1853 Emit(encoding); |
1905 } | 1854 } |
1906 | 1855 |
1907 void EmitFPOneSourceOp(FPOneSourceOp op, VRegister vd, VRegister vn) { | 1856 void EmitFPOneSourceOp(FPOneSourceOp op, VRegister vd, VRegister vn) { |
1908 const int32_t encoding = | 1857 const int32_t encoding = op | (static_cast<int32_t>(vd) << kVdShift) | |
1909 op | | 1858 (static_cast<int32_t>(vn) << kVnShift); |
1910 (static_cast<int32_t>(vd) << kVdShift) | | |
1911 (static_cast<int32_t>(vn) << kVnShift); | |
1912 Emit(encoding); | 1859 Emit(encoding); |
1913 } | 1860 } |
1914 | 1861 |
1915 void EmitFPTwoSourceOp(FPTwoSourceOp op, | 1862 void EmitFPTwoSourceOp(FPTwoSourceOp op, |
1916 VRegister vd, VRegister vn, VRegister vm) { | 1863 VRegister vd, |
1917 const int32_t encoding = | 1864 VRegister vn, |
1918 op | | 1865 VRegister vm) { |
1919 (static_cast<int32_t>(vd) << kVdShift) | | 1866 const int32_t encoding = op | (static_cast<int32_t>(vd) << kVdShift) | |
1920 (static_cast<int32_t>(vn) << kVnShift) | | 1867 (static_cast<int32_t>(vn) << kVnShift) | |
1921 (static_cast<int32_t>(vm) << kVmShift); | 1868 (static_cast<int32_t>(vm) << kVmShift); |
1922 Emit(encoding); | 1869 Emit(encoding); |
1923 } | 1870 } |
1924 | 1871 |
1925 void EmitFPCompareOp(FPCompareOp op, VRegister vn, VRegister vm) { | 1872 void EmitFPCompareOp(FPCompareOp op, VRegister vn, VRegister vm) { |
1926 const int32_t encoding = | 1873 const int32_t encoding = op | (static_cast<int32_t>(vn) << kVnShift) | |
1927 op | | 1874 (static_cast<int32_t>(vm) << kVmShift); |
1928 (static_cast<int32_t>(vn) << kVnShift) | | |
1929 (static_cast<int32_t>(vm) << kVmShift); | |
1930 Emit(encoding); | 1875 Emit(encoding); |
1931 } | 1876 } |
1932 | 1877 |
1933 void EmitSIMDThreeSameOp(SIMDThreeSameOp op, | 1878 void EmitSIMDThreeSameOp(SIMDThreeSameOp op, |
1934 VRegister vd, VRegister vn, VRegister vm) { | 1879 VRegister vd, |
1935 const int32_t encoding = | 1880 VRegister vn, |
1936 op | | 1881 VRegister vm) { |
1937 (static_cast<int32_t>(vd) << kVdShift) | | 1882 const int32_t encoding = op | (static_cast<int32_t>(vd) << kVdShift) | |
1938 (static_cast<int32_t>(vn) << kVnShift) | | 1883 (static_cast<int32_t>(vn) << kVnShift) | |
1939 (static_cast<int32_t>(vm) << kVmShift); | 1884 (static_cast<int32_t>(vm) << kVmShift); |
1940 Emit(encoding); | 1885 Emit(encoding); |
1941 } | 1886 } |
1942 | 1887 |
1943 void EmitSIMDCopyOp(SIMDCopyOp op, VRegister vd, VRegister vn, OperandSize sz, | 1888 void EmitSIMDCopyOp(SIMDCopyOp op, |
1944 int32_t idx4, int32_t idx5) { | 1889 VRegister vd, |
| 1890 VRegister vn, |
| 1891 OperandSize sz, |
| 1892 int32_t idx4, |
| 1893 int32_t idx5) { |
1945 const int32_t shift = Log2OperandSizeBytes(sz); | 1894 const int32_t shift = Log2OperandSizeBytes(sz); |
1946 const int32_t imm5 = ((idx5 << (shift + 1)) | (1 << shift)) & 0x1f; | 1895 const int32_t imm5 = ((idx5 << (shift + 1)) | (1 << shift)) & 0x1f; |
1947 const int32_t imm4 = (idx4 << shift) & 0xf; | 1896 const int32_t imm4 = (idx4 << shift) & 0xf; |
1948 const int32_t encoding = | 1897 const int32_t encoding = op | (imm5 << kImm5Shift) | (imm4 << kImm4Shift) | |
1949 op | | 1898 (static_cast<int32_t>(vd) << kVdShift) | |
1950 (imm5 << kImm5Shift) | | 1899 (static_cast<int32_t>(vn) << kVnShift); |
1951 (imm4 << kImm4Shift) | | |
1952 (static_cast<int32_t>(vd) << kVdShift) | | |
1953 (static_cast<int32_t>(vn) << kVnShift); | |
1954 Emit(encoding); | 1900 Emit(encoding); |
1955 } | 1901 } |
1956 | 1902 |
1957 void EmitSIMDTwoRegOp(SIMDTwoRegOp op, VRegister vd, VRegister vn) { | 1903 void EmitSIMDTwoRegOp(SIMDTwoRegOp op, VRegister vd, VRegister vn) { |
1958 const int32_t encoding = | 1904 const int32_t encoding = op | (static_cast<int32_t>(vd) << kVdShift) | |
1959 op | | 1905 (static_cast<int32_t>(vn) << kVnShift); |
1960 (static_cast<int32_t>(vd) << kVdShift) | | |
1961 (static_cast<int32_t>(vn) << kVnShift); | |
1962 Emit(encoding); | 1906 Emit(encoding); |
1963 } | 1907 } |
1964 | 1908 |
1965 void StoreIntoObjectFilter(Register object, Register value, Label* no_update); | 1909 void StoreIntoObjectFilter(Register object, Register value, Label* no_update); |
1966 | 1910 |
1967 // Shorter filtering sequence that assumes that value is not a smi. | 1911 // Shorter filtering sequence that assumes that value is not a smi. |
1968 void StoreIntoObjectFilterNoSmi(Register object, | 1912 void StoreIntoObjectFilterNoSmi(Register object, |
1969 Register value, | 1913 Register value, |
1970 Label* no_update); | 1914 Label* no_update); |
1971 | 1915 |
1972 DISALLOW_ALLOCATION(); | 1916 DISALLOW_ALLOCATION(); |
1973 DISALLOW_COPY_AND_ASSIGN(Assembler); | 1917 DISALLOW_COPY_AND_ASSIGN(Assembler); |
1974 }; | 1918 }; |
1975 | 1919 |
1976 } // namespace dart | 1920 } // namespace dart |
1977 | 1921 |
1978 #endif // RUNTIME_VM_ASSEMBLER_ARM64_H_ | 1922 #endif // RUNTIME_VM_ASSEMBLER_ARM64_H_ |