OLD | NEW |
1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #ifndef V8_ARM64_ASSEMBLER_ARM64_H_ | 5 #ifndef V8_ARM64_ASSEMBLER_ARM64_H_ |
6 #define V8_ARM64_ASSEMBLER_ARM64_H_ | 6 #define V8_ARM64_ASSEMBLER_ARM64_H_ |
7 | 7 |
8 #include <list> | 8 #include <list> |
9 #include <map> | 9 #include <map> |
10 #include <vector> | 10 #include <vector> |
(...skipping 88 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
99 Register() { | 99 Register() { |
100 reg_code = 0; | 100 reg_code = 0; |
101 reg_size = 0; | 101 reg_size = 0; |
102 reg_type = CPURegister::kNoRegister; | 102 reg_type = CPURegister::kNoRegister; |
103 } | 103 } |
104 | 104 |
105 explicit Register(const CPURegister& r) { | 105 explicit Register(const CPURegister& r) { |
106 reg_code = r.reg_code; | 106 reg_code = r.reg_code; |
107 reg_size = r.reg_size; | 107 reg_size = r.reg_size; |
108 reg_type = r.reg_type; | 108 reg_type = r.reg_type; |
109 ASSERT(IsValidOrNone()); | 109 DCHECK(IsValidOrNone()); |
110 } | 110 } |
111 | 111 |
112 Register(const Register& r) { // NOLINT(runtime/explicit) | 112 Register(const Register& r) { // NOLINT(runtime/explicit) |
113 reg_code = r.reg_code; | 113 reg_code = r.reg_code; |
114 reg_size = r.reg_size; | 114 reg_size = r.reg_size; |
115 reg_type = r.reg_type; | 115 reg_type = r.reg_type; |
116 ASSERT(IsValidOrNone()); | 116 DCHECK(IsValidOrNone()); |
117 } | 117 } |
118 | 118 |
119 bool IsValid() const { | 119 bool IsValid() const { |
120 ASSERT(IsRegister() || IsNone()); | 120 DCHECK(IsRegister() || IsNone()); |
121 return IsValidRegister(); | 121 return IsValidRegister(); |
122 } | 122 } |
123 | 123 |
124 static Register XRegFromCode(unsigned code); | 124 static Register XRegFromCode(unsigned code); |
125 static Register WRegFromCode(unsigned code); | 125 static Register WRegFromCode(unsigned code); |
126 | 126 |
127 // Start of V8 compatibility section --------------------- | 127 // Start of V8 compatibility section --------------------- |
128 // These memebers are necessary for compilation. | 128 // These memebers are necessary for compilation. |
129 // A few of them may be unused for now. | 129 // A few of them may be unused for now. |
130 | 130 |
(...skipping 31 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
162 | 162 |
163 // Return true if the register is one that crankshaft can allocate. | 163 // Return true if the register is one that crankshaft can allocate. |
164 bool IsAllocatable() const { | 164 bool IsAllocatable() const { |
165 return ((reg_code == kAllocatableContext) || | 165 return ((reg_code == kAllocatableContext) || |
166 (reg_code <= kAllocatableLowRangeEnd) || | 166 (reg_code <= kAllocatableLowRangeEnd) || |
167 ((reg_code >= kAllocatableHighRangeBegin) && | 167 ((reg_code >= kAllocatableHighRangeBegin) && |
168 (reg_code <= kAllocatableHighRangeEnd))); | 168 (reg_code <= kAllocatableHighRangeEnd))); |
169 } | 169 } |
170 | 170 |
171 static Register FromAllocationIndex(unsigned index) { | 171 static Register FromAllocationIndex(unsigned index) { |
172 ASSERT(index < static_cast<unsigned>(NumAllocatableRegisters())); | 172 DCHECK(index < static_cast<unsigned>(NumAllocatableRegisters())); |
173 // cp is the last allocatable register. | 173 // cp is the last allocatable register. |
174 if (index == (static_cast<unsigned>(NumAllocatableRegisters() - 1))) { | 174 if (index == (static_cast<unsigned>(NumAllocatableRegisters() - 1))) { |
175 return from_code(kAllocatableContext); | 175 return from_code(kAllocatableContext); |
176 } | 176 } |
177 | 177 |
178 // Handle low and high ranges. | 178 // Handle low and high ranges. |
179 return (index <= kAllocatableLowRangeEnd) | 179 return (index <= kAllocatableLowRangeEnd) |
180 ? from_code(index) | 180 ? from_code(index) |
181 : from_code(index + kAllocatableRangeGapSize); | 181 : from_code(index + kAllocatableRangeGapSize); |
182 } | 182 } |
183 | 183 |
184 static const char* AllocationIndexToString(int index) { | 184 static const char* AllocationIndexToString(int index) { |
185 ASSERT((index >= 0) && (index < NumAllocatableRegisters())); | 185 DCHECK((index >= 0) && (index < NumAllocatableRegisters())); |
186 ASSERT((kAllocatableLowRangeBegin == 0) && | 186 DCHECK((kAllocatableLowRangeBegin == 0) && |
187 (kAllocatableLowRangeEnd == 15) && | 187 (kAllocatableLowRangeEnd == 15) && |
188 (kAllocatableHighRangeBegin == 18) && | 188 (kAllocatableHighRangeBegin == 18) && |
189 (kAllocatableHighRangeEnd == 24) && | 189 (kAllocatableHighRangeEnd == 24) && |
190 (kAllocatableContext == 27)); | 190 (kAllocatableContext == 27)); |
191 const char* const names[] = { | 191 const char* const names[] = { |
192 "x0", "x1", "x2", "x3", "x4", | 192 "x0", "x1", "x2", "x3", "x4", |
193 "x5", "x6", "x7", "x8", "x9", | 193 "x5", "x6", "x7", "x8", "x9", |
194 "x10", "x11", "x12", "x13", "x14", | 194 "x10", "x11", "x12", "x13", "x14", |
195 "x15", "x18", "x19", "x20", "x21", | 195 "x15", "x18", "x19", "x20", "x21", |
196 "x22", "x23", "x24", "x27", | 196 "x22", "x23", "x24", "x27", |
197 }; | 197 }; |
198 return names[index]; | 198 return names[index]; |
199 } | 199 } |
200 | 200 |
201 static int ToAllocationIndex(Register reg) { | 201 static int ToAllocationIndex(Register reg) { |
202 ASSERT(reg.IsAllocatable()); | 202 DCHECK(reg.IsAllocatable()); |
203 unsigned code = reg.code(); | 203 unsigned code = reg.code(); |
204 if (code == kAllocatableContext) { | 204 if (code == kAllocatableContext) { |
205 return NumAllocatableRegisters() - 1; | 205 return NumAllocatableRegisters() - 1; |
206 } | 206 } |
207 | 207 |
208 return (code <= kAllocatableLowRangeEnd) | 208 return (code <= kAllocatableLowRangeEnd) |
209 ? code | 209 ? code |
210 : code - kAllocatableRangeGapSize; | 210 : code - kAllocatableRangeGapSize; |
211 } | 211 } |
212 | 212 |
(...skipping 15 matching lines...) Expand all Loading... |
228 FPRegister() { | 228 FPRegister() { |
229 reg_code = 0; | 229 reg_code = 0; |
230 reg_size = 0; | 230 reg_size = 0; |
231 reg_type = CPURegister::kNoRegister; | 231 reg_type = CPURegister::kNoRegister; |
232 } | 232 } |
233 | 233 |
234 explicit FPRegister(const CPURegister& r) { | 234 explicit FPRegister(const CPURegister& r) { |
235 reg_code = r.reg_code; | 235 reg_code = r.reg_code; |
236 reg_size = r.reg_size; | 236 reg_size = r.reg_size; |
237 reg_type = r.reg_type; | 237 reg_type = r.reg_type; |
238 ASSERT(IsValidOrNone()); | 238 DCHECK(IsValidOrNone()); |
239 } | 239 } |
240 | 240 |
241 FPRegister(const FPRegister& r) { // NOLINT(runtime/explicit) | 241 FPRegister(const FPRegister& r) { // NOLINT(runtime/explicit) |
242 reg_code = r.reg_code; | 242 reg_code = r.reg_code; |
243 reg_size = r.reg_size; | 243 reg_size = r.reg_size; |
244 reg_type = r.reg_type; | 244 reg_type = r.reg_type; |
245 ASSERT(IsValidOrNone()); | 245 DCHECK(IsValidOrNone()); |
246 } | 246 } |
247 | 247 |
248 bool IsValid() const { | 248 bool IsValid() const { |
249 ASSERT(IsFPRegister() || IsNone()); | 249 DCHECK(IsFPRegister() || IsNone()); |
250 return IsValidFPRegister(); | 250 return IsValidFPRegister(); |
251 } | 251 } |
252 | 252 |
253 static FPRegister SRegFromCode(unsigned code); | 253 static FPRegister SRegFromCode(unsigned code); |
254 static FPRegister DRegFromCode(unsigned code); | 254 static FPRegister DRegFromCode(unsigned code); |
255 | 255 |
256 // Start of V8 compatibility section --------------------- | 256 // Start of V8 compatibility section --------------------- |
257 static const int kMaxNumRegisters = kNumberOfFPRegisters; | 257 static const int kMaxNumRegisters = kNumberOfFPRegisters; |
258 | 258 |
259 // Crankshaft can use all the FP registers except: | 259 // Crankshaft can use all the FP registers except: |
(...skipping 15 matching lines...) Expand all Loading... |
275 (kAllocatableLowRangeEnd - kAllocatableLowRangeBegin + 1) + | 275 (kAllocatableLowRangeEnd - kAllocatableLowRangeBegin + 1) + |
276 (kAllocatableHighRangeEnd - kAllocatableHighRangeBegin + 1); | 276 (kAllocatableHighRangeEnd - kAllocatableHighRangeBegin + 1); |
277 static int NumAllocatableRegisters() { return kMaxNumAllocatableRegisters; } | 277 static int NumAllocatableRegisters() { return kMaxNumAllocatableRegisters; } |
278 | 278 |
279 // Return true if the register is one that crankshaft can allocate. | 279 // Return true if the register is one that crankshaft can allocate. |
280 bool IsAllocatable() const { | 280 bool IsAllocatable() const { |
281 return (Bit() & kAllocatableFPRegisters) != 0; | 281 return (Bit() & kAllocatableFPRegisters) != 0; |
282 } | 282 } |
283 | 283 |
284 static FPRegister FromAllocationIndex(unsigned int index) { | 284 static FPRegister FromAllocationIndex(unsigned int index) { |
285 ASSERT(index < static_cast<unsigned>(NumAllocatableRegisters())); | 285 DCHECK(index < static_cast<unsigned>(NumAllocatableRegisters())); |
286 | 286 |
287 return (index <= kAllocatableLowRangeEnd) | 287 return (index <= kAllocatableLowRangeEnd) |
288 ? from_code(index) | 288 ? from_code(index) |
289 : from_code(index + kAllocatableRangeGapSize); | 289 : from_code(index + kAllocatableRangeGapSize); |
290 } | 290 } |
291 | 291 |
292 static const char* AllocationIndexToString(int index) { | 292 static const char* AllocationIndexToString(int index) { |
293 ASSERT((index >= 0) && (index < NumAllocatableRegisters())); | 293 DCHECK((index >= 0) && (index < NumAllocatableRegisters())); |
294 ASSERT((kAllocatableLowRangeBegin == 0) && | 294 DCHECK((kAllocatableLowRangeBegin == 0) && |
295 (kAllocatableLowRangeEnd == 14) && | 295 (kAllocatableLowRangeEnd == 14) && |
296 (kAllocatableHighRangeBegin == 16) && | 296 (kAllocatableHighRangeBegin == 16) && |
297 (kAllocatableHighRangeEnd == 28)); | 297 (kAllocatableHighRangeEnd == 28)); |
298 const char* const names[] = { | 298 const char* const names[] = { |
299 "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", | 299 "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", |
300 "d8", "d9", "d10", "d11", "d12", "d13", "d14", | 300 "d8", "d9", "d10", "d11", "d12", "d13", "d14", |
301 "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23", | 301 "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23", |
302 "d24", "d25", "d26", "d27", "d28" | 302 "d24", "d25", "d26", "d27", "d28" |
303 }; | 303 }; |
304 return names[index]; | 304 return names[index]; |
305 } | 305 } |
306 | 306 |
307 static int ToAllocationIndex(FPRegister reg) { | 307 static int ToAllocationIndex(FPRegister reg) { |
308 ASSERT(reg.IsAllocatable()); | 308 DCHECK(reg.IsAllocatable()); |
309 unsigned code = reg.code(); | 309 unsigned code = reg.code(); |
310 | 310 |
311 return (code <= kAllocatableLowRangeEnd) | 311 return (code <= kAllocatableLowRangeEnd) |
312 ? code | 312 ? code |
313 : code - kAllocatableRangeGapSize; | 313 : code - kAllocatableRangeGapSize; |
314 } | 314 } |
315 | 315 |
316 static FPRegister from_code(int code) { | 316 static FPRegister from_code(int code) { |
317 // Always return a D register. | 317 // Always return a D register. |
318 return FPRegister::Create(code, kDRegSizeInBits); | 318 return FPRegister::Create(code, kDRegSizeInBits); |
(...skipping 125 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
444 // ----------------------------------------------------------------------------- | 444 // ----------------------------------------------------------------------------- |
445 // Lists of registers. | 445 // Lists of registers. |
446 class CPURegList { | 446 class CPURegList { |
447 public: | 447 public: |
448 explicit CPURegList(CPURegister reg1, | 448 explicit CPURegList(CPURegister reg1, |
449 CPURegister reg2 = NoCPUReg, | 449 CPURegister reg2 = NoCPUReg, |
450 CPURegister reg3 = NoCPUReg, | 450 CPURegister reg3 = NoCPUReg, |
451 CPURegister reg4 = NoCPUReg) | 451 CPURegister reg4 = NoCPUReg) |
452 : list_(reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit()), | 452 : list_(reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit()), |
453 size_(reg1.SizeInBits()), type_(reg1.type()) { | 453 size_(reg1.SizeInBits()), type_(reg1.type()) { |
454 ASSERT(AreSameSizeAndType(reg1, reg2, reg3, reg4)); | 454 DCHECK(AreSameSizeAndType(reg1, reg2, reg3, reg4)); |
455 ASSERT(IsValid()); | 455 DCHECK(IsValid()); |
456 } | 456 } |
457 | 457 |
458 CPURegList(CPURegister::RegisterType type, unsigned size, RegList list) | 458 CPURegList(CPURegister::RegisterType type, unsigned size, RegList list) |
459 : list_(list), size_(size), type_(type) { | 459 : list_(list), size_(size), type_(type) { |
460 ASSERT(IsValid()); | 460 DCHECK(IsValid()); |
461 } | 461 } |
462 | 462 |
463 CPURegList(CPURegister::RegisterType type, unsigned size, | 463 CPURegList(CPURegister::RegisterType type, unsigned size, |
464 unsigned first_reg, unsigned last_reg) | 464 unsigned first_reg, unsigned last_reg) |
465 : size_(size), type_(type) { | 465 : size_(size), type_(type) { |
466 ASSERT(((type == CPURegister::kRegister) && | 466 DCHECK(((type == CPURegister::kRegister) && |
467 (last_reg < kNumberOfRegisters)) || | 467 (last_reg < kNumberOfRegisters)) || |
468 ((type == CPURegister::kFPRegister) && | 468 ((type == CPURegister::kFPRegister) && |
469 (last_reg < kNumberOfFPRegisters))); | 469 (last_reg < kNumberOfFPRegisters))); |
470 ASSERT(last_reg >= first_reg); | 470 DCHECK(last_reg >= first_reg); |
471 list_ = (1UL << (last_reg + 1)) - 1; | 471 list_ = (1UL << (last_reg + 1)) - 1; |
472 list_ &= ~((1UL << first_reg) - 1); | 472 list_ &= ~((1UL << first_reg) - 1); |
473 ASSERT(IsValid()); | 473 DCHECK(IsValid()); |
474 } | 474 } |
475 | 475 |
476 CPURegister::RegisterType type() const { | 476 CPURegister::RegisterType type() const { |
477 ASSERT(IsValid()); | 477 DCHECK(IsValid()); |
478 return type_; | 478 return type_; |
479 } | 479 } |
480 | 480 |
481 RegList list() const { | 481 RegList list() const { |
482 ASSERT(IsValid()); | 482 DCHECK(IsValid()); |
483 return list_; | 483 return list_; |
484 } | 484 } |
485 | 485 |
486 inline void set_list(RegList new_list) { | 486 inline void set_list(RegList new_list) { |
487 ASSERT(IsValid()); | 487 DCHECK(IsValid()); |
488 list_ = new_list; | 488 list_ = new_list; |
489 } | 489 } |
490 | 490 |
491 // Combine another CPURegList into this one. Registers that already exist in | 491 // Combine another CPURegList into this one. Registers that already exist in |
492 // this list are left unchanged. The type and size of the registers in the | 492 // this list are left unchanged. The type and size of the registers in the |
493 // 'other' list must match those in this list. | 493 // 'other' list must match those in this list. |
494 void Combine(const CPURegList& other); | 494 void Combine(const CPURegList& other); |
495 | 495 |
496 // Remove every register in the other CPURegList from this one. Registers that | 496 // Remove every register in the other CPURegList from this one. Registers that |
497 // do not exist in this list are ignored. The type of the registers in the | 497 // do not exist in this list are ignored. The type of the registers in the |
(...skipping 24 matching lines...) Expand all Loading... |
522 static CPURegList GetCalleeSavedFP(unsigned size = kDRegSizeInBits); | 522 static CPURegList GetCalleeSavedFP(unsigned size = kDRegSizeInBits); |
523 | 523 |
524 // AAPCS64 caller-saved registers. Note that this includes lr. | 524 // AAPCS64 caller-saved registers. Note that this includes lr. |
525 static CPURegList GetCallerSaved(unsigned size = kXRegSizeInBits); | 525 static CPURegList GetCallerSaved(unsigned size = kXRegSizeInBits); |
526 static CPURegList GetCallerSavedFP(unsigned size = kDRegSizeInBits); | 526 static CPURegList GetCallerSavedFP(unsigned size = kDRegSizeInBits); |
527 | 527 |
528 // Registers saved as safepoints. | 528 // Registers saved as safepoints. |
529 static CPURegList GetSafepointSavedRegisters(); | 529 static CPURegList GetSafepointSavedRegisters(); |
530 | 530 |
531 bool IsEmpty() const { | 531 bool IsEmpty() const { |
532 ASSERT(IsValid()); | 532 DCHECK(IsValid()); |
533 return list_ == 0; | 533 return list_ == 0; |
534 } | 534 } |
535 | 535 |
536 bool IncludesAliasOf(const CPURegister& other1, | 536 bool IncludesAliasOf(const CPURegister& other1, |
537 const CPURegister& other2 = NoCPUReg, | 537 const CPURegister& other2 = NoCPUReg, |
538 const CPURegister& other3 = NoCPUReg, | 538 const CPURegister& other3 = NoCPUReg, |
539 const CPURegister& other4 = NoCPUReg) const { | 539 const CPURegister& other4 = NoCPUReg) const { |
540 ASSERT(IsValid()); | 540 DCHECK(IsValid()); |
541 RegList list = 0; | 541 RegList list = 0; |
542 if (!other1.IsNone() && (other1.type() == type_)) list |= other1.Bit(); | 542 if (!other1.IsNone() && (other1.type() == type_)) list |= other1.Bit(); |
543 if (!other2.IsNone() && (other2.type() == type_)) list |= other2.Bit(); | 543 if (!other2.IsNone() && (other2.type() == type_)) list |= other2.Bit(); |
544 if (!other3.IsNone() && (other3.type() == type_)) list |= other3.Bit(); | 544 if (!other3.IsNone() && (other3.type() == type_)) list |= other3.Bit(); |
545 if (!other4.IsNone() && (other4.type() == type_)) list |= other4.Bit(); | 545 if (!other4.IsNone() && (other4.type() == type_)) list |= other4.Bit(); |
546 return (list_ & list) != 0; | 546 return (list_ & list) != 0; |
547 } | 547 } |
548 | 548 |
549 int Count() const { | 549 int Count() const { |
550 ASSERT(IsValid()); | 550 DCHECK(IsValid()); |
551 return CountSetBits(list_, kRegListSizeInBits); | 551 return CountSetBits(list_, kRegListSizeInBits); |
552 } | 552 } |
553 | 553 |
554 unsigned RegisterSizeInBits() const { | 554 unsigned RegisterSizeInBits() const { |
555 ASSERT(IsValid()); | 555 DCHECK(IsValid()); |
556 return size_; | 556 return size_; |
557 } | 557 } |
558 | 558 |
559 unsigned RegisterSizeInBytes() const { | 559 unsigned RegisterSizeInBytes() const { |
560 int size_in_bits = RegisterSizeInBits(); | 560 int size_in_bits = RegisterSizeInBits(); |
561 ASSERT((size_in_bits % kBitsPerByte) == 0); | 561 DCHECK((size_in_bits % kBitsPerByte) == 0); |
562 return size_in_bits / kBitsPerByte; | 562 return size_in_bits / kBitsPerByte; |
563 } | 563 } |
564 | 564 |
565 unsigned TotalSizeInBytes() const { | 565 unsigned TotalSizeInBytes() const { |
566 ASSERT(IsValid()); | 566 DCHECK(IsValid()); |
567 return RegisterSizeInBytes() * Count(); | 567 return RegisterSizeInBytes() * Count(); |
568 } | 568 } |
569 | 569 |
570 private: | 570 private: |
571 RegList list_; | 571 RegList list_; |
572 unsigned size_; | 572 unsigned size_; |
573 CPURegister::RegisterType type_; | 573 CPURegister::RegisterType type_; |
574 | 574 |
575 bool IsValid() const { | 575 bool IsValid() const { |
576 const RegList kValidRegisters = 0x8000000ffffffff; | 576 const RegList kValidRegisters = 0x8000000ffffffff; |
(...skipping 245 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
822 virtual ~Assembler(); | 822 virtual ~Assembler(); |
823 | 823 |
824 virtual void AbortedCodeGeneration() { | 824 virtual void AbortedCodeGeneration() { |
825 constpool_.Clear(); | 825 constpool_.Clear(); |
826 } | 826 } |
827 | 827 |
828 // System functions --------------------------------------------------------- | 828 // System functions --------------------------------------------------------- |
829 // Start generating code from the beginning of the buffer, discarding any code | 829 // Start generating code from the beginning of the buffer, discarding any code |
830 // and data that has already been emitted into the buffer. | 830 // and data that has already been emitted into the buffer. |
831 // | 831 // |
832 // In order to avoid any accidental transfer of state, Reset ASSERTs that the | 832 // In order to avoid any accidental transfer of state, Reset DCHECKs that the |
833 // constant pool is not blocked. | 833 // constant pool is not blocked. |
834 void Reset(); | 834 void Reset(); |
835 | 835 |
836 // GetCode emits any pending (non-emitted) code and fills the descriptor | 836 // GetCode emits any pending (non-emitted) code and fills the descriptor |
837 // desc. GetCode() is idempotent; it returns the same result if no other | 837 // desc. GetCode() is idempotent; it returns the same result if no other |
838 // Assembler functions are invoked in between GetCode() calls. | 838 // Assembler functions are invoked in between GetCode() calls. |
839 // | 839 // |
840 // The descriptor (desc) can be NULL. In that case, the code is finalized as | 840 // The descriptor (desc) can be NULL. In that case, the code is finalized as |
841 // usual, but the descriptor is not populated. | 841 // usual, but the descriptor is not populated. |
842 void GetCode(CodeDesc* desc); | 842 void GetCode(CodeDesc* desc); |
(...skipping 63 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
906 // blr temp | 906 // blr temp |
907 // | 907 // |
908 // With relocation: | 908 // With relocation: |
909 // ldr temp, =target | 909 // ldr temp, =target |
910 // blr temp | 910 // blr temp |
911 static const int kCallSizeWithoutRelocation = 4 * kInstructionSize; | 911 static const int kCallSizeWithoutRelocation = 4 * kInstructionSize; |
912 static const int kCallSizeWithRelocation = 2 * kInstructionSize; | 912 static const int kCallSizeWithRelocation = 2 * kInstructionSize; |
913 | 913 |
914 // Size of the generated code in bytes | 914 // Size of the generated code in bytes |
915 uint64_t SizeOfGeneratedCode() const { | 915 uint64_t SizeOfGeneratedCode() const { |
916 ASSERT((pc_ >= buffer_) && (pc_ < (buffer_ + buffer_size_))); | 916 DCHECK((pc_ >= buffer_) && (pc_ < (buffer_ + buffer_size_))); |
917 return pc_ - buffer_; | 917 return pc_ - buffer_; |
918 } | 918 } |
919 | 919 |
920 // Return the code size generated from label to the current position. | 920 // Return the code size generated from label to the current position. |
921 uint64_t SizeOfCodeGeneratedSince(const Label* label) { | 921 uint64_t SizeOfCodeGeneratedSince(const Label* label) { |
922 ASSERT(label->is_bound()); | 922 DCHECK(label->is_bound()); |
923 ASSERT(pc_offset() >= label->pos()); | 923 DCHECK(pc_offset() >= label->pos()); |
924 ASSERT(pc_offset() < buffer_size_); | 924 DCHECK(pc_offset() < buffer_size_); |
925 return pc_offset() - label->pos(); | 925 return pc_offset() - label->pos(); |
926 } | 926 } |
927 | 927 |
928 // Check the size of the code generated since the given label. This function | 928 // Check the size of the code generated since the given label. This function |
929 // is used primarily to work around comparisons between signed and unsigned | 929 // is used primarily to work around comparisons between signed and unsigned |
930 // quantities, since V8 uses both. | 930 // quantities, since V8 uses both. |
931 // TODO(jbramley): Work out what sign to use for these things and if possible, | 931 // TODO(jbramley): Work out what sign to use for these things and if possible, |
932 // change things to be consistent. | 932 // change things to be consistent. |
933 void AssertSizeOfCodeGeneratedSince(const Label* label, ptrdiff_t size) { | 933 void AssertSizeOfCodeGeneratedSince(const Label* label, ptrdiff_t size) { |
934 ASSERT(size >= 0); | 934 DCHECK(size >= 0); |
935 ASSERT(static_cast<uint64_t>(size) == SizeOfCodeGeneratedSince(label)); | 935 DCHECK(static_cast<uint64_t>(size) == SizeOfCodeGeneratedSince(label)); |
936 } | 936 } |
937 | 937 |
938 // Return the number of instructions generated from label to the | 938 // Return the number of instructions generated from label to the |
939 // current position. | 939 // current position. |
940 int InstructionsGeneratedSince(const Label* label) { | 940 int InstructionsGeneratedSince(const Label* label) { |
941 return SizeOfCodeGeneratedSince(label) / kInstructionSize; | 941 return SizeOfCodeGeneratedSince(label) / kInstructionSize; |
942 } | 942 } |
943 | 943 |
944 // Number of instructions generated for the return sequence in | 944 // Number of instructions generated for the return sequence in |
945 // FullCodeGenerator::EmitReturnSequence. | 945 // FullCodeGenerator::EmitReturnSequence. |
(...skipping 261 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1207 const Register& rn, | 1207 const Register& rn, |
1208 unsigned immr, | 1208 unsigned immr, |
1209 unsigned imms); | 1209 unsigned imms); |
1210 | 1210 |
1211 // Bfm aliases. | 1211 // Bfm aliases. |
1212 // Bitfield insert. | 1212 // Bitfield insert. |
1213 void bfi(const Register& rd, | 1213 void bfi(const Register& rd, |
1214 const Register& rn, | 1214 const Register& rn, |
1215 unsigned lsb, | 1215 unsigned lsb, |
1216 unsigned width) { | 1216 unsigned width) { |
1217 ASSERT(width >= 1); | 1217 DCHECK(width >= 1); |
1218 ASSERT(lsb + width <= rn.SizeInBits()); | 1218 DCHECK(lsb + width <= rn.SizeInBits()); |
1219 bfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1); | 1219 bfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1); |
1220 } | 1220 } |
1221 | 1221 |
1222 // Bitfield extract and insert low. | 1222 // Bitfield extract and insert low. |
1223 void bfxil(const Register& rd, | 1223 void bfxil(const Register& rd, |
1224 const Register& rn, | 1224 const Register& rn, |
1225 unsigned lsb, | 1225 unsigned lsb, |
1226 unsigned width) { | 1226 unsigned width) { |
1227 ASSERT(width >= 1); | 1227 DCHECK(width >= 1); |
1228 ASSERT(lsb + width <= rn.SizeInBits()); | 1228 DCHECK(lsb + width <= rn.SizeInBits()); |
1229 bfm(rd, rn, lsb, lsb + width - 1); | 1229 bfm(rd, rn, lsb, lsb + width - 1); |
1230 } | 1230 } |
1231 | 1231 |
1232 // Sbfm aliases. | 1232 // Sbfm aliases. |
1233 // Arithmetic shift right. | 1233 // Arithmetic shift right. |
1234 void asr(const Register& rd, const Register& rn, unsigned shift) { | 1234 void asr(const Register& rd, const Register& rn, unsigned shift) { |
1235 ASSERT(shift < rd.SizeInBits()); | 1235 DCHECK(shift < rd.SizeInBits()); |
1236 sbfm(rd, rn, shift, rd.SizeInBits() - 1); | 1236 sbfm(rd, rn, shift, rd.SizeInBits() - 1); |
1237 } | 1237 } |
1238 | 1238 |
1239 // Signed bitfield insert in zero. | 1239 // Signed bitfield insert in zero. |
1240 void sbfiz(const Register& rd, | 1240 void sbfiz(const Register& rd, |
1241 const Register& rn, | 1241 const Register& rn, |
1242 unsigned lsb, | 1242 unsigned lsb, |
1243 unsigned width) { | 1243 unsigned width) { |
1244 ASSERT(width >= 1); | 1244 DCHECK(width >= 1); |
1245 ASSERT(lsb + width <= rn.SizeInBits()); | 1245 DCHECK(lsb + width <= rn.SizeInBits()); |
1246 sbfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1); | 1246 sbfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1); |
1247 } | 1247 } |
1248 | 1248 |
1249 // Signed bitfield extract. | 1249 // Signed bitfield extract. |
1250 void sbfx(const Register& rd, | 1250 void sbfx(const Register& rd, |
1251 const Register& rn, | 1251 const Register& rn, |
1252 unsigned lsb, | 1252 unsigned lsb, |
1253 unsigned width) { | 1253 unsigned width) { |
1254 ASSERT(width >= 1); | 1254 DCHECK(width >= 1); |
1255 ASSERT(lsb + width <= rn.SizeInBits()); | 1255 DCHECK(lsb + width <= rn.SizeInBits()); |
1256 sbfm(rd, rn, lsb, lsb + width - 1); | 1256 sbfm(rd, rn, lsb, lsb + width - 1); |
1257 } | 1257 } |
1258 | 1258 |
1259 // Signed extend byte. | 1259 // Signed extend byte. |
1260 void sxtb(const Register& rd, const Register& rn) { | 1260 void sxtb(const Register& rd, const Register& rn) { |
1261 sbfm(rd, rn, 0, 7); | 1261 sbfm(rd, rn, 0, 7); |
1262 } | 1262 } |
1263 | 1263 |
1264 // Signed extend halfword. | 1264 // Signed extend halfword. |
1265 void sxth(const Register& rd, const Register& rn) { | 1265 void sxth(const Register& rd, const Register& rn) { |
1266 sbfm(rd, rn, 0, 15); | 1266 sbfm(rd, rn, 0, 15); |
1267 } | 1267 } |
1268 | 1268 |
1269 // Signed extend word. | 1269 // Signed extend word. |
1270 void sxtw(const Register& rd, const Register& rn) { | 1270 void sxtw(const Register& rd, const Register& rn) { |
1271 sbfm(rd, rn, 0, 31); | 1271 sbfm(rd, rn, 0, 31); |
1272 } | 1272 } |
1273 | 1273 |
1274 // Ubfm aliases. | 1274 // Ubfm aliases. |
1275 // Logical shift left. | 1275 // Logical shift left. |
1276 void lsl(const Register& rd, const Register& rn, unsigned shift) { | 1276 void lsl(const Register& rd, const Register& rn, unsigned shift) { |
1277 unsigned reg_size = rd.SizeInBits(); | 1277 unsigned reg_size = rd.SizeInBits(); |
1278 ASSERT(shift < reg_size); | 1278 DCHECK(shift < reg_size); |
1279 ubfm(rd, rn, (reg_size - shift) % reg_size, reg_size - shift - 1); | 1279 ubfm(rd, rn, (reg_size - shift) % reg_size, reg_size - shift - 1); |
1280 } | 1280 } |
1281 | 1281 |
1282 // Logical shift right. | 1282 // Logical shift right. |
1283 void lsr(const Register& rd, const Register& rn, unsigned shift) { | 1283 void lsr(const Register& rd, const Register& rn, unsigned shift) { |
1284 ASSERT(shift < rd.SizeInBits()); | 1284 DCHECK(shift < rd.SizeInBits()); |
1285 ubfm(rd, rn, shift, rd.SizeInBits() - 1); | 1285 ubfm(rd, rn, shift, rd.SizeInBits() - 1); |
1286 } | 1286 } |
1287 | 1287 |
1288 // Unsigned bitfield insert in zero. | 1288 // Unsigned bitfield insert in zero. |
1289 void ubfiz(const Register& rd, | 1289 void ubfiz(const Register& rd, |
1290 const Register& rn, | 1290 const Register& rn, |
1291 unsigned lsb, | 1291 unsigned lsb, |
1292 unsigned width) { | 1292 unsigned width) { |
1293 ASSERT(width >= 1); | 1293 DCHECK(width >= 1); |
1294 ASSERT(lsb + width <= rn.SizeInBits()); | 1294 DCHECK(lsb + width <= rn.SizeInBits()); |
1295 ubfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1); | 1295 ubfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1); |
1296 } | 1296 } |
1297 | 1297 |
1298 // Unsigned bitfield extract. | 1298 // Unsigned bitfield extract. |
1299 void ubfx(const Register& rd, | 1299 void ubfx(const Register& rd, |
1300 const Register& rn, | 1300 const Register& rn, |
1301 unsigned lsb, | 1301 unsigned lsb, |
1302 unsigned width) { | 1302 unsigned width) { |
1303 ASSERT(width >= 1); | 1303 DCHECK(width >= 1); |
1304 ASSERT(lsb + width <= rn.SizeInBits()); | 1304 DCHECK(lsb + width <= rn.SizeInBits()); |
1305 ubfm(rd, rn, lsb, lsb + width - 1); | 1305 ubfm(rd, rn, lsb, lsb + width - 1); |
1306 } | 1306 } |
1307 | 1307 |
1308 // Unsigned extend byte. | 1308 // Unsigned extend byte. |
1309 void uxtb(const Register& rd, const Register& rn) { | 1309 void uxtb(const Register& rd, const Register& rn) { |
1310 ubfm(rd, rn, 0, 7); | 1310 ubfm(rd, rn, 0, 7); |
1311 } | 1311 } |
1312 | 1312 |
1313 // Unsigned extend halfword. | 1313 // Unsigned extend halfword. |
1314 void uxth(const Register& rd, const Register& rn) { | 1314 void uxth(const Register& rd, const Register& rn) { |
(...skipping 249 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1564 // states of the generated code. | 1564 // states of the generated code. |
1565 enum NopMarkerTypes { | 1565 enum NopMarkerTypes { |
1566 DEBUG_BREAK_NOP, | 1566 DEBUG_BREAK_NOP, |
1567 INTERRUPT_CODE_NOP, | 1567 INTERRUPT_CODE_NOP, |
1568 ADR_FAR_NOP, | 1568 ADR_FAR_NOP, |
1569 FIRST_NOP_MARKER = DEBUG_BREAK_NOP, | 1569 FIRST_NOP_MARKER = DEBUG_BREAK_NOP, |
1570 LAST_NOP_MARKER = ADR_FAR_NOP | 1570 LAST_NOP_MARKER = ADR_FAR_NOP |
1571 }; | 1571 }; |
1572 | 1572 |
1573 void nop(NopMarkerTypes n) { | 1573 void nop(NopMarkerTypes n) { |
1574 ASSERT((FIRST_NOP_MARKER <= n) && (n <= LAST_NOP_MARKER)); | 1574 DCHECK((FIRST_NOP_MARKER <= n) && (n <= LAST_NOP_MARKER)); |
1575 mov(Register::XRegFromCode(n), Register::XRegFromCode(n)); | 1575 mov(Register::XRegFromCode(n), Register::XRegFromCode(n)); |
1576 } | 1576 } |
1577 | 1577 |
1578 // FP instructions. | 1578 // FP instructions. |
1579 // Move immediate to FP register. | 1579 // Move immediate to FP register. |
1580 void fmov(FPRegister fd, double imm); | 1580 void fmov(FPRegister fd, double imm); |
1581 void fmov(FPRegister fd, float imm); | 1581 void fmov(FPRegister fd, float imm); |
1582 | 1582 |
1583 // Move FP register to register. | 1583 // Move FP register to register. |
1584 void fmov(Register rd, FPRegister fn); | 1584 void fmov(Register rd, FPRegister fn); |
(...skipping 140 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
  // Emit 32 bits of raw data in the instruction stream.
  void dc32(uint32_t data) { EmitData(&data, sizeof(data)); }
1726 | 1726 |
  // Emit 64 bits of raw data in the instruction stream.
  void dc64(uint64_t data) { EmitData(&data, sizeof(data)); }
1729 | 1729 |
  // Copy a string into the instruction stream, including the terminating NULL
  // character. The instruction pointer (pc_) is then aligned correctly for
  // subsequent instructions.
  void EmitStringData(const char * string) {
    // +1 so the terminating NUL is emitted as well.
    size_t len = strlen(string) + 1;
    // The string, rounded up to an instruction boundary, must fit within the
    // kGap headroom the buffer guarantees (see the kGap comment below).
    DCHECK(RoundUp(len, kInstructionSize) <= static_cast<size_t>(kGap));
    EmitData(string, len);
    // Pad with NULL characters until pc_ is aligned.
    const char pad[] = {'\0', '\0', '\0', '\0'};
    STATIC_ASSERT(sizeof(pad) == kInstructionSize);
    byte* next_pc = AlignUp(pc_, kInstructionSize);
    // If pc_ is already aligned this emits zero bytes of padding.
    EmitData(&pad, next_pc - pc_);
  }
1743 | 1743 |
1744 // Pseudo-instructions ------------------------------------------------------ | 1744 // Pseudo-instructions ------------------------------------------------------ |
1745 | 1745 |
(...skipping 13 matching lines...) Expand all Loading... |
  // Returns a pointer to the instruction located `offset` bytes from the
  // start of the code buffer.
  Instruction* InstructionAt(int offset) const {
    return reinterpret_cast<Instruction*>(buffer_ + offset);
  }
1762 | 1762 |
  // Inverse of InstructionAt(): the byte offset of `instr` within the code
  // buffer.
  ptrdiff_t InstructionOffset(Instruction* instr) const {
    return reinterpret_cast<byte*>(instr) - buffer_;
  }
1766 | 1766 |
  // Register encoding.
  //
  // Encode rd into the Rd field. The internal stack-pointer code is rejected
  // here; use RdSP() below for instructions where sp is a valid destination.
  static Instr Rd(CPURegister rd) {
    DCHECK(rd.code() != kSPRegInternalCode);
    return rd.code() << Rd_offset;
  }
1772 | 1772 |
  // Encode rn into the Rn field; the internal stack-pointer code is rejected
  // here (use RnSP() for instructions that accept sp).
  static Instr Rn(CPURegister rn) {
    DCHECK(rn.code() != kSPRegInternalCode);
    return rn.code() << Rn_offset;
  }
1777 | 1777 |
  // Encode rm into the Rm field; the internal stack-pointer code is rejected.
  static Instr Rm(CPURegister rm) {
    DCHECK(rm.code() != kSPRegInternalCode);
    return rm.code() << Rm_offset;
  }
1782 | 1782 |
  // Encode ra into the Ra field; the internal stack-pointer code is rejected.
  static Instr Ra(CPURegister ra) {
    DCHECK(ra.code() != kSPRegInternalCode);
    return ra.code() << Ra_offset;
  }
1787 | 1787 |
  // Encode rt into the Rt field; the internal stack-pointer code is rejected.
  static Instr Rt(CPURegister rt) {
    DCHECK(rt.code() != kSPRegInternalCode);
    return rt.code() << Rt_offset;
  }
1792 | 1792 |
  // Encode rt2 into the Rt2 field; the internal stack-pointer code is
  // rejected.
  static Instr Rt2(CPURegister rt2) {
    DCHECK(rt2.code() != kSPRegInternalCode);
    return rt2.code() << Rt2_offset;
  }
1797 | 1797 |
  // These encoding functions allow the stack pointer to be encoded, and
  // disallow the zero register.
  static Instr RdSP(Register rd) {
    DCHECK(!rd.IsZero());
    // Masking with kRegCodeMask maps the internal sp code onto its
    // architectural field value.
    return (rd.code() & kRegCodeMask) << Rd_offset;
  }
1804 | 1804 |
  // As RdSP() above, but for the Rn field: sp is allowed, the zero register
  // is not.
  static Instr RnSP(Register rn) {
    DCHECK(!rn.IsZero());
    return (rn.code() & kRegCodeMask) << Rn_offset;
  }
1809 | 1809 |
1810 // Flags encoding. | 1810 // Flags encoding. |
1811 inline static Instr Flags(FlagsUpdate S); | 1811 inline static Instr Flags(FlagsUpdate S); |
1812 inline static Instr Cond(Condition cond); | 1812 inline static Instr Cond(Condition cond); |
1813 | 1813 |
1814 // PC-relative address encoding. | 1814 // PC-relative address encoding. |
1815 inline static Instr ImmPCRelAddress(int imm21); | 1815 inline static Instr ImmPCRelAddress(int imm21); |
1816 | 1816 |
(...skipping 263 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2080 | 2080 |
  // Set how far from current pc the next constant pool check will be.
  // `instructions` is an instruction count; it is converted to a byte offset
  // relative to the current pc.
  void SetNextConstPoolCheckIn(int instructions) {
    next_constant_pool_check_ = pc_offset() + instructions * kInstructionSize;
  }
2085 | 2085 |
  // Emit the instruction at pc_.
  void Emit(Instr instruction) {
    // The buffer cursor must be byte-addressed and an Instr must be exactly
    // one instruction wide; both are compile-time facts.
    STATIC_ASSERT(sizeof(*pc_) == 1);
    STATIC_ASSERT(sizeof(instruction) == kInstructionSize);
    DCHECK((pc_ + sizeof(instruction)) <= (buffer_ + buffer_size_));

    // memcpy writes the raw instruction bits into the byte buffer without
    // alignment or aliasing concerns.
    memcpy(pc_, &instruction, sizeof(instruction));
    pc_ += sizeof(instruction);
    // Check (and possibly grow) the buffer after the write; the kGap
    // headroom guarantees the write above was in bounds.
    CheckBuffer();
  }
2096 | 2096 |
2097 // Emit data inline in the instruction stream. | 2097 // Emit data inline in the instruction stream. |
2098 void EmitData(void const * data, unsigned size) { | 2098 void EmitData(void const * data, unsigned size) { |
2099 ASSERT(sizeof(*pc_) == 1); | 2099 DCHECK(sizeof(*pc_) == 1); |
2100 ASSERT((pc_ + size) <= (buffer_ + buffer_size_)); | 2100 DCHECK((pc_ + size) <= (buffer_ + buffer_size_)); |
2101 | 2101 |
2102 // TODO(all): Somehow register we have some data here. Then we can | 2102 // TODO(all): Somehow register we have some data here. Then we can |
2103 // disassemble it correctly. | 2103 // disassemble it correctly. |
2104 memcpy(pc_, data, size); | 2104 memcpy(pc_, data, size); |
2105 pc_ += size; | 2105 pc_ += size; |
2106 CheckBuffer(); | 2106 CheckBuffer(); |
2107 } | 2107 } |
2108 | 2108 |
2109 void GrowBuffer(); | 2109 void GrowBuffer(); |
2110 void CheckBufferSpace(); | 2110 void CheckBufferSpace(); |
(...skipping 56 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2167 // the relocation info. | 2167 // the relocation info. |
2168 TypeFeedbackId recorded_ast_id_; | 2168 TypeFeedbackId recorded_ast_id_; |
2169 | 2169 |
2170 inline TypeFeedbackId RecordedAstId(); | 2170 inline TypeFeedbackId RecordedAstId(); |
2171 inline void ClearRecordedAstId(); | 2171 inline void ClearRecordedAstId(); |
2172 | 2172 |
2173 protected: | 2173 protected: |
  // Record the AST id of the CallIC being compiled, so that it can be placed
  // in the relocation information.
  void SetRecordedAstId(TypeFeedbackId ast_id) {
    // Only one id may be pending at a time; the previous one must have been
    // cleared before a new one is recorded.
    DCHECK(recorded_ast_id_.IsNone());
    recorded_ast_id_ = ast_id;
  }
2180 | 2180 |
2181 // Code generation | 2181 // Code generation |
2182 // The relocation writer's position is at least kGap bytes below the end of | 2182 // The relocation writer's position is at least kGap bytes below the end of |
2183 // the generated instructions. This is so that multi-instruction sequences do | 2183 // the generated instructions. This is so that multi-instruction sequences do |
2184 // not have to check for overflow. The same is true for writes of large | 2184 // not have to check for overflow. The same is true for writes of large |
2185 // relocation info entries, and debug strings encoded in the instruction | 2185 // relocation info entries, and debug strings encoded in the instruction |
2186 // stream. | 2186 // stream. |
2187 static const int kGap = 128; | 2187 static const int kGap = 128; |
(...skipping 27 matching lines...) Expand all Loading... |
2215 // We generate a veneer for a branch if we reach within this distance of the | 2215 // We generate a veneer for a branch if we reach within this distance of the |
2216 // limit of the range. | 2216 // limit of the range. |
2217 static const int kVeneerDistanceMargin = 1 * KB; | 2217 static const int kVeneerDistanceMargin = 1 * KB; |
2218 // The factor of 2 is a finger in the air guess. With a default margin of | 2218 // The factor of 2 is a finger in the air guess. With a default margin of |
2219 // 1KB, that leaves us an additional 256 instructions to avoid generating a | 2219 // 1KB, that leaves us an additional 256 instructions to avoid generating a |
2220 // protective branch. | 2220 // protective branch. |
2221 static const int kVeneerNoProtectionFactor = 2; | 2221 static const int kVeneerNoProtectionFactor = 2; |
2222 static const int kVeneerDistanceCheckMargin = | 2222 static const int kVeneerDistanceCheckMargin = |
2223 kVeneerNoProtectionFactor * kVeneerDistanceMargin; | 2223 kVeneerNoProtectionFactor * kVeneerDistanceMargin; |
  // Returns the key of the first entry in unresolved_branches_ — per the
  // name, the pc limit of the closest unresolved branch (assumes the map is
  // ordered by limit; confirm against the member's declaration). Must not be
  // called when there are no unresolved branches.
  int unresolved_branches_first_limit() const {
    DCHECK(!unresolved_branches_.empty());
    return unresolved_branches_.begin()->first;
  }
2228 // This is similar to next_constant_pool_check_ and helps reduce the overhead | 2228 // This is similar to next_constant_pool_check_ and helps reduce the overhead |
2229 // of checking for veneer pools. | 2229 // of checking for veneer pools. |
2230 // It is maintained to the closest unresolved branch limit minus the maximum | 2230 // It is maintained to the closest unresolved branch limit minus the maximum |
2231 // veneer margin (or kMaxInt if there are no unresolved branches). | 2231 // veneer margin (or kMaxInt if there are no unresolved branches). |
2232 int next_veneer_pool_check_; | 2232 int next_veneer_pool_check_; |
2233 | 2233 |
2234 private: | 2234 private: |
2235 // If a veneer is emitted for a branch instruction, that instruction must be | 2235 // If a veneer is emitted for a branch instruction, that instruction must be |
(...skipping 32 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2268 } | 2268 } |
2269 | 2269 |
  // Create a patching assembler that writes directly into existing code at
  // `start`, sized for `count` instructions plus the kGap slack the Assembler
  // base class requires (the destructor checks this budget was used exactly).
  PatchingAssembler(byte* start, unsigned count)
      : Assembler(NULL, start, count * kInstructionSize + kGap) {
    // Block constant pool emission.
    StartBlockPools();
  }
2275 | 2275 |
  ~PatchingAssembler() {
    // Const pool should still be blocked.
    DCHECK(is_const_pool_blocked());
    EndBlockPools();
    // Verify we have generated exactly the number of instructions we
    // expected (the whole buffer minus the kGap slack must be filled).
    DCHECK((pc_offset() + kGap) == buffer_size_);
    // Verify no relocation information has been emitted.
    DCHECK(IsConstPoolEmpty());
    // Flush the instruction cache, since code was patched in place.
    size_t length = buffer_size_ - kGap;
    CpuFeatures::FlushICache(buffer_, length);
  }
2288 | 2288 |
2289 // See definition of PatchAdrFar() for details. | 2289 // See definition of PatchAdrFar() for details. |
2290 static const int kAdrFarPatchableNNops = 2; | 2290 static const int kAdrFarPatchableNNops = 2; |
2291 static const int kAdrFarPatchableNInstrs = kAdrFarPatchableNNops + 2; | 2291 static const int kAdrFarPatchableNInstrs = kAdrFarPatchableNNops + 2; |
2292 void PatchAdrFar(ptrdiff_t target_offset); | 2292 void PatchAdrFar(ptrdiff_t target_offset); |
2293 }; | 2293 }; |
2294 | 2294 |
2295 | 2295 |
// Scope helper that calls Assembler::CheckBufferSpace() on construction, so
// the buffer is checked (and grown if necessary) before subsequent emission.
class EnsureSpace BASE_EMBEDDED {
 public:
  explicit EnsureSpace(Assembler* assembler) {
    assembler->CheckBufferSpace();
  }
};
2302 | 2302 |
2303 } } // namespace v8::internal | 2303 } } // namespace v8::internal |
2304 | 2304 |
2305 #endif // V8_ARM64_ASSEMBLER_ARM64_H_ | 2305 #endif // V8_ARM64_ASSEMBLER_ARM64_H_ |
OLD | NEW |