Chromium Code Reviews

Side by Side Diff: src/a64/assembler-a64.h

Issue 196133017: Experimental parser: merge r19949 (Closed) Base URL: https://v8.googlecode.com/svn/branches/experimental/parser
Patch Set: Created 6 years, 9 months ago
1 // Copyright 2013 the V8 project authors. All rights reserved. 1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 209 matching lines...)
220 return NumAllocatableRegisters() - 1; 220 return NumAllocatableRegisters() - 1;
221 } 221 }
222 222
223 return (code <= kAllocatableLowRangeEnd) 223 return (code <= kAllocatableLowRangeEnd)
224 ? code 224 ? code
225 : code - kAllocatableRangeGapSize; 225 : code - kAllocatableRangeGapSize;
226 } 226 }
227 227
228 static Register from_code(int code) { 228 static Register from_code(int code) {
229 // Always return an X register. 229 // Always return an X register.
230 return Register::Create(code, kXRegSize); 230 return Register::Create(code, kXRegSizeInBits);
231 } 231 }
232 232
233 // End of V8 compatibility section ----------------------- 233 // End of V8 compatibility section -----------------------
234 }; 234 };
235 235
236 236
237 struct FPRegister : public CPURegister { 237 struct FPRegister : public CPURegister {
238 static FPRegister Create(unsigned code, unsigned size) { 238 static FPRegister Create(unsigned code, unsigned size) {
239 return CPURegister::Create(code, size, CPURegister::kFPRegister); 239 return CPURegister::Create(code, size, CPURegister::kFPRegister);
240 } 240 }
(...skipping 16 matching lines...)
257 return IsValidFPRegister(); 257 return IsValidFPRegister();
258 } 258 }
259 259
260 static FPRegister SRegFromCode(unsigned code); 260 static FPRegister SRegFromCode(unsigned code);
261 static FPRegister DRegFromCode(unsigned code); 261 static FPRegister DRegFromCode(unsigned code);
262 262
263 // Start of V8 compatibility section --------------------- 263 // Start of V8 compatibility section ---------------------
264 static const int kMaxNumRegisters = kNumberOfFPRegisters; 264 static const int kMaxNumRegisters = kNumberOfFPRegisters;
265 265
266 // Crankshaft can use all the FP registers except: 266 // Crankshaft can use all the FP registers except:
267 // - d29 which is used in crankshaft as a double scratch register 267 // - d15 which is used to keep the 0 double value
268 // - d30 which is used to keep the 0 double value 268 // - d30 which is used in crankshaft as a double scratch register
269 // - d31 which is used in the MacroAssembler as a double scratch register 269 // - d31 which is used in the MacroAssembler as a double scratch register
270 static const int kNumReservedRegisters = 3; 270 static const unsigned kAllocatableLowRangeBegin = 0;
271 static const unsigned kAllocatableLowRangeEnd = 14;
272 static const unsigned kAllocatableHighRangeBegin = 16;
273 static const unsigned kAllocatableHighRangeEnd = 29;
274
275 static const RegList kAllocatableFPRegisters = 0x3fff7fff;
276
277 // Gap between low and high ranges.
278 static const int kAllocatableRangeGapSize =
279 (kAllocatableHighRangeBegin - kAllocatableLowRangeEnd) - 1;
280
271 static const int kMaxNumAllocatableRegisters = 281 static const int kMaxNumAllocatableRegisters =
272 kNumberOfFPRegisters - kNumReservedRegisters; 282 (kAllocatableLowRangeEnd - kAllocatableLowRangeBegin + 1) +
283 (kAllocatableHighRangeEnd - kAllocatableHighRangeBegin + 1);
273 static int NumAllocatableRegisters() { return kMaxNumAllocatableRegisters; } 284 static int NumAllocatableRegisters() { return kMaxNumAllocatableRegisters; }
274 static const RegList kAllocatableFPRegisters =
275 (1 << kMaxNumAllocatableRegisters) - 1;
276 285
277 static FPRegister FromAllocationIndex(int index) { 286 // Return true if the register is one that crankshaft can allocate.
278 ASSERT((index >= 0) && (index < NumAllocatableRegisters())); 287 bool IsAllocatable() const {
279 return from_code(index); 288 return (Bit() & kAllocatableFPRegisters) != 0;
289 }
290
291 static FPRegister FromAllocationIndex(unsigned int index) {
292 ASSERT(index < static_cast<unsigned>(NumAllocatableRegisters()));
293
294 return (index <= kAllocatableLowRangeEnd)
295 ? from_code(index)
296 : from_code(index + kAllocatableRangeGapSize);
280 } 297 }
281 298
282 static const char* AllocationIndexToString(int index) { 299 static const char* AllocationIndexToString(int index) {
283 ASSERT((index >= 0) && (index < NumAllocatableRegisters())); 300 ASSERT((index >= 0) && (index < NumAllocatableRegisters()));
301 ASSERT((kAllocatableLowRangeBegin == 0) &&
302 (kAllocatableLowRangeEnd == 14) &&
303 (kAllocatableHighRangeBegin == 16) &&
304 (kAllocatableHighRangeEnd == 29));
284 const char* const names[] = { 305 const char* const names[] = {
285 "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", 306 "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
286 "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15", 307 "d8", "d9", "d10", "d11", "d12", "d13", "d14",
287 "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23", 308 "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
288 "d24", "d25", "d26", "d27", "d28", 309 "d24", "d25", "d26", "d27", "d28", "d29"
289 }; 310 };
290 return names[index]; 311 return names[index];
291 } 312 }
292 313
293 static int ToAllocationIndex(FPRegister reg) { 314 static int ToAllocationIndex(FPRegister reg) {
294 int code = reg.code(); 315 ASSERT(reg.IsAllocatable());
295 ASSERT(code < NumAllocatableRegisters()); 316 unsigned code = reg.code();
296 return code; 317
318 return (code <= kAllocatableLowRangeEnd)
319 ? code
320 : code - kAllocatableRangeGapSize;
297 } 321 }
298 322
299 static FPRegister from_code(int code) { 323 static FPRegister from_code(int code) {
300 // Always return a D register. 324 // Always return a D register.
301 return FPRegister::Create(code, kDRegSize); 325 return FPRegister::Create(code, kDRegSizeInBits);
302 } 326 }
303 // End of V8 compatibility section ----------------------- 327 // End of V8 compatibility section -----------------------
304 }; 328 };
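The two allocatable ranges above are deliberately non-contiguous: d15 holds the zero double and d30/d31 are scratch registers, so the contiguous allocation indices (0-28) must be mapped onto register codes that have a hole at 15. A minimal standalone sketch of the mapping and the 0x3fff7fff mask, using the constants from this patch (illustrative only, not the V8 sources):

    #include <cassert>
    #include <cstdint>

    // Constants as defined on the new side of this diff.
    const unsigned kAllocatableLowRangeEnd = 14;     // d0-d14
    const unsigned kAllocatableHighRangeBegin = 16;  // d16-d29
    const int kAllocatableRangeGapSize =
        (kAllocatableHighRangeBegin - kAllocatableLowRangeEnd) - 1;  // == 1 (d15)

    // Allocation index -> register code: indices past the low range skip the gap.
    unsigned FromAllocationIndex(unsigned index) {
      return (index <= kAllocatableLowRangeEnd)
          ? index
          : index + kAllocatableRangeGapSize;
    }

    // Register code -> allocation index: the exact inverse.
    unsigned ToAllocationIndex(unsigned code) {
      return (code <= kAllocatableLowRangeEnd)
          ? code
          : code - kAllocatableRangeGapSize;
    }

    int main() {
      // 0x3fff7fff has bits 0-14 and 16-29 set: exactly the allocatable set.
      const uint32_t kAllocatableFPRegisters = 0x3fff7fff;
      assert(((kAllocatableFPRegisters >> 15) & 1) == 0);  // d15 is reserved
      assert(FromAllocationIndex(15) == 16);  // index 15 maps to d16
      assert(ToAllocationIndex(29) == 28);    // d29 gets the last index, 28
      for (unsigned i = 0; i < 29; i++) {
        assert(ToAllocationIndex(FromAllocationIndex(i)) == i);  // round-trip
      }
      return 0;
    }

The integer Register struct earlier in this file uses the same low-range/high-range scheme, with a similar gap skipping its reserved registers.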
305 329
306 330
307 STATIC_ASSERT(sizeof(CPURegister) == sizeof(Register)); 331 STATIC_ASSERT(sizeof(CPURegister) == sizeof(Register));
308 STATIC_ASSERT(sizeof(CPURegister) == sizeof(FPRegister)); 332 STATIC_ASSERT(sizeof(CPURegister) == sizeof(FPRegister));
309 333
310 334
311 #if defined(A64_DEFINE_REG_STATICS) 335 #if defined(A64_DEFINE_REG_STATICS)
(...skipping 15 matching lines...)
327 // these all compare equal (using the Is() method). The Register and FPRegister 351 // these all compare equal (using the Is() method). The Register and FPRegister
328 // variants are provided for convenience. 352 // variants are provided for convenience.
329 INITIALIZE_REGISTER(Register, NoReg, 0, 0, CPURegister::kNoRegister); 353 INITIALIZE_REGISTER(Register, NoReg, 0, 0, CPURegister::kNoRegister);
330 INITIALIZE_REGISTER(FPRegister, NoFPReg, 0, 0, CPURegister::kNoRegister); 354 INITIALIZE_REGISTER(FPRegister, NoFPReg, 0, 0, CPURegister::kNoRegister);
331 INITIALIZE_REGISTER(CPURegister, NoCPUReg, 0, 0, CPURegister::kNoRegister); 355 INITIALIZE_REGISTER(CPURegister, NoCPUReg, 0, 0, CPURegister::kNoRegister);
332 356
333 // v8 compatibility. 357 // v8 compatibility.
334 INITIALIZE_REGISTER(Register, no_reg, 0, 0, CPURegister::kNoRegister); 358 INITIALIZE_REGISTER(Register, no_reg, 0, 0, CPURegister::kNoRegister);
335 359
336 #define DEFINE_REGISTERS(N) \ 360 #define DEFINE_REGISTERS(N) \
337 INITIALIZE_REGISTER(Register, w##N, N, kWRegSize, CPURegister::kRegister); \ 361 INITIALIZE_REGISTER(Register, w##N, N, \
338 INITIALIZE_REGISTER(Register, x##N, N, kXRegSize, CPURegister::kRegister); 362 kWRegSizeInBits, CPURegister::kRegister); \
363 INITIALIZE_REGISTER(Register, x##N, N, \
364 kXRegSizeInBits, CPURegister::kRegister);
339 REGISTER_CODE_LIST(DEFINE_REGISTERS) 365 REGISTER_CODE_LIST(DEFINE_REGISTERS)
340 #undef DEFINE_REGISTERS 366 #undef DEFINE_REGISTERS
341 367
342 INITIALIZE_REGISTER(Register, wcsp, kSPRegInternalCode, kWRegSize, 368 INITIALIZE_REGISTER(Register, wcsp, kSPRegInternalCode, kWRegSizeInBits,
343 CPURegister::kRegister); 369 CPURegister::kRegister);
344 INITIALIZE_REGISTER(Register, csp, kSPRegInternalCode, kXRegSize, 370 INITIALIZE_REGISTER(Register, csp, kSPRegInternalCode, kXRegSizeInBits,
345 CPURegister::kRegister); 371 CPURegister::kRegister);
346 372
347 #define DEFINE_FPREGISTERS(N) \ 373 #define DEFINE_FPREGISTERS(N) \
348 INITIALIZE_REGISTER(FPRegister, s##N, N, kSRegSize, \ 374 INITIALIZE_REGISTER(FPRegister, s##N, N, \
349 CPURegister::kFPRegister); \ 375 kSRegSizeInBits, CPURegister::kFPRegister); \
350 INITIALIZE_REGISTER(FPRegister, d##N, N, kDRegSize, CPURegister::kFPRegister); 376 INITIALIZE_REGISTER(FPRegister, d##N, N, \
377 kDRegSizeInBits, CPURegister::kFPRegister);
351 REGISTER_CODE_LIST(DEFINE_FPREGISTERS) 378 REGISTER_CODE_LIST(DEFINE_FPREGISTERS)
352 #undef DEFINE_FPREGISTERS 379 #undef DEFINE_FPREGISTERS
353 380
354 #undef INITIALIZE_REGISTER 381 #undef INITIALIZE_REGISTER
355 382
356 // Register aliases. 383 // Register aliases.
357 ALIAS_REGISTER(Register, ip0, x16); 384 ALIAS_REGISTER(Register, ip0, x16);
358 ALIAS_REGISTER(Register, ip1, x17); 385 ALIAS_REGISTER(Register, ip1, x17);
359 ALIAS_REGISTER(Register, wip0, w16); 386 ALIAS_REGISTER(Register, wip0, w16);
360 ALIAS_REGISTER(Register, wip1, w17); 387 ALIAS_REGISTER(Register, wip1, w17);
361 // Root register. 388 // Root register.
362 ALIAS_REGISTER(Register, root, x26); 389 ALIAS_REGISTER(Register, root, x26);
363 ALIAS_REGISTER(Register, rr, x26); 390 ALIAS_REGISTER(Register, rr, x26);
364 // Context pointer register. 391 // Context pointer register.
365 ALIAS_REGISTER(Register, cp, x27); 392 ALIAS_REGISTER(Register, cp, x27);
366 // We use a register as a JS stack pointer to overcome the restriction on the 393 // We use a register as a JS stack pointer to overcome the restriction on the
367 // architectural SP alignment. 394 // architectural SP alignment.
368 // We chose x28 because it is contiguous with the other specific purpose 395 // We chose x28 because it is contiguous with the other specific purpose
369 // registers. 396 // registers.
370 STATIC_ASSERT(kJSSPCode == 28); 397 STATIC_ASSERT(kJSSPCode == 28);
371 ALIAS_REGISTER(Register, jssp, x28); 398 ALIAS_REGISTER(Register, jssp, x28);
372 ALIAS_REGISTER(Register, wjssp, w28); 399 ALIAS_REGISTER(Register, wjssp, w28);
373 ALIAS_REGISTER(Register, fp, x29); 400 ALIAS_REGISTER(Register, fp, x29);
374 ALIAS_REGISTER(Register, lr, x30); 401 ALIAS_REGISTER(Register, lr, x30);
375 ALIAS_REGISTER(Register, xzr, x31); 402 ALIAS_REGISTER(Register, xzr, x31);
376 ALIAS_REGISTER(Register, wzr, w31); 403 ALIAS_REGISTER(Register, wzr, w31);
377 404
405 // Keeps the 0 double value.
406 ALIAS_REGISTER(FPRegister, fp_zero, d15);
378 // Crankshaft double scratch register. 407 // Crankshaft double scratch register.
379 ALIAS_REGISTER(FPRegister, crankshaft_fp_scratch, d29); 408 ALIAS_REGISTER(FPRegister, crankshaft_fp_scratch, d30);
380 // Keeps the 0 double value.
381 ALIAS_REGISTER(FPRegister, fp_zero, d30);
382 // MacroAssembler double scratch register. 409 // MacroAssembler double scratch register.
383 ALIAS_REGISTER(FPRegister, fp_scratch, d31); 410 ALIAS_REGISTER(FPRegister, fp_scratch, d31);
384 411
385 #undef ALIAS_REGISTER 412 #undef ALIAS_REGISTER
386 413
387 414
388 Register GetAllocatableRegisterThatIsNotOneOf(Register reg1, 415 Register GetAllocatableRegisterThatIsNotOneOf(Register reg1,
389 Register reg2 = NoReg, 416 Register reg2 = NoReg,
390 Register reg3 = NoReg, 417 Register reg3 = NoReg,
391 Register reg4 = NoReg); 418 Register reg4 = NoReg);
(...skipping 62 matching lines...)
454 CPURegister::RegisterType type() const { 481 CPURegister::RegisterType type() const {
455 ASSERT(IsValid()); 482 ASSERT(IsValid());
456 return type_; 483 return type_;
457 } 484 }
458 485
459 RegList list() const { 486 RegList list() const {
460 ASSERT(IsValid()); 487 ASSERT(IsValid());
461 return list_; 488 return list_;
462 } 489 }
463 490
491 inline void set_list(RegList new_list) {
492 ASSERT(IsValid());
493 list_ = new_list;
494 }
495
464 // Combine another CPURegList into this one. Registers that already exist in 496 // Combine another CPURegList into this one. Registers that already exist in
465 // this list are left unchanged. The type and size of the registers in the 497 // this list are left unchanged. The type and size of the registers in the
466 // 'other' list must match those in this list. 498 // 'other' list must match those in this list.
467 void Combine(const CPURegList& other); 499 void Combine(const CPURegList& other);
468 500
469 // Remove every register in the other CPURegList from this one. Registers that 501 // Remove every register in the other CPURegList from this one. Registers that
470 // do not exist in this list are ignored. The type and size of the registers 502 // do not exist in this list are ignored. The type and size of the registers
471 // in the 'other' list must match those in this list. 503 // in the 'other' list must match those in this list.
472 void Remove(const CPURegList& other); 504 void Remove(const CPURegList& other);
473 505
474 // Variants of Combine and Remove which take a single register. 506 // Variants of Combine and Remove which take CPURegisters.
475 void Combine(const CPURegister& other); 507 void Combine(const CPURegister& other);
476 void Remove(const CPURegister& other); 508 void Remove(const CPURegister& other1,
509 const CPURegister& other2 = NoCPUReg,
510 const CPURegister& other3 = NoCPUReg,
511 const CPURegister& other4 = NoCPUReg);
477 512
478 // Variants of Combine and Remove which take a single register by its code; 513 // Variants of Combine and Remove which take a single register by its code;
479 // the type and size of the register is inferred from this list. 514 // the type and size of the register is inferred from this list.
480 void Combine(int code); 515 void Combine(int code);
481 void Remove(int code); 516 void Remove(int code);
482 517
483 // Remove all callee-saved registers from the list. This can be useful when 518 // Remove all callee-saved registers from the list. This can be useful when
484 // preparing registers for an AAPCS64 function call, for example. 519 // preparing registers for an AAPCS64 function call, for example.
485 void RemoveCalleeSaved(); 520 void RemoveCalleeSaved();
486 521
487 CPURegister PopLowestIndex(); 522 CPURegister PopLowestIndex();
488 CPURegister PopHighestIndex(); 523 CPURegister PopHighestIndex();
489 524
490 // AAPCS64 callee-saved registers. 525 // AAPCS64 callee-saved registers.
491 static CPURegList GetCalleeSaved(unsigned size = kXRegSize); 526 static CPURegList GetCalleeSaved(unsigned size = kXRegSizeInBits);
492 static CPURegList GetCalleeSavedFP(unsigned size = kDRegSize); 527 static CPURegList GetCalleeSavedFP(unsigned size = kDRegSizeInBits);
493 528
494 // AAPCS64 caller-saved registers. Note that this includes lr. 529 // AAPCS64 caller-saved registers. Note that this includes lr.
495 static CPURegList GetCallerSaved(unsigned size = kXRegSize); 530 static CPURegList GetCallerSaved(unsigned size = kXRegSizeInBits);
496 static CPURegList GetCallerSavedFP(unsigned size = kDRegSize); 531 static CPURegList GetCallerSavedFP(unsigned size = kDRegSizeInBits);
497 532
498 // Registers saved as safepoints. 533 // Registers saved as safepoints.
499 static CPURegList GetSafepointSavedRegisters(); 534 static CPURegList GetSafepointSavedRegisters();
500 535
501 bool IsEmpty() const { 536 bool IsEmpty() const {
502 ASSERT(IsValid()); 537 ASSERT(IsValid());
503 return list_ == 0; 538 return list_ == 0;
504 } 539 }
505 540
506 bool IncludesAliasOf(const CPURegister& other) const { 541 bool IncludesAliasOf(const CPURegister& other1,
542 const CPURegister& other2 = NoCPUReg,
543 const CPURegister& other3 = NoCPUReg,
544 const CPURegister& other4 = NoCPUReg) const {
507 ASSERT(IsValid()); 545 ASSERT(IsValid());
508 return (type_ == other.type()) && (other.Bit() & list_); 546 RegList list = 0;
547 if (!other1.IsNone() && (other1.type() == type_)) list |= other1.Bit();
548 if (!other2.IsNone() && (other2.type() == type_)) list |= other2.Bit();
549 if (!other3.IsNone() && (other3.type() == type_)) list |= other3.Bit();
550 if (!other4.IsNone() && (other4.type() == type_)) list |= other4.Bit();
551 return (list_ & list) != 0;
509 } 552 }
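Since a CPURegList is a bit set keyed by register code, the new multi-register IncludesAliasOf reduces to building a mask from the candidates of matching type and testing it for overlap with the list. A toy standalone model of the RegList semantics behind Combine, Remove, and IncludesAliasOf (hypothetical helper; the real class also carries type and size checks omitted here):

    #include <cassert>
    #include <cstdint>

    typedef uint64_t RegList;

    // Each register contributes one bit, keyed by its code.
    inline RegList Bit(unsigned code) { return static_cast<RegList>(1) << code; }

    int main() {
      RegList list = Bit(0) | Bit(1) | Bit(2);  // {x0, x1, x2}
      list |= Bit(2) | Bit(5);                  // Combine: x2 already present, x5 added
      list &= ~(Bit(1) | Bit(7));               // Remove: x7 not present, silently ignored
      assert(list == (Bit(0) | Bit(2) | Bit(5)));
      // IncludesAliasOf(x9, x5): true because x5's bit is in the list.
      assert((list & (Bit(9) | Bit(5))) != 0);
      return 0;
    }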
510 553
511 int Count() const { 554 int Count() const {
512 ASSERT(IsValid()); 555 ASSERT(IsValid());
513 return CountSetBits(list_, kRegListSizeInBits); 556 return CountSetBits(list_, kRegListSizeInBits);
514 } 557 }
515 558
516 unsigned RegisterSizeInBits() const { 559 unsigned RegisterSizeInBits() const {
517 ASSERT(IsValid()); 560 ASSERT(IsValid());
518 return size_; 561 return size_;
(...skipping 204 matching lines...)
723 766
724 inline void Unreachable(); 767 inline void Unreachable();
725 768
726 // Label -------------------------------------------------------------------- 769 // Label --------------------------------------------------------------------
727 // Bind a label to the current pc. Note that labels can only be bound once, 770 // Bind a label to the current pc. Note that labels can only be bound once,
728 // and if labels are linked to other instructions, they _must_ be bound 771 // and if labels are linked to other instructions, they _must_ be bound
729 // before they go out of scope. 772 // before they go out of scope.
730 void bind(Label* label); 773 void bind(Label* label);
731 774
732 775
733 // RelocInfo and constant pool ---------------------------------------------- 776 // RelocInfo and pools ------------------------------------------------------
734 777
735 // Record relocation information for current pc_. 778 // Record relocation information for current pc_.
736 void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0); 779 void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
737 780
738 // Return the address in the constant pool of the code target address used by 781 // Return the address in the constant pool of the code target address used by
739 // the branch/call instruction at pc. 782 // the branch/call instruction at pc.
740 inline static Address target_pointer_address_at(Address pc); 783 inline static Address target_pointer_address_at(Address pc);
741 784
742 // Read/Modify the code target address in the branch/call instruction at pc. 785 // Read/Modify the code target address in the branch/call instruction at pc.
743 inline static Address target_address_at(Address pc); 786 inline static Address target_address_at(Address pc,
744 inline static void set_target_address_at(Address pc, Address target); 787 ConstantPoolArray* constant_pool);
788 inline static void set_target_address_at(Address pc,
789 ConstantPoolArray* constant_pool,
790 Address target);
791 static inline Address target_address_at(Address pc, Code* code);
792 static inline void set_target_address_at(Address pc,
793 Code* code,
794 Address target);
745 795
746 // Return the code target address at a call site from the return address of 796 // Return the code target address at a call site from the return address of
747 // that call in the instruction stream. 797 // that call in the instruction stream.
748 inline static Address target_address_from_return_address(Address pc); 798 inline static Address target_address_from_return_address(Address pc);
749 799
750 // Given the address of the beginning of a call, return the address in the 800 // Given the address of the beginning of a call, return the address in the
751 // instruction stream that call will return from. 801 // instruction stream that call will return from.
752 inline static Address return_address_from_call_start(Address pc); 802 inline static Address return_address_from_call_start(Address pc);
753 803
754 // This sets the branch destination (which is in the constant pool on ARM). 804 // This sets the branch destination (which is in the constant pool on ARM).
755 // This is for calls and branches within generated code. 805 // This is for calls and branches within generated code.
756 inline static void deserialization_set_special_target_at( 806 inline static void deserialization_set_special_target_at(
757 Address constant_pool_entry, Address target); 807 Address constant_pool_entry, Code* code, Address target);
758 808
759 // All addresses in the constant pool are the same size as pointers. 809 // All addresses in the constant pool are the same size as pointers.
760 static const int kSpecialTargetSize = kPointerSize; 810 static const int kSpecialTargetSize = kPointerSize;
761 811
762 // The sizes of the call sequences emitted by MacroAssembler::Call. 812 // The sizes of the call sequences emitted by MacroAssembler::Call.
763 // Wherever possible, use MacroAssembler::CallSize instead of these constants, 813 // Wherever possible, use MacroAssembler::CallSize instead of these constants,
764 // as it will choose the correct value for a given relocation mode. 814 // as it will choose the correct value for a given relocation mode.
765 // 815 //
766 // Without relocation: 816 // Without relocation:
767 // movz ip0, #(target & 0x000000000000ffff) 817 // movz ip0, #(target & 0x000000000000ffff)
(...skipping 31 matching lines...)
799 ASSERT(size >= 0); 849 ASSERT(size >= 0);
800 ASSERT(static_cast<uint64_t>(size) == SizeOfCodeGeneratedSince(label)); 850 ASSERT(static_cast<uint64_t>(size) == SizeOfCodeGeneratedSince(label));
801 } 851 }
802 852
803 // Return the number of instructions generated from label to the 853 // Return the number of instructions generated from label to the
804 // current position. 854 // current position.
805 int InstructionsGeneratedSince(const Label* label) { 855 int InstructionsGeneratedSince(const Label* label) {
806 return SizeOfCodeGeneratedSince(label) / kInstructionSize; 856 return SizeOfCodeGeneratedSince(label) / kInstructionSize;
807 } 857 }
808 858
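A64 instructions are fixed-width, so the byte-to-instruction conversion above is a plain division by kInstructionSize (4 bytes). A quick worked example tying this to the constant defined just below (illustrative):

    const int kInstructionSize = 4;  // A64: fixed 32-bit instruction encoding
    // If 28 bytes have been emitted since a label was bound, then
    // InstructionsGeneratedSince(label) == 28 / 4 == 7, which is exactly
    // kJSRetSequenceInstructions for the seven-instruction return sequence.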
809 // TODO(all): Initialize these constants related with code patching.
810 // TODO(all): Set to -1 to hopefully crash if mistakenly used.
811
812 // Number of instructions generated for the return sequence in 859 // Number of instructions generated for the return sequence in
813 // FullCodeGenerator::EmitReturnSequence. 860 // FullCodeGenerator::EmitReturnSequence.
814 static const int kJSRetSequenceInstructions = 7; 861 static const int kJSRetSequenceInstructions = 7;
815 // Distance between start of patched return sequence and the emitted address 862 // Distance between start of patched return sequence and the emitted address
816 // to jump to. 863 // to jump to.
817 static const int kPatchReturnSequenceAddressOffset = 0; 864 static const int kPatchReturnSequenceAddressOffset = 0;
818 static const int kPatchDebugBreakSlotAddressOffset = 0; 865 static const int kPatchDebugBreakSlotAddressOffset = 0;
819 866
820 // Number of instructions necessary to be able to later patch it to a call. 867 // Number of instructions necessary to be able to later patch it to a call.
821 // See Debug::GenerateSlot() and BreakLocationIterator::SetDebugBreakAtSlot(). 868 // See Debug::GenerateSlot() and BreakLocationIterator::SetDebugBreakAtSlot().
(...skipping 10 matching lines...)
832 879
833 // Resume constant pool emission. Needs to be called as many times as 880 // Resume constant pool emission. Needs to be called as many times as
834 // StartBlockConstPool to have an effect. 881 // StartBlockConstPool to have an effect.
835 void EndBlockConstPool(); 882 void EndBlockConstPool();
836 883
837 bool is_const_pool_blocked() const; 884 bool is_const_pool_blocked() const;
838 static bool IsConstantPoolAt(Instruction* instr); 885 static bool IsConstantPoolAt(Instruction* instr);
839 static int ConstantPoolSizeAt(Instruction* instr); 886 static int ConstantPoolSizeAt(Instruction* instr);
840 // See Assembler::CheckConstPool for more info. 887 // See Assembler::CheckConstPool for more info.
841 void ConstantPoolMarker(uint32_t size); 888 void ConstantPoolMarker(uint32_t size);
889 void EmitPoolGuard();
842 void ConstantPoolGuard(); 890 void ConstantPoolGuard();
843 891
892 // Prevent veneer pool emission until EndBlockVeneerPool is called.
 893 // Calls to this function can be nested but must be followed by an equal
 894 // number of calls to EndBlockVeneerPool.
895 void StartBlockVeneerPool();
896
 897 // Resume veneer pool emission. Needs to be called as many times as
898 // StartBlockVeneerPool to have an effect.
899 void EndBlockVeneerPool();
900
901 bool is_veneer_pool_blocked() const {
902 return veneer_pool_blocked_nesting_ > 0;
903 }
904
905 // Block/resume emission of constant pools and veneer pools.
906 void StartBlockPools() {
907 StartBlockConstPool();
908 StartBlockVeneerPool();
909 }
910 void EndBlockPools() {
911 EndBlockConstPool();
912 EndBlockVeneerPool();
913 }
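The nesting contract is the same as for StartBlockConstPool: emission stays blocked until every Start call has been balanced by an End call. A toy model of the counters involved (hypothetical names; the real assembler tracks const_pool_blocked_nesting_ and veneer_pool_blocked_nesting_ separately):

    #include <cassert>

    struct PoolBlocker {
      int const_pool_nesting = 0;
      int veneer_pool_nesting = 0;
      void StartBlockPools() { ++const_pool_nesting; ++veneer_pool_nesting; }
      void EndBlockPools() { --const_pool_nesting; --veneer_pool_nesting; }
      bool is_veneer_pool_blocked() const { return veneer_pool_nesting > 0; }
    };

    int main() {
      PoolBlocker a;
      a.StartBlockPools();
      a.StartBlockPools();                  // nesting is fine...
      a.EndBlockPools();
      assert(a.is_veneer_pool_blocked());   // ...still blocked until balanced
      a.EndBlockPools();
      assert(!a.is_veneer_pool_blocked());  // now pools may be emitted again
      return 0;
    }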
844 914
845 // Debugging ---------------------------------------------------------------- 915 // Debugging ----------------------------------------------------------------
846 PositionsRecorder* positions_recorder() { return &positions_recorder_; } 916 PositionsRecorder* positions_recorder() { return &positions_recorder_; }
847 void RecordComment(const char* msg); 917 void RecordComment(const char* msg);
848 int buffer_space() const; 918 int buffer_space() const;
849 919
850 // Mark address of the ExitJSFrame code. 920 // Mark address of the ExitJSFrame code.
851 void RecordJSReturn(); 921 void RecordJSReturn();
852 922
853 // Mark address of a debug break slot. 923 // Mark address of a debug break slot.
854 void RecordDebugBreakSlot(); 924 void RecordDebugBreakSlot();
855 925
856 // Record the emission of a constant pool. 926 // Record the emission of a constant pool.
857 // 927 //
858 // The emission of constant pool depends on the size of the code generated and 928 // The emission of constant and veneer pools depends on the size of the code
859 // the number of RelocInfo recorded. 929 // generated and the number of RelocInfo recorded.
860 // The Debug mechanism needs to map code offsets between two versions of a 930 // The Debug mechanism needs to map code offsets between two versions of a
861 // function, compiled with and without debugger support (see for example 931 // function, compiled with and without debugger support (see for example
862 // Debug::PrepareForBreakPoints()). 932 // Debug::PrepareForBreakPoints()).
863 // Compiling functions with debugger support generates additional code 933 // Compiling functions with debugger support generates additional code
864 // (Debug::GenerateSlot()). This may affect the emission of the constant 934 // (Debug::GenerateSlot()). This may affect the emission of the pools and
865 // pools and cause the version of the code with debugger support to have 935 // cause the version of the code with debugger support to have pools generated
866 // constant pools generated in different places. 936 // in different places.
867 // Recording the position and size of emitted constant pools allows us to 937 // Recording the position and size of emitted pools allows us to correctly
868 // correctly compute the offset mappings between the different versions of a 938 // compute the offset mappings between the different versions of a function in
869 // function in all situations. 939 // all situations.
870 // 940 //
871 // The parameter indicates the size of the constant pool (in bytes), including 941 // The parameter indicates the size of the pool (in bytes), including
872 // the marker and branch over the data. 942 // the marker and branch over the data.
873 void RecordConstPool(int size); 943 void RecordConstPool(int size);
874 944
875 945
876 // Instruction set functions ------------------------------------------------ 946 // Instruction set functions ------------------------------------------------
877 947
878 // Branch / Jump instructions. 948 // Branch / Jump instructions.
879 // For branches, offsets are scaled, i.e. they are in instructions, not in bytes. 949 // For branches, offsets are scaled, i.e. they are in instructions, not in bytes.
880 // Branch to register. 950 // Branch to register.
881 void br(const Register& xn); 951 void br(const Register& xn);
(...skipping 459 matching lines...)
1341 1411
1342 // Store integer or FP register pair, non-temporal. 1412 // Store integer or FP register pair, non-temporal.
1343 void stnp(const CPURegister& rt, const CPURegister& rt2, 1413 void stnp(const CPURegister& rt, const CPURegister& rt2,
1344 const MemOperand& dst); 1414 const MemOperand& dst);
1345 1415
1346 // Load literal to register. 1416 // Load literal to register.
1347 void ldr(const Register& rt, uint64_t imm); 1417 void ldr(const Register& rt, uint64_t imm);
1348 1418
1349 // Load literal to FP register. 1419 // Load literal to FP register.
1350 void ldr(const FPRegister& ft, double imm); 1420 void ldr(const FPRegister& ft, double imm);
1421 void ldr(const FPRegister& ft, float imm);
1351 1422
1352 // Move instructions. The default shift of -1 indicates that the move 1423 // Move instructions. The default shift of -1 indicates that the move
1353 // instruction will calculate an appropriate 16-bit immediate and left shift 1424 // instruction will calculate an appropriate 16-bit immediate and left shift
1354 // that is equal to the 64-bit immediate argument. If an explicit left shift 1425 // that is equal to the 64-bit immediate argument. If an explicit left shift
1355 // is specified (0, 16, 32 or 48), the immediate must be a 16-bit value. 1426 // is specified (0, 16, 32 or 48), the immediate must be a 16-bit value.
1356 // 1427 //
1357 // For movk, an explicit shift can be used to indicate which half word should 1428 // For movk, an explicit shift can be used to indicate which half word should
1358 // be overwritten, eg. movk(x0, 0, 0) will overwrite the least-significant 1429 // be overwritten, eg. movk(x0, 0, 0) will overwrite the least-significant
1359 // half word with zero, whereas movk(x0, 0, 48) will overwrite the 1430 // half word with zero, whereas movk(x0, 0, 48) will overwrite the
1360 // most-significant. 1431 // most-significant.
(...skipping 58 matching lines...)
1419 }; 1490 };
1420 1491
1421 void nop(NopMarkerTypes n) { 1492 void nop(NopMarkerTypes n) {
1422 ASSERT((FIRST_NOP_MARKER <= n) && (n <= LAST_NOP_MARKER)); 1493 ASSERT((FIRST_NOP_MARKER <= n) && (n <= LAST_NOP_MARKER));
1423 mov(Register::XRegFromCode(n), Register::XRegFromCode(n)); 1494 mov(Register::XRegFromCode(n), Register::XRegFromCode(n));
1424 } 1495 }
1425 1496
1426 // FP instructions. 1497 // FP instructions.
1427 // Move immediate to FP register. 1498 // Move immediate to FP register.
1428 void fmov(FPRegister fd, double imm); 1499 void fmov(FPRegister fd, double imm);
1500 void fmov(FPRegister fd, float imm);
1429 1501
1430 // Move FP register to register. 1502 // Move FP register to register.
1431 void fmov(Register rd, FPRegister fn); 1503 void fmov(Register rd, FPRegister fn);
1432 1504
1433 // Move register to FP register. 1505 // Move register to FP register.
1434 void fmov(FPRegister fd, Register rn); 1506 void fmov(FPRegister fd, Register rn);
1435 1507
1436 // Move FP register to FP register. 1508 // Move FP register to FP register.
1437 void fmov(FPRegister fd, FPRegister fn); 1509 void fmov(FPRegister fd, FPRegister fn);
1438 1510
(...skipping 272 matching lines...)
1711 1783
1712 private: 1784 private:
1713 Assembler* assem_; 1785 Assembler* assem_;
1714 1786
1715 DISALLOW_IMPLICIT_CONSTRUCTORS(BlockConstPoolScope); 1787 DISALLOW_IMPLICIT_CONSTRUCTORS(BlockConstPoolScope);
1716 }; 1788 };
1717 1789
1718 // Check if it is time to emit a constant pool. 1790 // Check if it is time to emit a constant pool.
1719 void CheckConstPool(bool force_emit, bool require_jump); 1791 void CheckConstPool(bool force_emit, bool require_jump);
1720 1792
1793
1794 // Returns true if we should emit a veneer as soon as possible for a branch
 1795 // which can at most reach the specified pc.
1796 bool ShouldEmitVeneer(int max_reachable_pc,
1797 int margin = kVeneerDistanceMargin);
1798 bool ShouldEmitVeneers(int margin = kVeneerDistanceMargin) {
1799 return ShouldEmitVeneer(unresolved_branches_first_limit(), margin);
1800 }
1801
1802 // The maximum code size generated for a veneer. Currently one branch
1803 // instruction. This is for code size checking purposes, and can be extended
1804 // in the future for example if we decide to add nops between the veneers.
1805 static const int kMaxVeneerCodeSize = 1 * kInstructionSize;
1806
1807 void RecordVeneerPool(int location_offset, int size);
1808 // Emits veneers for branches that are approaching their maximum range.
1809 // If need_protection is true, the veneers are protected by a branch jumping
1810 // over the code.
1811 void EmitVeneers(bool need_protection, int margin = kVeneerDistanceMargin);
1812 void EmitVeneersGuard() { EmitPoolGuard(); }
1813 // Checks whether veneers need to be emitted at this point.
1814 void CheckVeneerPool(bool require_jump, int margin = kVeneerDistanceMargin);
1815
1816
1817 class BlockPoolsScope {
1818 public:
1819 explicit BlockPoolsScope(Assembler* assem) : assem_(assem) {
1820 assem_->StartBlockPools();
1821 }
1822 ~BlockPoolsScope() {
1823 assem_->EndBlockPools();
1824 }
1825
1826 private:
1827 Assembler* assem_;
1828
1829 DISALLOW_IMPLICIT_CONSTRUCTORS(BlockPoolsScope);
1830 };
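A hypothetical usage fragment for the new scope (assuming an Assembler* assm and a code sequence that must not be split by a pool):

    {
      Assembler::BlockPoolsScope scope(assm);  // StartBlockPools() in the ctor
      // ... emit instructions that must stay contiguous ...
    }  // EndBlockPools() in the dtor; pool emission may resume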
1831
1721 // Available for constrained code generation scopes. Prefer 1832 // Available for constrained code generation scopes. Prefer
1722 // MacroAssembler::Mov() when possible. 1833 // MacroAssembler::Mov() when possible.
1723 inline void LoadRelocated(const CPURegister& rt, const Operand& operand); 1834 inline void LoadRelocated(const CPURegister& rt, const Operand& operand);
1724 1835
1725 protected: 1836 protected:
1726 inline const Register& AppropriateZeroRegFor(const CPURegister& reg) const; 1837 inline const Register& AppropriateZeroRegFor(const CPURegister& reg) const;
1727 1838
1728 void LoadStore(const CPURegister& rt, 1839 void LoadStore(const CPURegister& rt,
1729 const MemOperand& addr, 1840 const MemOperand& addr,
1730 LoadStoreOp op); 1841 LoadStoreOp op);
(...skipping 165 matching lines...)
1896 // TODO(all): Somehow register that we have some data here. Then we can 2007 // TODO(all): Somehow register that we have some data here. Then we can
1897 // disassemble it correctly. 2008 // disassemble it correctly.
1898 memcpy(pc_, data, size); 2009 memcpy(pc_, data, size);
1899 pc_ += size; 2010 pc_ += size;
1900 CheckBuffer(); 2011 CheckBuffer();
1901 } 2012 }
1902 2013
1903 void GrowBuffer(); 2014 void GrowBuffer();
1904 void CheckBuffer(); 2015 void CheckBuffer();
1905 2016
1906 // Pc offset of the next buffer check. 2017 // Pc offset of the next constant pool check.
1907 int next_buffer_check_; 2018 int next_constant_pool_check_;
1908 2019
1909 // Constant pool generation 2020 // Constant pool generation
1910 // Pools are emitted in the instruction stream, preferably after unconditional 2021 // Pools are emitted in the instruction stream, preferably after unconditional
1911 // jumps or after returns from functions (in dead code locations). 2022 // jumps or after returns from functions (in dead code locations).
1912 // If a long code sequence does not contain unconditional jumps, it is 2023 // If a long code sequence does not contain unconditional jumps, it is
1913 // necessary to emit the constant pool before the pool gets too far from the 2024 // necessary to emit the constant pool before the pool gets too far from the
1914 // location it is accessed from. In this case, we emit a jump over the emitted 2025 // location it is accessed from. In this case, we emit a jump over the emitted
1915 // constant pool. 2026 // constant pool.
1916 // Constants in the pool may be addresses of functions that get relocated; 2027 // Constants in the pool may be addresses of functions that get relocated;
1917 // if so, a relocation info entry is associated with the constant pool entry. 2028 // if so, a relocation info entry is associated with the constant pool entry.
1918 2029
1919 // Repeated checking whether the constant pool should be emitted is rather 2030 // Repeated checking whether the constant pool should be emitted is rather
1920 // expensive. By default we only check again once a number of instructions 2031 // expensive. By default we only check again once a number of instructions
1921 // has been generated. That also means that the sizing of the buffers is not 2032 // has been generated. That also means that the sizing of the buffers is not
1922 // an exact science, and that we rely on some slop to not overrun buffers. 2033 // an exact science, and that we rely on some slop to not overrun buffers.
1923 static const int kCheckPoolIntervalInst = 128; 2034 static const int kCheckConstPoolIntervalInst = 128;
1924 static const int kCheckPoolInterval = 2035 static const int kCheckConstPoolInterval =
1925 kCheckPoolIntervalInst * kInstructionSize; 2036 kCheckConstPoolIntervalInst * kInstructionSize;
1926 2037
1927 // Constants in pools are accessed via pc relative addressing, which can 2038 // Constants in pools are accessed via pc relative addressing, which can
1928 // reach +/-4KB thereby defining a maximum distance between the instruction 2039 // reach +/-4KB thereby defining a maximum distance between the instruction
1929 // and the accessed constant. 2040 // and the accessed constant.
1930 static const int kMaxDistToPool = 4 * KB; 2041 static const int kMaxDistToConstPool = 4 * KB;
1931 static const int kMaxNumPendingRelocInfo = kMaxDistToPool / kInstructionSize; 2042 static const int kMaxNumPendingRelocInfo =
2043 kMaxDistToConstPool / kInstructionSize;
1932 2044
1933 2045
1934 // Average distance between a constant pool and the first instruction 2046 // Average distance between a constant pool and the first instruction
1935 // accessing the constant pool. Longer distance should result in less I-cache 2047 // accessing the constant pool. Longer distance should result in less I-cache
1936 // pollution. 2048 // pollution.
1937 // In practice the distance will be smaller since constant pool emission is 2049 // In practice the distance will be smaller since constant pool emission is
1938 // forced after function return and sometimes after unconditional branches. 2050 // forced after function return and sometimes after unconditional branches.
1939 static const int kAvgDistToPool = kMaxDistToPool - kCheckPoolInterval; 2051 static const int kAvgDistToConstPool =
2052 kMaxDistToConstPool - kCheckConstPoolInterval;
1940 2053
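The constants compose as follows; a compile-time sketch of the arithmetic (assuming C++11 static_assert, for illustration only):

    const int KB = 1024;
    const int kInstructionSize = 4;
    const int kCheckConstPoolIntervalInst = 128;
    const int kCheckConstPoolInterval =
        kCheckConstPoolIntervalInst * kInstructionSize;  // 512 bytes
    const int kMaxDistToConstPool = 4 * KB;              // +/-4KB pc-relative reach
    const int kAvgDistToConstPool =
        kMaxDistToConstPool - kCheckConstPoolInterval;   // 4096 - 512 = 3584 bytes
    const int kMaxNumPendingRelocInfo =
        kMaxDistToConstPool / kInstructionSize;          // 1024 pending entries
    static_assert(kAvgDistToConstPool == 3584, "check interval slack");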
1941 // Emission of the constant pool may be blocked in some code sequences. 2054 // Emission of the constant pool may be blocked in some code sequences.
1942 int const_pool_blocked_nesting_; // Block emission if this is not zero. 2055 int const_pool_blocked_nesting_; // Block emission if this is not zero.
1943 int no_const_pool_before_; // Block emission before this pc offset. 2056 int no_const_pool_before_; // Block emission before this pc offset.
1944 2057
1945 // Keep track of the first instruction requiring a constant pool entry 2058 // Keep track of the first instruction requiring a constant pool entry
1946 // since the previous constant pool was emitted. 2059 // since the previous constant pool was emitted.
1947 int first_const_pool_use_; 2060 int first_const_pool_use_;
1948 2061
2062 // Emission of the veneer pools may be blocked in some code sequences.
2063 int veneer_pool_blocked_nesting_; // Block emission if this is not zero.
2064
1949 // Relocation info generation 2065 // Relocation info generation
1950 // Each relocation is encoded as a variable size value 2066 // Each relocation is encoded as a variable size value
1951 static const int kMaxRelocSize = RelocInfoWriter::kMaxSize; 2067 static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
1952 RelocInfoWriter reloc_info_writer; 2068 RelocInfoWriter reloc_info_writer;
1953 2069
1954 // Relocation info records are also used during code generation as temporary 2070 // Relocation info records are also used during code generation as temporary
1955 // containers for constants and code target addresses until they are emitted 2071 // containers for constants and code target addresses until they are emitted
1956 // to the constant pool. These pending relocation info records are temporarily 2072 // to the constant pool. These pending relocation info records are temporarily
1957 // stored in a separate buffer until a constant pool is emitted. 2073 // stored in a separate buffer until a constant pool is emitted.
1958 // If every instruction in a long sequence is accessing the pool, we need one 2074 // If every instruction in a long sequence is accessing the pool, we need one
(...skipping 47 matching lines...)
2006 // 2122 //
2007 // The second member gives information about the unresolved branch. The first 2123 // The second member gives information about the unresolved branch. The first
2008 // member of the pair is the maximum offset that the branch can reach in the 2124 // member of the pair is the maximum offset that the branch can reach in the
2009 // buffer. The map is sorted according to this reachable offset, allowing us to 2125 // buffer. The map is sorted according to this reachable offset, allowing us to
2010 // easily check when veneers need to be emitted. 2126 // easily check when veneers need to be emitted.
2011 // Note that the maximum reachable offset (first member of the pairs) should 2127 // Note that the maximum reachable offset (first member of the pairs) should
2012 // always be positive but has the same type as the return value for 2128 // always be positive but has the same type as the return value for
2013 // pc_offset() for convenience. 2129 // pc_offset() for convenience.
2014 std::multimap<int, FarBranchInfo> unresolved_branches_; 2130 std::multimap<int, FarBranchInfo> unresolved_branches_;
2015 2131
2132 // We generate a veneer for a branch if we reach within this distance of the
2133 // limit of the range.
2134 static const int kVeneerDistanceMargin = 1 * KB;
2135 // The factor of 2 is a finger in the air guess. With a default margin of
 2136 // 1KB, that leaves us an additional 256 instructions to avoid generating a
2137 // protective branch.
2138 static const int kVeneerNoProtectionFactor = 2;
2139 static const int kVeneerDistanceCheckMargin =
2140 kVeneerNoProtectionFactor * kVeneerDistanceMargin;
2141 int unresolved_branches_first_limit() const {
2142 ASSERT(!unresolved_branches_.empty());
2143 return unresolved_branches_.begin()->first;
2144 }
2145 // This is similar to next_constant_pool_check_ and helps reduce the overhead
2146 // of checking for veneer pools.
 2147 // It is maintained at the closest unresolved branch limit minus the maximum
2148 // veneer margin (or kMaxInt if there are no unresolved branches).
2149 int next_veneer_pool_check_;
2150
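With the defaults above, the margins work out as follows (a compile-time sanity check, not V8 code):

    const int KB = 1024;
    const int kInstructionSize = 4;
    const int kVeneerDistanceMargin = 1 * KB;  // emit veneers 1KB before the limit
    const int kVeneerNoProtectionFactor = 2;
    const int kVeneerDistanceCheckMargin =
        kVeneerNoProtectionFactor * kVeneerDistanceMargin;  // 2KB
    // The 1KB margin is 1024 / 4 = 256 instructions: the "additional 256
    // instructions" of headroom mentioned in the comment above.
    static_assert(kVeneerDistanceMargin / kInstructionSize == 256,
                  "margin in instructions");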
2016 private: 2151 private:
2017 // If a veneer is emitted for a branch instruction, that instruction must be 2152 // If a veneer is emitted for a branch instruction, that instruction must be
2018 // removed from the associated label's link chain so that the assembler does 2153 // removed from the associated label's link chain so that the assembler does
2019 // not later attempt (likely unsuccessfully) to patch it to branch directly to 2154 // not later attempt (likely unsuccessfully) to patch it to branch directly to
2020 // the label. 2155 // the label.
2021 void DeleteUnresolvedBranchInfoForLabel(Label* label); 2156 void DeleteUnresolvedBranchInfoForLabel(Label* label);
2022 2157
2023 private: 2158 private:
2024 // TODO(jbramley): VIXL uses next_literal_pool_check_ and
2025 // literal_pool_monitor_ to determine when to consider emitting a literal
2026 // pool. V8 doesn't use them, so they should either not be here at all, or
2027 // should replace or be merged with next_buffer_check_ and
2028 // const_pool_blocked_nesting_.
2029 Instruction* next_literal_pool_check_;
2030 unsigned literal_pool_monitor_;
2031
2032 PositionsRecorder positions_recorder_; 2159 PositionsRecorder positions_recorder_;
2033 friend class PositionsRecorder; 2160 friend class PositionsRecorder;
2034 friend class EnsureSpace; 2161 friend class EnsureSpace;
2035 }; 2162 };
2036 2163
2037 class PatchingAssembler : public Assembler { 2164 class PatchingAssembler : public Assembler {
2038 public: 2165 public:
2039 // Create an Assembler with a buffer starting at 'start'. 2166 // Create an Assembler with a buffer starting at 'start'.
2040 // The buffer size is 2167 // The buffer size is
2041 // size of instructions to patch + kGap, 2168 // size of instructions to patch + kGap,
2042 // where kGap is the distance from which the Assembler tries to grow the 2169 // where kGap is the distance from which the Assembler tries to grow the
2043 // buffer. 2170 // buffer.
2044 // If more or fewer instructions than expected are generated or if some 2171 // If more or fewer instructions than expected are generated or if some
2045 // relocation information takes space in the buffer, the PatchingAssembler 2172 // relocation information takes space in the buffer, the PatchingAssembler
2046 // will crash trying to grow the buffer. 2173 // will crash trying to grow the buffer.
2047 PatchingAssembler(Instruction* start, unsigned count) 2174 PatchingAssembler(Instruction* start, unsigned count)
2048 : Assembler(NULL, 2175 : Assembler(NULL,
2049 reinterpret_cast<byte*>(start), 2176 reinterpret_cast<byte*>(start),
2050 count * kInstructionSize + kGap) { 2177 count * kInstructionSize + kGap) {
2051 // Block constant pool emission. 2178 StartBlockPools();
2052 StartBlockConstPool();
2053 } 2179 }
2054 2180
2055 PatchingAssembler(byte* start, unsigned count) 2181 PatchingAssembler(byte* start, unsigned count)
2056 : Assembler(NULL, start, count * kInstructionSize + kGap) { 2182 : Assembler(NULL, start, count * kInstructionSize + kGap) {
2057 // Block constant pool emission. 2183 // Block constant pool emission.
2058 StartBlockConstPool(); 2184 StartBlockPools();
2059 } 2185 }
2060 2186
2061 ~PatchingAssembler() { 2187 ~PatchingAssembler() {
2062 // Const pool should still be blocked. 2188 // Const pool should still be blocked.
2063 ASSERT(is_const_pool_blocked()); 2189 ASSERT(is_const_pool_blocked());
2064 EndBlockConstPool(); 2190 EndBlockPools();
2065 // Verify we have generated the number of instructions we expected. 2191 // Verify we have generated the number of instructions we expected.
2066 ASSERT((pc_offset() + kGap) == buffer_size_); 2192 ASSERT((pc_offset() + kGap) == buffer_size_);
2067 // Verify no relocation information has been emitted. 2193 // Verify no relocation information has been emitted.
2068 ASSERT(num_pending_reloc_info() == 0); 2194 ASSERT(num_pending_reloc_info() == 0);
2069 // Flush the Instruction cache. 2195 // Flush the Instruction cache.
2070 size_t length = buffer_size_ - kGap; 2196 size_t length = buffer_size_ - kGap;
2071 CPU::FlushICache(buffer_, length); 2197 CPU::FlushICache(buffer_, length);
2072 } 2198 }
2073 }; 2199 };
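A hypothetical usage fragment (the two patched instructions are made up for illustration): the constructor sizes the buffer for exactly 'count' instructions, and the destructor asserts that exactly that many were emitted, with no relocation info, before flushing the instruction cache:

    // Overwrite two instructions at 'pc' in already-generated code.
    void PatchTwoInstructions(Instruction* pc, const Register& scratch) {
      PatchingAssembler patcher(pc, 2);  // buffer = 2 * kInstructionSize + kGap
      patcher.movz(scratch, 0);          // must emit exactly two instructions:
      patcher.br(scratch);               // no more, no fewer, no reloc info
    }                                    // dtor checks the count, flushes I-cache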
2074 2200
2075 2201
2076 class EnsureSpace BASE_EMBEDDED { 2202 class EnsureSpace BASE_EMBEDDED {
2077 public: 2203 public:
2078 explicit EnsureSpace(Assembler* assembler) { 2204 explicit EnsureSpace(Assembler* assembler) {
2079 assembler->CheckBuffer(); 2205 assembler->CheckBuffer();
2080 } 2206 }
2081 }; 2207 };
2082 2208
2083 } } // namespace v8::internal 2209 } } // namespace v8::internal
2084 2210
2085 #endif // V8_A64_ASSEMBLER_A64_H_ 2211 #endif // V8_A64_ASSEMBLER_A64_H_