Chromium Code Reviews

Side by Side Diff: src/a64/macro-assembler-a64.h

Issue 196133017: Experimental parser: merge r19949 (Closed)
Base URL: https://v8.googlecode.com/svn/branches/experimental/parser
Patch Set: Created 6 years, 9 months ago
1 // Copyright 2013 the V8 project authors. All rights reserved. 1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 339 matching lines...)
350 const FPRegister& fn, 350 const FPRegister& fn,
351 const FPRegister& fm); 351 const FPRegister& fm);
352 inline void Fmin(const FPRegister& fd, 352 inline void Fmin(const FPRegister& fd,
353 const FPRegister& fn, 353 const FPRegister& fn,
354 const FPRegister& fm); 354 const FPRegister& fm);
355 inline void Fminnm(const FPRegister& fd, 355 inline void Fminnm(const FPRegister& fd,
356 const FPRegister& fn, 356 const FPRegister& fn,
357 const FPRegister& fm); 357 const FPRegister& fm);
358 inline void Fmov(FPRegister fd, FPRegister fn); 358 inline void Fmov(FPRegister fd, FPRegister fn);
359 inline void Fmov(FPRegister fd, Register rn); 359 inline void Fmov(FPRegister fd, Register rn);
360 // Provide explicit double and float interfaces for FP immediate moves, rather
361 // than relying on implicit C++ casts. This allows signalling NaNs to be
362 // preserved when the immediate matches the format of fd. Most systems convert
363 // signalling NaNs to quiet NaNs when converting between float and double.
360 inline void Fmov(FPRegister fd, double imm); 364 inline void Fmov(FPRegister fd, double imm);
365 inline void Fmov(FPRegister fd, float imm);
366 // Provide a template to allow other types to be converted automatically.
367 template<typename T>
368 void Fmov(FPRegister fd, T imm) {
369 ASSERT(allow_macro_instructions_);
370 Fmov(fd, static_cast<double>(imm));
371 }
361 inline void Fmov(Register rd, FPRegister fn); 372 inline void Fmov(Register rd, FPRegister fn);
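Note: the new float overload and the catch-all template are what keep a signalling NaN immediate intact when it already matches the format of fd; any other numeric type still funnels through the double overload. A minimal usage sketch (the register choices and the usual ACCESS_MASM '__' shorthand are illustrative, not part of this patch):

  __ Fmov(s0, 1.5f);  // float immediate into an S register, no round-trip through double
  __ Fmov(d1, 1.5);   // double immediate into a D register
  __ Fmov(d2, 42);    // integer immediate: the template casts to double first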
362 inline void Fmsub(const FPRegister& fd, 373 inline void Fmsub(const FPRegister& fd,
363 const FPRegister& fn, 374 const FPRegister& fn,
364 const FPRegister& fm, 375 const FPRegister& fm,
365 const FPRegister& fa); 376 const FPRegister& fa);
366 inline void Fmul(const FPRegister& fd, 377 inline void Fmul(const FPRegister& fd,
367 const FPRegister& fn, 378 const FPRegister& fn,
368 const FPRegister& fm); 379 const FPRegister& fm);
369 inline void Fneg(const FPRegister& fd, const FPRegister& fn); 380 inline void Fneg(const FPRegister& fd, const FPRegister& fn);
370 inline void Fnmadd(const FPRegister& fd, 381 inline void Fnmadd(const FPRegister& fd,
(...skipping 16 matching lines...)
387 inline void Isb(); 398 inline void Isb();
388 inline void Ldnp(const CPURegister& rt, 399 inline void Ldnp(const CPURegister& rt,
389 const CPURegister& rt2, 400 const CPURegister& rt2,
390 const MemOperand& src); 401 const MemOperand& src);
391 inline void Ldp(const CPURegister& rt, 402 inline void Ldp(const CPURegister& rt,
392 const CPURegister& rt2, 403 const CPURegister& rt2,
393 const MemOperand& src); 404 const MemOperand& src);
394 inline void Ldpsw(const Register& rt, 405 inline void Ldpsw(const Register& rt,
395 const Register& rt2, 406 const Register& rt2,
396 const MemOperand& src); 407 const MemOperand& src);
408 // Provide both double and float interfaces for FP immediate loads, rather
409 // than relying on implicit C++ casts. This allows signalling NaNs to be
410 // preserved when the immediate matches the format of fd. Most systems convert
411 // signalling NaNs to quiet NaNs when converting between float and double.
397 inline void Ldr(const FPRegister& ft, double imm); 412 inline void Ldr(const FPRegister& ft, double imm);
413 inline void Ldr(const FPRegister& ft, float imm);
398 inline void Ldr(const Register& rt, uint64_t imm); 414 inline void Ldr(const Register& rt, uint64_t imm);
399 inline void Lsl(const Register& rd, const Register& rn, unsigned shift); 415 inline void Lsl(const Register& rd, const Register& rn, unsigned shift);
400 inline void Lsl(const Register& rd, const Register& rn, const Register& rm); 416 inline void Lsl(const Register& rd, const Register& rn, const Register& rm);
401 inline void Lsr(const Register& rd, const Register& rn, unsigned shift); 417 inline void Lsr(const Register& rd, const Register& rn, unsigned shift);
402 inline void Lsr(const Register& rd, const Register& rn, const Register& rm); 418 inline void Lsr(const Register& rd, const Register& rn, const Register& rm);
403 inline void Madd(const Register& rd, 419 inline void Madd(const Register& rd,
404 const Register& rn, 420 const Register& rn,
405 const Register& rm, 421 const Register& rm,
406 const Register& ra); 422 const Register& ra);
407 inline void Mneg(const Register& rd, const Register& rn, const Register& rm); 423 inline void Mneg(const Register& rd, const Register& rn, const Register& rm);
(...skipping 105 matching lines...)
513 // must be aligned to 16 bytes on entry and the total size of the specified 529 // must be aligned to 16 bytes on entry and the total size of the specified
514 // registers must also be a multiple of 16 bytes. 530 // registers must also be a multiple of 16 bytes.
515 // 531 //
516 // Even if the current stack pointer is not the system stack pointer (csp), 532 // Even if the current stack pointer is not the system stack pointer (csp),
517 // Push (and derived methods) will still modify the system stack pointer in 533 // Push (and derived methods) will still modify the system stack pointer in
518 // order to comply with ABI rules about accessing memory below the system 534 // order to comply with ABI rules about accessing memory below the system
519 // stack pointer. 535 // stack pointer.
520 // 536 //
521 // Other than the registers passed into Pop, the stack pointer and (possibly) 537 // Other than the registers passed into Pop, the stack pointer and (possibly)
522 // the system stack pointer, these methods do not modify any other registers. 538 // the system stack pointer, these methods do not modify any other registers.
523 // Scratch registers such as Tmp0() and Tmp1() are preserved.
524 void Push(const CPURegister& src0, const CPURegister& src1 = NoReg, 539 void Push(const CPURegister& src0, const CPURegister& src1 = NoReg,
525 const CPURegister& src2 = NoReg, const CPURegister& src3 = NoReg); 540 const CPURegister& src2 = NoReg, const CPURegister& src3 = NoReg);
526 void Pop(const CPURegister& dst0, const CPURegister& dst1 = NoReg, 541 void Pop(const CPURegister& dst0, const CPURegister& dst1 = NoReg,
527 const CPURegister& dst2 = NoReg, const CPURegister& dst3 = NoReg); 542 const CPURegister& dst2 = NoReg, const CPURegister& dst3 = NoReg);
528 543
529 // Alternative forms of Push and Pop, taking a RegList or CPURegList that 544 // Alternative forms of Push and Pop, taking a RegList or CPURegList that
530 // specifies the registers that are to be pushed or popped. Higher-numbered 545 // specifies the registers that are to be pushed or popped. Higher-numbered
531 // registers are associated with higher memory addresses (as in the A32 push 546 // registers are associated with higher memory addresses (as in the A32 push
532 // and pop instructions). 547 // and pop instructions).
533 // 548 //
534 // (Push|Pop)SizeRegList allow you to specify the register size as a 549 // (Push|Pop)SizeRegList allow you to specify the register size as a
535 // parameter. Only kXRegSize, kWRegSize, kDRegSize and kSRegSize are 550 // parameter. Only kXRegSizeInBits, kWRegSizeInBits, kDRegSizeInBits and
536 // supported. 551 // kSRegSizeInBits are supported.
537 // 552 //
538 // Otherwise, (Push|Pop)(CPU|X|W|D|S)RegList is preferred. 553 // Otherwise, (Push|Pop)(CPU|X|W|D|S)RegList is preferred.
539 void PushCPURegList(CPURegList registers); 554 void PushCPURegList(CPURegList registers);
540 void PopCPURegList(CPURegList registers); 555 void PopCPURegList(CPURegList registers);
541 556
542 inline void PushSizeRegList(RegList registers, unsigned reg_size, 557 inline void PushSizeRegList(RegList registers, unsigned reg_size,
543 CPURegister::RegisterType type = CPURegister::kRegister) { 558 CPURegister::RegisterType type = CPURegister::kRegister) {
544 PushCPURegList(CPURegList(type, reg_size, registers)); 559 PushCPURegList(CPURegList(type, reg_size, registers));
545 } 560 }
546 inline void PopSizeRegList(RegList registers, unsigned reg_size, 561 inline void PopSizeRegList(RegList registers, unsigned reg_size,
547 CPURegister::RegisterType type = CPURegister::kRegister) { 562 CPURegister::RegisterType type = CPURegister::kRegister) {
548 PopCPURegList(CPURegList(type, reg_size, registers)); 563 PopCPURegList(CPURegList(type, reg_size, registers));
549 } 564 }
550 inline void PushXRegList(RegList regs) { 565 inline void PushXRegList(RegList regs) {
551 PushSizeRegList(regs, kXRegSize); 566 PushSizeRegList(regs, kXRegSizeInBits);
552 } 567 }
553 inline void PopXRegList(RegList regs) { 568 inline void PopXRegList(RegList regs) {
554 PopSizeRegList(regs, kXRegSize); 569 PopSizeRegList(regs, kXRegSizeInBits);
555 } 570 }
556 inline void PushWRegList(RegList regs) { 571 inline void PushWRegList(RegList regs) {
557 PushSizeRegList(regs, kWRegSize); 572 PushSizeRegList(regs, kWRegSizeInBits);
558 } 573 }
559 inline void PopWRegList(RegList regs) { 574 inline void PopWRegList(RegList regs) {
560 PopSizeRegList(regs, kWRegSize); 575 PopSizeRegList(regs, kWRegSizeInBits);
561 } 576 }
562 inline void PushDRegList(RegList regs) { 577 inline void PushDRegList(RegList regs) {
563 PushSizeRegList(regs, kDRegSize, CPURegister::kFPRegister); 578 PushSizeRegList(regs, kDRegSizeInBits, CPURegister::kFPRegister);
564 } 579 }
565 inline void PopDRegList(RegList regs) { 580 inline void PopDRegList(RegList regs) {
566 PopSizeRegList(regs, kDRegSize, CPURegister::kFPRegister); 581 PopSizeRegList(regs, kDRegSizeInBits, CPURegister::kFPRegister);
567 } 582 }
568 inline void PushSRegList(RegList regs) { 583 inline void PushSRegList(RegList regs) {
569 PushSizeRegList(regs, kSRegSize, CPURegister::kFPRegister); 584 PushSizeRegList(regs, kSRegSizeInBits, CPURegister::kFPRegister);
570 } 585 }
571 inline void PopSRegList(RegList regs) { 586 inline void PopSRegList(RegList regs) {
572 PopSizeRegList(regs, kSRegSize, CPURegister::kFPRegister); 587 PopSizeRegList(regs, kSRegSizeInBits, CPURegister::kFPRegister);
573 } 588 }
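As a sketch of how the renamed size constants and the list-based variants compose (the register choices and the CPURegister Bit() helper are assumptions about the surrounding port, not taken from this patch):

  RegList regs = x1.Bit() | x2.Bit() | x3.Bit();
  __ PushXRegList(regs);   // shorthand for PushSizeRegList(regs, kXRegSizeInBits)
  // ... code that clobbers x1-x3 ...
  __ PopXRegList(regs);    // higher-numbered registers come from higher addresses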
574 589
575 // Push the specified register 'count' times. 590 // Push the specified register 'count' times.
576 void PushMultipleTimes(CPURegister src, Register count); 591 void PushMultipleTimes(CPURegister src, Register count);
577 void PushMultipleTimes(CPURegister src, int count); 592 void PushMultipleTimes(CPURegister src, int count);
578 593
579 // This is a convenience method for pushing a single Handle<Object>. 594 // This is a convenience method for pushing a single Handle<Object>.
580 inline void Push(Handle<Object> handle); 595 inline void Push(Handle<Object> handle);
581 void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); } 596 void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
582 597
(...skipping 65 matching lines...)
648 // 663 //
649 // In debug mode, both of these will write invalid data into the claimed or 664 // In debug mode, both of these will write invalid data into the claimed or
650 // dropped space. 665 // dropped space.
651 // 666 //
652 // If the current stack pointer (according to StackPointer()) is csp, then it 667 // If the current stack pointer (according to StackPointer()) is csp, then it
653 // must be aligned to 16 bytes and the size claimed or dropped must be a 668 // must be aligned to 16 bytes and the size claimed or dropped must be a
654 // multiple of 16 bytes. 669 // multiple of 16 bytes.
655 // 670 //
656 // Note that unit_size must be specified in bytes. For variants which take a 671 // Note that unit_size must be specified in bytes. For variants which take a
657 // Register count, the unit size must be a power of two. 672 // Register count, the unit size must be a power of two.
658 inline void Claim(uint64_t count, uint64_t unit_size = kXRegSizeInBytes); 673 inline void Claim(uint64_t count, uint64_t unit_size = kXRegSize);
659 inline void Claim(const Register& count, 674 inline void Claim(const Register& count,
660 uint64_t unit_size = kXRegSizeInBytes); 675 uint64_t unit_size = kXRegSize);
661 inline void Drop(uint64_t count, uint64_t unit_size = kXRegSizeInBytes); 676 inline void Drop(uint64_t count, uint64_t unit_size = kXRegSize);
662 inline void Drop(const Register& count, 677 inline void Drop(const Register& count,
663 uint64_t unit_size = kXRegSizeInBytes); 678 uint64_t unit_size = kXRegSize);
664 679
665 // Variants of Claim and Drop, where the 'count' parameter is a SMI held in a 680 // Variants of Claim and Drop, where the 'count' parameter is a SMI held in a
666 // register. 681 // register.
667 inline void ClaimBySMI(const Register& count_smi, 682 inline void ClaimBySMI(const Register& count_smi,
668 uint64_t unit_size = kXRegSizeInBytes); 683 uint64_t unit_size = kXRegSize);
669 inline void DropBySMI(const Register& count_smi, 684 inline void DropBySMI(const Register& count_smi,
670 uint64_t unit_size = kXRegSizeInBytes); 685 uint64_t unit_size = kXRegSize);
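A sketch of the intended use after the constant rename (counts illustrative): Claim reserves space below the current stack pointer and Drop releases it, with unit_size now spelled kXRegSize (bytes) instead of kXRegSizeInBytes.

  __ Claim(3);               // reserve 3 * kXRegSize bytes
  // ... use the reserved slots ...
  __ Drop(3);                // release them again
  __ Claim(x10, kXRegSize);  // register-count variant; unit size must be a power of two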
671 686
672 // Compare a register with an operand, and branch to label depending on the 687 // Compare a register with an operand, and branch to label depending on the
673 // condition. May corrupt the status flags. 688 // condition. May corrupt the status flags.
674 inline void CompareAndBranch(const Register& lhs, 689 inline void CompareAndBranch(const Register& lhs,
675 const Operand& rhs, 690 const Operand& rhs,
676 Condition cond, 691 Condition cond,
677 Label* label); 692 Label* label);
678 693
679 // Test the bits of register defined by bit_pattern, and branch if ANY of 694 // Test the bits of register defined by bit_pattern, and branch if ANY of
680 // those bits are set. May corrupt the status flags. 695 // those bits are set. May corrupt the status flags.
(...skipping 58 matching lines...)
739 // Floating-point registers are popped after general-purpose registers, and 754 // Floating-point registers are popped after general-purpose registers, and
740 // thus come from higher addresses. 755 // thus come from higher addresses.
741 // 756 //
742 // This method must not be called unless the current stack pointer (as set by 757 // This method must not be called unless the current stack pointer (as set by
743 // SetStackPointer) is the system stack pointer (csp), and is aligned to 758 // SetStackPointer) is the system stack pointer (csp), and is aligned to
744 // ActivationFrameAlignment(). 759 // ActivationFrameAlignment().
745 void PopCalleeSavedRegisters(); 760 void PopCalleeSavedRegisters();
746 761
747 // Set the current stack pointer, but don't generate any code. 762 // Set the current stack pointer, but don't generate any code.
748 inline void SetStackPointer(const Register& stack_pointer) { 763 inline void SetStackPointer(const Register& stack_pointer) {
749 ASSERT(!AreAliased(stack_pointer, Tmp0(), Tmp1())); 764 ASSERT(!TmpList()->IncludesAliasOf(stack_pointer));
750 sp_ = stack_pointer; 765 sp_ = stack_pointer;
751 } 766 }
752 767
753 // Return the current stack pointer, as set by SetStackPointer. 768 // Return the current stack pointer, as set by SetStackPointer.
754 inline const Register& StackPointer() const { 769 inline const Register& StackPointer() const {
755 return sp_; 770 return sp_;
756 } 771 }
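The new assertion only requires that the chosen stack pointer is absent from TmpList(), instead of naming Tmp0()/Tmp1() explicitly. Typical use (jssp as the JavaScript stack pointer, as elsewhere in this port):

  __ SetStackPointer(jssp);            // later Push/Pop/Claim/Drop operate on jssp
  ASSERT(jssp.Is(__ StackPointer()));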
757 772
758 // Align csp for a frame, as per ActivationFrameAlignment, and make it the 773 // Align csp for a frame, as per ActivationFrameAlignment, and make it the
759 // current stack pointer. 774 // current stack pointer.
(...skipping 173 matching lines...)
933 Label* on_failed_conversion = NULL) { 948 Label* on_failed_conversion = NULL) {
934 ASSERT(as_int.Is64Bits()); 949 ASSERT(as_int.Is64Bits());
935 TryConvertDoubleToInt(as_int, value, scratch_d, on_successful_conversion, 950 TryConvertDoubleToInt(as_int, value, scratch_d, on_successful_conversion,
936 on_failed_conversion); 951 on_failed_conversion);
937 } 952 }
938 953
939 // ---- Object Utilities ---- 954 // ---- Object Utilities ----
940 955
941 // Copy fields from 'src' to 'dst', where both are tagged objects. 956 // Copy fields from 'src' to 'dst', where both are tagged objects.
942 // The 'temps' list is a list of X registers which can be used for scratch 957 // The 'temps' list is a list of X registers which can be used for scratch
943 // values. The temps list must include at least one register, and it must not 958 // values. The temps list must include at least one register.
944 // contain Tmp0() or Tmp1().
945 // 959 //
946 // Currently, CopyFields cannot make use of more than three registers from 960 // Currently, CopyFields cannot make use of more than three registers from
947 // the 'temps' list. 961 // the 'temps' list.
948 // 962 //
949 // As with several MacroAssembler methods, Tmp0() and Tmp1() will be used. 963 // CopyFields expects to be able to take at least two registers from
964 // MacroAssembler::TmpList().
950 void CopyFields(Register dst, Register src, CPURegList temps, unsigned count); 965 void CopyFields(Register dst, Register src, CPURegList temps, unsigned count);
951 966
967 // Starting at address in dst, initialize field_count 64-bit fields with
968 // 64-bit value in register filler. Register dst is corrupted.
969 void FillFields(Register dst,
970 Register field_count,
971 Register filler);
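A sketch of the two field helpers side by side (register choices are illustrative; per the comment above, CopyFields may also draw scratch registers from TmpList()):

  // Copy 3 tagged fields from the object in x1 to the object in x0.
  __ CopyFields(x0, x1, CPURegList(x10, x11), 3);

  // Fill 2 consecutive 64-bit fields starting at the address in x2 with
  // zero (xzr as the filler value here); x2 is corrupted by the call.
  __ Mov(x3, 2);
  __ FillFields(x2, x3, xzr);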
972
952 // Copies a number of bytes from src to dst. All passed registers are 973 // Copies a number of bytes from src to dst. All passed registers are
953 // clobbered. On exit src and dst will point to the place just after where the 974 // clobbered. On exit src and dst will point to the place just after where the
954 // last byte was read or written and length will be zero. Hint may be used to 975 // last byte was read or written and length will be zero. Hint may be used to
955 // determine which is the most efficient algorithm to use for copying. 976 // determine which is the most efficient algorithm to use for copying.
956 void CopyBytes(Register dst, 977 void CopyBytes(Register dst,
957 Register src, 978 Register src,
958 Register length, 979 Register length,
959 Register scratch, 980 Register scratch,
960 CopyHint hint = kCopyUnknown); 981 CopyHint hint = kCopyUnknown);
961 982
962 // Initialize fields with filler values. Fields starting at start_offset not
963 // including end_offset are overwritten with the value in filler. At the end
964 // of the loop, start_offset takes the value of end_offset.
965 void InitializeFieldsWithFiller(Register start_offset,
966 Register end_offset,
967 Register filler);
968
969 // ---- String Utilities ---- 983 // ---- String Utilities ----
970 984
971 985
972 // Jump to label if either object is not a sequential ASCII string. 986 // Jump to label if either object is not a sequential ASCII string.
973 // Optionally perform a smi check on the objects first. 987 // Optionally perform a smi check on the objects first.
974 void JumpIfEitherIsNotSequentialAsciiStrings( 988 void JumpIfEitherIsNotSequentialAsciiStrings(
975 Register first, 989 Register first,
976 Register second, 990 Register second,
977 Register scratch1, 991 Register scratch1,
978 Register scratch2, 992 Register scratch2,
(...skipping 62 matching lines...)
1041 void CallRuntime(const Runtime::Function* f, 1055 void CallRuntime(const Runtime::Function* f,
1042 int num_arguments, 1056 int num_arguments,
1043 SaveFPRegsMode save_doubles = kDontSaveFPRegs); 1057 SaveFPRegsMode save_doubles = kDontSaveFPRegs);
1044 1058
1045 void CallRuntime(Runtime::FunctionId id, 1059 void CallRuntime(Runtime::FunctionId id,
1046 int num_arguments, 1060 int num_arguments,
1047 SaveFPRegsMode save_doubles = kDontSaveFPRegs) { 1061 SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
1048 CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles); 1062 CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
1049 } 1063 }
1050 1064
1051 // TODO(all): Why does this variant save FP regs unconditionally?
1052 void CallRuntimeSaveDoubles(Runtime::FunctionId id) { 1065 void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
1053 const Runtime::Function* function = Runtime::FunctionForId(id); 1066 const Runtime::Function* function = Runtime::FunctionForId(id);
1054 CallRuntime(function, function->nargs, kSaveFPRegs); 1067 CallRuntime(function, function->nargs, kSaveFPRegs);
1055 } 1068 }
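The convenience overloads all funnel into the same entry point; a sketch (the runtime function id and argument count are placeholders, not taken from this patch):

  // These two calls are equivalent:
  __ CallRuntime(Runtime::kStringCompare, 2);
  __ CallRuntime(Runtime::FunctionForId(Runtime::kStringCompare), 2);

  // Saves and restores FP registers around the call, and picks up the
  // argument count from the Runtime::Function itself:
  __ CallRuntimeSaveDoubles(Runtime::kStringCompare);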
1056 1069
1057 void TailCallRuntime(Runtime::FunctionId fid, 1070 void TailCallRuntime(Runtime::FunctionId fid,
1058 int num_arguments, 1071 int num_arguments,
1059 int result_size); 1072 int result_size);
1060 1073
1061 int ActivationFrameAlignment(); 1074 int ActivationFrameAlignment();
(...skipping 42 matching lines...)
1104 int num_arguments); 1117 int num_arguments);
1105 1118
1106 1119
1107 // Invoke specified builtin JavaScript function. Adds an entry to 1120 // Invoke specified builtin JavaScript function. Adds an entry to
1108 // the unresolved list if the name does not resolve. 1121 // the unresolved list if the name does not resolve.
1109 void InvokeBuiltin(Builtins::JavaScript id, 1122 void InvokeBuiltin(Builtins::JavaScript id,
1110 InvokeFlag flag, 1123 InvokeFlag flag,
1111 const CallWrapper& call_wrapper = NullCallWrapper()); 1124 const CallWrapper& call_wrapper = NullCallWrapper());
1112 1125
1113 // Store the code object for the given builtin in the target register and 1126 // Store the code object for the given builtin in the target register and
1114 // setup the function in x1. 1127 // setup the function in the function register.
1115 // TODO(all): Can we use another register than x1? 1128 void GetBuiltinEntry(Register target,
1116 void GetBuiltinEntry(Register target, Builtins::JavaScript id); 1129 Register function,
1130 Builtins::JavaScript id);
1117 1131
1118 // Store the function for the given builtin in the target register. 1132 // Store the function for the given builtin in the target register.
1119 void GetBuiltinFunction(Register target, Builtins::JavaScript id); 1133 void GetBuiltinFunction(Register target, Builtins::JavaScript id);
1120 1134
1121 void Jump(Register target); 1135 void Jump(Register target);
1122 void Jump(Address target, RelocInfo::Mode rmode); 1136 void Jump(Address target, RelocInfo::Mode rmode);
1123 void Jump(Handle<Code> code, RelocInfo::Mode rmode); 1137 void Jump(Handle<Code> code, RelocInfo::Mode rmode);
1124 void Jump(intptr_t target, RelocInfo::Mode rmode); 1138 void Jump(intptr_t target, RelocInfo::Mode rmode);
1125 1139
1126 void Call(Register target); 1140 void Call(Register target);
(...skipping 315 matching lines...)
1442 1456
1443 // Test the bitfield of the heap object map with mask and set the condition 1457 // Test the bitfield of the heap object map with mask and set the condition
1444 // flags. The object register is preserved. 1458 // flags. The object register is preserved.
1445 void TestMapBitfield(Register object, uint64_t mask); 1459 void TestMapBitfield(Register object, uint64_t mask);
1446 1460
1447 // Load the elements kind field of an object, and return it in the result 1461 // Load the elements kind field of an object, and return it in the result
1448 // register. 1462 // register.
1449 void LoadElementsKind(Register result, Register object); 1463 void LoadElementsKind(Register result, Register object);
1450 1464
1451 // Compare the object in a register to a value from the root list. 1465 // Compare the object in a register to a value from the root list.
1452 // Uses the Tmp0() register as scratch.
1453 void CompareRoot(const Register& obj, Heap::RootListIndex index); 1466 void CompareRoot(const Register& obj, Heap::RootListIndex index);
1454 1467
1455 // Compare the object in a register to a value and jump if they are equal. 1468 // Compare the object in a register to a value and jump if they are equal.
1456 void JumpIfRoot(const Register& obj, 1469 void JumpIfRoot(const Register& obj,
1457 Heap::RootListIndex index, 1470 Heap::RootListIndex index,
1458 Label* if_equal); 1471 Label* if_equal);
1459 1472
1460 // Compare the object in a register to a value and jump if they are not equal. 1473 // Compare the object in a register to a value and jump if they are not equal.
1461 void JumpIfNotRoot(const Register& obj, 1474 void JumpIfNotRoot(const Register& obj,
1462 Heap::RootListIndex index, 1475 Heap::RootListIndex index,
(...skipping 86 matching lines...)
1549 void EmitSeqStringSetCharCheck(Register string, 1562 void EmitSeqStringSetCharCheck(Register string,
1550 Register index, 1563 Register index,
1551 SeqStringSetCharCheckIndexType index_type, 1564 SeqStringSetCharCheckIndexType index_type,
1552 Register scratch, 1565 Register scratch,
1553 uint32_t encoding_mask); 1566 uint32_t encoding_mask);
1554 1567
1555 // Generate code for checking access rights - used for security checks 1568 // Generate code for checking access rights - used for security checks
1556 // on access to global objects across environments. The holder register 1569 // on access to global objects across environments. The holder register
1557 // is left untouched, whereas both scratch registers are clobbered. 1570 // is left untouched, whereas both scratch registers are clobbered.
1558 void CheckAccessGlobalProxy(Register holder_reg, 1571 void CheckAccessGlobalProxy(Register holder_reg,
1559 Register scratch, 1572 Register scratch1,
1573 Register scratch2,
1560 Label* miss); 1574 Label* miss);
1561 1575
1562 // Hash the integer value in 'key' register. 1576 // Hash the integer value in 'key' register.
1563 // It uses the same algorithm as ComputeIntegerHash in utils.h. 1577 // It uses the same algorithm as ComputeIntegerHash in utils.h.
1564 void GetNumberHash(Register key, Register scratch); 1578 void GetNumberHash(Register key, Register scratch);
1565 1579
1566 // Load value from the dictionary. 1580 // Load value from the dictionary.
1567 // 1581 //
1568 // elements - holds the slow-case elements of the receiver on entry. 1582 // elements - holds the slow-case elements of the receiver on entry.
1569 // Unchanged unless 'result' is the same register. 1583 // Unchanged unless 'result' is the same register.
(...skipping 11 matching lines...)
1581 Register result, 1595 Register result,
1582 Register scratch0, 1596 Register scratch0,
1583 Register scratch1, 1597 Register scratch1,
1584 Register scratch2, 1598 Register scratch2,
1585 Register scratch3); 1599 Register scratch3);
1586 1600
1587 // --------------------------------------------------------------------------- 1601 // ---------------------------------------------------------------------------
1588 // Frames. 1602 // Frames.
1589 1603
1590 // Activation support. 1604 // Activation support.
1591 // Note that Tmp0() and Tmp1() are used as a scratch registers. This is safe
1592 // because these methods are not used in Crankshaft.
1593 void EnterFrame(StackFrame::Type type); 1605 void EnterFrame(StackFrame::Type type);
1594 void LeaveFrame(StackFrame::Type type); 1606 void LeaveFrame(StackFrame::Type type);
1595 1607
1596 // Returns map with validated enum cache in object register. 1608 // Returns map with validated enum cache in object register.
1597 void CheckEnumCache(Register object, 1609 void CheckEnumCache(Register object,
1598 Register null_value, 1610 Register null_value,
1599 Register scratch0, 1611 Register scratch0,
1600 Register scratch1, 1612 Register scratch1,
1601 Register scratch2, 1613 Register scratch2,
1602 Register scratch3, 1614 Register scratch3,
(...skipping 67 matching lines...)
1670 // * The exit frame is dropped. 1682 // * The exit frame is dropped.
1671 // * The stack pointer is reset to jssp. 1683 // * The stack pointer is reset to jssp.
1672 // 1684 //
1673 // The stack pointer must be csp on entry. 1685 // The stack pointer must be csp on entry.
1674 void LeaveExitFrame(bool save_doubles, 1686 void LeaveExitFrame(bool save_doubles,
1675 const Register& scratch, 1687 const Register& scratch,
1676 bool restore_context); 1688 bool restore_context);
1677 1689
1678 void LoadContext(Register dst, int context_chain_length); 1690 void LoadContext(Register dst, int context_chain_length);
1679 1691
1692 // Emit code for a flooring division by a constant. The dividend register is
1693 // unchanged. Dividend and result must be different.
1694 void FlooringDiv(Register result, Register dividend, int32_t divisor);
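FlooringDiv is new in this patch; a usage sketch consistent with the declared contract (registers illustrative):

  // x0 = x1 / 3, rounded toward negative infinity (unlike Sdiv, which
  // truncates toward zero). x1 is unchanged and must differ from x0.
  __ FlooringDiv(x0, x1, 3);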
1695
1680 // --------------------------------------------------------------------------- 1696 // ---------------------------------------------------------------------------
1681 // StatsCounter support 1697 // StatsCounter support
1682 1698
1683 void SetCounter(StatsCounter* counter, int value, Register scratch1, 1699 void SetCounter(StatsCounter* counter, int value, Register scratch1,
1684 Register scratch2); 1700 Register scratch2);
1685 void IncrementCounter(StatsCounter* counter, int value, Register scratch1, 1701 void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
1686 Register scratch2); 1702 Register scratch2);
1687 void DecrementCounter(StatsCounter* counter, int value, Register scratch1, 1703 void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
1688 Register scratch2); 1704 Register scratch2);
1689 1705
1690 // --------------------------------------------------------------------------- 1706 // ---------------------------------------------------------------------------
1691 // Garbage collector support (GC). 1707 // Garbage collector support (GC).
1692 1708
1693 enum RememberedSetFinalAction { 1709 enum RememberedSetFinalAction {
1694 kReturnAtEnd, 1710 kReturnAtEnd,
1695 kFallThroughAtEnd 1711 kFallThroughAtEnd
1696 }; 1712 };
1697 1713
1698 // Record in the remembered set the fact that we have a pointer to new space 1714 // Record in the remembered set the fact that we have a pointer to new space
1699 // at the address pointed to by the addr register. Only works if addr is not 1715 // at the address pointed to by the addr register. Only works if addr is not
1700 // in new space. 1716 // in new space.
1701 void RememberedSetHelper(Register object, // Used for debug code. 1717 void RememberedSetHelper(Register object, // Used for debug code.
1702 Register addr, 1718 Register addr,
1703 Register scratch, 1719 Register scratch1,
1704 SaveFPRegsMode save_fp, 1720 SaveFPRegsMode save_fp,
1705 RememberedSetFinalAction and_then); 1721 RememberedSetFinalAction and_then);
1706 1722
1707 // Push and pop the registers that can hold pointers, as defined by the 1723 // Push and pop the registers that can hold pointers, as defined by the
1708 // RegList constant kSafepointSavedRegisters. 1724 // RegList constant kSafepointSavedRegisters.
1709 void PushSafepointRegisters(); 1725 void PushSafepointRegisters();
1710 void PopSafepointRegisters(); 1726 void PopSafepointRegisters();
1711 1727
1712 void PushSafepointFPRegisters(); 1728 void PushSafepointFPRegisters();
1713 void PopSafepointFPRegisters(); 1729 void PopSafepointFPRegisters();
(...skipping 164 matching lines...)
1878 void Abort(BailoutReason reason); 1894 void Abort(BailoutReason reason);
1879 1895
1880 // Conditionally load the cached Array transitioned map of type 1896 // Conditionally load the cached Array transitioned map of type
1881 // transitioned_kind from the native context if the map in register 1897 // transitioned_kind from the native context if the map in register
1882 // map_in_out is the cached Array map in the native context of 1898 // map_in_out is the cached Array map in the native context of
1883 // expected_kind. 1899 // expected_kind.
1884 void LoadTransitionedArrayMapConditional( 1900 void LoadTransitionedArrayMapConditional(
1885 ElementsKind expected_kind, 1901 ElementsKind expected_kind,
1886 ElementsKind transitioned_kind, 1902 ElementsKind transitioned_kind,
1887 Register map_in_out, 1903 Register map_in_out,
1888 Register scratch, 1904 Register scratch1,
1905 Register scratch2,
1889 Label* no_map_match); 1906 Label* no_map_match);
1890 1907
1891 void LoadGlobalFunction(int index, Register function); 1908 void LoadGlobalFunction(int index, Register function);
1892 1909
1893 // Load the initial map from the global function. The registers function and 1910 // Load the initial map from the global function. The registers function and
1894 // map can be the same, function is then overwritten. 1911 // map can be the same, function is then overwritten.
1895 void LoadGlobalFunctionInitialMap(Register function, 1912 void LoadGlobalFunctionInitialMap(Register function,
1896 Register map, 1913 Register map,
1897 Register scratch); 1914 Register scratch);
1898 1915
1899 // -------------------------------------------------------------------------- 1916 CPURegList* TmpList() { return &tmp_list_; }
1900 // Set the registers used internally by the MacroAssembler as scratch 1917 CPURegList* FPTmpList() { return &fptmp_list_; }
1901 // registers. These registers are used to implement behaviours which are not
1902 // directly supported by A64, and where an intermediate result is required.
1903 //
1904 // Both tmp0 and tmp1 may be set to any X register except for xzr, sp,
1905 // and StackPointer(). Also, they must not be the same register (though they
1906 // may both be NoReg).
1907 //
1908 // It is valid to set either or both of these registers to NoReg if you don't
1909 // want the MacroAssembler to use any scratch registers. In a debug build, the
1910 // Assembler will assert that any registers it uses are valid. Be aware that
1911 // this check is not present in release builds. If this is a problem, use the
1912 // Assembler directly.
1913 void SetScratchRegisters(const Register& tmp0, const Register& tmp1) {
1914 // V8 assumes the macro assembler uses ip0 and ip1 as temp registers.
1915 ASSERT(tmp0.IsNone() || tmp0.Is(ip0));
1916 ASSERT(tmp1.IsNone() || tmp1.Is(ip1));
1917
1918 ASSERT(!AreAliased(xzr, csp, tmp0, tmp1));
1919 ASSERT(!AreAliased(StackPointer(), tmp0, tmp1));
1920 tmp0_ = tmp0;
1921 tmp1_ = tmp1;
1922 }
1923
1924 const Register& Tmp0() const {
1925 return tmp0_;
1926 }
1927
1928 const Register& Tmp1() const {
1929 return tmp1_;
1930 }
1931
1932 const Register WTmp0() const {
1933 return Register::Create(tmp0_.code(), kWRegSize);
1934 }
1935
1936 const Register WTmp1() const {
1937 return Register::Create(tmp1_.code(), kWRegSize);
1938 }
1939
1940 void SetFPScratchRegister(const FPRegister& fptmp0) {
1941 fptmp0_ = fptmp0;
1942 }
1943
1944 const FPRegister& FPTmp0() const {
1945 return fptmp0_;
1946 }
1947
1948 const Register AppropriateTempFor(
1949 const Register& target,
1950 const CPURegister& forbidden = NoCPUReg) const {
1951 Register candidate = forbidden.Is(Tmp0()) ? Tmp1() : Tmp0();
1952 ASSERT(!candidate.Is(target));
1953 return Register::Create(candidate.code(), target.SizeInBits());
1954 }
1955
1956 const FPRegister AppropriateTempFor(
1957 const FPRegister& target,
1958 const CPURegister& forbidden = NoCPUReg) const {
1959 USE(forbidden);
1960 FPRegister candidate = FPTmp0();
1961 ASSERT(!candidate.Is(forbidden));
1962 ASSERT(!candidate.Is(target));
1963 return FPRegister::Create(candidate.code(), target.SizeInBits());
1964 }
1965 1918
1966 // Like printf, but print at run-time from generated code. 1919 // Like printf, but print at run-time from generated code.
1967 // 1920 //
1968 // The caller must ensure that arguments for floating-point placeholders 1921 // The caller must ensure that arguments for floating-point placeholders
1969 // (such as %e, %f or %g) are FPRegisters, and that arguments for integer 1922 // (such as %e, %f or %g) are FPRegisters, and that arguments for integer
1970 // placeholders are Registers. 1923 // placeholders are Registers.
1971 // 1924 //
1972 // A maximum of four arguments may be given to any single Printf call. The 1925 // A maximum of four arguments may be given to any single Printf call. The
1973 // arguments must be of the same type, but they do not need to have the same 1926 // arguments must be of the same type, but they do not need to have the same
1974 // size. 1927 // size.
1975 // 1928 //
1976 // The following registers cannot be printed: 1929 // The following registers cannot be printed:
1977 // Tmp0(), Tmp1(), StackPointer(), csp. 1930 // StackPointer(), csp.
1978 // 1931 //
1979 // This function automatically preserves caller-saved registers so that 1932 // This function automatically preserves caller-saved registers so that
1980 // calling code can use Printf at any point without having to worry about 1933 // calling code can use Printf at any point without having to worry about
1981 // corruption. The preservation mechanism generates a lot of code. If this is 1934 // corruption. The preservation mechanism generates a lot of code. If this is
1982 // a problem, preserve the important registers manually and then call 1935 // a problem, preserve the important registers manually and then call
1983 // PrintfNoPreserve. Callee-saved registers are not used by Printf, and are 1936 // PrintfNoPreserve. Callee-saved registers are not used by Printf, and are
1984 // implicitly preserved. 1937 // implicitly preserved.
1985 // 1938 //
1986 // Unlike many MacroAssembler functions, x8 and x9 are guaranteed to be 1939 // Unlike many MacroAssembler functions, x8 and x9 are guaranteed to be
1987 // preserved, and can be printed. This allows Printf to be used during debug 1940 // preserved, and can be printed. This allows Printf to be used during debug
(...skipping 64 matching lines...)
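As an aside on the Printf contract documented above (before the collapsed context): a call consistent with it might look like the sketch below. The format strings and registers are illustrative; all arguments in one call must be of the same type, integer placeholders take Registers and floating-point placeholders take FPRegisters.

  __ Printf("w0 = %d, w1 = %d\n", w0, w1);   // integer arguments: Registers only
  __ Printf("d0 = %f, s1 = %f\n", d0, s1);   // FP arguments: FPRegisters only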
2052 2005
2053 // Jumps to found label if a prototype map has dictionary elements. 2006 // Jumps to found label if a prototype map has dictionary elements.
2054 void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0, 2007 void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
2055 Register scratch1, Label* found); 2008 Register scratch1, Label* found);
2056 2009
2057 private: 2010 private:
2058 // Helpers for CopyFields. 2011 // Helpers for CopyFields.
2059 // These each implement CopyFields in a different way. 2012 // These each implement CopyFields in a different way.
2060 void CopyFieldsLoopPairsHelper(Register dst, Register src, unsigned count, 2013 void CopyFieldsLoopPairsHelper(Register dst, Register src, unsigned count,
2061 Register scratch1, Register scratch2, 2014 Register scratch1, Register scratch2,
2062 Register scratch3); 2015 Register scratch3, Register scratch4,
2016 Register scratch5);
2063 void CopyFieldsUnrolledPairsHelper(Register dst, Register src, unsigned count, 2017 void CopyFieldsUnrolledPairsHelper(Register dst, Register src, unsigned count,
2064 Register scratch1, Register scratch2); 2018 Register scratch1, Register scratch2,
2019 Register scratch3, Register scratch4);
2065 void CopyFieldsUnrolledHelper(Register dst, Register src, unsigned count, 2020 void CopyFieldsUnrolledHelper(Register dst, Register src, unsigned count,
2066 Register scratch1); 2021 Register scratch1, Register scratch2,
2022 Register scratch3);
2067 2023
2068 // The actual Push and Pop implementations. These don't generate any code 2024 // The actual Push and Pop implementations. These don't generate any code
2069 // other than that required for the push or pop. This allows 2025 // other than that required for the push or pop. This allows
2070 // (Push|Pop)CPURegList to bundle together run-time assertions for a large 2026 // (Push|Pop)CPURegList to bundle together run-time assertions for a large
2071 // block of registers. 2027 // block of registers.
2072 // 2028 //
2073 // Note that size is per register, and is specified in bytes. 2029 // Note that size is per register, and is specified in bytes.
2074 void PushHelper(int count, int size, 2030 void PushHelper(int count, int size,
2075 const CPURegister& src0, const CPURegister& src1, 2031 const CPURegister& src0, const CPURegister& src1,
2076 const CPURegister& src2, const CPURegister& src3); 2032 const CPURegister& src2, const CPURegister& src3);
(...skipping 60 matching lines...)
2137 // The purpose of this is to allow Aborts to be compiled whilst CEntryStub is 2093 // The purpose of this is to allow Aborts to be compiled whilst CEntryStub is
2138 // being generated. 2094 // being generated.
2139 bool use_real_aborts_; 2095 bool use_real_aborts_;
2140 2096
2141 // This handle will be patched with the code object on installation. 2097 // This handle will be patched with the code object on installation.
2142 Handle<Object> code_object_; 2098 Handle<Object> code_object_;
2143 2099
2144 // The register to use as a stack pointer for stack operations. 2100 // The register to use as a stack pointer for stack operations.
2145 Register sp_; 2101 Register sp_;
2146 2102
2147 // Scratch registers used internally by the MacroAssembler. 2103 // Scratch registers available for use by the MacroAssembler.
2148 Register tmp0_; 2104 CPURegList tmp_list_;
2149 Register tmp1_; 2105 CPURegList fptmp_list_;
2150 FPRegister fptmp0_;
2151 2106
2152 void InitializeNewString(Register string, 2107 void InitializeNewString(Register string,
2153 Register length, 2108 Register length,
2154 Heap::RootListIndex map_index, 2109 Heap::RootListIndex map_index,
2155 Register scratch1, 2110 Register scratch1,
2156 Register scratch2); 2111 Register scratch2);
2157 2112
2158 public: 2113 public:
2159 // Far branches resolving. 2114 // Far branches resolving.
2160 // 2115 //
2161 // The various classes of branch instructions with immediate offsets have 2116 // The various classes of branch instructions with immediate offsets have
2162 // different ranges. While the Assembler will fail to assemble a branch 2117 // different ranges. While the Assembler will fail to assemble a branch
2163 // exceeding its range, the MacroAssembler offers a mechanism to resolve 2118 // exceeding its range, the MacroAssembler offers a mechanism to resolve
2164 // branches to too distant targets, either by tweaking the generated code to 2119 // branches to too distant targets, either by tweaking the generated code to
2165 // use branch instructions with wider ranges or generating veneers. 2120 // use branch instructions with wider ranges or generating veneers.
2166 // 2121 //
2167 // Currently branches to distant targets are resolved using unconditional 2122 // Currently branches to distant targets are resolved using unconditional
2168 // branch instructions with a range of +-128MB. If that becomes too little 2123 // branch instructions with a range of +-128MB. If that becomes too little
2169 // (!), the mechanism can be extended to generate special veneers for really 2124 // (!), the mechanism can be extended to generate special veneers for really
2170 // far targets. 2125 // far targets.
2171 2126
2172 // Returns true if we should emit a veneer as soon as possible for a branch
2173 // which can at most reach to specified pc.
2174 bool ShouldEmitVeneer(int max_reachable_pc,
2175 int margin = kVeneerDistanceMargin);
2176
2177 // The maximum code size generated for a veneer. Currently one branch
2178 // instruction. This is for code size checking purposes, and can be extended
2179 // in the future for example if we decide to add nops between the veneers.
2180 static const int kMaxVeneerCodeSize = 1 * kInstructionSize;
2181
2182 // Emits veneers for branches that are approaching their maximum range.
2183 // If need_protection is true, the veneers are protected by a branch jumping
2184 // over the code.
2185 void EmitVeneers(bool need_protection);
2186 void EmitVeneersGuard();
2187 // Checks whether veneers need to be emitted at this point.
2188 void CheckVeneers(bool need_protection);
2189
2190 // Helps resolve branching to labels potentially out of range. 2127 // Helps resolve branching to labels potentially out of range.
2191 // If the label is not bound, it registers the information necessary to later 2128 // If the label is not bound, it registers the information necessary to later
2192 // be able to emit a veneer for this branch if necessary. 2129 // be able to emit a veneer for this branch if necessary.
2193 // If the label is bound, it returns true if the label (or the previous link 2130 // If the label is bound, it returns true if the label (or the previous link
2194 // in the label chain) is out of range. In that case the caller is responsible 2131 // in the label chain) is out of range. In that case the caller is responsible
2195 // for generating appropriate code. 2132 // for generating appropriate code.
2196 // Otherwise it returns false. 2133 // Otherwise it returns false.
2197 // This function also checks whether veneers need to be emitted. 2134 // This function also checks whether veneers need to be emitted.
2198 bool NeedExtraInstructionsOrRegisterBranch(Label *label, 2135 bool NeedExtraInstructionsOrRegisterBranch(Label *label,
2199 ImmBranchType branch_type); 2136 ImmBranchType branch_type);
2200
2201 private:
2202 // We generate a veneer for a branch if we reach within this distance of the
2203 // limit of the range.
2204 static const int kVeneerDistanceMargin = 4 * KB;
2205 int unresolved_branches_first_limit() const {
2206 ASSERT(!unresolved_branches_.empty());
2207 return unresolved_branches_.begin()->first;
2208 }
2209 }; 2137 };
2210 2138
2211 2139
2212 // Use this scope when you need a one-to-one mapping between methods and 2140 // Use this scope when you need a one-to-one mapping between methods and
2213 // instructions. This scope prevents the MacroAssembler from being called and 2141 // instructions. This scope prevents the MacroAssembler from being called and
2214 // literal pools from being emitted. It also asserts the number of instructions 2142 // literal pools from being emitted. It also asserts the number of instructions
2215 // emitted is what you specified when creating the scope. 2143 // emitted is what you specified when creating the scope.
2216 class InstructionAccurateScope BASE_EMBEDDED { 2144 class InstructionAccurateScope BASE_EMBEDDED {
2217 public: 2145 public:
2218 InstructionAccurateScope(MacroAssembler* masm, size_t count = 0) 2146 InstructionAccurateScope(MacroAssembler* masm, size_t count = 0)
(...skipping 29 matching lines...)
2248 private: 2176 private:
2249 MacroAssembler* masm_; 2177 MacroAssembler* masm_;
2250 #ifdef DEBUG 2178 #ifdef DEBUG
2251 size_t size_; 2179 size_t size_;
2252 Label start_; 2180 Label start_;
2253 bool previous_allow_macro_instructions_; 2181 bool previous_allow_macro_instructions_;
2254 #endif 2182 #endif
2255 }; 2183 };
2256 2184
2257 2185
2186 // This scope utility allows scratch registers to be managed safely. The
2187 // MacroAssembler's TmpList() (and FPTmpList()) is used as a pool of scratch
2188 // registers. These registers can be allocated on demand, and will be returned
2189 // at the end of the scope.
2190 //
2191 // When the scope ends, the MacroAssembler's lists will be restored to their
2192 // original state, even if the lists were modified by some other means.
2193 class UseScratchRegisterScope {
2194 public:
2195 explicit UseScratchRegisterScope(MacroAssembler* masm)
2196 : available_(masm->TmpList()),
2197 availablefp_(masm->FPTmpList()),
2198 old_available_(available_->list()),
2199 old_availablefp_(availablefp_->list()) {
2200 ASSERT(available_->type() == CPURegister::kRegister);
2201 ASSERT(availablefp_->type() == CPURegister::kFPRegister);
2202 }
2203
2204 ~UseScratchRegisterScope();
2205
2206 // Take a register from the appropriate temps list. It will be returned
2207 // automatically when the scope ends.
2208 Register AcquireW() { return AcquireNextAvailable(available_).W(); }
2209 Register AcquireX() { return AcquireNextAvailable(available_).X(); }
2210 FPRegister AcquireS() { return AcquireNextAvailable(availablefp_).S(); }
2211 FPRegister AcquireD() { return AcquireNextAvailable(availablefp_).D(); }
2212
2213 Register AcquireSameSizeAs(const Register& reg);
2214 FPRegister AcquireSameSizeAs(const FPRegister& reg);
2215
2216 private:
2217 static CPURegister AcquireNextAvailable(CPURegList* available);
2218
2219 // Available scratch registers.
2220 CPURegList* available_; // kRegister
2221 CPURegList* availablefp_; // kFPRegister
2222
2223 // The state of the available lists at the start of this scope.
2224 RegList old_available_; // kRegister
2225 RegList old_availablefp_; // kFPRegister
2226 };
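A usage sketch for the new scope (the masm pointer and register sizes are illustrative):

  {
    UseScratchRegisterScope temps(masm);
    Register scratch = temps.AcquireX();       // taken from masm->TmpList()
    FPRegister fp_scratch = temps.AcquireD();  // taken from masm->FPTmpList()
    // ... use scratch and fp_scratch ...
  }
  // On leaving the scope the registers are returned and both lists are
  // restored to their original state, even if they were modified inside.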
2227
2228
2258 inline MemOperand ContextMemOperand(Register context, int index) { 2229 inline MemOperand ContextMemOperand(Register context, int index) {
2259 return MemOperand(context, Context::SlotOffset(index)); 2230 return MemOperand(context, Context::SlotOffset(index));
2260 } 2231 }
2261 2232
2262 inline MemOperand GlobalObjectMemOperand() { 2233 inline MemOperand GlobalObjectMemOperand() {
2263 return ContextMemOperand(cp, Context::GLOBAL_OBJECT_INDEX); 2234 return ContextMemOperand(cp, Context::GLOBAL_OBJECT_INDEX);
2264 } 2235 }
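These helpers just wrap Context::SlotOffset; a sketch of typical use (assuming cp holds the current context, as elsewhere in V8; the slot index is illustrative):

  __ Ldr(x0, GlobalObjectMemOperand());                       // load the global object
  __ Ldr(x1, ContextMemOperand(cp, Context::CLOSURE_INDEX));  // load an arbitrary slot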
2265 2236
2266 2237
2267 // Encode and decode information about patchable inline SMI checks. 2238 // Encode and decode information about patchable inline SMI checks.
(...skipping 50 matching lines...)
2318 #error "Unsupported option" 2289 #error "Unsupported option"
2319 #define CODE_COVERAGE_STRINGIFY(x) #x 2290 #define CODE_COVERAGE_STRINGIFY(x) #x
2320 #define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x) 2291 #define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
2321 #define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__) 2292 #define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
2322 #define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm-> 2293 #define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm->
2323 #else 2294 #else
2324 #define ACCESS_MASM(masm) masm-> 2295 #define ACCESS_MASM(masm) masm->
2325 #endif 2296 #endif
2326 2297
2327 #endif // V8_A64_MACRO_ASSEMBLER_A64_H_ 2298 #endif // V8_A64_MACRO_ASSEMBLER_A64_H_