| OLD | NEW |
| 1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "v8.h" | 5 #include "v8.h" |
| 6 | 6 |
| 7 #if V8_TARGET_ARCH_ARM64 | 7 #if V8_TARGET_ARCH_ARM64 |
| 8 | 8 |
| 9 #include "bootstrapper.h" | 9 #include "bootstrapper.h" |
| 10 #include "codegen.h" | 10 #include "codegen.h" |
| (...skipping 106 matching lines...) |
| 117 LogicalImmediate(rd, rn, n, imm_s, imm_r, op); | 117 LogicalImmediate(rd, rn, n, imm_s, imm_r, op); |
| 118 } else { | 118 } else { |
| 119 // Immediate can't be encoded: synthesize using move immediate. | 119 // Immediate can't be encoded: synthesize using move immediate. |
| 120 Register temp = temps.AcquireSameSizeAs(rn); | 120 Register temp = temps.AcquireSameSizeAs(rn); |
| 121 Mov(temp, immediate); | 121 Mov(temp, immediate); |
| 122 if (rd.Is(csp)) { | 122 if (rd.Is(csp)) { |
| 123 // If rd is the stack pointer, we cannot use it as the destination | 123 // If rd is the stack pointer, we cannot use it as the destination |
| 124 // register so we use the temp register as an intermediate again. | 124 // register so we use the temp register as an intermediate again. |
| 125 Logical(temp, rn, temp, op); | 125 Logical(temp, rn, temp, op); |
| 126 Mov(csp, temp); | 126 Mov(csp, temp); |
| 127 AssertStackConsistency(); | |
| 128 } else { | 127 } else { |
| 129 Logical(rd, rn, temp, op); | 128 Logical(rd, rn, temp, op); |
| 130 } | 129 } |
| 131 } | 130 } |
| 132 | 131 |
| 133 } else if (operand.IsExtendedRegister()) { | 132 } else if (operand.IsExtendedRegister()) { |
| 134 ASSERT(operand.reg().SizeInBits() <= rd.SizeInBits()); | 133 ASSERT(operand.reg().SizeInBits() <= rd.SizeInBits()); |
| 135 // Add/sub extended supports shift <= 4. We want to support exactly the | 134 // Add/sub extended supports shift <= 4. We want to support exactly the |
| 136 // same modes here. | 135 // same modes here. |
| 137 ASSERT(operand.shift_amount() <= 4); | 136 ASSERT(operand.shift_amount() <= 4); |
| (...skipping 87 matching lines...) |
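A note on the csp branch in the hunk above: ARM64 logical instructions cannot name the stack pointer as their destination, so the result is computed in a scratch register and then moved. A minimal standalone C++ model of that ordering (AndIntoSp and its locals are illustrative names, not V8 code):

    #include <cstdint>

    // Mirror of: Mov(temp, immediate); Logical(temp, rn, temp, AND);
    // Mov(csp, temp). Only the final plain move may target csp.
    uint64_t AndIntoSp(uint64_t rn, uint64_t immediate) {
      uint64_t temp = immediate;  // Mov(temp, immediate)
      temp = rn & temp;           // Logical(temp, rn, temp, AND)
      uint64_t csp = temp;        // Mov(csp, temp)
      return csp;
    }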
| 225 movk(temp, imm16, 16 * i); | 224 movk(temp, imm16, 16 * i); |
| 226 } | 225 } |
| 227 } | 226 } |
| 228 } | 227 } |
| 229 ASSERT(first_mov_done); | 228 ASSERT(first_mov_done); |
| 230 | 229 |
| 231 // Move the temporary if the original destination register was the stack | 230 // Move the temporary if the original destination register was the stack |
| 232 // pointer. | 231 // pointer. |
| 233 if (rd.IsSP()) { | 232 if (rd.IsSP()) { |
| 234 mov(rd, temp); | 233 mov(rd, temp); |
| 235 AssertStackConsistency(); | |
| 236 } | 234 } |
| 237 } | 235 } |
| 238 } | 236 } |
| 239 | 237 |
| 240 | 238 |
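The loop whose tail is visible above materializes a 64-bit immediate one 16-bit halfword at a time: a movz for the first non-zero halfword, a movk for each later one, and no instruction at all for zero halfwords. A self-contained sketch of that walk (illustration only; the real code also weighs an inverted, movn-based encoding):

    #include <cstdint>
    #include <cstdio>

    // Print the movz/movk sequence for 'imm', skipping all-zero halfwords
    // exactly as the first_mov_done loop does.
    void DescribeMovSequence(uint64_t imm) {
      bool first_mov_done = false;
      for (int i = 0; i < 4; i++) {
        uint64_t imm16 = (imm >> (16 * i)) & 0xffff;
        if (imm16 == 0) continue;  // nothing to emit for this halfword
        std::printf("%s temp, #0x%llx, lsl #%d\n",
                    first_mov_done ? "movk" : "movz",
                    static_cast<unsigned long long>(imm16), 16 * i);
        first_mov_done = true;
      }
    }

    // DescribeMovSequence(0x0000123400005678) prints:
    //   movz temp, #0x5678, lsl #0
    //   movk temp, #0x1234, lsl #32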
| 241 void MacroAssembler::Mov(const Register& rd, | 239 void MacroAssembler::Mov(const Register& rd, |
| 242 const Operand& operand, | 240 const Operand& operand, |
| 243 DiscardMoveMode discard_mode) { | 241 DiscardMoveMode discard_mode) { |
| 244 ASSERT(allow_macro_instructions_); | 242 ASSERT(allow_macro_instructions_); |
| 245 ASSERT(!rd.IsZero()); | 243 ASSERT(!rd.IsZero()); |
| (...skipping 516 matching lines...) |
| 762 // Abstracted stack operations. | 760 // Abstracted stack operations. |
| 763 | 761 |
| 764 | 762 |
| 765 void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1, | 763 void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1, |
| 766 const CPURegister& src2, const CPURegister& src3) { | 764 const CPURegister& src2, const CPURegister& src3) { |
| 767 ASSERT(AreSameSizeAndType(src0, src1, src2, src3)); | 765 ASSERT(AreSameSizeAndType(src0, src1, src2, src3)); |
| 768 | 766 |
| 769 int count = 1 + src1.IsValid() + src2.IsValid() + src3.IsValid(); | 767 int count = 1 + src1.IsValid() + src2.IsValid() + src3.IsValid(); |
| 770 int size = src0.SizeInBytes(); | 768 int size = src0.SizeInBytes(); |
| 771 | 769 |
| 772 PushPreamble(count, size); | 770 PrepareForPush(count, size); |
| 773 PushHelper(count, size, src0, src1, src2, src3); | 771 PushHelper(count, size, src0, src1, src2, src3); |
| 774 } | 772 } |
| 775 | 773 |
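Hypothetical call sites for the four-register overload above, assuming the `__` shorthand used later in this file and the NoReg defaults from the header:

    __ Push(x0, x1, x2, x3);  // count = 4, size = 8: 4 * 8 = 32 bytes pushed
    __ Push(x4, x5);          // count = 2: src2 and src3 default to NoReg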
| 776 | 774 |
| 777 void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1, | 775 void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1, |
| 778 const CPURegister& src2, const CPURegister& src3, | 776 const CPURegister& src2, const CPURegister& src3, |
| 779 const CPURegister& src4, const CPURegister& src5, | 777 const CPURegister& src4, const CPURegister& src5, |
| 780 const CPURegister& src6, const CPURegister& src7) { | 778 const CPURegister& src6, const CPURegister& src7) { |
| 781 ASSERT(AreSameSizeAndType(src0, src1, src2, src3, src4, src5, src6, src7)); | 779 ASSERT(AreSameSizeAndType(src0, src1, src2, src3, src4, src5, src6, src7)); |
| 782 | 780 |
| 783 int count = 5 + src5.IsValid() + src6.IsValid() + src7.IsValid(); | 781 int count = 5 + src5.IsValid() + src6.IsValid() + src7.IsValid(); |
| 784 int size = src0.SizeInBytes(); | 782 int size = src0.SizeInBytes(); |
| 785 | 783 |
| 786 PushPreamble(count, size); | 784 PrepareForPush(count, size); |
| 787 PushHelper(4, size, src0, src1, src2, src3); | 785 PushHelper(4, size, src0, src1, src2, src3); |
| 788 PushHelper(count - 4, size, src4, src5, src6, src7); | 786 PushHelper(count - 4, size, src4, src5, src6, src7); |
| 789 } | 787 } |
| 790 | 788 |
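Worked example for the eight-register overload above: pushing seven registers (src0..src6 valid, src7 == NoReg) gives count = 5 + 1 + 1 + 0 = 7, so the first PushHelper call stores src0..src3 and the second stores the remaining three.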
| 791 | 789 |
| 792 void MacroAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1, | 790 void MacroAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1, |
| 793 const CPURegister& dst2, const CPURegister& dst3) { | 791 const CPURegister& dst2, const CPURegister& dst3) { |
| 794 // It is not valid to pop into the same register more than once in one | 792 // It is not valid to pop into the same register more than once in one |
| 795 // instruction, not even into the zero register. | 793 // instruction, not even into the zero register. |
| 796 ASSERT(!AreAliased(dst0, dst1, dst2, dst3)); | 794 ASSERT(!AreAliased(dst0, dst1, dst2, dst3)); |
| 797 ASSERT(AreSameSizeAndType(dst0, dst1, dst2, dst3)); | 795 ASSERT(AreSameSizeAndType(dst0, dst1, dst2, dst3)); |
| 798 ASSERT(dst0.IsValid()); | 796 ASSERT(dst0.IsValid()); |
| 799 | 797 |
| 800 int count = 1 + dst1.IsValid() + dst2.IsValid() + dst3.IsValid(); | 798 int count = 1 + dst1.IsValid() + dst2.IsValid() + dst3.IsValid(); |
| 801 int size = dst0.SizeInBytes(); | 799 int size = dst0.SizeInBytes(); |
| 802 | 800 |
| | 801 PrepareForPop(count, size); |
| 803 PopHelper(count, size, dst0, dst1, dst2, dst3); | 802 PopHelper(count, size, dst0, dst1, dst2, dst3); |
| 804 PopPostamble(count, size); | 803 |
| | 804 if (!csp.Is(StackPointer()) && emit_debug_code()) { |
| | 805 // It is safe to leave csp where it is when unwinding the JavaScript stack, |
| | 806 // but if we keep it matching StackPointer, the simulator can detect memory |
| | 807 // accesses in the now-free part of the stack. |
| | 808 Mov(csp, StackPointer()); |
| | 809 } |
| 805 } | 810 } |
| 806 | 811 |
| 807 | 812 |
| 808 void MacroAssembler::PushPopQueue::PushQueued() { | 813 void MacroAssembler::PushPopQueue::PushQueued() { |
| 809 if (queued_.empty()) return; | 814 if (queued_.empty()) return; |
| 810 | 815 |
| 811 masm_->PushPreamble(size_); | 816 masm_->PrepareForPush(size_); |
| 812 | 817 |
| 813 int count = queued_.size(); | 818 int count = queued_.size(); |
| 814 int index = 0; | 819 int index = 0; |
| 815 while (index < count) { | 820 while (index < count) { |
| 816 // PushHelper can only handle registers with the same size and type, and it | 821 // PushHelper can only handle registers with the same size and type, and it |
| 817 // can handle only four at a time. Batch them up accordingly. | 822 // can handle only four at a time. Batch them up accordingly. |
| 818 CPURegister batch[4] = {NoReg, NoReg, NoReg, NoReg}; | 823 CPURegister batch[4] = {NoReg, NoReg, NoReg, NoReg}; |
| 819 int batch_index = 0; | 824 int batch_index = 0; |
| 820 do { | 825 do { |
| 821 batch[batch_index++] = queued_[index++]; | 826 batch[batch_index++] = queued_[index++]; |
| 822 } while ((batch_index < 4) && (index < count) && | 827 } while ((batch_index < 4) && (index < count) && |
| 823 batch[0].IsSameSizeAndType(queued_[index])); | 828 batch[0].IsSameSizeAndType(queued_[index])); |
| 824 | 829 |
| 825 masm_->PushHelper(batch_index, batch[0].SizeInBytes(), | 830 masm_->PushHelper(batch_index, batch[0].SizeInBytes(), |
| 826 batch[0], batch[1], batch[2], batch[3]); | 831 batch[0], batch[1], batch[2], batch[3]); |
| 827 } | 832 } |
| 828 | 833 |
| 829 queued_.clear(); | 834 queued_.clear(); |
| 830 } | 835 } |
| 831 | 836 |
| 832 | 837 |
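A standalone model of the batching policy shared by PushQueued above and PopQueued below: take up to four queued registers per helper call, cutting a batch short as soon as an entry no longer matches the first entry's size and type. `Reg` is an illustrative stand-in for CPURegister, not V8 code:

    #include <cstddef>
    #include <vector>

    struct Reg { int size_in_bytes; bool is_fp; };

    static bool SameSizeAndType(const Reg& a, const Reg& b) {
      return a.size_in_bytes == b.size_in_bytes && a.is_fp == b.is_fp;
    }

    // Split 'queued' into runs of at most four same-size-and-type registers;
    // each run corresponds to one PushHelper/PopHelper call.
    std::vector<std::vector<Reg>> Batch(const std::vector<Reg>& queued) {
      std::vector<std::vector<Reg>> batches;
      size_t index = 0;
      while (index < queued.size()) {
        std::vector<Reg> batch;
        do {
          batch.push_back(queued[index++]);
        } while (batch.size() < 4 && index < queued.size() &&
                 SameSizeAndType(batch[0], queued[index]));
        batches.push_back(batch);
      }
      return batches;
    }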
| 833 void MacroAssembler::PushPopQueue::PopQueued() { | 838 void MacroAssembler::PushPopQueue::PopQueued() { |
| 834 if (queued_.empty()) return; | 839 if (queued_.empty()) return; |
| 835 | 840 |
| | 841 masm_->PrepareForPop(size_); |
| | 842 |
| 836 int count = queued_.size(); | 843 int count = queued_.size(); |
| 837 int index = 0; | 844 int index = 0; |
| 838 while (index < count) { | 845 while (index < count) { |
| 839 // PopHelper can only handle registers with the same size and type, and it | 846 // PopHelper can only handle registers with the same size and type, and it |
| 840 // can handle only four at a time. Batch them up accordingly. | 847 // can handle only four at a time. Batch them up accordingly. |
| 841 CPURegister batch[4] = {NoReg, NoReg, NoReg, NoReg}; | 848 CPURegister batch[4] = {NoReg, NoReg, NoReg, NoReg}; |
| 842 int batch_index = 0; | 849 int batch_index = 0; |
| 843 do { | 850 do { |
| 844 batch[batch_index++] = queued_[index++]; | 851 batch[batch_index++] = queued_[index++]; |
| 845 } while ((batch_index < 4) && (index < count) && | 852 } while ((batch_index < 4) && (index < count) && |
| 846 batch[0].IsSameSizeAndType(queued_[index])); | 853 batch[0].IsSameSizeAndType(queued_[index])); |
| 847 | 854 |
| 848 masm_->PopHelper(batch_index, batch[0].SizeInBytes(), | 855 masm_->PopHelper(batch_index, batch[0].SizeInBytes(), |
| 849 batch[0], batch[1], batch[2], batch[3]); | 856 batch[0], batch[1], batch[2], batch[3]); |
| 850 } | 857 } |
| 851 | 858 |
| 852 masm_->PopPostamble(size_); | |
| 853 queued_.clear(); | 859 queued_.clear(); |
| 854 } | 860 } |
| 855 | 861 |
| 856 | 862 |
| 857 void MacroAssembler::PushCPURegList(CPURegList registers) { | 863 void MacroAssembler::PushCPURegList(CPURegList registers) { |
| 858 int size = registers.RegisterSizeInBytes(); | 864 int size = registers.RegisterSizeInBytes(); |
| 859 | 865 |
| 860 PushPreamble(registers.Count(), size); | 866 PrepareForPush(registers.Count(), size); |
| 861 // Push up to four registers at a time because if the current stack pointer is | 867 // Push up to four registers at a time because if the current stack pointer is |
| 862 // csp and reg_size is 32, registers must be pushed in blocks of four in order | 868 // csp and reg_size is 32, registers must be pushed in blocks of four in order |
| 863 // to maintain the 16-byte alignment for csp. | 869 // to maintain the 16-byte alignment for csp. |
| 864 while (!registers.IsEmpty()) { | 870 while (!registers.IsEmpty()) { |
| 865 int count_before = registers.Count(); | 871 int count_before = registers.Count(); |
| 866 const CPURegister& src0 = registers.PopHighestIndex(); | 872 const CPURegister& src0 = registers.PopHighestIndex(); |
| 867 const CPURegister& src1 = registers.PopHighestIndex(); | 873 const CPURegister& src1 = registers.PopHighestIndex(); |
| 868 const CPURegister& src2 = registers.PopHighestIndex(); | 874 const CPURegister& src2 = registers.PopHighestIndex(); |
| 869 const CPURegister& src3 = registers.PopHighestIndex(); | 875 const CPURegister& src3 = registers.PopHighestIndex(); |
| 870 int count = count_before - registers.Count(); | 876 int count = count_before - registers.Count(); |
| 871 PushHelper(count, size, src0, src1, src2, src3); | 877 PushHelper(count, size, src0, src1, src2, src3); |
| 872 } | 878 } |
| 873 } | 879 } |
| 874 | 880 |
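The blocks-of-four rule above is just arithmetic: with 32-bit registers, each PushHelper call of four stores 4 * 4 = 16 bytes, so csp keeps its required 16-byte alignment; with 64-bit registers, a pair already covers 2 * 8 = 16 bytes.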
| 875 | 881 |
| 876 void MacroAssembler::PopCPURegList(CPURegList registers) { | 882 void MacroAssembler::PopCPURegList(CPURegList registers) { |
| 877 int size = registers.RegisterSizeInBytes(); | 883 int size = registers.RegisterSizeInBytes(); |
| 878 | 884 |
| | 885 PrepareForPop(registers.Count(), size); |
| 879 // Pop up to four registers at a time because if the current stack pointer is | 886 // Pop up to four registers at a time because if the current stack pointer is |
| 880 // csp and reg_size is 32, registers must be popped in blocks of four in | 887 // csp and reg_size is 32, registers must be popped in blocks of four in |
| 881 // order to maintain the 16-byte alignment for csp. | 888 // order to maintain the 16-byte alignment for csp. |
| 882 while (!registers.IsEmpty()) { | 889 while (!registers.IsEmpty()) { |
| 883 int count_before = registers.Count(); | 890 int count_before = registers.Count(); |
| 884 const CPURegister& dst0 = registers.PopLowestIndex(); | 891 const CPURegister& dst0 = registers.PopLowestIndex(); |
| 885 const CPURegister& dst1 = registers.PopLowestIndex(); | 892 const CPURegister& dst1 = registers.PopLowestIndex(); |
| 886 const CPURegister& dst2 = registers.PopLowestIndex(); | 893 const CPURegister& dst2 = registers.PopLowestIndex(); |
| 887 const CPURegister& dst3 = registers.PopLowestIndex(); | 894 const CPURegister& dst3 = registers.PopLowestIndex(); |
| 888 int count = count_before - registers.Count(); | 895 int count = count_before - registers.Count(); |
| 889 PopHelper(count, size, dst0, dst1, dst2, dst3); | 896 PopHelper(count, size, dst0, dst1, dst2, dst3); |
| 890 } | 897 } |
| 891 PopPostamble(registers.Count(), size); | 898 |
| | 899 if (!csp.Is(StackPointer()) && emit_debug_code()) { |
| | 900 // It is safe to leave csp where it is when unwinding the JavaScript stack, |
| | 901 // but if we keep it matching StackPointer, the simulator can detect memory |
| | 902 // accesses in the now-free part of the stack. |
| | 903 Mov(csp, StackPointer()); |
| | 904 } |
| 892 } | 905 } |
| 893 | 906 |
| 894 | 907 |
| 895 void MacroAssembler::PushMultipleTimes(CPURegister src, int count) { | 908 void MacroAssembler::PushMultipleTimes(CPURegister src, int count) { |
| 896 int size = src.SizeInBytes(); | 909 int size = src.SizeInBytes(); |
| 897 | 910 |
| 898 PushPreamble(count, size); | 911 PrepareForPush(count, size); |
| 899 | 912 |
| 900 if (FLAG_optimize_for_size && count > 8) { | 913 if (FLAG_optimize_for_size && count > 8) { |
| 901 UseScratchRegisterScope temps(this); | 914 UseScratchRegisterScope temps(this); |
| 902 Register temp = temps.AcquireX(); | 915 Register temp = temps.AcquireX(); |
| 903 | 916 |
| 904 Label loop; | 917 Label loop; |
| 905 __ Mov(temp, count / 2); | 918 __ Mov(temp, count / 2); |
| 906 __ Bind(&loop); | 919 __ Bind(&loop); |
| 907 PushHelper(2, size, src, src, NoReg, NoReg); | 920 PushHelper(2, size, src, src, NoReg, NoReg); |
| 908 __ Subs(temp, temp, 1); | 921 __ Subs(temp, temp, 1); |
| (...skipping 15 matching lines...) |
| 924 } | 937 } |
| 925 if (count == 1) { | 938 if (count == 1) { |
| 926 PushHelper(1, size, src, NoReg, NoReg, NoReg); | 939 PushHelper(1, size, src, NoReg, NoReg, NoReg); |
| 927 count -= 1; | 940 count -= 1; |
| 928 } | 941 } |
| 929 ASSERT(count == 0); | 942 ASSERT(count == 0); |
| 930 } | 943 } |
| 931 | 944 |
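Worked example for the size-optimized path above: with count = 9, the loop body runs count / 2 = 4 times, pushing two copies of src per iteration (8 in total); the elided code after the loop appears to reduce count to the remainder, and the visible count == 1 tail pushes the ninth copy.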
| 932 | 945 |
| 933 void MacroAssembler::PushMultipleTimes(CPURegister src, Register count) { | 946 void MacroAssembler::PushMultipleTimes(CPURegister src, Register count) { |
| 934 PushPreamble(Operand(count, UXTW, WhichPowerOf2(src.SizeInBytes()))); | 947 PrepareForPush(Operand(count, UXTW, WhichPowerOf2(src.SizeInBytes()))); |
| 935 | 948 |
| 936 UseScratchRegisterScope temps(this); | 949 UseScratchRegisterScope temps(this); |
| 937 Register temp = temps.AcquireSameSizeAs(count); | 950 Register temp = temps.AcquireSameSizeAs(count); |
| 938 | 951 |
| 939 if (FLAG_optimize_for_size) { | 952 if (FLAG_optimize_for_size) { |
| 940 Label loop, done; | 953 Label loop, done; |
| 941 | 954 |
| 942 Subs(temp, count, 1); | 955 Subs(temp, count, 1); |
| 943 B(mi, &done); | 956 B(mi, &done); |
| 944 | 957 |
| (...skipping 105 matching lines...) |
| 1050 // for csp at all times. | 1063 // for csp at all times. |
| 1051 ldp(dst2, dst3, MemOperand(StackPointer(), 2 * size)); | 1064 ldp(dst2, dst3, MemOperand(StackPointer(), 2 * size)); |
| 1052 ldp(dst0, dst1, MemOperand(StackPointer(), 4 * size, PostIndex)); | 1065 ldp(dst0, dst1, MemOperand(StackPointer(), 4 * size, PostIndex)); |
| 1053 break; | 1066 break; |
| 1054 default: | 1067 default: |
| 1055 UNREACHABLE(); | 1068 UNREACHABLE(); |
| 1056 } | 1069 } |
| 1057 } | 1070 } |
| 1058 | 1071 |
| 1059 | 1072 |
| 1060 void MacroAssembler::PushPreamble(Operand total_size) { | 1073 void MacroAssembler::PrepareForPush(Operand total_size) { |
| 1061 // TODO(jbramley): This assertion generates too much code in some debug tests. | 1074 // TODO(jbramley): This assertion generates too much code in some debug tests. |
| 1062 // AssertStackConsistency(); | 1075 // AssertStackConsistency(); |
| 1063 if (csp.Is(StackPointer())) { | 1076 if (csp.Is(StackPointer())) { |
| 1064 // If the current stack pointer is csp, then it must be aligned to 16 bytes | 1077 // If the current stack pointer is csp, then it must be aligned to 16 bytes |
| 1065 // on entry and the total size of the specified registers must also be a | 1078 // on entry and the total size of the specified registers must also be a |
| 1066 // multiple of 16 bytes. | 1079 // multiple of 16 bytes. |
| 1067 if (total_size.IsImmediate()) { | 1080 if (total_size.IsImmediate()) { |
| 1068 ASSERT((total_size.immediate() % 16) == 0); | 1081 ASSERT((total_size.immediate() % 16) == 0); |
| 1069 } | 1082 } |
| 1070 | 1083 |
| 1071 // Don't check access size for non-immediate sizes. It's difficult to do | 1084 // Don't check access size for non-immediate sizes. It's difficult to do |
| 1072 // well, and it will be caught by hardware (or the simulator) anyway. | 1085 // well, and it will be caught by hardware (or the simulator) anyway. |
| 1073 } else { | 1086 } else { |
| 1074 // Even if the current stack pointer is not the system stack pointer (csp), | 1087 // Even if the current stack pointer is not the system stack pointer (csp), |
| 1075 // the system stack pointer will still be modified in order to comply with | 1088 // the system stack pointer will still be modified in order to comply with |
| 1076 // ABI rules about accessing memory below the system stack pointer. | 1089 // ABI rules about accessing memory below the system stack pointer. |
| 1077 BumpSystemStackPointer(total_size); | 1090 BumpSystemStackPointer(total_size); |
| 1078 } | 1091 } |
| 1079 } | 1092 } |
| 1080 | 1093 |
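Concretely: the ABI forbids accessing memory below csp, so when StackPointer() is a different register (e.g. jssp), a push that is about to write below it must first drop csp (BumpSystemStackPointer) at least as far as the lowest address about to be written.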
| 1081 | 1094 |
| 1082 void MacroAssembler::PopPostamble(Operand total_size) { | 1095 void MacroAssembler::PrepareForPop(Operand total_size) { |
| 1083 AssertStackConsistency(); | 1096 AssertStackConsistency(); |
| 1084 if (csp.Is(StackPointer())) { | 1097 if (csp.Is(StackPointer())) { |
| 1085 // If the current stack pointer is csp, then it must be aligned to 16 bytes | 1098 // If the current stack pointer is csp, then it must be aligned to 16 bytes |
| 1086 // on entry and the total size of the specified registers must also be a | 1099 // on entry and the total size of the specified registers must also be a |
| 1087 // multiple of 16 bytes. | 1100 // multiple of 16 bytes. |
| 1088 if (total_size.IsImmediate()) { | 1101 if (total_size.IsImmediate()) { |
| 1089 ASSERT((total_size.immediate() % 16) == 0); | 1102 ASSERT((total_size.immediate() % 16) == 0); |
| 1090 } | 1103 } |
| 1091 | 1104 |
| 1092 // Don't check access size for non-immediate sizes. It's difficult to do | 1105 // Don't check access size for non-immediate sizes. It's difficult to do |
| 1093 // well, and it will be caught by hardware (or the simulator) anyway. | 1106 // well, and it will be caught by hardware (or the simulator) anyway. |
| 1094 } else if (emit_debug_code()) { | |
| 1095 // It is safe to leave csp where it is when unwinding the JavaScript stack, | |
| 1096 // but if we keep it matching StackPointer, the simulator can detect memory | |
| 1097 // accesses in the now-free part of the stack. | |
| 1098 SyncSystemStackPointer(); | |
| 1099 } | 1107 } |
| 1100 } | 1108 } |
| 1101 | 1109 |
| 1102 | 1110 |
| 1103 void MacroAssembler::Poke(const CPURegister& src, const Operand& offset) { | 1111 void MacroAssembler::Poke(const CPURegister& src, const Operand& offset) { |
| 1104 if (offset.IsImmediate()) { | 1112 if (offset.IsImmediate()) { |
| 1105 ASSERT(offset.immediate() >= 0); | 1113 ASSERT(offset.immediate() >= 0); |
| 1106 } else if (emit_debug_code()) { | 1114 } else if (emit_debug_code()) { |
| 1107 Cmp(xzr, offset); | 1115 Cmp(xzr, offset); |
| 1108 Check(le, kStackAccessBelowStackPointer); | 1116 Check(le, kStackAccessBelowStackPointer); |
| (...skipping 76 matching lines...) |
| 1185 | 1193 |
| 1186 ldp(d8, d9, tos); | 1194 ldp(d8, d9, tos); |
| 1187 ldp(d10, d11, tos); | 1195 ldp(d10, d11, tos); |
| 1188 ldp(d12, d13, tos); | 1196 ldp(d12, d13, tos); |
| 1189 ldp(d14, d15, tos); | 1197 ldp(d14, d15, tos); |
| 1190 } | 1198 } |
| 1191 | 1199 |
| 1192 | 1200 |
| 1193 void MacroAssembler::AssertStackConsistency() { | 1201 void MacroAssembler::AssertStackConsistency() { |
| 1194 if (emit_debug_code()) { | 1202 if (emit_debug_code()) { |
| 1195 if (csp.Is(StackPointer()) || CpuFeatures::IsSupported(ALWAYS_ALIGN_CSP)) { | 1203 if (csp.Is(StackPointer())) { |
| 1196 // Always check the alignment of csp if ALWAYS_ALIGN_CSP is true, since it | 1204 // We can't check the alignment of csp without using a scratch register |
| 1197 // could have been bumped even if it is not the stack pointer. We can't | 1205 // (or clobbering the flags), but the processor (or simulator) will abort |
| 1198 // check the alignment of csp without using a scratch register (or | 1206 // if it is not properly aligned during a load. |
| 1199 // clobbering the flags), but the processor (or simulator) will abort if | |
| 1200 // it is not properly aligned during a load. | |
| 1201 ldr(xzr, MemOperand(csp, 0)); | 1207 ldr(xzr, MemOperand(csp, 0)); |
| 1202 } else if (FLAG_enable_slow_asserts && !csp.Is(StackPointer())) { | 1208 } else if (FLAG_enable_slow_asserts) { |
| 1203 Label ok; | 1209 Label ok; |
| 1204 // Check that csp <= StackPointer(), preserving all registers and NZCV. | 1210 // Check that csp <= StackPointer(), preserving all registers and NZCV. |
| 1205 sub(StackPointer(), csp, StackPointer()); | 1211 sub(StackPointer(), csp, StackPointer()); |
| 1206 cbz(StackPointer(), &ok); // Ok if csp == StackPointer(). | 1212 cbz(StackPointer(), &ok); // Ok if csp == StackPointer(). |
| 1207 tbnz(StackPointer(), kXSignBit, &ok); // Ok if csp < StackPointer(). | 1213 tbnz(StackPointer(), kXSignBit, &ok); // Ok if csp < StackPointer(). |
| 1208 | 1214 |
| 1209 Abort(kTheCurrentStackPointerIsBelowCsp); | 1215 Abort(kTheCurrentStackPointerIsBelowCsp); |
| 1210 | 1216 |
| 1211 bind(&ok); | 1217 bind(&ok); |
| 1212 // Restore StackPointer(). | 1218 // Restore StackPointer(). |
| (...skipping 4016 matching lines...) |
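The slow-assert path in AssertStackConsistency above checks csp <= StackPointer() without a scratch register and without clobbering NZCV: sub (not subs) computes the difference into StackPointer() itself, and cbz/tbnz test it directly before the register is restored. A standalone model of the predicate (illustrative C++, not V8 code):

    #include <cstdint>

    // diff = csp - stack_pointer; test zero / sign bit rather than flags.
    bool CspNotAboveStackPointer(uint64_t csp, uint64_t stack_pointer) {
      uint64_t diff = csp - stack_pointer;  // sub(StackPointer(), csp, StackPointer())
      if (diff == 0) return true;           // cbz: csp == StackPointer()
      if (diff >> 63) return true;          // tbnz on kXSignBit: csp < StackPointer()
      return false;                         // otherwise Abort(kTheCurrentStackPointerIsBelowCsp)
    }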
| 5229 } | 5235 } |
| 5230 } | 5236 } |
| 5231 | 5237 |
| 5232 | 5238 |
| 5233 #undef __ | 5239 #undef __ |
| 5234 | 5240 |
| 5235 | 5241 |
| 5236 } } // namespace v8::internal | 5242 } } // namespace v8::internal |
| 5237 | 5243 |
| 5238 #endif // V8_TARGET_ARCH_ARM64 | 5244 #endif // V8_TARGET_ARCH_ARM64 |