| OLD | NEW |
| 1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 605 matching lines...) |
| 616 | 616 |
| 617 if (!csp.Is(StackPointer()) && emit_debug_code()) { | 617 if (!csp.Is(StackPointer()) && emit_debug_code()) { |
| 618 // It is safe to leave csp where it is when unwinding the JavaScript stack, | 618 // It is safe to leave csp where it is when unwinding the JavaScript stack, |
| 619 // but if we keep it matching StackPointer, the simulator can detect memory | 619 // but if we keep it matching StackPointer, the simulator can detect memory |
| 620 // accesses in the now-free part of the stack. | 620 // accesses in the now-free part of the stack. |
| 621 Mov(csp, StackPointer()); | 621 Mov(csp, StackPointer()); |
| 622 } | 622 } |
| 623 } | 623 } |
| 624 | 624 |
| 625 | 625 |
| 626 void MacroAssembler::PushPopQueue::PushQueued() { | |
| 627 if (queued_.empty()) return; | |
| 628 | |
| 629 masm_->PrepareForPush(size_); | |
| 630 | |
| 631 int count = queued_.size(); | |
| 632 int index = 0; | |
| 633 while (index < count) { | |
| 634 // PushHelper can only handle registers with the same size and type, and it | |
| 635 // can handle only four at a time. Batch them up accordingly. | |
| 636 CPURegister batch[4] = {NoReg, NoReg, NoReg, NoReg}; | |
| 637 int batch_index = 0; | |
| 638 do { | |
| 639 batch[batch_index++] = queued_[index++]; | |
| 640 } while ((batch_index < 4) && (index < count) && | |
| 641 batch[0].IsSameSizeAndType(queued_[index])); | |
| 642 | |
| 643 masm_->PushHelper(batch_index, batch[0].SizeInBytes(), | |
| 644 batch[0], batch[1], batch[2], batch[3]); | |
| 645 } | |
| 646 | |
| 647 queued_.clear(); | |
| 648 } | |
| 649 | |
| 650 | |
| 651 void MacroAssembler::PushPopQueue::PopQueued() { | |
| 652 if (queued_.empty()) return; | |
| 653 | |
| 654 masm_->PrepareForPop(size_); | |
| 655 | |
| 656 int count = queued_.size(); | |
| 657 int index = 0; | |
| 658 while (index < count) { | |
| 659 // PopHelper can only handle registers with the same size and type, and it | |
| 660 // can handle only four at a time. Batch them up accordingly. | |
| 661 CPURegister batch[4] = {NoReg, NoReg, NoReg, NoReg}; | |
| 662 int batch_index = 0; | |
| 663 do { | |
| 664 batch[batch_index++] = queued_[index++]; | |
| 665 } while ((batch_index < 4) && (index < count) && | |
| 666 batch[0].IsSameSizeAndType(queued_[index])); | |
| 667 | |
| 668 masm_->PopHelper(batch_index, batch[0].SizeInBytes(), | |
| 669 batch[0], batch[1], batch[2], batch[3]); | |
| 670 } | |
| 671 | |
| 672 queued_.clear(); | |
| 673 } | |
| 674 | |
| 675 | |
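The queue-based PushQueued()/PopQueued() shown on the old side above group queued registers into runs of at most four with matching size and type before handing each run to PushHelper/PopHelper. A minimal standalone sketch of that grouping logic, using a hypothetical Reg struct and a PushBatch stand-in rather than V8's CPURegister API:

    // Standalone sketch (not V8 code) of the PushQueued batching loop.
    #include <cstdio>
    #include <vector>

    struct Reg { int size_in_bytes; bool is_fp; };  // stand-in for CPURegister

    static bool SameSizeAndType(const Reg& a, const Reg& b) {
      return a.size_in_bytes == b.size_in_bytes && a.is_fp == b.is_fp;
    }

    // Stand-in for MacroAssembler::PushHelper: just reports the batch.
    static void PushBatch(const std::vector<Reg>& batch) {
      std::printf("push batch of %zu regs, %d bytes each\n",
                  batch.size(), batch.front().size_in_bytes);
    }

    static void PushQueued(const std::vector<Reg>& queued) {
      size_t index = 0;
      while (index < queued.size()) {
        std::vector<Reg> batch;
        do {
          batch.push_back(queued[index++]);
        } while (batch.size() < 4 && index < queued.size() &&
                 SameSizeAndType(batch.front(), queued[index]));
        PushBatch(batch);
      }
    }

    int main() {
      // Two 8-byte GP registers followed by two 8-byte FP registers:
      // the loop emits two batches, since a batch never mixes types.
      PushQueued({{8, false}, {8, false}, {8, true}, {8, true}});
      return 0;
    }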
| 676 void MacroAssembler::PushCPURegList(CPURegList registers) { | 626 void MacroAssembler::PushCPURegList(CPURegList registers) { |
| 677 int size = registers.RegisterSizeInBytes(); | 627 int size = registers.RegisterSizeInBytes(); |
| 678 | 628 |
| 679 PrepareForPush(registers.Count(), size); | 629 PrepareForPush(registers.Count(), size); |
| 680 // Push up to four registers at a time because if the current stack pointer is | 630 // Push up to four registers at a time because if the current stack pointer is |
| 681 // csp and reg_size is 32, registers must be pushed in blocks of four in order | 631 // csp and reg_size is 32, registers must be pushed in blocks of four in order |
| 682 // to maintain the 16-byte alignment for csp. | 632 // to maintain the 16-byte alignment for csp. |
| 683 while (!registers.IsEmpty()) { | 633 while (!registers.IsEmpty()) { |
| 684 int count_before = registers.Count(); | 634 int count_before = registers.Count(); |
| 685 const CPURegister& src0 = registers.PopHighestIndex(); | 635 const CPURegister& src0 = registers.PopHighestIndex(); |
| (...skipping 25 matching lines...) |
| 711 | 661 |
| 712 if (!csp.Is(StackPointer()) && emit_debug_code()) { | 662 if (!csp.Is(StackPointer()) && emit_debug_code()) { |
| 713 // It is safe to leave csp where it is when unwinding the JavaScript stack, | 663 // It is safe to leave csp where it is when unwinding the JavaScript stack, |
| 714 // but if we keep it matching StackPointer, the simulator can detect memory | 664 // but if we keep it matching StackPointer, the simulator can detect memory |
| 715 // accesses in the now-free part of the stack. | 665 // accesses in the now-free part of the stack. |
| 716 Mov(csp, StackPointer()); | 666 Mov(csp, StackPointer()); |
| 717 } | 667 } |
| 718 } | 668 } |
| 719 | 669 |
| 720 | 670 |
| 721 void MacroAssembler::PushMultipleTimes(CPURegister src, int count) { | 671 void MacroAssembler::PushMultipleTimes(int count, Register src) { |
| 722 int size = src.SizeInBytes(); | 672 int size = src.SizeInBytes(); |
| 723 | 673 |
| 724 PrepareForPush(count, size); | 674 PrepareForPush(count, size); |
| 725 | 675 |
| 726 if (FLAG_optimize_for_size && count > 8) { | 676 if (FLAG_optimize_for_size && count > 8) { |
| 727 Label loop; | 677 Label loop; |
| 728 __ Mov(Tmp0(), count / 2); | 678 __ Mov(Tmp0(), count / 2); |
| 729 __ Bind(&loop); | 679 __ Bind(&loop); |
| 730 PushHelper(2, size, src, src, NoReg, NoReg); | 680 PushHelper(2, size, src, src, NoReg, NoReg); |
| 731 __ Subs(Tmp0(), Tmp0(), 1); | 681 __ Subs(Tmp0(), Tmp0(), 1); |
| (...skipping 14 matching lines...) |
| 746 count -= 2; | 696 count -= 2; |
| 747 } | 697 } |
| 748 if (count == 1) { | 698 if (count == 1) { |
| 749 PushHelper(1, size, src, NoReg, NoReg, NoReg); | 699 PushHelper(1, size, src, NoReg, NoReg, NoReg); |
| 750 count -= 1; | 700 count -= 1; |
| 751 } | 701 } |
| 752 ASSERT(count == 0); | 702 ASSERT(count == 0); |
| 753 } | 703 } |
| 754 | 704 |
| 755 | 705 |
| 756 void MacroAssembler::PushMultipleTimes(CPURegister src, Register count) { | |
| 757 PrepareForPush(Operand(count, UXTW, WhichPowerOf2(src.SizeInBytes()))); | |
| 758 | |
| 759 Register temp = AppropriateTempFor(count); | |
| 760 | |
| 761 if (FLAG_optimize_for_size) { | |
| 762 Label loop, done; | |
| 763 | |
| 764 Subs(temp, count, 1); | |
| 765 B(mi, &done); | |
| 766 | |
| 767 // Push all registers individually, to save code size. | |
| 768 Bind(&loop); | |
| 769 Subs(temp, temp, 1); | |
| 770 PushHelper(1, src.SizeInBytes(), src, NoReg, NoReg, NoReg); | |
| 771 B(pl, &loop); | |
| 772 | |
| 773 Bind(&done); | |
| 774 } else { | |
| 775 Label loop, leftover2, leftover1, done; | |
| 776 | |
| 777 Subs(temp, count, 4); | |
| 778 B(mi, &leftover2); | |
| 779 | |
| 780 // Push groups of four first. | |
| 781 Bind(&loop); | |
| 782 Subs(temp, temp, 4); | |
| 783 PushHelper(4, src.SizeInBytes(), src, src, src, src); | |
| 784 B(pl, &loop); | |
| 785 | |
| 786 // Push groups of two. | |
| 787 Bind(&leftover2); | |
| 788 Tbz(count, 1, &leftover1); | |
| 789 PushHelper(2, src.SizeInBytes(), src, src, NoReg, NoReg); | |
| 790 | |
| 791 // Push the last one (if required). | |
| 792 Bind(&leftover1); | |
| 793 Tbz(count, 0, &done); | |
| 794 PushHelper(1, src.SizeInBytes(), src, NoReg, NoReg, NoReg); | |
| 795 | |
| 796 Bind(&done); | |
| 797 } | |
| 798 } | |
| 799 | |
| 800 | |
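The run-time-count PushMultipleTimes overload shown on the old side above pushes the source register in groups of four while at least four pushes remain, then inspects bits 1 and 0 of the original count to emit the two- and one-register leftovers. A standalone sketch of that control flow; PushCopies is a hypothetical stand-in for PushHelper, not V8 code:

    #include <cassert>
    #include <cstdio>

    // Hypothetical stand-in for PushHelper: records how many copies of the
    // source register are emitted in one call.
    static void PushCopies(int n, int* pushed) {
      std::printf("push %d copies of src\n", n);
      *pushed += n;
    }

    // Mirrors the structure of the run-time-count overload: groups of four
    // while at least four pushes remain (the Subs/B(pl) loop), then bit 1 and
    // bit 0 of the count for the leftovers (the two Tbz tests).
    static void PushMultipleTimesModel(int count) {
      assert(count >= 0);
      int pushed = 0;
      for (int remaining = count - 4; remaining >= 0; remaining -= 4) {
        PushCopies(4, &pushed);
      }
      if (count & 2) PushCopies(2, &pushed);  // Tbz(count, 1, &leftover1)
      if (count & 1) PushCopies(1, &pushed);  // Tbz(count, 0, &done)
      assert(pushed == count);
    }

    int main() {
      PushMultipleTimesModel(7);  // one group of four, then two, then one
      return 0;
    }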
| 801 void MacroAssembler::PushHelper(int count, int size, | 706 void MacroAssembler::PushHelper(int count, int size, |
| 802 const CPURegister& src0, | 707 const CPURegister& src0, |
| 803 const CPURegister& src1, | 708 const CPURegister& src1, |
| 804 const CPURegister& src2, | 709 const CPURegister& src2, |
| 805 const CPURegister& src3) { | 710 const CPURegister& src3) { |
| 806 // Ensure that we don't unintentionally modify scratch or debug registers. | 711 // Ensure that we don't unintentionally modify scratch or debug registers. |
| 807 InstructionAccurateScope scope(this); | 712 InstructionAccurateScope scope(this); |
| 808 | 713 |
| 809 ASSERT(AreSameSizeAndType(src0, src1, src2, src3)); | 714 ASSERT(AreSameSizeAndType(src0, src1, src2, src3)); |
| 810 ASSERT(size == src0.SizeInBytes()); | 715 ASSERT(size == src0.SizeInBytes()); |
| (...skipping 61 matching lines...) |
| 872 // for csp at all times. | 777 // for csp at all times. |
| 873 ldp(dst2, dst3, MemOperand(StackPointer(), 2 * size)); | 778 ldp(dst2, dst3, MemOperand(StackPointer(), 2 * size)); |
| 874 ldp(dst0, dst1, MemOperand(StackPointer(), 4 * size, PostIndex)); | 779 ldp(dst0, dst1, MemOperand(StackPointer(), 4 * size, PostIndex)); |
| 875 break; | 780 break; |
| 876 default: | 781 default: |
| 877 UNREACHABLE(); | 782 UNREACHABLE(); |
| 878 } | 783 } |
| 879 } | 784 } |
| 880 | 785 |
| 881 | 786 |
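In the four-register PopHelper case above, the first ldp reads the upper pair at an offset and only the second ldp writes the stack pointer back, post-indexed by the full 4 * size, so the stack pointer is adjusted exactly once, by a 16-byte-aligned amount. A standalone model of that ordering, assuming a simple slot-per-register stack rather than real A64 memory operands (not V8 code):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    struct Stack {
      std::vector<uint64_t> mem;  // one slot per register-sized entry
      size_t sp = 0;              // index of the current top entry
    };

    static void Pop4(Stack* s, uint64_t* d0, uint64_t* d1,
                     uint64_t* d2, uint64_t* d3) {
      // ldp dst2, dst3, [sp, #2*size] -- no stack-pointer writeback yet.
      *d2 = s->mem[s->sp + 2];
      *d3 = s->mem[s->sp + 3];
      // ldp dst0, dst1, [sp], #4*size -- single post-indexed writeback.
      *d0 = s->mem[s->sp + 0];
      *d1 = s->mem[s->sp + 1];
      s->sp += 4;
    }

    int main() {
      Stack s;
      s.mem = {10, 11, 12, 13, 99};
      uint64_t d0, d1, d2, d3;
      Pop4(&s, &d0, &d1, &d2, &d3);
      assert(d0 == 10 && d1 == 11 && d2 == 12 && d3 == 13 && s.sp == 4);
      return 0;
    }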
| 882 void MacroAssembler::PrepareForPush(Operand total_size) { | 787 void MacroAssembler::PrepareForPush(int count, int size) { |
| 788 // TODO(jbramley): Use AssertStackConsistency here, if possible. See the |
| 789 // AssertStackConsistency for details of why we can't at the moment. |
| 790 if (csp.Is(StackPointer())) { |
| 791 // If the current stack pointer is csp, then it must be aligned to 16 bytes |
| 792 // on entry and the total size of the specified registers must also be a |
| 793 // multiple of 16 bytes. |
| 794 ASSERT((count * size) % 16 == 0); |
| 795 } else { |
| 796 // Even if the current stack pointer is not the system stack pointer (csp), |
| 797 // the system stack pointer will still be modified in order to comply with |
| 798 // ABI rules about accessing memory below the system stack pointer. |
| 799 BumpSystemStackPointer(count * size); |
| 800 } |
| 801 } |
| 802 |
| 803 |
| 804 void MacroAssembler::PrepareForPop(int count, int size) { |
| 883 AssertStackConsistency(); | 805 AssertStackConsistency(); |
| 884 if (csp.Is(StackPointer())) { | 806 if (csp.Is(StackPointer())) { |
| 885 // If the current stack pointer is csp, then it must be aligned to 16 bytes | 807 // If the current stack pointer is csp, then it must be aligned to 16 bytes |
| 886 // on entry and the total size of the specified registers must also be a | 808 // on entry and the total size of the specified registers must also be a |
| 887 // multiple of 16 bytes. | 809 // multiple of 16 bytes. |
| 888 if (total_size.IsImmediate()) { | 810 ASSERT((count * size) % 16 == 0); |
| 889 ASSERT((total_size.immediate() % 16) == 0); | |
| 890 } | |
| 891 | |
| 892 // Don't check access size for non-immediate sizes. It's difficult to do | |
| 893 // well, and it will be caught by hardware (or the simulator) anyway. | |
| 894 } else { | |
| 895 // Even if the current stack pointer is not the system stack pointer (csp), | |
| 896 // the system stack pointer will still be modified in order to comply with | |
| 897 // ABI rules about accessing memory below the system stack pointer. | |
| 898 BumpSystemStackPointer(total_size); | |
| 899 } | 811 } |
| 900 } | 812 } |
| 901 | 813 |
| 902 | |
| 903 void MacroAssembler::PrepareForPop(Operand total_size) { | |
| 904 AssertStackConsistency(); | |
| 905 if (csp.Is(StackPointer())) { | |
| 906 // If the current stack pointer is csp, then it must be aligned to 16 bytes | |
| 907 // on entry and the total size of the specified registers must also be a | |
| 908 // multiple of 16 bytes. | |
| 909 if (total_size.IsImmediate()) { | |
| 910 ASSERT((total_size.immediate() % 16) == 0); | |
| 911 } | |
| 912 | |
| 913 // Don't check access size for non-immediate sizes. It's difficult to do | |
| 914 // well, and it will be caught by hardware (or the simulator) anyway. | |
| 915 } | |
| 916 } | |
| 917 | |
| 918 | 814 |
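The new-side PrepareForPush(int count, int size) and PrepareForPop(int count, int size) assert that, when csp is the active stack pointer, the total size moved in one operation is a multiple of 16 bytes. A trivial standalone sketch of that rule with a few illustrative cases (not V8 code):

    #include <cassert>

    // Alignment rule asserted by PrepareForPush/PrepareForPop when csp is the
    // active stack pointer: the pushed or popped block must keep csp
    // 16-byte aligned.
    static bool KeepsCspAligned(int count, int size_in_bytes) {
      return (count * size_in_bytes) % 16 == 0;
    }

    int main() {
      assert(KeepsCspAligned(2, 8));    // two X registers: 16 bytes, OK
      assert(KeepsCspAligned(4, 4));    // four W registers: 16 bytes, OK
      assert(!KeepsCspAligned(1, 8));   // a lone X register would misalign csp
      assert(!KeepsCspAligned(2, 4));   // two W registers: only 8 bytes
      return 0;
    }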
| 919 void MacroAssembler::Poke(const CPURegister& src, const Operand& offset) { | 815 void MacroAssembler::Poke(const CPURegister& src, const Operand& offset) { |
| 920 if (offset.IsImmediate()) { | 816 if (offset.IsImmediate()) { |
| 921 ASSERT(offset.immediate() >= 0); | 817 ASSERT(offset.immediate() >= 0); |
| 922 } else if (emit_debug_code()) { | 818 } else if (emit_debug_code()) { |
| 923 Cmp(xzr, offset); | 819 Cmp(xzr, offset); |
| 924 Check(le, kStackAccessBelowStackPointer); | 820 Check(le, kStackAccessBelowStackPointer); |
| 925 } | 821 } |
| 926 | 822 |
| 927 Str(src, MemOperand(StackPointer(), offset)); | 823 Str(src, MemOperand(StackPointer(), offset)); |
| (...skipping 72 matching lines...) |
| 1000 ldp(x29, x30, tos); | 896 ldp(x29, x30, tos); |
| 1001 | 897 |
| 1002 ldp(d8, d9, tos); | 898 ldp(d8, d9, tos); |
| 1003 ldp(d10, d11, tos); | 899 ldp(d10, d11, tos); |
| 1004 ldp(d12, d13, tos); | 900 ldp(d12, d13, tos); |
| 1005 ldp(d14, d15, tos); | 901 ldp(d14, d15, tos); |
| 1006 } | 902 } |
| 1007 | 903 |
| 1008 | 904 |
| 1009 void MacroAssembler::AssertStackConsistency() { | 905 void MacroAssembler::AssertStackConsistency() { |
| 1010 if (emit_debug_code()) { | 906 if (emit_debug_code() && !csp.Is(StackPointer())) { |
| 1011 if (csp.Is(StackPointer())) { | 907 if (csp.Is(StackPointer())) { |
| 1012 // We can't check the alignment of csp without using a scratch register | 908 // TODO(jbramley): Check for csp alignment if it is the stack pointer. |
| 1013 // (or clobbering the flags), but the processor (or simulator) will abort | 909 } else { |
| 1014 // if it is not properly aligned during a load. | 910 // TODO(jbramley): Currently we cannot use this assertion in Push because |
| 1015 ldr(xzr, MemOperand(csp, 0)); | 911 // some calling code assumes that the flags are preserved. For an example, |
| 1016 } else if (FLAG_enable_slow_asserts) { | 912 // look at Builtins::Generate_ArgumentsAdaptorTrampoline. |
| 1017 Label ok; | 913 Cmp(csp, StackPointer()); |
| 1018 // Check that csp <= StackPointer(), preserving all registers and NZCV. | 914 Check(ls, kTheCurrentStackPointerIsBelowCsp); |
| 1019 sub(StackPointer(), csp, StackPointer()); | |
| 1020 cbz(StackPointer(), &ok); // Ok if csp == StackPointer(). | |
| 1021 tbnz(StackPointer(), kXSignBit, &ok); // Ok if csp < StackPointer(). | |
| 1022 | |
| 1023 Abort(kTheCurrentStackPointerIsBelowCsp); | |
| 1024 | |
| 1025 bind(&ok); | |
| 1026 // Restore StackPointer(). | |
| 1027 sub(StackPointer(), csp, StackPointer()); | |
| 1028 } | 915 } |
| 1029 } | 916 } |
| 1030 } | 917 } |
| 1031 | 918 |
| 1032 | 919 |
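The slow-assert path on the old side of AssertStackConsistency() checks csp <= StackPointer() without a scratch register and without touching NZCV: it temporarily stores csp - StackPointer() in the stack-pointer register itself, accepts a zero result or a set sign bit, and then recomputes the original value. A standalone model of that round trip, assuming plain 64-bit address arithmetic (not V8 code):

    #include <cassert>
    #include <cstdint>

    // Models the scratch-free, flag-preserving check: returns true if
    // csp <= sp, and leaves *sp unchanged on return.
    static bool CheckCspNotAboveSp(uint64_t csp, uint64_t* sp) {
      *sp = csp - *sp;                   // sub(StackPointer(), csp, StackPointer())
      bool ok = (*sp == 0) ||            // cbz: csp == StackPointer()
                ((*sp >> 63) != 0);      // tbnz sign bit: csp < StackPointer()
      *sp = csp - *sp;                   // restore StackPointer()
      return ok;                         // false corresponds to the Abort() path
    }

    int main() {
      uint64_t sp = 0x8000;
      assert(CheckCspNotAboveSp(0x7ff0, &sp) && sp == 0x8000);   // csp below sp
      assert(CheckCspNotAboveSp(0x8000, &sp) && sp == 0x8000);   // equal
      assert(!CheckCspNotAboveSp(0x8010, &sp) && sp == 0x8000);  // csp above sp
      return 0;
    }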
| 1033 void MacroAssembler::LoadRoot(Register destination, | 920 void MacroAssembler::LoadRoot(Register destination, |
| 1034 Heap::RootListIndex index) { | 921 Heap::RootListIndex index) { |
| 1035 // TODO(jbramley): Most root values are constants, and can be synthesized | 922 // TODO(jbramley): Most root values are constants, and can be synthesized |
| 1036 // without a load. Refer to the ARM back end for details. | 923 // without a load. Refer to the ARM back end for details. |
| 1037 Ldr(destination, MemOperand(root, index << kPointerSizeLog2)); | 924 Ldr(destination, MemOperand(root, index << kPointerSizeLog2)); |
| (...skipping 3387 matching lines...) |
| 4425 if (FLAG_trap_on_abort) { | 4312 if (FLAG_trap_on_abort) { |
| 4426 Brk(0); | 4313 Brk(0); |
| 4427 return; | 4314 return; |
| 4428 } | 4315 } |
| 4429 #endif | 4316 #endif |
| 4430 | 4317 |
| 4431 Label msg_address; | 4318 Label msg_address; |
| 4432 Adr(x0, &msg_address); | 4319 Adr(x0, &msg_address); |
| 4433 | 4320 |
| 4434 if (use_real_aborts()) { | 4321 if (use_real_aborts()) { |
| 4435 // Avoid infinite recursion; Push contains some assertions that use Abort. | |
| 4436 NoUseRealAbortsScope no_real_aborts(this); | |
| 4437 | |
| 4438 // Split the message pointer into two SMIs to avoid the GC | 4322 // Split the message pointer into two SMIs to avoid the GC |
| 4439 // trying to scan the string. | 4323 // trying to scan the string. |
| 4440 STATIC_ASSERT((kSmiShift == 32) && (kSmiTag == 0)); | 4324 STATIC_ASSERT((kSmiShift == 32) && (kSmiTag == 0)); |
| 4441 SmiTag(x1, x0); | 4325 SmiTag(x1, x0); |
| 4442 Bic(x0, x0, kSmiShiftMask); | 4326 Bic(x0, x0, kSmiShiftMask); |
| 4443 | 4327 |
| 4444 Push(x0, x1); | 4328 Push(x0, x1); |
| 4445 | 4329 |
| 4446 if (!has_frame_) { | 4330 if (!has_frame_) { |
| 4447 // We don't actually want to generate a pile of code for this, so just | 4331 // We don't actually want to generate a pile of code for this, so just |
| (...skipping 440 matching lines...) |
| 4888 } | 4772 } |
| 4889 } | 4773 } |
| 4890 | 4774 |
| 4891 | 4775 |
| 4892 #undef __ | 4776 #undef __ |
| 4893 | 4777 |
| 4894 | 4778 |
| 4895 } } // namespace v8::internal | 4779 } } // namespace v8::internal |
| 4896 | 4780 |
| 4897 #endif // V8_TARGET_ARCH_A64 | 4781 #endif // V8_TARGET_ARCH_A64 |