| Index: src/a64/macro-assembler-a64.cc
|
| diff --git a/src/a64/macro-assembler-a64.cc b/src/a64/macro-assembler-a64.cc
|
| index 14fb2fda63248808d25cdaa947fc4f61a2c6d9b3..b341044fb04515f46517621d6905d1523d4c0452 100644
|
| --- a/src/a64/macro-assembler-a64.cc
|
| +++ b/src/a64/macro-assembler-a64.cc
|
| @@ -822,6 +822,56 @@ void MacroAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
|
| }
|
|
|
|
|
| +void MacroAssembler::PushPopQueue::PushQueued() {
|
| + if (queued_.empty()) return;
|
| +
|
| + masm_->PrepareForPush(size_);
|
| +
|
| + int count = queued_.size();
|
| + int index = 0;
|
| + while (index < count) {
|
| + // PushHelper can only handle registers with the same size and type, and it
|
| + // can handle only four at a time. Batch them up accordingly.
|
| + CPURegister batch[4] = {NoReg, NoReg, NoReg, NoReg};
|
| + int batch_index = 0;
|
| + do {
|
| + batch[batch_index++] = queued_[index++];
|
| + } while ((batch_index < 4) && (index < count) &&
|
| + batch[0].IsSameSizeAndType(queued_[index]));
|
| +
|
| + masm_->PushHelper(batch_index, batch[0].SizeInBytes(),
|
| + batch[0], batch[1], batch[2], batch[3]);
|
| + }
|
| +
|
| + queued_.clear();
|
| +}
|
| +
|
| +
|
| +void MacroAssembler::PushPopQueue::PopQueued() {
|
| + if (queued_.empty()) return;
|
| +
|
| + masm_->PrepareForPop(size_);
|
| +
|
| + int count = queued_.size();
|
| + int index = 0;
|
| + while (index < count) {
|
| + // PopHelper can only handle registers with the same size and type, and it
|
| + // can handle only four at a time. Batch them up accordingly.
|
| + CPURegister batch[4] = {NoReg, NoReg, NoReg, NoReg};
|
| + int batch_index = 0;
|
| + do {
|
| + batch[batch_index++] = queued_[index++];
|
| + } while ((batch_index < 4) && (index < count) &&
|
| + batch[0].IsSameSizeAndType(queued_[index]));
|
| +
|
| + masm_->PopHelper(batch_index, batch[0].SizeInBytes(),
|
| + batch[0], batch[1], batch[2], batch[3]);
|
| + }
|
| +
|
| + queued_.clear();
|
| +}
|
| +
|
| +
|
| void MacroAssembler::PushCPURegList(CPURegList registers) {
|
| int size = registers.RegisterSizeInBytes();
|
|
|
| @@ -867,7 +917,7 @@ void MacroAssembler::PopCPURegList(CPURegList registers) {
|
| }
|
|
|
|
|
| -void MacroAssembler::PushMultipleTimes(int count, Register src) {
|
| +void MacroAssembler::PushMultipleTimes(CPURegister src, int count) {
|
| int size = src.SizeInBytes();
|
|
|
| PrepareForPush(count, size);
|
| @@ -902,6 +952,51 @@ void MacroAssembler::PushMultipleTimes(int count, Register src) {
|
| }
|
|
|
|
|
| +void MacroAssembler::PushMultipleTimes(CPURegister src, Register count) {
|
| + PrepareForPush(Operand(count, UXTW, WhichPowerOf2(src.SizeInBytes())));
|
| +
|
| + Register temp = AppropriateTempFor(count);
|
| +
|
| + if (FLAG_optimize_for_size) {
|
| + Label loop, done;
|
| +
|
| + Subs(temp, count, 1);
|
| + B(mi, &done);
|
| +
|
| + // Push the one register repeatedly, one slot at a time, to save code size.
|
| + Bind(&loop);
|
| + Subs(temp, temp, 1);
|
| + PushHelper(1, src.SizeInBytes(), src, NoReg, NoReg, NoReg);
|
| + B(pl, &loop);
|
| +
|
| + Bind(&done);
|
| + } else {
|
| + Label loop, leftover2, leftover1, done;
|
| +
|
| + Subs(temp, count, 4);
|
| + B(mi, &leftover2);
|
| +
|
| + // Push groups of four first.
|
| + Bind(&loop);
|
| + Subs(temp, temp, 4);
|
| + PushHelper(4, src.SizeInBytes(), src, src, src, src);
|
| + B(pl, &loop);
|
| +
|
| + // Push groups of two.
|
| + Bind(&leftover2);
|
| + Tbz(count, 1, &leftover1);
|
| + PushHelper(2, src.SizeInBytes(), src, src, NoReg, NoReg);
|
| +
|
| + // Push the last one (if required).
|
| + Bind(&leftover1);
|
| + Tbz(count, 0, &done);
|
| + PushHelper(1, src.SizeInBytes(), src, NoReg, NoReg, NoReg);
|
| +
|
| + Bind(&done);
|
| + }
|
| +}
|
| +
|
| +
|
| void MacroAssembler::PushHelper(int count, int size,
|
| const CPURegister& src0,
|
| const CPURegister& src1,
|
| @@ -983,30 +1078,39 @@ void MacroAssembler::PopHelper(int count, int size,
|
| }
|
|
|
|
|
| -void MacroAssembler::PrepareForPush(int count, int size) {
|
| - // TODO(jbramley): Use AssertStackConsistency here, if possible. See the
|
| - // AssertStackConsistency for details of why we can't at the moment.
|
| +void MacroAssembler::PrepareForPush(Operand total_size) {
|
| + AssertStackConsistency();
|
| if (csp.Is(StackPointer())) {
|
| // If the current stack pointer is csp, then it must be aligned to 16 bytes
|
| // on entry and the total size of the specified registers must also be a
|
| // multiple of 16 bytes.
|
| - ASSERT((count * size) % 16 == 0);
|
| + if (total_size.IsImmediate()) {
|
| + ASSERT((total_size.immediate() % 16) == 0);
|
| + }
|
| +
|
| + // Don't check the access size when the total size is not an immediate; it
|
| + // is hard to do well, and hardware (or the simulator) will catch it anyway.
|
| } else {
|
| // Even if the current stack pointer is not the system stack pointer (csp),
|
| // the system stack pointer will still be modified in order to comply with
|
| // ABI rules about accessing memory below the system stack pointer.
|
| - BumpSystemStackPointer(count * size);
|
| + BumpSystemStackPointer(total_size);
|
| }
|
| }
|
|
|
|
|
| -void MacroAssembler::PrepareForPop(int count, int size) {
|
| +void MacroAssembler::PrepareForPop(Operand total_size) {
|
| AssertStackConsistency();
|
| if (csp.Is(StackPointer())) {
|
| // If the current stack pointer is csp, then it must be aligned to 16 bytes
|
| // on entry and the total size of the specified registers must also be a
|
| // multiple of 16 bytes.
|
| - ASSERT((count * size) % 16 == 0);
|
| + if (total_size.IsImmediate()) {
|
| + ASSERT((total_size.immediate() % 16) == 0);
|
| + }
|
| +
|
| + // Don't check the access size when the total size is not an immediate; it
|
| + // is hard to do well, and hardware (or the simulator) will catch it anyway.
|
| }
|
| }
|
|
|
| @@ -1102,15 +1206,24 @@ void MacroAssembler::PopCalleeSavedRegisters() {
|
|
|
|
|
| void MacroAssembler::AssertStackConsistency() {
|
| - if (emit_debug_code() && !csp.Is(StackPointer())) {
|
| + if (emit_debug_code()) {
|
| if (csp.Is(StackPointer())) {
|
| - // TODO(jbramley): Check for csp alignment if it is the stack pointer.
|
| - } else {
|
| - // TODO(jbramley): Currently we cannot use this assertion in Push because
|
| - // some calling code assumes that the flags are preserved. For an example,
|
| - // look at Builtins::Generate_ArgumentsAdaptorTrampoline.
|
| - Cmp(csp, StackPointer());
|
| - Check(ls, kTheCurrentStackPointerIsBelowCsp);
|
| + // We can't check the alignment of csp without using a scratch register
|
| + // (or clobbering the flags), but the processor (or simulator) will abort
|
| + // if it is not properly aligned during a load.
|
| + ldr(xzr, MemOperand(csp, 0));
|
| + } else if (FLAG_enable_slow_asserts) {
|
| + Label ok;
|
| + // Check that csp <= StackPointer(), preserving all registers and NZCV.
|
| + sub(StackPointer(), csp, StackPointer());
|
| + cbz(StackPointer(), &ok); // Ok if csp == StackPointer().
|
| + tbnz(StackPointer(), kXSignBit, &ok); // Ok if csp < StackPointer().
|
| +
|
| + Abort(kTheCurrentStackPointerIsBelowCsp);
|
| +
|
| + bind(&ok);
|
| + // Restore StackPointer().
|
| + sub(StackPointer(), csp, StackPointer());
|
| }
|
| }
|
| }
|
| @@ -4522,6 +4635,9 @@ void MacroAssembler::Abort(BailoutReason reason) {
|
| Mov(jssp, old_stack_pointer);
|
|
|
| if (use_real_aborts()) {
|
| + // Avoid infinite recursion; Push contains some assertions that use Abort.
|
| + NoUseRealAbortsScope no_real_aborts(this);
|
| +
|
| Mov(x0, Operand(Smi::FromInt(reason)));
|
| Push(x0);
|
|
|
|
|