Chromium Code Reviews — Index: runtime/vm/flow_graph_compiler_arm.cc |
| diff --git a/runtime/vm/flow_graph_compiler_arm.cc b/runtime/vm/flow_graph_compiler_arm.cc |
| index db7e56a4d9d2f515b8c26236c7ec41ed1ac0efa9..bbe3fdce2a3e6a5e5901850334b3a976bce852b4 100644 |
| --- a/runtime/vm/flow_graph_compiler_arm.cc |
| +++ b/runtime/vm/flow_graph_compiler_arm.cc |
| @@ -1146,8 +1146,7 @@ void ParallelMoveResolver::EmitMove(int index) { |
| if (destination.IsDoubleStackSlot()) { |
| __ vstrd(source.fpu_reg(), destination.ToStackSlotAddress()); |
| } else { |
| - ASSERT(destination.IsFloat32x4StackSlot() || |
| - destination.IsUint32x4StackSlot()); |
| + ASSERT(destination.IsQuadStackSlot()); |
| UNIMPLEMENTED(); |
| } |
| } |
| @@ -1159,7 +1158,7 @@ void ParallelMoveResolver::EmitMove(int index) { |
| __ vldrd(FpuTMP, source.ToStackSlotAddress()); |
| __ vstrd(FpuTMP, destination.ToStackSlotAddress()); |
| } |
| - } else if (source.IsFloat32x4StackSlot() || source.IsUint32x4StackSlot()) { |
| + } else if (source.IsQuadStackSlot()) { |
| UNIMPLEMENTED(); |
| } else { |
| ASSERT(source.IsConstant()); |
| @@ -1199,11 +1198,9 @@ void ParallelMoveResolver::EmitSwap(int index) { |
| __ vmovd(destination.fpu_reg(), FpuTMP); |
| } else if (source.IsFpuRegister() || destination.IsFpuRegister()) { |
| ASSERT(destination.IsDoubleStackSlot() || |
| - destination.IsFloat32x4StackSlot() || |
| - destination.IsUint32x4StackSlot() || |
| + destination.IsQuadStackSlot() || |
| source.IsDoubleStackSlot() || |
| - source.IsFloat32x4StackSlot() || |
| - source.IsUint32x4StackSlot()); |
| + source.IsQuadStackSlot()); |
| bool double_width = destination.IsDoubleStackSlot() || |
| source.IsDoubleStackSlot(); |
| DRegister reg = source.IsFpuRegister() ? source.fpu_reg() |
| @@ -1219,6 +1216,17 @@ void ParallelMoveResolver::EmitSwap(int index) { |
| } else { |
| UNIMPLEMENTED(); |
| } |
| + } else if (source.IsDoubleStackSlot() && destination.IsDoubleStackSlot()) { |
| + const Address& source_slot_address = source.ToStackSlotAddress(); |
| + const Address& destination_slot_address = destination.ToStackSlotAddress(); |
| + |
| + ScratchFpuRegisterScope ensure_scratch(this, FpuTMP); |
| + __ vldrd(FpuTMP, source_slot_address); |
| + __ vldrd(ensure_scratch.reg(), destination_slot_address); |
| + __ vstrd(FpuTMP, destination_slot_address); |
| + __ vstrd(ensure_scratch.reg(), source_slot_address); |
| + } else if (source.IsQuadStackSlot() && destination.IsQuadStackSlot()) { |
| + UNIMPLEMENTED(); |
| } else { |
| UNREACHABLE(); |
| } |
| @@ -1263,13 +1271,31 @@ void ParallelMoveResolver::Exchange(Register reg, const Address& mem) { |
| void ParallelMoveResolver::Exchange(const Address& mem1, const Address& mem2) { |
| - // TODO(vegorov): allocate temporary registers for such moves. |
| - __ Push(R0); |
| - __ ldr(R0, mem1); |
| + ScratchRegisterScope ensure_scratch(this, IP); |
|
[Review comment] regis — 2013/04/09 15:29:27:
IP is not an available register, so there is no need to preserve it here. [comment truncated in the captured page — remainder not recoverable]
|
| + __ ldr(ensure_scratch.reg(), mem1); |
| __ ldr(IP, mem2); |
| + __ str(ensure_scratch.reg(), mem2); |
| __ str(IP, mem1); |
| - __ str(R0, mem2); |
| - __ Pop(R0); |
| +} |
| + |
| + |
| +void ParallelMoveResolver::SpillScratch(Register reg) { |
| + __ Push(reg); |
| +} |
| + |
| + |
| +void ParallelMoveResolver::RestoreScratch(Register reg) { |
| + __ Pop(reg); |
| +} |
| + |
| + |
| +void ParallelMoveResolver::SpillFpuScratch(FpuRegister reg) { |
| + __ vstrd(reg, Address(SP, -kDoubleSize, Address::PreIndex)); |
| +} |
| + |
| + |
| +void ParallelMoveResolver::RestoreFpuScratch(FpuRegister reg) { |
| + __ vldrd(reg, Address(SP, kDoubleSize, Address::PostIndex)); |
| } |