| Index: runtime/vm/flow_graph_compiler_arm.cc
|
| ===================================================================
|
| --- runtime/vm/flow_graph_compiler_arm.cc (revision 20980)
|
| +++ runtime/vm/flow_graph_compiler_arm.cc (working copy)
|
| @@ -19,6 +19,7 @@
|
|
|
| namespace dart {
|
|
|
| +DEFINE_FLAG(bool, trap_on_deoptimization, false, "Trap on deoptimization.");
|
| DECLARE_FLAG(int, optimization_counter_threshold);
|
| DECLARE_FLAG(bool, print_ast);
|
| DECLARE_FLAG(bool, print_scopes);
|
| @@ -42,7 +43,19 @@
|
|
|
| void CompilerDeoptInfoWithStub::GenerateCode(FlowGraphCompiler* compiler,
|
|                                               intptr_t stub_ix) {
|
| - UNIMPLEMENTED();
|
| + // Calls do not need stubs; they share a deoptimization trampoline.
|
| + ASSERT(reason() != kDeoptAtCall);
|
| + Assembler* assem = compiler->assembler();
|
| +#define __ assem->
|
| + __ Comment("Deopt stub for id %" Pd "", deopt_id());
|
| + __ Bind(entry_label());
|
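| + // Under --trap_on_deoptimization, stop in the debugger on entry to
|
| + // this stub.
|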
| + if (FLAG_trap_on_deoptimization) __ bkpt(0);
|
| +
|
| + ASSERT(deoptimization_env() != NULL);
|
| +
|
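| + // Call the shared deoptimization stub, and record the offset just past
|
| + // the call as the pc offset for this deopt info.
|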
| + __ BranchLink(&StubCode::DeoptimizeLabel());
|
| + set_pc_offset(assem->CodeSize());
|
| +#undef __
|
| }
|
|
|
|
|
| @@ -1102,37 +1115,166 @@
|
| }
|
|
|
|
|
| +#undef __
|
| +#define __ compiler_->assembler()->
|
| +
|
| +
|
| void ParallelMoveResolver::EmitMove(int index) {
|
| - UNIMPLEMENTED();
|
| + MoveOperands* move = moves_[index];
|
| + const Location source = move->src();
|
| + const Location destination = move->dest();
|
| +
|
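| + // Emit the move as a case analysis on the source and destination
|
| + // location kinds.
|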
| + if (source.IsRegister()) {
|
| +   if (destination.IsRegister()) {
|
| +     __ mov(destination.reg(), ShifterOperand(source.reg()));
|
| +   } else {
|
| +     ASSERT(destination.IsStackSlot());
|
| +     __ str(source.reg(), destination.ToStackSlotAddress());
|
| +   }
|
| + } else if (source.IsStackSlot()) {
|
| +   if (destination.IsRegister()) {
|
| +     __ ldr(destination.reg(), source.ToStackSlotAddress());
|
| +   } else {
|
| +     ASSERT(destination.IsStackSlot());
|
| +     MoveMemoryToMemory(destination.ToStackSlotAddress(),
|
| +                        source.ToStackSlotAddress());
|
| +   }
|
| + } else if (source.IsFpuRegister()) {
|
| +   if (destination.IsFpuRegister()) {
|
| +     __ vmovd(destination.fpu_reg(), source.fpu_reg());
|
| +   } else {
|
| +     if (destination.IsDoubleStackSlot()) {
|
| +       __ vstrd(source.fpu_reg(), destination.ToStackSlotAddress());
|
| +     } else {
|
| +       ASSERT(destination.IsFloat32x4StackSlot() ||
|
| +              destination.IsUint32x4StackSlot());
|
| +       UNIMPLEMENTED();
|
| +     }
|
| +   }
|
| + } else if (source.IsDoubleStackSlot()) {
|
| +   if (destination.IsFpuRegister()) {
|
| +     __ vldrd(destination.fpu_reg(), source.ToStackSlotAddress());
|
| +   } else {
|
| +     ASSERT(destination.IsDoubleStackSlot());
|
| +     __ vldrd(FpuTMP, source.ToStackSlotAddress());
|
| +     __ vstrd(FpuTMP, destination.ToStackSlotAddress());
|
| +   }
|
| + } else if (source.IsFloat32x4StackSlot() || source.IsUint32x4StackSlot()) {
|
| +   UNIMPLEMENTED();
|
| + } else {
|
| +   ASSERT(source.IsConstant());
|
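| +   // Constants are materialized with LoadObject, going through IP
|
| +   // (see StoreObject) when the destination is a stack slot.
|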
| +   if (destination.IsRegister()) {
|
| +     const Object& constant = source.constant();
|
| +     __ LoadObject(destination.reg(), constant);
|
| +   } else {
|
| +     ASSERT(destination.IsStackSlot());
|
| +     StoreObject(destination.ToStackSlotAddress(), source.constant());
|
| +   }
|
| + }
|
| +
|
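| + // The move has been emitted; mark it as resolved.
|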
| + move->Eliminate();
|
| }
|
|
|
|
|
| void ParallelMoveResolver::EmitSwap(int index) {
|
| - UNIMPLEMENTED();
|
| + MoveOperands* move = moves_[index];
|
| + const Location source = move->src();
|
| + const Location destination = move->dest();
|
| +
|
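| + // IP is the scratch register used for swaps, so neither operand may
|
| + // live in IP.
|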
| + if (source.IsRegister() && destination.IsRegister()) {
|
| +   ASSERT(source.reg() != IP);
|
| +   ASSERT(destination.reg() != IP);
|
| +   __ mov(IP, ShifterOperand(source.reg()));
|
| +   __ mov(source.reg(), ShifterOperand(destination.reg()));
|
| +   __ mov(destination.reg(), ShifterOperand(IP));
|
| + } else if (source.IsRegister() && destination.IsStackSlot()) {
|
| +   Exchange(source.reg(), destination.ToStackSlotAddress());
|
| + } else if (source.IsStackSlot() && destination.IsRegister()) {
|
| +   Exchange(destination.reg(), source.ToStackSlotAddress());
|
| + } else if (source.IsStackSlot() && destination.IsStackSlot()) {
|
| +   Exchange(destination.ToStackSlotAddress(), source.ToStackSlotAddress());
|
| + } else if (source.IsFpuRegister() && destination.IsFpuRegister()) {
|
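| +   // Swap the two FPU registers through FpuTMP.
|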
| +   __ vmovd(FpuTMP, source.fpu_reg());
|
| +   __ vmovd(source.fpu_reg(), destination.fpu_reg());
|
| +   __ vmovd(destination.fpu_reg(), FpuTMP);
|
| + } else if (source.IsFpuRegister() || destination.IsFpuRegister()) {
|
| +   ASSERT(destination.IsDoubleStackSlot() ||
|
| +          destination.IsFloat32x4StackSlot() ||
|
| +          destination.IsUint32x4StackSlot() ||
|
| +          source.IsDoubleStackSlot() ||
|
| +          source.IsFloat32x4StackSlot() ||
|
| +          source.IsUint32x4StackSlot());
|
| +   bool double_width = destination.IsDoubleStackSlot() ||
|
| +                       source.IsDoubleStackSlot();
|
| +   DRegister reg = source.IsFpuRegister() ? source.fpu_reg()
|
| +                                          : destination.fpu_reg();
|
| +   const Address& slot_address = source.IsFpuRegister()
|
| +       ? destination.ToStackSlotAddress()
|
| +       : source.ToStackSlotAddress();
|
| +
|
| +   if (double_width) {
|
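| +     // FpuTMP <- [slot]; [slot] <- reg; reg <- FpuTMP.
|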
| +     __ vldrd(FpuTMP, slot_address);
|
| +     __ vstrd(reg, slot_address);
|
| +     __ vmovd(reg, FpuTMP);
|
| +   } else {
|
| +     UNIMPLEMENTED();
|
| +   }
|
| + } else {
|
| +   UNREACHABLE();
|
| + }
|
| +
|
| + // The swap of source and destination has also performed the move from
|
| + // source to destination, so that move can be eliminated.
|
| + move->Eliminate();
|
| +
|
| + // Any unperformed (including pending) move with a source of either
|
| + // this move's source or destination needs to have its source
|
| + // changed to reflect the state of affairs after the swap.
|
| + for (int i = 0; i < moves_.length(); ++i) {
|
| +   const MoveOperands& other_move = *moves_[i];
|
| +   if (other_move.Blocks(source)) {
|
| +     moves_[i]->set_src(destination);
|
| +   } else if (other_move.Blocks(destination)) {
|
| +     moves_[i]->set_src(source);
|
| +   }
|
| + }
|
| }
|
|
|
|
|
| void ParallelMoveResolver::MoveMemoryToMemory(const Address& dst,
|
|                                                const Address& src) {
|
| - UNIMPLEMENTED();
|
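| + // Memory-to-memory moves go through the IP scratch register.
|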
| + __ ldr(IP, src);
|
| + __ str(IP, dst);
|
| }
|
|
|
|
|
| void ParallelMoveResolver::StoreObject(const Address& dst, const Object& obj) {
|
| - UNIMPLEMENTED();
|
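| + // Materialize the object in IP, then store it to the destination.
|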
| + __ LoadObject(IP, obj);
|
| + __ str(IP, dst);
|
| }
|
|
|
|
|
| void ParallelMoveResolver::Exchange(Register reg, const Address& mem) {
|
| - UNIMPLEMENTED();
|
| + ASSERT(reg != IP);
|
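| + // IP <- reg; reg <- [mem]; [mem] <- IP.
|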
| + __ mov(IP, ShifterOperand(reg));
|
| + __ ldr(reg, mem);
|
| + __ str(IP, mem);
|
| }
|
|
|
|
|
| void ParallelMoveResolver::Exchange(const Address& mem1, const Address& mem2) {
|
| - UNIMPLEMENTED();
|
| + // TODO(vegorov): allocate temporary registers for such moves.
|
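| + // Only IP is available as a scratch register here, so preserve R0
|
| + // across the exchange and use it as the second temporary.
|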
| + __ Push(R0);
|
| + __ ldr(R0, mem1);
|
| + __ ldr(IP, mem2);
|
| + __ str(IP, mem1);
|
| + __ str(R0, mem2);
|
| + __ Pop(R0);
|
| }
|
|
|
|
|
| +#undef __
|
| +
|
| }  // namespace dart
|
|
|
| #endif  // defined TARGET_ARCH_ARM
|
|
|