Index: runtime/vm/flow_graph_compiler_arm64.cc
diff --git a/runtime/vm/flow_graph_compiler_arm64.cc b/runtime/vm/flow_graph_compiler_arm64.cc
index 85135888c28ec0fb9da9e4c2d22cc9627fdd0b66..efe6b32532324abd5c9c94e9ad3e1081b90e5b22 100644
--- a/runtime/vm/flow_graph_compiler_arm64.cc
+++ b/runtime/vm/flow_graph_compiler_arm64.cc
@@ -1600,7 +1600,14 @@ void ParallelMoveResolver::EmitMove(int index) {
     ASSERT(source.IsConstant());
     const Object& constant = source.constant();
     if (destination.IsRegister()) {
-      __ LoadObject(destination.reg(), constant, PP);
+      if (constant.IsSmi() &&
+          (source.constant_instruction()->representation() == kUnboxedInt32)) {
+        __ LoadImmediate(destination.reg(),
+                         static_cast<int32_t>(Smi::Cast(constant).Value()),
+                         PP);
+      } else {
+        __ LoadObject(destination.reg(), constant, PP);
+      }
     } else if (destination.IsFpuRegister()) {
       const VRegister dst = destination.fpu_reg();
       if (Utils::DoublesBitEqual(Double::Cast(constant).value(), 0.0)) {
@@ -1624,7 +1631,14 @@ void ParallelMoveResolver::EmitMove(int index) {
       ASSERT(destination.IsStackSlot());
       const intptr_t dest_offset = destination.ToStackSlotOffset();
       ScratchRegisterScope tmp(this, kNoRegister);
-      __ LoadObject(tmp.reg(), constant, PP);
+      if (constant.IsSmi() &&
+          (source.constant_instruction()->representation() == kUnboxedInt32)) {
+        __ LoadImmediate(tmp.reg(),
+                         static_cast<int32_t>(Smi::Cast(constant).Value()),
+                         PP);
+      } else {
+        __ LoadObject(tmp.reg(), constant, PP);
+      }
       __ StoreToOffset(tmp.reg(), destination.base_reg(), dest_offset, PP);
     }
   }
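Note (not part of the patch): both hunks apply the same specialization in ParallelMoveResolver::EmitMove. When the constant source is a Smi whose ConstantInstr representation is kUnboxedInt32, the value is now materialized with LoadImmediate as a raw 32-bit integer, instead of LoadObject, which would leave the tagged Smi bit pattern in the register. The standalone C++ sketch below illustrates the tagged-versus-raw distinction this relies on, assuming the Dart VM's one-bit Smi tag (low bit 0, so a Smi's raw word is its value shifted left by one); the TagSmi/UntagSmi helpers are hypothetical, written only for this illustration.

// tag_sketch.cc: illustrative only, not VM code.
// Assumes the Dart VM Smi scheme: one low tag bit with tag value 0,
// so a Smi's raw machine word is its integer value shifted left by one.
#include <cstdint>
#include <cstdio>

static intptr_t TagSmi(intptr_t value) { return value << 1; }  // hypothetical helper
static intptr_t UntagSmi(intptr_t raw) { return raw >> 1; }    // hypothetical helper

int main() {
  const intptr_t value = 42;

  // What a LoadObject-style move leaves in the register for a Smi:
  // the tagged bit pattern (84 here), suitable for tagged consumers.
  const intptr_t tagged = TagSmi(value);

  // What the patched LoadImmediate path leaves in the register for a
  // kUnboxedInt32 constant: the raw 32-bit value (42 here).
  const int32_t unboxed = static_cast<int32_t>(value);

  std::printf("tagged=%ld untagged=%ld unboxed=%d\n",
              static_cast<long>(tagged),
              static_cast<long>(UntagSmi(tagged)),
              unboxed);
  return 0;
}

Under that tagging assumption, handing the tagged pattern to an instruction that expects an unboxed int32 would be off by the tag shift (a factor of two), which is why the Smi-plus-kUnboxedInt32 case takes the LoadImmediate path in both the register and stack-slot destinations.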