Index: runtime/vm/flow_graph_compiler_mips.cc
diff --git a/runtime/vm/flow_graph_compiler_mips.cc b/runtime/vm/flow_graph_compiler_mips.cc
index c0ac35d1caee4abf4d698c17a104e82c963f841e..ab5fe5fade5379c067688610f0fc0b795b0f0828 100644
--- a/runtime/vm/flow_graph_compiler_mips.cc
+++ b/runtime/vm/flow_graph_compiler_mips.cc
@@ -1650,7 +1650,12 @@ void ParallelMoveResolver::EmitMove(int index) {
     ASSERT(source.IsConstant());
     const Object& constant = source.constant();
     if (destination.IsRegister()) {
-      __ LoadObject(destination.reg(), constant);
+      if (constant.IsSmi() &&
+          (source.constant_instruction()->representation() == kUnboxedInt32)) {
+        __ LoadImmediate(destination.reg(), Smi::Cast(constant).Value());
+      } else {
+        __ LoadObject(destination.reg(), constant);
+      }
     } else if (destination.IsFpuRegister()) {
       __ LoadObject(TMP, constant);
       __ LoadDFromOffset(destination.fpu_reg(), TMP,
@@ -1664,7 +1669,12 @@ void ParallelMoveResolver::EmitMove(int index) {
       ASSERT(destination.IsStackSlot());
       const intptr_t dest_offset = destination.ToStackSlotOffset();
       ScratchRegisterScope tmp(this, kNoRegister);
-      __ LoadObject(tmp.reg(), constant);
+      if (constant.IsSmi() &&
+          (source.constant_instruction()->representation() == kUnboxedInt32)) {
+        __ LoadImmediate(tmp.reg(), Smi::Cast(constant).Value());
+      } else {
+        __ LoadObject(tmp.reg(), constant);
+      }
       __ StoreToOffset(tmp.reg(), destination.base_reg(), dest_offset);
     }
   }