Index: runtime/vm/flow_graph_compiler_arm.cc |
=================================================================== |
--- runtime/vm/flow_graph_compiler_arm.cc (revision 38628) |
+++ runtime/vm/flow_graph_compiler_arm.cc (working copy) |
@@ -1609,14 +1609,25 @@ |
__ LoadObject(destination.reg(), constant); |
} else if (destination.IsFpuRegister()) { |
const DRegister dst = EvenDRegisterOf(destination.fpu_reg()); |
- __ LoadObject(TMP, constant); |
- __ AddImmediate(TMP, TMP, Double::value_offset() - kHeapObjectTag); |
- __ vldrd(dst, Address(TMP, 0)); |
+ if (Utils::DoublesBitEqual(Double::Cast(constant).value(), 0.0) && |
+ TargetCPUFeatures::neon_supported()) { |
+ QRegister qdst = destination.fpu_reg(); |
+ __ veorq(qdst, qdst, qdst); |
[Inline review comment — regis, 2014/07/29 21:43:17]
Why not veord?
[Reply] Mmm, it looks like Zach did not implement veord [reply truncated in source — presumably: the assembler does not provide a veord encoding, so veorq on the Q register is used instead].
 |
+ } else { |
+ __ LoadObject(TMP, constant); |
+ __ AddImmediate(TMP, TMP, Double::value_offset() - kHeapObjectTag); |
+ __ vldrd(dst, Address(TMP, 0)); |
+ } |
} else if (destination.IsDoubleStackSlot()) { |
+ if (Utils::DoublesBitEqual(Double::Cast(constant).value(), 0.0) && |
+ TargetCPUFeatures::neon_supported()) { |
+ __ veorq(QTMP, QTMP, QTMP); |
+ } else { |
+ __ LoadObject(TMP, constant); |
+ __ AddImmediate(TMP, TMP, Double::value_offset() - kHeapObjectTag); |
+ __ vldrd(DTMP, Address(TMP, 0)); |
+ } |
const intptr_t dest_offset = destination.ToStackSlotOffset(); |
- __ LoadObject(TMP, constant); |
- __ AddImmediate(TMP, TMP, Double::value_offset() - kHeapObjectTag); |
- __ vldrd(DTMP, Address(TMP, 0)); |
__ StoreDToOffset(DTMP, FP, dest_offset); |
} else { |
ASSERT(destination.IsStackSlot()); |