Index: src/compiler/arm64/code-generator-arm64.cc
diff --git a/src/compiler/arm64/code-generator-arm64.cc b/src/compiler/arm64/code-generator-arm64.cc
index 9b6db06d859002ed523d2cb80023abf43fe0fc67..597bdcf48a55d4103c63687ae2500c7085a3cece 100644
--- a/src/compiler/arm64/code-generator-arm64.cc
+++ b/src/compiler/arm64/code-generator-arm64.cc
@@ -2229,18 +2229,34 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
       __ Fmov(dst, src);
     } else {
       DCHECK(destination->IsFPStackSlot());
-      __ Str(src, g.ToMemOperand(destination, masm()));
+      MemOperand dst = g.ToMemOperand(destination, masm());
+      if (!destination->IsSimd128StackSlot()) {
+        __ Str(src, dst);
+      } else {
+        __ st1(src, dst);

martyn.capewell, 2017/06/08 08:29:13:
I'm not sure this will work - I think ToDoubleRegister …

bbudge, 2017/06/08 20:16:59:
I forgot that VRegisters have a size. I think your …

+      }
     }
   } else if (source->IsFPStackSlot()) {
     DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
     MemOperand src = g.ToMemOperand(source, masm());
     if (destination->IsFPRegister()) {
-      __ Ldr(g.ToDoubleRegister(destination), src);
+      VRegister dst = g.ToDoubleRegister(destination);
+      if (!destination->IsSimd128Register()) {
+        __ Ldr(dst, src);
+      } else {
+        __ ld1(dst, src);
+      }
     } else {
       UseScratchRegisterScope scope(masm());
       VRegister temp = scope.AcquireD();
-      __ Ldr(temp, src);
-      __ Str(temp, g.ToMemOperand(destination, masm()));
+      MemOperand dst = g.ToMemOperand(destination, masm());
+      if (!destination->IsSimd128StackSlot()) {
+        __ Ldr(temp, src);
+        __ Str(temp, dst);
+      } else {
+        __ ld1(temp, src);
+        __ st1(temp, dst);
+      }
     }
   } else {
     UNREACHABLE();
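
The hunk above teaches the two FP paths of AssembleMove to distinguish 128-bit (Simd128) stack slots from ordinary FP slots: scalar moves keep using Ldr/Str, while Simd128 slots go through the NEON ld1/st1 pair. The review comments flag the open question of register width: ToDoubleRegister hands back a D-sized (64-bit) view, while a Simd128 slot needs a full 16-byte transfer. The following standalone sketch illustrates only that width-selection logic; the Slot and RegisterView types are hypothetical stand-ins, not V8 classes.

#include <cstdint>
#include <cstdio>
#include <cstring>

// Hypothetical stand-ins for the code generator's operand kinds.
struct Slot {
  uint8_t data[16];
  bool is_simd128;  // models InstructionOperand::IsSimd128StackSlot()
};

struct RegisterView {
  uint8_t bytes[16];
  unsigned width;  // 8 for a D-sized view, 16 for a full 128-bit view
};

// FP register -> FP stack slot: pick the access width from the slot kind,
// the way the patch picks st1 over Str for Simd128 destinations.
void StoreToSlot(const RegisterView& src, Slot* dst) {
  unsigned width = dst->is_simd128 ? 16 : 8;
  std::memcpy(dst->data, src.bytes, width);
}

// FP stack slot -> FP register: the mirror image (ld1 vs Ldr).
void LoadFromSlot(const Slot& src, RegisterView* dst) {
  unsigned width = src.is_simd128 ? 16 : 8;
  std::memcpy(dst->bytes, src.data, width);
  dst->width = width;  // the register view has to match the slot width
}

int main() {
  Slot slot = {{0}, /*is_simd128=*/true};
  RegisterView reg = {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}, 16};
  StoreToSlot(reg, &slot);
  RegisterView round_trip = {{0}, 0};
  LoadFromSlot(slot, &round_trip);
  std::printf("moved %u bytes through the slot\n", round_trip.width);
  return 0;
}

If the register view stays at 8 bytes while the slot is Simd128, half of the value is silently dropped, which is the hazard the reviewers are pointing at for the ld1/st1 branches.
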
@@ -2272,14 +2288,21 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
     }
   } else if (source->IsStackSlot() || source->IsFPStackSlot()) {
     UseScratchRegisterScope scope(masm());
-    DoubleRegister temp_0 = scope.AcquireD();
-    DoubleRegister temp_1 = scope.AcquireD();
+    VRegister temp_0 = scope.AcquireD();
+    VRegister temp_1 = scope.AcquireD();
     MemOperand src = g.ToMemOperand(source, masm());
     MemOperand dst = g.ToMemOperand(destination, masm());
-    __ Ldr(temp_0, src);
-    __ Ldr(temp_1, dst);
-    __ Str(temp_0, dst);
-    __ Str(temp_1, src);
+    if (!source->IsSimd128StackSlot()) {
+      __ Ldr(temp_0, src);
+      __ Ldr(temp_1, dst);
+      __ Str(temp_0, dst);
+      __ Str(temp_1, src);
+    } else {
+      __ ld1(temp_0, src);
+      __ ld1(temp_1, dst);
+      __ st1(temp_0, dst);
+      __ st1(temp_1, src);
+    }
   } else if (source->IsFPRegister()) {
     UseScratchRegisterScope scope(masm());
     VRegister temp = scope.AcquireD();
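
For the stack-slot/stack-slot case, AssembleSwap now loads both operands into two scratch registers and writes them back crosswise, again selecting ld1/st1 when the source is a Simd128 slot. A small sketch of that swap pattern, with plain byte buffers standing in for the scratch VRegisters (the Slot type is the same hypothetical stand-in as in the earlier sketch):

#include <cstdint>
#include <cstring>

// Same hypothetical slot model as before.
struct Slot {
  uint8_t data[16];
  bool is_simd128;
};

// Swap two stack slots through two temporaries, mirroring temp_0/temp_1
// acquired from UseScratchRegisterScope in the patch.
void SwapSlots(Slot* a, Slot* b) {
  unsigned width = a->is_simd128 ? 16 : 8;
  uint8_t temp_0[16];
  uint8_t temp_1[16];
  std::memcpy(temp_0, a->data, width);  // ld1/Ldr temp_0, src
  std::memcpy(temp_1, b->data, width);  // ld1/Ldr temp_1, dst
  std::memcpy(b->data, temp_0, width);  // st1/Str temp_0, dst
  std::memcpy(a->data, temp_1, width);  // st1/Str temp_1, src
}

Note that the diff still acquires temp_0 and temp_1 with AcquireD(), i.e. as 64-bit views, so the width question raised in the review thread on the first hunk seems to apply to the 128-bit branch here as well.
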
@@ -2292,9 +2315,15 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
     } else {
       DCHECK(destination->IsFPStackSlot());
       MemOperand dst = g.ToMemOperand(destination, masm());
-      __ Fmov(temp, src);
-      __ Ldr(src, dst);
-      __ Str(temp, dst);
+      if (!source->IsSimd128Register()) {
+        __ Fmov(temp, src);
+        __ Ldr(src, dst);
+        __ Str(temp, dst);
+      } else {
+        __ Fmov(temp, src);
+        __ ld1(src, dst);
+        __ st1(temp, dst);
+      }
     }
   } else {
     // No other combinations are possible.
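
The last hunk covers swapping an FP register with an FP stack slot: the register is parked in a scratch register with Fmov, the slot is loaded into the register (Ldr or ld1), and the parked value is stored back to the slot (Str or st1). A compact sketch of that three-step rotation, again with hypothetical Reg/Slot stand-ins rather than V8 types:

#include <cstdint>
#include <cstring>

struct Slot { uint8_t data[16]; bool is_simd128; };  // hypothetical stack slot
struct Reg  { uint8_t bytes[16]; };                  // hypothetical FP register

// Swap a register with a stack slot through one temporary, mirroring
// Fmov(temp, src); ld1/Ldr(src, dst); st1/Str(temp, dst) in the patch.
void SwapRegAndSlot(Reg* reg, Slot* slot) {
  unsigned width = slot->is_simd128 ? 16 : 8;
  uint8_t temp[16];
  std::memcpy(temp, reg->bytes, width);        // Fmov temp, src
  std::memcpy(reg->bytes, slot->data, width);  // ld1/Ldr src, dst
  std::memcpy(slot->data, temp, width);        // st1/Str temp, dst
}

In the Simd128 branch of the diff, temp also comes from AcquireD() and is filled with a plain Fmov, so as written only the low 64 bits of the source register appear to be preserved; this looks like the same sizing concern the reviewers raise on the first hunk rather than a new one.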