Index: src/compiler/wasm-compiler.cc |
diff --git a/src/compiler/wasm-compiler.cc b/src/compiler/wasm-compiler.cc |
index e7508c26ddffe32340289b2ed64342fb4d809590..8a6c5c61f2dcb573e87dbdba198ae7ad77699d8d 100644 |
--- a/src/compiler/wasm-compiler.cc |
+++ b/src/compiler/wasm-compiler.cc |
@@ -2895,7 +2895,13 @@ Node* WasmGraphBuilder::LoadMem(wasm::LocalType type, MachineType memtype, |
MemBuffer(offset), index, *effect_, *control_); |
*effect_ = load; |
} else { |
- load = BuildUnalignedLoad(type, memtype, index, offset, alignment); |
+ if (jsgraph()->machine()->UnalignedLoad(memtype).IsSupported()) { |
+ load = graph()->NewNode(jsgraph()->machine()->UnalignedLoad(memtype).op(), |
+ MemBuffer(offset), index, *effect_, *control_); |
+ *effect_ = load; |
+ } else { |
+ load = BuildUnalignedLoad(type, memtype, index, offset, alignment); |
[NOTE: the following is a code-review thread that leaked into the patch body. These lines are not part of the diff and must be removed before the patch can be applied with `git apply`/`patch`.]
titzer (2016/07/11 08:40:00): I thought this routine would go away. Is it the ca[…message truncated in extraction…]
ivica.bogosavljevic (2016/07/11 09:34:43): Agreed. In that case we'll make UnalignedLoad/Unal[…message truncated in extraction — presumably "UnalignedStore"…]
+ } |
} |
#if defined(V8_TARGET_BIG_ENDIAN) |
// TODO(john.yan) Implement byte swap turbofan operator |
@@ -3036,7 +3042,16 @@ Node* WasmGraphBuilder::StoreMem(MachineType memtype, Node* index, |
index, val, *effect_, *control_); |
*effect_ = store; |
} else { |
- store = BuildUnalignedStore(memtype, index, offset, alignment, val); |
+ UnalignedStoreRepresentation rep(memtype.representation()); |
+ |
+ if (jsgraph()->machine()->UnalignedStore(rep).IsSupported()) { |
+ store = |
+ graph()->NewNode(jsgraph()->machine()->UnalignedStore(rep).op(), |
+ MemBuffer(offset), index, val, *effect_, *control_); |
+ *effect_ = store; |
+ } else { |
+ store = BuildUnalignedStore(memtype, index, offset, alignment, val); |
+ } |
} |
return store; |