Index: test/cctest/wasm/test-run-wasm-simd.cc
diff --git a/test/cctest/wasm/test-run-wasm-simd.cc b/test/cctest/wasm/test-run-wasm-simd.cc
index 58f1d6559c622a9b87e4054a8e23b981986c3e90..4d4dccd32632d29e31c97fe063a6fa19786dcde9 100644
--- a/test/cctest/wasm/test-run-wasm-simd.cc
+++ b/test/cctest/wasm/test-run-wasm-simd.cc
@@ -1964,6 +1964,54 @@ WASM_EXEC_COMPILED_TEST(S1x16Xor) { RunS1x16BinOpTest(kExprS1x16Xor, Xor); }
#endif // !V8_TARGET_ARCH_ARM
#if V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET
+
+template <typename T, int maxLanes = 4>
bbudge
2017/05/01 18:21:06
How about 'numLanes' or 'LANES'? 'maxLanes' implie
+void SetVectorByLanes(T* v, ...) {
bbudge
2017/05/01 18:21:07
Could you follow the pattern set by RunUnaryLaneOp
+ va_list vl;
+ va_start(vl, v);
+#if defined(V8_TARGET_BIG_ENDIAN)
+ for (int lane = maxLanes - 1; lane >= 0; lane--) {
+ v[lane] = static_cast<T>(va_arg(vl, T));
+ }
+#else
+ for (int lane = 0; lane < maxLanes; lane++) {
+ v[lane] = static_cast<T>(va_arg(vl, T));
+ }
+#endif
+ va_end(vl);
+}
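Note (illustrative, not part of the patch): with T = int32_t the generic template above is safe because integer variadic arguments arrive as int; narrower lane types such as int16_t would be promoted to int as well and would need the same special handling as the float case below. A hedged usage sketch mirroring the call sites later in this change:

    int32_t lanes[4];
    // Fills lanes 0..3; on big-endian targets the values are stored in
    // reverse element order so lane numbering stays consistent.
    SetVectorByLanes(lanes, 0, 1, 2, 3);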
+
+// Specialize for float: varargs promote float arguments to double.
+template <>
+void SetVectorByLanes<float>(float* v, ...) {
+ va_list vl;
+ va_start(vl, v);
+ const int maxLanes = 4;
+#if defined(V8_TARGET_BIG_ENDIAN)
+ for (int lane = maxLanes - 1; lane >= 0; lane--) {
+ v[lane] = static_cast<float>(va_arg(vl, double));
+ }
+#else
+ for (int lane = 0; lane < maxLanes; lane++) {
+ v[lane] = static_cast<float>(va_arg(vl, double));
+ }
+#endif
+ va_end(vl);
+}
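Note (illustrative, not part of the patch): the specialization above exists because C-style varargs apply default argument promotions, so a float argument is passed as a double and must be read back with va_arg(vl, double); va_arg(vl, float) would be undefined behavior. A minimal standalone sketch of the promotion (the helper name FirstFloatArg is hypothetical):

    #include <cstdarg>

    // Returns the first variadic argument; a float passed by the caller
    // arrives promoted to double.
    float FirstFloatArg(int count, ...) {
      va_list vl;
      va_start(vl, count);
      float result = static_cast<float>(va_arg(vl, double));
      va_end(vl);
      return result;
    }
    // FirstFloatArg(1, 1.5f) == 1.5f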
+
+template <int vSize = 16, typename T>
bbudge
2017/05/01 18:21:07
Simd vectors are always 16 bytes. Why not just use
+T GetScalarByLanes(T* v, int lane) {
bbudge
2017/05/01 18:21:07
Naming suggestion: GetScalar or ExtractLane.
cons
+ constexpr int kElems = vSize / sizeof(T);
+#if defined(V8_TARGET_BIG_ENDIAN)
+ const int index = kElems - 1 - lane;
+#else
+ const int index = lane;
+#endif
+ USE(kElems);
+ DCHECK(index >= 0 && index < kElems);
+ return v[index];
+}
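Note (illustrative, not part of the patch): GetScalarByLanes mirrors the lane index on big-endian targets so that a given lane number refers to the same SIMD lane regardless of byte order; USE(kElems) presumably keeps the constant referenced on little-endian builds where the DCHECK compiles away. Usage sketch, assuming int32_t lanes so kElems == 16 / sizeof(int32_t) == 4:

    int32_t v[4] = {10, 20, 30, 40};
    // Little-endian: lane 1 reads v[1] == 20.
    // Big-endian:    lane 1 reads v[kElems - 1 - 1] == v[2] == 30.
    int32_t lane1 = GetScalarByLanes(v, 1);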
+
WASM_EXEC_COMPILED_TEST(SimdI32x4ExtractWithF32x4) {
FLAG_wasm_simd_prototype = true;
WasmRunner<int32_t> r(kExecuteCompiled);
@@ -2108,10 +2156,7 @@ WASM_EXEC_COMPILED_TEST(SimdI32x4GetGlobal) {
FLAG_wasm_simd_prototype = true;
WasmRunner<int32_t, int32_t> r(kExecuteCompiled);
int32_t* global = r.module().AddGlobal<int32_t>(kWasmS128);
- *(global) = 0;
- *(global + 1) = 1;
- *(global + 2) = 2;
- *(global + 3) = 3;
+ SetVectorByLanes(global, 0, 1, 2, 3);
r.AllocateLocal(kWasmI32);
BUILD(
r, WASM_SET_LOCAL(1, WASM_I32V(1)),
@@ -2144,20 +2189,17 @@ WASM_EXEC_COMPILED_TEST(SimdI32x4SetGlobal) {
WASM_I32V(56))),
WASM_I32V(1));
CHECK_EQ(1, r.Call(0));
- CHECK_EQ(*global, 23);
- CHECK_EQ(*(global + 1), 34);
- CHECK_EQ(*(global + 2), 45);
- CHECK_EQ(*(global + 3), 56);
+ CHECK_EQ(GetScalarByLanes(global, 0), 23);
+ CHECK_EQ(GetScalarByLanes(global, 1), 34);
+ CHECK_EQ(GetScalarByLanes(global, 2), 45);
+ CHECK_EQ(GetScalarByLanes(global, 3), 56);
}
WASM_EXEC_COMPILED_TEST(SimdF32x4GetGlobal) {
FLAG_wasm_simd_prototype = true;
WasmRunner<int32_t, int32_t> r(kExecuteCompiled);
float* global = r.module().AddGlobal<float>(kWasmS128);
- *(global) = 0.0;
- *(global + 1) = 1.5;
- *(global + 2) = 2.25;
- *(global + 3) = 3.5;
+ SetVectorByLanes<float>(global, 0.0, 1.5, 2.25, 3.5);
r.AllocateLocal(kWasmI32);
BUILD(
r, WASM_SET_LOCAL(1, WASM_I32V(1)),
@@ -2190,10 +2232,10 @@ WASM_EXEC_COMPILED_TEST(SimdF32x4SetGlobal) {
WASM_F32(65.0))),
WASM_I32V(1));
CHECK_EQ(1, r.Call(0));
- CHECK_EQ(*global, 13.5);
- CHECK_EQ(*(global + 1), 45.5);
- CHECK_EQ(*(global + 2), 32.25);
- CHECK_EQ(*(global + 3), 65.0);
+ CHECK_EQ(GetScalarByLanes(global, 0), 13.5f);
+ CHECK_EQ(GetScalarByLanes(global, 1), 45.5f);
+ CHECK_EQ(GetScalarByLanes(global, 2), 32.25f);
+ CHECK_EQ(GetScalarByLanes(global, 3), 65.0f);
}
WASM_EXEC_COMPILED_TEST(SimdLoadStoreLoad) {