Chromium Code Reviews

Unified Diff: test/cctest/test-macro-assembler-mips.cc

Issue 1779713009: Implement optional turbofan UnalignedLoad and UnalignedStore operators (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Fix failures in cctest/test-run-wasm-64/Run_Wasm_LoadStoreI64_sx due to missing implementation of U… Created 4 years, 9 months ago
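
Reviewer's note (not part of the patch): the diff below reworks the FOR_*_INPUTS helper macros so each loop names its test vector explicitly instead of deriving it from the element type. A rough C++ sketch of what an invocation unrolls to, assuming the macro definitions added in this patch; the helper name cvt_trunc_uint32_test_values_sketch is illustrative only:

#include <cstdint>
#include <vector>

// Stand-in with the same shape as the patch's test-vector helpers.
static std::vector<uint32_t> cvt_trunc_uint32_test_values_sketch() {
  static const uint32_t kValues[] = {0x00000000, 0x7fffffff, 0xffffffff};
  return std::vector<uint32_t>(kValues, kValues + 3);
}

void iterate_like_the_macros() {
  // FOR_UINT32_INPUTS(i, cvt_trunc_uint32_test_values) { ... *i ... }
  // expands to a plain forward loop over the chosen test vector:
  std::vector<uint32_t> i_vec = cvt_trunc_uint32_test_values_sketch();
  for (std::vector<uint32_t>::iterator i = i_vec.begin(); i != i_vec.end();
       ++i) {
    uint32_t input = *i;
    (void)input;  // loop body uses *i
  }
  // FOR_INT32_INPUTS2(j1, j2, vec) additionally walks the same vector
  // backwards through j2 in lockstep with j1, so the unaligned-access tests
  // below pair input and output offsets drawn from opposite ends of the list.
}
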
Index: test/cctest/test-macro-assembler-mips.cc
diff --git a/test/cctest/test-macro-assembler-mips.cc b/test/cctest/test-macro-assembler-mips.cc
index 074d1868701563654bcb9af64f02b487cd6ac10d..47c6fc00686b0d28492b39b358c46b756ba7e68d 100644
--- a/test/cctest/test-macro-assembler-mips.cc
+++ b/test/cctest/test-macro-assembler-mips.cc
@@ -389,14 +389,14 @@ TEST(Lsa) {
}
}
-static const std::vector<uint32_t> uint32_test_values() {
+static const std::vector<uint32_t> cvt_trunc_uint32_test_values() {
static const uint32_t kValues[] = {0x00000000, 0x00000001, 0x00ffff00,
0x7fffffff, 0x80000000, 0x80000001,
0x80ffff00, 0x8fffffff, 0xffffffff};
return std::vector<uint32_t>(&kValues[0], &kValues[arraysize(kValues)]);
}
-static const std::vector<int32_t> int32_test_values() {
+static const std::vector<int32_t> cvt_trunc_int32_test_values() {
static const int32_t kValues[] = {
static_cast<int32_t>(0x00000000), static_cast<int32_t>(0x00000001),
static_cast<int32_t>(0x00ffff00), static_cast<int32_t>(0x7fffffff),
@@ -407,13 +407,27 @@ static const std::vector<int32_t> int32_test_values() {
}
// Helper macros that can be used in FOR_INT32_INPUTS(i) { ... *i ... }
-#define FOR_INPUTS(ctype, itype, var) \
- std::vector<ctype> var##_vec = itype##_test_values(); \
+#define FOR_INPUTS(ctype, itype, var, test_vector) \
+ std::vector<ctype> var##_vec = test_vector(); \
for (std::vector<ctype>::iterator var = var##_vec.begin(); \
var != var##_vec.end(); ++var)
-#define FOR_UINT32_INPUTS(var) FOR_INPUTS(uint32_t, uint32, var)
-#define FOR_INT32_INPUTS(var) FOR_INPUTS(int32_t, int32, var)
+#define FOR_INPUTS2(ctype, itype, var, var2, test_vector) \
+ std::vector<ctype> var##_vec = test_vector(); \
+ std::vector<ctype>::iterator var; \
+ std::vector<ctype>::reverse_iterator var2; \
+ for (var = var##_vec.begin(), var2 = var##_vec.rbegin(); \
+ var != var##_vec.end(); ++var, ++var2)
+
+#define FOR_UINT32_INPUTS(var, test_vector) \
+ FOR_INPUTS(uint32_t, uint32, var, test_vector)
+#define FOR_INT32_INPUTS(var, test_vector) \
+ FOR_INPUTS(int32_t, int32, var, test_vector)
+#define FOR_INT32_INPUTS2(var, var2, test_vector) \
+ FOR_INPUTS2(int32_t, int32, var, var2, test_vector)
+
+#define FOR_UINT64_INPUTS(var, test_vector) \
+ FOR_INPUTS(uint64_t, uint32, var, test_vector)
template <typename RET_TYPE, typename IN_TYPE, typename Func>
RET_TYPE run_Cvt(IN_TYPE x, Func GenerateConvertInstructionFunc) {
@@ -444,7 +458,7 @@ RET_TYPE run_Cvt(IN_TYPE x, Func GenerateConvertInstructionFunc) {
TEST(cvt_s_w_Trunc_uw_s) {
CcTest::InitializeVM();
- FOR_UINT32_INPUTS(i) {
+ FOR_UINT32_INPUTS(i, cvt_trunc_uint32_test_values) {
uint32_t input = *i;
CHECK_EQ(static_cast<float>(input),
run_Cvt<uint32_t>(input, [](MacroAssembler* masm) {
@@ -456,7 +470,7 @@ TEST(cvt_s_w_Trunc_uw_s) {
TEST(cvt_d_w_Trunc_w_d) {
CcTest::InitializeVM();
- FOR_INT32_INPUTS(i) {
+ FOR_INT32_INPUTS(i, cvt_trunc_int32_test_values) {
int32_t input = *i;
CHECK_EQ(static_cast<double>(input),
run_Cvt<int32_t>(input, [](MacroAssembler* masm) {
@@ -466,4 +480,241 @@ TEST(cvt_d_w_Trunc_w_d) {
}
}
+template <typename IN_TYPE, typename Func>
+bool run_Unaligned(char* memory_buffer, int32_t in_offset, int32_t out_offset,
+ IN_TYPE value, Func GenerateUnalignedInstructionFunc) {
+ typedef int32_t (*F_CVT)(char* x0, int x1, int x2, int x3, int x4);
+
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler* masm = &assm;
+ IN_TYPE res;
+
+ GenerateUnalignedInstructionFunc(masm, in_offset, out_offset);
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+
+ F_CVT f = FUNCTION_CAST<F_CVT>(code->entry());
+
+ MemCopy(memory_buffer + in_offset, &value, sizeof(IN_TYPE));
+ CALL_GENERATED_CODE(isolate, f, memory_buffer, 0, 0, 0, 0);
+ MemCopy(&res, memory_buffer + out_offset, sizeof(IN_TYPE));
+
+ return res == value;
+}
+
+static const std::vector<uint64_t> unsigned_test_values() {
+ static const uint64_t kValues[] = {
+ 0x2180f18a06384414, 0x000a714532102277, 0xbc1acccf180649f0,
+ 0x8000000080008000, 0x0000000000000001, 0xffffffffffffffff,
+ };
+ return std::vector<uint64_t>(&kValues[0], &kValues[arraysize(kValues)]);
+}
+
+static const std::vector<int32_t> unsigned_test_offset() {
+ static const int32_t kValues[] = {// value, offset
+ -132 * KB, -21 * KB, 0, 19 * KB, 135 * KB};
+ return std::vector<int32_t>(&kValues[0], &kValues[arraysize(kValues)]);
+}
+
+static const std::vector<int32_t> unsigned_test_offset_increment() {
+ static const int32_t kValues[] = {-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5};
+ return std::vector<int32_t>(&kValues[0], &kValues[arraysize(kValues)]);
+}
+
+TEST(Ulh) {
+ CcTest::InitializeVM();
+
+ static const int kBufferSize = 300 * KB;
+ char memory_buffer[kBufferSize];
+ char* buffer_middle = memory_buffer + (kBufferSize / 2);
+
+ FOR_UINT64_INPUTS(i, unsigned_test_values) {
+ FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) {
+ FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) {
+ uint16_t value = static_cast<uint64_t>(*i & 0xFFFF);
+ int32_t in_offset = *j1 + *k1;
+ int32_t out_offset = *j2 + *k2;
+
+ CHECK_EQ(true, run_Unaligned<uint16_t>(
+ buffer_middle, in_offset, out_offset, value,
+ [](MacroAssembler* masm, int32_t in_offset,
+ int32_t out_offset) {
+ __ Ulh(v0, MemOperand(a0, in_offset));
+ __ Ush(v0, MemOperand(a0, out_offset), v0);
+ }));
+ CHECK_EQ(true, run_Unaligned<uint16_t>(
+ buffer_middle, in_offset, out_offset, value,
+ [](MacroAssembler* masm, int32_t in_offset,
+ int32_t out_offset) {
+ __ mov(t0, a0);
+ __ Ulh(a0, MemOperand(a0, in_offset));
+ __ Ush(a0, MemOperand(t0, out_offset), v0);
+ }));
+ CHECK_EQ(true, run_Unaligned<uint16_t>(
+ buffer_middle, in_offset, out_offset, value,
+ [](MacroAssembler* masm, int32_t in_offset,
+ int32_t out_offset) {
+ __ mov(t0, a0);
+ __ Ulhu(a0, MemOperand(a0, in_offset));
+ __ Ush(a0, MemOperand(t0, out_offset), t1);
+ }));
+ CHECK_EQ(true, run_Unaligned<uint16_t>(
+ buffer_middle, in_offset, out_offset, value,
+ [](MacroAssembler* masm, int32_t in_offset,
+ int32_t out_offset) {
+ __ Ulhu(v0, MemOperand(a0, in_offset));
+ __ Ush(v0, MemOperand(a0, out_offset), t1);
+ }));
+ }
+ }
+ }
+}
+
+TEST(Ulh_bitextension) {
+ CcTest::InitializeVM();
+
+ static const int kBufferSize = 300 * KB;
+ char memory_buffer[kBufferSize];
+ char* buffer_middle = memory_buffer + (kBufferSize / 2);
+
+ FOR_UINT64_INPUTS(i, unsigned_test_values) {
+ FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) {
+ FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) {
+ uint16_t value = static_cast<uint64_t>(*i & 0xFFFF);
+ int32_t in_offset = *j1 + *k1;
+ int32_t out_offset = *j2 + *k2;
+
+ CHECK_EQ(true, run_Unaligned<uint16_t>(
+ buffer_middle, in_offset, out_offset, value,
+ [](MacroAssembler* masm, int32_t in_offset,
+ int32_t out_offset) {
+ Label success, fail, end, different;
+ __ Ulh(t0, MemOperand(a0, in_offset));
+ __ Ulhu(t1, MemOperand(a0, in_offset));
+ __ Branch(&different, ne, t0, Operand(t1));
+
+ // If signed and unsigned values are same, check
+ // the upper bits to see if they are zero
+ __ sra(t0, t0, 15);
+ __ Branch(&success, eq, t0, Operand(zero_reg));
+ __ Branch(&fail);
+
+ // If signed and unsigned values are different,
+ // check that the upper bits are complementary
+ __ bind(&different);
+ __ sra(t1, t1, 15);
+ __ Branch(&fail, ne, t1, Operand(1));
+ __ sra(t0, t0, 15);
+ __ addiu(t0, t0, 1);
+ __ Branch(&fail, ne, t0, Operand(zero_reg));
+ // Fall through to success
+
+ __ bind(&success);
+ __ Ulh(t0, MemOperand(a0, in_offset));
+ __ Ush(t0, MemOperand(a0, out_offset), v0);
+ __ Branch(&end);
+ __ bind(&fail);
+ __ Ush(zero_reg, MemOperand(a0, out_offset), v0);
+ __ bind(&end);
+ }));
+ }
+ }
+ }
+}
+
+TEST(Ulw) {
+ CcTest::InitializeVM();
+
+ static const int kBufferSize = 300 * KB;
+ char memory_buffer[kBufferSize];
+ char* buffer_middle = memory_buffer + (kBufferSize / 2);
+
+ FOR_UINT64_INPUTS(i, unsigned_test_values) {
+ FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) {
+ FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) {
+ uint32_t value = static_cast<uint32_t>(*i & 0xFFFFFFFF);
+ int32_t in_offset = *j1 + *k1;
+ int32_t out_offset = *j2 + *k2;
+
+ CHECK_EQ(true, run_Unaligned<uint32_t>(
+ buffer_middle, in_offset, out_offset, value,
+ [](MacroAssembler* masm, int32_t in_offset,
+ int32_t out_offset) {
+ __ Ulw(v0, MemOperand(a0, in_offset));
+ __ Usw(v0, MemOperand(a0, out_offset));
+ }));
+ CHECK_EQ(true,
+ run_Unaligned<uint32_t>(
+ buffer_middle, in_offset, out_offset, (uint32_t)value,
+ [](MacroAssembler* masm, int32_t in_offset,
+ int32_t out_offset) {
+ __ mov(t0, a0);
+ __ Ulw(a0, MemOperand(a0, in_offset));
+ __ Usw(a0, MemOperand(t0, out_offset));
+ }));
+ }
+ }
+ }
+}
+
+TEST(Ulwc1) {
+ CcTest::InitializeVM();
+
+ static const int kBufferSize = 300 * KB;
+ char memory_buffer[kBufferSize];
+ char* buffer_middle = memory_buffer + (kBufferSize / 2);
+
+ FOR_UINT64_INPUTS(i, unsigned_test_values) {
+ FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) {
+ FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) {
+ float value = static_cast<float>(*i & 0xFFFFFFFF);
+ int32_t in_offset = *j1 + *k1;
+ int32_t out_offset = *j2 + *k2;
+
+ CHECK_EQ(true, run_Unaligned<float>(
+ buffer_middle, in_offset, out_offset, value,
+ [](MacroAssembler* masm, int32_t in_offset,
+ int32_t out_offset) {
+ __ Ulwc1(f0, MemOperand(a0, in_offset), t0);
+ __ Uswc1(f0, MemOperand(a0, out_offset), t0);
+ }));
+ }
+ }
+ }
+}
+
+TEST(Uldc1) {
+ CcTest::InitializeVM();
+
+ static const int kBufferSize = 300 * KB;
+ char memory_buffer[kBufferSize];
+ char* buffer_middle = memory_buffer + (kBufferSize / 2);
+
+ FOR_UINT64_INPUTS(i, unsigned_test_values) {
+ FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) {
+ FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) {
+ double value = static_cast<double>(*i);
+ int32_t in_offset = *j1 + *k1;
+ int32_t out_offset = *j2 + *k2;
+
+ CHECK_EQ(true, run_Unaligned<double>(
+ buffer_middle, in_offset, out_offset, value,
+ [](MacroAssembler* masm, int32_t in_offset,
+ int32_t out_offset) {
+ __ Uldc1(f0, MemOperand(a0, in_offset), t0);
+ __ Usdc1(f0, MemOperand(a0, out_offset), t0);
+ }));
+ }
+ }
+ }
+}
+
#undef __
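
Reviewer's note (not part of the patch): TEST(Ulh_bitextension) above checks, in generated MIPS code, that Ulh sign-extends the loaded halfword while Ulhu zero-extends it. A minimal host-side restatement of that property, written as portable C++ rather than the macro-assembler calls used in the test; it relies on arithmetic right shift for negative values, matching the sra instructions in the test:

#include <cstdint>

bool extension_property_holds(uint16_t halfword) {
  int32_t signed_load = static_cast<int16_t>(halfword);     // like Ulh
  uint32_t unsigned_load = static_cast<uint32_t>(halfword);  // like Ulhu

  if (static_cast<uint32_t>(signed_load) == unsigned_load) {
    // The two loads agree only when bit 15 is clear, i.e. the upper
    // 16 bits of both results are zero.
    return (signed_load >> 15) == 0;
  }
  // Otherwise bit 15 is set: the sign-extended copy has all upper bits set
  // and the zero-extended copy has them clear (the "complementary" case
  // the test branches on).
  return (signed_load >> 15) == -1 && (unsigned_load >> 15) == 1;
}
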
