Index: test/cctest/test-macro-assembler-mips64.cc |
diff --git a/test/cctest/test-macro-assembler-mips64.cc b/test/cctest/test-macro-assembler-mips64.cc |
index 873da5cd17d221ce4792db733511c4a381eb53cc..d9c93125ac5a3f56ec3af8d0bde0dc513e4a94d3 100644 |
--- a/test/cctest/test-macro-assembler-mips64.cc |
+++ b/test/cctest/test-macro-assembler-mips64.cc |
@@ -522,14 +522,14 @@ TEST(Dlsa) { |
} |
} |
-static const std::vector<uint32_t> uint32_test_values() { |
+static const std::vector<uint32_t> cvt_trunc_uint32_test_values() { |
static const uint32_t kValues[] = {0x00000000, 0x00000001, 0x00ffff00, |
0x7fffffff, 0x80000000, 0x80000001, |
0x80ffff00, 0x8fffffff, 0xffffffff}; |
return std::vector<uint32_t>(&kValues[0], &kValues[arraysize(kValues)]); |
} |
-static const std::vector<int32_t> int32_test_values() { |
+static const std::vector<int32_t> cvt_trunc_int32_test_values() { |
static const int32_t kValues[] = { |
static_cast<int32_t>(0x00000000), static_cast<int32_t>(0x00000001), |
static_cast<int32_t>(0x00ffff00), static_cast<int32_t>(0x7fffffff), |
@@ -539,7 +539,7 @@ static const std::vector<int32_t> int32_test_values() { |
return std::vector<int32_t>(&kValues[0], &kValues[arraysize(kValues)]); |
} |
-static const std::vector<uint64_t> uint64_test_values() { |
+static const std::vector<uint64_t> cvt_trunc_uint64_test_values() { |
static const uint64_t kValues[] = { |
0x0000000000000000, 0x0000000000000001, 0x0000ffffffff0000, |
0x7fffffffffffffff, 0x8000000000000000, 0x8000000000000001, |
@@ -547,7 +547,7 @@ static const std::vector<uint64_t> uint64_test_values() { |
return std::vector<uint64_t>(&kValues[0], &kValues[arraysize(kValues)]); |
} |
-static const std::vector<int64_t> int64_test_values() { |
+static const std::vector<int64_t> cvt_trunc_int64_test_values() { |
static const int64_t kValues[] = {static_cast<int64_t>(0x0000000000000000), |
static_cast<int64_t>(0x0000000000000001), |
static_cast<int64_t>(0x0000ffffffff0000), |
@@ -561,15 +561,28 @@ static const std::vector<int64_t> int64_test_values() { |
} |
// Helper macros that can be used in FOR_INT32_INPUTS(i) { ... *i ... } |
-#define FOR_INPUTS(ctype, itype, var) \ |
- std::vector<ctype> var##_vec = itype##_test_values(); \ |
+#define FOR_INPUTS(ctype, itype, var, test_vector) \ |
+ std::vector<ctype> var##_vec = test_vector(); \ |
for (std::vector<ctype>::iterator var = var##_vec.begin(); \ |
var != var##_vec.end(); ++var) |
-#define FOR_INT32_INPUTS(var) FOR_INPUTS(int32_t, int32, var) |
-#define FOR_INT64_INPUTS(var) FOR_INPUTS(int64_t, int64, var) |
-#define FOR_UINT32_INPUTS(var) FOR_INPUTS(uint32_t, uint32, var) |
-#define FOR_UINT64_INPUTS(var) FOR_INPUTS(uint64_t, uint64, var) |
+#define FOR_INPUTS2(ctype, itype, var, var2, test_vector) \ |
+ std::vector<ctype> var##_vec = test_vector(); \ |
+ std::vector<ctype>::iterator var; \ |
+ std::vector<ctype>::reverse_iterator var2; \ |
+ for (var = var##_vec.begin(), var2 = var##_vec.rbegin(); \ |
+ var != var##_vec.end(); ++var, ++var2) |
+ |
+#define FOR_INT32_INPUTS(var, test_vector) \ |
+ FOR_INPUTS(int32_t, int32, var, test_vector) |
+#define FOR_INT32_INPUTS2(var, var2, test_vector) \ |
+ FOR_INPUTS2(int32_t, int32, var, var2, test_vector) |
+#define FOR_INT64_INPUTS(var, test_vector) \ |
+ FOR_INPUTS(int64_t, int64, var, test_vector) |
+#define FOR_UINT32_INPUTS(var, test_vector) \ |
+ FOR_INPUTS(uint32_t, uint32, var, test_vector) |
+#define FOR_UINT64_INPUTS(var, test_vector) \ |
+ FOR_INPUTS(uint64_t, uint64, var, test_vector) |
template <typename RET_TYPE, typename IN_TYPE, typename Func> |
RET_TYPE run_Cvt(IN_TYPE x, Func GenerateConvertInstructionFunc) { |
@@ -599,7 +612,7 @@ RET_TYPE run_Cvt(IN_TYPE x, Func GenerateConvertInstructionFunc) { |
TEST(Cvt_s_uw_Trunc_uw_s) { |
CcTest::InitializeVM(); |
- FOR_UINT32_INPUTS(i) { |
+ FOR_UINT32_INPUTS(i, cvt_trunc_uint32_test_values) { |
uint32_t input = *i; |
CHECK_EQ(static_cast<float>(input), |
run_Cvt<uint64_t>(input, [](MacroAssembler* masm) { |
@@ -611,7 +624,7 @@ TEST(Cvt_s_uw_Trunc_uw_s) { |
TEST(Cvt_s_ul_Trunc_ul_s) { |
CcTest::InitializeVM(); |
- FOR_UINT64_INPUTS(i) { |
+ FOR_UINT64_INPUTS(i, cvt_trunc_uint64_test_values) { |
uint64_t input = *i; |
CHECK_EQ(static_cast<float>(input), |
run_Cvt<uint64_t>(input, [](MacroAssembler* masm) { |
@@ -623,7 +636,7 @@ TEST(Cvt_s_ul_Trunc_ul_s) { |
TEST(Cvt_d_ul_Trunc_ul_d) { |
CcTest::InitializeVM(); |
- FOR_UINT64_INPUTS(i) { |
+ FOR_UINT64_INPUTS(i, cvt_trunc_uint64_test_values) { |
uint64_t input = *i; |
CHECK_EQ(static_cast<double>(input), |
run_Cvt<uint64_t>(input, [](MacroAssembler* masm) { |
@@ -635,7 +648,7 @@ TEST(Cvt_d_ul_Trunc_ul_d) { |
TEST(cvt_d_l_Trunc_l_d) { |
CcTest::InitializeVM(); |
- FOR_INT64_INPUTS(i) { |
+ FOR_INT64_INPUTS(i, cvt_trunc_int64_test_values) { |
int64_t input = *i; |
CHECK_EQ(static_cast<double>(input), |
run_Cvt<int64_t>(input, [](MacroAssembler* masm) { |
@@ -648,7 +661,7 @@ TEST(cvt_d_l_Trunc_l_d) { |
TEST(cvt_d_l_Trunc_l_ud) { |
CcTest::InitializeVM(); |
- FOR_INT64_INPUTS(i) { |
+ FOR_INT64_INPUTS(i, cvt_trunc_int64_test_values) { |
int64_t input = *i; |
uint64_t abs_input = (input < 0) ? -input : input; |
CHECK_EQ(static_cast<double>(abs_input), |
@@ -662,7 +675,7 @@ TEST(cvt_d_l_Trunc_l_ud) { |
TEST(cvt_d_w_Trunc_w_d) { |
CcTest::InitializeVM(); |
- FOR_INT32_INPUTS(i) { |
+ FOR_INT32_INPUTS(i, cvt_trunc_int32_test_values) { |
int32_t input = *i; |
CHECK_EQ(static_cast<double>(input), |
run_Cvt<int64_t>(input, [](MacroAssembler* masm) { |
@@ -675,4 +688,344 @@ TEST(cvt_d_w_Trunc_w_d) { |
} |
} |
+template <typename IN_TYPE, typename Func> |
+bool run_Unaligned(char* memory_buffer, int32_t in_offset, int32_t out_offset, |
+ IN_TYPE value, Func GenerateUnalignedInstructionFunc) { |
+ typedef int32_t (*F_CVT)(char* x0, int x1, int x2, int x3, int x4); |
+ |
+ Isolate* isolate = CcTest::i_isolate(); |
+ HandleScope scope(isolate); |
+ MacroAssembler assm(isolate, nullptr, 0, |
+ v8::internal::CodeObjectRequired::kYes); |
+ MacroAssembler* masm = &assm; |
+ IN_TYPE res; |
+ |
+ GenerateUnalignedInstructionFunc(masm, in_offset, out_offset); |
+ __ jr(ra); |
+ __ nop(); |
+ |
+ CodeDesc desc; |
+ assm.GetCode(&desc); |
+ Handle<Code> code = isolate->factory()->NewCode( |
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); |
+ |
+ F_CVT f = FUNCTION_CAST<F_CVT>(code->entry()); |
+ |
+ MemCopy(memory_buffer + in_offset, &value, sizeof(IN_TYPE)); |
+ CALL_GENERATED_CODE(isolate, f, memory_buffer, 0, 0, 0, 0); |
+ MemCopy(&res, memory_buffer + out_offset, sizeof(IN_TYPE)); |
+ |
+ return res == value; |
+} |
+ |
+static const std::vector<uint64_t> unsigned_test_values() { |
+ static const uint64_t kValues[] = { |
+ 0x2180f18a06384414, 0x000a714532102277, 0xbc1acccf180649f0, |
+ 0x8000000080008000, 0x0000000000000001, 0xffffffffffffffff, |
+ }; |
+ return std::vector<uint64_t>(&kValues[0], &kValues[arraysize(kValues)]); |
+} |
+ |
+static const std::vector<int32_t> unsigned_test_offset() { |
+ static const int32_t kValues[] = {// value, offset |
+ -132 * KB, -21 * KB, 0, 19 * KB, 135 * KB}; |
+ return std::vector<int32_t>(&kValues[0], &kValues[arraysize(kValues)]); |
+} |
+ |
+static const std::vector<int32_t> unsigned_test_offset_increment() { |
+ static const int32_t kValues[] = {-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5}; |
+ return std::vector<int32_t>(&kValues[0], &kValues[arraysize(kValues)]); |
+} |
+ |
+TEST(Ulh) { |
+ CcTest::InitializeVM(); |
+ |
+ static const int kBufferSize = 300 * KB; |
+ char memory_buffer[kBufferSize]; |
+ char* buffer_middle = memory_buffer + (kBufferSize / 2); |
+ |
+ FOR_UINT64_INPUTS(i, unsigned_test_values) { |
+ FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) { |
+ FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) { |
+        uint16_t value = static_cast<uint16_t>(*i & 0xFFFF); |
+ int32_t in_offset = *j1 + *k1; |
+ int32_t out_offset = *j2 + *k2; |
+ |
+ CHECK_EQ(true, run_Unaligned<uint16_t>( |
+ buffer_middle, in_offset, out_offset, value, |
+ [](MacroAssembler* masm, int32_t in_offset, |
+ int32_t out_offset) { |
+ __ Ulh(v0, MemOperand(a0, in_offset)); |
+ __ Ush(v0, MemOperand(a0, out_offset), v0); |
+ })); |
+ CHECK_EQ(true, run_Unaligned<uint16_t>( |
+ buffer_middle, in_offset, out_offset, value, |
+ [](MacroAssembler* masm, int32_t in_offset, |
+ int32_t out_offset) { |
+ __ mov(t0, a0); |
+ __ Ulh(a0, MemOperand(a0, in_offset)); |
+ __ Ush(a0, MemOperand(t0, out_offset), v0); |
+ })); |
+ CHECK_EQ(true, run_Unaligned<uint16_t>( |
+ buffer_middle, in_offset, out_offset, value, |
+ [](MacroAssembler* masm, int32_t in_offset, |
+ int32_t out_offset) { |
+ __ mov(t0, a0); |
+ __ Ulhu(a0, MemOperand(a0, in_offset)); |
+ __ Ush(a0, MemOperand(t0, out_offset), t1); |
+ })); |
+ CHECK_EQ(true, run_Unaligned<uint16_t>( |
+ buffer_middle, in_offset, out_offset, value, |
+ [](MacroAssembler* masm, int32_t in_offset, |
+ int32_t out_offset) { |
+ __ Ulhu(v0, MemOperand(a0, in_offset)); |
+ __ Ush(v0, MemOperand(a0, out_offset), t1); |
+ })); |
+ } |
+ } |
+ } |
+} |
+ |
+TEST(Ulh_bitextension) { |
+ CcTest::InitializeVM(); |
+ |
+ static const int kBufferSize = 300 * KB; |
+ char memory_buffer[kBufferSize]; |
+ char* buffer_middle = memory_buffer + (kBufferSize / 2); |
+ |
+ FOR_UINT64_INPUTS(i, unsigned_test_values) { |
+ FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) { |
+ FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) { |
+        uint16_t value = static_cast<uint16_t>(*i & 0xFFFF); |
+ int32_t in_offset = *j1 + *k1; |
+ int32_t out_offset = *j2 + *k2; |
+ |
+ CHECK_EQ(true, run_Unaligned<uint16_t>( |
+ buffer_middle, in_offset, out_offset, value, |
+ [](MacroAssembler* masm, int32_t in_offset, |
+ int32_t out_offset) { |
+ Label success, fail, end, different; |
+ __ Ulh(t0, MemOperand(a0, in_offset)); |
+ __ Ulhu(t1, MemOperand(a0, in_offset)); |
+ __ Branch(&different, ne, t0, Operand(t1)); |
+ |
+ // If signed and unsigned values are same, check |
+ // the upper bits to see if they are zero |
+ __ sra(t0, t0, 15); |
+ __ Branch(&success, eq, t0, Operand(zero_reg)); |
+ __ Branch(&fail); |
+ |
+ // If signed and unsigned values are different, |
+ // check that the upper bits are complementary |
+ __ bind(&different); |
+ __ sra(t1, t1, 15); |
+ __ Branch(&fail, ne, t1, Operand(1)); |
+ __ sra(t0, t0, 15); |
+ __ addiu(t0, t0, 1); |
+ __ Branch(&fail, ne, t0, Operand(zero_reg)); |
+ // Fall through to success |
+ |
+ __ bind(&success); |
+ __ Ulh(t0, MemOperand(a0, in_offset)); |
+ __ Ush(t0, MemOperand(a0, out_offset), v0); |
+ __ Branch(&end); |
+ __ bind(&fail); |
+ __ Ush(zero_reg, MemOperand(a0, out_offset), v0); |
+ __ bind(&end); |
+ })); |
+ } |
+ } |
+ } |
+} |
+ |
+TEST(Ulw) { |
+ CcTest::InitializeVM(); |
+ |
+ static const int kBufferSize = 300 * KB; |
+ char memory_buffer[kBufferSize]; |
+ char* buffer_middle = memory_buffer + (kBufferSize / 2); |
+ |
+ FOR_UINT64_INPUTS(i, unsigned_test_values) { |
+ FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) { |
+ FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) { |
+ uint32_t value = static_cast<uint32_t>(*i & 0xFFFFFFFF); |
+ int32_t in_offset = *j1 + *k1; |
+ int32_t out_offset = *j2 + *k2; |
+ |
+ CHECK_EQ(true, run_Unaligned<uint32_t>( |
+ buffer_middle, in_offset, out_offset, value, |
+ [](MacroAssembler* masm, int32_t in_offset, |
+ int32_t out_offset) { |
+ __ Ulw(v0, MemOperand(a0, in_offset)); |
+ __ Usw(v0, MemOperand(a0, out_offset)); |
+ })); |
+ CHECK_EQ(true, |
+ run_Unaligned<uint32_t>( |
+                     buffer_middle, in_offset, out_offset, value, |
+ [](MacroAssembler* masm, int32_t in_offset, |
+ int32_t out_offset) { |
+ __ mov(t0, a0); |
+ __ Ulw(a0, MemOperand(a0, in_offset)); |
+ __ Usw(a0, MemOperand(t0, out_offset)); |
+ })); |
+ CHECK_EQ(true, run_Unaligned<uint32_t>( |
+ buffer_middle, in_offset, out_offset, value, |
+ [](MacroAssembler* masm, int32_t in_offset, |
+ int32_t out_offset) { |
+ __ Ulwu(v0, MemOperand(a0, in_offset)); |
+ __ Usw(v0, MemOperand(a0, out_offset)); |
+ })); |
+ CHECK_EQ(true, |
+ run_Unaligned<uint32_t>( |
+                     buffer_middle, in_offset, out_offset, value, |
+ [](MacroAssembler* masm, int32_t in_offset, |
+ int32_t out_offset) { |
+ __ mov(t0, a0); |
+ __ Ulwu(a0, MemOperand(a0, in_offset)); |
+ __ Usw(a0, MemOperand(t0, out_offset)); |
+ })); |
+ } |
+ } |
+ } |
+} |
+ |
+TEST(Ulw_extension) { |
+ CcTest::InitializeVM(); |
+ |
+ static const int kBufferSize = 300 * KB; |
+ char memory_buffer[kBufferSize]; |
+ char* buffer_middle = memory_buffer + (kBufferSize / 2); |
+ |
+ FOR_UINT64_INPUTS(i, unsigned_test_values) { |
+ FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) { |
+ FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) { |
+ uint32_t value = static_cast<uint32_t>(*i & 0xFFFFFFFF); |
+ int32_t in_offset = *j1 + *k1; |
+ int32_t out_offset = *j2 + *k2; |
+ |
+ CHECK_EQ(true, run_Unaligned<uint32_t>( |
+ buffer_middle, in_offset, out_offset, value, |
+ [](MacroAssembler* masm, int32_t in_offset, |
+ int32_t out_offset) { |
+ Label success, fail, end, different; |
+ __ Ulw(t0, MemOperand(a0, in_offset)); |
+ __ Ulwu(t1, MemOperand(a0, in_offset)); |
+ __ Branch(&different, ne, t0, Operand(t1)); |
+ |
+ // If signed and unsigned values are same, check |
+ // the upper bits to see if they are zero |
+ __ dsra(t0, t0, 31); |
+ __ Branch(&success, eq, t0, Operand(zero_reg)); |
+ __ Branch(&fail); |
+ |
+ // If signed and unsigned values are different, |
+ // check that the upper bits are complementary |
+ __ bind(&different); |
+ __ dsra(t1, t1, 31); |
+ __ Branch(&fail, ne, t1, Operand(1)); |
+ __ dsra(t0, t0, 31); |
+ __ daddiu(t0, t0, 1); |
+ __ Branch(&fail, ne, t0, Operand(zero_reg)); |
+ // Fall through to success |
+ |
+ __ bind(&success); |
+ __ Ulw(t0, MemOperand(a0, in_offset)); |
+ __ Usw(t0, MemOperand(a0, out_offset)); |
+ __ Branch(&end); |
+ __ bind(&fail); |
+ __ Usw(zero_reg, MemOperand(a0, out_offset)); |
+ __ bind(&end); |
+ })); |
+ } |
+ } |
+ } |
+} |
+ |
+TEST(Uld) { |
+ CcTest::InitializeVM(); |
+ |
+ static const int kBufferSize = 300 * KB; |
+ char memory_buffer[kBufferSize]; |
+ char* buffer_middle = memory_buffer + (kBufferSize / 2); |
+ |
+ FOR_UINT64_INPUTS(i, unsigned_test_values) { |
+ FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) { |
+ FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) { |
+ uint64_t value = *i; |
+ int32_t in_offset = *j1 + *k1; |
+ int32_t out_offset = *j2 + *k2; |
+ |
+ CHECK_EQ(true, run_Unaligned<uint64_t>( |
+ buffer_middle, in_offset, out_offset, value, |
+ [](MacroAssembler* masm, int32_t in_offset, |
+ int32_t out_offset) { |
+ __ Uld(v0, MemOperand(a0, in_offset)); |
+ __ Usd(v0, MemOperand(a0, out_offset)); |
+ })); |
+ CHECK_EQ(true, |
+ run_Unaligned<uint64_t>( |
+                     buffer_middle, in_offset, out_offset, value, |
+ [](MacroAssembler* masm, int32_t in_offset, |
+ int32_t out_offset) { |
+ __ mov(t0, a0); |
+ __ Uld(a0, MemOperand(a0, in_offset)); |
+ __ Usd(a0, MemOperand(t0, out_offset)); |
+ })); |
+ } |
+ } |
+ } |
+} |
+ |
+TEST(Ulwc1) { |
+ CcTest::InitializeVM(); |
+ |
+ static const int kBufferSize = 300 * KB; |
+ char memory_buffer[kBufferSize]; |
+ char* buffer_middle = memory_buffer + (kBufferSize / 2); |
+ |
+ FOR_UINT64_INPUTS(i, unsigned_test_values) { |
+ FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) { |
+ FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) { |
+ float value = static_cast<float>(*i & 0xFFFFFFFF); |
+ int32_t in_offset = *j1 + *k1; |
+ int32_t out_offset = *j2 + *k2; |
+ |
+ CHECK_EQ(true, run_Unaligned<float>( |
+ buffer_middle, in_offset, out_offset, value, |
+ [](MacroAssembler* masm, int32_t in_offset, |
+ int32_t out_offset) { |
+ __ Ulwc1(f0, MemOperand(a0, in_offset), t0); |
+ __ Uswc1(f0, MemOperand(a0, out_offset), t0); |
+ })); |
+ } |
+ } |
+ } |
+} |
+ |
+TEST(Uldc1) { |
+ CcTest::InitializeVM(); |
+ |
+ static const int kBufferSize = 300 * KB; |
+ char memory_buffer[kBufferSize]; |
+ char* buffer_middle = memory_buffer + (kBufferSize / 2); |
+ |
+ FOR_UINT64_INPUTS(i, unsigned_test_values) { |
+ FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) { |
+ FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) { |
+ double value = static_cast<double>(*i); |
+ int32_t in_offset = *j1 + *k1; |
+ int32_t out_offset = *j2 + *k2; |
+ |
+ CHECK_EQ(true, run_Unaligned<double>( |
+ buffer_middle, in_offset, out_offset, value, |
+ [](MacroAssembler* masm, int32_t in_offset, |
+ int32_t out_offset) { |
+ __ Uldc1(f0, MemOperand(a0, in_offset), t0); |
+ __ Usdc1(f0, MemOperand(a0, out_offset), t0); |
+ })); |
+ } |
+ } |
+ } |
+} |
+ |
#undef __ |