Index: source/libvpx/test/variance_test.cc
===================================================================
--- source/libvpx/test/variance_test.cc (revision 291857)
+++ source/libvpx/test/variance_test.cc (working copy)
@@ -35,6 +35,14 @@
 using ::std::tr1::tuple;
 using libvpx_test::ACMRandom;
 
+static unsigned int mb_ss_ref(const int16_t *src) {
+  unsigned int res = 0;
+  for (int i = 0; i < 256; ++i) {
+    res += src[i] * src[i];
+  }
+  return res;
+}
+
 static unsigned int variance_ref(const uint8_t *ref, const uint8_t *src,
                                  int l2w, int l2h, unsigned int *sse_ptr) {
   int se = 0;
@@ -76,6 +84,50 @@
   return sse - (((int64_t) se * se) >> (l2w + l2h));
 }
 
+typedef unsigned int (*SumOfSquaresFunction)(const int16_t *src);
+
+class SumOfSquaresTest : public ::testing::TestWithParam<SumOfSquaresFunction> {
+ public:
+  SumOfSquaresTest() : func_(GetParam()) {}
+
+  virtual ~SumOfSquaresTest() {
+    libvpx_test::ClearSystemState();
+  }
+
+ protected:
+  void ConstTest();
+  void RefTest();
+
+  SumOfSquaresFunction func_;
+  ACMRandom rnd_;
+};
+
+void SumOfSquaresTest::ConstTest() {
+  int16_t mem[256];
+  unsigned int res;
+  for (int v = 0; v < 256; ++v) {
+    for (int i = 0; i < 256; ++i) {
+      mem[i] = v;
+    }
+    ASM_REGISTER_STATE_CHECK(res = func_(mem));
+    EXPECT_EQ(256u * (v * v), res);
+  }
+}
+
+void SumOfSquaresTest::RefTest() {
+  int16_t mem[256];
+  for (int i = 0; i < 100; ++i) {
+    for (int j = 0; j < 256; ++j) {
+      mem[j] = rnd_.Rand8() - rnd_.Rand8();
+    }
+
+    const unsigned int expected = mb_ss_ref(mem);
+    unsigned int res;
+    ASM_REGISTER_STATE_CHECK(res = func_(mem));
+    EXPECT_EQ(expected, res);
+  }
+}
+
 template<typename VarianceFunctionType>
 class VarianceTest
     : public ::testing::TestWithParam<tuple<int, int, VarianceFunctionType> > {
@@ -88,7 +140,7 @@
     height_ = 1 << log2height_;
     variance_ = get<2>(params);
 
-    rnd(ACMRandom::DeterministicSeed());
+    rnd_.Reset(ACMRandom::DeterministicSeed());
     block_size_ = width_ * height_;
     src_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_));
     ref_ = new uint8_t[block_size_];
@@ -107,7 +159,7 @@
   void RefTest();
   void OneQuarterTest();
 
-  ACMRandom rnd;
+  ACMRandom rnd_;
   uint8_t* src_;
   uint8_t* ref_;
   int width_, log2width_;
@@ -135,8 +187,8 @@
 void VarianceTest<VarianceFunctionType>::RefTest() {
   for (int i = 0; i < 10; ++i) {
     for (int j = 0; j < block_size_; j++) {
-      src_[j] = rnd.Rand8();
-      ref_[j] = rnd.Rand8();
+      src_[j] = rnd_.Rand8();
+      ref_[j] = rnd_.Rand8();
     }
     unsigned int sse1, sse2;
     unsigned int var1;
@@ -206,7 +258,7 @@
     height_ = 1 << log2height_;
     subpel_variance_ = get<2>(params);
 
-    rnd(ACMRandom::DeterministicSeed());
+    rnd_.Reset(ACMRandom::DeterministicSeed());
     block_size_ = width_ * height_;
     src_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_));
     sec_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_));
@@ -226,7 +278,7 @@
  protected:
   void RefTest();
 
-  ACMRandom rnd;
+  ACMRandom rnd_;
   uint8_t *src_;
   uint8_t *ref_;
   uint8_t *sec_;
@@ -241,10 +293,10 @@
   for (int x = 0; x < 16; ++x) {
     for (int y = 0; y < 16; ++y) {
       for (int j = 0; j < block_size_; j++) {
-        src_[j] = rnd.Rand8();
+        src_[j] = rnd_.Rand8();
       }
       for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
-        ref_[j] = rnd.Rand8();
+        ref_[j] = rnd_.Rand8();
       }
       unsigned int sse1, sse2;
       unsigned int var1;
@@ -263,11 +315,11 @@
   for (int x = 0; x < 16; ++x) {
     for (int y = 0; y < 16; ++y) {
       for (int j = 0; j < block_size_; j++) {
-        src_[j] = rnd.Rand8();
-        sec_[j] = rnd.Rand8();
+        src_[j] = rnd_.Rand8();
+        sec_[j] = rnd_.Rand8();
       }
       for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
-        ref_[j] = rnd.Rand8();
+        ref_[j] = rnd_.Rand8();
       }
       unsigned int sse1, sse2;
       unsigned int var1;
@@ -362,6 +414,13 @@
 namespace vp9 {
 
 #if CONFIG_VP9_ENCODER
+
+TEST_P(SumOfSquaresTest, Const) { ConstTest(); }
+TEST_P(SumOfSquaresTest, Ref) { RefTest(); }
+
+INSTANTIATE_TEST_CASE_P(C, SumOfSquaresTest,
+                        ::testing::Values(vp9_get_mb_ss_c));
+
 typedef VarianceTest<vp9_variance_fn_t> VP9VarianceTest;
 typedef SubpelVarianceTest<vp9_subpixvariance_fn_t> VP9SubpelVarianceTest;
 typedef SubpelVarianceTest<vp9_subp_avg_variance_fn_t> VP9SubpelAvgVarianceTest;
@@ -485,23 +544,12 @@
                       make_tuple(6, 5, subpel_avg_variance64x32_c),
                       make_tuple(6, 6, subpel_avg_variance64x64_c)));
 
-#if HAVE_MMX
-const vp9_variance_fn_t variance4x4_mmx = vp9_variance4x4_mmx;
-const vp9_variance_fn_t variance8x8_mmx = vp9_variance8x8_mmx;
-const vp9_variance_fn_t variance8x16_mmx = vp9_variance8x16_mmx;
-const vp9_variance_fn_t variance16x8_mmx = vp9_variance16x8_mmx;
-const vp9_variance_fn_t variance16x16_mmx = vp9_variance16x16_mmx;
-INSTANTIATE_TEST_CASE_P(
-    MMX, VP9VarianceTest,
-    ::testing::Values(make_tuple(2, 2, variance4x4_mmx),
-                      make_tuple(3, 3, variance8x8_mmx),
-                      make_tuple(3, 4, variance8x16_mmx),
-                      make_tuple(4, 3, variance16x8_mmx),
-                      make_tuple(4, 4, variance16x16_mmx)));
-#endif
-
 #if HAVE_SSE2
 #if CONFIG_USE_X86INC
+
+INSTANTIATE_TEST_CASE_P(SSE2, SumOfSquaresTest,
+                        ::testing::Values(vp9_get_mb_ss_sse2));
+
 const vp9_variance_fn_t variance4x4_sse2 = vp9_variance4x4_sse2;
 const vp9_variance_fn_t variance4x8_sse2 = vp9_variance4x8_sse2;
 const vp9_variance_fn_t variance8x4_sse2 = vp9_variance8x4_sse2;