Chromium Code Reviews

Side by Side Diff: source/libvpx/test/variance_test.cc

Issue 756673003: libvpx: Pull from upstream (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/deps/third_party/libvpx/
Patch Set: Created 6 years ago
OLD | NEW
1 /* 1 /*
2 * Copyright (c) 2012 The WebM project authors. All Rights Reserved. 2 * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
3 * 3 *
4 * Use of this source code is governed by a BSD-style license 4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source 5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found 6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may 7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree. 8 * be found in the AUTHORS file in the root of the source tree.
9 */ 9 */
10 #include <stdlib.h> 10
11 #include <cstdlib>
11 #include <new> 12 #include <new>
12 13
14 #include "test/acm_random.h"
15 #include "test/clear_system_state.h"
16 #include "test/register_state_check.h"
13 #include "third_party/googletest/src/include/gtest/gtest.h" 17 #include "third_party/googletest/src/include/gtest/gtest.h"
14 18
15 #include "test/clear_system_state.h" 19 #include "./vpx_config.h"
16 #include "test/register_state_check.h" 20 #include "vpx/vpx_codec.h"
17
18 #include "vpx/vpx_integer.h" 21 #include "vpx/vpx_integer.h"
19 #include "./vpx_config.h"
20 #include "vpx_mem/vpx_mem.h" 22 #include "vpx_mem/vpx_mem.h"
21 #if CONFIG_VP8_ENCODER 23 #if CONFIG_VP8_ENCODER
22 # include "./vp8_rtcd.h" 24 # include "./vp8_rtcd.h"
23 # include "vp8/common/variance.h" 25 # include "vp8/common/variance.h"
24 #endif 26 #endif
25 #if CONFIG_VP9_ENCODER 27 #if CONFIG_VP9_ENCODER
26 # include "./vp9_rtcd.h" 28 # include "./vp9_rtcd.h"
27 # include "vp9/encoder/vp9_variance.h" 29 # include "vp9/encoder/vp9_variance.h"
28 #endif 30 #endif
29 #include "test/acm_random.h"
30 31
31 namespace { 32 namespace {
32 33
33 using ::std::tr1::get; 34 using ::std::tr1::get;
34 using ::std::tr1::make_tuple; 35 using ::std::tr1::make_tuple;
35 using ::std::tr1::tuple; 36 using ::std::tr1::tuple;
36 using libvpx_test::ACMRandom; 37 using libvpx_test::ACMRandom;
37 38
38 static unsigned int mb_ss_ref(const int16_t *src) { 39 static unsigned int mb_ss_ref(const int16_t *src) {
39 unsigned int res = 0; 40 unsigned int res = 0;
40 for (int i = 0; i < 256; ++i) { 41 for (int i = 0; i < 256; ++i) {
41 res += src[i] * src[i]; 42 res += src[i] * src[i];
42 } 43 }
43 return res; 44 return res;
44 } 45 }
45 46
46 static unsigned int variance_ref(const uint8_t *ref, const uint8_t *src, 47 static unsigned int variance_ref(const uint8_t *src, const uint8_t *ref,
47 int l2w, int l2h, unsigned int *sse_ptr) { 48 int l2w, int l2h, int src_stride_coeff,
49 int ref_stride_coeff, uint32_t *sse_ptr,
50 bool use_high_bit_depth_,
51 vpx_bit_depth_t bit_depth) {
52 #if CONFIG_VP9_HIGHBITDEPTH
53 int64_t se = 0;
54 uint64_t sse = 0;
55 const int w = 1 << l2w;
56 const int h = 1 << l2h;
57 for (int y = 0; y < h; y++) {
58 for (int x = 0; x < w; x++) {
59 int diff;
60 if (!use_high_bit_depth_) {
61 diff = ref[w * y * ref_stride_coeff + x] -
62 src[w * y * src_stride_coeff + x];
63 se += diff;
64 sse += diff * diff;
65 } else {
66 diff = CONVERT_TO_SHORTPTR(ref)[w * y * ref_stride_coeff + x] -
67 CONVERT_TO_SHORTPTR(src)[w * y * src_stride_coeff + x];
68 se += diff;
69 sse += diff * diff;
70 }
71 }
72 }
73 if (bit_depth > VPX_BITS_8) {
74 sse = ROUND_POWER_OF_TWO(sse, 2 * (bit_depth - 8));
75 se = ROUND_POWER_OF_TWO(se, bit_depth - 8);
76 }
77 #else
48 int se = 0; 78 int se = 0;
49 unsigned int sse = 0; 79 unsigned int sse = 0;
50 const int w = 1 << l2w, h = 1 << l2h; 80 const int w = 1 << l2w;
81 const int h = 1 << l2h;
51 for (int y = 0; y < h; y++) { 82 for (int y = 0; y < h; y++) {
52 for (int x = 0; x < w; x++) { 83 for (int x = 0; x < w; x++) {
53 int diff = ref[w * y + x] - src[w * y + x]; 84 int diff = ref[w * y * ref_stride_coeff + x] -
85 src[w * y * src_stride_coeff + x];
54 se += diff; 86 se += diff;
55 sse += diff * diff; 87 sse += diff * diff;
56 } 88 }
57 } 89 }
90 #endif // CONFIG_VP9_HIGHBITDEPTH
58 *sse_ptr = sse; 91 *sse_ptr = sse;
59 return sse - (((int64_t) se * se) >> (l2w + l2h)); 92 return sse - (((int64_t) se * se) >> (l2w + l2h));
60 } 93 }
61 94
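For reference, variance_ref above implements the usual variance identity on the per-pixel differences d = ref - src over a 2^l2w by 2^l2h block:

\[ \mathrm{var} = \sum_{y,x} d_{y,x}^2 \;-\; \frac{\bigl(\sum_{y,x} d_{y,x}\bigr)^2}{2^{\,l2w+l2h}} \]

which is exactly sse - (((int64_t) se * se) >> (l2w + l2h)). When bit_depth exceeds VPX_BITS_8, sse and se are first rescaled to the 8-bit range with ROUND_POWER_OF_TWO(sse, 2 * (bit_depth - 8)) and ROUND_POWER_OF_TWO(se, bit_depth - 8), keeping the high-bit-depth reference comparable to the 8-bit path.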
62 static unsigned int subpel_variance_ref(const uint8_t *ref, const uint8_t *src, 95 static unsigned int subpel_variance_ref(const uint8_t *ref, const uint8_t *src,
63 int l2w, int l2h, int xoff, int yoff, 96 int l2w, int l2h, int xoff, int yoff,
64 unsigned int *sse_ptr) { 97 unsigned int *sse_ptr,
98 bool use_high_bit_depth_,
99 vpx_bit_depth_t bit_depth) {
100 #if CONFIG_VP9_HIGHBITDEPTH
101 int64_t se = 0;
102 uint64_t sse = 0;
103 const int w = 1 << l2w;
104 const int h = 1 << l2h;
105 for (int y = 0; y < h; y++) {
106 for (int x = 0; x < w; x++) {
107 // Bilinear interpolation at a 16th pel step.
108 if (!use_high_bit_depth_) {
109 const int a1 = ref[(w + 1) * (y + 0) + x + 0];
110 const int a2 = ref[(w + 1) * (y + 0) + x + 1];
111 const int b1 = ref[(w + 1) * (y + 1) + x + 0];
112 const int b2 = ref[(w + 1) * (y + 1) + x + 1];
113 const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
114 const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
115 const int r = a + (((b - a) * yoff + 8) >> 4);
116 const int diff = r - src[w * y + x];
117 se += diff;
118 sse += diff * diff;
119 } else {
120 uint16_t *ref16 = CONVERT_TO_SHORTPTR(ref);
121 uint16_t *src16 = CONVERT_TO_SHORTPTR(src);
122 const int a1 = ref16[(w + 1) * (y + 0) + x + 0];
123 const int a2 = ref16[(w + 1) * (y + 0) + x + 1];
124 const int b1 = ref16[(w + 1) * (y + 1) + x + 0];
125 const int b2 = ref16[(w + 1) * (y + 1) + x + 1];
126 const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
127 const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
128 const int r = a + (((b - a) * yoff + 8) >> 4);
129 const int diff = r - src16[w * y + x];
130 se += diff;
131 sse += diff * diff;
132 }
133 }
134 }
135 if (bit_depth > VPX_BITS_8) {
136 sse = ROUND_POWER_OF_TWO(sse, 2 * (bit_depth - 8));
137 se = ROUND_POWER_OF_TWO(se, bit_depth - 8);
138 }
139 #else
65 int se = 0; 140 int se = 0;
66 unsigned int sse = 0; 141 unsigned int sse = 0;
67 const int w = 1 << l2w, h = 1 << l2h; 142 const int w = 1 << l2w;
143 const int h = 1 << l2h;
68 for (int y = 0; y < h; y++) { 144 for (int y = 0; y < h; y++) {
69 for (int x = 0; x < w; x++) { 145 for (int x = 0; x < w; x++) {
70 // bilinear interpolation at a 16th pel step 146 // Bilinear interpolation at a 16th pel step.
71 const int a1 = ref[(w + 1) * (y + 0) + x + 0]; 147 const int a1 = ref[(w + 1) * (y + 0) + x + 0];
72 const int a2 = ref[(w + 1) * (y + 0) + x + 1]; 148 const int a2 = ref[(w + 1) * (y + 0) + x + 1];
73 const int b1 = ref[(w + 1) * (y + 1) + x + 0]; 149 const int b1 = ref[(w + 1) * (y + 1) + x + 0];
74 const int b2 = ref[(w + 1) * (y + 1) + x + 1]; 150 const int b2 = ref[(w + 1) * (y + 1) + x + 1];
75 const int a = a1 + (((a2 - a1) * xoff + 8) >> 4); 151 const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
76 const int b = b1 + (((b2 - b1) * xoff + 8) >> 4); 152 const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
77 const int r = a + (((b - a) * yoff + 8) >> 4); 153 const int r = a + (((b - a) * yoff + 8) >> 4);
78 int diff = r - src[w * y + x]; 154 const int diff = r - src[w * y + x];
79 se += diff; 155 se += diff;
80 sse += diff * diff; 156 sse += diff * diff;
81 } 157 }
82 } 158 }
159 #endif // CONFIG_VP9_HIGHBITDEPTH
83 *sse_ptr = sse; 160 *sse_ptr = sse;
84 return sse - (((int64_t) se * se) >> (l2w + l2h)); 161 return sse - (((int64_t) se * se) >> (l2w + l2h));
85 } 162 }
86 163
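The 16th-pel bilinear filter in subpel_variance_ref combines the four neighboring reference pixels a1, a2, b1, b2 using offsets xoff, yoff in [0, 15]; each "+ 8) >> 4" rounds to nearest. Written out, the interpolated sample is approximately

\[ r \approx \Bigl(1-\tfrac{xoff}{16}\Bigr)\Bigl(1-\tfrac{yoff}{16}\Bigr)a_1 + \tfrac{xoff}{16}\Bigl(1-\tfrac{yoff}{16}\Bigr)a_2 + \Bigl(1-\tfrac{xoff}{16}\Bigr)\tfrac{yoff}{16}\,b_1 + \tfrac{xoff}{16}\,\tfrac{yoff}{16}\,b_2 \]

and the variance of r - src is then accumulated exactly as in variance_ref. The reference buffer is (w + 1) x (h + 1) pixels, so the x + 1 and y + 1 taps never read past the end.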
87 typedef unsigned int (*SumOfSquaresFunction)(const int16_t *src); 164 typedef unsigned int (*SumOfSquaresFunction)(const int16_t *src);
88 165
89 class SumOfSquaresTest : public ::testing::TestWithParam<SumOfSquaresFunction> { 166 class SumOfSquaresTest : public ::testing::TestWithParam<SumOfSquaresFunction> {
90 public: 167 public:
91 SumOfSquaresTest() : func_(GetParam()) {} 168 SumOfSquaresTest() : func_(GetParam()) {}
92 169
(...skipping 30 matching lines...)
123 200
124 const unsigned int expected = mb_ss_ref(mem); 201 const unsigned int expected = mb_ss_ref(mem);
125 unsigned int res; 202 unsigned int res;
126 ASM_REGISTER_STATE_CHECK(res = func_(mem)); 203 ASM_REGISTER_STATE_CHECK(res = func_(mem));
127 EXPECT_EQ(expected, res); 204 EXPECT_EQ(expected, res);
128 } 205 }
129 } 206 }
130 207
131 template<typename VarianceFunctionType> 208 template<typename VarianceFunctionType>
132 class VarianceTest 209 class VarianceTest
133 : public ::testing::TestWithParam<tuple<int, int, VarianceFunctionType> > { 210 : public ::testing::TestWithParam<tuple<int, int,
211 VarianceFunctionType, int> > {
134 public: 212 public:
135 virtual void SetUp() { 213 virtual void SetUp() {
136 const tuple<int, int, VarianceFunctionType>& params = this->GetParam(); 214 const tuple<int, int, VarianceFunctionType, int>& params = this->GetParam();
137 log2width_ = get<0>(params); 215 log2width_ = get<0>(params);
138 width_ = 1 << log2width_; 216 width_ = 1 << log2width_;
139 log2height_ = get<1>(params); 217 log2height_ = get<1>(params);
140 height_ = 1 << log2height_; 218 height_ = 1 << log2height_;
141 variance_ = get<2>(params); 219 variance_ = get<2>(params);
220 if (get<3>(params)) {
221 bit_depth_ = static_cast<vpx_bit_depth_t>(get<3>(params));
222 use_high_bit_depth_ = true;
223 } else {
224 bit_depth_ = VPX_BITS_8;
225 use_high_bit_depth_ = false;
226 }
227 mask_ = (1 << bit_depth_) - 1;
142 228
143 rnd_.Reset(ACMRandom::DeterministicSeed()); 229 rnd_.Reset(ACMRandom::DeterministicSeed());
144 block_size_ = width_ * height_; 230 block_size_ = width_ * height_;
145 src_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_)); 231 #if CONFIG_VP9_HIGHBITDEPTH
146 ref_ = new uint8_t[block_size_]; 232 if (!use_high_bit_depth_) {
233 src_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_ * 2));
234 ref_ = new uint8_t[block_size_ * 2];
235 } else {
236 src_ = CONVERT_TO_BYTEPTR(reinterpret_cast<uint16_t *>(
237 vpx_memalign(16, block_size_ * 2 * sizeof(uint16_t))));
238 ref_ = CONVERT_TO_BYTEPTR(new uint16_t[block_size_ * 2]);
239 }
240 #else
241 src_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_ * 2));
242 ref_ = new uint8_t[block_size_ * 2];
243 #endif
147 ASSERT_TRUE(src_ != NULL); 244 ASSERT_TRUE(src_ != NULL);
148 ASSERT_TRUE(ref_ != NULL); 245 ASSERT_TRUE(ref_ != NULL);
149 } 246 }
150 247
151 virtual void TearDown() { 248 virtual void TearDown() {
249 #if CONFIG_VP9_HIGHBITDEPTH
250 if (!use_high_bit_depth_) {
251 vpx_free(src_);
252 delete[] ref_;
253 } else {
254 vpx_free(CONVERT_TO_SHORTPTR(src_));
255 delete[] CONVERT_TO_SHORTPTR(ref_);
256 }
257 #else
152 vpx_free(src_); 258 vpx_free(src_);
153 delete[] ref_; 259 delete[] ref_;
260 #endif
154 libvpx_test::ClearSystemState(); 261 libvpx_test::ClearSystemState();
155 } 262 }
156 263
157 protected: 264 protected:
158 void ZeroTest(); 265 void ZeroTest();
159 void RefTest(); 266 void RefTest();
267 void RefStrideTest();
160 void OneQuarterTest(); 268 void OneQuarterTest();
161 269
162 ACMRandom rnd_; 270 ACMRandom rnd_;
163 uint8_t* src_; 271 uint8_t *src_;
164 uint8_t* ref_; 272 uint8_t *ref_;
165 int width_, log2width_; 273 int width_, log2width_;
166 int height_, log2height_; 274 int height_, log2height_;
275 vpx_bit_depth_t bit_depth_;
276 int mask_;
277 bool use_high_bit_depth_;
167 int block_size_; 278 int block_size_;
168 VarianceFunctionType variance_; 279 VarianceFunctionType variance_;
169 }; 280 };
170 281
171 template<typename VarianceFunctionType> 282 template<typename VarianceFunctionType>
172 void VarianceTest<VarianceFunctionType>::ZeroTest() { 283 void VarianceTest<VarianceFunctionType>::ZeroTest() {
173 for (int i = 0; i <= 255; ++i) { 284 for (int i = 0; i <= 255; ++i) {
285 #if CONFIG_VP9_HIGHBITDEPTH
286 if (!use_high_bit_depth_) {
287 memset(src_, i, block_size_);
288 } else {
289 vpx_memset16(CONVERT_TO_SHORTPTR(src_), i << (bit_depth_ - 8),
290 block_size_);
291 }
292 #else
174 memset(src_, i, block_size_); 293 memset(src_, i, block_size_);
294 #endif
175 for (int j = 0; j <= 255; ++j) { 295 for (int j = 0; j <= 255; ++j) {
296 #if CONFIG_VP9_HIGHBITDEPTH
297 if (!use_high_bit_depth_) {
298 memset(ref_, j, block_size_);
299 } else {
300 vpx_memset16(CONVERT_TO_SHORTPTR(ref_), j << (bit_depth_ - 8),
301 block_size_);
302 }
303 #else
176 memset(ref_, j, block_size_); 304 memset(ref_, j, block_size_);
305 #endif
177 unsigned int sse; 306 unsigned int sse;
178 unsigned int var; 307 unsigned int var;
179 ASM_REGISTER_STATE_CHECK( 308 ASM_REGISTER_STATE_CHECK(
180 var = variance_(src_, width_, ref_, width_, &sse)); 309 var = variance_(src_, width_, ref_, width_, &sse));
181 EXPECT_EQ(0u, var) << "src values: " << i << "ref values: " << j; 310 EXPECT_EQ(0u, var) << "src values: " << i << " ref values: " << j;
182 } 311 }
183 } 312 }
184 } 313 }
185 314
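A quick sanity check of ZeroTest: with src_ filled with the constant i and ref_ with the constant j, every per-pixel difference is the same value d, so sse = N * d^2 and se = N * d for N = block_size_, and

\[ \mathrm{var} = N d^2 - \frac{(N d)^2}{N} = 0 \]

for all 256 x 256 combinations, which is what EXPECT_EQ(0u, var) asserts. In the high-bit-depth branch the constants are shifted up by (bit_depth_ - 8) bits, but the argument is unchanged.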
186 template<typename VarianceFunctionType> 315 template<typename VarianceFunctionType>
187 void VarianceTest<VarianceFunctionType>::RefTest() { 316 void VarianceTest<VarianceFunctionType>::RefTest() {
188 for (int i = 0; i < 10; ++i) { 317 for (int i = 0; i < 10; ++i) {
189 for (int j = 0; j < block_size_; j++) { 318 for (int j = 0; j < block_size_; j++) {
319 #if CONFIG_VP9_HIGHBITDEPTH
320 if (!use_high_bit_depth_) {
190 src_[j] = rnd_.Rand8(); 321 src_[j] = rnd_.Rand8();
191 ref_[j] = rnd_.Rand8(); 322 ref_[j] = rnd_.Rand8();
323 } else {
324 CONVERT_TO_SHORTPTR(src_)[j] = rnd_.Rand16() & mask_;
325 CONVERT_TO_SHORTPTR(ref_)[j] = rnd_.Rand16() & mask_;
326 }
327 #else
328 src_[j] = rnd_.Rand8();
329 ref_[j] = rnd_.Rand8();
330 #endif
192 } 331 }
193 unsigned int sse1, sse2; 332 unsigned int sse1, sse2;
194 unsigned int var1; 333 unsigned int var1;
334 const int stride_coeff = 1;
195 ASM_REGISTER_STATE_CHECK( 335 ASM_REGISTER_STATE_CHECK(
196 var1 = variance_(src_, width_, ref_, width_, &sse1)); 336 var1 = variance_(src_, width_, ref_, width_, &sse1));
197 const unsigned int var2 = variance_ref(src_, ref_, log2width_, 337 const unsigned int var2 = variance_ref(src_, ref_, log2width_,
198 log2height_, &sse2); 338 log2height_, stride_coeff,
339 stride_coeff, &sse2,
340 use_high_bit_depth_, bit_depth_);
199 EXPECT_EQ(sse1, sse2); 341 EXPECT_EQ(sse1, sse2);
200 EXPECT_EQ(var1, var2); 342 EXPECT_EQ(var1, var2);
201 } 343 }
202 } 344 }
203 345
204 template<typename VarianceFunctionType> 346 template<typename VarianceFunctionType>
347 void VarianceTest<VarianceFunctionType>::RefStrideTest() {
348 for (int i = 0; i < 10; ++i) {
349 int ref_stride_coeff = i % 2;
350 int src_stride_coeff = (i >> 1) % 2;
351 for (int j = 0; j < block_size_; j++) {
352 int ref_ind = (j / width_) * ref_stride_coeff * width_ + j % width_;
353 int src_ind = (j / width_) * src_stride_coeff * width_ + j % width_;
354 #if CONFIG_VP9_HIGHBITDEPTH
355 if (!use_high_bit_depth_) {
356 src_[src_ind] = rnd_.Rand8();
357 ref_[ref_ind] = rnd_.Rand8();
358 } else {
359 CONVERT_TO_SHORTPTR(src_)[src_ind] = rnd_.Rand16() & mask_;
360 CONVERT_TO_SHORTPTR(ref_)[ref_ind] = rnd_.Rand16() & mask_;
361 }
362 #else
363 src_[src_ind] = rnd_.Rand8();
364 ref_[ref_ind] = rnd_.Rand8();
365 #endif
366 }
367 unsigned int sse1, sse2;
368 unsigned int var1;
369
370 ASM_REGISTER_STATE_CHECK(
371 var1 = variance_(src_, width_ * src_stride_coeff,
372 ref_, width_ * ref_stride_coeff, &sse1));
373 const unsigned int var2 = variance_ref(src_, ref_, log2width_,
374 log2height_, src_stride_coeff,
375 ref_stride_coeff, &sse2,
376 use_high_bit_depth_, bit_depth_);
377 EXPECT_EQ(sse1, sse2);
378 EXPECT_EQ(var1, var2);
379 }
380 }
381
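RefStrideTest exercises the kernels with strides of either width_ or 0: the coefficient pair ({src,ref}_stride_coeff, each 0 or 1) multiplies the stride passed to variance_, and the fill index (j / width_) * stride_coeff * width_ + j % width_ collapses every row onto row 0 when the coefficient is 0. A small illustration, using a hypothetical width_ of 4 (not part of the patch):

j                 : 0 1 2 3 4 5 6 7 ...
index, coeff == 1 : 0 1 2 3 4 5 6 7 ...   (row-major layout, stride = width_)
index, coeff == 0 : 0 1 2 3 0 1 2 3 ...   (all rows alias row 0, stride = 0)

variance_ref receives the same coefficients through its src_stride_coeff and ref_stride_coeff parameters, so both implementations read identical data.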
382 template<typename VarianceFunctionType>
205 void VarianceTest<VarianceFunctionType>::OneQuarterTest() { 383 void VarianceTest<VarianceFunctionType>::OneQuarterTest() {
384 const int half = block_size_ / 2;
385 #if CONFIG_VP9_HIGHBITDEPTH
386 if (!use_high_bit_depth_) {
387 memset(src_, 255, block_size_);
388 memset(ref_, 255, half);
389 memset(ref_ + half, 0, half);
390 } else {
391 vpx_memset16(CONVERT_TO_SHORTPTR(src_), 255 << (bit_depth_ - 8),
392 block_size_);
393 vpx_memset16(CONVERT_TO_SHORTPTR(ref_), 255 << (bit_depth_ - 8), half);
394 vpx_memset16(CONVERT_TO_SHORTPTR(ref_) + half, 0, half);
395 }
396 #else
206 memset(src_, 255, block_size_); 397 memset(src_, 255, block_size_);
207 const int half = block_size_ / 2;
208 memset(ref_, 255, half); 398 memset(ref_, 255, half);
209 memset(ref_ + half, 0, half); 399 memset(ref_ + half, 0, half);
400 #endif
210 unsigned int sse; 401 unsigned int sse;
211 unsigned int var; 402 unsigned int var;
212 ASM_REGISTER_STATE_CHECK(var = variance_(src_, width_, ref_, width_, &sse)); 403 ASM_REGISTER_STATE_CHECK(var = variance_(src_, width_, ref_, width_, &sse));
213 const unsigned int expected = block_size_ * 255 * 255 / 4; 404 const unsigned int expected = block_size_ * 255 * 255 / 4;
214 EXPECT_EQ(expected, var); 405 EXPECT_EQ(expected, var);
215 } 406 }
216 407
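The expected value in OneQuarterTest follows from the same identity: src_ is all 255 while ref_ is 255 on the first half of the block and 0 on the second half, so half of the differences are 0 and half are 255. With N = block_size_,

\[ \mathrm{sse} = \tfrac{N}{2}\,255^2, \qquad \mathrm{se} = \tfrac{N}{2}\,255, \qquad \mathrm{var} = \tfrac{N}{2}\,255^2 - \frac{\bigl(\tfrac{N}{2}\,255\bigr)^2}{N} = \frac{N \cdot 255^2}{4} \]

which matches the block_size_ * 255 * 255 / 4 constant checked above. In the high-bit-depth branch the inputs are scaled up by (bit_depth_ - 8) bits and the rounding shown in variance_ref brings the result back to the same 8-bit-equivalent value.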
217 #if CONFIG_VP8_ENCODER 408 #if CONFIG_VP8_ENCODER
218 template<typename MseFunctionType> 409 template<typename MseFunctionType>
219 class MseTest 410 class MseTest
(...skipping 37 matching lines...)
257 }; 448 };
258 449
259 template<typename MseFunctionType> 450 template<typename MseFunctionType>
260 void MseTest<MseFunctionType>::RefTest_mse() { 451 void MseTest<MseFunctionType>::RefTest_mse() {
261 for (int i = 0; i < 10; ++i) { 452 for (int i = 0; i < 10; ++i) {
262 for (int j = 0; j < block_size_; j++) { 453 for (int j = 0; j < block_size_; j++) {
263 src_[j] = rnd.Rand8(); 454 src_[j] = rnd.Rand8();
264 ref_[j] = rnd.Rand8(); 455 ref_[j] = rnd.Rand8();
265 } 456 }
266 unsigned int sse1, sse2; 457 unsigned int sse1, sse2;
458 const int stride_coeff = 1;
267 ASM_REGISTER_STATE_CHECK(mse_(src_, width_, ref_, width_, &sse1)); 459 ASM_REGISTER_STATE_CHECK(mse_(src_, width_, ref_, width_, &sse1));
268 variance_ref(src_, ref_, log2width_, log2height_, &sse2); 460 variance_ref(src_, ref_, log2width_, log2height_, stride_coeff,
461 stride_coeff, &sse2, false, VPX_BITS_8);
269 EXPECT_EQ(sse1, sse2); 462 EXPECT_EQ(sse1, sse2);
270 } 463 }
271 } 464 }
272 465
273 template<typename MseFunctionType> 466 template<typename MseFunctionType>
274 void MseTest<MseFunctionType>::RefTest_sse() { 467 void MseTest<MseFunctionType>::RefTest_sse() {
275 for (int i = 0; i < 10; ++i) { 468 for (int i = 0; i < 10; ++i) {
276 for (int j = 0; j < block_size_; j++) { 469 for (int j = 0; j < block_size_; j++) {
277 src_[j] = rnd.Rand8(); 470 src_[j] = rnd.Rand8();
278 ref_[j] = rnd.Rand8(); 471 ref_[j] = rnd.Rand8();
279 } 472 }
280 unsigned int sse2; 473 unsigned int sse2;
281 unsigned int var1; 474 unsigned int var1;
282 ASM_REGISTER_STATE_CHECK( 475 const int stride_coeff = 1;
283 var1 = mse_(src_, width_, ref_, width_)); 476 ASM_REGISTER_STATE_CHECK(var1 = mse_(src_, width_, ref_, width_));
284 variance_ref(src_, ref_, log2width_, log2height_, &sse2); 477 variance_ref(src_, ref_, log2width_, log2height_, stride_coeff,
478 stride_coeff, &sse2, false, VPX_BITS_8);
285 EXPECT_EQ(var1, sse2); 479 EXPECT_EQ(var1, sse2);
286 } 480 }
287 } 481 }
288 482
289 template<typename MseFunctionType> 483 template<typename MseFunctionType>
290 void MseTest<MseFunctionType>::MaxTest_mse() { 484 void MseTest<MseFunctionType>::MaxTest_mse() {
291 memset(src_, 255, block_size_); 485 memset(src_, 255, block_size_);
292 memset(ref_, 0, block_size_); 486 memset(ref_, 0, block_size_);
293 unsigned int sse; 487 unsigned int sse;
294 ASM_REGISTER_STATE_CHECK(mse_(src_, width_, ref_, width_, &sse)); 488 ASM_REGISTER_STATE_CHECK(mse_(src_, width_, ref_, width_, &sse));
295 const unsigned int expected = block_size_ * 255 * 255; 489 const unsigned int expected = block_size_ * 255 * 255;
296 EXPECT_EQ(expected, sse); 490 EXPECT_EQ(expected, sse);
297 } 491 }
298 492
299 template<typename MseFunctionType> 493 template<typename MseFunctionType>
300 void MseTest<MseFunctionType>::MaxTest_sse() { 494 void MseTest<MseFunctionType>::MaxTest_sse() {
301 memset(src_, 255, block_size_); 495 memset(src_, 255, block_size_);
302 memset(ref_, 0, block_size_); 496 memset(ref_, 0, block_size_);
303 unsigned int var; 497 unsigned int var;
304 ASM_REGISTER_STATE_CHECK(var = mse_(src_, width_, ref_, width_)); 498 ASM_REGISTER_STATE_CHECK(var = mse_(src_, width_, ref_, width_));
305 const unsigned int expected = block_size_ * 255 * 255; 499 const unsigned int expected = block_size_ * 255 * 255;
306 EXPECT_EQ(expected, var); 500 EXPECT_EQ(expected, var);
307 } 501 }
308 #endif 502 #endif
309 503
310 #if CONFIG_VP9_ENCODER 504 #if CONFIG_VP9_ENCODER
311
312 unsigned int subpel_avg_variance_ref(const uint8_t *ref, 505 unsigned int subpel_avg_variance_ref(const uint8_t *ref,
313 const uint8_t *src, 506 const uint8_t *src,
314 const uint8_t *second_pred, 507 const uint8_t *second_pred,
315 int l2w, int l2h, 508 int l2w, int l2h,
316 int xoff, int yoff, 509 int xoff, int yoff,
317 unsigned int *sse_ptr) { 510 unsigned int *sse_ptr,
318 int se = 0; 511 bool use_high_bit_depth,
319 unsigned int sse = 0; 512 vpx_bit_depth_t bit_depth) {
320 const int w = 1 << l2w, h = 1 << l2h; 513 #if CONFIG_VP9_HIGHBITDEPTH
514 int64_t se = 0;
515 uint64_t sse = 0;
516 const int w = 1 << l2w;
517 const int h = 1 << l2h;
321 for (int y = 0; y < h; y++) { 518 for (int y = 0; y < h; y++) {
322 for (int x = 0; x < w; x++) { 519 for (int x = 0; x < w; x++) {
323 // bilinear interpolation at a 16th pel step 520 // bilinear interpolation at a 16th pel step
521 if (!use_high_bit_depth) {
522 const int a1 = ref[(w + 1) * (y + 0) + x + 0];
523 const int a2 = ref[(w + 1) * (y + 0) + x + 1];
524 const int b1 = ref[(w + 1) * (y + 1) + x + 0];
525 const int b2 = ref[(w + 1) * (y + 1) + x + 1];
526 const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
527 const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
528 const int r = a + (((b - a) * yoff + 8) >> 4);
529 const int diff = ((r + second_pred[w * y + x] + 1) >> 1) - src[w * y + x];
530 se += diff;
531 sse += diff * diff;
532 } else {
533 uint16_t *ref16 = CONVERT_TO_SHORTPTR(ref);
534 uint16_t *src16 = CONVERT_TO_SHORTPTR(src);
535 uint16_t *sec16 = CONVERT_TO_SHORTPTR(second_pred);
536 const int a1 = ref16[(w + 1) * (y + 0) + x + 0];
537 const int a2 = ref16[(w + 1) * (y + 0) + x + 1];
538 const int b1 = ref16[(w + 1) * (y + 1) + x + 0];
539 const int b2 = ref16[(w + 1) * (y + 1) + x + 1];
540 const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
541 const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
542 const int r = a + (((b - a) * yoff + 8) >> 4);
543 const int diff = ((r + sec16[w * y + x] + 1) >> 1) - src16[w * y + x];
544 se += diff;
545 sse += diff * diff;
546 }
547 }
548 }
549 if (bit_depth > 8) {
550 sse = ROUND_POWER_OF_TWO(sse, 2 * (bit_depth - 8));
551 se = ROUND_POWER_OF_TWO(se, bit_depth - 8);
552 }
553 #else
554 int se = 0;
555 unsigned int sse = 0;
556 const int w = 1 << l2w;
557 const int h = 1 << l2h;
558 for (int y = 0; y < h; y++) {
559 for (int x = 0; x < w; x++) {
560 // bilinear interpolation at a 16th pel step
324 const int a1 = ref[(w + 1) * (y + 0) + x + 0]; 561 const int a1 = ref[(w + 1) * (y + 0) + x + 0];
325 const int a2 = ref[(w + 1) * (y + 0) + x + 1]; 562 const int a2 = ref[(w + 1) * (y + 0) + x + 1];
326 const int b1 = ref[(w + 1) * (y + 1) + x + 0]; 563 const int b1 = ref[(w + 1) * (y + 1) + x + 0];
327 const int b2 = ref[(w + 1) * (y + 1) + x + 1]; 564 const int b2 = ref[(w + 1) * (y + 1) + x + 1];
328 const int a = a1 + (((a2 - a1) * xoff + 8) >> 4); 565 const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
329 const int b = b1 + (((b2 - b1) * xoff + 8) >> 4); 566 const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
330 const int r = a + (((b - a) * yoff + 8) >> 4); 567 const int r = a + (((b - a) * yoff + 8) >> 4);
331 int diff = ((r + second_pred[w * y + x] + 1) >> 1) - src[w * y + x]; 568 const int diff = ((r + second_pred[w * y + x] + 1) >> 1) - src[w * y + x];
332 se += diff; 569 se += diff;
333 sse += diff * diff; 570 sse += diff * diff;
334 } 571 }
335 } 572 }
573 #endif // CONFIG_VP9_HIGHBITDEPTH
336 *sse_ptr = sse; 574 *sse_ptr = sse;
337 return sse - (((int64_t) se * se) >> (l2w + l2h)); 575 return sse - (((int64_t) se * se) >> (l2w + l2h));
338 } 576 }
339 577
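subpel_avg_variance_ref differs from subpel_variance_ref only in the averaging step: the interpolated reference sample r is averaged with the matching second_pred sample p, rounding up, before the difference is taken:

\[ d_{y,x} = \Bigl\lfloor \frac{r_{y,x} + p_{y,x} + 1}{2} \Bigr\rfloor - \mathrm{src}_{y,x} \]

The sum and sum-of-squares accumulation (and the high-bit-depth rescaling) are then identical to the plain subpel case. This matches the *_avg_variance kernels under test, which take the extra second-prediction buffer (sec_ in the tests below).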
340 template<typename SubpelVarianceFunctionType> 578 template<typename SubpelVarianceFunctionType>
341 class SubpelVarianceTest 579 class SubpelVarianceTest
342 : public ::testing::TestWithParam<tuple<int, int, 580 : public ::testing::TestWithParam<tuple<int, int,
343 SubpelVarianceFunctionType> > { 581 SubpelVarianceFunctionType, int> > {
344 public: 582 public:
345 virtual void SetUp() { 583 virtual void SetUp() {
346 const tuple<int, int, SubpelVarianceFunctionType>& params = 584 const tuple<int, int, SubpelVarianceFunctionType, int>& params =
347 this->GetParam(); 585 this->GetParam();
348 log2width_ = get<0>(params); 586 log2width_ = get<0>(params);
349 width_ = 1 << log2width_; 587 width_ = 1 << log2width_;
350 log2height_ = get<1>(params); 588 log2height_ = get<1>(params);
351 height_ = 1 << log2height_; 589 height_ = 1 << log2height_;
352 subpel_variance_ = get<2>(params); 590 subpel_variance_ = get<2>(params);
591 if (get<3>(params)) {
592 bit_depth_ = static_cast<vpx_bit_depth_t>(get<3>(params));
593 use_high_bit_depth_ = true;
594 } else {
595 bit_depth_ = VPX_BITS_8;
596 use_high_bit_depth_ = false;
597 }
598 mask_ = (1 << bit_depth_) - 1;
353 599
354 rnd_.Reset(ACMRandom::DeterministicSeed()); 600 rnd_.Reset(ACMRandom::DeterministicSeed());
355 block_size_ = width_ * height_; 601 block_size_ = width_ * height_;
602 #if CONFIG_VP9_HIGHBITDEPTH
603 if (!use_high_bit_depth_) {
604 src_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_));
605 sec_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_));
606 ref_ = new uint8_t[block_size_ + width_ + height_ + 1];
607 } else {
608 src_ = CONVERT_TO_BYTEPTR(
609 reinterpret_cast<uint16_t *>(
610 vpx_memalign(16, block_size_*sizeof(uint16_t))));
611 sec_ = CONVERT_TO_BYTEPTR(
612 reinterpret_cast<uint16_t *>(
613 vpx_memalign(16, block_size_*sizeof(uint16_t))));
614 ref_ = CONVERT_TO_BYTEPTR(
615 new uint16_t[block_size_ + width_ + height_ + 1]);
616 }
617 #else
356 src_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_)); 618 src_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_));
357 sec_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_)); 619 sec_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_));
358 ref_ = new uint8_t[block_size_ + width_ + height_ + 1]; 620 ref_ = new uint8_t[block_size_ + width_ + height_ + 1];
621 #endif // CONFIG_VP9_HIGHBITDEPTH
359 ASSERT_TRUE(src_ != NULL); 622 ASSERT_TRUE(src_ != NULL);
360 ASSERT_TRUE(sec_ != NULL); 623 ASSERT_TRUE(sec_ != NULL);
361 ASSERT_TRUE(ref_ != NULL); 624 ASSERT_TRUE(ref_ != NULL);
362 } 625 }
363 626
364 virtual void TearDown() { 627 virtual void TearDown() {
628 #if CONFIG_VP9_HIGHBITDEPTH
629 if (!use_high_bit_depth_) {
630 vpx_free(src_);
631 delete[] ref_;
632 vpx_free(sec_);
633 } else {
634 vpx_free(CONVERT_TO_SHORTPTR(src_));
635 delete[] CONVERT_TO_SHORTPTR(ref_);
636 vpx_free(CONVERT_TO_SHORTPTR(sec_));
637 }
638 #else
365 vpx_free(src_); 639 vpx_free(src_);
366 delete[] ref_; 640 delete[] ref_;
367 vpx_free(sec_); 641 vpx_free(sec_);
642 #endif
368 libvpx_test::ClearSystemState(); 643 libvpx_test::ClearSystemState();
369 } 644 }
370 645
371 protected: 646 protected:
372 void RefTest(); 647 void RefTest();
648 void ExtremeRefTest();
373 649
374 ACMRandom rnd_; 650 ACMRandom rnd_;
375 uint8_t *src_; 651 uint8_t *src_;
376 uint8_t *ref_; 652 uint8_t *ref_;
377 uint8_t *sec_; 653 uint8_t *sec_;
654 bool use_high_bit_depth_;
655 vpx_bit_depth_t bit_depth_;
378 int width_, log2width_; 656 int width_, log2width_;
379 int height_, log2height_; 657 int height_, log2height_;
380 int block_size_; 658 int block_size_, mask_;
381 SubpelVarianceFunctionType subpel_variance_; 659 SubpelVarianceFunctionType subpel_variance_;
382 }; 660 };
383 661
384 template<typename SubpelVarianceFunctionType> 662 template<typename SubpelVarianceFunctionType>
385 void SubpelVarianceTest<SubpelVarianceFunctionType>::RefTest() { 663 void SubpelVarianceTest<SubpelVarianceFunctionType>::RefTest() {
386 for (int x = 0; x < 16; ++x) { 664 for (int x = 0; x < 16; ++x) {
387 for (int y = 0; y < 16; ++y) { 665 for (int y = 0; y < 16; ++y) {
666 #if CONFIG_VP9_HIGHBITDEPTH
667 if (!use_high_bit_depth_) {
668 for (int j = 0; j < block_size_; j++) {
669 src_[j] = rnd_.Rand8();
670 }
671 for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
672 ref_[j] = rnd_.Rand8();
673 }
674 } else {
675 for (int j = 0; j < block_size_; j++) {
676 CONVERT_TO_SHORTPTR(src_)[j] = rnd_.Rand16() & mask_;
677 }
678 for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
679 CONVERT_TO_SHORTPTR(ref_)[j] = rnd_.Rand16() & mask_;
680 }
681 }
682 #else
388 for (int j = 0; j < block_size_; j++) { 683 for (int j = 0; j < block_size_; j++) {
389 src_[j] = rnd_.Rand8(); 684 src_[j] = rnd_.Rand8();
390 } 685 }
391 for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) { 686 for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
392 ref_[j] = rnd_.Rand8(); 687 ref_[j] = rnd_.Rand8();
393 } 688 }
689 #endif // CONFIG_VP9_HIGHBITDEPTH
394 unsigned int sse1, sse2; 690 unsigned int sse1, sse2;
395 unsigned int var1; 691 unsigned int var1;
396 ASM_REGISTER_STATE_CHECK(var1 = subpel_variance_(ref_, width_ + 1, x, y, 692 ASM_REGISTER_STATE_CHECK(var1 = subpel_variance_(ref_, width_ + 1, x, y,
397 src_, width_, &sse1)); 693 src_, width_, &sse1));
398 const unsigned int var2 = subpel_variance_ref(ref_, src_, log2width_, 694 const unsigned int var2 = subpel_variance_ref(ref_, src_, log2width_,
399 log2height_, x, y, &sse2); 695 log2height_, x, y, &sse2,
696 use_high_bit_depth_,
697 bit_depth_);
400 EXPECT_EQ(sse1, sse2) << "at position " << x << ", " << y; 698 EXPECT_EQ(sse1, sse2) << "at position " << x << ", " << y;
401 EXPECT_EQ(var1, var2) << "at position " << x << ", " << y; 699 EXPECT_EQ(var1, var2) << "at position " << x << ", " << y;
402 } 700 }
403 } 701 }
404 } 702 }
405 703
704 template<typename SubpelVarianceFunctionType>
705 void SubpelVarianceTest<SubpelVarianceFunctionType>::ExtremeRefTest() {
706 // Compare against reference.
707 // Src: Set the first half of values to 0, the second half to the maximum.
708 // Ref: Set the first half of values to the maximum, the second half to 0.
709 for (int x = 0; x < 16; ++x) {
710 for (int y = 0; y < 16; ++y) {
711 const int half = block_size_ / 2;
712 #if CONFIG_VP9_HIGHBITDEPTH
713 if (!use_high_bit_depth_) {
714 memset(src_, 0, half);
715 memset(src_ + half, 255, half);
716 memset(ref_, 255, half);
717 memset(ref_ + half, 0, half + width_ + height_ + 1);
718 } else {
719 vpx_memset16(CONVERT_TO_SHORTPTR(src_), mask_, half);
720 vpx_memset16(CONVERT_TO_SHORTPTR(src_) + half, 0, half);
721 vpx_memset16(CONVERT_TO_SHORTPTR(ref_), 0, half);
722 vpx_memset16(CONVERT_TO_SHORTPTR(ref_) + half, mask_,
723 half + width_ + height_ + 1);
724 }
725 #else
726 memset(src_, 0, half);
727 memset(src_ + half, 255, half);
728 memset(ref_, 255, half);
729 memset(ref_ + half, 0, half + width_ + height_ + 1);
730 #endif // CONFIG_VP9_HIGHBITDEPTH
731 unsigned int sse1, sse2;
732 unsigned int var1;
733 ASM_REGISTER_STATE_CHECK(
734 var1 = subpel_variance_(ref_, width_ + 1, x, y, src_, width_, &sse1));
735 const unsigned int var2 =
736 subpel_variance_ref(ref_, src_, log2width_, log2height_, x, y, &sse2,
737 use_high_bit_depth_, bit_depth_);
738 EXPECT_EQ(sse1, sse2) << "at position " << x << ", " << y;
739 EXPECT_EQ(var1, var2) << "at position " << x << ", " << y;
740 }
741 }
742 }
743
406 template<> 744 template<>
407 void SubpelVarianceTest<vp9_subp_avg_variance_fn_t>::RefTest() { 745 void SubpelVarianceTest<vp9_subp_avg_variance_fn_t>::RefTest() {
408 for (int x = 0; x < 16; ++x) { 746 for (int x = 0; x < 16; ++x) {
409 for (int y = 0; y < 16; ++y) { 747 for (int y = 0; y < 16; ++y) {
748 #if CONFIG_VP9_HIGHBITDEPTH
749 if (!use_high_bit_depth_) {
750 for (int j = 0; j < block_size_; j++) {
751 src_[j] = rnd_.Rand8();
752 sec_[j] = rnd_.Rand8();
753 }
754 for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
755 ref_[j] = rnd_.Rand8();
756 }
757 } else {
758 for (int j = 0; j < block_size_; j++) {
759 CONVERT_TO_SHORTPTR(src_)[j] = rnd_.Rand16() & mask_;
760 CONVERT_TO_SHORTPTR(sec_)[j] = rnd_.Rand16() & mask_;
761 }
762 for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
763 CONVERT_TO_SHORTPTR(ref_)[j] = rnd_.Rand16() & mask_;
764 }
765 }
766 #else
410 for (int j = 0; j < block_size_; j++) { 767 for (int j = 0; j < block_size_; j++) {
411 src_[j] = rnd_.Rand8(); 768 src_[j] = rnd_.Rand8();
412 sec_[j] = rnd_.Rand8(); 769 sec_[j] = rnd_.Rand8();
413 } 770 }
414 for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) { 771 for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
415 ref_[j] = rnd_.Rand8(); 772 ref_[j] = rnd_.Rand8();
416 } 773 }
774 #endif
417 unsigned int sse1, sse2; 775 unsigned int sse1, sse2;
418 unsigned int var1; 776 unsigned int var1;
419 ASM_REGISTER_STATE_CHECK( 777 ASM_REGISTER_STATE_CHECK(
420 var1 = subpel_variance_(ref_, width_ + 1, x, y, 778 var1 = subpel_variance_(ref_, width_ + 1, x, y,
421 src_, width_, &sse1, sec_)); 779 src_, width_, &sse1, sec_));
422 const unsigned int var2 = subpel_avg_variance_ref(ref_, src_, sec_, 780 const unsigned int var2 = subpel_avg_variance_ref(ref_, src_, sec_,
423 log2width_, log2height_, 781 log2width_, log2height_,
424 x, y, &sse2); 782 x, y, &sse2,
783 use_high_bit_depth_,
784 bit_depth_);
425 EXPECT_EQ(sse1, sse2) << "at position " << x << ", " << y; 785 EXPECT_EQ(sse1, sse2) << "at position " << x << ", " << y;
426 EXPECT_EQ(var1, var2) << "at position " << x << ", " << y; 786 EXPECT_EQ(var1, var2) << "at position " << x << ", " << y;
427 } 787 }
428 } 788 }
429 } 789 }
430 790
431 #endif // CONFIG_VP9_ENCODER 791 #endif // CONFIG_VP9_ENCODER
432 792
433 // ----------------------------------------------------------------------------- 793 // -----------------------------------------------------------------------------
434 // VP8 test cases. 794 // VP8 test cases.
(...skipping 26 matching lines...)
461 C, VP8MseTest, 821 C, VP8MseTest,
462 ::testing::Values(make_tuple(4, 4, mse16x16_c))); 822 ::testing::Values(make_tuple(4, 4, mse16x16_c)));
463 823
464 const vp8_variance_fn_t variance4x4_c = vp8_variance4x4_c; 824 const vp8_variance_fn_t variance4x4_c = vp8_variance4x4_c;
465 const vp8_variance_fn_t variance8x8_c = vp8_variance8x8_c; 825 const vp8_variance_fn_t variance8x8_c = vp8_variance8x8_c;
466 const vp8_variance_fn_t variance8x16_c = vp8_variance8x16_c; 826 const vp8_variance_fn_t variance8x16_c = vp8_variance8x16_c;
467 const vp8_variance_fn_t variance16x8_c = vp8_variance16x8_c; 827 const vp8_variance_fn_t variance16x8_c = vp8_variance16x8_c;
468 const vp8_variance_fn_t variance16x16_c = vp8_variance16x16_c; 828 const vp8_variance_fn_t variance16x16_c = vp8_variance16x16_c;
469 INSTANTIATE_TEST_CASE_P( 829 INSTANTIATE_TEST_CASE_P(
470 C, VP8VarianceTest, 830 C, VP8VarianceTest,
471 ::testing::Values(make_tuple(2, 2, variance4x4_c), 831 ::testing::Values(make_tuple(2, 2, variance4x4_c, 0),
472 make_tuple(3, 3, variance8x8_c), 832 make_tuple(3, 3, variance8x8_c, 0),
473 make_tuple(3, 4, variance8x16_c), 833 make_tuple(3, 4, variance8x16_c, 0),
474 make_tuple(4, 3, variance16x8_c), 834 make_tuple(4, 3, variance16x8_c, 0),
475 make_tuple(4, 4, variance16x16_c))); 835 make_tuple(4, 4, variance16x16_c, 0)));
476 836
477 #if HAVE_NEON 837 #if HAVE_NEON
478 const vp8_sse_fn_t get4x4sse_cs_neon = vp8_get4x4sse_cs_neon; 838 const vp8_sse_fn_t get4x4sse_cs_neon = vp8_get4x4sse_cs_neon;
479 INSTANTIATE_TEST_CASE_P( 839 INSTANTIATE_TEST_CASE_P(
480 NEON, VP8SseTest, 840 NEON, VP8SseTest,
481 ::testing::Values(make_tuple(2, 2, get4x4sse_cs_neon))); 841 ::testing::Values(make_tuple(2, 2, get4x4sse_cs_neon)));
482 842
483 const vp8_variance_fn_t mse16x16_neon = vp8_mse16x16_neon; 843 const vp8_variance_fn_t mse16x16_neon = vp8_mse16x16_neon;
484 INSTANTIATE_TEST_CASE_P( 844 INSTANTIATE_TEST_CASE_P(
485 NEON, VP8MseTest, 845 NEON, VP8MseTest,
486 ::testing::Values(make_tuple(4, 4, mse16x16_neon))); 846 ::testing::Values(make_tuple(4, 4, mse16x16_neon)));
487 847
488 const vp8_variance_fn_t variance8x8_neon = vp8_variance8x8_neon; 848 const vp8_variance_fn_t variance8x8_neon = vp8_variance8x8_neon;
489 const vp8_variance_fn_t variance8x16_neon = vp8_variance8x16_neon; 849 const vp8_variance_fn_t variance8x16_neon = vp8_variance8x16_neon;
490 const vp8_variance_fn_t variance16x8_neon = vp8_variance16x8_neon; 850 const vp8_variance_fn_t variance16x8_neon = vp8_variance16x8_neon;
491 const vp8_variance_fn_t variance16x16_neon = vp8_variance16x16_neon; 851 const vp8_variance_fn_t variance16x16_neon = vp8_variance16x16_neon;
492 INSTANTIATE_TEST_CASE_P( 852 INSTANTIATE_TEST_CASE_P(
493 NEON, VP8VarianceTest, 853 NEON, VP8VarianceTest,
494 ::testing::Values(make_tuple(3, 3, variance8x8_neon), 854 ::testing::Values(make_tuple(3, 3, variance8x8_neon, 0),
495 make_tuple(3, 4, variance8x16_neon), 855 make_tuple(3, 4, variance8x16_neon, 0),
496 make_tuple(4, 3, variance16x8_neon), 856 make_tuple(4, 3, variance16x8_neon, 0),
497 make_tuple(4, 4, variance16x16_neon))); 857 make_tuple(4, 4, variance16x16_neon, 0)));
498 #endif 858 #endif
499 859
500
501 #if HAVE_MMX 860 #if HAVE_MMX
502 const vp8_variance_fn_t variance4x4_mmx = vp8_variance4x4_mmx; 861 const vp8_variance_fn_t variance4x4_mmx = vp8_variance4x4_mmx;
503 const vp8_variance_fn_t variance8x8_mmx = vp8_variance8x8_mmx; 862 const vp8_variance_fn_t variance8x8_mmx = vp8_variance8x8_mmx;
504 const vp8_variance_fn_t variance8x16_mmx = vp8_variance8x16_mmx; 863 const vp8_variance_fn_t variance8x16_mmx = vp8_variance8x16_mmx;
505 const vp8_variance_fn_t variance16x8_mmx = vp8_variance16x8_mmx; 864 const vp8_variance_fn_t variance16x8_mmx = vp8_variance16x8_mmx;
506 const vp8_variance_fn_t variance16x16_mmx = vp8_variance16x16_mmx; 865 const vp8_variance_fn_t variance16x16_mmx = vp8_variance16x16_mmx;
507 INSTANTIATE_TEST_CASE_P( 866 INSTANTIATE_TEST_CASE_P(
508 MMX, VP8VarianceTest, 867 MMX, VP8VarianceTest,
509 ::testing::Values(make_tuple(2, 2, variance4x4_mmx), 868 ::testing::Values(make_tuple(2, 2, variance4x4_mmx, 0),
510 make_tuple(3, 3, variance8x8_mmx), 869 make_tuple(3, 3, variance8x8_mmx, 0),
511 make_tuple(3, 4, variance8x16_mmx), 870 make_tuple(3, 4, variance8x16_mmx, 0),
512 make_tuple(4, 3, variance16x8_mmx), 871 make_tuple(4, 3, variance16x8_mmx, 0),
513 make_tuple(4, 4, variance16x16_mmx))); 872 make_tuple(4, 4, variance16x16_mmx, 0)));
514 #endif 873 #endif
515 874
516 #if HAVE_SSE2 875 #if HAVE_SSE2
517 const vp8_variance_fn_t variance4x4_wmt = vp8_variance4x4_wmt; 876 const vp8_variance_fn_t variance4x4_wmt = vp8_variance4x4_wmt;
518 const vp8_variance_fn_t variance8x8_wmt = vp8_variance8x8_wmt; 877 const vp8_variance_fn_t variance8x8_wmt = vp8_variance8x8_wmt;
519 const vp8_variance_fn_t variance8x16_wmt = vp8_variance8x16_wmt; 878 const vp8_variance_fn_t variance8x16_wmt = vp8_variance8x16_wmt;
520 const vp8_variance_fn_t variance16x8_wmt = vp8_variance16x8_wmt; 879 const vp8_variance_fn_t variance16x8_wmt = vp8_variance16x8_wmt;
521 const vp8_variance_fn_t variance16x16_wmt = vp8_variance16x16_wmt; 880 const vp8_variance_fn_t variance16x16_wmt = vp8_variance16x16_wmt;
522 INSTANTIATE_TEST_CASE_P( 881 INSTANTIATE_TEST_CASE_P(
523 SSE2, VP8VarianceTest, 882 SSE2, VP8VarianceTest,
524 ::testing::Values(make_tuple(2, 2, variance4x4_wmt), 883 ::testing::Values(make_tuple(2, 2, variance4x4_wmt, 0),
525 make_tuple(3, 3, variance8x8_wmt), 884 make_tuple(3, 3, variance8x8_wmt, 0),
526 make_tuple(3, 4, variance8x16_wmt), 885 make_tuple(3, 4, variance8x16_wmt, 0),
527 make_tuple(4, 3, variance16x8_wmt), 886 make_tuple(4, 3, variance16x8_wmt, 0),
528 make_tuple(4, 4, variance16x16_wmt))); 887 make_tuple(4, 4, variance16x16_wmt, 0)));
529 #endif 888 #endif
530 #endif // CONFIG_VP8_ENCODER 889 #endif // CONFIG_VP8_ENCODER
531 890
532 } // namespace vp8 891 } // namespace vp8
533 892
534 // ----------------------------------------------------------------------------- 893 // -----------------------------------------------------------------------------
535 // VP9 test cases. 894 // VP9 test cases.
536 895
537 namespace vp9 { 896 namespace vp9 {
538 897
539 #if CONFIG_VP9_ENCODER 898 #if CONFIG_VP9_ENCODER
540
541 TEST_P(SumOfSquaresTest, Const) { ConstTest(); } 899 TEST_P(SumOfSquaresTest, Const) { ConstTest(); }
542 TEST_P(SumOfSquaresTest, Ref) { RefTest(); } 900 TEST_P(SumOfSquaresTest, Ref) { RefTest(); }
543 901
544 INSTANTIATE_TEST_CASE_P(C, SumOfSquaresTest, 902 INSTANTIATE_TEST_CASE_P(C, SumOfSquaresTest,
545 ::testing::Values(vp9_get_mb_ss_c)); 903 ::testing::Values(vp9_get_mb_ss_c));
546 904
547 typedef VarianceTest<vp9_variance_fn_t> VP9VarianceTest; 905 typedef VarianceTest<vp9_variance_fn_t> VP9VarianceTest;
548 typedef SubpelVarianceTest<vp9_subpixvariance_fn_t> VP9SubpelVarianceTest; 906 typedef SubpelVarianceTest<vp9_subpixvariance_fn_t> VP9SubpelVarianceTest;
549 typedef SubpelVarianceTest<vp9_subp_avg_variance_fn_t> VP9SubpelAvgVarianceTest; 907 typedef SubpelVarianceTest<vp9_subp_avg_variance_fn_t> VP9SubpelAvgVarianceTest;
550 908
551 TEST_P(VP9VarianceTest, Zero) { ZeroTest(); } 909 TEST_P(VP9VarianceTest, Zero) { ZeroTest(); }
552 TEST_P(VP9VarianceTest, Ref) { RefTest(); } 910 TEST_P(VP9VarianceTest, Ref) { RefTest(); }
911 TEST_P(VP9VarianceTest, RefStride) { RefStrideTest(); }
553 TEST_P(VP9SubpelVarianceTest, Ref) { RefTest(); } 912 TEST_P(VP9SubpelVarianceTest, Ref) { RefTest(); }
913 TEST_P(VP9SubpelVarianceTest, ExtremeRef) { ExtremeRefTest(); }
554 TEST_P(VP9SubpelAvgVarianceTest, Ref) { RefTest(); } 914 TEST_P(VP9SubpelAvgVarianceTest, Ref) { RefTest(); }
555 TEST_P(VP9VarianceTest, OneQuarter) { OneQuarterTest(); } 915 TEST_P(VP9VarianceTest, OneQuarter) { OneQuarterTest(); }
556 916
917 #if CONFIG_VP9_HIGHBITDEPTH
918 typedef VarianceTest<vp9_variance_fn_t> VP9VarianceHighTest;
919 typedef SubpelVarianceTest<vp9_subpixvariance_fn_t> VP9SubpelVarianceHighTest;
920 typedef SubpelVarianceTest<vp9_subp_avg_variance_fn_t>
921 VP9SubpelAvgVarianceHighTest;
922
923 TEST_P(VP9VarianceHighTest, Zero) { ZeroTest(); }
924 TEST_P(VP9VarianceHighTest, Ref) { RefTest(); }
925 TEST_P(VP9VarianceHighTest, RefStride) { RefStrideTest(); }
926 TEST_P(VP9SubpelVarianceHighTest, Ref) { RefTest(); }
927 TEST_P(VP9SubpelVarianceHighTest, ExtremeRef) { ExtremeRefTest(); }
928 TEST_P(VP9SubpelAvgVarianceHighTest, Ref) { RefTest(); }
929 TEST_P(VP9VarianceHighTest, OneQuarter) { OneQuarterTest(); }
930 #endif // CONFIG_VP9_HIGHBITDEPTH
931
557 const vp9_variance_fn_t variance4x4_c = vp9_variance4x4_c; 932 const vp9_variance_fn_t variance4x4_c = vp9_variance4x4_c;
558 const vp9_variance_fn_t variance4x8_c = vp9_variance4x8_c; 933 const vp9_variance_fn_t variance4x8_c = vp9_variance4x8_c;
559 const vp9_variance_fn_t variance8x4_c = vp9_variance8x4_c; 934 const vp9_variance_fn_t variance8x4_c = vp9_variance8x4_c;
560 const vp9_variance_fn_t variance8x8_c = vp9_variance8x8_c; 935 const vp9_variance_fn_t variance8x8_c = vp9_variance8x8_c;
561 const vp9_variance_fn_t variance8x16_c = vp9_variance8x16_c; 936 const vp9_variance_fn_t variance8x16_c = vp9_variance8x16_c;
562 const vp9_variance_fn_t variance16x8_c = vp9_variance16x8_c; 937 const vp9_variance_fn_t variance16x8_c = vp9_variance16x8_c;
563 const vp9_variance_fn_t variance16x16_c = vp9_variance16x16_c; 938 const vp9_variance_fn_t variance16x16_c = vp9_variance16x16_c;
564 const vp9_variance_fn_t variance16x32_c = vp9_variance16x32_c; 939 const vp9_variance_fn_t variance16x32_c = vp9_variance16x32_c;
565 const vp9_variance_fn_t variance32x16_c = vp9_variance32x16_c; 940 const vp9_variance_fn_t variance32x16_c = vp9_variance32x16_c;
566 const vp9_variance_fn_t variance32x32_c = vp9_variance32x32_c; 941 const vp9_variance_fn_t variance32x32_c = vp9_variance32x32_c;
567 const vp9_variance_fn_t variance32x64_c = vp9_variance32x64_c; 942 const vp9_variance_fn_t variance32x64_c = vp9_variance32x64_c;
568 const vp9_variance_fn_t variance64x32_c = vp9_variance64x32_c; 943 const vp9_variance_fn_t variance64x32_c = vp9_variance64x32_c;
569 const vp9_variance_fn_t variance64x64_c = vp9_variance64x64_c; 944 const vp9_variance_fn_t variance64x64_c = vp9_variance64x64_c;
570 INSTANTIATE_TEST_CASE_P( 945 INSTANTIATE_TEST_CASE_P(
571 C, VP9VarianceTest, 946 C, VP9VarianceTest,
572 ::testing::Values(make_tuple(2, 2, variance4x4_c), 947 ::testing::Values(make_tuple(2, 2, variance4x4_c, 0),
573 make_tuple(2, 3, variance4x8_c), 948 make_tuple(2, 3, variance4x8_c, 0),
574 make_tuple(3, 2, variance8x4_c), 949 make_tuple(3, 2, variance8x4_c, 0),
575 make_tuple(3, 3, variance8x8_c), 950 make_tuple(3, 3, variance8x8_c, 0),
576 make_tuple(3, 4, variance8x16_c), 951 make_tuple(3, 4, variance8x16_c, 0),
577 make_tuple(4, 3, variance16x8_c), 952 make_tuple(4, 3, variance16x8_c, 0),
578 make_tuple(4, 4, variance16x16_c), 953 make_tuple(4, 4, variance16x16_c, 0),
579 make_tuple(4, 5, variance16x32_c), 954 make_tuple(4, 5, variance16x32_c, 0),
580 make_tuple(5, 4, variance32x16_c), 955 make_tuple(5, 4, variance32x16_c, 0),
581 make_tuple(5, 5, variance32x32_c), 956 make_tuple(5, 5, variance32x32_c, 0),
582 make_tuple(5, 6, variance32x64_c), 957 make_tuple(5, 6, variance32x64_c, 0),
583 make_tuple(6, 5, variance64x32_c), 958 make_tuple(6, 5, variance64x32_c, 0),
584 make_tuple(6, 6, variance64x64_c))); 959 make_tuple(6, 6, variance64x64_c, 0)));
585 960 #if CONFIG_VP9_HIGHBITDEPTH
961 const vp9_variance_fn_t highbd_10_variance4x4_c = vp9_highbd_10_variance4x4_c;
962 const vp9_variance_fn_t highbd_10_variance4x8_c = vp9_highbd_10_variance4x8_c;
963 const vp9_variance_fn_t highbd_10_variance8x4_c = vp9_highbd_10_variance8x4_c;
964 const vp9_variance_fn_t highbd_10_variance8x8_c = vp9_highbd_10_variance8x8_c;
965 const vp9_variance_fn_t highbd_10_variance8x16_c = vp9_highbd_10_variance8x16_c;
966 const vp9_variance_fn_t highbd_10_variance16x8_c = vp9_highbd_10_variance16x8_c;
967 const vp9_variance_fn_t highbd_10_variance16x16_c =
968 vp9_highbd_10_variance16x16_c;
969 const vp9_variance_fn_t highbd_10_variance16x32_c =
970 vp9_highbd_10_variance16x32_c;
971 const vp9_variance_fn_t highbd_10_variance32x16_c =
972 vp9_highbd_10_variance32x16_c;
973 const vp9_variance_fn_t highbd_10_variance32x32_c =
974 vp9_highbd_10_variance32x32_c;
975 const vp9_variance_fn_t highbd_10_variance32x64_c =
976 vp9_highbd_10_variance32x64_c;
977 const vp9_variance_fn_t highbd_10_variance64x32_c =
978 vp9_highbd_10_variance64x32_c;
979 const vp9_variance_fn_t highbd_10_variance64x64_c =
980 vp9_highbd_10_variance64x64_c;
981 const vp9_variance_fn_t highbd_12_variance4x4_c = vp9_highbd_12_variance4x4_c;
982 const vp9_variance_fn_t highbd_12_variance4x8_c = vp9_highbd_12_variance4x8_c;
983 const vp9_variance_fn_t highbd_12_variance8x4_c = vp9_highbd_12_variance8x4_c;
984 const vp9_variance_fn_t highbd_12_variance8x8_c = vp9_highbd_12_variance8x8_c;
985 const vp9_variance_fn_t highbd_12_variance8x16_c = vp9_highbd_12_variance8x16_c;
986 const vp9_variance_fn_t highbd_12_variance16x8_c = vp9_highbd_12_variance16x8_c;
987 const vp9_variance_fn_t highbd_12_variance16x16_c =
988 vp9_highbd_12_variance16x16_c;
989 const vp9_variance_fn_t highbd_12_variance16x32_c =
990 vp9_highbd_12_variance16x32_c;
991 const vp9_variance_fn_t highbd_12_variance32x16_c =
992 vp9_highbd_12_variance32x16_c;
993 const vp9_variance_fn_t highbd_12_variance32x32_c =
994 vp9_highbd_12_variance32x32_c;
995 const vp9_variance_fn_t highbd_12_variance32x64_c =
996 vp9_highbd_12_variance32x64_c;
997 const vp9_variance_fn_t highbd_12_variance64x32_c =
998 vp9_highbd_12_variance64x32_c;
999 const vp9_variance_fn_t highbd_12_variance64x64_c =
1000 vp9_highbd_12_variance64x64_c;
1001 const vp9_variance_fn_t highbd_variance4x4_c = vp9_highbd_variance4x4_c;
1002 const vp9_variance_fn_t highbd_variance4x8_c = vp9_highbd_variance4x8_c;
1003 const vp9_variance_fn_t highbd_variance8x4_c = vp9_highbd_variance8x4_c;
1004 const vp9_variance_fn_t highbd_variance8x8_c = vp9_highbd_variance8x8_c;
1005 const vp9_variance_fn_t highbd_variance8x16_c = vp9_highbd_variance8x16_c;
1006 const vp9_variance_fn_t highbd_variance16x8_c = vp9_highbd_variance16x8_c;
1007 const vp9_variance_fn_t highbd_variance16x16_c = vp9_highbd_variance16x16_c;
1008 const vp9_variance_fn_t highbd_variance16x32_c = vp9_highbd_variance16x32_c;
1009 const vp9_variance_fn_t highbd_variance32x16_c = vp9_highbd_variance32x16_c;
1010 const vp9_variance_fn_t highbd_variance32x32_c = vp9_highbd_variance32x32_c;
1011 const vp9_variance_fn_t highbd_variance32x64_c = vp9_highbd_variance32x64_c;
1012 const vp9_variance_fn_t highbd_variance64x32_c = vp9_highbd_variance64x32_c;
1013 const vp9_variance_fn_t highbd_variance64x64_c = vp9_highbd_variance64x64_c;
1014 INSTANTIATE_TEST_CASE_P(
1015 C, VP9VarianceHighTest,
1016 ::testing::Values(make_tuple(2, 2, highbd_10_variance4x4_c, 10),
1017 make_tuple(2, 3, highbd_10_variance4x8_c, 10),
1018 make_tuple(3, 2, highbd_10_variance8x4_c, 10),
1019 make_tuple(3, 3, highbd_10_variance8x8_c, 10),
1020 make_tuple(3, 4, highbd_10_variance8x16_c, 10),
1021 make_tuple(4, 3, highbd_10_variance16x8_c, 10),
1022 make_tuple(4, 4, highbd_10_variance16x16_c, 10),
1023 make_tuple(4, 5, highbd_10_variance16x32_c, 10),
1024 make_tuple(5, 4, highbd_10_variance32x16_c, 10),
1025 make_tuple(5, 5, highbd_10_variance32x32_c, 10),
1026 make_tuple(5, 6, highbd_10_variance32x64_c, 10),
1027 make_tuple(6, 5, highbd_10_variance64x32_c, 10),
1028 make_tuple(6, 6, highbd_10_variance64x64_c, 10),
1029 make_tuple(2, 2, highbd_12_variance4x4_c, 12),
1030 make_tuple(2, 3, highbd_12_variance4x8_c, 12),
1031 make_tuple(3, 2, highbd_12_variance8x4_c, 12),
1032 make_tuple(3, 3, highbd_12_variance8x8_c, 12),
1033 make_tuple(3, 4, highbd_12_variance8x16_c, 12),
1034 make_tuple(4, 3, highbd_12_variance16x8_c, 12),
1035 make_tuple(4, 4, highbd_12_variance16x16_c, 12),
1036 make_tuple(4, 5, highbd_12_variance16x32_c, 12),
1037 make_tuple(5, 4, highbd_12_variance32x16_c, 12),
1038 make_tuple(5, 5, highbd_12_variance32x32_c, 12),
1039 make_tuple(5, 6, highbd_12_variance32x64_c, 12),
1040 make_tuple(6, 5, highbd_12_variance64x32_c, 12),
1041 make_tuple(6, 6, highbd_12_variance64x64_c, 12),
1042 make_tuple(2, 2, highbd_variance4x4_c, 8),
1043 make_tuple(2, 3, highbd_variance4x8_c, 8),
1044 make_tuple(3, 2, highbd_variance8x4_c, 8),
1045 make_tuple(3, 3, highbd_variance8x8_c, 8),
1046 make_tuple(3, 4, highbd_variance8x16_c, 8),
1047 make_tuple(4, 3, highbd_variance16x8_c, 8),
1048 make_tuple(4, 4, highbd_variance16x16_c, 8),
1049 make_tuple(4, 5, highbd_variance16x32_c, 8),
1050 make_tuple(5, 4, highbd_variance32x16_c, 8),
1051 make_tuple(5, 5, highbd_variance32x32_c, 8),
1052 make_tuple(5, 6, highbd_variance32x64_c, 8),
1053 make_tuple(6, 5, highbd_variance64x32_c, 8),
1054 make_tuple(6, 6, highbd_variance64x64_c, 8)));
1055 #endif // CONFIG_VP9_HIGHBITDEPTH
586 const vp9_subpixvariance_fn_t subpel_variance4x4_c = 1056 const vp9_subpixvariance_fn_t subpel_variance4x4_c =
587 vp9_sub_pixel_variance4x4_c; 1057 vp9_sub_pixel_variance4x4_c;
588 const vp9_subpixvariance_fn_t subpel_variance4x8_c = 1058 const vp9_subpixvariance_fn_t subpel_variance4x8_c =
589 vp9_sub_pixel_variance4x8_c; 1059 vp9_sub_pixel_variance4x8_c;
590 const vp9_subpixvariance_fn_t subpel_variance8x4_c = 1060 const vp9_subpixvariance_fn_t subpel_variance8x4_c =
591 vp9_sub_pixel_variance8x4_c; 1061 vp9_sub_pixel_variance8x4_c;
592 const vp9_subpixvariance_fn_t subpel_variance8x8_c = 1062 const vp9_subpixvariance_fn_t subpel_variance8x8_c =
593 vp9_sub_pixel_variance8x8_c; 1063 vp9_sub_pixel_variance8x8_c;
594 const vp9_subpixvariance_fn_t subpel_variance8x16_c = 1064 const vp9_subpixvariance_fn_t subpel_variance8x16_c =
595 vp9_sub_pixel_variance8x16_c; 1065 vp9_sub_pixel_variance8x16_c;
596 const vp9_subpixvariance_fn_t subpel_variance16x8_c = 1066 const vp9_subpixvariance_fn_t subpel_variance16x8_c =
597 vp9_sub_pixel_variance16x8_c; 1067 vp9_sub_pixel_variance16x8_c;
598 const vp9_subpixvariance_fn_t subpel_variance16x16_c = 1068 const vp9_subpixvariance_fn_t subpel_variance16x16_c =
599 vp9_sub_pixel_variance16x16_c; 1069 vp9_sub_pixel_variance16x16_c;
600 const vp9_subpixvariance_fn_t subpel_variance16x32_c = 1070 const vp9_subpixvariance_fn_t subpel_variance16x32_c =
601 vp9_sub_pixel_variance16x32_c; 1071 vp9_sub_pixel_variance16x32_c;
602 const vp9_subpixvariance_fn_t subpel_variance32x16_c = 1072 const vp9_subpixvariance_fn_t subpel_variance32x16_c =
603 vp9_sub_pixel_variance32x16_c; 1073 vp9_sub_pixel_variance32x16_c;
604 const vp9_subpixvariance_fn_t subpel_variance32x32_c = 1074 const vp9_subpixvariance_fn_t subpel_variance32x32_c =
605 vp9_sub_pixel_variance32x32_c; 1075 vp9_sub_pixel_variance32x32_c;
606 const vp9_subpixvariance_fn_t subpel_variance32x64_c = 1076 const vp9_subpixvariance_fn_t subpel_variance32x64_c =
607 vp9_sub_pixel_variance32x64_c; 1077 vp9_sub_pixel_variance32x64_c;
608 const vp9_subpixvariance_fn_t subpel_variance64x32_c = 1078 const vp9_subpixvariance_fn_t subpel_variance64x32_c =
609 vp9_sub_pixel_variance64x32_c; 1079 vp9_sub_pixel_variance64x32_c;
610 const vp9_subpixvariance_fn_t subpel_variance64x64_c = 1080 const vp9_subpixvariance_fn_t subpel_variance64x64_c =
611 vp9_sub_pixel_variance64x64_c; 1081 vp9_sub_pixel_variance64x64_c;
612 INSTANTIATE_TEST_CASE_P( 1082 INSTANTIATE_TEST_CASE_P(
613 C, VP9SubpelVarianceTest, 1083 C, VP9SubpelVarianceTest,
614 ::testing::Values(make_tuple(2, 2, subpel_variance4x4_c), 1084 ::testing::Values(make_tuple(2, 2, subpel_variance4x4_c, 0),
615 make_tuple(2, 3, subpel_variance4x8_c), 1085 make_tuple(2, 3, subpel_variance4x8_c, 0),
616 make_tuple(3, 2, subpel_variance8x4_c), 1086 make_tuple(3, 2, subpel_variance8x4_c, 0),
617 make_tuple(3, 3, subpel_variance8x8_c), 1087 make_tuple(3, 3, subpel_variance8x8_c, 0),
618 make_tuple(3, 4, subpel_variance8x16_c), 1088 make_tuple(3, 4, subpel_variance8x16_c, 0),
619 make_tuple(4, 3, subpel_variance16x8_c), 1089 make_tuple(4, 3, subpel_variance16x8_c, 0),
620 make_tuple(4, 4, subpel_variance16x16_c), 1090 make_tuple(4, 4, subpel_variance16x16_c, 0),
621 make_tuple(4, 5, subpel_variance16x32_c), 1091 make_tuple(4, 5, subpel_variance16x32_c, 0),
622 make_tuple(5, 4, subpel_variance32x16_c), 1092 make_tuple(5, 4, subpel_variance32x16_c, 0),
623 make_tuple(5, 5, subpel_variance32x32_c), 1093 make_tuple(5, 5, subpel_variance32x32_c, 0),
624 make_tuple(5, 6, subpel_variance32x64_c), 1094 make_tuple(5, 6, subpel_variance32x64_c, 0),
625 make_tuple(6, 5, subpel_variance64x32_c), 1095 make_tuple(6, 5, subpel_variance64x32_c, 0),
626 make_tuple(6, 6, subpel_variance64x64_c))); 1096 make_tuple(6, 6, subpel_variance64x64_c, 0)));
627
628 const vp9_subp_avg_variance_fn_t subpel_avg_variance4x4_c = 1097 const vp9_subp_avg_variance_fn_t subpel_avg_variance4x4_c =
629 vp9_sub_pixel_avg_variance4x4_c; 1098 vp9_sub_pixel_avg_variance4x4_c;
630 const vp9_subp_avg_variance_fn_t subpel_avg_variance4x8_c = 1099 const vp9_subp_avg_variance_fn_t subpel_avg_variance4x8_c =
631 vp9_sub_pixel_avg_variance4x8_c; 1100 vp9_sub_pixel_avg_variance4x8_c;
632 const vp9_subp_avg_variance_fn_t subpel_avg_variance8x4_c = 1101 const vp9_subp_avg_variance_fn_t subpel_avg_variance8x4_c =
633 vp9_sub_pixel_avg_variance8x4_c; 1102 vp9_sub_pixel_avg_variance8x4_c;
634 const vp9_subp_avg_variance_fn_t subpel_avg_variance8x8_c = 1103 const vp9_subp_avg_variance_fn_t subpel_avg_variance8x8_c =
635 vp9_sub_pixel_avg_variance8x8_c; 1104 vp9_sub_pixel_avg_variance8x8_c;
636 const vp9_subp_avg_variance_fn_t subpel_avg_variance8x16_c = 1105 const vp9_subp_avg_variance_fn_t subpel_avg_variance8x16_c =
637 vp9_sub_pixel_avg_variance8x16_c; 1106 vp9_sub_pixel_avg_variance8x16_c;
638 const vp9_subp_avg_variance_fn_t subpel_avg_variance16x8_c = 1107 const vp9_subp_avg_variance_fn_t subpel_avg_variance16x8_c =
639 vp9_sub_pixel_avg_variance16x8_c; 1108 vp9_sub_pixel_avg_variance16x8_c;
640 const vp9_subp_avg_variance_fn_t subpel_avg_variance16x16_c = 1109 const vp9_subp_avg_variance_fn_t subpel_avg_variance16x16_c =
641 vp9_sub_pixel_avg_variance16x16_c; 1110 vp9_sub_pixel_avg_variance16x16_c;
642 const vp9_subp_avg_variance_fn_t subpel_avg_variance16x32_c = 1111 const vp9_subp_avg_variance_fn_t subpel_avg_variance16x32_c =
643 vp9_sub_pixel_avg_variance16x32_c; 1112 vp9_sub_pixel_avg_variance16x32_c;
644 const vp9_subp_avg_variance_fn_t subpel_avg_variance32x16_c = 1113 const vp9_subp_avg_variance_fn_t subpel_avg_variance32x16_c =
645 vp9_sub_pixel_avg_variance32x16_c; 1114 vp9_sub_pixel_avg_variance32x16_c;
646 const vp9_subp_avg_variance_fn_t subpel_avg_variance32x32_c = 1115 const vp9_subp_avg_variance_fn_t subpel_avg_variance32x32_c =
647 vp9_sub_pixel_avg_variance32x32_c; 1116 vp9_sub_pixel_avg_variance32x32_c;
648 const vp9_subp_avg_variance_fn_t subpel_avg_variance32x64_c = 1117 const vp9_subp_avg_variance_fn_t subpel_avg_variance32x64_c =
649 vp9_sub_pixel_avg_variance32x64_c; 1118 vp9_sub_pixel_avg_variance32x64_c;
650 const vp9_subp_avg_variance_fn_t subpel_avg_variance64x32_c = 1119 const vp9_subp_avg_variance_fn_t subpel_avg_variance64x32_c =
651 vp9_sub_pixel_avg_variance64x32_c; 1120 vp9_sub_pixel_avg_variance64x32_c;
652 const vp9_subp_avg_variance_fn_t subpel_avg_variance64x64_c = 1121 const vp9_subp_avg_variance_fn_t subpel_avg_variance64x64_c =
653 vp9_sub_pixel_avg_variance64x64_c; 1122 vp9_sub_pixel_avg_variance64x64_c;
654 INSTANTIATE_TEST_CASE_P( 1123 INSTANTIATE_TEST_CASE_P(
655 C, VP9SubpelAvgVarianceTest, 1124 C, VP9SubpelAvgVarianceTest,
656 ::testing::Values(make_tuple(2, 2, subpel_avg_variance4x4_c), 1125 ::testing::Values(make_tuple(2, 2, subpel_avg_variance4x4_c, 0),
657 make_tuple(2, 3, subpel_avg_variance4x8_c), 1126 make_tuple(2, 3, subpel_avg_variance4x8_c, 0),
658 make_tuple(3, 2, subpel_avg_variance8x4_c), 1127 make_tuple(3, 2, subpel_avg_variance8x4_c, 0),
659 make_tuple(3, 3, subpel_avg_variance8x8_c), 1128 make_tuple(3, 3, subpel_avg_variance8x8_c, 0),
660 make_tuple(3, 4, subpel_avg_variance8x16_c), 1129 make_tuple(3, 4, subpel_avg_variance8x16_c, 0),
661 make_tuple(4, 3, subpel_avg_variance16x8_c), 1130 make_tuple(4, 3, subpel_avg_variance16x8_c, 0),
662 make_tuple(4, 4, subpel_avg_variance16x16_c), 1131 make_tuple(4, 4, subpel_avg_variance16x16_c, 0),
663 make_tuple(4, 5, subpel_avg_variance16x32_c), 1132 make_tuple(4, 5, subpel_avg_variance16x32_c, 0),
664 make_tuple(5, 4, subpel_avg_variance32x16_c), 1133 make_tuple(5, 4, subpel_avg_variance32x16_c, 0),
665 make_tuple(5, 5, subpel_avg_variance32x32_c), 1134 make_tuple(5, 5, subpel_avg_variance32x32_c, 0),
666 make_tuple(5, 6, subpel_avg_variance32x64_c), 1135 make_tuple(5, 6, subpel_avg_variance32x64_c, 0),
667 make_tuple(6, 5, subpel_avg_variance64x32_c), 1136 make_tuple(6, 5, subpel_avg_variance64x32_c, 0),
668 make_tuple(6, 6, subpel_avg_variance64x64_c))); 1137 make_tuple(6, 6, subpel_avg_variance64x64_c, 0)));
1138 #if CONFIG_VP9_HIGHBITDEPTH
1139 const vp9_subpixvariance_fn_t highbd_10_subpel_variance4x4_c =
1140 vp9_highbd_10_sub_pixel_variance4x4_c;
1141 const vp9_subpixvariance_fn_t highbd_10_subpel_variance4x8_c =
1142 vp9_highbd_10_sub_pixel_variance4x8_c;
1143 const vp9_subpixvariance_fn_t highbd_10_subpel_variance8x4_c =
1144 vp9_highbd_10_sub_pixel_variance8x4_c;
1145 const vp9_subpixvariance_fn_t highbd_10_subpel_variance8x8_c =
1146 vp9_highbd_10_sub_pixel_variance8x8_c;
1147 const vp9_subpixvariance_fn_t highbd_10_subpel_variance8x16_c =
1148 vp9_highbd_10_sub_pixel_variance8x16_c;
1149 const vp9_subpixvariance_fn_t highbd_10_subpel_variance16x8_c =
1150 vp9_highbd_10_sub_pixel_variance16x8_c;
1151 const vp9_subpixvariance_fn_t highbd_10_subpel_variance16x16_c =
1152 vp9_highbd_10_sub_pixel_variance16x16_c;
1153 const vp9_subpixvariance_fn_t highbd_10_subpel_variance16x32_c =
1154 vp9_highbd_10_sub_pixel_variance16x32_c;
1155 const vp9_subpixvariance_fn_t highbd_10_subpel_variance32x16_c =
1156 vp9_highbd_10_sub_pixel_variance32x16_c;
1157 const vp9_subpixvariance_fn_t highbd_10_subpel_variance32x32_c =
1158 vp9_highbd_10_sub_pixel_variance32x32_c;
1159 const vp9_subpixvariance_fn_t highbd_10_subpel_variance32x64_c =
1160 vp9_highbd_10_sub_pixel_variance32x64_c;
1161 const vp9_subpixvariance_fn_t highbd_10_subpel_variance64x32_c =
1162 vp9_highbd_10_sub_pixel_variance64x32_c;
1163 const vp9_subpixvariance_fn_t highbd_10_subpel_variance64x64_c =
1164 vp9_highbd_10_sub_pixel_variance64x64_c;
1165 const vp9_subpixvariance_fn_t highbd_12_subpel_variance4x4_c =
1166 vp9_highbd_12_sub_pixel_variance4x4_c;
1167 const vp9_subpixvariance_fn_t highbd_12_subpel_variance4x8_c =
1168 vp9_highbd_12_sub_pixel_variance4x8_c;
1169 const vp9_subpixvariance_fn_t highbd_12_subpel_variance8x4_c =
1170 vp9_highbd_12_sub_pixel_variance8x4_c;
1171 const vp9_subpixvariance_fn_t highbd_12_subpel_variance8x8_c =
1172 vp9_highbd_12_sub_pixel_variance8x8_c;
1173 const vp9_subpixvariance_fn_t highbd_12_subpel_variance8x16_c =
1174 vp9_highbd_12_sub_pixel_variance8x16_c;
1175 const vp9_subpixvariance_fn_t highbd_12_subpel_variance16x8_c =
1176 vp9_highbd_12_sub_pixel_variance16x8_c;
1177 const vp9_subpixvariance_fn_t highbd_12_subpel_variance16x16_c =
1178 vp9_highbd_12_sub_pixel_variance16x16_c;
1179 const vp9_subpixvariance_fn_t highbd_12_subpel_variance16x32_c =
1180 vp9_highbd_12_sub_pixel_variance16x32_c;
1181 const vp9_subpixvariance_fn_t highbd_12_subpel_variance32x16_c =
1182 vp9_highbd_12_sub_pixel_variance32x16_c;
1183 const vp9_subpixvariance_fn_t highbd_12_subpel_variance32x32_c =
1184 vp9_highbd_12_sub_pixel_variance32x32_c;
1185 const vp9_subpixvariance_fn_t highbd_12_subpel_variance32x64_c =
1186 vp9_highbd_12_sub_pixel_variance32x64_c;
1187 const vp9_subpixvariance_fn_t highbd_12_subpel_variance64x32_c =
1188 vp9_highbd_12_sub_pixel_variance64x32_c;
1189 const vp9_subpixvariance_fn_t highbd_12_subpel_variance64x64_c =
1190 vp9_highbd_12_sub_pixel_variance64x64_c;
1191 const vp9_subpixvariance_fn_t highbd_subpel_variance4x4_c =
1192 vp9_highbd_sub_pixel_variance4x4_c;
1193 const vp9_subpixvariance_fn_t highbd_subpel_variance4x8_c =
1194 vp9_highbd_sub_pixel_variance4x8_c;
1195 const vp9_subpixvariance_fn_t highbd_subpel_variance8x4_c =
1196 vp9_highbd_sub_pixel_variance8x4_c;
1197 const vp9_subpixvariance_fn_t highbd_subpel_variance8x8_c =
1198 vp9_highbd_sub_pixel_variance8x8_c;
1199 const vp9_subpixvariance_fn_t highbd_subpel_variance8x16_c =
1200 vp9_highbd_sub_pixel_variance8x16_c;
1201 const vp9_subpixvariance_fn_t highbd_subpel_variance16x8_c =
1202 vp9_highbd_sub_pixel_variance16x8_c;
1203 const vp9_subpixvariance_fn_t highbd_subpel_variance16x16_c =
1204 vp9_highbd_sub_pixel_variance16x16_c;
1205 const vp9_subpixvariance_fn_t highbd_subpel_variance16x32_c =
1206 vp9_highbd_sub_pixel_variance16x32_c;
1207 const vp9_subpixvariance_fn_t highbd_subpel_variance32x16_c =
1208 vp9_highbd_sub_pixel_variance32x16_c;
1209 const vp9_subpixvariance_fn_t highbd_subpel_variance32x32_c =
1210 vp9_highbd_sub_pixel_variance32x32_c;
1211 const vp9_subpixvariance_fn_t highbd_subpel_variance32x64_c =
1212 vp9_highbd_sub_pixel_variance32x64_c;
1213 const vp9_subpixvariance_fn_t highbd_subpel_variance64x32_c =
1214 vp9_highbd_sub_pixel_variance64x32_c;
1215 const vp9_subpixvariance_fn_t highbd_subpel_variance64x64_c =
1216 vp9_highbd_sub_pixel_variance64x64_c;
1217 INSTANTIATE_TEST_CASE_P(
1218 C, VP9SubpelVarianceHighTest,
1219 ::testing::Values(make_tuple(2, 2, highbd_10_subpel_variance4x4_c, 10),
1220 make_tuple(2, 3, highbd_10_subpel_variance4x8_c, 10),
1221 make_tuple(3, 2, highbd_10_subpel_variance8x4_c, 10),
1222 make_tuple(3, 3, highbd_10_subpel_variance8x8_c, 10),
1223 make_tuple(3, 4, highbd_10_subpel_variance8x16_c, 10),
1224 make_tuple(4, 3, highbd_10_subpel_variance16x8_c, 10),
1225 make_tuple(4, 4, highbd_10_subpel_variance16x16_c, 10),
1226 make_tuple(4, 5, highbd_10_subpel_variance16x32_c, 10),
1227 make_tuple(5, 4, highbd_10_subpel_variance32x16_c, 10),
1228 make_tuple(5, 5, highbd_10_subpel_variance32x32_c, 10),
1229 make_tuple(5, 6, highbd_10_subpel_variance32x64_c, 10),
1230 make_tuple(6, 5, highbd_10_subpel_variance64x32_c, 10),
1231 make_tuple(6, 6, highbd_10_subpel_variance64x64_c, 10),
1232 make_tuple(2, 2, highbd_12_subpel_variance4x4_c, 12),
1233 make_tuple(2, 3, highbd_12_subpel_variance4x8_c, 12),
1234 make_tuple(3, 2, highbd_12_subpel_variance8x4_c, 12),
1235 make_tuple(3, 3, highbd_12_subpel_variance8x8_c, 12),
1236 make_tuple(3, 4, highbd_12_subpel_variance8x16_c, 12),
1237 make_tuple(4, 3, highbd_12_subpel_variance16x8_c, 12),
1238 make_tuple(4, 4, highbd_12_subpel_variance16x16_c, 12),
1239 make_tuple(4, 5, highbd_12_subpel_variance16x32_c, 12),
1240 make_tuple(5, 4, highbd_12_subpel_variance32x16_c, 12),
1241 make_tuple(5, 5, highbd_12_subpel_variance32x32_c, 12),
1242 make_tuple(5, 6, highbd_12_subpel_variance32x64_c, 12),
1243 make_tuple(6, 5, highbd_12_subpel_variance64x32_c, 12),
1244 make_tuple(6, 6, highbd_12_subpel_variance64x64_c, 12),
1245 make_tuple(2, 2, highbd_subpel_variance4x4_c, 8),
1246 make_tuple(2, 3, highbd_subpel_variance4x8_c, 8),
1247 make_tuple(3, 2, highbd_subpel_variance8x4_c, 8),
1248 make_tuple(3, 3, highbd_subpel_variance8x8_c, 8),
1249 make_tuple(3, 4, highbd_subpel_variance8x16_c, 8),
1250 make_tuple(4, 3, highbd_subpel_variance16x8_c, 8),
1251 make_tuple(4, 4, highbd_subpel_variance16x16_c, 8),
1252 make_tuple(4, 5, highbd_subpel_variance16x32_c, 8),
1253 make_tuple(5, 4, highbd_subpel_variance32x16_c, 8),
1254 make_tuple(5, 5, highbd_subpel_variance32x32_c, 8),
1255 make_tuple(5, 6, highbd_subpel_variance32x64_c, 8),
1256 make_tuple(6, 5, highbd_subpel_variance64x32_c, 8),
1257 make_tuple(6, 6, highbd_subpel_variance64x64_c, 8)));
1258 const vp9_subp_avg_variance_fn_t highbd_10_subpel_avg_variance4x4_c =
1259 vp9_highbd_10_sub_pixel_avg_variance4x4_c;
1260 const vp9_subp_avg_variance_fn_t highbd_10_subpel_avg_variance4x8_c =
1261 vp9_highbd_10_sub_pixel_avg_variance4x8_c;
1262 const vp9_subp_avg_variance_fn_t highbd_10_subpel_avg_variance8x4_c =
1263 vp9_highbd_10_sub_pixel_avg_variance8x4_c;
1264 const vp9_subp_avg_variance_fn_t highbd_10_subpel_avg_variance8x8_c =
1265 vp9_highbd_10_sub_pixel_avg_variance8x8_c;
1266 const vp9_subp_avg_variance_fn_t highbd_10_subpel_avg_variance8x16_c =
1267 vp9_highbd_10_sub_pixel_avg_variance8x16_c;
1268 const vp9_subp_avg_variance_fn_t highbd_10_subpel_avg_variance16x8_c =
1269 vp9_highbd_10_sub_pixel_avg_variance16x8_c;
1270 const vp9_subp_avg_variance_fn_t highbd_10_subpel_avg_variance16x16_c =
1271 vp9_highbd_10_sub_pixel_avg_variance16x16_c;
1272 const vp9_subp_avg_variance_fn_t highbd_10_subpel_avg_variance16x32_c =
1273 vp9_highbd_10_sub_pixel_avg_variance16x32_c;
1274 const vp9_subp_avg_variance_fn_t highbd_10_subpel_avg_variance32x16_c =
1275 vp9_highbd_10_sub_pixel_avg_variance32x16_c;
1276 const vp9_subp_avg_variance_fn_t highbd_10_subpel_avg_variance32x32_c =
1277 vp9_highbd_10_sub_pixel_avg_variance32x32_c;
1278 const vp9_subp_avg_variance_fn_t highbd_10_subpel_avg_variance32x64_c =
1279 vp9_highbd_10_sub_pixel_avg_variance32x64_c;
1280 const vp9_subp_avg_variance_fn_t highbd_10_subpel_avg_variance64x32_c =
1281 vp9_highbd_10_sub_pixel_avg_variance64x32_c;
1282 const vp9_subp_avg_variance_fn_t highbd_10_subpel_avg_variance64x64_c =
1283 vp9_highbd_10_sub_pixel_avg_variance64x64_c;
1284 const vp9_subp_avg_variance_fn_t highbd_12_subpel_avg_variance4x4_c =
1285 vp9_highbd_12_sub_pixel_avg_variance4x4_c;
1286 const vp9_subp_avg_variance_fn_t highbd_12_subpel_avg_variance4x8_c =
1287 vp9_highbd_12_sub_pixel_avg_variance4x8_c;
1288 const vp9_subp_avg_variance_fn_t highbd_12_subpel_avg_variance8x4_c =
1289 vp9_highbd_12_sub_pixel_avg_variance8x4_c;
1290 const vp9_subp_avg_variance_fn_t highbd_12_subpel_avg_variance8x8_c =
1291 vp9_highbd_12_sub_pixel_avg_variance8x8_c;
1292 const vp9_subp_avg_variance_fn_t highbd_12_subpel_avg_variance8x16_c =
1293 vp9_highbd_12_sub_pixel_avg_variance8x16_c;
1294 const vp9_subp_avg_variance_fn_t highbd_12_subpel_avg_variance16x8_c =
1295 vp9_highbd_12_sub_pixel_avg_variance16x8_c;
1296 const vp9_subp_avg_variance_fn_t highbd_12_subpel_avg_variance16x16_c =
1297 vp9_highbd_12_sub_pixel_avg_variance16x16_c;
1298 const vp9_subp_avg_variance_fn_t highbd_12_subpel_avg_variance16x32_c =
1299 vp9_highbd_12_sub_pixel_avg_variance16x32_c;
1300 const vp9_subp_avg_variance_fn_t highbd_12_subpel_avg_variance32x16_c =
1301 vp9_highbd_12_sub_pixel_avg_variance32x16_c;
1302 const vp9_subp_avg_variance_fn_t highbd_12_subpel_avg_variance32x32_c =
1303 vp9_highbd_12_sub_pixel_avg_variance32x32_c;
1304 const vp9_subp_avg_variance_fn_t highbd_12_subpel_avg_variance32x64_c =
1305 vp9_highbd_12_sub_pixel_avg_variance32x64_c;
1306 const vp9_subp_avg_variance_fn_t highbd_12_subpel_avg_variance64x32_c =
1307 vp9_highbd_12_sub_pixel_avg_variance64x32_c;
1308 const vp9_subp_avg_variance_fn_t highbd_12_subpel_avg_variance64x64_c =
1309 vp9_highbd_12_sub_pixel_avg_variance64x64_c;
1310 const vp9_subp_avg_variance_fn_t highbd_subpel_avg_variance4x4_c =
1311 vp9_highbd_sub_pixel_avg_variance4x4_c;
1312 const vp9_subp_avg_variance_fn_t highbd_subpel_avg_variance4x8_c =
1313 vp9_highbd_sub_pixel_avg_variance4x8_c;
1314 const vp9_subp_avg_variance_fn_t highbd_subpel_avg_variance8x4_c =
1315 vp9_highbd_sub_pixel_avg_variance8x4_c;
1316 const vp9_subp_avg_variance_fn_t highbd_subpel_avg_variance8x8_c =
1317 vp9_highbd_sub_pixel_avg_variance8x8_c;
1318 const vp9_subp_avg_variance_fn_t highbd_subpel_avg_variance8x16_c =
1319 vp9_highbd_sub_pixel_avg_variance8x16_c;
1320 const vp9_subp_avg_variance_fn_t highbd_subpel_avg_variance16x8_c =
1321 vp9_highbd_sub_pixel_avg_variance16x8_c;
1322 const vp9_subp_avg_variance_fn_t highbd_subpel_avg_variance16x16_c =
1323 vp9_highbd_sub_pixel_avg_variance16x16_c;
1324 const vp9_subp_avg_variance_fn_t highbd_subpel_avg_variance16x32_c =
1325 vp9_highbd_sub_pixel_avg_variance16x32_c;
1326 const vp9_subp_avg_variance_fn_t highbd_subpel_avg_variance32x16_c =
1327 vp9_highbd_sub_pixel_avg_variance32x16_c;
1328 const vp9_subp_avg_variance_fn_t highbd_subpel_avg_variance32x32_c =
1329 vp9_highbd_sub_pixel_avg_variance32x32_c;
1330 const vp9_subp_avg_variance_fn_t highbd_subpel_avg_variance32x64_c =
1331 vp9_highbd_sub_pixel_avg_variance32x64_c;
1332 const vp9_subp_avg_variance_fn_t highbd_subpel_avg_variance64x32_c =
1333 vp9_highbd_sub_pixel_avg_variance64x32_c;
1334 const vp9_subp_avg_variance_fn_t highbd_subpel_avg_variance64x64_c =
1335 vp9_highbd_sub_pixel_avg_variance64x64_c;
1336 INSTANTIATE_TEST_CASE_P(
1337 C, VP9SubpelAvgVarianceHighTest,
1338 ::testing::Values(
1339 make_tuple(2, 2, highbd_10_subpel_avg_variance4x4_c, 10),
1340 make_tuple(2, 3, highbd_10_subpel_avg_variance4x8_c, 10),
1341 make_tuple(3, 2, highbd_10_subpel_avg_variance8x4_c, 10),
1342 make_tuple(3, 3, highbd_10_subpel_avg_variance8x8_c, 10),
1343 make_tuple(3, 4, highbd_10_subpel_avg_variance8x16_c, 10),
1344 make_tuple(4, 3, highbd_10_subpel_avg_variance16x8_c, 10),
1345 make_tuple(4, 4, highbd_10_subpel_avg_variance16x16_c, 10),
1346 make_tuple(4, 5, highbd_10_subpel_avg_variance16x32_c, 10),
1347 make_tuple(5, 4, highbd_10_subpel_avg_variance32x16_c, 10),
1348 make_tuple(5, 5, highbd_10_subpel_avg_variance32x32_c, 10),
1349 make_tuple(5, 6, highbd_10_subpel_avg_variance32x64_c, 10),
1350 make_tuple(6, 5, highbd_10_subpel_avg_variance64x32_c, 10),
1351 make_tuple(6, 6, highbd_10_subpel_avg_variance64x64_c, 10),
1352 make_tuple(2, 2, highbd_12_subpel_avg_variance4x4_c, 12),
1353 make_tuple(2, 3, highbd_12_subpel_avg_variance4x8_c, 12),
1354 make_tuple(3, 2, highbd_12_subpel_avg_variance8x4_c, 12),
1355 make_tuple(3, 3, highbd_12_subpel_avg_variance8x8_c, 12),
1356 make_tuple(3, 4, highbd_12_subpel_avg_variance8x16_c, 12),
1357 make_tuple(4, 3, highbd_12_subpel_avg_variance16x8_c, 12),
1358 make_tuple(4, 4, highbd_12_subpel_avg_variance16x16_c, 12),
1359 make_tuple(4, 5, highbd_12_subpel_avg_variance16x32_c, 12),
1360 make_tuple(5, 4, highbd_12_subpel_avg_variance32x16_c, 12),
1361 make_tuple(5, 5, highbd_12_subpel_avg_variance32x32_c, 12),
1362 make_tuple(5, 6, highbd_12_subpel_avg_variance32x64_c, 12),
1363 make_tuple(6, 5, highbd_12_subpel_avg_variance64x32_c, 12),
1364 make_tuple(6, 6, highbd_12_subpel_avg_variance64x64_c, 12),
1365 make_tuple(2, 2, highbd_subpel_avg_variance4x4_c, 8),
1366 make_tuple(2, 3, highbd_subpel_avg_variance4x8_c, 8),
1367 make_tuple(3, 2, highbd_subpel_avg_variance8x4_c, 8),
1368 make_tuple(3, 3, highbd_subpel_avg_variance8x8_c, 8),
1369 make_tuple(3, 4, highbd_subpel_avg_variance8x16_c, 8),
1370 make_tuple(4, 3, highbd_subpel_avg_variance16x8_c, 8),
1371 make_tuple(4, 4, highbd_subpel_avg_variance16x16_c, 8),
1372 make_tuple(4, 5, highbd_subpel_avg_variance16x32_c, 8),
1373 make_tuple(5, 4, highbd_subpel_avg_variance32x16_c, 8),
1374 make_tuple(5, 5, highbd_subpel_avg_variance32x32_c, 8),
1375 make_tuple(5, 6, highbd_subpel_avg_variance32x64_c, 8),
1376 make_tuple(6, 5, highbd_subpel_avg_variance64x32_c, 8),
1377 make_tuple(6, 6, highbd_subpel_avg_variance64x64_c, 8)));
1378 #endif // CONFIG_VP9_HIGHBITDEPTH
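The high-bit-depth rows above exercise kernels that read 16-bit samples in the range [0, (1 << bit_depth) - 1]. A rough sketch of what such a kernel computes (an assumed helper, not the reference used by the fixture earlier in this file, and ignoring the narrowing and rounding the production 10- and 12-bit kernels apply to keep totals within 32 bits):

#include <stdint.h>

// Naive high-bit-depth variance reference: variance = SSE - sum^2 / N.
static unsigned int ExampleHighbdVarianceRef(const uint16_t *src,
                                             const uint16_t *ref,
                                             int width, int height, int stride,
                                             unsigned int *sse) {
  int64_t sum = 0;
  uint64_t sse64 = 0;
  for (int y = 0; y < height; ++y) {
    for (int x = 0; x < width; ++x) {
      const int diff = src[y * stride + x] - ref[y * stride + x];
      sum += diff;
      sse64 += static_cast<uint64_t>(static_cast<int64_t>(diff) * diff);
    }
  }
  *sse = static_cast<unsigned int>(sse64);
  return static_cast<unsigned int>(
      sse64 - static_cast<uint64_t>(sum * sum) / (width * height));
}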
669 1379
670 #if HAVE_SSE2 1380 #if HAVE_SSE2
671 #if CONFIG_USE_X86INC 1381 #if CONFIG_USE_X86INC
672
673 INSTANTIATE_TEST_CASE_P(SSE2, SumOfSquaresTest, 1382 INSTANTIATE_TEST_CASE_P(SSE2, SumOfSquaresTest,
674 ::testing::Values(vp9_get_mb_ss_sse2)); 1383 ::testing::Values(vp9_get_mb_ss_sse2));
675 1384
676 const vp9_variance_fn_t variance4x4_sse2 = vp9_variance4x4_sse2; 1385 const vp9_variance_fn_t variance4x4_sse2 = vp9_variance4x4_sse2;
677 const vp9_variance_fn_t variance4x8_sse2 = vp9_variance4x8_sse2; 1386 const vp9_variance_fn_t variance4x8_sse2 = vp9_variance4x8_sse2;
678 const vp9_variance_fn_t variance8x4_sse2 = vp9_variance8x4_sse2; 1387 const vp9_variance_fn_t variance8x4_sse2 = vp9_variance8x4_sse2;
679 const vp9_variance_fn_t variance8x8_sse2 = vp9_variance8x8_sse2; 1388 const vp9_variance_fn_t variance8x8_sse2 = vp9_variance8x8_sse2;
680 const vp9_variance_fn_t variance8x16_sse2 = vp9_variance8x16_sse2; 1389 const vp9_variance_fn_t variance8x16_sse2 = vp9_variance8x16_sse2;
681 const vp9_variance_fn_t variance16x8_sse2 = vp9_variance16x8_sse2; 1390 const vp9_variance_fn_t variance16x8_sse2 = vp9_variance16x8_sse2;
682 const vp9_variance_fn_t variance16x16_sse2 = vp9_variance16x16_sse2; 1391 const vp9_variance_fn_t variance16x16_sse2 = vp9_variance16x16_sse2;
683 const vp9_variance_fn_t variance16x32_sse2 = vp9_variance16x32_sse2; 1392 const vp9_variance_fn_t variance16x32_sse2 = vp9_variance16x32_sse2;
684 const vp9_variance_fn_t variance32x16_sse2 = vp9_variance32x16_sse2; 1393 const vp9_variance_fn_t variance32x16_sse2 = vp9_variance32x16_sse2;
685 const vp9_variance_fn_t variance32x32_sse2 = vp9_variance32x32_sse2; 1394 const vp9_variance_fn_t variance32x32_sse2 = vp9_variance32x32_sse2;
686 const vp9_variance_fn_t variance32x64_sse2 = vp9_variance32x64_sse2; 1395 const vp9_variance_fn_t variance32x64_sse2 = vp9_variance32x64_sse2;
687 const vp9_variance_fn_t variance64x32_sse2 = vp9_variance64x32_sse2; 1396 const vp9_variance_fn_t variance64x32_sse2 = vp9_variance64x32_sse2;
688 const vp9_variance_fn_t variance64x64_sse2 = vp9_variance64x64_sse2; 1397 const vp9_variance_fn_t variance64x64_sse2 = vp9_variance64x64_sse2;
689 INSTANTIATE_TEST_CASE_P( 1398 INSTANTIATE_TEST_CASE_P(
690 SSE2, VP9VarianceTest, 1399 SSE2, VP9VarianceTest,
691 ::testing::Values(make_tuple(2, 2, variance4x4_sse2), 1400 ::testing::Values(make_tuple(2, 2, variance4x4_sse2, 0),
692 make_tuple(2, 3, variance4x8_sse2), 1401 make_tuple(2, 3, variance4x8_sse2, 0),
693 make_tuple(3, 2, variance8x4_sse2), 1402 make_tuple(3, 2, variance8x4_sse2, 0),
694 make_tuple(3, 3, variance8x8_sse2), 1403 make_tuple(3, 3, variance8x8_sse2, 0),
695 make_tuple(3, 4, variance8x16_sse2), 1404 make_tuple(3, 4, variance8x16_sse2, 0),
696 make_tuple(4, 3, variance16x8_sse2), 1405 make_tuple(4, 3, variance16x8_sse2, 0),
697 make_tuple(4, 4, variance16x16_sse2), 1406 make_tuple(4, 4, variance16x16_sse2, 0),
698 make_tuple(4, 5, variance16x32_sse2), 1407 make_tuple(4, 5, variance16x32_sse2, 0),
699 make_tuple(5, 4, variance32x16_sse2), 1408 make_tuple(5, 4, variance32x16_sse2, 0),
700 make_tuple(5, 5, variance32x32_sse2), 1409 make_tuple(5, 5, variance32x32_sse2, 0),
701 make_tuple(5, 6, variance32x64_sse2), 1410 make_tuple(5, 6, variance32x64_sse2, 0),
702 make_tuple(6, 5, variance64x32_sse2), 1411 make_tuple(6, 5, variance64x32_sse2, 0),
703 make_tuple(6, 6, variance64x64_sse2))); 1412 make_tuple(6, 6, variance64x64_sse2, 0)));
704
705 const vp9_subpixvariance_fn_t subpel_variance4x4_sse = 1413 const vp9_subpixvariance_fn_t subpel_variance4x4_sse =
706 vp9_sub_pixel_variance4x4_sse; 1414 vp9_sub_pixel_variance4x4_sse;
707 const vp9_subpixvariance_fn_t subpel_variance4x8_sse = 1415 const vp9_subpixvariance_fn_t subpel_variance4x8_sse =
708 vp9_sub_pixel_variance4x8_sse; 1416 vp9_sub_pixel_variance4x8_sse;
709 const vp9_subpixvariance_fn_t subpel_variance8x4_sse2 = 1417 const vp9_subpixvariance_fn_t subpel_variance8x4_sse2 =
710 vp9_sub_pixel_variance8x4_sse2; 1418 vp9_sub_pixel_variance8x4_sse2;
711 const vp9_subpixvariance_fn_t subpel_variance8x8_sse2 = 1419 const vp9_subpixvariance_fn_t subpel_variance8x8_sse2 =
712 vp9_sub_pixel_variance8x8_sse2; 1420 vp9_sub_pixel_variance8x8_sse2;
713 const vp9_subpixvariance_fn_t subpel_variance8x16_sse2 = 1421 const vp9_subpixvariance_fn_t subpel_variance8x16_sse2 =
714 vp9_sub_pixel_variance8x16_sse2; 1422 vp9_sub_pixel_variance8x16_sse2;
715 const vp9_subpixvariance_fn_t subpel_variance16x8_sse2 = 1423 const vp9_subpixvariance_fn_t subpel_variance16x8_sse2 =
716 vp9_sub_pixel_variance16x8_sse2; 1424 vp9_sub_pixel_variance16x8_sse2;
717 const vp9_subpixvariance_fn_t subpel_variance16x16_sse2 = 1425 const vp9_subpixvariance_fn_t subpel_variance16x16_sse2 =
718 vp9_sub_pixel_variance16x16_sse2; 1426 vp9_sub_pixel_variance16x16_sse2;
719 const vp9_subpixvariance_fn_t subpel_variance16x32_sse2 = 1427 const vp9_subpixvariance_fn_t subpel_variance16x32_sse2 =
720 vp9_sub_pixel_variance16x32_sse2; 1428 vp9_sub_pixel_variance16x32_sse2;
721 const vp9_subpixvariance_fn_t subpel_variance32x16_sse2 = 1429 const vp9_subpixvariance_fn_t subpel_variance32x16_sse2 =
722 vp9_sub_pixel_variance32x16_sse2; 1430 vp9_sub_pixel_variance32x16_sse2;
723 const vp9_subpixvariance_fn_t subpel_variance32x32_sse2 = 1431 const vp9_subpixvariance_fn_t subpel_variance32x32_sse2 =
724 vp9_sub_pixel_variance32x32_sse2; 1432 vp9_sub_pixel_variance32x32_sse2;
725 const vp9_subpixvariance_fn_t subpel_variance32x64_sse2 = 1433 const vp9_subpixvariance_fn_t subpel_variance32x64_sse2 =
726 vp9_sub_pixel_variance32x64_sse2; 1434 vp9_sub_pixel_variance32x64_sse2;
727 const vp9_subpixvariance_fn_t subpel_variance64x32_sse2 = 1435 const vp9_subpixvariance_fn_t subpel_variance64x32_sse2 =
728 vp9_sub_pixel_variance64x32_sse2; 1436 vp9_sub_pixel_variance64x32_sse2;
729 const vp9_subpixvariance_fn_t subpel_variance64x64_sse2 = 1437 const vp9_subpixvariance_fn_t subpel_variance64x64_sse2 =
730 vp9_sub_pixel_variance64x64_sse2; 1438 vp9_sub_pixel_variance64x64_sse2;
731 INSTANTIATE_TEST_CASE_P( 1439 INSTANTIATE_TEST_CASE_P(
732 SSE2, VP9SubpelVarianceTest, 1440 SSE2, VP9SubpelVarianceTest,
733 ::testing::Values(make_tuple(2, 2, subpel_variance4x4_sse), 1441 ::testing::Values(make_tuple(2, 2, subpel_variance4x4_sse, 0),
734 make_tuple(2, 3, subpel_variance4x8_sse), 1442 make_tuple(2, 3, subpel_variance4x8_sse, 0),
735 make_tuple(3, 2, subpel_variance8x4_sse2), 1443 make_tuple(3, 2, subpel_variance8x4_sse2, 0),
736 make_tuple(3, 3, subpel_variance8x8_sse2), 1444 make_tuple(3, 3, subpel_variance8x8_sse2, 0),
737 make_tuple(3, 4, subpel_variance8x16_sse2), 1445 make_tuple(3, 4, subpel_variance8x16_sse2, 0),
738 make_tuple(4, 3, subpel_variance16x8_sse2), 1446 make_tuple(4, 3, subpel_variance16x8_sse2, 0),
739 make_tuple(4, 4, subpel_variance16x16_sse2), 1447 make_tuple(4, 4, subpel_variance16x16_sse2, 0),
740 make_tuple(4, 5, subpel_variance16x32_sse2), 1448 make_tuple(4, 5, subpel_variance16x32_sse2, 0),
741 make_tuple(5, 4, subpel_variance32x16_sse2), 1449 make_tuple(5, 4, subpel_variance32x16_sse2, 0),
742 make_tuple(5, 5, subpel_variance32x32_sse2), 1450 make_tuple(5, 5, subpel_variance32x32_sse2, 0),
743 make_tuple(5, 6, subpel_variance32x64_sse2), 1451 make_tuple(5, 6, subpel_variance32x64_sse2, 0),
744 make_tuple(6, 5, subpel_variance64x32_sse2), 1452 make_tuple(6, 5, subpel_variance64x32_sse2, 0),
745 make_tuple(6, 6, subpel_variance64x64_sse2))); 1453 make_tuple(6, 6, subpel_variance64x64_sse2, 0)));
746
747 const vp9_subp_avg_variance_fn_t subpel_avg_variance4x4_sse = 1454 const vp9_subp_avg_variance_fn_t subpel_avg_variance4x4_sse =
748 vp9_sub_pixel_avg_variance4x4_sse; 1455 vp9_sub_pixel_avg_variance4x4_sse;
749 const vp9_subp_avg_variance_fn_t subpel_avg_variance4x8_sse = 1456 const vp9_subp_avg_variance_fn_t subpel_avg_variance4x8_sse =
750 vp9_sub_pixel_avg_variance4x8_sse; 1457 vp9_sub_pixel_avg_variance4x8_sse;
751 const vp9_subp_avg_variance_fn_t subpel_avg_variance8x4_sse2 = 1458 const vp9_subp_avg_variance_fn_t subpel_avg_variance8x4_sse2 =
752 vp9_sub_pixel_avg_variance8x4_sse2; 1459 vp9_sub_pixel_avg_variance8x4_sse2;
753 const vp9_subp_avg_variance_fn_t subpel_avg_variance8x8_sse2 = 1460 const vp9_subp_avg_variance_fn_t subpel_avg_variance8x8_sse2 =
754 vp9_sub_pixel_avg_variance8x8_sse2; 1461 vp9_sub_pixel_avg_variance8x8_sse2;
755 const vp9_subp_avg_variance_fn_t subpel_avg_variance8x16_sse2 = 1462 const vp9_subp_avg_variance_fn_t subpel_avg_variance8x16_sse2 =
756 vp9_sub_pixel_avg_variance8x16_sse2; 1463 vp9_sub_pixel_avg_variance8x16_sse2;
757 const vp9_subp_avg_variance_fn_t subpel_avg_variance16x8_sse2 = 1464 const vp9_subp_avg_variance_fn_t subpel_avg_variance16x8_sse2 =
758 vp9_sub_pixel_avg_variance16x8_sse2; 1465 vp9_sub_pixel_avg_variance16x8_sse2;
759 const vp9_subp_avg_variance_fn_t subpel_avg_variance16x16_sse2 = 1466 const vp9_subp_avg_variance_fn_t subpel_avg_variance16x16_sse2 =
760 vp9_sub_pixel_avg_variance16x16_sse2; 1467 vp9_sub_pixel_avg_variance16x16_sse2;
761 const vp9_subp_avg_variance_fn_t subpel_avg_variance16x32_sse2 = 1468 const vp9_subp_avg_variance_fn_t subpel_avg_variance16x32_sse2 =
762 vp9_sub_pixel_avg_variance16x32_sse2; 1469 vp9_sub_pixel_avg_variance16x32_sse2;
763 const vp9_subp_avg_variance_fn_t subpel_avg_variance32x16_sse2 = 1470 const vp9_subp_avg_variance_fn_t subpel_avg_variance32x16_sse2 =
764 vp9_sub_pixel_avg_variance32x16_sse2; 1471 vp9_sub_pixel_avg_variance32x16_sse2;
765 const vp9_subp_avg_variance_fn_t subpel_avg_variance32x32_sse2 = 1472 const vp9_subp_avg_variance_fn_t subpel_avg_variance32x32_sse2 =
766 vp9_sub_pixel_avg_variance32x32_sse2; 1473 vp9_sub_pixel_avg_variance32x32_sse2;
767 const vp9_subp_avg_variance_fn_t subpel_avg_variance32x64_sse2 = 1474 const vp9_subp_avg_variance_fn_t subpel_avg_variance32x64_sse2 =
768 vp9_sub_pixel_avg_variance32x64_sse2; 1475 vp9_sub_pixel_avg_variance32x64_sse2;
769 const vp9_subp_avg_variance_fn_t subpel_avg_variance64x32_sse2 = 1476 const vp9_subp_avg_variance_fn_t subpel_avg_variance64x32_sse2 =
770 vp9_sub_pixel_avg_variance64x32_sse2; 1477 vp9_sub_pixel_avg_variance64x32_sse2;
771 const vp9_subp_avg_variance_fn_t subpel_avg_variance64x64_sse2 = 1478 const vp9_subp_avg_variance_fn_t subpel_avg_variance64x64_sse2 =
772 vp9_sub_pixel_avg_variance64x64_sse2; 1479 vp9_sub_pixel_avg_variance64x64_sse2;
773 INSTANTIATE_TEST_CASE_P( 1480 INSTANTIATE_TEST_CASE_P(
774 SSE2, VP9SubpelAvgVarianceTest, 1481 SSE2, VP9SubpelAvgVarianceTest,
775 ::testing::Values(make_tuple(2, 2, subpel_avg_variance4x4_sse), 1482 ::testing::Values(make_tuple(2, 2, subpel_avg_variance4x4_sse, 0),
776 make_tuple(2, 3, subpel_avg_variance4x8_sse), 1483 make_tuple(2, 3, subpel_avg_variance4x8_sse, 0),
777 make_tuple(3, 2, subpel_avg_variance8x4_sse2), 1484 make_tuple(3, 2, subpel_avg_variance8x4_sse2, 0),
778 make_tuple(3, 3, subpel_avg_variance8x8_sse2), 1485 make_tuple(3, 3, subpel_avg_variance8x8_sse2, 0),
779 make_tuple(3, 4, subpel_avg_variance8x16_sse2), 1486 make_tuple(3, 4, subpel_avg_variance8x16_sse2, 0),
780 make_tuple(4, 3, subpel_avg_variance16x8_sse2), 1487 make_tuple(4, 3, subpel_avg_variance16x8_sse2, 0),
781 make_tuple(4, 4, subpel_avg_variance16x16_sse2), 1488 make_tuple(4, 4, subpel_avg_variance16x16_sse2, 0),
782 make_tuple(4, 5, subpel_avg_variance16x32_sse2), 1489 make_tuple(4, 5, subpel_avg_variance16x32_sse2, 0),
783 make_tuple(5, 4, subpel_avg_variance32x16_sse2), 1490 make_tuple(5, 4, subpel_avg_variance32x16_sse2, 0),
784 make_tuple(5, 5, subpel_avg_variance32x32_sse2), 1491 make_tuple(5, 5, subpel_avg_variance32x32_sse2, 0),
785 make_tuple(5, 6, subpel_avg_variance32x64_sse2), 1492 make_tuple(5, 6, subpel_avg_variance32x64_sse2, 0),
786 make_tuple(6, 5, subpel_avg_variance64x32_sse2), 1493 make_tuple(6, 5, subpel_avg_variance64x32_sse2, 0),
787 make_tuple(6, 6, subpel_avg_variance64x64_sse2))); 1494 make_tuple(6, 6, subpel_avg_variance64x64_sse2, 0)));
788 #endif 1495 #if CONFIG_VP9_HIGHBITDEPTH
789 #endif 1496 const vp9_variance_fn_t highbd_variance8x8_sse2 = vp9_highbd_variance8x8_sse2;
790 1497 const vp9_variance_fn_t highbd_10_variance8x8_sse2 =
1498 vp9_highbd_10_variance8x8_sse2;
1499 const vp9_variance_fn_t highbd_12_variance8x8_sse2 =
1500 vp9_highbd_12_variance8x8_sse2;
1501 const vp9_variance_fn_t highbd_variance8x16_sse2 = vp9_highbd_variance8x16_sse2;
1502 const vp9_variance_fn_t highbd_10_variance8x16_sse2 =
1503 vp9_highbd_10_variance8x16_sse2;
1504 const vp9_variance_fn_t highbd_12_variance8x16_sse2 =
1505 vp9_highbd_12_variance8x16_sse2;
1506 const vp9_variance_fn_t highbd_variance16x8_sse2 =
1507 vp9_highbd_variance16x8_sse2;
1508 const vp9_variance_fn_t highbd_10_variance16x8_sse2 =
1509 vp9_highbd_10_variance16x8_sse2;
1510 const vp9_variance_fn_t highbd_12_variance16x8_sse2 =
1511 vp9_highbd_12_variance16x8_sse2;
1512 const vp9_variance_fn_t highbd_variance16x16_sse2 =
1513 vp9_highbd_variance16x16_sse2;
1514 const vp9_variance_fn_t highbd_10_variance16x16_sse2 =
1515 vp9_highbd_10_variance16x16_sse2;
1516 const vp9_variance_fn_t highbd_12_variance16x16_sse2 =
1517 vp9_highbd_12_variance16x16_sse2;
1518 const vp9_variance_fn_t highbd_variance16x32_sse2 =
1519 vp9_highbd_variance16x32_sse2;
1520 const vp9_variance_fn_t highbd_10_variance16x32_sse2 =
1521 vp9_highbd_10_variance16x32_sse2;
1522 const vp9_variance_fn_t highbd_12_variance16x32_sse2 =
1523 vp9_highbd_12_variance16x32_sse2;
1524 const vp9_variance_fn_t highbd_variance32x16_sse2 =
1525 vp9_highbd_variance32x16_sse2;
1526 const vp9_variance_fn_t highbd_10_variance32x16_sse2 =
1527 vp9_highbd_10_variance32x16_sse2;
1528 const vp9_variance_fn_t highbd_12_variance32x16_sse2 =
1529 vp9_highbd_12_variance32x16_sse2;
1530 const vp9_variance_fn_t highbd_variance32x32_sse2 =
1531 vp9_highbd_variance32x32_sse2;
1532 const vp9_variance_fn_t highbd_10_variance32x32_sse2 =
1533 vp9_highbd_10_variance32x32_sse2;
1534 const vp9_variance_fn_t highbd_12_variance32x32_sse2 =
1535 vp9_highbd_12_variance32x32_sse2;
1536 const vp9_variance_fn_t highbd_variance32x64_sse2 =
1537 vp9_highbd_variance32x64_sse2;
1538 const vp9_variance_fn_t highbd_10_variance32x64_sse2 =
1539 vp9_highbd_10_variance32x64_sse2;
1540 const vp9_variance_fn_t highbd_12_variance32x64_sse2 =
1541 vp9_highbd_12_variance32x64_sse2;
1542 const vp9_variance_fn_t highbd_variance64x32_sse2 =
1543 vp9_highbd_variance64x32_sse2;
1544 const vp9_variance_fn_t highbd_10_variance64x32_sse2 =
1545 vp9_highbd_10_variance64x32_sse2;
1546 const vp9_variance_fn_t highbd_12_variance64x32_sse2 =
1547 vp9_highbd_12_variance64x32_sse2;
1548 const vp9_variance_fn_t highbd_variance64x64_sse2 =
1549 vp9_highbd_variance64x64_sse2;
1550 const vp9_variance_fn_t highbd_10_variance64x64_sse2 =
1551 vp9_highbd_10_variance64x64_sse2;
1552 const vp9_variance_fn_t highbd_12_variance64x64_sse2 =
1553 vp9_highbd_12_variance64x64_sse2;
1554 INSTANTIATE_TEST_CASE_P(
1555 SSE2, VP9VarianceHighTest,
1556 ::testing::Values(make_tuple(3, 3, highbd_10_variance8x8_sse2, 10),
1557 make_tuple(3, 4, highbd_10_variance8x16_sse2, 10),
1558 make_tuple(4, 3, highbd_10_variance16x8_sse2, 10),
1559 make_tuple(4, 4, highbd_10_variance16x16_sse2, 10),
1560 make_tuple(4, 5, highbd_10_variance16x32_sse2, 10),
1561 make_tuple(5, 4, highbd_10_variance32x16_sse2, 10),
1562 make_tuple(5, 5, highbd_10_variance32x32_sse2, 10),
1563 make_tuple(5, 6, highbd_10_variance32x64_sse2, 10),
1564 make_tuple(6, 5, highbd_10_variance64x32_sse2, 10),
1565 make_tuple(6, 6, highbd_10_variance64x64_sse2, 10),
1566 make_tuple(3, 3, highbd_12_variance8x8_sse2, 12),
1567 make_tuple(3, 4, highbd_12_variance8x16_sse2, 12),
1568 make_tuple(4, 3, highbd_12_variance16x8_sse2, 12),
1569 make_tuple(4, 4, highbd_12_variance16x16_sse2, 12),
1570 make_tuple(4, 5, highbd_12_variance16x32_sse2, 12),
1571 make_tuple(5, 4, highbd_12_variance32x16_sse2, 12),
1572 make_tuple(5, 5, highbd_12_variance32x32_sse2, 12),
1573 make_tuple(5, 6, highbd_12_variance32x64_sse2, 12),
1574 make_tuple(6, 5, highbd_12_variance64x32_sse2, 12),
1575 make_tuple(6, 6, highbd_12_variance64x64_sse2, 12),
1576 make_tuple(3, 3, highbd_variance8x8_sse2, 8),
1577 make_tuple(3, 4, highbd_variance8x16_sse2, 8),
1578 make_tuple(4, 3, highbd_variance16x8_sse2, 8),
1579 make_tuple(4, 4, highbd_variance16x16_sse2, 8),
1580 make_tuple(4, 5, highbd_variance16x32_sse2, 8),
1581 make_tuple(5, 4, highbd_variance32x16_sse2, 8),
1582 make_tuple(5, 5, highbd_variance32x32_sse2, 8),
1583 make_tuple(5, 6, highbd_variance32x64_sse2, 8),
1584 make_tuple(6, 5, highbd_variance64x32_sse2, 8),
1585 make_tuple(6, 6, highbd_variance64x64_sse2, 8)));
1586 const vp9_subpixvariance_fn_t highbd_subpel_variance8x4_sse2 =
1587 vp9_highbd_sub_pixel_variance8x4_sse2;
1588 const vp9_subpixvariance_fn_t highbd_subpel_variance8x8_sse2 =
1589 vp9_highbd_sub_pixel_variance8x8_sse2;
1590 const vp9_subpixvariance_fn_t highbd_subpel_variance8x16_sse2 =
1591 vp9_highbd_sub_pixel_variance8x16_sse2;
1592 const vp9_subpixvariance_fn_t highbd_subpel_variance16x8_sse2 =
1593 vp9_highbd_sub_pixel_variance16x8_sse2;
1594 const vp9_subpixvariance_fn_t highbd_subpel_variance16x16_sse2 =
1595 vp9_highbd_sub_pixel_variance16x16_sse2;
1596 const vp9_subpixvariance_fn_t highbd_subpel_variance16x32_sse2 =
1597 vp9_highbd_sub_pixel_variance16x32_sse2;
1598 const vp9_subpixvariance_fn_t highbd_subpel_variance32x16_sse2 =
1599 vp9_highbd_sub_pixel_variance32x16_sse2;
1600 const vp9_subpixvariance_fn_t highbd_subpel_variance32x32_sse2 =
1601 vp9_highbd_sub_pixel_variance32x32_sse2;
1602 const vp9_subpixvariance_fn_t highbd_subpel_variance32x64_sse2 =
1603 vp9_highbd_sub_pixel_variance32x64_sse2;
1604 const vp9_subpixvariance_fn_t highbd_subpel_variance64x32_sse2 =
1605 vp9_highbd_sub_pixel_variance64x32_sse2;
1606 const vp9_subpixvariance_fn_t highbd_subpel_variance64x64_sse2 =
1607 vp9_highbd_sub_pixel_variance64x64_sse2;
1608 const vp9_subpixvariance_fn_t highbd_10_subpel_variance8x4_sse2 =
1609 vp9_highbd_10_sub_pixel_variance8x4_sse2;
1610 const vp9_subpixvariance_fn_t highbd_10_subpel_variance8x8_sse2 =
1611 vp9_highbd_10_sub_pixel_variance8x8_sse2;
1612 const vp9_subpixvariance_fn_t highbd_10_subpel_variance8x16_sse2 =
1613 vp9_highbd_10_sub_pixel_variance8x16_sse2;
1614 const vp9_subpixvariance_fn_t highbd_10_subpel_variance16x8_sse2 =
1615 vp9_highbd_10_sub_pixel_variance16x8_sse2;
1616 const vp9_subpixvariance_fn_t highbd_10_subpel_variance16x16_sse2 =
1617 vp9_highbd_10_sub_pixel_variance16x16_sse2;
1618 const vp9_subpixvariance_fn_t highbd_10_subpel_variance16x32_sse2 =
1619 vp9_highbd_10_sub_pixel_variance16x32_sse2;
1620 const vp9_subpixvariance_fn_t highbd_10_subpel_variance32x16_sse2 =
1621 vp9_highbd_10_sub_pixel_variance32x16_sse2;
1622 const vp9_subpixvariance_fn_t highbd_10_subpel_variance32x32_sse2 =
1623 vp9_highbd_10_sub_pixel_variance32x32_sse2;
1624 const vp9_subpixvariance_fn_t highbd_10_subpel_variance32x64_sse2 =
1625 vp9_highbd_10_sub_pixel_variance32x64_sse2;
1626 const vp9_subpixvariance_fn_t highbd_10_subpel_variance64x32_sse2 =
1627 vp9_highbd_10_sub_pixel_variance64x32_sse2;
1628 const vp9_subpixvariance_fn_t highbd_10_subpel_variance64x64_sse2 =
1629 vp9_highbd_10_sub_pixel_variance64x64_sse2;
1630 const vp9_subpixvariance_fn_t highbd_12_subpel_variance8x4_sse2 =
1631 vp9_highbd_12_sub_pixel_variance8x4_sse2;
1632 const vp9_subpixvariance_fn_t highbd_12_subpel_variance8x8_sse2 =
1633 vp9_highbd_12_sub_pixel_variance8x8_sse2;
1634 const vp9_subpixvariance_fn_t highbd_12_subpel_variance8x16_sse2 =
1635 vp9_highbd_12_sub_pixel_variance8x16_sse2;
1636 const vp9_subpixvariance_fn_t highbd_12_subpel_variance16x8_sse2 =
1637 vp9_highbd_12_sub_pixel_variance16x8_sse2;
1638 const vp9_subpixvariance_fn_t highbd_12_subpel_variance16x16_sse2 =
1639 vp9_highbd_12_sub_pixel_variance16x16_sse2;
1640 const vp9_subpixvariance_fn_t highbd_12_subpel_variance16x32_sse2 =
1641 vp9_highbd_12_sub_pixel_variance16x32_sse2;
1642 const vp9_subpixvariance_fn_t highbd_12_subpel_variance32x16_sse2 =
1643 vp9_highbd_12_sub_pixel_variance32x16_sse2;
1644 const vp9_subpixvariance_fn_t highbd_12_subpel_variance32x32_sse2 =
1645 vp9_highbd_12_sub_pixel_variance32x32_sse2;
1646 const vp9_subpixvariance_fn_t highbd_12_subpel_variance32x64_sse2 =
1647 vp9_highbd_12_sub_pixel_variance32x64_sse2;
1648 const vp9_subpixvariance_fn_t highbd_12_subpel_variance64x32_sse2 =
1649 vp9_highbd_12_sub_pixel_variance64x32_sse2;
1650 const vp9_subpixvariance_fn_t highbd_12_subpel_variance64x64_sse2 =
1651 vp9_highbd_12_sub_pixel_variance64x64_sse2;
1652 INSTANTIATE_TEST_CASE_P(
1653 SSE2, VP9SubpelVarianceHighTest,
1654 ::testing::Values(make_tuple(3, 2, highbd_10_subpel_variance8x4_sse2, 10),
1655 make_tuple(3, 3, highbd_10_subpel_variance8x8_sse2, 10),
1656 make_tuple(3, 4, highbd_10_subpel_variance8x16_sse2, 10),
1657 make_tuple(4, 3, highbd_10_subpel_variance16x8_sse2, 10),
1658 make_tuple(4, 4, highbd_10_subpel_variance16x16_sse2, 10),
1659 make_tuple(4, 5, highbd_10_subpel_variance16x32_sse2, 10),
1660 make_tuple(5, 4, highbd_10_subpel_variance32x16_sse2, 10),
1661 make_tuple(5, 5, highbd_10_subpel_variance32x32_sse2, 10),
1662 make_tuple(5, 6, highbd_10_subpel_variance32x64_sse2, 10),
1663 make_tuple(6, 5, highbd_10_subpel_variance64x32_sse2, 10),
1664 make_tuple(6, 6, highbd_10_subpel_variance64x64_sse2, 10),
1665 make_tuple(3, 2, highbd_12_subpel_variance8x4_sse2, 12),
1666 make_tuple(3, 3, highbd_12_subpel_variance8x8_sse2, 12),
1667 make_tuple(3, 4, highbd_12_subpel_variance8x16_sse2, 12),
1668 make_tuple(4, 3, highbd_12_subpel_variance16x8_sse2, 12),
1669 make_tuple(4, 4, highbd_12_subpel_variance16x16_sse2, 12),
1670 make_tuple(4, 5, highbd_12_subpel_variance16x32_sse2, 12),
1671 make_tuple(5, 4, highbd_12_subpel_variance32x16_sse2, 12),
1672 make_tuple(5, 5, highbd_12_subpel_variance32x32_sse2, 12),
1673 make_tuple(5, 6, highbd_12_subpel_variance32x64_sse2, 12),
1674 make_tuple(6, 5, highbd_12_subpel_variance64x32_sse2, 12),
1675 make_tuple(6, 6, highbd_12_subpel_variance64x64_sse2, 12),
1676 make_tuple(3, 2, highbd_subpel_variance8x4_sse2, 8),
1677 make_tuple(3, 3, highbd_subpel_variance8x8_sse2, 8),
1678 make_tuple(3, 4, highbd_subpel_variance8x16_sse2, 8),
1679 make_tuple(4, 3, highbd_subpel_variance16x8_sse2, 8),
1680 make_tuple(4, 4, highbd_subpel_variance16x16_sse2, 8),
1681 make_tuple(4, 5, highbd_subpel_variance16x32_sse2, 8),
1682 make_tuple(5, 4, highbd_subpel_variance32x16_sse2, 8),
1683 make_tuple(5, 5, highbd_subpel_variance32x32_sse2, 8),
1684 make_tuple(5, 6, highbd_subpel_variance32x64_sse2, 8),
1685 make_tuple(6, 5, highbd_subpel_variance64x32_sse2, 8),
1686 make_tuple(6, 6, highbd_subpel_variance64x64_sse2, 8)));
1687 const vp9_subp_avg_variance_fn_t highbd_subpel_avg_variance8x4_sse2 =
1688 vp9_highbd_sub_pixel_avg_variance8x4_sse2;
1689 const vp9_subp_avg_variance_fn_t highbd_subpel_avg_variance8x8_sse2 =
1690 vp9_highbd_sub_pixel_avg_variance8x8_sse2;
1691 const vp9_subp_avg_variance_fn_t highbd_subpel_avg_variance8x16_sse2 =
1692 vp9_highbd_sub_pixel_avg_variance8x16_sse2;
1693 const vp9_subp_avg_variance_fn_t highbd_subpel_avg_variance16x8_sse2 =
1694 vp9_highbd_sub_pixel_avg_variance16x8_sse2;
1695 const vp9_subp_avg_variance_fn_t highbd_subpel_avg_variance16x16_sse2 =
1696 vp9_highbd_sub_pixel_avg_variance16x16_sse2;
1697 const vp9_subp_avg_variance_fn_t highbd_subpel_avg_variance16x32_sse2 =
1698 vp9_highbd_sub_pixel_avg_variance16x32_sse2;
1699 const vp9_subp_avg_variance_fn_t highbd_subpel_avg_variance32x16_sse2 =
1700 vp9_highbd_sub_pixel_avg_variance32x16_sse2;
1701 const vp9_subp_avg_variance_fn_t highbd_subpel_avg_variance32x32_sse2 =
1702 vp9_highbd_sub_pixel_avg_variance32x32_sse2;
1703 const vp9_subp_avg_variance_fn_t highbd_subpel_avg_variance32x64_sse2 =
1704 vp9_highbd_sub_pixel_avg_variance32x64_sse2;
1705 const vp9_subp_avg_variance_fn_t highbd_subpel_avg_variance64x32_sse2 =
1706 vp9_highbd_sub_pixel_avg_variance64x32_sse2;
1707 const vp9_subp_avg_variance_fn_t highbd_subpel_avg_variance64x64_sse2 =
1708 vp9_highbd_sub_pixel_avg_variance64x64_sse2;
1709 const vp9_subp_avg_variance_fn_t highbd_10_subpel_avg_variance8x4_sse2 =
1710 vp9_highbd_10_sub_pixel_avg_variance8x4_sse2;
1711 const vp9_subp_avg_variance_fn_t highbd_10_subpel_avg_variance8x8_sse2 =
1712 vp9_highbd_10_sub_pixel_avg_variance8x8_sse2;
1713 const vp9_subp_avg_variance_fn_t highbd_10_subpel_avg_variance8x16_sse2 =
1714 vp9_highbd_10_sub_pixel_avg_variance8x16_sse2;
1715 const vp9_subp_avg_variance_fn_t highbd_10_subpel_avg_variance16x8_sse2 =
1716 vp9_highbd_10_sub_pixel_avg_variance16x8_sse2;
1717 const vp9_subp_avg_variance_fn_t highbd_10_subpel_avg_variance16x16_sse2 =
1718 vp9_highbd_10_sub_pixel_avg_variance16x16_sse2;
1719 const vp9_subp_avg_variance_fn_t highbd_10_subpel_avg_variance16x32_sse2 =
1720 vp9_highbd_10_sub_pixel_avg_variance16x32_sse2;
1721 const vp9_subp_avg_variance_fn_t highbd_10_subpel_avg_variance32x16_sse2 =
1722 vp9_highbd_10_sub_pixel_avg_variance32x16_sse2;
1723 const vp9_subp_avg_variance_fn_t highbd_10_subpel_avg_variance32x32_sse2 =
1724 vp9_highbd_10_sub_pixel_avg_variance32x32_sse2;
1725 const vp9_subp_avg_variance_fn_t highbd_10_subpel_avg_variance32x64_sse2 =
1726 vp9_highbd_10_sub_pixel_avg_variance32x64_sse2;
1727 const vp9_subp_avg_variance_fn_t highbd_10_subpel_avg_variance64x32_sse2 =
1728 vp9_highbd_10_sub_pixel_avg_variance64x32_sse2;
1729 const vp9_subp_avg_variance_fn_t highbd_10_subpel_avg_variance64x64_sse2 =
1730 vp9_highbd_10_sub_pixel_avg_variance64x64_sse2;
1731 const vp9_subp_avg_variance_fn_t highbd_12_subpel_avg_variance8x4_sse2 =
1732 vp9_highbd_12_sub_pixel_avg_variance8x4_sse2;
1733 const vp9_subp_avg_variance_fn_t highbd_12_subpel_avg_variance8x8_sse2 =
1734 vp9_highbd_12_sub_pixel_avg_variance8x8_sse2;
1735 const vp9_subp_avg_variance_fn_t highbd_12_subpel_avg_variance8x16_sse2 =
1736 vp9_highbd_12_sub_pixel_avg_variance8x16_sse2;
1737 const vp9_subp_avg_variance_fn_t highbd_12_subpel_avg_variance16x8_sse2 =
1738 vp9_highbd_12_sub_pixel_avg_variance16x8_sse2;
1739 const vp9_subp_avg_variance_fn_t highbd_12_subpel_avg_variance16x16_sse2 =
1740 vp9_highbd_12_sub_pixel_avg_variance16x16_sse2;
1741 const vp9_subp_avg_variance_fn_t highbd_12_subpel_avg_variance16x32_sse2 =
1742 vp9_highbd_12_sub_pixel_avg_variance16x32_sse2;
1743 const vp9_subp_avg_variance_fn_t highbd_12_subpel_avg_variance32x16_sse2 =
1744 vp9_highbd_12_sub_pixel_avg_variance32x16_sse2;
1745 const vp9_subp_avg_variance_fn_t highbd_12_subpel_avg_variance32x32_sse2 =
1746 vp9_highbd_12_sub_pixel_avg_variance32x32_sse2;
1747 const vp9_subp_avg_variance_fn_t highbd_12_subpel_avg_variance32x64_sse2 =
1748 vp9_highbd_12_sub_pixel_avg_variance32x64_sse2;
1749 const vp9_subp_avg_variance_fn_t highbd_12_subpel_avg_variance64x32_sse2 =
1750 vp9_highbd_12_sub_pixel_avg_variance64x32_sse2;
1751 const vp9_subp_avg_variance_fn_t highbd_12_subpel_avg_variance64x64_sse2 =
1752 vp9_highbd_12_sub_pixel_avg_variance64x64_sse2;
1753 INSTANTIATE_TEST_CASE_P(
1754 SSE2, VP9SubpelAvgVarianceHighTest,
1755 ::testing::Values(
1756 make_tuple(3, 2, highbd_10_subpel_avg_variance8x4_sse2, 10),
1757 make_tuple(3, 3, highbd_10_subpel_avg_variance8x8_sse2, 10),
1758 make_tuple(3, 4, highbd_10_subpel_avg_variance8x16_sse2, 10),
1759 make_tuple(4, 3, highbd_10_subpel_avg_variance16x8_sse2, 10),
1760 make_tuple(4, 4, highbd_10_subpel_avg_variance16x16_sse2, 10),
1761 make_tuple(4, 5, highbd_10_subpel_avg_variance16x32_sse2, 10),
1762 make_tuple(5, 4, highbd_10_subpel_avg_variance32x16_sse2, 10),
1763 make_tuple(5, 5, highbd_10_subpel_avg_variance32x32_sse2, 10),
1764 make_tuple(5, 6, highbd_10_subpel_avg_variance32x64_sse2, 10),
1765 make_tuple(6, 5, highbd_10_subpel_avg_variance64x32_sse2, 10),
1766 make_tuple(6, 6, highbd_10_subpel_avg_variance64x64_sse2, 10),
1767 make_tuple(3, 2, highbd_12_subpel_avg_variance8x4_sse2, 12),
1768 make_tuple(3, 3, highbd_12_subpel_avg_variance8x8_sse2, 12),
1769 make_tuple(3, 4, highbd_12_subpel_avg_variance8x16_sse2, 12),
1770 make_tuple(4, 3, highbd_12_subpel_avg_variance16x8_sse2, 12),
1771 make_tuple(4, 4, highbd_12_subpel_avg_variance16x16_sse2, 12),
1772 make_tuple(4, 5, highbd_12_subpel_avg_variance16x32_sse2, 12),
1773 make_tuple(5, 4, highbd_12_subpel_avg_variance32x16_sse2, 12),
1774 make_tuple(5, 5, highbd_12_subpel_avg_variance32x32_sse2, 12),
1775 make_tuple(5, 6, highbd_12_subpel_avg_variance32x64_sse2, 12),
1776 make_tuple(6, 5, highbd_12_subpel_avg_variance64x32_sse2, 12),
1777 make_tuple(6, 6, highbd_12_subpel_avg_variance64x64_sse2, 12),
1778 make_tuple(3, 2, highbd_subpel_avg_variance8x4_sse2, 8),
1779 make_tuple(3, 3, highbd_subpel_avg_variance8x8_sse2, 8),
1780 make_tuple(3, 4, highbd_subpel_avg_variance8x16_sse2, 8),
1781 make_tuple(4, 3, highbd_subpel_avg_variance16x8_sse2, 8),
1782 make_tuple(4, 4, highbd_subpel_avg_variance16x16_sse2, 8),
1783 make_tuple(4, 5, highbd_subpel_avg_variance16x32_sse2, 8),
1784 make_tuple(5, 4, highbd_subpel_avg_variance32x16_sse2, 8),
1785 make_tuple(5, 5, highbd_subpel_avg_variance32x32_sse2, 8),
1786 make_tuple(5, 6, highbd_subpel_avg_variance32x64_sse2, 8),
1787 make_tuple(6, 5, highbd_subpel_avg_variance64x32_sse2, 8),
1788 make_tuple(6, 6, highbd_subpel_avg_variance64x64_sse2, 8)));
1789 #endif // CONFIG_VP9_HIGHBITDEPTH
1790 #endif // CONFIG_USE_X86INC
1791 #endif // HAVE_SSE2
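The SSE2 rows above and the SSSE3, AVX2 and NEON rows below all reuse the fixtures instantiated for the C rows; the property under test is that every optimized kernel returns exactly the same variance and SSE as a plain C reference on the same input. Conceptually (assumed helper names, not code from this file):

#include <stdint.h>
#include "third_party/googletest/src/include/gtest/gtest.h"

// Assumed stand-in matching the vp9_variance_fn_t shape.
typedef unsigned int (*ExampleVarianceFn)(const uint8_t *src, int src_stride,
                                          const uint8_t *ref, int ref_stride,
                                          unsigned int *sse);

// Both kernels must agree bit-exactly, for the variance and for the SSE.
static void ExampleCheckAgainstReference(ExampleVarianceFn opt,
                                         ExampleVarianceFn ref_c,
                                         const uint8_t *src,
                                         const uint8_t *ref, int stride) {
  unsigned int sse_opt = 0, sse_c = 0;
  const unsigned int var_opt = opt(src, stride, ref, stride, &sse_opt);
  const unsigned int var_c = ref_c(src, stride, ref, stride, &sse_c);
  EXPECT_EQ(var_c, var_opt);
  EXPECT_EQ(sse_c, sse_opt);
}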
791 #if HAVE_SSSE3 1792 #if HAVE_SSSE3
792 #if CONFIG_USE_X86INC 1793 #if CONFIG_USE_X86INC
793 1794
794 const vp9_subpixvariance_fn_t subpel_variance4x4_ssse3 = 1795 const vp9_subpixvariance_fn_t subpel_variance4x4_ssse3 =
795 vp9_sub_pixel_variance4x4_ssse3; 1796 vp9_sub_pixel_variance4x4_ssse3;
796 const vp9_subpixvariance_fn_t subpel_variance4x8_ssse3 = 1797 const vp9_subpixvariance_fn_t subpel_variance4x8_ssse3 =
797 vp9_sub_pixel_variance4x8_ssse3; 1798 vp9_sub_pixel_variance4x8_ssse3;
798 const vp9_subpixvariance_fn_t subpel_variance8x4_ssse3 = 1799 const vp9_subpixvariance_fn_t subpel_variance8x4_ssse3 =
799 vp9_sub_pixel_variance8x4_ssse3; 1800 vp9_sub_pixel_variance8x4_ssse3;
800 const vp9_subpixvariance_fn_t subpel_variance8x8_ssse3 = 1801 const vp9_subpixvariance_fn_t subpel_variance8x8_ssse3 =
(...skipping 11 matching lines...)
812 const vp9_subpixvariance_fn_t subpel_variance32x32_ssse3 = 1813 const vp9_subpixvariance_fn_t subpel_variance32x32_ssse3 =
813 vp9_sub_pixel_variance32x32_ssse3; 1814 vp9_sub_pixel_variance32x32_ssse3;
814 const vp9_subpixvariance_fn_t subpel_variance32x64_ssse3 = 1815 const vp9_subpixvariance_fn_t subpel_variance32x64_ssse3 =
815 vp9_sub_pixel_variance32x64_ssse3; 1816 vp9_sub_pixel_variance32x64_ssse3;
816 const vp9_subpixvariance_fn_t subpel_variance64x32_ssse3 = 1817 const vp9_subpixvariance_fn_t subpel_variance64x32_ssse3 =
817 vp9_sub_pixel_variance64x32_ssse3; 1818 vp9_sub_pixel_variance64x32_ssse3;
818 const vp9_subpixvariance_fn_t subpel_variance64x64_ssse3 = 1819 const vp9_subpixvariance_fn_t subpel_variance64x64_ssse3 =
819 vp9_sub_pixel_variance64x64_ssse3; 1820 vp9_sub_pixel_variance64x64_ssse3;
820 INSTANTIATE_TEST_CASE_P( 1821 INSTANTIATE_TEST_CASE_P(
821 SSSE3, VP9SubpelVarianceTest, 1822 SSSE3, VP9SubpelVarianceTest,
822 ::testing::Values(make_tuple(2, 2, subpel_variance4x4_ssse3), 1823 ::testing::Values(make_tuple(2, 2, subpel_variance4x4_ssse3, 0),
823 make_tuple(2, 3, subpel_variance4x8_ssse3), 1824 make_tuple(2, 3, subpel_variance4x8_ssse3, 0),
824 make_tuple(3, 2, subpel_variance8x4_ssse3), 1825 make_tuple(3, 2, subpel_variance8x4_ssse3, 0),
825 make_tuple(3, 3, subpel_variance8x8_ssse3), 1826 make_tuple(3, 3, subpel_variance8x8_ssse3, 0),
826 make_tuple(3, 4, subpel_variance8x16_ssse3), 1827 make_tuple(3, 4, subpel_variance8x16_ssse3, 0),
827 make_tuple(4, 3, subpel_variance16x8_ssse3), 1828 make_tuple(4, 3, subpel_variance16x8_ssse3, 0),
828 make_tuple(4, 4, subpel_variance16x16_ssse3), 1829 make_tuple(4, 4, subpel_variance16x16_ssse3, 0),
829 make_tuple(4, 5, subpel_variance16x32_ssse3), 1830 make_tuple(4, 5, subpel_variance16x32_ssse3, 0),
830 make_tuple(5, 4, subpel_variance32x16_ssse3), 1831 make_tuple(5, 4, subpel_variance32x16_ssse3, 0),
831 make_tuple(5, 5, subpel_variance32x32_ssse3), 1832 make_tuple(5, 5, subpel_variance32x32_ssse3, 0),
832 make_tuple(5, 6, subpel_variance32x64_ssse3), 1833 make_tuple(5, 6, subpel_variance32x64_ssse3, 0),
833 make_tuple(6, 5, subpel_variance64x32_ssse3), 1834 make_tuple(6, 5, subpel_variance64x32_ssse3, 0),
834 make_tuple(6, 6, subpel_variance64x64_ssse3))); 1835 make_tuple(6, 6, subpel_variance64x64_ssse3, 0)));
835
836 const vp9_subp_avg_variance_fn_t subpel_avg_variance4x4_ssse3 = 1836 const vp9_subp_avg_variance_fn_t subpel_avg_variance4x4_ssse3 =
837 vp9_sub_pixel_avg_variance4x4_ssse3; 1837 vp9_sub_pixel_avg_variance4x4_ssse3;
838 const vp9_subp_avg_variance_fn_t subpel_avg_variance4x8_ssse3 = 1838 const vp9_subp_avg_variance_fn_t subpel_avg_variance4x8_ssse3 =
839 vp9_sub_pixel_avg_variance4x8_ssse3; 1839 vp9_sub_pixel_avg_variance4x8_ssse3;
840 const vp9_subp_avg_variance_fn_t subpel_avg_variance8x4_ssse3 = 1840 const vp9_subp_avg_variance_fn_t subpel_avg_variance8x4_ssse3 =
841 vp9_sub_pixel_avg_variance8x4_ssse3; 1841 vp9_sub_pixel_avg_variance8x4_ssse3;
842 const vp9_subp_avg_variance_fn_t subpel_avg_variance8x8_ssse3 = 1842 const vp9_subp_avg_variance_fn_t subpel_avg_variance8x8_ssse3 =
843 vp9_sub_pixel_avg_variance8x8_ssse3; 1843 vp9_sub_pixel_avg_variance8x8_ssse3;
844 const vp9_subp_avg_variance_fn_t subpel_avg_variance8x16_ssse3 = 1844 const vp9_subp_avg_variance_fn_t subpel_avg_variance8x16_ssse3 =
845 vp9_sub_pixel_avg_variance8x16_ssse3; 1845 vp9_sub_pixel_avg_variance8x16_ssse3;
846 const vp9_subp_avg_variance_fn_t subpel_avg_variance16x8_ssse3 = 1846 const vp9_subp_avg_variance_fn_t subpel_avg_variance16x8_ssse3 =
847 vp9_sub_pixel_avg_variance16x8_ssse3; 1847 vp9_sub_pixel_avg_variance16x8_ssse3;
848 const vp9_subp_avg_variance_fn_t subpel_avg_variance16x16_ssse3 = 1848 const vp9_subp_avg_variance_fn_t subpel_avg_variance16x16_ssse3 =
849 vp9_sub_pixel_avg_variance16x16_ssse3; 1849 vp9_sub_pixel_avg_variance16x16_ssse3;
850 const vp9_subp_avg_variance_fn_t subpel_avg_variance16x32_ssse3 = 1850 const vp9_subp_avg_variance_fn_t subpel_avg_variance16x32_ssse3 =
851 vp9_sub_pixel_avg_variance16x32_ssse3; 1851 vp9_sub_pixel_avg_variance16x32_ssse3;
852 const vp9_subp_avg_variance_fn_t subpel_avg_variance32x16_ssse3 = 1852 const vp9_subp_avg_variance_fn_t subpel_avg_variance32x16_ssse3 =
853 vp9_sub_pixel_avg_variance32x16_ssse3; 1853 vp9_sub_pixel_avg_variance32x16_ssse3;
854 const vp9_subp_avg_variance_fn_t subpel_avg_variance32x32_ssse3 = 1854 const vp9_subp_avg_variance_fn_t subpel_avg_variance32x32_ssse3 =
855 vp9_sub_pixel_avg_variance32x32_ssse3; 1855 vp9_sub_pixel_avg_variance32x32_ssse3;
856 const vp9_subp_avg_variance_fn_t subpel_avg_variance32x64_ssse3 = 1856 const vp9_subp_avg_variance_fn_t subpel_avg_variance32x64_ssse3 =
857 vp9_sub_pixel_avg_variance32x64_ssse3; 1857 vp9_sub_pixel_avg_variance32x64_ssse3;
858 const vp9_subp_avg_variance_fn_t subpel_avg_variance64x32_ssse3 = 1858 const vp9_subp_avg_variance_fn_t subpel_avg_variance64x32_ssse3 =
859 vp9_sub_pixel_avg_variance64x32_ssse3; 1859 vp9_sub_pixel_avg_variance64x32_ssse3;
860 const vp9_subp_avg_variance_fn_t subpel_avg_variance64x64_ssse3 = 1860 const vp9_subp_avg_variance_fn_t subpel_avg_variance64x64_ssse3 =
861 vp9_sub_pixel_avg_variance64x64_ssse3; 1861 vp9_sub_pixel_avg_variance64x64_ssse3;
862 INSTANTIATE_TEST_CASE_P( 1862 INSTANTIATE_TEST_CASE_P(
863 SSSE3, VP9SubpelAvgVarianceTest, 1863 SSSE3, VP9SubpelAvgVarianceTest,
864 ::testing::Values(make_tuple(2, 2, subpel_avg_variance4x4_ssse3), 1864 ::testing::Values(make_tuple(2, 2, subpel_avg_variance4x4_ssse3, 0),
865 make_tuple(2, 3, subpel_avg_variance4x8_ssse3), 1865 make_tuple(2, 3, subpel_avg_variance4x8_ssse3, 0),
866 make_tuple(3, 2, subpel_avg_variance8x4_ssse3), 1866 make_tuple(3, 2, subpel_avg_variance8x4_ssse3, 0),
867 make_tuple(3, 3, subpel_avg_variance8x8_ssse3), 1867 make_tuple(3, 3, subpel_avg_variance8x8_ssse3, 0),
868 make_tuple(3, 4, subpel_avg_variance8x16_ssse3), 1868 make_tuple(3, 4, subpel_avg_variance8x16_ssse3, 0),
869 make_tuple(4, 3, subpel_avg_variance16x8_ssse3), 1869 make_tuple(4, 3, subpel_avg_variance16x8_ssse3, 0),
870 make_tuple(4, 4, subpel_avg_variance16x16_ssse3), 1870 make_tuple(4, 4, subpel_avg_variance16x16_ssse3, 0),
871 make_tuple(4, 5, subpel_avg_variance16x32_ssse3), 1871 make_tuple(4, 5, subpel_avg_variance16x32_ssse3, 0),
872 make_tuple(5, 4, subpel_avg_variance32x16_ssse3), 1872 make_tuple(5, 4, subpel_avg_variance32x16_ssse3, 0),
873 make_tuple(5, 5, subpel_avg_variance32x32_ssse3), 1873 make_tuple(5, 5, subpel_avg_variance32x32_ssse3, 0),
874 make_tuple(5, 6, subpel_avg_variance32x64_ssse3), 1874 make_tuple(5, 6, subpel_avg_variance32x64_ssse3, 0),
875 make_tuple(6, 5, subpel_avg_variance64x32_ssse3), 1875 make_tuple(6, 5, subpel_avg_variance64x32_ssse3, 0),
876 make_tuple(6, 6, subpel_avg_variance64x64_ssse3))); 1876 make_tuple(6, 6, subpel_avg_variance64x64_ssse3, 0)));
877 #endif 1877 #endif // CONFIG_USE_X86INC
878 #endif 1878 #endif // HAVE_SSSE3
879 1879
880 #if HAVE_AVX2 1880 #if HAVE_AVX2
881 1881
882 const vp9_variance_fn_t variance16x16_avx2 = vp9_variance16x16_avx2; 1882 const vp9_variance_fn_t variance16x16_avx2 = vp9_variance16x16_avx2;
883 const vp9_variance_fn_t variance32x16_avx2 = vp9_variance32x16_avx2; 1883 const vp9_variance_fn_t variance32x16_avx2 = vp9_variance32x16_avx2;
884 const vp9_variance_fn_t variance32x32_avx2 = vp9_variance32x32_avx2; 1884 const vp9_variance_fn_t variance32x32_avx2 = vp9_variance32x32_avx2;
885 const vp9_variance_fn_t variance64x32_avx2 = vp9_variance64x32_avx2; 1885 const vp9_variance_fn_t variance64x32_avx2 = vp9_variance64x32_avx2;
886 const vp9_variance_fn_t variance64x64_avx2 = vp9_variance64x64_avx2; 1886 const vp9_variance_fn_t variance64x64_avx2 = vp9_variance64x64_avx2;
887 INSTANTIATE_TEST_CASE_P( 1887 INSTANTIATE_TEST_CASE_P(
888 AVX2, VP9VarianceTest, 1888 AVX2, VP9VarianceTest,
889 ::testing::Values(make_tuple(4, 4, variance16x16_avx2), 1889 ::testing::Values(make_tuple(4, 4, variance16x16_avx2, 0),
890 make_tuple(5, 4, variance32x16_avx2), 1890 make_tuple(5, 4, variance32x16_avx2, 0),
891 make_tuple(5, 5, variance32x32_avx2), 1891 make_tuple(5, 5, variance32x32_avx2, 0),
892 make_tuple(6, 5, variance64x32_avx2), 1892 make_tuple(6, 5, variance64x32_avx2, 0),
893 make_tuple(6, 6, variance64x64_avx2))); 1893 make_tuple(6, 6, variance64x64_avx2, 0)));
894 1894
895 const vp9_subpixvariance_fn_t subpel_variance32x32_avx2 = 1895 const vp9_subpixvariance_fn_t subpel_variance32x32_avx2 =
896 vp9_sub_pixel_variance32x32_avx2; 1896 vp9_sub_pixel_variance32x32_avx2;
897 const vp9_subpixvariance_fn_t subpel_variance64x64_avx2 = 1897 const vp9_subpixvariance_fn_t subpel_variance64x64_avx2 =
898 vp9_sub_pixel_variance64x64_avx2; 1898 vp9_sub_pixel_variance64x64_avx2;
899 INSTANTIATE_TEST_CASE_P( 1899 INSTANTIATE_TEST_CASE_P(
900 AVX2, VP9SubpelVarianceTest, 1900 AVX2, VP9SubpelVarianceTest,
901 ::testing::Values(make_tuple(5, 5, subpel_variance32x32_avx2), 1901 ::testing::Values(make_tuple(5, 5, subpel_variance32x32_avx2, 0),
902 make_tuple(6, 6, subpel_variance64x64_avx2))); 1902 make_tuple(6, 6, subpel_variance64x64_avx2, 0)));
903 1903
904 const vp9_subp_avg_variance_fn_t subpel_avg_variance32x32_avx2 = 1904 const vp9_subp_avg_variance_fn_t subpel_avg_variance32x32_avx2 =
905 vp9_sub_pixel_avg_variance32x32_avx2; 1905 vp9_sub_pixel_avg_variance32x32_avx2;
906 const vp9_subp_avg_variance_fn_t subpel_avg_variance64x64_avx2 = 1906 const vp9_subp_avg_variance_fn_t subpel_avg_variance64x64_avx2 =
907 vp9_sub_pixel_avg_variance64x64_avx2; 1907 vp9_sub_pixel_avg_variance64x64_avx2;
908 INSTANTIATE_TEST_CASE_P( 1908 INSTANTIATE_TEST_CASE_P(
909 AVX2, VP9SubpelAvgVarianceTest, 1909 AVX2, VP9SubpelAvgVarianceTest,
910 ::testing::Values(make_tuple(5, 5, subpel_avg_variance32x32_avx2), 1910 ::testing::Values(make_tuple(5, 5, subpel_avg_variance32x32_avx2, 0),
911 make_tuple(6, 6, subpel_avg_variance64x64_avx2))); 1911 make_tuple(6, 6, subpel_avg_variance64x64_avx2, 0)));
912 #endif // HAVE_AVX2 1912 #endif // HAVE_AVX2
913 #if HAVE_NEON 1913 #if HAVE_NEON
914 const vp9_variance_fn_t variance8x8_neon = vp9_variance8x8_neon; 1914 const vp9_variance_fn_t variance8x8_neon = vp9_variance8x8_neon;
915 const vp9_variance_fn_t variance16x16_neon = vp9_variance16x16_neon; 1915 const vp9_variance_fn_t variance16x16_neon = vp9_variance16x16_neon;
916 const vp9_variance_fn_t variance32x32_neon = vp9_variance32x32_neon; 1916 const vp9_variance_fn_t variance32x32_neon = vp9_variance32x32_neon;
917 INSTANTIATE_TEST_CASE_P( 1917 INSTANTIATE_TEST_CASE_P(
918 NEON, VP9VarianceTest, 1918 NEON, VP9VarianceTest,
919 ::testing::Values(make_tuple(3, 3, variance8x8_neon), 1919 ::testing::Values(make_tuple(3, 3, variance8x8_neon, 0),
920 make_tuple(4, 4, variance16x16_neon), 1920 make_tuple(4, 4, variance16x16_neon, 0),
921 make_tuple(5, 5, variance32x32_neon))); 1921 make_tuple(5, 5, variance32x32_neon, 0)));
922 1922
923 const vp9_subpixvariance_fn_t subpel_variance8x8_neon = 1923 const vp9_subpixvariance_fn_t subpel_variance8x8_neon =
924 vp9_sub_pixel_variance8x8_neon; 1924 vp9_sub_pixel_variance8x8_neon;
925 const vp9_subpixvariance_fn_t subpel_variance16x16_neon = 1925 const vp9_subpixvariance_fn_t subpel_variance16x16_neon =
926 vp9_sub_pixel_variance16x16_neon; 1926 vp9_sub_pixel_variance16x16_neon;
927 const vp9_subpixvariance_fn_t subpel_variance32x32_neon = 1927 const vp9_subpixvariance_fn_t subpel_variance32x32_neon =
928 vp9_sub_pixel_variance32x32_neon; 1928 vp9_sub_pixel_variance32x32_neon;
929 INSTANTIATE_TEST_CASE_P( 1929 INSTANTIATE_TEST_CASE_P(
930 NEON, VP9SubpelVarianceTest, 1930 NEON, VP9SubpelVarianceTest,
931 ::testing::Values(make_tuple(3, 3, subpel_variance8x8_neon), 1931 ::testing::Values(make_tuple(3, 3, subpel_variance8x8_neon, 0),
932 make_tuple(4, 4, subpel_variance16x16_neon), 1932 make_tuple(4, 4, subpel_variance16x16_neon, 0),
933 make_tuple(5, 5, subpel_variance32x32_neon))); 1933 make_tuple(5, 5, subpel_variance32x32_neon, 0)));
934 #endif // HAVE_NEON 1934 #endif // HAVE_NEON
935 #endif // CONFIG_VP9_ENCODER 1935 #endif // CONFIG_VP9_ENCODER
936 1936
937 } // namespace vp9 1937 } // namespace vp9
938
939 } // namespace 1938 } // namespace