Chromium Code Reviews

Unified Diff: source/libvpx/test/fdct8x8_test.cc

Issue 592203002: libvpx: Pull from upstream (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/deps/third_party/libvpx/
Patch Set: Created 6 years, 3 months ago
Index: source/libvpx/test/fdct8x8_test.cc
===================================================================
--- source/libvpx/test/fdct8x8_test.cc (revision 292072)
+++ source/libvpx/test/fdct8x8_test.cc (working copy)
@@ -20,45 +20,96 @@
#include "./vp9_rtcd.h"
#include "vp9/common/vp9_entropy.h"
+#include "vpx/vpx_codec.h"
#include "vpx/vpx_integer.h"
-extern "C" {
-void vp9_idct8x8_64_add_c(const int16_t *input, uint8_t *output, int pitch);
+const int kNumCoeffs = 64;
+const double kPi = 3.141592653589793238462643383279502884;
+void reference_8x8_dct_1d(const double in[8], double out[8], int stride) {
+ const double kInvSqrt2 = 0.707106781186547524400844362104;
+ for (int k = 0; k < 8; k++) {
+ out[k] = 0.0;
+ for (int n = 0; n < 8; n++)
+ out[k] += in[n] * cos(kPi * (2 * n + 1) * k / 16.0);
+ if (k == 0)
+ out[k] = out[k] * kInvSqrt2;
+ }
}
+void reference_8x8_dct_2d(const int16_t input[kNumCoeffs],
+ double output[kNumCoeffs]) {
+ // First transform columns
+ for (int i = 0; i < 8; ++i) {
+ double temp_in[8], temp_out[8];
+ for (int j = 0; j < 8; ++j)
+ temp_in[j] = input[j*8 + i];
+ reference_8x8_dct_1d(temp_in, temp_out, 1);
+ for (int j = 0; j < 8; ++j)
+ output[j * 8 + i] = temp_out[j];
+ }
+ // Then transform rows
+ for (int i = 0; i < 8; ++i) {
+ double temp_in[8], temp_out[8];
+ for (int j = 0; j < 8; ++j)
+ temp_in[j] = output[j + i*8];
+ reference_8x8_dct_1d(temp_in, temp_out, 1);
+ // Scale by some magic number
+ for (int j = 0; j < 8; ++j)
+ output[j + i * 8] = temp_out[j] * 2;
+ }
+}
+
using libvpx_test::ACMRandom;
namespace {
-typedef void (*FdctFunc)(const int16_t *in, int16_t *out, int stride);
-typedef void (*IdctFunc)(const int16_t *in, uint8_t *out, int stride);
-typedef void (*FhtFunc)(const int16_t *in, int16_t *out, int stride,
+typedef void (*FdctFunc)(const int16_t *in, tran_low_t *out, int stride);
+typedef void (*IdctFunc)(const tran_low_t *in, uint8_t *out, int stride);
+typedef void (*FhtFunc)(const int16_t *in, tran_low_t *out, int stride,
int tx_type);
-typedef void (*IhtFunc)(const int16_t *in, uint8_t *out, int stride,
+typedef void (*IhtFunc)(const tran_low_t *in, uint8_t *out, int stride,
int tx_type);
-typedef std::tr1::tuple<FdctFunc, IdctFunc, int> Dct8x8Param;
-typedef std::tr1::tuple<FhtFunc, IhtFunc, int> Ht8x8Param;
+typedef std::tr1::tuple<FdctFunc, IdctFunc, int, vpx_bit_depth_t> Dct8x8Param;
+typedef std::tr1::tuple<FhtFunc, IhtFunc, int, vpx_bit_depth_t> Ht8x8Param;
-void fdct8x8_ref(const int16_t *in, int16_t *out, int stride, int /*tx_type*/) {
+void fdct8x8_ref(const int16_t *in, tran_low_t *out, int stride, int tx_type) {
vp9_fdct8x8_c(in, out, stride);
}
-void fht8x8_ref(const int16_t *in, int16_t *out, int stride, int tx_type) {
+void fht8x8_ref(const int16_t *in, tran_low_t *out, int stride, int tx_type) {
vp9_fht8x8_c(in, out, stride, tx_type);
}
+#if CONFIG_VP9_HIGHBITDEPTH
+void idct8x8_10(const tran_low_t *in, uint8_t *out, int stride) {
+ vp9_high_idct8x8_64_add_c(in, out, stride, 10);
+}
+
+void idct8x8_12(const tran_low_t *in, uint8_t *out, int stride) {
+ vp9_high_idct8x8_64_add_c(in, out, stride, 12);
+}
+
+void iht8x8_10(const tran_low_t *in, uint8_t *out, int stride, int tx_type) {
+ vp9_high_iht8x8_64_add_c(in, out, stride, tx_type, 10);
+}
+
+void iht8x8_12(const tran_low_t *in, uint8_t *out, int stride, int tx_type) {
+ vp9_high_iht8x8_64_add_c(in, out, stride, tx_type, 12);
+}
+#endif
+
class FwdTrans8x8TestBase {
public:
virtual ~FwdTrans8x8TestBase() {}
protected:
- virtual void RunFwdTxfm(int16_t *in, int16_t *out, int stride) = 0;
- virtual void RunInvTxfm(int16_t *out, uint8_t *dst, int stride) = 0;
+ virtual void RunFwdTxfm(int16_t *in, tran_low_t *out, int stride) = 0;
+ virtual void RunInvTxfm(tran_low_t *out, uint8_t *dst, int stride) = 0;
void RunSignBiasCheck() {
ACMRandom rnd(ACMRandom::DeterministicSeed());
DECLARE_ALIGNED_ARRAY(16, int16_t, test_input_block, 64);
- DECLARE_ALIGNED_ARRAY(16, int16_t, test_output_block, 64);
+ DECLARE_ALIGNED_ARRAY(16, tran_low_t, test_output_block, 64);
int count_sign_block[64][2];
const int count_test_block = 100000;
@@ -67,7 +118,8 @@
for (int i = 0; i < count_test_block; ++i) {
// Initialize a test block with input range [-255, 255].
for (int j = 0; j < 64; ++j)
- test_input_block[j] = rnd.Rand8() - rnd.Rand8();
+ test_input_block[j] = ((rnd.Rand16() >> (16 - bit_depth_)) & mask_) -
+ ((rnd.Rand16() >> (16 - bit_depth_)) & mask_);
ASM_REGISTER_STATE_CHECK(
RunFwdTxfm(test_input_block, test_output_block, pitch_));
@@ -82,7 +134,7 @@
for (int j = 0; j < 64; ++j) {
const int diff = abs(count_sign_block[j][0] - count_sign_block[j][1]);
const int max_diff = 1125;
- EXPECT_LT(diff, max_diff)
+ EXPECT_LT(diff, max_diff << (bit_depth_ - 8))
<< "Error: 8x8 FDCT/FHT has a sign bias > "
<< 1. * max_diff / count_test_block * 100 << "%"
<< " for input range [-255, 255] at index " << j
@@ -111,7 +163,7 @@
for (int j = 0; j < 64; ++j) {
const int diff = abs(count_sign_block[j][0] - count_sign_block[j][1]);
const int max_diff = 10000;
- EXPECT_LT(diff, max_diff)
+ EXPECT_LT(diff, max_diff << (bit_depth_ - 8))
<< "Error: 4x4 FDCT/FHT has a sign bias > "
<< 1. * max_diff / count_test_block * 100 << "%"
<< " for input range [-15, 15] at index " << j
@@ -127,16 +179,28 @@
int total_error = 0;
const int count_test_block = 100000;
DECLARE_ALIGNED_ARRAY(16, int16_t, test_input_block, 64);
- DECLARE_ALIGNED_ARRAY(16, int16_t, test_temp_block, 64);
+ DECLARE_ALIGNED_ARRAY(16, tran_low_t, test_temp_block, 64);
DECLARE_ALIGNED_ARRAY(16, uint8_t, dst, 64);
DECLARE_ALIGNED_ARRAY(16, uint8_t, src, 64);
+#if CONFIG_VP9_HIGHBITDEPTH
+ DECLARE_ALIGNED_ARRAY(16, uint16_t, dst16, 64);
+ DECLARE_ALIGNED_ARRAY(16, uint16_t, src16, 64);
+#endif
for (int i = 0; i < count_test_block; ++i) {
// Initialize a test block with input range [-255, 255].
for (int j = 0; j < 64; ++j) {
- src[j] = rnd.Rand8();
- dst[j] = rnd.Rand8();
- test_input_block[j] = src[j] - dst[j];
+ if (bit_depth_ == VPX_BITS_8) {
+ src[j] = rnd.Rand8();
+ dst[j] = rnd.Rand8();
+ test_input_block[j] = src[j] - dst[j];
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ src16[j] = rnd.Rand16() & mask_;
+ dst16[j] = rnd.Rand16() & mask_;
+ test_input_block[j] = src16[j] - dst16[j];
+#endif
+ }
}
ASM_REGISTER_STATE_CHECK(
@@ -152,11 +216,23 @@
test_temp_block[j] *= 4;
}
}
- ASM_REGISTER_STATE_CHECK(
- RunInvTxfm(test_temp_block, dst, pitch_));
+ if (bit_depth_ == VPX_BITS_8) {
+ ASM_REGISTER_STATE_CHECK(
+ RunInvTxfm(test_temp_block, dst, pitch_));
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ ASM_REGISTER_STATE_CHECK(
+ RunInvTxfm(test_temp_block, CONVERT_TO_BYTEPTR(dst16), pitch_));
+#endif
+ }
for (int j = 0; j < 64; ++j) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ const int diff =
+ bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
+#else
const int diff = dst[j] - src[j];
+#endif
const int error = diff * diff;
if (max_error < error)
max_error = error;
@@ -164,11 +240,11 @@
}
}
- EXPECT_GE(1, max_error)
+ EXPECT_GE(1 << 2 * (bit_depth_ - 8), max_error)
<< "Error: 8x8 FDCT/IDCT or FHT/IHT has an individual"
<< " roundtrip error > 1";
- EXPECT_GE(count_test_block/5, total_error)
+ EXPECT_GE((count_test_block << 2 * (bit_depth_ - 8))/5, total_error)
<< "Error: 8x8 FDCT/IDCT or FHT/IHT has average roundtrip "
<< "error > 1/5 per block";
}
@@ -180,37 +256,68 @@
int total_coeff_error = 0;
const int count_test_block = 100000;
DECLARE_ALIGNED_ARRAY(16, int16_t, test_input_block, 64);
- DECLARE_ALIGNED_ARRAY(16, int16_t, test_temp_block, 64);
- DECLARE_ALIGNED_ARRAY(16, int16_t, ref_temp_block, 64);
+ DECLARE_ALIGNED_ARRAY(16, tran_low_t, test_temp_block, 64);
+ DECLARE_ALIGNED_ARRAY(16, tran_low_t, ref_temp_block, 64);
DECLARE_ALIGNED_ARRAY(16, uint8_t, dst, 64);
DECLARE_ALIGNED_ARRAY(16, uint8_t, src, 64);
+#if CONFIG_VP9_HIGHBITDEPTH
+ DECLARE_ALIGNED_ARRAY(16, uint16_t, dst16, 64);
+ DECLARE_ALIGNED_ARRAY(16, uint16_t, src16, 64);
+#endif
for (int i = 0; i < count_test_block; ++i) {
- // Initialize a test block with input range [-255, 255].
+ // Initialize a test block with input range [-mask_, mask_].
for (int j = 0; j < 64; ++j) {
- if (i == 0) {
- src[j] = 255;
- dst[j] = 0;
- } else if (i == 1) {
- src[j] = 0;
- dst[j] = 255;
+ if (bit_depth_ == VPX_BITS_8) {
+ if (i == 0) {
+ src[j] = 255;
+ dst[j] = 0;
+ } else if (i == 1) {
+ src[j] = 0;
+ dst[j] = 255;
+ } else {
+ src[j] = rnd.Rand8() % 2 ? 255 : 0;
+ dst[j] = rnd.Rand8() % 2 ? 255 : 0;
+ }
+ test_input_block[j] = src[j] - dst[j];
+#if CONFIG_VP9_HIGHBITDEPTH
} else {
- src[j] = rnd.Rand8() % 2 ? 255 : 0;
- dst[j] = rnd.Rand8() % 2 ? 255 : 0;
+ if (i == 0) {
+ src16[j] = mask_;
+ dst16[j] = 0;
+ } else if (i == 1) {
+ src16[j] = 0;
+ dst16[j] = mask_;
+ } else {
+ src16[j] = rnd.Rand8() % 2 ? mask_ : 0;
+ dst16[j] = rnd.Rand8() % 2 ? mask_ : 0;
+ }
+ test_input_block[j] = src16[j] - dst16[j];
+#endif
}
-
- test_input_block[j] = src[j] - dst[j];
}
ASM_REGISTER_STATE_CHECK(
RunFwdTxfm(test_input_block, test_temp_block, pitch_));
ASM_REGISTER_STATE_CHECK(
fwd_txfm_ref(test_input_block, ref_temp_block, pitch_, tx_type_));
- ASM_REGISTER_STATE_CHECK(
- RunInvTxfm(test_temp_block, dst, pitch_));
+ if (bit_depth_ == VPX_BITS_8) {
+ ASM_REGISTER_STATE_CHECK(
+ RunInvTxfm(test_temp_block, dst, pitch_));
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ ASM_REGISTER_STATE_CHECK(
+ RunInvTxfm(test_temp_block, CONVERT_TO_BYTEPTR(dst16), pitch_));
+#endif
+ }
for (int j = 0; j < 64; ++j) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ const int diff =
+ bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
+#else
const int diff = dst[j] - src[j];
+#endif
const int error = diff * diff;
if (max_error < error)
max_error = error;
@@ -220,11 +327,11 @@
total_coeff_error += abs(coeff_diff);
}
- EXPECT_GE(1, max_error)
+ EXPECT_GE(1 << 2 * (bit_depth_ - 8), max_error)
<< "Error: Extremal 8x8 FDCT/IDCT or FHT/IHT has"
<< "an individual roundtrip error > 1";
- EXPECT_GE(count_test_block/5, total_error)
+ EXPECT_GE((count_test_block << 2 * (bit_depth_ - 8))/5, total_error)
<< "Error: Extremal 8x8 FDCT/IDCT or FHT/IHT has average"
<< " roundtrip error > 1/5 per block";
@@ -234,9 +341,97 @@
}
}
+ void RunInvAccuracyCheck() {
+ ACMRandom rnd(ACMRandom::DeterministicSeed());
+ const int count_test_block = 1000;
+ DECLARE_ALIGNED_ARRAY(16, int16_t, in, kNumCoeffs);
+ DECLARE_ALIGNED_ARRAY(16, tran_low_t, coeff, kNumCoeffs);
+ DECLARE_ALIGNED_ARRAY(16, uint8_t, dst, kNumCoeffs);
+ DECLARE_ALIGNED_ARRAY(16, uint8_t, src, kNumCoeffs);
+#if CONFIG_VP9_HIGHBITDEPTH
+ DECLARE_ALIGNED_ARRAY(16, uint16_t, src16, kNumCoeffs);
+ DECLARE_ALIGNED_ARRAY(16, uint16_t, dst16, kNumCoeffs);
+#endif
+
+ for (int i = 0; i < count_test_block; ++i) {
+ double out_r[kNumCoeffs];
+
+ // Initialize a test block with input range [-255, 255].
+ for (int j = 0; j < kNumCoeffs; ++j) {
+ if (bit_depth_ == VPX_BITS_8) {
+ src[j] = rnd.Rand8() % 2 ? 255 : 0;
+ dst[j] = src[j] > 0 ? 0 : 255;
+ in[j] = src[j] - dst[j];
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ src16[j] = rnd.Rand8() % 2 ? mask_ : 0;
+ dst16[j] = src16[j] > 0 ? 0 : mask_;
+ in[j] = src16[j] - dst16[j];
+#endif
+ }
+ }
+
+ reference_8x8_dct_2d(in, out_r);
+ for (int j = 0; j < kNumCoeffs; ++j)
+ coeff[j] = static_cast<tran_low_t>(round(out_r[j]));
+
+ if (bit_depth_ == VPX_BITS_8) {
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, pitch_));
+#if CONFIG_VP9_HIGHBITDEPTH
+ } else {
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16),
+ pitch_));
+#endif
+ }
+
+ for (int j = 0; j < kNumCoeffs; ++j) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ const uint32_t diff =
+ bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
+#else
+ const uint32_t diff = dst[j] - src[j];
+#endif
+ const uint32_t error = diff * diff;
+ EXPECT_GE(1u << 2 * (bit_depth_ - 8), error)
+ << "Error: 8x8 IDCT has error " << error
+ << " at index " << j;
+ }
+ }
+ }
+
+ void RunFwdAccuracyCheck() {
+ ACMRandom rnd(ACMRandom::DeterministicSeed());
+ const int count_test_block = 1000;
+ DECLARE_ALIGNED_ARRAY(16, int16_t, in, kNumCoeffs);
+ DECLARE_ALIGNED_ARRAY(16, tran_low_t, coeff_r, kNumCoeffs);
+ DECLARE_ALIGNED_ARRAY(16, tran_low_t, coeff, kNumCoeffs);
+
+ for (int i = 0; i < count_test_block; ++i) {
+ double out_r[kNumCoeffs];
+
+ // Initialize a test block with input range [-mask_, mask_].
+ for (int j = 0; j < kNumCoeffs; ++j)
+ in[j] = rnd.Rand8() % 2 == 0 ? mask_ : -mask_;
+
+ RunFwdTxfm(in, coeff, pitch_);
+ reference_8x8_dct_2d(in, out_r);
+ for (int j = 0; j < kNumCoeffs; ++j)
+ coeff_r[j] = static_cast<tran_low_t>(round(out_r[j]));
+
+ for (int j = 0; j < kNumCoeffs; ++j) {
+ const uint32_t diff = coeff[j] - coeff_r[j];
+ const uint32_t error = diff * diff;
+ EXPECT_GE(9u << 2 * (bit_depth_ - 8), error)
+ << "Error: 8x8 DCT has error " << error
+ << " at index " << j;
+ }
+ }
+ }
int pitch_;
int tx_type_;
FhtFunc fwd_txfm_ref;
+ vpx_bit_depth_t bit_depth_;
+ int mask_;
};
class FwdTrans8x8DCT
@@ -251,15 +446,17 @@
tx_type_ = GET_PARAM(2);
pitch_ = 8;
fwd_txfm_ref = fdct8x8_ref;
+ bit_depth_ = GET_PARAM(3);
+ mask_ = (1 << bit_depth_) - 1;
}
virtual void TearDown() { libvpx_test::ClearSystemState(); }
protected:
- void RunFwdTxfm(int16_t *in, int16_t *out, int stride) {
+ void RunFwdTxfm(int16_t *in, tran_low_t *out, int stride) {
fwd_txfm_(in, out, stride);
}
- void RunInvTxfm(int16_t *out, uint8_t *dst, int stride) {
+ void RunInvTxfm(tran_low_t *out, uint8_t *dst, int stride) {
inv_txfm_(out, dst, stride);
}
@@ -279,6 +476,14 @@
RunExtremalCheck();
}
+TEST_P(FwdTrans8x8DCT, FwdAccuracyCheck) {
+ RunFwdAccuracyCheck();
+}
+
+TEST_P(FwdTrans8x8DCT, InvAccuracyCheck) {
+ RunInvAccuracyCheck();
+}
+
class FwdTrans8x8HT
: public FwdTrans8x8TestBase,
public ::testing::TestWithParam<Ht8x8Param> {
@@ -291,15 +496,17 @@
tx_type_ = GET_PARAM(2);
pitch_ = 8;
fwd_txfm_ref = fht8x8_ref;
+ bit_depth_ = GET_PARAM(3);
+ mask_ = (1 << bit_depth_) - 1;
}
virtual void TearDown() { libvpx_test::ClearSystemState(); }
protected:
- void RunFwdTxfm(int16_t *in, int16_t *out, int stride) {
+ void RunFwdTxfm(int16_t *in, tran_low_t *out, int stride) {
fwd_txfm_(in, out, stride, tx_type_);
}
- void RunInvTxfm(int16_t *out, uint8_t *dst, int stride) {
+ void RunInvTxfm(tran_low_t *out, uint8_t *dst, int stride) {
inv_txfm_(out, dst, stride, tx_type_);
}
@@ -321,50 +528,81 @@
using std::tr1::make_tuple;
+#if CONFIG_VP9_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(
C, FwdTrans8x8DCT,
::testing::Values(
- make_tuple(&vp9_fdct8x8_c, &vp9_idct8x8_64_add_c, 0)));
+ make_tuple(&vp9_high_fdct8x8_c, &idct8x8_10, 0, VPX_BITS_10),
+ make_tuple(&vp9_high_fdct8x8_c, &idct8x8_12, 0, VPX_BITS_12),
+ make_tuple(&vp9_fdct8x8_c, &vp9_idct8x8_64_add_c, 0, VPX_BITS_8)));
+#else
INSTANTIATE_TEST_CASE_P(
+ C, FwdTrans8x8DCT,
+ ::testing::Values(
+ make_tuple(&vp9_fdct8x8_c, &vp9_idct8x8_64_add_c, 0, VPX_BITS_8)));
+#endif
+
+#if CONFIG_VP9_HIGHBITDEPTH
+INSTANTIATE_TEST_CASE_P(
C, FwdTrans8x8HT,
::testing::Values(
- make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 0),
- make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 1),
- make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 2),
- make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 3)));
+ make_tuple(&vp9_high_fht8x8_c, &iht8x8_10, 0, VPX_BITS_10),
+ make_tuple(&vp9_high_fht8x8_c, &iht8x8_10, 1, VPX_BITS_10),
+ make_tuple(&vp9_high_fht8x8_c, &iht8x8_10, 2, VPX_BITS_10),
+ make_tuple(&vp9_high_fht8x8_c, &iht8x8_10, 3, VPX_BITS_10),
+ make_tuple(&vp9_high_fht8x8_c, &iht8x8_12, 0, VPX_BITS_12),
+ make_tuple(&vp9_high_fht8x8_c, &iht8x8_12, 1, VPX_BITS_12),
+ make_tuple(&vp9_high_fht8x8_c, &iht8x8_12, 2, VPX_BITS_12),
+ make_tuple(&vp9_high_fht8x8_c, &iht8x8_12, 3, VPX_BITS_12),
+ make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 3, VPX_BITS_8)));
+#else
+INSTANTIATE_TEST_CASE_P(
+ C, FwdTrans8x8HT,
+ ::testing::Values(
+ make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_c, 3, VPX_BITS_8)));
+#endif
-#if HAVE_NEON_ASM
+#if HAVE_NEON_ASM && !CONFIG_VP9_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(
NEON, FwdTrans8x8DCT,
::testing::Values(
- make_tuple(&vp9_fdct8x8_neon, &vp9_idct8x8_64_add_neon, 0)));
+ make_tuple(&vp9_fdct8x8_neon, &vp9_idct8x8_64_add_neon, 0,
+ VPX_BITS_8)));
INSTANTIATE_TEST_CASE_P(
DISABLED_NEON, FwdTrans8x8HT,
::testing::Values(
- make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_neon, 0),
- make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_neon, 1),
- make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_neon, 2),
- make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_neon, 3)));
+ make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_neon, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_neon, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_neon, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_neon, 3, VPX_BITS_8)));
#endif
-#if HAVE_SSE2
+#if HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(
SSE2, FwdTrans8x8DCT,
::testing::Values(
- make_tuple(&vp9_fdct8x8_sse2, &vp9_idct8x8_64_add_sse2, 0)));
+ make_tuple(&vp9_fdct8x8_sse2, &vp9_idct8x8_64_add_sse2, 0,
+ VPX_BITS_8)));
INSTANTIATE_TEST_CASE_P(
SSE2, FwdTrans8x8HT,
::testing::Values(
- make_tuple(&vp9_fht8x8_sse2, &vp9_iht8x8_64_add_sse2, 0),
- make_tuple(&vp9_fht8x8_sse2, &vp9_iht8x8_64_add_sse2, 1),
- make_tuple(&vp9_fht8x8_sse2, &vp9_iht8x8_64_add_sse2, 2),
- make_tuple(&vp9_fht8x8_sse2, &vp9_iht8x8_64_add_sse2, 3)));
+ make_tuple(&vp9_fht8x8_sse2, &vp9_iht8x8_64_add_sse2, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_sse2, &vp9_iht8x8_64_add_sse2, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_sse2, &vp9_iht8x8_64_add_sse2, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht8x8_sse2, &vp9_iht8x8_64_add_sse2, 3, VPX_BITS_8)));
#endif
-#if HAVE_SSSE3 && ARCH_X86_64
+#if HAVE_SSSE3 && ARCH_X86_64 && !CONFIG_VP9_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(
SSSE3, FwdTrans8x8DCT,
::testing::Values(
- make_tuple(&vp9_fdct8x8_ssse3, &vp9_idct8x8_64_add_ssse3, 0)));
+ make_tuple(&vp9_fdct8x8_ssse3, &vp9_idct8x8_64_add_ssse3, 0,
+ VPX_BITS_8)));
#endif
} // namespace
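
Note on the threshold scaling used throughout the patch: the tests derive a pixel mask of (1 << bit_depth) - 1 and widen the 8-bit squared-error tolerances by 2 * (bit_depth - 8) bits for the 10- and 12-bit configurations. A minimal standalone sketch of that arithmetic (illustrative only, not part of the patch; the helper names below are hypothetical):

#include <cstdio>

// Hypothetical helpers mirroring the expressions used in the checks above.
int PixelMask(int bit_depth) { return (1 << bit_depth) - 1; }           // 255, 1023, 4095
int RoundTripBound(int bit_depth) { return 1 << 2 * (bit_depth - 8); }  // 1, 16, 256

int main() {
  const int depths[] = { 8, 10, 12 };
  for (int bd : depths) {
    // Prints the per-pixel mask and the scaled squared-error bound per depth.
    printf("bit_depth=%d mask=%d max_sq_error=%d\n",
           bd, PixelMask(bd), RoundTripBound(bd));
  }
  return 0;
}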