OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2012 The WebM project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebM project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
(...skipping 338 matching lines...) |
349 virtual void RunFwdTxfm(int16_t *in, tran_low_t *out, int stride) = 0; | 349 virtual void RunFwdTxfm(int16_t *in, tran_low_t *out, int stride) = 0; |
350 | 350 |
351 virtual void RunInvTxfm(tran_low_t *out, uint8_t *dst, int stride) = 0; | 351 virtual void RunInvTxfm(tran_low_t *out, uint8_t *dst, int stride) = 0; |
352 | 352 |
353 void RunAccuracyCheck() { | 353 void RunAccuracyCheck() { |
354 ACMRandom rnd(ACMRandom::DeterministicSeed()); | 354 ACMRandom rnd(ACMRandom::DeterministicSeed()); |
355 uint32_t max_error = 0; | 355 uint32_t max_error = 0; |
356 int64_t total_error = 0; | 356 int64_t total_error = 0; |
357 const int count_test_block = 10000; | 357 const int count_test_block = 10000; |
358 for (int i = 0; i < count_test_block; ++i) { | 358 for (int i = 0; i < count_test_block; ++i) { |
359 DECLARE_ALIGNED_ARRAY(16, int16_t, test_input_block, kNumCoeffs); | 359 DECLARE_ALIGNED(16, int16_t, test_input_block[kNumCoeffs]); |
360 DECLARE_ALIGNED_ARRAY(16, tran_low_t, test_temp_block, kNumCoeffs); | 360 DECLARE_ALIGNED(16, tran_low_t, test_temp_block[kNumCoeffs]); |
361 DECLARE_ALIGNED_ARRAY(16, uint8_t, dst, kNumCoeffs); | 361 DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]); |
362 DECLARE_ALIGNED_ARRAY(16, uint8_t, src, kNumCoeffs); | 362 DECLARE_ALIGNED(16, uint8_t, src[kNumCoeffs]); |
363 #if CONFIG_VP9_HIGHBITDEPTH | 363 #if CONFIG_VP9_HIGHBITDEPTH |
364 DECLARE_ALIGNED_ARRAY(16, uint16_t, dst16, kNumCoeffs); | 364 DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]); |
365 DECLARE_ALIGNED_ARRAY(16, uint16_t, src16, kNumCoeffs); | 365 DECLARE_ALIGNED(16, uint16_t, src16[kNumCoeffs]); |
366 #endif | 366 #endif |
367 | 367 |
368 // Initialize a test block with input range [-mask_, mask_]. | 368 // Initialize a test block with input range [-mask_, mask_]. |
369 for (int j = 0; j < kNumCoeffs; ++j) { | 369 for (int j = 0; j < kNumCoeffs; ++j) { |
370 if (bit_depth_ == VPX_BITS_8) { | 370 if (bit_depth_ == VPX_BITS_8) { |
371 src[j] = rnd.Rand8(); | 371 src[j] = rnd.Rand8(); |
372 dst[j] = rnd.Rand8(); | 372 dst[j] = rnd.Rand8(); |
373 test_input_block[j] = src[j] - dst[j]; | 373 test_input_block[j] = src[j] - dst[j]; |
374 #if CONFIG_VP9_HIGHBITDEPTH | 374 #if CONFIG_VP9_HIGHBITDEPTH |
375 } else { | 375 } else { |
(...skipping 33 matching lines...) |
409 EXPECT_GE(1u << 2 * (bit_depth_ - 8), max_error) | 409 EXPECT_GE(1u << 2 * (bit_depth_ - 8), max_error) |
410 << "Error: 16x16 FHT/IHT has an individual round trip error > 1"; | 410 << "Error: 16x16 FHT/IHT has an individual round trip error > 1"; |
411 | 411 |
412 EXPECT_GE(count_test_block << 2 * (bit_depth_ - 8), total_error) | 412 EXPECT_GE(count_test_block << 2 * (bit_depth_ - 8), total_error) |
413 << "Error: 16x16 FHT/IHT has average round trip error > 1 per block"; | 413 << "Error: 16x16 FHT/IHT has average round trip error > 1 per block"; |
414 } | 414 } |
415 | 415 |
416 void RunCoeffCheck() { | 416 void RunCoeffCheck() { |
417 ACMRandom rnd(ACMRandom::DeterministicSeed()); | 417 ACMRandom rnd(ACMRandom::DeterministicSeed()); |
418 const int count_test_block = 1000; | 418 const int count_test_block = 1000; |
419 DECLARE_ALIGNED_ARRAY(16, int16_t, input_block, kNumCoeffs); | 419 DECLARE_ALIGNED(16, int16_t, input_block[kNumCoeffs]); |
420 DECLARE_ALIGNED_ARRAY(16, tran_low_t, output_ref_block, kNumCoeffs); | 420 DECLARE_ALIGNED(16, tran_low_t, output_ref_block[kNumCoeffs]); |
421 DECLARE_ALIGNED_ARRAY(16, tran_low_t, output_block, kNumCoeffs); | 421 DECLARE_ALIGNED(16, tran_low_t, output_block[kNumCoeffs]); |
422 | 422 |
423 for (int i = 0; i < count_test_block; ++i) { | 423 for (int i = 0; i < count_test_block; ++i) { |
424 // Initialize a test block with input range [-mask_, mask_]. | 424 // Initialize a test block with input range [-mask_, mask_]. |
425 for (int j = 0; j < kNumCoeffs; ++j) | 425 for (int j = 0; j < kNumCoeffs; ++j) |
426 input_block[j] = (rnd.Rand16() & mask_) - (rnd.Rand16() & mask_); | 426 input_block[j] = (rnd.Rand16() & mask_) - (rnd.Rand16() & mask_); |
427 | 427 |
428 fwd_txfm_ref(input_block, output_ref_block, pitch_, tx_type_); | 428 fwd_txfm_ref(input_block, output_ref_block, pitch_, tx_type_); |
429 ASM_REGISTER_STATE_CHECK(RunFwdTxfm(input_block, output_block, pitch_)); | 429 ASM_REGISTER_STATE_CHECK(RunFwdTxfm(input_block, output_block, pitch_)); |
430 | 430 |
431 // The minimum quant value is 4. | 431 // The minimum quant value is 4. |
432 for (int j = 0; j < kNumCoeffs; ++j) | 432 for (int j = 0; j < kNumCoeffs; ++j) |
433 EXPECT_EQ(output_block[j], output_ref_block[j]); | 433 EXPECT_EQ(output_block[j], output_ref_block[j]); |
434 } | 434 } |
435 } | 435 } |
436 | 436 |
437 void RunMemCheck() { | 437 void RunMemCheck() { |
438 ACMRandom rnd(ACMRandom::DeterministicSeed()); | 438 ACMRandom rnd(ACMRandom::DeterministicSeed()); |
439 const int count_test_block = 1000; | 439 const int count_test_block = 1000; |
440 DECLARE_ALIGNED_ARRAY(16, int16_t, input_block, kNumCoeffs); | 440 DECLARE_ALIGNED(16, int16_t, input_extreme_block[kNumCoeffs]); |
441 DECLARE_ALIGNED_ARRAY(16, int16_t, input_extreme_block, kNumCoeffs); | 441 DECLARE_ALIGNED(16, tran_low_t, output_ref_block[kNumCoeffs]); |
442 DECLARE_ALIGNED_ARRAY(16, tran_low_t, output_ref_block, kNumCoeffs); | 442 DECLARE_ALIGNED(16, tran_low_t, output_block[kNumCoeffs]); |
443 DECLARE_ALIGNED_ARRAY(16, tran_low_t, output_block, kNumCoeffs); | |
444 | 443 |
445 for (int i = 0; i < count_test_block; ++i) { | 444 for (int i = 0; i < count_test_block; ++i) { |
446 // Initialize a test block with input range [-mask_, mask_]. | 445 // Initialize a test block with input range [-mask_, mask_]. |
447 for (int j = 0; j < kNumCoeffs; ++j) { | 446 for (int j = 0; j < kNumCoeffs; ++j) { |
448 input_block[j] = (rnd.Rand16() & mask_) - (rnd.Rand16() & mask_); | |
449 input_extreme_block[j] = rnd.Rand8() % 2 ? mask_ : -mask_; | 447 input_extreme_block[j] = rnd.Rand8() % 2 ? mask_ : -mask_; |
450 } | 448 } |
451 if (i == 0) { | 449 if (i == 0) { |
452 for (int j = 0; j < kNumCoeffs; ++j) | 450 for (int j = 0; j < kNumCoeffs; ++j) |
453 input_extreme_block[j] = mask_; | 451 input_extreme_block[j] = mask_; |
454 } else if (i == 1) { | 452 } else if (i == 1) { |
455 for (int j = 0; j < kNumCoeffs; ++j) | 453 for (int j = 0; j < kNumCoeffs; ++j) |
456 input_extreme_block[j] = -mask_; | 454 input_extreme_block[j] = -mask_; |
457 } | 455 } |
458 | 456 |
459 fwd_txfm_ref(input_extreme_block, output_ref_block, pitch_, tx_type_); | 457 fwd_txfm_ref(input_extreme_block, output_ref_block, pitch_, tx_type_); |
460 ASM_REGISTER_STATE_CHECK(RunFwdTxfm(input_extreme_block, | 458 ASM_REGISTER_STATE_CHECK(RunFwdTxfm(input_extreme_block, |
461 output_block, pitch_)); | 459 output_block, pitch_)); |
462 | 460 |
463 // The minimum quant value is 4. | 461 // The minimum quant value is 4. |
464 for (int j = 0; j < kNumCoeffs; ++j) { | 462 for (int j = 0; j < kNumCoeffs; ++j) { |
465 EXPECT_EQ(output_block[j], output_ref_block[j]); | 463 EXPECT_EQ(output_block[j], output_ref_block[j]); |
466 EXPECT_GE(4 * DCT_MAX_VALUE << (bit_depth_ - 8), abs(output_block[j])) | 464 EXPECT_GE(4 * DCT_MAX_VALUE << (bit_depth_ - 8), abs(output_block[j])) |
467 << "Error: 16x16 FDCT has coefficient larger than 4*DCT_MAX_VALUE"; | 465 << "Error: 16x16 FDCT has coefficient larger than 4*DCT_MAX_VALUE"; |
468 } | 466 } |
469 } | 467 } |
470 } | 468 } |
471 | 469 |
472 void RunQuantCheck(int dc_thred, int ac_thred) { | 470 void RunQuantCheck(int dc_thred, int ac_thred) { |
473 ACMRandom rnd(ACMRandom::DeterministicSeed()); | 471 ACMRandom rnd(ACMRandom::DeterministicSeed()); |
474 const int count_test_block = 100000; | 472 const int count_test_block = 100000; |
475 DECLARE_ALIGNED_ARRAY(16, int16_t, input_block, kNumCoeffs); | 473 DECLARE_ALIGNED(16, int16_t, input_extreme_block[kNumCoeffs]); |
476 DECLARE_ALIGNED_ARRAY(16, int16_t, input_extreme_block, kNumCoeffs); | 474 DECLARE_ALIGNED(16, tran_low_t, output_ref_block[kNumCoeffs]); |
477 DECLARE_ALIGNED_ARRAY(16, tran_low_t, output_ref_block, kNumCoeffs); | |
478 | 475 |
479 DECLARE_ALIGNED_ARRAY(16, uint8_t, dst, kNumCoeffs); | 476 DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]); |
480 DECLARE_ALIGNED_ARRAY(16, uint8_t, ref, kNumCoeffs); | 477 DECLARE_ALIGNED(16, uint8_t, ref[kNumCoeffs]); |
481 #if CONFIG_VP9_HIGHBITDEPTH | 478 #if CONFIG_VP9_HIGHBITDEPTH |
482 DECLARE_ALIGNED_ARRAY(16, uint16_t, dst16, kNumCoeffs); | 479 DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]); |
483 DECLARE_ALIGNED_ARRAY(16, uint16_t, ref16, kNumCoeffs); | 480 DECLARE_ALIGNED(16, uint16_t, ref16[kNumCoeffs]); |
484 #endif | 481 #endif |
485 | 482 |
486 for (int i = 0; i < count_test_block; ++i) { | 483 for (int i = 0; i < count_test_block; ++i) { |
487 // Initialize a test block with input range [-mask_, mask_]. | 484 // Initialize a test block with input range [-mask_, mask_]. |
488 for (int j = 0; j < kNumCoeffs; ++j) { | 485 for (int j = 0; j < kNumCoeffs; ++j) { |
489 if (bit_depth_ == VPX_BITS_8) | |
490 input_block[j] = rnd.Rand8() - rnd.Rand8(); | |
491 else | |
492 input_block[j] = (rnd.Rand16() & mask_) - (rnd.Rand16() & mask_); | |
493 input_extreme_block[j] = rnd.Rand8() % 2 ? mask_ : -mask_; | 486 input_extreme_block[j] = rnd.Rand8() % 2 ? mask_ : -mask_; |
494 } | 487 } |
495 if (i == 0) | 488 if (i == 0) |
496 for (int j = 0; j < kNumCoeffs; ++j) | 489 for (int j = 0; j < kNumCoeffs; ++j) |
497 input_extreme_block[j] = mask_; | 490 input_extreme_block[j] = mask_; |
498 if (i == 1) | 491 if (i == 1) |
499 for (int j = 0; j < kNumCoeffs; ++j) | 492 for (int j = 0; j < kNumCoeffs; ++j) |
500 input_extreme_block[j] = -mask_; | 493 input_extreme_block[j] = -mask_; |
501 | 494 |
502 fwd_txfm_ref(input_extreme_block, output_ref_block, pitch_, tx_type_); | 495 fwd_txfm_ref(input_extreme_block, output_ref_block, pitch_, tx_type_); |
503 | 496 |
504 // clear reconstructed pixel buffers | 497 // clear reconstructed pixel buffers |
505 vpx_memset(dst, 0, kNumCoeffs * sizeof(uint8_t)); | 498 memset(dst, 0, kNumCoeffs * sizeof(uint8_t)); |
506 vpx_memset(ref, 0, kNumCoeffs * sizeof(uint8_t)); | 499 memset(ref, 0, kNumCoeffs * sizeof(uint8_t)); |
507 #if CONFIG_VP9_HIGHBITDEPTH | 500 #if CONFIG_VP9_HIGHBITDEPTH |
508 vpx_memset(dst16, 0, kNumCoeffs * sizeof(uint16_t)); | 501 memset(dst16, 0, kNumCoeffs * sizeof(uint16_t)); |
509 vpx_memset(ref16, 0, kNumCoeffs * sizeof(uint16_t)); | 502 memset(ref16, 0, kNumCoeffs * sizeof(uint16_t)); |
510 #endif | 503 #endif |
511 | 504 |
512 // quantization with maximum allowed step sizes | 505 // quantization with maximum allowed step sizes |
513 output_ref_block[0] = (output_ref_block[0] / dc_thred) * dc_thred; | 506 output_ref_block[0] = (output_ref_block[0] / dc_thred) * dc_thred; |
514 for (int j = 1; j < kNumCoeffs; ++j) | 507 for (int j = 1; j < kNumCoeffs; ++j) |
515 output_ref_block[j] = (output_ref_block[j] / ac_thred) * ac_thred; | 508 output_ref_block[j] = (output_ref_block[j] / ac_thred) * ac_thred; |
516 if (bit_depth_ == VPX_BITS_8) { | 509 if (bit_depth_ == VPX_BITS_8) { |
517 inv_txfm_ref(output_ref_block, ref, pitch_, tx_type_); | 510 inv_txfm_ref(output_ref_block, ref, pitch_, tx_type_); |
518 ASM_REGISTER_STATE_CHECK(RunInvTxfm(output_ref_block, dst, pitch_)); | 511 ASM_REGISTER_STATE_CHECK(RunInvTxfm(output_ref_block, dst, pitch_)); |
519 #if CONFIG_VP9_HIGHBITDEPTH | 512 #if CONFIG_VP9_HIGHBITDEPTH |
(...skipping 12 matching lines...) |
532 for (int j = 0; j < kNumCoeffs; ++j) | 525 for (int j = 0; j < kNumCoeffs; ++j) |
533 EXPECT_EQ(ref16[j], dst16[j]); | 526 EXPECT_EQ(ref16[j], dst16[j]); |
534 #endif | 527 #endif |
535 } | 528 } |
536 } | 529 } |
537 } | 530 } |
538 | 531 |
539 void RunInvAccuracyCheck() { | 532 void RunInvAccuracyCheck() { |
540 ACMRandom rnd(ACMRandom::DeterministicSeed()); | 533 ACMRandom rnd(ACMRandom::DeterministicSeed()); |
541 const int count_test_block = 1000; | 534 const int count_test_block = 1000; |
542 DECLARE_ALIGNED_ARRAY(16, int16_t, in, kNumCoeffs); | 535 DECLARE_ALIGNED(16, int16_t, in[kNumCoeffs]); |
543 DECLARE_ALIGNED_ARRAY(16, tran_low_t, coeff, kNumCoeffs); | 536 DECLARE_ALIGNED(16, tran_low_t, coeff[kNumCoeffs]); |
544 DECLARE_ALIGNED_ARRAY(16, uint8_t, dst, kNumCoeffs); | 537 DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]); |
545 DECLARE_ALIGNED_ARRAY(16, uint8_t, src, kNumCoeffs); | 538 DECLARE_ALIGNED(16, uint8_t, src[kNumCoeffs]); |
546 #if CONFIG_VP9_HIGHBITDEPTH | 539 #if CONFIG_VP9_HIGHBITDEPTH |
547 DECLARE_ALIGNED_ARRAY(16, uint16_t, dst16, kNumCoeffs); | 540 DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]); |
548 DECLARE_ALIGNED_ARRAY(16, uint16_t, src16, kNumCoeffs); | 541 DECLARE_ALIGNED(16, uint16_t, src16[kNumCoeffs]); |
549 #endif // CONFIG_VP9_HIGHBITDEPTH | 542 #endif // CONFIG_VP9_HIGHBITDEPTH |
550 | 543 |
551 for (int i = 0; i < count_test_block; ++i) { | 544 for (int i = 0; i < count_test_block; ++i) { |
552 double out_r[kNumCoeffs]; | 545 double out_r[kNumCoeffs]; |
553 | 546 |
554 // Initialize a test block with input range [-255, 255]. | 547 // Initialize a test block with input range [-255, 255]. |
555 for (int j = 0; j < kNumCoeffs; ++j) { | 548 for (int j = 0; j < kNumCoeffs; ++j) { |
556 if (bit_depth_ == VPX_BITS_8) { | 549 if (bit_depth_ == VPX_BITS_8) { |
557 src[j] = rnd.Rand8(); | 550 src[j] = rnd.Rand8(); |
558 dst[j] = rnd.Rand8(); | 551 dst[j] = rnd.Rand8(); |
(...skipping 33 matching lines...) |
592 << " at index " << j; | 585 << " at index " << j; |
593 } | 586 } |
594 } | 587 } |
595 } | 588 } |
596 | 589 |
597 void CompareInvReference(IdctFunc ref_txfm, int thresh) { | 590 void CompareInvReference(IdctFunc ref_txfm, int thresh) { |
598 ACMRandom rnd(ACMRandom::DeterministicSeed()); | 591 ACMRandom rnd(ACMRandom::DeterministicSeed()); |
599 const int count_test_block = 10000; | 592 const int count_test_block = 10000; |
600 const int eob = 10; | 593 const int eob = 10; |
601 const int16_t *scan = vp9_default_scan_orders[TX_16X16].scan; | 594 const int16_t *scan = vp9_default_scan_orders[TX_16X16].scan; |
602 DECLARE_ALIGNED_ARRAY(16, tran_low_t, coeff, kNumCoeffs); | 595 DECLARE_ALIGNED(16, tran_low_t, coeff[kNumCoeffs]); |
603 DECLARE_ALIGNED_ARRAY(16, uint8_t, dst, kNumCoeffs); | 596 DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]); |
604 DECLARE_ALIGNED_ARRAY(16, uint8_t, ref, kNumCoeffs); | 597 DECLARE_ALIGNED(16, uint8_t, ref[kNumCoeffs]); |
605 #if CONFIG_VP9_HIGHBITDEPTH | 598 #if CONFIG_VP9_HIGHBITDEPTH |
606 DECLARE_ALIGNED_ARRAY(16, uint16_t, dst16, kNumCoeffs); | 599 DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]); |
607 DECLARE_ALIGNED_ARRAY(16, uint16_t, ref16, kNumCoeffs); | 600 DECLARE_ALIGNED(16, uint16_t, ref16[kNumCoeffs]); |
608 #endif // CONFIG_VP9_HIGHBITDEPTH | 601 #endif // CONFIG_VP9_HIGHBITDEPTH |
609 | 602 |
610 for (int i = 0; i < count_test_block; ++i) { | 603 for (int i = 0; i < count_test_block; ++i) { |
611 for (int j = 0; j < kNumCoeffs; ++j) { | 604 for (int j = 0; j < kNumCoeffs; ++j) { |
612 if (j < eob) { | 605 if (j < eob) { |
613 // Random values less than the threshold, either positive or negative | 606 // Random values less than the threshold, either positive or negative |
614 coeff[scan[j]] = rnd(thresh) * (1 - 2 * (i % 2)); | 607 coeff[scan[j]] = rnd(thresh) * (1 - 2 * (i % 2)); |
615 } else { | 608 } else { |
616 coeff[scan[j]] = 0; | 609 coeff[scan[j]] = 0; |
617 } | 610 } |
(...skipping 309 matching lines...) |
927 make_tuple(&idct16x16_10_add_10_c, | 920 make_tuple(&idct16x16_10_add_10_c, |
928 &idct16x16_10_add_10_sse2, 3167, VPX_BITS_10), | 921 &idct16x16_10_add_10_sse2, 3167, VPX_BITS_10), |
929 make_tuple(&idct16x16_10, | 922 make_tuple(&idct16x16_10, |
930 &idct16x16_256_add_10_sse2, 3167, VPX_BITS_10), | 923 &idct16x16_256_add_10_sse2, 3167, VPX_BITS_10), |
931 make_tuple(&idct16x16_10_add_12_c, | 924 make_tuple(&idct16x16_10_add_12_c, |
932 &idct16x16_10_add_12_sse2, 3167, VPX_BITS_12), | 925 &idct16x16_10_add_12_sse2, 3167, VPX_BITS_12), |
933 make_tuple(&idct16x16_12, | 926 make_tuple(&idct16x16_12, |
934 &idct16x16_256_add_12_sse2, 3167, VPX_BITS_12))); | 927 &idct16x16_256_add_12_sse2, 3167, VPX_BITS_12))); |
935 #endif // HAVE_SSE2 && CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE | 928 #endif // HAVE_SSE2 && CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE |
936 | 929 |
937 #if HAVE_SSSE3 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE | 930 #if HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE |
938 INSTANTIATE_TEST_CASE_P( | 931 INSTANTIATE_TEST_CASE_P( |
939 SSSE3, Trans16x16DCT, | 932 MSA, Trans16x16DCT, |
940 ::testing::Values( | 933 ::testing::Values( |
941 make_tuple(&vp9_fdct16x16_c, &vp9_idct16x16_256_add_ssse3, 0, | 934 make_tuple(&vp9_fdct16x16_c, |
942 VPX_BITS_8))); | 935 &vp9_idct16x16_256_add_msa, 0, VPX_BITS_8))); |
943 #endif // HAVE_SSSE3 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE | 936 INSTANTIATE_TEST_CASE_P( |
| 937 MSA, Trans16x16HT, |
| 938 ::testing::Values( |
| 939 make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_msa, 0, VPX_BITS_8), |
| 940 make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_msa, 1, VPX_BITS_8), |
| 941 make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_msa, 2, VPX_BITS_8), |
| 942 make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_msa, 3, VPX_BITS_8))); |
| 943 #endif // HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE |
944 } // namespace | 944 } // namespace |
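
The bulk of this change is mechanical: DECLARE_ALIGNED_ARRAY(align, type, name, size) becomes DECLARE_ALIGNED(align, type, name[size]), so the array declarator travels inside the macro's third argument and a separate size parameter is no longer needed. Below is a minimal sketch of that macro shape, assuming a GCC/Clang-style aligned attribute (the libvpx macro in vpx_ports/mem.h also carries an MSVC __declspec(align) branch), not the verbatim libvpx definition.

    // Sketch only: illustrates the DECLARE_ALIGNED(align, type, name[size])
    // form used by the new code, assuming a GCC/Clang toolchain.
    #include <stdint.h>

    #define DECLARE_ALIGNED(n, typ, val) typ val __attribute__((aligned(n)))

    int main() {
      // Old form: DECLARE_ALIGNED_ARRAY(16, int16_t, block, 256);
      // New form: the array declarator is part of the macro argument.
      DECLARE_ALIGNED(16, int16_t, block[256]);
      block[0] = 1;
      return block[0] - 1;  // 0; `block` is 16-byte aligned
    }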