| Index: source/libvpx/vp9/encoder/x86/vp9_variance_mmx.c
|
| ===================================================================
|
| --- source/libvpx/vp9/encoder/x86/vp9_variance_mmx.c (revision 278778)
|
| +++ source/libvpx/vp9/encoder/x86/vp9_variance_mmx.c (working copy)
|
| @@ -10,144 +10,94 @@
|
|
|
| #include "./vpx_config.h"
|
| #include "vp9/encoder/vp9_variance.h"
|
| -#include "vp9/common/vp9_pragmas.h"
|
| #include "vpx_ports/mem.h"
|
|
|
| -extern unsigned int vp9_get8x8var_mmx
|
| -(
|
| - const unsigned char *src_ptr,
|
| - int source_stride,
|
| - const unsigned char *ref_ptr,
|
| - int recon_stride,
|
| - unsigned int *SSE,
|
| - int *Sum
|
| -);
|
| -extern unsigned int vp9_get4x4var_mmx
|
| -(
|
| - const unsigned char *src_ptr,
|
| - int source_stride,
|
| - const unsigned char *ref_ptr,
|
| - int recon_stride,
|
| - unsigned int *SSE,
|
| - int *Sum
|
| -);
|
| +unsigned int vp9_get8x8var_mmx(const uint8_t *src, int src_stride,
|
| + const uint8_t *ref, int ref_stride,
|
| + unsigned int *sse, int *sum);
|
|
|
| -unsigned int vp9_variance4x4_mmx(
|
| - const unsigned char *src_ptr,
|
| - int source_stride,
|
| - const unsigned char *ref_ptr,
|
| - int recon_stride,
|
| - unsigned int *sse) {
|
| - unsigned int var;
|
| - int avg;
|
| +unsigned int vp9_get4x4var_mmx(const uint8_t *src, int src_stride,
|
| + const uint8_t *ref, int ref_stride,
|
| +                               unsigned int *sse, int *sum);
|
|
|
| - vp9_get4x4var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &var, &avg);
|
| - *sse = var;
|
| - return (var - (((unsigned int)avg * avg) >> 4));
|
| +unsigned int vp9_variance4x4_mmx(const uint8_t *src, int src_stride,
|
| + const uint8_t *ref, int ref_stride,
|
| + unsigned int *sse) {
|
| + int sum;
|
| + vp9_get4x4var_mmx(src, src_stride, ref, ref_stride, sse, &sum);
|
| + return *sse - (((unsigned int)sum * sum) >> 4);
|
| }
|
|
|
| -unsigned int vp9_variance8x8_mmx(
|
| - const unsigned char *src_ptr,
|
| - int source_stride,
|
| - const unsigned char *ref_ptr,
|
| - int recon_stride,
|
| - unsigned int *sse) {
|
| - unsigned int var;
|
| - int avg;
|
| -
|
| - vp9_get8x8var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &var, &avg);
|
| - *sse = var;
|
| -
|
| - return (var - (((unsigned int)avg * avg) >> 6));
|
| +unsigned int vp9_variance8x8_mmx(const uint8_t *src, int src_stride,
|
| + const uint8_t *ref, int ref_stride,
|
| + unsigned int *sse) {
|
| + int sum;
|
| + vp9_get8x8var_mmx(src, src_stride, ref, ref_stride, sse, &sum);
|
| + return *sse - (((unsigned int)sum * sum) >> 6);
|
| }
|
|
|
| -unsigned int vp9_mse16x16_mmx(
|
| - const unsigned char *src_ptr,
|
| - int source_stride,
|
| - const unsigned char *ref_ptr,
|
| - int recon_stride,
|
| - unsigned int *sse) {
|
| - unsigned int sse0, sse1, sse2, sse3, var;
|
| +unsigned int vp9_mse16x16_mmx(const uint8_t *src, int src_stride,
|
| + const uint8_t *ref, int ref_stride,
|
| + unsigned int *sse) {
|
| + unsigned int sse0, sse1, sse2, sse3;
|
| int sum0, sum1, sum2, sum3;
|
|
|
| + vp9_get8x8var_mmx(src, src_stride, ref, ref_stride, &sse0, &sum0);
|
| + vp9_get8x8var_mmx(src + 8, src_stride, ref + 8, ref_stride, &sse1, &sum1);
|
| + vp9_get8x8var_mmx(src + 8 * src_stride, src_stride,
|
| + ref + 8 * ref_stride, ref_stride, &sse2, &sum2);
|
| + vp9_get8x8var_mmx(src + 8 * src_stride + 8, src_stride,
|
| + ref + 8 * ref_stride + 8, ref_stride, &sse3, &sum3);
|
|
|
| - vp9_get8x8var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &sse0,
|
| - &sum0);
|
| - vp9_get8x8var_mmx(src_ptr + 8, source_stride, ref_ptr + 8, recon_stride,
|
| - &sse1, &sum1);
|
| - vp9_get8x8var_mmx(src_ptr + 8 * source_stride, source_stride,
|
| - ref_ptr + 8 * recon_stride, recon_stride, &sse2, &sum2);
|
| - vp9_get8x8var_mmx(src_ptr + 8 * source_stride + 8, source_stride,
|
| - ref_ptr + 8 * recon_stride + 8, recon_stride, &sse3, &sum3);
|
| -
|
| - var = sse0 + sse1 + sse2 + sse3;
|
| - *sse = var;
|
| - return var;
|
| + *sse = sse0 + sse1 + sse2 + sse3;
|
| + return *sse;
|
| }
|
|
|
|
|
| -unsigned int vp9_variance16x16_mmx(
|
| - const unsigned char *src_ptr,
|
| - int source_stride,
|
| - const unsigned char *ref_ptr,
|
| - int recon_stride,
|
| - unsigned int *sse) {
|
| - unsigned int sse0, sse1, sse2, sse3, var;
|
| - int sum0, sum1, sum2, sum3, avg;
|
| +unsigned int vp9_variance16x16_mmx(const uint8_t *src, int src_stride,
|
| + const uint8_t *ref, int ref_stride,
|
| + unsigned int *sse) {
|
| + unsigned int sse0, sse1, sse2, sse3;
|
| + int sum0, sum1, sum2, sum3, sum;
|
|
|
| - vp9_get8x8var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &sse0,
|
| - &sum0);
|
| - vp9_get8x8var_mmx(src_ptr + 8, source_stride, ref_ptr + 8, recon_stride,
|
| - &sse1, &sum1);
|
| - vp9_get8x8var_mmx(src_ptr + 8 * source_stride, source_stride,
|
| - ref_ptr + 8 * recon_stride, recon_stride, &sse2, &sum2);
|
| - vp9_get8x8var_mmx(src_ptr + 8 * source_stride + 8, source_stride,
|
| - ref_ptr + 8 * recon_stride + 8, recon_stride, &sse3, &sum3);
|
| + vp9_get8x8var_mmx(src, src_stride, ref, ref_stride, &sse0, &sum0);
|
| + vp9_get8x8var_mmx(src + 8, src_stride, ref + 8, ref_stride, &sse1, &sum1);
|
| + vp9_get8x8var_mmx(src + 8 * src_stride, src_stride,
|
| + ref + 8 * ref_stride, ref_stride, &sse2, &sum2);
|
| + vp9_get8x8var_mmx(src + 8 * src_stride + 8, src_stride,
|
| + ref + 8 * ref_stride + 8, ref_stride, &sse3, &sum3);
|
|
|
| - var = sse0 + sse1 + sse2 + sse3;
|
| - avg = sum0 + sum1 + sum2 + sum3;
|
| - *sse = var;
|
| - return (var - (((unsigned int)avg * avg) >> 8));
|
| + *sse = sse0 + sse1 + sse2 + sse3;
|
| + sum = sum0 + sum1 + sum2 + sum3;
|
| + return *sse - (((unsigned int)sum * sum) >> 8);
|
| }
|
|
|
| -unsigned int vp9_variance16x8_mmx(
|
| - const unsigned char *src_ptr,
|
| - int source_stride,
|
| - const unsigned char *ref_ptr,
|
| - int recon_stride,
|
| - unsigned int *sse) {
|
| - unsigned int sse0, sse1, var;
|
| - int sum0, sum1, avg;
|
| +unsigned int vp9_variance16x8_mmx(const uint8_t *src, int src_stride,
|
| + const uint8_t *ref, int ref_stride,
|
| + unsigned int *sse) {
|
| + unsigned int sse0, sse1;
|
| + int sum0, sum1, sum;
|
|
|
| - vp9_get8x8var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &sse0,
|
| - &sum0);
|
| - vp9_get8x8var_mmx(src_ptr + 8, source_stride, ref_ptr + 8, recon_stride,
|
| - &sse1, &sum1);
|
| + vp9_get8x8var_mmx(src, src_stride, ref, ref_stride, &sse0, &sum0);
|
| + vp9_get8x8var_mmx(src + 8, src_stride, ref + 8, ref_stride, &sse1, &sum1);
|
|
|
| - var = sse0 + sse1;
|
| - avg = sum0 + sum1;
|
| - *sse = var;
|
| - return (var - (((unsigned int)avg * avg) >> 7));
|
| + *sse = sse0 + sse1;
|
| + sum = sum0 + sum1;
|
| + return *sse - (((unsigned int)sum * sum) >> 7);
|
| }
|
|
|
|
|
| -unsigned int vp9_variance8x16_mmx(
|
| - const unsigned char *src_ptr,
|
| - int source_stride,
|
| - const unsigned char *ref_ptr,
|
| - int recon_stride,
|
| - unsigned int *sse) {
|
| - unsigned int sse0, sse1, var;
|
| - int sum0, sum1, avg;
|
| +unsigned int vp9_variance8x16_mmx(const uint8_t *src, int src_stride,
|
| + const uint8_t *ref, int ref_stride,
|
| + unsigned int *sse) {
|
| + unsigned int sse0, sse1;
|
| + int sum0, sum1, sum;
|
|
|
| - vp9_get8x8var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &sse0,
|
| - &sum0);
|
| - vp9_get8x8var_mmx(src_ptr + 8 * source_stride, source_stride,
|
| - ref_ptr + 8 * recon_stride, recon_stride, &sse1, &sum1);
|
| + vp9_get8x8var_mmx(src, src_stride, ref, ref_stride, &sse0, &sum0);
|
| + vp9_get8x8var_mmx(src + 8 * src_stride, src_stride,
|
| + ref + 8 * ref_stride, ref_stride, &sse1, &sum1);
|
|
|
| - var = sse0 + sse1;
|
| - avg = sum0 + sum1;
|
| - *sse = var;
|
| -
|
| - return (var - (((unsigned int)avg * avg) >> 7));
|
| + *sse = sse0 + sse1;
|
| + sum = sum0 + sum1;
|
| + return *sse - (((unsigned int)sum * sum) >> 7);
|
| }
|
|
|