Chromium Code Reviews

Unified Diff: source/libvpx/vp8/encoder/onyx_if.c

Issue 1162573005: libvpx: Pull from upstream (Closed)
Base URL: https://chromium.googlesource.com/chromium/deps/libvpx.git@master
Patch Set: Created 5 years, 6 months ago
@@ -1,10 +1,10 @@
 /*
  *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
  *
  *  Use of this source code is governed by a BSD-style license
  *  that can be found in the LICENSE file in the root of the source
  *  tree. An additional intellectual property rights grant can be found
  *  in the file PATENTS. All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
(...skipping 2113 matching lines...)
@@ -2124,61 +2124,61 @@
 
 #if CONFIG_MULTITHREAD
     if(vp8cx_create_encoder_threads(cpi))
     {
         vp8_remove_compressor(&cpi);
         return 0;
     }
 #endif
 
     cpi->fn_ptr[BLOCK_16X16].sdf = vpx_sad16x16;
-    cpi->fn_ptr[BLOCK_16X16].vf = vp8_variance16x16;
+    cpi->fn_ptr[BLOCK_16X16].vf = vpx_variance16x16;
     cpi->fn_ptr[BLOCK_16X16].svf = vp8_sub_pixel_variance16x16;
     cpi->fn_ptr[BLOCK_16X16].svf_halfpix_h = vp8_variance_halfpixvar16x16_h;
     cpi->fn_ptr[BLOCK_16X16].svf_halfpix_v = vp8_variance_halfpixvar16x16_v;
     cpi->fn_ptr[BLOCK_16X16].svf_halfpix_hv = vp8_variance_halfpixvar16x16_hv;
     cpi->fn_ptr[BLOCK_16X16].sdx3f = vpx_sad16x16x3;
     cpi->fn_ptr[BLOCK_16X16].sdx8f = vpx_sad16x16x8;
     cpi->fn_ptr[BLOCK_16X16].sdx4df = vpx_sad16x16x4d;
 
     cpi->fn_ptr[BLOCK_16X8].sdf = vpx_sad16x8;
-    cpi->fn_ptr[BLOCK_16X8].vf = vp8_variance16x8;
+    cpi->fn_ptr[BLOCK_16X8].vf = vpx_variance16x8;
     cpi->fn_ptr[BLOCK_16X8].svf = vp8_sub_pixel_variance16x8;
     cpi->fn_ptr[BLOCK_16X8].svf_halfpix_h = NULL;
     cpi->fn_ptr[BLOCK_16X8].svf_halfpix_v = NULL;
     cpi->fn_ptr[BLOCK_16X8].svf_halfpix_hv = NULL;
     cpi->fn_ptr[BLOCK_16X8].sdx3f = vpx_sad16x8x3;
     cpi->fn_ptr[BLOCK_16X8].sdx8f = vpx_sad16x8x8;
     cpi->fn_ptr[BLOCK_16X8].sdx4df = vpx_sad16x8x4d;
 
     cpi->fn_ptr[BLOCK_8X16].sdf = vpx_sad8x16;
-    cpi->fn_ptr[BLOCK_8X16].vf = vp8_variance8x16;
+    cpi->fn_ptr[BLOCK_8X16].vf = vpx_variance8x16;
     cpi->fn_ptr[BLOCK_8X16].svf = vp8_sub_pixel_variance8x16;
     cpi->fn_ptr[BLOCK_8X16].svf_halfpix_h = NULL;
     cpi->fn_ptr[BLOCK_8X16].svf_halfpix_v = NULL;
     cpi->fn_ptr[BLOCK_8X16].svf_halfpix_hv = NULL;
     cpi->fn_ptr[BLOCK_8X16].sdx3f = vpx_sad8x16x3;
     cpi->fn_ptr[BLOCK_8X16].sdx8f = vpx_sad8x16x8;
     cpi->fn_ptr[BLOCK_8X16].sdx4df = vpx_sad8x16x4d;
 
     cpi->fn_ptr[BLOCK_8X8].sdf = vpx_sad8x8;
-    cpi->fn_ptr[BLOCK_8X8].vf = vp8_variance8x8;
+    cpi->fn_ptr[BLOCK_8X8].vf = vpx_variance8x8;
     cpi->fn_ptr[BLOCK_8X8].svf = vp8_sub_pixel_variance8x8;
     cpi->fn_ptr[BLOCK_8X8].svf_halfpix_h = NULL;
     cpi->fn_ptr[BLOCK_8X8].svf_halfpix_v = NULL;
     cpi->fn_ptr[BLOCK_8X8].svf_halfpix_hv = NULL;
     cpi->fn_ptr[BLOCK_8X8].sdx3f = vpx_sad8x8x3;
     cpi->fn_ptr[BLOCK_8X8].sdx8f = vpx_sad8x8x8;
     cpi->fn_ptr[BLOCK_8X8].sdx4df = vpx_sad8x8x4d;
 
     cpi->fn_ptr[BLOCK_4X4].sdf = vpx_sad4x4;
-    cpi->fn_ptr[BLOCK_4X4].vf = vp8_variance4x4;
+    cpi->fn_ptr[BLOCK_4X4].vf = vpx_variance4x4;
     cpi->fn_ptr[BLOCK_4X4].svf = vp8_sub_pixel_variance4x4;
     cpi->fn_ptr[BLOCK_4X4].svf_halfpix_h = NULL;
     cpi->fn_ptr[BLOCK_4X4].svf_halfpix_v = NULL;
     cpi->fn_ptr[BLOCK_4X4].svf_halfpix_hv = NULL;
     cpi->fn_ptr[BLOCK_4X4].sdx3f = vpx_sad4x4x3;
     cpi->fn_ptr[BLOCK_4X4].sdx8f = vpx_sad4x4x8;
     cpi->fn_ptr[BLOCK_4X4].sdx4df = vpx_sad4x4x4d;
 
 #if ARCH_X86 || ARCH_X86_64
     cpi->fn_ptr[BLOCK_16X16].copymem = vp8_copy32xn;
(...skipping 366 matching lines...)
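The .vf hooks are the substance of the hunk above: the 16x16 through 4x4 variance kernels move from VP8-local vp8_variance* symbols to the shared vpx_dsp vpx_variance* implementations (the SAD pointers are already on vpx_ and appear only as context). For orientation, a minimal sketch of what such a kernel computes, assuming the vpx_dsp-style signature (the function returns the variance of the block difference and writes the raw sum of squared errors through the out-parameter); my_variance16x16 is a hypothetical stand-in, not the library routine:

#include <stdint.h>

/* Hypothetical stand-in with the vpx_dsp-style signature: returns the
 * variance of the 16x16 difference block and reports the raw sum of
 * squared errors through *sse. */
static unsigned int my_variance16x16(const uint8_t *src, int src_stride,
                                     const uint8_t *ref, int ref_stride,
                                     unsigned int *sse)
{
    int r, c;
    int64_t sum = 0;   /* signed sum of differences */
    uint64_t sq = 0;   /* sum of squared differences (SSE) */

    for (r = 0; r < 16; ++r)
    {
        for (c = 0; c < 16; ++c)
        {
            const int d = src[c] - ref[c];
            sum += d;
            sq += (uint64_t)(d * d);
        }
        src += src_stride;
        ref += ref_stride;
    }
    *sse = (unsigned int)sq;
    /* variance = SSE - sum^2 / N, with N = 256 samples */
    return (unsigned int)(sq - (uint64_t)((sum * sum) >> 8));
}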
@@ -2551,21 +2551,21 @@
     unsigned int row, col;
     uint64_t total_sse = 0;
     int diff;
 
     for (row = 0; row + 16 <= rows; row += 16)
     {
         for (col = 0; col + 16 <= cols; col += 16)
         {
             unsigned int sse;
 
-            vp8_mse16x16(orig + col, orig_stride,
+            vpx_mse16x16(orig + col, orig_stride,
                          recon + col, recon_stride,
                          &sse);
             total_sse += sse;
         }
 
         /* Handle odd-sized width */
         if (col < cols)
         {
             unsigned int border_row, border_col;
             unsigned char *border_orig = orig;
(...skipping 805 matching lines...)
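The hunk above accumulates SSE over all complete 16x16 blocks with vpx_mse16x16 and then falls into a scalar path for the right/bottom border of odd-sized planes (truncated in this view). Once total_sse covers the plane, converting to PSNR is a one-liner; a minimal sketch assuming 8-bit samples, using a hand-rolled helper rather than any particular libvpx API:

#include <math.h>
#include <stdint.h>

/* Convert an accumulated sum of squared errors over `samples` 8-bit
 * pixels into PSNR in dB. Caps the result when the planes are
 * identical (sse == 0), as log10 would otherwise diverge. */
static double sse_to_psnr_db(uint64_t sse, uint64_t samples)
{
    if (sse == 0)
        return 100.0; /* conventional cap for identical planes */
    return 10.0 * log10((255.0 * 255.0 * (double)samples) / (double)sse);
}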
@@ -3377,21 +3377,21 @@
      * zero_last mode at least |x| frames in a row.
      */
     for (i = 0; i < source->y_height; i += 16 * skip)
     {
         int block_index_row = (i >> 4) * cpi->common.mb_cols;
         for (j = 0; j < source->y_width; j += 16 * skip)
         {
             int index = block_index_row + (j >> 4);
             if (cpi->consec_zero_last[index] >= min_consec_zero_last) {
                 unsigned int sse;
-                Total += vp8_mse16x16(src + j,
+                Total += vpx_mse16x16(src + j,
                                       source->y_stride,
                                       dst + j, dest->y_stride,
                                       &sse);
                 num_blocks++;
             }
         }
         src += 16 * skip * source->y_stride;
         dst += 16 * skip * dest->y_stride;
     }
     // Only return non-zero if we have at least ~1/16 samples for estimate.
(...skipping 43 matching lines...)
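In the sampling loop above, i and j advance in steps of 16 * skip pixels, and the shifts (i >> 4) and (j >> 4) recover macroblock coordinates, so index addresses the per-macroblock consec_zero_last history in raster order. A small illustrative helper for that mapping (not part of the encoder):

/* Map a pixel position (x, y) in the luma plane to the raster-order
 * index of its 16x16 macroblock, given the frame width in macroblocks.
 * Illustrative only; the encoder inlines this arithmetic. */
static int mb_index_for_pixel(int x, int y, int mb_cols)
{
    const int mb_row = y >> 4; /* y / 16 */
    const int mb_col = x >> 4; /* x / 16 */
    return mb_row * mb_cols + mb_col;
}

With skip = 2, for example, the loops visit every second macroblock in each dimension, sampling roughly a quarter of the frame, which is why the caller only trusts the estimate once enough blocks contributed (the ~1/16 check above).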
@@ -3441,31 +3441,31 @@
     // summing the normalized mean square error, only for blocks that have
     // been encoded as ZEROMV LAST at least min_consec_zero_last frames in
     // a row and have small sum difference between current and previous frame.
     // Normalization here is by the contrast of the current frame block.
     for (i = 0; i < cm->Height; i += 16 * skip) {
         int block_index_row = (i >> 4) * cm->mb_cols;
         for (j = 0; j < cm->Width; j += 16 * skip) {
             int index = block_index_row + (j >> 4);
             if (cpi->consec_zero_last[index] >= min_consec_zero_last) {
                 unsigned int sse;
-                const unsigned int var = vp8_variance16x16(src + j,
+                const unsigned int var = vpx_variance16x16(src + j,
                                                            ystride,
                                                            dst + j,
                                                            ystride,
                                                            &sse);
                 // Only consider this block as valid for noise measurement
                 // if the sum_diff average of the current and previous frame
                 // is small (to avoid effects from lighting change).
                 if ((sse - var) < 128) {
                     unsigned int sse2;
-                    const unsigned int act = vp8_variance16x16(src + j,
+                    const unsigned int act = vpx_variance16x16(src + j,
                                                                ystride,
                                                                const_source,
                                                                0,
                                                                &sse2);
                     if (act > 0)
                         total += sse / act;
                     num_blocks++;
                 }
             }
         }
(...skipping 2514 matching lines...)
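The arithmetic in the hunk above is worth unpacking. With a variance kernel of the shape sketched earlier, sse - var equals (sum of pixel differences)^2 / 256, so the test (sse - var) < 128 admits only blocks whose mean luma shift is below roughly 0.7 per pixel, i.e. noise-like residual rather than a lighting change. The second call compares the source block against const_source with stride 0, which makes act the source block's own variance (its contrast), used to normalize the accumulated noise energy. A self-contained sketch of that per-block decision, reusing the hypothetical my_variance16x16 from the earlier sketch (flat must point at 16 bytes of a constant value, read repeatedly via the zero stride):

#include <stdint.h>

/* From the earlier sketch: returns variance, writes raw SSE to *sse. */
unsigned int my_variance16x16(const uint8_t *src, int src_stride,
                              const uint8_t *ref, int ref_stride,
                              unsigned int *sse);

/* Contrast-normalized noise score for one 16x16 block. Returns 1 and
 * writes the score when the block qualifies, 0 otherwise. Illustrative
 * only; all names are hypothetical. */
static int block_noise_score(const uint8_t *src, const uint8_t *recon,
                             int stride, const uint8_t *flat,
                             unsigned int *score)
{
    unsigned int sse, sse2;
    unsigned int var, act;

    var = my_variance16x16(src, stride, recon, stride, &sse);
    /* sse - var == (sum of differences)^2 / 256: small means the mean
     * luma barely changed, so the residual is noise-like. */
    if ((sse - var) >= 128)
        return 0; /* reject: mean shifted too much (lighting change) */

    /* Variance against a constant surface (stride 0) is the source
     * block's own variance, i.e. its contrast. */
    act = my_variance16x16(src, stride, flat, 0, &sse2);
    if (act == 0)
        return 0; /* flat block: normalization undefined */

    *score = sse / act; /* noise energy normalized by contrast */
    return 1;
}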
@@ -5986,25 +5986,26 @@
     unsigned char *dst = dest->y_buffer;
 
     /* Loop through the Y plane raw and reconstruction data summing
      * (square differences)
      */
     for (i = 0; i < source->y_height; i += 16)
     {
         for (j = 0; j < source->y_width; j += 16)
         {
             unsigned int sse;
-            Total += vp8_mse16x16(src + j, source->y_stride, dst + j, dest->y_stride, &sse);
+            Total += vpx_mse16x16(src + j, source->y_stride,
+                                  dst + j, dest->y_stride, &sse);
         }
 
         src += 16 * source->y_stride;
         dst += 16 * dest->y_stride;
     }
 
     return Total;
 }
 
 
 int vp8_get_quantizer(VP8_COMP *cpi)
 {
     return cpi->common.base_qindex;
 }
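vp8_get_quantizer simply reports the frame-level base quantizer index (0-127 in VP8's internal scale). Applications typically read it through the public control interface rather than touching encoder internals; a minimal sketch, assuming an already-initialized VP8 encoder context and eliding error handling:

#include "vpx/vpx_encoder.h"
#include "vpx/vp8cx.h"

/* Read back the quantizer actually used for the last encoded frame.
 * `codec` is assumed to be an initialized VP8 encoder context. */
static int last_quantizer(vpx_codec_ctx_t *codec)
{
    int q = 0;
    /* VP8E_GET_LAST_QUANTIZER reports the encoder's internal 0-127
     * scale; VP8E_GET_LAST_QUANTIZER_64 rescales it to 0-63. */
    vpx_codec_control(codec, VP8E_GET_LAST_QUANTIZER, &q);
    return q;
}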