| OLD | NEW | 
|---|---|
| 1 /* | 1 /* | 
| 2  *  Copyright (c) 2010 The WebM project authors. All Rights Reserved. | 2  *  Copyright (c) 2010 The WebM project authors. All Rights Reserved. | 
| 3  * | 3  * | 
| 4  *  Use of this source code is governed by a BSD-style license | 4  *  Use of this source code is governed by a BSD-style license | 
| 5  *  that can be found in the LICENSE file in the root of the source | 5  *  that can be found in the LICENSE file in the root of the source | 
| 6  *  tree. An additional intellectual property rights grant can be found | 6  *  tree. An additional intellectual property rights grant can be found | 
| 7  *  in the file PATENTS.  All contributing project authors may | 7  *  in the file PATENTS.  All contributing project authors may | 
| 8  *  be found in the AUTHORS file in the root of the source tree. | 8  *  be found in the AUTHORS file in the root of the source tree. | 
| 9  */ | 9  */ | 
| 10 | 10 | 
| 11 | 11 | 
| 12 #include "vpx_ports/config.h" | 12 #include "vpx_ports/config.h" | 
| 13 #include "vp8/encoder/variance.h" | 13 #include "vp8/encoder/variance.h" | 
| 14 #include "vp8/encoder/onyx_int.h" | 14 #include "vp8/encoder/onyx_int.h" | 
| 15 | 15 | 
| 16 | 16 | 
| 17 void vp8_arch_x86_encoder_init(VP8_COMP *cpi); | 17 void vp8_arch_x86_encoder_init(VP8_COMP *cpi); | 
| 18 void vp8_arch_arm_encoder_init(VP8_COMP *cpi); | 18 void vp8_arch_arm_encoder_init(VP8_COMP *cpi); | 
| 19 | 19 | 
| 20 extern void vp8_fast_quantize_b_c(BLOCK *b, BLOCKD *d); |  | 
| 21 |  | 
| 22 void (*vp8_yv12_copy_partial_frame_ptr)(YV12_BUFFER_CONFIG *src_ybc, YV12_BUFFER_CONFIG *dst_ybc, int Fraction); | 20 void (*vp8_yv12_copy_partial_frame_ptr)(YV12_BUFFER_CONFIG *src_ybc, YV12_BUFFER_CONFIG *dst_ybc, int Fraction); | 
| 23 extern void vp8_yv12_copy_partial_frame(YV12_BUFFER_CONFIG *src_ybc, YV12_BUFFER_CONFIG *dst_ybc, int Fraction); | 21 extern void vp8_yv12_copy_partial_frame(YV12_BUFFER_CONFIG *src_ybc, YV12_BUFFER_CONFIG *dst_ybc, int Fraction); | 
| 24 | 22 | 
| 25 void vp8_cmachine_specific_config(VP8_COMP *cpi) | 23 void vp8_cmachine_specific_config(VP8_COMP *cpi) | 
| 26 { | 24 { | 
| 27 #if CONFIG_RUNTIME_CPU_DETECT | 25 #if CONFIG_RUNTIME_CPU_DETECT | 
| 28     cpi->rtcd.common                    = &cpi->common.rtcd; | 26     cpi->rtcd.common                    = &cpi->common.rtcd; | 
| 29     cpi->rtcd.variance.sad16x16              = vp8_sad16x16_c; | 27     cpi->rtcd.variance.sad16x16              = vp8_sad16x16_c; | 
| 30     cpi->rtcd.variance.sad16x8               = vp8_sad16x8_c; | 28     cpi->rtcd.variance.sad16x8               = vp8_sad16x8_c; | 
| 31     cpi->rtcd.variance.sad8x16               = vp8_sad8x16_c; | 29     cpi->rtcd.variance.sad8x16               = vp8_sad8x16_c; | 
| (...skipping 10 matching lines...) | 
| 42     cpi->rtcd.variance.sad16x8x8             = vp8_sad16x8x8_c; | 40     cpi->rtcd.variance.sad16x8x8             = vp8_sad16x8x8_c; | 
| 43     cpi->rtcd.variance.sad8x16x8             = vp8_sad8x16x8_c; | 41     cpi->rtcd.variance.sad8x16x8             = vp8_sad8x16x8_c; | 
| 44     cpi->rtcd.variance.sad8x8x8              = vp8_sad8x8x8_c; | 42     cpi->rtcd.variance.sad8x8x8              = vp8_sad8x8x8_c; | 
| 45     cpi->rtcd.variance.sad4x4x8              = vp8_sad4x4x8_c; | 43     cpi->rtcd.variance.sad4x4x8              = vp8_sad4x4x8_c; | 
| 46 | 44 | 
| 47     cpi->rtcd.variance.sad16x16x4d           = vp8_sad16x16x4d_c; | 45     cpi->rtcd.variance.sad16x16x4d           = vp8_sad16x16x4d_c; | 
| 48     cpi->rtcd.variance.sad16x8x4d            = vp8_sad16x8x4d_c; | 46     cpi->rtcd.variance.sad16x8x4d            = vp8_sad16x8x4d_c; | 
| 49     cpi->rtcd.variance.sad8x16x4d            = vp8_sad8x16x4d_c; | 47     cpi->rtcd.variance.sad8x16x4d            = vp8_sad8x16x4d_c; | 
| 50     cpi->rtcd.variance.sad8x8x4d             = vp8_sad8x8x4d_c; | 48     cpi->rtcd.variance.sad8x8x4d             = vp8_sad8x8x4d_c; | 
| 51     cpi->rtcd.variance.sad4x4x4d             = vp8_sad4x4x4d_c; | 49     cpi->rtcd.variance.sad4x4x4d             = vp8_sad4x4x4d_c; | 
| 52 | 50 #if ARCH_X86 || ARCH_X86_64 | 
|  | 51     cpi->rtcd.variance.copy32xn              = vp8_copy32xn_c; | 
|  | 52 #endif | 
| 53     cpi->rtcd.variance.var4x4                = vp8_variance4x4_c; | 53     cpi->rtcd.variance.var4x4                = vp8_variance4x4_c; | 
| 54     cpi->rtcd.variance.var8x8                = vp8_variance8x8_c; | 54     cpi->rtcd.variance.var8x8                = vp8_variance8x8_c; | 
| 55     cpi->rtcd.variance.var8x16               = vp8_variance8x16_c; | 55     cpi->rtcd.variance.var8x16               = vp8_variance8x16_c; | 
| 56     cpi->rtcd.variance.var16x8               = vp8_variance16x8_c; | 56     cpi->rtcd.variance.var16x8               = vp8_variance16x8_c; | 
| 57     cpi->rtcd.variance.var16x16              = vp8_variance16x16_c; | 57     cpi->rtcd.variance.var16x16              = vp8_variance16x16_c; | 
| 58 | 58 | 
| 59     cpi->rtcd.variance.subpixvar4x4          = vp8_sub_pixel_variance4x4_c; | 59     cpi->rtcd.variance.subpixvar4x4          = vp8_sub_pixel_variance4x4_c; | 
| 60     cpi->rtcd.variance.subpixvar8x8          = vp8_sub_pixel_variance8x8_c; | 60     cpi->rtcd.variance.subpixvar8x8          = vp8_sub_pixel_variance8x8_c; | 
| 61     cpi->rtcd.variance.subpixvar8x16         = vp8_sub_pixel_variance8x16_c; | 61     cpi->rtcd.variance.subpixvar8x16         = vp8_sub_pixel_variance8x16_c; | 
| 62     cpi->rtcd.variance.subpixvar16x8         = vp8_sub_pixel_variance16x8_c; | 62     cpi->rtcd.variance.subpixvar16x8         = vp8_sub_pixel_variance16x8_c; | 
| 63     cpi->rtcd.variance.subpixvar16x16        = vp8_sub_pixel_variance16x16_c; | 63     cpi->rtcd.variance.subpixvar16x16        = vp8_sub_pixel_variance16x16_c; | 
| 64     cpi->rtcd.variance.halfpixvar16x16_h     = vp8_variance_halfpixvar16x16_h_c; | 64     cpi->rtcd.variance.halfpixvar16x16_h     = vp8_variance_halfpixvar16x16_h_c; | 
| 65     cpi->rtcd.variance.halfpixvar16x16_v     = vp8_variance_halfpixvar16x16_v_c; | 65     cpi->rtcd.variance.halfpixvar16x16_v     = vp8_variance_halfpixvar16x16_v_c; | 
| 66     cpi->rtcd.variance.halfpixvar16x16_hv    = vp8_variance_halfpixvar16x16_hv_c; | 66     cpi->rtcd.variance.halfpixvar16x16_hv    = vp8_variance_halfpixvar16x16_hv_c; | 
| 67     cpi->rtcd.variance.subpixmse16x16        = vp8_sub_pixel_mse16x16_c; | 67     cpi->rtcd.variance.subpixmse16x16        = vp8_sub_pixel_mse16x16_c; | 
| 68 | 68 | 
| 69     cpi->rtcd.variance.mse16x16              = vp8_mse16x16_c; | 69     cpi->rtcd.variance.mse16x16              = vp8_mse16x16_c; | 
| 70     cpi->rtcd.variance.getmbss               = vp8_get_mb_ss_c; | 70     cpi->rtcd.variance.getmbss               = vp8_get_mb_ss_c; | 
| 71 | 71 | 
| 72     cpi->rtcd.variance.get16x16prederror     = vp8_get16x16pred_error_c; |  | 
| 73     cpi->rtcd.variance.get8x8var             = vp8_get8x8var_c; |  | 
| 74     cpi->rtcd.variance.get16x16var           = vp8_get16x16var_c;; |  | 
| 75     cpi->rtcd.variance.get4x4sse_cs          = vp8_get4x4sse_cs_c; | 72     cpi->rtcd.variance.get4x4sse_cs          = vp8_get4x4sse_cs_c; | 
| 76 | 73 | 
| 77     cpi->rtcd.fdct.short4x4                  = vp8_short_fdct4x4_c; | 74     cpi->rtcd.fdct.short4x4                  = vp8_short_fdct4x4_c; | 
| 78     cpi->rtcd.fdct.short8x4                  = vp8_short_fdct8x4_c; | 75     cpi->rtcd.fdct.short8x4                  = vp8_short_fdct8x4_c; | 
| 79     cpi->rtcd.fdct.fast4x4                   = vp8_short_fdct4x4_c; | 76     cpi->rtcd.fdct.fast4x4                   = vp8_short_fdct4x4_c; | 
| 80     cpi->rtcd.fdct.fast8x4                   = vp8_short_fdct8x4_c; | 77     cpi->rtcd.fdct.fast8x4                   = vp8_short_fdct8x4_c; | 
| 81     cpi->rtcd.fdct.walsh_short4x4            = vp8_short_walsh4x4_c; | 78     cpi->rtcd.fdct.walsh_short4x4            = vp8_short_walsh4x4_c; | 
| 82 | 79 | 
| 83     cpi->rtcd.encodemb.berr                  = vp8_block_error_c; | 80     cpi->rtcd.encodemb.berr                  = vp8_block_error_c; | 
| 84     cpi->rtcd.encodemb.mberr                 = vp8_mbblock_error_c; | 81     cpi->rtcd.encodemb.mberr                 = vp8_mbblock_error_c; | 
| 85     cpi->rtcd.encodemb.mbuverr               = vp8_mbuverror_c; | 82     cpi->rtcd.encodemb.mbuverr               = vp8_mbuverror_c; | 
| 86     cpi->rtcd.encodemb.subb                  = vp8_subtract_b_c; | 83     cpi->rtcd.encodemb.subb                  = vp8_subtract_b_c; | 
| 87     cpi->rtcd.encodemb.submby                = vp8_subtract_mby_c; | 84     cpi->rtcd.encodemb.submby                = vp8_subtract_mby_c; | 
| 88     cpi->rtcd.encodemb.submbuv               = vp8_subtract_mbuv_c; | 85     cpi->rtcd.encodemb.submbuv               = vp8_subtract_mbuv_c; | 
| 89 | 86 | 
| 90     cpi->rtcd.quantize.quantb                = vp8_regular_quantize_b; | 87     cpi->rtcd.quantize.quantb                = vp8_regular_quantize_b; | 
|  | 88     cpi->rtcd.quantize.quantb_pair           = vp8_regular_quantize_b_pair; | 
| 91     cpi->rtcd.quantize.fastquantb            = vp8_fast_quantize_b_c; | 89     cpi->rtcd.quantize.fastquantb            = vp8_fast_quantize_b_c; | 
| 92 #if !(CONFIG_REALTIME_ONLY) | 90     cpi->rtcd.quantize.fastquantb_pair       = vp8_fast_quantize_b_pair_c; | 
| 93     cpi->rtcd.search.full_search             = vp8_full_search_sad; | 91     cpi->rtcd.search.full_search             = vp8_full_search_sad; | 
| 94 #endif | 92     cpi->rtcd.search.refining_search         = vp8_refining_search_sad; | 
| 95     cpi->rtcd.search.diamond_search          = vp8_diamond_search_sad; | 93     cpi->rtcd.search.diamond_search          = vp8_diamond_search_sad; | 
| 96 #if !(CONFIG_REALTIME_ONLY) | 94 #if !(CONFIG_REALTIME_ONLY) | 
| 97     cpi->rtcd.temporal.apply                 = vp8_temporal_filter_apply_c; | 95     cpi->rtcd.temporal.apply                 = vp8_temporal_filter_apply_c; | 
| 98 #endif | 96 #endif | 
| 99 #endif | 97 #endif | 
| 100 | 98 | 
| 101     // Pure C: | 99     // Pure C: | 
| 102     vp8_yv12_copy_partial_frame_ptr = vp8_yv12_copy_partial_frame; | 100     vp8_yv12_copy_partial_frame_ptr = vp8_yv12_copy_partial_frame; | 
| 103 | 101 | 
| 104 #if CONFIG_PSNR | 102 #if CONFIG_INTERNAL_STATS | 
| 105     cpi->rtcd.variance.ssimpf_8x8            = ssim_parms_8x8_c; | 103     cpi->rtcd.variance.ssimpf_8x8            = ssim_parms_8x8_c; | 
| 106     cpi->rtcd.variance.ssimpf                = ssim_parms_c; | 104     cpi->rtcd.variance.ssimpf                = ssim_parms_c; | 
| 107 #endif | 105 #endif | 
| 108 | 106 | 
| 109 #if ARCH_X86 || ARCH_X86_64 | 107 #if ARCH_X86 || ARCH_X86_64 | 
| 110     vp8_arch_x86_encoder_init(cpi); | 108     vp8_arch_x86_encoder_init(cpi); | 
| 111 #endif | 109 #endif | 
| 112 | 110 | 
| 113 #if ARCH_ARM | 111 #if ARCH_ARM | 
| 114     vp8_arch_arm_encoder_init(cpi); | 112     vp8_arch_arm_encoder_init(cpi); | 
| 115 #endif | 113 #endif | 
| 116 | 114 | 
| 117 } | 115 } | 
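
For readers skimming the diff: the file being changed only builds the encoder's RTCD (run-time CPU detect) function-pointer table with plain-C fallbacks, which the arch-specific init calls at the bottom may later override. Below is a minimal, self-contained sketch of that pattern; the type and function names (`rtcd_vtable`, `sad_fn`, `sad16x16_c`, `machine_specific_config`) are illustrative stand-ins, not the actual libvpx declarations.

```c
/*
 * Hypothetical, simplified sketch of the RTCD pattern this file follows.
 * The generic init fills a table of function pointers with C reference
 * implementations; an architecture-specific hook (in the spirit of
 * vp8_arch_x86_encoder_init / vp8_arch_arm_encoder_init) can then
 * overwrite individual entries with optimized versions.
 */
#include <stdlib.h>

typedef unsigned int (*sad_fn)(const unsigned char *src, int src_stride,
                               const unsigned char *ref, int ref_stride);

typedef struct { sad_fn sad16x16; } rtcd_vtable;

/* Plain-C reference implementation: 16x16 sum of absolute differences. */
static unsigned int sad16x16_c(const unsigned char *src, int src_stride,
                               const unsigned char *ref, int ref_stride)
{
    unsigned int sad = 0;
    int r, c;
    for (r = 0; r < 16; r++, src += src_stride, ref += ref_stride)
        for (c = 0; c < 16; c++)
            sad += (unsigned int)abs(src[c] - ref[c]);
    return sad;
}

static void machine_specific_config(rtcd_vtable *rtcd)
{
    /* Always start from the C fallbacks... */
    rtcd->sad16x16 = sad16x16_c;
    /* ...then an arch-specific init, compiled in only for that target,
     * may replace entries it can accelerate, e.g.
     *   rtcd->sad16x16 = sad16x16_sse2;   (hypothetical name) */
}
```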