| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (c) 2013 The WebM project authors. All Rights Reserved. | 2 * Copyright (c) 2013 The WebM project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| (...skipping 64 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 75 if (rate_ratio[i] == 1.0) { | 75 if (rate_ratio[i] == 1.0) { |
| 76 continue; | 76 continue; |
| 77 } | 77 } |
| 78 | 78 |
| 79 vp9_set_segdata(seg, i, SEG_LVL_ALT_Q, qindex_delta); | 79 vp9_set_segdata(seg, i, SEG_LVL_ALT_Q, qindex_delta); |
| 80 vp9_enable_segfeature(seg, i, SEG_LVL_ALT_Q); | 80 vp9_enable_segfeature(seg, i, SEG_LVL_ALT_Q); |
| 81 } | 81 } |
| 82 } | 82 } |
| 83 } | 83 } |
| 84 | 84 |
/* TODO(agrange, paulwilkins): The block_variance calls the unoptimized versions
 * of variance() and highbd_8_variance(). It should not.
 */
/* Accumulates, over a w x h block of 8-bit samples, the raw sum of pixel
 * differences (*sum, signed) and the sum of squared differences (*sse)
 * between the two buffers a and b.
 */
static void aq_variance(const uint8_t *a, int a_stride,
                        const uint8_t *b, int b_stride,
                        int w, int h, unsigned int *sse, int *sum) {
  int row, col;
  int diff_acc = 0;
  unsigned int sq_acc = 0;

  for (row = 0; row < h; ++row) {
    for (col = 0; col < w; ++col) {
      const int d = a[col] - b[col];
      diff_acc += d;
      sq_acc += d * d;
    }
    /* Advance both buffers to the next row. */
    a += a_stride;
    b += b_stride;
  }

  *sum = diff_acc;
  *sse = sq_acc;
}
| 107 |
#if CONFIG_VP9_HIGHBITDEPTH
/* High-bit-depth counterpart of aq_variance(): accumulates the signed
 * pixel-difference sum and the squared-difference sum over a w x h block of
 * 16-bit samples, using 64-bit accumulators so larger bit depths cannot
 * overflow. a8/b8 are high-bit-depth buffer handles decoded via
 * CONVERT_TO_SHORTPTR.
 */
static void aq_highbd_variance64(const uint8_t *a8, int a_stride,
                                 const uint8_t *b8, int b_stride,
                                 int w, int h, uint64_t *sse, uint64_t *sum) {
  const uint16_t *a = CONVERT_TO_SHORTPTR(a8);
  const uint16_t *b = CONVERT_TO_SHORTPTR(b8);
  uint64_t diff_total = 0;
  uint64_t sq_total = 0;
  int row, col;

  for (row = 0; row < h; ++row) {
    for (col = 0; col < w; ++col) {
      const int d = a[col] - b[col];
      /* Negative diffs wrap modulo 2^64 here; the 8-bit wrapper below
       * narrows back to int, recovering the signed value. */
      diff_total += d;
      sq_total += d * d;
    }
    a += a_stride;
    b += b_stride;
  }

  *sum = diff_total;
  *sse = sq_total;
}

/* 8-bit-depth wrapper: runs the 64-bit accumulation and narrows the results
 * to the widths used by the 8-bit code path.
 */
static void aq_highbd_8_variance(const uint8_t *a8, int a_stride,
                                 const uint8_t *b8, int b_stride,
                                 int w, int h, unsigned int *sse, int *sum) {
  uint64_t sse64 = 0;
  uint64_t sum64 = 0;
  aq_highbd_variance64(a8, a_stride, b8, b_stride, w, h, &sse64, &sum64);
  *sse = (unsigned int)sse64;
  *sum = (int)sum64;
}
#endif  // CONFIG_VP9_HIGHBITDEPTH
| 85 | 140 |
| 86 static unsigned int block_variance(VP9_COMP *cpi, MACROBLOCK *x, | 141 static unsigned int block_variance(VP9_COMP *cpi, MACROBLOCK *x, |
| 87 BLOCK_SIZE bs) { | 142 BLOCK_SIZE bs) { |
| 88 MACROBLOCKD *xd = &x->e_mbd; | 143 MACROBLOCKD *xd = &x->e_mbd; |
| 89 unsigned int var, sse; | 144 unsigned int var, sse; |
| 90 int right_overflow = (xd->mb_to_right_edge < 0) ? | 145 int right_overflow = (xd->mb_to_right_edge < 0) ? |
| 91 ((-xd->mb_to_right_edge) >> 3) : 0; | 146 ((-xd->mb_to_right_edge) >> 3) : 0; |
| 92 int bottom_overflow = (xd->mb_to_bottom_edge < 0) ? | 147 int bottom_overflow = (xd->mb_to_bottom_edge < 0) ? |
| 93 ((-xd->mb_to_bottom_edge) >> 3) : 0; | 148 ((-xd->mb_to_bottom_edge) >> 3) : 0; |
| 94 | 149 |
| 95 if (right_overflow || bottom_overflow) { | 150 if (right_overflow || bottom_overflow) { |
| 96 const int bw = 8 * num_8x8_blocks_wide_lookup[bs] - right_overflow; | 151 const int bw = 8 * num_8x8_blocks_wide_lookup[bs] - right_overflow; |
| 97 const int bh = 8 * num_8x8_blocks_high_lookup[bs] - bottom_overflow; | 152 const int bh = 8 * num_8x8_blocks_high_lookup[bs] - bottom_overflow; |
| 98 int avg; | 153 int avg; |
| 99 #if CONFIG_VP9_HIGHBITDEPTH | 154 #if CONFIG_VP9_HIGHBITDEPTH |
| 100 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { | 155 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { |
| 101 highbd_8_variance(x->plane[0].src.buf, x->plane[0].src.stride, | 156 aq_highbd_8_variance(x->plane[0].src.buf, x->plane[0].src.stride, |
| 102 CONVERT_TO_BYTEPTR(vp9_highbd_64_zeros), 0, bw, bh, | 157 CONVERT_TO_BYTEPTR(vp9_highbd_64_zeros), 0, bw, bh, |
| 103 &sse, &avg); | 158 &sse, &avg); |
| 104 sse >>= 2 * (xd->bd - 8); | 159 sse >>= 2 * (xd->bd - 8); |
| 105 avg >>= (xd->bd - 8); | 160 avg >>= (xd->bd - 8); |
| 106 } else { | 161 } else { |
| 107 variance(x->plane[0].src.buf, x->plane[0].src.stride, | 162 aq_variance(x->plane[0].src.buf, x->plane[0].src.stride, |
| 108 vp9_64_zeros, 0, bw, bh, &sse, &avg); | 163 vp9_64_zeros, 0, bw, bh, &sse, &avg); |
| 109 } | 164 } |
| 110 #else | 165 #else |
| 111 variance(x->plane[0].src.buf, x->plane[0].src.stride, | 166 aq_variance(x->plane[0].src.buf, x->plane[0].src.stride, |
| 112 vp9_64_zeros, 0, bw, bh, &sse, &avg); | 167 vp9_64_zeros, 0, bw, bh, &sse, &avg); |
| 113 #endif // CONFIG_VP9_HIGHBITDEPTH | 168 #endif // CONFIG_VP9_HIGHBITDEPTH |
| 114 var = sse - (((int64_t)avg * avg) / (bw * bh)); | 169 var = sse - (((int64_t)avg * avg) / (bw * bh)); |
| 115 return (256 * var) / (bw * bh); | 170 return (256 * var) / (bw * bh); |
| 116 } else { | 171 } else { |
| 117 #if CONFIG_VP9_HIGHBITDEPTH | 172 #if CONFIG_VP9_HIGHBITDEPTH |
| 118 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { | 173 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { |
| 119 var = cpi->fn_ptr[bs].vf(x->plane[0].src.buf, | 174 var = cpi->fn_ptr[bs].vf(x->plane[0].src.buf, |
| 120 x->plane[0].src.stride, | 175 x->plane[0].src.stride, |
| 121 CONVERT_TO_BYTEPTR(vp9_highbd_64_zeros), | 176 CONVERT_TO_BYTEPTR(vp9_highbd_64_zeros), |
| 122 0, &sse); | 177 0, &sse); |
| (...skipping 20 matching lines...) Expand all Loading... |
| 143 #define DEFAULT_E_MIDPOINT 10.0 | 198 #define DEFAULT_E_MIDPOINT 10.0 |
| 144 int vp9_block_energy(VP9_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bs) { | 199 int vp9_block_energy(VP9_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bs) { |
| 145 double energy; | 200 double energy; |
| 146 double energy_midpoint; | 201 double energy_midpoint; |
| 147 vp9_clear_system_state(); | 202 vp9_clear_system_state(); |
| 148 energy_midpoint = | 203 energy_midpoint = |
| 149 (cpi->oxcf.pass == 2) ? cpi->twopass.mb_av_energy : DEFAULT_E_MIDPOINT; | 204 (cpi->oxcf.pass == 2) ? cpi->twopass.mb_av_energy : DEFAULT_E_MIDPOINT; |
| 150 energy = vp9_log_block_var(cpi, x, bs) - energy_midpoint; | 205 energy = vp9_log_block_var(cpi, x, bs) - energy_midpoint; |
| 151 return clamp((int)round(energy), ENERGY_MIN, ENERGY_MAX); | 206 return clamp((int)round(energy), ENERGY_MIN, ENERGY_MAX); |
| 152 } | 207 } |
| OLD | NEW |