OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. | 2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
(...skipping 37 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
48 vpx_memcpy(dst_ptr1, src_ptr1, linesize); | 48 vpx_memcpy(dst_ptr1, src_ptr1, linesize); |
49 dst_ptr1 += dst_pitch; | 49 dst_ptr1 += dst_pitch; |
50 } | 50 } |
51 | 51 |
52 for (i = 0; i < extend_bottom; i++) { | 52 for (i = 0; i < extend_bottom; i++) { |
53 vpx_memcpy(dst_ptr2, src_ptr2, linesize); | 53 vpx_memcpy(dst_ptr2, src_ptr2, linesize); |
54 dst_ptr2 += dst_pitch; | 54 dst_ptr2 += dst_pitch; |
55 } | 55 } |
56 } | 56 } |
57 | 57 |
#if CONFIG_VP9_HIGHBITDEPTH
// High-bitdepth twin of copy_and_extend_plane: copies a w x h plane of
// 16-bit samples (src8/dst8 are CONVERT_TO_SHORTPTR-packed pointers) and
// replicates edge pixels into the requested border regions. Pitches and
// extension sizes are in samples, not bytes.
static void highbd_copy_and_extend_plane(const uint8_t *src8, int src_pitch,
                                         uint8_t *dst8, int dst_pitch,
                                         int w, int h,
                                         int extend_top, int extend_left,
                                         int extend_bottom, int extend_right) {
  int row, linesize;
  uint16_t *src = CONVERT_TO_SHORTPTR(src8);
  uint16_t *dst = CONVERT_TO_SHORTPTR(dst8);

  // Pass 1: copy every source row into the destination while replicating
  // its first sample across the left border and its last sample across the
  // right border.
  const uint16_t *left_src = src;
  const uint16_t *right_src = src + w - 1;
  uint16_t *left_dst = dst - extend_left;
  uint16_t *right_dst = dst + w;

  for (row = 0; row < h; row++) {
    vpx_memset16(left_dst, left_src[0], extend_left);
    vpx_memcpy(left_dst + extend_left, left_src, w * sizeof(uint16_t));
    vpx_memset16(right_dst, right_src[0], extend_right);
    left_src += src_pitch;
    right_src += src_pitch;
    left_dst += dst_pitch;
    right_dst += dst_pitch;
  }

  // Pass 2: the top/bottom borders are filled by duplicating the first and
  // last destination rows, which already include the left/right extension.
  linesize = extend_left + extend_right + w;
  left_src = dst - extend_left;
  right_src = dst + dst_pitch * (h - 1) - extend_left;
  left_dst = dst - dst_pitch * extend_top - extend_left;
  right_dst = dst + dst_pitch * h - extend_left;

  for (row = 0; row < extend_top; row++) {
    vpx_memcpy(left_dst, left_src, linesize * sizeof(uint16_t));
    left_dst += dst_pitch;
  }

  for (row = 0; row < extend_bottom; row++) {
    vpx_memcpy(right_dst, right_src, linesize * sizeof(uint16_t));
    right_dst += dst_pitch;
  }
}
#endif  // CONFIG_VP9_HIGHBITDEPTH
| 103 |
58 void vp9_copy_and_extend_frame(const YV12_BUFFER_CONFIG *src, | 104 void vp9_copy_and_extend_frame(const YV12_BUFFER_CONFIG *src, |
59 YV12_BUFFER_CONFIG *dst) { | 105 YV12_BUFFER_CONFIG *dst) { |
60 // Extend src frame in buffer | 106 // Extend src frame in buffer |
61 // Altref filtering assumes 16 pixel extension | 107 // Altref filtering assumes 16 pixel extension |
62 const int et_y = 16; | 108 const int et_y = 16; |
63 const int el_y = 16; | 109 const int el_y = 16; |
64 // Motion estimation may use src block variance with the block size up | 110 // Motion estimation may use src block variance with the block size up |
65 // to 64x64, so the right and bottom need to be extended to 64 multiple | 111 // to 64x64, so the right and bottom need to be extended to 64 multiple |
66 // or up to 16, whichever is greater. | 112 // or up to 16, whichever is greater. |
67 const int eb_y = MAX(ALIGN_POWER_OF_TWO(src->y_width, 6) - src->y_width, | 113 const int eb_y = MAX(src->y_width + 16, ALIGN_POWER_OF_TWO(src->y_width, 6)) |
68 16); | 114 - src->y_crop_width; |
69 const int er_y = MAX(ALIGN_POWER_OF_TWO(src->y_height, 6) - src->y_height, | 115 const int er_y = MAX(src->y_height + 16, ALIGN_POWER_OF_TWO(src->y_height, 6)) |
70 16); | 116 - src->y_crop_height; |
71 const int uv_width_subsampling = (src->uv_width != src->y_width); | 117 const int uv_width_subsampling = (src->uv_width != src->y_width); |
72 const int uv_height_subsampling = (src->uv_height != src->y_height); | 118 const int uv_height_subsampling = (src->uv_height != src->y_height); |
73 const int et_uv = et_y >> uv_height_subsampling; | 119 const int et_uv = et_y >> uv_height_subsampling; |
74 const int el_uv = el_y >> uv_width_subsampling; | 120 const int el_uv = el_y >> uv_width_subsampling; |
75 const int eb_uv = eb_y >> uv_height_subsampling; | 121 const int eb_uv = eb_y >> uv_height_subsampling; |
76 const int er_uv = er_y >> uv_width_subsampling; | 122 const int er_uv = er_y >> uv_width_subsampling; |
77 | 123 |
| 124 #if CONFIG_VP9_HIGHBITDEPTH |
| 125 if (src->flags & YV12_FLAG_HIGHBITDEPTH) { |
| 126 highbd_copy_and_extend_plane(src->y_buffer, src->y_stride, |
| 127 dst->y_buffer, dst->y_stride, |
| 128 src->y_crop_width, src->y_crop_height, |
| 129 et_y, el_y, eb_y, er_y); |
| 130 |
| 131 highbd_copy_and_extend_plane(src->u_buffer, src->uv_stride, |
| 132 dst->u_buffer, dst->uv_stride, |
| 133 src->uv_crop_width, src->uv_crop_height, |
| 134 et_uv, el_uv, eb_uv, er_uv); |
| 135 |
| 136 highbd_copy_and_extend_plane(src->v_buffer, src->uv_stride, |
| 137 dst->v_buffer, dst->uv_stride, |
| 138 src->uv_crop_width, src->uv_crop_height, |
| 139 et_uv, el_uv, eb_uv, er_uv); |
| 140 return; |
| 141 } |
| 142 #endif // CONFIG_VP9_HIGHBITDEPTH |
| 143 |
78 copy_and_extend_plane(src->y_buffer, src->y_stride, | 144 copy_and_extend_plane(src->y_buffer, src->y_stride, |
79 dst->y_buffer, dst->y_stride, | 145 dst->y_buffer, dst->y_stride, |
80 src->y_width, src->y_height, | 146 src->y_crop_width, src->y_crop_height, |
81 et_y, el_y, eb_y, er_y); | 147 et_y, el_y, eb_y, er_y); |
82 | 148 |
83 copy_and_extend_plane(src->u_buffer, src->uv_stride, | 149 copy_and_extend_plane(src->u_buffer, src->uv_stride, |
84 dst->u_buffer, dst->uv_stride, | 150 dst->u_buffer, dst->uv_stride, |
85 src->uv_width, src->uv_height, | 151 src->uv_crop_width, src->uv_crop_height, |
86 et_uv, el_uv, eb_uv, er_uv); | 152 et_uv, el_uv, eb_uv, er_uv); |
87 | 153 |
88 copy_and_extend_plane(src->v_buffer, src->uv_stride, | 154 copy_and_extend_plane(src->v_buffer, src->uv_stride, |
89 dst->v_buffer, dst->uv_stride, | 155 dst->v_buffer, dst->uv_stride, |
90 src->uv_width, src->uv_height, | 156 src->uv_crop_width, src->uv_crop_height, |
91 et_uv, el_uv, eb_uv, er_uv); | 157 et_uv, el_uv, eb_uv, er_uv); |
92 } | 158 } |
93 | 159 |
94 void vp9_copy_and_extend_frame_with_rect(const YV12_BUFFER_CONFIG *src, | 160 void vp9_copy_and_extend_frame_with_rect(const YV12_BUFFER_CONFIG *src, |
95 YV12_BUFFER_CONFIG *dst, | 161 YV12_BUFFER_CONFIG *dst, |
96 int srcy, int srcx, | 162 int srcy, int srcx, |
97 int srch, int srcw) { | 163 int srch, int srcw) { |
98 // If the side is not touching the bounder then don't extend. | 164 // If the side is not touching the bounder then don't extend. |
99 const int et_y = srcy ? 0 : dst->border; | 165 const int et_y = srcy ? 0 : dst->border; |
100 const int el_y = srcx ? 0 : dst->border; | 166 const int el_y = srcx ? 0 : dst->border; |
(...skipping 21 matching lines...) Expand all Loading... |
122 copy_and_extend_plane(src->u_buffer + src_uv_offset, src->uv_stride, | 188 copy_and_extend_plane(src->u_buffer + src_uv_offset, src->uv_stride, |
123 dst->u_buffer + dst_uv_offset, dst->uv_stride, | 189 dst->u_buffer + dst_uv_offset, dst->uv_stride, |
124 srcw_uv, srch_uv, | 190 srcw_uv, srch_uv, |
125 et_uv, el_uv, eb_uv, er_uv); | 191 et_uv, el_uv, eb_uv, er_uv); |
126 | 192 |
127 copy_and_extend_plane(src->v_buffer + src_uv_offset, src->uv_stride, | 193 copy_and_extend_plane(src->v_buffer + src_uv_offset, src->uv_stride, |
128 dst->v_buffer + dst_uv_offset, dst->uv_stride, | 194 dst->v_buffer + dst_uv_offset, dst->uv_stride, |
129 srcw_uv, srch_uv, | 195 srcw_uv, srch_uv, |
130 et_uv, el_uv, eb_uv, er_uv); | 196 et_uv, el_uv, eb_uv, er_uv); |
131 } | 197 } |
OLD | NEW |