OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. | 2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
11 | 11 |
12 #ifndef VP9_COMMON_VP9_BLOCKD_H_ | 12 #ifndef VP9_COMMON_VP9_BLOCKD_H_ |
13 #define VP9_COMMON_VP9_BLOCKD_H_ | 13 #define VP9_COMMON_VP9_BLOCKD_H_ |
14 | 14 |
15 #include "./vpx_config.h" | 15 #include "./vpx_config.h" |
16 | 16 |
17 #include "vpx_ports/mem.h" | 17 #include "vpx_ports/mem.h" |
18 #include "vpx_scale/yv12config.h" | 18 #include "vpx_scale/yv12config.h" |
19 | 19 |
20 #include "vp9/common/vp9_common.h" | 20 #include "vp9/common/vp9_common.h" |
21 #include "vp9/common/vp9_common_data.h" | 21 #include "vp9/common/vp9_common_data.h" |
22 #include "vp9/common/vp9_convolve.h" | |
23 #include "vp9/common/vp9_enums.h" | 22 #include "vp9/common/vp9_enums.h" |
24 #include "vp9/common/vp9_mv.h" | 23 #include "vp9/common/vp9_mv.h" |
| 24 #include "vp9/common/vp9_scale.h" |
25 #include "vp9/common/vp9_seg_common.h" | 25 #include "vp9/common/vp9_seg_common.h" |
26 #include "vp9/common/vp9_treecoder.h" | 26 #include "vp9/common/vp9_treecoder.h" |
27 | 27 |
28 #define BLOCK_SIZE_GROUPS 4 | 28 #define BLOCK_SIZE_GROUPS 4 |
29 | |
30 #define PREDICTION_PROBS 3 | |
31 | |
32 #define MBSKIP_CONTEXTS 3 | 29 #define MBSKIP_CONTEXTS 3 |
33 | 30 |
34 /* Segment Feature Masks */ | 31 /* Segment Feature Masks */ |
35 #define MAX_MV_REF_CANDIDATES 2 | 32 #define MAX_MV_REF_CANDIDATES 2 |
36 | 33 |
37 #define INTRA_INTER_CONTEXTS 4 | 34 #define INTRA_INTER_CONTEXTS 4 |
38 #define COMP_INTER_CONTEXTS 5 | 35 #define COMP_INTER_CONTEXTS 5 |
39 #define REF_CONTEXTS 5 | 36 #define REF_CONTEXTS 5 |
40 | 37 |
41 typedef enum { | 38 typedef enum { |
(...skipping 10 matching lines...) |
52 return (a != 0) + (b != 0); | 49 return (a != 0) + (b != 0); |
53 } | 50 } |
54 | 51 |
55 typedef enum { | 52 typedef enum { |
56 KEY_FRAME = 0, | 53 KEY_FRAME = 0, |
57 INTER_FRAME = 1, | 54 INTER_FRAME = 1, |
58 NUM_FRAME_TYPES, | 55 NUM_FRAME_TYPES, |
59 } FRAME_TYPE; | 56 } FRAME_TYPE; |
60 | 57 |
61 typedef enum { | 58 typedef enum { |
62 EIGHTTAP_SMOOTH, | 59 EIGHTTAP = 0, |
63 EIGHTTAP, | 60 EIGHTTAP_SMOOTH = 1, |
64 EIGHTTAP_SHARP, | 61 EIGHTTAP_SHARP = 2, |
65 BILINEAR, | 62 BILINEAR = 3, |
66 SWITCHABLE /* should be the last one */ | 63 SWITCHABLE = 4 /* should be the last one */ |
67 } INTERPOLATIONFILTERTYPE; | 64 } INTERPOLATIONFILTERTYPE; |
68 | 65 |
69 typedef enum { | 66 typedef enum { |
70 DC_PRED, // Average of above and left pixels | 67 DC_PRED, // Average of above and left pixels |
71 V_PRED, // Vertical | 68 V_PRED, // Vertical |
72 H_PRED, // Horizontal | 69 H_PRED, // Horizontal |
73 D45_PRED, // Directional 45 deg = round(arctan(1/1) * 180/pi) | 70 D45_PRED, // Directional 45 deg = round(arctan(1/1) * 180/pi) |
74 D135_PRED, // Directional 135 deg = 180 - 45 | 71 D135_PRED, // Directional 135 deg = 180 - 45 |
75 D117_PRED, // Directional 117 deg = 180 - 63 | 72 D117_PRED, // Directional 117 deg = 180 - 63 |
76 D153_PRED, // Directional 153 deg = 180 - 27 | 73 D153_PRED, // Directional 153 deg = 180 - 27 |
77 D27_PRED, // Directional 27 deg = round(arctan(1/2) * 180/pi) | 74 D207_PRED, // Directional 207 deg = 180 + 27 |
78 D63_PRED, // Directional 63 deg = round(arctan(2/1) * 180/pi) | 75 D63_PRED, // Directional 63 deg = round(arctan(2/1) * 180/pi) |
79 TM_PRED, // True-motion | 76 TM_PRED, // True-motion |
80 NEARESTMV, | 77 NEARESTMV, |
81 NEARMV, | 78 NEARMV, |
82 ZEROMV, | 79 ZEROMV, |
83 NEWMV, | 80 NEWMV, |
84 MB_MODE_COUNT | 81 MB_MODE_COUNT |
85 } MB_PREDICTION_MODE; | 82 } MB_PREDICTION_MODE; |
86 | 83 |
87 static INLINE int is_intra_mode(MB_PREDICTION_MODE mode) { | 84 static INLINE int is_intra_mode(MB_PREDICTION_MODE mode) { |
88 return mode <= TM_PRED; | 85 return mode <= TM_PRED; |
89 } | 86 } |
90 | 87 |
91 static INLINE int is_inter_mode(MB_PREDICTION_MODE mode) { | 88 static INLINE int is_inter_mode(MB_PREDICTION_MODE mode) { |
92 return mode >= NEARESTMV && mode <= NEWMV; | 89 return mode >= NEARESTMV && mode <= NEWMV; |
93 } | 90 } |
94 | 91 |
95 #define VP9_INTRA_MODES (TM_PRED + 1) | 92 #define INTRA_MODES (TM_PRED + 1) |
96 | 93 |
97 #define VP9_INTER_MODES (1 + NEWMV - NEARESTMV) | 94 #define INTER_MODES (1 + NEWMV - NEARESTMV) |
98 | 95 |
99 static INLINE int inter_mode_offset(MB_PREDICTION_MODE mode) { | 96 static INLINE int inter_mode_offset(MB_PREDICTION_MODE mode) { |
100 return (mode - NEARESTMV); | 97 return (mode - NEARESTMV); |
101 } | 98 } |
102 | 99 |
103 /* For keyframes, intra block modes are predicted by the (already decoded) | 100 /* For keyframes, intra block modes are predicted by the (already decoded) |
104 modes for the Y blocks to the left and above us; for interframes, there | 101 modes for the Y blocks to the left and above us; for interframes, there |
105 is a single probability table. */ | 102 is a single probability table. */ |
106 | 103 |
107 union b_mode_info { | 104 union b_mode_info { |
108 MB_PREDICTION_MODE as_mode; | 105 MB_PREDICTION_MODE as_mode; |
109 int_mv as_mv[2]; // first, second inter predictor motion vectors | 106 int_mv as_mv[2]; // first, second inter predictor motion vectors |
110 }; | 107 }; |
111 | 108 |
112 typedef enum { | 109 typedef enum { |
113 NONE = -1, | 110 NONE = -1, |
114 INTRA_FRAME = 0, | 111 INTRA_FRAME = 0, |
115 LAST_FRAME = 1, | 112 LAST_FRAME = 1, |
116 GOLDEN_FRAME = 2, | 113 GOLDEN_FRAME = 2, |
117 ALTREF_FRAME = 3, | 114 ALTREF_FRAME = 3, |
118 MAX_REF_FRAMES = 4 | 115 MAX_REF_FRAMES = 4 |
119 } MV_REFERENCE_FRAME; | 116 } MV_REFERENCE_FRAME; |
120 | 117 |
121 static INLINE int b_width_log2(BLOCK_SIZE_TYPE sb_type) { | 118 static INLINE int b_width_log2(BLOCK_SIZE sb_type) { |
122 return b_width_log2_lookup[sb_type]; | 119 return b_width_log2_lookup[sb_type]; |
123 } | 120 } |
124 static INLINE int b_height_log2(BLOCK_SIZE_TYPE sb_type) { | 121 static INLINE int b_height_log2(BLOCK_SIZE sb_type) { |
125 return b_height_log2_lookup[sb_type]; | 122 return b_height_log2_lookup[sb_type]; |
126 } | 123 } |
127 | 124 |
128 static INLINE int mi_width_log2(BLOCK_SIZE_TYPE sb_type) { | 125 static INLINE int mi_width_log2(BLOCK_SIZE sb_type) { |
129 return mi_width_log2_lookup[sb_type]; | 126 return mi_width_log2_lookup[sb_type]; |
130 } | 127 } |
131 | 128 |
132 static INLINE int mi_height_log2(BLOCK_SIZE_TYPE sb_type) { | 129 static INLINE int mi_height_log2(BLOCK_SIZE sb_type) { |
133 return mi_height_log2_lookup[sb_type]; | 130 return mi_height_log2_lookup[sb_type]; |
134 } | 131 } |
135 | 132 |
| 133 // This structure now relates to 8x8 block regions. |
136 typedef struct { | 134 typedef struct { |
137 MB_PREDICTION_MODE mode, uv_mode; | 135 MB_PREDICTION_MODE mode, uv_mode; |
138 MV_REFERENCE_FRAME ref_frame[2]; | 136 MV_REFERENCE_FRAME ref_frame[2]; |
139 TX_SIZE txfm_size; | 137 TX_SIZE txfm_size; |
140 int_mv mv[2]; // for each reference frame used | 138 int_mv mv[2]; // for each reference frame used |
141 int_mv ref_mvs[MAX_REF_FRAMES][MAX_MV_REF_CANDIDATES]; | 139 int_mv ref_mvs[MAX_REF_FRAMES][MAX_MV_REF_CANDIDATES]; |
142 int_mv best_mv, best_second_mv; | 140 int_mv best_mv, best_second_mv; |
143 | 141 |
144 uint8_t mb_mode_context[MAX_REF_FRAMES]; | 142 uint8_t mode_context[MAX_REF_FRAMES]; |
145 | 143 |
146 unsigned char mb_skip_coeff; /* does this mb has coefficients at all, 1=no coefficients, 0=need decode tokens */ | 144 unsigned char skip_coeff; // 0=need to decode coeffs, 1=no coefficients |
147 unsigned char segment_id; // Segment id for current frame | 145 unsigned char segment_id; // Segment id for this block. |
148 | 146 |
149 // Flags used for prediction status of various bistream signals | 147 // Flags used for prediction status of various bit-stream signals |
150 unsigned char seg_id_predicted; | 148 unsigned char seg_id_predicted; |
151 | 149 |
152 // Indicates if the mb is part of the image (1) vs border (0) | 150 // Indicates if the block is part of the image (1) vs border (0) |
153 // This can be useful in determining whether the MB provides | 151 // This can be useful in determining whether it provides a valid predictor |
154 // a valid predictor | 152 unsigned char in_image; |
155 unsigned char mb_in_image; | |
156 | 153 |
157 INTERPOLATIONFILTERTYPE interp_filter; | 154 INTERPOLATIONFILTERTYPE interp_filter; |
158 | 155 |
159 BLOCK_SIZE_TYPE sb_type; | 156 BLOCK_SIZE sb_type; |
160 } MB_MODE_INFO; | 157 } MB_MODE_INFO; |
161 | 158 |
162 typedef struct { | 159 typedef struct { |
163 MB_MODE_INFO mbmi; | 160 MB_MODE_INFO mbmi; |
164 union b_mode_info bmi[4]; | 161 union b_mode_info bmi[4]; |
165 } MODE_INFO; | 162 } MODE_INFO; |
166 | 163 |
| 164 static INLINE int is_inter_block(const MB_MODE_INFO *mbmi) { |
| 165 return mbmi->ref_frame[0] > INTRA_FRAME; |
| 166 } |
| 167 |
| 168 static INLINE int has_second_ref(const MB_MODE_INFO *mbmi) { |
| 169 return mbmi->ref_frame[1] > INTRA_FRAME; |
| 170 } |
| 171 |
167 enum mv_precision { | 172 enum mv_precision { |
168 MV_PRECISION_Q3, | 173 MV_PRECISION_Q3, |
169 MV_PRECISION_Q4 | 174 MV_PRECISION_Q4 |
170 }; | 175 }; |
171 | 176 |
172 #define VP9_REF_SCALE_SHIFT 14 | |
173 #define VP9_REF_NO_SCALE (1 << VP9_REF_SCALE_SHIFT) | |
174 | |
175 struct scale_factors { | |
176 int x_scale_fp; // horizontal fixed point scale factor | |
177 int y_scale_fp; // vertical fixed point scale factor | |
178 int x_offset_q4; | |
179 int x_step_q4; | |
180 int y_offset_q4; | |
181 int y_step_q4; | |
182 | |
183 int (*scale_value_x)(int val, const struct scale_factors *scale); | |
184 int (*scale_value_y)(int val, const struct scale_factors *scale); | |
185 void (*set_scaled_offsets)(struct scale_factors *scale, int row, int col); | |
186 MV32 (*scale_mv_q3_to_q4)(const MV *mv, const struct scale_factors *scale); | |
187 MV32 (*scale_mv_q4)(const MV *mv, const struct scale_factors *scale); | |
188 | |
189 convolve_fn_t predict[2][2][2]; // horiz, vert, avg | |
190 }; | |
191 | |
192 #if CONFIG_ALPHA | 177 #if CONFIG_ALPHA |
193 enum { MAX_MB_PLANE = 4 }; | 178 enum { MAX_MB_PLANE = 4 }; |
194 #else | 179 #else |
195 enum { MAX_MB_PLANE = 3 }; | 180 enum { MAX_MB_PLANE = 3 }; |
196 #endif | 181 #endif |
197 | 182 |
198 struct buf_2d { | 183 struct buf_2d { |
199 uint8_t *buf; | 184 uint8_t *buf; |
200 int stride; | 185 int stride; |
201 }; | 186 }; |
202 | 187 |
203 struct macroblockd_plane { | 188 struct macroblockd_plane { |
204 DECLARE_ALIGNED(16, int16_t, qcoeff[64 * 64]); | 189 DECLARE_ALIGNED(16, int16_t, qcoeff[64 * 64]); |
205 DECLARE_ALIGNED(16, int16_t, dqcoeff[64 * 64]); | 190 DECLARE_ALIGNED(16, int16_t, dqcoeff[64 * 64]); |
206 DECLARE_ALIGNED(16, uint16_t, eobs[256]); | 191 DECLARE_ALIGNED(16, uint16_t, eobs[256]); |
207 PLANE_TYPE plane_type; | 192 PLANE_TYPE plane_type; |
208 int subsampling_x; | 193 int subsampling_x; |
209 int subsampling_y; | 194 int subsampling_y; |
210 struct buf_2d dst; | 195 struct buf_2d dst; |
211 struct buf_2d pre[2]; | 196 struct buf_2d pre[2]; |
212 int16_t *dequant; | 197 int16_t *dequant; |
213 ENTROPY_CONTEXT *above_context; | 198 ENTROPY_CONTEXT *above_context; |
214 ENTROPY_CONTEXT *left_context; | 199 ENTROPY_CONTEXT *left_context; |
215 }; | 200 }; |
216 | 201 |
217 #define BLOCK_OFFSET(x, i, n) ((x) + (i) * (n)) | 202 #define BLOCK_OFFSET(x, i) ((x) + (i) * 16) |
218 | |
219 #define MAX_REF_LF_DELTAS 4 | |
220 #define MAX_MODE_LF_DELTAS 2 | |
221 | |
222 struct loopfilter { | |
223 int filter_level; | |
224 | |
225 int sharpness_level; | |
226 int last_sharpness_level; | |
227 | |
228 uint8_t mode_ref_delta_enabled; | |
229 uint8_t mode_ref_delta_update; | |
230 | |
231 // 0 = Intra, Last, GF, ARF | |
232 signed char ref_deltas[MAX_REF_LF_DELTAS]; | |
233 signed char last_ref_deltas[MAX_REF_LF_DELTAS]; | |
234 | |
235 // 0 = ZERO_MV, MV | |
236 signed char mode_deltas[MAX_MODE_LF_DELTAS]; | |
237 signed char last_mode_deltas[MAX_MODE_LF_DELTAS]; | |
238 }; | |
239 | 203 |
240 typedef struct macroblockd { | 204 typedef struct macroblockd { |
241 struct macroblockd_plane plane[MAX_MB_PLANE]; | 205 struct macroblockd_plane plane[MAX_MB_PLANE]; |
242 | 206 |
243 struct scale_factors scale_factor[2]; | 207 struct scale_factors scale_factor[2]; |
244 | 208 |
245 MODE_INFO *prev_mode_info_context; | 209 MODE_INFO *prev_mode_info_context; |
246 MODE_INFO *mode_info_context; | 210 MODE_INFO *mode_info_context; |
247 int mode_info_stride; | 211 int mode_info_stride; |
248 | 212 |
249 int up_available; | 213 int up_available; |
250 int left_available; | 214 int left_available; |
251 int right_available; | 215 int right_available; |
252 | 216 |
253 struct segmentation seg; | |
254 struct loopfilter lf; | |
255 | |
256 // partition contexts | 217 // partition contexts |
257 PARTITION_CONTEXT *above_seg_context; | 218 PARTITION_CONTEXT *above_seg_context; |
258 PARTITION_CONTEXT *left_seg_context; | 219 PARTITION_CONTEXT *left_seg_context; |
259 | 220 |
260 /* Distance of MB away from frame edges */ | 221 /* Distance of MB away from frame edges */ |
261 int mb_to_left_edge; | 222 int mb_to_left_edge; |
262 int mb_to_right_edge; | 223 int mb_to_right_edge; |
263 int mb_to_top_edge; | 224 int mb_to_top_edge; |
264 int mb_to_bottom_edge; | 225 int mb_to_bottom_edge; |
265 | 226 |
(...skipping 11 matching lines...) |
277 | 238 |
278 unsigned char sb_index; // index of 32x32 block inside the 64x64 block | 239 unsigned char sb_index; // index of 32x32 block inside the 64x64 block |
279 unsigned char mb_index; // index of 16x16 block inside the 32x32 block | 240 unsigned char mb_index; // index of 16x16 block inside the 32x32 block |
280 unsigned char b_index; // index of 8x8 block inside the 16x16 block | 241 unsigned char b_index; // index of 8x8 block inside the 16x16 block |
281 unsigned char ab_index; // index of 4x4 block inside the 8x8 block | 242 unsigned char ab_index; // index of 4x4 block inside the 8x8 block |
282 | 243 |
283 int q_index; | 244 int q_index; |
284 | 245 |
285 } MACROBLOCKD; | 246 } MACROBLOCKD; |
286 | 247 |
287 static INLINE unsigned char *get_sb_index(MACROBLOCKD *xd, BLOCK_SIZE_TYPE subsize) { | 248 static INLINE unsigned char *get_sb_index(MACROBLOCKD *xd, BLOCK_SIZE subsize) {
288 switch (subsize) { | 249 switch (subsize) { |
289 case BLOCK_SIZE_SB64X64: | 250 case BLOCK_64X64: |
290 case BLOCK_SIZE_SB64X32: | 251 case BLOCK_64X32: |
291 case BLOCK_SIZE_SB32X64: | 252 case BLOCK_32X64: |
292 case BLOCK_SIZE_SB32X32: | 253 case BLOCK_32X32: |
293 return &xd->sb_index; | 254 return &xd->sb_index; |
294 case BLOCK_SIZE_SB32X16: | 255 case BLOCK_32X16: |
295 case BLOCK_SIZE_SB16X32: | 256 case BLOCK_16X32: |
296 case BLOCK_SIZE_MB16X16: | 257 case BLOCK_16X16: |
297 return &xd->mb_index; | 258 return &xd->mb_index; |
298 case BLOCK_SIZE_SB16X8: | 259 case BLOCK_16X8: |
299 case BLOCK_SIZE_SB8X16: | 260 case BLOCK_8X16: |
300 case BLOCK_SIZE_SB8X8: | 261 case BLOCK_8X8: |
301 return &xd->b_index; | 262 return &xd->b_index; |
302 case BLOCK_SIZE_SB8X4: | 263 case BLOCK_8X4: |
303 case BLOCK_SIZE_SB4X8: | 264 case BLOCK_4X8: |
304 case BLOCK_SIZE_AB4X4: | 265 case BLOCK_4X4: |
305 return &xd->ab_index; | 266 return &xd->ab_index; |
306 default: | 267 default: |
307 assert(0); | 268 assert(0); |
308 return NULL; | 269 return NULL; |
309 } | 270 } |
310 } | 271 } |
311 | 272 |
312 static INLINE void update_partition_context(MACROBLOCKD *xd, | 273 static INLINE void update_partition_context(MACROBLOCKD *xd, BLOCK_SIZE sb_type, |
313 BLOCK_SIZE_TYPE sb_type, | 274 BLOCK_SIZE sb_size) { |
314 BLOCK_SIZE_TYPE sb_size) { | |
315 const int bsl = b_width_log2(sb_size), bs = (1 << bsl) / 2; | 275 const int bsl = b_width_log2(sb_size), bs = (1 << bsl) / 2; |
316 const int bwl = b_width_log2(sb_type); | 276 const int bwl = b_width_log2(sb_type); |
317 const int bhl = b_height_log2(sb_type); | 277 const int bhl = b_height_log2(sb_type); |
318 const int boffset = b_width_log2(BLOCK_SIZE_SB64X64) - bsl; | 278 const int boffset = b_width_log2(BLOCK_64X64) - bsl; |
319 const char pcval0 = ~(0xe << boffset); | 279 const char pcval0 = ~(0xe << boffset); |
320 const char pcval1 = ~(0xf << boffset); | 280 const char pcval1 = ~(0xf << boffset); |
321 const char pcvalue[2] = {pcval0, pcval1}; | 281 const char pcvalue[2] = {pcval0, pcval1}; |
322 | 282 |
323 assert(MAX(bwl, bhl) <= bsl); | 283 assert(MAX(bwl, bhl) <= bsl); |
324 | 284 |
325 // update the partition context at the end notes. set partition bits | 285 // update the partition context at the end notes. set partition bits |
326 // of block sizes larger than the current one to be one, and partition | 286 // of block sizes larger than the current one to be one, and partition |
327 // bits of smaller block sizes to be zero. | 287 // bits of smaller block sizes to be zero. |
328 vpx_memset(xd->above_seg_context, pcvalue[bwl == bsl], bs); | 288 vpx_memset(xd->above_seg_context, pcvalue[bwl == bsl], bs); |
329 vpx_memset(xd->left_seg_context, pcvalue[bhl == bsl], bs); | 289 vpx_memset(xd->left_seg_context, pcvalue[bhl == bsl], bs); |
330 } | 290 } |
331 | 291 |
332 static INLINE int partition_plane_context(MACROBLOCKD *xd, | 292 static INLINE int partition_plane_context(MACROBLOCKD *xd, BLOCK_SIZE sb_type) { |
333 BLOCK_SIZE_TYPE sb_type) { | |
334 int bsl = mi_width_log2(sb_type), bs = 1 << bsl; | 293 int bsl = mi_width_log2(sb_type), bs = 1 << bsl; |
335 int above = 0, left = 0, i; | 294 int above = 0, left = 0, i; |
336 int boffset = mi_width_log2(BLOCK_SIZE_SB64X64) - bsl; | 295 int boffset = mi_width_log2(BLOCK_64X64) - bsl; |
337 | 296 |
338 assert(mi_width_log2(sb_type) == mi_height_log2(sb_type)); | 297 assert(mi_width_log2(sb_type) == mi_height_log2(sb_type)); |
339 assert(bsl >= 0); | 298 assert(bsl >= 0); |
340 assert(boffset >= 0); | 299 assert(boffset >= 0); |
341 | 300 |
342 for (i = 0; i < bs; i++) | 301 for (i = 0; i < bs; i++) |
343 above |= (xd->above_seg_context[i] & (1 << boffset)); | 302 above |= (xd->above_seg_context[i] & (1 << boffset)); |
344 for (i = 0; i < bs; i++) | 303 for (i = 0; i < bs; i++) |
345 left |= (xd->left_seg_context[i] & (1 << boffset)); | 304 left |= (xd->left_seg_context[i] & (1 << boffset)); |
346 | 305 |
347 above = (above > 0); | 306 above = (above > 0); |
348 left = (left > 0); | 307 left = (left > 0); |
349 | 308 |
350 return (left * 2 + above) + bsl * PARTITION_PLOFFSET; | 309 return (left * 2 + above) + bsl * PARTITION_PLOFFSET; |
351 } | 310 } |
352 | 311 |
353 static BLOCK_SIZE_TYPE get_subsize(BLOCK_SIZE_TYPE bsize, | 312 static BLOCK_SIZE get_subsize(BLOCK_SIZE bsize, PARTITION_TYPE partition) { |
354 PARTITION_TYPE partition) { | 313 const BLOCK_SIZE subsize = subsize_lookup[partition][bsize]; |
355 BLOCK_SIZE_TYPE subsize = subsize_lookup[partition][bsize]; | 314 assert(subsize < BLOCK_SIZES); |
356 assert(subsize != BLOCK_SIZE_TYPES); | |
357 return subsize; | 315 return subsize; |
358 } | 316 } |
359 | 317 |
360 extern const TX_TYPE mode2txfm_map[MB_MODE_COUNT]; | 318 extern const TX_TYPE mode2txfm_map[MB_MODE_COUNT]; |
361 | 319 |
362 static INLINE TX_TYPE get_tx_type_4x4(PLANE_TYPE plane_type, | 320 static INLINE TX_TYPE get_tx_type_4x4(PLANE_TYPE plane_type, |
363 const MACROBLOCKD *xd, int ib) { | 321 const MACROBLOCKD *xd, int ib) { |
364 const MODE_INFO *const mi = xd->mode_info_context; | 322 const MODE_INFO *const mi = xd->mode_info_context; |
365 const MB_MODE_INFO *const mbmi = &mi->mbmi; | 323 const MB_MODE_INFO *const mbmi = &mi->mbmi; |
366 | 324 |
367 if (plane_type != PLANE_TYPE_Y_WITH_DC || | 325 if (plane_type != PLANE_TYPE_Y_WITH_DC || |
368 xd->lossless || | 326 xd->lossless || |
369 mbmi->ref_frame[0] != INTRA_FRAME) | 327 is_inter_block(mbmi)) |
370 return DCT_DCT; | 328 return DCT_DCT; |
371 | 329 |
372 return mode2txfm_map[mbmi->sb_type < BLOCK_SIZE_SB8X8 ? | 330 return mode2txfm_map[mbmi->sb_type < BLOCK_8X8 ? |
373 mi->bmi[ib].as_mode : mbmi->mode]; | 331 mi->bmi[ib].as_mode : mbmi->mode]; |
374 } | 332 } |
375 | 333 |
376 static INLINE TX_TYPE get_tx_type_8x8(PLANE_TYPE plane_type, | 334 static INLINE TX_TYPE get_tx_type_8x8(PLANE_TYPE plane_type, |
377 const MACROBLOCKD *xd) { | 335 const MACROBLOCKD *xd) { |
378 return plane_type == PLANE_TYPE_Y_WITH_DC ? | 336 return plane_type == PLANE_TYPE_Y_WITH_DC ? |
379 mode2txfm_map[xd->mode_info_context->mbmi.mode] : DCT_DCT; | 337 mode2txfm_map[xd->mode_info_context->mbmi.mode] : DCT_DCT; |
380 } | 338 } |
381 | 339 |
382 static INLINE TX_TYPE get_tx_type_16x16(PLANE_TYPE plane_type, | 340 static INLINE TX_TYPE get_tx_type_16x16(PLANE_TYPE plane_type, |
(...skipping 15 matching lines...) |
398 xd->plane[3].subsampling_x = 0; | 356 xd->plane[3].subsampling_x = 0; |
399 xd->plane[3].subsampling_y = 0; | 357 xd->plane[3].subsampling_y = 0; |
400 #endif | 358 #endif |
401 } | 359 } |
402 | 360 |
403 | 361 |
404 static INLINE TX_SIZE get_uv_tx_size(const MB_MODE_INFO *mbmi) { | 362 static INLINE TX_SIZE get_uv_tx_size(const MB_MODE_INFO *mbmi) { |
405 return MIN(mbmi->txfm_size, max_uv_txsize_lookup[mbmi->sb_type]); | 363 return MIN(mbmi->txfm_size, max_uv_txsize_lookup[mbmi->sb_type]); |
406 } | 364 } |
407 | 365 |
408 struct plane_block_idx { | 366 static BLOCK_SIZE get_plane_block_size(BLOCK_SIZE bsize, |
409 int plane; | 367 const struct macroblockd_plane *pd) { |
410 int block; | 368 BLOCK_SIZE bs = ss_size_lookup[bsize][pd->subsampling_x][pd->subsampling_y]; |
411 }; | 369 assert(bs < BLOCK_SIZES); |
412 | 370 return bs; |
413 // TODO(jkoleszar): returning a struct so it can be used in a const context, | |
414 // expect to refactor this further later. | |
415 static INLINE struct plane_block_idx plane_block_idx(int y_blocks, | |
416 int b_idx) { | |
417 const int v_offset = y_blocks * 5 / 4; | |
418 struct plane_block_idx res; | |
419 | |
420 if (b_idx < y_blocks) { | |
421 res.plane = 0; | |
422 res.block = b_idx; | |
423 } else if (b_idx < v_offset) { | |
424 res.plane = 1; | |
425 res.block = b_idx - y_blocks; | |
426 } else { | |
427 assert(b_idx < y_blocks * 3 / 2); | |
428 res.plane = 2; | |
429 res.block = b_idx - v_offset; | |
430 } | |
431 return res; | |
432 } | 371 } |
433 | 372 |
434 static INLINE int plane_block_width(BLOCK_SIZE_TYPE bsize, | 373 static INLINE int plane_block_width(BLOCK_SIZE bsize, |
435 const struct macroblockd_plane* plane) { | 374 const struct macroblockd_plane* plane) { |
436 return 4 << (b_width_log2(bsize) - plane->subsampling_x); | 375 return 4 << (b_width_log2(bsize) - plane->subsampling_x); |
437 } | 376 } |
438 | 377 |
439 static INLINE int plane_block_height(BLOCK_SIZE_TYPE bsize, | 378 static INLINE int plane_block_height(BLOCK_SIZE bsize, |
440 const struct macroblockd_plane* plane) { | 379 const struct macroblockd_plane* plane) { |
441 return 4 << (b_height_log2(bsize) - plane->subsampling_y); | 380 return 4 << (b_height_log2(bsize) - plane->subsampling_y); |
442 } | 381 } |
443 | 382 |
444 static INLINE int plane_block_width_log2by4( | |
445 BLOCK_SIZE_TYPE bsize, const struct macroblockd_plane* plane) { | |
446 return (b_width_log2(bsize) - plane->subsampling_x); | |
447 } | |
448 | |
449 static INLINE int plane_block_height_log2by4( | |
450 BLOCK_SIZE_TYPE bsize, const struct macroblockd_plane* plane) { | |
451 return (b_height_log2(bsize) - plane->subsampling_y); | |
452 } | |
453 | |
454 typedef void (*foreach_transformed_block_visitor)(int plane, int block, | 383 typedef void (*foreach_transformed_block_visitor)(int plane, int block, |
455 BLOCK_SIZE_TYPE bsize, | 384 BLOCK_SIZE plane_bsize, |
456 int ss_txfrm_size, | 385 TX_SIZE tx_size, |
457 void *arg); | 386 void *arg); |
458 | 387 |
459 static INLINE void foreach_transformed_block_in_plane( | 388 static INLINE void foreach_transformed_block_in_plane( |
460 const MACROBLOCKD* const xd, BLOCK_SIZE_TYPE bsize, int plane, | 389 const MACROBLOCKD *const xd, BLOCK_SIZE bsize, int plane, |
461 foreach_transformed_block_visitor visit, void *arg) { | 390 foreach_transformed_block_visitor visit, void *arg) { |
462 const int bw = b_width_log2(bsize), bh = b_height_log2(bsize); | 391 const struct macroblockd_plane *const pd = &xd->plane[plane]; |
463 | 392 const MB_MODE_INFO* mbmi = &xd->mode_info_context->mbmi; |
464 // block and transform sizes, in number of 4x4 blocks log 2 ("*_b") | 393 // block and transform sizes, in number of 4x4 blocks log 2 ("*_b") |
465 // 4x4=0, 8x8=2, 16x16=4, 32x32=6, 64x64=8 | 394 // 4x4=0, 8x8=2, 16x16=4, 32x32=6, 64x64=8 |
466 // transform size varies per plane, look it up in a common way. | 395 // transform size varies per plane, look it up in a common way. |
467 const MB_MODE_INFO* mbmi = &xd->mode_info_context->mbmi; | |
468 const TX_SIZE tx_size = plane ? get_uv_tx_size(mbmi) | 396 const TX_SIZE tx_size = plane ? get_uv_tx_size(mbmi) |
469 : mbmi->txfm_size; | 397 : mbmi->txfm_size; |
470 const int block_size_b = bw + bh; | 398 const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, pd); |
471 const int txfrm_size_b = tx_size * 2; | 399 const int num_4x4_w = num_4x4_blocks_wide_lookup[plane_bsize]; |
472 | 400 const int num_4x4_h = num_4x4_blocks_high_lookup[plane_bsize]; |
473 // subsampled size of the block | 401 const int step = 1 << (tx_size << 1); |
474 const int ss_sum = xd->plane[plane].subsampling_x | |
475 + xd->plane[plane].subsampling_y; | |
476 const int ss_block_size = block_size_b - ss_sum; | |
477 | |
478 const int step = 1 << txfrm_size_b; | |
479 | |
480 int i; | 402 int i; |
481 | 403 |
482 assert(txfrm_size_b <= block_size_b); | |
483 assert(txfrm_size_b <= ss_block_size); | |
484 | |
485 // If mb_to_right_edge is < 0 we are in a situation in which | 404 // If mb_to_right_edge is < 0 we are in a situation in which |
486 // the current block size extends into the UMV and we won't | 405 // the current block size extends into the UMV and we won't |
487 // visit the sub blocks that are wholly within the UMV. | 406 // visit the sub blocks that are wholly within the UMV. |
488 if (xd->mb_to_right_edge < 0 || xd->mb_to_bottom_edge < 0) { | 407 if (xd->mb_to_right_edge < 0 || xd->mb_to_bottom_edge < 0) { |
489 int r, c; | 408 int r, c; |
490 const int sw = bw - xd->plane[plane].subsampling_x; | 409 |
491 const int sh = bh - xd->plane[plane].subsampling_y; | 410 int max_blocks_wide = num_4x4_w; |
492 int max_blocks_wide = 1 << sw; | 411 int max_blocks_high = num_4x4_h; |
493 int max_blocks_high = 1 << sh; | |
494 | 412 |
495 // xd->mb_to_right_edge is in units of pixels * 8. This converts | 413 // xd->mb_to_right_edge is in units of pixels * 8. This converts |
496 // it to 4x4 block sizes. | 414 // it to 4x4 block sizes. |
497 if (xd->mb_to_right_edge < 0) | 415 if (xd->mb_to_right_edge < 0) |
498 max_blocks_wide += | 416 max_blocks_wide += (xd->mb_to_right_edge >> (5 + pd->subsampling_x)); |
499 + (xd->mb_to_right_edge >> (5 + xd->plane[plane].subsampling_x)); | |
500 | 417 |
501 if (xd->mb_to_bottom_edge < 0) | 418 if (xd->mb_to_bottom_edge < 0) |
502 max_blocks_high += | 419 max_blocks_high += (xd->mb_to_bottom_edge >> (5 + pd->subsampling_y)); |
503 + (xd->mb_to_bottom_edge >> (5 + xd->plane[plane].subsampling_y)); | |
504 | 420 |
505 i = 0; | 421 i = 0; |
506 // Unlike the normal case - in here we have to keep track of the | 422 // Unlike the normal case - in here we have to keep track of the |
507 // row and column of the blocks we use so that we know if we are in | 423 // row and column of the blocks we use so that we know if we are in |
508 // the unrestricted motion border.. | 424 // the unrestricted motion border. |
509 for (r = 0; r < (1 << sh); r += (1 << tx_size)) { | 425 for (r = 0; r < num_4x4_h; r += (1 << tx_size)) { |
510 for (c = 0; c < (1 << sw); c += (1 << tx_size)) { | 426 for (c = 0; c < num_4x4_w; c += (1 << tx_size)) { |
511 if (r < max_blocks_high && c < max_blocks_wide) | 427 if (r < max_blocks_high && c < max_blocks_wide) |
512 visit(plane, i, bsize, txfrm_size_b, arg); | 428 visit(plane, i, plane_bsize, tx_size, arg); |
513 i += step; | 429 i += step; |
514 } | 430 } |
515 } | 431 } |
516 } else { | 432 } else { |
517 for (i = 0; i < (1 << ss_block_size); i += step) { | 433 for (i = 0; i < num_4x4_w * num_4x4_h; i += step) |
518 visit(plane, i, bsize, txfrm_size_b, arg); | 434 visit(plane, i, plane_bsize, tx_size, arg); |
519 } | |
520 } | 435 } |
521 } | 436 } |
522 | 437 |
523 static INLINE void foreach_transformed_block( | 438 static INLINE void foreach_transformed_block( |
524 const MACROBLOCKD* const xd, BLOCK_SIZE_TYPE bsize, | 439 const MACROBLOCKD* const xd, BLOCK_SIZE bsize, |
525 foreach_transformed_block_visitor visit, void *arg) { | 440 foreach_transformed_block_visitor visit, void *arg) { |
526 int plane; | 441 int plane; |
527 | 442 |
528 for (plane = 0; plane < MAX_MB_PLANE; plane++) { | 443 for (plane = 0; plane < MAX_MB_PLANE; plane++) |
529 foreach_transformed_block_in_plane(xd, bsize, plane, | 444 foreach_transformed_block_in_plane(xd, bsize, plane, visit, arg); |
530 visit, arg); | |
531 } | |
532 } | 445 } |
533 | 446 |
534 static INLINE void foreach_transformed_block_uv( | 447 static INLINE void foreach_transformed_block_uv( |
535 const MACROBLOCKD* const xd, BLOCK_SIZE_TYPE bsize, | 448 const MACROBLOCKD* const xd, BLOCK_SIZE bsize, |
536 foreach_transformed_block_visitor visit, void *arg) { | 449 foreach_transformed_block_visitor visit, void *arg) { |
537 int plane; | 450 int plane; |
538 | 451 |
539 for (plane = 1; plane < MAX_MB_PLANE; plane++) { | 452 for (plane = 1; plane < MAX_MB_PLANE; plane++) |
540 foreach_transformed_block_in_plane(xd, bsize, plane, | 453 foreach_transformed_block_in_plane(xd, bsize, plane, visit, arg); |
541 visit, arg); | |
542 } | |
543 } | 454 } |
544 | 455 |
545 // TODO(jkoleszar): In principle, pred_w, pred_h are unnecessary, as we could | 456 static int raster_block_offset(BLOCK_SIZE plane_bsize, |
546 // calculate the subsampled BLOCK_SIZE_TYPE, but that type isn't defined for | 457 int raster_block, int stride) { |
547 // sizes smaller than 16x16 yet. | 458 const int bw = b_width_log2(plane_bsize); |
548 typedef void (*foreach_predicted_block_visitor)(int plane, int block, | 459 const int y = 4 * (raster_block >> bw); |
549 BLOCK_SIZE_TYPE bsize, | 460 const int x = 4 * (raster_block & ((1 << bw) - 1)); |
550 int pred_w, int pred_h, | |
551 void *arg); | |
552 static INLINE void foreach_predicted_block_in_plane( | |
553 const MACROBLOCKD* const xd, BLOCK_SIZE_TYPE bsize, int plane, | |
554 foreach_predicted_block_visitor visit, void *arg) { | |
555 int i, x, y; | |
556 | |
557 // block sizes in number of 4x4 blocks log 2 ("*_b") | |
558 // 4x4=0, 8x8=2, 16x16=4, 32x32=6, 64x64=8 | |
559 // subsampled size of the block | |
560 const int bwl = b_width_log2(bsize) - xd->plane[plane].subsampling_x; | |
561 const int bhl = b_height_log2(bsize) - xd->plane[plane].subsampling_y; | |
562 | |
563 // size of the predictor to use. | |
564 int pred_w, pred_h; | |
565 | |
566 if (xd->mode_info_context->mbmi.sb_type < BLOCK_SIZE_SB8X8) { | |
567 assert(bsize == BLOCK_SIZE_SB8X8); | |
568 pred_w = 0; | |
569 pred_h = 0; | |
570 } else { | |
571 pred_w = bwl; | |
572 pred_h = bhl; | |
573 } | |
574 assert(pred_w <= bwl); | |
575 assert(pred_h <= bhl); | |
576 | |
577 // visit each subblock in raster order | |
578 i = 0; | |
579 for (y = 0; y < 1 << bhl; y += 1 << pred_h) { | |
580 for (x = 0; x < 1 << bwl; x += 1 << pred_w) { | |
581 visit(plane, i, bsize, pred_w, pred_h, arg); | |
582 i += 1 << pred_w; | |
583 } | |
584 i += (1 << (bwl + pred_h)) - (1 << bwl); | |
585 } | |
586 } | |
587 static INLINE void foreach_predicted_block( | |
588 const MACROBLOCKD* const xd, BLOCK_SIZE_TYPE bsize, | |
589 foreach_predicted_block_visitor visit, void *arg) { | |
590 int plane; | |
591 | |
592 for (plane = 0; plane < MAX_MB_PLANE; plane++) { | |
593 foreach_predicted_block_in_plane(xd, bsize, plane, visit, arg); | |
594 } | |
595 } | |
596 static INLINE void foreach_predicted_block_uv( | |
597 const MACROBLOCKD* const xd, BLOCK_SIZE_TYPE bsize, | |
598 foreach_predicted_block_visitor visit, void *arg) { | |
599 int plane; | |
600 | |
601 for (plane = 1; plane < MAX_MB_PLANE; plane++) { | |
602 foreach_predicted_block_in_plane(xd, bsize, plane, visit, arg); | |
603 } | |
604 } | |
605 static int raster_block_offset(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize, | |
606 int plane, int block, int stride) { | |
607 const int bw = b_width_log2(bsize) - xd->plane[plane].subsampling_x; | |
608 const int y = 4 * (block >> bw), x = 4 * (block & ((1 << bw) - 1)); | |
609 return y * stride + x; | 461 return y * stride + x; |
610 } | 462 } |
611 static int16_t* raster_block_offset_int16(MACROBLOCKD *xd, | 463 static int16_t* raster_block_offset_int16(BLOCK_SIZE plane_bsize, |
612 BLOCK_SIZE_TYPE bsize, | 464 int raster_block, int16_t *base) { |
613 int plane, int block, int16_t *base) { | 465 const int stride = 4 << b_width_log2(plane_bsize); |
614 const int stride = plane_block_width(bsize, &xd->plane[plane]); | 466 return base + raster_block_offset(plane_bsize, raster_block, stride); |
615 return base + raster_block_offset(xd, bsize, plane, block, stride); | |
616 } | 467 } |
617 static uint8_t* raster_block_offset_uint8(MACROBLOCKD *xd, | 468 static uint8_t* raster_block_offset_uint8(BLOCK_SIZE plane_bsize, |
618 BLOCK_SIZE_TYPE bsize, | 469 int raster_block, uint8_t *base, |
619 int plane, int block, | 470 int stride) { |
620 uint8_t *base, int stride) { | 471 return base + raster_block_offset(plane_bsize, raster_block, stride); |
621 return base + raster_block_offset(xd, bsize, plane, block, stride); | |
622 } | 472 } |
623 | 473 |
624 static int txfrm_block_to_raster_block(MACROBLOCKD *xd, | 474 static int txfrm_block_to_raster_block(BLOCK_SIZE plane_bsize, |
625 BLOCK_SIZE_TYPE bsize, | 475 TX_SIZE tx_size, int block) { |
626 int plane, int block, | 476 const int bwl = b_width_log2(plane_bsize); |
627 int ss_txfrm_size) { | 477 const int tx_cols_log2 = bwl - tx_size; |
628 const int bwl = b_width_log2(bsize) - xd->plane[plane].subsampling_x; | |
629 const int txwl = ss_txfrm_size / 2; | |
630 const int tx_cols_log2 = bwl - txwl; | |
631 const int tx_cols = 1 << tx_cols_log2; | 478 const int tx_cols = 1 << tx_cols_log2; |
632 const int raster_mb = block >> ss_txfrm_size; | 479 const int raster_mb = block >> (tx_size << 1); |
633 const int x = (raster_mb & (tx_cols - 1)) << (txwl); | 480 const int x = (raster_mb & (tx_cols - 1)) << tx_size; |
634 const int y = raster_mb >> tx_cols_log2 << (txwl); | 481 const int y = (raster_mb >> tx_cols_log2) << tx_size; |
635 return x + (y << bwl); | 482 return x + (y << bwl); |
636 } | 483 } |
637 | 484 |
638 static void txfrm_block_to_raster_xy(MACROBLOCKD *xd, | 485 static void txfrm_block_to_raster_xy(BLOCK_SIZE plane_bsize, |
639 BLOCK_SIZE_TYPE bsize, | 486 TX_SIZE tx_size, int block, |
640 int plane, int block, | |
641 int ss_txfrm_size, | |
642 int *x, int *y) { | 487 int *x, int *y) { |
643 const int bwl = b_width_log2(bsize) - xd->plane[plane].subsampling_x; | 488 const int bwl = b_width_log2(plane_bsize); |
644 const int txwl = ss_txfrm_size / 2; | 489 const int tx_cols_log2 = bwl - tx_size; |
645 const int tx_cols_log2 = bwl - txwl; | |
646 const int tx_cols = 1 << tx_cols_log2; | 490 const int tx_cols = 1 << tx_cols_log2; |
647 const int raster_mb = block >> ss_txfrm_size; | 491 const int raster_mb = block >> (tx_size << 1); |
648 *x = (raster_mb & (tx_cols - 1)) << (txwl); | 492 *x = (raster_mb & (tx_cols - 1)) << tx_size; |
649 *y = raster_mb >> tx_cols_log2 << (txwl); | 493 *y = (raster_mb >> tx_cols_log2) << tx_size; |
650 } | 494 } |
651 | 495 |
652 static void extend_for_intra(MACROBLOCKD* const xd, int plane, int block, | 496 static void extend_for_intra(MACROBLOCKD* const xd, BLOCK_SIZE plane_bsize, |
653 BLOCK_SIZE_TYPE bsize, int ss_txfrm_size) { | 497 int plane, int block, TX_SIZE tx_size) { |
654 const int bw = plane_block_width(bsize, &xd->plane[plane]); | 498 struct macroblockd_plane *const pd = &xd->plane[plane]; |
655 const int bh = plane_block_height(bsize, &xd->plane[plane]); | 499 uint8_t *const buf = pd->dst.buf; |
| 500 const int stride = pd->dst.stride; |
| 501 |
656 int x, y; | 502 int x, y; |
657 txfrm_block_to_raster_xy(xd, bsize, plane, block, ss_txfrm_size, &x, &y); | 503 txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &x, &y); |
658 x = x * 4 - 1; | 504 x = x * 4 - 1; |
659 y = y * 4 - 1; | 505 y = y * 4 - 1; |
660 // Copy a pixel into the umv if we are in a situation where the block size | 506 // Copy a pixel into the umv if we are in a situation where the block size |
661 // extends into the UMV. | 507 // extends into the UMV. |
662 // TODO(JBB): Should be able to do the full extend in place so we don't have | 508 // TODO(JBB): Should be able to do the full extend in place so we don't have |
663 // to do this multiple times. | 509 // to do this multiple times. |
664 if (xd->mb_to_right_edge < 0) { | 510 if (xd->mb_to_right_edge < 0) { |
665 int umv_border_start = bw | 511 const int bw = 4 << b_width_log2(plane_bsize); |
666 + (xd->mb_to_right_edge >> (3 + xd->plane[plane].subsampling_x)); | 512 const int umv_border_start = bw + (xd->mb_to_right_edge >> |
| 513 (3 + pd->subsampling_x)); |
667 | 514 |
668 if (x + bw > umv_border_start) | 515 if (x + bw > umv_border_start) |
669 vpx_memset( | 516 vpx_memset(&buf[y * stride + umv_border_start], |
670 xd->plane[plane].dst.buf + y * xd->plane[plane].dst.stride | 517 buf[y * stride + umv_border_start - 1], bw); |
671 + umv_border_start, | |
672 *(xd->plane[plane].dst.buf + y * xd->plane[plane].dst.stride | |
673 + umv_border_start - 1), | |
674 bw); | |
675 } | 518 } |
| 519 |
676 if (xd->mb_to_bottom_edge < 0) { | 520 if (xd->mb_to_bottom_edge < 0) { |
677 int umv_border_start = bh | 521 const int bh = 4 << b_height_log2(plane_bsize); |
678 + (xd->mb_to_bottom_edge >> (3 + xd->plane[plane].subsampling_y)); | 522 const int umv_border_start = bh + (xd->mb_to_bottom_edge >> |
| 523 (3 + pd->subsampling_y)); |
679 int i; | 524 int i; |
680 uint8_t c = *(xd->plane[plane].dst.buf | 525 const uint8_t c = buf[(umv_border_start - 1) * stride + x]; |
681 + (umv_border_start - 1) * xd->plane[plane].dst.stride + x); | 526 uint8_t *d = &buf[umv_border_start * stride + x]; |
682 | |
683 uint8_t *d = xd->plane[plane].dst.buf | |
684 + umv_border_start * xd->plane[plane].dst.stride + x; | |
685 | 527 |
686 if (y + bh > umv_border_start) | 528 if (y + bh > umv_border_start) |
687 for (i = 0; i < bh; i++, d += xd->plane[plane].dst.stride) | 529 for (i = 0; i < bh; ++i, d += stride) |
688 *d = c; | 530 *d = c; |
689 } | 531 } |
690 } | 532 } |
691 static void set_contexts_on_border(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize, | 533 static void set_contexts_on_border(MACROBLOCKD *xd, |
692 int plane, int ss_tx_size, int eob, int aoff, | 534 struct macroblockd_plane *pd, |
693 int loff, ENTROPY_CONTEXT *A, | 535 BLOCK_SIZE plane_bsize, |
694 ENTROPY_CONTEXT *L) { | 536 int tx_size_in_blocks, int has_eob, |
695 const int bw = b_width_log2(bsize), bh = b_height_log2(bsize); | 537 int aoff, int loff, |
696 const int sw = bw - xd->plane[plane].subsampling_x; | 538 ENTROPY_CONTEXT *A, ENTROPY_CONTEXT *L) { |
697 const int sh = bh - xd->plane[plane].subsampling_y; | 539 int mi_blocks_wide = num_4x4_blocks_wide_lookup[plane_bsize]; |
698 int mi_blocks_wide = 1 << sw; | 540 int mi_blocks_high = num_4x4_blocks_high_lookup[plane_bsize]; |
699 int mi_blocks_high = 1 << sh; | |
700 int tx_size_in_blocks = (1 << ss_tx_size); | |
701 int above_contexts = tx_size_in_blocks; | 541 int above_contexts = tx_size_in_blocks; |
702 int left_contexts = tx_size_in_blocks; | 542 int left_contexts = tx_size_in_blocks; |
703 int pt; | 543 int pt; |
704 | 544 |
705 // xd->mb_to_right_edge is in units of pixels * 8. This converts | 545 // xd->mb_to_right_edge is in units of pixels * 8. This converts |
706 // it to 4x4 block sizes. | 546 // it to 4x4 block sizes. |
707 if (xd->mb_to_right_edge < 0) { | 547 if (xd->mb_to_right_edge < 0) |
708 mi_blocks_wide += (xd->mb_to_right_edge | 548 mi_blocks_wide += (xd->mb_to_right_edge >> (5 + pd->subsampling_x)); |
709 >> (5 + xd->plane[plane].subsampling_x)); | 549 |
710 } | 550 if (xd->mb_to_bottom_edge < 0) |
| 551 mi_blocks_high += (xd->mb_to_bottom_edge >> (5 + pd->subsampling_y)); |
711 | 552 |
712 // this code attempts to avoid copying into contexts that are outside | 553 // this code attempts to avoid copying into contexts that are outside |
713 // our border. Any blocks that do are set to 0... | 554 // our border. Any blocks that do are set to 0... |
714 if (above_contexts + aoff > mi_blocks_wide) | 555 if (above_contexts + aoff > mi_blocks_wide) |
715 above_contexts = mi_blocks_wide - aoff; | 556 above_contexts = mi_blocks_wide - aoff; |
716 | 557 |
717 if (xd->mb_to_bottom_edge < 0) { | 558 if (left_contexts + loff > mi_blocks_high) |
718 mi_blocks_high += (xd->mb_to_bottom_edge | |
719 >> (5 + xd->plane[plane].subsampling_y)); | |
720 } | |
721 if (left_contexts + loff > mi_blocks_high) { | |
722 left_contexts = mi_blocks_high - loff; | 559 left_contexts = mi_blocks_high - loff; |
723 } | |
724 | 560 |
725 for (pt = 0; pt < above_contexts; pt++) | 561 for (pt = 0; pt < above_contexts; pt++) |
726 A[pt] = eob > 0; | 562 A[pt] = has_eob; |
727 for (pt = above_contexts; pt < (1 << ss_tx_size); pt++) | 563 for (pt = above_contexts; pt < tx_size_in_blocks; pt++) |
728 A[pt] = 0; | 564 A[pt] = 0; |
729 for (pt = 0; pt < left_contexts; pt++) | 565 for (pt = 0; pt < left_contexts; pt++) |
730 L[pt] = eob > 0; | 566 L[pt] = has_eob; |
731 for (pt = left_contexts; pt < (1 << ss_tx_size); pt++) | 567 for (pt = left_contexts; pt < tx_size_in_blocks; pt++) |
732 L[pt] = 0; | 568 L[pt] = 0; |
733 } | 569 } |
734 | 570 |
| 571 static void set_contexts(MACROBLOCKD *xd, struct macroblockd_plane *pd, |
| 572 BLOCK_SIZE plane_bsize, TX_SIZE tx_size, |
| 573 int has_eob, int aoff, int loff) { |
| 574 ENTROPY_CONTEXT *const A = pd->above_context + aoff; |
| 575 ENTROPY_CONTEXT *const L = pd->left_context + loff; |
| 576 const int tx_size_in_blocks = 1 << tx_size; |
| 577 |
| 578 if (xd->mb_to_right_edge < 0 || xd->mb_to_bottom_edge < 0) { |
| 579 set_contexts_on_border(xd, pd, plane_bsize, tx_size_in_blocks, has_eob, |
| 580 aoff, loff, A, L); |
| 581 } else { |
| 582 vpx_memset(A, has_eob, sizeof(ENTROPY_CONTEXT) * tx_size_in_blocks); |
| 583 vpx_memset(L, has_eob, sizeof(ENTROPY_CONTEXT) * tx_size_in_blocks); |
| 584 } |
| 585 } |
735 | 586 |
736 #endif // VP9_COMMON_VP9_BLOCKD_H_ | 587 #endif // VP9_COMMON_VP9_BLOCKD_H_ |