OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. | 2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
11 | 11 |
12 #ifndef VP9_COMMON_VP9_BLOCKD_H_ | 12 #ifndef VP9_COMMON_VP9_BLOCKD_H_ |
13 #define VP9_COMMON_VP9_BLOCKD_H_ | 13 #define VP9_COMMON_VP9_BLOCKD_H_ |
14 | 14 |
15 #include "./vpx_config.h" | 15 #include "./vpx_config.h" |
16 | 16 |
17 #include "vpx_ports/mem.h" | 17 #include "vpx_ports/mem.h" |
18 #include "vpx_scale/yv12config.h" | 18 #include "vpx_scale/yv12config.h" |
19 | 19 |
20 #include "vp9/common/vp9_common.h" | 20 #include "vp9/common/vp9_common.h" |
21 #include "vp9/common/vp9_common_data.h" | 21 #include "vp9/common/vp9_common_data.h" |
22 #include "vp9/common/vp9_enums.h" | 22 #include "vp9/common/vp9_enums.h" |
23 #include "vp9/common/vp9_filter.h" | 23 #include "vp9/common/vp9_filter.h" |
24 #include "vp9/common/vp9_mv.h" | 24 #include "vp9/common/vp9_mv.h" |
25 #include "vp9/common/vp9_scale.h" | 25 #include "vp9/common/vp9_scale.h" |
26 #include "vp9/common/vp9_seg_common.h" | 26 #include "vp9/common/vp9_seg_common.h" |
27 #include "vp9/common/vp9_treecoder.h" | 27 #include "vp9/common/vp9_treecoder.h" |
28 | 28 |
29 #define BLOCK_SIZE_GROUPS 4 | 29 #define BLOCK_SIZE_GROUPS 4 |
30 #define MBSKIP_CONTEXTS 3 | 30 #define MBSKIP_CONTEXTS 3 |
| 31 #define INTER_MODE_CONTEXTS 7 |
31 | 32 |
32 /* Segment Feature Masks */ | 33 /* Segment Feature Masks */ |
33 #define MAX_MV_REF_CANDIDATES 2 | 34 #define MAX_MV_REF_CANDIDATES 2 |
34 | 35 |
35 #define INTRA_INTER_CONTEXTS 4 | 36 #define INTRA_INTER_CONTEXTS 4 |
36 #define COMP_INTER_CONTEXTS 5 | 37 #define COMP_INTER_CONTEXTS 5 |
37 #define REF_CONTEXTS 5 | 38 #define REF_CONTEXTS 5 |
38 | 39 |
39 typedef enum { | 40 typedef enum { |
40 PLANE_TYPE_Y_WITH_DC, | 41 PLANE_TYPE_Y = 0, |
41 PLANE_TYPE_UV, | 42 PLANE_TYPE_UV = 1, |
| 43 PLANE_TYPES |
42 } PLANE_TYPE; | 44 } PLANE_TYPE; |
43 | 45 |
44 typedef char ENTROPY_CONTEXT; | 46 typedef char ENTROPY_CONTEXT; |
45 | 47 |
46 typedef char PARTITION_CONTEXT; | 48 typedef char PARTITION_CONTEXT; |
47 | 49 |
48 static INLINE int combine_entropy_contexts(ENTROPY_CONTEXT a, | 50 static INLINE int combine_entropy_contexts(ENTROPY_CONTEXT a, |
49 ENTROPY_CONTEXT b) { | 51 ENTROPY_CONTEXT b) { |
50 return (a != 0) + (b != 0); | 52 return (a != 0) + (b != 0); |
51 } | 53 } |
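
Reviewer note, illustrative only and not part of the patch: combine_entropy_contexts() reduces the pair of neighbouring coefficient contexts to the count of non-zero ones, so the result is always 0, 1 or 2. A minimal sketch, assuming this header is on the include path:

#include <assert.h>
#include "vp9/common/vp9_blockd.h"

/* Hypothetical check: the combined context simply counts how many of the
 * two neighbouring contexts are non-zero. */
static void check_combine_entropy_contexts(void) {
  assert(combine_entropy_contexts(0, 0) == 0);
  assert(combine_entropy_contexts(3, 0) == 1);
  assert(combine_entropy_contexts(1, 2) == 2);
}
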
(...skipping 15 matching lines...) |
67 D207_PRED, // Directional 207 deg = 180 + 27 | 69 D207_PRED, // Directional 207 deg = 180 + 27 |
68 D63_PRED, // Directional 63 deg = round(arctan(2/1) * 180/pi) | 70 D63_PRED, // Directional 63 deg = round(arctan(2/1) * 180/pi) |
69 TM_PRED, // True-motion | 71 TM_PRED, // True-motion |
70 NEARESTMV, | 72 NEARESTMV, |
71 NEARMV, | 73 NEARMV, |
72 ZEROMV, | 74 ZEROMV, |
73 NEWMV, | 75 NEWMV, |
74 MB_MODE_COUNT | 76 MB_MODE_COUNT |
75 } MB_PREDICTION_MODE; | 77 } MB_PREDICTION_MODE; |
76 | 78 |
77 static INLINE int is_intra_mode(MB_PREDICTION_MODE mode) { | |
78 return mode <= TM_PRED; | |
79 } | |
80 | |
81 static INLINE int is_inter_mode(MB_PREDICTION_MODE mode) { | 79 static INLINE int is_inter_mode(MB_PREDICTION_MODE mode) { |
82 return mode >= NEARESTMV && mode <= NEWMV; | 80 return mode >= NEARESTMV && mode <= NEWMV; |
83 } | 81 } |
84 | 82 |
85 #define INTRA_MODES (TM_PRED + 1) | 83 #define INTRA_MODES (TM_PRED + 1) |
86 | 84 |
87 #define INTER_MODES (1 + NEWMV - NEARESTMV) | 85 #define INTER_MODES (1 + NEWMV - NEARESTMV) |
88 | 86 |
89 static INLINE int inter_mode_offset(MB_PREDICTION_MODE mode) { | 87 #define INTER_OFFSET(mode) ((mode) - NEARESTMV) |
90 return (mode - NEARESTMV); | 88 |
91 } | |
92 | 89 |
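
Reviewer note, illustrative only and not part of the patch: the inter_mode_offset() helper becomes the INTER_OFFSET() macro; both map the inter modes onto a zero-based index in enum order. A minimal sketch of the mapping, assuming this header is on the include path:

#include <assert.h>
#include "vp9/common/vp9_blockd.h"

/* Hypothetical check: INTER_OFFSET() maps the four inter modes onto 0..3
 * in enum order (NEARESTMV, NEARMV, ZEROMV, NEWMV), so INTER_MODES is 4. */
static void check_inter_offset(void) {
  assert(INTER_OFFSET(NEARESTMV) == 0);
  assert(INTER_OFFSET(NEARMV) == 1);
  assert(INTER_OFFSET(ZEROMV) == 2);
  assert(INTER_OFFSET(NEWMV) == 3);
  assert(INTER_MODES == 4);
}
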
93 /* For keyframes, intra block modes are predicted by the (already decoded) | 90 /* For keyframes, intra block modes are predicted by the (already decoded) |
94 modes for the Y blocks to the left and above us; for interframes, there | 91 modes for the Y blocks to the left and above us; for interframes, there |
95 is a single probability table. */ | 92 is a single probability table. */ |
96 | 93 |
97 typedef struct { | 94 typedef struct { |
98 MB_PREDICTION_MODE as_mode; | 95 MB_PREDICTION_MODE as_mode; |
99 int_mv as_mv[2]; // first, second inter predictor motion vectors | 96 int_mv as_mv[2]; // first, second inter predictor motion vectors |
100 } b_mode_info; | 97 } b_mode_info; |
101 | 98 |
(...skipping 49 matching lines...) |
151 } MODE_INFO; | 148 } MODE_INFO; |
152 | 149 |
153 static INLINE int is_inter_block(const MB_MODE_INFO *mbmi) { | 150 static INLINE int is_inter_block(const MB_MODE_INFO *mbmi) { |
154 return mbmi->ref_frame[0] > INTRA_FRAME; | 151 return mbmi->ref_frame[0] > INTRA_FRAME; |
155 } | 152 } |
156 | 153 |
157 static INLINE int has_second_ref(const MB_MODE_INFO *mbmi) { | 154 static INLINE int has_second_ref(const MB_MODE_INFO *mbmi) { |
158 return mbmi->ref_frame[1] > INTRA_FRAME; | 155 return mbmi->ref_frame[1] > INTRA_FRAME; |
159 } | 156 } |
160 | 157 |
| 158 static MB_PREDICTION_MODE left_block_mode(const MODE_INFO *cur_mi, |
| 159 const MODE_INFO *left_mi, int b) { |
| 160 if (b == 0 || b == 2) { |
| 161 if (!left_mi || is_inter_block(&left_mi->mbmi)) |
| 162 return DC_PRED; |
| 163 |
| 164 return left_mi->mbmi.sb_type < BLOCK_8X8 ? left_mi->bmi[b + 1].as_mode |
| 165 : left_mi->mbmi.mode; |
| 166 } else { |
| 167 assert(b == 1 || b == 3); |
| 168 return cur_mi->bmi[b - 1].as_mode; |
| 169 } |
| 170 } |
| 171 |
| 172 static MB_PREDICTION_MODE above_block_mode(const MODE_INFO *cur_mi, |
| 173 const MODE_INFO *above_mi, int b) { |
| 174 if (b == 0 || b == 1) { |
| 175 if (!above_mi || is_inter_block(&above_mi->mbmi)) |
| 176 return DC_PRED; |
| 177 |
| 178 return above_mi->mbmi.sb_type < BLOCK_8X8 ? above_mi->bmi[b + 2].as_mode |
| 179 : above_mi->mbmi.mode; |
| 180 } else { |
| 181 assert(b == 2 || b == 3); |
| 182 return cur_mi->bmi[b - 2].as_mode; |
| 183 } |
| 184 } |
| 185 |
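
Reviewer note, illustrative only: the new left_block_mode()/above_block_mode() helpers look up the intra-mode prediction context for one 4x4 sub-block. A hedged usage sketch, with the mode-info pointers assumed to come from the caller's mi grid:

#include "vp9/common/vp9_blockd.h"

/* Illustrative only: b indexes the four 4x4 sub-blocks of an 8x8 unit in
 * raster order (0 1 / 2 3). If the left/above neighbour is missing or
 * inter-coded, the helpers fall back to DC_PRED. */
static void get_intra_mode_context(const MODE_INFO *cur_mi,
                                   const MODE_INFO *left_mi,
                                   const MODE_INFO *above_mi, int b,
                                   MB_PREDICTION_MODE *left_mode,
                                   MB_PREDICTION_MODE *above_mode) {
  *left_mode = left_block_mode(cur_mi, left_mi, b);
  *above_mode = above_block_mode(cur_mi, above_mi, b);
}
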
161 enum mv_precision { | 186 enum mv_precision { |
162 MV_PRECISION_Q3, | 187 MV_PRECISION_Q3, |
163 MV_PRECISION_Q4 | 188 MV_PRECISION_Q4 |
164 }; | 189 }; |
165 | 190 |
166 #if CONFIG_ALPHA | 191 #if CONFIG_ALPHA |
167 enum { MAX_MB_PLANE = 4 }; | 192 enum { MAX_MB_PLANE = 4 }; |
168 #else | 193 #else |
169 enum { MAX_MB_PLANE = 3 }; | 194 enum { MAX_MB_PLANE = 3 }; |
170 #endif | 195 #endif |
171 | 196 |
172 struct buf_2d { | 197 struct buf_2d { |
173 uint8_t *buf; | 198 uint8_t *buf; |
174 int stride; | 199 int stride; |
175 }; | 200 }; |
176 | 201 |
177 struct macroblockd_plane { | 202 struct macroblockd_plane { |
178 DECLARE_ALIGNED(16, int16_t, qcoeff[64 * 64]); | 203 int16_t *dqcoeff; |
179 DECLARE_ALIGNED(16, int16_t, dqcoeff[64 * 64]); | |
180 DECLARE_ALIGNED(16, uint16_t, eobs[256]); | |
181 PLANE_TYPE plane_type; | 204 PLANE_TYPE plane_type; |
182 int subsampling_x; | 205 int subsampling_x; |
183 int subsampling_y; | 206 int subsampling_y; |
184 struct buf_2d dst; | 207 struct buf_2d dst; |
185 struct buf_2d pre[2]; | 208 struct buf_2d pre[2]; |
186 int16_t *dequant; | 209 int16_t *dequant; |
187 ENTROPY_CONTEXT *above_context; | 210 ENTROPY_CONTEXT *above_context; |
188 ENTROPY_CONTEXT *left_context; | 211 ENTROPY_CONTEXT *left_context; |
189 }; | 212 }; |
190 | 213 |
(...skipping 14 matching lines...) |
205 | 228 |
206 int up_available; | 229 int up_available; |
207 int left_available; | 230 int left_available; |
208 | 231 |
209 /* Distance of MB away from frame edges */ | 232 /* Distance of MB away from frame edges */ |
210 int mb_to_left_edge; | 233 int mb_to_left_edge; |
211 int mb_to_right_edge; | 234 int mb_to_right_edge; |
212 int mb_to_top_edge; | 235 int mb_to_top_edge; |
213 int mb_to_bottom_edge; | 236 int mb_to_bottom_edge; |
214 | 237 |
| 238 /* pointers to reference frames */ |
| 239 const YV12_BUFFER_CONFIG *ref_buf[2]; |
| 240 |
215 int lossless; | 241 int lossless; |
216 /* Inverse transform function pointers. */ | 242 /* Inverse transform function pointers. */ |
217 void (*itxm_add)(const int16_t *input, uint8_t *dest, int stride, int eob); | 243 void (*itxm_add)(const int16_t *input, uint8_t *dest, int stride, int eob); |
218 | 244 |
219 struct subpix_fn_table subpix; | 245 struct subpix_fn_table subpix; |
220 | 246 |
221 int corrupted; | 247 int corrupted; |
222 | 248 |
223 unsigned char sb_index; // index of 32x32 block inside the 64x64 block | |
224 unsigned char mb_index; // index of 16x16 block inside the 32x32 block | |
225 unsigned char b_index; // index of 8x8 block inside the 16x16 block | |
226 unsigned char ab_index; // index of 4x4 block inside the 8x8 block | |
227 | |
228 int q_index; | |
229 | |
230 /* Y,U,V,(A) */ | 249 /* Y,U,V,(A) */ |
231 ENTROPY_CONTEXT *above_context[MAX_MB_PLANE]; | 250 ENTROPY_CONTEXT *above_context[MAX_MB_PLANE]; |
232 ENTROPY_CONTEXT left_context[MAX_MB_PLANE][16]; | 251 ENTROPY_CONTEXT left_context[MAX_MB_PLANE][16]; |
233 | 252 |
234 PARTITION_CONTEXT *above_seg_context; | 253 PARTITION_CONTEXT *above_seg_context; |
235 PARTITION_CONTEXT left_seg_context[8]; | 254 PARTITION_CONTEXT left_seg_context[8]; |
236 } MACROBLOCKD; | 255 } MACROBLOCKD; |
237 | 256 |
238 | 257 |
239 | 258 |
240 static BLOCK_SIZE get_subsize(BLOCK_SIZE bsize, PARTITION_TYPE partition) { | 259 static BLOCK_SIZE get_subsize(BLOCK_SIZE bsize, PARTITION_TYPE partition) { |
241 const BLOCK_SIZE subsize = subsize_lookup[partition][bsize]; | 260 const BLOCK_SIZE subsize = subsize_lookup[partition][bsize]; |
242 assert(subsize < BLOCK_SIZES); | 261 assert(subsize < BLOCK_SIZES); |
243 return subsize; | 262 return subsize; |
244 } | 263 } |
245 | 264 |
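
Reviewer note, illustrative only and not part of the patch: get_subsize() is a table lookup into subsize_lookup[], so the child sizes of a partitioned 64x64 block follow directly. A hedged check, assuming the usual VP9 subsize table:

#include <assert.h>
#include "vp9/common/vp9_blockd.h"

/* Hypothetical check: expected child sizes of a 64x64 partition. */
static void check_get_subsize(void) {
  assert(get_subsize(BLOCK_64X64, PARTITION_NONE) == BLOCK_64X64);
  assert(get_subsize(BLOCK_64X64, PARTITION_HORZ) == BLOCK_64X32);
  assert(get_subsize(BLOCK_64X64, PARTITION_VERT) == BLOCK_32X64);
  assert(get_subsize(BLOCK_64X64, PARTITION_SPLIT) == BLOCK_32X32);
}
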
246 extern const TX_TYPE mode2txfm_map[MB_MODE_COUNT]; | 265 extern const TX_TYPE mode2txfm_map[MB_MODE_COUNT]; |
247 | 266 |
248 static INLINE TX_TYPE get_tx_type_4x4(PLANE_TYPE plane_type, | 267 static INLINE TX_TYPE get_tx_type_4x4(PLANE_TYPE plane_type, |
249 const MACROBLOCKD *xd, int ib) { | 268 const MACROBLOCKD *xd, int ib) { |
250 const MODE_INFO *const mi = xd->mi_8x8[0]; | 269 const MODE_INFO *const mi = xd->mi_8x8[0]; |
251 const MB_MODE_INFO *const mbmi = &mi->mbmi; | 270 const MB_MODE_INFO *const mbmi = &mi->mbmi; |
252 | 271 |
253 if (plane_type != PLANE_TYPE_Y_WITH_DC || | 272 if (plane_type != PLANE_TYPE_Y || xd->lossless || is_inter_block(mbmi)) |
254 xd->lossless || | |
255 is_inter_block(mbmi)) | |
256 return DCT_DCT; | 273 return DCT_DCT; |
257 | 274 |
258 return mode2txfm_map[mbmi->sb_type < BLOCK_8X8 ? | 275 return mode2txfm_map[mbmi->sb_type < BLOCK_8X8 ? mi->bmi[ib].as_mode |
259 mi->bmi[ib].as_mode : mbmi->mode]; | 276 : mbmi->mode]; |
260 } | 277 } |
261 | 278 |
262 static INLINE TX_TYPE get_tx_type_8x8(PLANE_TYPE plane_type, | 279 static INLINE TX_TYPE get_tx_type_8x8(PLANE_TYPE plane_type, |
263 const MACROBLOCKD *xd) { | 280 const MACROBLOCKD *xd) { |
264 return plane_type == PLANE_TYPE_Y_WITH_DC ? | 281 return plane_type == PLANE_TYPE_Y ? mode2txfm_map[xd->mi_8x8[0]->mbmi.mode] |
265 mode2txfm_map[xd->mi_8x8[0]->mbmi.mode] : DCT_DCT; | 282 : DCT_DCT; |
266 } | 283 } |
267 | 284 |
268 static INLINE TX_TYPE get_tx_type_16x16(PLANE_TYPE plane_type, | 285 static INLINE TX_TYPE get_tx_type_16x16(PLANE_TYPE plane_type, |
269 const MACROBLOCKD *xd) { | 286 const MACROBLOCKD *xd) { |
270 return plane_type == PLANE_TYPE_Y_WITH_DC ? | 287 return plane_type == PLANE_TYPE_Y ? mode2txfm_map[xd->mi_8x8[0]->mbmi.mode] |
271 mode2txfm_map[xd->mi_8x8[0]->mbmi.mode] : DCT_DCT; | 288 : DCT_DCT; |
272 } | 289 } |
273 | 290 |
274 static void setup_block_dptrs(MACROBLOCKD *xd, int ss_x, int ss_y) { | 291 static void setup_block_dptrs(MACROBLOCKD *xd, int ss_x, int ss_y) { |
275 int i; | 292 int i; |
276 | 293 |
277 for (i = 0; i < MAX_MB_PLANE; i++) { | 294 for (i = 0; i < MAX_MB_PLANE; i++) { |
278 xd->plane[i].plane_type = i ? PLANE_TYPE_UV : PLANE_TYPE_Y_WITH_DC; | 295 xd->plane[i].plane_type = i ? PLANE_TYPE_UV : PLANE_TYPE_Y; |
279 xd->plane[i].subsampling_x = i ? ss_x : 0; | 296 xd->plane[i].subsampling_x = i ? ss_x : 0; |
280 xd->plane[i].subsampling_y = i ? ss_y : 0; | 297 xd->plane[i].subsampling_y = i ? ss_y : 0; |
281 } | 298 } |
282 #if CONFIG_ALPHA | 299 #if CONFIG_ALPHA |
283 // TODO(jkoleszar): Using the Y w/h for now | 300 // TODO(jkoleszar): Using the Y w/h for now |
| 301 xd->plane[3].plane_type = PLANE_TYPE_Y; |
284 xd->plane[3].subsampling_x = 0; | 302 xd->plane[3].subsampling_x = 0; |
285 xd->plane[3].subsampling_y = 0; | 303 xd->plane[3].subsampling_y = 0; |
286 #endif | 304 #endif |
287 } | 305 } |
288 | 306 |
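
Reviewer note, illustrative only: setup_block_dptrs() now assigns PLANE_TYPE_Y instead of the removed PLANE_TYPE_Y_WITH_DC. A hedged usage sketch for 4:2:0 input:

#include "vp9/common/vp9_blockd.h"

/* Illustrative only: for 4:2:0 content both chroma planes are subsampled in
 * x and y, so plane 0 keeps PLANE_TYPE_Y with no subsampling while planes
 * 1/2 become PLANE_TYPE_UV with subsampling_x == subsampling_y == 1. */
static void init_planes_420(MACROBLOCKD *xd) {
  setup_block_dptrs(xd, 1, 1);
}
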
| 307 static TX_SIZE get_uv_tx_size_impl(TX_SIZE y_tx_size, BLOCK_SIZE bsize) { |
| 308 if (bsize < BLOCK_8X8) { |
| 309 return TX_4X4; |
| 310 } else { |
| 311 // TODO(dkovalev): Assuming YUV420 (ss_x == 1, ss_y == 1) |
| 312 const BLOCK_SIZE plane_bsize = ss_size_lookup[bsize][1][1]; |
| 313 return MIN(y_tx_size, max_txsize_lookup[plane_bsize]); |
| 314 } |
| 315 } |
289 | 316 |
290 static INLINE TX_SIZE get_uv_tx_size(const MB_MODE_INFO *mbmi) { | 317 static TX_SIZE get_uv_tx_size(const MB_MODE_INFO *mbmi) { |
291 return MIN(mbmi->tx_size, max_uv_txsize_lookup[mbmi->sb_type]); | 318 return get_uv_tx_size_impl(mbmi->tx_size, mbmi->sb_type); |
292 } | 319 } |
293 | 320 |
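
Reviewer note, illustrative only and not part of the patch: get_uv_tx_size_impl() clamps the luma transform size to the largest transform that fits the subsampled chroma block (and, per the TODO, assumes 4:2:0). A hedged check of one case:

#include <assert.h>
#include "vp9/common/vp9_blockd.h"

/* Hypothetical check: a 16x16 luma block coded with a 16x16 transform has
 * 8x8 chroma blocks under 4:2:0, so the chroma transform is clamped to
 * TX_8X8. Only sb_type and tx_size are read by get_uv_tx_size(). */
static void check_uv_tx_size(void) {
  MB_MODE_INFO mbmi;
  mbmi.sb_type = BLOCK_16X16;
  mbmi.tx_size = TX_16X16;
  assert(get_uv_tx_size(&mbmi) == TX_8X8);
}
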
294 static BLOCK_SIZE get_plane_block_size(BLOCK_SIZE bsize, | 321 static BLOCK_SIZE get_plane_block_size(BLOCK_SIZE bsize, |
295 const struct macroblockd_plane *pd) { | 322 const struct macroblockd_plane *pd) { |
296 BLOCK_SIZE bs = ss_size_lookup[bsize][pd->subsampling_x][pd->subsampling_y]; | 323 BLOCK_SIZE bs = ss_size_lookup[bsize][pd->subsampling_x][pd->subsampling_y]; |
297 assert(bs < BLOCK_SIZES); | 324 assert(bs < BLOCK_SIZES); |
298 return bs; | 325 return bs; |
299 } | 326 } |
300 | 327 |
301 static INLINE int plane_block_width(BLOCK_SIZE bsize, | |
302 const struct macroblockd_plane* plane) { | |
303 return 4 << (b_width_log2(bsize) - plane->subsampling_x); | |
304 } | |
305 | |
306 static INLINE int plane_block_height(BLOCK_SIZE bsize, | |
307 const struct macroblockd_plane* plane) { | |
308 return 4 << (b_height_log2(bsize) - plane->subsampling_y); | |
309 } | |
310 | |
311 typedef void (*foreach_transformed_block_visitor)(int plane, int block, | 328 typedef void (*foreach_transformed_block_visitor)(int plane, int block, |
312 BLOCK_SIZE plane_bsize, | 329 BLOCK_SIZE plane_bsize, |
313 TX_SIZE tx_size, | 330 TX_SIZE tx_size, |
314 void *arg); | 331 void *arg); |
315 | 332 |
316 static INLINE void foreach_transformed_block_in_plane( | 333 static INLINE void foreach_transformed_block_in_plane( |
317 const MACROBLOCKD *const xd, BLOCK_SIZE bsize, int plane, | 334 const MACROBLOCKD *const xd, BLOCK_SIZE bsize, int plane, |
318 foreach_transformed_block_visitor visit, void *arg) { | 335 foreach_transformed_block_visitor visit, void *arg) { |
319 const struct macroblockd_plane *const pd = &xd->plane[plane]; | 336 const struct macroblockd_plane *const pd = &xd->plane[plane]; |
320 const MB_MODE_INFO* mbmi = &xd->mi_8x8[0]->mbmi; | 337 const MB_MODE_INFO* mbmi = &xd->mi_8x8[0]->mbmi; |
(...skipping 53 matching lines...) |
374 | 391 |
375 static INLINE void foreach_transformed_block_uv( | 392 static INLINE void foreach_transformed_block_uv( |
376 const MACROBLOCKD* const xd, BLOCK_SIZE bsize, | 393 const MACROBLOCKD* const xd, BLOCK_SIZE bsize, |
377 foreach_transformed_block_visitor visit, void *arg) { | 394 foreach_transformed_block_visitor visit, void *arg) { |
378 int plane; | 395 int plane; |
379 | 396 |
380 for (plane = 1; plane < MAX_MB_PLANE; plane++) | 397 for (plane = 1; plane < MAX_MB_PLANE; plane++) |
381 foreach_transformed_block_in_plane(xd, bsize, plane, visit, arg); | 398 foreach_transformed_block_in_plane(xd, bsize, plane, visit, arg); |
382 } | 399 } |
383 | 400 |
384 static int raster_block_offset(BLOCK_SIZE plane_bsize, | |
385 int raster_block, int stride) { | |
386 const int bw = b_width_log2(plane_bsize); | |
387 const int y = 4 * (raster_block >> bw); | |
388 const int x = 4 * (raster_block & ((1 << bw) - 1)); | |
389 return y * stride + x; | |
390 } | |
391 static int16_t* raster_block_offset_int16(BLOCK_SIZE plane_bsize, | |
392 int raster_block, int16_t *base) { | |
393 const int stride = 4 << b_width_log2(plane_bsize); | |
394 return base + raster_block_offset(plane_bsize, raster_block, stride); | |
395 } | |
396 static uint8_t* raster_block_offset_uint8(BLOCK_SIZE plane_bsize, | |
397 int raster_block, uint8_t *base, | |
398 int stride) { | |
399 return base + raster_block_offset(plane_bsize, raster_block, stride); | |
400 } | |
401 | |
402 static int txfrm_block_to_raster_block(BLOCK_SIZE plane_bsize, | |
403 TX_SIZE tx_size, int block) { | |
404 const int bwl = b_width_log2(plane_bsize); | |
405 const int tx_cols_log2 = bwl - tx_size; | |
406 const int tx_cols = 1 << tx_cols_log2; | |
407 const int raster_mb = block >> (tx_size << 1); | |
408 const int x = (raster_mb & (tx_cols - 1)) << tx_size; | |
409 const int y = (raster_mb >> tx_cols_log2) << tx_size; | |
410 return x + (y << bwl); | |
411 } | |
412 | |
413 static void txfrm_block_to_raster_xy(BLOCK_SIZE plane_bsize, | 401 static void txfrm_block_to_raster_xy(BLOCK_SIZE plane_bsize, |
414 TX_SIZE tx_size, int block, | 402 TX_SIZE tx_size, int block, |
415 int *x, int *y) { | 403 int *x, int *y) { |
416 const int bwl = b_width_log2(plane_bsize); | 404 const int bwl = b_width_log2(plane_bsize); |
417 const int tx_cols_log2 = bwl - tx_size; | 405 const int tx_cols_log2 = bwl - tx_size; |
418 const int tx_cols = 1 << tx_cols_log2; | 406 const int tx_cols = 1 << tx_cols_log2; |
419 const int raster_mb = block >> (tx_size << 1); | 407 const int raster_mb = block >> (tx_size << 1); |
420 *x = (raster_mb & (tx_cols - 1)) << tx_size; | 408 *x = (raster_mb & (tx_cols - 1)) << tx_size; |
421 *y = (raster_mb >> tx_cols_log2) << tx_size; | 409 *y = (raster_mb >> tx_cols_log2) << tx_size; |
422 } | 410 } |
423 | 411 |
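
Reviewer note, illustrative only and not part of the patch: txfrm_block_to_raster_xy() converts a transform-block index (counted in 4x4 units) into 4x4 column/row offsets within the plane block. A hedged worked example:

#include <assert.h>
#include "vp9/common/vp9_blockd.h"

/* Hypothetical check: in a 16x16 plane block split into 8x8 transforms,
 * block indices advance in 4x4 units (step 4), so block 8 is the third
 * transform block and lands at (x, y) == (0, 2) in 4x4 units, i.e. pixel
 * offset (0, 8). */
static void check_txfrm_block_to_raster_xy(void) {
  int x, y;
  txfrm_block_to_raster_xy(BLOCK_16X16, TX_8X8, 8, &x, &y);
  assert(x == 0 && y == 2);
}
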
424 static void extend_for_intra(MACROBLOCKD* const xd, BLOCK_SIZE plane_bsize, | 412 static void extend_for_intra(MACROBLOCKD *xd, BLOCK_SIZE plane_bsize, |
425 int plane, int block, TX_SIZE tx_size) { | 413 int plane, int aoff, int loff) { |
426 struct macroblockd_plane *const pd = &xd->plane[plane]; | 414 struct macroblockd_plane *const pd = &xd->plane[plane]; |
427 uint8_t *const buf = pd->dst.buf; | 415 uint8_t *const buf = pd->dst.buf; |
428 const int stride = pd->dst.stride; | 416 const int stride = pd->dst.stride; |
429 | 417 const int x = aoff * 4 - 1; |
430 int x, y; | 418 const int y = loff * 4 - 1; |
431 txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &x, &y); | |
432 x = x * 4 - 1; | |
433 y = y * 4 - 1; | |
434 // Copy a pixel into the umv if we are in a situation where the block size | 419 // Copy a pixel into the umv if we are in a situation where the block size |
435 // extends into the UMV. | 420 // extends into the UMV. |
436 // TODO(JBB): Should be able to do the full extend in place so we don't have | 421 // TODO(JBB): Should be able to do the full extend in place so we don't have |
437 // to do this multiple times. | 422 // to do this multiple times. |
438 if (xd->mb_to_right_edge < 0) { | 423 if (xd->mb_to_right_edge < 0) { |
439 const int bw = 4 << b_width_log2(plane_bsize); | 424 const int bw = 4 * num_4x4_blocks_wide_lookup[plane_bsize]; |
440 const int umv_border_start = bw + (xd->mb_to_right_edge >> | 425 const int umv_border_start = bw + (xd->mb_to_right_edge >> |
441 (3 + pd->subsampling_x)); | 426 (3 + pd->subsampling_x)); |
442 | 427 |
443 if (x + bw > umv_border_start) | 428 if (x + bw > umv_border_start) |
444 vpx_memset(&buf[y * stride + umv_border_start], | 429 vpx_memset(&buf[y * stride + umv_border_start], |
445 buf[y * stride + umv_border_start - 1], bw); | 430 buf[y * stride + umv_border_start - 1], bw); |
446 } | 431 } |
447 | 432 |
448 if (xd->mb_to_bottom_edge < 0) { | 433 if (xd->mb_to_bottom_edge < 0) { |
449 if (xd->left_available || x >= 0) { | 434 if (xd->left_available || x >= 0) { |
450 const int bh = 4 << b_height_log2(plane_bsize); | 435 const int bh = 4 * num_4x4_blocks_high_lookup[plane_bsize]; |
451 const int umv_border_start = | 436 const int umv_border_start = |
452 bh + (xd->mb_to_bottom_edge >> (3 + pd->subsampling_y)); | 437 bh + (xd->mb_to_bottom_edge >> (3 + pd->subsampling_y)); |
453 | 438 |
454 if (y + bh > umv_border_start) { | 439 if (y + bh > umv_border_start) { |
455 const uint8_t c = buf[(umv_border_start - 1) * stride + x]; | 440 const uint8_t c = buf[(umv_border_start - 1) * stride + x]; |
456 uint8_t *d = &buf[umv_border_start * stride + x]; | 441 uint8_t *d = &buf[umv_border_start * stride + x]; |
457 int i; | 442 int i; |
458 for (i = 0; i < bh; ++i, d += stride) | 443 for (i = 0; i < bh; ++i, d += stride) |
459 *d = c; | 444 *d = c; |
460 } | 445 } |
461 } | 446 } |
462 } | 447 } |
463 } | 448 } |
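
Reviewer note, illustrative only (the call sites are not shown in this file): with the new signature the caller passes the block's 4x4 column/row offsets instead of the raw block index and transform size. A hedged sketch of how the old-style arguments map onto the new ones:

#include "vp9/common/vp9_blockd.h"

/* Illustrative only: derive (aoff, loff) from the transform-block index and
 * then extend the reconstructed border into the UMV region. */
static void extend_block_for_intra(MACROBLOCKD *xd, BLOCK_SIZE plane_bsize,
                                   int plane, int block, TX_SIZE tx_size) {
  int aoff, loff;
  txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &aoff, &loff);
  extend_for_intra(xd, plane_bsize, plane, aoff, loff);
}
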
464 static void set_contexts_on_border(MACROBLOCKD *xd, | |
465 struct macroblockd_plane *pd, | |
466 BLOCK_SIZE plane_bsize, | |
467 int tx_size_in_blocks, int has_eob, | |
468 int aoff, int loff, | |
469 ENTROPY_CONTEXT *A, ENTROPY_CONTEXT *L) { | |
470 int mi_blocks_wide = num_4x4_blocks_wide_lookup[plane_bsize]; | |
471 int mi_blocks_high = num_4x4_blocks_high_lookup[plane_bsize]; | |
472 int above_contexts = tx_size_in_blocks; | |
473 int left_contexts = tx_size_in_blocks; | |
474 int pt; | |
475 | 449 |
476 // xd->mb_to_right_edge is in units of pixels * 8. This converts | 450 static void set_contexts(const MACROBLOCKD *xd, struct macroblockd_plane *pd, |
477 // it to 4x4 block sizes. | |
478 if (xd->mb_to_right_edge < 0) | |
479 mi_blocks_wide += (xd->mb_to_right_edge >> (5 + pd->subsampling_x)); | |
480 | |
481 if (xd->mb_to_bottom_edge < 0) | |
482 mi_blocks_high += (xd->mb_to_bottom_edge >> (5 + pd->subsampling_y)); | |
483 | |
484 // this code attempts to avoid copying into contexts that are outside | |
485 // our border. Any blocks that do are set to 0... | |
486 if (above_contexts + aoff > mi_blocks_wide) | |
487 above_contexts = mi_blocks_wide - aoff; | |
488 | |
489 if (left_contexts + loff > mi_blocks_high) | |
490 left_contexts = mi_blocks_high - loff; | |
491 | |
492 for (pt = 0; pt < above_contexts; pt++) | |
493 A[pt] = has_eob; | |
494 for (pt = above_contexts; pt < tx_size_in_blocks; pt++) | |
495 A[pt] = 0; | |
496 for (pt = 0; pt < left_contexts; pt++) | |
497 L[pt] = has_eob; | |
498 for (pt = left_contexts; pt < tx_size_in_blocks; pt++) | |
499 L[pt] = 0; | |
500 } | |
501 | |
502 static void set_contexts(MACROBLOCKD *xd, struct macroblockd_plane *pd, | |
503 BLOCK_SIZE plane_bsize, TX_SIZE tx_size, | 451 BLOCK_SIZE plane_bsize, TX_SIZE tx_size, |
504 int has_eob, int aoff, int loff) { | 452 int has_eob, int aoff, int loff) { |
505 ENTROPY_CONTEXT *const A = pd->above_context + aoff; | 453 ENTROPY_CONTEXT *const a = pd->above_context + aoff; |
506 ENTROPY_CONTEXT *const L = pd->left_context + loff; | 454 ENTROPY_CONTEXT *const l = pd->left_context + loff; |
507 const int tx_size_in_blocks = 1 << tx_size; | 455 const int tx_size_in_blocks = 1 << tx_size; |
508 | 456 |
509 if (xd->mb_to_right_edge < 0 || xd->mb_to_bottom_edge < 0) { | 457 // above |
510 set_contexts_on_border(xd, pd, plane_bsize, tx_size_in_blocks, has_eob, | 458 if (has_eob && xd->mb_to_right_edge < 0) { |
511 aoff, loff, A, L); | 459 int i; |
| 460 const int blocks_wide = num_4x4_blocks_wide_lookup[plane_bsize] + |
| 461 (xd->mb_to_right_edge >> (5 + pd->subsampling_x)); |
| 462 int above_contexts = tx_size_in_blocks; |
| 463 if (above_contexts + aoff > blocks_wide) |
| 464 above_contexts = blocks_wide - aoff; |
| 465 |
| 466 for (i = 0; i < above_contexts; ++i) |
| 467 a[i] = has_eob; |
| 468 for (i = above_contexts; i < tx_size_in_blocks; ++i) |
| 469 a[i] = 0; |
512 } else { | 470 } else { |
513 vpx_memset(A, has_eob, sizeof(ENTROPY_CONTEXT) * tx_size_in_blocks); | 471 vpx_memset(a, has_eob, sizeof(ENTROPY_CONTEXT) * tx_size_in_blocks); |
514 vpx_memset(L, has_eob, sizeof(ENTROPY_CONTEXT) * tx_size_in_blocks); | 472 } |
| 473 |
| 474 // left |
| 475 if (has_eob && xd->mb_to_bottom_edge < 0) { |
| 476 int i; |
| 477 const int blocks_high = num_4x4_blocks_high_lookup[plane_bsize] + |
| 478 (xd->mb_to_bottom_edge >> (5 + pd->subsampling_y)); |
| 479 int left_contexts = tx_size_in_blocks; |
| 480 if (left_contexts + loff > blocks_high) |
| 481 left_contexts = blocks_high - loff; |
| 482 |
| 483 for (i = 0; i < left_contexts; ++i) |
| 484 l[i] = has_eob; |
| 485 for (i = left_contexts; i < tx_size_in_blocks; ++i) |
| 486 l[i] = 0; |
| 487 } else { |
| 488 vpx_memset(l, has_eob, sizeof(ENTROPY_CONTEXT) * tx_size_in_blocks); |
515 } | 489 } |
516 } | 490 } |
517 | 491 |
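
Reviewer note, illustrative only: the rewritten set_contexts() folds the old set_contexts_on_border() logic inline, clamping the above/left updates at the right/bottom frame edges and zeroing any context entries that fall outside the visible frame. A hedged usage sketch:

#include "vp9/common/vp9_blockd.h"

/* Illustrative only: after coding one transform block of a plane at 4x4
 * offsets (aoff, loff), record whether it produced any non-zero
 * coefficients in the above/left entropy contexts. */
static void mark_block_coded(MACROBLOCKD *xd, int plane,
                             BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
                             int eob, int aoff, int loff) {
  set_contexts(xd, &xd->plane[plane], plane_bsize, tx_size, eob > 0,
               aoff, loff);
}
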
518 static int get_tx_eob(const struct segmentation *seg, int segment_id, | 492 static int get_tx_eob(const struct segmentation *seg, int segment_id, |
519 TX_SIZE tx_size) { | 493 TX_SIZE tx_size) { |
520 const int eob_max = 16 << (tx_size << 1); | 494 const int eob_max = 16 << (tx_size << 1); |
521 return vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP) ? 0 : eob_max; | 495 return vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP) ? 0 : eob_max; |
522 } | 496 } |
523 | 497 |
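
Reviewer note, illustrative only and not part of the patch: 16 << (tx_size << 1) is the coefficient count of each transform, so the end-of-block cap is 16, 64, 256 and 1024 for TX_4X4..TX_32X32, and 0 whenever the segment is coded as SKIP. A hedged check:

#include <assert.h>
#include "vp9/common/vp9_blockd.h"

/* Hypothetical check: maximum eob values for the smallest and largest
 * transforms when the SKIP segment feature is not active. */
static void check_tx_eob(const struct segmentation *seg, int segment_id) {
  if (!vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP)) {
    assert(get_tx_eob(seg, segment_id, TX_4X4) == 16);
    assert(get_tx_eob(seg, segment_id, TX_32X32) == 1024);
  }
}
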
524 #endif // VP9_COMMON_VP9_BLOCKD_H_ | 498 #endif // VP9_COMMON_VP9_BLOCKD_H_ |