/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <assert.h>
#include <stdlib.h>  // qsort()

#include "./vp9_rtcd.h"
#include "./vpx_scale_rtcd.h"

#include "vpx_mem/vpx_mem.h"
#include "vpx_scale/vpx_scale.h"

#include "vp9/common/vp9_alloccommon.h"
#include "vp9/common/vp9_common.h"
#include "vp9/common/vp9_entropy.h"
#include "vp9/common/vp9_entropymode.h"
#include "vp9/common/vp9_idct.h"
#include "vp9/common/vp9_pred_common.h"
#include "vp9/common/vp9_quant_common.h"
#include "vp9/common/vp9_reconintra.h"
#include "vp9/common/vp9_reconinter.h"
#include "vp9/common/vp9_seg_common.h"
#include "vp9/common/vp9_tile_common.h"

#include "vp9/decoder/vp9_dboolhuff.h"
#include "vp9/decoder/vp9_decodeframe.h"
#include "vp9/decoder/vp9_detokenize.h"
#include "vp9/decoder/vp9_decodemv.h"
#include "vp9/decoder/vp9_dsubexp.h"
#include "vp9/decoder/vp9_onyxd_int.h"
#include "vp9/decoder/vp9_read_bit_buffer.h"
#include "vp9/decoder/vp9_thread.h"

typedef struct TileWorkerData {
  VP9_COMMON *cm;
  vp9_reader bit_reader;
  DECLARE_ALIGNED(16, MACROBLOCKD, xd);
  DECLARE_ALIGNED(16, int16_t, dqcoeff[MAX_MB_PLANE][64 * 64]);
} TileWorkerData;

static int read_be32(const uint8_t *p) {
  // Use unsigned arithmetic so that shifting p[0] into the top byte is
  // well defined even when p[0] >= 0x80.
  return (int)(((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
               ((uint32_t)p[2] << 8) | (uint32_t)p[3]);
}

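// Compound prediction requires two references with opposite sign bias; it is
// allowed only when at least one reference frame differs in sign bias from
// LAST_FRAME (ref_frame_sign_bias[1]).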
static int is_compound_reference_allowed(const VP9_COMMON *cm) {
  int i;
  for (i = 1; i < REFS_PER_FRAME; ++i)
    if (cm->ref_frame_sign_bias[i + 1] != cm->ref_frame_sign_bias[1])
      return 1;

  return 0;
}

static void setup_compound_reference(VP9_COMMON *cm) {
  if (cm->ref_frame_sign_bias[LAST_FRAME] ==
          cm->ref_frame_sign_bias[GOLDEN_FRAME]) {
    cm->comp_fixed_ref = ALTREF_FRAME;
    cm->comp_var_ref[0] = LAST_FRAME;
    cm->comp_var_ref[1] = GOLDEN_FRAME;
  } else if (cm->ref_frame_sign_bias[LAST_FRAME] ==
                 cm->ref_frame_sign_bias[ALTREF_FRAME]) {
    cm->comp_fixed_ref = GOLDEN_FRAME;
    cm->comp_var_ref[0] = LAST_FRAME;
    cm->comp_var_ref[1] = ALTREF_FRAME;
  } else {
    cm->comp_fixed_ref = LAST_FRAME;
    cm->comp_var_ref[0] = GOLDEN_FRAME;
    cm->comp_var_ref[1] = ALTREF_FRAME;
  }
}

// len == 0 is not allowed
static int read_is_valid(const uint8_t *start, size_t len,
                         const uint8_t *end) {
  return start + len > start && start + len <= end;
}

static int decode_unsigned_max(struct vp9_read_bit_buffer *rb, int max) {
  const int data = vp9_rb_read_literal(rb, get_unsigned_bits(max));
  return data > max ? max : data;
}

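// The TX mode is coded as a 2-bit literal, plus one extra bit to distinguish
// ALLOW_32X32 from TX_MODE_SELECT.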
static TX_MODE read_tx_mode(vp9_reader *r) {
  TX_MODE tx_mode = vp9_read_literal(r, 2);
  if (tx_mode == ALLOW_32X32)
    tx_mode += vp9_read_bit(r);
  return tx_mode;
}

static void read_tx_mode_probs(struct tx_probs *tx_probs, vp9_reader *r) {
  int i, j;

  for (i = 0; i < TX_SIZE_CONTEXTS; ++i)
    for (j = 0; j < TX_SIZES - 3; ++j)
      vp9_diff_update_prob(r, &tx_probs->p8x8[i][j]);

  for (i = 0; i < TX_SIZE_CONTEXTS; ++i)
    for (j = 0; j < TX_SIZES - 2; ++j)
      vp9_diff_update_prob(r, &tx_probs->p16x16[i][j]);

  for (i = 0; i < TX_SIZE_CONTEXTS; ++i)
    for (j = 0; j < TX_SIZES - 1; ++j)
      vp9_diff_update_prob(r, &tx_probs->p32x32[i][j]);
}

static void read_switchable_interp_probs(FRAME_CONTEXT *fc, vp9_reader *r) {
  int i, j;
  for (j = 0; j < SWITCHABLE_FILTER_CONTEXTS; ++j)
    for (i = 0; i < SWITCHABLE_FILTERS - 1; ++i)
      vp9_diff_update_prob(r, &fc->switchable_interp_prob[j][i]);
}

static void read_inter_mode_probs(FRAME_CONTEXT *fc, vp9_reader *r) {
  int i, j;
  for (i = 0; i < INTER_MODE_CONTEXTS; ++i)
    for (j = 0; j < INTER_MODES - 1; ++j)
      vp9_diff_update_prob(r, &fc->inter_mode_probs[i][j]);
}

static REFERENCE_MODE read_reference_mode(VP9_COMMON *cm, vp9_reader *r) {
  if (is_compound_reference_allowed(cm)) {
    REFERENCE_MODE mode = vp9_read_bit(r);
    if (mode)
      mode += vp9_read_bit(r);
    setup_compound_reference(cm);
    return mode;
  } else {
    return SINGLE_REFERENCE;
  }
}

static void read_reference_mode_probs(VP9_COMMON *cm, vp9_reader *r) {
  int i;
  if (cm->reference_mode == REFERENCE_MODE_SELECT)
    for (i = 0; i < COMP_INTER_CONTEXTS; i++)
      vp9_diff_update_prob(r, &cm->fc.comp_inter_prob[i]);

  if (cm->reference_mode != COMPOUND_REFERENCE)
    for (i = 0; i < REF_CONTEXTS; i++) {
      vp9_diff_update_prob(r, &cm->fc.single_ref_prob[i][0]);
      vp9_diff_update_prob(r, &cm->fc.single_ref_prob[i][1]);
    }

  if (cm->reference_mode != SINGLE_REFERENCE)
    for (i = 0; i < REF_CONTEXTS; i++)
      vp9_diff_update_prob(r, &cm->fc.comp_ref_prob[i]);
}

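// When an update is signaled, MV probabilities are transmitted as 7-bit
// values and stored with an implicit low bit of 1, so the result is always
// odd.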
static void update_mv_probs(vp9_prob *p, int n, vp9_reader *r) {
  int i;
  for (i = 0; i < n; ++i)
    if (vp9_read(r, NMV_UPDATE_PROB))
      p[i] = (vp9_read_literal(r, 7) << 1) | 1;
}

static void read_mv_probs(nmv_context *ctx, int allow_hp, vp9_reader *r) {
  int i, j;

  update_mv_probs(ctx->joints, MV_JOINTS - 1, r);

  for (i = 0; i < 2; ++i) {
    nmv_component *const comp_ctx = &ctx->comps[i];
    update_mv_probs(&comp_ctx->sign, 1, r);
    update_mv_probs(comp_ctx->classes, MV_CLASSES - 1, r);
    update_mv_probs(comp_ctx->class0, CLASS0_SIZE - 1, r);
    update_mv_probs(comp_ctx->bits, MV_OFFSET_BITS, r);
  }

  for (i = 0; i < 2; ++i) {
    nmv_component *const comp_ctx = &ctx->comps[i];
    for (j = 0; j < CLASS0_SIZE; ++j)
      update_mv_probs(comp_ctx->class0_fp[j], MV_FP_SIZE - 1, r);
    update_mv_probs(comp_ctx->fp, 3, r);
  }

  if (allow_hp) {
    for (i = 0; i < 2; ++i) {
      nmv_component *const comp_ctx = &ctx->comps[i];
      update_mv_probs(&comp_ctx->class0_hp, 1, r);
      update_mv_probs(&comp_ctx->hp, 1, r);
    }
  }
}

static void setup_plane_dequants(VP9_COMMON *cm, MACROBLOCKD *xd,
                                 int q_index) {
  int i;
  xd->plane[0].dequant = cm->y_dequant[q_index];

  for (i = 1; i < MAX_MB_PLANE; i++)
    xd->plane[i].dequant = cm->uv_dequant[q_index];
}

// Allocate storage for each tile column.
// TODO(jzern): when max_threads <= 1 the same storage could be used for each
// tile.
static void alloc_tile_storage(VP9D_COMP *pbi, int tile_rows, int tile_cols) {
  VP9_COMMON *const cm = &pbi->common;
  const int aligned_mi_cols = mi_cols_aligned_to_sb(cm->mi_cols);
  int i, tile_row, tile_col;

  CHECK_MEM_ERROR(cm, pbi->mi_streams,
                  vpx_realloc(pbi->mi_streams, tile_rows * tile_cols *
                              sizeof(*pbi->mi_streams)));
  for (tile_row = 0; tile_row < tile_rows; ++tile_row) {
    for (tile_col = 0; tile_col < tile_cols; ++tile_col) {
      TileInfo tile;
      vp9_tile_init(&tile, cm, tile_row, tile_col);
      pbi->mi_streams[tile_row * tile_cols + tile_col] =
          &cm->mi[tile.mi_row_start * cm->mode_info_stride
                  + tile.mi_col_start];
    }
  }

  // 2 contexts per 'mi unit', so that we have one context per 4x4 txfm
  // block where mi unit size is 8x8.
  CHECK_MEM_ERROR(cm, pbi->above_context[0],
                  vpx_realloc(pbi->above_context[0],
                              sizeof(*pbi->above_context[0]) * MAX_MB_PLANE *
                              2 * aligned_mi_cols));
  for (i = 1; i < MAX_MB_PLANE; ++i) {
    pbi->above_context[i] = pbi->above_context[0] +
                            i * sizeof(*pbi->above_context[0]) *
                            2 * aligned_mi_cols;
  }

  // This is sized based on the entire frame. Each tile operates within its
  // column bounds.
  CHECK_MEM_ERROR(cm, pbi->above_seg_context,
                  vpx_realloc(pbi->above_seg_context,
                              sizeof(*pbi->above_seg_context) *
                              aligned_mi_cols));
}

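// Applies the inverse transform of the given size to one block and adds the
// result to the prediction in 'dst'. The dequantized coefficients are zeroed
// afterwards (only as many as the eob implies could be nonzero) so the
// buffer is clean for the next block.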
static void inverse_transform_block(MACROBLOCKD *xd, int plane, int block,
                                    TX_SIZE tx_size, uint8_t *dst, int stride,
                                    int eob) {
  struct macroblockd_plane *const pd = &xd->plane[plane];
  if (eob > 0) {
    TX_TYPE tx_type;
    const int plane_type = pd->plane_type;
    int16_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
    switch (tx_size) {
      case TX_4X4:
        tx_type = get_tx_type_4x4(plane_type, xd, block);
        if (tx_type == DCT_DCT)
          xd->itxm_add(dqcoeff, dst, stride, eob);
        else
          vp9_iht4x4_16_add(dqcoeff, dst, stride, tx_type);
        break;
      case TX_8X8:
        tx_type = get_tx_type_8x8(plane_type, xd);
        vp9_iht8x8_add(tx_type, dqcoeff, dst, stride, eob);
        break;
      case TX_16X16:
        tx_type = get_tx_type_16x16(plane_type, xd);
        vp9_iht16x16_add(tx_type, dqcoeff, dst, stride, eob);
        break;
      case TX_32X32:
        tx_type = DCT_DCT;
        vp9_idct32x32_add(dqcoeff, dst, stride, eob);
        break;
      default:
        assert(0 && "Invalid transform size");
    }

    if (eob == 1) {
      vpx_memset(dqcoeff, 0, 2 * sizeof(dqcoeff[0]));
    } else {
      if (tx_type == DCT_DCT && tx_size <= TX_16X16 && eob <= 10)
        vpx_memset(dqcoeff, 0, 4 * (4 << tx_size) * sizeof(dqcoeff[0]));
      else if (tx_size == TX_32X32 && eob <= 34)
        vpx_memset(dqcoeff, 0, 256 * sizeof(dqcoeff[0]));
      else
        vpx_memset(dqcoeff, 0, (16 << (tx_size << 1)) * sizeof(dqcoeff[0]));
    }
  }
}

struct intra_args {
  VP9_COMMON *cm;
  MACROBLOCKD *xd;
  vp9_reader *r;
};

static void predict_and_reconstruct_intra_block(int plane, int block,
                                                BLOCK_SIZE plane_bsize,
                                                TX_SIZE tx_size, void *arg) {
  struct intra_args *const args = arg;
  VP9_COMMON *const cm = args->cm;
  MACROBLOCKD *const xd = args->xd;
  struct macroblockd_plane *const pd = &xd->plane[plane];
  MODE_INFO *const mi = xd->mi_8x8[0];
  const MB_PREDICTION_MODE mode = (plane == 0)
      ? ((mi->mbmi.sb_type < BLOCK_8X8) ? mi->bmi[block].as_mode
                                        : mi->mbmi.mode)
      : mi->mbmi.uv_mode;
  int x, y;
  uint8_t *dst;
  txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &x, &y);
  dst = &pd->dst.buf[4 * y * pd->dst.stride + 4 * x];

  if (xd->mb_to_right_edge < 0 || xd->mb_to_bottom_edge < 0)
    extend_for_intra(xd, plane_bsize, plane, x, y);

  vp9_predict_intra_block(xd, block >> (tx_size << 1),
                          b_width_log2(plane_bsize), tx_size, mode,
                          dst, pd->dst.stride, dst, pd->dst.stride);

  if (!mi->mbmi.skip_coeff) {
    const int eob = vp9_decode_block_tokens(cm, xd, plane, block,
                                            plane_bsize, x, y, tx_size,
                                            args->r);
    inverse_transform_block(xd, plane, block, tx_size, dst, pd->dst.stride,
                            eob);
  }
}

struct inter_args {
  VP9_COMMON *cm;
  MACROBLOCKD *xd;
  vp9_reader *r;
  int *eobtotal;
};

static void reconstruct_inter_block(int plane, int block,
                                    BLOCK_SIZE plane_bsize,
                                    TX_SIZE tx_size, void *arg) {
  struct inter_args *args = arg;
  VP9_COMMON *const cm = args->cm;
  MACROBLOCKD *const xd = args->xd;
  struct macroblockd_plane *const pd = &xd->plane[plane];
  int x, y, eob;
  txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &x, &y);
  eob = vp9_decode_block_tokens(cm, xd, plane, block, plane_bsize, x, y,
                                tx_size, args->r);
  inverse_transform_block(xd, plane, block, tx_size,
                          &pd->dst.buf[4 * y * pd->dst.stride + 4 * x],
                          pd->dst.stride, eob);
  *args->eobtotal += eob;
}

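// Points the macroblockd at the mode info, entropy contexts and destination
// buffers for the block at (mi_row, mi_col), and replicates the top-left
// mode info pointer across the block's mi grid.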
static void set_offsets(VP9_COMMON *const cm, MACROBLOCKD *const xd,
                        const TileInfo *const tile,
                        BLOCK_SIZE bsize, int mi_row, int mi_col) {
  const int bw = num_8x8_blocks_wide_lookup[bsize];
  const int bh = num_8x8_blocks_high_lookup[bsize];
  const int x_mis = MIN(bw, cm->mi_cols - mi_col);
  const int y_mis = MIN(bh, cm->mi_rows - mi_row);
  const int offset = mi_row * cm->mode_info_stride + mi_col;
  const int tile_offset = tile->mi_row_start * cm->mode_info_stride +
                          tile->mi_col_start;
  int x, y;

  xd->mi_8x8 = cm->mi_grid_visible + offset;
  xd->prev_mi_8x8 = cm->prev_mi_grid_visible + offset;
  // Special case: if prev_mi is NULL, the previous mode info context
  // cannot be used.
  xd->last_mi = cm->prev_mi ? xd->prev_mi_8x8[0] : NULL;

  xd->mi_8x8[0] = xd->mi_stream + offset - tile_offset;
  xd->mi_8x8[0]->mbmi.sb_type = bsize;
  for (y = 0; y < y_mis; ++y)
    for (x = !y; x < x_mis; ++x)
      xd->mi_8x8[y * cm->mode_info_stride + x] = xd->mi_8x8[0];

  set_skip_context(xd, xd->above_context, xd->left_context, mi_row, mi_col);

  // Distance of Mb to the various image edges. These are specified to 8th pel
  // as they are always compared to values that are in 1/8th pel units
  set_mi_row_col(xd, tile, mi_row, bh, mi_col, bw, cm->mi_rows, cm->mi_cols);

  setup_dst_planes(xd, get_frame_new_buffer(cm), mi_row, mi_col);
}

static void set_ref(VP9_COMMON *const cm, MACROBLOCKD *const xd,
                    int idx, int mi_row, int mi_col) {
  MB_MODE_INFO *const mbmi = &xd->mi_8x8[0]->mbmi;
  const int ref = mbmi->ref_frame[idx] - LAST_FRAME;
  const YV12_BUFFER_CONFIG *cfg = get_frame_ref_buffer(cm, ref);
  const struct scale_factors_common *sfc = &cm->active_ref_scale_comm[ref];

  xd->ref_buf[idx] = cfg;
  if (!vp9_is_valid_scale(sfc))
    vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
                       "Invalid scale factors");

  xd->scale_factor[idx].sfc = sfc;
  setup_pre_planes(xd, idx, cfg, mi_row, mi_col, &xd->scale_factor[idx]);
  xd->corrupted |= cfg->corrupted;
}

static void decode_modes_b(VP9_COMMON *const cm, MACROBLOCKD *const xd,
                           const TileInfo *const tile,
                           int mi_row, int mi_col,
                           vp9_reader *r, BLOCK_SIZE bsize) {
  const int less8x8 = bsize < BLOCK_8X8;
  MB_MODE_INFO *mbmi;

  set_offsets(cm, xd, tile, bsize, mi_row, mi_col);
  vp9_read_mode_info(cm, xd, tile, mi_row, mi_col, r);

  if (less8x8)
    bsize = BLOCK_8X8;

  // Has to be called after set_offsets
  mbmi = &xd->mi_8x8[0]->mbmi;

  if (mbmi->skip_coeff) {
    reset_skip_context(xd, bsize);
  } else {
    if (cm->seg.enabled)
      setup_plane_dequants(cm, xd, vp9_get_qindex(&cm->seg, mbmi->segment_id,
                                                  cm->base_qindex));
  }

  if (!is_inter_block(mbmi)) {
    struct intra_args arg = { cm, xd, r };
    foreach_transformed_block(xd, bsize, predict_and_reconstruct_intra_block,
                              &arg);
  } else {
    // Setup
    set_ref(cm, xd, 0, mi_row, mi_col);
    if (has_second_ref(mbmi))
      set_ref(cm, xd, 1, mi_row, mi_col);

    xd->subpix.filter_x = xd->subpix.filter_y =
        vp9_get_filter_kernel(mbmi->interp_filter);

    // Prediction
    vp9_dec_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);

    // Reconstruction
    if (!mbmi->skip_coeff) {
      int eobtotal = 0;
      struct inter_args arg = { cm, xd, r, &eobtotal };
      foreach_transformed_block(xd, bsize, reconstruct_inter_block, &arg);
      if (!less8x8 && eobtotal == 0)
        mbmi->skip_coeff = 1;  // skip loopfilter
    }
  }

  xd->corrupted |= vp9_reader_has_error(r);
}

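// Reads the partition type for a block. Blocks that straddle the right or
// bottom frame edge can only be split toward the clipped side, so a reduced
// set of symbols is coded there.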
static PARTITION_TYPE read_partition(VP9_COMMON *cm, MACROBLOCKD *xd, int hbs,
                                     int mi_row, int mi_col, BLOCK_SIZE bsize,
                                     vp9_reader *r) {
  const int ctx = partition_plane_context(xd->above_seg_context,
                                          xd->left_seg_context,
                                          mi_row, mi_col, bsize);
  const vp9_prob *const probs = get_partition_probs(cm, ctx);
  const int has_rows = (mi_row + hbs) < cm->mi_rows;
  const int has_cols = (mi_col + hbs) < cm->mi_cols;
  PARTITION_TYPE p;

  if (has_rows && has_cols)
    p = vp9_read_tree(r, vp9_partition_tree, probs);
  else if (!has_rows && has_cols)
    p = vp9_read(r, probs[1]) ? PARTITION_SPLIT : PARTITION_HORZ;
  else if (has_rows && !has_cols)
    p = vp9_read(r, probs[2]) ? PARTITION_SPLIT : PARTITION_VERT;
  else
    p = PARTITION_SPLIT;

  if (!cm->frame_parallel_decoding_mode)
    ++cm->counts.partition[ctx][p];

  return p;
}

static void decode_modes_sb(VP9_COMMON *const cm, MACROBLOCKD *const xd,
                            const TileInfo *const tile,
                            int mi_row, int mi_col,
                            vp9_reader *r, BLOCK_SIZE bsize) {
  const int hbs = num_8x8_blocks_wide_lookup[bsize] / 2;
  PARTITION_TYPE partition;
  BLOCK_SIZE subsize;

  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
    return;

  partition = read_partition(cm, xd, hbs, mi_row, mi_col, bsize, r);
  subsize = get_subsize(bsize, partition);
  if (subsize < BLOCK_8X8) {
    decode_modes_b(cm, xd, tile, mi_row, mi_col, r, subsize);
  } else {
    switch (partition) {
      case PARTITION_NONE:
        decode_modes_b(cm, xd, tile, mi_row, mi_col, r, subsize);
        break;
      case PARTITION_HORZ:
        decode_modes_b(cm, xd, tile, mi_row, mi_col, r, subsize);
        if (mi_row + hbs < cm->mi_rows)
          decode_modes_b(cm, xd, tile, mi_row + hbs, mi_col, r, subsize);
        break;
      case PARTITION_VERT:
        decode_modes_b(cm, xd, tile, mi_row, mi_col, r, subsize);
        if (mi_col + hbs < cm->mi_cols)
          decode_modes_b(cm, xd, tile, mi_row, mi_col + hbs, r, subsize);
        break;
      case PARTITION_SPLIT:
        decode_modes_sb(cm, xd, tile, mi_row, mi_col, r, subsize);
        decode_modes_sb(cm, xd, tile, mi_row, mi_col + hbs, r, subsize);
        decode_modes_sb(cm, xd, tile, mi_row + hbs, mi_col, r, subsize);
        decode_modes_sb(cm, xd, tile, mi_row + hbs, mi_col + hbs, r, subsize);
        break;
      default:
        assert(0 && "Invalid partition type");
    }
  }

  // update partition context
  if (bsize >= BLOCK_8X8 &&
      (bsize == BLOCK_8X8 || partition != PARTITION_SPLIT))
    update_partition_context(xd->above_seg_context, xd->left_seg_context,
                             mi_row, mi_col, subsize, bsize);
}

static void setup_token_decoder(const uint8_t *data,
                                const uint8_t *data_end,
                                size_t read_size,
                                struct vpx_internal_error_info *error_info,
                                vp9_reader *r) {
  // Validate the calculated partition length. If the buffer
  // described by the partition can't be fully read, then restrict
  // it to the portion that can be (for EC mode) or throw an error.
  if (!read_is_valid(data, read_size, data_end))
    vpx_internal_error(error_info, VPX_CODEC_CORRUPT_FRAME,
                       "Truncated packet or corrupt tile length");

  if (vp9_reader_init(r, data, read_size))
    vpx_internal_error(error_info, VPX_CODEC_MEM_ERROR,
                       "Failed to allocate bool decoder %d", 1);
}

static void read_coef_probs_common(vp9_coeff_probs_model *coef_probs,
                                   vp9_reader *r) {
  int i, j, k, l, m;

  if (vp9_read_bit(r))
    for (i = 0; i < PLANE_TYPES; ++i)
      for (j = 0; j < REF_TYPES; ++j)
        for (k = 0; k < COEF_BANDS; ++k)
          for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l)
            for (m = 0; m < UNCONSTRAINED_NODES; ++m)
              vp9_diff_update_prob(r, &coef_probs[i][j][k][l][m]);
}

static void read_coef_probs(FRAME_CONTEXT *fc, TX_MODE tx_mode,
                            vp9_reader *r) {
  const TX_SIZE max_tx_size = tx_mode_to_biggest_tx_size[tx_mode];
  TX_SIZE tx_size;
  for (tx_size = TX_4X4; tx_size <= max_tx_size; ++tx_size)
    read_coef_probs_common(fc->coef_probs[tx_size], r);
}

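// Reads the segmentation syntax: the enable flag, optional updates to the
// segment map and its (possibly temporal) prediction probabilities, and
// optional per-segment feature data.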
static void setup_segmentation(struct segmentation *seg,
                               struct vp9_read_bit_buffer *rb) {
  int i, j;

  seg->update_map = 0;
  seg->update_data = 0;

  seg->enabled = vp9_rb_read_bit(rb);
  if (!seg->enabled)
    return;

  // Segmentation map update
  seg->update_map = vp9_rb_read_bit(rb);
  if (seg->update_map) {
    for (i = 0; i < SEG_TREE_PROBS; i++)
      seg->tree_probs[i] = vp9_rb_read_bit(rb) ? vp9_rb_read_literal(rb, 8)
                                               : MAX_PROB;

    seg->temporal_update = vp9_rb_read_bit(rb);
    if (seg->temporal_update) {
      for (i = 0; i < PREDICTION_PROBS; i++)
        seg->pred_probs[i] = vp9_rb_read_bit(rb) ? vp9_rb_read_literal(rb, 8)
                                                 : MAX_PROB;
    } else {
      for (i = 0; i < PREDICTION_PROBS; i++)
        seg->pred_probs[i] = MAX_PROB;
    }
  }

  // Segmentation data update
  seg->update_data = vp9_rb_read_bit(rb);
  if (seg->update_data) {
    seg->abs_delta = vp9_rb_read_bit(rb);

    vp9_clearall_segfeatures(seg);

    for (i = 0; i < MAX_SEGMENTS; i++) {
      for (j = 0; j < SEG_LVL_MAX; j++) {
        int data = 0;
        const int feature_enabled = vp9_rb_read_bit(rb);
        if (feature_enabled) {
          vp9_enable_segfeature(seg, i, j);
          data = decode_unsigned_max(rb, vp9_seg_feature_data_max(j));
          if (vp9_is_segfeature_signed(j))
            data = vp9_rb_read_bit(rb) ? -data : data;
        }
        vp9_set_segdata(seg, i, j, data);
      }
    }
  }
}

static void setup_loopfilter(struct loopfilter *lf,
                             struct vp9_read_bit_buffer *rb) {
  lf->filter_level = vp9_rb_read_literal(rb, 6);
  lf->sharpness_level = vp9_rb_read_literal(rb, 3);

  // Read in loop filter deltas applied at the MB level based on mode or ref
  // frame.
  lf->mode_ref_delta_update = 0;

  lf->mode_ref_delta_enabled = vp9_rb_read_bit(rb);
  if (lf->mode_ref_delta_enabled) {
    lf->mode_ref_delta_update = vp9_rb_read_bit(rb);
    if (lf->mode_ref_delta_update) {
      int i;

      for (i = 0; i < MAX_REF_LF_DELTAS; i++)
        if (vp9_rb_read_bit(rb))
          lf->ref_deltas[i] = vp9_rb_read_signed_literal(rb, 6);

      for (i = 0; i < MAX_MODE_LF_DELTAS; i++)
        if (vp9_rb_read_bit(rb))
          lf->mode_deltas[i] = vp9_rb_read_signed_literal(rb, 6);
    }
  }
}

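// Reads an optional 4-bit signed delta-q value and reports whether it
// changed, so the caller knows when the dequantization tables must be
// rebuilt.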
static int read_delta_q(struct vp9_read_bit_buffer *rb, int *delta_q) {
  const int old = *delta_q;
  *delta_q = vp9_rb_read_bit(rb) ? vp9_rb_read_signed_literal(rb, 4) : 0;
  return old != *delta_q;
}

static void setup_quantization(VP9_COMMON *const cm, MACROBLOCKD *const xd,
                               struct vp9_read_bit_buffer *rb) {
  int update = 0;

  cm->base_qindex = vp9_rb_read_literal(rb, QINDEX_BITS);
  update |= read_delta_q(rb, &cm->y_dc_delta_q);
  update |= read_delta_q(rb, &cm->uv_dc_delta_q);
  update |= read_delta_q(rb, &cm->uv_ac_delta_q);
  if (update)
    vp9_init_dequantizer(cm);

  xd->lossless = cm->base_qindex == 0 &&
                 cm->y_dc_delta_q == 0 &&
                 cm->uv_dc_delta_q == 0 &&
                 cm->uv_ac_delta_q == 0;

  xd->itxm_add = xd->lossless ? vp9_iwht4x4_add : vp9_idct4x4_add;
}

static INTERPOLATION_TYPE read_interp_filter_type(
    struct vp9_read_bit_buffer *rb) {
  const INTERPOLATION_TYPE literal_to_type[] = { EIGHTTAP_SMOOTH,
                                                 EIGHTTAP,
                                                 EIGHTTAP_SHARP,
                                                 BILINEAR };
  return vp9_rb_read_bit(rb) ? SWITCHABLE
                             : literal_to_type[vp9_rb_read_literal(rb, 2)];
}

static void read_frame_size(struct vp9_read_bit_buffer *rb,
                            int *width, int *height) {
  const int w = vp9_rb_read_literal(rb, 16) + 1;
  const int h = vp9_rb_read_literal(rb, 16) + 1;
  *width = w;
  *height = h;
}

static void setup_display_size(VP9_COMMON *cm,
                               struct vp9_read_bit_buffer *rb) {
  cm->display_width = cm->width;
  cm->display_height = cm->height;
  if (vp9_rb_read_bit(rb))
    read_frame_size(rb, &cm->display_width, &cm->display_height);
}

static void apply_frame_size(VP9D_COMP *pbi, int width, int height) {
  VP9_COMMON *cm = &pbi->common;

  if (cm->width != width || cm->height != height) {
    // Change in frame size.
    // TODO(agrange) Don't test width/height, check overall size.
    if (width > cm->width || height > cm->height) {
      // Rescale frame buffers only if they're not big enough already.
      if (vp9_resize_frame_buffers(cm, width, height))
        vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
                           "Failed to allocate frame buffers");
    }

    cm->width = width;
    cm->height = height;

    vp9_update_frame_size(cm);
  }

  if (cm->fb_list != NULL) {
    vpx_codec_frame_buffer_t *const ext_fb = &cm->fb_list[cm->new_fb_idx];
    if (vp9_realloc_frame_buffer(get_frame_new_buffer(cm),
                                 cm->width, cm->height,
                                 cm->subsampling_x, cm->subsampling_y,
                                 VP9BORDERINPIXELS, ext_fb,
                                 cm->realloc_fb_cb, cm->user_priv)) {
      vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
                         "Failed to allocate external frame buffer");
    }
  } else {
    vp9_realloc_frame_buffer(get_frame_new_buffer(cm), cm->width, cm->height,
                             cm->subsampling_x, cm->subsampling_y,
                             VP9BORDERINPIXELS, NULL, NULL, NULL);
  }
}

static void setup_frame_size(VP9D_COMP *pbi,
                             struct vp9_read_bit_buffer *rb) {
  int width, height;
  read_frame_size(rb, &width, &height);
  apply_frame_size(pbi, width, height);
  setup_display_size(&pbi->common, rb);
}

static void setup_frame_size_with_refs(VP9D_COMP *pbi,
                                       struct vp9_read_bit_buffer *rb) {
  VP9_COMMON *const cm = &pbi->common;

  int width, height;
  int found = 0, i;
  for (i = 0; i < REFS_PER_FRAME; ++i) {
    if (vp9_rb_read_bit(rb)) {
      YV12_BUFFER_CONFIG *const cfg = get_frame_ref_buffer(cm, i);
      width = cfg->y_crop_width;
      height = cfg->y_crop_height;
      found = 1;
      break;
    }
  }

  if (!found)
    read_frame_size(rb, &width, &height);

  if (!width || !height)
    vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
                       "Referenced frame with invalid size");

  apply_frame_size(pbi, width, height);
  setup_display_size(cm, rb);
}

static void setup_tile_context(VP9D_COMP *const pbi, MACROBLOCKD *const xd,
                               int tile_row, int tile_col) {
  int i;
  const int tile_cols = 1 << pbi->common.log2_tile_cols;
  xd->mi_stream = pbi->mi_streams[tile_row * tile_cols + tile_col];

  for (i = 0; i < MAX_MB_PLANE; ++i) {
    xd->above_context[i] = pbi->above_context[i];
  }
  // see note in alloc_tile_storage().
  xd->above_seg_context = pbi->above_seg_context;
}

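// Decodes a single tile. When the loop filter is run inline, it is pipelined
// one superblock row behind decoding, and the remaining rows are filtered
// once the tile is fully decoded.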
static void decode_tile(VP9D_COMP *pbi, const TileInfo *const tile,
                        vp9_reader *r) {
  const int num_threads = pbi->oxcf.max_threads;
  VP9_COMMON *const cm = &pbi->common;
  int mi_row, mi_col;
  MACROBLOCKD *xd = &pbi->mb;

  if (pbi->do_loopfilter_inline) {
    LFWorkerData *const lf_data = (LFWorkerData*)pbi->lf_worker.data1;
    lf_data->frame_buffer = get_frame_new_buffer(cm);
    lf_data->cm = cm;
    lf_data->xd = pbi->mb;
    lf_data->stop = 0;
    lf_data->y_only = 0;
    vp9_loop_filter_frame_init(cm, cm->lf.filter_level);
  }

  for (mi_row = tile->mi_row_start; mi_row < tile->mi_row_end;
       mi_row += MI_BLOCK_SIZE) {
    // For a SB there are 2 left contexts, each pertaining to a MB row within
    // the SB.
    vp9_zero(xd->left_context);
    vp9_zero(xd->left_seg_context);
    for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end;
         mi_col += MI_BLOCK_SIZE) {
      decode_modes_sb(cm, xd, tile, mi_row, mi_col, r, BLOCK_64X64);
    }

    if (pbi->do_loopfilter_inline) {
      const int lf_start = mi_row - MI_BLOCK_SIZE;
      LFWorkerData *const lf_data = (LFWorkerData*)pbi->lf_worker.data1;

      // Delay the loop filter by one superblock row.
      if (lf_start < 0) continue;

      // On the last row of the tile, skip here; the remaining rows are
      // filtered in this thread after the loop completes.
      if (mi_row + MI_BLOCK_SIZE >= tile->mi_row_end) continue;

      vp9_worker_sync(&pbi->lf_worker);
      lf_data->start = lf_start;
      lf_data->stop = mi_row;
      if (num_threads > 1) {
        vp9_worker_launch(&pbi->lf_worker);
      } else {
        vp9_worker_execute(&pbi->lf_worker);
      }
    }
  }

  if (pbi->do_loopfilter_inline) {
    LFWorkerData *const lf_data = (LFWorkerData*)pbi->lf_worker.data1;

    vp9_worker_sync(&pbi->lf_worker);
    lf_data->start = lf_data->stop;
    lf_data->stop = cm->mi_rows;
    vp9_worker_execute(&pbi->lf_worker);
  }
}

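// Reads the tile configuration: log2_tile_cols is coded as a unary string of
// increment bits above the minimum, and log2_tile_rows as at most two bits.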
static void setup_tile_info(VP9_COMMON *cm, struct vp9_read_bit_buffer *rb) {
  int min_log2_tile_cols, max_log2_tile_cols, max_ones;
  vp9_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);

  // columns
  max_ones = max_log2_tile_cols - min_log2_tile_cols;
  cm->log2_tile_cols = min_log2_tile_cols;
  while (max_ones-- && vp9_rb_read_bit(rb))
    cm->log2_tile_cols++;

  // rows
  cm->log2_tile_rows = vp9_rb_read_bit(rb);
  if (cm->log2_tile_rows)
    cm->log2_tile_rows += vp9_rb_read_bit(rb);
}

// Reads the next tile returning its size and adjusting '*data' accordingly
// based on 'is_last'.
static size_t get_tile(const uint8_t *const data_end,
                       int is_last,
                       struct vpx_internal_error_info *error_info,
                       const uint8_t **data) {
  size_t size;

  if (!is_last) {
    if (!read_is_valid(*data, 4, data_end))
      vpx_internal_error(error_info, VPX_CODEC_CORRUPT_FRAME,
                         "Truncated packet or corrupt tile length");

    size = read_be32(*data);
    *data += 4;
  } else {
    size = data_end - *data;
  }
  return size;
}

typedef struct TileBuffer {
  const uint8_t *data;
  size_t size;
  int col;  // only used with multi-threaded decoding
} TileBuffer;

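// Single-threaded tile decoding: walks the tile grid in raster order,
// initializing a bool decoder per tile from the sizes read into
// tile_buffers.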
static const uint8_t *decode_tiles(VP9D_COMP *pbi, const uint8_t *data) {
  VP9_COMMON *const cm = &pbi->common;
  MACROBLOCKD *const xd = &pbi->mb;
  const int aligned_cols = mi_cols_aligned_to_sb(cm->mi_cols);
  const int tile_cols = 1 << cm->log2_tile_cols;
  const int tile_rows = 1 << cm->log2_tile_rows;
  TileBuffer tile_buffers[4][1 << 6];
  int tile_row, tile_col;
  const uint8_t *const data_end = pbi->source + pbi->source_sz;
  const uint8_t *end = NULL;
  vp9_reader r;

  assert(tile_rows <= 4);
  assert(tile_cols <= (1 << 6));

  // Note: this memset assumes above_context[0], [1] and [2]
  // are allocated as part of the same buffer.
  vpx_memset(pbi->above_context[0], 0,
             sizeof(*pbi->above_context[0]) * MAX_MB_PLANE * 2 * aligned_cols);

  vpx_memset(pbi->above_seg_context, 0,
             sizeof(*pbi->above_seg_context) * aligned_cols);

  // Load tile data into tile_buffers
  for (tile_row = 0; tile_row < tile_rows; ++tile_row) {
    for (tile_col = 0; tile_col < tile_cols; ++tile_col) {
      const int last_tile = tile_row == tile_rows - 1 &&
                            tile_col == tile_cols - 1;
      const size_t size = get_tile(data_end, last_tile, &cm->error, &data);
      TileBuffer *const buf = &tile_buffers[tile_row][tile_col];
      buf->data = data;
      buf->size = size;
      data += size;
    }
  }

  // Decode tiles using data from tile_buffers
  for (tile_row = 0; tile_row < tile_rows; ++tile_row) {
    for (tile_col = 0; tile_col < tile_cols; ++tile_col) {
      const int col = pbi->oxcf.inv_tile_order ? tile_cols - tile_col - 1
                                               : tile_col;
      const int last_tile = tile_row == tile_rows - 1 &&
                            col == tile_cols - 1;
      const TileBuffer *const buf = &tile_buffers[tile_row][col];
      TileInfo tile;

      vp9_tile_init(&tile, cm, tile_row, col);
      setup_token_decoder(buf->data, data_end, buf->size, &cm->error, &r);
      setup_tile_context(pbi, xd, tile_row, col);
      decode_tile(pbi, &tile, &r);

      if (last_tile)
        end = vp9_reader_find_end(&r);
    }
  }

  return end;
}

static void setup_tile_macroblockd(TileWorkerData *const tile_data) {
  MACROBLOCKD *xd = &tile_data->xd;
  struct macroblockd_plane *const pd = xd->plane;
  int i;

  for (i = 0; i < MAX_MB_PLANE; ++i) {
    pd[i].dqcoeff = tile_data->dqcoeff[i];
    vpx_memset(xd->plane[i].dqcoeff, 0, 64 * 64 * sizeof(int16_t));
  }
}

static int tile_worker_hook(void *arg1, void *arg2) {
  TileWorkerData *const tile_data = (TileWorkerData*)arg1;
  const TileInfo *const tile = (TileInfo*)arg2;
  int mi_row, mi_col;

  for (mi_row = tile->mi_row_start; mi_row < tile->mi_row_end;
       mi_row += MI_BLOCK_SIZE) {
    vp9_zero(tile_data->xd.left_context);
    vp9_zero(tile_data->xd.left_seg_context);
    for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end;
         mi_col += MI_BLOCK_SIZE) {
      decode_modes_sb(tile_data->cm, &tile_data->xd, tile,
                      mi_row, mi_col, &tile_data->bit_reader, BLOCK_64X64);
    }
  }
  return !tile_data->xd.corrupted;
}

// sorts in descending order
static int compare_tile_buffers(const void *a, const void *b) {
  const TileBuffer *const buf1 = (const TileBuffer*)a;
  const TileBuffer *const buf2 = (const TileBuffer*)b;
  if (buf1->size < buf2->size) {
    return 1;
  } else if (buf1->size == buf2->size) {
    return 0;
  } else {
    return -1;
  }
}

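// Multi-threaded tile decoding for streams with a single tile row. Tile
// columns are distributed across worker threads; within each group the
// largest (presumably slowest) tile is kept for the calling thread to
// minimize time spent waiting on workers.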
static const uint8_t *decode_tiles_mt(VP9D_COMP *pbi, const uint8_t *data) {
  VP9_COMMON *const cm = &pbi->common;
  const uint8_t *bit_reader_end = NULL;
  const uint8_t *const data_end = pbi->source + pbi->source_sz;
  const int aligned_mi_cols = mi_cols_aligned_to_sb(cm->mi_cols);
  const int tile_cols = 1 << cm->log2_tile_cols;
  const int tile_rows = 1 << cm->log2_tile_rows;
  const int num_workers = MIN(pbi->oxcf.max_threads & ~1, tile_cols);
  TileBuffer tile_buffers[1 << 6];
  int n;
  int final_worker = -1;

  assert(tile_cols <= (1 << 6));
  assert(tile_rows == 1);
  (void)tile_rows;

  if (num_workers > pbi->num_tile_workers) {
    int i;
    CHECK_MEM_ERROR(cm, pbi->tile_workers,
                    vpx_realloc(pbi->tile_workers,
                                num_workers * sizeof(*pbi->tile_workers)));
    for (i = pbi->num_tile_workers; i < num_workers; ++i) {
      VP9Worker *const worker = &pbi->tile_workers[i];
      ++pbi->num_tile_workers;

      vp9_worker_init(worker);
      worker->hook = (VP9WorkerHook)tile_worker_hook;
      CHECK_MEM_ERROR(cm, worker->data1,
                      vpx_memalign(32, sizeof(TileWorkerData)));
      CHECK_MEM_ERROR(cm, worker->data2, vpx_malloc(sizeof(TileInfo)));
      if (i < num_workers - 1 && !vp9_worker_reset(worker)) {
        vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
                           "Tile decoder thread creation failed");
      }
    }
  }

  // Note: this memset assumes above_context[0], [1] and [2]
  // are allocated as part of the same buffer.
  vpx_memset(pbi->above_context[0], 0,
             sizeof(*pbi->above_context[0]) * MAX_MB_PLANE *
             2 * aligned_mi_cols);
  vpx_memset(pbi->above_seg_context, 0,
             sizeof(*pbi->above_seg_context) * aligned_mi_cols);

  // Load tile data into tile_buffers
  for (n = 0; n < tile_cols; ++n) {
    const size_t size =
        get_tile(data_end, n == tile_cols - 1, &cm->error, &data);
    TileBuffer *const buf = &tile_buffers[n];
    buf->data = data;
    buf->size = size;
    buf->col = n;
    data += size;
  }

  // Sort the buffers based on size in descending order.
  qsort(tile_buffers, tile_cols, sizeof(tile_buffers[0]),
        compare_tile_buffers);

  // Rearrange the tile buffers such that per-tile group the largest, and
  // presumably the most difficult, tile will be decoded in the main thread.
  // This should help minimize the number of instances where the main thread
  // is waiting for a worker to complete.
  {
    int group_start = 0;
    while (group_start < tile_cols) {
      const TileBuffer largest = tile_buffers[group_start];
      const int group_end = MIN(group_start + num_workers, tile_cols) - 1;
      memmove(tile_buffers + group_start, tile_buffers + group_start + 1,
              (group_end - group_start) * sizeof(tile_buffers[0]));
      tile_buffers[group_end] = largest;
      group_start = group_end + 1;
    }
  }

  n = 0;
  while (n < tile_cols) {
    int i;
    for (i = 0; i < num_workers && n < tile_cols; ++i) {
      VP9Worker *const worker = &pbi->tile_workers[i];
      TileWorkerData *const tile_data = (TileWorkerData*)worker->data1;
      TileInfo *const tile = (TileInfo*)worker->data2;
      TileBuffer *const buf = &tile_buffers[n];

      tile_data->cm = cm;
      tile_data->xd = pbi->mb;
      tile_data->xd.corrupted = 0;
      vp9_tile_init(tile, tile_data->cm, 0, buf->col);

      setup_token_decoder(buf->data, data_end, buf->size, &cm->error,
                          &tile_data->bit_reader);
      setup_tile_context(pbi, &tile_data->xd, 0, buf->col);
      setup_tile_macroblockd(tile_data);

      worker->had_error = 0;
      if (i == num_workers - 1 || n == tile_cols - 1) {
        vp9_worker_execute(worker);
      } else {
        vp9_worker_launch(worker);
      }

      if (buf->col == tile_cols - 1) {
        final_worker = i;
      }

      ++n;
    }

    for (; i > 0; --i) {
      VP9Worker *const worker = &pbi->tile_workers[i - 1];
      pbi->mb.corrupted |= !vp9_worker_sync(worker);
    }
    if (final_worker > -1) {
      TileWorkerData *const tile_data =
          (TileWorkerData*)pbi->tile_workers[final_worker].data1;
      bit_reader_end = vp9_reader_find_end(&tile_data->bit_reader);
      final_worker = -1;
    }
  }

  return bit_reader_end;
}

static void check_sync_code(VP9_COMMON *cm, struct vp9_read_bit_buffer *rb) {
  if (vp9_rb_read_literal(rb, 8) != VP9_SYNC_CODE_0 ||
      vp9_rb_read_literal(rb, 8) != VP9_SYNC_CODE_1 ||
      vp9_rb_read_literal(rb, 8) != VP9_SYNC_CODE_2) {
    vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
                       "Invalid frame sync code");
  }
}

static void error_handler(void *data, size_t bit_offset) {
  VP9_COMMON *const cm = (VP9_COMMON *)data;
  vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME, "Truncated packet");
}

#define RESERVED \
  if (vp9_rb_read_bit(rb)) \
    vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM, \
                       "Reserved bit must be unset")

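// Parses the uncompressed frame header: frame marker and type, sync code and
// frame size for key/intra-only frames, reference configuration for inter
// frames, then loop filter, quantizer, segmentation and tile syntax.
// Returns the size of the compressed header, or 0 when an already-decoded
// frame is to be shown directly.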
static size_t read_uncompressed_header(VP9D_COMP *pbi,
                                       struct vp9_read_bit_buffer *rb) {
  VP9_COMMON *const cm = &pbi->common;
  size_t sz;
  int i;

  cm->last_frame_type = cm->frame_type;

  if (vp9_rb_read_literal(rb, 2) != VP9_FRAME_MARKER)
    vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
                       "Invalid frame marker");

  cm->version = vp9_rb_read_bit(rb);
  RESERVED;

  if (vp9_rb_read_bit(rb)) {
    // show an existing frame directly
    int frame_to_show = cm->ref_frame_map[vp9_rb_read_literal(rb, 3)];
    ref_cnt_fb(cm->fb_idx_ref_cnt, &cm->new_fb_idx, frame_to_show);
    pbi->refresh_frame_flags = 0;
    cm->lf.filter_level = 0;
    return 0;
  }

  cm->frame_type = (FRAME_TYPE) vp9_rb_read_bit(rb);
  cm->show_frame = vp9_rb_read_bit(rb);
  cm->error_resilient_mode = vp9_rb_read_bit(rb);

  if (cm->frame_type == KEY_FRAME) {
    check_sync_code(cm, rb);

    cm->color_space = vp9_rb_read_literal(rb, 3);  // colorspace
    if (cm->color_space != SRGB) {
      vp9_rb_read_bit(rb);  // [16,235] (including xvycc) vs [0,255] range
      if (cm->version == 1) {
        cm->subsampling_x = vp9_rb_read_bit(rb);
        cm->subsampling_y = vp9_rb_read_bit(rb);
        vp9_rb_read_bit(rb);  // has extra plane
      } else {
        cm->subsampling_y = cm->subsampling_x = 1;
      }
    } else {
      if (cm->version == 1) {
        cm->subsampling_y = cm->subsampling_x = 0;
        vp9_rb_read_bit(rb);  // has extra plane
      } else {
        vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
                           "RGB not supported in profile 0");
      }
    }

    pbi->refresh_frame_flags = (1 << REF_FRAMES) - 1;

    for (i = 0; i < REFS_PER_FRAME; ++i)
      cm->active_ref_idx[i] = cm->new_fb_idx;

    setup_frame_size(pbi, rb);
  } else {
    cm->intra_only = cm->show_frame ? 0 : vp9_rb_read_bit(rb);

    cm->reset_frame_context = cm->error_resilient_mode ?
        0 : vp9_rb_read_literal(rb, 2);

    if (cm->intra_only) {
      check_sync_code(cm, rb);

      pbi->refresh_frame_flags = vp9_rb_read_literal(rb, REF_FRAMES);
      setup_frame_size(pbi, rb);
    } else {
      pbi->refresh_frame_flags = vp9_rb_read_literal(rb, REF_FRAMES);

      for (i = 0; i < REFS_PER_FRAME; ++i) {
        const int ref = vp9_rb_read_literal(rb, REF_FRAMES_LOG2);
        cm->active_ref_idx[i] = cm->ref_frame_map[ref];
        cm->ref_frame_sign_bias[LAST_FRAME + i] = vp9_rb_read_bit(rb);
      }

      setup_frame_size_with_refs(pbi, rb);

      cm->allow_high_precision_mv = vp9_rb_read_bit(rb);
      cm->mcomp_filter_type = read_interp_filter_type(rb);

      for (i = 0; i < REFS_PER_FRAME; ++i) {
        vp9_setup_scale_factors(cm, i);
        if (vp9_is_scaled(&cm->active_ref_scale_comm[i]))
          vp9_extend_frame_borders(&cm->yv12_fb[cm->active_ref_idx[i]],
                                   cm->subsampling_x, cm->subsampling_y);
      }
    }
  }

  if (!cm->error_resilient_mode) {
    cm->refresh_frame_context = vp9_rb_read_bit(rb);
    cm->frame_parallel_decoding_mode = vp9_rb_read_bit(rb);
  } else {
    cm->refresh_frame_context = 0;
    cm->frame_parallel_decoding_mode = 1;
  }

  // This flag will be overridden by the call to vp9_setup_past_independence
  // below, forcing the use of context 0 for those frame types.
  cm->frame_context_idx = vp9_rb_read_literal(rb, FRAME_CONTEXTS_LOG2);

  if (frame_is_intra_only(cm) || cm->error_resilient_mode)
    vp9_setup_past_independence(cm);

  setup_loopfilter(&cm->lf, rb);
  setup_quantization(cm, &pbi->mb, rb);
  setup_segmentation(&cm->seg, rb);

  setup_tile_info(cm, rb);
  sz = vp9_rb_read_literal(rb, 16);

  if (sz == 0)
    vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
                       "Invalid header size");

  return sz;
}

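// Parses the compressed (bool-coded) header: transform mode and coefficient
// probability updates, plus mode, interpolation filter, reference and MV
// probability updates for inter frames.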
static int read_compressed_header(VP9D_COMP *pbi, const uint8_t *data,
                                  size_t partition_size) {
  VP9_COMMON *const cm = &pbi->common;
  MACROBLOCKD *const xd = &pbi->mb;
  FRAME_CONTEXT *const fc = &cm->fc;
  vp9_reader r;
  int k;

  if (vp9_reader_init(&r, data, partition_size))
    vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
                       "Failed to allocate bool decoder 0");

  cm->tx_mode = xd->lossless ? ONLY_4X4 : read_tx_mode(&r);
  if (cm->tx_mode == TX_MODE_SELECT)
    read_tx_mode_probs(&fc->tx_probs, &r);
  read_coef_probs(fc, cm->tx_mode, &r);

  for (k = 0; k < MBSKIP_CONTEXTS; ++k)
    vp9_diff_update_prob(&r, &fc->mbskip_probs[k]);

  if (!frame_is_intra_only(cm)) {
    nmv_context *const nmvc = &fc->nmvc;
    int i, j;

    read_inter_mode_probs(fc, &r);

    if (cm->mcomp_filter_type == SWITCHABLE)
      read_switchable_interp_probs(fc, &r);

    for (i = 0; i < INTRA_INTER_CONTEXTS; i++)
      vp9_diff_update_prob(&r, &fc->intra_inter_prob[i]);

    cm->reference_mode = read_reference_mode(cm, &r);
    read_reference_mode_probs(cm, &r);

    for (j = 0; j < BLOCK_SIZE_GROUPS; j++)
      for (i = 0; i < INTRA_MODES - 1; ++i)
        vp9_diff_update_prob(&r, &fc->y_mode_prob[j][i]);

    for (j = 0; j < PARTITION_CONTEXTS; ++j)
      for (i = 0; i < PARTITION_TYPES - 1; ++i)
        vp9_diff_update_prob(&r, &fc->partition_prob[j][i]);

    read_mv_probs(nmvc, cm->allow_high_precision_mv, &r);
  }

  return vp9_reader_has_error(&r);
}

void vp9_init_dequantizer(VP9_COMMON *cm) {
  int q;

  for (q = 0; q < QINDEX_RANGE; q++) {
    cm->y_dequant[q][0] = vp9_dc_quant(q, cm->y_dc_delta_q);
    cm->y_dequant[q][1] = vp9_ac_quant(q, 0);

    cm->uv_dequant[q][0] = vp9_dc_quant(q, cm->uv_dc_delta_q);
    cm->uv_dequant[q][1] = vp9_ac_quant(q, cm->uv_ac_delta_q);
  }
}

#ifdef NDEBUG
#define debug_check_frame_counts(cm) (void)0
#else  // !NDEBUG
// Counts should only be incremented when frame_parallel_decoding_mode and
// error_resilient_mode are disabled.
static void debug_check_frame_counts(const VP9_COMMON *const cm) {
  FRAME_COUNTS zero_counts;
  vp9_zero(zero_counts);
  assert(cm->frame_parallel_decoding_mode || cm->error_resilient_mode);
  assert(!memcmp(cm->counts.y_mode, zero_counts.y_mode,
                 sizeof(cm->counts.y_mode)));
  assert(!memcmp(cm->counts.uv_mode, zero_counts.uv_mode,
                 sizeof(cm->counts.uv_mode)));
  assert(!memcmp(cm->counts.partition, zero_counts.partition,
                 sizeof(cm->counts.partition)));
  assert(!memcmp(cm->counts.coef, zero_counts.coef,
                 sizeof(cm->counts.coef)));
  assert(!memcmp(cm->counts.eob_branch, zero_counts.eob_branch,
                 sizeof(cm->counts.eob_branch)));
  assert(!memcmp(cm->counts.switchable_interp, zero_counts.switchable_interp,
                 sizeof(cm->counts.switchable_interp)));
  assert(!memcmp(cm->counts.inter_mode, zero_counts.inter_mode,
                 sizeof(cm->counts.inter_mode)));
  assert(!memcmp(cm->counts.intra_inter, zero_counts.intra_inter,
                 sizeof(cm->counts.intra_inter)));
  assert(!memcmp(cm->counts.comp_inter, zero_counts.comp_inter,
                 sizeof(cm->counts.comp_inter)));
  assert(!memcmp(cm->counts.single_ref, zero_counts.single_ref,
                 sizeof(cm->counts.single_ref)));
  assert(!memcmp(cm->counts.comp_ref, zero_counts.comp_ref,
                 sizeof(cm->counts.comp_ref)));
  assert(!memcmp(&cm->counts.tx, &zero_counts.tx, sizeof(cm->counts.tx)));
  assert(!memcmp(cm->counts.mbskip, zero_counts.mbskip,
                 sizeof(cm->counts.mbskip)));
  assert(!memcmp(&cm->counts.mv, &zero_counts.mv, sizeof(cm->counts.mv)));
}
#endif  // NDEBUG

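// Top-level frame decode: reads both headers, allocates per-tile storage,
// decodes the tiles (using worker threads when the stream and configuration
// allow it), and finally runs backward probability adaptation unless it is
// disabled for this frame.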
int vp9_decode_frame(VP9D_COMP *pbi, const uint8_t **p_data_end) {
  int i;
  VP9_COMMON *const cm = &pbi->common;
  MACROBLOCKD *const xd = &pbi->mb;

  const uint8_t *data = pbi->source;
  const uint8_t *const data_end = pbi->source + pbi->source_sz;

  struct vp9_read_bit_buffer rb = { data, data_end, 0, cm, error_handler };
  const size_t first_partition_size = read_uncompressed_header(pbi, &rb);
  const int keyframe = cm->frame_type == KEY_FRAME;
  const int tile_rows = 1 << cm->log2_tile_rows;
  const int tile_cols = 1 << cm->log2_tile_cols;
  YV12_BUFFER_CONFIG *const new_fb = get_frame_new_buffer(cm);

  if (!first_partition_size) {
    // showing a frame directly
    *p_data_end = data + 1;
    return 0;
  }

  if (!pbi->decoded_key_frame && !keyframe)
    return -1;

  data += vp9_rb_bytes_read(&rb);
  if (!read_is_valid(data, first_partition_size, data_end))
    vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
                       "Truncated packet or corrupt header length");

  pbi->do_loopfilter_inline =
      (cm->log2_tile_rows | cm->log2_tile_cols) == 0 && cm->lf.filter_level;
  if (pbi->do_loopfilter_inline && pbi->lf_worker.data1 == NULL) {
    CHECK_MEM_ERROR(cm, pbi->lf_worker.data1,
                    vpx_malloc(sizeof(LFWorkerData)));
    pbi->lf_worker.hook = (VP9WorkerHook)vp9_loop_filter_worker;
    if (pbi->oxcf.max_threads > 1 && !vp9_worker_reset(&pbi->lf_worker)) {
      vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
                         "Loop filter thread creation failed");
    }
  }

  alloc_tile_storage(pbi, tile_rows, tile_cols);

  xd->mode_info_stride = cm->mode_info_stride;
  set_prev_mi(cm);

  setup_plane_dequants(cm, xd, cm->base_qindex);
  setup_block_dptrs(xd, cm->subsampling_x, cm->subsampling_y);

  cm->fc = cm->frame_contexts[cm->frame_context_idx];
  vp9_zero(cm->counts);
  for (i = 0; i < MAX_MB_PLANE; ++i)
    vpx_memset(xd->plane[i].dqcoeff, 0, 64 * 64 * sizeof(int16_t));

  xd->corrupted = 0;
  new_fb->corrupted = read_compressed_header(pbi, data, first_partition_size);

  // TODO(jzern): remove frame_parallel_decoding_mode restriction for
  // single-frame tile decoding.
  if (pbi->oxcf.max_threads > 1 && tile_rows == 1 && tile_cols > 1 &&
      cm->frame_parallel_decoding_mode) {
    *p_data_end = decode_tiles_mt(pbi, data + first_partition_size);
  } else {
    *p_data_end = decode_tiles(pbi, data + first_partition_size);
  }

  cm->last_width = cm->width;
  cm->last_height = cm->height;

  new_fb->corrupted |= xd->corrupted;

  if (!pbi->decoded_key_frame) {
    if (keyframe && !new_fb->corrupted)
      pbi->decoded_key_frame = 1;
    else
      vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
                         "A stream must start with a complete key frame");
  }

  if (!cm->error_resilient_mode && !cm->frame_parallel_decoding_mode) {
    vp9_adapt_coef_probs(cm);

    if (!frame_is_intra_only(cm)) {
      vp9_adapt_mode_probs(cm);
      vp9_adapt_mv_probs(cm, cm->allow_high_precision_mv);
    }
  } else {
    debug_check_frame_counts(cm);
  }

  if (cm->refresh_frame_context)
    cm->frame_contexts[cm->frame_context_idx] = cm->fc;

  return 0;
}