Chromium Code Reviews

Side by Side Diff: source/libvpx/vp9/decoder/vp9_decodeframe.c

Issue 232133009: libvpx: Pull from upstream (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/deps/third_party/libvpx/
Patch Set: Created 6 years, 8 months ago
OLD | NEW
1 /* 1 /*
2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. 2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
3 * 3 *
4 * Use of this source code is governed by a BSD-style license 4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source 5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found 6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may 7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree. 8 * be found in the AUTHORS file in the root of the source tree.
9 */ 9 */
10 10
(...skipping 15 matching lines...)
26 #include "vp9/common/vp9_pred_common.h" 26 #include "vp9/common/vp9_pred_common.h"
27 #include "vp9/common/vp9_quant_common.h" 27 #include "vp9/common/vp9_quant_common.h"
28 #include "vp9/common/vp9_reconintra.h" 28 #include "vp9/common/vp9_reconintra.h"
29 #include "vp9/common/vp9_reconinter.h" 29 #include "vp9/common/vp9_reconinter.h"
30 #include "vp9/common/vp9_seg_common.h" 30 #include "vp9/common/vp9_seg_common.h"
31 #include "vp9/common/vp9_tile_common.h" 31 #include "vp9/common/vp9_tile_common.h"
32 32
33 #include "vp9/decoder/vp9_decodeframe.h" 33 #include "vp9/decoder/vp9_decodeframe.h"
34 #include "vp9/decoder/vp9_detokenize.h" 34 #include "vp9/decoder/vp9_detokenize.h"
35 #include "vp9/decoder/vp9_decodemv.h" 35 #include "vp9/decoder/vp9_decodemv.h"
36 #include "vp9/decoder/vp9_decoder.h"
36 #include "vp9/decoder/vp9_dsubexp.h" 37 #include "vp9/decoder/vp9_dsubexp.h"
37 #include "vp9/decoder/vp9_dthread.h" 38 #include "vp9/decoder/vp9_dthread.h"
38 #include "vp9/decoder/vp9_onyxd_int.h"
39 #include "vp9/decoder/vp9_read_bit_buffer.h" 39 #include "vp9/decoder/vp9_read_bit_buffer.h"
40 #include "vp9/decoder/vp9_reader.h" 40 #include "vp9/decoder/vp9_reader.h"
41 #include "vp9/decoder/vp9_thread.h" 41 #include "vp9/decoder/vp9_thread.h"
42 42
43 static int is_compound_reference_allowed(const VP9_COMMON *cm) { 43 static int is_compound_reference_allowed(const VP9_COMMON *cm) {
44 int i; 44 int i;
45 for (i = 1; i < REFS_PER_FRAME; ++i) 45 for (i = 1; i < REFS_PER_FRAME; ++i)
46 if (cm->ref_frame_sign_bias[i + 1] != cm->ref_frame_sign_bias[1]) 46 if (cm->ref_frame_sign_bias[i + 1] != cm->ref_frame_sign_bias[1])
47 return 1; 47 return 1;
48 48
(...skipping 90 matching lines...)
139 } 139 }
140 140
141 if (cm->reference_mode != SINGLE_REFERENCE) 141 if (cm->reference_mode != SINGLE_REFERENCE)
142 for (i = 0; i < REF_CONTEXTS; ++i) 142 for (i = 0; i < REF_CONTEXTS; ++i)
143 vp9_diff_update_prob(r, &fc->comp_ref_prob[i]); 143 vp9_diff_update_prob(r, &fc->comp_ref_prob[i]);
144 } 144 }
145 145
146 static void update_mv_probs(vp9_prob *p, int n, vp9_reader *r) { 146 static void update_mv_probs(vp9_prob *p, int n, vp9_reader *r) {
147 int i; 147 int i;
148 for (i = 0; i < n; ++i) 148 for (i = 0; i < n; ++i)
149 if (vp9_read(r, NMV_UPDATE_PROB)) 149 if (vp9_read(r, MV_UPDATE_PROB))
150 p[i] = (vp9_read_literal(r, 7) << 1) | 1; 150 p[i] = (vp9_read_literal(r, 7) << 1) | 1;
151 } 151 }
152 152
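A note on the mapping above (not part of this patch): the updated probability is coded as a 7-bit literal and remapped to an odd 8-bit value, so a stored probability of zero can never occur. A minimal sketch of that mapping:

    /* Sketch only: how a 7-bit coded value v (0..127) becomes the stored
       probability. (v << 1) | 1 yields the odd values 1, 3, ..., 255. */
    static unsigned char prob_from_7bit_literal(int v) {
      return (unsigned char)((v << 1) | 1);
    }
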
153 static void read_mv_probs(nmv_context *ctx, int allow_hp, vp9_reader *r) { 153 static void read_mv_probs(nmv_context *ctx, int allow_hp, vp9_reader *r) {
154 int i, j; 154 int i, j;
155 155
156 update_mv_probs(ctx->joints, MV_JOINTS - 1, r); 156 update_mv_probs(ctx->joints, MV_JOINTS - 1, r);
157 157
158 for (i = 0; i < 2; ++i) { 158 for (i = 0; i < 2; ++i) {
159 nmv_component *const comp_ctx = &ctx->comps[i]; 159 nmv_component *const comp_ctx = &ctx->comps[i];
(...skipping 20 matching lines...)
180 } 180 }
181 181
182 static void setup_plane_dequants(VP9_COMMON *cm, MACROBLOCKD *xd, int q_index) { 182 static void setup_plane_dequants(VP9_COMMON *cm, MACROBLOCKD *xd, int q_index) {
183 int i; 183 int i;
184 xd->plane[0].dequant = cm->y_dequant[q_index]; 184 xd->plane[0].dequant = cm->y_dequant[q_index];
185 185
186 for (i = 1; i < MAX_MB_PLANE; i++) 186 for (i = 1; i < MAX_MB_PLANE; i++)
187 xd->plane[i].dequant = cm->uv_dequant[q_index]; 187 xd->plane[i].dequant = cm->uv_dequant[q_index];
188 } 188 }
189 189
190 // Allocate storage for each tile column.
191 // TODO(jzern): when max_threads <= 1 the same storage could be used for each
192 // tile.
193 static void alloc_tile_storage(VP9D_COMP *pbi, int tile_rows, int tile_cols) {
194 VP9_COMMON *const cm = &pbi->common;
195 const int aligned_mi_cols = mi_cols_aligned_to_sb(cm->mi_cols);
196 int i, tile_row, tile_col;
197
198 CHECK_MEM_ERROR(cm, pbi->mi_streams,
199 vpx_realloc(pbi->mi_streams, tile_rows * tile_cols *
200 sizeof(*pbi->mi_streams)));
201 for (tile_row = 0; tile_row < tile_rows; ++tile_row) {
202 for (tile_col = 0; tile_col < tile_cols; ++tile_col) {
203 TileInfo tile;
204 vp9_tile_init(&tile, cm, tile_row, tile_col);
205 pbi->mi_streams[tile_row * tile_cols + tile_col] =
206 &cm->mi[tile.mi_row_start * cm->mode_info_stride
207 + tile.mi_col_start];
208 }
209 }
210
211 // 2 contexts per 'mi unit', so that we have one context per 4x4 txfm
212 // block where mi unit size is 8x8.
213 CHECK_MEM_ERROR(cm, pbi->above_context[0],
214 vpx_realloc(pbi->above_context[0],
215 sizeof(*pbi->above_context[0]) * MAX_MB_PLANE *
216 2 * aligned_mi_cols));
217 for (i = 1; i < MAX_MB_PLANE; ++i) {
218 pbi->above_context[i] = pbi->above_context[0] +
219 i * sizeof(*pbi->above_context[0]) *
220 2 * aligned_mi_cols;
221 }
222
223 // This is sized based on the entire frame. Each tile operates within its
224 // column bounds.
225 CHECK_MEM_ERROR(cm, pbi->above_seg_context,
226 vpx_realloc(pbi->above_seg_context,
227 sizeof(*pbi->above_seg_context) *
228 aligned_mi_cols));
229 }
230
231 static void inverse_transform_block(MACROBLOCKD* xd, int plane, int block, 190 static void inverse_transform_block(MACROBLOCKD* xd, int plane, int block,
232 TX_SIZE tx_size, uint8_t *dst, int stride, 191 TX_SIZE tx_size, uint8_t *dst, int stride,
233 int eob) { 192 int eob) {
234 struct macroblockd_plane *const pd = &xd->plane[plane]; 193 struct macroblockd_plane *const pd = &xd->plane[plane];
235 if (eob > 0) { 194 if (eob > 0) {
236 TX_TYPE tx_type; 195 TX_TYPE tx_type;
237 const int plane_type = pd->plane_type; 196 const PLANE_TYPE plane_type = pd->plane_type;
238 int16_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block); 197 int16_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
239 switch (tx_size) { 198 switch (tx_size) {
240 case TX_4X4: 199 case TX_4X4:
241 tx_type = get_tx_type_4x4(plane_type, xd, block); 200 tx_type = get_tx_type_4x4(plane_type, xd, block);
242 if (tx_type == DCT_DCT) 201 if (tx_type == DCT_DCT)
243 xd->itxm_add(dqcoeff, dst, stride, eob); 202 xd->itxm_add(dqcoeff, dst, stride, eob);
244 else 203 else
245 vp9_iht4x4_16_add(dqcoeff, dst, stride, tx_type); 204 vp9_iht4x4_16_add(dqcoeff, dst, stride, tx_type);
246 break; 205 break;
247 case TX_8X8: 206 case TX_8X8:
248 tx_type = get_tx_type_8x8(plane_type, xd); 207 tx_type = get_tx_type(plane_type, xd);
249 vp9_iht8x8_add(tx_type, dqcoeff, dst, stride, eob); 208 vp9_iht8x8_add(tx_type, dqcoeff, dst, stride, eob);
250 break; 209 break;
251 case TX_16X16: 210 case TX_16X16:
252 tx_type = get_tx_type_16x16(plane_type, xd); 211 tx_type = get_tx_type(plane_type, xd);
253 vp9_iht16x16_add(tx_type, dqcoeff, dst, stride, eob); 212 vp9_iht16x16_add(tx_type, dqcoeff, dst, stride, eob);
254 break; 213 break;
255 case TX_32X32: 214 case TX_32X32:
256 tx_type = DCT_DCT; 215 tx_type = DCT_DCT;
257 vp9_idct32x32_add(dqcoeff, dst, stride, eob); 216 vp9_idct32x32_add(dqcoeff, dst, stride, eob);
258 break; 217 break;
259 default: 218 default:
260 assert(0 && "Invalid transform size"); 219 assert(0 && "Invalid transform size");
261 } 220 }
262 221
(...skipping 12 matching lines...)
275 234
276 struct intra_args { 235 struct intra_args {
277 VP9_COMMON *cm; 236 VP9_COMMON *cm;
278 MACROBLOCKD *xd; 237 MACROBLOCKD *xd;
279 vp9_reader *r; 238 vp9_reader *r;
280 }; 239 };
281 240
282 static void predict_and_reconstruct_intra_block(int plane, int block, 241 static void predict_and_reconstruct_intra_block(int plane, int block,
283 BLOCK_SIZE plane_bsize, 242 BLOCK_SIZE plane_bsize,
284 TX_SIZE tx_size, void *arg) { 243 TX_SIZE tx_size, void *arg) {
285 struct intra_args *const args = arg; 244 struct intra_args *const args = (struct intra_args *)arg;
286 VP9_COMMON *const cm = args->cm; 245 VP9_COMMON *const cm = args->cm;
287 MACROBLOCKD *const xd = args->xd; 246 MACROBLOCKD *const xd = args->xd;
288 struct macroblockd_plane *const pd = &xd->plane[plane]; 247 struct macroblockd_plane *const pd = &xd->plane[plane];
289 MODE_INFO *const mi = xd->mi_8x8[0]; 248 MODE_INFO *const mi = xd->mi[0];
290 const MB_PREDICTION_MODE mode = (plane == 0) ? get_y_mode(mi, block) 249 const MB_PREDICTION_MODE mode = (plane == 0) ? get_y_mode(mi, block)
291 : mi->mbmi.uv_mode; 250 : mi->mbmi.uv_mode;
292 int x, y; 251 int x, y;
293 uint8_t *dst; 252 uint8_t *dst;
294 txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &x, &y); 253 txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &x, &y);
295 dst = &pd->dst.buf[4 * y * pd->dst.stride + 4 * x]; 254 dst = &pd->dst.buf[4 * y * pd->dst.stride + 4 * x];
296 255
297 vp9_predict_intra_block(xd, block >> (tx_size << 1), 256 vp9_predict_intra_block(xd, block >> (tx_size << 1),
298 b_width_log2(plane_bsize), tx_size, mode, 257 b_width_log2(plane_bsize), tx_size, mode,
299 dst, pd->dst.stride, dst, pd->dst.stride, 258 dst, pd->dst.stride, dst, pd->dst.stride,
(...skipping 11 matching lines...)
311 struct inter_args { 270 struct inter_args {
312 VP9_COMMON *cm; 271 VP9_COMMON *cm;
313 MACROBLOCKD *xd; 272 MACROBLOCKD *xd;
314 vp9_reader *r; 273 vp9_reader *r;
315 int *eobtotal; 274 int *eobtotal;
316 }; 275 };
317 276
318 static void reconstruct_inter_block(int plane, int block, 277 static void reconstruct_inter_block(int plane, int block,
319 BLOCK_SIZE plane_bsize, 278 BLOCK_SIZE plane_bsize,
320 TX_SIZE tx_size, void *arg) { 279 TX_SIZE tx_size, void *arg) {
321 struct inter_args *args = arg; 280 struct inter_args *args = (struct inter_args *)arg;
322 VP9_COMMON *const cm = args->cm; 281 VP9_COMMON *const cm = args->cm;
323 MACROBLOCKD *const xd = args->xd; 282 MACROBLOCKD *const xd = args->xd;
324 struct macroblockd_plane *const pd = &xd->plane[plane]; 283 struct macroblockd_plane *const pd = &xd->plane[plane];
325 int x, y, eob; 284 int x, y, eob;
326 txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &x, &y); 285 txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &x, &y);
327 eob = vp9_decode_block_tokens(cm, xd, plane, block, plane_bsize, x, y, 286 eob = vp9_decode_block_tokens(cm, xd, plane, block, plane_bsize, x, y,
328 tx_size, args->r); 287 tx_size, args->r);
329 inverse_transform_block(xd, plane, block, tx_size, 288 inverse_transform_block(xd, plane, block, tx_size,
330 &pd->dst.buf[4 * y * pd->dst.stride + 4 * x], 289 &pd->dst.buf[4 * y * pd->dst.stride + 4 * x],
331 pd->dst.stride, eob); 290 pd->dst.stride, eob);
332 *args->eobtotal += eob; 291 *args->eobtotal += eob;
333 } 292 }
334 293
335 static void set_offsets(VP9_COMMON *const cm, MACROBLOCKD *const xd, 294 static MB_MODE_INFO *set_offsets(VP9_COMMON *const cm, MACROBLOCKD *const xd,
336 const TileInfo *const tile, 295 const TileInfo *const tile,
337 BLOCK_SIZE bsize, int mi_row, int mi_col) { 296 BLOCK_SIZE bsize, int mi_row, int mi_col) {
338 const int bw = num_8x8_blocks_wide_lookup[bsize]; 297 const int bw = num_8x8_blocks_wide_lookup[bsize];
339 const int bh = num_8x8_blocks_high_lookup[bsize]; 298 const int bh = num_8x8_blocks_high_lookup[bsize];
340 const int x_mis = MIN(bw, cm->mi_cols - mi_col); 299 const int x_mis = MIN(bw, cm->mi_cols - mi_col);
341 const int y_mis = MIN(bh, cm->mi_rows - mi_row); 300 const int y_mis = MIN(bh, cm->mi_rows - mi_row);
342 const int offset = mi_row * cm->mode_info_stride + mi_col; 301 const int offset = mi_row * cm->mi_stride + mi_col;
343 const int tile_offset = tile->mi_row_start * cm->mode_info_stride +
344 tile->mi_col_start;
345 int x, y; 302 int x, y;
346 303
347 xd->mi_8x8 = cm->mi_grid_visible + offset; 304 xd->mi = cm->mi_grid_visible + offset;
348 xd->prev_mi_8x8 = cm->prev_mi_grid_visible + offset; 305 xd->mi[0] = &cm->mi[offset];
349 306 xd->mi[0]->mbmi.sb_type = bsize;
350 xd->last_mi = cm->coding_use_prev_mi && cm->prev_mi ?
351 xd->prev_mi_8x8[0] : NULL;
352
353 xd->mi_8x8[0] = xd->mi_stream + offset - tile_offset;
354 xd->mi_8x8[0]->mbmi.sb_type = bsize;
355 for (y = 0; y < y_mis; ++y) 307 for (y = 0; y < y_mis; ++y)
356 for (x = !y; x < x_mis; ++x) 308 for (x = !y; x < x_mis; ++x)
357 xd->mi_8x8[y * cm->mode_info_stride + x] = xd->mi_8x8[0]; 309 xd->mi[y * cm->mi_stride + x] = xd->mi[0];
358 310
359 set_skip_context(xd, xd->above_context, xd->left_context, mi_row, mi_col); 311 set_skip_context(xd, mi_row, mi_col);
360 312
361 // Distance of Mb to the various image edges. These are specified to 8th pel 313 // Distance of Mb to the various image edges. These are specified to 8th pel
362 // as they are always compared to values that are in 1/8th pel units 314 // as they are always compared to values that are in 1/8th pel units
363 set_mi_row_col(xd, tile, mi_row, bh, mi_col, bw, cm->mi_rows, cm->mi_cols); 315 set_mi_row_col(xd, tile, mi_row, bh, mi_col, bw, cm->mi_rows, cm->mi_cols);
364 316
365 setup_dst_planes(xd, get_frame_new_buffer(cm), mi_row, mi_col); 317 vp9_setup_dst_planes(xd, get_frame_new_buffer(cm), mi_row, mi_col);
318 return &xd->mi[0]->mbmi;
366 } 319 }
367 320
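For context, the offset arithmetic in set_offsets() treats the mode-info grid as a raster array with one entry per 8x8 mi unit, and the replication loop points every unit covered by the block at the same MODE_INFO. A standalone sketch of that indexing (the names here are illustrative, not libvpx API):

    /* Sketch only: raster indexing into an mi grid with mi_stride entries per
       row; every mi unit covered by a bw x bh block shares one MODE_INFO. */
    static void fill_mi_block(void **mi_grid, int mi_stride,
                              int mi_row, int mi_col, int bw, int bh,
                              void *mi) {
      const int offset = mi_row * mi_stride + mi_col;
      int x, y;
      for (y = 0; y < bh; ++y)
        for (x = 0; x < bw; ++x)
          mi_grid[offset + y * mi_stride + x] = mi;
    }
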
368 static void set_ref(VP9_COMMON *const cm, MACROBLOCKD *const xd, 321 static void set_ref(VP9_COMMON *const cm, MACROBLOCKD *const xd,
369 int idx, int mi_row, int mi_col) { 322 int idx, int mi_row, int mi_col) {
370 MB_MODE_INFO *const mbmi = &xd->mi_8x8[0]->mbmi; 323 MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
371 RefBuffer *ref_buffer = &cm->frame_refs[mbmi->ref_frame[idx] - LAST_FRAME]; 324 RefBuffer *ref_buffer = &cm->frame_refs[mbmi->ref_frame[idx] - LAST_FRAME];
372 xd->block_refs[idx] = ref_buffer; 325 xd->block_refs[idx] = ref_buffer;
373 if (!vp9_is_valid_scale(&ref_buffer->sf)) 326 if (!vp9_is_valid_scale(&ref_buffer->sf))
374 vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM, 327 vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
375 "Invalid scale factors"); 328 "Invalid scale factors");
376 setup_pre_planes(xd, idx, ref_buffer->buf, mi_row, mi_col, &ref_buffer->sf); 329 vp9_setup_pre_planes(xd, idx, ref_buffer->buf, mi_row, mi_col,
330 &ref_buffer->sf);
377 xd->corrupted |= ref_buffer->buf->corrupted; 331 xd->corrupted |= ref_buffer->buf->corrupted;
378 } 332 }
379 333
380 static void decode_modes_b(VP9_COMMON *const cm, MACROBLOCKD *const xd, 334 static void decode_block(VP9_COMMON *const cm, MACROBLOCKD *const xd,
381 const TileInfo *const tile, 335 const TileInfo *const tile,
382 int mi_row, int mi_col, 336 int mi_row, int mi_col,
383 vp9_reader *r, BLOCK_SIZE bsize) { 337 vp9_reader *r, BLOCK_SIZE bsize) {
384 const int less8x8 = bsize < BLOCK_8X8; 338 const int less8x8 = bsize < BLOCK_8X8;
385 MB_MODE_INFO *mbmi; 339 MB_MODE_INFO *mbmi = set_offsets(cm, xd, tile, bsize, mi_row, mi_col);
386
387 set_offsets(cm, xd, tile, bsize, mi_row, mi_col);
388 vp9_read_mode_info(cm, xd, tile, mi_row, mi_col, r); 340 vp9_read_mode_info(cm, xd, tile, mi_row, mi_col, r);
389 341
390 if (less8x8) 342 if (less8x8)
391 bsize = BLOCK_8X8; 343 bsize = BLOCK_8X8;
392 344
393 // Has to be called after set_offsets
394 mbmi = &xd->mi_8x8[0]->mbmi;
395
396 if (mbmi->skip) { 345 if (mbmi->skip) {
397 reset_skip_context(xd, bsize); 346 reset_skip_context(xd, bsize);
398 } else { 347 } else {
399 if (cm->seg.enabled) 348 if (cm->seg.enabled)
400 setup_plane_dequants(cm, xd, vp9_get_qindex(&cm->seg, mbmi->segment_id, 349 setup_plane_dequants(cm, xd, vp9_get_qindex(&cm->seg, mbmi->segment_id,
401 cm->base_qindex)); 350 cm->base_qindex));
402 } 351 }
403 352
404 if (!is_inter_block(mbmi)) { 353 if (!is_inter_block(mbmi)) {
405 struct intra_args arg = { cm, xd, r }; 354 struct intra_args arg = { cm, xd, r };
406 vp9_foreach_transformed_block(xd, bsize, 355 vp9_foreach_transformed_block(xd, bsize,
407 predict_and_reconstruct_intra_block, &arg); 356 predict_and_reconstruct_intra_block, &arg);
408 } else { 357 } else {
409 // Setup 358 // Setup
410 set_ref(cm, xd, 0, mi_row, mi_col); 359 set_ref(cm, xd, 0, mi_row, mi_col);
411 if (has_second_ref(mbmi)) 360 if (has_second_ref(mbmi))
412 set_ref(cm, xd, 1, mi_row, mi_col); 361 set_ref(cm, xd, 1, mi_row, mi_col);
413 362
414 xd->interp_kernel = vp9_get_interp_kernel(mbmi->interp_filter);
415
416 // Prediction 363 // Prediction
417 vp9_dec_build_inter_predictors_sb(xd, mi_row, mi_col, bsize); 364 vp9_dec_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
418 365
419 // Reconstruction 366 // Reconstruction
420 if (!mbmi->skip) { 367 if (!mbmi->skip) {
421 int eobtotal = 0; 368 int eobtotal = 0;
422 struct inter_args arg = { cm, xd, r, &eobtotal }; 369 struct inter_args arg = { cm, xd, r, &eobtotal };
423 vp9_foreach_transformed_block(xd, bsize, reconstruct_inter_block, &arg); 370 vp9_foreach_transformed_block(xd, bsize, reconstruct_inter_block, &arg);
424 if (!less8x8 && eobtotal == 0) 371 if (!less8x8 && eobtotal == 0)
425 mbmi->skip = 1; // skip loopfilter 372 mbmi->skip = 1; // skip loopfilter
426 } 373 }
427 } 374 }
428 375
429 xd->corrupted |= vp9_reader_has_error(r); 376 xd->corrupted |= vp9_reader_has_error(r);
430 } 377 }
431 378
432 static PARTITION_TYPE read_partition(VP9_COMMON *cm, MACROBLOCKD *xd, int hbs, 379 static PARTITION_TYPE read_partition(VP9_COMMON *cm, MACROBLOCKD *xd, int hbs,
433 int mi_row, int mi_col, BLOCK_SIZE bsize, 380 int mi_row, int mi_col, BLOCK_SIZE bsize,
434 vp9_reader *r) { 381 vp9_reader *r) {
435 const int ctx = partition_plane_context(xd->above_seg_context, 382 const int ctx = partition_plane_context(xd, mi_row, mi_col, bsize);
436 xd->left_seg_context,
437 mi_row, mi_col, bsize);
438 const vp9_prob *const probs = get_partition_probs(cm, ctx); 383 const vp9_prob *const probs = get_partition_probs(cm, ctx);
439 const int has_rows = (mi_row + hbs) < cm->mi_rows; 384 const int has_rows = (mi_row + hbs) < cm->mi_rows;
440 const int has_cols = (mi_col + hbs) < cm->mi_cols; 385 const int has_cols = (mi_col + hbs) < cm->mi_cols;
441 PARTITION_TYPE p; 386 PARTITION_TYPE p;
442 387
443 if (has_rows && has_cols) 388 if (has_rows && has_cols)
444 p = vp9_read_tree(r, vp9_partition_tree, probs); 389 p = (PARTITION_TYPE)vp9_read_tree(r, vp9_partition_tree, probs);
445 else if (!has_rows && has_cols) 390 else if (!has_rows && has_cols)
446 p = vp9_read(r, probs[1]) ? PARTITION_SPLIT : PARTITION_HORZ; 391 p = vp9_read(r, probs[1]) ? PARTITION_SPLIT : PARTITION_HORZ;
447 else if (has_rows && !has_cols) 392 else if (has_rows && !has_cols)
448 p = vp9_read(r, probs[2]) ? PARTITION_SPLIT : PARTITION_VERT; 393 p = vp9_read(r, probs[2]) ? PARTITION_SPLIT : PARTITION_VERT;
449 else 394 else
450 p = PARTITION_SPLIT; 395 p = PARTITION_SPLIT;
451 396
452 if (!cm->frame_parallel_decoding_mode) 397 if (!cm->frame_parallel_decoding_mode)
453 ++cm->counts.partition[ctx][p]; 398 ++cm->counts.partition[ctx][p];
454 399
455 return p; 400 return p;
456 } 401 }
457 402
458 static void decode_modes_sb(VP9_COMMON *const cm, MACROBLOCKD *const xd, 403 static void decode_partition(VP9_COMMON *const cm, MACROBLOCKD *const xd,
459 const TileInfo *const tile, 404 const TileInfo *const tile,
460 int mi_row, int mi_col, 405 int mi_row, int mi_col,
461 vp9_reader* r, BLOCK_SIZE bsize) { 406 vp9_reader* r, BLOCK_SIZE bsize) {
462 const int hbs = num_8x8_blocks_wide_lookup[bsize] / 2; 407 const int hbs = num_8x8_blocks_wide_lookup[bsize] / 2;
463 PARTITION_TYPE partition; 408 PARTITION_TYPE partition;
464 BLOCK_SIZE subsize; 409 BLOCK_SIZE subsize;
465 410
466 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) 411 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
467 return; 412 return;
468 413
469 partition = read_partition(cm, xd, hbs, mi_row, mi_col, bsize, r); 414 partition = read_partition(cm, xd, hbs, mi_row, mi_col, bsize, r);
470 subsize = get_subsize(bsize, partition); 415 subsize = get_subsize(bsize, partition);
471 if (subsize < BLOCK_8X8) { 416 if (subsize < BLOCK_8X8) {
472 decode_modes_b(cm, xd, tile, mi_row, mi_col, r, subsize); 417 decode_block(cm, xd, tile, mi_row, mi_col, r, subsize);
473 } else { 418 } else {
474 switch (partition) { 419 switch (partition) {
475 case PARTITION_NONE: 420 case PARTITION_NONE:
476 decode_modes_b(cm, xd, tile, mi_row, mi_col, r, subsize); 421 decode_block(cm, xd, tile, mi_row, mi_col, r, subsize);
477 break; 422 break;
478 case PARTITION_HORZ: 423 case PARTITION_HORZ:
479 decode_modes_b(cm, xd, tile, mi_row, mi_col, r, subsize); 424 decode_block(cm, xd, tile, mi_row, mi_col, r, subsize);
480 if (mi_row + hbs < cm->mi_rows) 425 if (mi_row + hbs < cm->mi_rows)
481 decode_modes_b(cm, xd, tile, mi_row + hbs, mi_col, r, subsize); 426 decode_block(cm, xd, tile, mi_row + hbs, mi_col, r, subsize);
482 break; 427 break;
483 case PARTITION_VERT: 428 case PARTITION_VERT:
484 decode_modes_b(cm, xd, tile, mi_row, mi_col, r, subsize); 429 decode_block(cm, xd, tile, mi_row, mi_col, r, subsize);
485 if (mi_col + hbs < cm->mi_cols) 430 if (mi_col + hbs < cm->mi_cols)
486 decode_modes_b(cm, xd, tile, mi_row, mi_col + hbs, r, subsize); 431 decode_block(cm, xd, tile, mi_row, mi_col + hbs, r, subsize);
487 break; 432 break;
488 case PARTITION_SPLIT: 433 case PARTITION_SPLIT:
489 decode_modes_sb(cm, xd, tile, mi_row, mi_col, r, subsize); 434 decode_partition(cm, xd, tile, mi_row, mi_col, r, subsize);
490 decode_modes_sb(cm, xd, tile, mi_row, mi_col + hbs, r, subsize); 435 decode_partition(cm, xd, tile, mi_row, mi_col + hbs, r, subsize);
491 decode_modes_sb(cm, xd, tile, mi_row + hbs, mi_col, r, subsize); 436 decode_partition(cm, xd, tile, mi_row + hbs, mi_col, r, subsize);
492 decode_modes_sb(cm, xd, tile, mi_row + hbs, mi_col + hbs, r, subsize); 437 decode_partition(cm, xd, tile, mi_row + hbs, mi_col + hbs, r, subsize);
493 break; 438 break;
494 default: 439 default:
495 assert(0 && "Invalid partition type"); 440 assert(0 && "Invalid partition type");
496 } 441 }
497 } 442 }
498 443
499 // update partition context 444 // update partition context
500 if (bsize >= BLOCK_8X8 && 445 if (bsize >= BLOCK_8X8 &&
501 (bsize == BLOCK_8X8 || partition != PARTITION_SPLIT)) 446 (bsize == BLOCK_8X8 || partition != PARTITION_SPLIT))
502 update_partition_context(xd->above_seg_context, xd->left_seg_context, 447 update_partition_context(xd, mi_row, mi_col, subsize, bsize);
503 mi_row, mi_col, subsize, bsize);
504 } 448 }
505 449
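decode_partition() walks the superblock quadtree: each 64x64 superblock either codes a single block or splits into four quadrants, recursing down to 8x8 (sub-8x8 modes are handled inside decode_block). A toy, self-contained sketch of the same recursion order; none of these names are libvpx API, and every block is assumed to split down to a 16x16 leaf just to keep the output short:

    #include <stdio.h>

    /* Toy sketch: visits quadrants in the same order as the PARTITION_SPLIT
       case of decode_partition(). */
    static void walk(int row, int col, int size) {
      if (size == 16) {               /* pretend 16x16 is the leaf */
        printf("block at (%d,%d) %dx%d\n", row, col, size, size);
        return;
      }
      walk(row, col, size / 2);
      walk(row, col + size / 2, size / 2);
      walk(row + size / 2, col, size / 2);
      walk(row + size / 2, col + size / 2, size / 2);
    }

    int main(void) {
      walk(0, 0, 64);                 /* one 64x64 superblock */
      return 0;
    }
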
506 static void setup_token_decoder(const uint8_t *data, 450 static void setup_token_decoder(const uint8_t *data,
507 const uint8_t *data_end, 451 const uint8_t *data_end,
508 size_t read_size, 452 size_t read_size,
509 struct vpx_internal_error_info *error_info, 453 struct vpx_internal_error_info *error_info,
510 vp9_reader *r) { 454 vp9_reader *r) {
511 // Validate the calculated partition length. If the buffer 455 // Validate the calculated partition length. If the buffer
512 // described by the partition can't be fully read, then restrict 456 // described by the partition can't be fully read, then restrict
513 // it to the portion that can be (for EC mode) or throw an error. 457 // it to the portion that can be (for EC mode) or throw an error.
(...skipping 147 matching lines...)
661 *height = h; 605 *height = h;
662 } 606 }
663 607
664 static void setup_display_size(VP9_COMMON *cm, struct vp9_read_bit_buffer *rb) { 608 static void setup_display_size(VP9_COMMON *cm, struct vp9_read_bit_buffer *rb) {
665 cm->display_width = cm->width; 609 cm->display_width = cm->width;
666 cm->display_height = cm->height; 610 cm->display_height = cm->height;
667 if (vp9_rb_read_bit(rb)) 611 if (vp9_rb_read_bit(rb))
668 read_frame_size(rb, &cm->display_width, &cm->display_height); 612 read_frame_size(rb, &cm->display_width, &cm->display_height);
669 } 613 }
670 614
671 static void apply_frame_size(VP9D_COMP *pbi, int width, int height) { 615 static void apply_frame_size(VP9_COMMON *cm, int width, int height) {
672 VP9_COMMON *cm = &pbi->common;
673
674 if (cm->width != width || cm->height != height) { 616 if (cm->width != width || cm->height != height) {
675 // Change in frame size. 617 // Change in frame size.
676 // TODO(agrange) Don't test width/height, check overall size. 618 // TODO(agrange) Don't test width/height, check overall size.
677 if (width > cm->width || height > cm->height) { 619 if (width > cm->width || height > cm->height) {
678 // Rescale frame buffers only if they're not big enough already. 620 // Rescale frame buffers only if they're not big enough already.
679 if (vp9_resize_frame_buffers(cm, width, height)) 621 if (vp9_resize_frame_buffers(cm, width, height))
680 vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR, 622 vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
681 "Failed to allocate frame buffers"); 623 "Failed to allocate frame buffers");
682 } 624 }
683 625
684 cm->width = width; 626 cm->width = width;
685 cm->height = height; 627 cm->height = height;
686 628
687 vp9_update_frame_size(cm); 629 vp9_update_frame_size(cm);
688 } 630 }
689 631
690 if (vp9_realloc_frame_buffer( 632 if (vp9_realloc_frame_buffer(
691 get_frame_new_buffer(cm), cm->width, cm->height, 633 get_frame_new_buffer(cm), cm->width, cm->height,
692 cm->subsampling_x, cm->subsampling_y, VP9_DEC_BORDER_IN_PIXELS, 634 cm->subsampling_x, cm->subsampling_y, VP9_DEC_BORDER_IN_PIXELS,
693 &cm->frame_bufs[cm->new_fb_idx].raw_frame_buffer, cm->get_fb_cb, 635 &cm->frame_bufs[cm->new_fb_idx].raw_frame_buffer, cm->get_fb_cb,
694 cm->cb_priv)) { 636 cm->cb_priv)) {
695 vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR, 637 vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
696 "Failed to allocate frame buffer"); 638 "Failed to allocate frame buffer");
697 } 639 }
698 } 640 }
699 641
700 static void setup_frame_size(VP9D_COMP *pbi, 642 static void setup_frame_size(VP9_COMMON *cm, struct vp9_read_bit_buffer *rb) {
701 struct vp9_read_bit_buffer *rb) {
702 int width, height; 643 int width, height;
703 read_frame_size(rb, &width, &height); 644 read_frame_size(rb, &width, &height);
704 apply_frame_size(pbi, width, height); 645 apply_frame_size(cm, width, height);
705 setup_display_size(&pbi->common, rb); 646 setup_display_size(cm, rb);
706 } 647 }
707 648
708 static void setup_frame_size_with_refs(VP9D_COMP *pbi, 649 static void setup_frame_size_with_refs(VP9_COMMON *cm,
709 struct vp9_read_bit_buffer *rb) { 650 struct vp9_read_bit_buffer *rb) {
710 VP9_COMMON *const cm = &pbi->common;
711
712 int width, height; 651 int width, height;
713 int found = 0, i; 652 int found = 0, i;
714 for (i = 0; i < REFS_PER_FRAME; ++i) { 653 for (i = 0; i < REFS_PER_FRAME; ++i) {
715 if (vp9_rb_read_bit(rb)) { 654 if (vp9_rb_read_bit(rb)) {
716 YV12_BUFFER_CONFIG *const buf = cm->frame_refs[i].buf; 655 YV12_BUFFER_CONFIG *const buf = cm->frame_refs[i].buf;
717 width = buf->y_crop_width; 656 width = buf->y_crop_width;
718 height = buf->y_crop_height; 657 height = buf->y_crop_height;
719 found = 1; 658 found = 1;
720 break; 659 break;
721 } 660 }
722 } 661 }
723 662
724 if (!found) 663 if (!found)
725 read_frame_size(rb, &width, &height); 664 read_frame_size(rb, &width, &height);
726 665
727 if (width <= 0 || height <= 0) 666 if (width <= 0 || height <= 0)
728 vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME, 667 vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
729 "Referenced frame with invalid size"); 668 "Referenced frame with invalid size");
730 669
731 apply_frame_size(pbi, width, height); 670 apply_frame_size(cm, width, height);
732 setup_display_size(cm, rb); 671 setup_display_size(cm, rb);
733 } 672 }
734 673
735 static void setup_tile_context(VP9D_COMP *const pbi, MACROBLOCKD *const xd, 674 static void decode_tile(VP9Decoder *pbi, const TileInfo *const tile,
736 int tile_row, int tile_col) {
737 int i;
738 const int tile_cols = 1 << pbi->common.log2_tile_cols;
739 xd->mi_stream = pbi->mi_streams[tile_row * tile_cols + tile_col];
740
741 for (i = 0; i < MAX_MB_PLANE; ++i) {
742 xd->above_context[i] = pbi->above_context[i];
743 }
744 // see note in alloc_tile_storage().
745 xd->above_seg_context = pbi->above_seg_context;
746 }
747
748 static void decode_tile(VP9D_COMP *pbi, const TileInfo *const tile,
749 vp9_reader *r) { 675 vp9_reader *r) {
750 const int num_threads = pbi->oxcf.max_threads; 676 const int num_threads = pbi->oxcf.max_threads;
751 VP9_COMMON *const cm = &pbi->common; 677 VP9_COMMON *const cm = &pbi->common;
752 int mi_row, mi_col; 678 int mi_row, mi_col;
753 MACROBLOCKD *xd = &pbi->mb; 679 MACROBLOCKD *xd = &pbi->mb;
754 680
755 if (pbi->do_loopfilter_inline) { 681 if (pbi->do_loopfilter_inline) {
756 LFWorkerData *const lf_data = (LFWorkerData*)pbi->lf_worker.data1; 682 LFWorkerData *const lf_data = (LFWorkerData*)pbi->lf_worker.data1;
757 lf_data->frame_buffer = get_frame_new_buffer(cm); 683 lf_data->frame_buffer = get_frame_new_buffer(cm);
758 lf_data->cm = cm; 684 lf_data->cm = cm;
759 lf_data->xd = pbi->mb; 685 lf_data->xd = pbi->mb;
760 lf_data->stop = 0; 686 lf_data->stop = 0;
761 lf_data->y_only = 0; 687 lf_data->y_only = 0;
762 vp9_loop_filter_frame_init(cm, cm->lf.filter_level); 688 vp9_loop_filter_frame_init(cm, cm->lf.filter_level);
763 } 689 }
764 690
765 for (mi_row = tile->mi_row_start; mi_row < tile->mi_row_end; 691 for (mi_row = tile->mi_row_start; mi_row < tile->mi_row_end;
766 mi_row += MI_BLOCK_SIZE) { 692 mi_row += MI_BLOCK_SIZE) {
767 // For a SB there are 2 left contexts, each pertaining to a MB row within 693 // For a SB there are 2 left contexts, each pertaining to a MB row within
768 vp9_zero(xd->left_context); 694 vp9_zero(xd->left_context);
769 vp9_zero(xd->left_seg_context); 695 vp9_zero(xd->left_seg_context);
770 for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end; 696 for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end;
771 mi_col += MI_BLOCK_SIZE) { 697 mi_col += MI_BLOCK_SIZE) {
772 decode_modes_sb(cm, xd, tile, mi_row, mi_col, r, BLOCK_64X64); 698 decode_partition(cm, xd, tile, mi_row, mi_col, r, BLOCK_64X64);
773 } 699 }
774 700
775 if (pbi->do_loopfilter_inline) { 701 if (pbi->do_loopfilter_inline) {
776 const int lf_start = mi_row - MI_BLOCK_SIZE; 702 const int lf_start = mi_row - MI_BLOCK_SIZE;
777 LFWorkerData *const lf_data = (LFWorkerData*)pbi->lf_worker.data1; 703 LFWorkerData *const lf_data = (LFWorkerData*)pbi->lf_worker.data1;
778 704
779 // delay the loopfilter by 1 macroblock row. 705 // delay the loopfilter by 1 macroblock row.
780 if (lf_start < 0) continue; 706 if (lf_start < 0) continue;
781 707
782 // decoding has completed: finish up the loop filter in this thread. 708 // decoding has completed: finish up the loop filter in this thread.
(...skipping 60 matching lines...)
843 } 769 }
844 return size; 770 return size;
845 } 771 }
846 772
847 typedef struct TileBuffer { 773 typedef struct TileBuffer {
848 const uint8_t *data; 774 const uint8_t *data;
849 size_t size; 775 size_t size;
850 int col; // only used with multi-threaded decoding 776 int col; // only used with multi-threaded decoding
851 } TileBuffer; 777 } TileBuffer;
852 778
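For context (the body of get_tile() is elided above): each tile except the last one in the frame is preceded by a byte count (a 32-bit big-endian value in the VP9 bitstream), and the last tile simply runs to the end of the frame data. A hedged sketch of that framing, assuming the 4-byte prefix; read_be32() and next_tile() are stand-ins, not libvpx functions:

    #include <stddef.h>
    #include <stdint.h>

    static uint32_t read_be32(const uint8_t *p) {
      return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
             ((uint32_t)p[2] << 8) | (uint32_t)p[3];
    }

    /* Returns a pointer just past this tile; error handling omitted. */
    static const uint8_t *next_tile(const uint8_t *data,
                                    const uint8_t *data_end, int is_last,
                                    const uint8_t **tile_start,
                                    size_t *tile_size) {
      if (is_last) {
        *tile_start = data;                      /* no size prefix */
        *tile_size = (size_t)(data_end - data);
        return data_end;
      }
      *tile_size = read_be32(data);              /* 4-byte size prefix */
      *tile_start = data + 4;
      return *tile_start + *tile_size;
    }
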
853 static const uint8_t *decode_tiles(VP9D_COMP *pbi, const uint8_t *data) { 779 static const uint8_t *decode_tiles(VP9Decoder *pbi,
780 const uint8_t *data,
781 const uint8_t *data_end) {
854 VP9_COMMON *const cm = &pbi->common; 782 VP9_COMMON *const cm = &pbi->common;
855 MACROBLOCKD *const xd = &pbi->mb;
856 const int aligned_cols = mi_cols_aligned_to_sb(cm->mi_cols); 783 const int aligned_cols = mi_cols_aligned_to_sb(cm->mi_cols);
857 const int tile_cols = 1 << cm->log2_tile_cols; 784 const int tile_cols = 1 << cm->log2_tile_cols;
858 const int tile_rows = 1 << cm->log2_tile_rows; 785 const int tile_rows = 1 << cm->log2_tile_rows;
859 TileBuffer tile_buffers[4][1 << 6]; 786 TileBuffer tile_buffers[4][1 << 6];
860 int tile_row, tile_col; 787 int tile_row, tile_col;
861 const uint8_t *const data_end = pbi->source + pbi->source_sz;
862 const uint8_t *end = NULL; 788 const uint8_t *end = NULL;
863 vp9_reader r; 789 vp9_reader r;
864 790
865 assert(tile_rows <= 4); 791 assert(tile_rows <= 4);
866 assert(tile_cols <= (1 << 6)); 792 assert(tile_cols <= (1 << 6));
867 793
868 // Note: this memset assumes above_context[0], [1] and [2] 794 // Note: this memset assumes above_context[0], [1] and [2]
869 // are allocated as part of the same buffer. 795 // are allocated as part of the same buffer.
870 vpx_memset(pbi->above_context[0], 0, 796 vpx_memset(cm->above_context, 0,
871 sizeof(*pbi->above_context[0]) * MAX_MB_PLANE * 2 * aligned_cols); 797 sizeof(*cm->above_context) * MAX_MB_PLANE * 2 * aligned_cols);
872 798
873 vpx_memset(pbi->above_seg_context, 0, 799 vpx_memset(cm->above_seg_context, 0,
874 sizeof(*pbi->above_seg_context) * aligned_cols); 800 sizeof(*cm->above_seg_context) * aligned_cols);
875 801
876 // Load tile data into tile_buffers 802 // Load tile data into tile_buffers
877 for (tile_row = 0; tile_row < tile_rows; ++tile_row) { 803 for (tile_row = 0; tile_row < tile_rows; ++tile_row) {
878 for (tile_col = 0; tile_col < tile_cols; ++tile_col) { 804 for (tile_col = 0; tile_col < tile_cols; ++tile_col) {
879 const int last_tile = tile_row == tile_rows - 1 && 805 const int last_tile = tile_row == tile_rows - 1 &&
880 tile_col == tile_cols - 1; 806 tile_col == tile_cols - 1;
881 const size_t size = get_tile(data_end, last_tile, &cm->error, &data); 807 const size_t size = get_tile(data_end, last_tile, &cm->error, &data);
882 TileBuffer *const buf = &tile_buffers[tile_row][tile_col]; 808 TileBuffer *const buf = &tile_buffers[tile_row][tile_col];
883 buf->data = data; 809 buf->data = data;
884 buf->size = size; 810 buf->size = size;
885 data += size; 811 data += size;
886 } 812 }
887 } 813 }
888 814
889 // Decode tiles using data from tile_buffers 815 // Decode tiles using data from tile_buffers
890 for (tile_row = 0; tile_row < tile_rows; ++tile_row) { 816 for (tile_row = 0; tile_row < tile_rows; ++tile_row) {
891 for (tile_col = 0; tile_col < tile_cols; ++tile_col) { 817 for (tile_col = 0; tile_col < tile_cols; ++tile_col) {
892 const int col = pbi->oxcf.inv_tile_order ? tile_cols - tile_col - 1 818 const int col = pbi->oxcf.inv_tile_order ? tile_cols - tile_col - 1
893 : tile_col; 819 : tile_col;
894 const int last_tile = tile_row == tile_rows - 1 && 820 const int last_tile = tile_row == tile_rows - 1 &&
895 col == tile_cols - 1; 821 col == tile_cols - 1;
896 const TileBuffer *const buf = &tile_buffers[tile_row][col]; 822 const TileBuffer *const buf = &tile_buffers[tile_row][col];
897 TileInfo tile; 823 TileInfo tile;
898 824
899 vp9_tile_init(&tile, cm, tile_row, col); 825 vp9_tile_init(&tile, cm, tile_row, col);
900 setup_token_decoder(buf->data, data_end, buf->size, &cm->error, &r); 826 setup_token_decoder(buf->data, data_end, buf->size, &cm->error, &r);
901 setup_tile_context(pbi, xd, tile_row, col);
902 decode_tile(pbi, &tile, &r); 827 decode_tile(pbi, &tile, &r);
903 828
904 if (last_tile) 829 if (last_tile)
905 end = vp9_reader_find_end(&r); 830 end = vp9_reader_find_end(&r);
906 } 831 }
907 } 832 }
908 833
909 return end; 834 return end;
910 } 835 }
911 836
912 static void setup_tile_macroblockd(TileWorkerData *const tile_data) {
913 MACROBLOCKD *xd = &tile_data->xd;
914 struct macroblockd_plane *const pd = xd->plane;
915 int i;
916
917 for (i = 0; i < MAX_MB_PLANE; ++i) {
918 pd[i].dqcoeff = tile_data->dqcoeff[i];
919 vpx_memset(xd->plane[i].dqcoeff, 0, 64 * 64 * sizeof(int16_t));
920 }
921 }
922
923 static int tile_worker_hook(void *arg1, void *arg2) { 837 static int tile_worker_hook(void *arg1, void *arg2) {
924 TileWorkerData *const tile_data = (TileWorkerData*)arg1; 838 TileWorkerData *const tile_data = (TileWorkerData*)arg1;
925 const TileInfo *const tile = (TileInfo*)arg2; 839 const TileInfo *const tile = (TileInfo*)arg2;
926 int mi_row, mi_col; 840 int mi_row, mi_col;
927 841
928 for (mi_row = tile->mi_row_start; mi_row < tile->mi_row_end; 842 for (mi_row = tile->mi_row_start; mi_row < tile->mi_row_end;
929 mi_row += MI_BLOCK_SIZE) { 843 mi_row += MI_BLOCK_SIZE) {
930 vp9_zero(tile_data->xd.left_context); 844 vp9_zero(tile_data->xd.left_context);
931 vp9_zero(tile_data->xd.left_seg_context); 845 vp9_zero(tile_data->xd.left_seg_context);
932 for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end; 846 for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end;
933 mi_col += MI_BLOCK_SIZE) { 847 mi_col += MI_BLOCK_SIZE) {
934 decode_modes_sb(tile_data->cm, &tile_data->xd, tile, 848 decode_partition(tile_data->cm, &tile_data->xd, tile,
935 mi_row, mi_col, &tile_data->bit_reader, BLOCK_64X64); 849 mi_row, mi_col, &tile_data->bit_reader, BLOCK_64X64);
936 } 850 }
937 } 851 }
938 return !tile_data->xd.corrupted; 852 return !tile_data->xd.corrupted;
939 } 853 }
940 854
941 // sorts in descending order 855 // sorts in descending order
942 static int compare_tile_buffers(const void *a, const void *b) { 856 static int compare_tile_buffers(const void *a, const void *b) {
943 const TileBuffer *const buf1 = (const TileBuffer*)a; 857 const TileBuffer *const buf1 = (const TileBuffer*)a;
944 const TileBuffer *const buf2 = (const TileBuffer*)b; 858 const TileBuffer *const buf2 = (const TileBuffer*)b;
945 if (buf1->size < buf2->size) { 859 if (buf1->size < buf2->size) {
946 return 1; 860 return 1;
947 } else if (buf1->size == buf2->size) { 861 } else if (buf1->size == buf2->size) {
948 return 0; 862 return 0;
949 } else { 863 } else {
950 return -1; 864 return -1;
951 } 865 }
952 } 866 }
953 867
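The comparator above orders tiles largest-first; in the multi-threaded path below it is presumably paired with qsort() so the biggest tiles are dispatched to workers first. A self-contained usage sketch under that assumption (the struct and names here only mirror the ones in this file):

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct { const unsigned char *data; size_t size; int col; } TileBuf;

    /* Same ordering as compare_tile_buffers(): descending by size. */
    static int compare_desc(const void *a, const void *b) {
      const TileBuf *b1 = (const TileBuf *)a;
      const TileBuf *b2 = (const TileBuf *)b;
      return (b1->size < b2->size) ? 1 : (b1->size == b2->size) ? 0 : -1;
    }

    int main(void) {
      TileBuf tiles[3] = { { NULL, 100, 0 }, { NULL, 400, 1 }, { NULL, 250, 2 } };
      int i;
      qsort(tiles, 3, sizeof(tiles[0]), compare_desc);
      for (i = 0; i < 3; ++i)                    /* prints cols 1, 2, 0 */
        printf("col %d: %zu bytes\n", tiles[i].col, tiles[i].size);
      return 0;
    }
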
954 static const uint8_t *decode_tiles_mt(VP9D_COMP *pbi, const uint8_t *data) { 868 static const uint8_t *decode_tiles_mt(VP9Decoder *pbi,
869 const uint8_t *data,
870 const uint8_t *data_end) {
955 VP9_COMMON *const cm = &pbi->common; 871 VP9_COMMON *const cm = &pbi->common;
956 const uint8_t *bit_reader_end = NULL; 872 const uint8_t *bit_reader_end = NULL;
957 const uint8_t *const data_end = pbi->source + pbi->source_sz;
958 const int aligned_mi_cols = mi_cols_aligned_to_sb(cm->mi_cols); 873 const int aligned_mi_cols = mi_cols_aligned_to_sb(cm->mi_cols);
959 const int tile_cols = 1 << cm->log2_tile_cols; 874 const int tile_cols = 1 << cm->log2_tile_cols;
960 const int tile_rows = 1 << cm->log2_tile_rows; 875 const int tile_rows = 1 << cm->log2_tile_rows;
961 const int num_workers = MIN(pbi->oxcf.max_threads & ~1, tile_cols); 876 const int num_workers = MIN(pbi->oxcf.max_threads & ~1, tile_cols);
962 TileBuffer tile_buffers[1 << 6]; 877 TileBuffer tile_buffers[1 << 6];
963 int n; 878 int n;
964 int final_worker = -1; 879 int final_worker = -1;
965 880
966 assert(tile_cols <= (1 << 6)); 881 assert(tile_cols <= (1 << 6));
967 assert(tile_rows == 1); 882 assert(tile_rows == 1);
968 (void)tile_rows; 883 (void)tile_rows;
969 884
970 if (num_workers > pbi->num_tile_workers) { 885 // TODO(jzern): See if we can remove the restriction of passing in max
886 // threads to the decoder.
887 if (pbi->num_tile_workers == 0) {
888 const int num_threads = pbi->oxcf.max_threads & ~1;
971 int i; 889 int i;
890 // TODO(jzern): Allocate one less worker, as in the current code we only
891 // use num_threads - 1 workers.
972 CHECK_MEM_ERROR(cm, pbi->tile_workers, 892 CHECK_MEM_ERROR(cm, pbi->tile_workers,
973 vpx_realloc(pbi->tile_workers, 893 vpx_malloc(num_threads * sizeof(*pbi->tile_workers)));
974 num_workers * sizeof(*pbi->tile_workers))); 894 for (i = 0; i < num_threads; ++i) {
975 for (i = pbi->num_tile_workers; i < num_workers; ++i) {
976 VP9Worker *const worker = &pbi->tile_workers[i]; 895 VP9Worker *const worker = &pbi->tile_workers[i];
977 ++pbi->num_tile_workers; 896 ++pbi->num_tile_workers;
978 897
979 vp9_worker_init(worker); 898 vp9_worker_init(worker);
980 CHECK_MEM_ERROR(cm, worker->data1, 899 CHECK_MEM_ERROR(cm, worker->data1,
981 vpx_memalign(32, sizeof(TileWorkerData))); 900 vpx_memalign(32, sizeof(TileWorkerData)));
982 CHECK_MEM_ERROR(cm, worker->data2, vpx_malloc(sizeof(TileInfo))); 901 CHECK_MEM_ERROR(cm, worker->data2, vpx_malloc(sizeof(TileInfo)));
983 if (i < num_workers - 1 && !vp9_worker_reset(worker)) { 902 if (i < num_threads - 1 && !vp9_worker_reset(worker)) {
984 vpx_internal_error(&cm->error, VPX_CODEC_ERROR, 903 vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
985 "Tile decoder thread creation failed"); 904 "Tile decoder thread creation failed");
986 } 905 }
987 } 906 }
988 } 907 }
989 908
990 // Reset tile decoding hook 909 // Reset tile decoding hook
991 for (n = 0; n < pbi->num_tile_workers; ++n) { 910 for (n = 0; n < num_workers; ++n) {
992 pbi->tile_workers[n].hook = (VP9WorkerHook)tile_worker_hook; 911 pbi->tile_workers[n].hook = (VP9WorkerHook)tile_worker_hook;
993 } 912 }
994 913
995 // Note: this memset assumes above_context[0], [1] and [2] 914 // Note: this memset assumes above_context[0], [1] and [2]
996 // are allocated as part of the same buffer. 915 // are allocated as part of the same buffer.
997 vpx_memset(pbi->above_context[0], 0, 916 vpx_memset(cm->above_context, 0,
998 sizeof(*pbi->above_context[0]) * MAX_MB_PLANE * 917 sizeof(*cm->above_context) * MAX_MB_PLANE * 2 * aligned_mi_cols);
999 2 * aligned_mi_cols); 918 vpx_memset(cm->above_seg_context, 0,
1000 vpx_memset(pbi->above_seg_context, 0, 919 sizeof(*cm->above_seg_context) * aligned_mi_cols);
1001 sizeof(*pbi->above_seg_context) * aligned_mi_cols);
1002 920
1003 // Load tile data into tile_buffers 921 // Load tile data into tile_buffers
1004 for (n = 0; n < tile_cols; ++n) { 922 for (n = 0; n < tile_cols; ++n) {
1005 const size_t size = 923 const size_t size =
1006 get_tile(data_end, n == tile_cols - 1, &cm->error, &data); 924 get_tile(data_end, n == tile_cols - 1, &cm->error, &data);
1007 TileBuffer *const buf = &tile_buffers[n]; 925 TileBuffer *const buf = &tile_buffers[n];
1008 buf->data = data; 926 buf->data = data;
1009 buf->size = size; 927 buf->size = size;
1010 buf->col = n; 928 buf->col = n;
1011 data += size; 929 data += size;
(...skipping 24 matching lines...)
1036 for (i = 0; i < num_workers && n < tile_cols; ++i) { 954 for (i = 0; i < num_workers && n < tile_cols; ++i) {
1037 VP9Worker *const worker = &pbi->tile_workers[i]; 955 VP9Worker *const worker = &pbi->tile_workers[i];
1038 TileWorkerData *const tile_data = (TileWorkerData*)worker->data1; 956 TileWorkerData *const tile_data = (TileWorkerData*)worker->data1;
1039 TileInfo *const tile = (TileInfo*)worker->data2; 957 TileInfo *const tile = (TileInfo*)worker->data2;
1040 TileBuffer *const buf = &tile_buffers[n]; 958 TileBuffer *const buf = &tile_buffers[n];
1041 959
1042 tile_data->cm = cm; 960 tile_data->cm = cm;
1043 tile_data->xd = pbi->mb; 961 tile_data->xd = pbi->mb;
1044 tile_data->xd.corrupted = 0; 962 tile_data->xd.corrupted = 0;
1045 vp9_tile_init(tile, tile_data->cm, 0, buf->col); 963 vp9_tile_init(tile, tile_data->cm, 0, buf->col);
1046
1047 setup_token_decoder(buf->data, data_end, buf->size, &cm->error, 964 setup_token_decoder(buf->data, data_end, buf->size, &cm->error,
1048 &tile_data->bit_reader); 965 &tile_data->bit_reader);
1049 setup_tile_context(pbi, &tile_data->xd, 0, buf->col); 966 init_macroblockd(cm, &tile_data->xd);
1050 setup_tile_macroblockd(tile_data); 967 vp9_zero(tile_data->xd.dqcoeff);
1051 968
1052 worker->had_error = 0; 969 worker->had_error = 0;
1053 if (i == num_workers - 1 || n == tile_cols - 1) { 970 if (i == num_workers - 1 || n == tile_cols - 1) {
1054 vp9_worker_execute(worker); 971 vp9_worker_execute(worker);
1055 } else { 972 } else {
1056 vp9_worker_launch(worker); 973 vp9_worker_launch(worker);
1057 } 974 }
1058 975
1059 if (buf->col == tile_cols - 1) { 976 if (buf->col == tile_cols - 1) {
1060 final_worker = i; 977 final_worker = i;
(...skipping 24 matching lines...)
1085 vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM, 1002 vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
1086 "Invalid frame sync code"); 1003 "Invalid frame sync code");
1087 } 1004 }
1088 } 1005 }
1089 1006
1090 static void error_handler(void *data) { 1007 static void error_handler(void *data) {
1091 VP9_COMMON *const cm = (VP9_COMMON *)data; 1008 VP9_COMMON *const cm = (VP9_COMMON *)data;
1092 vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME, "Truncated packet"); 1009 vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME, "Truncated packet");
1093 } 1010 }
1094 1011
1095 #define RESERVED \ 1012 static BITSTREAM_PROFILE read_profile(struct vp9_read_bit_buffer *rb) {
1096 if (vp9_rb_read_bit(rb)) \ 1013 int profile = vp9_rb_read_bit(rb);
1097 vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM, \ 1014 profile |= vp9_rb_read_bit(rb) << 1;
1098 "Reserved bit must be unset") 1015 return (BITSTREAM_PROFILE) profile;
1016 }
1099 1017
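read_profile() replaces the old single version bit plus reserved bit: two bits are read least-significant first, giving profiles 0-3, and the header check below rejects values at or above MAX_PROFILES. A minimal sketch of the bit layout (not part of the patch):

    /* Sketch only: profile from the two bits as read above, LSB first.
       bit0=0,bit1=0 -> 0;  bit0=1,bit1=0 -> 1;
       bit0=0,bit1=1 -> 2;  bit0=1,bit1=1 -> 3 */
    static int profile_from_bits(int bit0, int bit1) {
      return bit0 | (bit1 << 1);
    }
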
1100 static size_t read_uncompressed_header(VP9D_COMP *pbi, 1018 static size_t read_uncompressed_header(VP9Decoder *pbi,
1101 struct vp9_read_bit_buffer *rb) { 1019 struct vp9_read_bit_buffer *rb) {
1102 VP9_COMMON *const cm = &pbi->common; 1020 VP9_COMMON *const cm = &pbi->common;
1103 size_t sz; 1021 size_t sz;
1104 int i; 1022 int i;
1105 1023
1106 cm->last_frame_type = cm->frame_type; 1024 cm->last_frame_type = cm->frame_type;
1107 1025
1108 if (vp9_rb_read_literal(rb, 2) != VP9_FRAME_MARKER) 1026 if (vp9_rb_read_literal(rb, 2) != VP9_FRAME_MARKER)
1109 vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM, 1027 vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
1110 "Invalid frame marker"); 1028 "Invalid frame marker");
1111 1029
1112 cm->version = vp9_rb_read_bit(rb); 1030 cm->profile = read_profile(rb);
1113 RESERVED; 1031 if (cm->profile >= MAX_PROFILES)
1032 vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
1033 "Unsupported bitstream profile");
1114 1034
1115 cm->show_existing_frame = vp9_rb_read_bit(rb); 1035 cm->show_existing_frame = vp9_rb_read_bit(rb);
1116 if (cm->show_existing_frame) { 1036 if (cm->show_existing_frame) {
1117 // Show an existing frame directly. 1037 // Show an existing frame directly.
1118 const int frame_to_show = cm->ref_frame_map[vp9_rb_read_literal(rb, 3)]; 1038 const int frame_to_show = cm->ref_frame_map[vp9_rb_read_literal(rb, 3)];
1119 1039
1120 if (cm->frame_bufs[frame_to_show].ref_count < 1) 1040 if (cm->frame_bufs[frame_to_show].ref_count < 1)
1121 vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM, 1041 vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
1122 "Buffer %d does not contain a decoded frame", 1042 "Buffer %d does not contain a decoded frame",
1123 frame_to_show); 1043 frame_to_show);
1124 1044
1125 ref_cnt_fb(cm->frame_bufs, &cm->new_fb_idx, frame_to_show); 1045 ref_cnt_fb(cm->frame_bufs, &cm->new_fb_idx, frame_to_show);
1126 pbi->refresh_frame_flags = 0; 1046 pbi->refresh_frame_flags = 0;
1127 cm->lf.filter_level = 0; 1047 cm->lf.filter_level = 0;
1128 cm->show_frame = 1; 1048 cm->show_frame = 1;
1129 return 0; 1049 return 0;
1130 } 1050 }
1131 1051
1132 cm->frame_type = (FRAME_TYPE) vp9_rb_read_bit(rb); 1052 cm->frame_type = (FRAME_TYPE) vp9_rb_read_bit(rb);
1133 cm->show_frame = vp9_rb_read_bit(rb); 1053 cm->show_frame = vp9_rb_read_bit(rb);
1134 cm->error_resilient_mode = vp9_rb_read_bit(rb); 1054 cm->error_resilient_mode = vp9_rb_read_bit(rb);
1135 1055
1136 if (cm->frame_type == KEY_FRAME) { 1056 if (cm->frame_type == KEY_FRAME) {
1137 check_sync_code(cm, rb); 1057 check_sync_code(cm, rb);
1138 1058 if (cm->profile > PROFILE_1)
1139 cm->color_space = vp9_rb_read_literal(rb, 3); // colorspace 1059 cm->bit_depth = vp9_rb_read_bit(rb) ? BITS_12 : BITS_10;
1060 cm->color_space = (COLOR_SPACE)vp9_rb_read_literal(rb, 3);
1140 if (cm->color_space != SRGB) { 1061 if (cm->color_space != SRGB) {
1141 vp9_rb_read_bit(rb); // [16,235] (including xvycc) vs [0,255] range 1062 vp9_rb_read_bit(rb); // [16,235] (including xvycc) vs [0,255] range
1142 if (cm->version == 1) { 1063 if (cm->profile >= PROFILE_1) {
1143 cm->subsampling_x = vp9_rb_read_bit(rb); 1064 cm->subsampling_x = vp9_rb_read_bit(rb);
1144 cm->subsampling_y = vp9_rb_read_bit(rb); 1065 cm->subsampling_y = vp9_rb_read_bit(rb);
1145 vp9_rb_read_bit(rb); // has extra plane 1066 vp9_rb_read_bit(rb); // has extra plane
1146 } else { 1067 } else {
1147 cm->subsampling_y = cm->subsampling_x = 1; 1068 cm->subsampling_y = cm->subsampling_x = 1;
1148 } 1069 }
1149 } else { 1070 } else {
1150 if (cm->version == 1) { 1071 if (cm->profile >= PROFILE_1) {
1151 cm->subsampling_y = cm->subsampling_x = 0; 1072 cm->subsampling_y = cm->subsampling_x = 0;
1152 vp9_rb_read_bit(rb); // has extra plane 1073 vp9_rb_read_bit(rb); // has extra plane
1153 } else { 1074 } else {
1154 vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM, 1075 vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
1155 "RGB not supported in profile 0"); 1076 "RGB not supported in profile 0");
1156 } 1077 }
1157 } 1078 }
1158 1079
1159 pbi->refresh_frame_flags = (1 << REF_FRAMES) - 1; 1080 pbi->refresh_frame_flags = (1 << REF_FRAMES) - 1;
1160 1081
1161 for (i = 0; i < REFS_PER_FRAME; ++i) { 1082 for (i = 0; i < REFS_PER_FRAME; ++i) {
1162 cm->frame_refs[i].idx = cm->new_fb_idx; 1083 cm->frame_refs[i].idx = cm->new_fb_idx;
1163 cm->frame_refs[i].buf = get_frame_new_buffer(cm); 1084 cm->frame_refs[i].buf = get_frame_new_buffer(cm);
1164 } 1085 }
1165 1086
1166 setup_frame_size(pbi, rb); 1087 setup_frame_size(cm, rb);
1167 } else { 1088 } else {
1168 cm->intra_only = cm->show_frame ? 0 : vp9_rb_read_bit(rb); 1089 cm->intra_only = cm->show_frame ? 0 : vp9_rb_read_bit(rb);
1169 1090
1170 cm->reset_frame_context = cm->error_resilient_mode ? 1091 cm->reset_frame_context = cm->error_resilient_mode ?
1171 0 : vp9_rb_read_literal(rb, 2); 1092 0 : vp9_rb_read_literal(rb, 2);
1172 1093
1173 if (cm->intra_only) { 1094 if (cm->intra_only) {
1174 check_sync_code(cm, rb); 1095 check_sync_code(cm, rb);
1175 1096
1176 pbi->refresh_frame_flags = vp9_rb_read_literal(rb, REF_FRAMES); 1097 pbi->refresh_frame_flags = vp9_rb_read_literal(rb, REF_FRAMES);
1177 setup_frame_size(pbi, rb); 1098 setup_frame_size(cm, rb);
1178 } else { 1099 } else {
1179 pbi->refresh_frame_flags = vp9_rb_read_literal(rb, REF_FRAMES); 1100 pbi->refresh_frame_flags = vp9_rb_read_literal(rb, REF_FRAMES);
1180 1101
1181 for (i = 0; i < REFS_PER_FRAME; ++i) { 1102 for (i = 0; i < REFS_PER_FRAME; ++i) {
1182 const int ref = vp9_rb_read_literal(rb, REF_FRAMES_LOG2); 1103 const int ref = vp9_rb_read_literal(rb, REF_FRAMES_LOG2);
1183 const int idx = cm->ref_frame_map[ref]; 1104 const int idx = cm->ref_frame_map[ref];
1184 cm->frame_refs[i].idx = idx; 1105 cm->frame_refs[i].idx = idx;
1185 cm->frame_refs[i].buf = &cm->frame_bufs[idx].buf; 1106 cm->frame_refs[i].buf = &cm->frame_bufs[idx].buf;
1186 cm->ref_frame_sign_bias[LAST_FRAME + i] = vp9_rb_read_bit(rb); 1107 cm->ref_frame_sign_bias[LAST_FRAME + i] = vp9_rb_read_bit(rb);
1187 } 1108 }
1188 1109
1189 setup_frame_size_with_refs(pbi, rb); 1110 setup_frame_size_with_refs(cm, rb);
1190 1111
1191 cm->allow_high_precision_mv = vp9_rb_read_bit(rb); 1112 cm->allow_high_precision_mv = vp9_rb_read_bit(rb);
1192 cm->interp_filter = read_interp_filter(rb); 1113 cm->interp_filter = read_interp_filter(rb);
1193 1114
1194 for (i = 0; i < REFS_PER_FRAME; ++i) { 1115 for (i = 0; i < REFS_PER_FRAME; ++i) {
1195 RefBuffer *const ref_buf = &cm->frame_refs[i]; 1116 RefBuffer *const ref_buf = &cm->frame_refs[i];
1196 vp9_setup_scale_factors_for_frame(&ref_buf->sf, 1117 vp9_setup_scale_factors_for_frame(&ref_buf->sf,
1197 ref_buf->buf->y_crop_width, 1118 ref_buf->buf->y_crop_width,
1198 ref_buf->buf->y_crop_height, 1119 ref_buf->buf->y_crop_height,
1199 cm->width, cm->height); 1120 cm->width, cm->height);
(...skipping 27 matching lines...)
1227 setup_tile_info(cm, rb); 1148 setup_tile_info(cm, rb);
1228 sz = vp9_rb_read_literal(rb, 16); 1149 sz = vp9_rb_read_literal(rb, 16);
1229 1150
1230 if (sz == 0) 1151 if (sz == 0)
1231 vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME, 1152 vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
1232 "Invalid header size"); 1153 "Invalid header size");
1233 1154
1234 return sz; 1155 return sz;
1235 } 1156 }
1236 1157
1237 static int read_compressed_header(VP9D_COMP *pbi, const uint8_t *data, 1158 static int read_compressed_header(VP9Decoder *pbi, const uint8_t *data,
1238 size_t partition_size) { 1159 size_t partition_size) {
1239 VP9_COMMON *const cm = &pbi->common; 1160 VP9_COMMON *const cm = &pbi->common;
1240 MACROBLOCKD *const xd = &pbi->mb; 1161 MACROBLOCKD *const xd = &pbi->mb;
1241 FRAME_CONTEXT *const fc = &cm->fc; 1162 FRAME_CONTEXT *const fc = &cm->fc;
1242 vp9_reader r; 1163 vp9_reader r;
1243 int k; 1164 int k;
1244 1165
1245 if (vp9_reader_init(&r, data, partition_size)) 1166 if (vp9_reader_init(&r, data, partition_size))
1246 vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR, 1167 vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
1247 "Failed to allocate bool decoder 0"); 1168 "Failed to allocate bool decoder 0");
(...skipping 79 matching lines...)
1327 assert(!memcmp(cm->counts.single_ref, zero_counts.single_ref, 1248 assert(!memcmp(cm->counts.single_ref, zero_counts.single_ref,
1328 sizeof(cm->counts.single_ref))); 1249 sizeof(cm->counts.single_ref)));
1329 assert(!memcmp(cm->counts.comp_ref, zero_counts.comp_ref, 1250 assert(!memcmp(cm->counts.comp_ref, zero_counts.comp_ref,
1330 sizeof(cm->counts.comp_ref))); 1251 sizeof(cm->counts.comp_ref)));
1331 assert(!memcmp(&cm->counts.tx, &zero_counts.tx, sizeof(cm->counts.tx))); 1252 assert(!memcmp(&cm->counts.tx, &zero_counts.tx, sizeof(cm->counts.tx)));
1332 assert(!memcmp(cm->counts.skip, zero_counts.skip, sizeof(cm->counts.skip))); 1253 assert(!memcmp(cm->counts.skip, zero_counts.skip, sizeof(cm->counts.skip)));
1333 assert(!memcmp(&cm->counts.mv, &zero_counts.mv, sizeof(cm->counts.mv))); 1254 assert(!memcmp(&cm->counts.mv, &zero_counts.mv, sizeof(cm->counts.mv)));
1334 } 1255 }
1335 #endif // NDEBUG 1256 #endif // NDEBUG
1336 1257
1337 int vp9_decode_frame(VP9D_COMP *pbi, const uint8_t **p_data_end) { 1258 int vp9_decode_frame(VP9Decoder *pbi,
1338 int i; 1259 const uint8_t *data, const uint8_t *data_end,
1260 const uint8_t **p_data_end) {
1339 VP9_COMMON *const cm = &pbi->common; 1261 VP9_COMMON *const cm = &pbi->common;
1340 MACROBLOCKD *const xd = &pbi->mb; 1262 MACROBLOCKD *const xd = &pbi->mb;
1341 1263
1342 const uint8_t *data = pbi->source;
1343 const uint8_t *const data_end = pbi->source + pbi->source_sz;
1344
1345 struct vp9_read_bit_buffer rb = { data, data_end, 0, cm, error_handler }; 1264 struct vp9_read_bit_buffer rb = { data, data_end, 0, cm, error_handler };
1346 const size_t first_partition_size = read_uncompressed_header(pbi, &rb); 1265 const size_t first_partition_size = read_uncompressed_header(pbi, &rb);
1347 const int keyframe = cm->frame_type == KEY_FRAME; 1266 const int keyframe = cm->frame_type == KEY_FRAME;
1348 const int tile_rows = 1 << cm->log2_tile_rows; 1267 const int tile_rows = 1 << cm->log2_tile_rows;
1349 const int tile_cols = 1 << cm->log2_tile_cols; 1268 const int tile_cols = 1 << cm->log2_tile_cols;
1350 YV12_BUFFER_CONFIG *const new_fb = get_frame_new_buffer(cm); 1269 YV12_BUFFER_CONFIG *const new_fb = get_frame_new_buffer(cm);
1351 xd->cur_buf = new_fb; 1270 xd->cur_buf = new_fb;
1352 1271
1353 if (!first_partition_size) { 1272 if (!first_partition_size) {
1354 // showing a frame directly 1273 // showing a frame directly
1355 *p_data_end = data + 1; 1274 *p_data_end = data + 1;
1356 return 0; 1275 return 0;
1357 } 1276 }
1358 1277
1359 if (!pbi->decoded_key_frame && !keyframe) 1278 if (!pbi->decoded_key_frame && !keyframe)
1360 return -1; 1279 return -1;
1361 1280
1362 data += vp9_rb_bytes_read(&rb); 1281 data += vp9_rb_bytes_read(&rb);
1363 if (!read_is_valid(data, first_partition_size, data_end)) 1282 if (!read_is_valid(data, first_partition_size, data_end))
1364 vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME, 1283 vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
1365 "Truncated packet or corrupt header length"); 1284 "Truncated packet or corrupt header length");
1366 1285
1367 pbi->do_loopfilter_inline = 1286 pbi->do_loopfilter_inline =
1368 (cm->log2_tile_rows | cm->log2_tile_cols) == 0 && cm->lf.filter_level; 1287 (cm->log2_tile_rows | cm->log2_tile_cols) == 0 && cm->lf.filter_level;
1369 if (pbi->do_loopfilter_inline && pbi->lf_worker.data1 == NULL) { 1288 if (pbi->do_loopfilter_inline && pbi->lf_worker.data1 == NULL) {
1370 CHECK_MEM_ERROR(cm, pbi->lf_worker.data1, vpx_malloc(sizeof(LFWorkerData))); 1289 CHECK_MEM_ERROR(cm, pbi->lf_worker.data1,
1290 vpx_memalign(32, sizeof(LFWorkerData)));
1371 pbi->lf_worker.hook = (VP9WorkerHook)vp9_loop_filter_worker; 1291 pbi->lf_worker.hook = (VP9WorkerHook)vp9_loop_filter_worker;
1372 if (pbi->oxcf.max_threads > 1 && !vp9_worker_reset(&pbi->lf_worker)) { 1292 if (pbi->oxcf.max_threads > 1 && !vp9_worker_reset(&pbi->lf_worker)) {
1373 vpx_internal_error(&cm->error, VPX_CODEC_ERROR, 1293 vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
1374 "Loop filter thread creation failed"); 1294 "Loop filter thread creation failed");
1375 } 1295 }
1376 } 1296 }
1377 1297
1378 alloc_tile_storage(pbi, tile_rows, tile_cols); 1298 init_macroblockd(cm, &pbi->mb);
1379 1299
1380 xd->mode_info_stride = cm->mode_info_stride;
1381 if (cm->coding_use_prev_mi) 1300 if (cm->coding_use_prev_mi)
1382 set_prev_mi(cm); 1301 set_prev_mi(cm);
1383 else 1302 else
1384 cm->prev_mi = NULL; 1303 cm->prev_mi = NULL;
1385 1304
1386 setup_plane_dequants(cm, xd, cm->base_qindex); 1305 setup_plane_dequants(cm, xd, cm->base_qindex);
1387 vp9_setup_block_planes(xd, cm->subsampling_x, cm->subsampling_y); 1306 vp9_setup_block_planes(xd, cm->subsampling_x, cm->subsampling_y);
1388 1307
1389 cm->fc = cm->frame_contexts[cm->frame_context_idx]; 1308 cm->fc = cm->frame_contexts[cm->frame_context_idx];
1390 vp9_zero(cm->counts); 1309 vp9_zero(cm->counts);
1391 for (i = 0; i < MAX_MB_PLANE; ++i) 1310 vp9_zero(xd->dqcoeff);
1392 vpx_memset(xd->plane[i].dqcoeff, 0, 64 * 64 * sizeof(int16_t));
1393 1311
1394 xd->corrupted = 0; 1312 xd->corrupted = 0;
1395 new_fb->corrupted = read_compressed_header(pbi, data, first_partition_size); 1313 new_fb->corrupted = read_compressed_header(pbi, data, first_partition_size);
1396 1314
1397 // TODO(jzern): remove frame_parallel_decoding_mode restriction for 1315 // TODO(jzern): remove frame_parallel_decoding_mode restriction for
1398 // single-frame tile decoding. 1316 // single-frame tile decoding.
1399 if (pbi->oxcf.max_threads > 1 && tile_rows == 1 && tile_cols > 1 && 1317 if (pbi->oxcf.max_threads > 1 && tile_rows == 1 && tile_cols > 1 &&
1400 cm->frame_parallel_decoding_mode) { 1318 cm->frame_parallel_decoding_mode) {
1401 *p_data_end = decode_tiles_mt(pbi, data + first_partition_size); 1319 *p_data_end = decode_tiles_mt(pbi, data + first_partition_size, data_end);
1402 } else { 1320 } else {
1403 *p_data_end = decode_tiles(pbi, data + first_partition_size); 1321 *p_data_end = decode_tiles(pbi, data + first_partition_size, data_end);
1404 } 1322 }
1405 1323
1406 new_fb->corrupted |= xd->corrupted; 1324 new_fb->corrupted |= xd->corrupted;
1407 1325
1408 if (!pbi->decoded_key_frame) { 1326 if (!pbi->decoded_key_frame) {
1409 if (keyframe && !new_fb->corrupted) 1327 if (keyframe && !new_fb->corrupted)
1410 pbi->decoded_key_frame = 1; 1328 pbi->decoded_key_frame = 1;
1411 else 1329 else
1412 vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME, 1330 vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
1413 "A stream must start with a complete key frame"); 1331 "A stream must start with a complete key frame");
1414 } 1332 }
1415 1333
1416 if (!cm->error_resilient_mode && !cm->frame_parallel_decoding_mode) { 1334 if (!cm->error_resilient_mode && !cm->frame_parallel_decoding_mode) {
1417 vp9_adapt_coef_probs(cm); 1335 vp9_adapt_coef_probs(cm);
1418 1336
1419 if (!frame_is_intra_only(cm)) { 1337 if (!frame_is_intra_only(cm)) {
1420 vp9_adapt_mode_probs(cm); 1338 vp9_adapt_mode_probs(cm);
1421 vp9_adapt_mv_probs(cm, cm->allow_high_precision_mv); 1339 vp9_adapt_mv_probs(cm, cm->allow_high_precision_mv);
1422 } 1340 }
1423 } else { 1341 } else {
1424 debug_check_frame_counts(cm); 1342 debug_check_frame_counts(cm);
1425 } 1343 }
1426 1344
1427 if (cm->refresh_frame_context) 1345 if (cm->refresh_frame_context)
1428 cm->frame_contexts[cm->frame_context_idx] = cm->fc; 1346 cm->frame_contexts[cm->frame_context_idx] = cm->fc;
1429 1347
1430 return 0; 1348 return 0;
1431 } 1349 }