| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. | 2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| (...skipping 177 matching lines...) |
| 188 for (i = 1; i < MAX_MB_PLANE; i++) | 188 for (i = 1; i < MAX_MB_PLANE; i++) |
| 189 xd->plane[i].dequant = cm->uv_dequant[q_index]; | 189 xd->plane[i].dequant = cm->uv_dequant[q_index]; |
| 190 } | 190 } |
| 191 | 191 |
| 192 static void inverse_transform_block(MACROBLOCKD* xd, int plane, int block, | 192 static void inverse_transform_block(MACROBLOCKD* xd, int plane, int block, |
| 193 TX_SIZE tx_size, uint8_t *dst, int stride, | 193 TX_SIZE tx_size, uint8_t *dst, int stride, |
| 194 int eob) { | 194 int eob) { |
| 195 struct macroblockd_plane *const pd = &xd->plane[plane]; | 195 struct macroblockd_plane *const pd = &xd->plane[plane]; |
| 196 if (eob > 0) { | 196 if (eob > 0) { |
| 197 TX_TYPE tx_type; | 197 TX_TYPE tx_type; |
| 198 const PLANE_TYPE plane_type = pd->plane_type; | |
| 199 int16_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block); | 198 int16_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block); |
| 200 switch (tx_size) { | 199 if (xd->lossless) { |
| 201 case TX_4X4: | 200 tx_type = DCT_DCT; |
| 202 tx_type = get_tx_type_4x4(plane_type, xd, block); | 201 vp9_iwht4x4_add(dqcoeff, dst, stride, eob); |
| 203 if (tx_type == DCT_DCT) | 202 } else { |
| 204 xd->itxm_add(dqcoeff, dst, stride, eob); | 203 const PLANE_TYPE plane_type = pd->plane_type; |
| 205 else | 204 switch (tx_size) { |
| 206 vp9_iht4x4_16_add(dqcoeff, dst, stride, tx_type); | 205 case TX_4X4: |
| 207 break; | 206 tx_type = get_tx_type_4x4(plane_type, xd, block); |
| 208 case TX_8X8: | 207 vp9_iht4x4_add(tx_type, dqcoeff, dst, stride, eob); |
| 209 tx_type = get_tx_type(plane_type, xd); | 208 break; |
| 210 vp9_iht8x8_add(tx_type, dqcoeff, dst, stride, eob); | 209 case TX_8X8: |
| 211 break; | 210 tx_type = get_tx_type(plane_type, xd); |
| 212 case TX_16X16: | 211 vp9_iht8x8_add(tx_type, dqcoeff, dst, stride, eob); |
| 213 tx_type = get_tx_type(plane_type, xd); | 212 break; |
| 214 vp9_iht16x16_add(tx_type, dqcoeff, dst, stride, eob); | 213 case TX_16X16: |
| 215 break; | 214 tx_type = get_tx_type(plane_type, xd); |
| 216 case TX_32X32: | 215 vp9_iht16x16_add(tx_type, dqcoeff, dst, stride, eob); |
| 217 tx_type = DCT_DCT; | 216 break; |
| 218 vp9_idct32x32_add(dqcoeff, dst, stride, eob); | 217 case TX_32X32: |
| 219 break; | 218 tx_type = DCT_DCT; |
| 220 default: | 219 vp9_idct32x32_add(dqcoeff, dst, stride, eob); |
| 221 assert(0 && "Invalid transform size"); | 220 break; |
| 221 default: |
| 222 assert(0 && "Invalid transform size"); |
| 223 } |
| 222 } | 224 } |
| 223 | 225 |
| 224 if (eob == 1) { | 226 if (eob == 1) { |
| 225 vpx_memset(dqcoeff, 0, 2 * sizeof(dqcoeff[0])); | 227 vpx_memset(dqcoeff, 0, 2 * sizeof(dqcoeff[0])); |
| 226 } else { | 228 } else { |
| 227 if (tx_type == DCT_DCT && tx_size <= TX_16X16 && eob <= 10) | 229 if (tx_type == DCT_DCT && tx_size <= TX_16X16 && eob <= 10) |
| 228 vpx_memset(dqcoeff, 0, 4 * (4 << tx_size) * sizeof(dqcoeff[0])); | 230 vpx_memset(dqcoeff, 0, 4 * (4 << tx_size) * sizeof(dqcoeff[0])); |
| 229 else if (tx_size == TX_32X32 && eob <= 34) | 231 else if (tx_size == TX_32X32 && eob <= 34) |
| 230 vpx_memset(dqcoeff, 0, 256 * sizeof(dqcoeff[0])); | 232 vpx_memset(dqcoeff, 0, 256 * sizeof(dqcoeff[0])); |
| 231 else | 233 else |
| (...skipping 349 matching lines...) |
| 581 update |= read_delta_q(rb, &cm->y_dc_delta_q); | 583 update |= read_delta_q(rb, &cm->y_dc_delta_q); |
| 582 update |= read_delta_q(rb, &cm->uv_dc_delta_q); | 584 update |= read_delta_q(rb, &cm->uv_dc_delta_q); |
| 583 update |= read_delta_q(rb, &cm->uv_ac_delta_q); | 585 update |= read_delta_q(rb, &cm->uv_ac_delta_q); |
| 584 if (update) | 586 if (update) |
| 585 vp9_init_dequantizer(cm); | 587 vp9_init_dequantizer(cm); |
| 586 | 588 |
| 587 xd->lossless = cm->base_qindex == 0 && | 589 xd->lossless = cm->base_qindex == 0 && |
| 588 cm->y_dc_delta_q == 0 && | 590 cm->y_dc_delta_q == 0 && |
| 589 cm->uv_dc_delta_q == 0 && | 591 cm->uv_dc_delta_q == 0 && |
| 590 cm->uv_ac_delta_q == 0; | 592 cm->uv_ac_delta_q == 0; |
| 591 | |
| 592 xd->itxm_add = xd->lossless ? vp9_iwht4x4_add : vp9_idct4x4_add; | |
| 593 } | 593 } |
| 594 | 594 |
| 595 static INTERP_FILTER read_interp_filter(struct vp9_read_bit_buffer *rb) { | 595 static INTERP_FILTER read_interp_filter(struct vp9_read_bit_buffer *rb) { |
| 596 const INTERP_FILTER literal_to_filter[] = { EIGHTTAP_SMOOTH, | 596 const INTERP_FILTER literal_to_filter[] = { EIGHTTAP_SMOOTH, |
| 597 EIGHTTAP, | 597 EIGHTTAP, |
| 598 EIGHTTAP_SHARP, | 598 EIGHTTAP_SHARP, |
| 599 BILINEAR }; | 599 BILINEAR }; |
| 600 return vp9_rb_read_bit(rb) ? SWITCHABLE | 600 return vp9_rb_read_bit(rb) ? SWITCHABLE |
| 601 : literal_to_filter[vp9_rb_read_literal(rb, 2)]; | 601 : literal_to_filter[vp9_rb_read_literal(rb, 2)]; |
| 602 } | 602 } |
| (...skipping 65 matching lines...) |
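The hunks above drop the `xd->itxm_add` function pointer in favor of an explicit `xd->lossless` check inside `inverse_transform_block()`: lossless frames always use the 4x4 inverse Walsh-Hadamard transform (`vp9_iwht4x4_add`), while lossy frames still dispatch on transform size and type. A minimal sketch of that dispatch shape, with stand-in enums and kernels (everything here except the control flow is an assumption, not the real libvpx declarations):

```c
/* Sketch only: stand-in types and kernels, not the libvpx declarations. */
#include <stdint.h>
#include <stdio.h>

typedef enum { TX_4X4, TX_8X8, TX_16X16, TX_32X32 } TX_SIZE;
typedef enum { DCT_DCT, ADST_DCT, DCT_ADST, ADST_ADST } TX_TYPE;

/* The real kernels add the inverse transform of dqcoeff into dst. */
static void iwht4x4_add(const int16_t *dqcoeff, uint8_t *dst, int stride,
                        int eob) {
  (void)dqcoeff; (void)dst; (void)stride; (void)eob;
  puts("4x4 inverse Walsh-Hadamard (lossless)");
}

static void iht_add(TX_SIZE tx_size, TX_TYPE tx_type, const int16_t *dqcoeff,
                    uint8_t *dst, int stride, int eob) {
  (void)dqcoeff; (void)dst; (void)stride; (void)eob;
  printf("inverse transform: size %d, type %d\n", (int)tx_size, (int)tx_type);
}

static void inverse_transform_block_sketch(int lossless, TX_SIZE tx_size,
                                           TX_TYPE tx_type,
                                           const int16_t *dqcoeff,
                                           uint8_t *dst, int stride, int eob) {
  if (eob <= 0)
    return;  /* no coded coefficients, nothing to add to the prediction */
  if (lossless) {
    /* Lossless coding always uses the 4x4 Walsh-Hadamard transform. */
    iwht4x4_add(dqcoeff, dst, stride, eob);
  } else {
    /* Lossy coding picks the kernel by transform size and type. */
    iht_add(tx_size, tx_type, dqcoeff, dst, stride, eob);
  }
}

int main(void) {
  int16_t dqcoeff[16] = { 0 };
  uint8_t dst[16] = { 0 };
  inverse_transform_block_sketch(1, TX_4X4, DCT_DCT, dqcoeff, dst, 4, 3);
  inverse_transform_block_sketch(0, TX_8X8, ADST_DCT, dqcoeff, dst, 8, 5);
  return 0;
}
```

Folding the branch into the function removes one per-block indirect call and keeps the lossless special case next to the other transform-size cases.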
| 668 read_frame_size(rb, &width, &height); | 668 read_frame_size(rb, &width, &height); |
| 669 | 669 |
| 670 if (width <= 0 || height <= 0) | 670 if (width <= 0 || height <= 0) |
| 671 vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME, | 671 vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME, |
| 672 "Referenced frame with invalid size"); | 672 "Referenced frame with invalid size"); |
| 673 | 673 |
| 674 apply_frame_size(cm, width, height); | 674 apply_frame_size(cm, width, height); |
| 675 setup_display_size(cm, rb); | 675 setup_display_size(cm, rb); |
| 676 } | 676 } |
| 677 | 677 |
| 678 static void decode_tile(VP9Decoder *pbi, const TileInfo *const tile, | |
| 679 int do_loopfilter_inline, vp9_reader *r) { | |
| 680 const int num_threads = pbi->max_threads; | |
| 681 VP9_COMMON *const cm = &pbi->common; | |
| 682 int mi_row, mi_col; | |
| 683 MACROBLOCKD *xd = &pbi->mb; | |
| 684 | |
| 685 if (do_loopfilter_inline) { | |
| 686 LFWorkerData *const lf_data = (LFWorkerData*)pbi->lf_worker.data1; | |
| 687 lf_data->frame_buffer = get_frame_new_buffer(cm); | |
| 688 lf_data->cm = cm; | |
| 689 vp9_copy(lf_data->planes, pbi->mb.plane); | |
| 690 lf_data->stop = 0; | |
| 691 lf_data->y_only = 0; | |
| 692 vp9_loop_filter_frame_init(cm, cm->lf.filter_level); | |
| 693 } | |
| 694 | |
| 695 for (mi_row = tile->mi_row_start; mi_row < tile->mi_row_end; | |
| 696 mi_row += MI_BLOCK_SIZE) { | |
| 697 // For a SB there are 2 left contexts, each pertaining to a MB row within | |
| 698 vp9_zero(xd->left_context); | |
| 699 vp9_zero(xd->left_seg_context); | |
| 700 for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end; | |
| 701 mi_col += MI_BLOCK_SIZE) { | |
| 702 decode_partition(cm, xd, tile, mi_row, mi_col, r, BLOCK_64X64); | |
| 703 } | |
| 704 | |
| 705 if (do_loopfilter_inline) { | |
| 706 const int lf_start = mi_row - MI_BLOCK_SIZE; | |
| 707 LFWorkerData *const lf_data = (LFWorkerData*)pbi->lf_worker.data1; | |
| 708 | |
| 709 // delay the loopfilter by 1 macroblock row. | |
| 710 if (lf_start < 0) continue; | |
| 711 | |
| 712 // decoding has completed: finish up the loop filter in this thread. | |
| 713 if (mi_row + MI_BLOCK_SIZE >= tile->mi_row_end) continue; | |
| 714 | |
| 715 vp9_worker_sync(&pbi->lf_worker); | |
| 716 lf_data->start = lf_start; | |
| 717 lf_data->stop = mi_row; | |
| 718 if (num_threads > 1) { | |
| 719 vp9_worker_launch(&pbi->lf_worker); | |
| 720 } else { | |
| 721 vp9_worker_execute(&pbi->lf_worker); | |
| 722 } | |
| 723 } | |
| 724 } | |
| 725 | |
| 726 if (do_loopfilter_inline) { | |
| 727 LFWorkerData *const lf_data = (LFWorkerData*)pbi->lf_worker.data1; | |
| 728 | |
| 729 vp9_worker_sync(&pbi->lf_worker); | |
| 730 lf_data->start = lf_data->stop; | |
| 731 lf_data->stop = cm->mi_rows; | |
| 732 vp9_worker_execute(&pbi->lf_worker); | |
| 733 } | |
| 734 } | |
| 735 | |
| 736 static void setup_tile_info(VP9_COMMON *cm, struct vp9_read_bit_buffer *rb) { | 678 static void setup_tile_info(VP9_COMMON *cm, struct vp9_read_bit_buffer *rb) { |
| 737 int min_log2_tile_cols, max_log2_tile_cols, max_ones; | 679 int min_log2_tile_cols, max_log2_tile_cols, max_ones; |
| 738 vp9_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols); | 680 vp9_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols); |
| 739 | 681 |
| 740 // columns | 682 // columns |
| 741 max_ones = max_log2_tile_cols - min_log2_tile_cols; | 683 max_ones = max_log2_tile_cols - min_log2_tile_cols; |
| 742 cm->log2_tile_cols = min_log2_tile_cols; | 684 cm->log2_tile_cols = min_log2_tile_cols; |
| 743 while (max_ones-- && vp9_rb_read_bit(rb)) | 685 while (max_ones-- && vp9_rb_read_bit(rb)) |
| 744 cm->log2_tile_cols++; | 686 cm->log2_tile_cols++; |
| 745 | 687 |
| (...skipping 58 matching lines...) |
| 804 TileBuffer *const buf = &tile_buffers[r][c]; | 746 TileBuffer *const buf = &tile_buffers[r][c]; |
| 805 buf->col = c; | 747 buf->col = c; |
| 806 get_tile_buffer(data_end, is_last, &pbi->common.error, &data, | 748 get_tile_buffer(data_end, is_last, &pbi->common.error, &data, |
| 807 pbi->decrypt_cb, pbi->decrypt_state, buf); | 749 pbi->decrypt_cb, pbi->decrypt_state, buf); |
| 808 } | 750 } |
| 809 } | 751 } |
| 810 } | 752 } |
| 811 | 753 |
| 812 static const uint8_t *decode_tiles(VP9Decoder *pbi, | 754 static const uint8_t *decode_tiles(VP9Decoder *pbi, |
| 813 const uint8_t *data, | 755 const uint8_t *data, |
| 814 const uint8_t *data_end, | 756 const uint8_t *data_end) { |
| 815 int do_loopfilter_inline) { | |
| 816 VP9_COMMON *const cm = &pbi->common; | 757 VP9_COMMON *const cm = &pbi->common; |
| 817 const int aligned_cols = mi_cols_aligned_to_sb(cm->mi_cols); | 758 const int aligned_cols = mi_cols_aligned_to_sb(cm->mi_cols); |
| 818 const int tile_cols = 1 << cm->log2_tile_cols; | 759 const int tile_cols = 1 << cm->log2_tile_cols; |
| 819 const int tile_rows = 1 << cm->log2_tile_rows; | 760 const int tile_rows = 1 << cm->log2_tile_rows; |
| 820 TileBuffer tile_buffers[4][1 << 6]; | 761 TileBuffer tile_buffers[4][1 << 6]; |
| 821 int tile_row, tile_col; | 762 int tile_row, tile_col; |
| 822 const uint8_t *end = NULL; | 763 int mi_row, mi_col; |
| 823 vp9_reader r; | 764 TileData *tile_data = NULL; |
| 765 |
| 766 if (cm->lf.filter_level && pbi->lf_worker.data1 == NULL) { |
| 767 CHECK_MEM_ERROR(cm, pbi->lf_worker.data1, |
| 768 vpx_memalign(32, sizeof(LFWorkerData))); |
| 769 pbi->lf_worker.hook = (VP9WorkerHook)vp9_loop_filter_worker; |
| 770 if (pbi->max_threads > 1 && !vp9_worker_reset(&pbi->lf_worker)) { |
| 771 vpx_internal_error(&cm->error, VPX_CODEC_ERROR, |
| 772 "Loop filter thread creation failed"); |
| 773 } |
| 774 } |
| 775 |
| 776 if (cm->lf.filter_level) { |
| 777 LFWorkerData *const lf_data = (LFWorkerData*)pbi->lf_worker.data1; |
| 778 lf_data->frame_buffer = get_frame_new_buffer(cm); |
| 779 lf_data->cm = cm; |
| 780 vp9_copy(lf_data->planes, pbi->mb.plane); |
| 781 lf_data->stop = 0; |
| 782 lf_data->y_only = 0; |
| 783 vp9_loop_filter_frame_init(cm, cm->lf.filter_level); |
| 784 } |
| 824 | 785 |
| 825 assert(tile_rows <= 4); | 786 assert(tile_rows <= 4); |
| 826 assert(tile_cols <= (1 << 6)); | 787 assert(tile_cols <= (1 << 6)); |
| 827 | 788 |
| 828 // Note: this memset assumes above_context[0], [1] and [2] | 789 // Note: this memset assumes above_context[0], [1] and [2] |
| 829 // are allocated as part of the same buffer. | 790 // are allocated as part of the same buffer. |
| 830 vpx_memset(cm->above_context, 0, | 791 vpx_memset(cm->above_context, 0, |
| 831 sizeof(*cm->above_context) * MAX_MB_PLANE * 2 * aligned_cols); | 792 sizeof(*cm->above_context) * MAX_MB_PLANE * 2 * aligned_cols); |
| 832 | 793 |
| 833 vpx_memset(cm->above_seg_context, 0, | 794 vpx_memset(cm->above_seg_context, 0, |
| 834 sizeof(*cm->above_seg_context) * aligned_cols); | 795 sizeof(*cm->above_seg_context) * aligned_cols); |
| 835 | 796 |
| 836 get_tile_buffers(pbi, data, data_end, tile_cols, tile_rows, tile_buffers); | 797 get_tile_buffers(pbi, data, data_end, tile_cols, tile_rows, tile_buffers); |
| 837 | 798 |
| 838 // Decode tiles using data from tile_buffers | 799 if (pbi->tile_data == NULL || |
| 800 (tile_cols * tile_rows) != pbi->total_tiles) { |
| 801 vpx_free(pbi->tile_data); |
| 802 CHECK_MEM_ERROR( |
| 803 cm, |
| 804 pbi->tile_data, |
| 805 vpx_memalign(32, tile_cols * tile_rows * (sizeof(*pbi->tile_data)))); |
| 806 pbi->total_tiles = tile_rows * tile_cols; |
| 807 } |
| 808 |
| 809 // Load all tile information into tile_data. |
| 839 for (tile_row = 0; tile_row < tile_rows; ++tile_row) { | 810 for (tile_row = 0; tile_row < tile_rows; ++tile_row) { |
| 840 for (tile_col = 0; tile_col < tile_cols; ++tile_col) { | 811 for (tile_col = 0; tile_col < tile_cols; ++tile_col) { |
| 841 const int col = pbi->inv_tile_order ? tile_cols - tile_col - 1 : tile_col; | |
| 842 const int last_tile = tile_row == tile_rows - 1 && | |
| 843 col == tile_cols - 1; | |
| 844 const TileBuffer *const buf = &tile_buffers[tile_row][col]; | |
| 845 TileInfo tile; | 812 TileInfo tile; |
| 846 | 813 const TileBuffer *const buf = &tile_buffers[tile_row][tile_col]; |
| 847 vp9_tile_init(&tile, cm, tile_row, col); | 814 tile_data = pbi->tile_data + tile_cols * tile_row + tile_col; |
| 848 setup_token_decoder(buf->data, data_end, buf->size, &cm->error, &r, | 815 tile_data->cm = cm; |
| 849 pbi->decrypt_cb, pbi->decrypt_state); | 816 tile_data->xd = pbi->mb; |
| 850 decode_tile(pbi, &tile, do_loopfilter_inline, &r); | 817 tile_data->xd.corrupted = 0; |
| 851 | 818 vp9_tile_init(&tile, tile_data->cm, tile_row, tile_col); |
| 852 if (last_tile) | 819 setup_token_decoder(buf->data, data_end, buf->size, &cm->error, |
| 853 end = vp9_reader_find_end(&r); | 820 &tile_data->bit_reader, pbi->decrypt_cb, |
| 821 pbi->decrypt_state); |
| 822 init_macroblockd(cm, &tile_data->xd); |
| 823 vp9_zero(tile_data->xd.dqcoeff); |
| 854 } | 824 } |
| 855 } | 825 } |
| 856 | 826 |
| 857 return end; | 827 for (tile_row = 0; tile_row < tile_rows; ++tile_row) { |
| 828 TileInfo tile; |
| 829 vp9_tile_set_row(&tile, cm, tile_row); |
| 830 for (mi_row = tile.mi_row_start; mi_row < tile.mi_row_end; |
| 831 mi_row += MI_BLOCK_SIZE) { |
| 832 for (tile_col = 0; tile_col < tile_cols; ++tile_col) { |
| 833 const int col = pbi->inv_tile_order ? |
| 834 tile_cols - tile_col - 1 : tile_col; |
| 835 tile_data = pbi->tile_data + tile_cols * tile_row + col; |
| 836 vp9_tile_set_col(&tile, tile_data->cm, col); |
| 837 vp9_zero(tile_data->xd.left_context); |
| 838 vp9_zero(tile_data->xd.left_seg_context); |
| 839 for (mi_col = tile.mi_col_start; mi_col < tile.mi_col_end; |
| 840 mi_col += MI_BLOCK_SIZE) { |
| 841 decode_partition(tile_data->cm, &tile_data->xd, &tile, mi_row, mi_col, |
| 842 &tile_data->bit_reader, BLOCK_64X64); |
| 843 } |
| 844 } |
| 845 // Loopfilter one row. |
| 846 if (cm->lf.filter_level) { |
| 847 const int lf_start = mi_row - MI_BLOCK_SIZE; |
| 848 LFWorkerData *const lf_data = (LFWorkerData*)pbi->lf_worker.data1; |
| 849 |
| 850 // delay the loopfilter by 1 macroblock row. |
| 851 if (lf_start < 0) continue; |
| 852 |
| 853 // decoding has completed: finish up the loop filter in this thread. |
| 854 if (mi_row + MI_BLOCK_SIZE >= cm->mi_rows) continue; |
| 855 |
| 856 vp9_worker_sync(&pbi->lf_worker); |
| 857 lf_data->start = lf_start; |
| 858 lf_data->stop = mi_row; |
| 859 if (pbi->max_threads > 1) { |
| 860 vp9_worker_launch(&pbi->lf_worker); |
| 861 } else { |
| 862 vp9_worker_execute(&pbi->lf_worker); |
| 863 } |
| 864 } |
| 865 } |
| 866 } |
| 867 |
| 868 // Loopfilter remaining rows in the frame. |
| 869 if (cm->lf.filter_level) { |
| 870 LFWorkerData *const lf_data = (LFWorkerData*)pbi->lf_worker.data1; |
| 871 vp9_worker_sync(&pbi->lf_worker); |
| 872 lf_data->start = lf_data->stop; |
| 873 lf_data->stop = cm->mi_rows; |
| 874 vp9_worker_execute(&pbi->lf_worker); |
| 875 } |
| 876 |
| 877 // Get last tile data. |
| 878 tile_data = pbi->tile_data + tile_cols * tile_rows - 1; |
| 879 |
| 880 return vp9_reader_find_end(&tile_data->bit_reader); |
| 858 } | 881 } |
| 859 | 882 |
| 860 static int tile_worker_hook(void *arg1, void *arg2) { | 883 static int tile_worker_hook(void *arg1, void *arg2) { |
| 861 TileWorkerData *const tile_data = (TileWorkerData*)arg1; | 884 TileWorkerData *const tile_data = (TileWorkerData*)arg1; |
| 862 const TileInfo *const tile = (TileInfo*)arg2; | 885 const TileInfo *const tile = (TileInfo*)arg2; |
| 863 int mi_row, mi_col; | 886 int mi_row, mi_col; |
| 864 | 887 |
| 865 for (mi_row = tile->mi_row_start; mi_row < tile->mi_row_end; | 888 for (mi_row = tile->mi_row_start; mi_row < tile->mi_row_end; |
| 866 mi_row += MI_BLOCK_SIZE) { | 889 mi_row += MI_BLOCK_SIZE) { |
| 867 vp9_zero(tile_data->xd.left_context); | 890 vp9_zero(tile_data->xd.left_context); |
| (...skipping 404 matching lines...) |
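The rewritten `decode_tiles()` above sets up one `TileData` (with its own bit reader) per tile, stored row-major at `pbi->tile_data[tile_cols * tile_row + tile_col]`, then decodes each superblock row across every tile column before moving down, while the in-loop filter runs one superblock row behind the decoder so the reconstructed pixels below each filtered edge already exist. A simplified scheduling sketch for a single tile row (the callback names, constants, and the `main()` driver are illustrative stand-ins, not the real libvpx API):

```c
/* Sketch only: simplified single-tile-row scheduling with stand-in callbacks. */
#include <stdio.h>

enum { MI_BLOCK_SIZE = 8 };  /* a 64x64 superblock spans 8 8x8 mode-info units */

static void decode_sb_row(int tile_col, int mi_row) {
  printf("  decode tile col %d, superblock row at mi_row %d\n",
         tile_col, mi_row);
}

static void loopfilter_rows(int start, int stop) {
  printf("  loop filter mi rows [%d, %d)\n", start, stop);
}

static void decode_tiles_sketch(int tile_cols, int mi_rows) {
  int mi_row, tile_col;
  int lf_stop = 0;  /* first mi row the loop filter has not reached yet */

  for (mi_row = 0; mi_row < mi_rows; mi_row += MI_BLOCK_SIZE) {
    /* Decode this superblock row in every tile column before moving down. */
    for (tile_col = 0; tile_col < tile_cols; ++tile_col)
      decode_sb_row(tile_col, mi_row);

    /* Filter one superblock row behind the decoder: the filter needs the
     * reconstructed pixels just below the edges it smooths. */
    if (mi_row - MI_BLOCK_SIZE >= 0 && mi_row + MI_BLOCK_SIZE < mi_rows) {
      loopfilter_rows(lf_stop, mi_row);
      lf_stop = mi_row;
    }
  }

  /* Decoding is done: filter whatever the delayed pass has not covered. */
  loopfilter_rows(lf_stop, mi_rows);
}

int main(void) {
  decode_tiles_sketch(/*tile_cols=*/2, /*mi_rows=*/32);
  return 0;
}
```

The final call corresponds to the `lf_data->start = lf_data->stop; lf_data->stop = cm->mi_rows;` step in the diff, which flushes the rows the delayed filter has not yet covered.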
| 1272 assert(!memcmp(&cm->counts.mv, &zero_counts.mv, sizeof(cm->counts.mv))); | 1295 assert(!memcmp(&cm->counts.mv, &zero_counts.mv, sizeof(cm->counts.mv))); |
| 1273 } | 1296 } |
| 1274 #endif // NDEBUG | 1297 #endif // NDEBUG |
| 1275 | 1298 |
| 1276 static struct vp9_read_bit_buffer* init_read_bit_buffer( | 1299 static struct vp9_read_bit_buffer* init_read_bit_buffer( |
| 1277 VP9Decoder *pbi, | 1300 VP9Decoder *pbi, |
| 1278 struct vp9_read_bit_buffer *rb, | 1301 struct vp9_read_bit_buffer *rb, |
| 1279 const uint8_t *data, | 1302 const uint8_t *data, |
| 1280 const uint8_t *data_end, | 1303 const uint8_t *data_end, |
| 1281 uint8_t *clear_data /* buffer size MAX_VP9_HEADER_SIZE */) { | 1304 uint8_t *clear_data /* buffer size MAX_VP9_HEADER_SIZE */) { |
| 1282 vp9_zero(*rb); | |
| 1283 rb->bit_offset = 0; | 1305 rb->bit_offset = 0; |
| 1284 rb->error_handler = error_handler; | 1306 rb->error_handler = error_handler; |
| 1285 rb->error_handler_data = &pbi->common; | 1307 rb->error_handler_data = &pbi->common; |
| 1286 if (pbi->decrypt_cb) { | 1308 if (pbi->decrypt_cb) { |
| 1287 const int n = (int)MIN(MAX_VP9_HEADER_SIZE, data_end - data); | 1309 const int n = (int)MIN(MAX_VP9_HEADER_SIZE, data_end - data); |
| 1288 pbi->decrypt_cb(pbi->decrypt_state, data, clear_data, n); | 1310 pbi->decrypt_cb(pbi->decrypt_state, data, clear_data, n); |
| 1289 rb->bit_buffer = clear_data; | 1311 rb->bit_buffer = clear_data; |
| 1290 rb->bit_buffer_end = clear_data + n; | 1312 rb->bit_buffer_end = clear_data + n; |
| 1291 } else { | 1313 } else { |
| 1292 rb->bit_buffer = data; | 1314 rb->bit_buffer = data; |
| 1293 rb->bit_buffer_end = data_end; | 1315 rb->bit_buffer_end = data_end; |
| 1294 } | 1316 } |
| 1295 return rb; | 1317 return rb; |
| 1296 } | 1318 } |
| 1297 | 1319 |
| 1298 int vp9_decode_frame(VP9Decoder *pbi, | 1320 void vp9_decode_frame(VP9Decoder *pbi, |
| 1299 const uint8_t *data, const uint8_t *data_end, | 1321 const uint8_t *data, const uint8_t *data_end, |
| 1300 const uint8_t **p_data_end) { | 1322 const uint8_t **p_data_end) { |
| 1301 VP9_COMMON *const cm = &pbi->common; | 1323 VP9_COMMON *const cm = &pbi->common; |
| 1302 MACROBLOCKD *const xd = &pbi->mb; | 1324 MACROBLOCKD *const xd = &pbi->mb; |
| 1303 struct vp9_read_bit_buffer rb; | 1325 struct vp9_read_bit_buffer rb = { 0 }; |
| 1304 uint8_t clear_data[MAX_VP9_HEADER_SIZE]; | 1326 uint8_t clear_data[MAX_VP9_HEADER_SIZE]; |
| 1305 const size_t first_partition_size = read_uncompressed_header(pbi, | 1327 const size_t first_partition_size = read_uncompressed_header(pbi, |
| 1306 init_read_bit_buffer(pbi, &rb, data, data_end, clear_data)); | 1328 init_read_bit_buffer(pbi, &rb, data, data_end, clear_data)); |
| 1307 const int keyframe = cm->frame_type == KEY_FRAME; | |
| 1308 const int tile_rows = 1 << cm->log2_tile_rows; | 1329 const int tile_rows = 1 << cm->log2_tile_rows; |
| 1309 const int tile_cols = 1 << cm->log2_tile_cols; | 1330 const int tile_cols = 1 << cm->log2_tile_cols; |
| 1310 YV12_BUFFER_CONFIG *const new_fb = get_frame_new_buffer(cm); | 1331 YV12_BUFFER_CONFIG *const new_fb = get_frame_new_buffer(cm); |
| 1311 const int do_loopfilter_inline = tile_rows == 1 && tile_cols == 1 && | |
| 1312 cm->lf.filter_level; | |
| 1313 xd->cur_buf = new_fb; | 1332 xd->cur_buf = new_fb; |
| 1314 | 1333 |
| 1315 if (!first_partition_size) { | 1334 if (!first_partition_size) { |
| 1316 // showing a frame directly | 1335 // showing a frame directly |
| 1317 *p_data_end = data + 1; | 1336 *p_data_end = data + 1; |
| 1318 return 0; | 1337 return; |
| 1319 } | 1338 } |
| 1320 | 1339 |
| 1321 if (!pbi->decoded_key_frame && !keyframe) | |
| 1322 return -1; | |
| 1323 | |
| 1324 data += vp9_rb_bytes_read(&rb); | 1340 data += vp9_rb_bytes_read(&rb); |
| 1325 if (!read_is_valid(data, first_partition_size, data_end)) | 1341 if (!read_is_valid(data, first_partition_size, data_end)) |
| 1326 vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME, | 1342 vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME, |
| 1327 "Truncated packet or corrupt header length"); | 1343 "Truncated packet or corrupt header length"); |
| 1328 | 1344 |
| 1329 init_macroblockd(cm, &pbi->mb); | 1345 init_macroblockd(cm, &pbi->mb); |
| 1330 | 1346 |
| 1331 if (cm->coding_use_prev_mi) | 1347 if (cm->coding_use_prev_mi) |
| 1332 set_prev_mi(cm); | 1348 set_prev_mi(cm); |
| 1333 else | 1349 else |
| (...skipping 11 matching lines...) |
| 1345 | 1361 |
| 1346 // TODO(jzern): remove frame_parallel_decoding_mode restriction for | 1362 // TODO(jzern): remove frame_parallel_decoding_mode restriction for |
| 1347 // single-frame tile decoding. | 1363 // single-frame tile decoding. |
| 1348 if (pbi->max_threads > 1 && tile_rows == 1 && tile_cols > 1 && | 1364 if (pbi->max_threads > 1 && tile_rows == 1 && tile_cols > 1 && |
| 1349 cm->frame_parallel_decoding_mode) { | 1365 cm->frame_parallel_decoding_mode) { |
| 1350 *p_data_end = decode_tiles_mt(pbi, data + first_partition_size, data_end); | 1366 *p_data_end = decode_tiles_mt(pbi, data + first_partition_size, data_end); |
| 1351 // If multiple threads are used to decode tiles, then we use those threads | 1367 // If multiple threads are used to decode tiles, then we use those threads |
| 1352 // to do parallel loopfiltering. | 1368 // to do parallel loopfiltering. |
| 1353 vp9_loop_filter_frame_mt(new_fb, pbi, cm, cm->lf.filter_level, 0); | 1369 vp9_loop_filter_frame_mt(new_fb, pbi, cm, cm->lf.filter_level, 0); |
| 1354 } else { | 1370 } else { |
| 1355 if (do_loopfilter_inline && pbi->lf_worker.data1 == NULL) { | 1371 *p_data_end = decode_tiles(pbi, data + first_partition_size, data_end); |
| 1356 CHECK_MEM_ERROR(cm, pbi->lf_worker.data1, | |
| 1357 vpx_memalign(32, sizeof(LFWorkerData))); | |
| 1358 pbi->lf_worker.hook = (VP9WorkerHook)vp9_loop_filter_worker; | |
| 1359 if (pbi->max_threads > 1 && !vp9_worker_reset(&pbi->lf_worker)) { | |
| 1360 vpx_internal_error(&cm->error, VPX_CODEC_ERROR, | |
| 1361 "Loop filter thread creation failed"); | |
| 1362 } | |
| 1363 } | |
| 1364 *p_data_end = decode_tiles(pbi, data + first_partition_size, data_end, | |
| 1365 do_loopfilter_inline); | |
| 1366 if (!do_loopfilter_inline) | |
| 1367 vp9_loop_filter_frame(new_fb, cm, &pbi->mb, cm->lf.filter_level, 0, 0); | |
| 1368 } | 1372 } |
| 1369 | 1373 |
| 1370 new_fb->corrupted |= xd->corrupted; | 1374 new_fb->corrupted |= xd->corrupted; |
| 1371 | 1375 |
| 1372 if (!pbi->decoded_key_frame) { | |
| 1373 if (keyframe && !new_fb->corrupted) | |
| 1374 pbi->decoded_key_frame = 1; | |
| 1375 else | |
| 1376 vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME, | |
| 1377 "A stream must start with a complete key frame"); | |
| 1378 } | |
| 1379 | |
| 1380 if (!new_fb->corrupted) { | 1376 if (!new_fb->corrupted) { |
| 1381 if (!cm->error_resilient_mode && !cm->frame_parallel_decoding_mode) { | 1377 if (!cm->error_resilient_mode && !cm->frame_parallel_decoding_mode) { |
| 1382 vp9_adapt_coef_probs(cm); | 1378 vp9_adapt_coef_probs(cm); |
| 1383 | 1379 |
| 1384 if (!frame_is_intra_only(cm)) { | 1380 if (!frame_is_intra_only(cm)) { |
| 1385 vp9_adapt_mode_probs(cm); | 1381 vp9_adapt_mode_probs(cm); |
| 1386 vp9_adapt_mv_probs(cm, cm->allow_high_precision_mv); | 1382 vp9_adapt_mv_probs(cm, cm->allow_high_precision_mv); |
| 1387 } | 1383 } |
| 1388 } else { | 1384 } else { |
| 1389 debug_check_frame_counts(cm); | 1385 debug_check_frame_counts(cm); |
| 1390 } | 1386 } |
| 1391 } | 1387 } |
| 1392 | 1388 |
| 1393 if (cm->refresh_frame_context) | 1389 if (cm->refresh_frame_context) |
| 1394 cm->frame_contexts[cm->frame_context_idx] = cm->fc; | 1390 cm->frame_contexts[cm->frame_context_idx] = cm->fc; |
| 1395 | |
| 1396 return 0; | |
| 1397 } | 1391 } |
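With the return type changed from `int` to `void`, `vp9_decode_frame()` no longer reports failure through a return code; errors are raised with `vpx_internal_error()` on `cm->error` (as in the truncated-packet check above), which in libvpx unwinds to a `setjmp` point armed higher up the call chain when one is set. A hedged sketch of that calling pattern with simplified stand-in types (the real structure lives in vpx_codec_internal.h and differs in detail):

```c
/* Sketch only: simplified error plumbing, not the vpx_codec_internal.h API. */
#include <setjmp.h>
#include <stdio.h>

struct error_info {
  int error_code;   /* stand-in for vpx_codec_err_t */
  int setjmp_armed; /* caller has a jump target ready */
  jmp_buf jmp;
};

/* Stand-in for vpx_internal_error(): record the error, then unwind. */
static void internal_error(struct error_info *info, int code,
                           const char *msg) {
  fprintf(stderr, "decode error %d: %s\n", code, msg);
  info->error_code = code;
  if (info->setjmp_armed)
    longjmp(info->jmp, 1);
}

/* Stand-in for the void-returning vp9_decode_frame(). */
static void decode_frame(struct error_info *err, int corrupt_input) {
  if (corrupt_input)
    internal_error(err, 7 /* arbitrary placeholder code */,
                   "Truncated packet or corrupt header length");
  puts("frame decoded");
}

/* Static so its value stays well defined after longjmp(). */
static struct error_info err;

int main(void) {
  err.setjmp_armed = 1;
  if (setjmp(err.jmp)) {
    /* Control lands here when internal_error() fires inside the decoder. */
    printf("caller observed error code %d\n", err.error_code);
  } else {
    decode_frame(&err, 1);
  }
  return 0;
}
```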