Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(200)

Side by Side Diff: source/patched-ffmpeg-mt/libavcodec/ffv1.c

Issue 4533003: patched ffmpeg nov 2 (Closed) Base URL: svn://chrome-svn/chrome/trunk/deps/third_party/ffmpeg/
Patch Set: '' Created 10 years, 1 month ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
OLDNEW
1 /* 1 /*
2 * FFV1 codec for libavcodec 2 * FFV1 codec for libavcodec
3 * 3 *
4 * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at> 4 * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
5 * 5 *
6 * This file is part of FFmpeg. 6 * This file is part of FFmpeg.
7 * 7 *
8 * FFmpeg is free software; you can redistribute it and/or 8 * FFmpeg is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public 9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either 10 * License as published by the Free Software Foundation; either
(...skipping 14 matching lines...) Expand all
25 * FF Video Codec 1 (a lossless codec) 25 * FF Video Codec 1 (a lossless codec)
26 */ 26 */
27 27
28 #include "avcodec.h" 28 #include "avcodec.h"
29 #include "get_bits.h" 29 #include "get_bits.h"
30 #include "put_bits.h" 30 #include "put_bits.h"
31 #include "dsputil.h" 31 #include "dsputil.h"
32 #include "rangecoder.h" 32 #include "rangecoder.h"
33 #include "golomb.h" 33 #include "golomb.h"
34 #include "mathops.h" 34 #include "mathops.h"
35 #include "libavutil/avassert.h"
35 36
36 #define MAX_PLANES 4 37 #define MAX_PLANES 4
37 #define CONTEXT_SIZE 32 38 #define CONTEXT_SIZE 32
38 39
40 #define MAX_QUANT_TABLES 8
41 #define MAX_CONTEXT_INPUTS 5
42
39 extern const uint8_t ff_log2_run[32]; 43 extern const uint8_t ff_log2_run[32];
40 44
41 static const int8_t quant3[256]={ 45 static const int8_t quant3[256]={
42 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 46 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
43 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 47 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
44 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 48 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
45 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 49 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
46 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 50 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
47 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 51 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
48 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 52 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
(...skipping 157 matching lines...) Expand 10 before | Expand all | Expand 10 after
206 }; 210 };
207 211
208 typedef struct VlcState{ 212 typedef struct VlcState{
209 int16_t drift; 213 int16_t drift;
210 uint16_t error_sum; 214 uint16_t error_sum;
211 int8_t bias; 215 int8_t bias;
212 uint8_t count; 216 uint8_t count;
213 } VlcState; 217 } VlcState;
214 218
215 typedef struct PlaneContext{ 219 typedef struct PlaneContext{
220 int16_t quant_table[MAX_CONTEXT_INPUTS][256];
216 int context_count; 221 int context_count;
217 uint8_t (*state)[CONTEXT_SIZE]; 222 uint8_t (*state)[CONTEXT_SIZE];
218 VlcState *vlc_state; 223 VlcState *vlc_state;
219 uint8_t interlace_bit_state[2]; 224 uint8_t interlace_bit_state[2];
220 } PlaneContext; 225 } PlaneContext;
221 226
227 #define MAX_SLICES 256
228
222 typedef struct FFV1Context{ 229 typedef struct FFV1Context{
223 AVCodecContext *avctx; 230 AVCodecContext *avctx;
224 RangeCoder c; 231 RangeCoder c;
225 GetBitContext gb; 232 GetBitContext gb;
226 PutBitContext pb; 233 PutBitContext pb;
227 int version; 234 int version;
228 int width, height; 235 int width, height;
229 int chroma_h_shift, chroma_v_shift; 236 int chroma_h_shift, chroma_v_shift;
230 int flags; 237 int flags;
231 int picture_number; 238 int picture_number;
232 AVFrame picture; 239 AVFrame picture;
233 int plane_count; 240 int plane_count;
234 int ac; ///< 1=range coder <-> 0=golomb rice 241 int ac; ///< 1=range coder <-> 0=golomb rice
235 PlaneContext plane[MAX_PLANES]; 242 PlaneContext plane[MAX_PLANES];
236 int16_t quant_table[5][256]; 243 int16_t quant_table[MAX_CONTEXT_INPUTS][256];
244 int16_t quant_tables[MAX_QUANT_TABLES][MAX_CONTEXT_INPUTS][256];
245 int context_count[MAX_QUANT_TABLES];
237 uint8_t state_transition[256]; 246 uint8_t state_transition[256];
238 int run_index; 247 int run_index;
239 int colorspace; 248 int colorspace;
240 int_fast16_t *sample_buffer; 249 int_fast16_t *sample_buffer;
241 250
251 int quant_table_count;
252
242 DSPContext dsp; 253 DSPContext dsp;
254
255 struct FFV1Context *slice_context[MAX_SLICES];
256 int slice_count;
257 int num_v_slices;
258 int num_h_slices;
259 int slice_width;
260 int slice_height;
261 int slice_x;
262 int slice_y;
243 }FFV1Context; 263 }FFV1Context;
244 264
245 static av_always_inline int fold(int diff, int bits){ 265 static av_always_inline int fold(int diff, int bits){
246 if(bits==8) 266 if(bits==8)
247 diff= (int8_t)diff; 267 diff= (int8_t)diff;
248 else{ 268 else{
249 diff+= 1<<(bits-1); 269 diff+= 1<<(bits-1);
250 diff&=(1<<bits)-1; 270 diff&=(1<<bits)-1;
251 diff-= 1<<(bits-1); 271 diff-= 1<<(bits-1);
252 } 272 }
253 273
254 return diff; 274 return diff;
255 } 275 }
256 276
257 static inline int predict(int_fast16_t *src, int_fast16_t *last){ 277 static inline int predict(int_fast16_t *src, int_fast16_t *last){
258 const int LT= last[-1]; 278 const int LT= last[-1];
259 const int T= last[ 0]; 279 const int T= last[ 0];
260 const int L = src[-1]; 280 const int L = src[-1];
261 281
262 return mid_pred(L, L + T - LT, T); 282 return mid_pred(L, L + T - LT, T);
263 } 283 }
264 284
265 static inline int get_context(FFV1Context *f, int_fast16_t *src, int_fast16_t *l ast, int_fast16_t *last2){ 285 static inline int get_context(PlaneContext *p, int_fast16_t *src, int_fast16_t * last, int_fast16_t *last2){
266 const int LT= last[-1]; 286 const int LT= last[-1];
267 const int T= last[ 0]; 287 const int T= last[ 0];
268 const int RT= last[ 1]; 288 const int RT= last[ 1];
269 const int L = src[-1]; 289 const int L = src[-1];
270 290
271 if(f->quant_table[3][127]){ 291 if(p->quant_table[3][127]){
272 const int TT= last2[0]; 292 const int TT= last2[0];
273 const int LL= src[-2]; 293 const int LL= src[-2];
274 return f->quant_table[0][(L-LT) & 0xFF] + f->quant_table[1][(LT-T) & 0xF F] + f->quant_table[2][(T-RT) & 0xFF] 294 return p->quant_table[0][(L-LT) & 0xFF] + p->quant_table[1][(LT-T) & 0xF F] + p->quant_table[2][(T-RT) & 0xFF]
275 +f->quant_table[3][(LL-L) & 0xFF] + f->quant_table[4][(TT-T) & 0xF F]; 295 +p->quant_table[3][(LL-L) & 0xFF] + p->quant_table[4][(TT-T) & 0xF F];
276 }else 296 }else
277 return f->quant_table[0][(L-LT) & 0xFF] + f->quant_table[1][(LT-T) & 0xF F] + f->quant_table[2][(T-RT) & 0xFF]; 297 return p->quant_table[0][(L-LT) & 0xFF] + p->quant_table[1][(LT-T) & 0xF F] + p->quant_table[2][(T-RT) & 0xFF];
278 } 298 }
279 299
280 static inline void put_symbol_inline(RangeCoder *c, uint8_t *state, int v, int i s_signed){ 300 static inline void put_symbol_inline(RangeCoder *c, uint8_t *state, int v, int i s_signed){
281 int i; 301 int i;
282 302
283 if(v){ 303 if(v){
284 const int a= FFABS(v); 304 const int a= FFABS(v);
285 const int e= av_log2(a); 305 const int e= av_log2(a);
286 put_rac(c, state+0, 0); 306 put_rac(c, state+0, 0);
287 if(e<=9){ 307 if(e<=9){
(...skipping 157 matching lines...) Expand 10 before | Expand all | Expand 10 after
445 }else{ 465 }else{
446 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < w*4){ 466 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < w*4){
447 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n"); 467 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
448 return -1; 468 return -1;
449 } 469 }
450 } 470 }
451 471
452 for(x=0; x<w; x++){ 472 for(x=0; x<w; x++){
453 int diff, context; 473 int diff, context;
454 474
455 context= get_context(s, sample[0]+x, sample[1]+x, sample[2]+x); 475 context= get_context(p, sample[0]+x, sample[1]+x, sample[2]+x);
456 diff= sample[0][x] - predict(sample[0]+x, sample[1]+x); 476 diff= sample[0][x] - predict(sample[0]+x, sample[1]+x);
457 477
458 if(context < 0){ 478 if(context < 0){
459 context = -context; 479 context = -context;
460 diff= -diff; 480 diff= -diff;
461 } 481 }
462 482
463 diff= fold(diff, bits); 483 diff= fold(diff, bits);
464 484
465 if(s->ac){ 485 if(s->ac){
(...skipping 118 matching lines...) Expand 10 before | Expand all | Expand 10 after
584 604
585 for(i=1; i<128 ; i++){ 605 for(i=1; i<128 ; i++){
586 if(quant_table[i] != quant_table[i-1]){ 606 if(quant_table[i] != quant_table[i-1]){
587 put_symbol(c, state, i-last-1, 0); 607 put_symbol(c, state, i-last-1, 0);
588 last= i; 608 last= i;
589 } 609 }
590 } 610 }
591 put_symbol(c, state, i-last-1, 0); 611 put_symbol(c, state, i-last-1, 0);
592 } 612 }
593 613
614 static void write_quant_tables(RangeCoder *c, int16_t quant_table[MAX_CONTEXT_IN PUTS][256]){
615 int i;
616 for(i=0; i<5; i++)
617 write_quant_table(c, quant_table[i]);
618 }
619
594 static void write_header(FFV1Context *f){ 620 static void write_header(FFV1Context *f){
595 uint8_t state[CONTEXT_SIZE]; 621 uint8_t state[CONTEXT_SIZE];
596 int i; 622 int i, j;
597 RangeCoder * const c= &f->c; 623 RangeCoder * const c= &f->slice_context[0]->c;
598 624
599 memset(state, 128, sizeof(state)); 625 memset(state, 128, sizeof(state));
600 626
601 put_symbol(c, state, f->version, 0); 627 if(f->version < 2){
602 put_symbol(c, state, f->ac, 0); 628 put_symbol(c, state, f->version, 0);
603 if(f->ac>1){ 629 put_symbol(c, state, f->ac, 0);
604 for(i=1; i<256; i++){ 630 if(f->ac>1){
605 f->state_transition[i]=ver2_state[i]; 631 for(i=1; i<256; i++){
606 put_symbol(c, state, ver2_state[i] - c->one_state[i], 1); 632 f->state_transition[i]=ver2_state[i];
633 put_symbol(c, state, ver2_state[i] - c->one_state[i], 1);
634 }
635 }
636 put_symbol(c, state, f->colorspace, 0); //YUV cs type
637 if(f->version>0)
638 put_symbol(c, state, f->avctx->bits_per_raw_sample, 0);
639 put_rac(c, state, 1); //chroma planes
640 put_symbol(c, state, f->chroma_h_shift, 0);
641 put_symbol(c, state, f->chroma_v_shift, 0);
642 put_rac(c, state, 0); //no transparency plane
643
644 write_quant_tables(c, f->quant_table);
645 }else{
646 put_symbol(c, state, f->slice_count, 0);
647 for(i=0; i<f->slice_count; i++){
648 FFV1Context *fs= f->slice_context[i];
649 put_symbol(c, state, (fs->slice_x +1)*f->num_h_slices / f->width , 0);
650 put_symbol(c, state, (fs->slice_y +1)*f->num_v_slices / f->heigh t , 0);
651 put_symbol(c, state, (fs->slice_width +1)*f->num_h_slices / f->width -1, 0);
652 put_symbol(c, state, (fs->slice_height+1)*f->num_v_slices / f->heigh t-1, 0);
653 for(j=0; j<f->plane_count; j++)
654 put_symbol(c, state, f->avctx->context_model, 0);
607 } 655 }
608 } 656 }
609 put_symbol(c, state, f->colorspace, 0); //YUV cs type
610 if(f->version>0)
611 put_symbol(c, state, f->avctx->bits_per_raw_sample, 0);
612 put_rac(c, state, 1); //chroma planes
613 put_symbol(c, state, f->chroma_h_shift, 0);
614 put_symbol(c, state, f->chroma_v_shift, 0);
615 put_rac(c, state, 0); //no transparency plane
616
617 for(i=0; i<5; i++)
618 write_quant_table(c, f->quant_table[i]);
619 } 657 }
620 #endif /* CONFIG_FFV1_ENCODER */ 658 #endif /* CONFIG_FFV1_ENCODER */
621 659
622 static av_cold int common_init(AVCodecContext *avctx){ 660 static av_cold int common_init(AVCodecContext *avctx){
623 FFV1Context *s = avctx->priv_data; 661 FFV1Context *s = avctx->priv_data;
624 662
625 s->avctx= avctx; 663 s->avctx= avctx;
626 s->flags= avctx->flags; 664 s->flags= avctx->flags;
627 665
628 dsputil_init(&s->dsp, avctx); 666 dsputil_init(&s->dsp, avctx);
629 667
630 s->width = avctx->width; 668 s->width = avctx->width;
631 s->height= avctx->height; 669 s->height= avctx->height;
632 670
633 assert(s->width && s->height); 671 assert(s->width && s->height);
672 //defaults
673 s->num_h_slices=1;
674 s->num_v_slices=1;
634 675
635 s->sample_buffer = av_malloc(6 * (s->width+6) * sizeof(*s->sample_buffer));
636 if (!s->sample_buffer)
637 return AVERROR(ENOMEM);
638 676
639 return 0; 677 return 0;
640 } 678 }
641 679
680 static int init_slice_state(FFV1Context *f){
681 int i, j;
682
683 for(i=0; i<f->slice_count; i++){
684 FFV1Context *fs= f->slice_context[i];
685 for(j=0; j<f->plane_count; j++){
686 PlaneContext * const p= &fs->plane[j];
687
688 if(fs->ac){
689 if(!p-> state) p-> state= av_malloc(CONTEXT_SIZE*p->contex t_count*sizeof(uint8_t));
690 if(!p-> state)
691 return AVERROR(ENOMEM);
692 }else{
693 if(!p->vlc_state) p->vlc_state= av_malloc(p->context_count*sizeo f(VlcState));
694 if(!p->vlc_state)
695 return AVERROR(ENOMEM);
696 }
697 }
698
699 if (fs->ac>1){
700 //FIXME only redo if state_transition changed
701 for(j=1; j<256; j++){
702 fs->c.one_state [ j]= fs->state_transition[j];
703 fs->c.zero_state[256-j]= 256-fs->c.one_state [j];
704 }
705 }
706 }
707
708 return 0;
709 }
710
711 static av_cold int init_slice_contexts(FFV1Context *f){
712 int i;
713
714 f->slice_count= f->num_h_slices * f->num_v_slices;
715
716 for(i=0; i<f->slice_count; i++){
717 FFV1Context *fs= av_mallocz(sizeof(*fs));
718 int sx= i % f->num_h_slices;
719 int sy= i / f->num_h_slices;
720 int sxs= f->avctx->width * sx / f->num_h_slices;
721 int sxe= f->avctx->width *(sx+1) / f->num_h_slices;
722 int sys= f->avctx->height* sy / f->num_v_slices;
723 int sye= f->avctx->height*(sy+1) / f->num_v_slices;
724 f->slice_context[i]= fs;
725 memcpy(fs, f, sizeof(*fs));
726
727 fs->slice_width = sxe - sxs;
728 fs->slice_height= sye - sys;
729 fs->slice_x = sxs;
730 fs->slice_y = sys;
731
732 fs->sample_buffer = av_malloc(6 * (fs->width+6) * sizeof(*fs->sample_buf fer));
733 if (!fs->sample_buffer)
734 return AVERROR(ENOMEM);
735 }
736 return 0;
737 }
738
642 #if CONFIG_FFV1_ENCODER 739 #if CONFIG_FFV1_ENCODER
740 static int write_extra_header(FFV1Context *f){
741 RangeCoder * const c= &f->c;
742 uint8_t state[CONTEXT_SIZE];
743 int i;
744 memset(state, 128, sizeof(state));
745
746 f->avctx->extradata= av_malloc(f->avctx->extradata_size= 10000);
747 ff_init_range_encoder(c, f->avctx->extradata, f->avctx->extradata_size);
748 ff_build_rac_states(c, 0.05*(1LL<<32), 256-8);
749
750 put_symbol(c, state, f->version, 0);
751 put_symbol(c, state, f->ac, 0);
752 if(f->ac>1){
753 for(i=1; i<256; i++){
754 f->state_transition[i]=ver2_state[i];
755 put_symbol(c, state, ver2_state[i] - c->one_state[i], 1);
756 }
757 }
758 put_symbol(c, state, f->colorspace, 0); //YUV cs type
759 put_symbol(c, state, f->avctx->bits_per_raw_sample, 0);
760 put_rac(c, state, 1); //chroma planes
761 put_symbol(c, state, f->chroma_h_shift, 0);
762 put_symbol(c, state, f->chroma_v_shift, 0);
763 put_rac(c, state, 0); //no transparency plane
764 put_symbol(c, state, f->num_h_slices-1, 0);
765 put_symbol(c, state, f->num_v_slices-1, 0);
766
767 put_symbol(c, state, f->quant_table_count, 0);
768 for(i=0; i<f->quant_table_count; i++)
769 write_quant_tables(c, f->quant_tables[i]);
770
771 f->avctx->extradata_size= ff_rac_terminate(c);
772
773 return 0;
774 }
775
643 static av_cold int encode_init(AVCodecContext *avctx) 776 static av_cold int encode_init(AVCodecContext *avctx)
644 { 777 {
645 FFV1Context *s = avctx->priv_data; 778 FFV1Context *s = avctx->priv_data;
646 int i; 779 int i;
647 780
648 common_init(avctx); 781 common_init(avctx);
649 782
650 s->version=0; 783 s->version=0;
651 s->ac= avctx->coder_type ? 2:0; 784 s->ac= avctx->coder_type ? 2:0;
652 785
653 s->plane_count=2; 786 s->plane_count=2;
654 for(i=0; i<256; i++){ 787 for(i=0; i<256; i++){
788 s->quant_table_count=2;
655 if(avctx->bits_per_raw_sample <=8){ 789 if(avctx->bits_per_raw_sample <=8){
656 s->quant_table[0][i]= quant11[i]; 790 s->quant_tables[0][0][i]= quant11[i];
657 s->quant_table[1][i]= 11*quant11[i]; 791 s->quant_tables[0][1][i]= 11*quant11[i];
658 if(avctx->context_model==0){ 792 s->quant_tables[0][2][i]= 11*11*quant11[i];
659 s->quant_table[2][i]= 11*11*quant11[i]; 793 s->quant_tables[1][0][i]= quant11[i];
660 s->quant_table[3][i]= 794 s->quant_tables[1][1][i]= 11*quant11[i];
661 s->quant_table[4][i]=0; 795 s->quant_tables[1][2][i]= 11*11*quant5 [i];
662 }else{ 796 s->quant_tables[1][3][i]= 5*11*11*quant5 [i];
663 s->quant_table[2][i]= 11*11*quant5 [i]; 797 s->quant_tables[1][4][i]= 5*5*11*11*quant5 [i];
664 s->quant_table[3][i]= 5*11*11*quant5 [i];
665 s->quant_table[4][i]= 5*5*11*11*quant5 [i];
666 }
667 }else{ 798 }else{
668 s->quant_table[0][i]= quant9_10bit[i]; 799 s->quant_tables[0][0][i]= quant9_10bit[i];
669 s->quant_table[1][i]= 11*quant9_10bit[i]; 800 s->quant_tables[0][1][i]= 11*quant9_10bit[i];
670 if(avctx->context_model==0){ 801 s->quant_tables[0][2][i]= 11*11*quant9_10bit[i];
671 s->quant_table[2][i]= 11*11*quant9_10bit[i]; 802 s->quant_tables[1][0][i]= quant9_10bit[i];
672 s->quant_table[3][i]= 803 s->quant_tables[1][1][i]= 11*quant9_10bit[i];
673 s->quant_table[4][i]=0; 804 s->quant_tables[1][2][i]= 11*11*quant5_10bit[i];
674 }else{ 805 s->quant_tables[1][3][i]= 5*11*11*quant5_10bit[i];
675 s->quant_table[2][i]= 11*11*quant5_10bit[i]; 806 s->quant_tables[1][4][i]= 5*5*11*11*quant5_10bit[i];
676 s->quant_table[3][i]= 5*11*11*quant5_10bit[i];
677 s->quant_table[4][i]= 5*5*11*11*quant5_10bit[i];
678 }
679 } 807 }
680 } 808 }
809 memcpy(s->quant_table, s->quant_tables[avctx->context_model], sizeof(s->quan t_table));
681 810
682 for(i=0; i<s->plane_count; i++){ 811 for(i=0; i<s->plane_count; i++){
683 PlaneContext * const p= &s->plane[i]; 812 PlaneContext * const p= &s->plane[i];
684 813
814 memcpy(p->quant_table, s->quant_table, sizeof(p->quant_table));
685 if(avctx->context_model==0){ 815 if(avctx->context_model==0){
686 p->context_count= (11*11*11+1)/2; 816 p->context_count= (11*11*11+1)/2;
687 }else{ 817 }else{
688 p->context_count= (11*11*5*5*5+1)/2; 818 p->context_count= (11*11*5*5*5+1)/2;
689 } 819 }
690
691 if(s->ac){
692 if(!p->state) p->state= av_malloc(CONTEXT_SIZE*p->context_count*size of(uint8_t));
693 }else{
694 if(!p->vlc_state) p->vlc_state= av_malloc(p->context_count*sizeof(Vl cState));
695 }
696 } 820 }
697 821
698 avctx->coded_frame= &s->picture; 822 avctx->coded_frame= &s->picture;
699 switch(avctx->pix_fmt){ 823 switch(avctx->pix_fmt){
700 case PIX_FMT_YUV444P16: 824 case PIX_FMT_YUV444P16:
701 case PIX_FMT_YUV422P16: 825 case PIX_FMT_YUV422P16:
702 case PIX_FMT_YUV420P16: 826 case PIX_FMT_YUV420P16:
703 if(avctx->bits_per_raw_sample <=8){ 827 if(avctx->bits_per_raw_sample <=8){
704 av_log(avctx, AV_LOG_ERROR, "bits_per_raw_sample invalid\n"); 828 av_log(avctx, AV_LOG_ERROR, "bits_per_raw_sample invalid\n");
705 return -1; 829 return -1;
706 } 830 }
707 if(!s->ac){ 831 if(!s->ac){
708 av_log(avctx, AV_LOG_ERROR, "bits_per_raw_sample of more than 8 need s -coder 1 currently\n"); 832 av_log(avctx, AV_LOG_ERROR, "bits_per_raw_sample of more than 8 need s -coder 1 currently\n");
709 return -1; 833 return -1;
710 } 834 }
711 s->version= 1; 835 s->version= FFMAX(s->version, 1);
712 case PIX_FMT_YUV444P: 836 case PIX_FMT_YUV444P:
713 case PIX_FMT_YUV422P: 837 case PIX_FMT_YUV422P:
714 case PIX_FMT_YUV420P: 838 case PIX_FMT_YUV420P:
715 case PIX_FMT_YUV411P: 839 case PIX_FMT_YUV411P:
716 case PIX_FMT_YUV410P: 840 case PIX_FMT_YUV410P:
717 s->colorspace= 0; 841 s->colorspace= 0;
718 break; 842 break;
719 case PIX_FMT_RGB32: 843 case PIX_FMT_RGB32:
720 s->colorspace= 1; 844 s->colorspace= 1;
721 break; 845 break;
722 default: 846 default:
723 av_log(avctx, AV_LOG_ERROR, "format not supported\n"); 847 av_log(avctx, AV_LOG_ERROR, "format not supported\n");
724 return -1; 848 return -1;
725 } 849 }
726 avcodec_get_chroma_sub_sample(avctx->pix_fmt, &s->chroma_h_shift, &s->chroma _v_shift); 850 avcodec_get_chroma_sub_sample(avctx->pix_fmt, &s->chroma_h_shift, &s->chroma _v_shift);
727 851
728 s->picture_number=0; 852 s->picture_number=0;
729 853
854 if(s->version>1){
855 s->num_h_slices=2;
856 s->num_v_slices=2;
857 write_extra_header(s);
858 }
859
860 if(init_slice_contexts(s) < 0)
861 return -1;
862 if(init_slice_state(s) < 0)
863 return -1;
864
730 return 0; 865 return 0;
731 } 866 }
732 #endif /* CONFIG_FFV1_ENCODER */ 867 #endif /* CONFIG_FFV1_ENCODER */
733 868
734 869
735 static void clear_state(FFV1Context *f){ 870 static void clear_state(FFV1Context *f){
736 int i, j; 871 int i, si, j;
737 872
738 for(i=0; i<f->plane_count; i++){ 873 for(si=0; si<f->slice_count; si++){
739 PlaneContext *p= &f->plane[i]; 874 FFV1Context *fs= f->slice_context[si];
875 for(i=0; i<f->plane_count; i++){
876 PlaneContext *p= &fs->plane[i];
740 877
741 p->interlace_bit_state[0]= 128; 878 p->interlace_bit_state[0]= 128;
742 p->interlace_bit_state[1]= 128; 879 p->interlace_bit_state[1]= 128;
743 880
744 for(j=0; j<p->context_count; j++){ 881 for(j=0; j<p->context_count; j++){
745 if(f->ac){ 882 if(fs->ac){
746 memset(p->state[j], 128, sizeof(uint8_t)*CONTEXT_SIZE); 883 memset(p->state[j], 128, sizeof(uint8_t)*CONTEXT_SIZE);
747 }else{ 884 }else{
748 p->vlc_state[j].drift= 0; 885 p->vlc_state[j].drift= 0;
749 p->vlc_state[j].error_sum= 4; //FFMAX((RANGE + 32)/64, 2); 886 p->vlc_state[j].error_sum= 4; //FFMAX((RANGE + 32)/64, 2);
750 p->vlc_state[j].bias= 0; 887 p->vlc_state[j].bias= 0;
751 p->vlc_state[j].count= 1; 888 p->vlc_state[j].count= 1;
889 }
752 } 890 }
753 } 891 }
754 } 892 }
755 } 893 }
756 894
757 #if CONFIG_FFV1_ENCODER 895 #if CONFIG_FFV1_ENCODER
896 static int encode_slice(AVCodecContext *c, void *arg){
897 FFV1Context *fs= *(void**)arg;
898 FFV1Context *f= fs->avctx->priv_data;
899 int width = fs->slice_width;
900 int height= fs->slice_height;
901 int x= fs->slice_x;
902 int y= fs->slice_y;
903 AVFrame * const p= &f->picture;
904
905 if(f->colorspace==0){
906 const int chroma_width = -((-width )>>f->chroma_h_shift);
907 const int chroma_height= -((-height)>>f->chroma_v_shift);
908 const int cx= x>>f->chroma_h_shift;
909 const int cy= y>>f->chroma_v_shift;
910
911 encode_plane(fs, p->data[0] + x + y*p->linesize[0], width, height, p->li nesize[0], 0);
912
913 encode_plane(fs, p->data[1] + cx+cy*p->linesize[1], chroma_width, chroma _height, p->linesize[1], 1);
914 encode_plane(fs, p->data[2] + cx+cy*p->linesize[2], chroma_width, chroma _height, p->linesize[2], 1);
915 }else{
916 encode_rgb_frame(fs, (uint32_t*)(p->data[0]) + x + y*(p->linesize[0]/4), width, height, p->linesize[0]/4);
917 }
918 emms_c();
919
920 return 0;
921 }
922
758 static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){ 923 static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
759 FFV1Context *f = avctx->priv_data; 924 FFV1Context *f = avctx->priv_data;
760 RangeCoder * const c= &f->c; 925 RangeCoder * const c= &f->slice_context[0]->c;
761 AVFrame *pict = data; 926 AVFrame *pict = data;
762 const int width= f->width;
763 const int height= f->height;
764 AVFrame * const p= &f->picture; 927 AVFrame * const p= &f->picture;
765 int used_count= 0; 928 int used_count= 0;
766 uint8_t keystate=128; 929 uint8_t keystate=128;
930 uint8_t *buf_p;
931 int i;
767 932
768 ff_init_range_encoder(c, buf, buf_size); 933 ff_init_range_encoder(c, buf, buf_size);
769 ff_build_rac_states(c, 0.05*(1LL<<32), 256-8); 934 ff_build_rac_states(c, 0.05*(1LL<<32), 256-8);
770 935
771 *p = *pict; 936 *p = *pict;
772 p->pict_type= FF_I_TYPE; 937 p->pict_type= FF_I_TYPE;
773 938
774 if(avctx->gop_size==0 || f->picture_number % avctx->gop_size == 0){ 939 if(avctx->gop_size==0 || f->picture_number % avctx->gop_size == 0){
775 put_rac(c, &keystate, 1); 940 put_rac(c, &keystate, 1);
776 p->key_frame= 1; 941 p->key_frame= 1;
777 write_header(f); 942 write_header(f);
778 clear_state(f); 943 clear_state(f);
779 }else{ 944 }else{
780 put_rac(c, &keystate, 0); 945 put_rac(c, &keystate, 0);
781 p->key_frame= 0; 946 p->key_frame= 0;
782 } 947 }
783 948
784 if(!f->ac){ 949 if(!f->ac){
785 used_count += ff_rac_terminate(c); 950 used_count += ff_rac_terminate(c);
786 //printf("pos=%d\n", used_count); 951 //printf("pos=%d\n", used_count);
787 init_put_bits(&f->pb, buf + used_count, buf_size - used_count); 952 init_put_bits(&f->slice_context[0]->pb, buf + used_count, buf_size - use d_count);
788 }else if (f->ac>1){ 953 }else if (f->ac>1){
789 int i; 954 int i;
790 for(i=1; i<256; i++){ 955 for(i=1; i<256; i++){
791 c->one_state[i]= f->state_transition[i]; 956 c->one_state[i]= f->state_transition[i];
792 c->zero_state[256-i]= 256-c->one_state[i]; 957 c->zero_state[256-i]= 256-c->one_state[i];
793 } 958 }
794 } 959 }
795 960
796 if(f->colorspace==0){ 961 for(i=1; i<f->slice_count; i++){
797 const int chroma_width = -((-width )>>f->chroma_h_shift); 962 FFV1Context *fs= f->slice_context[i];
798 const int chroma_height= -((-height)>>f->chroma_v_shift); 963 uint8_t *start= buf + (buf_size-used_count)*i/f->slice_count;
964 int len= buf_size/f->slice_count;
799 965
800 encode_plane(f, p->data[0], width, height, p->linesize[0], 0); 966 if(fs->ac){
967 ff_init_range_encoder(&fs->c, start, len);
968 }else{
969 init_put_bits(&fs->pb, start, len);
970 }
971 }
972 avctx->execute(avctx, encode_slice, &f->slice_context[0], NULL, f->slice_cou nt, sizeof(void*));
801 973
802 encode_plane(f, p->data[1], chroma_width, chroma_height, p->linesize[1], 1); 974 buf_p=buf;
803 encode_plane(f, p->data[2], chroma_width, chroma_height, p->linesize[2], 1); 975 for(i=0; i<f->slice_count; i++){
804 }else{ 976 FFV1Context *fs= f->slice_context[i];
805 encode_rgb_frame(f, (uint32_t*)(p->data[0]), width, height, p->linesize[ 0]/4); 977 int bytes;
978
979 if(fs->ac){
980 uint8_t state=128;
981 put_rac(&fs->c, &state, 0);
982 bytes= ff_rac_terminate(&fs->c);
983 }else{
984 flush_put_bits(&fs->pb); //nicer padding FIXME
985 bytes= used_count + (put_bits_count(&fs->pb)+7)/8;
986 used_count= 0;
987 }
988 if(i>0){
989 av_assert0(bytes < buf_size/f->slice_count);
990 memmove(buf_p, fs->ac ? fs->c.bytestream_start : fs->pb.buf, bytes);
991 av_assert0(bytes < (1<<24));
992 AV_WB24(buf_p+bytes, bytes);
993 bytes+=3;
994 }
995 buf_p += bytes;
806 } 996 }
807 emms_c();
808 997
809 f->picture_number++; 998 f->picture_number++;
810 999 return buf_p-buf;
811 if(f->ac){
812 return ff_rac_terminate(c);
813 }else{
814 flush_put_bits(&f->pb); //nicer padding FIXME
815 return used_count + (put_bits_count(&f->pb)+7)/8;
816 }
817 } 1000 }
818 #endif /* CONFIG_FFV1_ENCODER */ 1001 #endif /* CONFIG_FFV1_ENCODER */
819 1002
820 static av_cold int common_end(AVCodecContext *avctx){ 1003 static av_cold int common_end(AVCodecContext *avctx){
821 FFV1Context *s = avctx->priv_data; 1004 FFV1Context *s = avctx->priv_data;
822 int i; 1005 int i, j;
823 1006
824 for(i=0; i<s->plane_count; i++){ 1007 for(j=0; j<s->slice_count; j++){
825 PlaneContext *p= &s->plane[i]; 1008 FFV1Context *fs= s->slice_context[j];
1009 for(i=0; i<s->plane_count; i++){
1010 PlaneContext *p= &fs->plane[i];
826 1011
827 av_freep(&p->state); 1012 av_freep(&p->state);
828 av_freep(&p->vlc_state); 1013 av_freep(&p->vlc_state);
1014 }
1015 av_freep(&fs->sample_buffer);
829 } 1016 }
830 1017
831 av_freep(&s->sample_buffer);
832
833 return 0; 1018 return 0;
834 } 1019 }
835 1020
836 static av_always_inline void decode_line(FFV1Context *s, int w, int_fast16_t *sa mple[2], int plane_index, int bits){ 1021 static av_always_inline void decode_line(FFV1Context *s, int w, int_fast16_t *sa mple[2], int plane_index, int bits){
837 PlaneContext * const p= &s->plane[plane_index]; 1022 PlaneContext * const p= &s->plane[plane_index];
838 RangeCoder * const c= &s->c; 1023 RangeCoder * const c= &s->c;
839 int x; 1024 int x;
840 int run_count=0; 1025 int run_count=0;
841 int run_mode=0; 1026 int run_mode=0;
842 int run_index= s->run_index; 1027 int run_index= s->run_index;
843 1028
844 for(x=0; x<w; x++){ 1029 for(x=0; x<w; x++){
845 int diff, context, sign; 1030 int diff, context, sign;
846 1031
847 context= get_context(s, sample[1] + x, sample[0] + x, sample[1] + x); 1032 context= get_context(p, sample[1] + x, sample[0] + x, sample[1] + x);
848 if(context < 0){ 1033 if(context < 0){
849 context= -context; 1034 context= -context;
850 sign=1; 1035 sign=1;
851 }else 1036 }else
852 sign=0; 1037 sign=0;
853 1038
1039 av_assert2(context < p->context_count);
854 1040
855 if(s->ac){ 1041 if(s->ac){
856 diff= get_symbol_inline(c, p->state[context], 1); 1042 diff= get_symbol_inline(c, p->state[context], 1);
857 }else{ 1043 }else{
858 if(context == 0 && run_mode==0) run_mode=1; 1044 if(context == 0 && run_mode==0) run_mode=1;
859 1045
860 if(run_mode){ 1046 if(run_mode){
861 if(run_count==0 && run_mode==1){ 1047 if(run_count==0 && run_mode==1){
862 if(get_bits1(&s->gb)){ 1048 if(get_bits1(&s->gb)){
863 run_count = 1<<ff_log2_run[run_index]; 1049 run_count = 1<<ff_log2_run[run_index];
(...skipping 96 matching lines...) Expand 10 before | Expand all | Expand 10 after
960 r -= 0x100; 1146 r -= 0x100;
961 g -= (b + r)>>2; 1147 g -= (b + r)>>2;
962 b += g; 1148 b += g;
963 r += g; 1149 r += g;
964 1150
965 src[x + stride*y]= b + (g<<8) + (r<<16) + (0xFF<<24); 1151 src[x + stride*y]= b + (g<<8) + (r<<16) + (0xFF<<24);
966 } 1152 }
967 } 1153 }
968 } 1154 }
969 1155
1156 static int decode_slice(AVCodecContext *c, void *arg){
1157 FFV1Context *fs= *(void**)arg;
1158 FFV1Context *f= fs->avctx->priv_data;
1159 int width = fs->slice_width;
1160 int height= fs->slice_height;
1161 int x= fs->slice_x;
1162 int y= fs->slice_y;
1163 AVFrame * const p= &f->picture;
1164
1165 av_assert1(width && height);
1166 if(f->colorspace==0){
1167 const int chroma_width = -((-width )>>f->chroma_h_shift);
1168 const int chroma_height= -((-height)>>f->chroma_v_shift);
1169 const int cx= x>>f->chroma_h_shift;
1170 const int cy= y>>f->chroma_v_shift;
1171 decode_plane(fs, p->data[0] + x + y*p->linesize[0], width, height, p->li nesize[0], 0);
1172
1173 decode_plane(fs, p->data[1] + cx+cy*p->linesize[1], chroma_width, chroma _height, p->linesize[1], 1);
1174 decode_plane(fs, p->data[2] + cx+cy*p->linesize[1], chroma_width, chroma _height, p->linesize[2], 1);
1175 }else{
1176 decode_rgb_frame(fs, (uint32_t*)p->data[0] + x + y*(p->linesize[0]/4), w idth, height, p->linesize[0]/4);
1177 }
1178
1179 emms_c();
1180
1181 return 0;
1182 }
1183
970 static int read_quant_table(RangeCoder *c, int16_t *quant_table, int scale){ 1184 static int read_quant_table(RangeCoder *c, int16_t *quant_table, int scale){
971 int v; 1185 int v;
972 int i=0; 1186 int i=0;
973 uint8_t state[CONTEXT_SIZE]; 1187 uint8_t state[CONTEXT_SIZE];
974 1188
975 memset(state, 128, sizeof(state)); 1189 memset(state, 128, sizeof(state));
976 1190
977 for(v=0; i<128 ; v++){ 1191 for(v=0; i<128 ; v++){
978 int len= get_symbol(c, state, 0) + 1; 1192 int len= get_symbol(c, state, 0) + 1;
979 1193
980 if(len + i > 128) return -1; 1194 if(len + i > 128) return -1;
981 1195
982 while(len--){ 1196 while(len--){
983 quant_table[i] = scale*v; 1197 quant_table[i] = scale*v;
984 i++; 1198 i++;
985 //printf("%2d ",v); 1199 //printf("%2d ",v);
986 //if(i%16==0) printf("\n"); 1200 //if(i%16==0) printf("\n");
987 } 1201 }
988 } 1202 }
989 1203
990 for(i=1; i<128; i++){ 1204 for(i=1; i<128; i++){
991 quant_table[256-i]= -quant_table[i]; 1205 quant_table[256-i]= -quant_table[i];
992 } 1206 }
993 quant_table[128]= -quant_table[127]; 1207 quant_table[128]= -quant_table[127];
994 1208
995 return 2*v - 1; 1209 return 2*v - 1;
996 } 1210 }
997 1211
998 static int read_header(FFV1Context *f){ 1212 static int read_quant_tables(RangeCoder *c, int16_t quant_table[MAX_CONTEXT_INPU TS][256]){
1213 int i;
1214 int context_count=1;
1215
1216 for(i=0; i<5; i++){
1217 context_count*= read_quant_table(c, quant_table[i], context_count);
1218 if(context_count > 32768U){
1219 return -1;
1220 }
1221 }
1222 return (context_count+1)/2;
1223 }
1224
/**
 * Parse the version-2 global header stored in avctx->extradata:
 * stream version, coder type, colorspace, bit depth, chroma
 * subsampling, slice grid dimensions and the shared quantization
 * tables.
 * @return 0 on success, -1 on invalid data
 */
static int read_extra_header(FFV1Context *f){
    RangeCoder * const c= &f->c;
    uint8_t state[CONTEXT_SIZE];
    int i;

    memset(state, 128, sizeof(state));

    ff_init_range_decoder(c, f->avctx->extradata, f->avctx->extradata_size);
    ff_build_rac_states(c, 0.05*(1LL<<32), 256-8);

    f->version= get_symbol(c, state, 0);
    f->ac= f->avctx->coder_type= get_symbol(c, state, 0);
    if(f->ac>1){
        /* ac>1: a custom range-coder state transition table follows,
         * stored as deltas against the default one_state table */
        for(i=1; i<256; i++){
            f->state_transition[i]= get_symbol(c, state, 1) + c->one_state[i];
        }
    }
    f->colorspace= get_symbol(c, state, 0); //YUV cs type
    f->avctx->bits_per_raw_sample= get_symbol(c, state, 0);
    get_rac(c, state); //no chroma = false
    f->chroma_h_shift= get_symbol(c, state, 0);
    f->chroma_v_shift= get_symbol(c, state, 0);
    get_rac(c, state); //transparency plane
    f->plane_count= 2;
    f->num_h_slices= 1 + get_symbol(c, state, 0);
    f->num_v_slices= 1 + get_symbol(c, state, 0);
    /* each slice must cover at least one pixel in both dimensions */
    if(f->num_h_slices > (unsigned)f->width || f->num_v_slices > (unsigned)f->height){
        av_log(f->avctx, AV_LOG_ERROR, "too many slices\n");
        return -1;
    }

    /* count == MAX_QUANT_TABLES is valid (it is a count, not an index) */
    f->quant_table_count= get_symbol(c, state, 0);
    if(f->quant_table_count > (unsigned)MAX_QUANT_TABLES)
        return -1;
    for(i=0; i<f->quant_table_count; i++){
        if((f->context_count[i]= read_quant_tables(c, f->quant_tables[i])) < 0){
            av_log(f->avctx, AV_LOG_ERROR, "read_quant_table error\n");
            return -1;
        }
    }

    return 0;
}
1268
1269 static int read_header(FFV1Context *f){
1270 uint8_t state[CONTEXT_SIZE];
1271 int i, j, context_count;
1272 RangeCoder * const c= &f->slice_context[0]->c;
1273
1274 memset(state, 128, sizeof(state));
1275
1276 if(f->version < 2){
1277 f->version= get_symbol(c, state, 0);
1278 f->ac= f->avctx->coder_type= get_symbol(c, state, 0);
1279 if(f->ac>1){
1280 for(i=1; i<256; i++){
1281 f->state_transition[i]= get_symbol(c, state, 1) + c->one_state[i ];
1282 }
1283 }
1284 f->colorspace= get_symbol(c, state, 0); //YUV cs type
1285 if(f->version>0)
1286 f->avctx->bits_per_raw_sample= get_symbol(c, state, 0);
1287 get_rac(c, state); //no chroma = false
1288 f->chroma_h_shift= get_symbol(c, state, 0);
1289 f->chroma_v_shift= get_symbol(c, state, 0);
1290 get_rac(c, state); //transparency plane
1291 f->plane_count= 2;
1292 }
1020 1293
1021 if(f->colorspace==0){ 1294 if(f->colorspace==0){
1022 if(f->avctx->bits_per_raw_sample<=8){ 1295 if(f->avctx->bits_per_raw_sample<=8){
1023 switch(16*f->chroma_h_shift + f->chroma_v_shift){ 1296 switch(16*f->chroma_h_shift + f->chroma_v_shift){
1024 case 0x00: f->avctx->pix_fmt= PIX_FMT_YUV444P; break; 1297 case 0x00: f->avctx->pix_fmt= PIX_FMT_YUV444P; break;
1025 case 0x10: f->avctx->pix_fmt= PIX_FMT_YUV422P; break; 1298 case 0x10: f->avctx->pix_fmt= PIX_FMT_YUV422P; break;
1026 case 0x11: f->avctx->pix_fmt= PIX_FMT_YUV420P; break; 1299 case 0x11: f->avctx->pix_fmt= PIX_FMT_YUV420P; break;
1027 case 0x20: f->avctx->pix_fmt= PIX_FMT_YUV411P; break; 1300 case 0x20: f->avctx->pix_fmt= PIX_FMT_YUV411P; break;
1028 case 0x22: f->avctx->pix_fmt= PIX_FMT_YUV410P; break; 1301 case 0x22: f->avctx->pix_fmt= PIX_FMT_YUV410P; break;
1029 default: 1302 default:
(...skipping 15 matching lines...) Expand all
1045 av_log(f->avctx, AV_LOG_ERROR, "chroma subsampling not supported in this colorspace\n"); 1318 av_log(f->avctx, AV_LOG_ERROR, "chroma subsampling not supported in this colorspace\n");
1046 return -1; 1319 return -1;
1047 } 1320 }
1048 f->avctx->pix_fmt= PIX_FMT_RGB32; 1321 f->avctx->pix_fmt= PIX_FMT_RGB32;
1049 }else{ 1322 }else{
1050 av_log(f->avctx, AV_LOG_ERROR, "colorspace not supported\n"); 1323 av_log(f->avctx, AV_LOG_ERROR, "colorspace not supported\n");
1051 return -1; 1324 return -1;
1052 } 1325 }
1053 1326
1054 //printf("%d %d %d\n", f->chroma_h_shift, f->chroma_v_shift,f->avctx->pix_fmt); 1327 //printf("%d %d %d\n", f->chroma_h_shift, f->chroma_v_shift,f->avctx->pix_fmt);
1328 if(f->version < 2){
1329 context_count= read_quant_tables(c, f->quant_table);
1330 if(context_count < 0){
1331 av_log(f->avctx, AV_LOG_ERROR, "read_quant_table error\n");
1332 return -1;
1333 }
1334 }else{
1335 f->slice_count= get_symbol(c, state, 0);
1336 if(f->slice_count > (unsigned)MAX_SLICES)
1337 return -1;
1338 }
1055 1339
1056 context_count=1; 1340 for(j=0; j<f->slice_count; j++){
1057 for(i=0; i<5; i++){ 1341 FFV1Context *fs= f->slice_context[j];
1058 context_count*= read_quant_table(c, f->quant_table[i], context_count); 1342 fs->ac= f->ac;
1059 if(context_count < 0 || context_count > 32768){ 1343
1060 av_log(f->avctx, AV_LOG_ERROR, "read_quant_table error\n"); 1344 if(f->version >= 2){
1061 return -1; 1345 fs->slice_x = get_symbol(c, state, 0) *f->width ;
1346 fs->slice_y = get_symbol(c, state, 0) *f->height;
1347 fs->slice_width =(get_symbol(c, state, 0)+1)*f->width + fs->slice_x ;
1348 fs->slice_height=(get_symbol(c, state, 0)+1)*f->height + fs->slice_y ;
1349
1350 fs->slice_x /= f->num_h_slices;
1351 fs->slice_y /= f->num_v_slices;
1352 fs->slice_width = fs->slice_width /f->num_h_slices - fs->slice_x;
1353 fs->slice_height = fs->slice_height/f->num_v_slices - fs->slice_y;
1354 if((unsigned)fs->slice_width > f->width || (unsigned)fs->slice_heigh t > f->height)
1355 return -1;
1356 if( (unsigned)fs->slice_x + (uint64_t)fs->slice_width > f->width
1357 || (unsigned)fs->slice_y + (uint64_t)fs->slice_height > f->heigh t)
1358 return -1;
1062 } 1359 }
1063 }
1064 context_count= (context_count+1)/2;
1065 1360
1066 for(i=0; i<f->plane_count; i++){ 1361 for(i=0; i<f->plane_count; i++){
1067 PlaneContext * const p= &f->plane[i]; 1362 PlaneContext * const p= &fs->plane[i];
1068 1363
1069 p->context_count= context_count; 1364 if(f->version >= 2){
1365 int idx=get_symbol(c, state, 0);
1366 if(idx > (unsigned)f->quant_table_count){
1367 av_log(f->avctx, AV_LOG_ERROR, "quant_table_index out of ran ge\n");
1368 return -1;
1369 }
1370 memcpy(p->quant_table, f->quant_tables[idx], sizeof(p->quant_tab le));
1371 context_count= f->context_count[idx];
1372 }else{
1373 memcpy(p->quant_table, f->quant_table, sizeof(p->quant_table));
1374 }
1070 1375
1071 if(f->ac){ 1376 if(p->context_count < context_count){
1072 if(!p->state) p->state= av_malloc(CONTEXT_SIZE*p->context_count*size of(uint8_t)); 1377 av_freep(&p->state);
1073 }else{ 1378 av_freep(&p->vlc_state);
1074 if(!p->vlc_state) p->vlc_state= av_malloc(p->context_count*sizeof(Vl cState)); 1379 }
1380 p->context_count= context_count;
1075 } 1381 }
1076 } 1382 }
1077 1383
1078 return 0; 1384 return 0;
1079 } 1385 }
1080 1386
1081 static av_cold int decode_init(AVCodecContext *avctx) 1387 static av_cold int decode_init(AVCodecContext *avctx)
1082 { 1388 {
1083 // FFV1Context *s = avctx->priv_data; 1389 FFV1Context *f = avctx->priv_data;
1084 1390
1085 common_init(avctx); 1391 common_init(avctx);
1086 1392
1393 if(avctx->extradata && read_extra_header(f) < 0)
1394 return -1;
1395
1396 if(init_slice_contexts(f) < 0)
1397 return -1;
1398
1087 return 0; 1399 return 0;
1088 } 1400 }
1089 1401
/**
 * Decode one FFV1 frame.  Slice 0 shares the packet head with the
 * frame header; the remaining slices are located by walking 3-byte
 * big-endian size fields at the tail of the packet, back to front.
 * @return number of bytes consumed (the whole packet) or -1 on error
 */
static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt){
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    FFV1Context *f = avctx->priv_data;
    RangeCoder * const c= &f->slice_context[0]->c;  /* slice 0 carries the header */
    AVFrame * const p= &f->picture;
    int bytes_read, i;
    uint8_t keystate= 128;
    const uint8_t *buf_p;

    AVFrame *picture = data;

    ff_init_range_decoder(c, buf, buf_size);
    ff_build_rac_states(c, 0.05*(1LL<<32), 256-8);


    p->pict_type= FF_I_TYPE; //FIXME I vs. P
    if(get_rac(c, &keystate)){
        /* keyframe: a full header follows, after which per-slice state
         * must be (re)initialized and reset */
        p->key_frame= 1;
        if(read_header(f) < 0)
            return -1;
        if(init_slice_state(f) < 0)
            return -1;

        clear_state(f);
    }else{
        p->key_frame= 0;
    }
    if(f->ac>1){
        /* install the custom state transition table read by the header */
        int i;
        for(i=1; i<256; i++){
            c->one_state[i]= f->state_transition[i];
            c->zero_state[256-i]= 256-c->one_state[i];
        }
    }

    p->reference= 0;
    if(avctx->get_buffer(avctx, p) < 0){
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }

    if(avctx->debug&FF_DEBUG_PICT_INFO)
        av_log(avctx, AV_LOG_ERROR, "keyframe:%d coder:%d\n", p->key_frame, f->ac);

    if(!f->ac){
        /* Golomb mode: the header itself was range-coded, so find where
         * it ended and bit-read the rest of slice 0 from there */
        bytes_read = c->bytestream - c->bytestream_start - 1;
        if(bytes_read ==0) av_log(avctx, AV_LOG_ERROR, "error at end of AC stream\n"); //FIXME
        init_get_bits(&f->slice_context[0]->gb, buf + bytes_read, buf_size - bytes_read);
    } else {
        bytes_read = 0; /* avoid warning */
    }

    /* Slices 1..n-1: step back v = RB24(tail)+3 bytes per slice from the
     * end of the packet; slice 0 starts at the packet head, so the
     * pointer must never reach it. */
    buf_p= buf + buf_size;
    for(i=f->slice_count-1; i>0; i--){
        FFV1Context *fs= f->slice_context[i];
        int v= AV_RB24(buf_p-3)+3;
        if(buf_p - buf <= v){
            av_log(avctx, AV_LOG_ERROR, "Slice pointer chain broken\n");
            return -1;
        }
        buf_p -= v;
        if(fs->ac){
            ff_init_range_decoder(&fs->c, buf_p, v);
        }else{
            init_get_bits(&fs->gb, buf_p, v);
        }
    }

    /* decode all slices, possibly in parallel via the execute callback */
    avctx->execute(avctx, decode_slice, &f->slice_context[0], NULL, f->slice_count, sizeof(void*));
    f->picture_number++;

    *picture= *p;

    avctx->release_buffer(avctx, p); //FIXME

    *data_size = sizeof(AVFrame);

    return buf_size;
}
1175 1483
1176 AVCodec ffv1_decoder = { 1484 AVCodec ffv1_decoder = {
1177 "ffv1", 1485 "ffv1",
1178 AVMEDIA_TYPE_VIDEO, 1486 AVMEDIA_TYPE_VIDEO,
1179 CODEC_ID_FFV1, 1487 CODEC_ID_FFV1,
1180 sizeof(FFV1Context), 1488 sizeof(FFV1Context),
1181 decode_init, 1489 decode_init,
1182 NULL, 1490 NULL,
1183 common_end, 1491 common_end,
1184 decode_frame, 1492 decode_frame,
1185 CODEC_CAP_DR1 /*| CODEC_CAP_DRAW_HORIZ_BAND*/, 1493 CODEC_CAP_DR1 /*| CODEC_CAP_DRAW_HORIZ_BAND*/,
1186 NULL, 1494 NULL,
1187 .long_name= NULL_IF_CONFIG_SMALL("FFmpeg video codec #1"), 1495 .long_name= NULL_IF_CONFIG_SMALL("FFmpeg video codec #1"),
1188 }; 1496 };
1189 1497
#if CONFIG_FFV1_ENCODER
/** FFV1 encoder registration; only built when the encoder is enabled. */
AVCodec ffv1_encoder = {
    .name           = "ffv1",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = CODEC_ID_FFV1,
    .priv_data_size = sizeof(FFV1Context),
    .init           = encode_init,
    .encode         = encode_frame,
    .close          = common_end,
    .pix_fmts       = (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV444P, PIX_FMT_YUV422P, PIX_FMT_YUV411P, PIX_FMT_YUV410P, PIX_FMT_RGB32, PIX_FMT_YUV420P16, PIX_FMT_YUV422P16, PIX_FMT_YUV444P16, PIX_FMT_NONE},
    .long_name      = NULL_IF_CONFIG_SMALL("FFmpeg video codec #1"),
};
#endif
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698