OLD | NEW |
1 /* | 1 /* |
2 * FFplay : Simple Media Player based on the ffmpeg libraries | 2 * FFplay : Simple Media Player based on the FFmpeg libraries |
3 * Copyright (c) 2003 Fabrice Bellard | 3 * Copyright (c) 2003 Fabrice Bellard |
4 * | 4 * |
5 * This file is part of FFmpeg. | 5 * This file is part of FFmpeg. |
6 * | 6 * |
7 * FFmpeg is free software; you can redistribute it and/or | 7 * FFmpeg is free software; you can redistribute it and/or |
8 * modify it under the terms of the GNU Lesser General Public | 8 * modify it under the terms of the GNU Lesser General Public |
9 * License as published by the Free Software Foundation; either | 9 * License as published by the Free Software Foundation; either |
10 * version 2.1 of the License, or (at your option) any later version. | 10 * version 2.1 of the License, or (at your option) any later version. |
11 * | 11 * |
12 * FFmpeg is distributed in the hope that it will be useful, | 12 * FFmpeg is distributed in the hope that it will be useful, |
(...skipping 10 matching lines...)
23 #include <math.h> | 23 #include <math.h> |
24 #include <limits.h> | 24 #include <limits.h> |
25 #include "libavutil/avstring.h" | 25 #include "libavutil/avstring.h" |
26 #include "libavutil/pixdesc.h" | 26 #include "libavutil/pixdesc.h" |
27 #include "libavformat/avformat.h" | 27 #include "libavformat/avformat.h" |
28 #include "libavdevice/avdevice.h" | 28 #include "libavdevice/avdevice.h" |
29 #include "libswscale/swscale.h" | 29 #include "libswscale/swscale.h" |
30 #include "libavcodec/audioconvert.h" | 30 #include "libavcodec/audioconvert.h" |
31 #include "libavcodec/colorspace.h" | 31 #include "libavcodec/colorspace.h" |
32 #include "libavcodec/opt.h" | 32 #include "libavcodec/opt.h" |
| 33 #include "libavcodec/avfft.h" |
| 34 |
| 35 #if CONFIG_AVFILTER |
| 36 # include "libavfilter/avfilter.h" |
| 37 # include "libavfilter/avfiltergraph.h" |
| 38 # include "libavfilter/graphparser.h" |
| 39 #endif |
33 | 40 |
34 #include "cmdutils.h" | 41 #include "cmdutils.h" |
35 | 42 |
36 #include <SDL.h> | 43 #include <SDL.h> |
37 #include <SDL_thread.h> | 44 #include <SDL_thread.h> |
38 | 45 |
39 #ifdef __MINGW32__ | 46 #ifdef __MINGW32__ |
40 #undef main /* We don't want SDL to override our main() */ | 47 #undef main /* We don't want SDL to override our main() */ |
41 #endif | 48 #endif |
42 | 49 |
43 #undef exit | |
44 #undef printf | |
45 #undef fprintf | |
46 | |
47 const char program_name[] = "FFplay"; | 50 const char program_name[] = "FFplay"; |
48 const int program_birth_year = 2003; | 51 const int program_birth_year = 2003; |
49 | 52 |
50 //#define DEBUG_SYNC | 53 //#define DEBUG_SYNC |
51 | 54 |
52 #define MAX_QUEUE_SIZE (15 * 1024 * 1024) | 55 #define MAX_QUEUE_SIZE (15 * 1024 * 1024) |
53 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024) | 56 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024) |
54 #define MIN_FRAMES 5 | 57 #define MIN_FRAMES 5 |
55 | 58 |
56 /* SDL audio buffer size, in samples. Should be small to have precise | 59 /* SDL audio buffer size, in samples. Should be small to have precise |
57 A/V sync as SDL does not have hardware buffer fullness info. */ | 60 A/V sync as SDL does not have hardware buffer fullness info. */ |
58 #define SDL_AUDIO_BUFFER_SIZE 1024 | 61 #define SDL_AUDIO_BUFFER_SIZE 1024 |
59 | 62 |
60 /* no AV sync correction is done if below the AV sync threshold */ | 63 /* no AV sync correction is done if below the AV sync threshold */ |
61 #define AV_SYNC_THRESHOLD 0.01 | 64 #define AV_SYNC_THRESHOLD 0.01 |
62 /* no AV correction is done if too big error */ | 65 /* no AV correction is done if too big error */ |
63 #define AV_NOSYNC_THRESHOLD 10.0 | 66 #define AV_NOSYNC_THRESHOLD 10.0 |
64 | 67 |
65 /* maximum audio speed change to get correct sync */ | 68 /* maximum audio speed change to get correct sync */ |
66 #define SAMPLE_CORRECTION_PERCENT_MAX 10 | 69 #define SAMPLE_CORRECTION_PERCENT_MAX 10 |
67 | 70 |
68 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */ | 71 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */ |
69 #define AUDIO_DIFF_AVG_NB 20 | 72 #define AUDIO_DIFF_AVG_NB 20 |
70 | 73 |
71 /* NOTE: the size must be big enough to compensate the hardware audio buffersize size */ | 74 /* NOTE: the size must be big enough to compensate the hardware audio buffersize size */ |
72 #define SAMPLE_ARRAY_SIZE (2*65536) | 75 #define SAMPLE_ARRAY_SIZE (2*65536) |
73 | 76 |
| 77 #if !CONFIG_AVFILTER |
74 static int sws_flags = SWS_BICUBIC; | 78 static int sws_flags = SWS_BICUBIC; |
| 79 #endif |
75 | 80 |
76 typedef struct PacketQueue { | 81 typedef struct PacketQueue { |
77 AVPacketList *first_pkt, *last_pkt; | 82 AVPacketList *first_pkt, *last_pkt; |
78 int nb_packets; | 83 int nb_packets; |
79 int size; | 84 int size; |
80 int abort_request; | 85 int abort_request; |
81 SDL_mutex *mutex; | 86 SDL_mutex *mutex; |
82 SDL_cond *cond; | 87 SDL_cond *cond; |
83 } PacketQueue; | 88 } PacketQueue; |
84 | 89 |
85 #define VIDEO_PICTURE_QUEUE_SIZE 1 | 90 #define VIDEO_PICTURE_QUEUE_SIZE 1 |
86 #define SUBPICTURE_QUEUE_SIZE 4 | 91 #define SUBPICTURE_QUEUE_SIZE 4 |
87 | 92 |
88 typedef struct VideoPicture { | 93 typedef struct VideoPicture { |
89 double pts; ///<presentation time stamp for this picture | 94 double pts; ///<presentation time stamp for this picture |
90 int64_t pos; ///<byte position in file | 95 int64_t pos; ///<byte position in file |
91 SDL_Overlay *bmp; | 96 SDL_Overlay *bmp; |
92 int width, height; /* source height & width */ | 97 int width, height; /* source height & width */ |
93 int allocated; | 98 int allocated; |
94 SDL_TimerID timer_id; | 99 SDL_TimerID timer_id; |
| 100 enum PixelFormat pix_fmt; |
| 101 |
| 102 #if CONFIG_AVFILTER |
| 103 AVFilterPicRef *picref; |
| 104 #endif |
95 } VideoPicture; | 105 } VideoPicture; |
96 | 106 |
97 typedef struct SubPicture { | 107 typedef struct SubPicture { |
98 double pts; /* presentation time stamp for this picture */ | 108 double pts; /* presentation time stamp for this picture */ |
99 AVSubtitle sub; | 109 AVSubtitle sub; |
100 } SubPicture; | 110 } SubPicture; |
101 | 111 |
102 enum { | 112 enum { |
103 AV_SYNC_AUDIO_MASTER, /* default choice */ | 113 AV_SYNC_AUDIO_MASTER, /* default choice */ |
104 AV_SYNC_VIDEO_MASTER, | 114 AV_SYNC_VIDEO_MASTER, |
(...skipping 39 matching lines...)
144 int audio_buf_index; /* in bytes */ | 154 int audio_buf_index; /* in bytes */ |
145 AVPacket audio_pkt_temp; | 155 AVPacket audio_pkt_temp; |
146 AVPacket audio_pkt; | 156 AVPacket audio_pkt; |
147 enum SampleFormat audio_src_fmt; | 157 enum SampleFormat audio_src_fmt; |
148 AVAudioConvert *reformat_ctx; | 158 AVAudioConvert *reformat_ctx; |
149 | 159 |
150 int show_audio; /* if true, display audio samples */ | 160 int show_audio; /* if true, display audio samples */ |
151 int16_t sample_array[SAMPLE_ARRAY_SIZE]; | 161 int16_t sample_array[SAMPLE_ARRAY_SIZE]; |
152 int sample_array_index; | 162 int sample_array_index; |
153 int last_i_start; | 163 int last_i_start; |
| 164 RDFTContext *rdft; |
| 165 int rdft_bits; |
| 166 int xpos; |
154 | 167 |
155 SDL_Thread *subtitle_tid; | 168 SDL_Thread *subtitle_tid; |
156 int subtitle_stream; | 169 int subtitle_stream; |
157 int subtitle_stream_changed; | 170 int subtitle_stream_changed; |
158 AVStream *subtitle_st; | 171 AVStream *subtitle_st; |
159 PacketQueue subtitleq; | 172 PacketQueue subtitleq; |
160 SubPicture subpq[SUBPICTURE_QUEUE_SIZE]; | 173 SubPicture subpq[SUBPICTURE_QUEUE_SIZE]; |
161 int subpq_size, subpq_rindex, subpq_windex; | 174 int subpq_size, subpq_rindex, subpq_windex; |
162 SDL_mutex *subpq_mutex; | 175 SDL_mutex *subpq_mutex; |
163 SDL_cond *subpq_cond; | 176 SDL_cond *subpq_cond; |
164 | 177 |
165 double frame_timer; | 178 double frame_timer; |
166 double frame_last_pts; | 179 double frame_last_pts; |
167 double frame_last_delay; | 180 double frame_last_delay; |
168 double video_clock; ///<pts of last decoded frame / predicted pts of next decoded frame | 181 double video_clock; ///<pts of last decoded frame / predicted pts of next decoded frame |
169 int video_stream; | 182 int video_stream; |
170 AVStream *video_st; | 183 AVStream *video_st; |
171 PacketQueue videoq; | 184 PacketQueue videoq; |
172 double video_current_pts; ///<current displayed pts (different from video_clock if frame fifos are used) | 185 double video_current_pts; ///<current displayed pts (different from video_clock if frame fifos are used) |
173 double video_current_pts_drift; ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts | 186 double video_current_pts_drift; ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts |
174 int64_t video_current_pos; ///<current displayed file pos | 187 int64_t video_current_pos; ///<current displayed file pos |
175 VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE]; | 188 VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE]; |
176 int pictq_size, pictq_rindex, pictq_windex; | 189 int pictq_size, pictq_rindex, pictq_windex; |
177 SDL_mutex *pictq_mutex; | 190 SDL_mutex *pictq_mutex; |
178 SDL_cond *pictq_cond; | 191 SDL_cond *pictq_cond; |
| 192 #if !CONFIG_AVFILTER |
179 struct SwsContext *img_convert_ctx; | 193 struct SwsContext *img_convert_ctx; |
| 194 #endif |
180 | 195 |
181 // QETimer *video_timer; | 196 // QETimer *video_timer; |
182 char filename[1024]; | 197 char filename[1024]; |
183 int width, height, xleft, ytop; | 198 int width, height, xleft, ytop; |
184 | 199 |
185 int64_t faulty_pts; | 200 int64_t faulty_pts; |
186 int64_t faulty_dts; | 201 int64_t faulty_dts; |
187 int64_t last_dts_for_fault_detection; | 202 int64_t last_dts_for_fault_detection; |
188 int64_t last_pts_for_fault_detection; | 203 int64_t last_pts_for_fault_detection; |
189 | 204 |
| 205 #if CONFIG_AVFILTER |
 | 206 AVFilterContext *out_video_filter; ///<the last filter in the video chain |
| 207 #endif |
190 } VideoState; | 208 } VideoState; |
191 | 209 |
192 static void show_help(void); | 210 static void show_help(void); |
193 static int audio_write_get_buf_size(VideoState *is); | 211 static int audio_write_get_buf_size(VideoState *is); |
194 | 212 |
195 /* options specified by the user */ | 213 /* options specified by the user */ |
196 static AVInputFormat *file_iformat; | 214 static AVInputFormat *file_iformat; |
197 static const char *input_filename; | 215 static const char *input_filename; |
198 static int fs_screen_width; | 216 static int fs_screen_width; |
199 static int fs_screen_height; | 217 static int fs_screen_height; |
200 static int screen_width = 0; | 218 static int screen_width = 0; |
201 static int screen_height = 0; | 219 static int screen_height = 0; |
202 static int frame_width = 0; | 220 static int frame_width = 0; |
203 static int frame_height = 0; | 221 static int frame_height = 0; |
204 static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE; | 222 static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE; |
205 static int audio_disable; | 223 static int audio_disable; |
206 static int video_disable; | 224 static int video_disable; |
207 static int wanted_audio_stream= 0; | 225 static int wanted_stream[CODEC_TYPE_NB]={ |
208 static int wanted_video_stream= 0; | 226 [CODEC_TYPE_AUDIO]=-1, |
209 static int wanted_subtitle_stream= -1; | 227 [CODEC_TYPE_VIDEO]=-1, |
| 228 [CODEC_TYPE_SUBTITLE]=-1, |
| 229 }; |
210 static int seek_by_bytes=-1; | 230 static int seek_by_bytes=-1; |
211 static int display_disable; | 231 static int display_disable; |
212 static int show_status = 1; | 232 static int show_status = 1; |
213 static int av_sync_type = AV_SYNC_AUDIO_MASTER; | 233 static int av_sync_type = AV_SYNC_AUDIO_MASTER; |
214 static int64_t start_time = AV_NOPTS_VALUE; | 234 static int64_t start_time = AV_NOPTS_VALUE; |
215 static int debug = 0; | 235 static int debug = 0; |
216 static int debug_mv = 0; | 236 static int debug_mv = 0; |
217 static int step = 0; | 237 static int step = 0; |
218 static int thread_count = 1; | 238 static int thread_count = 1; |
219 static int workaround_bugs = 1; | 239 static int workaround_bugs = 1; |
220 static int fast = 0; | 240 static int fast = 0; |
221 static int genpts = 0; | 241 static int genpts = 0; |
222 static int lowres = 0; | 242 static int lowres = 0; |
223 static int idct = FF_IDCT_AUTO; | 243 static int idct = FF_IDCT_AUTO; |
224 static enum AVDiscard skip_frame= AVDISCARD_DEFAULT; | 244 static enum AVDiscard skip_frame= AVDISCARD_DEFAULT; |
225 static enum AVDiscard skip_idct= AVDISCARD_DEFAULT; | 245 static enum AVDiscard skip_idct= AVDISCARD_DEFAULT; |
226 static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT; | 246 static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT; |
227 static int error_recognition = FF_ER_CAREFUL; | 247 static int error_recognition = FF_ER_CAREFUL; |
228 static int error_concealment = 3; | 248 static int error_concealment = 3; |
229 static int decoder_reorder_pts= -1; | 249 static int decoder_reorder_pts= -1; |
| 250 static int autoexit; |
| 251 #if CONFIG_AVFILTER |
| 252 static char *vfilters = NULL; |
| 253 #endif |
230 | 254 |
231 /* current context */ | 255 /* current context */ |
232 static int is_full_screen; | 256 static int is_full_screen; |
233 static VideoState *cur_stream; | 257 static VideoState *cur_stream; |
234 static int64_t audio_callback_time; | 258 static int64_t audio_callback_time; |
235 | 259 |
236 static AVPacket flush_pkt; | 260 static AVPacket flush_pkt; |
237 | 261 |
238 #define FF_ALLOC_EVENT (SDL_USEREVENT) | 262 #define FF_ALLOC_EVENT (SDL_USEREVENT) |
239 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1) | 263 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1) |
(...skipping 414 matching lines...)
654 VideoPicture *vp; | 678 VideoPicture *vp; |
655 SubPicture *sp; | 679 SubPicture *sp; |
656 AVPicture pict; | 680 AVPicture pict; |
657 float aspect_ratio; | 681 float aspect_ratio; |
658 int width, height, x, y; | 682 int width, height, x, y; |
659 SDL_Rect rect; | 683 SDL_Rect rect; |
660 int i; | 684 int i; |
661 | 685 |
662 vp = &is->pictq[is->pictq_rindex]; | 686 vp = &is->pictq[is->pictq_rindex]; |
663 if (vp->bmp) { | 687 if (vp->bmp) { |
| 688 #if CONFIG_AVFILTER |
| 689 if (vp->picref->pixel_aspect.num == 0) |
| 690 aspect_ratio = 0; |
| 691 else |
| 692 aspect_ratio = av_q2d(vp->picref->pixel_aspect); |
| 693 #else |
| 694 |
664 /* XXX: use variable in the frame */ | 695 /* XXX: use variable in the frame */ |
665 if (is->video_st->sample_aspect_ratio.num) | 696 if (is->video_st->sample_aspect_ratio.num) |
666 aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio); | 697 aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio); |
667 else if (is->video_st->codec->sample_aspect_ratio.num) | 698 else if (is->video_st->codec->sample_aspect_ratio.num) |
668 aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio); | 699 aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio); |
669 else | 700 else |
670 aspect_ratio = 0; | 701 aspect_ratio = 0; |
| 702 #endif |
671 if (aspect_ratio <= 0.0) | 703 if (aspect_ratio <= 0.0) |
672 aspect_ratio = 1.0; | 704 aspect_ratio = 1.0; |
673 aspect_ratio *= (float)is->video_st->codec->width / is->video_st->codec->height; | 705 aspect_ratio *= (float)vp->width / (float)vp->height; |
674 /* if an active format is indicated, then it overrides the | 706 /* if an active format is indicated, then it overrides the |
675 mpeg format */ | 707 mpeg format */ |
676 #if 0 | 708 #if 0 |
677 if (is->video_st->codec->dtg_active_format != is->dtg_active_format) { | 709 if (is->video_st->codec->dtg_active_format != is->dtg_active_format) { |
678 is->dtg_active_format = is->video_st->codec->dtg_active_format; | 710 is->dtg_active_format = is->video_st->codec->dtg_active_format; |
679 printf("dtg_active_format=%d\n", is->dtg_active_format); | 711 printf("dtg_active_format=%d\n", is->dtg_active_format); |
680 } | 712 } |
681 #endif | 713 #endif |
682 #if 0 | 714 #if 0 |
683 switch(is->video_st->codec->dtg_active_format) { | 715 switch(is->video_st->codec->dtg_active_format) { |
(...skipping 86 matching lines...)
770 return a; | 802 return a; |
771 else | 803 else |
772 return a + b; | 804 return a + b; |
773 } | 805 } |
774 | 806 |
775 static void video_audio_display(VideoState *s) | 807 static void video_audio_display(VideoState *s) |
776 { | 808 { |
777 int i, i_start, x, y1, y, ys, delay, n, nb_display_channels; | 809 int i, i_start, x, y1, y, ys, delay, n, nb_display_channels; |
778 int ch, channels, h, h2, bgcolor, fgcolor; | 810 int ch, channels, h, h2, bgcolor, fgcolor; |
779 int16_t time_diff; | 811 int16_t time_diff; |
| 812 int rdft_bits, nb_freq; |
| 813 |
| 814 for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++) |
| 815 ; |
| 816 nb_freq= 1<<(rdft_bits-1); |
780 | 817 |
781 /* compute display index : center on currently output samples */ | 818 /* compute display index : center on currently output samples */ |
782 channels = s->audio_st->codec->channels; | 819 channels = s->audio_st->codec->channels; |
783 nb_display_channels = channels; | 820 nb_display_channels = channels; |
784 if (!s->paused) { | 821 if (!s->paused) { |
| 822 int data_used= s->show_audio==1 ? s->width : (2*nb_freq); |
785 n = 2 * channels; | 823 n = 2 * channels; |
786 delay = audio_write_get_buf_size(s); | 824 delay = audio_write_get_buf_size(s); |
787 delay /= n; | 825 delay /= n; |
788 | 826 |
789 /* to be more precise, we take into account the time spent since | 827 /* to be more precise, we take into account the time spent since |
790 the last buffer computation */ | 828 the last buffer computation */ |
791 if (audio_callback_time) { | 829 if (audio_callback_time) { |
792 time_diff = av_gettime() - audio_callback_time; | 830 time_diff = av_gettime() - audio_callback_time; |
793 delay += (time_diff * s->audio_st->codec->sample_rate) / 1000000; | 831 delay += (time_diff * s->audio_st->codec->sample_rate) / 1000000; |
794 } | 832 } |
795 | 833 |
796 delay -= s->width / 2; | 834 delay -= data_used / 2; |
797 if (delay < s->width) | 835 if (delay < data_used) |
798 delay = s->width; | 836 delay = data_used; |
799 | 837 |
800 i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE); | 838 i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE); |
801 | 839 if(s->show_audio==1){ |
802 h= INT_MIN; | 840 h= INT_MIN; |
803 for(i=0; i<1000; i+=channels){ | 841 for(i=0; i<1000; i+=channels){ |
804 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE; | 842 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE; |
805 int a= s->sample_array[idx]; | 843 int a= s->sample_array[idx]; |
806 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE]; | 844 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE]; |
807 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE]; | 845 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE]; |
808 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE]; | 846 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE]; |
809 int score= a-d; | 847 int score= a-d; |
810 if(h<score && (b^c)<0){ | 848 if(h<score && (b^c)<0){ |
811 h= score; | 849 h= score; |
812 i_start= idx; | 850 i_start= idx; |
| 851 } |
813 } | 852 } |
814 } | 853 } |
815 | 854 |
816 s->last_i_start = i_start; | 855 s->last_i_start = i_start; |
817 } else { | 856 } else { |
818 i_start = s->last_i_start; | 857 i_start = s->last_i_start; |
819 } | 858 } |
820 | 859 |
821 bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00); | 860 bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00); |
822 fill_rectangle(screen, | 861 if(s->show_audio==1){ |
823 s->xleft, s->ytop, s->width, s->height, | 862 fill_rectangle(screen, |
824 bgcolor); | 863 s->xleft, s->ytop, s->width, s->height, |
| 864 bgcolor); |
825 | 865 |
826 fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff); | 866 fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff); |
827 | 867 |
828 /* total height for one channel */ | 868 /* total height for one channel */ |
829 h = s->height / nb_display_channels; | 869 h = s->height / nb_display_channels; |
830 /* graph height / 2 */ | 870 /* graph height / 2 */ |
831 h2 = (h * 9) / 20; | 871 h2 = (h * 9) / 20; |
832 for(ch = 0;ch < nb_display_channels; ch++) { | 872 for(ch = 0;ch < nb_display_channels; ch++) { |
833 i = i_start + ch; | 873 i = i_start + ch; |
834 y1 = s->ytop + ch * h + (h / 2); /* position of center line */ | 874 y1 = s->ytop + ch * h + (h / 2); /* position of center line */ |
835 for(x = 0; x < s->width; x++) { | 875 for(x = 0; x < s->width; x++) { |
836 y = (s->sample_array[i] * h2) >> 15; | 876 y = (s->sample_array[i] * h2) >> 15; |
837 if (y < 0) { | 877 if (y < 0) { |
838 y = -y; | 878 y = -y; |
839 ys = y1 - y; | 879 ys = y1 - y; |
840 } else { | 880 } else { |
841 ys = y1; | 881 ys = y1; |
| 882 } |
| 883 fill_rectangle(screen, |
| 884 s->xleft + x, ys, 1, y, |
| 885 fgcolor); |
| 886 i += channels; |
| 887 if (i >= SAMPLE_ARRAY_SIZE) |
| 888 i -= SAMPLE_ARRAY_SIZE; |
842 } | 889 } |
| 890 } |
| 891 |
| 892 fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff); |
| 893 |
| 894 for(ch = 1;ch < nb_display_channels; ch++) { |
| 895 y = s->ytop + ch * h; |
843 fill_rectangle(screen, | 896 fill_rectangle(screen, |
844 s->xleft + x, ys, 1, y, | 897 s->xleft, y, s->width, 1, |
845 fgcolor); | 898 fgcolor); |
846 i += channels; | |
847 if (i >= SAMPLE_ARRAY_SIZE) | |
848 i -= SAMPLE_ARRAY_SIZE; | |
849 } | 899 } |
| 900 SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height); |
| 901 }else{ |
| 902 nb_display_channels= FFMIN(nb_display_channels, 2); |
| 903 if(rdft_bits != s->rdft_bits){ |
| 904 av_rdft_end(s->rdft); |
| 905 s->rdft = av_rdft_init(rdft_bits, DFT_R2C); |
| 906 s->rdft_bits= rdft_bits; |
| 907 } |
| 908 { |
| 909 FFTSample data[2][2*nb_freq]; |
| 910 for(ch = 0;ch < nb_display_channels; ch++) { |
| 911 i = i_start + ch; |
| 912 for(x = 0; x < 2*nb_freq; x++) { |
| 913 double w= (x-nb_freq)*(1.0/nb_freq); |
| 914 data[ch][x]= s->sample_array[i]*(1.0-w*w); |
| 915 i += channels; |
| 916 if (i >= SAMPLE_ARRAY_SIZE) |
| 917 i -= SAMPLE_ARRAY_SIZE; |
| 918 } |
| 919 av_rdft_calc(s->rdft, data[ch]); |
| 920 } |
 | 921 //least efficient way to do this, we should of course directly access it but its more than fast enough |
| 922 for(y=0; y<s->height; y++){ |
| 923 double w= 1/sqrt(nb_freq); |
 | 924 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1])); |
 | 925 int b= sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0] + data[1][2*y+1]*data[1][2*y+1])); |
| 926 a= FFMIN(a,255); |
| 927 b= FFMIN(b,255); |
| 928 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2); |
| 929 |
| 930 fill_rectangle(screen, |
| 931 s->xpos, s->height-y, 1, 1, |
| 932 fgcolor); |
| 933 } |
| 934 } |
| 935 SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height); |
| 936 s->xpos++; |
| 937 if(s->xpos >= s->width) |
| 938 s->xpos= s->xleft; |
850 } | 939 } |
851 | |
852 fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff); | |
853 | |
854 for(ch = 1;ch < nb_display_channels; ch++) { | |
855 y = s->ytop + ch * h; | |
856 fill_rectangle(screen, | |
857 s->xleft, y, s->width, 1, | |
858 fgcolor); | |
859 } | |
860 SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height); | |
861 } | 940 } |
862 | 941 |
863 static int video_open(VideoState *is){ | 942 static int video_open(VideoState *is){ |
864 int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL; | 943 int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL; |
865 int w,h; | 944 int w,h; |
866 | 945 |
867 if(is_full_screen) flags |= SDL_FULLSCREEN; | 946 if(is_full_screen) flags |= SDL_FULLSCREEN; |
868 else flags |= SDL_RESIZABLE; | 947 else flags |= SDL_RESIZABLE; |
869 | 948 |
870 if (is_full_screen && fs_screen_width) { | 949 if (is_full_screen && fs_screen_width) { |
871 w = fs_screen_width; | 950 w = fs_screen_width; |
872 h = fs_screen_height; | 951 h = fs_screen_height; |
873 } else if(!is_full_screen && screen_width){ | 952 } else if(!is_full_screen && screen_width){ |
874 w = screen_width; | 953 w = screen_width; |
875 h = screen_height; | 954 h = screen_height; |
| 955 #if CONFIG_AVFILTER |
| 956 }else if (is->out_video_filter && is->out_video_filter->inputs[0]){ |
| 957 w = is->out_video_filter->inputs[0]->w; |
| 958 h = is->out_video_filter->inputs[0]->h; |
| 959 #else |
876 }else if (is->video_st && is->video_st->codec->width){ | 960 }else if (is->video_st && is->video_st->codec->width){ |
877 w = is->video_st->codec->width; | 961 w = is->video_st->codec->width; |
878 h = is->video_st->codec->height; | 962 h = is->video_st->codec->height; |
| 963 #endif |
879 } else { | 964 } else { |
880 w = 640; | 965 w = 640; |
881 h = 480; | 966 h = 480; |
882 } | 967 } |
883 #ifndef __APPLE__ | 968 #ifndef __APPLE__ |
884 screen = SDL_SetVideoMode(w, h, 0, flags); | 969 screen = SDL_SetVideoMode(w, h, 0, flags); |
885 #else | 970 #else |
886 /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */ | 971 /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */ |
887 screen = SDL_SetVideoMode(w, h, 24, flags); | 972 screen = SDL_SetVideoMode(w, h, 24, flags); |
888 #endif | 973 #endif |
(...skipping 113 matching lines...)
1002 if(is->read_pause_return != AVERROR(ENOSYS)){ | 1087 if(is->read_pause_return != AVERROR(ENOSYS)){ |
1003 is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0; | 1088 is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0; |
1004 } | 1089 } |
1005 is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0; | 1090 is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0; |
1006 } | 1091 } |
1007 is->paused = !is->paused; | 1092 is->paused = !is->paused; |
1008 } | 1093 } |
1009 | 1094 |
1010 static double compute_frame_delay(double frame_current_pts, VideoState *is) | 1095 static double compute_frame_delay(double frame_current_pts, VideoState *is) |
1011 { | 1096 { |
1012 double actual_delay, delay, sync_threshold, ref_clock, diff; | 1097 double actual_delay, delay, sync_threshold, diff; |
1013 | 1098 |
1014 /* compute nominal delay */ | 1099 /* compute nominal delay */ |
1015 delay = frame_current_pts - is->frame_last_pts; | 1100 delay = frame_current_pts - is->frame_last_pts; |
1016 if (delay <= 0 || delay >= 10.0) { | 1101 if (delay <= 0 || delay >= 10.0) { |
1017 /* if incorrect delay, use previous one */ | 1102 /* if incorrect delay, use previous one */ |
1018 delay = is->frame_last_delay; | 1103 delay = is->frame_last_delay; |
1019 } else { | 1104 } else { |
1020 is->frame_last_delay = delay; | 1105 is->frame_last_delay = delay; |
1021 } | 1106 } |
1022 is->frame_last_pts = frame_current_pts; | 1107 is->frame_last_pts = frame_current_pts; |
(...skipping 157 matching lines...)
1180 static void alloc_picture(void *opaque) | 1265 static void alloc_picture(void *opaque) |
1181 { | 1266 { |
1182 VideoState *is = opaque; | 1267 VideoState *is = opaque; |
1183 VideoPicture *vp; | 1268 VideoPicture *vp; |
1184 | 1269 |
1185 vp = &is->pictq[is->pictq_windex]; | 1270 vp = &is->pictq[is->pictq_windex]; |
1186 | 1271 |
1187 if (vp->bmp) | 1272 if (vp->bmp) |
1188 SDL_FreeYUVOverlay(vp->bmp); | 1273 SDL_FreeYUVOverlay(vp->bmp); |
1189 | 1274 |
1190 vp->bmp = SDL_CreateYUVOverlay(is->video_st->codec->width, | 1275 #if CONFIG_AVFILTER |
1191 is->video_st->codec->height, | 1276 if (vp->picref) |
| 1277 avfilter_unref_pic(vp->picref); |
| 1278 vp->picref = NULL; |
| 1279 |
| 1280 vp->width = is->out_video_filter->inputs[0]->w; |
| 1281 vp->height = is->out_video_filter->inputs[0]->h; |
| 1282 vp->pix_fmt = is->out_video_filter->inputs[0]->format; |
| 1283 #else |
| 1284 vp->width = is->video_st->codec->width; |
| 1285 vp->height = is->video_st->codec->height; |
| 1286 vp->pix_fmt = is->video_st->codec->pix_fmt; |
| 1287 #endif |
| 1288 |
| 1289 vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height, |
1192 SDL_YV12_OVERLAY, | 1290 SDL_YV12_OVERLAY, |
1193 screen); | 1291 screen); |
1194 vp->width = is->video_st->codec->width; | |
1195 vp->height = is->video_st->codec->height; | |
1196 | 1292 |
1197 SDL_LockMutex(is->pictq_mutex); | 1293 SDL_LockMutex(is->pictq_mutex); |
1198 vp->allocated = 1; | 1294 vp->allocated = 1; |
1199 SDL_CondSignal(is->pictq_cond); | 1295 SDL_CondSignal(is->pictq_cond); |
1200 SDL_UnlockMutex(is->pictq_mutex); | 1296 SDL_UnlockMutex(is->pictq_mutex); |
1201 } | 1297 } |
1202 | 1298 |
1203 /** | 1299 /** |
1204 * | 1300 * |
1205 * @param pts the dts of the pkt / pts of the frame and guessed if not known | 1301 * @param pts the dts of the pkt / pts of the frame and guessed if not known |
1206 */ | 1302 */ |
1207 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos) | 1303 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos) |
1208 { | 1304 { |
1209 VideoPicture *vp; | 1305 VideoPicture *vp; |
1210 int dst_pix_fmt; | 1306 int dst_pix_fmt; |
1211 | 1307 #if CONFIG_AVFILTER |
| 1308 AVPicture pict_src; |
| 1309 #endif |
1212 /* wait until we have space to put a new picture */ | 1310 /* wait until we have space to put a new picture */ |
1213 SDL_LockMutex(is->pictq_mutex); | 1311 SDL_LockMutex(is->pictq_mutex); |
1214 while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE && | 1312 while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE && |
1215 !is->videoq.abort_request) { | 1313 !is->videoq.abort_request) { |
1216 SDL_CondWait(is->pictq_cond, is->pictq_mutex); | 1314 SDL_CondWait(is->pictq_cond, is->pictq_mutex); |
1217 } | 1315 } |
1218 SDL_UnlockMutex(is->pictq_mutex); | 1316 SDL_UnlockMutex(is->pictq_mutex); |
1219 | 1317 |
1220 if (is->videoq.abort_request) | 1318 if (is->videoq.abort_request) |
1221 return -1; | 1319 return -1; |
1222 | 1320 |
1223 vp = &is->pictq[is->pictq_windex]; | 1321 vp = &is->pictq[is->pictq_windex]; |
1224 | 1322 |
1225 /* alloc or resize hardware picture buffer */ | 1323 /* alloc or resize hardware picture buffer */ |
1226 if (!vp->bmp || | 1324 if (!vp->bmp || |
| 1325 #if CONFIG_AVFILTER |
| 1326 vp->width != is->out_video_filter->inputs[0]->w || |
| 1327 vp->height != is->out_video_filter->inputs[0]->h) { |
| 1328 #else |
1227 vp->width != is->video_st->codec->width || | 1329 vp->width != is->video_st->codec->width || |
1228 vp->height != is->video_st->codec->height) { | 1330 vp->height != is->video_st->codec->height) { |
| 1331 #endif |
1229 SDL_Event event; | 1332 SDL_Event event; |
1230 | 1333 |
1231 vp->allocated = 0; | 1334 vp->allocated = 0; |
1232 | 1335 |
1233 /* the allocation must be done in the main thread to avoid | 1336 /* the allocation must be done in the main thread to avoid |
1234 locking problems */ | 1337 locking problems */ |
1235 event.type = FF_ALLOC_EVENT; | 1338 event.type = FF_ALLOC_EVENT; |
1236 event.user.data1 = is; | 1339 event.user.data1 = is; |
1237 SDL_PushEvent(&event); | 1340 SDL_PushEvent(&event); |
1238 | 1341 |
1239 /* wait until the picture is allocated */ | 1342 /* wait until the picture is allocated */ |
1240 SDL_LockMutex(is->pictq_mutex); | 1343 SDL_LockMutex(is->pictq_mutex); |
1241 while (!vp->allocated && !is->videoq.abort_request) { | 1344 while (!vp->allocated && !is->videoq.abort_request) { |
1242 SDL_CondWait(is->pictq_cond, is->pictq_mutex); | 1345 SDL_CondWait(is->pictq_cond, is->pictq_mutex); |
1243 } | 1346 } |
1244 SDL_UnlockMutex(is->pictq_mutex); | 1347 SDL_UnlockMutex(is->pictq_mutex); |
1245 | 1348 |
1246 if (is->videoq.abort_request) | 1349 if (is->videoq.abort_request) |
1247 return -1; | 1350 return -1; |
1248 } | 1351 } |
1249 | 1352 |
1250 /* if the frame is not skipped, then display it */ | 1353 /* if the frame is not skipped, then display it */ |
1251 if (vp->bmp) { | 1354 if (vp->bmp) { |
1252 AVPicture pict; | 1355 AVPicture pict; |
| 1356 #if CONFIG_AVFILTER |
| 1357 if(vp->picref) |
| 1358 avfilter_unref_pic(vp->picref); |
| 1359 vp->picref = src_frame->opaque; |
| 1360 #endif |
1253 | 1361 |
1254 /* get a pointer on the bitmap */ | 1362 /* get a pointer on the bitmap */ |
1255 SDL_LockYUVOverlay (vp->bmp); | 1363 SDL_LockYUVOverlay (vp->bmp); |
1256 | 1364 |
1257 dst_pix_fmt = PIX_FMT_YUV420P; | 1365 dst_pix_fmt = PIX_FMT_YUV420P; |
1258 memset(&pict,0,sizeof(AVPicture)); | 1366 memset(&pict,0,sizeof(AVPicture)); |
1259 pict.data[0] = vp->bmp->pixels[0]; | 1367 pict.data[0] = vp->bmp->pixels[0]; |
1260 pict.data[1] = vp->bmp->pixels[2]; | 1368 pict.data[1] = vp->bmp->pixels[2]; |
1261 pict.data[2] = vp->bmp->pixels[1]; | 1369 pict.data[2] = vp->bmp->pixels[1]; |
1262 | 1370 |
1263 pict.linesize[0] = vp->bmp->pitches[0]; | 1371 pict.linesize[0] = vp->bmp->pitches[0]; |
1264 pict.linesize[1] = vp->bmp->pitches[2]; | 1372 pict.linesize[1] = vp->bmp->pitches[2]; |
1265 pict.linesize[2] = vp->bmp->pitches[1]; | 1373 pict.linesize[2] = vp->bmp->pitches[1]; |
| 1374 |
| 1375 #if CONFIG_AVFILTER |
| 1376 pict_src.data[0] = src_frame->data[0]; |
| 1377 pict_src.data[1] = src_frame->data[1]; |
| 1378 pict_src.data[2] = src_frame->data[2]; |
| 1379 |
| 1380 pict_src.linesize[0] = src_frame->linesize[0]; |
| 1381 pict_src.linesize[1] = src_frame->linesize[1]; |
| 1382 pict_src.linesize[2] = src_frame->linesize[2]; |
| 1383 |
| 1384 //FIXME use direct rendering |
| 1385 av_picture_copy(&pict, &pict_src, |
| 1386 vp->pix_fmt, vp->width, vp->height); |
| 1387 #else |
1266 sws_flags = av_get_int(sws_opts, "sws_flags", NULL); | 1388 sws_flags = av_get_int(sws_opts, "sws_flags", NULL); |
1267 is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx, | 1389 is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx, |
1268 is->video_st->codec->width, is->video_st->codec->height, | 1390 vp->width, vp->height, vp->pix_fmt, vp->width, vp->height, |
1269 is->video_st->codec->pix_fmt, | |
1270 is->video_st->codec->width, is->video_st->codec->height, | |
1271 dst_pix_fmt, sws_flags, NULL, NULL, NULL); | 1391 dst_pix_fmt, sws_flags, NULL, NULL, NULL); |
1272 if (is->img_convert_ctx == NULL) { | 1392 if (is->img_convert_ctx == NULL) { |
1273 fprintf(stderr, "Cannot initialize the conversion context\n"); | 1393 fprintf(stderr, "Cannot initialize the conversion context\n"); |
1274 exit(1); | 1394 exit(1); |
1275 } | 1395 } |
1276 sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize, | 1396 sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize, |
1277 0, is->video_st->codec->height, pict.data, pict.linesize); | 1397 0, vp->height, pict.data, pict.linesize); |
| 1398 #endif |
1278 /* update the bitmap content */ | 1399 /* update the bitmap content */ |
1279 SDL_UnlockYUVOverlay(vp->bmp); | 1400 SDL_UnlockYUVOverlay(vp->bmp); |
1280 | 1401 |
1281 vp->pts = pts; | 1402 vp->pts = pts; |
1282 vp->pos = pos; | 1403 vp->pos = pos; |
1283 | 1404 |
1284 /* now we can update the picture count */ | 1405 /* now we can update the picture count */ |
1285 if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE) | 1406 if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE) |
1286 is->pictq_windex = 0; | 1407 is->pictq_windex = 0; |
1287 SDL_LockMutex(is->pictq_mutex); | 1408 SDL_LockMutex(is->pictq_mutex); |
(...skipping 22 matching lines...)
1310 pts = is->video_clock; | 1431 pts = is->video_clock; |
1311 } | 1432 } |
1312 /* update video clock for next frame */ | 1433 /* update video clock for next frame */ |
1313 frame_delay = av_q2d(is->video_st->codec->time_base); | 1434 frame_delay = av_q2d(is->video_st->codec->time_base); |
1314 /* for MPEG2, the frame can be repeated, so we update the | 1435 /* for MPEG2, the frame can be repeated, so we update the |
1315 clock accordingly */ | 1436 clock accordingly */ |
1316 frame_delay += src_frame->repeat_pict * (frame_delay * 0.5); | 1437 frame_delay += src_frame->repeat_pict * (frame_delay * 0.5); |
1317 is->video_clock += frame_delay; | 1438 is->video_clock += frame_delay; |
1318 | 1439 |
1319 #if defined(DEBUG_SYNC) && 0 | 1440 #if defined(DEBUG_SYNC) && 0 |
1320 { | 1441 printf("frame_type=%c clock=%0.3f pts=%0.3f\n", |
1321 int ftype; | 1442 av_get_pict_type_char(src_frame->pict_type), pts, pts1); |
1322 if (src_frame->pict_type == FF_B_TYPE) | |
1323 ftype = 'B'; | |
1324 else if (src_frame->pict_type == FF_I_TYPE) | |
1325 ftype = 'I'; | |
1326 else | |
1327 ftype = 'P'; | |
1328 printf("frame_type=%c clock=%0.3f pts=%0.3f\n", | |
1329 ftype, pts, pts1); | |
1330 } | |
1331 #endif | 1443 #endif |
1332 return queue_picture(is, src_frame, pts, pos); | 1444 return queue_picture(is, src_frame, pts, pos); |
1333 } | 1445 } |
1334 | 1446 |
1335 static int video_thread(void *arg) | 1447 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt) |
1336 { | 1448 { |
1337 VideoState *is = arg; | |
1338 AVPacket pkt1, *pkt = &pkt1; | |
1339 int len1, got_picture, i; | 1449 int len1, got_picture, i; |
1340 AVFrame *frame= avcodec_alloc_frame(); | |
1341 double pts; | |
1342 | 1450 |
1343 for(;;) { | |
1344 while (is->paused && !is->videoq.abort_request) { | |
1345 SDL_Delay(10); | |
1346 } | |
1347 if (packet_queue_get(&is->videoq, pkt, 1) < 0) | 1451 if (packet_queue_get(&is->videoq, pkt, 1) < 0) |
1348 break; | 1452 return -1; |
1349 | 1453 |
1350 if(pkt->data == flush_pkt.data){ | 1454 if(pkt->data == flush_pkt.data){ |
1351 avcodec_flush_buffers(is->video_st->codec); | 1455 avcodec_flush_buffers(is->video_st->codec); |
1352 | 1456 |
1353 SDL_LockMutex(is->pictq_mutex); | 1457 SDL_LockMutex(is->pictq_mutex); |
1354 //Make sure there are no long delay timers (ideally we should just flush the que but thats harder) | 1458 //Make sure there are no long delay timers (ideally we should just flush the que but thats harder) |
1355 for(i=0; i<VIDEO_PICTURE_QUEUE_SIZE; i++){ | 1459 for(i=0; i<VIDEO_PICTURE_QUEUE_SIZE; i++){ |
1356 if(is->pictq[i].timer_id){ | 1460 if(is->pictq[i].timer_id){ |
1357 SDL_RemoveTimer(is->pictq[i].timer_id); | 1461 SDL_RemoveTimer(is->pictq[i].timer_id); |
1358 is->pictq[i].timer_id=0; | 1462 is->pictq[i].timer_id=0; |
1359 schedule_refresh(is, 1); | 1463 schedule_refresh(is, 1); |
1360 } | 1464 } |
1361 } | 1465 } |
1362 while (is->pictq_size && !is->videoq.abort_request) { | 1466 while (is->pictq_size && !is->videoq.abort_request) { |
1363 SDL_CondWait(is->pictq_cond, is->pictq_mutex); | 1467 SDL_CondWait(is->pictq_cond, is->pictq_mutex); |
1364 } | 1468 } |
1365 is->video_current_pos= -1; | 1469 is->video_current_pos= -1; |
1366 SDL_UnlockMutex(is->pictq_mutex); | 1470 SDL_UnlockMutex(is->pictq_mutex); |
1367 | 1471 |
1368 is->last_dts_for_fault_detection= | 1472 is->last_dts_for_fault_detection= |
1369 is->last_pts_for_fault_detection= INT64_MIN; | 1473 is->last_pts_for_fault_detection= INT64_MIN; |
1370 is->frame_last_pts= AV_NOPTS_VALUE; | 1474 is->frame_last_pts= AV_NOPTS_VALUE; |
1371 is->frame_last_delay = 0; | 1475 is->frame_last_delay = 0; |
| 1476 is->frame_timer = (double)av_gettime() / 1000000.0; |
1372 | 1477 |
1373 continue; | 1478 return 0; |
1374 } | 1479 } |
1375 | 1480 |
1376 /* NOTE: ipts is the PTS of the _first_ picture beginning in | 1481 /* NOTE: ipts is the PTS of the _first_ picture beginning in |
1377 this packet, if any */ | 1482 this packet, if any */ |
1378 is->video_st->codec->reordered_opaque= pkt->pts; | 1483 is->video_st->codec->reordered_opaque= pkt->pts; |
1379 len1 = avcodec_decode_video2(is->video_st->codec, | 1484 len1 = avcodec_decode_video2(is->video_st->codec, |
1380 frame, &got_picture, | 1485 frame, &got_picture, |
1381 pkt); | 1486 pkt); |
1382 | 1487 |
1383 if (got_picture) { | 1488 if (got_picture) { |
1384 if(pkt->dts != AV_NOPTS_VALUE){ | 1489 if(pkt->dts != AV_NOPTS_VALUE){ |
1385 is->faulty_dts += pkt->dts <= is->last_dts_for_fault_detection; | 1490 is->faulty_dts += pkt->dts <= is->last_dts_for_fault_detection; |
1386 is->last_dts_for_fault_detection= pkt->dts; | 1491 is->last_dts_for_fault_detection= pkt->dts; |
1387 } | 1492 } |
1388 if(frame->reordered_opaque != AV_NOPTS_VALUE){ | 1493 if(frame->reordered_opaque != AV_NOPTS_VALUE){ |
1389 is->faulty_pts += frame->reordered_opaque <= is->last_pts_for_fault_detection; | 1494 is->faulty_pts += frame->reordered_opaque <= is->last_pts_for_fault_detection; |
1390 is->last_pts_for_fault_detection= frame->reordered_opaque; | 1495 is->last_pts_for_fault_detection= frame->reordered_opaque; |
1391 } | 1496 } |
1392 } | 1497 } |
1393 | 1498 |
1394 if( ( decoder_reorder_pts==1 | 1499 if( ( decoder_reorder_pts==1 |
1395 || (decoder_reorder_pts && is->faulty_pts<is->faulty_dts) | 1500 || (decoder_reorder_pts && is->faulty_pts<is->faulty_dts) |
1396 || pkt->dts == AV_NOPTS_VALUE) | 1501 || pkt->dts == AV_NOPTS_VALUE) |
1397 && frame->reordered_opaque != AV_NOPTS_VALUE) | 1502 && frame->reordered_opaque != AV_NOPTS_VALUE) |
1398 pts= frame->reordered_opaque; | 1503 *pts= frame->reordered_opaque; |
1399 else if(pkt->dts != AV_NOPTS_VALUE) | 1504 else if(pkt->dts != AV_NOPTS_VALUE) |
1400 pts= pkt->dts; | 1505 *pts= pkt->dts; |
1401 else | 1506 else |
1402 pts= 0; | 1507 *pts= 0; |
1403 pts *= av_q2d(is->video_st->time_base); | |
1404 | 1508 |
1405 // if (len1 < 0) | 1509 // if (len1 < 0) |
1406 // break; | 1510 // break; |
1407 if (got_picture) { | 1511 if (got_picture) |
1408 if (output_picture2(is, frame, pts, pkt->pos) < 0) | 1512 return 1; |
1409 goto the_end; | 1513 return 0; |
1410 } | 1514 } |
1411 av_free_packet(pkt); | 1515 |
| 1516 #if CONFIG_AVFILTER |
| 1517 typedef struct { |
| 1518 VideoState *is; |
| 1519 AVFrame *frame; |
| 1520 } FilterPriv; |
| 1521 |
| 1522 static int input_init(AVFilterContext *ctx, const char *args, void *opaque) |
| 1523 { |
| 1524 FilterPriv *priv = ctx->priv; |
| 1525 if(!opaque) return -1; |
| 1526 |
| 1527 priv->is = opaque; |
| 1528 priv->frame = avcodec_alloc_frame(); |
| 1529 |
| 1530 return 0; |
| 1531 } |
| 1532 |
| 1533 static void input_uninit(AVFilterContext *ctx) |
| 1534 { |
| 1535 FilterPriv *priv = ctx->priv; |
| 1536 av_free(priv->frame); |
| 1537 } |
| 1538 |
| 1539 static int input_request_frame(AVFilterLink *link) |
| 1540 { |
| 1541 FilterPriv *priv = link->src->priv; |
| 1542 AVFilterPicRef *picref; |
| 1543 int64_t pts = 0; |
| 1544 AVPacket pkt; |
| 1545 int ret; |
| 1546 |
| 1547 while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt))) |
| 1548 av_free_packet(&pkt); |
| 1549 if (ret < 0) |
| 1550 return -1; |
| 1551 |
| 1552 /* FIXME: until I figure out how to hook everything up to the codec |
| 1553 * right, we're just copying the entire frame. */ |
| 1554 picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h); |
| 1555 av_picture_copy((AVPicture *)&picref->data, (AVPicture *)priv->frame, |
| 1556 picref->pic->format, link->w, link->h); |
| 1557 av_free_packet(&pkt); |
| 1558 |
| 1559 picref->pts = pts; |
| 1560 picref->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio; |
| 1561 avfilter_start_frame(link, avfilter_ref_pic(picref, ~0)); |
| 1562 avfilter_draw_slice(link, 0, link->h, 1); |
| 1563 avfilter_end_frame(link); |
| 1564 avfilter_unref_pic(picref); |
| 1565 |
| 1566 return 0; |
| 1567 } |
| 1568 |
| 1569 static int input_query_formats(AVFilterContext *ctx) |
| 1570 { |
| 1571 FilterPriv *priv = ctx->priv; |
| 1572 enum PixelFormat pix_fmts[] = { |
| 1573 priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE |
| 1574 }; |
| 1575 |
| 1576 avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts)); |
| 1577 return 0; |
| 1578 } |
| 1579 |
| 1580 static int input_config_props(AVFilterLink *link) |
| 1581 { |
| 1582 FilterPriv *priv = link->src->priv; |
| 1583 AVCodecContext *c = priv->is->video_st->codec; |
| 1584 |
| 1585 link->w = c->width; |
| 1586 link->h = c->height; |
| 1587 |
| 1588 return 0; |
| 1589 } |
| 1590 |
| 1591 static AVFilter input_filter = |
| 1592 { |
| 1593 .name = "ffplay_input", |
| 1594 |
| 1595 .priv_size = sizeof(FilterPriv), |
| 1596 |
| 1597 .init = input_init, |
| 1598 .uninit = input_uninit, |
| 1599 |
| 1600 .query_formats = input_query_formats, |
| 1601 |
| 1602 .inputs = (AVFilterPad[]) {{ .name = NULL }}, |
| 1603 .outputs = (AVFilterPad[]) {{ .name = "default", |
| 1604 .type = CODEC_TYPE_VIDEO, |
| 1605 .request_frame = input_request_frame, |
| 1606 .config_props = input_config_props, }, |
| 1607 { .name = NULL }}, |
| 1608 }; |
| 1609 |
| 1610 static void output_end_frame(AVFilterLink *link) |
| 1611 { |
| 1612 } |
| 1613 |
| 1614 static int output_query_formats(AVFilterContext *ctx) |
| 1615 { |
| 1616 enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE }; |
| 1617 |
| 1618 avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts)); |
| 1619 return 0; |
| 1620 } |
| 1621 |
| 1622 static int get_filtered_video_frame(AVFilterContext *ctx, AVFrame *frame, |
| 1623 int64_t *pts) |
| 1624 { |
| 1625 AVFilterPicRef *pic; |
| 1626 |
| 1627 if(avfilter_request_frame(ctx->inputs[0])) |
| 1628 return -1; |
| 1629 if(!(pic = ctx->inputs[0]->cur_pic)) |
| 1630 return -1; |
| 1631 ctx->inputs[0]->cur_pic = NULL; |
| 1632 |
| 1633 frame->opaque = pic; |
| 1634 *pts = pic->pts; |
| 1635 |
| 1636 memcpy(frame->data, pic->data, sizeof(frame->data)); |
| 1637 memcpy(frame->linesize, pic->linesize, sizeof(frame->linesize)); |
| 1638 |
| 1639 return 1; |
| 1640 } |
| 1641 |
| 1642 static AVFilter output_filter = |
| 1643 { |
| 1644 .name = "ffplay_output", |
| 1645 |
| 1646 .query_formats = output_query_formats, |
| 1647 |
| 1648 .inputs = (AVFilterPad[]) {{ .name = "default", |
| 1649 .type = CODEC_TYPE_VIDEO, |
| 1650 .end_frame = output_end_frame, |
| 1651 .min_perms = AV_PERM_READ, }, |
| 1652 { .name = NULL }}, |
| 1653 .outputs = (AVFilterPad[]) {{ .name = NULL }}, |
| 1654 }; |
| 1655 #endif /* CONFIG_AVFILTER */ |
| 1656 |
| 1657 static int video_thread(void *arg) |
| 1658 { |
| 1659 VideoState *is = arg; |
| 1660 AVFrame *frame= avcodec_alloc_frame(); |
| 1661 int64_t pts_int; |
| 1662 double pts; |
| 1663 int ret; |
| 1664 |
| 1665 #if CONFIG_AVFILTER |
| 1666 AVFilterContext *filt_src = NULL, *filt_out = NULL; |
| 1667 AVFilterGraph *graph = av_mallocz(sizeof(AVFilterGraph)); |
| 1668 graph->scale_sws_opts = av_strdup("sws_flags=bilinear"); |
| 1669 |
| 1670 if(!(filt_src = avfilter_open(&input_filter, "src"))) goto the_end; |
| 1671 if(!(filt_out = avfilter_open(&output_filter, "out"))) goto the_end; |
| 1672 |
| 1673 if(avfilter_init_filter(filt_src, NULL, is)) goto the_end; |
| 1674 if(avfilter_init_filter(filt_out, NULL, frame)) goto the_end; |
| 1675 |
| 1676 |
| 1677 if(vfilters) { |
| 1678 AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut)); |
| 1679 AVFilterInOut *inputs = av_malloc(sizeof(AVFilterInOut)); |
| 1680 |
| 1681 outputs->name = av_strdup("in"); |
| 1682 outputs->filter = filt_src; |
| 1683 outputs->pad_idx = 0; |
| 1684 outputs->next = NULL; |
| 1685 |
| 1686 inputs->name = av_strdup("out"); |
| 1687 inputs->filter = filt_out; |
| 1688 inputs->pad_idx = 0; |
| 1689 inputs->next = NULL; |
| 1690 |
| 1691 if (avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL) < 0) |
| 1692 goto the_end; |
| 1693 av_freep(&vfilters); |
| 1694 } else { |
| 1695 if(avfilter_link(filt_src, 0, filt_out, 0) < 0) goto the_end; |
| 1696 } |
| 1697 avfilter_graph_add_filter(graph, filt_src); |
| 1698 avfilter_graph_add_filter(graph, filt_out); |
| 1699 |
| 1700 if(avfilter_graph_check_validity(graph, NULL)) goto the_end; |
| 1701 if(avfilter_graph_config_formats(graph, NULL)) goto the_end; |
| 1702 if(avfilter_graph_config_links(graph, NULL)) goto the_end; |
| 1703 |
| 1704 is->out_video_filter = filt_out; |
| 1705 #endif |
| 1706 |
| 1707 for(;;) { |
| 1708 #if !CONFIG_AVFILTER |
| 1709 AVPacket pkt; |
| 1710 #endif |
| 1711 while (is->paused && !is->videoq.abort_request) |
| 1712 SDL_Delay(10); |
| 1713 #if CONFIG_AVFILTER |
| 1714 ret = get_filtered_video_frame(filt_out, frame, &pts_int); |
| 1715 #else |
| 1716 ret = get_video_frame(is, frame, &pts_int, &pkt); |
| 1717 #endif |
| 1718 |
| 1719 if (ret < 0) goto the_end; |
| 1720 |
| 1721 if (!ret) |
| 1722 continue; |
| 1723 |
| 1724 pts = pts_int*av_q2d(is->video_st->time_base); |
| 1725 |
| 1726 #if CONFIG_AVFILTER |
| 1727 ret = output_picture2(is, frame, pts, -1); /* fixme: unknown pos */ |
| 1728 #else |
| 1729 ret = output_picture2(is, frame, pts, pkt.pos); |
| 1730 av_free_packet(&pkt); |
| 1731 #endif |
| 1732 if (ret < 0) |
| 1733 goto the_end; |
| 1734 |
1412 if (step) | 1735 if (step) |
1413 if (cur_stream) | 1736 if (cur_stream) |
1414 stream_pause(cur_stream); | 1737 stream_pause(cur_stream); |
1415 } | 1738 } |
1416 the_end: | 1739 the_end: |
| 1740 #if CONFIG_AVFILTER |
| 1741 avfilter_graph_destroy(graph); |
| 1742 av_freep(&graph); |
| 1743 #endif |
1417 av_free(frame); | 1744 av_free(frame); |
1418 return 0; | 1745 return 0; |
1419 } | 1746 } |
1420 | 1747 |
1421 static int subtitle_thread(void *arg) | 1748 static int subtitle_thread(void *arg) |
1422 { | 1749 { |
1423 VideoState *is = arg; | 1750 VideoState *is = arg; |
1424 SubPicture *sp; | 1751 SubPicture *sp; |
1425 AVPacket pkt1, *pkt = &pkt1; | 1752 AVPacket pkt1, *pkt = &pkt1; |
1426 int len1, got_subtitle; | 1753 int len1, got_subtitle; |
(...skipping 308 matching lines...)
1735 len -= len1; | 2062 len -= len1; |
1736 stream += len1; | 2063 stream += len1; |
1737 is->audio_buf_index += len1; | 2064 is->audio_buf_index += len1; |
1738 } | 2065 } |
1739 } | 2066 } |
1740 | 2067 |
1741 /* open a given stream. Return 0 if OK */ | 2068 /* open a given stream. Return 0 if OK */ |
1742 static int stream_component_open(VideoState *is, int stream_index) | 2069 static int stream_component_open(VideoState *is, int stream_index) |
1743 { | 2070 { |
1744 AVFormatContext *ic = is->ic; | 2071 AVFormatContext *ic = is->ic; |
1745 AVCodecContext *enc; | 2072 AVCodecContext *avctx; |
1746 AVCodec *codec; | 2073 AVCodec *codec; |
1747 SDL_AudioSpec wanted_spec, spec; | 2074 SDL_AudioSpec wanted_spec, spec; |
1748 | 2075 |
1749 if (stream_index < 0 || stream_index >= ic->nb_streams) | 2076 if (stream_index < 0 || stream_index >= ic->nb_streams) |
1750 return -1; | 2077 return -1; |
1751 enc = ic->streams[stream_index]->codec; | 2078 avctx = ic->streams[stream_index]->codec; |
1752 | 2079 |
1753 /* prepare audio output */ | 2080 /* prepare audio output */ |
1754 if (enc->codec_type == CODEC_TYPE_AUDIO) { | 2081 if (avctx->codec_type == CODEC_TYPE_AUDIO) { |
1755 if (enc->channels > 0) { | 2082 if (avctx->channels > 0) { |
1756 enc->request_channels = FFMIN(2, enc->channels); | 2083 avctx->request_channels = FFMIN(2, avctx->channels); |
1757 } else { | 2084 } else { |
1758 enc->request_channels = 2; | 2085 avctx->request_channels = 2; |
1759 } | 2086 } |
1760 } | 2087 } |
1761 | 2088 |
1762 codec = avcodec_find_decoder(enc->codec_id); | 2089 codec = avcodec_find_decoder(avctx->codec_id); |
1763 enc->debug_mv = debug_mv; | 2090 avctx->debug_mv = debug_mv; |
1764 enc->debug = debug; | 2091 avctx->debug = debug; |
1765 enc->workaround_bugs = workaround_bugs; | 2092 avctx->workaround_bugs = workaround_bugs; |
1766 enc->lowres = lowres; | 2093 avctx->lowres = lowres; |
1767 if(lowres) enc->flags |= CODEC_FLAG_EMU_EDGE; | 2094 if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE; |
1768 enc->idct_algo= idct; | 2095 avctx->idct_algo= idct; |
1769 if(fast) enc->flags2 |= CODEC_FLAG2_FAST; | 2096 if(fast) avctx->flags2 |= CODEC_FLAG2_FAST; |
1770 enc->skip_frame= skip_frame; | 2097 avctx->skip_frame= skip_frame; |
1771 enc->skip_idct= skip_idct; | 2098 avctx->skip_idct= skip_idct; |
1772 enc->skip_loop_filter= skip_loop_filter; | 2099 avctx->skip_loop_filter= skip_loop_filter; |
1773 enc->error_recognition= error_recognition; | 2100 avctx->error_recognition= error_recognition; |
1774 enc->error_concealment= error_concealment; | 2101 avctx->error_concealment= error_concealment; |
1775 avcodec_thread_init(enc, thread_count); | 2102 avcodec_thread_init(avctx, thread_count); |
1776 | 2103 |
1777 set_context_opts(enc, avcodec_opts[enc->codec_type], 0); | 2104 set_context_opts(avctx, avcodec_opts[avctx->codec_type], 0); |
1778 | 2105 |
1779 if (!codec || | 2106 if (!codec || |
1780 avcodec_open(enc, codec) < 0) | 2107 avcodec_open(avctx, codec) < 0) |
1781 return -1; | 2108 return -1; |
1782 | 2109 |
1783 /* prepare audio output */ | 2110 /* prepare audio output */ |
1784 if (enc->codec_type == CODEC_TYPE_AUDIO) { | 2111 if (avctx->codec_type == CODEC_TYPE_AUDIO) { |
1785 wanted_spec.freq = enc->sample_rate; | 2112 wanted_spec.freq = avctx->sample_rate; |
1786 wanted_spec.format = AUDIO_S16SYS; | 2113 wanted_spec.format = AUDIO_S16SYS; |
1787 wanted_spec.channels = enc->channels; | 2114 wanted_spec.channels = avctx->channels; |
1788 wanted_spec.silence = 0; | 2115 wanted_spec.silence = 0; |
1789 wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE; | 2116 wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE; |
1790 wanted_spec.callback = sdl_audio_callback; | 2117 wanted_spec.callback = sdl_audio_callback; |
1791 wanted_spec.userdata = is; | 2118 wanted_spec.userdata = is; |
1792 if (SDL_OpenAudio(&wanted_spec, &spec) < 0) { | 2119 if (SDL_OpenAudio(&wanted_spec, &spec) < 0) { |
1793 fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError()); | 2120 fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError()); |
1794 return -1; | 2121 return -1; |
1795 } | 2122 } |
1796 is->audio_hw_buf_size = spec.size; | 2123 is->audio_hw_buf_size = spec.size; |
1797 is->audio_src_fmt= SAMPLE_FMT_S16; | 2124 is->audio_src_fmt= SAMPLE_FMT_S16; |
1798 } | 2125 } |
1799 | 2126 |
1800 ic->streams[stream_index]->discard = AVDISCARD_DEFAULT; | 2127 ic->streams[stream_index]->discard = AVDISCARD_DEFAULT; |
1801 switch(enc->codec_type) { | 2128 switch(avctx->codec_type) { |
1802 case CODEC_TYPE_AUDIO: | 2129 case CODEC_TYPE_AUDIO: |
1803 is->audio_stream = stream_index; | 2130 is->audio_stream = stream_index; |
1804 is->audio_st = ic->streams[stream_index]; | 2131 is->audio_st = ic->streams[stream_index]; |
1805 is->audio_buf_size = 0; | 2132 is->audio_buf_size = 0; |
1806 is->audio_buf_index = 0; | 2133 is->audio_buf_index = 0; |
1807 | 2134 |
1808 /* init averaging filter */ | 2135 /* init averaging filter */ |
1809 is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB); | 2136 is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB); |
1810 is->audio_diff_avg_count = 0; | 2137 is->audio_diff_avg_count = 0; |
1811 /* since we do not have a precise anough audio fifo fullness, | 2138 /* since we do not have a precise anough audio fifo fullness, |
1812 we correct audio sync only if larger than this threshold */ | 2139 we correct audio sync only if larger than this threshold */ |
1813 is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / enc->sample_rate; | 2140 is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate; |
1814 | 2141 |
1815 memset(&is->audio_pkt, 0, sizeof(is->audio_pkt)); | 2142 memset(&is->audio_pkt, 0, sizeof(is->audio_pkt)); |
1816 packet_queue_init(&is->audioq); | 2143 packet_queue_init(&is->audioq); |
1817 SDL_PauseAudio(0); | 2144 SDL_PauseAudio(0); |
1818 break; | 2145 break; |
1819 case CODEC_TYPE_VIDEO: | 2146 case CODEC_TYPE_VIDEO: |
1820 is->video_stream = stream_index; | 2147 is->video_stream = stream_index; |
1821 is->video_st = ic->streams[stream_index]; | 2148 is->video_st = ic->streams[stream_index]; |
1822 | 2149 |
1823 is->frame_timer = (double)av_gettime() / 1000000.0; | |
1824 // is->video_current_pts_time = av_gettime(); | 2150 // is->video_current_pts_time = av_gettime(); |
1825 | 2151 |
1826 packet_queue_init(&is->videoq); | 2152 packet_queue_init(&is->videoq); |
1827 is->video_tid = SDL_CreateThread(video_thread, is); | 2153 is->video_tid = SDL_CreateThread(video_thread, is); |
1828 break; | 2154 break; |
1829 case CODEC_TYPE_SUBTITLE: | 2155 case CODEC_TYPE_SUBTITLE: |
1830 is->subtitle_stream = stream_index; | 2156 is->subtitle_stream = stream_index; |
1831 is->subtitle_st = ic->streams[stream_index]; | 2157 is->subtitle_st = ic->streams[stream_index]; |
1832 packet_queue_init(&is->subtitleq); | 2158 packet_queue_init(&is->subtitleq); |
1833 | 2159 |
1834 is->subtitle_tid = SDL_CreateThread(subtitle_thread, is); | 2160 is->subtitle_tid = SDL_CreateThread(subtitle_thread, is); |
1835 break; | 2161 break; |
1836 default: | 2162 default: |
1837 break; | 2163 break; |
1838 } | 2164 } |
1839 return 0; | 2165 return 0; |
1840 } | 2166 } |
1841 | 2167 |
1842 static void stream_component_close(VideoState *is, int stream_index) | 2168 static void stream_component_close(VideoState *is, int stream_index) |
1843 { | 2169 { |
1844 AVFormatContext *ic = is->ic; | 2170 AVFormatContext *ic = is->ic; |
1845 AVCodecContext *enc; | 2171 AVCodecContext *avctx; |
1846 | 2172 |
1847 if (stream_index < 0 || stream_index >= ic->nb_streams) | 2173 if (stream_index < 0 || stream_index >= ic->nb_streams) |
1848 return; | 2174 return; |
1849 enc = ic->streams[stream_index]->codec; | 2175 avctx = ic->streams[stream_index]->codec; |
1850 | 2176 |
1851 switch(enc->codec_type) { | 2177 switch(avctx->codec_type) { |
1852 case CODEC_TYPE_AUDIO: | 2178 case CODEC_TYPE_AUDIO: |
1853 packet_queue_abort(&is->audioq); | 2179 packet_queue_abort(&is->audioq); |
1854 | 2180 |
1855 SDL_CloseAudio(); | 2181 SDL_CloseAudio(); |
1856 | 2182 |
1857 packet_queue_end(&is->audioq); | 2183 packet_queue_end(&is->audioq); |
1858 if (is->reformat_ctx) | 2184 if (is->reformat_ctx) |
1859 av_audio_convert_free(is->reformat_ctx); | 2185 av_audio_convert_free(is->reformat_ctx); |
| 2186 is->reformat_ctx = NULL; |
1860 break; | 2187 break; |
1861 case CODEC_TYPE_VIDEO: | 2188 case CODEC_TYPE_VIDEO: |
1862 packet_queue_abort(&is->videoq); | 2189 packet_queue_abort(&is->videoq); |
1863 | 2190 |
1864         /* note: we also signal this mutex to make sure we unblock the | 2191         /* note: we also signal this mutex to make sure we unblock the |
1865 video thread in all cases */ | 2192 video thread in all cases */ |
1866 SDL_LockMutex(is->pictq_mutex); | 2193 SDL_LockMutex(is->pictq_mutex); |
1867 SDL_CondSignal(is->pictq_cond); | 2194 SDL_CondSignal(is->pictq_cond); |
1868 SDL_UnlockMutex(is->pictq_mutex); | 2195 SDL_UnlockMutex(is->pictq_mutex); |
1869 | 2196 |
(...skipping 14 matching lines...)
1884 | 2211 |
1885 SDL_WaitThread(is->subtitle_tid, NULL); | 2212 SDL_WaitThread(is->subtitle_tid, NULL); |
1886 | 2213 |
1887 packet_queue_end(&is->subtitleq); | 2214 packet_queue_end(&is->subtitleq); |
1888 break; | 2215 break; |
1889 default: | 2216 default: |
1890 break; | 2217 break; |
1891 } | 2218 } |
1892 | 2219 |
1893 ic->streams[stream_index]->discard = AVDISCARD_ALL; | 2220 ic->streams[stream_index]->discard = AVDISCARD_ALL; |
1894 avcodec_close(enc); | 2221 avcodec_close(avctx); |
1895 switch(enc->codec_type) { | 2222 switch(avctx->codec_type) { |
1896 case CODEC_TYPE_AUDIO: | 2223 case CODEC_TYPE_AUDIO: |
1897 is->audio_st = NULL; | 2224 is->audio_st = NULL; |
1898 is->audio_stream = -1; | 2225 is->audio_stream = -1; |
1899 break; | 2226 break; |
1900 case CODEC_TYPE_VIDEO: | 2227 case CODEC_TYPE_VIDEO: |
1901 is->video_st = NULL; | 2228 is->video_st = NULL; |
1902 is->video_stream = -1; | 2229 is->video_stream = -1; |
1903 break; | 2230 break; |
1904 case CODEC_TYPE_SUBTITLE: | 2231 case CODEC_TYPE_SUBTITLE: |
1905 is->subtitle_st = NULL; | 2232 is->subtitle_st = NULL; |
(...skipping 11 matching lines...)
1917 static int decode_interrupt_cb(void) | 2244 static int decode_interrupt_cb(void) |
1918 { | 2245 { |
1919 return (global_video_state && global_video_state->abort_request); | 2246 return (global_video_state && global_video_state->abort_request); |
1920 } | 2247 } |
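
decode_interrupt_cb() is how the player aborts blocking demuxer I/O: url_set_interrupt_cb() registers a callback that libavformat polls inside blocking operations, and a nonzero return makes them fail early instead of hanging. A minimal sketch of the pattern, assuming the global-callback API this revision uses (later FFmpeg versions replaced it with a per-context callback); the flag and URL are illustrative only.

    #include <stdio.h>
    #include <libavformat/avformat.h>

    static volatile int abort_flag = 0;     /* would be set from another thread */

    static int sketch_interrupt_cb(void)
    {
        return abort_flag;                  /* nonzero => abort blocking I/O */
    }

    int main(void)
    {
        AVFormatContext *ic = NULL;

        av_register_all();
        url_set_interrupt_cb(sketch_interrupt_cb);

        /* If abort_flag becomes nonzero, this returns early with an error
           rather than blocking indefinitely on a dead network source. */
        if (av_open_input_file(&ic, "rtsp://example.invalid/stream", NULL, 0, NULL) < 0)
            fprintf(stderr, "open failed or was aborted\n");
        else
            av_close_input_file(ic);
        return 0;
    }
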
1921 | 2248 |
1922 /* this thread gets the stream from the disk or the network */ | 2249 /* this thread gets the stream from the disk or the network */ |
1923 static int decode_thread(void *arg) | 2250 static int decode_thread(void *arg) |
1924 { | 2251 { |
1925 VideoState *is = arg; | 2252 VideoState *is = arg; |
1926 AVFormatContext *ic; | 2253 AVFormatContext *ic; |
1927 int err, i, ret, video_index, audio_index, subtitle_index; | 2254 int err, i, ret; |
| 2255 int st_index[CODEC_TYPE_NB]; |
| 2256 int st_count[CODEC_TYPE_NB]={0}; |
| 2257 int st_best_packet_count[CODEC_TYPE_NB]; |
1928 AVPacket pkt1, *pkt = &pkt1; | 2258 AVPacket pkt1, *pkt = &pkt1; |
1929 AVFormatParameters params, *ap = ¶ms; | 2259 AVFormatParameters params, *ap = ¶ms; |
1930 int eof=0; | 2260 int eof=0; |
1931 | 2261 |
1932 ic = avformat_alloc_context(); | 2262 ic = avformat_alloc_context(); |
1933 | 2263 |
1934 video_index = -1; | 2264 memset(st_index, -1, sizeof(st_index)); |
1935 audio_index = -1; | 2265 memset(st_best_packet_count, -1, sizeof(st_best_packet_count)); |
1936 subtitle_index = -1; | |
1937 is->video_stream = -1; | 2266 is->video_stream = -1; |
1938 is->audio_stream = -1; | 2267 is->audio_stream = -1; |
1939 is->subtitle_stream = -1; | 2268 is->subtitle_stream = -1; |
1940 | 2269 |
1941 global_video_state = is; | 2270 global_video_state = is; |
1942 url_set_interrupt_cb(decode_interrupt_cb); | 2271 url_set_interrupt_cb(decode_interrupt_cb); |
1943 | 2272 |
1944 memset(ap, 0, sizeof(*ap)); | 2273 memset(ap, 0, sizeof(*ap)); |
1945 | 2274 |
1946 ap->prealloced_context = 1; | 2275 ap->prealloced_context = 1; |
(...skipping 36 matching lines...)
1983 if (ic->start_time != AV_NOPTS_VALUE) | 2312 if (ic->start_time != AV_NOPTS_VALUE) |
1984 timestamp += ic->start_time; | 2313 timestamp += ic->start_time; |
1985 ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0); | 2314 ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0); |
1986 if (ret < 0) { | 2315 if (ret < 0) { |
1987 fprintf(stderr, "%s: could not seek to position %0.3f\n", | 2316 fprintf(stderr, "%s: could not seek to position %0.3f\n", |
1988 is->filename, (double)timestamp / AV_TIME_BASE); | 2317 is->filename, (double)timestamp / AV_TIME_BASE); |
1989 } | 2318 } |
1990 } | 2319 } |
1991 | 2320 |
1992 for(i = 0; i < ic->nb_streams; i++) { | 2321 for(i = 0; i < ic->nb_streams; i++) { |
1993 AVCodecContext *enc = ic->streams[i]->codec; | 2322 AVStream *st= ic->streams[i]; |
| 2323 AVCodecContext *avctx = st->codec; |
1994 ic->streams[i]->discard = AVDISCARD_ALL; | 2324 ic->streams[i]->discard = AVDISCARD_ALL; |
1995 switch(enc->codec_type) { | 2325 if(avctx->codec_type >= (unsigned)CODEC_TYPE_NB) |
| 2326 continue; |
 | 2327         if(st_count[avctx->codec_type]++ != wanted_stream[avctx->codec_type] && wanted_stream[avctx->codec_type] >= 0) |
| 2328 continue; |
| 2329 |
| 2330 if(st_best_packet_count[avctx->codec_type] >= st->codec_info_nb_frames) |
| 2331 continue; |
| 2332 st_best_packet_count[avctx->codec_type]= st->codec_info_nb_frames; |
| 2333 |
| 2334 switch(avctx->codec_type) { |
1996 case CODEC_TYPE_AUDIO: | 2335 case CODEC_TYPE_AUDIO: |
1997 if (wanted_audio_stream-- >= 0 && !audio_disable) | 2336 if (!audio_disable) |
1998 audio_index = i; | 2337 st_index[CODEC_TYPE_AUDIO] = i; |
1999 break; | 2338 break; |
2000 case CODEC_TYPE_VIDEO: | 2339 case CODEC_TYPE_VIDEO: |
2001 if (wanted_video_stream-- >= 0 && !video_disable) | |
2002 video_index = i; | |
2003 break; | |
2004 case CODEC_TYPE_SUBTITLE: | 2340 case CODEC_TYPE_SUBTITLE: |
2005 if (wanted_subtitle_stream-- >= 0 && !video_disable) | 2341 if (!video_disable) |
2006 subtitle_index = i; | 2342 st_index[avctx->codec_type] = i; |
2007 break; | 2343 break; |
2008 default: | 2344 default: |
2009 break; | 2345 break; |
2010 } | 2346 } |
2011 } | 2347 } |
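
The selection loop above replaces the old wanted_*_stream countdown: per codec type it keeps the stream whose probing saw the most frames (st->codec_info_nb_frames), unless the user pinned an index with -ast/-vst/-sst. Below is a standalone sketch of the same heuristic, using illustrative stand-in types and counts rather than the FFmpeg structures.

    #include <stdio.h>

    enum { TYPE_VIDEO, TYPE_AUDIO, TYPE_SUBTITLE, TYPE_NB };
    struct stream { int type; int info_nb_frames; };

    int main(void)
    {
        struct stream streams[] = {
            { TYPE_VIDEO, 50 }, { TYPE_AUDIO, 48 }, { TYPE_AUDIO, 3 }, { TYPE_SUBTITLE, 1 },
        };
        int nb = sizeof(streams) / sizeof(streams[0]);
        int wanted[TYPE_NB]     = { -1, 1, -1 };   /* e.g. "-ast 1": pin the 2nd audio stream */
        int best_index[TYPE_NB] = { -1, -1, -1 };
        int best_count[TYPE_NB] = { -1, -1, -1 };
        int seen[TYPE_NB]       = { 0 };

        for (int i = 0; i < nb; i++) {
            int t = streams[i].type;
            if (seen[t]++ != wanted[t] && wanted[t] >= 0)
                continue;                            /* not the explicitly requested stream */
            if (best_count[t] >= streams[i].info_nb_frames)
                continue;                            /* an earlier stream had more probed frames */
            best_count[t] = streams[i].info_nb_frames;
            best_index[t] = i;
        }
        printf("video=%d audio=%d subtitle=%d\n",
               best_index[TYPE_VIDEO], best_index[TYPE_AUDIO], best_index[TYPE_SUBTITLE]);
        return 0;
    }
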
2012 if (show_status) { | 2348 if (show_status) { |
2013 dump_format(ic, 0, is->filename, 0); | 2349 dump_format(ic, 0, is->filename, 0); |
2014 } | 2350 } |
2015 | 2351 |
2016 /* open the streams */ | 2352 /* open the streams */ |
2017 if (audio_index >= 0) { | 2353 if (st_index[CODEC_TYPE_AUDIO] >= 0) { |
2018 stream_component_open(is, audio_index); | 2354 stream_component_open(is, st_index[CODEC_TYPE_AUDIO]); |
2019 } | 2355 } |
2020 | 2356 |
2021 if (video_index >= 0) { | 2357 ret=-1; |
2022 stream_component_open(is, video_index); | 2358 if (st_index[CODEC_TYPE_VIDEO] >= 0) { |
2023 } else { | 2359 ret= stream_component_open(is, st_index[CODEC_TYPE_VIDEO]); |
| 2360 } |
| 2361 if(ret<0) { |
2024 /* add the refresh timer to draw the picture */ | 2362 /* add the refresh timer to draw the picture */ |
2025 schedule_refresh(is, 40); | 2363 schedule_refresh(is, 40); |
2026 | 2364 |
2027 if (!display_disable) | 2365 if (!display_disable) |
2028 is->show_audio = 1; | 2366 is->show_audio = 2; |
2029 } | 2367 } |
2030 | 2368 |
2031 if (subtitle_index >= 0) { | 2369 if (st_index[CODEC_TYPE_SUBTITLE] >= 0) { |
2032 stream_component_open(is, subtitle_index); | 2370 stream_component_open(is, st_index[CODEC_TYPE_SUBTITLE]); |
2033 } | 2371 } |
2034 | 2372 |
2035 if (is->video_stream < 0 && is->audio_stream < 0) { | 2373 if (is->video_stream < 0 && is->audio_stream < 0) { |
2036 fprintf(stderr, "%s: could not open codecs\n", is->filename); | 2374 fprintf(stderr, "%s: could not open codecs\n", is->filename); |
2037 ret = -1; | 2375 ret = -1; |
2038 goto fail; | 2376 goto fail; |
2039 } | 2377 } |
2040 | 2378 |
2041 for(;;) { | 2379 for(;;) { |
2042 if (is->abort_request) | 2380 if (is->abort_request) |
(...skipping 52 matching lines...)
2095 } | 2433 } |
2096 if(url_feof(ic->pb) || eof) { | 2434 if(url_feof(ic->pb) || eof) { |
2097 if(is->video_stream >= 0){ | 2435 if(is->video_stream >= 0){ |
2098 av_init_packet(pkt); | 2436 av_init_packet(pkt); |
2099 pkt->data=NULL; | 2437 pkt->data=NULL; |
2100 pkt->size=0; | 2438 pkt->size=0; |
2101 pkt->stream_index= is->video_stream; | 2439 pkt->stream_index= is->video_stream; |
2102 packet_queue_put(&is->videoq, pkt); | 2440 packet_queue_put(&is->videoq, pkt); |
2103 } | 2441 } |
2104 SDL_Delay(10); | 2442 SDL_Delay(10); |
 | 2443             if(autoexit && is->audioq.size + is->videoq.size + is->subtitleq.size ==0){ |
| 2444 ret=AVERROR_EOF; |
| 2445 goto fail; |
| 2446 } |
2105 continue; | 2447 continue; |
2106 } | 2448 } |
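
The zero-sized packet queued at EOF matters because decoders with delay (e.g. B-frame reordering) still hold pictures; feeding data=NULL/size=0 flushes them out, after which -autoexit can quit once every queue has drained. A minimal sketch of the draining side, assuming the avcodec_decode_video2() API of roughly this FFmpeg era; error handling is trimmed.

    #include <libavcodec/avcodec.h>

    static void drain_decoder(AVCodecContext *avctx, AVFrame *frame)
    {
        AVPacket pkt;
        int got_picture;

        av_init_packet(&pkt);
        pkt.data = NULL;            /* empty packet: "no more input, flush" */
        pkt.size = 0;

        do {
            got_picture = 0;
            if (avcodec_decode_video2(avctx, frame, &got_picture, &pkt) < 0)
                break;
            /* each flushed frame would be queued for display here */
        } while (got_picture);
    }
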
2107 ret = av_read_frame(ic, pkt); | 2449 ret = av_read_frame(ic, pkt); |
2108 if (ret < 0) { | 2450 if (ret < 0) { |
2109 if (ret == AVERROR_EOF) | 2451 if (ret == AVERROR_EOF) |
2110 eof=1; | 2452 eof=1; |
2111 if (url_ferror(ic->pb)) | 2453 if (url_ferror(ic->pb)) |
2112 break; | 2454 break; |
2113 SDL_Delay(100); /* wait for user event */ | 2455 SDL_Delay(100); /* wait for user event */ |
2114 continue; | 2456 continue; |
(...skipping 73 matching lines...)
2188 { | 2530 { |
2189 VideoPicture *vp; | 2531 VideoPicture *vp; |
2190 int i; | 2532 int i; |
2191 /* XXX: use a special url_shutdown call to abort parse cleanly */ | 2533 /* XXX: use a special url_shutdown call to abort parse cleanly */ |
2192 is->abort_request = 1; | 2534 is->abort_request = 1; |
2193 SDL_WaitThread(is->parse_tid, NULL); | 2535 SDL_WaitThread(is->parse_tid, NULL); |
2194 | 2536 |
2195 /* free all pictures */ | 2537 /* free all pictures */ |
2196 for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) { | 2538 for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) { |
2197 vp = &is->pictq[i]; | 2539 vp = &is->pictq[i]; |
| 2540 #if CONFIG_AVFILTER |
| 2541 if (vp->picref) { |
| 2542 avfilter_unref_pic(vp->picref); |
| 2543 vp->picref = NULL; |
| 2544 } |
| 2545 #endif |
2198 if (vp->bmp) { | 2546 if (vp->bmp) { |
2199 SDL_FreeYUVOverlay(vp->bmp); | 2547 SDL_FreeYUVOverlay(vp->bmp); |
2200 vp->bmp = NULL; | 2548 vp->bmp = NULL; |
2201 } | 2549 } |
2202 } | 2550 } |
2203 SDL_DestroyMutex(is->pictq_mutex); | 2551 SDL_DestroyMutex(is->pictq_mutex); |
2204 SDL_DestroyCond(is->pictq_cond); | 2552 SDL_DestroyCond(is->pictq_cond); |
2205 SDL_DestroyMutex(is->subpq_mutex); | 2553 SDL_DestroyMutex(is->subpq_mutex); |
2206 SDL_DestroyCond(is->subpq_cond); | 2554 SDL_DestroyCond(is->subpq_cond); |
| 2555 #if !CONFIG_AVFILTER |
2207 if (is->img_convert_ctx) | 2556 if (is->img_convert_ctx) |
2208 sws_freeContext(is->img_convert_ctx); | 2557 sws_freeContext(is->img_convert_ctx); |
| 2558 #endif |
2209 av_free(is); | 2559 av_free(is); |
2210 } | 2560 } |
2211 | 2561 |
2212 static void stream_cycle_channel(VideoState *is, int codec_type) | 2562 static void stream_cycle_channel(VideoState *is, int codec_type) |
2213 { | 2563 { |
2214 AVFormatContext *ic = is->ic; | 2564 AVFormatContext *ic = is->ic; |
2215 int start_index, stream_index; | 2565 int start_index, stream_index; |
2216 AVStream *st; | 2566 AVStream *st; |
2217 | 2567 |
2218 if (codec_type == CODEC_TYPE_VIDEO) | 2568 if (codec_type == CODEC_TYPE_VIDEO) |
(...skipping 71 matching lines...)
2290 { | 2640 { |
2291 int i; | 2641 int i; |
2292 if (cur_stream) { | 2642 if (cur_stream) { |
2293 stream_close(cur_stream); | 2643 stream_close(cur_stream); |
2294 cur_stream = NULL; | 2644 cur_stream = NULL; |
2295 } | 2645 } |
2296 for (i = 0; i < CODEC_TYPE_NB; i++) | 2646 for (i = 0; i < CODEC_TYPE_NB; i++) |
2297 av_free(avcodec_opts[i]); | 2647 av_free(avcodec_opts[i]); |
2298 av_free(avformat_opts); | 2648 av_free(avformat_opts); |
2299 av_free(sws_opts); | 2649 av_free(sws_opts); |
| 2650 #if CONFIG_AVFILTER |
| 2651 avfilter_uninit(); |
| 2652 #endif |
2300 if (show_status) | 2653 if (show_status) |
2301 printf("\n"); | 2654 printf("\n"); |
2302 SDL_Quit(); | 2655 SDL_Quit(); |
2303 exit(0); | 2656 exit(0); |
2304 } | 2657 } |
2305 | 2658 |
2306 static void toggle_audio_display(void) | 2659 static void toggle_audio_display(void) |
2307 { | 2660 { |
2308 if (cur_stream) { | 2661 if (cur_stream) { |
2309 cur_stream->show_audio = !cur_stream->show_audio; | 2662 int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00); |
| 2663 cur_stream->show_audio = (cur_stream->show_audio + 1) % 3; |
| 2664 fill_rectangle(screen, |
 | 2665                        cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height, |
| 2666 bgcolor); |
 | 2667         SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height); |
2310 } | 2668 } |
2311 } | 2669 } |
2312 | 2670 |
2313 /* handle an event sent by the GUI */ | 2671 /* handle an event sent by the GUI */ |
2314 static void event_loop(void) | 2672 static void event_loop(void) |
2315 { | 2673 { |
2316 SDL_Event event; | 2674 SDL_Event event; |
2317 double incr, pos, frac; | 2675 double incr, pos, frac; |
2318 | 2676 |
2319 for(;;) { | 2677 for(;;) { |
| 2678 double x; |
2320 SDL_WaitEvent(&event); | 2679 SDL_WaitEvent(&event); |
2321 switch(event.type) { | 2680 switch(event.type) { |
2322 case SDL_KEYDOWN: | 2681 case SDL_KEYDOWN: |
2323 switch(event.key.keysym.sym) { | 2682 switch(event.key.keysym.sym) { |
2324 case SDLK_ESCAPE: | 2683 case SDLK_ESCAPE: |
2325 case SDLK_q: | 2684 case SDLK_q: |
2326 do_exit(); | 2685 do_exit(); |
2327 break; | 2686 break; |
2328 case SDLK_f: | 2687 case SDLK_f: |
2329 toggle_full_screen(); | 2688 toggle_full_screen(); |
(...skipping 34 matching lines...)
2364 do_seek: | 2723 do_seek: |
2365 if (cur_stream) { | 2724 if (cur_stream) { |
2366 if (seek_by_bytes) { | 2725 if (seek_by_bytes) { |
2367                     if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){ | 2726                     if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){ |
2368 pos= cur_stream->video_current_pos; | 2727 pos= cur_stream->video_current_pos; |
2369                     }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){ | 2728                     }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){ |
2370 pos= cur_stream->audio_pkt.pos; | 2729 pos= cur_stream->audio_pkt.pos; |
2371 }else | 2730 }else |
2372 pos = url_ftell(cur_stream->ic->pb); | 2731 pos = url_ftell(cur_stream->ic->pb); |
2373 if (cur_stream->ic->bit_rate) | 2732 if (cur_stream->ic->bit_rate) |
2374 incr *= cur_stream->ic->bit_rate / 60.0; | 2733 incr *= cur_stream->ic->bit_rate / 8.0; |
2375 else | 2734 else |
2376 incr *= 180000.0; | 2735 incr *= 180000.0; |
2377 pos += incr; | 2736 pos += incr; |
2378 stream_seek(cur_stream, pos, incr, 1); | 2737 stream_seek(cur_stream, pos, incr, 1); |
2379 } else { | 2738 } else { |
2380 pos = get_master_clock(cur_stream); | 2739 pos = get_master_clock(cur_stream); |
2381 pos += incr; | 2740 pos += incr; |
2382                     stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0); | 2741                     stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0); |
2383 } | 2742 } |
2384 } | 2743 } |
2385 break; | 2744 break; |
2386 default: | 2745 default: |
2387 break; | 2746 break; |
2388 } | 2747 } |
2389 break; | 2748 break; |
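
The byte-seek arithmetic in the keyboard handler was also corrected: incr is in seconds and ic->bit_rate is in bits per second, so the byte offset is seconds * bit_rate / 8 (the old division by 60.0 was wrong), with 180000 bytes per second as a rough fallback when the rate is unknown. A tiny worked example with a made-up bit rate:

    #include <stdio.h>

    int main(void)
    {
        double incr_seconds = 10.0;       /* e.g. the right-arrow seek step */
        long long bit_rate  = 1200000;    /* made-up stream bit rate, bits/s */
        double byte_incr = bit_rate ? incr_seconds * bit_rate / 8.0
                                    : incr_seconds * 180000.0;
        printf("seek forward by %.0f bytes\n", byte_incr);
        return 0;
    }
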
2390 case SDL_MOUSEBUTTONDOWN: | 2749 case SDL_MOUSEBUTTONDOWN: |
| 2750 case SDL_MOUSEMOTION: |
| 2751 if(event.type ==SDL_MOUSEBUTTONDOWN){ |
| 2752 x= event.button.x; |
| 2753 }else{ |
| 2754 if(event.motion.state != SDL_PRESSED) |
| 2755 break; |
| 2756 x= event.motion.x; |
| 2757 } |
2391 if (cur_stream) { | 2758 if (cur_stream) { |
2392 if(seek_by_bytes || cur_stream->ic->duration<=0){ | 2759 if(seek_by_bytes || cur_stream->ic->duration<=0){ |
2393 uint64_t size= url_fsize(cur_stream->ic->pb); | 2760 uint64_t size= url_fsize(cur_stream->ic->pb); |
2394                 stream_seek(cur_stream, size*(double)event.button.x/(double)cur_stream->width, 0, 1); | 2761                 stream_seek(cur_stream, size*x/cur_stream->width, 0, 1); |
2395 }else{ | 2762 }else{ |
2396 int64_t ts; | 2763 int64_t ts; |
2397 int ns, hh, mm, ss; | 2764 int ns, hh, mm, ss; |
2398 int tns, thh, tmm, tss; | 2765 int tns, thh, tmm, tss; |
2399 tns = cur_stream->ic->duration/1000000LL; | 2766 tns = cur_stream->ic->duration/1000000LL; |
2400 thh = tns/3600; | 2767 thh = tns/3600; |
2401 tmm = (tns%3600)/60; | 2768 tmm = (tns%3600)/60; |
2402 tss = (tns%60); | 2769 tss = (tns%60); |
2403 frac = (double)event.button.x/(double)cur_stream->width; | 2770 frac = x/cur_stream->width; |
2404 ns = frac*tns; | 2771 ns = frac*tns; |
2405 hh = ns/3600; | 2772 hh = ns/3600; |
2406 mm = (ns%3600)/60; | 2773 mm = (ns%3600)/60; |
2407 ss = (ns%60); | 2774 ss = (ns%60); |
2408 fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total du
ration (%2d:%02d:%02d) \n", frac*100, | 2775 fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total du
ration (%2d:%02d:%02d) \n", frac*100, |
2409 hh, mm, ss, thh, tmm, tss); | 2776 hh, mm, ss, thh, tmm, tss); |
2410 ts = frac*cur_stream->ic->duration; | 2777 ts = frac*cur_stream->ic->duration; |
2411 if (cur_stream->ic->start_time != AV_NOPTS_VALUE) | 2778 if (cur_stream->ic->start_time != AV_NOPTS_VALUE) |
2412 ts += cur_stream->ic->start_time; | 2779 ts += cur_stream->ic->start_time; |
2413 stream_seek(cur_stream, ts, 0, 0); | 2780 stream_seek(cur_stream, ts, 0, 0); |
(...skipping 107 matching lines...)
2521 } | 2888 } |
2522 | 2889 |
2523 static const OptionDef options[] = { | 2890 static const OptionDef options[] = { |
2524 #include "cmdutils_common_opts.h" | 2891 #include "cmdutils_common_opts.h" |
2525 { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "wi
dth" }, | 2892 { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "wi
dth" }, |
2526 { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "
height" }, | 2893 { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "
height" }, |
2527 { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or
abbreviation)", "size" }, | 2894 { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or
abbreviation)", "size" }, |
2528 { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" }, | 2895 { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" }, |
2529 { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" }, | 2896 { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" }, |
2530 { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" }, | 2897 { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" }, |
2531 { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_audio_stream}, "sel
ect desired audio stream", "stream_number" }, | 2898 { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[CODEC_TYPE_A
UDIO]}, "select desired audio stream", "stream_number" }, |
2532 { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_video_stream}, "sel
ect desired video stream", "stream_number" }, | 2899 { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[CODEC_TYPE_V
IDEO]}, "select desired video stream", "stream_number" }, |
2533 { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_subtitle_stream}, "
select desired subtitle stream", "stream_number" }, | 2900 { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[CODEC_TYPE_S
UBTITLE]}, "select desired subtitle stream", "stream_number" }, |
2534 { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position i
n seconds", "pos" }, | 2901 { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position i
n seconds", "pos" }, |
2535 { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off
1=on -1=auto" }, | 2902 { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off
1=on -1=auto", "val" }, |
2536 { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display"
}, | 2903 { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display"
}, |
2537 { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" }, | 2904 { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" }, |
2538 { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "
set pixel format", "format" }, | 2905 { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "
set pixel format", "format" }, |
2539 { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" }
, | 2906 { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" }
, |
2540 { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print spec
ific debug info", "" }, | 2907 { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print spec
ific debug info", "" }, |
2541 { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaro
und bugs", "" }, | 2908 { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaro
und bugs", "" }, |
2542 { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize
motion vectors", "" }, | 2909 { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize
motion vectors", "" }, |
2543 { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimiz
ations", "" }, | 2910 { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimiz
ations", "" }, |
2544 { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" }, | 2911 { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" }, |
2545 { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let
decoder reorder pts 0=off 1=on -1=auto", ""}, | 2912 { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let
decoder reorder pts 0=off 1=on -1=auto", ""}, |
2546 { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" }, | 2913 { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" }, |
2547 { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, ""
, "" }, | 2914 { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, ""
, "" }, |
2548 { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", ""
}, | 2915 { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", ""
}, |
2549 { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" }, | 2916 { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" }, |
2550 { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",
"algo" }, | 2917 { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",
"algo" }, |
2551 { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set er
ror detection threshold (0-4)", "threshold" }, | 2918 { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set er
ror detection threshold (0-4)", "threshold" }, |
2552 { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set er
ror concealment options", "bit_mask" }, | 2919 { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set er
ror concealment options", "bit_mask" }, |
2553 { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-vi
deo sync. type (type=audio/video/ext)", "type" }, | 2920 { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-vi
deo sync. type (type=audio/video/ext)", "type" }, |
2554 { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "t
hread count", "count" }, | 2921 { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "t
hread count", "count" }, |
| 2922 { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end",
"" }, |
| 2923 #if CONFIG_AVFILTER |
| 2924 { "vfilters", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "fi
lter list" }, |
| 2925 #endif |
2555 { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(voi
d*)opt_default}, "generic catch all option", "" }, | 2926 { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(voi
d*)opt_default}, "generic catch all option", "" }, |
2556 { NULL, }, | 2927 { NULL, }, |
2557 }; | 2928 }; |
2558 | 2929 |
2559 static void show_usage(void) | 2930 static void show_usage(void) |
2560 { | 2931 { |
2561 printf("Simple media player\n"); | 2932 printf("Simple media player\n"); |
2562 printf("usage: ffplay [options] input_file\n"); | 2933 printf("usage: ffplay [options] input_file\n"); |
2563 printf("\n"); | 2934 printf("\n"); |
2564 } | 2935 } |
(...skipping 14 matching lines...)
2579 "t cycle subtitle channel\n" | 2950 "t cycle subtitle channel\n" |
2580 "w show audio waves\n" | 2951 "w show audio waves\n" |
2581 "left/right seek backward/forward 10 seconds\n" | 2952 "left/right seek backward/forward 10 seconds\n" |
2582 "down/up seek backward/forward 1 minute\n" | 2953 "down/up seek backward/forward 1 minute\n" |
2583 "mouse click seek to percentage in file corresponding to frac
tion of width\n" | 2954 "mouse click seek to percentage in file corresponding to frac
tion of width\n" |
2584 ); | 2955 ); |
2585 } | 2956 } |
2586 | 2957 |
2587 static void opt_input_file(const char *filename) | 2958 static void opt_input_file(const char *filename) |
2588 { | 2959 { |
| 2960 if (input_filename) { |
 | 2961         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n", |
| 2962 filename, input_filename); |
| 2963 exit(1); |
| 2964 } |
2589 if (!strcmp(filename, "-")) | 2965 if (!strcmp(filename, "-")) |
2590 filename = "pipe:"; | 2966 filename = "pipe:"; |
2591 input_filename = filename; | 2967 input_filename = filename; |
2592 } | 2968 } |
2593 | 2969 |
2594 /* Called from the main */ | 2970 /* Called from the main */ |
2595 int main(int argc, char **argv) | 2971 int main(int argc, char **argv) |
2596 { | 2972 { |
2597 int flags, i; | 2973 int flags, i; |
2598 | 2974 |
2599 /* register all codecs, demux and protocols */ | 2975 /* register all codecs, demux and protocols */ |
2600 avcodec_register_all(); | 2976 avcodec_register_all(); |
2601 avdevice_register_all(); | 2977 avdevice_register_all(); |
| 2978 #if CONFIG_AVFILTER |
| 2979 avfilter_register_all(); |
| 2980 #endif |
2602 av_register_all(); | 2981 av_register_all(); |
2603 | 2982 |
2604 for(i=0; i<CODEC_TYPE_NB; i++){ | 2983 for(i=0; i<CODEC_TYPE_NB; i++){ |
2605 avcodec_opts[i]= avcodec_alloc_context2(i); | 2984 avcodec_opts[i]= avcodec_alloc_context2(i); |
2606 } | 2985 } |
2607 avformat_opts = avformat_alloc_context(); | 2986 avformat_opts = avformat_alloc_context(); |
| 2987 #if !CONFIG_AVFILTER |
2608 sws_opts = sws_getContext(16,16,0, 16,16,0, sws_flags, NULL,NULL,NULL); | 2988 sws_opts = sws_getContext(16,16,0, 16,16,0, sws_flags, NULL,NULL,NULL); |
| 2989 #endif |
2609 | 2990 |
2610 show_banner(); | 2991 show_banner(); |
2611 | 2992 |
2612 parse_options(argc, argv, options, opt_input_file); | 2993 parse_options(argc, argv, options, opt_input_file); |
2613 | 2994 |
2614 if (!input_filename) { | 2995 if (!input_filename) { |
2615 show_usage(); | 2996 show_usage(); |
2616 fprintf(stderr, "An input file must be specified\n"); | 2997 fprintf(stderr, "An input file must be specified\n"); |
2617 fprintf(stderr, "Use -h to get full help or, even better, run 'man ffpla
y'\n"); | 2998 fprintf(stderr, "Use -h to get full help or, even better, run 'man ffpla
y'\n"); |
2618 exit(1); | 2999 exit(1); |
(...skipping 13 matching lines...)
2632 | 3013 |
2633 if (!display_disable) { | 3014 if (!display_disable) { |
2634 #if HAVE_SDL_VIDEO_SIZE | 3015 #if HAVE_SDL_VIDEO_SIZE |
2635 const SDL_VideoInfo *vi = SDL_GetVideoInfo(); | 3016 const SDL_VideoInfo *vi = SDL_GetVideoInfo(); |
2636 fs_screen_width = vi->current_w; | 3017 fs_screen_width = vi->current_w; |
2637 fs_screen_height = vi->current_h; | 3018 fs_screen_height = vi->current_h; |
2638 #endif | 3019 #endif |
2639 } | 3020 } |
2640 | 3021 |
2641 SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE); | 3022 SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE); |
2642 SDL_EventState(SDL_MOUSEMOTION, SDL_IGNORE); | |
2643 SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE); | 3023 SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE); |
2644 SDL_EventState(SDL_USEREVENT, SDL_IGNORE); | 3024 SDL_EventState(SDL_USEREVENT, SDL_IGNORE); |
2645 | 3025 |
2646 av_init_packet(&flush_pkt); | 3026 av_init_packet(&flush_pkt); |
2647 flush_pkt.data= "FLUSH"; | 3027 flush_pkt.data= "FLUSH"; |
2648 | 3028 |
2649 cur_stream = stream_open(input_filename, file_iformat); | 3029 cur_stream = stream_open(input_filename, file_iformat); |
2650 | 3030 |
2651 event_loop(); | 3031 event_loop(); |
2652 | 3032 |
2653 /* never returns */ | 3033 /* never returns */ |
2654 | 3034 |
2655 return 0; | 3035 return 0; |
2656 } | 3036 } |
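
flush_pkt, initialized in main() with the literal "FLUSH", is a sentinel packet: after a seek it is pushed into each packet queue, and the decoder threads elsewhere in ffplay.c (outside this excerpt) recognize it and reset their codec. A minimal sketch of that check, with an illustrative helper name; the key point is that recognition is by data-pointer identity, not by comparing the bytes.

    #include <libavcodec/avcodec.h>

    /* Illustrative helper, not part of ffplay: detect the seek sentinel. */
    static int is_flush_packet(const AVPacket *pkt, const AVPacket *flush_pkt)
    {
        return pkt->data == flush_pkt->data;
    }

    /* On a match the thread would call avcodec_flush_buffers() on the stream's
       codec context and discard any buffered pts state before decoding on. */
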