Index: patched-ffmpeg-mt/ffplay.c |
=================================================================== |
--- patched-ffmpeg-mt/ffplay.c (revision 41250) |
+++ patched-ffmpeg-mt/ffplay.c (working copy) |
@@ -1,5 +1,5 @@ |
/* |
- * FFplay : Simple Media Player based on the ffmpeg libraries |
+ * FFplay : Simple Media Player based on the FFmpeg libraries |
* Copyright (c) 2003 Fabrice Bellard |
* |
* This file is part of FFmpeg. |
@@ -30,7 +30,14 @@ |
#include "libavcodec/audioconvert.h" |
#include "libavcodec/colorspace.h" |
#include "libavcodec/opt.h" |
+#include "libavcodec/avfft.h" |
+#if CONFIG_AVFILTER |
+# include "libavfilter/avfilter.h" |
+# include "libavfilter/avfiltergraph.h" |
+# include "libavfilter/graphparser.h" |
+#endif |
+ |
#include "cmdutils.h" |
#include <SDL.h> |
@@ -40,10 +47,6 @@ |
#undef main /* We don't want SDL to override our main() */ |
#endif |
-#undef exit |
-#undef printf |
-#undef fprintf |
- |
const char program_name[] = "FFplay"; |
const int program_birth_year = 2003; |
@@ -71,7 +74,9 @@ |
/* NOTE: the size must be big enough to compensate the hardware audio buffersize size */ |
#define SAMPLE_ARRAY_SIZE (2*65536) |
+#if !CONFIG_AVFILTER |
static int sws_flags = SWS_BICUBIC; |
+#endif |
typedef struct PacketQueue { |
AVPacketList *first_pkt, *last_pkt; |
@@ -92,6 +97,11 @@ |
int width, height; /* source height & width */ |
int allocated; |
SDL_TimerID timer_id; |
+ enum PixelFormat pix_fmt; |
+ |
+#if CONFIG_AVFILTER |
+ AVFilterPicRef *picref; |
+#endif |
} VideoPicture; |
typedef struct SubPicture { |
@@ -151,6 +161,9 @@ |
int16_t sample_array[SAMPLE_ARRAY_SIZE]; |
int sample_array_index; |
int last_i_start; |
+ RDFTContext *rdft; |
+ int rdft_bits; |
+ int xpos; |
SDL_Thread *subtitle_tid; |
int subtitle_stream; |
@@ -176,7 +189,9 @@ |
int pictq_size, pictq_rindex, pictq_windex; |
SDL_mutex *pictq_mutex; |
SDL_cond *pictq_cond; |
+#if !CONFIG_AVFILTER |
struct SwsContext *img_convert_ctx; |
+#endif |
// QETimer *video_timer; |
char filename[1024]; |
@@ -187,6 +202,9 @@ |
int64_t last_dts_for_fault_detection; |
int64_t last_pts_for_fault_detection; |
+#if CONFIG_AVFILTER |
+    AVFilterContext *out_video_filter; ///< the last filter in the video chain |
+#endif |
} VideoState; |
static void show_help(void); |
@@ -204,9 +222,11 @@ |
static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE; |
static int audio_disable; |
static int video_disable; |
-static int wanted_audio_stream= 0; |
-static int wanted_video_stream= 0; |
-static int wanted_subtitle_stream= -1; |
+static int wanted_stream[CODEC_TYPE_NB]={ |
+ [CODEC_TYPE_AUDIO]=-1, |
+ [CODEC_TYPE_VIDEO]=-1, |
+ [CODEC_TYPE_SUBTITLE]=-1, |
+}; |
static int seek_by_bytes=-1; |
static int display_disable; |
static int show_status = 1; |
@@ -227,6 +247,10 @@ |
static int error_recognition = FF_ER_CAREFUL; |
static int error_concealment = 3; |
static int decoder_reorder_pts= -1; |
+static int autoexit; |
+#if CONFIG_AVFILTER |
+static char *vfilters = NULL; |
+#endif |
/* current context */ |
static int is_full_screen; |
@@ -661,6 +685,13 @@ |
vp = &is->pictq[is->pictq_rindex]; |
if (vp->bmp) { |
+#if CONFIG_AVFILTER |
+ if (vp->picref->pixel_aspect.num == 0) |
+ aspect_ratio = 0; |
+ else |
+ aspect_ratio = av_q2d(vp->picref->pixel_aspect); |
+#else |
+ |
/* XXX: use variable in the frame */ |
if (is->video_st->sample_aspect_ratio.num) |
aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio); |
@@ -668,9 +699,10 @@ |
aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio); |
else |
aspect_ratio = 0; |
+#endif |
if (aspect_ratio <= 0.0) |
aspect_ratio = 1.0; |
- aspect_ratio *= (float)is->video_st->codec->width / is->video_st->codec->height; |
+ aspect_ratio *= (float)vp->width / (float)vp->height; |
/* if an active format is indicated, then it overrides the |
mpeg format */ |
#if 0 |
@@ -777,11 +809,17 @@ |
int i, i_start, x, y1, y, ys, delay, n, nb_display_channels; |
int ch, channels, h, h2, bgcolor, fgcolor; |
int16_t time_diff; |
+ int rdft_bits, nb_freq; |
+ for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++) |
+ ; |
+ nb_freq= 1<<(rdft_bits-1); |
+ |
/* compute display index : center on currently output samples */ |
channels = s->audio_st->codec->channels; |
nb_display_channels = channels; |
if (!s->paused) { |
+ int data_used= s->show_audio==1 ? s->width : (2*nb_freq); |
n = 2 * channels; |
delay = audio_write_get_buf_size(s); |
delay /= n; |
@@ -793,23 +831,24 @@ |
delay += (time_diff * s->audio_st->codec->sample_rate) / 1000000; |
} |
- delay -= s->width / 2; |
- if (delay < s->width) |
- delay = s->width; |
+ delay -= data_used / 2; |
+ if (delay < data_used) |
+ delay = data_used; |
i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE); |
- |
- h= INT_MIN; |
- for(i=0; i<1000; i+=channels){ |
- int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE; |
- int a= s->sample_array[idx]; |
- int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE]; |
- int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE]; |
- int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE]; |
- int score= a-d; |
- if(h<score && (b^c)<0){ |
- h= score; |
- i_start= idx; |
+ if(s->show_audio==1){ |
+ h= INT_MIN; |
+ for(i=0; i<1000; i+=channels){ |
+ int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE; |
+ int a= s->sample_array[idx]; |
+ int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE]; |
+ int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE]; |
+ int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE]; |
+ int score= a-d; |
+ if(h<score && (b^c)<0){ |
+ h= score; |
+ i_start= idx; |
+ } |
} |
} |
@@ -819,45 +858,85 @@ |
} |
bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00); |
- fill_rectangle(screen, |
- s->xleft, s->ytop, s->width, s->height, |
- bgcolor); |
+ if(s->show_audio==1){ |
+ fill_rectangle(screen, |
+ s->xleft, s->ytop, s->width, s->height, |
+ bgcolor); |
- fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff); |
+ fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff); |
- /* total height for one channel */ |
- h = s->height / nb_display_channels; |
- /* graph height / 2 */ |
- h2 = (h * 9) / 20; |
- for(ch = 0;ch < nb_display_channels; ch++) { |
- i = i_start + ch; |
- y1 = s->ytop + ch * h + (h / 2); /* position of center line */ |
- for(x = 0; x < s->width; x++) { |
- y = (s->sample_array[i] * h2) >> 15; |
- if (y < 0) { |
- y = -y; |
- ys = y1 - y; |
- } else { |
- ys = y1; |
+ /* total height for one channel */ |
+ h = s->height / nb_display_channels; |
+ /* graph height / 2 */ |
+ h2 = (h * 9) / 20; |
+ for(ch = 0;ch < nb_display_channels; ch++) { |
+ i = i_start + ch; |
+ y1 = s->ytop + ch * h + (h / 2); /* position of center line */ |
+ for(x = 0; x < s->width; x++) { |
+ y = (s->sample_array[i] * h2) >> 15; |
+ if (y < 0) { |
+ y = -y; |
+ ys = y1 - y; |
+ } else { |
+ ys = y1; |
+ } |
+ fill_rectangle(screen, |
+ s->xleft + x, ys, 1, y, |
+ fgcolor); |
+ i += channels; |
+ if (i >= SAMPLE_ARRAY_SIZE) |
+ i -= SAMPLE_ARRAY_SIZE; |
} |
+ } |
+ |
+ fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff); |
+ |
+ for(ch = 1;ch < nb_display_channels; ch++) { |
+ y = s->ytop + ch * h; |
fill_rectangle(screen, |
- s->xleft + x, ys, 1, y, |
+ s->xleft, y, s->width, 1, |
fgcolor); |
- i += channels; |
- if (i >= SAMPLE_ARRAY_SIZE) |
- i -= SAMPLE_ARRAY_SIZE; |
} |
- } |
+ SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height); |
+ }else{ |
+ nb_display_channels= FFMIN(nb_display_channels, 2); |
+ if(rdft_bits != s->rdft_bits){ |
+ av_rdft_end(s->rdft); |
+ s->rdft = av_rdft_init(rdft_bits, DFT_R2C); |
+ s->rdft_bits= rdft_bits; |
+ } |
+ { |
+ FFTSample data[2][2*nb_freq]; |
+ for(ch = 0;ch < nb_display_channels; ch++) { |
+ i = i_start + ch; |
+ for(x = 0; x < 2*nb_freq; x++) { |
+ double w= (x-nb_freq)*(1.0/nb_freq); |
+ data[ch][x]= s->sample_array[i]*(1.0-w*w); |
+ i += channels; |
+ if (i >= SAMPLE_ARRAY_SIZE) |
+ i -= SAMPLE_ARRAY_SIZE; |
+ } |
+ av_rdft_calc(s->rdft, data[ch]); |
+ } |
+            //least efficient way to do this, we should of course directly access it, but it's more than fast enough |
+ for(y=0; y<s->height; y++){ |
+ double w= 1/sqrt(nb_freq); |
+ int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1])); |
+ int b= sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0] + data[1][2*y+1]*data[1][2*y+1])); |
+ a= FFMIN(a,255); |
+ b= FFMIN(b,255); |
+ fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2); |
- fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff); |
- |
- for(ch = 1;ch < nb_display_channels; ch++) { |
- y = s->ytop + ch * h; |
- fill_rectangle(screen, |
- s->xleft, y, s->width, 1, |
- fgcolor); |
+ fill_rectangle(screen, |
+ s->xpos, s->height-y, 1, 1, |
+ fgcolor); |
+ } |
+ } |
+ SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height); |
+ s->xpos++; |
+ if(s->xpos >= s->width) |
+ s->xpos= s->xleft; |
} |
- SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height); |
} |
static int video_open(VideoState *is){ |
@@ -873,9 +952,15 @@ |
} else if(!is_full_screen && screen_width){ |
w = screen_width; |
h = screen_height; |
+#if CONFIG_AVFILTER |
+ }else if (is->out_video_filter && is->out_video_filter->inputs[0]){ |
+ w = is->out_video_filter->inputs[0]->w; |
+ h = is->out_video_filter->inputs[0]->h; |
+#else |
}else if (is->video_st && is->video_st->codec->width){ |
w = is->video_st->codec->width; |
h = is->video_st->codec->height; |
+#endif |
} else { |
w = 640; |
h = 480; |
@@ -1009,7 +1094,7 @@ |
static double compute_frame_delay(double frame_current_pts, VideoState *is) |
{ |
- double actual_delay, delay, sync_threshold, ref_clock, diff; |
+ double actual_delay, delay, sync_threshold, diff; |
/* compute nominal delay */ |
delay = frame_current_pts - is->frame_last_pts; |
@@ -1187,12 +1272,23 @@ |
if (vp->bmp) |
SDL_FreeYUVOverlay(vp->bmp); |
- vp->bmp = SDL_CreateYUVOverlay(is->video_st->codec->width, |
- is->video_st->codec->height, |
+#if CONFIG_AVFILTER |
+ if (vp->picref) |
+ avfilter_unref_pic(vp->picref); |
+ vp->picref = NULL; |
+ |
+ vp->width = is->out_video_filter->inputs[0]->w; |
+ vp->height = is->out_video_filter->inputs[0]->h; |
+ vp->pix_fmt = is->out_video_filter->inputs[0]->format; |
+#else |
+ vp->width = is->video_st->codec->width; |
+ vp->height = is->video_st->codec->height; |
+ vp->pix_fmt = is->video_st->codec->pix_fmt; |
+#endif |
+ |
+ vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height, |
SDL_YV12_OVERLAY, |
screen); |
- vp->width = is->video_st->codec->width; |
- vp->height = is->video_st->codec->height; |
SDL_LockMutex(is->pictq_mutex); |
vp->allocated = 1; |
@@ -1208,7 +1304,9 @@ |
{ |
VideoPicture *vp; |
int dst_pix_fmt; |
- |
+#if CONFIG_AVFILTER |
+ AVPicture pict_src; |
+#endif |
/* wait until we have space to put a new picture */ |
SDL_LockMutex(is->pictq_mutex); |
while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE && |
@@ -1224,8 +1322,13 @@ |
/* alloc or resize hardware picture buffer */ |
if (!vp->bmp || |
+#if CONFIG_AVFILTER |
+ vp->width != is->out_video_filter->inputs[0]->w || |
+ vp->height != is->out_video_filter->inputs[0]->h) { |
+#else |
vp->width != is->video_st->codec->width || |
vp->height != is->video_st->codec->height) { |
+#endif |
SDL_Event event; |
vp->allocated = 0; |
@@ -1250,6 +1353,11 @@ |
/* if the frame is not skipped, then display it */ |
if (vp->bmp) { |
AVPicture pict; |
+#if CONFIG_AVFILTER |
+ if(vp->picref) |
+ avfilter_unref_pic(vp->picref); |
+ vp->picref = src_frame->opaque; |
+#endif |
/* get a pointer on the bitmap */ |
SDL_LockYUVOverlay (vp->bmp); |
@@ -1263,18 +1371,31 @@ |
pict.linesize[0] = vp->bmp->pitches[0]; |
pict.linesize[1] = vp->bmp->pitches[2]; |
pict.linesize[2] = vp->bmp->pitches[1]; |
+ |
+#if CONFIG_AVFILTER |
+ pict_src.data[0] = src_frame->data[0]; |
+ pict_src.data[1] = src_frame->data[1]; |
+ pict_src.data[2] = src_frame->data[2]; |
+ |
+ pict_src.linesize[0] = src_frame->linesize[0]; |
+ pict_src.linesize[1] = src_frame->linesize[1]; |
+ pict_src.linesize[2] = src_frame->linesize[2]; |
+ |
+ //FIXME use direct rendering |
+ av_picture_copy(&pict, &pict_src, |
+ vp->pix_fmt, vp->width, vp->height); |
+#else |
sws_flags = av_get_int(sws_opts, "sws_flags", NULL); |
is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx, |
- is->video_st->codec->width, is->video_st->codec->height, |
- is->video_st->codec->pix_fmt, |
- is->video_st->codec->width, is->video_st->codec->height, |
+ vp->width, vp->height, vp->pix_fmt, vp->width, vp->height, |
dst_pix_fmt, sws_flags, NULL, NULL, NULL); |
if (is->img_convert_ctx == NULL) { |
fprintf(stderr, "Cannot initialize the conversion context\n"); |
exit(1); |
} |
sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize, |
- 0, is->video_st->codec->height, pict.data, pict.linesize); |
+ 0, vp->height, pict.data, pict.linesize); |
+#endif |
/* update the bitmap content */ |
SDL_UnlockYUVOverlay(vp->bmp); |
@@ -1317,35 +1438,18 @@ |
is->video_clock += frame_delay; |
#if defined(DEBUG_SYNC) && 0 |
- { |
- int ftype; |
- if (src_frame->pict_type == FF_B_TYPE) |
- ftype = 'B'; |
- else if (src_frame->pict_type == FF_I_TYPE) |
- ftype = 'I'; |
- else |
- ftype = 'P'; |
- printf("frame_type=%c clock=%0.3f pts=%0.3f\n", |
- ftype, pts, pts1); |
- } |
+ printf("frame_type=%c clock=%0.3f pts=%0.3f\n", |
+ av_get_pict_type_char(src_frame->pict_type), pts, pts1); |
#endif |
return queue_picture(is, src_frame, pts, pos); |
} |
-static int video_thread(void *arg) |
+static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt) |
{ |
- VideoState *is = arg; |
- AVPacket pkt1, *pkt = &pkt1; |
int len1, got_picture, i; |
- AVFrame *frame= avcodec_alloc_frame(); |
- double pts; |
- for(;;) { |
- while (is->paused && !is->videoq.abort_request) { |
- SDL_Delay(10); |
- } |
if (packet_queue_get(&is->videoq, pkt, 1) < 0) |
- break; |
+ return -1; |
if(pkt->data == flush_pkt.data){ |
avcodec_flush_buffers(is->video_st->codec); |
@@ -1369,8 +1473,9 @@ |
is->last_pts_for_fault_detection= INT64_MIN; |
is->frame_last_pts= AV_NOPTS_VALUE; |
is->frame_last_delay = 0; |
+ is->frame_timer = (double)av_gettime() / 1000000.0; |
- continue; |
+ return 0; |
} |
/* NOTE: ipts is the PTS of the _first_ picture beginning in |
@@ -1395,25 +1500,247 @@ |
|| (decoder_reorder_pts && is->faulty_pts<is->faulty_dts) |
|| pkt->dts == AV_NOPTS_VALUE) |
&& frame->reordered_opaque != AV_NOPTS_VALUE) |
- pts= frame->reordered_opaque; |
+ *pts= frame->reordered_opaque; |
else if(pkt->dts != AV_NOPTS_VALUE) |
- pts= pkt->dts; |
+ *pts= pkt->dts; |
else |
- pts= 0; |
- pts *= av_q2d(is->video_st->time_base); |
+ *pts= 0; |
// if (len1 < 0) |
// break; |
- if (got_picture) { |
- if (output_picture2(is, frame, pts, pkt->pos) < 0) |
- goto the_end; |
- } |
- av_free_packet(pkt); |
+ if (got_picture) |
+ return 1; |
+ return 0; |
+} |
+ |
+#if CONFIG_AVFILTER |
+typedef struct { |
+ VideoState *is; |
+ AVFrame *frame; |
+} FilterPriv; |
+ |
+static int input_init(AVFilterContext *ctx, const char *args, void *opaque) |
+{ |
+ FilterPriv *priv = ctx->priv; |
+ if(!opaque) return -1; |
+ |
+ priv->is = opaque; |
+ priv->frame = avcodec_alloc_frame(); |
+ |
+ return 0; |
+} |
+ |
+static void input_uninit(AVFilterContext *ctx) |
+{ |
+ FilterPriv *priv = ctx->priv; |
+ av_free(priv->frame); |
+} |
+ |
+static int input_request_frame(AVFilterLink *link) |
+{ |
+ FilterPriv *priv = link->src->priv; |
+ AVFilterPicRef *picref; |
+ int64_t pts = 0; |
+ AVPacket pkt; |
+ int ret; |
+ |
+ while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt))) |
+ av_free_packet(&pkt); |
+ if (ret < 0) |
+ return -1; |
+ |
+ /* FIXME: until I figure out how to hook everything up to the codec |
+ * right, we're just copying the entire frame. */ |
+ picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h); |
+ av_picture_copy((AVPicture *)&picref->data, (AVPicture *)priv->frame, |
+ picref->pic->format, link->w, link->h); |
+ av_free_packet(&pkt); |
+ |
+ picref->pts = pts; |
+ picref->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio; |
+ avfilter_start_frame(link, avfilter_ref_pic(picref, ~0)); |
+ avfilter_draw_slice(link, 0, link->h, 1); |
+ avfilter_end_frame(link); |
+ avfilter_unref_pic(picref); |
+ |
+ return 0; |
+} |
+ |
+static int input_query_formats(AVFilterContext *ctx) |
+{ |
+ FilterPriv *priv = ctx->priv; |
+ enum PixelFormat pix_fmts[] = { |
+ priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE |
+ }; |
+ |
+ avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts)); |
+ return 0; |
+} |
+ |
+static int input_config_props(AVFilterLink *link) |
+{ |
+ FilterPriv *priv = link->src->priv; |
+ AVCodecContext *c = priv->is->video_st->codec; |
+ |
+ link->w = c->width; |
+ link->h = c->height; |
+ |
+ return 0; |
+} |
+ |
+static AVFilter input_filter = |
+{ |
+ .name = "ffplay_input", |
+ |
+ .priv_size = sizeof(FilterPriv), |
+ |
+ .init = input_init, |
+ .uninit = input_uninit, |
+ |
+ .query_formats = input_query_formats, |
+ |
+ .inputs = (AVFilterPad[]) {{ .name = NULL }}, |
+ .outputs = (AVFilterPad[]) {{ .name = "default", |
+ .type = CODEC_TYPE_VIDEO, |
+ .request_frame = input_request_frame, |
+ .config_props = input_config_props, }, |
+ { .name = NULL }}, |
+}; |
+ |
+static void output_end_frame(AVFilterLink *link) |
+{ |
+} |
+ |
+static int output_query_formats(AVFilterContext *ctx) |
+{ |
+ enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE }; |
+ |
+ avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts)); |
+ return 0; |
+} |
+ |
+static int get_filtered_video_frame(AVFilterContext *ctx, AVFrame *frame, |
+ int64_t *pts) |
+{ |
+ AVFilterPicRef *pic; |
+ |
+ if(avfilter_request_frame(ctx->inputs[0])) |
+ return -1; |
+ if(!(pic = ctx->inputs[0]->cur_pic)) |
+ return -1; |
+ ctx->inputs[0]->cur_pic = NULL; |
+ |
+ frame->opaque = pic; |
+ *pts = pic->pts; |
+ |
+ memcpy(frame->data, pic->data, sizeof(frame->data)); |
+ memcpy(frame->linesize, pic->linesize, sizeof(frame->linesize)); |
+ |
+ return 1; |
+} |
+ |
+static AVFilter output_filter = |
+{ |
+ .name = "ffplay_output", |
+ |
+ .query_formats = output_query_formats, |
+ |
+ .inputs = (AVFilterPad[]) {{ .name = "default", |
+ .type = CODEC_TYPE_VIDEO, |
+ .end_frame = output_end_frame, |
+ .min_perms = AV_PERM_READ, }, |
+ { .name = NULL }}, |
+ .outputs = (AVFilterPad[]) {{ .name = NULL }}, |
+}; |
+#endif /* CONFIG_AVFILTER */ |
+ |
+static int video_thread(void *arg) |
+{ |
+ VideoState *is = arg; |
+ AVFrame *frame= avcodec_alloc_frame(); |
+ int64_t pts_int; |
+ double pts; |
+ int ret; |
+ |
+#if CONFIG_AVFILTER |
+ AVFilterContext *filt_src = NULL, *filt_out = NULL; |
+ AVFilterGraph *graph = av_mallocz(sizeof(AVFilterGraph)); |
+ graph->scale_sws_opts = av_strdup("sws_flags=bilinear"); |
+ |
+ if(!(filt_src = avfilter_open(&input_filter, "src"))) goto the_end; |
+ if(!(filt_out = avfilter_open(&output_filter, "out"))) goto the_end; |
+ |
+ if(avfilter_init_filter(filt_src, NULL, is)) goto the_end; |
+ if(avfilter_init_filter(filt_out, NULL, frame)) goto the_end; |
+ |
+ |
+ if(vfilters) { |
+ AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut)); |
+ AVFilterInOut *inputs = av_malloc(sizeof(AVFilterInOut)); |
+ |
+ outputs->name = av_strdup("in"); |
+ outputs->filter = filt_src; |
+ outputs->pad_idx = 0; |
+ outputs->next = NULL; |
+ |
+ inputs->name = av_strdup("out"); |
+ inputs->filter = filt_out; |
+ inputs->pad_idx = 0; |
+ inputs->next = NULL; |
+ |
+ if (avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL) < 0) |
+ goto the_end; |
+ av_freep(&vfilters); |
+ } else { |
+ if(avfilter_link(filt_src, 0, filt_out, 0) < 0) goto the_end; |
+ } |
+ avfilter_graph_add_filter(graph, filt_src); |
+ avfilter_graph_add_filter(graph, filt_out); |
+ |
+ if(avfilter_graph_check_validity(graph, NULL)) goto the_end; |
+ if(avfilter_graph_config_formats(graph, NULL)) goto the_end; |
+ if(avfilter_graph_config_links(graph, NULL)) goto the_end; |
+ |
+ is->out_video_filter = filt_out; |
+#endif |
+ |
+ for(;;) { |
+#if !CONFIG_AVFILTER |
+ AVPacket pkt; |
+#endif |
+ while (is->paused && !is->videoq.abort_request) |
+ SDL_Delay(10); |
+#if CONFIG_AVFILTER |
+ ret = get_filtered_video_frame(filt_out, frame, &pts_int); |
+#else |
+ ret = get_video_frame(is, frame, &pts_int, &pkt); |
+#endif |
+ |
+ if (ret < 0) goto the_end; |
+ |
+ if (!ret) |
+ continue; |
+ |
+ pts = pts_int*av_q2d(is->video_st->time_base); |
+ |
+#if CONFIG_AVFILTER |
+        ret = output_picture2(is, frame, pts, -1); /* FIXME: unknown pos */ |
+#else |
+ ret = output_picture2(is, frame, pts, pkt.pos); |
+ av_free_packet(&pkt); |
+#endif |
+ if (ret < 0) |
+ goto the_end; |
+ |
if (step) |
if (cur_stream) |
stream_pause(cur_stream); |
} |
the_end: |
+#if CONFIG_AVFILTER |
+ avfilter_graph_destroy(graph); |
+ av_freep(&graph); |
+#endif |
av_free(frame); |
return 0; |
} |
@@ -1742,49 +2069,49 @@ |
static int stream_component_open(VideoState *is, int stream_index) |
{ |
AVFormatContext *ic = is->ic; |
- AVCodecContext *enc; |
+ AVCodecContext *avctx; |
AVCodec *codec; |
SDL_AudioSpec wanted_spec, spec; |
if (stream_index < 0 || stream_index >= ic->nb_streams) |
return -1; |
- enc = ic->streams[stream_index]->codec; |
+ avctx = ic->streams[stream_index]->codec; |
/* prepare audio output */ |
- if (enc->codec_type == CODEC_TYPE_AUDIO) { |
- if (enc->channels > 0) { |
- enc->request_channels = FFMIN(2, enc->channels); |
+ if (avctx->codec_type == CODEC_TYPE_AUDIO) { |
+ if (avctx->channels > 0) { |
+ avctx->request_channels = FFMIN(2, avctx->channels); |
} else { |
- enc->request_channels = 2; |
+ avctx->request_channels = 2; |
} |
} |
- codec = avcodec_find_decoder(enc->codec_id); |
- enc->debug_mv = debug_mv; |
- enc->debug = debug; |
- enc->workaround_bugs = workaround_bugs; |
- enc->lowres = lowres; |
- if(lowres) enc->flags |= CODEC_FLAG_EMU_EDGE; |
- enc->idct_algo= idct; |
- if(fast) enc->flags2 |= CODEC_FLAG2_FAST; |
- enc->skip_frame= skip_frame; |
- enc->skip_idct= skip_idct; |
- enc->skip_loop_filter= skip_loop_filter; |
- enc->error_recognition= error_recognition; |
- enc->error_concealment= error_concealment; |
- avcodec_thread_init(enc, thread_count); |
+ codec = avcodec_find_decoder(avctx->codec_id); |
+ avctx->debug_mv = debug_mv; |
+ avctx->debug = debug; |
+ avctx->workaround_bugs = workaround_bugs; |
+ avctx->lowres = lowres; |
+ if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE; |
+ avctx->idct_algo= idct; |
+ if(fast) avctx->flags2 |= CODEC_FLAG2_FAST; |
+ avctx->skip_frame= skip_frame; |
+ avctx->skip_idct= skip_idct; |
+ avctx->skip_loop_filter= skip_loop_filter; |
+ avctx->error_recognition= error_recognition; |
+ avctx->error_concealment= error_concealment; |
+ avcodec_thread_init(avctx, thread_count); |
- set_context_opts(enc, avcodec_opts[enc->codec_type], 0); |
+ set_context_opts(avctx, avcodec_opts[avctx->codec_type], 0); |
if (!codec || |
- avcodec_open(enc, codec) < 0) |
+ avcodec_open(avctx, codec) < 0) |
return -1; |
/* prepare audio output */ |
- if (enc->codec_type == CODEC_TYPE_AUDIO) { |
- wanted_spec.freq = enc->sample_rate; |
+ if (avctx->codec_type == CODEC_TYPE_AUDIO) { |
+ wanted_spec.freq = avctx->sample_rate; |
wanted_spec.format = AUDIO_S16SYS; |
- wanted_spec.channels = enc->channels; |
+ wanted_spec.channels = avctx->channels; |
wanted_spec.silence = 0; |
wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE; |
wanted_spec.callback = sdl_audio_callback; |
@@ -1798,7 +2125,7 @@ |
} |
ic->streams[stream_index]->discard = AVDISCARD_DEFAULT; |
- switch(enc->codec_type) { |
+ switch(avctx->codec_type) { |
case CODEC_TYPE_AUDIO: |
is->audio_stream = stream_index; |
is->audio_st = ic->streams[stream_index]; |
@@ -1810,7 +2137,7 @@ |
is->audio_diff_avg_count = 0; |
/* since we do not have a precise anough audio fifo fullness, |
we correct audio sync only if larger than this threshold */ |
- is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / enc->sample_rate; |
+ is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate; |
memset(&is->audio_pkt, 0, sizeof(is->audio_pkt)); |
packet_queue_init(&is->audioq); |
@@ -1820,7 +2147,6 @@ |
is->video_stream = stream_index; |
is->video_st = ic->streams[stream_index]; |
- is->frame_timer = (double)av_gettime() / 1000000.0; |
// is->video_current_pts_time = av_gettime(); |
packet_queue_init(&is->videoq); |
@@ -1842,13 +2168,13 @@ |
static void stream_component_close(VideoState *is, int stream_index) |
{ |
AVFormatContext *ic = is->ic; |
- AVCodecContext *enc; |
+ AVCodecContext *avctx; |
if (stream_index < 0 || stream_index >= ic->nb_streams) |
return; |
- enc = ic->streams[stream_index]->codec; |
+ avctx = ic->streams[stream_index]->codec; |
- switch(enc->codec_type) { |
+ switch(avctx->codec_type) { |
case CODEC_TYPE_AUDIO: |
packet_queue_abort(&is->audioq); |
@@ -1857,6 +2183,7 @@ |
packet_queue_end(&is->audioq); |
if (is->reformat_ctx) |
av_audio_convert_free(is->reformat_ctx); |
+ is->reformat_ctx = NULL; |
break; |
case CODEC_TYPE_VIDEO: |
packet_queue_abort(&is->videoq); |
@@ -1891,8 +2218,8 @@ |
} |
ic->streams[stream_index]->discard = AVDISCARD_ALL; |
- avcodec_close(enc); |
- switch(enc->codec_type) { |
+ avcodec_close(avctx); |
+ switch(avctx->codec_type) { |
case CODEC_TYPE_AUDIO: |
is->audio_st = NULL; |
is->audio_stream = -1; |
@@ -1924,16 +2251,18 @@ |
{ |
VideoState *is = arg; |
AVFormatContext *ic; |
- int err, i, ret, video_index, audio_index, subtitle_index; |
+ int err, i, ret; |
+ int st_index[CODEC_TYPE_NB]; |
+ int st_count[CODEC_TYPE_NB]={0}; |
+ int st_best_packet_count[CODEC_TYPE_NB]; |
AVPacket pkt1, *pkt = &pkt1; |
AVFormatParameters params, *ap = ¶ms; |
int eof=0; |
ic = avformat_alloc_context(); |
- video_index = -1; |
- audio_index = -1; |
- subtitle_index = -1; |
+ memset(st_index, -1, sizeof(st_index)); |
+ memset(st_best_packet_count, -1, sizeof(st_best_packet_count)); |
is->video_stream = -1; |
is->audio_stream = -1; |
is->subtitle_stream = -1; |
@@ -1990,20 +2319,27 @@ |
} |
for(i = 0; i < ic->nb_streams; i++) { |
- AVCodecContext *enc = ic->streams[i]->codec; |
+ AVStream *st= ic->streams[i]; |
+ AVCodecContext *avctx = st->codec; |
ic->streams[i]->discard = AVDISCARD_ALL; |
- switch(enc->codec_type) { |
+ if(avctx->codec_type >= (unsigned)CODEC_TYPE_NB) |
+ continue; |
+ if(st_count[avctx->codec_type]++ != wanted_stream[avctx->codec_type] && wanted_stream[avctx->codec_type] >= 0) |
+ continue; |
+ |
+ if(st_best_packet_count[avctx->codec_type] >= st->codec_info_nb_frames) |
+ continue; |
+ st_best_packet_count[avctx->codec_type]= st->codec_info_nb_frames; |
+ |
+ switch(avctx->codec_type) { |
case CODEC_TYPE_AUDIO: |
- if (wanted_audio_stream-- >= 0 && !audio_disable) |
- audio_index = i; |
+ if (!audio_disable) |
+ st_index[CODEC_TYPE_AUDIO] = i; |
break; |
case CODEC_TYPE_VIDEO: |
- if (wanted_video_stream-- >= 0 && !video_disable) |
- video_index = i; |
- break; |
case CODEC_TYPE_SUBTITLE: |
- if (wanted_subtitle_stream-- >= 0 && !video_disable) |
- subtitle_index = i; |
+ if (!video_disable) |
+ st_index[avctx->codec_type] = i; |
break; |
default: |
break; |
@@ -2014,22 +2350,24 @@ |
} |
/* open the streams */ |
- if (audio_index >= 0) { |
- stream_component_open(is, audio_index); |
+ if (st_index[CODEC_TYPE_AUDIO] >= 0) { |
+ stream_component_open(is, st_index[CODEC_TYPE_AUDIO]); |
} |
- if (video_index >= 0) { |
- stream_component_open(is, video_index); |
- } else { |
+ ret=-1; |
+ if (st_index[CODEC_TYPE_VIDEO] >= 0) { |
+ ret= stream_component_open(is, st_index[CODEC_TYPE_VIDEO]); |
+ } |
+ if(ret<0) { |
/* add the refresh timer to draw the picture */ |
schedule_refresh(is, 40); |
if (!display_disable) |
- is->show_audio = 1; |
+ is->show_audio = 2; |
} |
- if (subtitle_index >= 0) { |
- stream_component_open(is, subtitle_index); |
+ if (st_index[CODEC_TYPE_SUBTITLE] >= 0) { |
+ stream_component_open(is, st_index[CODEC_TYPE_SUBTITLE]); |
} |
if (is->video_stream < 0 && is->audio_stream < 0) { |
@@ -2102,6 +2440,10 @@ |
packet_queue_put(&is->videoq, pkt); |
} |
SDL_Delay(10); |
+ if(autoexit && is->audioq.size + is->videoq.size + is->subtitleq.size ==0){ |
+ ret=AVERROR_EOF; |
+ goto fail; |
+ } |
continue; |
} |
ret = av_read_frame(ic, pkt); |
@@ -2195,6 +2537,12 @@ |
/* free all pictures */ |
for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) { |
vp = &is->pictq[i]; |
+#if CONFIG_AVFILTER |
+ if (vp->picref) { |
+ avfilter_unref_pic(vp->picref); |
+ vp->picref = NULL; |
+ } |
+#endif |
if (vp->bmp) { |
SDL_FreeYUVOverlay(vp->bmp); |
vp->bmp = NULL; |
@@ -2204,8 +2552,10 @@ |
SDL_DestroyCond(is->pictq_cond); |
SDL_DestroyMutex(is->subpq_mutex); |
SDL_DestroyCond(is->subpq_cond); |
+#if !CONFIG_AVFILTER |
if (is->img_convert_ctx) |
sws_freeContext(is->img_convert_ctx); |
+#endif |
av_free(is); |
} |
@@ -2297,6 +2647,9 @@ |
av_free(avcodec_opts[i]); |
av_free(avformat_opts); |
av_free(sws_opts); |
+#if CONFIG_AVFILTER |
+ avfilter_uninit(); |
+#endif |
if (show_status) |
printf("\n"); |
SDL_Quit(); |
@@ -2306,7 +2659,12 @@ |
static void toggle_audio_display(void) |
{ |
if (cur_stream) { |
- cur_stream->show_audio = !cur_stream->show_audio; |
+ int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00); |
+ cur_stream->show_audio = (cur_stream->show_audio + 1) % 3; |
+ fill_rectangle(screen, |
+ cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height, |
+ bgcolor); |
+ SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height); |
} |
} |
@@ -2317,6 +2675,7 @@ |
double incr, pos, frac; |
for(;;) { |
+ double x; |
SDL_WaitEvent(&event); |
switch(event.type) { |
case SDL_KEYDOWN: |
@@ -2371,7 +2730,7 @@ |
}else |
pos = url_ftell(cur_stream->ic->pb); |
if (cur_stream->ic->bit_rate) |
- incr *= cur_stream->ic->bit_rate / 60.0; |
+ incr *= cur_stream->ic->bit_rate / 8.0; |
else |
incr *= 180000.0; |
pos += incr; |
@@ -2388,10 +2747,18 @@ |
} |
break; |
case SDL_MOUSEBUTTONDOWN: |
+ case SDL_MOUSEMOTION: |
+ if(event.type ==SDL_MOUSEBUTTONDOWN){ |
+ x= event.button.x; |
+ }else{ |
+ if(event.motion.state != SDL_PRESSED) |
+ break; |
+ x= event.motion.x; |
+ } |
if (cur_stream) { |
if(seek_by_bytes || cur_stream->ic->duration<=0){ |
uint64_t size= url_fsize(cur_stream->ic->pb); |
- stream_seek(cur_stream, size*(double)event.button.x/(double)cur_stream->width, 0, 1); |
+ stream_seek(cur_stream, size*x/cur_stream->width, 0, 1); |
}else{ |
int64_t ts; |
int ns, hh, mm, ss; |
@@ -2400,7 +2767,7 @@ |
thh = tns/3600; |
tmm = (tns%3600)/60; |
tss = (tns%60); |
- frac = (double)event.button.x/(double)cur_stream->width; |
+ frac = x/cur_stream->width; |
ns = frac*tns; |
hh = ns/3600; |
mm = (ns%3600)/60; |
@@ -2528,11 +2895,11 @@ |
{ "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" }, |
{ "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" }, |
{ "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" }, |
- { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_audio_stream}, "select desired audio stream", "stream_number" }, |
- { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_video_stream}, "select desired video stream", "stream_number" }, |
- { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_subtitle_stream}, "select desired subtitle stream", "stream_number" }, |
+ { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[CODEC_TYPE_AUDIO]}, "select desired audio stream", "stream_number" }, |
+ { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[CODEC_TYPE_VIDEO]}, "select desired video stream", "stream_number" }, |
+ { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[CODEC_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" }, |
{ "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" }, |
- { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto" }, |
+ { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" }, |
{ "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" }, |
{ "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" }, |
{ "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" }, |
@@ -2552,6 +2919,10 @@ |
{ "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options", "bit_mask" }, |
{ "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" }, |
{ "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" }, |
+ { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" }, |
+#if CONFIG_AVFILTER |
+ { "vfilters", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" }, |
+#endif |
{ "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" }, |
{ NULL, }, |
}; |
@@ -2586,6 +2957,11 @@ |
static void opt_input_file(const char *filename) |
{ |
+ if (input_filename) { |
+ fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n", |
+ filename, input_filename); |
+ exit(1); |
+ } |
if (!strcmp(filename, "-")) |
filename = "pipe:"; |
input_filename = filename; |
@@ -2599,13 +2975,18 @@ |
/* register all codecs, demux and protocols */ |
avcodec_register_all(); |
avdevice_register_all(); |
+#if CONFIG_AVFILTER |
+ avfilter_register_all(); |
+#endif |
av_register_all(); |
for(i=0; i<CODEC_TYPE_NB; i++){ |
avcodec_opts[i]= avcodec_alloc_context2(i); |
} |
avformat_opts = avformat_alloc_context(); |
+#if !CONFIG_AVFILTER |
sws_opts = sws_getContext(16,16,0, 16,16,0, sws_flags, NULL,NULL,NULL); |
+#endif |
show_banner(); |
@@ -2639,7 +3020,6 @@ |
} |
SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE); |
- SDL_EventState(SDL_MOUSEMOTION, SDL_IGNORE); |
SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE); |
SDL_EventState(SDL_USEREVENT, SDL_IGNORE); |