[android-x86/external-ffmpeg.git] / ffplay.c
1 /*
2  * ffplay : Simple Media Player based on the Libav libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of Libav.
6  *
7  * Libav is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * Libav is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with Libav; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include "libavutil/avstring.h"
27 #include "libavutil/colorspace.h"
28 #include "libavutil/pixdesc.h"
29 #include "libavutil/imgutils.h"
30 #include "libavutil/parseutils.h"
31 #include "libavutil/samplefmt.h"
32 #include "libavformat/avformat.h"
33 #include "libavdevice/avdevice.h"
34 #include "libswscale/swscale.h"
35 #include "libavcodec/audioconvert.h"
36 #include "libavutil/opt.h"
37 #include "libavcodec/avfft.h"
38
39 #if CONFIG_AVFILTER
40 # include "libavfilter/avfilter.h"
41 # include "libavfilter/avfiltergraph.h"
42 #endif
43
44 #include "cmdutils.h"
45
46 #include <SDL.h>
47 #include <SDL_thread.h>
48
49 #ifdef __MINGW32__
50 #undef main /* We don't want SDL to override our main() */
51 #endif
52
53 #include <unistd.h>
54 #include <assert.h>
55
56 const char program_name[] = "ffplay";
57 const int program_birth_year = 2003;
58
59 //#define DEBUG
60 //#define DEBUG_SYNC
61
62 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
63 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
64 #define MIN_FRAMES 5
65
66 /* SDL audio buffer size, in samples. Should be small to have precise
67    A/V sync as SDL does not have hardware buffer fullness info. */
68 #define SDL_AUDIO_BUFFER_SIZE 1024
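/* For reference (a rough illustration, assuming 44100 Hz output): 1024 sample
   frames per callback is about 23 ms of audio, so this is roughly the amount
   of buffering the audio clock computation below has to compensate for. */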
69
70 /* no AV sync correction is done if below the AV sync threshold */
71 #define AV_SYNC_THRESHOLD 0.01
72 /* no AV correction is done if too big error */
73 #define AV_NOSYNC_THRESHOLD 10.0
74
75 #define FRAME_SKIP_FACTOR 0.05
76
77 /* maximum audio speed change to get correct sync */
78 #define SAMPLE_CORRECTION_PERCENT_MAX 10
79
80 /* we use about AUDIO_DIFF_AVG_NB A-V differences to compute the average */
81 #define AUDIO_DIFF_AVG_NB   20
82
83 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
84 #define SAMPLE_ARRAY_SIZE (2*65536)
85
86 static int sws_flags = SWS_BICUBIC;
87
88 typedef struct PacketQueue {
89     AVPacketList *first_pkt, *last_pkt;
90     int nb_packets;
91     int size;
92     int abort_request;
93     SDL_mutex *mutex;
94     SDL_cond *cond;
95 } PacketQueue;
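/* A PacketQueue is a simple blocking FIFO of demuxed packets: the read thread
   appends with packet_queue_put() while the audio/video/subtitle decoders
   drain it with packet_queue_get(); mutex/cond implement the blocking, and
   abort_request wakes any waiter during teardown. */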
96
97 #define VIDEO_PICTURE_QUEUE_SIZE 2
98 #define SUBPICTURE_QUEUE_SIZE 4
99
100 typedef struct VideoPicture {
101     double pts;                                  ///<presentation time stamp for this picture
102     double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
103     int64_t pos;                                 ///<byte position in file
104     SDL_Overlay *bmp;
105     int width, height; /* source height & width */
106     int allocated;
107     enum PixelFormat pix_fmt;
108
109 #if CONFIG_AVFILTER
110     AVFilterBufferRef *picref;
111 #endif
112 } VideoPicture;
113
114 typedef struct SubPicture {
115     double pts; /* presentation time stamp for this picture */
116     AVSubtitle sub;
117 } SubPicture;
118
119 enum {
120     AV_SYNC_AUDIO_MASTER, /* default choice */
121     AV_SYNC_VIDEO_MASTER,
122     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
123 };
124
125 typedef struct VideoState {
126     SDL_Thread *parse_tid;
127     SDL_Thread *video_tid;
128     SDL_Thread *refresh_tid;
129     AVInputFormat *iformat;
130     int no_background;
131     int abort_request;
132     int paused;
133     int last_paused;
134     int seek_req;
135     int seek_flags;
136     int64_t seek_pos;
137     int64_t seek_rel;
138     int read_pause_return;
139     AVFormatContext *ic;
140     int dtg_active_format;
141
142     int audio_stream;
143
144     int av_sync_type;
145     double external_clock; /* external clock base */
146     int64_t external_clock_time;
147
148     double audio_clock;
149     double audio_diff_cum; /* used for AV difference average computation */
150     double audio_diff_avg_coef;
151     double audio_diff_threshold;
152     int audio_diff_avg_count;
153     AVStream *audio_st;
154     PacketQueue audioq;
155     int audio_hw_buf_size;
156     /* samples output by the codec. we reserve more space for avsync
157        compensation */
158     DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
159     DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
160     uint8_t *audio_buf;
161     unsigned int audio_buf_size; /* in bytes */
162     int audio_buf_index; /* in bytes */
163     AVPacket audio_pkt_temp;
164     AVPacket audio_pkt;
165     enum AVSampleFormat audio_src_fmt;
166     AVAudioConvert *reformat_ctx;
167
168     int show_audio; /* if true, display audio samples */
169     int16_t sample_array[SAMPLE_ARRAY_SIZE];
170     int sample_array_index;
171     int last_i_start;
172     RDFTContext *rdft;
173     int rdft_bits;
174     FFTSample *rdft_data;
175     int xpos;
176
177     SDL_Thread *subtitle_tid;
178     int subtitle_stream;
179     int subtitle_stream_changed;
180     AVStream *subtitle_st;
181     PacketQueue subtitleq;
182     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
183     int subpq_size, subpq_rindex, subpq_windex;
184     SDL_mutex *subpq_mutex;
185     SDL_cond *subpq_cond;
186
187     double frame_timer;
188     double frame_last_pts;
189     double frame_last_delay;
190     double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
191     int video_stream;
192     AVStream *video_st;
193     PacketQueue videoq;
194     double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
195     double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
196     int64_t video_current_pos;                   ///<current displayed file pos
197     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
198     int pictq_size, pictq_rindex, pictq_windex;
199     SDL_mutex *pictq_mutex;
200     SDL_cond *pictq_cond;
201 #if !CONFIG_AVFILTER
202     struct SwsContext *img_convert_ctx;
203 #endif
204
205     //    QETimer *video_timer;
206     char filename[1024];
207     int width, height, xleft, ytop;
208
209     PtsCorrectionContext pts_ctx;
210
211 #if CONFIG_AVFILTER
212     AVFilterContext *out_video_filter;          ///<the last filter in the video chain
213 #endif
214
215     float skip_frames;
216     float skip_frames_index;
217     int refresh;
218 } VideoState;
219
220 static void show_help(void);
221 static int audio_write_get_buf_size(VideoState *is);
222
223 /* options specified by the user */
224 static AVInputFormat *file_iformat;
225 static const char *input_filename;
226 static const char *window_title;
227 static int fs_screen_width;
228 static int fs_screen_height;
229 static int screen_width = 0;
230 static int screen_height = 0;
231 static int frame_width = 0;
232 static int frame_height = 0;
233 static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
234 static int audio_disable;
235 static int video_disable;
236 static int wanted_stream[AVMEDIA_TYPE_NB]={
237     [AVMEDIA_TYPE_AUDIO]=-1,
238     [AVMEDIA_TYPE_VIDEO]=-1,
239     [AVMEDIA_TYPE_SUBTITLE]=-1,
240 };
241 static int seek_by_bytes=-1;
242 static int display_disable;
243 static int show_status = 1;
244 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
245 static int64_t start_time = AV_NOPTS_VALUE;
246 static int64_t duration = AV_NOPTS_VALUE;
247 static int debug = 0;
248 static int debug_mv = 0;
249 static int step = 0;
250 static int thread_count = 1;
251 static int workaround_bugs = 1;
252 static int fast = 0;
253 static int genpts = 0;
254 static int lowres = 0;
255 static int idct = FF_IDCT_AUTO;
256 static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
257 static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
258 static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
259 static int error_recognition = FF_ER_CAREFUL;
260 static int error_concealment = 3;
261 static int decoder_reorder_pts= -1;
262 static int autoexit;
263 static int exit_on_keydown;
264 static int exit_on_mousedown;
265 static int loop=1;
266 static int framedrop=1;
267
268 static int rdftspeed=20;
269 #if CONFIG_AVFILTER
270 static char *vfilters = NULL;
271 #endif
272
273 /* current context */
274 static int is_full_screen;
275 static VideoState *cur_stream;
276 static int64_t audio_callback_time;
277
278 static AVPacket flush_pkt;
279
280 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
281 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
282 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
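/* These user events hand work to the SDL event loop in the main thread:
   ALLOC asks it to (re)create the video overlay (see queue_picture and
   alloc_picture), REFRESH is the periodic tick pushed by refresh_thread(),
   and QUIT requests shutdown. */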
283
284 static SDL_Surface *screen;
285
286 static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
287
288 /* packet queue handling */
289 static void packet_queue_init(PacketQueue *q)
290 {
291     memset(q, 0, sizeof(PacketQueue));
292     q->mutex = SDL_CreateMutex();
293     q->cond = SDL_CreateCond();
294     packet_queue_put(q, &flush_pkt);
295 }
296
297 static void packet_queue_flush(PacketQueue *q)
298 {
299     AVPacketList *pkt, *pkt1;
300
301     SDL_LockMutex(q->mutex);
302     for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
303         pkt1 = pkt->next;
304         av_free_packet(&pkt->pkt);
305         av_freep(&pkt);
306     }
307     q->last_pkt = NULL;
308     q->first_pkt = NULL;
309     q->nb_packets = 0;
310     q->size = 0;
311     SDL_UnlockMutex(q->mutex);
312 }
313
314 static void packet_queue_end(PacketQueue *q)
315 {
316     packet_queue_flush(q);
317     SDL_DestroyMutex(q->mutex);
318     SDL_DestroyCond(q->cond);
319 }
320
321 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
322 {
323     AVPacketList *pkt1;
324
325     /* duplicate the packet */
326     if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
327         return -1;
328
329     pkt1 = av_malloc(sizeof(AVPacketList));
330     if (!pkt1)
331         return -1;
332     pkt1->pkt = *pkt;
333     pkt1->next = NULL;
334
335
336     SDL_LockMutex(q->mutex);
337
338     if (!q->last_pkt)
339
340         q->first_pkt = pkt1;
341     else
342         q->last_pkt->next = pkt1;
343     q->last_pkt = pkt1;
344     q->nb_packets++;
345     q->size += pkt1->pkt.size + sizeof(*pkt1);
346     /* XXX: should duplicate packet data in DV case */
347     SDL_CondSignal(q->cond);
348
349     SDL_UnlockMutex(q->mutex);
350     return 0;
351 }
352
353 static void packet_queue_abort(PacketQueue *q)
354 {
355     SDL_LockMutex(q->mutex);
356
357     q->abort_request = 1;
358
359     SDL_CondSignal(q->cond);
360
361     SDL_UnlockMutex(q->mutex);
362 }
363
364 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
365 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
366 {
367     AVPacketList *pkt1;
368     int ret;
369
370     SDL_LockMutex(q->mutex);
371
372     for(;;) {
373         if (q->abort_request) {
374             ret = -1;
375             break;
376         }
377
378         pkt1 = q->first_pkt;
379         if (pkt1) {
380             q->first_pkt = pkt1->next;
381             if (!q->first_pkt)
382                 q->last_pkt = NULL;
383             q->nb_packets--;
384             q->size -= pkt1->pkt.size + sizeof(*pkt1);
385             *pkt = pkt1->pkt;
386             av_free(pkt1);
387             ret = 1;
388             break;
389         } else if (!block) {
390             ret = 0;
391             break;
392         } else {
393             SDL_CondWait(q->cond, q->mutex);
394         }
395     }
396     SDL_UnlockMutex(q->mutex);
397     return ret;
398 }
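/* Illustrative only (kept disabled): a hypothetical consumer loop showing the
   intended use of the queue; the real decoder loops below also check for
   flush_pkt. */
#if 0
static void consume_packets_example(PacketQueue *q)
{
    AVPacket pkt;

    /* block until a packet arrives; stop once packet_queue_abort() was called */
    while (packet_queue_get(q, &pkt, 1) > 0) {
        /* ... decode pkt here ... */
        av_free_packet(&pkt);
    }
}
#endif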
399
400 static inline void fill_rectangle(SDL_Surface *screen,
401                                   int x, int y, int w, int h, int color)
402 {
403     SDL_Rect rect;
404     rect.x = x;
405     rect.y = y;
406     rect.w = w;
407     rect.h = h;
408     SDL_FillRect(screen, &rect, color);
409 }
410
411 #if 0
412 /* draw only the border of a rectangle */
413 void fill_border(VideoState *s, int x, int y, int w, int h, int color)
414 {
415     int w1, w2, h1, h2;
416
417     /* fill the background */
418     w1 = x;
419     if (w1 < 0)
420         w1 = 0;
421     w2 = s->width - (x + w);
422     if (w2 < 0)
423         w2 = 0;
424     h1 = y;
425     if (h1 < 0)
426         h1 = 0;
427     h2 = s->height - (y + h);
428     if (h2 < 0)
429         h2 = 0;
430     fill_rectangle(screen,
431                    s->xleft, s->ytop,
432                    w1, s->height,
433                    color);
434     fill_rectangle(screen,
435                    s->xleft + s->width - w2, s->ytop,
436                    w2, s->height,
437                    color);
438     fill_rectangle(screen,
439                    s->xleft + w1, s->ytop,
440                    s->width - w1 - w2, h1,
441                    color);
442     fill_rectangle(screen,
443                    s->xleft + w1, s->ytop + s->height - h2,
444                    s->width - w1 - w2, h2,
445                    color);
446 }
447 #endif
448
449 #define ALPHA_BLEND(a, oldp, newp, s)\
450 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
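/* ALPHA_BLEND is plain fixed-point compositing: (oldp*(255-a) + newp*a) / 255,
   so a == 0 keeps oldp and a == 255 gives newp. The shift s compensates for
   newp being a sum of 2^s chroma samples in the loops below, avoiding a
   separate averaging step. */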
451
452 #define RGBA_IN(r, g, b, a, s)\
453 {\
454     unsigned int v = ((const uint32_t *)(s))[0];\
455     a = (v >> 24) & 0xff;\
456     r = (v >> 16) & 0xff;\
457     g = (v >> 8) & 0xff;\
458     b = v & 0xff;\
459 }
460
461 #define YUVA_IN(y, u, v, a, s, pal)\
462 {\
463     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
464     a = (val >> 24) & 0xff;\
465     y = (val >> 16) & 0xff;\
466     u = (val >> 8) & 0xff;\
467     v = val & 0xff;\
468 }
469
470 #define YUVA_OUT(d, y, u, v, a)\
471 {\
472     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
473 }
474
475
476 #define BPP 1
477
478 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
479 {
480     int wrap, wrap3, width2, skip2;
481     int y, u, v, a, u1, v1, a1, w, h;
482     uint8_t *lum, *cb, *cr;
483     const uint8_t *p;
484     const uint32_t *pal;
485     int dstx, dsty, dstw, dsth;
486
487     dstw = av_clip(rect->w, 0, imgw);
488     dsth = av_clip(rect->h, 0, imgh);
489     dstx = av_clip(rect->x, 0, imgw - dstw);
490     dsty = av_clip(rect->y, 0, imgh - dsth);
491     lum = dst->data[0] + dsty * dst->linesize[0];
492     cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
493     cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
494
495     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
496     skip2 = dstx >> 1;
497     wrap = dst->linesize[0];
498     wrap3 = rect->pict.linesize[0];
499     p = rect->pict.data[0];
500     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
501
502     if (dsty & 1) {
503         lum += dstx;
504         cb += skip2;
505         cr += skip2;
506
507         if (dstx & 1) {
508             YUVA_IN(y, u, v, a, p, pal);
509             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
510             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
511             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
512             cb++;
513             cr++;
514             lum++;
515             p += BPP;
516         }
517         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
518             YUVA_IN(y, u, v, a, p, pal);
519             u1 = u;
520             v1 = v;
521             a1 = a;
522             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
523
524             YUVA_IN(y, u, v, a, p + BPP, pal);
525             u1 += u;
526             v1 += v;
527             a1 += a;
528             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
529             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
530             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
531             cb++;
532             cr++;
533             p += 2 * BPP;
534             lum += 2;
535         }
536         if (w) {
537             YUVA_IN(y, u, v, a, p, pal);
538             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
539             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
540             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
541             p++;
542             lum++;
543         }
544         p += wrap3 - dstw * BPP;
545         lum += wrap - dstw - dstx;
546         cb += dst->linesize[1] - width2 - skip2;
547         cr += dst->linesize[2] - width2 - skip2;
548     }
549     for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
550         lum += dstx;
551         cb += skip2;
552         cr += skip2;
553
554         if (dstx & 1) {
555             YUVA_IN(y, u, v, a, p, pal);
556             u1 = u;
557             v1 = v;
558             a1 = a;
559             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
560             p += wrap3;
561             lum += wrap;
562             YUVA_IN(y, u, v, a, p, pal);
563             u1 += u;
564             v1 += v;
565             a1 += a;
566             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
567             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
568             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
569             cb++;
570             cr++;
571             p += -wrap3 + BPP;
572             lum += -wrap + 1;
573         }
574         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
575             YUVA_IN(y, u, v, a, p, pal);
576             u1 = u;
577             v1 = v;
578             a1 = a;
579             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
580
581             YUVA_IN(y, u, v, a, p + BPP, pal);
582             u1 += u;
583             v1 += v;
584             a1 += a;
585             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
586             p += wrap3;
587             lum += wrap;
588
589             YUVA_IN(y, u, v, a, p, pal);
590             u1 += u;
591             v1 += v;
592             a1 += a;
593             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
594
595             YUVA_IN(y, u, v, a, p + BPP, pal);
596             u1 += u;
597             v1 += v;
598             a1 += a;
599             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
600
601             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
602             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
603
604             cb++;
605             cr++;
606             p += -wrap3 + 2 * BPP;
607             lum += -wrap + 2;
608         }
609         if (w) {
610             YUVA_IN(y, u, v, a, p, pal);
611             u1 = u;
612             v1 = v;
613             a1 = a;
614             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
615             p += wrap3;
616             lum += wrap;
617             YUVA_IN(y, u, v, a, p, pal);
618             u1 += u;
619             v1 += v;
620             a1 += a;
621             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
622             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
623             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
624             cb++;
625             cr++;
626             p += -wrap3 + BPP;
627             lum += -wrap + 1;
628         }
629         p += wrap3 + (wrap3 - dstw * BPP);
630         lum += wrap + (wrap - dstw - dstx);
631         cb += dst->linesize[1] - width2 - skip2;
632         cr += dst->linesize[2] - width2 - skip2;
633     }
634     /* handle odd height */
635     if (h) {
636         lum += dstx;
637         cb += skip2;
638         cr += skip2;
639
640         if (dstx & 1) {
641             YUVA_IN(y, u, v, a, p, pal);
642             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
643             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
644             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
645             cb++;
646             cr++;
647             lum++;
648             p += BPP;
649         }
650         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
651             YUVA_IN(y, u, v, a, p, pal);
652             u1 = u;
653             v1 = v;
654             a1 = a;
655             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
656
657             YUVA_IN(y, u, v, a, p + BPP, pal);
658             u1 += u;
659             v1 += v;
660             a1 += a;
661             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
662             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
663             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
664             cb++;
665             cr++;
666             p += 2 * BPP;
667             lum += 2;
668         }
669         if (w) {
670             YUVA_IN(y, u, v, a, p, pal);
671             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
672             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
673             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
674         }
675     }
676 }
677
678 static void free_subpicture(SubPicture *sp)
679 {
680     avsubtitle_free(&sp->sub);
681 }
682
683 static void video_image_display(VideoState *is)
684 {
685     VideoPicture *vp;
686     SubPicture *sp;
687     AVPicture pict;
688     float aspect_ratio;
689     int width, height, x, y;
690     SDL_Rect rect;
691     int i;
692
693     vp = &is->pictq[is->pictq_rindex];
694     if (vp->bmp) {
695 #if CONFIG_AVFILTER
696          if (vp->picref->video->pixel_aspect.num == 0)
697              aspect_ratio = 0;
698          else
699              aspect_ratio = av_q2d(vp->picref->video->pixel_aspect);
700 #else
701
702         /* XXX: use variable in the frame */
703         if (is->video_st->sample_aspect_ratio.num)
704             aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
705         else if (is->video_st->codec->sample_aspect_ratio.num)
706             aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
707         else
708             aspect_ratio = 0;
709 #endif
710         if (aspect_ratio <= 0.0)
711             aspect_ratio = 1.0;
712         aspect_ratio *= (float)vp->width / (float)vp->height;
713
714         if (is->subtitle_st)
715         {
716             if (is->subpq_size > 0)
717             {
718                 sp = &is->subpq[is->subpq_rindex];
719
720                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
721                 {
722                     SDL_LockYUVOverlay (vp->bmp);
723
724                     pict.data[0] = vp->bmp->pixels[0];
725                     pict.data[1] = vp->bmp->pixels[2];
726                     pict.data[2] = vp->bmp->pixels[1];
727
728                     pict.linesize[0] = vp->bmp->pitches[0];
729                     pict.linesize[1] = vp->bmp->pitches[2];
730                     pict.linesize[2] = vp->bmp->pitches[1];
731
732                     for (i = 0; i < sp->sub.num_rects; i++)
733                         blend_subrect(&pict, sp->sub.rects[i],
734                                       vp->bmp->w, vp->bmp->h);
735
736                     SDL_UnlockYUVOverlay (vp->bmp);
737                 }
738             }
739         }
740
741
742         /* XXX: we suppose the screen has a 1.0 pixel ratio */
743         height = is->height;
744         width = ((int)rint(height * aspect_ratio)) & ~1;
745         if (width > is->width) {
746             width = is->width;
747             height = ((int)rint(width / aspect_ratio)) & ~1;
748         }
749         x = (is->width - width) / 2;
750         y = (is->height - height) / 2;
751         if (!is->no_background) {
752             /* fill the background */
753             //            fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
754         } else {
755             is->no_background = 0;
756         }
757         rect.x = is->xleft + x;
758         rect.y = is->ytop  + y;
759         rect.w = width;
760         rect.h = height;
761         SDL_DisplayYUVOverlay(vp->bmp, &rect);
762     } else {
763 #if 0
764         fill_rectangle(screen,
765                        is->xleft, is->ytop, is->width, is->height,
766                        QERGB(0x00, 0x00, 0x00));
767 #endif
768     }
769 }
770
771 static inline int compute_mod(int a, int b)
772 {
773     a = a % b;
774     if (a >= 0)
775         return a;
776     else
777         return a + b;
778 }
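/* Unlike the C % operator, this always yields a result in [0, b), e.g.
   compute_mod(-3, 10) == 7; it is used for circular indexing into
   sample_array below. */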
779
780 static void video_audio_display(VideoState *s)
781 {
782     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
783     int ch, channels, h, h2, bgcolor, fgcolor;
784     int64_t time_diff;
785     int rdft_bits, nb_freq;
786
787     for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
788         ;
789     nb_freq= 1<<(rdft_bits-1);
790
791     /* compute display index : center on currently output samples */
792     channels = s->audio_st->codec->channels;
793     nb_display_channels = channels;
794     if (!s->paused) {
795         int data_used= s->show_audio==1 ? s->width : (2*nb_freq);
796         n = 2 * channels;
797         delay = audio_write_get_buf_size(s);
798         delay /= n;
799
800         /* to be more precise, we take into account the time spent since
801            the last buffer computation */
802         if (audio_callback_time) {
803             time_diff = av_gettime() - audio_callback_time;
804             delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
805         }
806
807         delay += 2*data_used;
808         if (delay < data_used)
809             delay = data_used;
810
811         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
812         if(s->show_audio==1){
813             h= INT_MIN;
814             for(i=0; i<1000; i+=channels){
815                 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
816                 int a= s->sample_array[idx];
817                 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
818                 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
819                 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
820                 int score= a-d;
821                 if(h<score && (b^c)<0){
822                     h= score;
823                     i_start= idx;
824                 }
825             }
826         }
827
828         s->last_i_start = i_start;
829     } else {
830         i_start = s->last_i_start;
831     }
832
833     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
834     if(s->show_audio==1){
835         fill_rectangle(screen,
836                        s->xleft, s->ytop, s->width, s->height,
837                        bgcolor);
838
839         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
840
841         /* total height for one channel */
842         h = s->height / nb_display_channels;
843         /* graph height / 2 */
844         h2 = (h * 9) / 20;
845         for(ch = 0;ch < nb_display_channels; ch++) {
846             i = i_start + ch;
847             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
848             for(x = 0; x < s->width; x++) {
849                 y = (s->sample_array[i] * h2) >> 15;
850                 if (y < 0) {
851                     y = -y;
852                     ys = y1 - y;
853                 } else {
854                     ys = y1;
855                 }
856                 fill_rectangle(screen,
857                                s->xleft + x, ys, 1, y,
858                                fgcolor);
859                 i += channels;
860                 if (i >= SAMPLE_ARRAY_SIZE)
861                     i -= SAMPLE_ARRAY_SIZE;
862             }
863         }
864
865         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
866
867         for(ch = 1;ch < nb_display_channels; ch++) {
868             y = s->ytop + ch * h;
869             fill_rectangle(screen,
870                            s->xleft, y, s->width, 1,
871                            fgcolor);
872         }
873         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
874     }else{
875         nb_display_channels= FFMIN(nb_display_channels, 2);
876         if(rdft_bits != s->rdft_bits){
877             av_rdft_end(s->rdft);
878             av_free(s->rdft_data);
879             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
880             s->rdft_bits= rdft_bits;
881             s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
882         }
883         {
884             FFTSample *data[2];
885             for(ch = 0;ch < nb_display_channels; ch++) {
886                 data[ch] = s->rdft_data + 2*nb_freq*ch;
887                 i = i_start + ch;
888                 for(x = 0; x < 2*nb_freq; x++) {
889                     double w= (x-nb_freq)*(1.0/nb_freq);
890                     data[ch][x]= s->sample_array[i]*(1.0-w*w);
891                     i += channels;
892                     if (i >= SAMPLE_ARRAY_SIZE)
893                         i -= SAMPLE_ARRAY_SIZE;
894                 }
895                 av_rdft_calc(s->rdft, data[ch]);
896             }
897             // Least efficient way to do this; we should of course access the data directly, but it's more than fast enough.
898             for(y=0; y<s->height; y++){
899                 double w= 1/sqrt(nb_freq);
900                 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
901                 int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
902                        + data[1][2*y+1]*data[1][2*y+1])) : a;
903                 a= FFMIN(a,255);
904                 b= FFMIN(b,255);
905                 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
906
907                 fill_rectangle(screen,
908                             s->xpos, s->height-y, 1, 1,
909                             fgcolor);
910             }
911         }
912         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
913         s->xpos++;
914         if(s->xpos >= s->width)
915             s->xpos= s->xleft;
916     }
917 }
918
919 static int video_open(VideoState *is){
920     int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
921     int w,h;
922
923     if(is_full_screen) flags |= SDL_FULLSCREEN;
924     else               flags |= SDL_RESIZABLE;
925
926     if (is_full_screen && fs_screen_width) {
927         w = fs_screen_width;
928         h = fs_screen_height;
929     } else if(!is_full_screen && screen_width){
930         w = screen_width;
931         h = screen_height;
932 #if CONFIG_AVFILTER
933     }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
934         w = is->out_video_filter->inputs[0]->w;
935         h = is->out_video_filter->inputs[0]->h;
936 #else
937     }else if (is->video_st && is->video_st->codec->width){
938         w = is->video_st->codec->width;
939         h = is->video_st->codec->height;
940 #endif
941     } else {
942         w = 640;
943         h = 480;
944     }
945     if(screen && is->width == screen->w && screen->w == w
946        && is->height== screen->h && screen->h == h)
947         return 0;
948
949 #ifndef __APPLE__
950     screen = SDL_SetVideoMode(w, h, 0, flags);
951 #else
952     /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
953     screen = SDL_SetVideoMode(w, h, 24, flags);
954 #endif
955     if (!screen) {
956         fprintf(stderr, "SDL: could not set video mode - exiting\n");
957         return -1;
958     }
959     if (!window_title)
960         window_title = input_filename;
961     SDL_WM_SetCaption(window_title, window_title);
962
963     is->width = screen->w;
964     is->height = screen->h;
965
966     return 0;
967 }
968
969 /* display the current picture, if any */
970 static void video_display(VideoState *is)
971 {
972     if(!screen)
973         video_open(cur_stream);
974     if (is->audio_st && is->show_audio)
975         video_audio_display(is);
976     else if (is->video_st)
977         video_image_display(is);
978 }
979
980 static int refresh_thread(void *opaque)
981 {
982     VideoState *is= opaque;
983     while(!is->abort_request){
984         SDL_Event event;
985         event.type = FF_REFRESH_EVENT;
986         event.user.data1 = opaque;
987         if(!is->refresh){
988             is->refresh=1;
989             SDL_PushEvent(&event);
990         }
991         usleep(is->audio_st && is->show_audio ? rdftspeed*1000 : 5000); //FIXME ideally we should wait the correct time but SDL's event passing is so slow it would be silly
992     }
993     return 0;
994 }
995
996 /* get the current audio clock value */
997 static double get_audio_clock(VideoState *is)
998 {
999     double pts;
1000     int hw_buf_size, bytes_per_sec;
1001     pts = is->audio_clock;
1002     hw_buf_size = audio_write_get_buf_size(is);
1003     bytes_per_sec = 0;
1004     if (is->audio_st) {
1005         bytes_per_sec = is->audio_st->codec->sample_rate *
1006             2 * is->audio_st->codec->channels;
1007     }
1008     if (bytes_per_sec)
1009         pts -= (double)hw_buf_size / bytes_per_sec;
1010     return pts;
1011 }
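/* Worked example of the correction above (assuming 44100 Hz stereo S16, i.e.
   bytes_per_sec = 44100 * 2 * 2 = 176400): 8192 bytes still queued for the
   audio device move the returned clock about 46 ms behind is->audio_clock,
   which tracks the end of the data already handed to the output. */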
1012
1013 /* get the current video clock value */
1014 static double get_video_clock(VideoState *is)
1015 {
1016     if (is->paused) {
1017         return is->video_current_pts;
1018     } else {
1019         return is->video_current_pts_drift + av_gettime() / 1000000.0;
1020     }
1021 }
1022
1023 /* get the current external clock value */
1024 static double get_external_clock(VideoState *is)
1025 {
1026     int64_t ti;
1027     ti = av_gettime();
1028     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1029 }
1030
1031 /* get the current master clock value */
1032 static double get_master_clock(VideoState *is)
1033 {
1034     double val;
1035
1036     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1037         if (is->video_st)
1038             val = get_video_clock(is);
1039         else
1040             val = get_audio_clock(is);
1041     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1042         if (is->audio_st)
1043             val = get_audio_clock(is);
1044         else
1045             val = get_video_clock(is);
1046     } else {
1047         val = get_external_clock(is);
1048     }
1049     return val;
1050 }
1051
1052 /* seek in the stream */
1053 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1054 {
1055     if (!is->seek_req) {
1056         is->seek_pos = pos;
1057         is->seek_rel = rel;
1058         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1059         if (seek_by_bytes)
1060             is->seek_flags |= AVSEEK_FLAG_BYTE;
1061         is->seek_req = 1;
1062     }
1063 }
1064
1065 /* pause or resume the video */
1066 static void stream_pause(VideoState *is)
1067 {
1068     if (is->paused) {
1069         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1070         if(is->read_pause_return != AVERROR(ENOSYS)){
1071             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1072         }
1073         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1074     }
1075     is->paused = !is->paused;
1076 }
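/* Note on the unpause branch above: since video_current_pts_drift equals
   video_current_pts minus the time of its last update, the added term is just
   the wall-clock time elapsed while paused, so frame_timer is pushed forward
   and the next frame is not treated as late. */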
1077
1078 static double compute_target_time(double frame_current_pts, VideoState *is)
1079 {
1080     double delay, sync_threshold, diff = 0;
1081
1082     /* compute nominal delay */
1083     delay = frame_current_pts - is->frame_last_pts;
1084     if (delay <= 0 || delay >= 10.0) {
1085         /* if incorrect delay, use previous one */
1086         delay = is->frame_last_delay;
1087     } else {
1088         is->frame_last_delay = delay;
1089     }
1090     is->frame_last_pts = frame_current_pts;
1091
1092     /* update delay to follow master synchronisation source */
1093     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1094          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1095         /* if video is slave, we try to correct big delays by
1096            duplicating or deleting a frame */
1097         diff = get_video_clock(is) - get_master_clock(is);
1098
1099         /* skip or repeat frame. We take into account the
1100            delay to compute the threshold. I still don't know
1101            if it is the best guess */
1102         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1103         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1104             if (diff <= -sync_threshold)
1105                 delay = 0;
1106             else if (diff >= sync_threshold)
1107                 delay = 2 * delay;
1108         }
1109     }
1110     is->frame_timer += delay;
1111 #if defined(DEBUG_SYNC)
1112     printf("video: delay=%0.3f pts=%0.3f A-V=%f\n",
1113             delay, frame_current_pts, -diff);
1114 #endif
1115
1116     return is->frame_timer;
1117 }
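/* Summary of the policy above: when video is not the master clock, a frame
   that is late by more than sync_threshold gets a zero delay (it is shown
   immediately and may then be dropped by the caller), while a frame that is
   early by more than the threshold has its delay doubled so playback drifts
   back into sync. */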
1118
1119 /* called to display each frame */
1120 static void video_refresh_timer(void *opaque)
1121 {
1122     VideoState *is = opaque;
1123     VideoPicture *vp;
1124
1125     SubPicture *sp, *sp2;
1126
1127     if (is->video_st) {
1128 retry:
1129         if (is->pictq_size == 0) {
1130             // nothing to do, no picture to display in the queue
1131         } else {
1132             double time= av_gettime()/1000000.0;
1133             double next_target;
1134             /* dequeue the picture */
1135             vp = &is->pictq[is->pictq_rindex];
1136
1137             if(time < vp->target_clock)
1138                 return;
1139             /* update current video pts */
1140             is->video_current_pts = vp->pts;
1141             is->video_current_pts_drift = is->video_current_pts - time;
1142             is->video_current_pos = vp->pos;
1143             if(is->pictq_size > 1){
1144                 VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
1145                 assert(nextvp->target_clock >= vp->target_clock);
1146                 next_target= nextvp->target_clock;
1147             }else{
1148                 next_target= vp->target_clock + is->video_clock - vp->pts; //FIXME pass durations cleanly
1149             }
1150             if(framedrop && time > next_target){
1151                 is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
1152                 if(is->pictq_size > 1 || time > next_target + 0.5){
1153                     /* update queue size and signal for next picture */
1154                     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1155                         is->pictq_rindex = 0;
1156
1157                     SDL_LockMutex(is->pictq_mutex);
1158                     is->pictq_size--;
1159                     SDL_CondSignal(is->pictq_cond);
1160                     SDL_UnlockMutex(is->pictq_mutex);
1161                     goto retry;
1162                 }
1163             }
1164
1165             if(is->subtitle_st) {
1166                 if (is->subtitle_stream_changed) {
1167                     SDL_LockMutex(is->subpq_mutex);
1168
1169                     while (is->subpq_size) {
1170                         free_subpicture(&is->subpq[is->subpq_rindex]);
1171
1172                         /* update queue size and signal for next picture */
1173                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1174                             is->subpq_rindex = 0;
1175
1176                         is->subpq_size--;
1177                     }
1178                     is->subtitle_stream_changed = 0;
1179
1180                     SDL_CondSignal(is->subpq_cond);
1181                     SDL_UnlockMutex(is->subpq_mutex);
1182                 } else {
1183                     if (is->subpq_size > 0) {
1184                         sp = &is->subpq[is->subpq_rindex];
1185
1186                         if (is->subpq_size > 1)
1187                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1188                         else
1189                             sp2 = NULL;
1190
1191                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1192                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1193                         {
1194                             free_subpicture(sp);
1195
1196                             /* update queue size and signal for next picture */
1197                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1198                                 is->subpq_rindex = 0;
1199
1200                             SDL_LockMutex(is->subpq_mutex);
1201                             is->subpq_size--;
1202                             SDL_CondSignal(is->subpq_cond);
1203                             SDL_UnlockMutex(is->subpq_mutex);
1204                         }
1205                     }
1206                 }
1207             }
1208
1209             /* display picture */
1210             if (!display_disable)
1211                 video_display(is);
1212
1213             /* update queue size and signal for next picture */
1214             if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1215                 is->pictq_rindex = 0;
1216
1217             SDL_LockMutex(is->pictq_mutex);
1218             is->pictq_size--;
1219             SDL_CondSignal(is->pictq_cond);
1220             SDL_UnlockMutex(is->pictq_mutex);
1221         }
1222     } else if (is->audio_st) {
1223         /* draw the next audio frame */
1224
1225         /* if only audio stream, then display the audio bars (better
1226            than nothing, just to test the implementation) */
1227
1228         /* display picture */
1229         if (!display_disable)
1230             video_display(is);
1231     }
1232     if (show_status) {
1233         static int64_t last_time;
1234         int64_t cur_time;
1235         int aqsize, vqsize, sqsize;
1236         double av_diff;
1237
1238         cur_time = av_gettime();
1239         if (!last_time || (cur_time - last_time) >= 30000) {
1240             aqsize = 0;
1241             vqsize = 0;
1242             sqsize = 0;
1243             if (is->audio_st)
1244                 aqsize = is->audioq.size;
1245             if (is->video_st)
1246                 vqsize = is->videoq.size;
1247             if (is->subtitle_st)
1248                 sqsize = is->subtitleq.size;
1249             av_diff = 0;
1250             if (is->audio_st && is->video_st)
1251                 av_diff = get_audio_clock(is) - get_video_clock(is);
1252             printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1253                    get_master_clock(is), av_diff, FFMAX(is->skip_frames-1, 0), aqsize / 1024, vqsize / 1024, sqsize, is->pts_ctx.num_faulty_dts, is->pts_ctx.num_faulty_pts);
1254             fflush(stdout);
1255             last_time = cur_time;
1256         }
1257     }
1258 }
1259
1260 static void stream_close(VideoState *is)
1261 {
1262     VideoPicture *vp;
1263     int i;
1264     /* XXX: use a special url_shutdown call to abort parse cleanly */
1265     is->abort_request = 1;
1266     SDL_WaitThread(is->parse_tid, NULL);
1267     SDL_WaitThread(is->refresh_tid, NULL);
1268
1269     /* free all pictures */
1270     for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
1271         vp = &is->pictq[i];
1272 #if CONFIG_AVFILTER
1273         if (vp->picref) {
1274             avfilter_unref_buffer(vp->picref);
1275             vp->picref = NULL;
1276         }
1277 #endif
1278         if (vp->bmp) {
1279             SDL_FreeYUVOverlay(vp->bmp);
1280             vp->bmp = NULL;
1281         }
1282     }
1283     SDL_DestroyMutex(is->pictq_mutex);
1284     SDL_DestroyCond(is->pictq_cond);
1285     SDL_DestroyMutex(is->subpq_mutex);
1286     SDL_DestroyCond(is->subpq_cond);
1287 #if !CONFIG_AVFILTER
1288     if (is->img_convert_ctx)
1289         sws_freeContext(is->img_convert_ctx);
1290 #endif
1291     av_free(is);
1292 }
1293
1294 static void do_exit(void)
1295 {
1296     if (cur_stream) {
1297         stream_close(cur_stream);
1298         cur_stream = NULL;
1299     }
1300     uninit_opts();
1301 #if CONFIG_AVFILTER
1302     avfilter_uninit();
1303 #endif
1304     if (show_status)
1305         printf("\n");
1306     SDL_Quit();
1307     av_log(NULL, AV_LOG_QUIET, "");
1308     exit(0);
1309 }
1310
1311 /* allocate a picture (this needs to be done in the main thread to avoid
1312    potential locking problems) */
1313 static void alloc_picture(void *opaque)
1314 {
1315     VideoState *is = opaque;
1316     VideoPicture *vp;
1317
1318     vp = &is->pictq[is->pictq_windex];
1319
1320     if (vp->bmp)
1321         SDL_FreeYUVOverlay(vp->bmp);
1322
1323 #if CONFIG_AVFILTER
1324     if (vp->picref)
1325         avfilter_unref_buffer(vp->picref);
1326     vp->picref = NULL;
1327
1328     vp->width   = is->out_video_filter->inputs[0]->w;
1329     vp->height  = is->out_video_filter->inputs[0]->h;
1330     vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1331 #else
1332     vp->width   = is->video_st->codec->width;
1333     vp->height  = is->video_st->codec->height;
1334     vp->pix_fmt = is->video_st->codec->pix_fmt;
1335 #endif
1336
1337     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1338                                    SDL_YV12_OVERLAY,
1339                                    screen);
1340     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1341         /* SDL allocates a buffer smaller than requested if the video
1342          * overlay hardware is unable to support the requested size. */
1343         fprintf(stderr, "Error: the video system does not support an image\n"
1344                         "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1345                         "to reduce the image size.\n", vp->width, vp->height );
1346         do_exit();
1347     }
1348
1349     SDL_LockMutex(is->pictq_mutex);
1350     vp->allocated = 1;
1351     SDL_CondSignal(is->pictq_cond);
1352     SDL_UnlockMutex(is->pictq_mutex);
1353 }
1354
1355 /**
1356  * Queue a decoded frame into the picture queue for later display.
1357  * @param pts the dts of the pkt / pts of the frame, guessed if not known
1358  */
1359 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
1360 {
1361     VideoPicture *vp;
1362     int dst_pix_fmt;
1363 #if CONFIG_AVFILTER
1364     AVPicture pict_src;
1365 #endif
1366     /* wait until we have space to put a new picture */
1367     SDL_LockMutex(is->pictq_mutex);
1368
1369     if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1370         is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));
1371
1372     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1373            !is->videoq.abort_request) {
1374         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1375     }
1376     SDL_UnlockMutex(is->pictq_mutex);
1377
1378     if (is->videoq.abort_request)
1379         return -1;
1380
1381     vp = &is->pictq[is->pictq_windex];
1382
1383     /* alloc or resize hardware picture buffer */
1384     if (!vp->bmp ||
1385 #if CONFIG_AVFILTER
1386         vp->width  != is->out_video_filter->inputs[0]->w ||
1387         vp->height != is->out_video_filter->inputs[0]->h) {
1388 #else
1389         vp->width != is->video_st->codec->width ||
1390         vp->height != is->video_st->codec->height) {
1391 #endif
1392         SDL_Event event;
1393
1394         vp->allocated = 0;
1395
1396         /* the allocation must be done in the main thread to avoid
1397            locking problems */
1398         event.type = FF_ALLOC_EVENT;
1399         event.user.data1 = is;
1400         SDL_PushEvent(&event);
1401
1402         /* wait until the picture is allocated */
1403         SDL_LockMutex(is->pictq_mutex);
1404         while (!vp->allocated && !is->videoq.abort_request) {
1405             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1406         }
1407         SDL_UnlockMutex(is->pictq_mutex);
1408
1409         if (is->videoq.abort_request)
1410             return -1;
1411     }
1412
1413     /* if the frame is not skipped, then display it */
1414     if (vp->bmp) {
1415         AVPicture pict;
1416 #if CONFIG_AVFILTER
1417         if(vp->picref)
1418             avfilter_unref_buffer(vp->picref);
1419         vp->picref = src_frame->opaque;
1420 #endif
1421
1422         /* get a pointer on the bitmap */
1423         SDL_LockYUVOverlay (vp->bmp);
1424
1425         dst_pix_fmt = PIX_FMT_YUV420P;
1426         memset(&pict,0,sizeof(AVPicture));
1427         pict.data[0] = vp->bmp->pixels[0];
1428         pict.data[1] = vp->bmp->pixels[2];
1429         pict.data[2] = vp->bmp->pixels[1];
1430
1431         pict.linesize[0] = vp->bmp->pitches[0];
1432         pict.linesize[1] = vp->bmp->pitches[2];
1433         pict.linesize[2] = vp->bmp->pitches[1];
1434
1435 #if CONFIG_AVFILTER
1436         pict_src.data[0] = src_frame->data[0];
1437         pict_src.data[1] = src_frame->data[1];
1438         pict_src.data[2] = src_frame->data[2];
1439
1440         pict_src.linesize[0] = src_frame->linesize[0];
1441         pict_src.linesize[1] = src_frame->linesize[1];
1442         pict_src.linesize[2] = src_frame->linesize[2];
1443
1444         //FIXME use direct rendering
1445         av_picture_copy(&pict, &pict_src,
1446                         vp->pix_fmt, vp->width, vp->height);
1447 #else
1448         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1449         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1450             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1451             dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1452         if (is->img_convert_ctx == NULL) {
1453             fprintf(stderr, "Cannot initialize the conversion context\n");
1454             exit(1);
1455         }
1456         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1457                   0, vp->height, pict.data, pict.linesize);
1458 #endif
1459         /* update the bitmap content */
1460         SDL_UnlockYUVOverlay(vp->bmp);
1461
1462         vp->pts = pts;
1463         vp->pos = pos;
1464
1465         /* now we can update the picture count */
1466         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1467             is->pictq_windex = 0;
1468         SDL_LockMutex(is->pictq_mutex);
1469         vp->target_clock= compute_target_time(vp->pts, is);
1470
1471         is->pictq_size++;
1472         SDL_UnlockMutex(is->pictq_mutex);
1473     }
1474     return 0;
1475 }
1476
1477 /**
1478  * compute the exact PTS for the picture if it is omitted in the stream
1479  * @param pts1 the dts of the pkt / pts of the frame
1480  */
1481 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1482 {
1483     double frame_delay, pts;
1484
1485     pts = pts1;
1486
1487     if (pts != 0) {
1488         /* update video clock with pts, if present */
1489         is->video_clock = pts;
1490     } else {
1491         pts = is->video_clock;
1492     }
1493     /* update video clock for next frame */
1494     frame_delay = av_q2d(is->video_st->codec->time_base);
1495     /* for MPEG2, the frame can be repeated, so we update the
1496        clock accordingly */
1497     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1498     is->video_clock += frame_delay;
1499
1500 #if defined(DEBUG_SYNC) && 0
1501     printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1502            av_get_picture_type_char(src_frame->pict_type), pts, pts1);
1503 #endif
1504     return queue_picture(is, src_frame, pts, pos);
1505 }
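/* Example of the clock update above: with a codec time_base of 1/25,
   frame_delay is 0.04 s; a frame with repeat_pict == 1 advances video_clock
   by 1.5 * 0.04 = 0.06 s instead, matching one extra (repeated) field. */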
1506
1507 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1508 {
1509     int len1, got_picture, i;
1510
1511     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1512         return -1;
1513
1514     if (pkt->data == flush_pkt.data) {
1515         avcodec_flush_buffers(is->video_st->codec);
1516
1517         SDL_LockMutex(is->pictq_mutex);
1518         // Make sure there are no long delay timers (ideally we should just flush the queue, but that's harder)
1519         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1520             is->pictq[i].target_clock= 0;
1521         }
1522         while (is->pictq_size && !is->videoq.abort_request) {
1523             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1524         }
1525         is->video_current_pos = -1;
1526         SDL_UnlockMutex(is->pictq_mutex);
1527
1528         init_pts_correction(&is->pts_ctx);
1529         is->frame_last_pts = AV_NOPTS_VALUE;
1530         is->frame_last_delay = 0;
1531         is->frame_timer = (double)av_gettime() / 1000000.0;
1532         is->skip_frames = 1;
1533         is->skip_frames_index = 0;
1534         return 0;
1535     }
1536
1537     len1 = avcodec_decode_video2(is->video_st->codec,
1538                                  frame, &got_picture,
1539                                  pkt);
1540
1541     if (got_picture) {
1542         if (decoder_reorder_pts == -1) {
1543             *pts = guess_correct_pts(&is->pts_ctx, frame->pkt_pts, frame->pkt_dts);
1544         } else if (decoder_reorder_pts) {
1545             *pts = frame->pkt_pts;
1546         } else {
1547             *pts = frame->pkt_dts;
1548         }
1549
1550         if (*pts == AV_NOPTS_VALUE) {
1551             *pts = 0;
1552         }
1553
1554         is->skip_frames_index += 1;
1555         if(is->skip_frames_index >= is->skip_frames){
1556             is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
1557             return 1;
1558         }
1559
1560     }
1561     return 0;
1562 }
1563
1564 #if CONFIG_AVFILTER
1565 typedef struct {
1566     VideoState *is;
1567     AVFrame *frame;
1568     int use_dr1;
1569 } FilterPriv;
1570
1571 static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
1572 {
1573     AVFilterContext *ctx = codec->opaque;
1574     AVFilterBufferRef  *ref;
1575     int perms = AV_PERM_WRITE;
1576     int i, w, h, stride[4];
1577     unsigned edge;
1578     int pixel_size;
1579
1580     if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
1581         perms |= AV_PERM_NEG_LINESIZES;
1582
1583     if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
1584         if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
1585         if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
1586         if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
1587     }
1588     if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;
1589
1590     w = codec->width;
1591     h = codec->height;
1592     avcodec_align_dimensions2(codec, &w, &h, stride);
1593     edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
1594     w += edge << 1;
1595     h += edge << 1;
1596
1597     if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
1598         return -1;
1599
1600     pixel_size = av_pix_fmt_descriptors[ref->format].comp[0].step_minus1+1;
1601     ref->video->w = codec->width;
1602     ref->video->h = codec->height;
1603     for(i = 0; i < 4; i ++) {
1604         unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
1605         unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;
1606
1607         if (ref->data[i]) {
1608             ref->data[i]    += ((edge * pixel_size) >> hshift) + ((edge * ref->linesize[i]) >> vshift);
1609         }
1610         pic->data[i]     = ref->data[i];
1611         pic->linesize[i] = ref->linesize[i];
1612     }
1613     pic->opaque = ref;
1614     pic->age    = INT_MAX;
1615     pic->type   = FF_BUFFER_TYPE_USER;
1616     pic->reordered_opaque = codec->reordered_opaque;
1617     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1618     else           pic->pkt_pts = AV_NOPTS_VALUE;
1619     return 0;
1620 }
1621
1622 static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
1623 {
1624     memset(pic->data, 0, sizeof(pic->data));
1625     avfilter_unref_buffer(pic->opaque);
1626 }
1627
1628 static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
1629 {
1630     AVFilterBufferRef *ref = pic->opaque;
1631
1632     if (pic->data[0] == NULL) {
1633         pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
1634         return codec->get_buffer(codec, pic);
1635     }
1636
1637     if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
1638         (codec->pix_fmt != ref->format)) {
1639         av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
1640         return -1;
1641     }
1642
1643     pic->reordered_opaque = codec->reordered_opaque;
1644     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1645     else           pic->pkt_pts = AV_NOPTS_VALUE;
1646     return 0;
1647 }
1648
1649 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1650 {
1651     FilterPriv *priv = ctx->priv;
1652     AVCodecContext *codec;
1653     if(!opaque) return -1;
1654
1655     priv->is = opaque;
1656     codec    = priv->is->video_st->codec;
1657     codec->opaque = ctx;
1658     if(codec->codec->capabilities & CODEC_CAP_DR1) {
1659         priv->use_dr1 = 1;
1660         codec->get_buffer     = input_get_buffer;
1661         codec->release_buffer = input_release_buffer;
1662         codec->reget_buffer   = input_reget_buffer;
1663         codec->thread_safe_callbacks = 1;
1664     }
1665
1666     priv->frame = avcodec_alloc_frame();
1667
1668     return 0;
1669 }
1670
1671 static void input_uninit(AVFilterContext *ctx)
1672 {
1673     FilterPriv *priv = ctx->priv;
1674     av_free(priv->frame);
1675 }
1676
1677 static int input_request_frame(AVFilterLink *link)
1678 {
1679     FilterPriv *priv = link->src->priv;
1680     AVFilterBufferRef *picref;
1681     int64_t pts = 0;
1682     AVPacket pkt;
1683     int ret;
1684
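         /* Drain the video packet queue through the decoder: get_video_frame()
            returns 0 while it is still consuming packets (these are freed and
            the loop continues), 1 once a displayable frame is ready, and a
            negative value on abort. */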
1685     while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
1686         av_free_packet(&pkt);
1687     if (ret < 0)
1688         return -1;
1689
1690     if(priv->use_dr1) {
1691         picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
1692     } else {
1693         picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
1694         av_image_copy(picref->data, picref->linesize,
1695                       priv->frame->data, priv->frame->linesize,
1696                       picref->format, link->w, link->h);
1697     }
1698     av_free_packet(&pkt);
1699
1700     picref->pts = pts;
1701     picref->pos = pkt.pos;
1702     picref->video->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio;
1703     avfilter_start_frame(link, picref);
1704     avfilter_draw_slice(link, 0, link->h, 1);
1705     avfilter_end_frame(link);
1706
1707     return 0;
1708 }
1709
1710 static int input_query_formats(AVFilterContext *ctx)
1711 {
1712     FilterPriv *priv = ctx->priv;
1713     enum PixelFormat pix_fmts[] = {
1714         priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1715     };
1716
1717     avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
1718     return 0;
1719 }
1720
1721 static int input_config_props(AVFilterLink *link)
1722 {
1723     FilterPriv *priv  = link->src->priv;
1724     AVCodecContext *c = priv->is->video_st->codec;
1725
1726     link->w = c->width;
1727     link->h = c->height;
1728     link->time_base = priv->is->video_st->time_base;
1729
1730     return 0;
1731 }
1732
1733 static AVFilter input_filter =
1734 {
1735     .name      = "ffplay_input",
1736
1737     .priv_size = sizeof(FilterPriv),
1738
1739     .init      = input_init,
1740     .uninit    = input_uninit,
1741
1742     .query_formats = input_query_formats,
1743
1744     .inputs    = (AVFilterPad[]) {{ .name = NULL }},
1745     .outputs   = (AVFilterPad[]) {{ .name = "default",
1746                                     .type = AVMEDIA_TYPE_VIDEO,
1747                                     .request_frame = input_request_frame,
1748                                     .config_props  = input_config_props, },
1749                                   { .name = NULL }},
1750 };
1751
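     /* Build the video filter graph.  The chain is
      *     ffplay_input (src) -> [user -vf filters, if any] -> ffsink (out):
      * "src" feeds decoded frames from the video stream into the graph and
      * "out" is the sink the video thread pulls filtered frames from. */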
1752 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1753 {
1754     char sws_flags_str[128];
1755     int ret;
1756     FFSinkContext ffsink_ctx = { .pix_fmt = PIX_FMT_YUV420P };
1757     AVFilterContext *filt_src = NULL, *filt_out = NULL;
1758     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1759     graph->scale_sws_opts = av_strdup(sws_flags_str);
1760
1761     if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
1762                                             NULL, is, graph)) < 0)
1763         goto the_end;
1764     if ((ret = avfilter_graph_create_filter(&filt_out, &ffsink, "out",
1765                                             NULL, &ffsink_ctx, graph)) < 0)
1766         goto the_end;
1767
1768     if(vfilters) {
1769         AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
1770         AVFilterInOut *inputs  = av_malloc(sizeof(AVFilterInOut));
1771
1772         outputs->name    = av_strdup("in");
1773         outputs->filter_ctx = filt_src;
1774         outputs->pad_idx = 0;
1775         outputs->next    = NULL;
1776
1777         inputs->name    = av_strdup("out");
1778         inputs->filter_ctx = filt_out;
1779         inputs->pad_idx = 0;
1780         inputs->next    = NULL;
1781
1782         if ((ret = avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL)) < 0)
1783             goto the_end;
1784         av_freep(&vfilters);
1785     } else {
1786         if ((ret = avfilter_link(filt_src, 0, filt_out, 0)) < 0)
1787             goto the_end;
1788     }
1789
1790     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1791         goto the_end;
1792
1793     is->out_video_filter = filt_out;
1794 the_end:
1795     return ret;
1796 }
1797
1798 #endif  /* CONFIG_AVFILTER */
1799
1800 static int video_thread(void *arg)
1801 {
1802     VideoState *is = arg;
1803     AVFrame *frame= avcodec_alloc_frame();
1804     int64_t pts_int;
1805     double pts;
1806     int ret;
1807
1808 #if CONFIG_AVFILTER
1809     AVFilterGraph *graph = avfilter_graph_alloc();
1810     AVFilterContext *filt_out = NULL;
1811     int64_t pos;
1812
1813     if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1814         goto the_end;
1815     filt_out = is->out_video_filter;
1816 #endif
1817
1818     for(;;) {
1819 #if !CONFIG_AVFILTER
1820         AVPacket pkt;
1821 #else
1822         AVFilterBufferRef *picref;
1823         AVRational tb;
1824 #endif
1825         while (is->paused && !is->videoq.abort_request)
1826             SDL_Delay(10);
1827 #if CONFIG_AVFILTER
1828         ret = get_filtered_video_frame(filt_out, frame, &picref, &tb);
1829         if (picref) {
1830             pts_int = picref->pts;
1831             pos     = picref->pos;
1832             frame->opaque = picref;
1833         }
1834
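             /* The filter graph may run in a different time base than the stream
                (e.g. after a filter that changes frame timing), so rescale the
                pts back into the stream time base before computing the display
                time: pts_stream = pts_filter * tb / stream_time_base, which is
                what av_rescale_q() computes without intermediate overflow. */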
1835         if (av_cmp_q(tb, is->video_st->time_base)) {
1836             av_unused int64_t pts1 = pts_int;
1837             pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1838             av_dlog(NULL, "video_thread(): "
1839                     "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1840                     tb.num, tb.den, pts1,
1841                     is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1842         }
1843 #else
1844         ret = get_video_frame(is, frame, &pts_int, &pkt);
1845 #endif
1846
1847         if (ret < 0) goto the_end;
1848
1849         if (!ret)
1850             continue;
1851
1852         pts = pts_int*av_q2d(is->video_st->time_base);
1853
1854 #if CONFIG_AVFILTER
1855         ret = output_picture2(is, frame, pts, pos);
1856 #else
1857         ret = output_picture2(is, frame, pts,  pkt.pos);
1858         av_free_packet(&pkt);
1859 #endif
1860         if (ret < 0)
1861             goto the_end;
1862
1863         if (step)
1864             if (cur_stream)
1865                 stream_pause(cur_stream);
1866     }
1867  the_end:
1868 #if CONFIG_AVFILTER
1869     avfilter_graph_free(&graph);
1870 #endif
1871     av_free(frame);
1872     return 0;
1873 }
1874
1875 static int subtitle_thread(void *arg)
1876 {
1877     VideoState *is = arg;
1878     SubPicture *sp;
1879     AVPacket pkt1, *pkt = &pkt1;
1880     int len1, got_subtitle;
1881     double pts;
1882     int i, j;
1883     int r, g, b, y, u, v, a;
1884
1885     for(;;) {
1886         while (is->paused && !is->subtitleq.abort_request) {
1887             SDL_Delay(10);
1888         }
1889         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1890             break;
1891
1892         if(pkt->data == flush_pkt.data){
1893             avcodec_flush_buffers(is->subtitle_st->codec);
1894             continue;
1895         }
1896         SDL_LockMutex(is->subpq_mutex);
1897         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1898                !is->subtitleq.abort_request) {
1899             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1900         }
1901         SDL_UnlockMutex(is->subpq_mutex);
1902
1903         if (is->subtitleq.abort_request)
1904             goto the_end;
1905
1906         sp = &is->subpq[is->subpq_windex];
1907
1908         /* NOTE: pts is the PTS of the _first_ subtitle beginning in
1909            this packet, if any */
1910         pts = 0;
1911         if (pkt->pts != AV_NOPTS_VALUE)
1912             pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1913
1914         len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
1915                                     &sp->sub, &got_subtitle,
1916                                     pkt);
1917 //            if (len1 < 0)
1918 //                break;
1919         if (got_subtitle && sp->sub.format == 0) {
1920             sp->pts = pts;
1921
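                 /* Bitmap subtitles arrive with an RGBA palette; convert each
                    palette entry to YUVA using the CCIR coefficients so it can
                    later be blended directly onto the YUV video picture. */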
1922             for (i = 0; i < sp->sub.num_rects; i++)
1923             {
1924                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1925                 {
1926                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1927                     y = RGB_TO_Y_CCIR(r, g, b);
1928                     u = RGB_TO_U_CCIR(r, g, b, 0);
1929                     v = RGB_TO_V_CCIR(r, g, b, 0);
1930                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1931                 }
1932             }
1933
1934             /* now we can update the picture count */
1935             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1936                 is->subpq_windex = 0;
1937             SDL_LockMutex(is->subpq_mutex);
1938             is->subpq_size++;
1939             SDL_UnlockMutex(is->subpq_mutex);
1940         }
1941         av_free_packet(pkt);
1942 //        if (step)
1943 //            if (cur_stream)
1944 //                stream_pause(cur_stream);
1945     }
1946  the_end:
1947     return 0;
1948 }
1949
1950 /* copy samples into the ring buffer used by the audio waveform display */
1951 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1952 {
1953     int size, len, channels;
1954
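         /* sample_array is used as a fixed-size ring buffer: new samples are
            copied at the current write index, wrapping around at
            SAMPLE_ARRAY_SIZE, so the waveform display always sees the most
            recent audio. */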
1955     channels = is->audio_st->codec->channels;
1956
1957     size = samples_size / sizeof(short);
1958     while (size > 0) {
1959         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1960         if (len > size)
1961             len = size;
1962         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1963         samples += len;
1964         is->sample_array_index += len;
1965         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1966             is->sample_array_index = 0;
1967         size -= len;
1968     }
1969 }
1970
1971 /* return the new audio buffer size (samples can be added or removed
1972    to get better sync when video or the external clock is the master) */
1973 static int synchronize_audio(VideoState *is, short *samples,
1974                              int samples_size1, double pts)
1975 {
1976     int n, samples_size;
1977     double ref_clock;
1978
1979     n = 2 * is->audio_st->codec->channels;
1980     samples_size = samples_size1;
1981
1982     /* if not master, then we try to remove or add samples to correct the clock */
1983     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1984          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1985         double diff, avg_diff;
1986         int wanted_size, min_size, max_size, nb_samples;
1987
1988         ref_clock = get_master_clock(is);
1989         diff = get_audio_clock(is) - ref_clock;
1990
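             /* audio_diff_cum is a geometrically weighted running sum of the A-V
                difference: cum = diff + coef * cum_prev with
                coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB); multiplying by
                (1 - coef) below turns it into an average in which the last
                AUDIO_DIFF_AVG_NB measurements carry roughly 99% of the weight. */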
1991         if (diff < AV_NOSYNC_THRESHOLD) {
1992             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1993             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1994                 /* not enough measures to have a correct estimate */
1995                 is->audio_diff_avg_count++;
1996             } else {
1997                 /* estimate the A-V difference */
1998                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1999
2000                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
2001                     wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
2002                     nb_samples = samples_size / n;
2003
2004                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
2005                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
2006                     if (wanted_size < min_size)
2007                         wanted_size = min_size;
2008                     else if (wanted_size > max_size)
2009                         wanted_size = max_size;
2010
2011                     /* add or remove samples to correct the sync */
2012                     if (wanted_size < samples_size) {
2013                         /* remove samples */
2014                         samples_size = wanted_size;
2015                     } else if (wanted_size > samples_size) {
2016                         uint8_t *samples_end, *q;
2017                         int nb;
2018
2019                         /* add samples */
2020                         nb = (wanted_size - samples_size);
2021                         samples_end = (uint8_t *)samples + samples_size - n;
2022                         q = samples_end + n;
2023                         while (nb > 0) {
2024                             memcpy(q, samples_end, n);
2025                             q += n;
2026                             nb -= n;
2027                         }
2028                         samples_size = wanted_size;
2029                     }
2030                 }
2031                 av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
2032                         diff, avg_diff, samples_size - samples_size1,
2033                         is->audio_clock, is->video_clock, is->audio_diff_threshold);
2034             }
2035         } else {
2036             /* the difference is too big: may be caused by initial PTS errors,
2037                so reset the A-V filter */
2038             is->audio_diff_avg_count = 0;
2039             is->audio_diff_cum = 0;
2040         }
2041     }
2042
2043     return samples_size;
2044 }
2045
2046 /* decode one audio frame and return its uncompressed size */
2047 static int audio_decode_frame(VideoState *is, double *pts_ptr)
2048 {
2049     AVPacket *pkt_temp = &is->audio_pkt_temp;
2050     AVPacket *pkt = &is->audio_pkt;
2051     AVCodecContext *dec= is->audio_st->codec;
2052     int n, len1, data_size;
2053     double pts;
2054
2055     for(;;) {
2056         /* NOTE: the audio packet can contain several frames */
2057         while (pkt_temp->size > 0) {
2058             data_size = sizeof(is->audio_buf1);
2059             len1 = avcodec_decode_audio3(dec,
2060                                         (int16_t *)is->audio_buf1, &data_size,
2061                                         pkt_temp);
2062             if (len1 < 0) {
2063                 /* if error, we skip the frame */
2064                 pkt_temp->size = 0;
2065                 break;
2066             }
2067
2068             pkt_temp->data += len1;
2069             pkt_temp->size -= len1;
2070             if (data_size <= 0)
2071                 continue;
2072
2073             if (dec->sample_fmt != is->audio_src_fmt) {
2074                 if (is->reformat_ctx)
2075                     av_audio_convert_free(is->reformat_ctx);
2076                 is->reformat_ctx= av_audio_convert_alloc(AV_SAMPLE_FMT_S16, 1,
2077                                                          dec->sample_fmt, 1, NULL, 0);
2078                 if (!is->reformat_ctx) {
2079                     fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
2080                         av_get_sample_fmt_name(dec->sample_fmt),
2081                         av_get_sample_fmt_name(AV_SAMPLE_FMT_S16));
2082                         break;
2083                 }
2084                 is->audio_src_fmt= dec->sample_fmt;
2085             }
2086
2087             if (is->reformat_ctx) {
2088                 const void *ibuf[6]= {is->audio_buf1};
2089                 void *obuf[6]= {is->audio_buf2};
2090                 int istride[6]= {av_get_bits_per_sample_fmt(dec->sample_fmt)/8};
2091                 int ostride[6]= {2};
2092                 int len= data_size/istride[0];
2093                 if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
2094                     printf("av_audio_convert() failed\n");
2095                     break;
2096                 }
2097                 is->audio_buf= is->audio_buf2;
2098                 /* FIXME: existing code assumes that data_size equals framesize*channels*2;
2099                           remove this legacy cruft */
2100                 data_size= len*2;
2101             }else{
2102                 is->audio_buf= is->audio_buf1;
2103             }
2104
2105             /* the pts is the current value of the running audio clock */
2106             pts = is->audio_clock;
2107             *pts_ptr = pts;
2108             n = 2 * dec->channels;
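                 /* data_size bytes of interleaved 16-bit samples last
                    data_size / (2 * channels * sample_rate) seconds; e.g. 4096
                    bytes of stereo S16 at 44100 Hz advance the clock by roughly
                    23 ms. */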
2109             is->audio_clock += (double)data_size /
2110                 (double)(n * dec->sample_rate);
2111 #if defined(DEBUG_SYNC)
2112             {
2113                 static double last_clock;
2114                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
2115                        is->audio_clock - last_clock,
2116                        is->audio_clock, pts);
2117                 last_clock = is->audio_clock;
2118             }
2119 #endif
2120             return data_size;
2121         }
2122
2123         /* free the current packet */
2124         if (pkt->data)
2125             av_free_packet(pkt);
2126
2127         if (is->paused || is->audioq.abort_request) {
2128             return -1;
2129         }
2130
2131         /* read next packet */
2132         if (packet_queue_get(&is->audioq, pkt, 1) < 0)
2133             return -1;
2134         if(pkt->data == flush_pkt.data){
2135             avcodec_flush_buffers(dec);
2136             continue;
2137         }
2138
2139         pkt_temp->data = pkt->data;
2140         pkt_temp->size = pkt->size;
2141
2142         /* update the audio clock with the pts, if available */
2143         if (pkt->pts != AV_NOPTS_VALUE) {
2144             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2145         }
2146     }
2147 }
2148
2149 /* get the amount of data left in the current audio output buffer, in bytes.
2150    With SDL we cannot get precise hardware buffer fullness information */
2151 static int audio_write_get_buf_size(VideoState *is)
2152 {
2153     return is->audio_buf_size - is->audio_buf_index;
2154 }
2155
2156
2157 /* prepare a new audio buffer */
2158 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2159 {
2160     VideoState *is = opaque;
2161     int audio_size, len1;
2162     double pts;
2163
2164     audio_callback_time = av_gettime();
2165
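     /* SDL asks for exactly `len` bytes: decode as many audio frames as needed
        to fill them, and fall back to 1024 bytes of silence when decoding fails
        so that playback timing is preserved even on errors. */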
2166     while (len > 0) {
2167         if (is->audio_buf_index >= is->audio_buf_size) {
2168            audio_size = audio_decode_frame(is, &pts);
2169            if (audio_size < 0) {
2170                 /* if error, just output silence */
2171                is->audio_buf = is->audio_buf1;
2172                is->audio_buf_size = 1024;
2173                memset(is->audio_buf, 0, is->audio_buf_size);
2174            } else {
2175                if (is->show_audio)
2176                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2177                audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2178                                               pts);
2179                is->audio_buf_size = audio_size;
2180            }
2181            is->audio_buf_index = 0;
2182         }
2183         len1 = is->audio_buf_size - is->audio_buf_index;
2184         if (len1 > len)
2185             len1 = len;
2186         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2187         len -= len1;
2188         stream += len1;
2189         is->audio_buf_index += len1;
2190     }
2191 }
2192
2193 /* open a given stream. Return 0 if OK */
2194 static int stream_component_open(VideoState *is, int stream_index)
2195 {
2196     AVFormatContext *ic = is->ic;
2197     AVCodecContext *avctx;
2198     AVCodec *codec;
2199     SDL_AudioSpec wanted_spec, spec;
2200
2201     if (stream_index < 0 || stream_index >= ic->nb_streams)
2202         return -1;
2203     avctx = ic->streams[stream_index]->codec;
2204
2205     /* request at most 2 decoded audio channels for output */
2206     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2207         if (avctx->channels > 0) {
2208             avctx->request_channels = FFMIN(2, avctx->channels);
2209         } else {
2210             avctx->request_channels = 2;
2211         }
2212     }
2213
2214     codec = avcodec_find_decoder(avctx->codec_id);
2215     avctx->debug_mv = debug_mv;
2216     avctx->debug = debug;
2217     avctx->workaround_bugs = workaround_bugs;
2218     avctx->lowres = lowres;
2219     if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2220     avctx->idct_algo= idct;
2221     if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2222     avctx->skip_frame= skip_frame;
2223     avctx->skip_idct= skip_idct;
2224     avctx->skip_loop_filter= skip_loop_filter;
2225     avctx->error_recognition= error_recognition;
2226     avctx->error_concealment= error_concealment;
2227     avctx->thread_count= thread_count;
2228
2229     set_context_opts(avctx, avcodec_opts[avctx->codec_type], 0, codec);
2230
2231     if (!codec ||
2232         avcodec_open(avctx, codec) < 0)
2233         return -1;
2234
2235     /* prepare audio output */
2236     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2237         wanted_spec.freq = avctx->sample_rate;
2238         wanted_spec.format = AUDIO_S16SYS;
2239         wanted_spec.channels = avctx->channels;
2240         wanted_spec.silence = 0;
2241         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2242         wanted_spec.callback = sdl_audio_callback;
2243         wanted_spec.userdata = is;
2244         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2245             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2246             return -1;
2247         }
2248         is->audio_hw_buf_size = spec.size;
2249         is->audio_src_fmt= AV_SAMPLE_FMT_S16;
2250     }
2251
2252     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2253     switch(avctx->codec_type) {
2254     case AVMEDIA_TYPE_AUDIO:
2255         is->audio_stream = stream_index;
2256         is->audio_st = ic->streams[stream_index];
2257         is->audio_buf_size = 0;
2258         is->audio_buf_index = 0;
2259
2260         /* init averaging filter */
2261         is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2262         is->audio_diff_avg_count = 0;
2263         /* since we do not have a precise enough measure of the audio FIFO fullness,
2264            we only correct audio sync when the error is larger than this threshold */
2265         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
2266
2267         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2268         packet_queue_init(&is->audioq);
2269         SDL_PauseAudio(0);
2270         break;
2271     case AVMEDIA_TYPE_VIDEO:
2272         is->video_stream = stream_index;
2273         is->video_st = ic->streams[stream_index];
2274
2275 //        is->video_current_pts_time = av_gettime();
2276
2277         packet_queue_init(&is->videoq);
2278         is->video_tid = SDL_CreateThread(video_thread, is);
2279         break;
2280     case AVMEDIA_TYPE_SUBTITLE:
2281         is->subtitle_stream = stream_index;
2282         is->subtitle_st = ic->streams[stream_index];
2283         packet_queue_init(&is->subtitleq);
2284
2285         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2286         break;
2287     default:
2288         break;
2289     }
2290     return 0;
2291 }
2292
2293 static void stream_component_close(VideoState *is, int stream_index)
2294 {
2295     AVFormatContext *ic = is->ic;
2296     AVCodecContext *avctx;
2297
2298     if (stream_index < 0 || stream_index >= ic->nb_streams)
2299         return;
2300     avctx = ic->streams[stream_index]->codec;
2301
2302     switch(avctx->codec_type) {
2303     case AVMEDIA_TYPE_AUDIO:
2304         packet_queue_abort(&is->audioq);
2305
2306         SDL_CloseAudio();
2307
2308         packet_queue_end(&is->audioq);
2309         if (is->reformat_ctx)
2310             av_audio_convert_free(is->reformat_ctx);
2311         is->reformat_ctx = NULL;
2312         break;
2313     case AVMEDIA_TYPE_VIDEO:
2314         packet_queue_abort(&is->videoq);
2315
2316         /* note: we also signal this condition to make sure we unblock the
2317            video thread in all cases */
2318         SDL_LockMutex(is->pictq_mutex);
2319         SDL_CondSignal(is->pictq_cond);
2320         SDL_UnlockMutex(is->pictq_mutex);
2321
2322         SDL_WaitThread(is->video_tid, NULL);
2323
2324         packet_queue_end(&is->videoq);
2325         break;
2326     case AVMEDIA_TYPE_SUBTITLE:
2327         packet_queue_abort(&is->subtitleq);
2328
2329         /* note: we also signal this condition to make sure we unblock the
2330            subtitle thread in all cases */
2331         SDL_LockMutex(is->subpq_mutex);
2332         is->subtitle_stream_changed = 1;
2333
2334         SDL_CondSignal(is->subpq_cond);
2335         SDL_UnlockMutex(is->subpq_mutex);
2336
2337         SDL_WaitThread(is->subtitle_tid, NULL);
2338
2339         packet_queue_end(&is->subtitleq);
2340         break;
2341     default:
2342         break;
2343     }
2344
2345     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2346     avcodec_close(avctx);
2347     switch(avctx->codec_type) {
2348     case AVMEDIA_TYPE_AUDIO:
2349         is->audio_st = NULL;
2350         is->audio_stream = -1;
2351         break;
2352     case AVMEDIA_TYPE_VIDEO:
2353         is->video_st = NULL;
2354         is->video_stream = -1;
2355         break;
2356     case AVMEDIA_TYPE_SUBTITLE:
2357         is->subtitle_st = NULL;
2358         is->subtitle_stream = -1;
2359         break;
2360     default:
2361         break;
2362     }
2363 }
2364
2365 /* since we have only one decoding thread, we can use a global
2366    variable instead of a thread local variable */
2367 static VideoState *global_video_state;
2368
2369 static int decode_interrupt_cb(void)
2370 {
2371     return (global_video_state && global_video_state->abort_request);
2372 }
2373
2374 /* this thread gets the stream from the disk or the network */
2375 static int decode_thread(void *arg)
2376 {
2377     VideoState *is = arg;
2378     AVFormatContext *ic;
2379     int err, i, ret;
2380     int st_index[AVMEDIA_TYPE_NB];
2381     AVPacket pkt1, *pkt = &pkt1;
2382     AVFormatParameters params, *ap = &params;
2383     int eof=0;
2384     int pkt_in_play_range = 0;
2385
2386     ic = avformat_alloc_context();
2387
2388     memset(st_index, -1, sizeof(st_index));
2389     is->video_stream = -1;
2390     is->audio_stream = -1;
2391     is->subtitle_stream = -1;
2392
2393     global_video_state = is;
2394     avio_set_interrupt_cb(decode_interrupt_cb);
2395
2396     memset(ap, 0, sizeof(*ap));
2397
2398     ap->prealloced_context = 1;
2399     ap->width = frame_width;
2400     ap->height= frame_height;
2401     ap->time_base= (AVRational){1, 25};
2402     ap->pix_fmt = frame_pix_fmt;
2403
2404     set_context_opts(ic, avformat_opts, AV_OPT_FLAG_DECODING_PARAM, NULL);
2405
2406     err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
2407     if (err < 0) {
2408         print_error(is->filename, err);
2409         ret = -1;
2410         goto fail;
2411     }
2412     is->ic = ic;
2413
2414     if(genpts)
2415         ic->flags |= AVFMT_FLAG_GENPTS;
2416
2417     err = av_find_stream_info(ic);
2418     if (err < 0) {
2419         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2420         ret = -1;
2421         goto fail;
2422     }
2423     if(ic->pb)
2424         ic->pb->eof_reached= 0; //FIXME hack: ffplay should probably not use url_feof() to test for the end
2425
2426     if(seek_by_bytes<0)
2427         seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2428
2429     /* if seeking was requested, execute it */
2430     if (start_time != AV_NOPTS_VALUE) {
2431         int64_t timestamp;
2432
2433         timestamp = start_time;
2434         /* add the stream start time */
2435         if (ic->start_time != AV_NOPTS_VALUE)
2436             timestamp += ic->start_time;
2437         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2438         if (ret < 0) {
2439             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2440                     is->filename, (double)timestamp / AV_TIME_BASE);
2441         }
2442     }
2443
2444     for (i = 0; i < ic->nb_streams; i++)
2445         ic->streams[i]->discard = AVDISCARD_ALL;
2446     if (!video_disable)
2447         st_index[AVMEDIA_TYPE_VIDEO] =
2448             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2449                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2450     if (!audio_disable)
2451         st_index[AVMEDIA_TYPE_AUDIO] =
2452             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2453                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2454                                 st_index[AVMEDIA_TYPE_VIDEO],
2455                                 NULL, 0);
2456     if (!video_disable)
2457         st_index[AVMEDIA_TYPE_SUBTITLE] =
2458             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2459                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2460                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2461                                  st_index[AVMEDIA_TYPE_AUDIO] :
2462                                  st_index[AVMEDIA_TYPE_VIDEO]),
2463                                 NULL, 0);
2464     if (show_status) {
2465         av_dump_format(ic, 0, is->filename, 0);
2466     }
2467
2468     /* open the streams */
2469     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2470         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2471     }
2472
2473     ret=-1;
2474     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2475         ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2476     }
2477     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2478     if(ret<0) {
2479         if (!display_disable)
2480             is->show_audio = 2;
2481     }
2482
2483     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2484         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2485     }
2486
2487     if (is->video_stream < 0 && is->audio_stream < 0) {
2488         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2489         ret = -1;
2490         goto fail;
2491     }
2492
2493     for(;;) {
2494         if (is->abort_request)
2495             break;
2496         if (is->paused != is->last_paused) {
2497             is->last_paused = is->paused;
2498             if (is->paused)
2499                 is->read_pause_return= av_read_pause(ic);
2500             else
2501                 av_read_play(ic);
2502         }
2503 #if CONFIG_RTSP_DEMUXER
2504         if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2505             /* wait 10 ms to avoid trying to get another packet */
2506             /* XXX: horrible */
2507             SDL_Delay(10);
2508             continue;
2509         }
2510 #endif
2511         if (is->seek_req) {
2512             int64_t seek_target= is->seek_pos;
2513             int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2514             int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2515 //FIXME the +-2 is due to rounding being not done in the correct direction in generation
2516 //      of the seek_pos/seek_rel variables
2517
2518             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2519             if (ret < 0) {
2520                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2521             }else{
2522                 if (is->audio_stream >= 0) {
2523                     packet_queue_flush(&is->audioq);
2524                     packet_queue_put(&is->audioq, &flush_pkt);
2525                 }
2526                 if (is->subtitle_stream >= 0) {
2527                     packet_queue_flush(&is->subtitleq);
2528                     packet_queue_put(&is->subtitleq, &flush_pkt);
2529                 }
2530                 if (is->video_stream >= 0) {
2531                     packet_queue_flush(&is->videoq);
2532                     packet_queue_put(&is->videoq, &flush_pkt);
2533                 }
2534             }
2535             is->seek_req = 0;
2536             eof= 0;
2537         }
2538
2539         /* if the queues are full, no need to read more */
2540         if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2541             || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
2542                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
2543                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
2544             /* wait 10 ms */
2545             SDL_Delay(10);
2546             continue;
2547         }
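             /* At end of stream, queue an empty (NULL data, zero size) packet
                for the video decoder so it can drain any frames it is still
                buffering; delayed pictures (B-frame reordering, frame
                threading) are only returned once such a flush packet is fed
                in. */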
2548         if(eof) {
2549             if(is->video_stream >= 0){
2550                 av_init_packet(pkt);
2551                 pkt->data=NULL;
2552                 pkt->size=0;
2553                 pkt->stream_index= is->video_stream;
2554                 packet_queue_put(&is->videoq, pkt);
2555             }
2556             SDL_Delay(10);
2557             if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
2558                 if(loop!=1 && (!loop || --loop)){
2559                     stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2560                 }else if(autoexit){
2561                     ret=AVERROR_EOF;
2562                     goto fail;
2563                 }
2564             }
2565             continue;
2566         }
2567         ret = av_read_frame(ic, pkt);
2568         if (ret < 0) {
2569             if (ret == AVERROR_EOF || (ic->pb && ic->pb->eof_reached))
2570                 eof=1;
2571             if (ic->pb && ic->pb->error)
2572                 break;
2573             SDL_Delay(100); /* wait for user event */
2574             continue;
2575         }
2576         /* check if packet is in play range specified by user, then queue, otherwise discard */
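             /* The test below amounts to: (pkt->pts - stream start time),
                converted to seconds via the stream time base, minus the -ss
                start time must not exceed the -t duration (both given in
                microseconds, hence the division by 1000000). */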
2577         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2578                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2579                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2580                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
2581                 <= ((double)duration/1000000);
2582         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2583             packet_queue_put(&is->audioq, pkt);
2584         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2585             packet_queue_put(&is->videoq, pkt);
2586         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2587             packet_queue_put(&is->subtitleq, pkt);
2588         } else {
2589             av_free_packet(pkt);
2590         }
2591     }
2592     /* wait until the end */
2593     while (!is->abort_request) {
2594         SDL_Delay(100);
2595     }
2596
2597     ret = 0;
2598  fail:
2599     /* disable interrupting */
2600     global_video_state = NULL;
2601
2602     /* close each stream */
2603     if (is->audio_stream >= 0)
2604         stream_component_close(is, is->audio_stream);
2605     if (is->video_stream >= 0)
2606         stream_component_close(is, is->video_stream);
2607     if (is->subtitle_stream >= 0)
2608         stream_component_close(is, is->subtitle_stream);
2609     if (is->ic) {
2610         av_close_input_file(is->ic);
2611         is->ic = NULL; /* safety */
2612     }
2613     avio_set_interrupt_cb(NULL);
2614
2615     if (ret != 0) {
2616         SDL_Event event;
2617
2618         event.type = FF_QUIT_EVENT;
2619         event.user.data1 = is;
2620         SDL_PushEvent(&event);
2621     }
2622     return 0;
2623 }
2624
2625 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2626 {
2627     VideoState *is;
2628
2629     is = av_mallocz(sizeof(VideoState));
2630     if (!is)
2631         return NULL;
2632     av_strlcpy(is->filename, filename, sizeof(is->filename));
2633     is->iformat = iformat;
2634     is->ytop = 0;
2635     is->xleft = 0;
2636
2637     /* start video display */
2638     is->pictq_mutex = SDL_CreateMutex();
2639     is->pictq_cond = SDL_CreateCond();
2640
2641     is->subpq_mutex = SDL_CreateMutex();
2642     is->subpq_cond = SDL_CreateCond();
2643
2644     is->av_sync_type = av_sync_type;
2645     is->parse_tid = SDL_CreateThread(decode_thread, is);
2646     if (!is->parse_tid) {
2647         av_free(is);
2648         return NULL;
2649     }
2650     return is;
2651 }
2652
2653 static void stream_cycle_channel(VideoState *is, int codec_type)
2654 {
2655     AVFormatContext *ic = is->ic;
2656     int start_index, stream_index;
2657     AVStream *st;
2658
2659     if (codec_type == AVMEDIA_TYPE_VIDEO)
2660         start_index = is->video_stream;
2661     else if (codec_type == AVMEDIA_TYPE_AUDIO)
2662         start_index = is->audio_stream;
2663     else
2664         start_index = is->subtitle_stream;
2665     if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2666         return;
2667     stream_index = start_index;
2668     for(;;) {
2669         if (++stream_index >= is->ic->nb_streams)
2670         {
2671             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2672             {
2673                 stream_index = -1;
2674                 goto the_end;
2675             } else
2676                 stream_index = 0;
2677         }
2678         if (stream_index == start_index)
2679             return;
2680         st = ic->streams[stream_index];
2681         if (st->codec->codec_type == codec_type) {
2682             /* check that parameters are OK */
2683             switch(codec_type) {
2684             case AVMEDIA_TYPE_AUDIO:
2685                 if (st->codec->sample_rate != 0 &&
2686                     st->codec->channels != 0)
2687                     goto the_end;
2688                 break;
2689             case AVMEDIA_TYPE_VIDEO:
2690             case AVMEDIA_TYPE_SUBTITLE:
2691                 goto the_end;
2692             default:
2693                 break;
2694             }
2695         }
2696     }
2697  the_end:
2698     stream_component_close(is, start_index);
2699     stream_component_open(is, stream_index);
2700 }
2701
2702
2703 static void toggle_full_screen(void)
2704 {
2705     is_full_screen = !is_full_screen;
2706     if (!fs_screen_width) {
2707         /* use default SDL method */
2708 //        SDL_WM_ToggleFullScreen(screen);
2709     }
2710     video_open(cur_stream);
2711 }
2712
2713 static void toggle_pause(void)
2714 {
2715     if (cur_stream)
2716         stream_pause(cur_stream);
2717     step = 0;
2718 }
2719
2720 static void step_to_next_frame(void)
2721 {
2722     if (cur_stream) {
2723         /* if the stream is paused, unpause it, then step */
2724         if (cur_stream->paused)
2725             stream_pause(cur_stream);
2726     }
2727     step = 1;
2728 }
2729
2730 static void toggle_audio_display(void)
2731 {
2732     if (cur_stream) {
2733         int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2734         cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2735         fill_rectangle(screen,
2736                     cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2737                     bgcolor);
2738         SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2739     }
2740 }
2741
2742 /* handle an event sent by the GUI */
2743 static void event_loop(void)
2744 {
2745     SDL_Event event;
2746     double incr, pos, frac;
2747
2748     for(;;) {
2749         double x;
2750         SDL_WaitEvent(&event);
2751         switch(event.type) {
2752         case SDL_KEYDOWN:
2753             if (exit_on_keydown) {
2754                 do_exit();
2755                 break;
2756             }
2757             switch(event.key.keysym.sym) {
2758             case SDLK_ESCAPE:
2759             case SDLK_q:
2760                 do_exit();
2761                 break;
2762             case SDLK_f:
2763                 toggle_full_screen();
2764                 break;
2765             case SDLK_p:
2766             case SDLK_SPACE:
2767                 toggle_pause();
2768                 break;
2769             case SDLK_s: //S: Step to next frame
2770                 step_to_next_frame();
2771                 break;
2772             case SDLK_a:
2773                 if (cur_stream)
2774                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2775                 break;
2776             case SDLK_v:
2777                 if (cur_stream)
2778                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2779                 break;
2780             case SDLK_t:
2781                 if (cur_stream)
2782                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2783                 break;
2784             case SDLK_w:
2785                 toggle_audio_display();
2786                 break;
2787             case SDLK_LEFT:
2788                 incr = -10.0;
2789                 goto do_seek;
2790             case SDLK_RIGHT:
2791                 incr = 10.0;
2792                 goto do_seek;
2793             case SDLK_UP:
2794                 incr = 60.0;
2795                 goto do_seek;
2796             case SDLK_DOWN:
2797                 incr = -60.0;
2798             do_seek:
2799                 if (cur_stream) {
2800                     if (seek_by_bytes) {
2801                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
2802                             pos= cur_stream->video_current_pos;
2803                         }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
2804                             pos= cur_stream->audio_pkt.pos;
2805                         }else
2806                             pos = avio_tell(cur_stream->ic->pb);
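                             /* convert the +/-10s or +/-60s increment into a
                                byte offset using the container bit rate; the
                                180000.0 fallback corresponds to roughly
                                1.4 Mbit/s when no bit rate is known */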
2807                         if (cur_stream->ic->bit_rate)
2808                             incr *= cur_stream->ic->bit_rate / 8.0;
2809                         else
2810                             incr *= 180000.0;
2811                         pos += incr;
2812                         stream_seek(cur_stream, pos, incr, 1);
2813                     } else {
2814                         pos = get_master_clock(cur_stream);
2815                         pos += incr;
2816                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2817                     }
2818                 }
2819                 break;
2820             default:
2821                 break;
2822             }
2823             break;
2824         case SDL_MOUSEBUTTONDOWN:
2825             if (exit_on_mousedown) {
2826                 do_exit();
2827                 break;
2828             }
2829         case SDL_MOUSEMOTION:
2830             if(event.type ==SDL_MOUSEBUTTONDOWN){
2831                 x= event.button.x;
2832             }else{
2833                 if(event.motion.state != SDL_PRESSED)
2834                     break;
2835                 x= event.motion.x;
2836             }
2837             if (cur_stream) {
2838                 if(seek_by_bytes || cur_stream->ic->duration<=0){
2839                     uint64_t size=  avio_size(cur_stream->ic->pb);
2840                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2841                 }else{
2842                     int64_t ts;
2843                     int ns, hh, mm, ss;
2844                     int tns, thh, tmm, tss;
2845                     tns = cur_stream->ic->duration/1000000LL;
2846                     thh = tns/3600;
2847                     tmm = (tns%3600)/60;
2848                     tss = (tns%60);
2849                     frac = x/cur_stream->width;
2850                     ns = frac*tns;
2851                     hh = ns/3600;
2852                     mm = (ns%3600)/60;
2853                     ss = (ns%60);
2854                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2855                             hh, mm, ss, thh, tmm, tss);
2856                     ts = frac*cur_stream->ic->duration;
2857                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2858                         ts += cur_stream->ic->start_time;
2859                     stream_seek(cur_stream, ts, 0, 0);
2860                 }
2861             }
2862             break;
2863         case SDL_VIDEORESIZE:
2864             if (cur_stream) {
2865                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2866                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2867                 screen_width = cur_stream->width = event.resize.w;
2868                 screen_height= cur_stream->height= event.resize.h;
2869             }
2870             break;
2871         case SDL_QUIT:
2872         case FF_QUIT_EVENT:
2873             do_exit();
2874             break;
2875         case FF_ALLOC_EVENT:
2876             video_open(event.user.data1);
2877             alloc_picture(event.user.data1);
2878             break;
2879         case FF_REFRESH_EVENT:
2880             video_refresh_timer(event.user.data1);
2881             cur_stream->refresh=0;
2882             break;
2883         default:
2884             break;
2885         }
2886     }
2887 }
2888
2889 static void opt_frame_size(const char *arg)
2890 {
2891     if (av_parse_video_size(&frame_width, &frame_height, arg) < 0) {
2892         fprintf(stderr, "Incorrect frame size\n");
2893         exit(1);
2894     }
2895     if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2896         fprintf(stderr, "Frame size must be a multiple of 2\n");
2897         exit(1);
2898     }
2899 }
2900
2901 static int opt_width(const char *opt, const char *arg)
2902 {
2903     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2904     return 0;
2905 }
2906
2907 static int opt_height(const char *opt, const char *arg)
2908 {
2909     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2910     return 0;
2911 }
2912
2913 static void opt_format(const char *arg)
2914 {
2915     file_iformat = av_find_input_format(arg);
2916     if (!file_iformat) {
2917         fprintf(stderr, "Unknown input format: %s\n", arg);
2918         exit(1);
2919     }
2920 }
2921
2922 static void opt_frame_pix_fmt(const char *arg)
2923 {
2924     frame_pix_fmt = av_get_pix_fmt(arg);
2925 }
2926
2927 static int opt_sync(const char *opt, const char *arg)
2928 {
2929     if (!strcmp(arg, "audio"))
2930         av_sync_type = AV_SYNC_AUDIO_MASTER;
2931     else if (!strcmp(arg, "video"))
2932         av_sync_type = AV_SYNC_VIDEO_MASTER;
2933     else if (!strcmp(arg, "ext"))
2934         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2935     else {
2936         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2937         exit(1);
2938     }
2939     return 0;
2940 }
2941
2942 static int opt_seek(const char *opt, const char *arg)
2943 {
2944     start_time = parse_time_or_die(opt, arg, 1);
2945     return 0;
2946 }
2947
2948 static int opt_duration(const char *opt, const char *arg)
2949 {
2950     duration = parse_time_or_die(opt, arg, 1);
2951     return 0;
2952 }
2953
2954 static int opt_debug(const char *opt, const char *arg)
2955 {
2956     av_log_set_level(99);
2957     debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2958     return 0;
2959 }
2960
2961 static int opt_vismv(const char *opt, const char *arg)
2962 {
2963     debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
2964     return 0;
2965 }
2966
2967 static int opt_thread_count(const char *opt, const char *arg)
2968 {
2969     thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2970 #if !HAVE_THREADS
2971     fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2972 #endif
2973     return 0;
2974 }
2975
2976 static const OptionDef options[] = {
2977 #include "cmdutils_common_opts.h"
2978     { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
2979     { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
2980     { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
2981     { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
2982     { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
2983     { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
2984     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
2985     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
2986     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
2987     { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
2988     { "t", HAS_ARG | OPT_FUNC2, {(void*)&opt_duration}, "play \"duration\" seconds of audio/video", "duration" },
2989     { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
2990     { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
2991     { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
2992     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
2993     { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
2994     { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
2995     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
2996     { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
2997     { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
2998     { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
2999     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3000     { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
3001     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
3002     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
3003     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
3004     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
3005     { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
3006     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
3007     { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
3008     { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
3009     { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
3010     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
3011     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
3012     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
3013     { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
3014     { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
3015 #if CONFIG_AVFILTER
3016     { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
3017 #endif
3018     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
3019     { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
3020     { "i", 0, {NULL}, "ffmpeg compatibility dummy option", ""},
3021     { NULL, },
3022 };
3023
3024 static void show_usage(void)
3025 {
3026     printf("Simple media player\n");
3027     printf("usage: ffplay [options] input_file\n");
3028     printf("\n");
3029 }
3030
3031 static void show_help(void)
3032 {
3033     av_log_set_callback(log_callback_help);
3034     show_usage();
3035     show_help_options(options, "Main options:\n",
3036                       OPT_EXPERT, 0);
3037     show_help_options(options, "\nAdvanced options:\n",
3038                       OPT_EXPERT, OPT_EXPERT);
3039     printf("\n");
3040     av_opt_show2(avcodec_opts[0], NULL,
3041                  AV_OPT_FLAG_DECODING_PARAM, 0);
3042     printf("\n");
3043     av_opt_show2(avformat_opts, NULL,
3044                  AV_OPT_FLAG_DECODING_PARAM, 0);
3045 #if !CONFIG_AVFILTER
3046     printf("\n");
3047     av_opt_show2(sws_opts, NULL,
3048                  AV_OPT_FLAG_ENCODING_PARAM, 0);
3049 #endif
3050     printf("\nWhile playing:\n"
3051            "q, ESC              quit\n"
3052            "f                   toggle full screen\n"
3053            "p, SPC              pause\n"
3054            "a                   cycle audio channel\n"
3055            "v                   cycle video channel\n"
3056            "t                   cycle subtitle channel\n"
3057            "w                   show audio waves\n"
3058            "s                   activate frame-step mode\n"
3059            "left/right          seek backward/forward 10 seconds\n"
3060            "down/up             seek backward/forward 1 minute\n"
3061            "mouse click         seek to percentage in file corresponding to fraction of width\n"
3062            );
3063 }
3064
3065 static void opt_input_file(const char *filename)
3066 {
3067     if (input_filename) {
3068         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3069                 filename, input_filename);
3070         exit(1);
3071     }
3072     if (!strcmp(filename, "-"))
3073         filename = "pipe:";
3074     input_filename = filename;
3075 }
3076
3077 /* Called from the main */
3078 int main(int argc, char **argv)
3079 {
3080     int flags;
3081
3082     av_log_set_flags(AV_LOG_SKIP_REPEATED);
3083
3084     /* register all codecs, demux and protocols */
3085     avcodec_register_all();
3086 #if CONFIG_AVDEVICE
3087     avdevice_register_all();
3088 #endif
3089 #if CONFIG_AVFILTER
3090     avfilter_register_all();
3091 #endif
3092     av_register_all();
3093
3094     init_opts();
3095
3096     show_banner();
3097
3098     parse_options(argc, argv, options, opt_input_file);
3099
3100     if (!input_filename) {
3101         show_usage();
3102         fprintf(stderr, "An input file must be specified\n");
3103         fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
3104         exit(1);
3105     }
3106
3107     if (display_disable) {
3108         video_disable = 1;
3109     }
3110     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3111 #if !defined(__MINGW32__) && !defined(__APPLE__)
3112     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3113 #endif
3114     if (SDL_Init (flags)) {
3115         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3116         exit(1);
3117     }
3118
3119     if (!display_disable) {
3120 #if HAVE_SDL_VIDEO_SIZE
3121         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3122         fs_screen_width = vi->current_w;
3123         fs_screen_height = vi->current_h;
3124 #endif
3125     }
3126
3127     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3128     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3129     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3130
3131     av_init_packet(&flush_pkt);
3132     flush_pkt.data= "FLUSH";
3133
3134     cur_stream = stream_open(input_filename, file_iformat);
3135
3136     event_loop();
3137
3138     /* never returns */
3139
3140     return 0;
3141 }