avplay: fix fullscreen behaviour with SDL 1.2.14 on Mac OS X
[android-x86/external-ffmpeg.git] / avplay.c
1 /*
2  * avplay : Simple Media Player based on the Libav libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of Libav.
6  *
7  * Libav is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * Libav is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with Libav; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include "libavutil/avstring.h"
27 #include "libavutil/colorspace.h"
28 #include "libavutil/mathematics.h"
29 #include "libavutil/pixdesc.h"
30 #include "libavutil/imgutils.h"
31 #include "libavutil/dict.h"
32 #include "libavutil/parseutils.h"
33 #include "libavutil/samplefmt.h"
34 #include "libavformat/avformat.h"
35 #include "libavdevice/avdevice.h"
36 #include "libswscale/swscale.h"
37 #include "libavcodec/audioconvert.h"
38 #include "libavutil/opt.h"
39 #include "libavcodec/avfft.h"
40
41 #if CONFIG_AVFILTER
42 # include "libavfilter/avfilter.h"
43 # include "libavfilter/avfiltergraph.h"
44 #endif
45
46 #include "cmdutils.h"
47
48 #include <SDL.h>
49 #include <SDL_thread.h>
50
51 #ifdef __MINGW32__
52 #undef main /* We don't want SDL to override our main() */
53 #endif
54
55 #include <unistd.h>
56 #include <assert.h>
57
58 const char program_name[] = "avplay";
59 const int program_birth_year = 2003;
60
61 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
62 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
63 #define MIN_FRAMES 5
64
65 /* SDL audio buffer size, in samples. Should be small to have precise
66    A/V sync as SDL does not have hardware buffer fullness info. */
67 #define SDL_AUDIO_BUFFER_SIZE 1024
68
69 /* no AV sync correction is done if below the AV sync threshold */
70 #define AV_SYNC_THRESHOLD 0.01
71 /* no AV correction is done if too big error */
72 #define AV_NOSYNC_THRESHOLD 10.0
73
74 #define FRAME_SKIP_FACTOR 0.05
75
76 /* maximum audio speed change to get correct sync */
77 #define SAMPLE_CORRECTION_PERCENT_MAX 10
78
79 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
80 #define AUDIO_DIFF_AVG_NB   20
81
82 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
83 #define SAMPLE_ARRAY_SIZE (2*65536)
84
85 static int sws_flags = SWS_BICUBIC;
86
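/* Thread-safe FIFO of demuxed packets. The demuxer thread appends with
   packet_queue_put(); the decoder threads pop with packet_queue_get(),
   blocking on the condition variable until data arrives or the queue is
   aborted. */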
87 typedef struct PacketQueue {
88     AVPacketList *first_pkt, *last_pkt;
89     int nb_packets;
90     int size;
91     int abort_request;
92     SDL_mutex *mutex;
93     SDL_cond *cond;
94 } PacketQueue;
95
96 #define VIDEO_PICTURE_QUEUE_SIZE 2
97 #define SUBPICTURE_QUEUE_SIZE 4
98
99 typedef struct VideoPicture {
100     double pts;                                  ///<presentation time stamp for this picture
101     double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
102     int64_t pos;                                 ///<byte position in file
103     SDL_Overlay *bmp;
104     int width, height; /* source height & width */
105     int allocated;
106     int reallocate;
107     enum PixelFormat pix_fmt;
108
109 #if CONFIG_AVFILTER
110     AVFilterBufferRef *picref;
111 #endif
112 } VideoPicture;
113
114 typedef struct SubPicture {
115     double pts; /* presentation time stamp for this picture */
116     AVSubtitle sub;
117 } SubPicture;
118
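/* Clock that drives A/V synchronisation. With the default audio master the
   video timing is adjusted (frames are shown later, or dropped, to follow the
   audio clock); when audio is not the master, audio samples are added or
   dropped instead. */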
119 enum {
120     AV_SYNC_AUDIO_MASTER, /* default choice */
121     AV_SYNC_VIDEO_MASTER,
122     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
123 };
124
125 typedef struct VideoState {
126     SDL_Thread *parse_tid;
127     SDL_Thread *video_tid;
128     SDL_Thread *refresh_tid;
129     AVInputFormat *iformat;
130     int no_background;
131     int abort_request;
132     int paused;
133     int last_paused;
134     int seek_req;
135     int seek_flags;
136     int64_t seek_pos;
137     int64_t seek_rel;
138     int read_pause_return;
139     AVFormatContext *ic;
140     int dtg_active_format;
141
142     int audio_stream;
143
144     int av_sync_type;
145     double external_clock; /* external clock base */
146     int64_t external_clock_time;
147
148     double audio_clock;
149     double audio_diff_cum; /* used for AV difference average computation */
150     double audio_diff_avg_coef;
151     double audio_diff_threshold;
152     int audio_diff_avg_count;
153     AVStream *audio_st;
154     PacketQueue audioq;
155     int audio_hw_buf_size;
156     /* samples output by the codec. We reserve more space for A/V sync
157        compensation. */
158     DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
159     DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
160     uint8_t *audio_buf;
161     unsigned int audio_buf_size; /* in bytes */
162     int audio_buf_index; /* in bytes */
163     AVPacket audio_pkt_temp;
164     AVPacket audio_pkt;
165     enum AVSampleFormat audio_src_fmt;
166     AVAudioConvert *reformat_ctx;
167
168     int show_audio; /* if true, display audio samples */
169     int16_t sample_array[SAMPLE_ARRAY_SIZE];
170     int sample_array_index;
171     int last_i_start;
172     RDFTContext *rdft;
173     int rdft_bits;
174     FFTSample *rdft_data;
175     int xpos;
176
177     SDL_Thread *subtitle_tid;
178     int subtitle_stream;
179     int subtitle_stream_changed;
180     AVStream *subtitle_st;
181     PacketQueue subtitleq;
182     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
183     int subpq_size, subpq_rindex, subpq_windex;
184     SDL_mutex *subpq_mutex;
185     SDL_cond *subpq_cond;
186
187     double frame_timer;
188     double frame_last_pts;
189     double frame_last_delay;
190     double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
191     int video_stream;
192     AVStream *video_st;
193     PacketQueue videoq;
194     double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
195     double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
196     int64_t video_current_pos;                   ///<current displayed file pos
197     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
198     int pictq_size, pictq_rindex, pictq_windex;
199     SDL_mutex *pictq_mutex;
200     SDL_cond *pictq_cond;
201 #if !CONFIG_AVFILTER
202     struct SwsContext *img_convert_ctx;
203 #endif
204
205     //    QETimer *video_timer;
206     char filename[1024];
207     int width, height, xleft, ytop;
208
209     PtsCorrectionContext pts_ctx;
210
211 #if CONFIG_AVFILTER
212     AVFilterContext *out_video_filter;          ///<the last filter in the video chain
213 #endif
214
215     float skip_frames;
216     float skip_frames_index;
217     int refresh;
218 } VideoState;
219
220 static void show_help(void);
221
222 /* options specified by the user */
223 static AVInputFormat *file_iformat;
224 static const char *input_filename;
225 static const char *window_title;
226 static int fs_screen_width;
227 static int fs_screen_height;
228 static int screen_width = 0;
229 static int screen_height = 0;
230 static int audio_disable;
231 static int video_disable;
232 static int wanted_stream[AVMEDIA_TYPE_NB]={
233     [AVMEDIA_TYPE_AUDIO]=-1,
234     [AVMEDIA_TYPE_VIDEO]=-1,
235     [AVMEDIA_TYPE_SUBTITLE]=-1,
236 };
237 static int seek_by_bytes=-1;
238 static int display_disable;
239 static int show_status = 1;
240 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
241 static int64_t start_time = AV_NOPTS_VALUE;
242 static int64_t duration = AV_NOPTS_VALUE;
243 static int debug = 0;
244 static int debug_mv = 0;
245 static int step = 0;
246 static int thread_count = 1;
247 static int workaround_bugs = 1;
248 static int fast = 0;
249 static int genpts = 0;
250 static int lowres = 0;
251 static int idct = FF_IDCT_AUTO;
252 static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
253 static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
254 static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
255 static int error_recognition = FF_ER_CAREFUL;
256 static int error_concealment = 3;
257 static int decoder_reorder_pts= -1;
258 static int autoexit;
259 static int exit_on_keydown;
260 static int exit_on_mousedown;
261 static int loop=1;
262 static int framedrop=1;
263
264 static int rdftspeed=20;
265 #if CONFIG_AVFILTER
266 static char *vfilters = NULL;
267 #endif
268
269 /* current context */
270 static int is_full_screen;
271 static VideoState *cur_stream;
272 static int64_t audio_callback_time;
273
274 static AVPacket flush_pkt;
275
276 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
277 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
278 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
279
280 static SDL_Surface *screen;
281
282 void exit_program(int ret)
283 {
284     exit(ret);
285 }
286
287 static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
288
289 /* packet queue handling */
290 static void packet_queue_init(PacketQueue *q)
291 {
292     memset(q, 0, sizeof(PacketQueue));
293     q->mutex = SDL_CreateMutex();
294     q->cond = SDL_CreateCond();
295     packet_queue_put(q, &flush_pkt);
296 }
297
298 static void packet_queue_flush(PacketQueue *q)
299 {
300     AVPacketList *pkt, *pkt1;
301
302     SDL_LockMutex(q->mutex);
303     for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
304         pkt1 = pkt->next;
305         av_free_packet(&pkt->pkt);
306         av_freep(&pkt);
307     }
308     q->last_pkt = NULL;
309     q->first_pkt = NULL;
310     q->nb_packets = 0;
311     q->size = 0;
312     SDL_UnlockMutex(q->mutex);
313 }
314
315 static void packet_queue_end(PacketQueue *q)
316 {
317     packet_queue_flush(q);
318     SDL_DestroyMutex(q->mutex);
319     SDL_DestroyCond(q->cond);
320 }
321
322 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
323 {
324     AVPacketList *pkt1;
325
326     /* duplicate the packet */
327     if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
328         return -1;
329
330     pkt1 = av_malloc(sizeof(AVPacketList));
331     if (!pkt1)
332         return -1;
333     pkt1->pkt = *pkt;
334     pkt1->next = NULL;
335
336
337     SDL_LockMutex(q->mutex);
338
339     if (!q->last_pkt)
340
341         q->first_pkt = pkt1;
342     else
343         q->last_pkt->next = pkt1;
344     q->last_pkt = pkt1;
345     q->nb_packets++;
346     q->size += pkt1->pkt.size + sizeof(*pkt1);
347     /* XXX: should duplicate packet data in DV case */
348     SDL_CondSignal(q->cond);
349
350     SDL_UnlockMutex(q->mutex);
351     return 0;
352 }
353
354 static void packet_queue_abort(PacketQueue *q)
355 {
356     SDL_LockMutex(q->mutex);
357
358     q->abort_request = 1;
359
360     SDL_CondSignal(q->cond);
361
362     SDL_UnlockMutex(q->mutex);
363 }
364
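/* A blocking consumer loop over one of these queues looks roughly like this
   (the video and subtitle decoder threads follow this pattern):

       AVPacket pkt;
       for (;;) {
           if (packet_queue_get(q, &pkt, 1) < 0)
               break;              // the queue was aborted
           ... decode pkt ...
           av_free_packet(&pkt);
       }
*/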
365 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
366 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
367 {
368     AVPacketList *pkt1;
369     int ret;
370
371     SDL_LockMutex(q->mutex);
372
373     for(;;) {
374         if (q->abort_request) {
375             ret = -1;
376             break;
377         }
378
379         pkt1 = q->first_pkt;
380         if (pkt1) {
381             q->first_pkt = pkt1->next;
382             if (!q->first_pkt)
383                 q->last_pkt = NULL;
384             q->nb_packets--;
385             q->size -= pkt1->pkt.size + sizeof(*pkt1);
386             *pkt = pkt1->pkt;
387             av_free(pkt1);
388             ret = 1;
389             break;
390         } else if (!block) {
391             ret = 0;
392             break;
393         } else {
394             SDL_CondWait(q->cond, q->mutex);
395         }
396     }
397     SDL_UnlockMutex(q->mutex);
398     return ret;
399 }
400
401 static inline void fill_rectangle(SDL_Surface *screen,
402                                   int x, int y, int w, int h, int color)
403 {
404     SDL_Rect rect;
405     rect.x = x;
406     rect.y = y;
407     rect.w = w;
408     rect.h = h;
409     SDL_FillRect(screen, &rect, color);
410 }
411
412 #define ALPHA_BLEND(a, oldp, newp, s)\
413 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
414
415 #define RGBA_IN(r, g, b, a, s)\
416 {\
417     unsigned int v = ((const uint32_t *)(s))[0];\
418     a = (v >> 24) & 0xff;\
419     r = (v >> 16) & 0xff;\
420     g = (v >> 8) & 0xff;\
421     b = v & 0xff;\
422 }
423
424 #define YUVA_IN(y, u, v, a, s, pal)\
425 {\
426     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
427     a = (val >> 24) & 0xff;\
428     y = (val >> 16) & 0xff;\
429     u = (val >> 8) & 0xff;\
430     v = val & 0xff;\
431 }
432
433 #define YUVA_OUT(d, y, u, v, a)\
434 {\
435     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
436 }
437
438
439 #define BPP 1
440
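/* Alpha-blend one palettized subtitle rectangle into a YUV420P destination
   picture (BPP is 1 because the subtitle bitmap stores 8-bit palette indices).
   Luma is blended per pixel; chroma is blended per 2x2 block by averaging the
   subtitle samples that fall inside it. */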
441 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
442 {
443     int wrap, wrap3, width2, skip2;
444     int y, u, v, a, u1, v1, a1, w, h;
445     uint8_t *lum, *cb, *cr;
446     const uint8_t *p;
447     const uint32_t *pal;
448     int dstx, dsty, dstw, dsth;
449
450     dstw = av_clip(rect->w, 0, imgw);
451     dsth = av_clip(rect->h, 0, imgh);
452     dstx = av_clip(rect->x, 0, imgw - dstw);
453     dsty = av_clip(rect->y, 0, imgh - dsth);
454     lum = dst->data[0] + dsty * dst->linesize[0];
455     cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
456     cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
457
458     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
459     skip2 = dstx >> 1;
460     wrap = dst->linesize[0];
461     wrap3 = rect->pict.linesize[0];
462     p = rect->pict.data[0];
463     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
464
465     if (dsty & 1) {
466         lum += dstx;
467         cb += skip2;
468         cr += skip2;
469
470         if (dstx & 1) {
471             YUVA_IN(y, u, v, a, p, pal);
472             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
473             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
474             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
475             cb++;
476             cr++;
477             lum++;
478             p += BPP;
479         }
480         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
481             YUVA_IN(y, u, v, a, p, pal);
482             u1 = u;
483             v1 = v;
484             a1 = a;
485             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
486
487             YUVA_IN(y, u, v, a, p + BPP, pal);
488             u1 += u;
489             v1 += v;
490             a1 += a;
491             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
492             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
493             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
494             cb++;
495             cr++;
496             p += 2 * BPP;
497             lum += 2;
498         }
499         if (w) {
500             YUVA_IN(y, u, v, a, p, pal);
501             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
502             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
503             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
504             p++;
505             lum++;
506         }
507         p += wrap3 - dstw * BPP;
508         lum += wrap - dstw - dstx;
509         cb += dst->linesize[1] - width2 - skip2;
510         cr += dst->linesize[2] - width2 - skip2;
511     }
512     for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
513         lum += dstx;
514         cb += skip2;
515         cr += skip2;
516
517         if (dstx & 1) {
518             YUVA_IN(y, u, v, a, p, pal);
519             u1 = u;
520             v1 = v;
521             a1 = a;
522             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
523             p += wrap3;
524             lum += wrap;
525             YUVA_IN(y, u, v, a, p, pal);
526             u1 += u;
527             v1 += v;
528             a1 += a;
529             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
530             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
531             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
532             cb++;
533             cr++;
534             p += -wrap3 + BPP;
535             lum += -wrap + 1;
536         }
537         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
538             YUVA_IN(y, u, v, a, p, pal);
539             u1 = u;
540             v1 = v;
541             a1 = a;
542             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
543
544             YUVA_IN(y, u, v, a, p + BPP, pal);
545             u1 += u;
546             v1 += v;
547             a1 += a;
548             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
549             p += wrap3;
550             lum += wrap;
551
552             YUVA_IN(y, u, v, a, p, pal);
553             u1 += u;
554             v1 += v;
555             a1 += a;
556             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
557
558             YUVA_IN(y, u, v, a, p + BPP, pal);
559             u1 += u;
560             v1 += v;
561             a1 += a;
562             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
563
564             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
565             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
566
567             cb++;
568             cr++;
569             p += -wrap3 + 2 * BPP;
570             lum += -wrap + 2;
571         }
572         if (w) {
573             YUVA_IN(y, u, v, a, p, pal);
574             u1 = u;
575             v1 = v;
576             a1 = a;
577             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
578             p += wrap3;
579             lum += wrap;
580             YUVA_IN(y, u, v, a, p, pal);
581             u1 += u;
582             v1 += v;
583             a1 += a;
584             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
585             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
586             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
587             cb++;
588             cr++;
589             p += -wrap3 + BPP;
590             lum += -wrap + 1;
591         }
592         p += wrap3 + (wrap3 - dstw * BPP);
593         lum += wrap + (wrap - dstw - dstx);
594         cb += dst->linesize[1] - width2 - skip2;
595         cr += dst->linesize[2] - width2 - skip2;
596     }
597     /* handle odd height */
598     if (h) {
599         lum += dstx;
600         cb += skip2;
601         cr += skip2;
602
603         if (dstx & 1) {
604             YUVA_IN(y, u, v, a, p, pal);
605             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
606             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
607             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
608             cb++;
609             cr++;
610             lum++;
611             p += BPP;
612         }
613         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
614             YUVA_IN(y, u, v, a, p, pal);
615             u1 = u;
616             v1 = v;
617             a1 = a;
618             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
619
620             YUVA_IN(y, u, v, a, p + BPP, pal);
621             u1 += u;
622             v1 += v;
623             a1 += a;
624             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
625             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
626             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
627             cb++;
628             cr++;
629             p += 2 * BPP;
630             lum += 2;
631         }
632         if (w) {
633             YUVA_IN(y, u, v, a, p, pal);
634             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
635             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
636             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
637         }
638     }
639 }
640
641 static void free_subpicture(SubPicture *sp)
642 {
643     avsubtitle_free(&sp->sub);
644 }
645
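/* Show the picture at the read index of the picture queue: blend any pending
   subtitle into the YUV overlay, then scale/letterbox it into the window
   according to the source aspect ratio. */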
646 static void video_image_display(VideoState *is)
647 {
648     VideoPicture *vp;
649     SubPicture *sp;
650     AVPicture pict;
651     float aspect_ratio;
652     int width, height, x, y;
653     SDL_Rect rect;
654     int i;
655
656     vp = &is->pictq[is->pictq_rindex];
657     if (vp->bmp) {
658 #if CONFIG_AVFILTER
659          if (vp->picref->video->pixel_aspect.num == 0)
660              aspect_ratio = 0;
661          else
662              aspect_ratio = av_q2d(vp->picref->video->pixel_aspect);
663 #else
664
665         /* XXX: use variable in the frame */
666         if (is->video_st->sample_aspect_ratio.num)
667             aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
668         else if (is->video_st->codec->sample_aspect_ratio.num)
669             aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
670         else
671             aspect_ratio = 0;
672 #endif
673         if (aspect_ratio <= 0.0)
674             aspect_ratio = 1.0;
675         aspect_ratio *= (float)vp->width / (float)vp->height;
676
677         if (is->subtitle_st)
678         {
679             if (is->subpq_size > 0)
680             {
681                 sp = &is->subpq[is->subpq_rindex];
682
683                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
684                 {
685                     SDL_LockYUVOverlay (vp->bmp);
686
687                     pict.data[0] = vp->bmp->pixels[0];
688                     pict.data[1] = vp->bmp->pixels[2];
689                     pict.data[2] = vp->bmp->pixels[1];
690
691                     pict.linesize[0] = vp->bmp->pitches[0];
692                     pict.linesize[1] = vp->bmp->pitches[2];
693                     pict.linesize[2] = vp->bmp->pitches[1];
694
695                     for (i = 0; i < sp->sub.num_rects; i++)
696                         blend_subrect(&pict, sp->sub.rects[i],
697                                       vp->bmp->w, vp->bmp->h);
698
699                     SDL_UnlockYUVOverlay (vp->bmp);
700                 }
701             }
702         }
703
704
705         /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
706         height = is->height;
707         width = ((int)rint(height * aspect_ratio)) & ~1;
708         if (width > is->width) {
709             width = is->width;
710             height = ((int)rint(width / aspect_ratio)) & ~1;
711         }
712         x = (is->width - width) / 2;
713         y = (is->height - height) / 2;
714         is->no_background = 0;
715         rect.x = is->xleft + x;
716         rect.y = is->ytop  + y;
717         rect.w = width;
718         rect.h = height;
719         SDL_DisplayYUVOverlay(vp->bmp, &rect);
720     }
721 }
722
723 /* get the current audio output buffer size, in bytes. With SDL, we
724    cannot get precise information. */
725 static int audio_write_get_buf_size(VideoState *is)
726 {
727     return is->audio_buf_size - is->audio_buf_index;
728 }
729
730 static inline int compute_mod(int a, int b)
731 {
732     a = a % b;
733     if (a >= 0)
734         return a;
735     else
736         return a + b;
737 }
738
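/* Audio-only visualisation. show_audio == 1 draws one waveform per channel;
   otherwise one spectrogram column (computed with an RDFT over the most recent
   samples) is drawn per refresh at the current x position. The start index is
   chosen so the display stays roughly centered on what is currently being
   played. */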
739 static void video_audio_display(VideoState *s)
740 {
741     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
742     int ch, channels, h, h2, bgcolor, fgcolor;
743     int16_t time_diff;
744     int rdft_bits, nb_freq;
745
746     for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
747         ;
748     nb_freq= 1<<(rdft_bits-1);
749
750     /* compute the display index: center on the currently output samples */
751     channels = s->audio_st->codec->channels;
752     nb_display_channels = channels;
753     if (!s->paused) {
754         int data_used= s->show_audio==1 ? s->width : (2*nb_freq);
755         n = 2 * channels;
756         delay = audio_write_get_buf_size(s);
757         delay /= n;
758
759         /* to be more precise, we take into account the time spent since
760            the last buffer computation */
761         if (audio_callback_time) {
762             time_diff = av_gettime() - audio_callback_time;
763             delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
764         }
765
766         delay += 2*data_used;
767         if (delay < data_used)
768             delay = data_used;
769
770         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
771         if(s->show_audio==1){
772             h= INT_MIN;
773             for(i=0; i<1000; i+=channels){
774                 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
775                 int a= s->sample_array[idx];
776                 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
777                 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
778                 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
779                 int score= a-d;
780                 if(h<score && (b^c)<0){
781                     h= score;
782                     i_start= idx;
783                 }
784             }
785         }
786
787         s->last_i_start = i_start;
788     } else {
789         i_start = s->last_i_start;
790     }
791
792     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
793     if(s->show_audio==1){
794         fill_rectangle(screen,
795                        s->xleft, s->ytop, s->width, s->height,
796                        bgcolor);
797
798         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
799
800         /* total height for one channel */
801         h = s->height / nb_display_channels;
802         /* graph height / 2 */
803         h2 = (h * 9) / 20;
804         for(ch = 0;ch < nb_display_channels; ch++) {
805             i = i_start + ch;
806             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
807             for(x = 0; x < s->width; x++) {
808                 y = (s->sample_array[i] * h2) >> 15;
809                 if (y < 0) {
810                     y = -y;
811                     ys = y1 - y;
812                 } else {
813                     ys = y1;
814                 }
815                 fill_rectangle(screen,
816                                s->xleft + x, ys, 1, y,
817                                fgcolor);
818                 i += channels;
819                 if (i >= SAMPLE_ARRAY_SIZE)
820                     i -= SAMPLE_ARRAY_SIZE;
821             }
822         }
823
824         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
825
826         for(ch = 1;ch < nb_display_channels; ch++) {
827             y = s->ytop + ch * h;
828             fill_rectangle(screen,
829                            s->xleft, y, s->width, 1,
830                            fgcolor);
831         }
832         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
833     }else{
834         nb_display_channels= FFMIN(nb_display_channels, 2);
835         if(rdft_bits != s->rdft_bits){
836             av_rdft_end(s->rdft);
837             av_free(s->rdft_data);
838             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
839             s->rdft_bits= rdft_bits;
840             s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
841         }
842         {
843             FFTSample *data[2];
844             for(ch = 0;ch < nb_display_channels; ch++) {
845                 data[ch] = s->rdft_data + 2*nb_freq*ch;
846                 i = i_start + ch;
847                 for(x = 0; x < 2*nb_freq; x++) {
848                     double w= (x-nb_freq)*(1.0/nb_freq);
849                     data[ch][x]= s->sample_array[i]*(1.0-w*w);
850                     i += channels;
851                     if (i >= SAMPLE_ARRAY_SIZE)
852                         i -= SAMPLE_ARRAY_SIZE;
853                 }
854                 av_rdft_calc(s->rdft, data[ch]);
855             }
856             // Least efficient way to do this; we should really access the surface pixels directly, but this is more than fast enough.
857             for(y=0; y<s->height; y++){
858                 double w= 1/sqrt(nb_freq);
859                 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
860                 int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
861                        + data[1][2*y+1]*data[1][2*y+1])) : a;
862                 a= FFMIN(a,255);
863                 b= FFMIN(b,255);
864                 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
865
866                 fill_rectangle(screen,
867                             s->xpos, s->height-y, 1, 1,
868                             fgcolor);
869             }
870         }
871         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
872         s->xpos++;
873         if(s->xpos >= s->width)
874             s->xpos= s->xleft;
875     }
876 }
877
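/* (Re)open the SDL video surface. The size comes from the fullscreen or
   user-supplied dimensions if set, otherwise from the filter output or codec
   dimensions, falling back to 640x480. */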
878 static int video_open(VideoState *is){
879     int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
880     int w,h;
881
882     if(is_full_screen) flags |= SDL_FULLSCREEN;
883     else               flags |= SDL_RESIZABLE;
884
885     if (is_full_screen && fs_screen_width) {
886         w = fs_screen_width;
887         h = fs_screen_height;
888     } else if(!is_full_screen && screen_width){
889         w = screen_width;
890         h = screen_height;
891 #if CONFIG_AVFILTER
892     }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
893         w = is->out_video_filter->inputs[0]->w;
894         h = is->out_video_filter->inputs[0]->h;
895 #else
896     }else if (is->video_st && is->video_st->codec->width){
897         w = is->video_st->codec->width;
898         h = is->video_st->codec->height;
899 #endif
900     } else {
901         w = 640;
902         h = 480;
903     }
904     if(screen && is->width == screen->w && screen->w == w
905        && is->height== screen->h && screen->h == h)
906         return 0;
907
908 #if defined(__APPLE__) && !SDL_VERSION_ATLEAST(1, 2, 14)
909     /* setting bits_per_pixel = 0 or 32 causes blank video on OS X and older SDL */
910     screen = SDL_SetVideoMode(w, h, 24, flags);
911 #else
912     screen = SDL_SetVideoMode(w, h, 0, flags);
913 #endif
914     if (!screen) {
915         fprintf(stderr, "SDL: could not set video mode - exiting\n");
916         return -1;
917     }
918     if (!window_title)
919         window_title = input_filename;
920     SDL_WM_SetCaption(window_title, window_title);
921
922     is->width = screen->w;
923     is->height = screen->h;
924
925     return 0;
926 }
927
928 /* display the current picture, if any */
929 static void video_display(VideoState *is)
930 {
931     if(!screen)
932         video_open(cur_stream);
933     if (is->audio_st && is->show_audio)
934         video_audio_display(is);
935     else if (is->video_st)
936         video_image_display(is);
937 }
938
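/* Periodically push an FF_REFRESH_EVENT to the main event loop; the refresh
   flag guarantees that at most one such event is pending at a time. */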
939 static int refresh_thread(void *opaque)
940 {
941     VideoState *is= opaque;
942     while(!is->abort_request){
943         SDL_Event event;
944         event.type = FF_REFRESH_EVENT;
945         event.user.data1 = opaque;
946         if(!is->refresh){
947             is->refresh=1;
948             SDL_PushEvent(&event);
949         }
950         usleep(is->audio_st && is->show_audio ? rdftspeed*1000 : 5000); // FIXME: ideally we should wait for the correct time, but SDL's event passing is too slow for that to be worthwhile
951     }
952     return 0;
953 }
954
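/* audio_clock holds the pts at the end of the last decoded audio data, so
   subtracting the duration of the bytes still waiting in the output buffer
   (assuming 16-bit samples) gives the approximate time currently being heard;
   SDL's own internal buffering cannot be accounted for. */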
955 /* get the current audio clock value */
956 static double get_audio_clock(VideoState *is)
957 {
958     double pts;
959     int hw_buf_size, bytes_per_sec;
960     pts = is->audio_clock;
961     hw_buf_size = audio_write_get_buf_size(is);
962     bytes_per_sec = 0;
963     if (is->audio_st) {
964         bytes_per_sec = is->audio_st->codec->sample_rate *
965             2 * is->audio_st->codec->channels;
966     }
967     if (bytes_per_sec)
968         pts -= (double)hw_buf_size / bytes_per_sec;
969     return pts;
970 }
971
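/* video_current_pts_drift is video_current_pts minus the system time at which
   it was stored; adding the current system time back therefore gives a video
   clock that keeps running between display updates. */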
972 /* get the current video clock value */
973 static double get_video_clock(VideoState *is)
974 {
975     if (is->paused) {
976         return is->video_current_pts;
977     } else {
978         return is->video_current_pts_drift + av_gettime() / 1000000.0;
979     }
980 }
981
982 /* get the current external clock value */
983 static double get_external_clock(VideoState *is)
984 {
985     int64_t ti;
986     ti = av_gettime();
987     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
988 }
989
990 /* get the current master clock value */
991 static double get_master_clock(VideoState *is)
992 {
993     double val;
994
995     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
996         if (is->video_st)
997             val = get_video_clock(is);
998         else
999             val = get_audio_clock(is);
1000     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1001         if (is->audio_st)
1002             val = get_audio_clock(is);
1003         else
1004             val = get_video_clock(is);
1005     } else {
1006         val = get_external_clock(is);
1007     }
1008     return val;
1009 }
1010
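/* Only records the seek request; the demuxer thread performs the actual seek
   and then flushes the packet queues with flush_pkt. */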
1011 /* seek in the stream */
1012 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1013 {
1014     if (!is->seek_req) {
1015         is->seek_pos = pos;
1016         is->seek_rel = rel;
1017         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1018         if (seek_by_bytes)
1019             is->seek_flags |= AVSEEK_FLAG_BYTE;
1020         is->seek_req = 1;
1021     }
1022 }
1023
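/* On resume, frame_timer and the video clock drift are rebased so that the
   time spent paused is not counted as decoding lateness. */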
1024 /* pause or resume the video */
1025 static void stream_pause(VideoState *is)
1026 {
1027     if (is->paused) {
1028         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1029         if(is->read_pause_return != AVERROR(ENOSYS)){
1030             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1031         }
1032         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1033     }
1034     is->paused = !is->paused;
1035 }
1036
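/* Compute the absolute time (frame_timer) at which the given frame should be
   displayed. The nominal delay is the pts difference to the previous frame;
   when audio (or the external clock) is the master, that delay is dropped to 0
   or doubled to pull the video clock back towards the master, unless the drift
   already exceeds AV_NOSYNC_THRESHOLD. */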
1037 static double compute_target_time(double frame_current_pts, VideoState *is)
1038 {
1039     double delay, sync_threshold, diff;
1040
1041     /* compute nominal delay */
1042     delay = frame_current_pts - is->frame_last_pts;
1043     if (delay <= 0 || delay >= 10.0) {
1044         /* if incorrect delay, use previous one */
1045         delay = is->frame_last_delay;
1046     } else {
1047         is->frame_last_delay = delay;
1048     }
1049     is->frame_last_pts = frame_current_pts;
1050
1051     /* update delay to follow master synchronisation source */
1052     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1053          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1054         /* if video is slave, we try to correct big delays by
1055            duplicating or deleting a frame */
1056         diff = get_video_clock(is) - get_master_clock(is);
1057
1058         /* skip or repeat frame. We take into account the
1059            delay to compute the threshold. I still don't know
1060            if it is the best guess */
1061         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1062         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1063             if (diff <= -sync_threshold)
1064                 delay = 0;
1065             else if (diff >= sync_threshold)
1066                 delay = 2 * delay;
1067         }
1068     }
1069     is->frame_timer += delay;
1070
1071     av_dlog(NULL, "video: delay=%0.3f pts=%0.3f A-V=%f\n",
1072             delay, frame_current_pts, -diff);
1073
1074     return is->frame_timer;
1075 }
1076
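/* Handler for FF_REFRESH_EVENT: display the next queued picture once its
   target_clock has passed, drop pictures that are already late when framedrop
   is enabled, retire expired subtitles and periodically print the status line
   when show_status is set. */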
1077 /* called to display each frame */
1078 static void video_refresh_timer(void *opaque)
1079 {
1080     VideoState *is = opaque;
1081     VideoPicture *vp;
1082
1083     SubPicture *sp, *sp2;
1084
1085     if (is->video_st) {
1086 retry:
1087         if (is->pictq_size == 0) {
1088             // nothing to do, no picture to display in the queue
1089         } else {
1090             double time= av_gettime()/1000000.0;
1091             double next_target;
1092             /* dequeue the picture */
1093             vp = &is->pictq[is->pictq_rindex];
1094
1095             if(time < vp->target_clock)
1096                 return;
1097             /* update current video pts */
1098             is->video_current_pts = vp->pts;
1099             is->video_current_pts_drift = is->video_current_pts - time;
1100             is->video_current_pos = vp->pos;
1101             if(is->pictq_size > 1){
1102                 VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
1103                 assert(nextvp->target_clock >= vp->target_clock);
1104                 next_target= nextvp->target_clock;
1105             }else{
1106                 next_target= vp->target_clock + is->video_clock - vp->pts; //FIXME pass durations cleanly
1107             }
1108             if(framedrop && time > next_target){
1109                 is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
1110                 if(is->pictq_size > 1 || time > next_target + 0.5){
1111                     /* update queue size and signal for next picture */
1112                     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1113                         is->pictq_rindex = 0;
1114
1115                     SDL_LockMutex(is->pictq_mutex);
1116                     is->pictq_size--;
1117                     SDL_CondSignal(is->pictq_cond);
1118                     SDL_UnlockMutex(is->pictq_mutex);
1119                     goto retry;
1120                 }
1121             }
1122
1123             if(is->subtitle_st) {
1124                 if (is->subtitle_stream_changed) {
1125                     SDL_LockMutex(is->subpq_mutex);
1126
1127                     while (is->subpq_size) {
1128                         free_subpicture(&is->subpq[is->subpq_rindex]);
1129
1130                         /* update queue size and signal for next picture */
1131                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1132                             is->subpq_rindex = 0;
1133
1134                         is->subpq_size--;
1135                     }
1136                     is->subtitle_stream_changed = 0;
1137
1138                     SDL_CondSignal(is->subpq_cond);
1139                     SDL_UnlockMutex(is->subpq_mutex);
1140                 } else {
1141                     if (is->subpq_size > 0) {
1142                         sp = &is->subpq[is->subpq_rindex];
1143
1144                         if (is->subpq_size > 1)
1145                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1146                         else
1147                             sp2 = NULL;
1148
1149                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1150                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1151                         {
1152                             free_subpicture(sp);
1153
1154                             /* update queue size and signal for next picture */
1155                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1156                                 is->subpq_rindex = 0;
1157
1158                             SDL_LockMutex(is->subpq_mutex);
1159                             is->subpq_size--;
1160                             SDL_CondSignal(is->subpq_cond);
1161                             SDL_UnlockMutex(is->subpq_mutex);
1162                         }
1163                     }
1164                 }
1165             }
1166
1167             /* display picture */
1168             if (!display_disable)
1169                 video_display(is);
1170
1171             /* update queue size and signal for next picture */
1172             if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1173                 is->pictq_rindex = 0;
1174
1175             SDL_LockMutex(is->pictq_mutex);
1176             is->pictq_size--;
1177             SDL_CondSignal(is->pictq_cond);
1178             SDL_UnlockMutex(is->pictq_mutex);
1179         }
1180     } else if (is->audio_st) {
1181         /* draw the next audio frame */
1182
1183         /* if there is only an audio stream, display the audio bars (better
1184            than nothing, just to test the implementation) */
1185
1186         /* display picture */
1187         if (!display_disable)
1188             video_display(is);
1189     }
1190     if (show_status) {
1191         static int64_t last_time;
1192         int64_t cur_time;
1193         int aqsize, vqsize, sqsize;
1194         double av_diff;
1195
1196         cur_time = av_gettime();
1197         if (!last_time || (cur_time - last_time) >= 30000) {
1198             aqsize = 0;
1199             vqsize = 0;
1200             sqsize = 0;
1201             if (is->audio_st)
1202                 aqsize = is->audioq.size;
1203             if (is->video_st)
1204                 vqsize = is->videoq.size;
1205             if (is->subtitle_st)
1206                 sqsize = is->subtitleq.size;
1207             av_diff = 0;
1208             if (is->audio_st && is->video_st)
1209                 av_diff = get_audio_clock(is) - get_video_clock(is);
1210             printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1211                    get_master_clock(is), av_diff, FFMAX(is->skip_frames-1, 0), aqsize / 1024, vqsize / 1024, sqsize, is->pts_ctx.num_faulty_dts, is->pts_ctx.num_faulty_pts);
1212             fflush(stdout);
1213             last_time = cur_time;
1214         }
1215     }
1216 }
1217
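/* Tear down a VideoState: request abort, join the demuxer and refresh threads,
   then free the queued overlays, the queue mutexes/conds and the state itself. */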
1218 static void stream_close(VideoState *is)
1219 {
1220     VideoPicture *vp;
1221     int i;
1222     /* XXX: use a special url_shutdown call to abort parse cleanly */
1223     is->abort_request = 1;
1224     SDL_WaitThread(is->parse_tid, NULL);
1225     SDL_WaitThread(is->refresh_tid, NULL);
1226
1227     /* free all pictures */
1228     for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
1229         vp = &is->pictq[i];
1230 #if CONFIG_AVFILTER
1231         if (vp->picref) {
1232             avfilter_unref_buffer(vp->picref);
1233             vp->picref = NULL;
1234         }
1235 #endif
1236         if (vp->bmp) {
1237             SDL_FreeYUVOverlay(vp->bmp);
1238             vp->bmp = NULL;
1239         }
1240     }
1241     SDL_DestroyMutex(is->pictq_mutex);
1242     SDL_DestroyCond(is->pictq_cond);
1243     SDL_DestroyMutex(is->subpq_mutex);
1244     SDL_DestroyCond(is->subpq_cond);
1245 #if !CONFIG_AVFILTER
1246     if (is->img_convert_ctx)
1247         sws_freeContext(is->img_convert_ctx);
1248 #endif
1249     av_free(is);
1250 }
1251
1252 static void do_exit(void)
1253 {
1254     if (cur_stream) {
1255         stream_close(cur_stream);
1256         cur_stream = NULL;
1257     }
1258     uninit_opts();
1259 #if CONFIG_AVFILTER
1260     avfilter_uninit();
1261 #endif
1262     if (show_status)
1263         printf("\n");
1264     SDL_Quit();
1265     av_log(NULL, AV_LOG_QUIET, "");
1266     exit(0);
1267 }
1268
1269 /* allocate a picture (this needs to be done in the main thread to avoid
1270    potential locking problems) */
1271 static void alloc_picture(void *opaque)
1272 {
1273     VideoState *is = opaque;
1274     VideoPicture *vp;
1275
1276     vp = &is->pictq[is->pictq_windex];
1277
1278     if (vp->bmp)
1279         SDL_FreeYUVOverlay(vp->bmp);
1280
1281 #if CONFIG_AVFILTER
1282     if (vp->picref)
1283         avfilter_unref_buffer(vp->picref);
1284     vp->picref = NULL;
1285
1286     vp->width   = is->out_video_filter->inputs[0]->w;
1287     vp->height  = is->out_video_filter->inputs[0]->h;
1288     vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1289 #else
1290     vp->width   = is->video_st->codec->width;
1291     vp->height  = is->video_st->codec->height;
1292     vp->pix_fmt = is->video_st->codec->pix_fmt;
1293 #endif
1294
1295     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1296                                    SDL_YV12_OVERLAY,
1297                                    screen);
1298     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1299         /* SDL allocates a buffer smaller than requested if the video
1300          * overlay hardware is unable to support the requested size. */
1301         fprintf(stderr, "Error: the video system does not support an image\n"
1302                         "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1303                         "to reduce the image size.\n", vp->width, vp->height );
1304         do_exit();
1305     }
1306
1307     SDL_LockMutex(is->pictq_mutex);
1308     vp->allocated = 1;
1309     SDL_CondSignal(is->pictq_cond);
1310     SDL_UnlockMutex(is->pictq_mutex);
1311 }
1312
1313 /**
1314  * Queue a decoded picture for display.
1315  * @param pts pts of the frame (the dts of the packet if no pts is available), guessed if not known
1316  */
1317 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
1318 {
1319     VideoPicture *vp;
1320 #if CONFIG_AVFILTER
1321     AVPicture pict_src;
1322 #else
1323     int dst_pix_fmt = PIX_FMT_YUV420P;
1324 #endif
1325     /* wait until we have space to put a new picture */
1326     SDL_LockMutex(is->pictq_mutex);
1327
1328     if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1329         is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));
1330
1331     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1332            !is->videoq.abort_request) {
1333         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1334     }
1335     SDL_UnlockMutex(is->pictq_mutex);
1336
1337     if (is->videoq.abort_request)
1338         return -1;
1339
1340     vp = &is->pictq[is->pictq_windex];
1341
1342     /* alloc or resize hardware picture buffer */
1343     if (!vp->bmp || vp->reallocate ||
1344 #if CONFIG_AVFILTER
1345         vp->width  != is->out_video_filter->inputs[0]->w ||
1346         vp->height != is->out_video_filter->inputs[0]->h) {
1347 #else
1348         vp->width != is->video_st->codec->width ||
1349         vp->height != is->video_st->codec->height) {
1350 #endif
1351         SDL_Event event;
1352
1353         vp->allocated  = 0;
1354         vp->reallocate = 0;
1355
1356         /* the allocation must be done in the main thread to avoid
1357            locking problems */
1358         event.type = FF_ALLOC_EVENT;
1359         event.user.data1 = is;
1360         SDL_PushEvent(&event);
1361
1362         /* wait until the picture is allocated */
1363         SDL_LockMutex(is->pictq_mutex);
1364         while (!vp->allocated && !is->videoq.abort_request) {
1365             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1366         }
1367         SDL_UnlockMutex(is->pictq_mutex);
1368
1369         if (is->videoq.abort_request)
1370             return -1;
1371     }
1372
1373     /* if the frame is not skipped, then display it */
1374     if (vp->bmp) {
1375         AVPicture pict;
1376 #if CONFIG_AVFILTER
1377         if(vp->picref)
1378             avfilter_unref_buffer(vp->picref);
1379         vp->picref = src_frame->opaque;
1380 #endif
1381
1382         /* get a pointer on the bitmap */
1383         SDL_LockYUVOverlay (vp->bmp);
1384
1385         memset(&pict,0,sizeof(AVPicture));
1386         pict.data[0] = vp->bmp->pixels[0];
1387         pict.data[1] = vp->bmp->pixels[2];
1388         pict.data[2] = vp->bmp->pixels[1];
1389
1390         pict.linesize[0] = vp->bmp->pitches[0];
1391         pict.linesize[1] = vp->bmp->pitches[2];
1392         pict.linesize[2] = vp->bmp->pitches[1];
1393
1394 #if CONFIG_AVFILTER
1395         pict_src.data[0] = src_frame->data[0];
1396         pict_src.data[1] = src_frame->data[1];
1397         pict_src.data[2] = src_frame->data[2];
1398
1399         pict_src.linesize[0] = src_frame->linesize[0];
1400         pict_src.linesize[1] = src_frame->linesize[1];
1401         pict_src.linesize[2] = src_frame->linesize[2];
1402
1403         //FIXME use direct rendering
1404         av_picture_copy(&pict, &pict_src,
1405                         vp->pix_fmt, vp->width, vp->height);
1406 #else
1407         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1408         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1409             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1410             dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1411         if (is->img_convert_ctx == NULL) {
1412             fprintf(stderr, "Cannot initialize the conversion context\n");
1413             exit(1);
1414         }
1415         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1416                   0, vp->height, pict.data, pict.linesize);
1417 #endif
1418         /* update the bitmap content */
1419         SDL_UnlockYUVOverlay(vp->bmp);
1420
1421         vp->pts = pts;
1422         vp->pos = pos;
1423
1424         /* now we can update the picture count */
1425         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1426             is->pictq_windex = 0;
1427         SDL_LockMutex(is->pictq_mutex);
1428         vp->target_clock= compute_target_time(vp->pts, is);
1429
1430         is->pictq_size++;
1431         SDL_UnlockMutex(is->pictq_mutex);
1432     }
1433     return 0;
1434 }
1435
1436 /**
1437  * Compute the exact PTS for the picture if it is omitted in the stream.
1438  * @param pts1 the dts of the packet / pts of the frame
1439  */
1440 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1441 {
1442     double frame_delay, pts;
1443
1444     pts = pts1;
1445
1446     if (pts != 0) {
1447         /* update video clock with pts, if present */
1448         is->video_clock = pts;
1449     } else {
1450         pts = is->video_clock;
1451     }
1452     /* update video clock for next frame */
1453     frame_delay = av_q2d(is->video_st->codec->time_base);
1454     /* for MPEG2, the frame can be repeated, so we update the
1455        clock accordingly */
1456     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1457     is->video_clock += frame_delay;
1458
1459     return queue_picture(is, src_frame, pts, pos);
1460 }
1461
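/* Fetch the next packet from the video queue and decode it. A flush packet
   (queued after a seek) resets the decoder and the pts-correction state.
   Returns 1 when a displayable frame was produced, 0 when no frame came out or
   it was skipped for frame dropping, and -1 if the queue was aborted. */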
1462 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1463 {
1464     int got_picture, i;
1465
1466     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1467         return -1;
1468
1469     if (pkt->data == flush_pkt.data) {
1470         avcodec_flush_buffers(is->video_st->codec);
1471
1472         SDL_LockMutex(is->pictq_mutex);
1473         // Make sure there are no long-delay timers (ideally we should just flush the queue, but that is harder)
1474         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1475             is->pictq[i].target_clock= 0;
1476         }
1477         while (is->pictq_size && !is->videoq.abort_request) {
1478             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1479         }
1480         is->video_current_pos = -1;
1481         SDL_UnlockMutex(is->pictq_mutex);
1482
1483         init_pts_correction(&is->pts_ctx);
1484         is->frame_last_pts = AV_NOPTS_VALUE;
1485         is->frame_last_delay = 0;
1486         is->frame_timer = (double)av_gettime() / 1000000.0;
1487         is->skip_frames = 1;
1488         is->skip_frames_index = 0;
1489         return 0;
1490     }
1491
1492     avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt);
1493
1494     if (got_picture) {
1495         if (decoder_reorder_pts == -1) {
1496             *pts = guess_correct_pts(&is->pts_ctx, frame->pkt_pts, frame->pkt_dts);
1497         } else if (decoder_reorder_pts) {
1498             *pts = frame->pkt_pts;
1499         } else {
1500             *pts = frame->pkt_dts;
1501         }
1502
1503         if (*pts == AV_NOPTS_VALUE) {
1504             *pts = 0;
1505         }
1506
1507         is->skip_frames_index += 1;
1508         if(is->skip_frames_index >= is->skip_frames){
1509             is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
1510             return 1;
1511         }
1512
1513     }
1514     return 0;
1515 }
1516
1517 #if CONFIG_AVFILTER
1518 typedef struct {
1519     VideoState *is;
1520     AVFrame *frame;
1521     int use_dr1;
1522 } FilterPriv;
1523
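/* get_buffer() callback used when the decoder supports direct rendering: the
   frame is decoded straight into a buffer obtained from the filter graph
   (with padding for the codec's edge requirements), avoiding an extra copy. */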
1524 static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
1525 {
1526     AVFilterContext *ctx = codec->opaque;
1527     AVFilterBufferRef  *ref;
1528     int perms = AV_PERM_WRITE;
1529     int i, w, h, stride[4];
1530     unsigned edge;
1531     int pixel_size;
1532
1533     if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
1534         perms |= AV_PERM_NEG_LINESIZES;
1535
1536     if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
1537         if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
1538         if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
1539         if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
1540     }
1541     if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;
1542
1543     w = codec->width;
1544     h = codec->height;
1545     avcodec_align_dimensions2(codec, &w, &h, stride);
1546     edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
1547     w += edge << 1;
1548     h += edge << 1;
1549
1550     if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
1551         return -1;
1552
1553     pixel_size = av_pix_fmt_descriptors[ref->format].comp[0].step_minus1+1;
1554     ref->video->w = codec->width;
1555     ref->video->h = codec->height;
1556     for(i = 0; i < 4; i ++) {
1557         unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
1558         unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;
1559
1560         if (ref->data[i]) {
1561             ref->data[i]    += ((edge * pixel_size) >> hshift) + ((edge * ref->linesize[i]) >> vshift);
1562         }
1563         pic->data[i]     = ref->data[i];
1564         pic->linesize[i] = ref->linesize[i];
1565     }
1566     pic->opaque = ref;
1567     pic->age    = INT_MAX;
1568     pic->type   = FF_BUFFER_TYPE_USER;
1569     pic->reordered_opaque = codec->reordered_opaque;
1570     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1571     else           pic->pkt_pts = AV_NOPTS_VALUE;
1572     return 0;
1573 }
1574
1575 static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
1576 {
1577     memset(pic->data, 0, sizeof(pic->data));
1578     avfilter_unref_buffer(pic->opaque);
1579 }
1580
1581 static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
1582 {
1583     AVFilterBufferRef *ref = pic->opaque;
1584
1585     if (pic->data[0] == NULL) {
1586         pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
1587         return codec->get_buffer(codec, pic);
1588     }
1589
1590     if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
1591         (codec->pix_fmt != ref->format)) {
1592         av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
1593         return -1;
1594     }
1595
1596     pic->reordered_opaque = codec->reordered_opaque;
1597     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1598     else           pic->pkt_pts = AV_NOPTS_VALUE;
1599     return 0;
1600 }
1601
1602 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1603 {
1604     FilterPriv *priv = ctx->priv;
1605     AVCodecContext *codec;
1606     if(!opaque) return -1;
1607
1608     priv->is = opaque;
1609     codec    = priv->is->video_st->codec;
1610     codec->opaque = ctx;
1611     if(codec->codec->capabilities & CODEC_CAP_DR1) {
1612         priv->use_dr1 = 1;
1613         codec->get_buffer     = input_get_buffer;
1614         codec->release_buffer = input_release_buffer;
1615         codec->reget_buffer   = input_reget_buffer;
1616         codec->thread_safe_callbacks = 1;
1617     }
1618
1619     priv->frame = avcodec_alloc_frame();
1620
1621     return 0;
1622 }
1623
1624 static void input_uninit(AVFilterContext *ctx)
1625 {
1626     FilterPriv *priv = ctx->priv;
1627     av_free(priv->frame);
1628 }
1629
1630 static int input_request_frame(AVFilterLink *link)
1631 {
1632     FilterPriv *priv = link->src->priv;
1633     AVFilterBufferRef *picref;
1634     int64_t pts = 0;
1635     AVPacket pkt;
1636     int ret;
1637
1638     while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
1639         av_free_packet(&pkt);
1640     if (ret < 0)
1641         return -1;
1642
1643     if(priv->use_dr1) {
1644         picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
1645     } else {
1646         picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
1647         av_image_copy(picref->data, picref->linesize,
1648                       priv->frame->data, priv->frame->linesize,
1649                       picref->format, link->w, link->h);
1650     }
1651     av_free_packet(&pkt);
1652
1653     picref->pts = pts;
1654     picref->pos = pkt.pos;
1655     picref->video->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio;
1656     avfilter_start_frame(link, picref);
1657     avfilter_draw_slice(link, 0, link->h, 1);
1658     avfilter_end_frame(link);
1659
1660     return 0;
1661 }
1662
1663 static int input_query_formats(AVFilterContext *ctx)
1664 {
1665     FilterPriv *priv = ctx->priv;
1666     enum PixelFormat pix_fmts[] = {
1667         priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1668     };
1669
1670     avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
1671     return 0;
1672 }
1673
1674 static int input_config_props(AVFilterLink *link)
1675 {
1676     FilterPriv *priv  = link->src->priv;
1677     AVCodecContext *c = priv->is->video_st->codec;
1678
1679     link->w = c->width;
1680     link->h = c->height;
1681     link->time_base = priv->is->video_st->time_base;
1682
1683     return 0;
1684 }
1685
1686 static AVFilter input_filter =
1687 {
1688     .name      = "avplay_input",
1689
1690     .priv_size = sizeof(FilterPriv),
1691
1692     .init      = input_init,
1693     .uninit    = input_uninit,
1694
1695     .query_formats = input_query_formats,
1696
1697     .inputs    = (AVFilterPad[]) {{ .name = NULL }},
1698     .outputs   = (AVFilterPad[]) {{ .name = "default",
1699                                     .type = AVMEDIA_TYPE_VIDEO,
1700                                     .request_frame = input_request_frame,
1701                                     .config_props  = input_config_props, },
1702                                   { .name = NULL }},
1703 };
1704
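/* build the video filter graph:
   avplay_input source -> optional user chain given with -vf -> ffsink */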
1705 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1706 {
1707     char sws_flags_str[128];
1708     int ret;
1709     FFSinkContext ffsink_ctx = { .pix_fmt = PIX_FMT_YUV420P };
1710     AVFilterContext *filt_src = NULL, *filt_out = NULL;
1711     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1712     graph->scale_sws_opts = av_strdup(sws_flags_str);
1713
1714     if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
1715                                             NULL, is, graph)) < 0)
1716         return ret;
1717     if ((ret = avfilter_graph_create_filter(&filt_out, &ffsink, "out",
1718                                             NULL, &ffsink_ctx, graph)) < 0)
1719         return ret;
1720
1721     if(vfilters) {
1722         AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
1723         AVFilterInOut *inputs  = av_malloc(sizeof(AVFilterInOut));
1724
1725         outputs->name    = av_strdup("in");
1726         outputs->filter_ctx = filt_src;
1727         outputs->pad_idx = 0;
1728         outputs->next    = NULL;
1729
1730         inputs->name    = av_strdup("out");
1731         inputs->filter_ctx = filt_out;
1732         inputs->pad_idx = 0;
1733         inputs->next    = NULL;
1734
1735         if ((ret = avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL)) < 0)
1736             return ret;
1737         av_freep(&vfilters);
1738     } else {
1739         if ((ret = avfilter_link(filt_src, 0, filt_out, 0)) < 0)
1740             return ret;
1741     }
1742
1743     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1744         return ret;
1745
1746     is->out_video_filter = filt_out;
1747
1748     return ret;
1749 }
1750
1751 #endif  /* CONFIG_AVFILTER */
1752
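/* video decoding thread: pulls frames (through the filter graph when
   avfilter is enabled), converts their pts to seconds and hands them
   to output_picture2() for display */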
1753 static int video_thread(void *arg)
1754 {
1755     VideoState *is = arg;
1756     AVFrame *frame= avcodec_alloc_frame();
1757     int64_t pts_int;
1758     double pts;
1759     int ret;
1760
1761 #if CONFIG_AVFILTER
1762     AVFilterGraph *graph = avfilter_graph_alloc();
1763     AVFilterContext *filt_out = NULL;
1764     int64_t pos;
1765     int last_w = is->video_st->codec->width;
1766     int last_h = is->video_st->codec->height;
1767
1768     if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1769         goto the_end;
1770     filt_out = is->out_video_filter;
1771 #endif
1772
1773     for(;;) {
1774 #if !CONFIG_AVFILTER
1775         AVPacket pkt;
1776 #else
1777         AVFilterBufferRef *picref;
1778         AVRational tb;
1779 #endif
1780         while (is->paused && !is->videoq.abort_request)
1781             SDL_Delay(10);
1782 #if CONFIG_AVFILTER
1783         if (   last_w != is->video_st->codec->width
1784             || last_h != is->video_st->codec->height) {
1785             av_dlog(NULL, "Changing size %dx%d -> %dx%d\n", last_w, last_h,
1786                     is->video_st->codec->width, is->video_st->codec->height);
1787             avfilter_graph_free(&graph);
1788             graph = avfilter_graph_alloc();
1789             if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1790                 goto the_end;
1791             filt_out = is->out_video_filter;
1792             last_w = is->video_st->codec->width;
1793             last_h = is->video_st->codec->height;
1794         }
1795         ret = get_filtered_video_frame(filt_out, frame, &picref, &tb);
1796         if (picref) {
1797             pts_int = picref->pts;
1798             pos     = picref->pos;
1799             frame->opaque = picref;
1800         }
1801
1802         if (av_cmp_q(tb, is->video_st->time_base)) {
1803             av_unused int64_t pts1 = pts_int;
1804             pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1805             av_dlog(NULL, "video_thread(): "
1806                     "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1807                     tb.num, tb.den, pts1,
1808                     is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1809         }
1810 #else
1811         ret = get_video_frame(is, frame, &pts_int, &pkt);
1812 #endif
1813
1814         if (ret < 0) goto the_end;
1815
1816         if (!ret)
1817             continue;
1818
1819         pts = pts_int*av_q2d(is->video_st->time_base);
1820
1821 #if CONFIG_AVFILTER
1822         ret = output_picture2(is, frame, pts, pos);
1823 #else
1824         ret = output_picture2(is, frame, pts,  pkt.pos);
1825         av_free_packet(&pkt);
1826 #endif
1827         if (ret < 0)
1828             goto the_end;
1829
1830         if (step)
1831             if (cur_stream)
1832                 stream_pause(cur_stream);
1833     }
1834  the_end:
1835 #if CONFIG_AVFILTER
1836     avfilter_graph_free(&graph);
1837 #endif
1838     av_free(frame);
1839     return 0;
1840 }
1841
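/* subtitle decoding thread: decodes queued subtitle packets and
   converts the palette of bitmap subtitles from RGBA to YUVA before
   making them available to the video refresh code */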
1842 static int subtitle_thread(void *arg)
1843 {
1844     VideoState *is = arg;
1845     SubPicture *sp;
1846     AVPacket pkt1, *pkt = &pkt1;
1847     int got_subtitle;
1848     double pts;
1849     int i, j;
1850     int r, g, b, y, u, v, a;
1851
1852     for(;;) {
1853         while (is->paused && !is->subtitleq.abort_request) {
1854             SDL_Delay(10);
1855         }
1856         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1857             break;
1858
1859         if(pkt->data == flush_pkt.data){
1860             avcodec_flush_buffers(is->subtitle_st->codec);
1861             continue;
1862         }
1863         SDL_LockMutex(is->subpq_mutex);
1864         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1865                !is->subtitleq.abort_request) {
1866             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1867         }
1868         SDL_UnlockMutex(is->subpq_mutex);
1869
1870         if (is->subtitleq.abort_request)
1871             return 0;
1872
1873         sp = &is->subpq[is->subpq_windex];
1874
1875         /* NOTE: pts is the PTS of the _first_ picture beginning in
1876            this packet, if any */
1877         pts = 0;
1878         if (pkt->pts != AV_NOPTS_VALUE)
1879             pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1880
1881         avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
1882                                  &got_subtitle, pkt);
1883
1884         if (got_subtitle && sp->sub.format == 0) {
1885             sp->pts = pts;
1886
1887             for (i = 0; i < sp->sub.num_rects; i++)
1888             {
1889                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1890                 {
1891                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1892                     y = RGB_TO_Y_CCIR(r, g, b);
1893                     u = RGB_TO_U_CCIR(r, g, b, 0);
1894                     v = RGB_TO_V_CCIR(r, g, b, 0);
1895                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1896                 }
1897             }
1898
1899             /* now we can update the picture count */
1900             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1901                 is->subpq_windex = 0;
1902             SDL_LockMutex(is->subpq_mutex);
1903             is->subpq_size++;
1904             SDL_UnlockMutex(is->subpq_mutex);
1905         }
1906         av_free_packet(pkt);
1907     }
1908     return 0;
1909 }
1910
1911 /* copy samples into the array used for the audio wave/spectrum display */
1912 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1913 {
1914     int size, len;
1915
1916     size = samples_size / sizeof(short);
1917     while (size > 0) {
1918         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1919         if (len > size)
1920             len = size;
1921         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1922         samples += len;
1923         is->sample_array_index += len;
1924         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1925             is->sample_array_index = 0;
1926         size -= len;
1927     }
1928 }
1929
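/* Illustrative example of the correction below: with 16-bit stereo
   audio (n = 4 bytes per sample frame) at 48 kHz and an A-V drift of
   +0.01 s, the wanted size grows by about 0.01 * 48000 * 4 = 1920
   bytes, clamped to +/- SAMPLE_CORRECTION_PERCENT_MAX percent of the
   buffer. */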
1930 /* return the new audio buffer size (samples can be added or deleted
1931    to get better sync if the video or external clock is the master) */
1932 static int synchronize_audio(VideoState *is, short *samples,
1933                              int samples_size1, double pts)
1934 {
1935     int n, samples_size;
1936     double ref_clock;
1937
1938     n = 2 * is->audio_st->codec->channels;
1939     samples_size = samples_size1;
1940
1941     /* if not master, then we try to remove or add samples to correct the clock */
1942     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1943          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1944         double diff, avg_diff;
1945         int wanted_size, min_size, max_size, nb_samples;
1946
1947         ref_clock = get_master_clock(is);
1948         diff = get_audio_clock(is) - ref_clock;
1949
1950         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1951             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1952             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1953                 /* not enough measures to have a correct estimate */
1954                 is->audio_diff_avg_count++;
1955             } else {
1956                 /* estimate the A-V difference */
1957                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1958
1959                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1960                     wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1961                     nb_samples = samples_size / n;
1962
1963                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1964                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1965                     if (wanted_size < min_size)
1966                         wanted_size = min_size;
1967                     else if (wanted_size > max_size)
1968                         wanted_size = max_size;
1969
1970                     /* add or remove samples to correct the sync */
1971                     if (wanted_size < samples_size) {
1972                         /* remove samples */
1973                         samples_size = wanted_size;
1974                     } else if (wanted_size > samples_size) {
1975                         uint8_t *samples_end, *q;
1976                         int nb;
1977
1978                         /* add samples */
1979                         nb = wanted_size - samples_size;
1980                         samples_end = (uint8_t *)samples + samples_size - n;
1981                         q = samples_end + n;
1982                         while (nb > 0) {
1983                             memcpy(q, samples_end, n);
1984                             q += n;
1985                             nb -= n;
1986                         }
1987                         samples_size = wanted_size;
1988                     }
1989                 }
1990                 av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1991                         diff, avg_diff, samples_size - samples_size1,
1992                         is->audio_clock, is->video_clock, is->audio_diff_threshold);
1993             }
1994         } else {
1995             /* too big a difference: probably initial PTS errors, so
1996                reset the A-V filter */
1997             is->audio_diff_avg_count = 0;
1998             is->audio_diff_cum = 0;
1999         }
2000     }
2001
2002     return samples_size;
2003 }
2004
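/* Illustrative example of the audio clock bookkeeping below: 4096
   bytes of decoded 16-bit stereo at 44100 Hz advance the clock by
   4096 / (2 * 2 * 44100) ~= 23.2 ms. */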
2005 /* decode one audio frame and return its uncompressed size */
2006 static int audio_decode_frame(VideoState *is, double *pts_ptr)
2007 {
2008     AVPacket *pkt_temp = &is->audio_pkt_temp;
2009     AVPacket *pkt = &is->audio_pkt;
2010     AVCodecContext *dec= is->audio_st->codec;
2011     int n, len1, data_size;
2012     double pts;
2013     int new_packet = 0;
2014     int flush_complete = 0;
2015
2016     for(;;) {
2017         /* NOTE: the audio packet can contain several frames */
2018         while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet)) {
2019             if (flush_complete)
2020                 break;
2021             new_packet = 0;
2022             data_size = sizeof(is->audio_buf1);
2023             len1 = avcodec_decode_audio3(dec,
2024                                         (int16_t *)is->audio_buf1, &data_size,
2025                                         pkt_temp);
2026             if (len1 < 0) {
2027                 /* if error, we skip the frame */
2028                 pkt_temp->size = 0;
2029                 break;
2030             }
2031
2032             pkt_temp->data += len1;
2033             pkt_temp->size -= len1;
2034
2035             if (data_size <= 0) {
2036                 /* stop sending empty packets if the decoder is finished */
2037                 if (!pkt_temp->data && dec->codec->capabilities & CODEC_CAP_DELAY)
2038                     flush_complete = 1;
2039                 continue;
2040             }
2041
2042             if (dec->sample_fmt != is->audio_src_fmt) {
2043                 if (is->reformat_ctx)
2044                     av_audio_convert_free(is->reformat_ctx);
2045                 is->reformat_ctx= av_audio_convert_alloc(AV_SAMPLE_FMT_S16, 1,
2046                                                          dec->sample_fmt, 1, NULL, 0);
2047                 if (!is->reformat_ctx) {
2048                     fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
2049                         av_get_sample_fmt_name(dec->sample_fmt),
2050                         av_get_sample_fmt_name(AV_SAMPLE_FMT_S16));
2051                         break;
2052                 }
2053                 is->audio_src_fmt= dec->sample_fmt;
2054             }
2055
2056             if (is->reformat_ctx) {
2057                 const void *ibuf[6]= {is->audio_buf1};
2058                 void *obuf[6]= {is->audio_buf2};
2059                 int istride[6]= {av_get_bytes_per_sample(dec->sample_fmt)};
2060                 int ostride[6]= {2};
2061                 int len= data_size/istride[0];
2062                 if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
2063                     printf("av_audio_convert() failed\n");
2064                     break;
2065                 }
2066                 is->audio_buf= is->audio_buf2;
2067                 /* FIXME: the existing code assumes that data_size equals framesize*channels*2;
2068                           remove this legacy cruft */
2069                 data_size= len*2;
2070             }else{
2071                 is->audio_buf= is->audio_buf1;
2072             }
2073
2074             /* if no pts, then compute it */
2075             pts = is->audio_clock;
2076             *pts_ptr = pts;
2077             n = 2 * dec->channels;
2078             is->audio_clock += (double)data_size /
2079                 (double)(n * dec->sample_rate);
2080 #ifdef DEBUG
2081             {
2082                 static double last_clock;
2083                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
2084                        is->audio_clock - last_clock,
2085                        is->audio_clock, pts);
2086                 last_clock = is->audio_clock;
2087             }
2088 #endif
2089             return data_size;
2090         }
2091
2092         /* free the current packet */
2093         if (pkt->data)
2094             av_free_packet(pkt);
2095
2096         if (is->paused || is->audioq.abort_request) {
2097             return -1;
2098         }
2099
2100         /* read next packet */
2101         if ((new_packet = packet_queue_get(&is->audioq, pkt, 1)) < 0)
2102             return -1;
2103
2104         if (pkt->data == flush_pkt.data)
2105             avcodec_flush_buffers(dec);
2106
2107         pkt_temp->data = pkt->data;
2108         pkt_temp->size = pkt->size;
2109
2110         /* update the audio clock with the packet pts, if available */
2111         if (pkt->pts != AV_NOPTS_VALUE) {
2112             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2113         }
2114     }
2115 }
2116
2117 /* prepare a new audio buffer */
2118 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2119 {
2120     VideoState *is = opaque;
2121     int audio_size, len1;
2122     double pts;
2123
2124     audio_callback_time = av_gettime();
2125
2126     while (len > 0) {
2127         if (is->audio_buf_index >= is->audio_buf_size) {
2128            audio_size = audio_decode_frame(is, &pts);
2129            if (audio_size < 0) {
2130                 /* if error, just output silence */
2131                is->audio_buf = is->audio_buf1;
2132                is->audio_buf_size = 1024;
2133                memset(is->audio_buf, 0, is->audio_buf_size);
2134            } else {
2135                if (is->show_audio)
2136                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2137                audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2138                                               pts);
2139                is->audio_buf_size = audio_size;
2140            }
2141            is->audio_buf_index = 0;
2142         }
2143         len1 = is->audio_buf_size - is->audio_buf_index;
2144         if (len1 > len)
2145             len1 = len;
2146         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2147         len -= len1;
2148         stream += len1;
2149         is->audio_buf_index += len1;
2150     }
2151 }
2152
2153 /* open a given stream. Return 0 if OK */
2154 static int stream_component_open(VideoState *is, int stream_index)
2155 {
2156     AVFormatContext *ic = is->ic;
2157     AVCodecContext *avctx;
2158     AVCodec *codec;
2159     SDL_AudioSpec wanted_spec, spec;
2160     AVDictionary *opts;
2161     AVDictionaryEntry *t = NULL;
2162
2163     if (stream_index < 0 || stream_index >= ic->nb_streams)
2164         return -1;
2165     avctx = ic->streams[stream_index]->codec;
2166
2167     opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index]);
2168
2169     /* request at most two decoded audio channels */
2170     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2171         if (avctx->channels > 0) {
2172             avctx->request_channels = FFMIN(2, avctx->channels);
2173         } else {
2174             avctx->request_channels = 2;
2175         }
2176     }
2177
2178     codec = avcodec_find_decoder(avctx->codec_id);
2179     avctx->debug_mv = debug_mv;
2180     avctx->debug = debug;
2181     avctx->workaround_bugs = workaround_bugs;
2182     avctx->lowres = lowres;
2183     if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2184     avctx->idct_algo= idct;
2185     if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2186     avctx->skip_frame= skip_frame;
2187     avctx->skip_idct= skip_idct;
2188     avctx->skip_loop_filter= skip_loop_filter;
2189     avctx->error_recognition= error_recognition;
2190     avctx->error_concealment= error_concealment;
2191     avctx->thread_count= thread_count;
2192
2193     if (!codec ||
2194         avcodec_open2(avctx, codec, &opts) < 0)
2195         return -1;
2196     if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2197         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2198         return AVERROR_OPTION_NOT_FOUND;
2199     }
2200
2201     /* prepare audio output */
2202     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2203         wanted_spec.freq = avctx->sample_rate;
2204         wanted_spec.format = AUDIO_S16SYS;
2205         wanted_spec.channels = avctx->channels;
2206         wanted_spec.silence = 0;
2207         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2208         wanted_spec.callback = sdl_audio_callback;
2209         wanted_spec.userdata = is;
2210         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2211             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2212             return -1;
2213         }
2214         is->audio_hw_buf_size = spec.size;
2215         is->audio_src_fmt= AV_SAMPLE_FMT_S16;
2216     }
2217
2218     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2219     switch(avctx->codec_type) {
2220     case AVMEDIA_TYPE_AUDIO:
2221         is->audio_stream = stream_index;
2222         is->audio_st = ic->streams[stream_index];
2223         is->audio_buf_size = 0;
2224         is->audio_buf_index = 0;
2225
2226         /* init averaging filter */
2227         is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2228         is->audio_diff_avg_count = 0;
2229         /* since we do not have a precise enough audio FIFO fullness measure,
2230            we correct audio sync only if the error is larger than this threshold */
2231         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
2232
2233         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2234         packet_queue_init(&is->audioq);
2235         SDL_PauseAudio(0);
2236         break;
2237     case AVMEDIA_TYPE_VIDEO:
2238         is->video_stream = stream_index;
2239         is->video_st = ic->streams[stream_index];
2240
2241         packet_queue_init(&is->videoq);
2242         is->video_tid = SDL_CreateThread(video_thread, is);
2243         break;
2244     case AVMEDIA_TYPE_SUBTITLE:
2245         is->subtitle_stream = stream_index;
2246         is->subtitle_st = ic->streams[stream_index];
2247         packet_queue_init(&is->subtitleq);
2248
2249         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2250         break;
2251     default:
2252         break;
2253     }
2254     return 0;
2255 }
2256
2257 static void stream_component_close(VideoState *is, int stream_index)
2258 {
2259     AVFormatContext *ic = is->ic;
2260     AVCodecContext *avctx;
2261
2262     if (stream_index < 0 || stream_index >= ic->nb_streams)
2263         return;
2264     avctx = ic->streams[stream_index]->codec;
2265
2266     switch(avctx->codec_type) {
2267     case AVMEDIA_TYPE_AUDIO:
2268         packet_queue_abort(&is->audioq);
2269
2270         SDL_CloseAudio();
2271
2272         packet_queue_end(&is->audioq);
2273         av_free_packet(&is->audio_pkt);
2274         if (is->reformat_ctx)
2275             av_audio_convert_free(is->reformat_ctx);
2276         is->reformat_ctx = NULL;
2277
2278         if (is->rdft) {
2279             av_rdft_end(is->rdft);
2280             av_freep(&is->rdft_data);
2281         }
2282         break;
2283     case AVMEDIA_TYPE_VIDEO:
2284         packet_queue_abort(&is->videoq);
2285
2286         /* note: we also signal this condition to make sure we unblock the
2287            video thread in all cases */
2288         SDL_LockMutex(is->pictq_mutex);
2289         SDL_CondSignal(is->pictq_cond);
2290         SDL_UnlockMutex(is->pictq_mutex);
2291
2292         SDL_WaitThread(is->video_tid, NULL);
2293
2294         packet_queue_end(&is->videoq);
2295         break;
2296     case AVMEDIA_TYPE_SUBTITLE:
2297         packet_queue_abort(&is->subtitleq);
2298
2299         /* note: we also signal this condition to make sure we unblock the
2300            subtitle thread in all cases */
2301         SDL_LockMutex(is->subpq_mutex);
2302         is->subtitle_stream_changed = 1;
2303
2304         SDL_CondSignal(is->subpq_cond);
2305         SDL_UnlockMutex(is->subpq_mutex);
2306
2307         SDL_WaitThread(is->subtitle_tid, NULL);
2308
2309         packet_queue_end(&is->subtitleq);
2310         break;
2311     default:
2312         break;
2313     }
2314
2315     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2316     avcodec_close(avctx);
2317     switch(avctx->codec_type) {
2318     case AVMEDIA_TYPE_AUDIO:
2319         is->audio_st = NULL;
2320         is->audio_stream = -1;
2321         break;
2322     case AVMEDIA_TYPE_VIDEO:
2323         is->video_st = NULL;
2324         is->video_stream = -1;
2325         break;
2326     case AVMEDIA_TYPE_SUBTITLE:
2327         is->subtitle_st = NULL;
2328         is->subtitle_stream = -1;
2329         break;
2330     default:
2331         break;
2332     }
2333 }
2334
2335 /* since we have only one decoding thread, we can use a global
2336    variable instead of a thread local variable */
2337 static VideoState *global_video_state;
2338
2339 static int decode_interrupt_cb(void)
2340 {
2341     return (global_video_state && global_video_state->abort_request);
2342 }
2343
2344 /* this thread gets the stream from the disk or the network */
2345 static int decode_thread(void *arg)
2346 {
2347     VideoState *is = arg;
2348     AVFormatContext *ic = NULL;
2349     int err, i, ret;
2350     int st_index[AVMEDIA_TYPE_NB];
2351     AVPacket pkt1, *pkt = &pkt1;
2352     int eof=0;
2353     int pkt_in_play_range = 0;
2354     AVDictionaryEntry *t;
2355     AVDictionary **opts;
2356     int orig_nb_streams;
2357
2358     memset(st_index, -1, sizeof(st_index));
2359     is->video_stream = -1;
2360     is->audio_stream = -1;
2361     is->subtitle_stream = -1;
2362
2363     global_video_state = is;
2364     avio_set_interrupt_cb(decode_interrupt_cb);
2365
2366     err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2367     if (err < 0) {
2368         print_error(is->filename, err);
2369         ret = -1;
2370         goto fail;
2371     }
2372     if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2373         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2374         ret = AVERROR_OPTION_NOT_FOUND;
2375         goto fail;
2376     }
2377     is->ic = ic;
2378
2379     if(genpts)
2380         ic->flags |= AVFMT_FLAG_GENPTS;
2381
2382     opts = setup_find_stream_info_opts(ic, codec_opts);
2383     orig_nb_streams = ic->nb_streams;
2384
2385     err = avformat_find_stream_info(ic, opts);
2386     if (err < 0) {
2387         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2388         ret = -1;
2389         goto fail;
2390     }
2391     for (i = 0; i < orig_nb_streams; i++)
2392         av_dict_free(&opts[i]);
2393     av_freep(&opts);
2394
2395     if(ic->pb)
2396         ic->pb->eof_reached= 0; //FIXME hack, avplay maybe should not use url_feof() to test for the end
2397
2398     if(seek_by_bytes<0)
2399         seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2400
2401     /* if a start position was requested, seek to it */
2402     if (start_time != AV_NOPTS_VALUE) {
2403         int64_t timestamp;
2404
2405         timestamp = start_time;
2406         /* add the stream start time */
2407         if (ic->start_time != AV_NOPTS_VALUE)
2408             timestamp += ic->start_time;
2409         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2410         if (ret < 0) {
2411             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2412                     is->filename, (double)timestamp / AV_TIME_BASE);
2413         }
2414     }
2415
2416     for (i = 0; i < ic->nb_streams; i++)
2417         ic->streams[i]->discard = AVDISCARD_ALL;
2418     if (!video_disable)
2419         st_index[AVMEDIA_TYPE_VIDEO] =
2420             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2421                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2422     if (!audio_disable)
2423         st_index[AVMEDIA_TYPE_AUDIO] =
2424             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2425                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2426                                 st_index[AVMEDIA_TYPE_VIDEO],
2427                                 NULL, 0);
2428     if (!video_disable)
2429         st_index[AVMEDIA_TYPE_SUBTITLE] =
2430             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2431                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2432                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2433                                  st_index[AVMEDIA_TYPE_AUDIO] :
2434                                  st_index[AVMEDIA_TYPE_VIDEO]),
2435                                 NULL, 0);
2436     if (show_status) {
2437         av_dump_format(ic, 0, is->filename, 0);
2438     }
2439
2440     /* open the streams */
2441     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2442         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2443     }
2444
2445     ret=-1;
2446     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2447         ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2448     }
2449     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2450     if(ret<0) {
2451         if (!display_disable)
2452             is->show_audio = 2;
2453     }
2454
2455     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2456         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2457     }
2458
2459     if (is->video_stream < 0 && is->audio_stream < 0) {
2460         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2461         ret = -1;
2462         goto fail;
2463     }
2464
2465     for(;;) {
2466         if (is->abort_request)
2467             break;
2468         if (is->paused != is->last_paused) {
2469             is->last_paused = is->paused;
2470             if (is->paused)
2471                 is->read_pause_return= av_read_pause(ic);
2472             else
2473                 av_read_play(ic);
2474         }
2475 #if CONFIG_RTSP_DEMUXER
2476         if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2477             /* wait 10 ms to avoid trying to get another packet */
2478             /* XXX: horrible */
2479             SDL_Delay(10);
2480             continue;
2481         }
2482 #endif
2483         if (is->seek_req) {
2484             int64_t seek_target= is->seek_pos;
2485             int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2486             int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2487 //FIXME the +-2 is because rounding is not done in the correct direction when generating
2488 //      the seek_pos/seek_rel variables
2489
2490             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2491             if (ret < 0) {
2492                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2493             }else{
2494                 if (is->audio_stream >= 0) {
2495                     packet_queue_flush(&is->audioq);
2496                     packet_queue_put(&is->audioq, &flush_pkt);
2497                 }
2498                 if (is->subtitle_stream >= 0) {
2499                     packet_queue_flush(&is->subtitleq);
2500                     packet_queue_put(&is->subtitleq, &flush_pkt);
2501                 }
2502                 if (is->video_stream >= 0) {
2503                     packet_queue_flush(&is->videoq);
2504                     packet_queue_put(&is->videoq, &flush_pkt);
2505                 }
2506             }
2507             is->seek_req = 0;
2508             eof= 0;
2509         }
2510
2511         /* if the queues are full, no need to read more */
2512         if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2513             || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
2514                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
2515                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
2516             /* wait 10 ms */
2517             SDL_Delay(10);
2518             continue;
2519         }
2520         if(eof) {
2521             if(is->video_stream >= 0){
2522                 av_init_packet(pkt);
2523                 pkt->data=NULL;
2524                 pkt->size=0;
2525                 pkt->stream_index= is->video_stream;
2526                 packet_queue_put(&is->videoq, pkt);
2527             }
2528             if (is->audio_stream >= 0 &&
2529                 is->audio_st->codec->codec->capabilities & CODEC_CAP_DELAY) {
2530                 av_init_packet(pkt);
2531                 pkt->data = NULL;
2532                 pkt->size = 0;
2533                 pkt->stream_index = is->audio_stream;
2534                 packet_queue_put(&is->audioq, pkt);
2535             }
2536             SDL_Delay(10);
2537             if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
2538                 if(loop!=1 && (!loop || --loop)){
2539                     stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2540                 }else if(autoexit){
2541                     ret=AVERROR_EOF;
2542                     goto fail;
2543                 }
2544             }
2545             continue;
2546         }
2547         ret = av_read_frame(ic, pkt);
2548         if (ret < 0) {
2549             if (ret == AVERROR_EOF || (ic->pb && ic->pb->eof_reached))
2550                 eof=1;
2551             if (ic->pb && ic->pb->error)
2552                 break;
2553             SDL_Delay(100); /* wait for user event */
2554             continue;
2555         }
2556         /* check if packet is in play range specified by user, then queue, otherwise discard */
2557         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2558                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2559                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2560                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
2561                 <= ((double)duration/1000000);
2562         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2563             packet_queue_put(&is->audioq, pkt);
2564         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2565             packet_queue_put(&is->videoq, pkt);
2566         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2567             packet_queue_put(&is->subtitleq, pkt);
2568         } else {
2569             av_free_packet(pkt);
2570         }
2571     }
2572     /* wait until the end */
2573     while (!is->abort_request) {
2574         SDL_Delay(100);
2575     }
2576
2577     ret = 0;
2578  fail:
2579     /* disable interrupting */
2580     global_video_state = NULL;
2581
2582     /* close each stream */
2583     if (is->audio_stream >= 0)
2584         stream_component_close(is, is->audio_stream);
2585     if (is->video_stream >= 0)
2586         stream_component_close(is, is->video_stream);
2587     if (is->subtitle_stream >= 0)
2588         stream_component_close(is, is->subtitle_stream);
2589     if (is->ic) {
2590         av_close_input_file(is->ic);
2591         is->ic = NULL; /* safety */
2592     }
2593     avio_set_interrupt_cb(NULL);
2594
2595     if (ret != 0) {
2596         SDL_Event event;
2597
2598         event.type = FF_QUIT_EVENT;
2599         event.user.data1 = is;
2600         SDL_PushEvent(&event);
2601     }
2602     return 0;
2603 }
2604
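/* allocate a VideoState for the given input, create the picture and
   subpicture queue synchronization primitives and start the decode
   thread */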
2605 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2606 {
2607     VideoState *is;
2608
2609     is = av_mallocz(sizeof(VideoState));
2610     if (!is)
2611         return NULL;
2612     av_strlcpy(is->filename, filename, sizeof(is->filename));
2613     is->iformat = iformat;
2614     is->ytop = 0;
2615     is->xleft = 0;
2616
2617     /* start video display */
2618     is->pictq_mutex = SDL_CreateMutex();
2619     is->pictq_cond = SDL_CreateCond();
2620
2621     is->subpq_mutex = SDL_CreateMutex();
2622     is->subpq_cond = SDL_CreateCond();
2623
2624     is->av_sync_type = av_sync_type;
2625     is->parse_tid = SDL_CreateThread(decode_thread, is);
2626     if (!is->parse_tid) {
2627         av_free(is);
2628         return NULL;
2629     }
2630     return is;
2631 }
2632
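/* switch to the next stream of the given type, wrapping around and
   skipping streams with unusable parameters */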
2633 static void stream_cycle_channel(VideoState *is, int codec_type)
2634 {
2635     AVFormatContext *ic = is->ic;
2636     int start_index, stream_index;
2637     AVStream *st;
2638
2639     if (codec_type == AVMEDIA_TYPE_VIDEO)
2640         start_index = is->video_stream;
2641     else if (codec_type == AVMEDIA_TYPE_AUDIO)
2642         start_index = is->audio_stream;
2643     else
2644         start_index = is->subtitle_stream;
2645     if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2646         return;
2647     stream_index = start_index;
2648     for(;;) {
2649         if (++stream_index >= is->ic->nb_streams)
2650         {
2651             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2652             {
2653                 stream_index = -1;
2654                 goto the_end;
2655             } else
2656                 stream_index = 0;
2657         }
2658         if (stream_index == start_index)
2659             return;
2660         st = ic->streams[stream_index];
2661         if (st->codec->codec_type == codec_type) {
2662             /* check that parameters are OK */
2663             switch(codec_type) {
2664             case AVMEDIA_TYPE_AUDIO:
2665                 if (st->codec->sample_rate != 0 &&
2666                     st->codec->channels != 0)
2667                     goto the_end;
2668                 break;
2669             case AVMEDIA_TYPE_VIDEO:
2670             case AVMEDIA_TYPE_SUBTITLE:
2671                 goto the_end;
2672             default:
2673                 break;
2674             }
2675         }
2676     }
2677  the_end:
2678     stream_component_close(is, start_index);
2679     stream_component_open(is, stream_index);
2680 }
2681
2682
2683 static void toggle_full_screen(void)
2684 {
2685     is_full_screen = !is_full_screen;
2686 #if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
2687     /* on OS X the queued pictures must be reallocated after the mode switch */
2688     for (int i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
2689         cur_stream->pictq[i].reallocate = 1;
2690     }
2691 #endif
2692     video_open(cur_stream);
2693 }
2694
2695 static void toggle_pause(void)
2696 {
2697     if (cur_stream)
2698         stream_pause(cur_stream);
2699     step = 0;
2700 }
2701
2702 static void step_to_next_frame(void)
2703 {
2704     if (cur_stream) {
2705         /* if the stream is paused, unpause it, then step */
2706         if (cur_stream->paused)
2707             stream_pause(cur_stream);
2708     }
2709     step = 1;
2710 }
2711
2712 static void toggle_audio_display(void)
2713 {
2714     if (cur_stream) {
2715         int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2716         cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2717         fill_rectangle(screen,
2718                     cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2719                     bgcolor);
2720         SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2721     }
2722 }
2723
2724 /* handle an event sent by the GUI */
2725 static void event_loop(void)
2726 {
2727     SDL_Event event;
2728     double incr, pos, frac;
2729
2730     for(;;) {
2731         double x;
2732         SDL_WaitEvent(&event);
2733         switch(event.type) {
2734         case SDL_KEYDOWN:
2735             if (exit_on_keydown) {
2736                 do_exit();
2737                 break;
2738             }
2739             switch(event.key.keysym.sym) {
2740             case SDLK_ESCAPE:
2741             case SDLK_q:
2742                 do_exit();
2743                 break;
2744             case SDLK_f:
2745                 toggle_full_screen();
2746                 break;
2747             case SDLK_p:
2748             case SDLK_SPACE:
2749                 toggle_pause();
2750                 break;
2751             case SDLK_s: //S: Step to next frame
2752                 step_to_next_frame();
2753                 break;
2754             case SDLK_a:
2755                 if (cur_stream)
2756                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2757                 break;
2758             case SDLK_v:
2759                 if (cur_stream)
2760                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2761                 break;
2762             case SDLK_t:
2763                 if (cur_stream)
2764                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2765                 break;
2766             case SDLK_w:
2767                 toggle_audio_display();
2768                 break;
2769             case SDLK_LEFT:
2770                 incr = -10.0;
2771                 goto do_seek;
2772             case SDLK_RIGHT:
2773                 incr = 10.0;
2774                 goto do_seek;
2775             case SDLK_UP:
2776                 incr = 60.0;
2777                 goto do_seek;
2778             case SDLK_DOWN:
2779                 incr = -60.0;
2780             do_seek:
2781                 if (cur_stream) {
2782                     if (seek_by_bytes) {
2783                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
2784                             pos= cur_stream->video_current_pos;
2785                         }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
2786                             pos= cur_stream->audio_pkt.pos;
2787                         }else
2788                             pos = avio_tell(cur_stream->ic->pb);
2789                         if (cur_stream->ic->bit_rate)
2790                             incr *= cur_stream->ic->bit_rate / 8.0;
2791                         else
2792                             incr *= 180000.0;
2793                         pos += incr;
2794                         stream_seek(cur_stream, pos, incr, 1);
2795                     } else {
2796                         pos = get_master_clock(cur_stream);
2797                         pos += incr;
2798                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2799                     }
2800                 }
2801                 break;
2802             default:
2803                 break;
2804             }
2805             break;
2806         case SDL_MOUSEBUTTONDOWN:
2807             if (exit_on_mousedown) {
2808                 do_exit();
2809                 break;
2810             }
2811         case SDL_MOUSEMOTION:
2812             if(event.type ==SDL_MOUSEBUTTONDOWN){
2813                 x= event.button.x;
2814             }else{
2815                 if(event.motion.state != SDL_PRESSED)
2816                     break;
2817                 x= event.motion.x;
2818             }
2819             if (cur_stream) {
2820                 if(seek_by_bytes || cur_stream->ic->duration<=0){
2821                     uint64_t size=  avio_size(cur_stream->ic->pb);
2822                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2823                 }else{
2824                     int64_t ts;
2825                     int ns, hh, mm, ss;
2826                     int tns, thh, tmm, tss;
2827                     tns = cur_stream->ic->duration/1000000LL;
2828                     thh = tns/3600;
2829                     tmm = (tns%3600)/60;
2830                     tss = (tns%60);
2831                     frac = x/cur_stream->width;
2832                     ns = frac*tns;
2833                     hh = ns/3600;
2834                     mm = (ns%3600)/60;
2835                     ss = (ns%60);
2836                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2837                             hh, mm, ss, thh, tmm, tss);
2838                     ts = frac*cur_stream->ic->duration;
2839                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2840                         ts += cur_stream->ic->start_time;
2841                     stream_seek(cur_stream, ts, 0, 0);
2842                 }
2843             }
2844             break;
2845         case SDL_VIDEORESIZE:
2846             if (cur_stream) {
2847                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2848                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2849                 screen_width = cur_stream->width = event.resize.w;
2850                 screen_height= cur_stream->height= event.resize.h;
2851             }
2852             break;
2853         case SDL_QUIT:
2854         case FF_QUIT_EVENT:
2855             do_exit();
2856             break;
2857         case FF_ALLOC_EVENT:
2858             video_open(event.user.data1);
2859             alloc_picture(event.user.data1);
2860             break;
2861         case FF_REFRESH_EVENT:
2862             video_refresh_timer(event.user.data1);
2863             cur_stream->refresh=0;
2864             break;
2865         default:
2866             break;
2867         }
2868     }
2869 }
2870
2871 static int opt_frame_size(const char *opt, const char *arg)
2872 {
2873     av_log(NULL, AV_LOG_ERROR,
2874            "Option '%s' has been removed, use private format options instead\n", opt);
2875     return AVERROR(EINVAL);
2876 }
2877
2878 static int opt_width(const char *opt, const char *arg)
2879 {
2880     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2881     return 0;
2882 }
2883
2884 static int opt_height(const char *opt, const char *arg)
2885 {
2886     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2887     return 0;
2888 }
2889
2890 static int opt_format(const char *opt, const char *arg)
2891 {
2892     file_iformat = av_find_input_format(arg);
2893     if (!file_iformat) {
2894         fprintf(stderr, "Unknown input format: %s\n", arg);
2895         return AVERROR(EINVAL);
2896     }
2897     return 0;
2898 }
2899
2900 static int opt_frame_pix_fmt(const char *opt, const char *arg)
2901 {
2902     av_log(NULL, AV_LOG_ERROR,
2903            "Option '%s' has been removed, use private format options instead\n", opt);
2904     return AVERROR(EINVAL);
2905 }
2906
2907 static int opt_sync(const char *opt, const char *arg)
2908 {
2909     if (!strcmp(arg, "audio"))
2910         av_sync_type = AV_SYNC_AUDIO_MASTER;
2911     else if (!strcmp(arg, "video"))
2912         av_sync_type = AV_SYNC_VIDEO_MASTER;
2913     else if (!strcmp(arg, "ext"))
2914         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2915     else {
2916         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2917         exit(1);
2918     }
2919     return 0;
2920 }
2921
2922 static int opt_seek(const char *opt, const char *arg)
2923 {
2924     start_time = parse_time_or_die(opt, arg, 1);
2925     return 0;
2926 }
2927
2928 static int opt_duration(const char *opt, const char *arg)
2929 {
2930     duration = parse_time_or_die(opt, arg, 1);
2931     return 0;
2932 }
2933
2934 static int opt_debug(const char *opt, const char *arg)
2935 {
2936     av_log_set_level(99);
2937     debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2938     return 0;
2939 }
2940
2941 static int opt_vismv(const char *opt, const char *arg)
2942 {
2943     debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
2944     return 0;
2945 }
2946
2947 static int opt_thread_count(const char *opt, const char *arg)
2948 {
2949     thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2950 #if !HAVE_THREADS
2951     fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2952 #endif
2953     return 0;
2954 }
2955
2956 static const OptionDef options[] = {
2957 #include "cmdutils_common_opts.h"
2958     { "x", HAS_ARG, {(void*)opt_width}, "force displayed width", "width" },
2959     { "y", HAS_ARG, {(void*)opt_height}, "force displayed height", "height" },
2960     { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
2961     { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
2962     { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
2963     { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
2964     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
2965     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
2966     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
2967     { "ss", HAS_ARG, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
2968     { "t", HAS_ARG, {(void*)&opt_duration}, "play \"duration\" seconds of audio/video", "duration" },
2969     { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
2970     { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
2971     { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
2972     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
2973     { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
2974     { "debug", HAS_ARG | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
2975     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
2976     { "vismv", HAS_ARG | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
2977     { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
2978     { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
2979     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
2980     { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
2981     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
2982     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
2983     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
2984     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
2985     { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
2986     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
2987     { "sync", HAS_ARG | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
2988     { "threads", HAS_ARG | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
2989     { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
2990     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
2991     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
2992     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
2993     { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
2994     { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
2995 #if CONFIG_AVFILTER
2996     { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
2997 #endif
2998     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
2999     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
3000     { "i", 0, {NULL}, "ffmpeg compatibility dummy option", ""},
3001     { NULL, },
3002 };
3003
3004 static void show_usage(void)
3005 {
3006     printf("Simple media player\n");
3007     printf("usage: %s [options] input_file\n", program_name);
3008     printf("\n");
3009 }
3010
3011 static void show_help(void)
3012 {
3013     av_log_set_callback(log_callback_help);
3014     show_usage();
3015     show_help_options(options, "Main options:\n",
3016                       OPT_EXPERT, 0);
3017     show_help_options(options, "\nAdvanced options:\n",
3018                       OPT_EXPERT, OPT_EXPERT);
3019     printf("\n");
3020     show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3021     show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3022 #if !CONFIG_AVFILTER
3023     show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
3024 #endif
3025     printf("\nWhile playing:\n"
3026            "q, ESC              quit\n"
3027            "f                   toggle full screen\n"
3028            "p, SPC              pause\n"
3029            "a                   cycle audio channel\n"
3030            "v                   cycle video channel\n"
3031            "t                   cycle subtitle channel\n"
3032            "w                   show audio waves\n"
3033            "s                   activate frame-step mode\n"
3034            "left/right          seek backward/forward 10 seconds\n"
3035            "down/up             seek backward/forward 1 minute\n"
3036            "mouse click         seek to percentage in file corresponding to fraction of width\n"
3037            );
3038 }
3039
3040 static void opt_input_file(void *optctx, const char *filename)
3041 {
3042     if (input_filename) {
3043         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3044                 filename, input_filename);
3045         exit(1);
3046     }
3047     if (!strcmp(filename, "-"))
3048         filename = "pipe:";
3049     input_filename = filename;
3050 }
3051
3052 /* program entry point */
3053 int main(int argc, char **argv)
3054 {
3055     int flags;
3056
3057     av_log_set_flags(AV_LOG_SKIP_REPEATED);
3058     parse_loglevel(argc, argv, options);
3059
3060     /* register all codecs, demuxers and protocols */
3061     avcodec_register_all();
3062 #if CONFIG_AVDEVICE
3063     avdevice_register_all();
3064 #endif
3065 #if CONFIG_AVFILTER
3066     avfilter_register_all();
3067 #endif
3068     av_register_all();
3069
3070     init_opts();
3071
3072     show_banner();
3073
3074     parse_options(NULL, argc, argv, options, opt_input_file);
3075
3076     if (!input_filename) {
3077         show_usage();
3078         fprintf(stderr, "An input file must be specified\n");
3079         fprintf(stderr, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3080         exit(1);
3081     }
3082
3083     if (display_disable) {
3084         video_disable = 1;
3085     }
3086     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3087 #if !defined(__MINGW32__) && !defined(__APPLE__)
3088     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3089 #endif
3090     if (SDL_Init (flags)) {
3091         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3092         exit(1);
3093     }
3094
3095     if (!display_disable) {
3096 #if HAVE_SDL_VIDEO_SIZE
3097         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3098         fs_screen_width = vi->current_w;
3099         fs_screen_height = vi->current_h;
3100 #endif
3101     }
3102
3103     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3104     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3105     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3106
3107     av_init_packet(&flush_pkt);
3108     flush_pkt.data= "FLUSH";
3109
3110     cur_stream = stream_open(input_filename, file_iformat);
3111
3112     event_loop();
3113
3114     /* never returns */
3115
3116     return 0;
3117 }