1 /*
2  * ffplay : Simple Media Player based on the FFmpeg libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include "libavutil/avstring.h"
27 #include "libavutil/colorspace.h"
28 #include "libavutil/mathematics.h"
29 #include "libavutil/pixdesc.h"
30 #include "libavutil/imgutils.h"
31 #include "libavutil/dict.h"
32 #include "libavutil/parseutils.h"
33 #include "libavutil/samplefmt.h"
34 #include "libavutil/avassert.h"
35 #include "libavformat/avformat.h"
36 #include "libavdevice/avdevice.h"
37 #include "libswscale/swscale.h"
38 #include "libavcodec/audioconvert.h"
39 #include "libavutil/opt.h"
40 #include "libavcodec/avfft.h"
41
42 #if CONFIG_AVFILTER
43 # include "libavfilter/avcodec.h"
44 # include "libavfilter/avfilter.h"
45 # include "libavfilter/avfiltergraph.h"
46 # include "libavfilter/vsink_buffer.h"
47 #endif
48
49 #include <SDL.h>
50 #include <SDL_thread.h>
51
52 #include "cmdutils.h"
53
54 #include <unistd.h>
55 #include <assert.h>
56
57 const char program_name[] = "ffplay";
58 const int program_birth_year = 2003;
59
60 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
61 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
62 #define MIN_FRAMES 5
63
64 /* SDL audio buffer size, in samples. Should be small to have precise
65    A/V sync as SDL does not have hardware buffer fullness info. */
66 #define SDL_AUDIO_BUFFER_SIZE 1024
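/* For example, at a 44100 Hz sample rate 1024 samples per callback is roughly
   23 ms of audio, which bounds how stale the audio clock can get between two
   SDL callbacks. */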
67
68 /* no AV sync correction is done if below the AV sync threshold */
69 #define AV_SYNC_THRESHOLD 0.01
70 /* no AV correction is done if too big error */
71 #define AV_NOSYNC_THRESHOLD 10.0
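/* i.e. drift below 10 ms is left alone, and beyond 10 s we stop trying to
   resync at all (see compute_target_time() below) */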
72
73 #define FRAME_SKIP_FACTOR 0.05
74
75 /* maximum audio speed change to get correct sync */
76 #define SAMPLE_CORRECTION_PERCENT_MAX 10
77
78 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
79 #define AUDIO_DIFF_AVG_NB   20
80
81 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
82 #define SAMPLE_ARRAY_SIZE (2*65536)
83
84 static int sws_flags = SWS_BICUBIC;
85
86 typedef struct PacketQueue {
87     AVPacketList *first_pkt, *last_pkt;
88     int nb_packets;
89     int size;
90     int abort_request;
91     SDL_mutex *mutex;
92     SDL_cond *cond;
93 } PacketQueue;
94
95 #define VIDEO_PICTURE_QUEUE_SIZE 2
96 #define SUBPICTURE_QUEUE_SIZE 4
97
98 typedef struct VideoPicture {
99     double pts;                                  ///<presentation time stamp for this picture
100     double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
101     double duration;                             ///<expected duration of the frame
102     int64_t pos;                                 ///<byte position in file
103     SDL_Overlay *bmp;
104     int width, height; /* source height & width */
105     int allocated;
106     enum PixelFormat pix_fmt;
107
108 #if CONFIG_AVFILTER
109     AVFilterBufferRef *picref;
110 #endif
111 } VideoPicture;
112
113 typedef struct SubPicture {
114     double pts; /* presentation time stamp for this picture */
115     AVSubtitle sub;
116 } SubPicture;
117
118 enum {
119     AV_SYNC_AUDIO_MASTER, /* default choice */
120     AV_SYNC_VIDEO_MASTER,
121     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
122 };
123
124 typedef struct VideoState {
125     SDL_Thread *read_tid;
126     SDL_Thread *video_tid;
127     SDL_Thread *refresh_tid;
128     AVInputFormat *iformat;
129     int no_background;
130     int abort_request;
131     int paused;
132     int last_paused;
133     int seek_req;
134     int seek_flags;
135     int64_t seek_pos;
136     int64_t seek_rel;
137     int read_pause_return;
138     AVFormatContext *ic;
139
140     int audio_stream;
141
142     int av_sync_type;
143     double external_clock; /* external clock base */
144     int64_t external_clock_time;
145
146     double audio_clock;
147     double audio_diff_cum; /* used for AV difference average computation */
148     double audio_diff_avg_coef;
149     double audio_diff_threshold;
150     int audio_diff_avg_count;
151     AVStream *audio_st;
152     PacketQueue audioq;
153     int audio_hw_buf_size;
154     /* samples output by the codec. we reserve more space for avsync
155        compensation */
156     DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
157     DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
158     uint8_t *audio_buf;
159     unsigned int audio_buf_size; /* in bytes */
160     int audio_buf_index; /* in bytes */
161     int audio_write_buf_size;
162     AVPacket audio_pkt_temp;
163     AVPacket audio_pkt;
164     enum AVSampleFormat audio_src_fmt;
165     AVAudioConvert *reformat_ctx;
166     double audio_current_pts;
167     double audio_current_pts_drift;
168
169     enum ShowMode {
170         SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
171     } show_mode;
172     int16_t sample_array[SAMPLE_ARRAY_SIZE];
173     int sample_array_index;
174     int last_i_start;
175     RDFTContext *rdft;
176     int rdft_bits;
177     FFTSample *rdft_data;
178     int xpos;
179
180     SDL_Thread *subtitle_tid;
181     int subtitle_stream;
182     int subtitle_stream_changed;
183     AVStream *subtitle_st;
184     PacketQueue subtitleq;
185     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
186     int subpq_size, subpq_rindex, subpq_windex;
187     SDL_mutex *subpq_mutex;
188     SDL_cond *subpq_cond;
189
190     double frame_timer;
191     double frame_last_pts;
192     double frame_last_delay;
193     double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
194     int video_stream;
195     AVStream *video_st;
196     PacketQueue videoq;
197     double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
198     double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
199     int64_t video_current_pos;                   ///<current displayed file pos
200     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
201     int pictq_size, pictq_rindex, pictq_windex;
202     SDL_mutex *pictq_mutex;
203     SDL_cond *pictq_cond;
204 #if !CONFIG_AVFILTER
205     struct SwsContext *img_convert_ctx;
206 #endif
207
208     char filename[1024];
209     int width, height, xleft, ytop;
210     int step;
211
212 #if CONFIG_AVFILTER
213     AVFilterContext *out_video_filter;          ///<the last filter in the video chain
214 #endif
215
216     float skip_frames;
217     float skip_frames_index;
218     int refresh;
219 } VideoState;
220
221 static int opt_help(const char *opt, const char *arg);
222
223 /* options specified by the user */
224 static AVInputFormat *file_iformat;
225 static const char *input_filename;
226 static const char *window_title;
227 static int fs_screen_width;
228 static int fs_screen_height;
229 static int screen_width = 0;
230 static int screen_height = 0;
231 static int audio_disable;
232 static int video_disable;
233 static int wanted_stream[AVMEDIA_TYPE_NB]={
234     [AVMEDIA_TYPE_AUDIO]=-1,
235     [AVMEDIA_TYPE_VIDEO]=-1,
236     [AVMEDIA_TYPE_SUBTITLE]=-1,
237 };
238 static int seek_by_bytes=-1;
239 static int display_disable;
240 static int show_status = 1;
241 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
242 static int64_t start_time = AV_NOPTS_VALUE;
243 static int64_t duration = AV_NOPTS_VALUE;
244 static int thread_count = 1;
245 static int workaround_bugs = 1;
246 static int fast = 0;
247 static int genpts = 0;
248 static int lowres = 0;
249 static int idct = FF_IDCT_AUTO;
250 static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
251 static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
252 static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
253 static int error_recognition = FF_ER_CAREFUL;
254 static int error_concealment = 3;
255 static int decoder_reorder_pts= -1;
256 static int autoexit;
257 static int exit_on_keydown;
258 static int exit_on_mousedown;
259 static int loop=1;
260 static int framedrop=-1;
261 static enum ShowMode show_mode = SHOW_MODE_NONE;
262
263 static int rdftspeed=20;
264 #if CONFIG_AVFILTER
265 static char *vfilters = NULL;
266 #endif
267
268 /* current context */
269 static int is_full_screen;
270 static int64_t audio_callback_time;
271
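/* special sentinel packet: it is queued after a seek so that the decoder
   threads know to call avcodec_flush_buffers() before decoding new data */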
272 static AVPacket flush_pkt;
273
274 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
275 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
276 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
277
278 static SDL_Surface *screen;
279
280 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
281 {
282     AVPacketList *pkt1;
283
284     /* duplicate the packet */
285     if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
286         return -1;
287
288     pkt1 = av_malloc(sizeof(AVPacketList));
289     if (!pkt1)
290         return -1;
291     pkt1->pkt = *pkt;
292     pkt1->next = NULL;
293
294
295     SDL_LockMutex(q->mutex);
296
297     if (!q->last_pkt)
299         q->first_pkt = pkt1;
300     else
301         q->last_pkt->next = pkt1;
302     q->last_pkt = pkt1;
303     q->nb_packets++;
304     q->size += pkt1->pkt.size + sizeof(*pkt1);
305     /* XXX: should duplicate packet data in DV case */
306     SDL_CondSignal(q->cond);
307
308     SDL_UnlockMutex(q->mutex);
309     return 0;
310 }
311
312 /* packet queue handling */
313 static void packet_queue_init(PacketQueue *q)
314 {
315     memset(q, 0, sizeof(PacketQueue));
316     q->mutex = SDL_CreateMutex();
317     q->cond = SDL_CreateCond();
318     packet_queue_put(q, &flush_pkt);
319 }
320
321 static void packet_queue_flush(PacketQueue *q)
322 {
323     AVPacketList *pkt, *pkt1;
324
325     SDL_LockMutex(q->mutex);
326     for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
327         pkt1 = pkt->next;
328         av_free_packet(&pkt->pkt);
329         av_freep(&pkt);
330     }
331     q->last_pkt = NULL;
332     q->first_pkt = NULL;
333     q->nb_packets = 0;
334     q->size = 0;
335     SDL_UnlockMutex(q->mutex);
336 }
337
338 static void packet_queue_end(PacketQueue *q)
339 {
340     packet_queue_flush(q);
341     SDL_DestroyMutex(q->mutex);
342     SDL_DestroyCond(q->cond);
343 }
344
345 static void packet_queue_abort(PacketQueue *q)
346 {
347     SDL_LockMutex(q->mutex);
348
349     q->abort_request = 1;
350
351     SDL_CondSignal(q->cond);
352
353     SDL_UnlockMutex(q->mutex);
354 }
355
356 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
357 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
358 {
359     AVPacketList *pkt1;
360     int ret;
361
362     SDL_LockMutex(q->mutex);
363
364     for(;;) {
365         if (q->abort_request) {
366             ret = -1;
367             break;
368         }
369
370         pkt1 = q->first_pkt;
371         if (pkt1) {
372             q->first_pkt = pkt1->next;
373             if (!q->first_pkt)
374                 q->last_pkt = NULL;
375             q->nb_packets--;
376             q->size -= pkt1->pkt.size + sizeof(*pkt1);
377             *pkt = pkt1->pkt;
378             av_free(pkt1);
379             ret = 1;
380             break;
381         } else if (!block) {
382             ret = 0;
383             break;
384         } else {
385             SDL_CondWait(q->cond, q->mutex);
386         }
387     }
388     SDL_UnlockMutex(q->mutex);
389     return ret;
390 }
391
392 static inline void fill_rectangle(SDL_Surface *screen,
393                                   int x, int y, int w, int h, int color)
394 {
395     SDL_Rect rect;
396     rect.x = x;
397     rect.y = y;
398     rect.w = w;
399     rect.h = h;
400     SDL_FillRect(screen, &rect, color);
401 }
402
403 #define ALPHA_BLEND(a, oldp, newp, s)\
404 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
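/* fixed-point alpha blend: a == 0 keeps oldp, a == 255 yields newp.  The
   shift s lets callers pass a pre-scaled sum of 2 (s=1) or 4 (s=2) chroma
   samples and have the averaging folded into the same division. */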
405
406 #define RGBA_IN(r, g, b, a, s)\
407 {\
408     unsigned int v = ((const uint32_t *)(s))[0];\
409     a = (v >> 24) & 0xff;\
410     r = (v >> 16) & 0xff;\
411     g = (v >> 8) & 0xff;\
412     b = v & 0xff;\
413 }
414
415 #define YUVA_IN(y, u, v, a, s, pal)\
416 {\
417     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
418     a = (val >> 24) & 0xff;\
419     y = (val >> 16) & 0xff;\
420     u = (val >> 8) & 0xff;\
421     v = val & 0xff;\
422 }
423
424 #define YUVA_OUT(d, y, u, v, a)\
425 {\
426     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
427 }
428
429
430 #define BPP 1
431
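/* Blend one palettized subtitle rectangle onto a YUV 4:2:0 destination
   picture.  The palette has already been converted to YUVA (see the
   "Now in YCrCb!" note below); luma is blended per pixel while chroma is
   blended per 2x2 block, hence the pixel-pair / row-pair loops. */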
432 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
433 {
434     int wrap, wrap3, width2, skip2;
435     int y, u, v, a, u1, v1, a1, w, h;
436     uint8_t *lum, *cb, *cr;
437     const uint8_t *p;
438     const uint32_t *pal;
439     int dstx, dsty, dstw, dsth;
440
441     dstw = av_clip(rect->w, 0, imgw);
442     dsth = av_clip(rect->h, 0, imgh);
443     dstx = av_clip(rect->x, 0, imgw - dstw);
444     dsty = av_clip(rect->y, 0, imgh - dsth);
445     lum = dst->data[0] + dsty * dst->linesize[0];
446     cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
447     cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
448
449     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
450     skip2 = dstx >> 1;
451     wrap = dst->linesize[0];
452     wrap3 = rect->pict.linesize[0];
453     p = rect->pict.data[0];
454     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
455
456     if (dsty & 1) {
457         lum += dstx;
458         cb += skip2;
459         cr += skip2;
460
461         if (dstx & 1) {
462             YUVA_IN(y, u, v, a, p, pal);
463             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
464             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
465             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
466             cb++;
467             cr++;
468             lum++;
469             p += BPP;
470         }
471         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
472             YUVA_IN(y, u, v, a, p, pal);
473             u1 = u;
474             v1 = v;
475             a1 = a;
476             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
477
478             YUVA_IN(y, u, v, a, p + BPP, pal);
479             u1 += u;
480             v1 += v;
481             a1 += a;
482             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
483             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
484             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
485             cb++;
486             cr++;
487             p += 2 * BPP;
488             lum += 2;
489         }
490         if (w) {
491             YUVA_IN(y, u, v, a, p, pal);
492             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
493             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
494             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
495             p++;
496             lum++;
497         }
498         p += wrap3 - dstw * BPP;
499         lum += wrap - dstw - dstx;
500         cb += dst->linesize[1] - width2 - skip2;
501         cr += dst->linesize[2] - width2 - skip2;
502     }
503     for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
504         lum += dstx;
505         cb += skip2;
506         cr += skip2;
507
508         if (dstx & 1) {
509             YUVA_IN(y, u, v, a, p, pal);
510             u1 = u;
511             v1 = v;
512             a1 = a;
513             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
514             p += wrap3;
515             lum += wrap;
516             YUVA_IN(y, u, v, a, p, pal);
517             u1 += u;
518             v1 += v;
519             a1 += a;
520             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
521             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
522             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
523             cb++;
524             cr++;
525             p += -wrap3 + BPP;
526             lum += -wrap + 1;
527         }
528         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
529             YUVA_IN(y, u, v, a, p, pal);
530             u1 = u;
531             v1 = v;
532             a1 = a;
533             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
534
535             YUVA_IN(y, u, v, a, p + BPP, pal);
536             u1 += u;
537             v1 += v;
538             a1 += a;
539             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
540             p += wrap3;
541             lum += wrap;
542
543             YUVA_IN(y, u, v, a, p, pal);
544             u1 += u;
545             v1 += v;
546             a1 += a;
547             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
548
549             YUVA_IN(y, u, v, a, p + BPP, pal);
550             u1 += u;
551             v1 += v;
552             a1 += a;
553             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
554
555             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
556             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
557
558             cb++;
559             cr++;
560             p += -wrap3 + 2 * BPP;
561             lum += -wrap + 2;
562         }
563         if (w) {
564             YUVA_IN(y, u, v, a, p, pal);
565             u1 = u;
566             v1 = v;
567             a1 = a;
568             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
569             p += wrap3;
570             lum += wrap;
571             YUVA_IN(y, u, v, a, p, pal);
572             u1 += u;
573             v1 += v;
574             a1 += a;
575             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
576             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
577             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
578             cb++;
579             cr++;
580             p += -wrap3 + BPP;
581             lum += -wrap + 1;
582         }
583         p += wrap3 + (wrap3 - dstw * BPP);
584         lum += wrap + (wrap - dstw - dstx);
585         cb += dst->linesize[1] - width2 - skip2;
586         cr += dst->linesize[2] - width2 - skip2;
587     }
588     /* handle odd height */
589     if (h) {
590         lum += dstx;
591         cb += skip2;
592         cr += skip2;
593
594         if (dstx & 1) {
595             YUVA_IN(y, u, v, a, p, pal);
596             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
597             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
598             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
599             cb++;
600             cr++;
601             lum++;
602             p += BPP;
603         }
604         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
605             YUVA_IN(y, u, v, a, p, pal);
606             u1 = u;
607             v1 = v;
608             a1 = a;
609             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
610
611             YUVA_IN(y, u, v, a, p + BPP, pal);
612             u1 += u;
613             v1 += v;
614             a1 += a;
615             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
616             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
617             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
618             cb++;
619             cr++;
620             p += 2 * BPP;
621             lum += 2;
622         }
623         if (w) {
624             YUVA_IN(y, u, v, a, p, pal);
625             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
626             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
627             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
628         }
629     }
630 }
631
632 static void free_subpicture(SubPicture *sp)
633 {
634     avsubtitle_free(&sp->sub);
635 }
636
637 static void video_image_display(VideoState *is)
638 {
639     VideoPicture *vp;
640     SubPicture *sp;
641     AVPicture pict;
642     float aspect_ratio;
643     int width, height, x, y;
644     SDL_Rect rect;
645     int i;
646
647     vp = &is->pictq[is->pictq_rindex];
648     if (vp->bmp) {
649 #if CONFIG_AVFILTER
650          if (vp->picref->video->sample_aspect_ratio.num == 0)
651              aspect_ratio = 0;
652          else
653              aspect_ratio = av_q2d(vp->picref->video->sample_aspect_ratio);
654 #else
655
656         /* XXX: use variable in the frame */
657         if (is->video_st->sample_aspect_ratio.num)
658             aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
659         else if (is->video_st->codec->sample_aspect_ratio.num)
660             aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
661         else
662             aspect_ratio = 0;
663 #endif
664         if (aspect_ratio <= 0.0)
665             aspect_ratio = 1.0;
666         aspect_ratio *= (float)vp->width / (float)vp->height;
667
668         if (is->subtitle_st) {
669             if (is->subpq_size > 0) {
670                 sp = &is->subpq[is->subpq_rindex];
671
672                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
673                     SDL_LockYUVOverlay (vp->bmp);
674
675                     pict.data[0] = vp->bmp->pixels[0];
676                     pict.data[1] = vp->bmp->pixels[2];
677                     pict.data[2] = vp->bmp->pixels[1];
678
679                     pict.linesize[0] = vp->bmp->pitches[0];
680                     pict.linesize[1] = vp->bmp->pitches[2];
681                     pict.linesize[2] = vp->bmp->pitches[1];
682
683                     for (i = 0; i < sp->sub.num_rects; i++)
684                         blend_subrect(&pict, sp->sub.rects[i],
685                                       vp->bmp->w, vp->bmp->h);
686
687                     SDL_UnlockYUVOverlay (vp->bmp);
688                 }
689             }
690         }
691
692
693         /* XXX: we suppose the screen has a 1.0 pixel ratio */
694         height = is->height;
695         width = ((int)rint(height * aspect_ratio)) & ~1;
696         if (width > is->width) {
697             width = is->width;
698             height = ((int)rint(width / aspect_ratio)) & ~1;
699         }
700         x = (is->width - width) / 2;
701         y = (is->height - height) / 2;
702         is->no_background = 0;
703         rect.x = is->xleft + x;
704         rect.y = is->ytop  + y;
705         rect.w = FFMAX(width,  1);
706         rect.h = FFMAX(height, 1);
707         SDL_DisplayYUVOverlay(vp->bmp, &rect);
708     }
709 }
710
711 static inline int compute_mod(int a, int b)
712 {
713     return a < 0 ? a%b + b : a%b;
714 }
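/* Unlike C's % operator, the result is always in [0, b), e.g.
   compute_mod(-3, SAMPLE_ARRAY_SIZE) == SAMPLE_ARRAY_SIZE - 3, which is what
   the circular indexing into sample_array below relies on. */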
715
716 static void video_audio_display(VideoState *s)
717 {
718     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
719     int ch, channels, h, h2, bgcolor, fgcolor;
720     int64_t time_diff;
721     int rdft_bits, nb_freq;
722
723     for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
724         ;
725     nb_freq= 1<<(rdft_bits-1);
726
727     /* compute display index : center on currently output samples */
728     channels = s->audio_st->codec->channels;
729     nb_display_channels = channels;
730     if (!s->paused) {
731         int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
732         n = 2 * channels;
733         delay = s->audio_write_buf_size;
734         delay /= n;
735
736         /* to be more precise, we take into account the time spent since
737            the last buffer computation */
738         if (audio_callback_time) {
739             time_diff = av_gettime() - audio_callback_time;
740             delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
741         }
742
743         delay += 2*data_used;
744         if (delay < data_used)
745             delay = data_used;
746
747         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
748         if (s->show_mode == SHOW_MODE_WAVES) {
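            /* scan the most recent samples for a zero crossing with the
               steepest falling edge and start drawing there, so successive
               refreshes stay roughly aligned and the waveform does not jitter */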
749             h= INT_MIN;
750             for(i=0; i<1000; i+=channels){
751                 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
752                 int a= s->sample_array[idx];
753                 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
754                 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
755                 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
756                 int score= a-d;
757                 if(h<score && (b^c)<0){
758                     h= score;
759                     i_start= idx;
760                 }
761             }
762         }
763
764         s->last_i_start = i_start;
765     } else {
766         i_start = s->last_i_start;
767     }
768
769     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
770     if (s->show_mode == SHOW_MODE_WAVES) {
771         fill_rectangle(screen,
772                        s->xleft, s->ytop, s->width, s->height,
773                        bgcolor);
774
775         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
776
777         /* total height for one channel */
778         h = s->height / nb_display_channels;
779         /* graph height / 2 */
780         h2 = (h * 9) / 20;
781         for(ch = 0;ch < nb_display_channels; ch++) {
782             i = i_start + ch;
783             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
784             for(x = 0; x < s->width; x++) {
785                 y = (s->sample_array[i] * h2) >> 15;
786                 if (y < 0) {
787                     y = -y;
788                     ys = y1 - y;
789                 } else {
790                     ys = y1;
791                 }
792                 fill_rectangle(screen,
793                                s->xleft + x, ys, 1, y,
794                                fgcolor);
795                 i += channels;
796                 if (i >= SAMPLE_ARRAY_SIZE)
797                     i -= SAMPLE_ARRAY_SIZE;
798             }
799         }
800
801         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
802
803         for(ch = 1;ch < nb_display_channels; ch++) {
804             y = s->ytop + ch * h;
805             fill_rectangle(screen,
806                            s->xleft, y, s->width, 1,
807                            fgcolor);
808         }
809         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
810     }else{
811         nb_display_channels= FFMIN(nb_display_channels, 2);
812         if(rdft_bits != s->rdft_bits){
813             av_rdft_end(s->rdft);
814             av_free(s->rdft_data);
815             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
816             s->rdft_bits= rdft_bits;
817             s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
818         }
819         {
820             FFTSample *data[2];
821             for(ch = 0;ch < nb_display_channels; ch++) {
822                 data[ch] = s->rdft_data + 2*nb_freq*ch;
823                 i = i_start + ch;
824                 for(x = 0; x < 2*nb_freq; x++) {
825                     double w= (x-nb_freq)*(1.0/nb_freq);
826                     data[ch][x]= s->sample_array[i]*(1.0-w*w);
827                     i += channels;
828                     if (i >= SAMPLE_ARRAY_SIZE)
829                         i -= SAMPLE_ARRAY_SIZE;
830                 }
831                 av_rdft_calc(s->rdft, data[ch]);
832             }
833             // least efficient way to do this; we should of course access the data directly, but it's more than fast enough
834             for(y=0; y<s->height; y++){
835                 double w= 1/sqrt(nb_freq);
836                 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
837                 int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
838                        + data[1][2*y+1]*data[1][2*y+1])) : a;
839                 a= FFMIN(a,255);
840                 b= FFMIN(b,255);
841                 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
842
843                 fill_rectangle(screen,
844                             s->xpos, s->height-y, 1, 1,
845                             fgcolor);
846             }
847         }
848         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
849         s->xpos++;
850         if(s->xpos >= s->width)
851             s->xpos= s->xleft;
852     }
853 }
854
855 static void stream_close(VideoState *is)
856 {
857     VideoPicture *vp;
858     int i;
859     /* XXX: use a special url_shutdown call to abort parse cleanly */
860     is->abort_request = 1;
861     SDL_WaitThread(is->read_tid, NULL);
862     SDL_WaitThread(is->refresh_tid, NULL);
863
864     /* free all pictures */
865     for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
866         vp = &is->pictq[i];
867 #if CONFIG_AVFILTER
868         if (vp->picref) {
869             avfilter_unref_buffer(vp->picref);
870             vp->picref = NULL;
871         }
872 #endif
873         if (vp->bmp) {
874             SDL_FreeYUVOverlay(vp->bmp);
875             vp->bmp = NULL;
876         }
877     }
878     SDL_DestroyMutex(is->pictq_mutex);
879     SDL_DestroyCond(is->pictq_cond);
880     SDL_DestroyMutex(is->subpq_mutex);
881     SDL_DestroyCond(is->subpq_cond);
882 #if !CONFIG_AVFILTER
883     if (is->img_convert_ctx)
884         sws_freeContext(is->img_convert_ctx);
885 #endif
886     av_free(is);
887 }
888
889 static void do_exit(VideoState *is)
890 {
891     if (is) {
892         stream_close(is);
893     }
894     av_lockmgr_register(NULL);
895     uninit_opts();
896 #if CONFIG_AVFILTER
897     avfilter_uninit();
898 #endif
899     if (show_status)
900         printf("\n");
901     SDL_Quit();
902     av_log(NULL, AV_LOG_QUIET, "%s", "");
903     exit(0);
904 }
905
906 static int video_open(VideoState *is){
907     int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
908     int w,h;
909
910     if(is_full_screen) flags |= SDL_FULLSCREEN;
911     else               flags |= SDL_RESIZABLE;
912
913     if (is_full_screen && fs_screen_width) {
914         w = fs_screen_width;
915         h = fs_screen_height;
916     } else if(!is_full_screen && screen_width){
917         w = screen_width;
918         h = screen_height;
919 #if CONFIG_AVFILTER
920     }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
921         w = is->out_video_filter->inputs[0]->w;
922         h = is->out_video_filter->inputs[0]->h;
923 #else
924     }else if (is->video_st && is->video_st->codec->width){
925         w = is->video_st->codec->width;
926         h = is->video_st->codec->height;
927 #endif
928     } else {
929         w = 640;
930         h = 480;
931     }
932     if(screen && is->width == screen->w && screen->w == w
933        && is->height== screen->h && screen->h == h)
934         return 0;
935
936 #ifndef __APPLE__
937     screen = SDL_SetVideoMode(w, h, 0, flags);
938 #else
939     /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
940     screen = SDL_SetVideoMode(w, h, 24, flags);
941 #endif
942     if (!screen) {
943         fprintf(stderr, "SDL: could not set video mode - exiting\n");
944         do_exit(is);
945     }
946     if (!window_title)
947         window_title = input_filename;
948     SDL_WM_SetCaption(window_title, window_title);
949
950     is->width = screen->w;
951     is->height = screen->h;
952
953     return 0;
954 }
955
956 /* display the current picture, if any */
957 static void video_display(VideoState *is)
958 {
959     if(!screen)
960         video_open(is);
961     if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
962         video_audio_display(is);
963     else if (is->video_st)
964         video_image_display(is);
965 }
966
967 static int refresh_thread(void *opaque)
968 {
969     VideoState *is= opaque;
970     while(!is->abort_request){
971         SDL_Event event;
972         event.type = FF_REFRESH_EVENT;
973         event.user.data1 = opaque;
974         if(!is->refresh){
975             is->refresh=1;
976             SDL_PushEvent(&event);
977         }
978         //FIXME ideally we should wait the correct time, but SDL's event passing is so slow it would be silly
979         usleep(is->audio_st && is->show_mode != SHOW_MODE_VIDEO ? rdftspeed*1000 : 5000);
980     }
981     return 0;
982 }
983
984 /* get the current audio clock value */
985 static double get_audio_clock(VideoState *is)
986 {
987     if (is->paused) {
988         return is->audio_current_pts;
989     } else {
990         return is->audio_current_pts_drift + av_gettime() / 1000000.0;
991     }
992 }
993
994 /* get the current video clock value */
995 static double get_video_clock(VideoState *is)
996 {
997     if (is->paused) {
998         return is->video_current_pts;
999     } else {
1000         return is->video_current_pts_drift + av_gettime() / 1000000.0;
1001     }
1002 }
1003
1004 /* get the current external clock value */
1005 static double get_external_clock(VideoState *is)
1006 {
1007     int64_t ti;
1008     ti = av_gettime();
1009     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1010 }
1011
1012 /* get the current master clock value */
1013 static double get_master_clock(VideoState *is)
1014 {
1015     double val;
1016
1017     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1018         if (is->video_st)
1019             val = get_video_clock(is);
1020         else
1021             val = get_audio_clock(is);
1022     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1023         if (is->audio_st)
1024             val = get_audio_clock(is);
1025         else
1026             val = get_video_clock(is);
1027     } else {
1028         val = get_external_clock(is);
1029     }
1030     return val;
1031 }
1032
1033 /* seek in the stream */
1034 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1035 {
1036     if (!is->seek_req) {
1037         is->seek_pos = pos;
1038         is->seek_rel = rel;
1039         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1040         if (seek_by_bytes)
1041             is->seek_flags |= AVSEEK_FLAG_BYTE;
1042         is->seek_req = 1;
1043     }
1044 }
1045
1046 /* pause or resume the video */
1047 static void stream_toggle_pause(VideoState *is)
1048 {
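    /* when resuming, push frame_timer forward by the wall-clock time spent
       paused so the video clock does not try to "catch up" on old frames */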
1049     if (is->paused) {
1050         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1051         if(is->read_pause_return != AVERROR(ENOSYS)){
1052             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1053         }
1054         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1055     }
1056     is->paused = !is->paused;
1057 }
1058
1059 static double compute_target_time(double frame_current_pts, VideoState *is)
1060 {
1061     double delay, sync_threshold, diff;
1062
1063     /* compute nominal delay */
1064     delay = frame_current_pts - is->frame_last_pts;
1065     if (delay <= 0 || delay >= 10.0) {
1066         /* if incorrect delay, use previous one */
1067         delay = is->frame_last_delay;
1068     } else {
1069         is->frame_last_delay = delay;
1070     }
1071     is->frame_last_pts = frame_current_pts;
1072
1073     /* update delay to follow master synchronisation source */
1074     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1075          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1076         /* if video is slave, we try to correct big delays by
1077            duplicating or deleting a frame */
1078         diff = get_video_clock(is) - get_master_clock(is);
1079
1080         /* skip or repeat frame. We take into account the
1081            delay to compute the threshold. I still don't know
1082            if it is the best guess */
1083         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1084         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1085             if (diff <= -sync_threshold)
1086                 delay = 0;
1087             else if (diff >= sync_threshold)
1088                 delay = 2 * delay;
1089         }
1090     }
1091     is->frame_timer += delay;
1092
1093     av_dlog(NULL, "video: delay=%0.3f pts=%0.3f A-V=%f\n",
1094             delay, frame_current_pts, -diff);
1095
1096     return is->frame_timer;
1097 }
1098
1099 /* called to display each frame */
1100 static void video_refresh(void *opaque)
1101 {
1102     VideoState *is = opaque;
1103     VideoPicture *vp;
1104
1105     SubPicture *sp, *sp2;
1106
1107     if (is->video_st) {
1108 retry:
1109         if (is->pictq_size == 0) {
1110             //nothing to do, no picture to display in the queue
1111         } else {
1112             double time= av_gettime()/1000000.0;
1113             double next_target;
1114             /* dequeue the picture */
1115             vp = &is->pictq[is->pictq_rindex];
1116
1117             if(time < vp->target_clock)
1118                 return;
1119             /* update current video pts */
1120             is->video_current_pts = vp->pts;
1121             is->video_current_pts_drift = is->video_current_pts - time;
1122             is->video_current_pos = vp->pos;
1123             if(is->pictq_size > 1){
1124                 VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
1125                 assert(nextvp->target_clock >= vp->target_clock);
1126                 next_target= nextvp->target_clock;
1127             }else{
1128                 next_target= vp->target_clock + vp->duration;
1129             }
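            /* frame dropping: if we are already past the point where the
               *next* picture should be shown, this one is late; raise the
               decode-side skip ratio and drop it outright when another
               picture is queued or we are more than 0.5s behind */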
1130             if((framedrop>0 || (framedrop && is->audio_st)) && time > next_target){
1131                 is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
1132                 if(is->pictq_size > 1 || time > next_target + 0.5){
1133                     /* update queue size and signal for next picture */
1134                     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1135                         is->pictq_rindex = 0;
1136
1137                     SDL_LockMutex(is->pictq_mutex);
1138                     is->pictq_size--;
1139                     SDL_CondSignal(is->pictq_cond);
1140                     SDL_UnlockMutex(is->pictq_mutex);
1141                     goto retry;
1142                 }
1143             }
1144
1145             if(is->subtitle_st) {
1146                 if (is->subtitle_stream_changed) {
1147                     SDL_LockMutex(is->subpq_mutex);
1148
1149                     while (is->subpq_size) {
1150                         free_subpicture(&is->subpq[is->subpq_rindex]);
1151
1152                         /* update queue size and signal for next picture */
1153                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1154                             is->subpq_rindex = 0;
1155
1156                         is->subpq_size--;
1157                     }
1158                     is->subtitle_stream_changed = 0;
1159
1160                     SDL_CondSignal(is->subpq_cond);
1161                     SDL_UnlockMutex(is->subpq_mutex);
1162                 } else {
1163                     if (is->subpq_size > 0) {
1164                         sp = &is->subpq[is->subpq_rindex];
1165
1166                         if (is->subpq_size > 1)
1167                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1168                         else
1169                             sp2 = NULL;
1170
1171                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1172                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1173                         {
1174                             free_subpicture(sp);
1175
1176                             /* update queue size and signal for next picture */
1177                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1178                                 is->subpq_rindex = 0;
1179
1180                             SDL_LockMutex(is->subpq_mutex);
1181                             is->subpq_size--;
1182                             SDL_CondSignal(is->subpq_cond);
1183                             SDL_UnlockMutex(is->subpq_mutex);
1184                         }
1185                     }
1186                 }
1187             }
1188
1189             /* display picture */
1190             if (!display_disable)
1191                 video_display(is);
1192
1193             /* update queue size and signal for next picture */
1194             if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1195                 is->pictq_rindex = 0;
1196
1197             SDL_LockMutex(is->pictq_mutex);
1198             is->pictq_size--;
1199             SDL_CondSignal(is->pictq_cond);
1200             SDL_UnlockMutex(is->pictq_mutex);
1201         }
1202     } else if (is->audio_st) {
1203         /* draw the next audio frame */
1204
1205         /* if only audio stream, then display the audio bars (better
1206            than nothing, just to test the implementation) */
1207
1208         /* display picture */
1209         if (!display_disable)
1210             video_display(is);
1211     }
1212     if (show_status) {
1213         static int64_t last_time;
1214         int64_t cur_time;
1215         int aqsize, vqsize, sqsize;
1216         double av_diff;
1217
1218         cur_time = av_gettime();
1219         if (!last_time || (cur_time - last_time) >= 30000) {
1220             aqsize = 0;
1221             vqsize = 0;
1222             sqsize = 0;
1223             if (is->audio_st)
1224                 aqsize = is->audioq.size;
1225             if (is->video_st)
1226                 vqsize = is->videoq.size;
1227             if (is->subtitle_st)
1228                 sqsize = is->subtitleq.size;
1229             av_diff = 0;
1230             if (is->audio_st && is->video_st)
1231                 av_diff = get_audio_clock(is) - get_video_clock(is);
1232             printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1233                    get_master_clock(is),
1234                    av_diff,
1235                    FFMAX(is->skip_frames-1, 0),
1236                    aqsize / 1024,
1237                    vqsize / 1024,
1238                    sqsize,
1239                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
1240                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
1241             fflush(stdout);
1242             last_time = cur_time;
1243         }
1244     }
1245 }
1246
1247 /* allocate a picture (needs to be done in the main thread to avoid
1248    potential locking problems) */
1249 static void alloc_picture(void *opaque)
1250 {
1251     VideoState *is = opaque;
1252     VideoPicture *vp;
1253
1254     vp = &is->pictq[is->pictq_windex];
1255
1256     if (vp->bmp)
1257         SDL_FreeYUVOverlay(vp->bmp);
1258
1259 #if CONFIG_AVFILTER
1260     if (vp->picref)
1261         avfilter_unref_buffer(vp->picref);
1262     vp->picref = NULL;
1263
1264     vp->width   = is->out_video_filter->inputs[0]->w;
1265     vp->height  = is->out_video_filter->inputs[0]->h;
1266     vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1267 #else
1268     vp->width   = is->video_st->codec->width;
1269     vp->height  = is->video_st->codec->height;
1270     vp->pix_fmt = is->video_st->codec->pix_fmt;
1271 #endif
1272
1273     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1274                                    SDL_YV12_OVERLAY,
1275                                    screen);
1276     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1277         /* SDL allocates a buffer smaller than requested if the video
1278          * overlay hardware is unable to support the requested size. */
1279         fprintf(stderr, "Error: the video system does not support an image\n"
1280                         "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1281                         "to reduce the image size.\n", vp->width, vp->height );
1282         do_exit(is);
1283     }
1284
1285     SDL_LockMutex(is->pictq_mutex);
1286     vp->allocated = 1;
1287     SDL_CondSignal(is->pictq_cond);
1288     SDL_UnlockMutex(is->pictq_mutex);
1289 }
1290
1291 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1292 {
1293     VideoPicture *vp;
1294     double frame_delay, pts = pts1;
1295
1296     /* compute the exact PTS for the picture if it is omitted in the stream
1297      * pts1 is the dts of the pkt / pts of the frame */
1298     if (pts != 0) {
1299         /* update video clock with pts, if present */
1300         is->video_clock = pts;
1301     } else {
1302         pts = is->video_clock;
1303     }
1304     /* update video clock for next frame */
1305     frame_delay = av_q2d(is->video_st->codec->time_base);
1306     /* for MPEG2, the frame can be repeated, so we update the
1307        clock accordingly */
1308     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1309     is->video_clock += frame_delay;
1310
1311 #if defined(DEBUG_SYNC) && 0
1312     printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1313            av_get_picture_type_char(src_frame->pict_type), pts, pts1);
1314 #endif
1315
1316     /* wait until we have space to put a new picture */
1317     SDL_LockMutex(is->pictq_mutex);
1318
1319     if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1320         is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));
1321
1322     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1323            !is->videoq.abort_request) {
1324         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1325     }
1326     SDL_UnlockMutex(is->pictq_mutex);
1327
1328     if (is->videoq.abort_request)
1329         return -1;
1330
1331     vp = &is->pictq[is->pictq_windex];
1332
1333     vp->duration = frame_delay;
1334
1335     /* alloc or resize hardware picture buffer */
1336     if (!vp->bmp ||
1337 #if CONFIG_AVFILTER
1338         vp->width  != is->out_video_filter->inputs[0]->w ||
1339         vp->height != is->out_video_filter->inputs[0]->h) {
1340 #else
1341         vp->width != is->video_st->codec->width ||
1342         vp->height != is->video_st->codec->height) {
1343 #endif
1344         SDL_Event event;
1345
1346         vp->allocated = 0;
1347
1348         /* the allocation must be done in the main thread to avoid
1349            locking problems */
1350         event.type = FF_ALLOC_EVENT;
1351         event.user.data1 = is;
1352         SDL_PushEvent(&event);
1353
1354         /* wait until the picture is allocated */
1355         SDL_LockMutex(is->pictq_mutex);
1356         while (!vp->allocated && !is->videoq.abort_request) {
1357             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1358         }
1359         /* if the queue is aborted, we have to pop the pending ALLOC event or wait for the allocation to complete */
1360         if (is->videoq.abort_request && SDL_PeepEvents(&event, 1, SDL_GETEVENT, SDL_EVENTMASK(FF_ALLOC_EVENT)) != 1) {
1361             while (!vp->allocated) {
1362                 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1363             }
1364         }
1365         SDL_UnlockMutex(is->pictq_mutex);
1366
1367         if (is->videoq.abort_request)
1368             return -1;
1369     }
1370
1371     /* if the frame is not skipped, then display it */
1372     if (vp->bmp) {
1373         AVPicture pict;
1374 #if CONFIG_AVFILTER
1375         if(vp->picref)
1376             avfilter_unref_buffer(vp->picref);
1377         vp->picref = src_frame->opaque;
1378 #endif
1379
1380         /* get a pointer on the bitmap */
1381         SDL_LockYUVOverlay (vp->bmp);
1382
1383         memset(&pict,0,sizeof(AVPicture));
1384         pict.data[0] = vp->bmp->pixels[0];
1385         pict.data[1] = vp->bmp->pixels[2];
1386         pict.data[2] = vp->bmp->pixels[1];
1387
1388         pict.linesize[0] = vp->bmp->pitches[0];
1389         pict.linesize[1] = vp->bmp->pitches[2];
1390         pict.linesize[2] = vp->bmp->pitches[1];
1391
1392 #if CONFIG_AVFILTER
1393         //FIXME use direct rendering
1394         av_picture_copy(&pict, (AVPicture *)src_frame,
1395                         vp->pix_fmt, vp->width, vp->height);
1396 #else
1397         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1398         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1399             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1400             PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
1401         if (is->img_convert_ctx == NULL) {
1402             fprintf(stderr, "Cannot initialize the conversion context\n");
1403             exit(1);
1404         }
1405         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1406                   0, vp->height, pict.data, pict.linesize);
1407 #endif
1408         /* update the bitmap content */
1409         SDL_UnlockYUVOverlay(vp->bmp);
1410
1411         vp->pts = pts;
1412         vp->pos = pos;
1413
1414         /* now we can update the picture count */
1415         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1416             is->pictq_windex = 0;
1417         SDL_LockMutex(is->pictq_mutex);
1418         vp->target_clock= compute_target_time(vp->pts, is);
1419
1420         is->pictq_size++;
1421         SDL_UnlockMutex(is->pictq_mutex);
1422     }
1423     return 0;
1424 }
1425
1426 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1427 {
1428     int got_picture, i;
1429
1430     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1431         return -1;
1432
1433     if (pkt->data == flush_pkt.data) {
1434         avcodec_flush_buffers(is->video_st->codec);
1435
1436         SDL_LockMutex(is->pictq_mutex);
1437         // Make sure there are no long delay timers (ideally we should just flush the queue, but that's harder)
1438         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1439             is->pictq[i].target_clock= 0;
1440         }
1441         while (is->pictq_size && !is->videoq.abort_request) {
1442             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1443         }
1444         is->video_current_pos = -1;
1445         SDL_UnlockMutex(is->pictq_mutex);
1446
1447         is->frame_last_pts = AV_NOPTS_VALUE;
1448         is->frame_last_delay = 0;
1449         is->frame_timer = (double)av_gettime() / 1000000.0;
1450         is->skip_frames = 1;
1451         is->skip_frames_index = 0;
1452         return 0;
1453     }
1454
1455     avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt);
1456
1457     if (got_picture) {
1458         if (decoder_reorder_pts == -1) {
1459             *pts = frame->best_effort_timestamp;
1460         } else if (decoder_reorder_pts) {
1461             *pts = frame->pkt_pts;
1462         } else {
1463             *pts = frame->pkt_dts;
1464         }
1465
1466         if (*pts == AV_NOPTS_VALUE) {
1467             *pts = 0;
1468         }
1469
1470         is->skip_frames_index += 1;
1471         if(is->skip_frames_index >= is->skip_frames){
1472             is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
1473             return 1;
1474         }
1475
1476     }
1477     return 0;
1478 }
1479
1480 #if CONFIG_AVFILTER
1481 typedef struct {
1482     VideoState *is;
1483     AVFrame *frame;
1484     int use_dr1;
1485 } FilterPriv;
1486
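/* get_buffer() override for the source filter: the decoder is handed a buffer
   that lives inside an AVFilterBufferRef, so decoded frames can be pushed
   into the filter graph without an extra copy (direct rendering / DR1). */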
1487 static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
1488 {
1489     AVFilterContext *ctx = codec->opaque;
1490     AVFilterBufferRef  *ref;
1491     int perms = AV_PERM_WRITE;
1492     int i, w, h, stride[4];
1493     unsigned edge;
1494     int pixel_size;
1495
1496     av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1497
1498     if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
1499         perms |= AV_PERM_NEG_LINESIZES;
1500
1501     if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
1502         if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
1503         if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
1504         if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
1505     }
1506     if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;
1507
1508     w = codec->width;
1509     h = codec->height;
1510
1511     if(av_image_check_size(w, h, 0, codec))
1512         return -1;
1513
1514     avcodec_align_dimensions2(codec, &w, &h, stride);
1515     edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
1516     w += edge << 1;
1517     h += edge << 1;
1518
1519     if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
1520         return -1;
1521
1522     pixel_size = av_pix_fmt_descriptors[ref->format].comp[0].step_minus1+1;
1523     ref->video->w = codec->width;
1524     ref->video->h = codec->height;
1525     for(i = 0; i < 4; i ++) {
1526         unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
1527         unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;
1528
1529         if (ref->data[i]) {
1530             ref->data[i]    += ((edge * pixel_size) >> hshift) + ((edge * ref->linesize[i]) >> vshift);
1531         }
1532         pic->data[i]     = ref->data[i];
1533         pic->linesize[i] = ref->linesize[i];
1534     }
1535     pic->opaque = ref;
1536     pic->age    = INT_MAX;
1537     pic->type   = FF_BUFFER_TYPE_USER;
1538     pic->reordered_opaque = codec->reordered_opaque;
1539     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1540     else           pic->pkt_pts = AV_NOPTS_VALUE;
1541     return 0;
1542 }
1543
1544 static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
1545 {
1546     memset(pic->data, 0, sizeof(pic->data));
1547     avfilter_unref_buffer(pic->opaque);
1548 }
1549
1550 static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
1551 {
1552     AVFilterBufferRef *ref = pic->opaque;
1553
1554     if (pic->data[0] == NULL) {
1555         pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
1556         return codec->get_buffer(codec, pic);
1557     }
1558
1559     if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
1560         (codec->pix_fmt != ref->format)) {
1561         av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
1562         return -1;
1563     }
1564
1565     pic->reordered_opaque = codec->reordered_opaque;
1566     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1567     else           pic->pkt_pts = AV_NOPTS_VALUE;
1568     return 0;
1569 }
1570
1571 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1572 {
1573     FilterPriv *priv = ctx->priv;
1574     AVCodecContext *codec;
1575     if(!opaque) return -1;
1576
1577     priv->is = opaque;
1578     codec    = priv->is->video_st->codec;
1579     codec->opaque = ctx;
1580     if((codec->codec->capabilities & CODEC_CAP_DR1)
1581     ) {
1582         av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1583         priv->use_dr1 = 1;
1584         codec->get_buffer     = input_get_buffer;
1585         codec->release_buffer = input_release_buffer;
1586         codec->reget_buffer   = input_reget_buffer;
1587         codec->thread_safe_callbacks = 1;
1588     }
1589
1590     priv->frame = avcodec_alloc_frame();
1591
1592     return 0;
1593 }
1594
1595 static void input_uninit(AVFilterContext *ctx)
1596 {
1597     FilterPriv *priv = ctx->priv;
1598     av_free(priv->frame);
1599 }
1600
1601 static int input_request_frame(AVFilterLink *link)
1602 {
1603     FilterPriv *priv = link->src->priv;
1604     AVFilterBufferRef *picref;
1605     int64_t pts = 0;
1606     AVPacket pkt;
1607     int ret;
1608
1609     while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
1610         av_free_packet(&pkt);
1611     if (ret < 0)
1612         return -1;
1613
1614     if(priv->use_dr1 && priv->frame->opaque) {
1615         picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
1616     } else {
1617         picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
1618         av_image_copy(picref->data, picref->linesize,
1619                       priv->frame->data, priv->frame->linesize,
1620                       picref->format, link->w, link->h);
1621     }
1622     av_free_packet(&pkt);
1623
1624     avfilter_copy_frame_props(picref, priv->frame);
1625     picref->pts = pts;
1626
1627     avfilter_start_frame(link, picref);
1628     avfilter_draw_slice(link, 0, link->h, 1);
1629     avfilter_end_frame(link);
1630
1631     return 0;
1632 }
1633
1634 static int input_query_formats(AVFilterContext *ctx)
1635 {
1636     FilterPriv *priv = ctx->priv;
1637     enum PixelFormat pix_fmts[] = {
1638         priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1639     };
1640
1641     avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts));
1642     return 0;
1643 }
1644
1645 static int input_config_props(AVFilterLink *link)
1646 {
1647     FilterPriv *priv  = link->src->priv;
1648     AVStream *s = priv->is->video_st;
1649
1650     link->w = s->codec->width;
1651     link->h = s->codec->height;
1652     link->sample_aspect_ratio = s->sample_aspect_ratio.num ?
1653         s->sample_aspect_ratio : s->codec->sample_aspect_ratio;
1654     link->time_base = s->time_base;
1655
1656     return 0;
1657 }
1658
1659 static AVFilter input_filter =
1660 {
1661     .name      = "ffplay_input",
1662
1663     .priv_size = sizeof(FilterPriv),
1664
1665     .init      = input_init,
1666     .uninit    = input_uninit,
1667
1668     .query_formats = input_query_formats,
1669
1670     .inputs    = (AVFilterPad[]) {{ .name = NULL }},
1671     .outputs   = (AVFilterPad[]) {{ .name = "default",
1672                                     .type = AVMEDIA_TYPE_VIDEO,
1673                                     .request_frame = input_request_frame,
1674                                     .config_props  = input_config_props, },
1675                                   { .name = NULL }},
1676 };
1677
1678 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1679 {
1680     char sws_flags_str[128];
1681     int ret;
1682     enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
1683     AVFilterContext *filt_src = NULL, *filt_out = NULL;
1684     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1685     graph->scale_sws_opts = av_strdup(sws_flags_str);
1686
1687     if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
1688                                             NULL, is, graph)) < 0)
1689         return ret;
1690     if ((ret = avfilter_graph_create_filter(&filt_out, avfilter_get_by_name("buffersink"), "out",
1691                                             NULL, pix_fmts, graph)) < 0)
1692         return ret;
1693
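         /* Wire the chain: ffplay_input -> [user filter chain from -vf, if any]
          * -> buffersink.  The sink is restricted to YUV420P, which is what the
          * SDL overlay path in queue_picture() expects. */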
1694     if(vfilters) {
1695         AVFilterInOut *outputs = avfilter_inout_alloc();
1696         AVFilterInOut *inputs  = avfilter_inout_alloc();
1697
1698         outputs->name    = av_strdup("in");
1699         outputs->filter_ctx = filt_src;
1700         outputs->pad_idx = 0;
1701         outputs->next    = NULL;
1702
1703         inputs->name    = av_strdup("out");
1704         inputs->filter_ctx = filt_out;
1705         inputs->pad_idx = 0;
1706         inputs->next    = NULL;
1707
1708         if ((ret = avfilter_graph_parse(graph, vfilters, &inputs, &outputs, NULL)) < 0)
1709             return ret;
1710     } else {
1711         if ((ret = avfilter_link(filt_src, 0, filt_out, 0)) < 0)
1712             return ret;
1713     }
1714
1715     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1716         return ret;
1717
1718     is->out_video_filter = filt_out;
1719
1720     return ret;
1721 }
1722
1723 #endif  /* CONFIG_AVFILTER */
1724
1725 static int video_thread(void *arg)
1726 {
1727     VideoState *is = arg;
1728     AVFrame *frame= avcodec_alloc_frame();
1729     int64_t pts_int = AV_NOPTS_VALUE, pos = -1;
1730     double pts;
1731     int ret;
1732
1733 #if CONFIG_AVFILTER
1734     AVFilterGraph *graph = avfilter_graph_alloc();
1735     AVFilterContext *filt_out = NULL;
1736     int last_w = is->video_st->codec->width;
1737     int last_h = is->video_st->codec->height;
1738
1739     if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1740         goto the_end;
1741     filt_out = is->out_video_filter;
1742 #endif
1743
1744     for(;;) {
1745 #if !CONFIG_AVFILTER
1746         AVPacket pkt;
1747 #else
1748         AVFilterBufferRef *picref;
1749         AVRational tb = filt_out->inputs[0]->time_base;
1750 #endif
1751         while (is->paused && !is->videoq.abort_request)
1752             SDL_Delay(10);
1753 #if CONFIG_AVFILTER
1754         if (   last_w != is->video_st->codec->width
1755             || last_h != is->video_st->codec->height) {
1756             av_log(NULL, AV_LOG_INFO, "Frame changed from size:%dx%d to size:%dx%d\n",
1757                    last_w, last_h, is->video_st->codec->width, is->video_st->codec->height);
1758             avfilter_graph_free(&graph);
1759             graph = avfilter_graph_alloc();
1760             if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1761                 goto the_end;
1762             filt_out = is->out_video_filter;
1763             last_w = is->video_st->codec->width;
1764             last_h = is->video_st->codec->height;
1765         }
1766         ret = av_vsink_buffer_get_video_buffer_ref(filt_out, &picref, 0);
1767         if (picref) {
1768             avfilter_fill_frame_from_video_buffer_ref(frame, picref);
1769             pts_int = picref->pts;
1770             pos     = picref->pos;
1771             frame->opaque = picref;
1772         }
1773
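             /* The filter graph may run in a different time base than the video
              * stream; rescale the pts so the rest of the player keeps working
              * in stream units.  For example, going from tb 1/25 to 1/90000
              * multiplies the pts by 3600. */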
1774         if (av_cmp_q(tb, is->video_st->time_base)) {
1775             av_unused int64_t pts1 = pts_int;
1776             pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1777             av_dlog(NULL, "video_thread(): "
1778                     "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1779                     tb.num, tb.den, pts1,
1780                     is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1781         }
1782 #else
1783         ret = get_video_frame(is, frame, &pts_int, &pkt);
1784         pos = pkt.pos;
1785         av_free_packet(&pkt);
1786 #endif
1787
1788         if (ret < 0) goto the_end;
1789
1790 #if CONFIG_AVFILTER
1791         if (!picref)
1792             continue;
1793 #endif
1794
1795         pts = pts_int*av_q2d(is->video_st->time_base);
1796
1797         ret = queue_picture(is, frame, pts, pos);
1798
1799         if (ret < 0)
1800             goto the_end;
1801
1802         if (is->step)
1803             stream_toggle_pause(is);
1804     }
1805  the_end:
1806 #if CONFIG_AVFILTER
1807     avfilter_graph_free(&graph);
1808 #endif
1809     av_free(frame);
1810     return 0;
1811 }
1812
1813 static int subtitle_thread(void *arg)
1814 {
1815     VideoState *is = arg;
1816     SubPicture *sp;
1817     AVPacket pkt1, *pkt = &pkt1;
1818     int got_subtitle;
1819     double pts;
1820     int i, j;
1821     int r, g, b, y, u, v, a;
1822
1823     for(;;) {
1824         while (is->paused && !is->subtitleq.abort_request) {
1825             SDL_Delay(10);
1826         }
1827         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1828             break;
1829
1830         if(pkt->data == flush_pkt.data){
1831             avcodec_flush_buffers(is->subtitle_st->codec);
1832             continue;
1833         }
1834         SDL_LockMutex(is->subpq_mutex);
1835         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1836                !is->subtitleq.abort_request) {
1837             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1838         }
1839         SDL_UnlockMutex(is->subpq_mutex);
1840
1841         if (is->subtitleq.abort_request)
1842             return 0;
1843
1844         sp = &is->subpq[is->subpq_windex];
1845
1846         /* NOTE: pts is the PTS of the _first_ subtitle beginning in
1847            this packet, if any */
1848         pts = 0;
1849         if (pkt->pts != AV_NOPTS_VALUE)
1850             pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1851
1852         avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
1853                                  &got_subtitle, pkt);
1854
1855         if (got_subtitle && sp->sub.format == 0) {
1856             sp->pts = pts;
1857
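                 /* Convert each palette entry from RGBA to CCIR YUVA in place
                  * so the display code can blend the subtitle directly into the
                  * YUV overlay. */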
1858             for (i = 0; i < sp->sub.num_rects; i++)
1859             {
1860                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1861                 {
1862                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1863                     y = RGB_TO_Y_CCIR(r, g, b);
1864                     u = RGB_TO_U_CCIR(r, g, b, 0);
1865                     v = RGB_TO_V_CCIR(r, g, b, 0);
1866                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1867                 }
1868             }
1869
1870             /* now we can update the picture count */
1871             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1872                 is->subpq_windex = 0;
1873             SDL_LockMutex(is->subpq_mutex);
1874             is->subpq_size++;
1875             SDL_UnlockMutex(is->subpq_mutex);
1876         }
1877         av_free_packet(pkt);
1878     }
1879     return 0;
1880 }
1881
1882 /* copy samples into the circular buffer used by the waveform/RDFT display */
1883 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1884 {
1885     int size, len;
1886
1887     size = samples_size / sizeof(short);
1888     while (size > 0) {
1889         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1890         if (len > size)
1891             len = size;
1892         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1893         samples += len;
1894         is->sample_array_index += len;
1895         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1896             is->sample_array_index = 0;
1897         size -= len;
1898     }
1899 }
1900
1901 /* return the new audio buffer size (samples can be added or deleted
1902    to improve sync when video or an external clock is the master) */
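     /* A sketch of the idea: the A-V difference is smoothed with an exponential
      * moving average; once it exceeds audio_diff_threshold, the buffer is grown
      * or shrunk towards the ideal size, but never by more than
      * SAMPLE_CORRECTION_PERCENT_MAX (10%) per callback, so pitch artifacts stay
      * small. */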
1903 static int synchronize_audio(VideoState *is, short *samples,
1904                              int samples_size1, double pts)
1905 {
1906     int n, samples_size;
1907     double ref_clock;
1908
1909     n = 2 * is->audio_st->codec->channels;
1910     samples_size = samples_size1;
1911
1912     /* if not master, then we try to remove or add samples to correct the clock */
1913     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1914          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1915         double diff, avg_diff;
1916         int wanted_size, min_size, max_size, nb_samples;
1917
1918         ref_clock = get_master_clock(is);
1919         diff = get_audio_clock(is) - ref_clock;
1920
1921         if (diff < AV_NOSYNC_THRESHOLD) {
1922             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1923             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1924                 /* not enough measurements to have a correct estimate */
1925                 is->audio_diff_avg_count++;
1926             } else {
1927                 /* estimate the A-V difference */
1928                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
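                     /* audio_diff_cum is a geometric sum of past diffs with
                      * ratio audio_diff_avg_coef (~0.79 for AUDIO_DIFF_AVG_NB
                      * == 20); multiplying by (1 - coef) normalizes it, since
                      * the weights sum to 1 / (1 - coef). */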
1929
1930                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1931                     wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1932                     nb_samples = samples_size / n;
1933
1934                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1935                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1936                     if (wanted_size < min_size)
1937                         wanted_size = min_size;
1938                     else if (wanted_size > max_size)
1939                         wanted_size = max_size;
1940
1941                     /* add or remove samples to correct the sync */
1942                     if (wanted_size < samples_size) {
1943                         /* remove samples */
1944                         samples_size = wanted_size;
1945                     } else if (wanted_size > samples_size) {
1946                         uint8_t *samples_end, *q;
1947                         int nb;
1948
1949                         /* add samples by repeating the final one */
1950                         nb = (wanted_size - samples_size);
1951                         samples_end = (uint8_t *)samples + samples_size - n;
1952                         q = samples_end + n;
1953                         while (nb > 0) {
1954                             memcpy(q, samples_end, n);
1955                             q += n;
1956                             nb -= n;
1957                         }
1958                         samples_size = wanted_size;
1959                     }
1960                 }
1961 #if 0
1962                 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1963                        diff, avg_diff, samples_size - samples_size1,
1964                        is->audio_clock, is->video_clock, is->audio_diff_threshold);
1965 #endif
1966             }
1967         } else {
1968             /* the difference is too big: probably initial PTS errors, so
1969                reset the A-V filter */
1970             is->audio_diff_avg_count = 0;
1971             is->audio_diff_cum = 0;
1972         }
1973     }
1974
1975     return samples_size;
1976 }
1977
1978 /* decode one audio frame and return its uncompressed size */
1979 static int audio_decode_frame(VideoState *is, double *pts_ptr)
1980 {
1981     AVPacket *pkt_temp = &is->audio_pkt_temp;
1982     AVPacket *pkt = &is->audio_pkt;
1983     AVCodecContext *dec= is->audio_st->codec;
1984     int n, len1, data_size;
1985     double pts;
1986
1987     for(;;) {
1988         /* NOTE: the audio packet can contain several frames */
1989         while (pkt_temp->size > 0) {
1990             data_size = sizeof(is->audio_buf1);
1991             len1 = avcodec_decode_audio3(dec,
1992                                         (int16_t *)is->audio_buf1, &data_size,
1993                                         pkt_temp);
1994             if (len1 < 0) {
1995                 /* if error, we skip the frame */
1996                 pkt_temp->size = 0;
1997                 break;
1998             }
1999
2000             pkt_temp->data += len1;
2001             pkt_temp->size -= len1;
2002             if (data_size <= 0)
2003                 continue;
2004
2005             if (dec->sample_fmt != is->audio_src_fmt) {
2006                 if (is->reformat_ctx)
2007                     av_audio_convert_free(is->reformat_ctx);
2008                 is->reformat_ctx= av_audio_convert_alloc(AV_SAMPLE_FMT_S16, 1,
2009                                                          dec->sample_fmt, 1, NULL, 0);
2010                 if (!is->reformat_ctx) {
2011                     fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
2012                         av_get_sample_fmt_name(dec->sample_fmt),
2013                         av_get_sample_fmt_name(AV_SAMPLE_FMT_S16));
2014                         break;
2015                 }
2016                 is->audio_src_fmt= dec->sample_fmt;
2017             }
2018
2019             if (is->reformat_ctx) {
2020                 const void *ibuf[6]= {is->audio_buf1};
2021                 void *obuf[6]= {is->audio_buf2};
2022                 int istride[6]= {av_get_bytes_per_sample(dec->sample_fmt)};
2023                 int ostride[6]= {2};
2024                 int len= data_size/istride[0];
2025                 if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
2026                     printf("av_audio_convert() failed\n");
2027                     break;
2028                 }
2029                 is->audio_buf= is->audio_buf2;
2030                 /* FIXME: the existing code assumes that data_size equals framesize*channels*2;
2031                           remove this legacy cruft */
2032                 data_size= len*2;
2033             }else{
2034                 is->audio_buf= is->audio_buf1;
2035             }
2036
2037             /* derive the pts from the running audio clock, then advance the clock */
2038             pts = is->audio_clock;
2039             *pts_ptr = pts;
2040             n = 2 * dec->channels;
2041             is->audio_clock += (double)data_size /
2042                 (double)(n * dec->sample_rate);
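                 /* e.g. 4096 bytes of stereo S16 at 44100 Hz (n == 4) advance
                  * the clock by 4096 / (4 * 44100) ~= 23 ms */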
2043 #ifdef DEBUG
2044             {
2045                 static double last_clock;
2046                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
2047                        is->audio_clock - last_clock,
2048                        is->audio_clock, pts);
2049                 last_clock = is->audio_clock;
2050             }
2051 #endif
2052             return data_size;
2053         }
2054
2055         /* free the current packet */
2056         if (pkt->data)
2057             av_free_packet(pkt);
2058
2059         if (is->paused || is->audioq.abort_request) {
2060             return -1;
2061         }
2062
2063         /* read next packet */
2064         if (packet_queue_get(&is->audioq, pkt, 1) < 0)
2065             return -1;
2066         if(pkt->data == flush_pkt.data){
2067             avcodec_flush_buffers(dec);
2068             continue;
2069         }
2070
2071         pkt_temp->data = pkt->data;
2072         pkt_temp->size = pkt->size;
2073
2074         /* update the audio clock with the packet pts, if available */
2075         if (pkt->pts != AV_NOPTS_VALUE) {
2076             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2077         }
2078     }
2079 }
2080
2081 /* prepare a new audio buffer */
2082 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2083 {
2084     VideoState *is = opaque;
2085     int audio_size, len1;
2086     int bytes_per_sec;
2087     double pts;
2088
2089     audio_callback_time = av_gettime();
2090
2091     while (len > 0) {
2092         if (is->audio_buf_index >= is->audio_buf_size) {
2093            audio_size = audio_decode_frame(is, &pts);
2094            if (audio_size < 0) {
2095                 /* if error, just output silence */
2096                is->audio_buf = is->audio_buf1;
2097                is->audio_buf_size = 1024;
2098                memset(is->audio_buf, 0, is->audio_buf_size);
2099            } else {
2100                if (is->show_mode != SHOW_MODE_VIDEO)
2101                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2102                audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2103                                               pts);
2104                is->audio_buf_size = audio_size;
2105            }
2106            is->audio_buf_index = 0;
2107         }
2108         len1 = is->audio_buf_size - is->audio_buf_index;
2109         if (len1 > len)
2110             len1 = len;
2111         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2112         len -= len1;
2113         stream += len1;
2114         is->audio_buf_index += len1;
2115     }
2116     bytes_per_sec = is->audio_st->codec->sample_rate *
2117             2 * is->audio_st->codec->channels;
2118     is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2119     /* Let's assume the audio driver that is used by SDL has two periods. */
2120     is->audio_current_pts = is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / bytes_per_sec;
2121     is->audio_current_pts_drift = is->audio_current_pts - audio_callback_time / 1000000.0;
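         /* audio_clock is the pts at the *end* of the data handed to SDL; the
          * bytes still sitting in the hardware buffer (assumed to be two
          * periods) and in our own unread buffer have not been heard yet, so
          * subtract their duration.  The drift lets get_audio_clock()
          * extrapolate between callbacks without taking a lock. */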
2122 }
2123
2124 /* open a given stream. Return 0 if OK */
2125 static int stream_component_open(VideoState *is, int stream_index)
2126 {
2127     AVFormatContext *ic = is->ic;
2128     AVCodecContext *avctx;
2129     AVCodec *codec;
2130     SDL_AudioSpec wanted_spec, spec;
2131     AVDictionary *opts;
2132     AVDictionaryEntry *t = NULL;
2133
2134     if (stream_index < 0 || stream_index >= ic->nb_streams)
2135         return -1;
2136     avctx = ic->streams[stream_index]->codec;
2137
2138     opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index]);
2139
2140     /* for audio, request at most 2 decoded channels (stereo output) */
2141     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2142         if (avctx->channels > 0) {
2143             avctx->request_channels = FFMIN(2, avctx->channels);
2144         } else {
2145             avctx->request_channels = 2;
2146         }
2147     }
2148
2149     codec = avcodec_find_decoder(avctx->codec_id);
2150     if (!codec)
2151         return -1;
2152
2153     avctx->workaround_bugs = workaround_bugs;
2154     avctx->lowres = lowres;
2155     if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2156     avctx->idct_algo= idct;
2157     if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2158     avctx->skip_frame= skip_frame;
2159     avctx->skip_idct= skip_idct;
2160     avctx->skip_loop_filter= skip_loop_filter;
2161     avctx->error_recognition= error_recognition;
2162     avctx->error_concealment= error_concealment;
2163     avctx->thread_count= thread_count;
2164
2165     if(codec->capabilities & CODEC_CAP_DR1)
2166         avctx->flags |= CODEC_FLAG_EMU_EDGE;
2167
2168     if (!codec ||
2169         avcodec_open2(avctx, codec, &opts) < 0)
2170         return -1;
2171     if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2172         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2173         return AVERROR_OPTION_NOT_FOUND;
2174     }
2175
2176     /* prepare audio output */
2177     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2178         if(avctx->sample_rate <= 0 || avctx->channels <= 0){
2179             fprintf(stderr, "Invalid sample rate or channel count\n");
2180             return -1;
2181         }
2182         wanted_spec.freq = avctx->sample_rate;
2183         wanted_spec.format = AUDIO_S16SYS;
2184         wanted_spec.channels = avctx->channels;
2185         wanted_spec.silence = 0;
2186         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2187         wanted_spec.callback = sdl_audio_callback;
2188         wanted_spec.userdata = is;
2189         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2190             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2191             return -1;
2192         }
2193         is->audio_hw_buf_size = spec.size;
2194         is->audio_src_fmt= AV_SAMPLE_FMT_S16;
2195     }
2196
2197     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2198     switch(avctx->codec_type) {
2199     case AVMEDIA_TYPE_AUDIO:
2200         is->audio_stream = stream_index;
2201         is->audio_st = ic->streams[stream_index];
2202         is->audio_buf_size = 0;
2203         is->audio_buf_index = 0;
2204
2205         /* init averaging filter */
2206         is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
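             /* chosen so that a measurement's weight decays to 1% after
              * AUDIO_DIFF_AVG_NB callbacks: coef^AUDIO_DIFF_AVG_NB == 0.01 */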
2207         is->audio_diff_avg_count = 0;
2208         /* since we do not have a precise enough audio FIFO fullness measure,
2209            we correct audio sync only if the error is larger than this threshold */
2210         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
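             /* e.g. with SDL_AUDIO_BUFFER_SIZE == 1024 samples at 44100 Hz this
              * is about 46 ms */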
2211
2212         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2213         packet_queue_init(&is->audioq);
2214         SDL_PauseAudio(0);
2215         break;
2216     case AVMEDIA_TYPE_VIDEO:
2217         is->video_stream = stream_index;
2218         is->video_st = ic->streams[stream_index];
2219
2220         packet_queue_init(&is->videoq);
2221         is->video_tid = SDL_CreateThread(video_thread, is);
2222         break;
2223     case AVMEDIA_TYPE_SUBTITLE:
2224         is->subtitle_stream = stream_index;
2225         is->subtitle_st = ic->streams[stream_index];
2226         packet_queue_init(&is->subtitleq);
2227
2228         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2229         break;
2230     default:
2231         break;
2232     }
2233     return 0;
2234 }
2235
2236 static void stream_component_close(VideoState *is, int stream_index)
2237 {
2238     AVFormatContext *ic = is->ic;
2239     AVCodecContext *avctx;
2240
2241     if (stream_index < 0 || stream_index >= ic->nb_streams)
2242         return;
2243     avctx = ic->streams[stream_index]->codec;
2244
2245     switch(avctx->codec_type) {
2246     case AVMEDIA_TYPE_AUDIO:
2247         packet_queue_abort(&is->audioq);
2248
2249         SDL_CloseAudio();
2250
2251         packet_queue_end(&is->audioq);
2252         if (is->reformat_ctx)
2253             av_audio_convert_free(is->reformat_ctx);
2254         is->reformat_ctx = NULL;
2255         break;
2256     case AVMEDIA_TYPE_VIDEO:
2257         packet_queue_abort(&is->videoq);
2258
2259         /* note: we also signal this mutex to make sure we unblock the
2260            video thread in all cases */
2261         SDL_LockMutex(is->pictq_mutex);
2262         SDL_CondSignal(is->pictq_cond);
2263         SDL_UnlockMutex(is->pictq_mutex);
2264
2265         SDL_WaitThread(is->video_tid, NULL);
2266
2267         packet_queue_end(&is->videoq);
2268         break;
2269     case AVMEDIA_TYPE_SUBTITLE:
2270         packet_queue_abort(&is->subtitleq);
2271
2272         /* note: we also signal this mutex to make sure we unblock the
2273            subtitle thread in all cases */
2274         SDL_LockMutex(is->subpq_mutex);
2275         is->subtitle_stream_changed = 1;
2276
2277         SDL_CondSignal(is->subpq_cond);
2278         SDL_UnlockMutex(is->subpq_mutex);
2279
2280         SDL_WaitThread(is->subtitle_tid, NULL);
2281
2282         packet_queue_end(&is->subtitleq);
2283         break;
2284     default:
2285         break;
2286     }
2287
2288     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2289     avcodec_close(avctx);
2290     switch(avctx->codec_type) {
2291     case AVMEDIA_TYPE_AUDIO:
2292         is->audio_st = NULL;
2293         is->audio_stream = -1;
2294         break;
2295     case AVMEDIA_TYPE_VIDEO:
2296         is->video_st = NULL;
2297         is->video_stream = -1;
2298         break;
2299     case AVMEDIA_TYPE_SUBTITLE:
2300         is->subtitle_st = NULL;
2301         is->subtitle_stream = -1;
2302         break;
2303     default:
2304         break;
2305     }
2306 }
2307
2308 /* since we have only one decoding thread, we can use a global
2309    variable instead of a thread local variable */
2310 static VideoState *global_video_state;
2311
2312 static int decode_interrupt_cb(void)
2313 {
2314     return (global_video_state && global_video_state->abort_request);
2315 }
2316
2317 /* this thread gets the stream from the disk or the network */
2318 static int read_thread(void *arg)
2319 {
2320     VideoState *is = arg;
2321     AVFormatContext *ic = NULL;
2322     int err, i, ret;
2323     int st_index[AVMEDIA_TYPE_NB];
2324     AVPacket pkt1, *pkt = &pkt1;
2325     int eof=0;
2326     int pkt_in_play_range = 0;
2327     AVDictionaryEntry *t;
2328     AVDictionary **opts;
2329     int orig_nb_streams;
2330
2331     memset(st_index, -1, sizeof(st_index));
2332     is->video_stream = -1;
2333     is->audio_stream = -1;
2334     is->subtitle_stream = -1;
2335
2336     global_video_state = is;
2337     avio_set_interrupt_cb(decode_interrupt_cb);
2338
2339     err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2340     if (err < 0) {
2341         print_error(is->filename, err);
2342         ret = -1;
2343         goto fail;
2344     }
2345     if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2346         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2347         ret = AVERROR_OPTION_NOT_FOUND;
2348         goto fail;
2349     }
2350     is->ic = ic;
2351
2352     if(genpts)
2353         ic->flags |= AVFMT_FLAG_GENPTS;
2354
2355     opts = setup_find_stream_info_opts(ic, codec_opts);
2356     orig_nb_streams = ic->nb_streams;
2357
2358     err = avformat_find_stream_info(ic, opts);
2359     if (err < 0) {
2360         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2361         ret = -1;
2362         goto fail;
2363     }
2364     for (i = 0; i < orig_nb_streams; i++)
2365         av_dict_free(&opts[i]);
2366     av_freep(&opts);
2367
2368     if(ic->pb)
2369         ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end
2370
2371     if(seek_by_bytes<0)
2372         seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2373
2374     /* if seeking requested, we execute it */
2375     if (start_time != AV_NOPTS_VALUE) {
2376         int64_t timestamp;
2377
2378         timestamp = start_time;
2379         /* add the stream start time */
2380         if (ic->start_time != AV_NOPTS_VALUE)
2381             timestamp += ic->start_time;
2382         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2383         if (ret < 0) {
2384             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2385                     is->filename, (double)timestamp / AV_TIME_BASE);
2386         }
2387     }
2388
2389     for (i = 0; i < ic->nb_streams; i++)
2390         ic->streams[i]->discard = AVDISCARD_ALL;
2391     if (!video_disable)
2392         st_index[AVMEDIA_TYPE_VIDEO] =
2393             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2394                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2395     if (!audio_disable)
2396         st_index[AVMEDIA_TYPE_AUDIO] =
2397             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2398                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2399                                 st_index[AVMEDIA_TYPE_VIDEO],
2400                                 NULL, 0);
2401     if (!video_disable)
2402         st_index[AVMEDIA_TYPE_SUBTITLE] =
2403             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2404                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2405                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2406                                  st_index[AVMEDIA_TYPE_AUDIO] :
2407                                  st_index[AVMEDIA_TYPE_VIDEO]),
2408                                 NULL, 0);
2409     if (show_status) {
2410         av_dump_format(ic, 0, is->filename, 0);
2411     }
2412
2413     is->show_mode = show_mode;
2414
2415     /* open the streams */
2416     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2417         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2418     }
2419
2420     ret=-1;
2421     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2422         ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2423     }
2424     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2425     if (is->show_mode == SHOW_MODE_NONE)
2426         is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2427
2428     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2429         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2430     }
2431
2432     if (is->video_stream < 0 && is->audio_stream < 0) {
2433         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2434         ret = -1;
2435         goto fail;
2436     }
2437
2438     for(;;) {
2439         if (is->abort_request)
2440             break;
2441         if (is->paused != is->last_paused) {
2442             is->last_paused = is->paused;
2443             if (is->paused)
2444                 is->read_pause_return= av_read_pause(ic);
2445             else
2446                 av_read_play(ic);
2447         }
2448 #if CONFIG_RTSP_DEMUXER
2449         if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2450             /* wait 10 ms to avoid trying to get another packet */
2451             /* XXX: horrible */
2452             SDL_Delay(10);
2453             continue;
2454         }
2455 #endif
2456         if (is->seek_req) {
2457             int64_t seek_target= is->seek_pos;
2458             int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2459             int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2460 //FIXME the +-2 is because rounding is not done in the correct direction when the
2461 //      seek_pos/seek_rel variables are generated
2462
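             /* On a successful seek every packet queue is flushed and a special
              * flush_pkt is queued; the decoder threads recognize it by pointer
              * and call avcodec_flush_buffers() to drop stale reference
              * frames. */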
2463             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2464             if (ret < 0) {
2465                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2466             }else{
2467                 if (is->audio_stream >= 0) {
2468                     packet_queue_flush(&is->audioq);
2469                     packet_queue_put(&is->audioq, &flush_pkt);
2470                 }
2471                 if (is->subtitle_stream >= 0) {
2472                     packet_queue_flush(&is->subtitleq);
2473                     packet_queue_put(&is->subtitleq, &flush_pkt);
2474                 }
2475                 if (is->video_stream >= 0) {
2476                     packet_queue_flush(&is->videoq);
2477                     packet_queue_put(&is->videoq, &flush_pkt);
2478                 }
2479             }
2480             is->seek_req = 0;
2481             eof= 0;
2482         }
2483
2484         /* if the queues are full, no need to read more */
2485         if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2486             || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
2487                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
2488                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
2489             /* wait 10 ms */
2490             SDL_Delay(10);
2491             continue;
2492         }
2493         if(eof) {
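             /* at EOF, feed the video decoder an empty packet so codecs with
              * delayed frames (CODEC_CAP_DELAY, e.g. B-frame codecs) flush out
              * what they are still holding */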
2494             if(is->video_stream >= 0){
2495                 av_init_packet(pkt);
2496                 pkt->data=NULL;
2497                 pkt->size=0;
2498                 pkt->stream_index= is->video_stream;
2499                 packet_queue_put(&is->videoq, pkt);
2500             }
2501             SDL_Delay(10);
2502             if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
2503                 if(loop!=1 && (!loop || --loop)){
2504                     stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2505                 }else if(autoexit){
2506                     ret=AVERROR_EOF;
2507                     goto fail;
2508                 }
2509             }
2510             eof=0;
2511             continue;
2512         }
2513         ret = av_read_frame(ic, pkt);
2514         if (ret < 0) {
2515             if (ret == AVERROR_EOF || url_feof(ic->pb))
2516                 eof=1;
2517             if (ic->pb && ic->pb->error)
2518                 break;
2519             SDL_Delay(100); /* wait for user event */
2520             continue;
2521         }
2522         /* check if packet is in play range specified by user, then queue, otherwise discard */
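             /* i.e. keep the packet when (pkt_pts - stream_start) * time_base,
              * minus the requested -ss start time, is still <= the -t duration */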
2523         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2524                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2525                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2526                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
2527                 <= ((double)duration/1000000);
2528         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2529             packet_queue_put(&is->audioq, pkt);
2530         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2531             packet_queue_put(&is->videoq, pkt);
2532         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2533             packet_queue_put(&is->subtitleq, pkt);
2534         } else {
2535             av_free_packet(pkt);
2536         }
2537     }
2538     /* wait until the end */
2539     while (!is->abort_request) {
2540         SDL_Delay(100);
2541     }
2542
2543     ret = 0;
2544  fail:
2545     /* disable interrupting */
2546     global_video_state = NULL;
2547
2548     /* close each stream */
2549     if (is->audio_stream >= 0)
2550         stream_component_close(is, is->audio_stream);
2551     if (is->video_stream >= 0)
2552         stream_component_close(is, is->video_stream);
2553     if (is->subtitle_stream >= 0)
2554         stream_component_close(is, is->subtitle_stream);
2555     if (is->ic) {
2556         av_close_input_file(is->ic);
2557         is->ic = NULL; /* safety */
2558     }
2559     avio_set_interrupt_cb(NULL);
2560
2561     if (ret != 0) {
2562         SDL_Event event;
2563
2564         event.type = FF_QUIT_EVENT;
2565         event.user.data1 = is;
2566         SDL_PushEvent(&event);
2567     }
2568     return 0;
2569 }
2570
2571 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2572 {
2573     VideoState *is;
2574
2575     is = av_mallocz(sizeof(VideoState));
2576     if (!is)
2577         return NULL;
2578     av_strlcpy(is->filename, filename, sizeof(is->filename));
2579     is->iformat = iformat;
2580     is->ytop = 0;
2581     is->xleft = 0;
2582
2583     /* start video display */
2584     is->pictq_mutex = SDL_CreateMutex();
2585     is->pictq_cond = SDL_CreateCond();
2586
2587     is->subpq_mutex = SDL_CreateMutex();
2588     is->subpq_cond = SDL_CreateCond();
2589
2590     is->av_sync_type = av_sync_type;
2591     is->read_tid = SDL_CreateThread(read_thread, is);
2592     if (!is->read_tid) {
2593         av_free(is);
2594         return NULL;
2595     }
2596     return is;
2597 }
2598
2599 static void stream_cycle_channel(VideoState *is, int codec_type)
2600 {
2601     AVFormatContext *ic = is->ic;
2602     int start_index, stream_index;
2603     AVStream *st;
2604
2605     if (codec_type == AVMEDIA_TYPE_VIDEO)
2606         start_index = is->video_stream;
2607     else if (codec_type == AVMEDIA_TYPE_AUDIO)
2608         start_index = is->audio_stream;
2609     else
2610         start_index = is->subtitle_stream;
2611     if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2612         return;
2613     stream_index = start_index;
2614     for(;;) {
2615         if (++stream_index >= is->ic->nb_streams)
2616         {
2617             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2618             {
2619                 stream_index = -1;
2620                 goto the_end;
2621             } else
2622                 stream_index = 0;
2623         }
2624         if (stream_index == start_index)
2625             return;
2626         st = ic->streams[stream_index];
2627         if (st->codec->codec_type == codec_type) {
2628             /* check that parameters are OK */
2629             switch(codec_type) {
2630             case AVMEDIA_TYPE_AUDIO:
2631                 if (st->codec->sample_rate != 0 &&
2632                     st->codec->channels != 0)
2633                     goto the_end;
2634                 break;
2635             case AVMEDIA_TYPE_VIDEO:
2636             case AVMEDIA_TYPE_SUBTITLE:
2637                 goto the_end;
2638             default:
2639                 break;
2640             }
2641         }
2642     }
2643  the_end:
2644     stream_component_close(is, start_index);
2645     stream_component_open(is, stream_index);
2646 }
2647
2648
2649 static void toggle_full_screen(VideoState *is)
2650 {
2651     is_full_screen = !is_full_screen;
2652     video_open(is);
2653 }
2654
2655 static void toggle_pause(VideoState *is)
2656 {
2657     stream_toggle_pause(is);
2658     is->step = 0;
2659 }
2660
2661 static void step_to_next_frame(VideoState *is)
2662 {
2663     /* if the stream is paused, unpause it, then step */
2664     if (is->paused)
2665         stream_toggle_pause(is);
2666     is->step = 1;
2667 }
2668
2669 static void toggle_audio_display(VideoState *is)
2670 {
2671     int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2672     is->show_mode = (is->show_mode + 1) % SHOW_MODE_NB;
2673     fill_rectangle(screen,
2674                 is->xleft, is->ytop, is->width, is->height,
2675                 bgcolor);
2676     SDL_UpdateRect(screen, is->xleft, is->ytop, is->width, is->height);
2677 }
2678
2679 /* handle an event sent by the GUI */
2680 static void event_loop(VideoState *cur_stream)
2681 {
2682     SDL_Event event;
2683     double incr, pos, frac;
2684
2685     for(;;) {
2686         double x;
2687         SDL_WaitEvent(&event);
2688         switch(event.type) {
2689         case SDL_KEYDOWN:
2690             if (exit_on_keydown) {
2691                 do_exit(cur_stream);
2692                 break;
2693             }
2694             switch(event.key.keysym.sym) {
2695             case SDLK_ESCAPE:
2696             case SDLK_q:
2697                 do_exit(cur_stream);
2698                 break;
2699             case SDLK_f:
2700                 toggle_full_screen(cur_stream);
2701                 break;
2702             case SDLK_p:
2703             case SDLK_SPACE:
2704                 toggle_pause(cur_stream);
2705                 break;
2706             case SDLK_s: //S: Step to next frame
2707                 step_to_next_frame(cur_stream);
2708                 break;
2709             case SDLK_a:
2710                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2711                 break;
2712             case SDLK_v:
2713                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2714                 break;
2715             case SDLK_t:
2716                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2717                 break;
2718             case SDLK_w:
2719                 toggle_audio_display(cur_stream);
2720                 break;
2721             case SDLK_LEFT:
2722                 incr = -10.0;
2723                 goto do_seek;
2724             case SDLK_RIGHT:
2725                 incr = 10.0;
2726                 goto do_seek;
2727             case SDLK_UP:
2728                 incr = 60.0;
2729                 goto do_seek;
2730             case SDLK_DOWN:
2731                 incr = -60.0;
2732             do_seek:
2733                 if (seek_by_bytes) {
2734                     if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
2735                         pos= cur_stream->video_current_pos;
2736                     }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
2737                         pos= cur_stream->audio_pkt.pos;
2738                     }else
2739                         pos = avio_tell(cur_stream->ic->pb);
2740                     if (cur_stream->ic->bit_rate)
2741                         incr *= cur_stream->ic->bit_rate / 8.0;
2742                     else
2743                         incr *= 180000.0;
2744                     pos += incr;
2745                     stream_seek(cur_stream, pos, incr, 1);
2746                 } else {
2747                     pos = get_master_clock(cur_stream);
2748                     pos += incr;
2749                     stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2750                 }
2751                 break;
2752             default:
2753                 break;
2754             }
2755             break;
2756         case SDL_MOUSEBUTTONDOWN:
2757             if (exit_on_mousedown) {
2758                 do_exit(cur_stream);
2759                 break;
2760             }
2761         case SDL_MOUSEMOTION:
2762             if(event.type ==SDL_MOUSEBUTTONDOWN){
2763                 x= event.button.x;
2764             }else{
2765                 if(event.motion.state != SDL_PRESSED)
2766                     break;
2767                 x= event.motion.x;
2768             }
2769             if(seek_by_bytes || cur_stream->ic->duration<=0){
2770                 uint64_t size=  avio_size(cur_stream->ic->pb);
2771                 stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2772             }else{
2773                 int64_t ts;
2774                 int ns, hh, mm, ss;
2775                 int tns, thh, tmm, tss;
2776                 tns = cur_stream->ic->duration/1000000LL;
2777                 thh = tns/3600;
2778                 tmm = (tns%3600)/60;
2779                 tss = (tns%60);
2780                 frac = x/cur_stream->width;
2781                 ns = frac*tns;
2782                 hh = ns/3600;
2783                 mm = (ns%3600)/60;
2784                 ss = (ns%60);
2785                 fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2786                         hh, mm, ss, thh, tmm, tss);
2787                 ts = frac*cur_stream->ic->duration;
2788                 if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2789                     ts += cur_stream->ic->start_time;
2790                 stream_seek(cur_stream, ts, 0, 0);
2791             }
2792             break;
2793         case SDL_VIDEORESIZE:
2794             screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2795                                       SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2796             screen_width = cur_stream->width = event.resize.w;
2797             screen_height= cur_stream->height= event.resize.h;
2798             break;
2799         case SDL_QUIT:
2800         case FF_QUIT_EVENT:
2801             do_exit(cur_stream);
2802             break;
2803         case FF_ALLOC_EVENT:
2804             video_open(event.user.data1);
2805             alloc_picture(event.user.data1);
2806             break;
2807         case FF_REFRESH_EVENT:
2808             video_refresh(event.user.data1);
2809             cur_stream->refresh=0;
2810             break;
2811         default:
2812             break;
2813         }
2814     }
2815 }
2816
2817 static int opt_frame_size(const char *opt, const char *arg)
2818 {
2819     av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n");
2820     return opt_default("video_size", arg);
2821 }
2822
2823 static int opt_width(const char *opt, const char *arg)
2824 {
2825     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2826     return 0;
2827 }
2828
2829 static int opt_height(const char *opt, const char *arg)
2830 {
2831     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2832     return 0;
2833 }
2834
2835 static int opt_format(const char *opt, const char *arg)
2836 {
2837     file_iformat = av_find_input_format(arg);
2838     if (!file_iformat) {
2839         fprintf(stderr, "Unknown input format: %s\n", arg);
2840         return AVERROR(EINVAL);
2841     }
2842     return 0;
2843 }
2844
2845 static int opt_frame_pix_fmt(const char *opt, const char *arg)
2846 {
2847     av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n");
2848     return opt_default("pixel_format", arg);
2849 }
2850
2851 static int opt_sync(const char *opt, const char *arg)
2852 {
2853     if (!strcmp(arg, "audio"))
2854         av_sync_type = AV_SYNC_AUDIO_MASTER;
2855     else if (!strcmp(arg, "video"))
2856         av_sync_type = AV_SYNC_VIDEO_MASTER;
2857     else if (!strcmp(arg, "ext"))
2858         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2859     else {
2860         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2861         exit(1);
2862     }
2863     return 0;
2864 }
2865
2866 static int opt_seek(const char *opt, const char *arg)
2867 {
2868     start_time = parse_time_or_die(opt, arg, 1);
2869     return 0;
2870 }
2871
2872 static int opt_duration(const char *opt, const char *arg)
2873 {
2874     duration = parse_time_or_die(opt, arg, 1);
2875     return 0;
2876 }
2877
2878 static int opt_thread_count(const char *opt, const char *arg)
2879 {
2880     thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2881 #if !HAVE_THREADS
2882     fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2883 #endif
2884     return 0;
2885 }
2886
2887 static int opt_show_mode(const char *opt, const char *arg)
2888 {
2889     show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
2890                 !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
2891                 !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT  :
2892                 parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
2893     return 0;
2894 }
2895
2896 static int opt_input_file(const char *opt, const char *filename)
2897 {
2898     if (input_filename) {
2899         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
2900                 filename, input_filename);
2901         exit(1);
2902     }
2903     if (!strcmp(filename, "-"))
2904         filename = "pipe:";
2905     input_filename = filename;
2906     return 0;
2907 }
2908
2909 static const OptionDef options[] = {
2910 #include "cmdutils_common_opts.h"
2911     { "x", HAS_ARG, {(void*)opt_width}, "force displayed width", "width" },
2912     { "y", HAS_ARG, {(void*)opt_height}, "force displayed height", "height" },
2913     { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
2914     { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
2915     { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
2916     { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
2917     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
2918     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
2919     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
2920     { "ss", HAS_ARG, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
2921     { "t", HAS_ARG, {(void*)&opt_duration}, "play \"duration\" seconds of audio/video", "duration" },
2922     { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
2923     { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
2924     { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
2925     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
2926     { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
2927     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
2928     { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
2929     { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
2930     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
2931     { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
2932     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
2933     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
2934     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
2935     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
2936     { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
2937     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
2938     { "sync", HAS_ARG | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
2939     { "threads", HAS_ARG | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
2940     { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
2941     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
2942     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
2943     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
2944     { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
2945     { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
2946 #if CONFIG_AVFILTER
2947     { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
2948 #endif
2949     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
2950     { "showmode", HAS_ARG, {(void*)opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
2951     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
2952     { "i", HAS_ARG, {(void *)opt_input_file}, "read specified file", "input_file"},
2953     { NULL, },
2954 };
2955
2956 static void show_usage(void)
2957 {
2958     printf("Simple media player\n");
2959     printf("usage: %s [options] input_file\n", program_name);
2960     printf("\n");
2961 }
2962
2963 static int opt_help(const char *opt, const char *arg)
2964 {
2965     av_log_set_callback(log_callback_help);
2966     show_usage();
2967     show_help_options(options, "Main options:\n",
2968                       OPT_EXPERT, 0);
2969     show_help_options(options, "\nAdvanced options:\n",
2970                       OPT_EXPERT, OPT_EXPERT);
2971     printf("\n");
2972     av_opt_show2(avcodec_opts[0], NULL,
2973                  AV_OPT_FLAG_DECODING_PARAM, 0);
2974     printf("\n");
2975     av_opt_show2(avformat_opts, NULL,
2976                  AV_OPT_FLAG_DECODING_PARAM, 0);
2977 #if !CONFIG_AVFILTER
2978     printf("\n");
2979     av_opt_show2(sws_opts, NULL,
2980                  AV_OPT_FLAG_ENCODING_PARAM, 0);
2981 #endif
2982     printf("\nWhile playing:\n"
2983            "q, ESC              quit\n"
2984            "f                   toggle full screen\n"
2985            "p, SPC              pause\n"
2986            "a                   cycle audio channel\n"
2987            "v                   cycle video channel\n"
2988            "t                   cycle subtitle channel\n"
2989            "w                   show audio waves\n"
2990            "s                   activate frame-step mode\n"
2991            "left/right          seek backward/forward 10 seconds\n"
2992            "down/up             seek backward/forward 1 minute\n"
2993            "mouse click         seek to percentage in file corresponding to fraction of width\n"
2994            );
2995     return 0;
2996 }
2997
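     /* libavcodec may be entered from several threads at once (audio, video and
      * subtitle decoding); register a lock manager that maps its four lock
      * operations onto SDL mutexes. */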
2998 static int lockmgr(void **mtx, enum AVLockOp op)
2999 {
3000    switch(op) {
3001       case AV_LOCK_CREATE:
3002           *mtx = SDL_CreateMutex();
3003           if(!*mtx)
3004               return 1;
3005           return 0;
3006       case AV_LOCK_OBTAIN:
3007           return !!SDL_LockMutex(*mtx);
3008       case AV_LOCK_RELEASE:
3009           return !!SDL_UnlockMutex(*mtx);
3010       case AV_LOCK_DESTROY:
3011           SDL_DestroyMutex(*mtx);
3012           return 0;
3013    }
3014    return 1;
3015 }
3016
3017 /* program entry point */
3018 int main(int argc, char **argv)
3019 {
3020     int flags;
3021     VideoState *is;
3022
3023     av_log_set_flags(AV_LOG_SKIP_REPEATED);
3024
3025     /* register all codecs, demuxers and protocols */
3026     avcodec_register_all();
3027 #if CONFIG_AVDEVICE
3028     avdevice_register_all();
3029 #endif
3030 #if CONFIG_AVFILTER
3031     avfilter_register_all();
3032 #endif
3033     av_register_all();
3034
3035     init_opts();
3036
3037     show_banner();
3038
3039     parse_options(argc, argv, options, opt_input_file);
3040
3041     if (!input_filename) {
3042         show_usage();
3043         fprintf(stderr, "An input file must be specified\n");
3044         fprintf(stderr, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3045         exit(1);
3046     }
3047
3048     if (display_disable) {
3049         video_disable = 1;
3050     }
3051     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3052     if (audio_disable)
3053         flags &= ~SDL_INIT_AUDIO;
3054 #if !defined(__MINGW32__) && !defined(__APPLE__)
3055     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3056 #endif
3057     if (SDL_Init (flags)) {
3058         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3059         fprintf(stderr, "(Did you set the DISPLAY variable?)\n");
3060         exit(1);
3061     }
3062
3063     if (!display_disable) {
3064 #if HAVE_SDL_VIDEO_SIZE
3065         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3066         fs_screen_width = vi->current_w;
3067         fs_screen_height = vi->current_h;
3068 #endif
3069     }
3070
3071     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3072     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3073     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3074
3075     if (av_lockmgr_register(lockmgr)) {
3076         fprintf(stderr, "Could not initialize lock manager!\n");
3077         do_exit(NULL);
3078     }
3079
3080     av_init_packet(&flush_pkt);
3081     flush_pkt.data= "FLUSH";
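         /* flush_pkt is a sentinel: the decoder loops compare packet data
          * pointers against flush_pkt.data to know when to flush after a seek,
          * so the actual contents of the string do not matter. */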
3082
3083     is = stream_open(input_filename, file_iformat);
3084     if (!is) {
3085         fprintf(stderr, "Failed to initialize VideoState!\n");
3086         do_exit(NULL);
3087     }
3088
3089     event_loop(is);
3090
3091     /* never returns */
3092
3093     return 0;
3094 }