1 /*
2  * ffplay : Simple Media Player based on the FFmpeg libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include "libavutil/avstring.h"
27 #include "libavutil/colorspace.h"
28 #include "libavutil/mathematics.h"
29 #include "libavutil/pixdesc.h"
30 #include "libavutil/imgutils.h"
31 #include "libavutil/dict.h"
32 #include "libavutil/parseutils.h"
33 #include "libavutil/samplefmt.h"
34 #include "libavutil/avassert.h"
35 #include "libavformat/avformat.h"
36 #include "libavdevice/avdevice.h"
37 #include "libswscale/swscale.h"
38 #include "libavcodec/audioconvert.h"
39 #include "libavutil/opt.h"
40 #include "libavcodec/avfft.h"
41
42 #if CONFIG_AVFILTER
43 # include "libavfilter/avcodec.h"
44 # include "libavfilter/avfilter.h"
45 # include "libavfilter/avfiltergraph.h"
46 # include "libavfilter/vsink_buffer.h"
47 #endif
48
49 #include <SDL.h>
50 #include <SDL_thread.h>
51
52 #include "cmdutils.h"
53
54 #include <unistd.h>
55 #include <assert.h>
56
57 const char program_name[] = "ffplay";
58 const int program_birth_year = 2003;
59
60 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
61 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
62 #define MIN_FRAMES 5
63
64 /* SDL audio buffer size, in samples. Should be small to have precise
65    A/V sync as SDL does not have hardware buffer fullness info. */
66 #define SDL_AUDIO_BUFFER_SIZE 1024
67
68 /* no AV sync correction is done if below the AV sync threshold */
69 #define AV_SYNC_THRESHOLD 0.01
70 /* no AV correction is done if the error is too big */
71 #define AV_NOSYNC_THRESHOLD 10.0
72
73 #define FRAME_SKIP_FACTOR 0.05
74
75 /* maximum audio speed change to get correct sync */
76 #define SAMPLE_CORRECTION_PERCENT_MAX 10
77
78 /* we use about AUDIO_DIFF_AVG_NB A-V differences to compute the average */
79 #define AUDIO_DIFF_AVG_NB   20
80
81 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
82 #define SAMPLE_ARRAY_SIZE (2*65536)
83
84 static int sws_flags = SWS_BICUBIC;
85
86 typedef struct PacketQueue {
87     AVPacketList *first_pkt, *last_pkt;
88     int nb_packets;
89     int size;
90     int abort_request;
91     SDL_mutex *mutex;
92     SDL_cond *cond;
93 } PacketQueue;
94
95 #define VIDEO_PICTURE_QUEUE_SIZE 2
96 #define SUBPICTURE_QUEUE_SIZE 4
97
98 typedef struct VideoPicture {
99     double pts;                                  ///<presentation time stamp for this picture
100     double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
101     double duration;                             ///<expected duration of the frame
102     int64_t pos;                                 ///<byte position in file
103     SDL_Overlay *bmp;
104     int width, height; /* source height & width */
105     int allocated;
106     enum PixelFormat pix_fmt;
107
108 #if CONFIG_AVFILTER
109     AVFilterBufferRef *picref;
110 #endif
111 } VideoPicture;
112
113 typedef struct SubPicture {
114     double pts; /* presentation time stamp for this picture */
115     AVSubtitle sub;
116 } SubPicture;
117
118 enum {
119     AV_SYNC_AUDIO_MASTER, /* default choice */
120     AV_SYNC_VIDEO_MASTER,
121     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
122 };
123
124 typedef struct VideoState {
125     SDL_Thread *read_tid;
126     SDL_Thread *video_tid;
127     SDL_Thread *refresh_tid;
128     AVInputFormat *iformat;
129     int no_background;
130     int abort_request;
131     int paused;
132     int last_paused;
133     int seek_req;
134     int seek_flags;
135     int64_t seek_pos;
136     int64_t seek_rel;
137     int read_pause_return;
138     AVFormatContext *ic;
139
140     int audio_stream;
141
142     int av_sync_type;
143     double external_clock; /* external clock base */
144     int64_t external_clock_time;
145
146     double audio_clock;
147     double audio_diff_cum; /* used for AV difference average computation */
148     double audio_diff_avg_coef;
149     double audio_diff_threshold;
150     int audio_diff_avg_count;
151     AVStream *audio_st;
152     PacketQueue audioq;
153     int audio_hw_buf_size;
154     /* samples output by the codec. we reserve more space for avsync
155        compensation */
156     DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
157     DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
158     uint8_t *audio_buf;
159     unsigned int audio_buf_size; /* in bytes */
160     int audio_buf_index; /* in bytes */
161     int audio_write_buf_size;
162     AVPacket audio_pkt_temp;
163     AVPacket audio_pkt;
164     enum AVSampleFormat audio_src_fmt;
165     AVAudioConvert *reformat_ctx;
166     double audio_current_pts;
167     double audio_current_pts_drift;
168
169     enum ShowMode {
170         SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
171     } show_mode;
172     int16_t sample_array[SAMPLE_ARRAY_SIZE];
173     int sample_array_index;
174     int last_i_start;
175     RDFTContext *rdft;
176     int rdft_bits;
177     FFTSample *rdft_data;
178     int xpos;
179
180     SDL_Thread *subtitle_tid;
181     int subtitle_stream;
182     int subtitle_stream_changed;
183     AVStream *subtitle_st;
184     PacketQueue subtitleq;
185     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
186     int subpq_size, subpq_rindex, subpq_windex;
187     SDL_mutex *subpq_mutex;
188     SDL_cond *subpq_cond;
189
190     double frame_timer;
191     double frame_last_pts;
192     double frame_last_delay;
193     double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
194     int video_stream;
195     AVStream *video_st;
196     PacketQueue videoq;
197     double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
198     double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
199     int64_t video_current_pos;                   ///<current displayed file pos
200     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
201     int pictq_size, pictq_rindex, pictq_windex;
202     SDL_mutex *pictq_mutex;
203     SDL_cond *pictq_cond;
204 #if !CONFIG_AVFILTER
205     struct SwsContext *img_convert_ctx;
206 #endif
207
208     char filename[1024];
209     int width, height, xleft, ytop;
210     int step;
211
212 #if CONFIG_AVFILTER
213     AVFilterContext *out_video_filter;          ///<the last filter in the video chain
214 #endif
215
216     float skip_frames;
217     float skip_frames_index;
218     int refresh;
219 } VideoState;
220
221 static int opt_help(const char *opt, const char *arg);
222
223 /* options specified by the user */
224 static AVInputFormat *file_iformat;
225 static const char *input_filename;
226 static const char *window_title;
227 static int fs_screen_width;
228 static int fs_screen_height;
229 static int screen_width = 0;
230 static int screen_height = 0;
231 static int audio_disable;
232 static int video_disable;
233 static int wanted_stream[AVMEDIA_TYPE_NB]={
234     [AVMEDIA_TYPE_AUDIO]=-1,
235     [AVMEDIA_TYPE_VIDEO]=-1,
236     [AVMEDIA_TYPE_SUBTITLE]=-1,
237 };
238 static int seek_by_bytes=-1;
239 static int display_disable;
240 static int show_status = 1;
241 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
242 static int64_t start_time = AV_NOPTS_VALUE;
243 static int64_t duration = AV_NOPTS_VALUE;
244 static int thread_count = 1;
245 static int workaround_bugs = 1;
246 static int fast = 0;
247 static int genpts = 0;
248 static int lowres = 0;
249 static int idct = FF_IDCT_AUTO;
250 static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
251 static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
252 static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
253 static int error_recognition = FF_ER_CAREFUL;
254 static int error_concealment = 3;
255 static int decoder_reorder_pts= -1;
256 static int autoexit;
257 static int exit_on_keydown;
258 static int exit_on_mousedown;
259 static int loop=1;
260 static int framedrop=-1;
261 static enum ShowMode show_mode = SHOW_MODE_NONE;
262
263 static int rdftspeed=20;
264 #if CONFIG_AVFILTER
265 static char *vfilters = NULL;
266 #endif
267
268 /* current context */
269 static int is_full_screen;
270 static int64_t audio_callback_time;
271
272 static AVPacket flush_pkt;
273
274 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
275 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
276 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
277
278 static SDL_Surface *screen;
279
280 void exit_program(int ret)
281 {
282     exit(ret);
283 }
284
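/* append a packet to the queue and wake up any thread waiting in
   packet_queue_get(); the packet data is duplicated unless it is the
   special flush packet */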
285 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
286 {
287     AVPacketList *pkt1;
288
289     /* duplicate the packet */
290     if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
291         return -1;
292
293     pkt1 = av_malloc(sizeof(AVPacketList));
294     if (!pkt1)
295         return -1;
296     pkt1->pkt = *pkt;
297     pkt1->next = NULL;
298
299
300     SDL_LockMutex(q->mutex);
301
302     if (!q->last_pkt)
303
304         q->first_pkt = pkt1;
305     else
306         q->last_pkt->next = pkt1;
307     q->last_pkt = pkt1;
308     q->nb_packets++;
309     q->size += pkt1->pkt.size + sizeof(*pkt1);
310     /* XXX: should duplicate packet data in DV case */
311     SDL_CondSignal(q->cond);
312
313     SDL_UnlockMutex(q->mutex);
314     return 0;
315 }
316
317 /* packet queue handling */
318 static void packet_queue_init(PacketQueue *q)
319 {
320     memset(q, 0, sizeof(PacketQueue));
321     q->mutex = SDL_CreateMutex();
322     q->cond = SDL_CreateCond();
323     packet_queue_put(q, &flush_pkt);
324 }
325
326 static void packet_queue_flush(PacketQueue *q)
327 {
328     AVPacketList *pkt, *pkt1;
329
330     SDL_LockMutex(q->mutex);
331     for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
332         pkt1 = pkt->next;
333         av_free_packet(&pkt->pkt);
334         av_freep(&pkt);
335     }
336     q->last_pkt = NULL;
337     q->first_pkt = NULL;
338     q->nb_packets = 0;
339     q->size = 0;
340     SDL_UnlockMutex(q->mutex);
341 }
342
343 static void packet_queue_end(PacketQueue *q)
344 {
345     packet_queue_flush(q);
346     SDL_DestroyMutex(q->mutex);
347     SDL_DestroyCond(q->cond);
348 }
349
350 static void packet_queue_abort(PacketQueue *q)
351 {
352     SDL_LockMutex(q->mutex);
353
354     q->abort_request = 1;
355
356     SDL_CondSignal(q->cond);
357
358     SDL_UnlockMutex(q->mutex);
359 }
360
361 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
362 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
363 {
364     AVPacketList *pkt1;
365     int ret;
366
367     SDL_LockMutex(q->mutex);
368
369     for(;;) {
370         if (q->abort_request) {
371             ret = -1;
372             break;
373         }
374
375         pkt1 = q->first_pkt;
376         if (pkt1) {
377             q->first_pkt = pkt1->next;
378             if (!q->first_pkt)
379                 q->last_pkt = NULL;
380             q->nb_packets--;
381             q->size -= pkt1->pkt.size + sizeof(*pkt1);
382             *pkt = pkt1->pkt;
383             av_free(pkt1);
384             ret = 1;
385             break;
386         } else if (!block) {
387             ret = 0;
388             break;
389         } else {
390             SDL_CondWait(q->cond, q->mutex);
391         }
392     }
393     SDL_UnlockMutex(q->mutex);
394     return ret;
395 }
396
397 static inline void fill_rectangle(SDL_Surface *screen,
398                                   int x, int y, int w, int h, int color)
399 {
400     SDL_Rect rect;
401     rect.x = x;
402     rect.y = y;
403     rect.w = w;
404     rect.h = h;
405     SDL_FillRect(screen, &rect, color);
406 }
407
408 #define ALPHA_BLEND(a, oldp, newp, s)\
409 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
410
411 #define RGBA_IN(r, g, b, a, s)\
412 {\
413     unsigned int v = ((const uint32_t *)(s))[0];\
414     a = (v >> 24) & 0xff;\
415     r = (v >> 16) & 0xff;\
416     g = (v >> 8) & 0xff;\
417     b = v & 0xff;\
418 }
419
420 #define YUVA_IN(y, u, v, a, s, pal)\
421 {\
422     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
423     a = (val >> 24) & 0xff;\
424     y = (val >> 16) & 0xff;\
425     u = (val >> 8) & 0xff;\
426     v = val & 0xff;\
427 }
428
429 #define YUVA_OUT(d, y, u, v, a)\
430 {\
431     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
432 }
433
434
435 #define BPP 1
436
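/* alpha blend a palettized subtitle rectangle onto the YUV420P destination
   picture, clipped against the image bounds */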
437 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
438 {
439     int wrap, wrap3, width2, skip2;
440     int y, u, v, a, u1, v1, a1, w, h;
441     uint8_t *lum, *cb, *cr;
442     const uint8_t *p;
443     const uint32_t *pal;
444     int dstx, dsty, dstw, dsth;
445
446     dstw = av_clip(rect->w, 0, imgw);
447     dsth = av_clip(rect->h, 0, imgh);
448     dstx = av_clip(rect->x, 0, imgw - dstw);
449     dsty = av_clip(rect->y, 0, imgh - dsth);
450     lum = dst->data[0] + dsty * dst->linesize[0];
451     cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
452     cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
453
454     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
455     skip2 = dstx >> 1;
456     wrap = dst->linesize[0];
457     wrap3 = rect->pict.linesize[0];
458     p = rect->pict.data[0];
459     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
460
461     if (dsty & 1) {
462         lum += dstx;
463         cb += skip2;
464         cr += skip2;
465
466         if (dstx & 1) {
467             YUVA_IN(y, u, v, a, p, pal);
468             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
469             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
470             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
471             cb++;
472             cr++;
473             lum++;
474             p += BPP;
475         }
476         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
477             YUVA_IN(y, u, v, a, p, pal);
478             u1 = u;
479             v1 = v;
480             a1 = a;
481             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
482
483             YUVA_IN(y, u, v, a, p + BPP, pal);
484             u1 += u;
485             v1 += v;
486             a1 += a;
487             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
488             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
489             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
490             cb++;
491             cr++;
492             p += 2 * BPP;
493             lum += 2;
494         }
495         if (w) {
496             YUVA_IN(y, u, v, a, p, pal);
497             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
498             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
499             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
500             p++;
501             lum++;
502         }
503         p += wrap3 - dstw * BPP;
504         lum += wrap - dstw - dstx;
505         cb += dst->linesize[1] - width2 - skip2;
506         cr += dst->linesize[2] - width2 - skip2;
507     }
508     for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
509         lum += dstx;
510         cb += skip2;
511         cr += skip2;
512
513         if (dstx & 1) {
514             YUVA_IN(y, u, v, a, p, pal);
515             u1 = u;
516             v1 = v;
517             a1 = a;
518             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
519             p += wrap3;
520             lum += wrap;
521             YUVA_IN(y, u, v, a, p, pal);
522             u1 += u;
523             v1 += v;
524             a1 += a;
525             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
526             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
527             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
528             cb++;
529             cr++;
530             p += -wrap3 + BPP;
531             lum += -wrap + 1;
532         }
533         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
534             YUVA_IN(y, u, v, a, p, pal);
535             u1 = u;
536             v1 = v;
537             a1 = a;
538             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
539
540             YUVA_IN(y, u, v, a, p + BPP, pal);
541             u1 += u;
542             v1 += v;
543             a1 += a;
544             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
545             p += wrap3;
546             lum += wrap;
547
548             YUVA_IN(y, u, v, a, p, pal);
549             u1 += u;
550             v1 += v;
551             a1 += a;
552             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
553
554             YUVA_IN(y, u, v, a, p + BPP, pal);
555             u1 += u;
556             v1 += v;
557             a1 += a;
558             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
559
560             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
561             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
562
563             cb++;
564             cr++;
565             p += -wrap3 + 2 * BPP;
566             lum += -wrap + 2;
567         }
568         if (w) {
569             YUVA_IN(y, u, v, a, p, pal);
570             u1 = u;
571             v1 = v;
572             a1 = a;
573             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
574             p += wrap3;
575             lum += wrap;
576             YUVA_IN(y, u, v, a, p, pal);
577             u1 += u;
578             v1 += v;
579             a1 += a;
580             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
581             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
582             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
583             cb++;
584             cr++;
585             p += -wrap3 + BPP;
586             lum += -wrap + 1;
587         }
588         p += wrap3 + (wrap3 - dstw * BPP);
589         lum += wrap + (wrap - dstw - dstx);
590         cb += dst->linesize[1] - width2 - skip2;
591         cr += dst->linesize[2] - width2 - skip2;
592     }
593     /* handle odd height */
594     if (h) {
595         lum += dstx;
596         cb += skip2;
597         cr += skip2;
598
599         if (dstx & 1) {
600             YUVA_IN(y, u, v, a, p, pal);
601             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
602             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
603             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
604             cb++;
605             cr++;
606             lum++;
607             p += BPP;
608         }
609         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
610             YUVA_IN(y, u, v, a, p, pal);
611             u1 = u;
612             v1 = v;
613             a1 = a;
614             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
615
616             YUVA_IN(y, u, v, a, p + BPP, pal);
617             u1 += u;
618             v1 += v;
619             a1 += a;
620             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
621             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
622             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
623             cb++;
624             cr++;
625             p += 2 * BPP;
626             lum += 2;
627         }
628         if (w) {
629             YUVA_IN(y, u, v, a, p, pal);
630             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
631             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
632             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
633         }
634     }
635 }
636
637 static void free_subpicture(SubPicture *sp)
638 {
639     avsubtitle_free(&sp->sub);
640 }
641
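/* display the picture at the read index of the queue: blend any pending
   subtitle into it, then show it centered in the window while keeping the
   aspect ratio */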
642 static void video_image_display(VideoState *is)
643 {
644     VideoPicture *vp;
645     SubPicture *sp;
646     AVPicture pict;
647     float aspect_ratio;
648     int width, height, x, y;
649     SDL_Rect rect;
650     int i;
651
652     vp = &is->pictq[is->pictq_rindex];
653     if (vp->bmp) {
654 #if CONFIG_AVFILTER
655          if (vp->picref->video->sample_aspect_ratio.num == 0)
656              aspect_ratio = 0;
657          else
658              aspect_ratio = av_q2d(vp->picref->video->sample_aspect_ratio);
659 #else
660
661         /* XXX: use variable in the frame */
662         if (is->video_st->sample_aspect_ratio.num)
663             aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
664         else if (is->video_st->codec->sample_aspect_ratio.num)
665             aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
666         else
667             aspect_ratio = 0;
668 #endif
669         if (aspect_ratio <= 0.0)
670             aspect_ratio = 1.0;
671         aspect_ratio *= (float)vp->width / (float)vp->height;
672
673         if (is->subtitle_st) {
674             if (is->subpq_size > 0) {
675                 sp = &is->subpq[is->subpq_rindex];
676
677                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
678                     SDL_LockYUVOverlay (vp->bmp);
679
680                     pict.data[0] = vp->bmp->pixels[0];
681                     pict.data[1] = vp->bmp->pixels[2];
682                     pict.data[2] = vp->bmp->pixels[1];
683
684                     pict.linesize[0] = vp->bmp->pitches[0];
685                     pict.linesize[1] = vp->bmp->pitches[2];
686                     pict.linesize[2] = vp->bmp->pitches[1];
687
688                     for (i = 0; i < sp->sub.num_rects; i++)
689                         blend_subrect(&pict, sp->sub.rects[i],
690                                       vp->bmp->w, vp->bmp->h);
691
692                     SDL_UnlockYUVOverlay (vp->bmp);
693                 }
694             }
695         }
696
697
698         /* XXX: we suppose the screen has a 1.0 pixel ratio */
699         height = is->height;
700         width = ((int)rint(height * aspect_ratio)) & ~1;
701         if (width > is->width) {
702             width = is->width;
703             height = ((int)rint(width / aspect_ratio)) & ~1;
704         }
705         x = (is->width - width) / 2;
706         y = (is->height - height) / 2;
707         is->no_background = 0;
708         rect.x = is->xleft + x;
709         rect.y = is->ytop  + y;
710         rect.w = FFMAX(width,  1);
711         rect.h = FFMAX(height, 1);
712         SDL_DisplayYUVOverlay(vp->bmp, &rect);
713     }
714 }
715
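/* modulo that never returns a negative result */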
716 static inline int compute_mod(int a, int b)
717 {
718     return a < 0 ? a%b + b : a%b;
719 }
720
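/* draw the audio visualization: either the waveform of the samples being
   played (SHOW_MODE_WAVES) or an RDFT based spectrogram */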
721 static void video_audio_display(VideoState *s)
722 {
723     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
724     int ch, channels, h, h2, bgcolor, fgcolor;
725     int64_t time_diff;
726     int rdft_bits, nb_freq;
727
728     for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
729         ;
730     nb_freq= 1<<(rdft_bits-1);
731
732     /* compute display index : center on currently output samples */
733     channels = s->audio_st->codec->channels;
734     nb_display_channels = channels;
735     if (!s->paused) {
736         int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
737         n = 2 * channels;
738         delay = s->audio_write_buf_size;
739         delay /= n;
740
741         /* to be more precise, we take into account the time spent since
742            the last buffer computation */
743         if (audio_callback_time) {
744             time_diff = av_gettime() - audio_callback_time;
745             delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
746         }
747
748         delay += 2*data_used;
749         if (delay < data_used)
750             delay = data_used;
751
752         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
753         if (s->show_mode == SHOW_MODE_WAVES) {
754             h= INT_MIN;
755             for(i=0; i<1000; i+=channels){
756                 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
757                 int a= s->sample_array[idx];
758                 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
759                 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
760                 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
761                 int score= a-d;
762                 if(h<score && (b^c)<0){
763                     h= score;
764                     i_start= idx;
765                 }
766             }
767         }
768
769         s->last_i_start = i_start;
770     } else {
771         i_start = s->last_i_start;
772     }
773
774     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
775     if (s->show_mode == SHOW_MODE_WAVES) {
776         fill_rectangle(screen,
777                        s->xleft, s->ytop, s->width, s->height,
778                        bgcolor);
779
780         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
781
782         /* total height for one channel */
783         h = s->height / nb_display_channels;
784         /* graph height / 2 */
785         h2 = (h * 9) / 20;
786         for(ch = 0;ch < nb_display_channels; ch++) {
787             i = i_start + ch;
788             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
789             for(x = 0; x < s->width; x++) {
790                 y = (s->sample_array[i] * h2) >> 15;
791                 if (y < 0) {
792                     y = -y;
793                     ys = y1 - y;
794                 } else {
795                     ys = y1;
796                 }
797                 fill_rectangle(screen,
798                                s->xleft + x, ys, 1, y,
799                                fgcolor);
800                 i += channels;
801                 if (i >= SAMPLE_ARRAY_SIZE)
802                     i -= SAMPLE_ARRAY_SIZE;
803             }
804         }
805
806         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
807
808         for(ch = 1;ch < nb_display_channels; ch++) {
809             y = s->ytop + ch * h;
810             fill_rectangle(screen,
811                            s->xleft, y, s->width, 1,
812                            fgcolor);
813         }
814         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
815     }else{
816         nb_display_channels= FFMIN(nb_display_channels, 2);
817         if(rdft_bits != s->rdft_bits){
818             av_rdft_end(s->rdft);
819             av_free(s->rdft_data);
820             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
821             s->rdft_bits= rdft_bits;
822             s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
823         }
824         {
825             FFTSample *data[2];
826             for(ch = 0;ch < nb_display_channels; ch++) {
827                 data[ch] = s->rdft_data + 2*nb_freq*ch;
828                 i = i_start + ch;
829                 for(x = 0; x < 2*nb_freq; x++) {
830                     double w= (x-nb_freq)*(1.0/nb_freq);
831                     data[ch][x]= s->sample_array[i]*(1.0-w*w);
832                     i += channels;
833                     if (i >= SAMPLE_ARRAY_SIZE)
834                         i -= SAMPLE_ARRAY_SIZE;
835                 }
836                 av_rdft_calc(s->rdft, data[ch]);
837             }
838             //least efficient way to do this, we should of course directly access it but it's more than fast enough
839             for(y=0; y<s->height; y++){
840                 double w= 1/sqrt(nb_freq);
841                 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
842                 int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
843                        + data[1][2*y+1]*data[1][2*y+1])) : a;
844                 a= FFMIN(a,255);
845                 b= FFMIN(b,255);
846                 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
847
848                 fill_rectangle(screen,
849                             s->xpos, s->height-y, 1, 1,
850                             fgcolor);
851             }
852         }
853         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
854         s->xpos++;
855         if(s->xpos >= s->width)
856             s->xpos= s->xleft;
857     }
858 }
859
860 static void stream_close(VideoState *is)
861 {
862     VideoPicture *vp;
863     int i;
864     /* XXX: use a special url_shutdown call to abort parse cleanly */
865     is->abort_request = 1;
866     SDL_WaitThread(is->read_tid, NULL);
867     SDL_WaitThread(is->refresh_tid, NULL);
868
869     /* free all pictures */
870     for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
871         vp = &is->pictq[i];
872 #if CONFIG_AVFILTER
873         if (vp->picref) {
874             avfilter_unref_buffer(vp->picref);
875             vp->picref = NULL;
876         }
877 #endif
878         if (vp->bmp) {
879             SDL_FreeYUVOverlay(vp->bmp);
880             vp->bmp = NULL;
881         }
882     }
883     SDL_DestroyMutex(is->pictq_mutex);
884     SDL_DestroyCond(is->pictq_cond);
885     SDL_DestroyMutex(is->subpq_mutex);
886     SDL_DestroyCond(is->subpq_cond);
887 #if !CONFIG_AVFILTER
888     if (is->img_convert_ctx)
889         sws_freeContext(is->img_convert_ctx);
890 #endif
891     av_free(is);
892 }
893
894 static void do_exit(VideoState *is)
895 {
896     if (is) {
897         stream_close(is);
898     }
899     av_lockmgr_register(NULL);
900     uninit_opts();
901 #if CONFIG_AVFILTER
902     avfilter_uninit();
903 #endif
904     if (show_status)
905         printf("\n");
906     SDL_Quit();
907     av_log(NULL, AV_LOG_QUIET, "%s", "");
908     exit(0);
909 }
910
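/* create or resize the SDL video surface; the size comes from the
   fullscreen mode, the user forced width/height, the filter graph output
   or the codec dimensions */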
911 static int video_open(VideoState *is){
912     int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
913     int w,h;
914
915     if(is_full_screen) flags |= SDL_FULLSCREEN;
916     else               flags |= SDL_RESIZABLE;
917
918     if (is_full_screen && fs_screen_width) {
919         w = fs_screen_width;
920         h = fs_screen_height;
921     } else if(!is_full_screen && screen_width){
922         w = screen_width;
923         h = screen_height;
924 #if CONFIG_AVFILTER
925     }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
926         w = is->out_video_filter->inputs[0]->w;
927         h = is->out_video_filter->inputs[0]->h;
928 #else
929     }else if (is->video_st && is->video_st->codec->width){
930         w = is->video_st->codec->width;
931         h = is->video_st->codec->height;
932 #endif
933     } else {
934         w = 640;
935         h = 480;
936     }
937     if(screen && is->width == screen->w && screen->w == w
938        && is->height== screen->h && screen->h == h)
939         return 0;
940
941 #ifndef __APPLE__
942     screen = SDL_SetVideoMode(w, h, 0, flags);
943 #else
944     /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
945     screen = SDL_SetVideoMode(w, h, 24, flags);
946 #endif
947     if (!screen) {
948         fprintf(stderr, "SDL: could not set video mode - exiting\n");
949         do_exit(is);
950     }
951     if (!window_title)
952         window_title = input_filename;
953     SDL_WM_SetCaption(window_title, window_title);
954
955     is->width = screen->w;
956     is->height = screen->h;
957
958     return 0;
959 }
960
961 /* display the current picture, if any */
962 static void video_display(VideoState *is)
963 {
964     if(!screen)
965         video_open(is);
966     if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
967         video_audio_display(is);
968     else if (is->video_st)
969         video_image_display(is);
970 }
971
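/* thread that periodically pushes an FF_REFRESH_EVENT to the event loop
   so that video_refresh() gets called */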
972 static int refresh_thread(void *opaque)
973 {
974     VideoState *is= opaque;
975     while(!is->abort_request){
976         SDL_Event event;
977         event.type = FF_REFRESH_EVENT;
978         event.user.data1 = opaque;
979         if(!is->refresh){
980             is->refresh=1;
981             SDL_PushEvent(&event);
982         }
983         //FIXME ideally we should wait the correct time but SDL's event passing is so slow it would be silly
984         usleep(is->audio_st && is->show_mode != SHOW_MODE_VIDEO ? rdftspeed*1000 : 5000);
985     }
986     return 0;
987 }
988
989 /* get the current audio clock value */
990 static double get_audio_clock(VideoState *is)
991 {
992     if (is->paused) {
993         return is->audio_current_pts;
994     } else {
995         return is->audio_current_pts_drift + av_gettime() / 1000000.0;
996     }
997 }
998
999 /* get the current video clock value */
1000 static double get_video_clock(VideoState *is)
1001 {
1002     if (is->paused) {
1003         return is->video_current_pts;
1004     } else {
1005         return is->video_current_pts_drift + av_gettime() / 1000000.0;
1006     }
1007 }
1008
1009 /* get the current external clock value */
1010 static double get_external_clock(VideoState *is)
1011 {
1012     int64_t ti;
1013     ti = av_gettime();
1014     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1015 }
1016
1017 /* get the current master clock value */
1018 static double get_master_clock(VideoState *is)
1019 {
1020     double val;
1021
1022     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1023         if (is->video_st)
1024             val = get_video_clock(is);
1025         else
1026             val = get_audio_clock(is);
1027     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1028         if (is->audio_st)
1029             val = get_audio_clock(is);
1030         else
1031             val = get_video_clock(is);
1032     } else {
1033         val = get_external_clock(is);
1034     }
1035     return val;
1036 }
1037
1038 /* seek in the stream */
1039 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1040 {
1041     if (!is->seek_req) {
1042         is->seek_pos = pos;
1043         is->seek_rel = rel;
1044         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1045         if (seek_by_bytes)
1046             is->seek_flags |= AVSEEK_FLAG_BYTE;
1047         is->seek_req = 1;
1048     }
1049 }
1050
1051 /* pause or resume the video */
1052 static void stream_toggle_pause(VideoState *is)
1053 {
1054     if (is->paused) {
1055         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1056         if(is->read_pause_return != AVERROR(ENOSYS)){
1057             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1058         }
1059         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1060     }
1061     is->paused = !is->paused;
1062 }
1063
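/* compute the absolute time at which the given frame should be displayed;
   when video is not the master clock, the frame delay is shortened or
   doubled so the video catches up with the master clock */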
1064 static double compute_target_time(double frame_current_pts, VideoState *is)
1065 {
1066     double delay, sync_threshold, diff;
1067
1068     /* compute nominal delay */
1069     delay = frame_current_pts - is->frame_last_pts;
1070     if (delay <= 0 || delay >= 10.0) {
1071         /* if incorrect delay, use previous one */
1072         delay = is->frame_last_delay;
1073     } else {
1074         is->frame_last_delay = delay;
1075     }
1076     is->frame_last_pts = frame_current_pts;
1077
1078     /* update delay to follow master synchronisation source */
1079     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1080          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1081         /* if video is slave, we try to correct big delays by
1082            duplicating or deleting a frame */
1083         diff = get_video_clock(is) - get_master_clock(is);
1084
1085         /* skip or repeat frame. We take into account the
1086            delay to compute the threshold. I still don't know
1087            if it is the best guess */
1088         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1089         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1090             if (diff <= -sync_threshold)
1091                 delay = 0;
1092             else if (diff >= sync_threshold)
1093                 delay = 2 * delay;
1094         }
1095     }
1096     is->frame_timer += delay;
1097
1098     av_dlog(NULL, "video: delay=%0.3f pts=%0.3f A-V=%f\n",
1099             delay, frame_current_pts, -diff);
1100
1101     return is->frame_timer;
1102 }
1103
1104 /* called to display each frame */
1105 static void video_refresh(void *opaque)
1106 {
1107     VideoState *is = opaque;
1108     VideoPicture *vp;
1109
1110     SubPicture *sp, *sp2;
1111
1112     if (is->video_st) {
1113 retry:
1114         if (is->pictq_size == 0) {
1115             //nothing to do, no picture to display in the queue
1116         } else {
1117             double time= av_gettime()/1000000.0;
1118             double next_target;
1119             /* dequeue the picture */
1120             vp = &is->pictq[is->pictq_rindex];
1121
1122             if(time < vp->target_clock)
1123                 return;
1124             /* update current video pts */
1125             is->video_current_pts = vp->pts;
1126             is->video_current_pts_drift = is->video_current_pts - time;
1127             is->video_current_pos = vp->pos;
1128             if(is->pictq_size > 1){
1129                 VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
1130                 assert(nextvp->target_clock >= vp->target_clock);
1131                 next_target= nextvp->target_clock;
1132             }else{
1133                 next_target= vp->target_clock + vp->duration;
1134             }
1135             if((framedrop>0 || (framedrop && is->audio_st)) && time > next_target){
1136                 is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
1137                 if(is->pictq_size > 1 || time > next_target + 0.5){
1138                     /* update queue size and signal for next picture */
1139                     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1140                         is->pictq_rindex = 0;
1141
1142                     SDL_LockMutex(is->pictq_mutex);
1143                     is->pictq_size--;
1144                     SDL_CondSignal(is->pictq_cond);
1145                     SDL_UnlockMutex(is->pictq_mutex);
1146                     goto retry;
1147                 }
1148             }
1149
1150             if(is->subtitle_st) {
1151                 if (is->subtitle_stream_changed) {
1152                     SDL_LockMutex(is->subpq_mutex);
1153
1154                     while (is->subpq_size) {
1155                         free_subpicture(&is->subpq[is->subpq_rindex]);
1156
1157                         /* update queue size and signal for next picture */
1158                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1159                             is->subpq_rindex = 0;
1160
1161                         is->subpq_size--;
1162                     }
1163                     is->subtitle_stream_changed = 0;
1164
1165                     SDL_CondSignal(is->subpq_cond);
1166                     SDL_UnlockMutex(is->subpq_mutex);
1167                 } else {
1168                     if (is->subpq_size > 0) {
1169                         sp = &is->subpq[is->subpq_rindex];
1170
1171                         if (is->subpq_size > 1)
1172                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1173                         else
1174                             sp2 = NULL;
1175
1176                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1177                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1178                         {
1179                             free_subpicture(sp);
1180
1181                             /* update queue size and signal for next picture */
1182                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1183                                 is->subpq_rindex = 0;
1184
1185                             SDL_LockMutex(is->subpq_mutex);
1186                             is->subpq_size--;
1187                             SDL_CondSignal(is->subpq_cond);
1188                             SDL_UnlockMutex(is->subpq_mutex);
1189                         }
1190                     }
1191                 }
1192             }
1193
1194             /* display picture */
1195             if (!display_disable)
1196                 video_display(is);
1197
1198             /* update queue size and signal for next picture */
1199             if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1200                 is->pictq_rindex = 0;
1201
1202             SDL_LockMutex(is->pictq_mutex);
1203             is->pictq_size--;
1204             SDL_CondSignal(is->pictq_cond);
1205             SDL_UnlockMutex(is->pictq_mutex);
1206         }
1207     } else if (is->audio_st) {
1208         /* draw the next audio frame */
1209
1210         /* if only audio stream, then display the audio bars (better
1211            than nothing, just to test the implementation) */
1212
1213         /* display picture */
1214         if (!display_disable)
1215             video_display(is);
1216     }
1217     if (show_status) {
1218         static int64_t last_time;
1219         int64_t cur_time;
1220         int aqsize, vqsize, sqsize;
1221         double av_diff;
1222
1223         cur_time = av_gettime();
1224         if (!last_time || (cur_time - last_time) >= 30000) {
1225             aqsize = 0;
1226             vqsize = 0;
1227             sqsize = 0;
1228             if (is->audio_st)
1229                 aqsize = is->audioq.size;
1230             if (is->video_st)
1231                 vqsize = is->videoq.size;
1232             if (is->subtitle_st)
1233                 sqsize = is->subtitleq.size;
1234             av_diff = 0;
1235             if (is->audio_st && is->video_st)
1236                 av_diff = get_audio_clock(is) - get_video_clock(is);
1237             printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1238                    get_master_clock(is),
1239                    av_diff,
1240                    FFMAX(is->skip_frames-1, 0),
1241                    aqsize / 1024,
1242                    vqsize / 1024,
1243                    sqsize,
1244                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
1245                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
1246             fflush(stdout);
1247             last_time = cur_time;
1248         }
1249     }
1250 }
1251
1252 /* allocate a picture (needs to be done in the main thread to avoid
1253    potential locking problems) */
1254 static void alloc_picture(void *opaque)
1255 {
1256     VideoState *is = opaque;
1257     VideoPicture *vp;
1258
1259     vp = &is->pictq[is->pictq_windex];
1260
1261     if (vp->bmp)
1262         SDL_FreeYUVOverlay(vp->bmp);
1263
1264 #if CONFIG_AVFILTER
1265     if (vp->picref)
1266         avfilter_unref_buffer(vp->picref);
1267     vp->picref = NULL;
1268
1269     vp->width   = is->out_video_filter->inputs[0]->w;
1270     vp->height  = is->out_video_filter->inputs[0]->h;
1271     vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1272 #else
1273     vp->width   = is->video_st->codec->width;
1274     vp->height  = is->video_st->codec->height;
1275     vp->pix_fmt = is->video_st->codec->pix_fmt;
1276 #endif
1277
1278     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1279                                    SDL_YV12_OVERLAY,
1280                                    screen);
1281     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1282         /* SDL allocates a buffer smaller than requested if the video
1283          * overlay hardware is unable to support the requested size. */
1284         fprintf(stderr, "Error: the video system does not support an image\n"
1285                         "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1286                         "to reduce the image size.\n", vp->width, vp->height );
1287         do_exit(is);
1288     }
1289
1290     SDL_LockMutex(is->pictq_mutex);
1291     vp->allocated = 1;
1292     SDL_CondSignal(is->pictq_cond);
1293     SDL_UnlockMutex(is->pictq_mutex);
1294 }
1295
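/* put a decoded frame into the picture queue: update the video clock, wait
   for a free slot (allocating the SDL overlay from the main thread if
   needed), copy or convert the frame into it and compute its target display
   time */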
1296 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1297 {
1298     VideoPicture *vp;
1299     double frame_delay, pts = pts1;
1300
1301     /* compute the exact PTS for the picture if it is omitted in the stream
1302      * pts1 is the dts of the pkt / pts of the frame */
1303     if (pts != 0) {
1304         /* update video clock with pts, if present */
1305         is->video_clock = pts;
1306     } else {
1307         pts = is->video_clock;
1308     }
1309     /* update video clock for next frame */
1310     frame_delay = av_q2d(is->video_st->codec->time_base);
1311     /* for MPEG2, the frame can be repeated, so we update the
1312        clock accordingly */
1313     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1314     is->video_clock += frame_delay;
1315
1316 #if defined(DEBUG_SYNC) && 0
1317     printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1318            av_get_picture_type_char(src_frame->pict_type), pts, pts1);
1319 #endif
1320
1321     /* wait until we have space to put a new picture */
1322     SDL_LockMutex(is->pictq_mutex);
1323
1324     if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1325         is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));
1326
1327     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1328            !is->videoq.abort_request) {
1329         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1330     }
1331     SDL_UnlockMutex(is->pictq_mutex);
1332
1333     if (is->videoq.abort_request)
1334         return -1;
1335
1336     vp = &is->pictq[is->pictq_windex];
1337
1338     vp->duration = frame_delay;
1339
1340     /* alloc or resize hardware picture buffer */
1341     if (!vp->bmp ||
1342 #if CONFIG_AVFILTER
1343         vp->width  != is->out_video_filter->inputs[0]->w ||
1344         vp->height != is->out_video_filter->inputs[0]->h) {
1345 #else
1346         vp->width != is->video_st->codec->width ||
1347         vp->height != is->video_st->codec->height) {
1348 #endif
1349         SDL_Event event;
1350
1351         vp->allocated = 0;
1352
1353         /* the allocation must be done in the main thread to avoid
1354            locking problems */
1355         event.type = FF_ALLOC_EVENT;
1356         event.user.data1 = is;
1357         SDL_PushEvent(&event);
1358
1359         /* wait until the picture is allocated */
1360         SDL_LockMutex(is->pictq_mutex);
1361         while (!vp->allocated && !is->videoq.abort_request) {
1362             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1363         }
1364         /* if the queue is aborted, we have to pop the pending ALLOC event or wait for the allocation to complete */
1365         if (is->videoq.abort_request && SDL_PeepEvents(&event, 1, SDL_GETEVENT, SDL_EVENTMASK(FF_ALLOC_EVENT)) != 1) {
1366             while (!vp->allocated) {
1367                 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1368             }
1369         }
1370         SDL_UnlockMutex(is->pictq_mutex);
1371
1372         if (is->videoq.abort_request)
1373             return -1;
1374     }
1375
1376     /* if the frame is not skipped, then display it */
1377     if (vp->bmp) {
1378         AVPicture pict;
1379 #if CONFIG_AVFILTER
1380         if(vp->picref)
1381             avfilter_unref_buffer(vp->picref);
1382         vp->picref = src_frame->opaque;
1383 #endif
1384
1385         /* get a pointer on the bitmap */
1386         SDL_LockYUVOverlay (vp->bmp);
1387
1388         memset(&pict,0,sizeof(AVPicture));
1389         pict.data[0] = vp->bmp->pixels[0];
1390         pict.data[1] = vp->bmp->pixels[2];
1391         pict.data[2] = vp->bmp->pixels[1];
1392
1393         pict.linesize[0] = vp->bmp->pitches[0];
1394         pict.linesize[1] = vp->bmp->pitches[2];
1395         pict.linesize[2] = vp->bmp->pitches[1];
1396
1397 #if CONFIG_AVFILTER
1398         //FIXME use direct rendering
1399         av_picture_copy(&pict, (AVPicture *)src_frame,
1400                         vp->pix_fmt, vp->width, vp->height);
1401 #else
1402         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1403         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1404             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1405             PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
1406         if (is->img_convert_ctx == NULL) {
1407             fprintf(stderr, "Cannot initialize the conversion context\n");
1408             exit(1);
1409         }
1410         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1411                   0, vp->height, pict.data, pict.linesize);
1412 #endif
1413         /* update the bitmap content */
1414         SDL_UnlockYUVOverlay(vp->bmp);
1415
1416         vp->pts = pts;
1417         vp->pos = pos;
1418
1419         /* now we can update the picture count */
1420         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1421             is->pictq_windex = 0;
1422         SDL_LockMutex(is->pictq_mutex);
1423         vp->target_clock= compute_target_time(vp->pts, is);
1424
1425         is->pictq_size++;
1426         SDL_UnlockMutex(is->pictq_mutex);
1427     }
1428     return 0;
1429 }
1430
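/* read the next packet from the video queue and decode it; returns 1 when a
   displayable frame is available, 0 when no frame was produced or the frame
   is dropped for frame skipping, and -1 on abort */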
1431 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1432 {
1433     int got_picture, i;
1434
1435     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1436         return -1;
1437
1438     if (pkt->data == flush_pkt.data) {
1439         avcodec_flush_buffers(is->video_st->codec);
1440
1441         SDL_LockMutex(is->pictq_mutex);
1442         //Make sure there are no long delay timers (ideally we should just flush the queue but that's harder)
1443         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1444             is->pictq[i].target_clock= 0;
1445         }
1446         while (is->pictq_size && !is->videoq.abort_request) {
1447             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1448         }
1449         is->video_current_pos = -1;
1450         SDL_UnlockMutex(is->pictq_mutex);
1451
1452         is->frame_last_pts = AV_NOPTS_VALUE;
1453         is->frame_last_delay = 0;
1454         is->frame_timer = (double)av_gettime() / 1000000.0;
1455         is->skip_frames = 1;
1456         is->skip_frames_index = 0;
1457         return 0;
1458     }
1459
1460     avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt);
1461
1462     if (got_picture) {
1463         if (decoder_reorder_pts == -1) {
1464             *pts = frame->best_effort_timestamp;
1465         } else if (decoder_reorder_pts) {
1466             *pts = frame->pkt_pts;
1467         } else {
1468             *pts = frame->pkt_dts;
1469         }
1470
1471         if (*pts == AV_NOPTS_VALUE) {
1472             *pts = 0;
1473         }
1474
1475         is->skip_frames_index += 1;
1476         if(is->skip_frames_index >= is->skip_frames){
1477             is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
1478             return 1;
1479         }
1480
1481     }
1482     return 0;
1483 }
1484
1485 #if CONFIG_AVFILTER
1486 typedef struct {
1487     VideoState *is;
1488     AVFrame *frame;
1489     int use_dr1;
1490 } FilterPriv;
1491
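/* get_buffer() callback for direct rendering: hand the decoder a buffer
   obtained from the filter graph so that decoded frames need no extra copy */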
1492 static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
1493 {
1494     AVFilterContext *ctx = codec->opaque;
1495     AVFilterBufferRef  *ref;
1496     int perms = AV_PERM_WRITE;
1497     int i, w, h, stride[4];
1498     unsigned edge;
1499     int pixel_size;
1500
1501     av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1502
1503     if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
1504         perms |= AV_PERM_NEG_LINESIZES;
1505
1506     if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
1507         if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
1508         if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
1509         if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
1510     }
1511     if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;
1512
1513     w = codec->width;
1514     h = codec->height;
1515
1516     if(av_image_check_size(w, h, 0, codec))
1517         return -1;
1518
1519     avcodec_align_dimensions2(codec, &w, &h, stride);
1520     edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
1521     w += edge << 1;
1522     h += edge << 1;
1523
1524     if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
1525         return -1;
1526
1527     pixel_size = av_pix_fmt_descriptors[ref->format].comp[0].step_minus1+1;
1528     ref->video->w = codec->width;
1529     ref->video->h = codec->height;
1530     for(i = 0; i < 4; i ++) {
1531         unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
1532         unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;
1533
1534         if (ref->data[i]) {
1535             ref->data[i]    += ((edge * pixel_size) >> hshift) + ((edge * ref->linesize[i]) >> vshift);
1536         }
1537         pic->data[i]     = ref->data[i];
1538         pic->linesize[i] = ref->linesize[i];
1539     }
1540     pic->opaque = ref;
1541     pic->age    = INT_MAX;
1542     pic->type   = FF_BUFFER_TYPE_USER;
1543     pic->reordered_opaque = codec->reordered_opaque;
1544     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1545     else           pic->pkt_pts = AV_NOPTS_VALUE;
1546     return 0;
1547 }
1548
1549 static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
1550 {
1551     memset(pic->data, 0, sizeof(pic->data));
1552     avfilter_unref_buffer(pic->opaque);
1553 }
1554
1555 static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
1556 {
1557     AVFilterBufferRef *ref = pic->opaque;
1558
1559     if (pic->data[0] == NULL) {
1560         pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
1561         return codec->get_buffer(codec, pic);
1562     }
1563
1564     if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
1565         (codec->pix_fmt != ref->format)) {
1566         av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
1567         return -1;
1568     }
1569
1570     pic->reordered_opaque = codec->reordered_opaque;
1571     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1572     else           pic->pkt_pts = AV_NOPTS_VALUE;
1573     return 0;
1574 }
1575
1576 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1577 {
1578     FilterPriv *priv = ctx->priv;
1579     AVCodecContext *codec;
1580     if(!opaque) return -1;
1581
1582     priv->is = opaque;
1583     codec    = priv->is->video_st->codec;
1584     codec->opaque = ctx;
1585     if((codec->codec->capabilities & CODEC_CAP_DR1)
1586     ) {
1587         av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1588         priv->use_dr1 = 1;
1589         codec->get_buffer     = input_get_buffer;
1590         codec->release_buffer = input_release_buffer;
1591         codec->reget_buffer   = input_reget_buffer;
1592         codec->thread_safe_callbacks = 1;
1593     }
1594
1595     priv->frame = avcodec_alloc_frame();
1596
1597     return 0;
1598 }
1599
1600 static void input_uninit(AVFilterContext *ctx)
1601 {
1602     FilterPriv *priv = ctx->priv;
1603     av_free(priv->frame);
1604 }
1605
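/* request_frame() callback of the source filter: decode the next video
   frame and push it into the filter graph */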
1606 static int input_request_frame(AVFilterLink *link)
1607 {
1608     FilterPriv *priv = link->src->priv;
1609     AVFilterBufferRef *picref;
1610     int64_t pts = 0;
1611     AVPacket pkt;
1612     int ret;
1613
1614     while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
1615         av_free_packet(&pkt);
1616     if (ret < 0)
1617         return -1;
1618
1619     if(priv->use_dr1 && priv->frame->opaque) {
1620         picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
1621     } else {
1622         picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
1623         av_image_copy(picref->data, picref->linesize,
1624                       priv->frame->data, priv->frame->linesize,
1625                       picref->format, link->w, link->h);
1626     }
1627     av_free_packet(&pkt);
1628
1629     avfilter_copy_frame_props(picref, priv->frame);
1630     picref->pts = pts;
1631
1632     avfilter_start_frame(link, picref);
1633     avfilter_draw_slice(link, 0, link->h, 1);
1634     avfilter_end_frame(link);
1635
1636     return 0;
1637 }
1638
1639 static int input_query_formats(AVFilterContext *ctx)
1640 {
1641     FilterPriv *priv = ctx->priv;
1642     enum PixelFormat pix_fmts[] = {
1643         priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1644     };
1645
1646     avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts));
1647     return 0;
1648 }
1649
1650 static int input_config_props(AVFilterLink *link)
1651 {
1652     FilterPriv *priv  = link->src->priv;
1653     AVStream *s = priv->is->video_st;
1654
1655     link->w = s->codec->width;
1656     link->h = s->codec->height;
1657     link->sample_aspect_ratio = s->sample_aspect_ratio.num ?
1658         s->sample_aspect_ratio : s->codec->sample_aspect_ratio;
1659     link->time_base = s->time_base;
1660
1661     return 0;
1662 }
1663
1664 static AVFilter input_filter =
1665 {
1666     .name      = "ffplay_input",
1667
1668     .priv_size = sizeof(FilterPriv),
1669
1670     .init      = input_init,
1671     .uninit    = input_uninit,
1672
1673     .query_formats = input_query_formats,
1674
1675     .inputs    = (AVFilterPad[]) {{ .name = NULL }},
1676     .outputs   = (AVFilterPad[]) {{ .name = "default",
1677                                     .type = AVMEDIA_TYPE_VIDEO,
1678                                     .request_frame = input_request_frame,
1679                                     .config_props  = input_config_props, },
1680                                   { .name = NULL }},
1681 };
1682
1683 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1684 {
1685     char sws_flags_str[128];
1686     int ret;
1687     enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
1688     AVFilterContext *filt_src = NULL, *filt_out = NULL;
1689     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1690     graph->scale_sws_opts = av_strdup(sws_flags_str);
1691
1692     if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
1693                                             NULL, is, graph)) < 0)
1694         return ret;
1695     if ((ret = avfilter_graph_create_filter(&filt_out, avfilter_get_by_name("buffersink"), "out",
1696                                             NULL, pix_fmts, graph)) < 0)
1697         return ret;
1698
1699     if(vfilters) {
1700         AVFilterInOut *outputs = avfilter_inout_alloc();
1701         AVFilterInOut *inputs  = avfilter_inout_alloc();
1702
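        /* the user chain (e.g. "-vf scale=640:360,hflip", assuming those
           filters are built in) is parsed between two labelled pads: "in" is
           the open output of our source filter and "out" the open input of
           the buffersink, giving src -> <user filters> -> buffersink */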
1703         outputs->name    = av_strdup("in");
1704         outputs->filter_ctx = filt_src;
1705         outputs->pad_idx = 0;
1706         outputs->next    = NULL;
1707
1708         inputs->name    = av_strdup("out");
1709         inputs->filter_ctx = filt_out;
1710         inputs->pad_idx = 0;
1711         inputs->next    = NULL;
1712
1713         if ((ret = avfilter_graph_parse(graph, vfilters, &inputs, &outputs, NULL)) < 0)
1714             return ret;
1715     } else {
1716         if ((ret = avfilter_link(filt_src, 0, filt_out, 0)) < 0)
1717             return ret;
1718     }
1719
1720     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1721         return ret;
1722
1723     is->out_video_filter = filt_out;
1724
1725     return ret;
1726 }
1727
1728 #endif  /* CONFIG_AVFILTER */
1729
1730 static int video_thread(void *arg)
1731 {
1732     VideoState *is = arg;
1733     AVFrame *frame= avcodec_alloc_frame();
1734     int64_t pts_int = AV_NOPTS_VALUE, pos = -1;
1735     double pts;
1736     int ret;
1737
1738 #if CONFIG_AVFILTER
1739     AVFilterGraph *graph = avfilter_graph_alloc();
1740     AVFilterContext *filt_out = NULL;
1741     int last_w = is->video_st->codec->width;
1742     int last_h = is->video_st->codec->height;
1743
1744     if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1745         goto the_end;
1746     filt_out = is->out_video_filter;
1747 #endif
1748
1749     for(;;) {
1750 #if !CONFIG_AVFILTER
1751         AVPacket pkt;
1752 #else
1753         AVFilterBufferRef *picref;
1754         AVRational tb = filt_out->inputs[0]->time_base;
1755 #endif
1756         while (is->paused && !is->videoq.abort_request)
1757             SDL_Delay(10);
1758 #if CONFIG_AVFILTER
1759         if (   last_w != is->video_st->codec->width
1760             || last_h != is->video_st->codec->height) {
1761             av_log(NULL, AV_LOG_INFO, "Frame changed from size:%dx%d to size:%dx%d\n",
1762                    last_w, last_h, is->video_st->codec->width, is->video_st->codec->height);
1763             avfilter_graph_free(&graph);
1764             graph = avfilter_graph_alloc();
1765             if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1766                 goto the_end;
1767             filt_out = is->out_video_filter;
1768             last_w = is->video_st->codec->width;
1769             last_h = is->video_st->codec->height;
1770         }
1771         ret = av_vsink_buffer_get_video_buffer_ref(filt_out, &picref, 0);
1772         if (picref) {
1773             avfilter_fill_frame_from_video_buffer_ref(frame, picref);
1774             pts_int = picref->pts;
1775             pos     = picref->pos;
1776             frame->opaque = picref;
1777         }
1778
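        /* timestamps coming out of the filter graph use the buffersink link
           time base; rescale them to the stream time base so the rest of the
           player keeps working in stream units */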
1779         if (av_cmp_q(tb, is->video_st->time_base)) {
1780             av_unused int64_t pts1 = pts_int;
1781             pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1782             av_dlog(NULL, "video_thread(): "
1783                     "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1784                     tb.num, tb.den, pts1,
1785                     is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1786         }
1787 #else
1788         ret = get_video_frame(is, frame, &pts_int, &pkt);
1789         pos = pkt.pos;
1790         av_free_packet(&pkt);
1791 #endif
1792
1793         if (ret < 0) goto the_end;
1794
1795 #if CONFIG_AVFILTER
1796         if (!picref)
1797             continue;
1798 #endif
1799
1800         pts = pts_int*av_q2d(is->video_st->time_base);
1801
1802         ret = queue_picture(is, frame, pts, pos);
1803
1804         if (ret < 0)
1805             goto the_end;
1806
1807         if (is->step)
1808             stream_toggle_pause(is);
1809     }
1810  the_end:
1811 #if CONFIG_AVFILTER
1812     avfilter_graph_free(&graph);
1813 #endif
1814     av_free(frame);
1815     return 0;
1816 }
1817
1818 static int subtitle_thread(void *arg)
1819 {
1820     VideoState *is = arg;
1821     SubPicture *sp;
1822     AVPacket pkt1, *pkt = &pkt1;
1823     int got_subtitle;
1824     double pts;
1825     int i, j;
1826     int r, g, b, y, u, v, a;
1827
1828     for(;;) {
1829         while (is->paused && !is->subtitleq.abort_request) {
1830             SDL_Delay(10);
1831         }
1832         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1833             break;
1834
1835         if(pkt->data == flush_pkt.data){
1836             avcodec_flush_buffers(is->subtitle_st->codec);
1837             continue;
1838         }
1839         SDL_LockMutex(is->subpq_mutex);
1840         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1841                !is->subtitleq.abort_request) {
1842             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1843         }
1844         SDL_UnlockMutex(is->subpq_mutex);
1845
1846         if (is->subtitleq.abort_request)
1847             return 0;
1848
1849         sp = &is->subpq[is->subpq_windex];
1850
1851         /* NOTE: this pts is the PTS of the _first_ subtitle picture
1852            beginning in this packet, if any */
1853         pts = 0;
1854         if (pkt->pts != AV_NOPTS_VALUE)
1855             pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1856
1857         avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
1858                                  &got_subtitle, pkt);
1859
1860         if (got_subtitle && sp->sub.format == 0) {
1861             sp->pts = pts;
1862
1863             for (i = 0; i < sp->sub.num_rects; i++)
1864             {
1865                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1866                 {
1867                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1868                     y = RGB_TO_Y_CCIR(r, g, b);
1869                     u = RGB_TO_U_CCIR(r, g, b, 0);
1870                     v = RGB_TO_V_CCIR(r, g, b, 0);
1871                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1872                 }
1873             }
1874
1875             /* now we can update the picture count */
1876             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1877                 is->subpq_windex = 0;
1878             SDL_LockMutex(is->subpq_mutex);
1879             is->subpq_size++;
1880             SDL_UnlockMutex(is->subpq_mutex);
1881         }
1882         av_free_packet(pkt);
1883     }
1884     return 0;
1885 }
1886
1887 /* copy samples for viewing in the audio display (waves / spectrum) window */
1888 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1889 {
1890     int size, len;
1891
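    /* sample_array is a circular buffer: copy in chunks and wrap the write
       index at SAMPLE_ARRAY_SIZE */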
1892     size = samples_size / sizeof(short);
1893     while (size > 0) {
1894         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1895         if (len > size)
1896             len = size;
1897         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1898         samples += len;
1899         is->sample_array_index += len;
1900         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1901             is->sample_array_index = 0;
1902         size -= len;
1903     }
1904 }
1905
1906 /* return the new audio buffer size (samples can be added or removed
1907    to get better sync when the master clock is the video or an external clock) */
1908 static int synchronize_audio(VideoState *is, short *samples,
1909                              int samples_size1, double pts)
1910 {
1911     int n, samples_size;
1912     double ref_clock;
1913
1914     n = 2 * is->audio_st->codec->channels;
1915     samples_size = samples_size1;
1916
1917     /* if not master, then we try to remove or add samples to correct the clock */
1918     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1919          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1920         double diff, avg_diff;
1921         int wanted_size, min_size, max_size, nb_samples;
1922
1923         ref_clock = get_master_clock(is);
1924         diff = get_audio_clock(is) - ref_clock;
1925
1926         if (diff < AV_NOSYNC_THRESHOLD) {
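            /* audio_diff_cum is an exponentially weighted sum of the A-V
               differences (cum = diff + coef * cum); multiplying it by
               (1 - coef) below gives the corresponding weighted average */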
1927             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1928             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1929                 /* not enough measurements yet for a reliable estimate */
1930                 is->audio_diff_avg_count++;
1931             } else {
1932                 /* estimate the A-V difference */
1933                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1934
1935                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
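                    /* convert the clock error (in seconds) into bytes:
                       diff * sample_rate gives samples, times n
                       (2 bytes * channels) gives bytes to add or drop */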
1936                     wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1937                     nb_samples = samples_size / n;
1938
1939                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1940                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1941                     if (wanted_size < min_size)
1942                         wanted_size = min_size;
1943                     else if (wanted_size > max_size)
1944                         wanted_size = max_size;
1945
1946                     /* add or remove samples to correct the synchronization */
1947                     if (wanted_size < samples_size) {
1948                         /* remove samples */
1949                         samples_size = wanted_size;
1950                     } else if (wanted_size > samples_size) {
1951                         uint8_t *samples_end, *q;
1952                         int nb;
1953
1954                         /* add samples */
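                        /* pad by repeating the last frame of samples
                           (n bytes) until wanted_size is reached; nb is the
                           number of bytes still to append */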
1955                         nb = (wanted_size - samples_size);
1956                         samples_end = (uint8_t *)samples + samples_size - n;
1957                         q = samples_end + n;
1958                         while (nb > 0) {
1959                             memcpy(q, samples_end, n);
1960                             q += n;
1961                             nb -= n;
1962                         }
1963                         samples_size = wanted_size;
1964                     }
1965                 }
1966                 av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1967                         diff, avg_diff, samples_size - samples_size1,
1968                         is->audio_clock, is->video_clock, is->audio_diff_threshold);
1969             }
1970         } else {
1971             /* difference is too big: probably initial PTS errors, so
1972                reset the A-V filter */
1973             is->audio_diff_avg_count = 0;
1974             is->audio_diff_cum = 0;
1975         }
1976     }
1977
1978     return samples_size;
1979 }
1980
1981 /* decode one audio frame and return its uncompressed size in bytes */
1982 static int audio_decode_frame(VideoState *is, double *pts_ptr)
1983 {
1984     AVPacket *pkt_temp = &is->audio_pkt_temp;
1985     AVPacket *pkt = &is->audio_pkt;
1986     AVCodecContext *dec= is->audio_st->codec;
1987     int n, len1, data_size;
1988     double pts;
1989
1990     for(;;) {
1991         /* NOTE: the audio packet can contain several frames */
1992         while (pkt_temp->size > 0) {
1993             data_size = sizeof(is->audio_buf1);
1994             len1 = avcodec_decode_audio3(dec,
1995                                         (int16_t *)is->audio_buf1, &data_size,
1996                                         pkt_temp);
1997             if (len1 < 0) {
1998                 /* if error, we skip the frame */
1999                 pkt_temp->size = 0;
2000                 break;
2001             }
2002
2003             pkt_temp->data += len1;
2004             pkt_temp->size -= len1;
2005             if (data_size <= 0)
2006                 continue;
2007
2008             if (dec->sample_fmt != is->audio_src_fmt) {
2009                 if (is->reformat_ctx)
2010                     av_audio_convert_free(is->reformat_ctx);
2011                 is->reformat_ctx= av_audio_convert_alloc(AV_SAMPLE_FMT_S16, 1,
2012                                                          dec->sample_fmt, 1, NULL, 0);
2013                 if (!is->reformat_ctx) {
2014                     fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
2015                         av_get_sample_fmt_name(dec->sample_fmt),
2016                         av_get_sample_fmt_name(AV_SAMPLE_FMT_S16));
2017                         break;
2018                 }
2019                 is->audio_src_fmt= dec->sample_fmt;
2020             }
2021
2022             if (is->reformat_ctx) {
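            /* av_audio_convert() takes one pointer/stride per plane; the
               samples here are packed, so only element 0 is used: the input
               stride is the size of one source sample, the output stride is
               2 bytes (signed 16 bit), and len is the number of samples */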
2023                 const void *ibuf[6]= {is->audio_buf1};
2024                 void *obuf[6]= {is->audio_buf2};
2025                 int istride[6]= {av_get_bytes_per_sample(dec->sample_fmt)};
2026                 int ostride[6]= {2};
2027                 int len= data_size/istride[0];
2028                 if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
2029                     printf("av_audio_convert() failed\n");
2030                     break;
2031                 }
2032                 is->audio_buf= is->audio_buf2;
2033                 /* FIXME: existing code assumes that data_size equals framesize*channels*2;
2034                           remove this legacy cruft */
2035                 data_size= len*2;
2036             }else{
2037                 is->audio_buf= is->audio_buf1;
2038             }
2039
2040             /* if no pts, then compute it */
2041             pts = is->audio_clock;
2042             *pts_ptr = pts;
2043             n = 2 * dec->channels;
2044             is->audio_clock += (double)data_size /
2045                 (double)(n * dec->sample_rate);
2046 #ifdef DEBUG
2047             {
2048                 static double last_clock;
2049                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
2050                        is->audio_clock - last_clock,
2051                        is->audio_clock, pts);
2052                 last_clock = is->audio_clock;
2053             }
2054 #endif
2055             return data_size;
2056         }
2057
2058         /* free the current packet */
2059         if (pkt->data)
2060             av_free_packet(pkt);
2061
2062         if (is->paused || is->audioq.abort_request) {
2063             return -1;
2064         }
2065
2066         /* read next packet */
2067         if (packet_queue_get(&is->audioq, pkt, 1) < 0)
2068             return -1;
2069         if(pkt->data == flush_pkt.data){
2070             avcodec_flush_buffers(dec);
2071             continue;
2072         }
2073
2074         pkt_temp->data = pkt->data;
2075         pkt_temp->size = pkt->size;
2076
2077         /* update the audio clock with the packet pts, if available */
2078         if (pkt->pts != AV_NOPTS_VALUE) {
2079             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2080         }
2081     }
2082 }
2083
2084 /* prepare a new audio buffer */
2085 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2086 {
2087     VideoState *is = opaque;
2088     int audio_size, len1;
2089     int bytes_per_sec;
2090     double pts;
2091
2092     audio_callback_time = av_gettime();
2093
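    /* SDL calls this from its own audio thread and expects exactly 'len'
       bytes to be written into 'stream'; whenever our buffer runs dry,
       another frame is decoded (or silence is produced on error) */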
2094     while (len > 0) {
2095         if (is->audio_buf_index >= is->audio_buf_size) {
2096            audio_size = audio_decode_frame(is, &pts);
2097            if (audio_size < 0) {
2098                 /* if error, just output silence */
2099                is->audio_buf = is->audio_buf1;
2100                is->audio_buf_size = 1024;
2101                memset(is->audio_buf, 0, is->audio_buf_size);
2102            } else {
2103                if (is->show_mode != SHOW_MODE_VIDEO)
2104                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2105                audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2106                                               pts);
2107                is->audio_buf_size = audio_size;
2108            }
2109            is->audio_buf_index = 0;
2110         }
2111         len1 = is->audio_buf_size - is->audio_buf_index;
2112         if (len1 > len)
2113             len1 = len;
2114         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2115         len -= len1;
2116         stream += len1;
2117         is->audio_buf_index += len1;
2118     }
2119     bytes_per_sec = is->audio_st->codec->sample_rate *
2120             2 * is->audio_st->codec->channels;
2121     is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2122     /* Let's assume the audio driver that is used by SDL has two periods. */
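    /* audio_clock points just past the data handed to SDL; subtracting the
       bytes still waiting to be played (two hardware buffers plus what is
       left of our own buffer) gives the PTS of the sample being heard now */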
2123     is->audio_current_pts = is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / bytes_per_sec;
2124     is->audio_current_pts_drift = is->audio_current_pts - audio_callback_time / 1000000.0;
2125 }
2126
2127 /* open a given stream. Return 0 if OK */
2128 static int stream_component_open(VideoState *is, int stream_index)
2129 {
2130     AVFormatContext *ic = is->ic;
2131     AVCodecContext *avctx;
2132     AVCodec *codec;
2133     SDL_AudioSpec wanted_spec, spec;
2134     AVDictionary *opts;
2135     AVDictionaryEntry *t = NULL;
2136
2137     if (stream_index < 0 || stream_index >= ic->nb_streams)
2138         return -1;
2139     avctx = ic->streams[stream_index]->codec;
2140
2141     opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index]);
2142
2143     /* prepare audio output */
2144     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
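        /* ask the decoder for at most 2 channels; SDL audio is opened below
           with the decoder's resulting channel count */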
2145         if (avctx->channels > 0) {
2146             avctx->request_channels = FFMIN(2, avctx->channels);
2147         } else {
2148             avctx->request_channels = 2;
2149         }
2150     }
2151
2152     codec = avcodec_find_decoder(avctx->codec_id);
2153     if (!codec)
2154         return -1;
2155
2156     avctx->workaround_bugs = workaround_bugs;
2157     avctx->lowres = lowres;
2158     if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2159     avctx->idct_algo= idct;
2160     if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2161     avctx->skip_frame= skip_frame;
2162     avctx->skip_idct= skip_idct;
2163     avctx->skip_loop_filter= skip_loop_filter;
2164     avctx->error_recognition= error_recognition;
2165     avctx->error_concealment= error_concealment;
2166     avctx->thread_count= thread_count;
2167
2168     if(codec->capabilities & CODEC_CAP_DR1)
2169         avctx->flags |= CODEC_FLAG_EMU_EDGE;
2170
2171     if (!codec ||
2172         avcodec_open2(avctx, codec, &opts) < 0)
2173         return -1;
2174     if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2175         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2176         return AVERROR_OPTION_NOT_FOUND;
2177     }
2178
2179     /* prepare audio output */
2180     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2181         if(avctx->sample_rate <= 0 || avctx->channels <= 0){
2182             fprintf(stderr, "Invalid sample rate or channel count\n");
2183             return -1;
2184         }
2185         wanted_spec.freq = avctx->sample_rate;
2186         wanted_spec.format = AUDIO_S16SYS;
2187         wanted_spec.channels = avctx->channels;
2188         wanted_spec.silence = 0;
2189         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2190         wanted_spec.callback = sdl_audio_callback;
2191         wanted_spec.userdata = is;
2192         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2193             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2194             return -1;
2195         }
2196         is->audio_hw_buf_size = spec.size;
2197         is->audio_src_fmt= AV_SAMPLE_FMT_S16;
2198     }
2199
2200     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2201     switch(avctx->codec_type) {
2202     case AVMEDIA_TYPE_AUDIO:
2203         is->audio_stream = stream_index;
2204         is->audio_st = ic->streams[stream_index];
2205         is->audio_buf_size = 0;
2206         is->audio_buf_index = 0;
2207
2208         /* init averaging filter */
2209         is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2210         is->audio_diff_avg_count = 0;
2211         /* since we do not have a precise enough audio FIFO fullness measure,
2212            we correct audio sync only if the error is larger than this threshold */
2213         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
2214
2215         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2216         packet_queue_init(&is->audioq);
2217         SDL_PauseAudio(0);
2218         break;
2219     case AVMEDIA_TYPE_VIDEO:
2220         is->video_stream = stream_index;
2221         is->video_st = ic->streams[stream_index];
2222
2223         packet_queue_init(&is->videoq);
2224         is->video_tid = SDL_CreateThread(video_thread, is);
2225         break;
2226     case AVMEDIA_TYPE_SUBTITLE:
2227         is->subtitle_stream = stream_index;
2228         is->subtitle_st = ic->streams[stream_index];
2229         packet_queue_init(&is->subtitleq);
2230
2231         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2232         break;
2233     default:
2234         break;
2235     }
2236     return 0;
2237 }
2238
2239 static void stream_component_close(VideoState *is, int stream_index)
2240 {
2241     AVFormatContext *ic = is->ic;
2242     AVCodecContext *avctx;
2243
2244     if (stream_index < 0 || stream_index >= ic->nb_streams)
2245         return;
2246     avctx = ic->streams[stream_index]->codec;
2247
2248     switch(avctx->codec_type) {
2249     case AVMEDIA_TYPE_AUDIO:
2250         packet_queue_abort(&is->audioq);
2251
2252         SDL_CloseAudio();
2253
2254         packet_queue_end(&is->audioq);
2255         if (is->reformat_ctx)
2256             av_audio_convert_free(is->reformat_ctx);
2257         is->reformat_ctx = NULL;
2258         break;
2259     case AVMEDIA_TYPE_VIDEO:
2260         packet_queue_abort(&is->videoq);
2261
2262         /* note: we also signal the condition variable to make sure we
2263            unblock the video thread in all cases */
2264         SDL_LockMutex(is->pictq_mutex);
2265         SDL_CondSignal(is->pictq_cond);
2266         SDL_UnlockMutex(is->pictq_mutex);
2267
2268         SDL_WaitThread(is->video_tid, NULL);
2269
2270         packet_queue_end(&is->videoq);
2271         break;
2272     case AVMEDIA_TYPE_SUBTITLE:
2273         packet_queue_abort(&is->subtitleq);
2274
2275         /* note: we also signal the condition variable to make sure we
2276            unblock the subtitle thread in all cases */
2277         SDL_LockMutex(is->subpq_mutex);
2278         is->subtitle_stream_changed = 1;
2279
2280         SDL_CondSignal(is->subpq_cond);
2281         SDL_UnlockMutex(is->subpq_mutex);
2282
2283         SDL_WaitThread(is->subtitle_tid, NULL);
2284
2285         packet_queue_end(&is->subtitleq);
2286         break;
2287     default:
2288         break;
2289     }
2290
2291     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2292     avcodec_close(avctx);
2293     switch(avctx->codec_type) {
2294     case AVMEDIA_TYPE_AUDIO:
2295         is->audio_st = NULL;
2296         is->audio_stream = -1;
2297         break;
2298     case AVMEDIA_TYPE_VIDEO:
2299         is->video_st = NULL;
2300         is->video_stream = -1;
2301         break;
2302     case AVMEDIA_TYPE_SUBTITLE:
2303         is->subtitle_st = NULL;
2304         is->subtitle_stream = -1;
2305         break;
2306     default:
2307         break;
2308     }
2309 }
2310
2311 /* since we have only one decoding thread, we can use a global
2312    variable instead of a thread local variable */
2313 static VideoState *global_video_state;
2314
2315 static int decode_interrupt_cb(void)
2316 {
2317     return (global_video_state && global_video_state->abort_request);
2318 }
2319
2320 /* this thread gets the stream from the disk or the network */
2321 static int read_thread(void *arg)
2322 {
2323     VideoState *is = arg;
2324     AVFormatContext *ic = NULL;
2325     int err, i, ret;
2326     int st_index[AVMEDIA_TYPE_NB];
2327     AVPacket pkt1, *pkt = &pkt1;
2328     int eof=0;
2329     int pkt_in_play_range = 0;
2330     AVDictionaryEntry *t;
2331     AVDictionary **opts;
2332     int orig_nb_streams;
2333
2334     memset(st_index, -1, sizeof(st_index));
2335     is->video_stream = -1;
2336     is->audio_stream = -1;
2337     is->subtitle_stream = -1;
2338
2339     global_video_state = is;
2340     avio_set_interrupt_cb(decode_interrupt_cb);
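    /* the interrupt callback lets blocking I/O inside libavformat return
       early as soon as abort_request is set (see decode_interrupt_cb) */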
2341
2342     err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2343     if (err < 0) {
2344         print_error(is->filename, err);
2345         ret = -1;
2346         goto fail;
2347     }
2348     if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2349         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2350         ret = AVERROR_OPTION_NOT_FOUND;
2351         goto fail;
2352     }
2353     is->ic = ic;
2354
2355     if(genpts)
2356         ic->flags |= AVFMT_FLAG_GENPTS;
2357
2358     opts = setup_find_stream_info_opts(ic, codec_opts);
2359     orig_nb_streams = ic->nb_streams;
2360
2361     err = avformat_find_stream_info(ic, opts);
2362     if (err < 0) {
2363         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2364         ret = -1;
2365         goto fail;
2366     }
2367     for (i = 0; i < orig_nb_streams; i++)
2368         av_dict_free(&opts[i]);
2369     av_freep(&opts);
2370
2371     if(ic->pb)
2372         ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end
2373
2374     if(seek_by_bytes<0)
2375         seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2376
2377     /* if seeking requested, we execute it */
2378     if (start_time != AV_NOPTS_VALUE) {
2379         int64_t timestamp;
2380
2381         timestamp = start_time;
2382         /* add the stream start time */
2383         if (ic->start_time != AV_NOPTS_VALUE)
2384             timestamp += ic->start_time;
2385         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2386         if (ret < 0) {
2387             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2388                     is->filename, (double)timestamp / AV_TIME_BASE);
2389         }
2390     }
2391
2392     for (i = 0; i < ic->nb_streams; i++)
2393         ic->streams[i]->discard = AVDISCARD_ALL;
2394     if (!video_disable)
2395         st_index[AVMEDIA_TYPE_VIDEO] =
2396             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2397                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2398     if (!audio_disable)
2399         st_index[AVMEDIA_TYPE_AUDIO] =
2400             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2401                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2402                                 st_index[AVMEDIA_TYPE_VIDEO],
2403                                 NULL, 0);
2404     if (!video_disable)
2405         st_index[AVMEDIA_TYPE_SUBTITLE] =
2406             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2407                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2408                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2409                                  st_index[AVMEDIA_TYPE_AUDIO] :
2410                                  st_index[AVMEDIA_TYPE_VIDEO]),
2411                                 NULL, 0);
2412     if (show_status) {
2413         av_dump_format(ic, 0, is->filename, 0);
2414     }
2415
2416     is->show_mode = show_mode;
2417
2418     /* open the streams */
2419     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2420         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2421     }
2422
2423     ret=-1;
2424     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2425         ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2426     }
2427     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2428     if (is->show_mode == SHOW_MODE_NONE)
2429         is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2430
2431     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2432         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2433     }
2434
2435     if (is->video_stream < 0 && is->audio_stream < 0) {
2436         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2437         ret = -1;
2438         goto fail;
2439     }
2440
2441     for(;;) {
2442         if (is->abort_request)
2443             break;
2444         if (is->paused != is->last_paused) {
2445             is->last_paused = is->paused;
2446             if (is->paused)
2447                 is->read_pause_return= av_read_pause(ic);
2448             else
2449                 av_read_play(ic);
2450         }
2451 #if CONFIG_RTSP_DEMUXER
2452         if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2453             /* wait 10 ms to avoid trying to get another packet */
2454             /* XXX: horrible */
2455             SDL_Delay(10);
2456             continue;
2457         }
2458 #endif
2459         if (is->seek_req) {
2460             int64_t seek_target= is->seek_pos;
2461             int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2462             int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2463 //FIXME the +-2 is due to rounding being not done in the correct direction in generation
2464 //      of the seek_pos/seek_rel variables
2465
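            /* avformat_seek_file() picks a seek point inside
               [seek_min, seek_max] as close to seek_target as possible; on
               success every packet queue is flushed and a flush packet is
               queued so the decoders drop their internal state */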
2466             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2467             if (ret < 0) {
2468                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2469             }else{
2470                 if (is->audio_stream >= 0) {
2471                     packet_queue_flush(&is->audioq);
2472                     packet_queue_put(&is->audioq, &flush_pkt);
2473                 }
2474                 if (is->subtitle_stream >= 0) {
2475                     packet_queue_flush(&is->subtitleq);
2476                     packet_queue_put(&is->subtitleq, &flush_pkt);
2477                 }
2478                 if (is->video_stream >= 0) {
2479                     packet_queue_flush(&is->videoq);
2480                     packet_queue_put(&is->videoq, &flush_pkt);
2481                 }
2482             }
2483             is->seek_req = 0;
2484             eof= 0;
2485         }
2486
2487         /* if the queues are full, no need to read more */
2488         if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2489             || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
2490                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
2491                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
2492             /* wait 10 ms */
2493             SDL_Delay(10);
2494             continue;
2495         }
2496         if(eof) {
2497             if(is->video_stream >= 0){
2498                 av_init_packet(pkt);
2499                 pkt->data=NULL;
2500                 pkt->size=0;
2501                 pkt->stream_index= is->video_stream;
2502                 packet_queue_put(&is->videoq, pkt);
2503             }
2504             SDL_Delay(10);
2505             if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
2506                 if(loop!=1 && (!loop || --loop)){
2507                     stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2508                 }else if(autoexit){
2509                     ret=AVERROR_EOF;
2510                     goto fail;
2511                 }
2512             }
2513             eof=0;
2514             continue;
2515         }
2516         ret = av_read_frame(ic, pkt);
2517         if (ret < 0) {
2518             if (ret == AVERROR_EOF || url_feof(ic->pb))
2519                 eof=1;
2520             if (ic->pb && ic->pb->error)
2521                 break;
2522             SDL_Delay(100); /* wait for user event */
2523             continue;
2524         }
2525         /* check if packet is in play range specified by user, then queue, otherwise discard */
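        /* the packet pts is made relative to the stream start time,
           converted to seconds via the stream time base and compared with
           the -ss/-t window (start_time and duration are in microseconds) */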
2526         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2527                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2528                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2529                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
2530                 <= ((double)duration/1000000);
2531         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2532             packet_queue_put(&is->audioq, pkt);
2533         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2534             packet_queue_put(&is->videoq, pkt);
2535         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2536             packet_queue_put(&is->subtitleq, pkt);
2537         } else {
2538             av_free_packet(pkt);
2539         }
2540     }
2541     /* wait until the end */
2542     while (!is->abort_request) {
2543         SDL_Delay(100);
2544     }
2545
2546     ret = 0;
2547  fail:
2548     /* disable interrupting */
2549     global_video_state = NULL;
2550
2551     /* close each stream */
2552     if (is->audio_stream >= 0)
2553         stream_component_close(is, is->audio_stream);
2554     if (is->video_stream >= 0)
2555         stream_component_close(is, is->video_stream);
2556     if (is->subtitle_stream >= 0)
2557         stream_component_close(is, is->subtitle_stream);
2558     if (is->ic) {
2559         av_close_input_file(is->ic);
2560         is->ic = NULL; /* safety */
2561     }
2562     avio_set_interrupt_cb(NULL);
2563
2564     if (ret != 0) {
2565         SDL_Event event;
2566
2567         event.type = FF_QUIT_EVENT;
2568         event.user.data1 = is;
2569         SDL_PushEvent(&event);
2570     }
2571     return 0;
2572 }
2573
2574 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2575 {
2576     VideoState *is;
2577
2578     is = av_mallocz(sizeof(VideoState));
2579     if (!is)
2580         return NULL;
2581     av_strlcpy(is->filename, filename, sizeof(is->filename));
2582     is->iformat = iformat;
2583     is->ytop = 0;
2584     is->xleft = 0;
2585
2586     /* start video display */
2587     is->pictq_mutex = SDL_CreateMutex();
2588     is->pictq_cond = SDL_CreateCond();
2589
2590     is->subpq_mutex = SDL_CreateMutex();
2591     is->subpq_cond = SDL_CreateCond();
2592
2593     is->av_sync_type = av_sync_type;
2594     is->read_tid = SDL_CreateThread(read_thread, is);
2595     if (!is->read_tid) {
2596         av_free(is);
2597         return NULL;
2598     }
2599     return is;
2600 }
2601
2602 static void stream_cycle_channel(VideoState *is, int codec_type)
2603 {
2604     AVFormatContext *ic = is->ic;
2605     int start_index, stream_index;
2606     AVStream *st;
2607
2608     if (codec_type == AVMEDIA_TYPE_VIDEO)
2609         start_index = is->video_stream;
2610     else if (codec_type == AVMEDIA_TYPE_AUDIO)
2611         start_index = is->audio_stream;
2612     else
2613         start_index = is->subtitle_stream;
2614     if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2615         return;
2616     stream_index = start_index;
2617     for(;;) {
2618         if (++stream_index >= is->ic->nb_streams)
2619         {
2620             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2621             {
2622                 stream_index = -1;
2623                 goto the_end;
2624             } else
2625                 stream_index = 0;
2626         }
2627         if (stream_index == start_index)
2628             return;
2629         st = ic->streams[stream_index];
2630         if (st->codec->codec_type == codec_type) {
2631             /* check that parameters are OK */
2632             switch(codec_type) {
2633             case AVMEDIA_TYPE_AUDIO:
2634                 if (st->codec->sample_rate != 0 &&
2635                     st->codec->channels != 0)
2636                     goto the_end;
2637                 break;
2638             case AVMEDIA_TYPE_VIDEO:
2639             case AVMEDIA_TYPE_SUBTITLE:
2640                 goto the_end;
2641             default:
2642                 break;
2643             }
2644         }
2645     }
2646  the_end:
2647     stream_component_close(is, start_index);
2648     stream_component_open(is, stream_index);
2649 }
2650
2651
2652 static void toggle_full_screen(VideoState *is)
2653 {
2654     is_full_screen = !is_full_screen;
2655     video_open(is);
2656 }
2657
2658 static void toggle_pause(VideoState *is)
2659 {
2660     stream_toggle_pause(is);
2661     is->step = 0;
2662 }
2663
2664 static void step_to_next_frame(VideoState *is)
2665 {
2666     /* if the stream is paused, unpause it, then step */
2667     if (is->paused)
2668         stream_toggle_pause(is);
2669     is->step = 1;
2670 }
2671
2672 static void toggle_audio_display(VideoState *is)
2673 {
2674     int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2675     is->show_mode = (is->show_mode + 1) % SHOW_MODE_NB;
2676     fill_rectangle(screen,
2677                 is->xleft, is->ytop, is->width, is->height,
2678                 bgcolor);
2679     SDL_UpdateRect(screen, is->xleft, is->ytop, is->width, is->height);
2680 }
2681
2682 /* handle an event sent by the GUI */
2683 static void event_loop(VideoState *cur_stream)
2684 {
2685     SDL_Event event;
2686     double incr, pos, frac;
2687
2688     for(;;) {
2689         double x;
2690         SDL_WaitEvent(&event);
2691         switch(event.type) {
2692         case SDL_KEYDOWN:
2693             if (exit_on_keydown) {
2694                 do_exit(cur_stream);
2695                 break;
2696             }
2697             switch(event.key.keysym.sym) {
2698             case SDLK_ESCAPE:
2699             case SDLK_q:
2700                 do_exit(cur_stream);
2701                 break;
2702             case SDLK_f:
2703                 toggle_full_screen(cur_stream);
2704                 break;
2705             case SDLK_p:
2706             case SDLK_SPACE:
2707                 toggle_pause(cur_stream);
2708                 break;
2709             case SDLK_s: //S: Step to next frame
2710                 step_to_next_frame(cur_stream);
2711                 break;
2712             case SDLK_a:
2713                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2714                 break;
2715             case SDLK_v:
2716                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2717                 break;
2718             case SDLK_t:
2719                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2720                 break;
2721             case SDLK_w:
2722                 toggle_audio_display(cur_stream);
2723                 break;
2724             case SDLK_LEFT:
2725                 incr = -10.0;
2726                 goto do_seek;
2727             case SDLK_RIGHT:
2728                 incr = 10.0;
2729                 goto do_seek;
2730             case SDLK_UP:
2731                 incr = 60.0;
2732                 goto do_seek;
2733             case SDLK_DOWN:
2734                 incr = -60.0;
2735             do_seek:
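                /* in byte-seek mode the +/-10s and +/-60s increments are
                   converted to a byte offset using the container bit rate
                   (or a 180000 bytes/s fallback); otherwise the seek is done
                   in AV_TIME_BASE units relative to the master clock */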
2736                 if (seek_by_bytes) {
2737                     if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
2738                         pos= cur_stream->video_current_pos;
2739                     }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
2740                         pos= cur_stream->audio_pkt.pos;
2741                     }else
2742                         pos = avio_tell(cur_stream->ic->pb);
2743                     if (cur_stream->ic->bit_rate)
2744                         incr *= cur_stream->ic->bit_rate / 8.0;
2745                     else
2746                         incr *= 180000.0;
2747                     pos += incr;
2748                     stream_seek(cur_stream, pos, incr, 1);
2749                 } else {
2750                     pos = get_master_clock(cur_stream);
2751                     pos += incr;
2752                     stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2753                 }
2754                 break;
2755             default:
2756                 break;
2757             }
2758             break;
2759         case SDL_MOUSEBUTTONDOWN:
2760             if (exit_on_mousedown) {
2761                 do_exit(cur_stream);
2762                 break;
2763             }
2764         case SDL_MOUSEMOTION:
2765             if(event.type ==SDL_MOUSEBUTTONDOWN){
2766                 x= event.button.x;
2767             }else{
2768                 if(event.motion.state != SDL_PRESSED)
2769                     break;
2770                 x= event.motion.x;
2771             }
2772             if(seek_by_bytes || cur_stream->ic->duration<=0){
2773                 uint64_t size=  avio_size(cur_stream->ic->pb);
2774                 stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2775             }else{
2776                 int64_t ts;
2777                 int ns, hh, mm, ss;
2778                 int tns, thh, tmm, tss;
2779                 tns = cur_stream->ic->duration/1000000LL;
2780                 thh = tns/3600;
2781                 tmm = (tns%3600)/60;
2782                 tss = (tns%60);
2783                 frac = x/cur_stream->width;
2784                 ns = frac*tns;
2785                 hh = ns/3600;
2786                 mm = (ns%3600)/60;
2787                 ss = (ns%60);
2788                 fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2789                         hh, mm, ss, thh, tmm, tss);
2790                 ts = frac*cur_stream->ic->duration;
2791                 if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2792                     ts += cur_stream->ic->start_time;
2793                 stream_seek(cur_stream, ts, 0, 0);
2794             }
2795             break;
2796         case SDL_VIDEORESIZE:
2797             screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2798                                       SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2799             screen_width = cur_stream->width = event.resize.w;
2800             screen_height= cur_stream->height= event.resize.h;
2801             break;
2802         case SDL_QUIT:
2803         case FF_QUIT_EVENT:
2804             do_exit(cur_stream);
2805             break;
2806         case FF_ALLOC_EVENT:
2807             video_open(event.user.data1);
2808             alloc_picture(event.user.data1);
2809             break;
2810         case FF_REFRESH_EVENT:
2811             video_refresh(event.user.data1);
2812             cur_stream->refresh=0;
2813             break;
2814         default:
2815             break;
2816         }
2817     }
2818 }
2819
2820 static int opt_frame_size(const char *opt, const char *arg)
2821 {
2822     av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n");
2823     return opt_default("video_size", arg);
2824 }
2825
2826 static int opt_width(const char *opt, const char *arg)
2827 {
2828     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2829     return 0;
2830 }
2831
2832 static int opt_height(const char *opt, const char *arg)
2833 {
2834     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2835     return 0;
2836 }
2837
2838 static int opt_format(const char *opt, const char *arg)
2839 {
2840     file_iformat = av_find_input_format(arg);
2841     if (!file_iformat) {
2842         fprintf(stderr, "Unknown input format: %s\n", arg);
2843         return AVERROR(EINVAL);
2844     }
2845     return 0;
2846 }
2847
2848 static int opt_frame_pix_fmt(const char *opt, const char *arg)
2849 {
2850     av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n");
2851     return opt_default("pixel_format", arg);
2852 }
2853
2854 static int opt_sync(const char *opt, const char *arg)
2855 {
2856     if (!strcmp(arg, "audio"))
2857         av_sync_type = AV_SYNC_AUDIO_MASTER;
2858     else if (!strcmp(arg, "video"))
2859         av_sync_type = AV_SYNC_VIDEO_MASTER;
2860     else if (!strcmp(arg, "ext"))
2861         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2862     else {
2863         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2864         exit(1);
2865     }
2866     return 0;
2867 }
2868
2869 static int opt_seek(const char *opt, const char *arg)
2870 {
2871     start_time = parse_time_or_die(opt, arg, 1);
2872     return 0;
2873 }
2874
2875 static int opt_duration(const char *opt, const char *arg)
2876 {
2877     duration = parse_time_or_die(opt, arg, 1);
2878     return 0;
2879 }
2880
2881 static int opt_thread_count(const char *opt, const char *arg)
2882 {
2883     thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2884 #if !HAVE_THREADS
2885     fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2886 #endif
2887     return 0;
2888 }
2889
2890 static int opt_show_mode(const char *opt, const char *arg)
2891 {
2892     show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
2893                 !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
2894                 !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT  :
2895                 parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
2896     return 0;
2897 }
2898
2899 static void opt_input_file(void *optctx, const char *filename)
2900 {
2901     if (input_filename) {
2902         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
2903                 filename, input_filename);
2904         exit_program(1);
2905     }
2906     if (!strcmp(filename, "-"))
2907         filename = "pipe:";
2908     input_filename = filename;
2909 }
2910
2911 static int dummy;
2912
2913 static const OptionDef options[] = {
2914 #include "cmdutils_common_opts.h"
2915     { "x", HAS_ARG, {(void*)opt_width}, "force displayed width", "width" },
2916     { "y", HAS_ARG, {(void*)opt_height}, "force displayed height", "height" },
2917     { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
2918     { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
2919     { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
2920     { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
2921     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
2922     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
2923     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
2924     { "ss", HAS_ARG, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
2925     { "t", HAS_ARG, {(void*)&opt_duration}, "play  \"duration\" seconds of audio/video", "duration" },
2926     { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
2927     { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
2928     { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
2929     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
2930     { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
2931     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
2932     { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
2933     { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
2934     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
2935     { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
2936     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
2937     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
2938     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
2939     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
2940     { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
2941     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
2942     { "sync", HAS_ARG | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
2943     { "threads", HAS_ARG | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
2944     { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
2945     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
2946     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
2947     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
2948     { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
2949     { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
2950 #if CONFIG_AVFILTER
2951     { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
2952 #endif
2953     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
2954     { "showmode", HAS_ARG, {(void*)opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
2955     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
2956     { "i", OPT_BOOL, {(void *)&dummy}, "read specified file", "input_file"},
2957     { NULL, },
2958 };
2959
2960 static void show_usage(void)
2961 {
2962     printf("Simple media player\n");
2963     printf("usage: %s [options] input_file\n", program_name);
2964     printf("\n");
2965 }
2966
2967 static int opt_help(const char *opt, const char *arg)
2968 {
2969     const AVClass *class;
2970     av_log_set_callback(log_callback_help);
2971     show_usage();
2972     show_help_options(options, "Main options:\n",
2973                       OPT_EXPERT, 0);
2974     show_help_options(options, "\nAdvanced options:\n",
2975                       OPT_EXPERT, OPT_EXPERT);
2976     printf("\n");
2977     class = avcodec_get_class();
2978     av_opt_show2(&class, NULL,
2979                  AV_OPT_FLAG_DECODING_PARAM, 0);
2980     printf("\n");
2981     class = avformat_get_class();
2982     av_opt_show2(&class, NULL,
2983                  AV_OPT_FLAG_DECODING_PARAM, 0);
2984 #if !CONFIG_AVFILTER
2985     printf("\n");
2986     class = sws_get_class();
2987     av_opt_show2(&class, NULL,
2988                  AV_OPT_FLAG_ENCODING_PARAM, 0);
2989 #endif
2990     printf("\nWhile playing:\n"
2991            "q, ESC              quit\n"
2992            "f                   toggle full screen\n"
2993            "p, SPC              pause\n"
2994            "a                   cycle audio channel\n"
2995            "v                   cycle video channel\n"
2996            "t                   cycle subtitle channel\n"
2997            "w                   show audio waves\n"
2998            "s                   activate frame-step mode\n"
2999            "left/right          seek backward/forward 10 seconds\n"
3000            "down/up             seek backward/forward 1 minute\n"
3001            "mouse click         seek to percentage in file corresponding to fraction of width\n"
3002            );
3003     return 0;
3004 }
3005
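/* lock manager callback registered with av_lockmgr_register(); libavcodec
   expects 0 on success and non-zero on failure, hence the !! on the SDL
   return values */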
3006 static int lockmgr(void **mtx, enum AVLockOp op)
3007 {
3008    switch(op) {
3009       case AV_LOCK_CREATE:
3010           *mtx = SDL_CreateMutex();
3011           if(!*mtx)
3012               return 1;
3013           return 0;
3014       case AV_LOCK_OBTAIN:
3015           return !!SDL_LockMutex(*mtx);
3016       case AV_LOCK_RELEASE:
3017           return !!SDL_UnlockMutex(*mtx);
3018       case AV_LOCK_DESTROY:
3019           SDL_DestroyMutex(*mtx);
3020           return 0;
3021    }
3022    return 1;
3023 }
3024
3025 /* main entry point */
3026 int main(int argc, char **argv)
3027 {
3028     int flags;
3029     VideoState *is;
3030
3031     av_log_set_flags(AV_LOG_SKIP_REPEATED);
3032
3033     /* register all codecs, demuxers and protocols */
3034     avcodec_register_all();
3035 #if CONFIG_AVDEVICE
3036     avdevice_register_all();
3037 #endif
3038 #if CONFIG_AVFILTER
3039     avfilter_register_all();
3040 #endif
3041     av_register_all();
3042
3043     init_opts();
3044
3045     show_banner();
3046
3047     parse_options(NULL, argc, argv, options, opt_input_file);
3048
3049     if (!input_filename) {
3050         show_usage();
3051         fprintf(stderr, "An input file must be specified\n");
3052         fprintf(stderr, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3053         exit(1);
3054     }
3055
3056     if (display_disable) {
3057         video_disable = 1;
3058     }
3059     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3060     if (audio_disable)
3061         flags &= ~SDL_INIT_AUDIO;
3062 #if !defined(__MINGW32__) && !defined(__APPLE__)
3063     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3064 #endif
3065     if (SDL_Init (flags)) {
3066         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3067         fprintf(stderr, "(Did you set the DISPLAY variable?)\n");
3068         exit(1);
3069     }
3070
3071     if (!display_disable) {
3072 #if HAVE_SDL_VIDEO_SIZE
3073         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3074         fs_screen_width = vi->current_w;
3075         fs_screen_height = vi->current_h;
3076 #endif
3077     }
3078
3079     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3080     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3081     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3082
3083     if (av_lockmgr_register(lockmgr)) {
3084         fprintf(stderr, "Could not initialize lock manager!\n");
3085         do_exit(NULL);
3086     }
3087
3088     av_init_packet(&flush_pkt);
3089     flush_pkt.data= "FLUSH";
3090
3091     is = stream_open(input_filename, file_iformat);
3092     if (!is) {
3093         fprintf(stderr, "Failed to initialize VideoState!\n");
3094         do_exit(NULL);
3095     }
3096
3097     event_loop(is);
3098
3099     /* never returns */
3100
3101     return 0;
3102 }