Fix build errors in the Windows MinGW environment (Windows 7, MinGW32, gcc 4.5.2)
[coroid/ffmpeg_saccubus.git] / ffplay.c
1 /*
2  * ffplay : Simple Media Player based on the FFmpeg libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include "libavutil/avstring.h"
27 #include "libavutil/colorspace.h"
28 #include "libavutil/mathematics.h"
29 #include "libavutil/pixdesc.h"
30 #include "libavutil/imgutils.h"
31 #include "libavutil/dict.h"
32 #include "libavutil/parseutils.h"
33 #include "libavutil/samplefmt.h"
34 #include "libavutil/avassert.h"
35 #include "libavformat/avformat.h"
36 #include "libavdevice/avdevice.h"
37 #include "libswscale/swscale.h"
38 #include "libavcodec/audioconvert.h"
39 #include "libavutil/opt.h"
40 #include "libavcodec/avfft.h"
41
42 #if CONFIG_AVFILTER
43 # include "libavfilter/avcodec.h"
44 # include "libavfilter/avfilter.h"
45 # include "libavfilter/avfiltergraph.h"
46 # include "libavfilter/buffersink.h"
47 #endif
48
49 #include <SDL.h>
50 #include <SDL_thread.h>
51
52 #include "cmdutils.h"
53
54 #include <unistd.h>
55 #include <assert.h>
56
57 const char program_name[] = "ffplay";
58 const int program_birth_year = 2003;
59
60 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
61 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
62 #define MIN_FRAMES 5
63
64 /* SDL audio buffer size, in samples. Should be small to have precise
65    A/V sync as SDL does not have hardware buffer fullness info. */
66 #define SDL_AUDIO_BUFFER_SIZE 1024
67
68 /* no AV sync correction is done if below the AV sync threshold */
69 #define AV_SYNC_THRESHOLD 0.01
71 /* no AV correction is done if the error is too big */
71 #define AV_NOSYNC_THRESHOLD 10.0
72
73 #define FRAME_SKIP_FACTOR 0.05
74
75 /* maximum audio speed change to get correct sync */
76 #define SAMPLE_CORRECTION_PERCENT_MAX 10
77
78 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
79 #define AUDIO_DIFF_AVG_NB   20
80
81 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
82 #define SAMPLE_ARRAY_SIZE (2*65536)
83
84 static int sws_flags = SWS_BICUBIC;
85
86 typedef struct PacketQueue {
87     AVPacketList *first_pkt, *last_pkt;
88     int nb_packets;
89     int size;
90     int abort_request;
91     SDL_mutex *mutex;
92     SDL_cond *cond;
93 } PacketQueue;
94
95 #define VIDEO_PICTURE_QUEUE_SIZE 2
96 #define SUBPICTURE_QUEUE_SIZE 4
97
98 typedef struct VideoPicture {
99     double pts;                                  ///<presentation time stamp for this picture
100     double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
101     double duration;                             ///<expected duration of the frame
102     int64_t pos;                                 ///<byte position in file
103     SDL_Overlay *bmp;
104     int width, height; /* source height & width */
105     int allocated;
106     enum PixelFormat pix_fmt;
107
108 #if CONFIG_AVFILTER
109     AVFilterBufferRef *picref;
110 #endif
111 } VideoPicture;
112
113 typedef struct SubPicture {
114     double pts; /* presentation time stamp for this picture */
115     AVSubtitle sub;
116 } SubPicture;
117
118 enum {
119     AV_SYNC_AUDIO_MASTER, /* default choice */
120     AV_SYNC_VIDEO_MASTER,
121     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
122 };
123
124 typedef struct VideoState {
125     SDL_Thread *read_tid;
126     SDL_Thread *video_tid;
127     SDL_Thread *refresh_tid;
128     AVInputFormat *iformat;
129     int no_background;
130     int abort_request;
131     int paused;
132     int last_paused;
133     int seek_req;
134     int seek_flags;
135     int64_t seek_pos;
136     int64_t seek_rel;
137     int read_pause_return;
138     AVFormatContext *ic;
139
140     int audio_stream;
141
142     int av_sync_type;
143     double external_clock; /* external clock base */
144     int64_t external_clock_time;
145
146     double audio_clock;
147     double audio_diff_cum; /* used for AV difference average computation */
148     double audio_diff_avg_coef;
149     double audio_diff_threshold;
150     int audio_diff_avg_count;
151     AVStream *audio_st;
152     PacketQueue audioq;
153     int audio_hw_buf_size;
154     /* samples output by the codec. we reserve more space for avsync
155        compensation */
156     DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
157     DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
158     uint8_t *audio_buf;
159     unsigned int audio_buf_size; /* in bytes */
160     int audio_buf_index; /* in bytes */
161     int audio_write_buf_size;
162     AVPacket audio_pkt_temp;
163     AVPacket audio_pkt;
164     enum AVSampleFormat audio_src_fmt;
165     AVAudioConvert *reformat_ctx;
166     double audio_current_pts;
167     double audio_current_pts_drift;
168
169     enum ShowMode {
170         SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
171     } show_mode;
172     int16_t sample_array[SAMPLE_ARRAY_SIZE];
173     int sample_array_index;
174     int last_i_start;
175     RDFTContext *rdft;
176     int rdft_bits;
177     FFTSample *rdft_data;
178     int xpos;
179
180     SDL_Thread *subtitle_tid;
181     int subtitle_stream;
182     int subtitle_stream_changed;
183     AVStream *subtitle_st;
184     PacketQueue subtitleq;
185     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
186     int subpq_size, subpq_rindex, subpq_windex;
187     SDL_mutex *subpq_mutex;
188     SDL_cond *subpq_cond;
189
190     double frame_timer;
191     double frame_last_pts;
192     double frame_last_delay;
193     double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
194     int video_stream;
195     AVStream *video_st;
196     PacketQueue videoq;
197     double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
198     double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
199     int64_t video_current_pos;                   ///<current displayed file pos
200     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
201     int pictq_size, pictq_rindex, pictq_windex;
202     SDL_mutex *pictq_mutex;
203     SDL_cond *pictq_cond;
204 #if !CONFIG_AVFILTER
205     struct SwsContext *img_convert_ctx;
206 #endif
207
208     char filename[1024];
209     int width, height, xleft, ytop;
210     int step;
211
212 #if CONFIG_AVFILTER
213     AVFilterContext *out_video_filter;          ///<the last filter in the video chain
214 #endif
215
216     float skip_frames;
217     float skip_frames_index;
218     int refresh;
219 } VideoState;
220
221 static int opt_help(const char *opt, const char *arg);
222
223 /* options specified by the user */
224 static AVInputFormat *file_iformat;
225 static const char *input_filename;
226 static const char *window_title;
227 static int fs_screen_width;
228 static int fs_screen_height;
229 static int screen_width = 0;
230 static int screen_height = 0;
231 static int audio_disable;
232 static int video_disable;
233 static int wanted_stream[AVMEDIA_TYPE_NB]={
234     [AVMEDIA_TYPE_AUDIO]=-1,
235     [AVMEDIA_TYPE_VIDEO]=-1,
236     [AVMEDIA_TYPE_SUBTITLE]=-1,
237 };
238 static int seek_by_bytes=-1;
239 static int display_disable;
240 static int show_status = 1;
241 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
242 static int64_t start_time = AV_NOPTS_VALUE;
243 static int64_t duration = AV_NOPTS_VALUE;
244 static int thread_count = 1;
245 static int workaround_bugs = 1;
246 static int fast = 0;
247 static int genpts = 0;
248 static int lowres = 0;
249 static int idct = FF_IDCT_AUTO;
250 static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
251 static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
252 static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
253 static int error_recognition = FF_ER_CAREFUL;
254 static int error_concealment = 3;
255 static int decoder_reorder_pts= -1;
256 static int autoexit;
257 static int exit_on_keydown;
258 static int exit_on_mousedown;
259 static int loop=1;
260 static int framedrop=-1;
261 static enum ShowMode show_mode = SHOW_MODE_NONE;
262
263 static int rdftspeed=20;
264 #if CONFIG_AVFILTER
265 static char *vfilters = NULL;
266 #endif
267
268 /* current context */
269 static int is_full_screen;
270 static int64_t audio_callback_time;
271
272 static AVPacket flush_pkt;
273
274 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
275 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
276 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
277
278 static SDL_Surface *screen;
279
280 void exit_program(int ret)
281 {
282     exit(ret);
283 }
284
285 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
286 {
287     AVPacketList *pkt1;
288
289     /* duplicate the packet */
290     if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
291         return -1;
292
293     pkt1 = av_malloc(sizeof(AVPacketList));
294     if (!pkt1)
295         return -1;
296     pkt1->pkt = *pkt;
297     pkt1->next = NULL;
298
299
300     SDL_LockMutex(q->mutex);
301
302     if (!q->last_pkt)
303
304         q->first_pkt = pkt1;
305     else
306         q->last_pkt->next = pkt1;
307     q->last_pkt = pkt1;
308     q->nb_packets++;
309     q->size += pkt1->pkt.size + sizeof(*pkt1);
310     /* XXX: should duplicate packet data in DV case */
311     SDL_CondSignal(q->cond);
312
313     SDL_UnlockMutex(q->mutex);
314     return 0;
315 }
316
317 /* packet queue handling */
318 static void packet_queue_init(PacketQueue *q)
319 {
320     memset(q, 0, sizeof(PacketQueue));
321     q->mutex = SDL_CreateMutex();
322     q->cond = SDL_CreateCond();
323     packet_queue_put(q, &flush_pkt);
324 }
325
326 static void packet_queue_flush(PacketQueue *q)
327 {
328     AVPacketList *pkt, *pkt1;
329
330     SDL_LockMutex(q->mutex);
331     for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
332         pkt1 = pkt->next;
333         av_free_packet(&pkt->pkt);
334         av_freep(&pkt);
335     }
336     q->last_pkt = NULL;
337     q->first_pkt = NULL;
338     q->nb_packets = 0;
339     q->size = 0;
340     SDL_UnlockMutex(q->mutex);
341 }
342
343 static void packet_queue_end(PacketQueue *q)
344 {
345     packet_queue_flush(q);
346     SDL_DestroyMutex(q->mutex);
347     SDL_DestroyCond(q->cond);
348 }
349
350 static void packet_queue_abort(PacketQueue *q)
351 {
352     SDL_LockMutex(q->mutex);
353
354     q->abort_request = 1;
355
356     SDL_CondSignal(q->cond);
357
358     SDL_UnlockMutex(q->mutex);
359 }
360
361 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
362 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
363 {
364     AVPacketList *pkt1;
365     int ret;
366
367     SDL_LockMutex(q->mutex);
368
369     for(;;) {
370         if (q->abort_request) {
371             ret = -1;
372             break;
373         }
374
375         pkt1 = q->first_pkt;
376         if (pkt1) {
377             q->first_pkt = pkt1->next;
378             if (!q->first_pkt)
379                 q->last_pkt = NULL;
380             q->nb_packets--;
381             q->size -= pkt1->pkt.size + sizeof(*pkt1);
382             *pkt = pkt1->pkt;
383             av_free(pkt1);
384             ret = 1;
385             break;
386         } else if (!block) {
387             ret = 0;
388             break;
389         } else {
390             SDL_CondWait(q->cond, q->mutex);
391         }
392     }
393     SDL_UnlockMutex(q->mutex);
394     return ret;
395 }
396
397 static inline void fill_rectangle(SDL_Surface *screen,
398                                   int x, int y, int w, int h, int color)
399 {
400     SDL_Rect rect;
401     rect.x = x;
402     rect.y = y;
403     rect.w = w;
404     rect.h = h;
405     SDL_FillRect(screen, &rect, color);
406 }
407
408 #define ALPHA_BLEND(a, oldp, newp, s)\
409 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
410
411 #define RGBA_IN(r, g, b, a, s)\
412 {\
413     unsigned int v = ((const uint32_t *)(s))[0];\
414     a = (v >> 24) & 0xff;\
415     r = (v >> 16) & 0xff;\
416     g = (v >> 8) & 0xff;\
417     b = v & 0xff;\
418 }
419
420 #define YUVA_IN(y, u, v, a, s, pal)\
421 {\
422     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
423     a = (val >> 24) & 0xff;\
424     y = (val >> 16) & 0xff;\
425     u = (val >> 8) & 0xff;\
426     v = val & 0xff;\
427 }
428
429 #define YUVA_OUT(d, y, u, v, a)\
430 {\
431     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
432 }
433
434
435 #define BPP 1
436
437 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
438 {
439     int wrap, wrap3, width2, skip2;
440     int y, u, v, a, u1, v1, a1, w, h;
441     uint8_t *lum, *cb, *cr;
442     const uint8_t *p;
443     const uint32_t *pal;
444     int dstx, dsty, dstw, dsth;
445
446     dstw = av_clip(rect->w, 0, imgw);
447     dsth = av_clip(rect->h, 0, imgh);
448     dstx = av_clip(rect->x, 0, imgw - dstw);
449     dsty = av_clip(rect->y, 0, imgh - dsth);
450     lum = dst->data[0] + dsty * dst->linesize[0];
451     cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
452     cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
453
454     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
455     skip2 = dstx >> 1;
456     wrap = dst->linesize[0];
457     wrap3 = rect->pict.linesize[0];
458     p = rect->pict.data[0];
459     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
460
461     if (dsty & 1) {
462         lum += dstx;
463         cb += skip2;
464         cr += skip2;
465
466         if (dstx & 1) {
467             YUVA_IN(y, u, v, a, p, pal);
468             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
469             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
470             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
471             cb++;
472             cr++;
473             lum++;
474             p += BPP;
475         }
476         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
477             YUVA_IN(y, u, v, a, p, pal);
478             u1 = u;
479             v1 = v;
480             a1 = a;
481             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
482
483             YUVA_IN(y, u, v, a, p + BPP, pal);
484             u1 += u;
485             v1 += v;
486             a1 += a;
487             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
488             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
489             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
490             cb++;
491             cr++;
492             p += 2 * BPP;
493             lum += 2;
494         }
495         if (w) {
496             YUVA_IN(y, u, v, a, p, pal);
497             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
498             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
499             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
500             p++;
501             lum++;
502         }
503         p += wrap3 - dstw * BPP;
504         lum += wrap - dstw - dstx;
505         cb += dst->linesize[1] - width2 - skip2;
506         cr += dst->linesize[2] - width2 - skip2;
507     }
508     for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
509         lum += dstx;
510         cb += skip2;
511         cr += skip2;
512
513         if (dstx & 1) {
514             YUVA_IN(y, u, v, a, p, pal);
515             u1 = u;
516             v1 = v;
517             a1 = a;
518             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
519             p += wrap3;
520             lum += wrap;
521             YUVA_IN(y, u, v, a, p, pal);
522             u1 += u;
523             v1 += v;
524             a1 += a;
525             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
526             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
527             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
528             cb++;
529             cr++;
530             p += -wrap3 + BPP;
531             lum += -wrap + 1;
532         }
533         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
534             YUVA_IN(y, u, v, a, p, pal);
535             u1 = u;
536             v1 = v;
537             a1 = a;
538             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
539
540             YUVA_IN(y, u, v, a, p + BPP, pal);
541             u1 += u;
542             v1 += v;
543             a1 += a;
544             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
545             p += wrap3;
546             lum += wrap;
547
548             YUVA_IN(y, u, v, a, p, pal);
549             u1 += u;
550             v1 += v;
551             a1 += a;
552             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
553
554             YUVA_IN(y, u, v, a, p + BPP, pal);
555             u1 += u;
556             v1 += v;
557             a1 += a;
558             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
559
560             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
561             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
562
563             cb++;
564             cr++;
565             p += -wrap3 + 2 * BPP;
566             lum += -wrap + 2;
567         }
568         if (w) {
569             YUVA_IN(y, u, v, a, p, pal);
570             u1 = u;
571             v1 = v;
572             a1 = a;
573             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
574             p += wrap3;
575             lum += wrap;
576             YUVA_IN(y, u, v, a, p, pal);
577             u1 += u;
578             v1 += v;
579             a1 += a;
580             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
581             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
582             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
583             cb++;
584             cr++;
585             p += -wrap3 + BPP;
586             lum += -wrap + 1;
587         }
588         p += wrap3 + (wrap3 - dstw * BPP);
589         lum += wrap + (wrap - dstw - dstx);
590         cb += dst->linesize[1] - width2 - skip2;
591         cr += dst->linesize[2] - width2 - skip2;
592     }
593     /* handle odd height */
594     if (h) {
595         lum += dstx;
596         cb += skip2;
597         cr += skip2;
598
599         if (dstx & 1) {
600             YUVA_IN(y, u, v, a, p, pal);
601             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
602             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
603             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
604             cb++;
605             cr++;
606             lum++;
607             p += BPP;
608         }
609         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
610             YUVA_IN(y, u, v, a, p, pal);
611             u1 = u;
612             v1 = v;
613             a1 = a;
614             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
615
616             YUVA_IN(y, u, v, a, p + BPP, pal);
617             u1 += u;
618             v1 += v;
619             a1 += a;
620             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
621             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
622             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
623             cb++;
624             cr++;
625             p += 2 * BPP;
626             lum += 2;
627         }
628         if (w) {
629             YUVA_IN(y, u, v, a, p, pal);
630             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
631             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
632             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
633         }
634     }
635 }
636
637 static void free_subpicture(SubPicture *sp)
638 {
639     avsubtitle_free(&sp->sub);
640 }
641
642 static void video_image_display(VideoState *is)
643 {
644     VideoPicture *vp;
645     SubPicture *sp;
646     AVPicture pict;
647     float aspect_ratio;
648     int width, height, x, y;
649     SDL_Rect rect;
650     int i;
651
652     vp = &is->pictq[is->pictq_rindex];
653     if (vp->bmp) {
654 #if CONFIG_AVFILTER
655          if (vp->picref->video->sample_aspect_ratio.num == 0)
656              aspect_ratio = 0;
657          else
658              aspect_ratio = av_q2d(vp->picref->video->sample_aspect_ratio);
659 #else
660
661         /* XXX: use variable in the frame */
662         if (is->video_st->sample_aspect_ratio.num)
663             aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
664         else if (is->video_st->codec->sample_aspect_ratio.num)
665             aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
666         else
667             aspect_ratio = 0;
668 #endif
669         if (aspect_ratio <= 0.0)
670             aspect_ratio = 1.0;
671         aspect_ratio *= (float)vp->width / (float)vp->height;
672
673         if (is->subtitle_st) {
674             if (is->subpq_size > 0) {
675                 sp = &is->subpq[is->subpq_rindex];
676
677                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
678                     SDL_LockYUVOverlay (vp->bmp);
679
680                     pict.data[0] = vp->bmp->pixels[0];
681                     pict.data[1] = vp->bmp->pixels[2];
682                     pict.data[2] = vp->bmp->pixels[1];
683
684                     pict.linesize[0] = vp->bmp->pitches[0];
685                     pict.linesize[1] = vp->bmp->pitches[2];
686                     pict.linesize[2] = vp->bmp->pitches[1];
687
688                     for (i = 0; i < sp->sub.num_rects; i++)
689                         blend_subrect(&pict, sp->sub.rects[i],
690                                       vp->bmp->w, vp->bmp->h);
691
692                     SDL_UnlockYUVOverlay (vp->bmp);
693                 }
694             }
695         }
696
697
698         /* XXX: we assume the screen has a 1.0 pixel ratio */
699         height = is->height;
700         width = ((int)rint(height * aspect_ratio)) & ~1;
701         if (width > is->width) {
702             width = is->width;
703             height = ((int)rint(width / aspect_ratio)) & ~1;
704         }
705         x = (is->width - width) / 2;
706         y = (is->height - height) / 2;
707         is->no_background = 0;
708         rect.x = is->xleft + x;
709         rect.y = is->ytop  + y;
710         rect.w = FFMAX(width,  1);
711         rect.h = FFMAX(height, 1);
712         SDL_DisplayYUVOverlay(vp->bmp, &rect);
713     }
714 }
715
716 static inline int compute_mod(int a, int b)
717 {
718     return a < 0 ? a%b + b : a%b;
719 }
720
721 static void video_audio_display(VideoState *s)
722 {
723     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
724     int ch, channels, h, h2, bgcolor, fgcolor;
725     int16_t time_diff;
726     int rdft_bits, nb_freq;
727
728     for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
729         ;
730     nb_freq= 1<<(rdft_bits-1);
731
732     /* compute display index : center on currently output samples */
733     channels = s->audio_st->codec->channels;
734     nb_display_channels = channels;
735     if (!s->paused) {
736         int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
737         n = 2 * channels;
738         delay = s->audio_write_buf_size;
739         delay /= n;
740
741         /* to be more precise, we take into account the time spent since
742            the last buffer computation */
743         if (audio_callback_time) {
744             time_diff = av_gettime() - audio_callback_time;
745             delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
746         }
747
748         delay += 2*data_used;
749         if (delay < data_used)
750             delay = data_used;
751
752         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
753         if (s->show_mode == SHOW_MODE_WAVES) {
754             h= INT_MIN;
755             for(i=0; i<1000; i+=channels){
756                 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
757                 int a= s->sample_array[idx];
758                 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
759                 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
760                 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
761                 int score= a-d;
762                 if(h<score && (b^c)<0){
763                     h= score;
764                     i_start= idx;
765                 }
766             }
767         }
768
769         s->last_i_start = i_start;
770     } else {
771         i_start = s->last_i_start;
772     }
773
774     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
775     if (s->show_mode == SHOW_MODE_WAVES) {
776         fill_rectangle(screen,
777                        s->xleft, s->ytop, s->width, s->height,
778                        bgcolor);
779
780         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
781
782         /* total height for one channel */
783         h = s->height / nb_display_channels;
784         /* graph height / 2 */
785         h2 = (h * 9) / 20;
786         for(ch = 0;ch < nb_display_channels; ch++) {
787             i = i_start + ch;
788             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
789             for(x = 0; x < s->width; x++) {
790                 y = (s->sample_array[i] * h2) >> 15;
791                 if (y < 0) {
792                     y = -y;
793                     ys = y1 - y;
794                 } else {
795                     ys = y1;
796                 }
797                 fill_rectangle(screen,
798                                s->xleft + x, ys, 1, y,
799                                fgcolor);
800                 i += channels;
801                 if (i >= SAMPLE_ARRAY_SIZE)
802                     i -= SAMPLE_ARRAY_SIZE;
803             }
804         }
805
806         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
807
808         for(ch = 1;ch < nb_display_channels; ch++) {
809             y = s->ytop + ch * h;
810             fill_rectangle(screen,
811                            s->xleft, y, s->width, 1,
812                            fgcolor);
813         }
814         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
815     }else{
816         nb_display_channels= FFMIN(nb_display_channels, 2);
817         if(rdft_bits != s->rdft_bits){
818             av_rdft_end(s->rdft);
819             av_free(s->rdft_data);
820             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
821             s->rdft_bits= rdft_bits;
822             s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
823         }
824         {
825             FFTSample *data[2];
826             for(ch = 0;ch < nb_display_channels; ch++) {
827                 data[ch] = s->rdft_data + 2*nb_freq*ch;
828                 i = i_start + ch;
829                 for(x = 0; x < 2*nb_freq; x++) {
830                     double w= (x-nb_freq)*(1.0/nb_freq);
831                     data[ch][x]= s->sample_array[i]*(1.0-w*w);
832                     i += channels;
833                     if (i >= SAMPLE_ARRAY_SIZE)
834                         i -= SAMPLE_ARRAY_SIZE;
835                 }
836                 av_rdft_calc(s->rdft, data[ch]);
837             }
838             //least efficient way to do this; we should of course access it directly, but it's more than fast enough
839             for(y=0; y<s->height; y++){
840                 double w= 1/sqrt(nb_freq);
841                 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
842                 int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
843                        + data[1][2*y+1]*data[1][2*y+1])) : a;
844                 a= FFMIN(a,255);
845                 b= FFMIN(b,255);
846                 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
847
848                 fill_rectangle(screen,
849                             s->xpos, s->height-y, 1, 1,
850                             fgcolor);
851             }
852         }
853         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
854         s->xpos++;
855         if(s->xpos >= s->width)
856             s->xpos= s->xleft;
857     }
858 }
859
860 static void stream_close(VideoState *is)
861 {
862     VideoPicture *vp;
863     int i;
864     /* XXX: use a special url_shutdown call to abort parse cleanly */
865     is->abort_request = 1;
866     SDL_WaitThread(is->read_tid, NULL);
867     SDL_WaitThread(is->refresh_tid, NULL);
868
869     /* free all pictures */
870     for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
871         vp = &is->pictq[i];
872 #if CONFIG_AVFILTER
873         if (vp->picref) {
874             avfilter_unref_buffer(vp->picref);
875             vp->picref = NULL;
876         }
877 #endif
878         if (vp->bmp) {
879             SDL_FreeYUVOverlay(vp->bmp);
880             vp->bmp = NULL;
881         }
882     }
883     SDL_DestroyMutex(is->pictq_mutex);
884     SDL_DestroyCond(is->pictq_cond);
885     SDL_DestroyMutex(is->subpq_mutex);
886     SDL_DestroyCond(is->subpq_cond);
887 #if !CONFIG_AVFILTER
888     if (is->img_convert_ctx)
889         sws_freeContext(is->img_convert_ctx);
890 #endif
891     av_free(is);
892 }
893
894 static void do_exit(VideoState *is)
895 {
896     if (is) {
897         stream_close(is);
898     }
899     av_lockmgr_register(NULL);
900     uninit_opts();
901 #if CONFIG_AVFILTER
902     avfilter_uninit();
903 #endif
904     if (show_status)
905         printf("\n");
906     SDL_Quit();
907     av_log(NULL, AV_LOG_QUIET, "%s", "");
908     exit(0);
909 }
910
911 static int video_open(VideoState *is){
912     int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
913     int w,h;
914
915     if(is_full_screen) flags |= SDL_FULLSCREEN;
916     else               flags |= SDL_RESIZABLE;
917
918     if (is_full_screen && fs_screen_width) {
919         w = fs_screen_width;
920         h = fs_screen_height;
921     } else if(!is_full_screen && screen_width){
922         w = screen_width;
923         h = screen_height;
924 #if CONFIG_AVFILTER
925     }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
926         w = is->out_video_filter->inputs[0]->w;
927         h = is->out_video_filter->inputs[0]->h;
928 #else
929     }else if (is->video_st && is->video_st->codec->width){
930         w = is->video_st->codec->width;
931         h = is->video_st->codec->height;
932 #endif
933     } else {
934         w = 640;
935         h = 480;
936     }
937     if(screen && is->width == screen->w && screen->w == w
938        && is->height== screen->h && screen->h == h)
939         return 0;
940
941 #ifndef __APPLE__
942     screen = SDL_SetVideoMode(w, h, 0, flags);
943 #else
944     /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
945     screen = SDL_SetVideoMode(w, h, 24, flags);
946 #endif
947     if (!screen) {
948         fprintf(stderr, "SDL: could not set video mode - exiting\n");
949         do_exit(is);
950     }
951     if (!window_title)
952         window_title = input_filename;
953     SDL_WM_SetCaption(window_title, window_title);
954
955     is->width = screen->w;
956     is->height = screen->h;
957
958     return 0;
959 }
960
961 /* display the current picture, if any */
962 static void video_display(VideoState *is)
963 {
964     if(!screen)
965         video_open(is);
966     if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
967         video_audio_display(is);
968     else if (is->video_st)
969         video_image_display(is);
970 }
971
972 static int refresh_thread(void *opaque)
973 {
974     VideoState *is= opaque;
975     while(!is->abort_request){
976         SDL_Event event;
977         event.type = FF_REFRESH_EVENT;
978         event.user.data1 = opaque;
979         if(!is->refresh){
980             is->refresh=1;
981             SDL_PushEvent(&event);
982         }
983         //FIXME ideally we should wait for the correct time, but SDL's event passing is so slow it would be silly
984         usleep(is->audio_st && is->show_mode != SHOW_MODE_VIDEO ? rdftspeed*1000 : 5000);
985     }
986     return 0;
987 }
988
989 /* get the current audio clock value */
990 static double get_audio_clock(VideoState *is)
991 {
992     if (is->paused) {
993         return is->audio_current_pts;
994     } else {
995         return is->audio_current_pts_drift + av_gettime() / 1000000.0;
996     }
997 }
998
999 /* get the current video clock value */
1000 static double get_video_clock(VideoState *is)
1001 {
1002     if (is->paused) {
1003         return is->video_current_pts;
1004     } else {
1005         return is->video_current_pts_drift + av_gettime() / 1000000.0;
1006     }
1007 }
1008
1009 /* get the current external clock value */
1010 static double get_external_clock(VideoState *is)
1011 {
1012     int64_t ti;
1013     ti = av_gettime();
1014     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1015 }
1016
1017 /* get the current master clock value */
1018 static double get_master_clock(VideoState *is)
1019 {
1020     double val;
1021
1022     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1023         if (is->video_st)
1024             val = get_video_clock(is);
1025         else
1026             val = get_audio_clock(is);
1027     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1028         if (is->audio_st)
1029             val = get_audio_clock(is);
1030         else
1031             val = get_video_clock(is);
1032     } else {
1033         val = get_external_clock(is);
1034     }
1035     return val;
1036 }
1037
1038 /* seek in the stream */
1039 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1040 {
1041     if (!is->seek_req) {
1042         is->seek_pos = pos;
1043         is->seek_rel = rel;
1044         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1045         if (seek_by_bytes)
1046             is->seek_flags |= AVSEEK_FLAG_BYTE;
1047         is->seek_req = 1;
1048     }
1049 }
1050
1051 /* pause or resume the video */
1052 static void stream_toggle_pause(VideoState *is)
1053 {
1054     if (is->paused) {
1055         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1056         if(is->read_pause_return != AVERROR(ENOSYS)){
1057             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1058         }
1059         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1060     }
1061     is->paused = !is->paused;
1062 }
1063
1064 static double compute_target_time(double frame_current_pts, VideoState *is)
1065 {
1066     double delay, sync_threshold, diff;
1067
1068     /* compute nominal delay */
1069     delay = frame_current_pts - is->frame_last_pts;
1070     if (delay <= 0 || delay >= 10.0) {
1071         /* if incorrect delay, use previous one */
1072         delay = is->frame_last_delay;
1073     } else {
1074         is->frame_last_delay = delay;
1075     }
1076     is->frame_last_pts = frame_current_pts;
1077
1078     /* update delay to follow master synchronisation source */
1079     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1080          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1081         /* if video is slave, we try to correct big delays by
1082            duplicating or deleting a frame */
1083         diff = get_video_clock(is) - get_master_clock(is);
1084
1085         /* skip or repeat frame. We take into account the
1086            delay to compute the threshold. I still don't know
1087            if it is the best guess */
1088         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1089         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1090             if (diff <= -sync_threshold)
1091                 delay = 0;
1092             else if (diff >= sync_threshold)
1093                 delay = 2 * delay;
1094         }
1095     }
1096     is->frame_timer += delay;
1097
1098     av_dlog(NULL, "video: delay=%0.3f pts=%0.3f A-V=%f\n",
1099             delay, frame_current_pts, -diff);
1100
1101     return is->frame_timer;
1102 }
1103
1104 /* called to display each frame */
1105 static void video_refresh(void *opaque)
1106 {
1107     VideoState *is = opaque;
1108     VideoPicture *vp;
1109
1110     SubPicture *sp, *sp2;
1111
1112     if (is->video_st) {
1113 retry:
1114         if (is->pictq_size == 0) {
1115             //nothing to do, no picture to display in the queue
1116         } else {
1117             double time= av_gettime()/1000000.0;
1118             double next_target;
1119             /* dequeue the picture */
1120             vp = &is->pictq[is->pictq_rindex];
1121
1122             if(time < vp->target_clock)
1123                 return;
1124             /* update current video pts */
1125             is->video_current_pts = vp->pts;
1126             is->video_current_pts_drift = is->video_current_pts - time;
1127             is->video_current_pos = vp->pos;
1128             if(is->pictq_size > 1){
1129                 VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
1130                 assert(nextvp->target_clock >= vp->target_clock);
1131                 next_target= nextvp->target_clock;
1132             }else{
1133                 next_target= vp->target_clock + vp->duration;
1134             }
1135             if((framedrop>0 || (framedrop && is->audio_st)) && time > next_target){
1136                 is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
1137                 if(is->pictq_size > 1 || time > next_target + 0.5){
1138                     /* update queue size and signal for next picture */
1139                     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1140                         is->pictq_rindex = 0;
1141
1142                     SDL_LockMutex(is->pictq_mutex);
1143                     is->pictq_size--;
1144                     SDL_CondSignal(is->pictq_cond);
1145                     SDL_UnlockMutex(is->pictq_mutex);
1146                     goto retry;
1147                 }
1148             }
1149
1150             if(is->subtitle_st) {
1151                 if (is->subtitle_stream_changed) {
1152                     SDL_LockMutex(is->subpq_mutex);
1153
1154                     while (is->subpq_size) {
1155                         free_subpicture(&is->subpq[is->subpq_rindex]);
1156
1157                         /* update queue size and signal for next picture */
1158                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1159                             is->subpq_rindex = 0;
1160
1161                         is->subpq_size--;
1162                     }
1163                     is->subtitle_stream_changed = 0;
1164
1165                     SDL_CondSignal(is->subpq_cond);
1166                     SDL_UnlockMutex(is->subpq_mutex);
1167                 } else {
1168                     if (is->subpq_size > 0) {
1169                         sp = &is->subpq[is->subpq_rindex];
1170
1171                         if (is->subpq_size > 1)
1172                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1173                         else
1174                             sp2 = NULL;
1175
1176                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1177                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1178                         {
1179                             free_subpicture(sp);
1180
1181                             /* update queue size and signal for next picture */
1182                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1183                                 is->subpq_rindex = 0;
1184
1185                             SDL_LockMutex(is->subpq_mutex);
1186                             is->subpq_size--;
1187                             SDL_CondSignal(is->subpq_cond);
1188                             SDL_UnlockMutex(is->subpq_mutex);
1189                         }
1190                     }
1191                 }
1192             }
1193
1194             /* display picture */
1195             if (!display_disable)
1196                 video_display(is);
1197
1198             /* update queue size and signal for next picture */
1199             if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1200                 is->pictq_rindex = 0;
1201
1202             SDL_LockMutex(is->pictq_mutex);
1203             is->pictq_size--;
1204             SDL_CondSignal(is->pictq_cond);
1205             SDL_UnlockMutex(is->pictq_mutex);
1206         }
1207     } else if (is->audio_st) {
1208         /* draw the next audio frame */
1209
1210         /* if only audio stream, then display the audio bars (better
1211            than nothing, just to test the implementation) */
1212
1213         /* display picture */
1214         if (!display_disable)
1215             video_display(is);
1216     }
1217     if (show_status) {
1218         static int64_t last_time;
1219         int64_t cur_time;
1220         int aqsize, vqsize, sqsize;
1221         double av_diff;
1222
1223         cur_time = av_gettime();
1224         if (!last_time || (cur_time - last_time) >= 30000) {
1225             aqsize = 0;
1226             vqsize = 0;
1227             sqsize = 0;
1228             if (is->audio_st)
1229                 aqsize = is->audioq.size;
1230             if (is->video_st)
1231                 vqsize = is->videoq.size;
1232             if (is->subtitle_st)
1233                 sqsize = is->subtitleq.size;
1234             av_diff = 0;
1235             if (is->audio_st && is->video_st)
1236                 av_diff = get_audio_clock(is) - get_video_clock(is);
1237             printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1238                    get_master_clock(is),
1239                    av_diff,
1240                    FFMAX(is->skip_frames-1, 0),
1241                    aqsize / 1024,
1242                    vqsize / 1024,
1243                    sqsize,
1244                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
1245                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
1246             fflush(stdout);
1247             last_time = cur_time;
1248         }
1249     }
1250 }
1251
1252 /* allocate a picture (needs to be done in the main thread to avoid
1253    potential locking problems) */
1254 static void alloc_picture(void *opaque)
1255 {
1256     VideoState *is = opaque;
1257     VideoPicture *vp;
1258
1259     vp = &is->pictq[is->pictq_windex];
1260
1261     if (vp->bmp)
1262         SDL_FreeYUVOverlay(vp->bmp);
1263
1264 #if CONFIG_AVFILTER
1265     if (vp->picref)
1266         avfilter_unref_buffer(vp->picref);
1267     vp->picref = NULL;
1268
1269     vp->width   = is->out_video_filter->inputs[0]->w;
1270     vp->height  = is->out_video_filter->inputs[0]->h;
1271     vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1272 #else
1273     vp->width   = is->video_st->codec->width;
1274     vp->height  = is->video_st->codec->height;
1275     vp->pix_fmt = is->video_st->codec->pix_fmt;
1276 #endif
1277
1278     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1279                                    SDL_YV12_OVERLAY,
1280                                    screen);
1281     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1282         /* SDL allocates a buffer smaller than requested if the video
1283          * overlay hardware is unable to support the requested size. */
1284         fprintf(stderr, "Error: the video system does not support an image\n"
1285                         "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1286                         "to reduce the image size.\n", vp->width, vp->height );
1287         do_exit(is);
1288     }
1289
1290     SDL_LockMutex(is->pictq_mutex);
1291     vp->allocated = 1;
1292     SDL_CondSignal(is->pictq_cond);
1293     SDL_UnlockMutex(is->pictq_mutex);
1294 }
1295
1296 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1297 {
1298     VideoPicture *vp;
1299     double frame_delay, pts = pts1;
1300
1301     /* compute the exact PTS for the picture if it is omitted in the stream
1302      * pts1 is the dts of the pkt / pts of the frame */
1303     if (pts != 0) {
1304         /* update video clock with pts, if present */
1305         is->video_clock = pts;
1306     } else {
1307         pts = is->video_clock;
1308     }
1309     /* update video clock for next frame */
1310     frame_delay = av_q2d(is->video_st->codec->time_base);
1311     /* for MPEG2, the frame can be repeated, so we update the
1312        clock accordingly */
1313     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1314     is->video_clock += frame_delay;
1315
1316 #if defined(DEBUG_SYNC) && 0
1317     printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1318            av_get_picture_type_char(src_frame->pict_type), pts, pts1);
1319 #endif
1320
1321     /* wait until we have space to put a new picture */
1322     SDL_LockMutex(is->pictq_mutex);
1323
1324     if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1325         is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));
1326
1327     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1328            !is->videoq.abort_request) {
1329         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1330     }
1331     SDL_UnlockMutex(is->pictq_mutex);
1332
1333     if (is->videoq.abort_request)
1334         return -1;
1335
1336     vp = &is->pictq[is->pictq_windex];
1337
1338     vp->duration = frame_delay;
1339
1340     /* alloc or resize hardware picture buffer */
1341     if (!vp->bmp ||
1342 #if CONFIG_AVFILTER
1343         vp->width  != is->out_video_filter->inputs[0]->w ||
1344         vp->height != is->out_video_filter->inputs[0]->h) {
1345 #else
1346         vp->width != is->video_st->codec->width ||
1347         vp->height != is->video_st->codec->height) {
1348 #endif
1349         SDL_Event event;
1350
1351         vp->allocated = 0;
1352
1353         /* the allocation must be done in the main thread to avoid
1354            locking problems */
1355         event.type = FF_ALLOC_EVENT;
1356         event.user.data1 = is;
1357         SDL_PushEvent(&event);
1358
1359         /* wait until the picture is allocated */
1360         SDL_LockMutex(is->pictq_mutex);
1361         while (!vp->allocated && !is->videoq.abort_request) {
1362             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1363         }
1364         /* if the queue is aborted, we have to pop the pending ALLOC event or wait for the allocation to complete */
1365         if (is->videoq.abort_request && SDL_PeepEvents(&event, 1, SDL_GETEVENT, SDL_EVENTMASK(FF_ALLOC_EVENT)) != 1) {
1366             while (!vp->allocated) {
1367                 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1368             }
1369         }
1370         SDL_UnlockMutex(is->pictq_mutex);
1371
1372         if (is->videoq.abort_request)
1373             return -1;
1374     }
1375
1376     /* if the frame is not skipped, then display it */
1377     if (vp->bmp) {
1378         AVPicture pict;
1379 #if CONFIG_AVFILTER
1380         if(vp->picref)
1381             avfilter_unref_buffer(vp->picref);
1382         vp->picref = src_frame->opaque;
1383 #endif
1384
1385         /* get a pointer to the bitmap */
1386         SDL_LockYUVOverlay (vp->bmp);
1387
1388         memset(&pict,0,sizeof(AVPicture));
1389         pict.data[0] = vp->bmp->pixels[0];
1390         pict.data[1] = vp->bmp->pixels[2];
1391         pict.data[2] = vp->bmp->pixels[1];
1392
1393         pict.linesize[0] = vp->bmp->pitches[0];
1394         pict.linesize[1] = vp->bmp->pitches[2];
1395         pict.linesize[2] = vp->bmp->pitches[1];
1396
1397 #if CONFIG_AVFILTER
1398         //FIXME use direct rendering
1399         av_picture_copy(&pict, (AVPicture *)src_frame,
1400                         vp->pix_fmt, vp->width, vp->height);
1401 #else
1402         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1403         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1404             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1405             PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
1406         if (is->img_convert_ctx == NULL) {
1407             fprintf(stderr, "Cannot initialize the conversion context\n");
1408             exit(1);
1409         }
1410         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1411                   0, vp->height, pict.data, pict.linesize);
1412 #endif
1413         /* update the bitmap content */
1414         SDL_UnlockYUVOverlay(vp->bmp);
1415
1416         vp->pts = pts;
1417         vp->pos = pos;
1418
1419         /* now we can update the picture count */
1420         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1421             is->pictq_windex = 0;
1422         SDL_LockMutex(is->pictq_mutex);
1423         vp->target_clock= compute_target_time(vp->pts, is);
1424
1425         is->pictq_size++;
1426         SDL_UnlockMutex(is->pictq_mutex);
1427     }
1428     return 0;
1429 }
1430
1431 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1432 {
1433     int got_picture, i;
1434
1435     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1436         return -1;
1437
1438     if (pkt->data == flush_pkt.data) {
1439         avcodec_flush_buffers(is->video_st->codec);
1440
1441         SDL_LockMutex(is->pictq_mutex);
1442         //Make sure there are no long delay timers (ideally we should just flush the queue, but that's harder)
1443         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1444             is->pictq[i].target_clock= 0;
1445         }
1446         while (is->pictq_size && !is->videoq.abort_request) {
1447             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1448         }
1449         is->video_current_pos = -1;
1450         SDL_UnlockMutex(is->pictq_mutex);
1451
1452         is->frame_last_pts = AV_NOPTS_VALUE;
1453         is->frame_last_delay = 0;
1454         is->frame_timer = (double)av_gettime() / 1000000.0;
1455         is->skip_frames = 1;
1456         is->skip_frames_index = 0;
1457         return 0;
1458     }
1459
1460     avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt);
1461
1462     if (got_picture) {
1463         if (decoder_reorder_pts == -1) {
1464             *pts = frame->best_effort_timestamp;
1465         } else if (decoder_reorder_pts) {
1466             *pts = frame->pkt_pts;
1467         } else {
1468             *pts = frame->pkt_dts;
1469         }
1470
1471         if (*pts == AV_NOPTS_VALUE) {
1472             *pts = 0;
1473         }
1474
1475         is->skip_frames_index += 1;
1476         if(is->skip_frames_index >= is->skip_frames){
1477             is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
1478             return 1;
1479         }
1480
1481     }
1482     return 0;
1483 }
1484
1485 #if CONFIG_AVFILTER
1486 typedef struct {
1487     VideoState *is;
1488     AVFrame *frame;
1489     int use_dr1;
1490 } FilterPriv;
1491
1492 static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
1493 {
1494     AVFilterContext *ctx = codec->opaque;
1495     AVFilterBufferRef  *ref;
1496     int perms = AV_PERM_WRITE;
1497     int i, w, h, stride[4];
1498     unsigned edge;
1499     int pixel_size;
1500
1501     av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1502
1503     if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
1504         perms |= AV_PERM_NEG_LINESIZES;
1505
1506     if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
1507         if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
1508         if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
1509         if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
1510     }
1511     if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;
1512
1513     w = codec->width;
1514     h = codec->height;
1515
1516     if(av_image_check_size(w, h, 0, codec))
1517         return -1;
1518
1519     avcodec_align_dimensions2(codec, &w, &h, stride);
1520     edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
1521     w += edge << 1;
1522     h += edge << 1;
1523
1524     if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
1525         return -1;
1526
1527     pixel_size = av_pix_fmt_descriptors[ref->format].comp[0].step_minus1+1;
1528     ref->video->w = codec->width;
1529     ref->video->h = codec->height;
1530     for(i = 0; i < 4; i ++) {
1531         unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
1532         unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;
1533
1534         if (ref->data[i]) {
1535             ref->data[i]    += ((edge * pixel_size) >> hshift) + ((edge * ref->linesize[i]) >> vshift);
1536         }
1537         pic->data[i]     = ref->data[i];
1538         pic->linesize[i] = ref->linesize[i];
1539     }
1540     pic->opaque = ref;
1541     pic->age    = INT_MAX;
1542     pic->type   = FF_BUFFER_TYPE_USER;
1543     pic->reordered_opaque = codec->reordered_opaque;
1544     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1545     else           pic->pkt_pts = AV_NOPTS_VALUE;
1546     return 0;
1547 }
1548
1549 static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
1550 {
1551     memset(pic->data, 0, sizeof(pic->data));
1552     avfilter_unref_buffer(pic->opaque);
1553 }
1554
1555 static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
1556 {
1557     AVFilterBufferRef *ref = pic->opaque;
1558
1559     if (pic->data[0] == NULL) {
1560         pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
1561         return codec->get_buffer(codec, pic);
1562     }
1563
1564     if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
1565         (codec->pix_fmt != ref->format)) {
1566         av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
1567         return -1;
1568     }
1569
1570     pic->reordered_opaque = codec->reordered_opaque;
1571     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1572     else           pic->pkt_pts = AV_NOPTS_VALUE;
1573     return 0;
1574 }
1575
1576 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1577 {
1578     FilterPriv *priv = ctx->priv;
1579     AVCodecContext *codec;
1580     if(!opaque) return -1;
1581
1582     priv->is = opaque;
1583     codec    = priv->is->video_st->codec;
1584     codec->opaque = ctx;
1585     if((codec->codec->capabilities & CODEC_CAP_DR1)
1586     ) {
1587         av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1588         priv->use_dr1 = 1;
1589         codec->get_buffer     = input_get_buffer;
1590         codec->release_buffer = input_release_buffer;
1591         codec->reget_buffer   = input_reget_buffer;
1592         codec->thread_safe_callbacks = 1;
1593     }
1594
1595     priv->frame = avcodec_alloc_frame();
1596
1597     return 0;
1598 }
1599
1600 static void input_uninit(AVFilterContext *ctx)
1601 {
1602     FilterPriv *priv = ctx->priv;
1603     av_free(priv->frame);
1604 }
1605
1606 static int input_request_frame(AVFilterLink *link)
1607 {
1608     FilterPriv *priv = link->src->priv;
1609     AVFilterBufferRef *picref;
1610     int64_t pts = 0;
1611     AVPacket pkt;
1612     int ret;
1613
1614     while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
1615         av_free_packet(&pkt);
1616     if (ret < 0)
1617         return -1;
1618
1619     if(priv->use_dr1 && priv->frame->opaque) {
1620         picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
1621     } else {
1622         picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
1623         av_image_copy(picref->data, picref->linesize,
1624                       priv->frame->data, priv->frame->linesize,
1625                       picref->format, link->w, link->h);
1626     }
1627     av_free_packet(&pkt);
1628
1629     avfilter_copy_frame_props(picref, priv->frame);
1630     picref->pts = pts;
1631
1632     avfilter_start_frame(link, picref);
1633     avfilter_draw_slice(link, 0, link->h, 1);
1634     avfilter_end_frame(link);
1635
1636     return 0;
1637 }
1638
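/* advertise exactly one pixel format: the one produced by the video decoder */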
1639 static int input_query_formats(AVFilterContext *ctx)
1640 {
1641     FilterPriv *priv = ctx->priv;
1642     enum PixelFormat pix_fmts[] = {
1643         priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1644     };
1645
1646     avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts));
1647     return 0;
1648 }
1649
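/* configure the output link with the decoder's dimensions, the stream (or
   codec) sample aspect ratio and the stream time base */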
1650 static int input_config_props(AVFilterLink *link)
1651 {
1652     FilterPriv *priv  = link->src->priv;
1653     AVStream *s = priv->is->video_st;
1654
1655     link->w = s->codec->width;
1656     link->h = s->codec->height;
1657     link->sample_aspect_ratio = s->sample_aspect_ratio.num ?
1658         s->sample_aspect_ratio : s->codec->sample_aspect_ratio;
1659     link->time_base = s->time_base;
1660
1661     return 0;
1662 }
1663
1664 static AVFilter input_filter =
1665 {
1666     .name      = "ffplay_input",
1667
1668     .priv_size = sizeof(FilterPriv),
1669
1670     .init      = input_init,
1671     .uninit    = input_uninit,
1672
1673     .query_formats = input_query_formats,
1674
1675     .inputs    = (AVFilterPad[]) {{ .name = NULL }},
1676     .outputs   = (AVFilterPad[]) {{ .name = "default",
1677                                     .type = AVMEDIA_TYPE_VIDEO,
1678                                     .request_frame = input_request_frame,
1679                                     .config_props  = input_config_props, },
1680                                   { .name = NULL }},
1681 };
1682
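/* build the video filter graph: ffplay_input -> [optional user chain] -> buffersink.
   A chain given with -vf is parsed by avfilter_graph_parse() and inserted between
   the "src" and "out" filters, e.g. ffplay -vf "hflip,scale=640:360" file
   (illustrative command line, not taken from this file). The sink is created with
   either the old vsink API or AVBufferSinkParams depending on FF_API_OLD_VSINK_API. */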
1683 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1684 {
1685     char sws_flags_str[128];
1686     int ret;
1687     enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
1688     AVBufferSinkParams *buffersink_params = av_buffersink_params_alloc();
1689     AVFilterContext *filt_src = NULL, *filt_out = NULL;
1690     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1691     graph->scale_sws_opts = av_strdup(sws_flags_str);
1692
1693     if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
1694                                             NULL, is, graph)) < 0)
1695         return ret;
1696 #if FF_API_OLD_VSINK_API
1697     ret = avfilter_graph_create_filter(&filt_out, avfilter_get_by_name("buffersink"), "out",
1698                                        NULL, pix_fmts, graph);
1699 #else
1700     buffersink_params->pixel_fmts = pix_fmts;
1701     ret = avfilter_graph_create_filter(&filt_out, avfilter_get_by_name("buffersink"), "out",
1702                                        NULL, buffersink_params, graph);
1703 #endif
1704     av_freep(&buffersink_params);
1705     if (ret < 0)
1706         return ret;
1707
1708     if(vfilters) {
1709         AVFilterInOut *outputs = avfilter_inout_alloc();
1710         AVFilterInOut *inputs  = avfilter_inout_alloc();
1711
1712         outputs->name    = av_strdup("in");
1713         outputs->filter_ctx = filt_src;
1714         outputs->pad_idx = 0;
1715         outputs->next    = NULL;
1716
1717         inputs->name    = av_strdup("out");
1718         inputs->filter_ctx = filt_out;
1719         inputs->pad_idx = 0;
1720         inputs->next    = NULL;
1721
1722         if ((ret = avfilter_graph_parse(graph, vfilters, &inputs, &outputs, NULL)) < 0)
1723             return ret;
1724     } else {
1725         if ((ret = avfilter_link(filt_src, 0, filt_out, 0)) < 0)
1726             return ret;
1727     }
1728
1729     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1730         return ret;
1731
1732     is->out_video_filter = filt_out;
1733
1734     return ret;
1735 }
1736
1737 #endif  /* CONFIG_AVFILTER */
1738
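/* video decoding thread: pull frames (through the filter graph when
   CONFIG_AVFILTER is set, directly from get_video_frame() otherwise),
   rebuild the graph when the frame size changes, rescale the pts to the
   stream time base and hand the frames to queue_picture() for display */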
1739 static int video_thread(void *arg)
1740 {
1741     VideoState *is = arg;
1742     AVFrame *frame= avcodec_alloc_frame();
1743     int64_t pts_int = AV_NOPTS_VALUE, pos = -1;
1744     double pts;
1745     int ret;
1746
1747 #if CONFIG_AVFILTER
1748     AVFilterGraph *graph = avfilter_graph_alloc();
1749     AVFilterContext *filt_out = NULL;
1750     int last_w = is->video_st->codec->width;
1751     int last_h = is->video_st->codec->height;
1752
1753     if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1754         goto the_end;
1755     filt_out = is->out_video_filter;
1756 #endif
1757
1758     for(;;) {
1759 #if !CONFIG_AVFILTER
1760         AVPacket pkt;
1761 #else
1762         AVFilterBufferRef *picref;
1763         AVRational tb = filt_out->inputs[0]->time_base;
1764 #endif
1765         while (is->paused && !is->videoq.abort_request)
1766             SDL_Delay(10);
1767 #if CONFIG_AVFILTER
1768         if (   last_w != is->video_st->codec->width
1769             || last_h != is->video_st->codec->height) {
1770             av_log(NULL, AV_LOG_INFO, "Frame changed from size:%dx%d to size:%dx%d\n",
1771                    last_w, last_h, is->video_st->codec->width, is->video_st->codec->height);
1772             avfilter_graph_free(&graph);
1773             graph = avfilter_graph_alloc();
1774             if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1775                 goto the_end;
1776             filt_out = is->out_video_filter;
1777             last_w = is->video_st->codec->width;
1778             last_h = is->video_st->codec->height;
1779         }
1780         ret = av_buffersink_get_buffer_ref(filt_out, &picref, 0);
1781         if (picref) {
1782             avfilter_fill_frame_from_video_buffer_ref(frame, picref);
1783             pts_int = picref->pts;
1784             pos     = picref->pos;
1785             frame->opaque = picref;
1786         }
1787
1788         if (av_cmp_q(tb, is->video_st->time_base)) {
1789             av_unused int64_t pts1 = pts_int;
1790             pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1791             av_dlog(NULL, "video_thread(): "
1792                     "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1793                     tb.num, tb.den, pts1,
1794                     is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1795         }
1796 #else
1797         ret = get_video_frame(is, frame, &pts_int, &pkt);
1798         pos = pkt.pos;
1799         av_free_packet(&pkt);
1800 #endif
1801
1802         if (ret < 0) goto the_end;
1803
1804 #if CONFIG_AVFILTER
1805         if (!picref)
1806             continue;
1807 #endif
1808
1809         pts = pts_int*av_q2d(is->video_st->time_base);
1810
1811         ret = queue_picture(is, frame, pts, pos);
1812
1813         if (ret < 0)
1814             goto the_end;
1815
1816         if (is->step)
1817             stream_toggle_pause(is);
1818     }
1819  the_end:
1820 #if CONFIG_AVFILTER
1821     avfilter_graph_free(&graph);
1822 #endif
1823     av_free(frame);
1824     return 0;
1825 }
1826
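/* subtitle decoding thread: decode subtitle packets, convert each palette
   entry from RGBA to YUVA and store the result in the subpicture queue */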
1827 static int subtitle_thread(void *arg)
1828 {
1829     VideoState *is = arg;
1830     SubPicture *sp;
1831     AVPacket pkt1, *pkt = &pkt1;
1832     int got_subtitle;
1833     double pts;
1834     int i, j;
1835     int r, g, b, y, u, v, a;
1836
1837     for(;;) {
1838         while (is->paused && !is->subtitleq.abort_request) {
1839             SDL_Delay(10);
1840         }
1841         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1842             break;
1843
1844         if(pkt->data == flush_pkt.data){
1845             avcodec_flush_buffers(is->subtitle_st->codec);
1846             continue;
1847         }
1848         SDL_LockMutex(is->subpq_mutex);
1849         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1850                !is->subtitleq.abort_request) {
1851             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1852         }
1853         SDL_UnlockMutex(is->subpq_mutex);
1854
1855         if (is->subtitleq.abort_request)
1856             return 0;
1857
1858         sp = &is->subpq[is->subpq_windex];
1859
1860         /* NOTE: pts is the PTS of the _first_ subtitle beginning in
1861            this packet, if any */
1862         pts = 0;
1863         if (pkt->pts != AV_NOPTS_VALUE)
1864             pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1865
1866         avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
1867                                  &got_subtitle, pkt);
1868
1869         if (got_subtitle && sp->sub.format == 0) {
1870             sp->pts = pts;
1871
1872             for (i = 0; i < sp->sub.num_rects; i++)
1873             {
1874                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1875                 {
1876                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1877                     y = RGB_TO_Y_CCIR(r, g, b);
1878                     u = RGB_TO_U_CCIR(r, g, b, 0);
1879                     v = RGB_TO_V_CCIR(r, g, b, 0);
1880                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1881                 }
1882             }
1883
1884             /* now we can update the picture count */
1885             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1886                 is->subpq_windex = 0;
1887             SDL_LockMutex(is->subpq_mutex);
1888             is->subpq_size++;
1889             SDL_UnlockMutex(is->subpq_mutex);
1890         }
1891         av_free_packet(pkt);
1892     }
1893     return 0;
1894 }
1895
1896 /* copy decoded audio samples into the circular buffer used by the waveform/RDFT display */
1897 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1898 {
1899     int size, len;
1900
1901     size = samples_size / sizeof(short);
1902     while (size > 0) {
1903         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1904         if (len > size)
1905             len = size;
1906         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1907         samples += len;
1908         is->sample_array_index += len;
1909         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1910             is->sample_array_index = 0;
1911         size -= len;
1912     }
1913 }
1914
1915 /* return the new audio buffer size (samples can be added or removed
1916    to get better sync when the video or an external clock is the master) */
1917 static int synchronize_audio(VideoState *is, short *samples,
1918                              int samples_size1, double pts)
1919 {
1920     int n, samples_size;
1921     double ref_clock;
1922
1923     n = 2 * is->audio_st->codec->channels;
1924     samples_size = samples_size1;
1925
1926     /* if not master, then we try to remove or add samples to correct the clock */
1927     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1928          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1929         double diff, avg_diff;
1930         int wanted_size, min_size, max_size, nb_samples;
1931
1932         ref_clock = get_master_clock(is);
1933         diff = get_audio_clock(is) - ref_clock;
1934
1935         if (diff < AV_NOSYNC_THRESHOLD) {
1936             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1937             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1938                 /* not enough measurements yet for a reliable estimate */
1939                 is->audio_diff_avg_count++;
1940             } else {
1941                 /* estimate the A-V difference */
1942                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1943
1944                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
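                    /* illustrative numbers: with diff = +0.020 s, a 44100 Hz
                       stereo S16 stream (n = 4) asks for roughly
                       0.020 * 44100 * 4 = 3528 extra bytes, which is then
                       clamped to +-SAMPLE_CORRECTION_PERCENT_MAX percent of
                       the current buffer size */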
1945                     wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1946                     nb_samples = samples_size / n;
1947
1948                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1949                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1950                     if (wanted_size < min_size)
1951                         wanted_size = min_size;
1952                     else if (wanted_size > max_size)
1953                         wanted_size = max_size;
1954
1955                     /* add or remove samples to correct the sync */
1956                     if (wanted_size < samples_size) {
1957                         /* remove samples */
1958                         samples_size = wanted_size;
1959                     } else if (wanted_size > samples_size) {
1960                         uint8_t *samples_end, *q;
1961                         int nb;
1962
1963                         /* add samples by repeating the last sample frame */
1964                         nb = (wanted_size - samples_size);
1965                         samples_end = (uint8_t *)samples + samples_size - n;
1966                         q = samples_end + n;
1967                         while (nb > 0) {
1968                             memcpy(q, samples_end, n);
1969                             q += n;
1970                             nb -= n;
1971                         }
1972                         samples_size = wanted_size;
1973                     }
1974                 }
1975                 av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1976                         diff, avg_diff, samples_size - samples_size1,
1977                         is->audio_clock, is->video_clock, is->audio_diff_threshold);
1978             }
1979         } else {
1980             /* the difference is too large: probably initial PTS errors, so
1981                reset the A-V filter */
1982             is->audio_diff_avg_count = 0;
1983             is->audio_diff_cum = 0;
1984         }
1985     }
1986
1987     return samples_size;
1988 }
1989
1990 /* decode one audio frame and return its uncompressed size in bytes */
1991 static int audio_decode_frame(VideoState *is, double *pts_ptr)
1992 {
1993     AVPacket *pkt_temp = &is->audio_pkt_temp;
1994     AVPacket *pkt = &is->audio_pkt;
1995     AVCodecContext *dec= is->audio_st->codec;
1996     int n, len1, data_size;
1997     double pts;
1998
1999     for(;;) {
2000         /* NOTE: the audio packet can contain several frames */
2001         while (pkt_temp->size > 0) {
2002             data_size = sizeof(is->audio_buf1);
2003             len1 = avcodec_decode_audio3(dec,
2004                                         (int16_t *)is->audio_buf1, &data_size,
2005                                         pkt_temp);
2006             if (len1 < 0) {
2007                 /* if error, we skip the frame */
2008                 pkt_temp->size = 0;
2009                 break;
2010             }
2011
2012             pkt_temp->data += len1;
2013             pkt_temp->size -= len1;
2014             if (data_size <= 0)
2015                 continue;
2016
2017             if (dec->sample_fmt != is->audio_src_fmt) {
2018                 if (is->reformat_ctx)
2019                     av_audio_convert_free(is->reformat_ctx);
2020                 is->reformat_ctx= av_audio_convert_alloc(AV_SAMPLE_FMT_S16, 1,
2021                                                          dec->sample_fmt, 1, NULL, 0);
2022                 if (!is->reformat_ctx) {
2023                     fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
2024                         av_get_sample_fmt_name(dec->sample_fmt),
2025                         av_get_sample_fmt_name(AV_SAMPLE_FMT_S16));
2026                         break;
2027                 }
2028                 is->audio_src_fmt= dec->sample_fmt;
2029             }
2030
2031             if (is->reformat_ctx) {
2032                 const void *ibuf[6]= {is->audio_buf1};
2033                 void *obuf[6]= {is->audio_buf2};
2034                 int istride[6]= {av_get_bytes_per_sample(dec->sample_fmt)};
2035                 int ostride[6]= {2};
2036                 int len= data_size/istride[0];
2037                 if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
2038                     printf("av_audio_convert() failed\n");
2039                     break;
2040                 }
2041                 is->audio_buf= is->audio_buf2;
2042                 /* FIXME: existing code assumes that data_size equals framesize*channels*2;
2043                           remove this legacy cruft */
2044                 data_size= len*2;
2045             }else{
2046                 is->audio_buf= is->audio_buf1;
2047             }
2048
2049             /* the pts comes from the running audio clock (updated from the packet pts when available) */
2050             pts = is->audio_clock;
2051             *pts_ptr = pts;
2052             n = 2 * dec->channels;
2053             is->audio_clock += (double)data_size /
2054                 (double)(n * dec->sample_rate);
2055 #ifdef DEBUG
2056             {
2057                 static double last_clock;
2058                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
2059                        is->audio_clock - last_clock,
2060                        is->audio_clock, pts);
2061                 last_clock = is->audio_clock;
2062             }
2063 #endif
2064             return data_size;
2065         }
2066
2067         /* free the current packet */
2068         if (pkt->data)
2069             av_free_packet(pkt);
2070
2071         if (is->paused || is->audioq.abort_request) {
2072             return -1;
2073         }
2074
2075         /* read next packet */
2076         if (packet_queue_get(&is->audioq, pkt, 1) < 0)
2077             return -1;
2078         if(pkt->data == flush_pkt.data){
2079             avcodec_flush_buffers(dec);
2080             continue;
2081         }
2082
2083         pkt_temp->data = pkt->data;
2084         pkt_temp->size = pkt->size;
2085
2086         /* if the packet has a pts, update the audio clock with it */
2087         if (pkt->pts != AV_NOPTS_VALUE) {
2088             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2089         }
2090     }
2091 }
2092
2093 /* prepare a new audio buffer */
2094 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2095 {
2096     VideoState *is = opaque;
2097     int audio_size, len1;
2098     int bytes_per_sec;
2099     double pts;
2100
2101     audio_callback_time = av_gettime();
2102
2103     while (len > 0) {
2104         if (is->audio_buf_index >= is->audio_buf_size) {
2105             audio_size = audio_decode_frame(is, &pts);
2106             if (audio_size < 0) {
2107                 /* if error, just output silence */
2108                 is->audio_buf = is->audio_buf1;
2109                 is->audio_buf_size = 1024;
2110                 memset(is->audio_buf, 0, is->audio_buf_size);
2111             } else {
2112                 if (is->show_mode != SHOW_MODE_VIDEO)
2113                     update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2114                 audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2115                                                pts);
2116                 is->audio_buf_size = audio_size;
2117             }
2118             is->audio_buf_index = 0;
2119         }
2120         len1 = is->audio_buf_size - is->audio_buf_index;
2121         if (len1 > len)
2122             len1 = len;
2123         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2124         len -= len1;
2125         stream += len1;
2126         is->audio_buf_index += len1;
2127     }
2128     bytes_per_sec = is->audio_st->codec->sample_rate *
2129             2 * is->audio_st->codec->channels;
2130     is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2131     /* Let's assume the audio driver that is used by SDL has two periods. */
2132     is->audio_current_pts = is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / bytes_per_sec;
2133     is->audio_current_pts_drift = is->audio_current_pts - audio_callback_time / 1000000.0;
2134 }
2135
2136 /* open a given stream. Return 0 if OK */
2137 static int stream_component_open(VideoState *is, int stream_index)
2138 {
2139     AVFormatContext *ic = is->ic;
2140     AVCodecContext *avctx;
2141     AVCodec *codec;
2142     SDL_AudioSpec wanted_spec, spec;
2143     AVDictionary *opts;
2144     AVDictionaryEntry *t = NULL;
2145
2146     if (stream_index < 0 || stream_index >= ic->nb_streams)
2147         return -1;
2148     avctx = ic->streams[stream_index]->codec;
2149
2150     opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index]);
2151
2152     /* prepare audio output */
2153     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2154         if (avctx->channels > 0) {
2155             avctx->request_channels = FFMIN(2, avctx->channels);
2156         } else {
2157             avctx->request_channels = 2;
2158         }
2159     }
2160
2161     codec = avcodec_find_decoder(avctx->codec_id);
2162     if (!codec)
2163         return -1;
2164
2165     avctx->workaround_bugs = workaround_bugs;
2166     avctx->lowres = lowres;
2167     if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2168     avctx->idct_algo= idct;
2169     if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2170     avctx->skip_frame= skip_frame;
2171     avctx->skip_idct= skip_idct;
2172     avctx->skip_loop_filter= skip_loop_filter;
2173     avctx->error_recognition= error_recognition;
2174     avctx->error_concealment= error_concealment;
2175     avctx->thread_count= thread_count;
2176
2177     if(codec->capabilities & CODEC_CAP_DR1)
2178         avctx->flags |= CODEC_FLAG_EMU_EDGE;
2179
2180     if (!codec ||
2181         avcodec_open2(avctx, codec, &opts) < 0)
2182         return -1;
2183     if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2184         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2185         return AVERROR_OPTION_NOT_FOUND;
2186     }
2187
2188     /* prepare audio output */
2189     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2190         if(avctx->sample_rate <= 0 || avctx->channels <= 0){
2191             fprintf(stderr, "Invalid sample rate or channel count\n");
2192             return -1;
2193         }
2194         wanted_spec.freq = avctx->sample_rate;
2195         wanted_spec.format = AUDIO_S16SYS;
2196         wanted_spec.channels = avctx->channels;
2197         wanted_spec.silence = 0;
2198         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2199         wanted_spec.callback = sdl_audio_callback;
2200         wanted_spec.userdata = is;
2201         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2202             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2203             return -1;
2204         }
2205         is->audio_hw_buf_size = spec.size;
2206         is->audio_src_fmt= AV_SAMPLE_FMT_S16;
2207     }
2208
2209     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2210     switch(avctx->codec_type) {
2211     case AVMEDIA_TYPE_AUDIO:
2212         is->audio_stream = stream_index;
2213         is->audio_st = ic->streams[stream_index];
2214         is->audio_buf_size = 0;
2215         is->audio_buf_index = 0;
2216
2217         /* init averaging filter */
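        /* exp(log(0.01) / AUDIO_DIFF_AVG_NB) ~= 0.794, so a measurement's weight
           in the exponential average decays to about 1% after AUDIO_DIFF_AVG_NB
           updates */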
2218         is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2219         is->audio_diff_avg_count = 0;
2220         /* since we do not have a precise enough measure of the audio FIFO fullness,
2221            we only correct audio sync when the error is larger than this threshold */
2222         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
2223
2224         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2225         packet_queue_init(&is->audioq);
2226         SDL_PauseAudio(0);
2227         break;
2228     case AVMEDIA_TYPE_VIDEO:
2229         is->video_stream = stream_index;
2230         is->video_st = ic->streams[stream_index];
2231
2232         packet_queue_init(&is->videoq);
2233         is->video_tid = SDL_CreateThread(video_thread, is);
2234         break;
2235     case AVMEDIA_TYPE_SUBTITLE:
2236         is->subtitle_stream = stream_index;
2237         is->subtitle_st = ic->streams[stream_index];
2238         packet_queue_init(&is->subtitleq);
2239
2240         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2241         break;
2242     default:
2243         break;
2244     }
2245     return 0;
2246 }
2247
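/* close a stream component: abort its packet queue, stop the SDL audio device
   or join the decoding thread, free the queued packets, then close the codec
   and reset the corresponding VideoState fields */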
2248 static void stream_component_close(VideoState *is, int stream_index)
2249 {
2250     AVFormatContext *ic = is->ic;
2251     AVCodecContext *avctx;
2252
2253     if (stream_index < 0 || stream_index >= ic->nb_streams)
2254         return;
2255     avctx = ic->streams[stream_index]->codec;
2256
2257     switch(avctx->codec_type) {
2258     case AVMEDIA_TYPE_AUDIO:
2259         packet_queue_abort(&is->audioq);
2260
2261         SDL_CloseAudio();
2262
2263         packet_queue_end(&is->audioq);
2264         if (is->reformat_ctx)
2265             av_audio_convert_free(is->reformat_ctx);
2266         is->reformat_ctx = NULL;
2267         break;
2268     case AVMEDIA_TYPE_VIDEO:
2269         packet_queue_abort(&is->videoq);
2270
2271         /* note: we also signal this condition to make sure we unblock the
2272            video thread in all cases */
2273         SDL_LockMutex(is->pictq_mutex);
2274         SDL_CondSignal(is->pictq_cond);
2275         SDL_UnlockMutex(is->pictq_mutex);
2276
2277         SDL_WaitThread(is->video_tid, NULL);
2278
2279         packet_queue_end(&is->videoq);
2280         break;
2281     case AVMEDIA_TYPE_SUBTITLE:
2282         packet_queue_abort(&is->subtitleq);
2283
2284         /* note: we also signal this condition to make sure we unblock the
2285            subtitle thread in all cases */
2286         SDL_LockMutex(is->subpq_mutex);
2287         is->subtitle_stream_changed = 1;
2288
2289         SDL_CondSignal(is->subpq_cond);
2290         SDL_UnlockMutex(is->subpq_mutex);
2291
2292         SDL_WaitThread(is->subtitle_tid, NULL);
2293
2294         packet_queue_end(&is->subtitleq);
2295         break;
2296     default:
2297         break;
2298     }
2299
2300     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2301     avcodec_close(avctx);
2302     switch(avctx->codec_type) {
2303     case AVMEDIA_TYPE_AUDIO:
2304         is->audio_st = NULL;
2305         is->audio_stream = -1;
2306         break;
2307     case AVMEDIA_TYPE_VIDEO:
2308         is->video_st = NULL;
2309         is->video_stream = -1;
2310         break;
2311     case AVMEDIA_TYPE_SUBTITLE:
2312         is->subtitle_st = NULL;
2313         is->subtitle_stream = -1;
2314         break;
2315     default:
2316         break;
2317     }
2318 }
2319
2320 /* since we have only one decoding thread, we can use a global
2321    variable instead of a thread local variable */
2322 static VideoState *global_video_state;
2323
2324 static int decode_interrupt_cb(void)
2325 {
2326     return (global_video_state && global_video_state->abort_request);
2327 }
2328
2329 /* this thread gets the stream from the disk or the network */
2330 static int read_thread(void *arg)
2331 {
2332     VideoState *is = arg;
2333     AVFormatContext *ic = NULL;
2334     int err, i, ret;
2335     int st_index[AVMEDIA_TYPE_NB];
2336     AVPacket pkt1, *pkt = &pkt1;
2337     int eof=0;
2338     int pkt_in_play_range = 0;
2339     AVDictionaryEntry *t;
2340     AVDictionary **opts;
2341     int orig_nb_streams;
2342
2343     memset(st_index, -1, sizeof(st_index));
2344     is->video_stream = -1;
2345     is->audio_stream = -1;
2346     is->subtitle_stream = -1;
2347
2348     global_video_state = is;
2349     avio_set_interrupt_cb(decode_interrupt_cb);
2350
2351     err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2352     if (err < 0) {
2353         print_error(is->filename, err);
2354         ret = -1;
2355         goto fail;
2356     }
2357     if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2358         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2359         ret = AVERROR_OPTION_NOT_FOUND;
2360         goto fail;
2361     }
2362     is->ic = ic;
2363
2364     if(genpts)
2365         ic->flags |= AVFMT_FLAG_GENPTS;
2366
2367     opts = setup_find_stream_info_opts(ic, codec_opts);
2368     orig_nb_streams = ic->nb_streams;
2369
2370     err = avformat_find_stream_info(ic, opts);
2371     if (err < 0) {
2372         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2373         ret = -1;
2374         goto fail;
2375     }
2376     for (i = 0; i < orig_nb_streams; i++)
2377         av_dict_free(&opts[i]);
2378     av_freep(&opts);
2379
2380     if(ic->pb)
2381         ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end
2382
2383     if(seek_by_bytes<0)
2384         seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2385
2386     /* if seeking was requested, execute it */
2387     if (start_time != AV_NOPTS_VALUE) {
2388         int64_t timestamp;
2389
2390         timestamp = start_time;
2391         /* add the stream start time */
2392         if (ic->start_time != AV_NOPTS_VALUE)
2393             timestamp += ic->start_time;
2394         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2395         if (ret < 0) {
2396             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2397                     is->filename, (double)timestamp / AV_TIME_BASE);
2398         }
2399     }
2400
2401     for (i = 0; i < ic->nb_streams; i++)
2402         ic->streams[i]->discard = AVDISCARD_ALL;
2403     if (!video_disable)
2404         st_index[AVMEDIA_TYPE_VIDEO] =
2405             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2406                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2407     if (!audio_disable)
2408         st_index[AVMEDIA_TYPE_AUDIO] =
2409             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2410                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2411                                 st_index[AVMEDIA_TYPE_VIDEO],
2412                                 NULL, 0);
2413     if (!video_disable)
2414         st_index[AVMEDIA_TYPE_SUBTITLE] =
2415             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2416                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2417                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2418                                  st_index[AVMEDIA_TYPE_AUDIO] :
2419                                  st_index[AVMEDIA_TYPE_VIDEO]),
2420                                 NULL, 0);
2421     if (show_status) {
2422         av_dump_format(ic, 0, is->filename, 0);
2423     }
2424
2425     is->show_mode = show_mode;
2426
2427     /* open the streams */
2428     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2429         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2430     }
2431
2432     ret=-1;
2433     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2434         ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2435     }
2436     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2437     if (is->show_mode == SHOW_MODE_NONE)
2438         is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2439
2440     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2441         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2442     }
2443
2444     if (is->video_stream < 0 && is->audio_stream < 0) {
2445         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2446         ret = -1;
2447         goto fail;
2448     }
2449
2450     for(;;) {
2451         if (is->abort_request)
2452             break;
2453         if (is->paused != is->last_paused) {
2454             is->last_paused = is->paused;
2455             if (is->paused)
2456                 is->read_pause_return= av_read_pause(ic);
2457             else
2458                 av_read_play(ic);
2459         }
2460 #if CONFIG_RTSP_DEMUXER
2461         if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2462             /* wait 10 ms to avoid trying to get another packet */
2463             /* XXX: horrible */
2464             SDL_Delay(10);
2465             continue;
2466         }
2467 #endif
2468         if (is->seek_req) {
2469             int64_t seek_target= is->seek_pos;
2470             int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2471             int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2472 //FIXME the +-2 is because rounding is not done in the correct direction when
2473 //      the seek_pos/seek_rel variables are generated
2474
2475             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2476             if (ret < 0) {
2477                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2478             }else{
2479                 if (is->audio_stream >= 0) {
2480                     packet_queue_flush(&is->audioq);
2481                     packet_queue_put(&is->audioq, &flush_pkt);
2482                 }
2483                 if (is->subtitle_stream >= 0) {
2484                     packet_queue_flush(&is->subtitleq);
2485                     packet_queue_put(&is->subtitleq, &flush_pkt);
2486                 }
2487                 if (is->video_stream >= 0) {
2488                     packet_queue_flush(&is->videoq);
2489                     packet_queue_put(&is->videoq, &flush_pkt);
2490                 }
2491             }
2492             is->seek_req = 0;
2493             eof= 0;
2494         }
2495
2496         /* if the queues are full, no need to read more */
2497         if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2498             || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
2499                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
2500                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
2501             /* wait 10 ms */
2502             SDL_Delay(10);
2503             continue;
2504         }
2505         if(eof) {
2506             if(is->video_stream >= 0){
2507                 av_init_packet(pkt);
2508                 pkt->data=NULL;
2509                 pkt->size=0;
2510                 pkt->stream_index= is->video_stream;
2511                 packet_queue_put(&is->videoq, pkt);
2512             }
2513             SDL_Delay(10);
2514             if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
2515                 if(loop!=1 && (!loop || --loop)){
2516                     stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2517                 }else if(autoexit){
2518                     ret=AVERROR_EOF;
2519                     goto fail;
2520                 }
2521             }
2522             eof=0;
2523             continue;
2524         }
2525         ret = av_read_frame(ic, pkt);
2526         if (ret < 0) {
2527             if (ret == AVERROR_EOF || url_feof(ic->pb))
2528                 eof=1;
2529             if (ic->pb && ic->pb->error)
2530                 break;
2531             SDL_Delay(100); /* wait for user event */
2532             continue;
2533         }
2534         /* check if packet is in play range specified by user, then queue, otherwise discard */
2535         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2536                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2537                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2538                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
2539                 <= ((double)duration/1000000);
2540         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2541             packet_queue_put(&is->audioq, pkt);
2542         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2543             packet_queue_put(&is->videoq, pkt);
2544         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2545             packet_queue_put(&is->subtitleq, pkt);
2546         } else {
2547             av_free_packet(pkt);
2548         }
2549     }
2550     /* wait until the end */
2551     while (!is->abort_request) {
2552         SDL_Delay(100);
2553     }
2554
2555     ret = 0;
2556  fail:
2557     /* disable interrupting */
2558     global_video_state = NULL;
2559
2560     /* close each stream */
2561     if (is->audio_stream >= 0)
2562         stream_component_close(is, is->audio_stream);
2563     if (is->video_stream >= 0)
2564         stream_component_close(is, is->video_stream);
2565     if (is->subtitle_stream >= 0)
2566         stream_component_close(is, is->subtitle_stream);
2567     if (is->ic) {
2568         av_close_input_file(is->ic);
2569         is->ic = NULL; /* safety */
2570     }
2571     avio_set_interrupt_cb(NULL);
2572
2573     if (ret != 0) {
2574         SDL_Event event;
2575
2576         event.type = FF_QUIT_EVENT;
2577         event.user.data1 = is;
2578         SDL_PushEvent(&event);
2579     }
2580     return 0;
2581 }
2582
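/* allocate a VideoState, create the picture/subpicture synchronization
   primitives and spawn the read_thread that opens the file and its streams */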
2583 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2584 {
2585     VideoState *is;
2586
2587     is = av_mallocz(sizeof(VideoState));
2588     if (!is)
2589         return NULL;
2590     av_strlcpy(is->filename, filename, sizeof(is->filename));
2591     is->iformat = iformat;
2592     is->ytop = 0;
2593     is->xleft = 0;
2594
2595     /* start video display */
2596     is->pictq_mutex = SDL_CreateMutex();
2597     is->pictq_cond = SDL_CreateCond();
2598
2599     is->subpq_mutex = SDL_CreateMutex();
2600     is->subpq_cond = SDL_CreateCond();
2601
2602     is->av_sync_type = av_sync_type;
2603     is->read_tid = SDL_CreateThread(read_thread, is);
2604     if (!is->read_tid) {
2605         av_free(is);
2606         return NULL;
2607     }
2608     return is;
2609 }
2610
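/* switch to the next stream of the given type, wrapping around; for
   subtitles a full cycle may end on "no subtitle stream" (index -1) */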
2611 static void stream_cycle_channel(VideoState *is, int codec_type)
2612 {
2613     AVFormatContext *ic = is->ic;
2614     int start_index, stream_index;
2615     AVStream *st;
2616
2617     if (codec_type == AVMEDIA_TYPE_VIDEO)
2618         start_index = is->video_stream;
2619     else if (codec_type == AVMEDIA_TYPE_AUDIO)
2620         start_index = is->audio_stream;
2621     else
2622         start_index = is->subtitle_stream;
2623     if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2624         return;
2625     stream_index = start_index;
2626     for(;;) {
2627         if (++stream_index >= is->ic->nb_streams)
2628         {
2629             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2630             {
2631                 stream_index = -1;
2632                 goto the_end;
2633             } else
2634                 stream_index = 0;
2635         }
2636         if (stream_index == start_index)
2637             return;
2638         st = ic->streams[stream_index];
2639         if (st->codec->codec_type == codec_type) {
2640             /* check that parameters are OK */
2641             switch(codec_type) {
2642             case AVMEDIA_TYPE_AUDIO:
2643                 if (st->codec->sample_rate != 0 &&
2644                     st->codec->channels != 0)
2645                     goto the_end;
2646                 break;
2647             case AVMEDIA_TYPE_VIDEO:
2648             case AVMEDIA_TYPE_SUBTITLE:
2649                 goto the_end;
2650             default:
2651                 break;
2652             }
2653         }
2654     }
2655  the_end:
2656     stream_component_close(is, start_index);
2657     stream_component_open(is, stream_index);
2658 }
2659
2660
2661 static void toggle_full_screen(VideoState *is)
2662 {
2663     is_full_screen = !is_full_screen;
2664     video_open(is);
2665 }
2666
2667 static void toggle_pause(VideoState *is)
2668 {
2669     stream_toggle_pause(is);
2670     is->step = 0;
2671 }
2672
2673 static void step_to_next_frame(VideoState *is)
2674 {
2675     /* if the stream is paused, unpause it, then step */
2676     if (is->paused)
2677         stream_toggle_pause(is);
2678     is->step = 1;
2679 }
2680
2681 static void toggle_audio_display(VideoState *is)
2682 {
2683     int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2684     is->show_mode = (is->show_mode + 1) % SHOW_MODE_NB;
2685     fill_rectangle(screen,
2686                 is->xleft, is->ytop, is->width, is->height,
2687                 bgcolor);
2688     SDL_UpdateRect(screen, is->xleft, is->ytop, is->width, is->height);
2689 }
2690
2691 /* main event loop: handle keyboard, mouse, resize and refresh events from SDL */
2692 static void event_loop(VideoState *cur_stream)
2693 {
2694     SDL_Event event;
2695     double incr, pos, frac;
2696
2697     for(;;) {
2698         double x;
2699         SDL_WaitEvent(&event);
2700         switch(event.type) {
2701         case SDL_KEYDOWN:
2702             if (exit_on_keydown) {
2703                 do_exit(cur_stream);
2704                 break;
2705             }
2706             switch(event.key.keysym.sym) {
2707             case SDLK_ESCAPE:
2708             case SDLK_q:
2709                 do_exit(cur_stream);
2710                 break;
2711             case SDLK_f:
2712                 toggle_full_screen(cur_stream);
2713                 break;
2714             case SDLK_p:
2715             case SDLK_SPACE:
2716                 toggle_pause(cur_stream);
2717                 break;
2718             case SDLK_s: //S: Step to next frame
2719                 step_to_next_frame(cur_stream);
2720                 break;
2721             case SDLK_a:
2722                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2723                 break;
2724             case SDLK_v:
2725                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2726                 break;
2727             case SDLK_t:
2728                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2729                 break;
2730             case SDLK_w:
2731                 toggle_audio_display(cur_stream);
2732                 break;
2733             case SDLK_LEFT:
2734                 incr = -10.0;
2735                 goto do_seek;
2736             case SDLK_RIGHT:
2737                 incr = 10.0;
2738                 goto do_seek;
2739             case SDLK_UP:
2740                 incr = 60.0;
2741                 goto do_seek;
2742             case SDLK_DOWN:
2743                 incr = -60.0;
2744             do_seek:
2745                 if (seek_by_bytes) {
2746                     if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
2747                         pos= cur_stream->video_current_pos;
2748                     }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
2749                         pos= cur_stream->audio_pkt.pos;
2750                     }else
2751                         pos = avio_tell(cur_stream->ic->pb);
2752                     if (cur_stream->ic->bit_rate)
2753                         incr *= cur_stream->ic->bit_rate / 8.0;
2754                     else
2755                         incr *= 180000.0;
2756                     pos += incr;
2757                     stream_seek(cur_stream, pos, incr, 1);
2758                 } else {
2759                     pos = get_master_clock(cur_stream);
2760                     pos += incr;
2761                     stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2762                 }
2763                 break;
2764             default:
2765                 break;
2766             }
2767             break;
2768         case SDL_MOUSEBUTTONDOWN:
2769             if (exit_on_mousedown) {
2770                 do_exit(cur_stream);
2771                 break;
2772             }
2773         case SDL_MOUSEMOTION:
2774             if(event.type ==SDL_MOUSEBUTTONDOWN){
2775                 x= event.button.x;
2776             }else{
2777                 if(event.motion.state != SDL_PRESSED)
2778                     break;
2779                 x= event.motion.x;
2780             }
2781             if(seek_by_bytes || cur_stream->ic->duration<=0){
2782                 uint64_t size=  avio_size(cur_stream->ic->pb);
2783                 stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2784             }else{
2785                 int64_t ts;
2786                 int ns, hh, mm, ss;
2787                 int tns, thh, tmm, tss;
2788                 tns = cur_stream->ic->duration/1000000LL;
2789                 thh = tns/3600;
2790                 tmm = (tns%3600)/60;
2791                 tss = (tns%60);
2792                 frac = x/cur_stream->width;
2793                 ns = frac*tns;
2794                 hh = ns/3600;
2795                 mm = (ns%3600)/60;
2796                 ss = (ns%60);
2797                 fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2798                         hh, mm, ss, thh, tmm, tss);
2799                 ts = frac*cur_stream->ic->duration;
2800                 if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2801                     ts += cur_stream->ic->start_time;
2802                 stream_seek(cur_stream, ts, 0, 0);
2803             }
2804             break;
2805         case SDL_VIDEORESIZE:
2806             screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2807                                       SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2808             screen_width = cur_stream->width = event.resize.w;
2809             screen_height= cur_stream->height= event.resize.h;
2810             break;
2811         case SDL_QUIT:
2812         case FF_QUIT_EVENT:
2813             do_exit(cur_stream);
2814             break;
2815         case FF_ALLOC_EVENT:
2816             video_open(event.user.data1);
2817             alloc_picture(event.user.data1);
2818             break;
2819         case FF_REFRESH_EVENT:
2820             video_refresh(event.user.data1);
2821             cur_stream->refresh=0;
2822             break;
2823         default:
2824             break;
2825         }
2826     }
2827 }
2828
2829 static int opt_frame_size(const char *opt, const char *arg)
2830 {
2831     av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n");
2832     return opt_default("video_size", arg);
2833 }
2834
2835 static int opt_width(const char *opt, const char *arg)
2836 {
2837     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2838     return 0;
2839 }
2840
2841 static int opt_height(const char *opt, const char *arg)
2842 {
2843     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2844     return 0;
2845 }
2846
2847 static int opt_format(const char *opt, const char *arg)
2848 {
2849     file_iformat = av_find_input_format(arg);
2850     if (!file_iformat) {
2851         fprintf(stderr, "Unknown input format: %s\n", arg);
2852         return AVERROR(EINVAL);
2853     }
2854     return 0;
2855 }
2856
2857 static int opt_frame_pix_fmt(const char *opt, const char *arg)
2858 {
2859     av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n");
2860     return opt_default("pixel_format", arg);
2861 }
2862
2863 static int opt_sync(const char *opt, const char *arg)
2864 {
2865     if (!strcmp(arg, "audio"))
2866         av_sync_type = AV_SYNC_AUDIO_MASTER;
2867     else if (!strcmp(arg, "video"))
2868         av_sync_type = AV_SYNC_VIDEO_MASTER;
2869     else if (!strcmp(arg, "ext"))
2870         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2871     else {
2872         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2873         exit(1);
2874     }
2875     return 0;
2876 }
2877
2878 static int opt_seek(const char *opt, const char *arg)
2879 {
2880     start_time = parse_time_or_die(opt, arg, 1);
2881     return 0;
2882 }
2883
2884 static int opt_duration(const char *opt, const char *arg)
2885 {
2886     duration = parse_time_or_die(opt, arg, 1);
2887     return 0;
2888 }
2889
2890 static int opt_thread_count(const char *opt, const char *arg)
2891 {
2892     thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2893 #if !HAVE_THREADS
2894     fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2895 #endif
2896     return 0;
2897 }
2898
2899 static int opt_show_mode(const char *opt, const char *arg)
2900 {
2901     show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
2902                 !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
2903                 !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT  :
2904                 parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
2905     return 0;
2906 }
2907
2908 static void opt_input_file(void *optctx, const char *filename)
2909 {
2910     if (input_filename) {
2911         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
2912                 filename, input_filename);
2913         exit_program(1);
2914     }
2915     if (!strcmp(filename, "-"))
2916         filename = "pipe:";
2917     input_filename = filename;
2918 }
2919
2920 static int dummy;
2921
2922 static const OptionDef options[] = {
2923 #include "cmdutils_common_opts.h"
2924     { "x", HAS_ARG, {(void*)opt_width}, "force displayed width", "width" },
2925     { "y", HAS_ARG, {(void*)opt_height}, "force displayed height", "height" },
2926     { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
2927     { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
2928     { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
2929     { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
2930     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
2931     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
2932     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
2933     { "ss", HAS_ARG, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
2934     { "t", HAS_ARG, {(void*)&opt_duration}, "play \"duration\" seconds of audio/video", "duration" },
2935     { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
2936     { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
2937     { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
2938     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
2939     { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
2940     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
2941     { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
2942     { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
2943     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
2944     { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
2945     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
2946     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
2947     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
2948     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
2949     { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
2950     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
2951     { "sync", HAS_ARG | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
2952     { "threads", HAS_ARG | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
2953     { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
2954     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
2955     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
2956     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
2957     { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
2958     { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
2959 #if CONFIG_AVFILTER
2960     { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
2961 #endif
2962     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
2963     { "showmode", HAS_ARG, {(void*)opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
2964     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
2965     { "i", OPT_BOOL, {(void *)&dummy}, "read specified file", "input_file"},
2966     { NULL, },
2967 };
2968
2969 static void show_usage(void)
2970 {
2971     printf("Simple media player\n");
2972     printf("usage: %s [options] input_file\n", program_name);
2973     printf("\n");
2974 }
2975
2976 static int opt_help(const char *opt, const char *arg)
2977 {
2978     const AVClass *class;
2979     av_log_set_callback(log_callback_help);
2980     show_usage();
2981     show_help_options(options, "Main options:\n",
2982                       OPT_EXPERT, 0);
2983     show_help_options(options, "\nAdvanced options:\n",
2984                       OPT_EXPERT, OPT_EXPERT);
2985     printf("\n");
2986     class = avcodec_get_class();
2987     av_opt_show2(&class, NULL,
2988                  AV_OPT_FLAG_DECODING_PARAM, 0);
2989     printf("\n");
2990     class = avformat_get_class();
2991     av_opt_show2(&class, NULL,
2992                  AV_OPT_FLAG_DECODING_PARAM, 0);
2993 #if !CONFIG_AVFILTER
2994     printf("\n");
2995     class = sws_get_class();
2996     av_opt_show2(&class, NULL,
2997                  AV_OPT_FLAG_ENCODING_PARAM, 0);
2998 #endif
2999     printf("\nWhile playing:\n"
3000            "q, ESC              quit\n"
3001            "f                   toggle full screen\n"
3002            "p, SPC              pause\n"
3003            "a                   cycle audio channel\n"
3004            "v                   cycle video channel\n"
3005            "t                   cycle subtitle channel\n"
3006            "w                   show audio waves\n"
3007            "s                   activate frame-step mode\n"
3008            "left/right          seek backward/forward 10 seconds\n"
3009            "down/up             seek backward/forward 1 minute\n"
3010            "mouse click         seek to percentage in file corresponding to fraction of width\n"
3011            );
3012     return 0;
3013 }
3014
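/* lock manager callback registered with av_lockmgr_register(): maps the
   libav* lock operations onto SDL mutexes */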
3015 static int lockmgr(void **mtx, enum AVLockOp op)
3016 {
3017    switch(op) {
3018       case AV_LOCK_CREATE:
3019           *mtx = SDL_CreateMutex();
3020           if(!*mtx)
3021               return 1;
3022           return 0;
3023       case AV_LOCK_OBTAIN:
3024           return !!SDL_LockMutex(*mtx);
3025       case AV_LOCK_RELEASE:
3026           return !!SDL_UnlockMutex(*mtx);
3027       case AV_LOCK_DESTROY:
3028           SDL_DestroyMutex(*mtx);
3029           return 0;
3030    }
3031    return 1;
3032 }
3033
3034 /* program entry point */
3035 int main(int argc, char **argv)
3036 {
3037     int flags;
3038     VideoState *is;
3039
3040     av_log_set_flags(AV_LOG_SKIP_REPEATED);
3041
3042     /* register all codecs, demuxers and protocols */
3043     avcodec_register_all();
3044 #if CONFIG_AVDEVICE
3045     avdevice_register_all();
3046 #endif
3047 #if CONFIG_AVFILTER
3048     avfilter_register_all();
3049 #endif
3050     av_register_all();
3051
3052     init_opts();
3053
3054     show_banner();
3055
3056     parse_options(NULL, argc, argv, options, opt_input_file);
3057
3058     if (!input_filename) {
3059         show_usage();
3060         fprintf(stderr, "An input file must be specified\n");
3061         fprintf(stderr, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3062         exit(1);
3063     }
3064
3065     if (display_disable) {
3066         video_disable = 1;
3067     }
3068     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3069     if (audio_disable)
3070         flags &= ~SDL_INIT_AUDIO;
3071 #if !defined(__MINGW32__) && !defined(__APPLE__)
3072     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3073 #endif
3074     if (SDL_Init (flags)) {
3075         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3076         fprintf(stderr, "(Did you set the DISPLAY variable?)\n");
3077         exit(1);
3078     }
3079
3080     if (!display_disable) {
3081 #if HAVE_SDL_VIDEO_SIZE
3082         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3083         fs_screen_width = vi->current_w;
3084         fs_screen_height = vi->current_h;
3085 #endif
3086     }
3087
3088     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3089     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3090     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3091
3092     if (av_lockmgr_register(lockmgr)) {
3093         fprintf(stderr, "Could not initialize lock manager!\n");
3094         do_exit(NULL);
3095     }
3096
3097     av_init_packet(&flush_pkt);
3098     flush_pkt.data = (uint8_t *)"FLUSH";
3099
3100     is = stream_open(input_filename, file_iformat);
3101     if (!is) {
3102         fprintf(stderr, "Failed to initialize VideoState!\n");
3103         do_exit(NULL);
3104     }
3105
3106     event_loop(is);
3107
3108     /* never returns */
3109
3110     return 0;
3111 }