OSDN Git Service

imdct/x86: Use "s->mdct_size" instead of "1 << s->mdct_bits".
[coroid/libav_saccubus.git] / ffplay.c
1 /*
2  * FFplay : Simple Media Player based on the FFmpeg libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include "libavutil/avstring.h"
27 #include "libavutil/colorspace.h"
28 #include "libavutil/pixdesc.h"
29 #include "libavcore/parseutils.h"
30 #include "libavformat/avformat.h"
31 #include "libavdevice/avdevice.h"
32 #include "libswscale/swscale.h"
33 #include "libavcodec/audioconvert.h"
34 #include "libavcodec/opt.h"
35 #include "libavcodec/avfft.h"
36
37 #if CONFIG_AVFILTER
38 # include "libavfilter/avfilter.h"
39 # include "libavfilter/avfiltergraph.h"
40 # include "libavfilter/graphparser.h"
41 #endif
42
43 #include "cmdutils.h"
44
45 #include <SDL.h>
46 #include <SDL_thread.h>
47
48 #ifdef __MINGW32__
49 #undef main /* We don't want SDL to override our main() */
50 #endif
51
52 #include <unistd.h>
53 #include <assert.h>
54
const char program_name[] = "FFplay";
const int program_birth_year = 2003;

//#define DEBUG_SYNC

/* hard cap on the total bytes buffered across all packet queues */
#define MAX_QUEUE_SIZE (15 * 1024 * 1024)
/* keep reading while the audio queue holds less than this many bytes */
#define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
#define MIN_FRAMES 5

/* SDL audio buffer size, in samples. Should be small to have precise
   A/V sync as SDL does not have hardware buffer fullness info. */
#define SDL_AUDIO_BUFFER_SIZE 1024

/* no AV sync correction is done if below the AV sync threshold */
#define AV_SYNC_THRESHOLD 0.01
/* no AV correction is done if too big error */
#define AV_NOSYNC_THRESHOLD 10.0

#define FRAME_SKIP_FACTOR 0.05

/* maximum audio speed change to get correct sync */
#define SAMPLE_CORRECTION_PERCENT_MAX 10

/* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
#define AUDIO_DIFF_AVG_NB   20

/* NOTE: the size must be big enough to compensate the hardware audio buffersize size */
#define SAMPLE_ARRAY_SIZE (2*65536)

static int sws_flags = SWS_BICUBIC;
85
/* Thread-safe FIFO of demuxed packets, shared between the thread that
 * reads the file (producer) and the decoder threads (consumers). */
typedef struct PacketQueue {
    AVPacketList *first_pkt, *last_pkt; /* singly linked list: head and tail */
    int nb_packets;      /* number of packets currently queued */
    int size;            /* sum of packet payload sizes + list node overhead, in bytes */
    int abort_request;   /* when set, packet_queue_get() stops blocking and returns -1 */
    SDL_mutex *mutex;    /* guards every field of this struct */
    SDL_cond *cond;      /* signalled when a packet is queued or abort is requested */
} PacketQueue;
94
/* depth of the decoded-picture and subtitle display queues */
#define VIDEO_PICTURE_QUEUE_SIZE 2
#define SUBPICTURE_QUEUE_SIZE 4

/* One decoded video frame queued for display. */
typedef struct VideoPicture {
    double pts;                                  ///<presentation time stamp for this picture
    double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
    int64_t pos;                                 ///<byte position in file
    SDL_Overlay *bmp;                            /* SDL YUV overlay holding the pixel data */
    int width, height; /* source height & width */
    int allocated;     /* NOTE(review): presumably set once bmp is allocated — managed outside this chunk */
    enum PixelFormat pix_fmt;

#if CONFIG_AVFILTER
    AVFilterBufferRef *picref;                   /* filtered-frame buffer reference */
#endif
} VideoPicture;
111
/* One decoded subtitle queued for blending onto the video. */
typedef struct SubPicture {
    double pts; /* presentation time stamp for this picture */
    AVSubtitle sub;
} SubPicture;
116
/* Selects which clock the other streams are slaved to for A/V sync. */
enum {
    AV_SYNC_AUDIO_MASTER, /* default choice */
    AV_SYNC_VIDEO_MASTER,
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
};
122
/* Aggregate player state: demuxer context, per-stream decode state,
 * the audio/video/subtitle packet and picture queues, and the clocks
 * used for A/V synchronisation. One instance per opened file. */
typedef struct VideoState {
    SDL_Thread *parse_tid;      /* demux/read thread */
    SDL_Thread *video_tid;      /* video decode thread */
    SDL_Thread *refresh_tid;    /* thread that posts FF_REFRESH_EVENTs (see refresh_thread) */
    AVInputFormat *iformat;
    int no_background;
    int abort_request;          /* set to ask all threads to exit */
    int paused;
    int last_paused;
    int seek_req;               /* non-zero while a seek is pending */
    int seek_flags;
    int64_t seek_pos;
    int64_t seek_rel;
    int read_pause_return;
    AVFormatContext *ic;
    int dtg_active_format;

    int audio_stream;

    int av_sync_type;           /* one of the AV_SYNC_* values above */
    double external_clock; /* external clock base */
    int64_t external_clock_time;

    double audio_clock;
    double audio_diff_cum; /* used for AV difference average computation */
    double audio_diff_avg_coef;
    double audio_diff_threshold;
    int audio_diff_avg_count;
    AVStream *audio_st;
    PacketQueue audioq;
    int audio_hw_buf_size;
    /* samples output by the codec. we reserve more space for avsync
       compensation */
    DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    uint8_t *audio_buf;
    unsigned int audio_buf_size; /* in bytes */
    int audio_buf_index; /* in bytes */
    AVPacket audio_pkt_temp;
    AVPacket audio_pkt;
    enum SampleFormat audio_src_fmt;
    AVAudioConvert *reformat_ctx;

    int show_audio; /* if true, display audio samples */
    /* ring buffer of recent output samples, read by video_audio_display() */
    int16_t sample_array[SAMPLE_ARRAY_SIZE];
    int sample_array_index;
    int last_i_start;
    RDFTContext *rdft;          /* real-DFT context for the spectrum display */
    int rdft_bits;
    FFTSample *rdft_data;
    int xpos;                   /* current column of the scrolling spectrum display */

    SDL_Thread *subtitle_tid;
    int subtitle_stream;
    int subtitle_stream_changed;
    AVStream *subtitle_st;
    PacketQueue subtitleq;
    SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
    int subpq_size, subpq_rindex, subpq_windex;
    SDL_mutex *subpq_mutex;
    SDL_cond *subpq_cond;

    double frame_timer;
    double frame_last_pts;
    double frame_last_delay;
    double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
    int video_stream;
    AVStream *video_st;
    PacketQueue videoq;
    double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
    double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
    int64_t video_current_pos;                   ///<current displayed file pos
    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
    int pictq_size, pictq_rindex, pictq_windex;
    SDL_mutex *pictq_mutex;
    SDL_cond *pictq_cond;
#if !CONFIG_AVFILTER
    struct SwsContext *img_convert_ctx;
#endif

    //    QETimer *video_timer;
    char filename[1024];
    /* window geometry used by the display functions */
    int width, height, xleft, ytop;

    int64_t faulty_pts;
    int64_t faulty_dts;
    int64_t last_dts_for_fault_detection;
    int64_t last_pts_for_fault_detection;

#if CONFIG_AVFILTER
    AVFilterContext *out_video_filter;          ///<the last filter in the video chain
#endif

    float skip_frames;
    float skip_frames_index;
    int refresh;
} VideoState;
220
static void show_help(void);
static int audio_write_get_buf_size(VideoState *is);

/* options specified by the user */
static AVInputFormat *file_iformat;
static const char *input_filename;
static const char *window_title;
static int fs_screen_width;
static int fs_screen_height;
static int screen_width = 0;
static int screen_height = 0;
static int frame_width = 0;
static int frame_height = 0;
static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
static int audio_disable;
static int video_disable;
/* user-requested stream index per media type; -1 means auto-select */
static int wanted_stream[AVMEDIA_TYPE_NB]={
    [AVMEDIA_TYPE_AUDIO]=-1,
    [AVMEDIA_TYPE_VIDEO]=-1,
    [AVMEDIA_TYPE_SUBTITLE]=-1,
};
static int seek_by_bytes=-1;
static int display_disable;
static int show_status = 1;
static int av_sync_type = AV_SYNC_AUDIO_MASTER;
static int64_t start_time = AV_NOPTS_VALUE;
static int64_t duration = AV_NOPTS_VALUE;
static int debug = 0;
static int debug_mv = 0;
static int step = 0;
static int thread_count = 1;
static int workaround_bugs = 1;
static int fast = 0;
static int genpts = 0;
static int lowres = 0;
static int idct = FF_IDCT_AUTO;
static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
static int error_recognition = FF_ER_CAREFUL;
static int error_concealment = 3;
static int decoder_reorder_pts= -1;
static int autoexit;
static int exit_on_keydown;
static int exit_on_mousedown;
static int loop=1;
static int framedrop=1;

static int rdftspeed=20;
#if CONFIG_AVFILTER
static char *vfilters = NULL;
#endif

/* current context */
static int is_full_screen;
static VideoState *cur_stream;
/* time (av_gettime) of the last audio callback; used by video_audio_display() */
static int64_t audio_callback_time;

/* sentinel packet queued after a seek; never duplicated or freed */
static AVPacket flush_pkt;

/* custom SDL event codes used by the player's event loop */
#define FF_ALLOC_EVENT   (SDL_USEREVENT)
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
#define FF_QUIT_EVENT    (SDL_USEREVENT + 2)

/* the SDL output surface (window) */
static SDL_Surface *screen;

static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
288
289 /* packet queue handling */
290 static void packet_queue_init(PacketQueue *q)
291 {
292     memset(q, 0, sizeof(PacketQueue));
293     q->mutex = SDL_CreateMutex();
294     q->cond = SDL_CreateCond();
295     packet_queue_put(q, &flush_pkt);
296 }
297
298 static void packet_queue_flush(PacketQueue *q)
299 {
300     AVPacketList *pkt, *pkt1;
301
302     SDL_LockMutex(q->mutex);
303     for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
304         pkt1 = pkt->next;
305         av_free_packet(&pkt->pkt);
306         av_freep(&pkt);
307     }
308     q->last_pkt = NULL;
309     q->first_pkt = NULL;
310     q->nb_packets = 0;
311     q->size = 0;
312     SDL_UnlockMutex(q->mutex);
313 }
314
/* Destroy a packet queue: flush all packets first, then release the
 * SDL mutex and condition variable. The queue must not be used after. */
static void packet_queue_end(PacketQueue *q)
{
    packet_queue_flush(q);
    SDL_DestroyMutex(q->mutex);
    SDL_DestroyCond(q->cond);
}
321
322 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
323 {
324     AVPacketList *pkt1;
325
326     /* duplicate the packet */
327     if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
328         return -1;
329
330     pkt1 = av_malloc(sizeof(AVPacketList));
331     if (!pkt1)
332         return -1;
333     pkt1->pkt = *pkt;
334     pkt1->next = NULL;
335
336
337     SDL_LockMutex(q->mutex);
338
339     if (!q->last_pkt)
340
341         q->first_pkt = pkt1;
342     else
343         q->last_pkt->next = pkt1;
344     q->last_pkt = pkt1;
345     q->nb_packets++;
346     q->size += pkt1->pkt.size + sizeof(*pkt1);
347     /* XXX: should duplicate packet data in DV case */
348     SDL_CondSignal(q->cond);
349
350     SDL_UnlockMutex(q->mutex);
351     return 0;
352 }
353
/* Request abort: set the flag under the lock and wake any consumer
 * blocked in packet_queue_get() so it can observe the flag. */
static void packet_queue_abort(PacketQueue *q)
{
    SDL_LockMutex(q->mutex);

    q->abort_request = 1;

    SDL_CondSignal(q->cond);

    SDL_UnlockMutex(q->mutex);
}
364
365 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
366 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
367 {
368     AVPacketList *pkt1;
369     int ret;
370
371     SDL_LockMutex(q->mutex);
372
373     for(;;) {
374         if (q->abort_request) {
375             ret = -1;
376             break;
377         }
378
379         pkt1 = q->first_pkt;
380         if (pkt1) {
381             q->first_pkt = pkt1->next;
382             if (!q->first_pkt)
383                 q->last_pkt = NULL;
384             q->nb_packets--;
385             q->size -= pkt1->pkt.size + sizeof(*pkt1);
386             *pkt = pkt1->pkt;
387             av_free(pkt1);
388             ret = 1;
389             break;
390         } else if (!block) {
391             ret = 0;
392             break;
393         } else {
394             SDL_CondWait(q->cond, q->mutex);
395         }
396     }
397     SDL_UnlockMutex(q->mutex);
398     return ret;
399 }
400
401 static inline void fill_rectangle(SDL_Surface *screen,
402                                   int x, int y, int w, int h, int color)
403 {
404     SDL_Rect rect;
405     rect.x = x;
406     rect.y = y;
407     rect.w = w;
408     rect.h = h;
409     SDL_FillRect(screen, &rect, color);
410 }
411
#if 0
/* draw only the border of a rectangle */
/* NOTE(review): dead code — compiled out by #if 0; kept for reference.
 * Paints the four background bands around an inner w x h rectangle at
 * (x, y) inside the window area of s. */
void fill_border(VideoState *s, int x, int y, int w, int h, int color)
{
    int w1, w2, h1, h2;

    /* fill the background */
    w1 = x;
    if (w1 < 0)
        w1 = 0;
    w2 = s->width - (x + w);
    if (w2 < 0)
        w2 = 0;
    h1 = y;
    if (h1 < 0)
        h1 = 0;
    h2 = s->height - (y + h);
    if (h2 < 0)
        h2 = 0;
    fill_rectangle(screen,
                   s->xleft, s->ytop,
                   w1, s->height,
                   color);
    fill_rectangle(screen,
                   s->xleft + s->width - w2, s->ytop,
                   w2, s->height,
                   color);
    fill_rectangle(screen,
                   s->xleft + w1, s->ytop,
                   s->width - w1 - w2, h1,
                   color);
    fill_rectangle(screen,
                   s->xleft + w1, s->ytop + s->height - h2,
                   s->width - w1 - w2, h2,
                   color);
}
#endif
449
/* Blend newp over oldp with alpha a. The shift s is used when oldp/newp
 * hold sums of 2^s samples, so the accumulated values are scaled back. */
#define ALPHA_BLEND(a, oldp, newp, s)\
((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))

/* Unpack a 32-bit ARGB pixel pointed to by s into r, g, b, a. */
#define RGBA_IN(r, g, b, a, s)\
{\
    unsigned int v = ((const uint32_t *)(s))[0];\
    a = (v >> 24) & 0xff;\
    r = (v >> 16) & 0xff;\
    g = (v >> 8) & 0xff;\
    b = v & 0xff;\
}

/* Look up the 8-bit palette index at s in pal (32-bit AYUV entries)
 * and unpack it into y, u, v, a. */
#define YUVA_IN(y, u, v, a, s, pal)\
{\
    unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
    a = (val >> 24) & 0xff;\
    y = (val >> 16) & 0xff;\
    u = (val >> 8) & 0xff;\
    v = val & 0xff;\
}

/* Pack y, u, v, a into a 32-bit AYUV word at d. */
#define YUVA_OUT(d, y, u, v, a)\
{\
    ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
}

/* bytes per input subtitle pixel: bitmaps are 8-bit palette indices */
#define BPP 1
478
479 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
480 {
481     int wrap, wrap3, width2, skip2;
482     int y, u, v, a, u1, v1, a1, w, h;
483     uint8_t *lum, *cb, *cr;
484     const uint8_t *p;
485     const uint32_t *pal;
486     int dstx, dsty, dstw, dsth;
487
488     dstw = av_clip(rect->w, 0, imgw);
489     dsth = av_clip(rect->h, 0, imgh);
490     dstx = av_clip(rect->x, 0, imgw - dstw);
491     dsty = av_clip(rect->y, 0, imgh - dsth);
492     lum = dst->data[0] + dsty * dst->linesize[0];
493     cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
494     cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
495
496     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
497     skip2 = dstx >> 1;
498     wrap = dst->linesize[0];
499     wrap3 = rect->pict.linesize[0];
500     p = rect->pict.data[0];
501     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
502
503     if (dsty & 1) {
504         lum += dstx;
505         cb += skip2;
506         cr += skip2;
507
508         if (dstx & 1) {
509             YUVA_IN(y, u, v, a, p, pal);
510             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
511             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
512             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
513             cb++;
514             cr++;
515             lum++;
516             p += BPP;
517         }
518         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
519             YUVA_IN(y, u, v, a, p, pal);
520             u1 = u;
521             v1 = v;
522             a1 = a;
523             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
524
525             YUVA_IN(y, u, v, a, p + BPP, pal);
526             u1 += u;
527             v1 += v;
528             a1 += a;
529             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
530             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
531             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
532             cb++;
533             cr++;
534             p += 2 * BPP;
535             lum += 2;
536         }
537         if (w) {
538             YUVA_IN(y, u, v, a, p, pal);
539             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
540             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
541             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
542             p++;
543             lum++;
544         }
545         p += wrap3 - dstw * BPP;
546         lum += wrap - dstw - dstx;
547         cb += dst->linesize[1] - width2 - skip2;
548         cr += dst->linesize[2] - width2 - skip2;
549     }
550     for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
551         lum += dstx;
552         cb += skip2;
553         cr += skip2;
554
555         if (dstx & 1) {
556             YUVA_IN(y, u, v, a, p, pal);
557             u1 = u;
558             v1 = v;
559             a1 = a;
560             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
561             p += wrap3;
562             lum += wrap;
563             YUVA_IN(y, u, v, a, p, pal);
564             u1 += u;
565             v1 += v;
566             a1 += a;
567             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
568             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
569             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
570             cb++;
571             cr++;
572             p += -wrap3 + BPP;
573             lum += -wrap + 1;
574         }
575         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
576             YUVA_IN(y, u, v, a, p, pal);
577             u1 = u;
578             v1 = v;
579             a1 = a;
580             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
581
582             YUVA_IN(y, u, v, a, p + BPP, pal);
583             u1 += u;
584             v1 += v;
585             a1 += a;
586             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
587             p += wrap3;
588             lum += wrap;
589
590             YUVA_IN(y, u, v, a, p, pal);
591             u1 += u;
592             v1 += v;
593             a1 += a;
594             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
595
596             YUVA_IN(y, u, v, a, p + BPP, pal);
597             u1 += u;
598             v1 += v;
599             a1 += a;
600             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
601
602             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
603             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
604
605             cb++;
606             cr++;
607             p += -wrap3 + 2 * BPP;
608             lum += -wrap + 2;
609         }
610         if (w) {
611             YUVA_IN(y, u, v, a, p, pal);
612             u1 = u;
613             v1 = v;
614             a1 = a;
615             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
616             p += wrap3;
617             lum += wrap;
618             YUVA_IN(y, u, v, a, p, pal);
619             u1 += u;
620             v1 += v;
621             a1 += a;
622             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
623             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
624             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
625             cb++;
626             cr++;
627             p += -wrap3 + BPP;
628             lum += -wrap + 1;
629         }
630         p += wrap3 + (wrap3 - dstw * BPP);
631         lum += wrap + (wrap - dstw - dstx);
632         cb += dst->linesize[1] - width2 - skip2;
633         cr += dst->linesize[2] - width2 - skip2;
634     }
635     /* handle odd height */
636     if (h) {
637         lum += dstx;
638         cb += skip2;
639         cr += skip2;
640
641         if (dstx & 1) {
642             YUVA_IN(y, u, v, a, p, pal);
643             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
644             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
645             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
646             cb++;
647             cr++;
648             lum++;
649             p += BPP;
650         }
651         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
652             YUVA_IN(y, u, v, a, p, pal);
653             u1 = u;
654             v1 = v;
655             a1 = a;
656             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
657
658             YUVA_IN(y, u, v, a, p + BPP, pal);
659             u1 += u;
660             v1 += v;
661             a1 += a;
662             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
663             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
664             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
665             cb++;
666             cr++;
667             p += 2 * BPP;
668             lum += 2;
669         }
670         if (w) {
671             YUVA_IN(y, u, v, a, p, pal);
672             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
673             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
674             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
675         }
676     }
677 }
678
/* Release the decoded subtitle data held by a SubPicture slot. */
static void free_subpicture(SubPicture *sp)
{
    avsubtitle_free(&sp->sub);
}
683
/* Blit the current picture of the display queue to the screen:
 * compute the display aspect ratio, blend any due subtitle onto the
 * overlay, letterbox the image inside the window and show it. */
static void video_image_display(VideoState *is)
{
    VideoPicture *vp;
    SubPicture *sp;
    AVPicture pict;
    float aspect_ratio;
    int width, height, x, y;
    SDL_Rect rect;
    int i;

    vp = &is->pictq[is->pictq_rindex];
    if (vp->bmp) {
#if CONFIG_AVFILTER
         if (vp->picref->video->pixel_aspect.num == 0)
             aspect_ratio = 0;
         else
             aspect_ratio = av_q2d(vp->picref->video->pixel_aspect);
#else

        /* XXX: use variable in the frame */
        if (is->video_st->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
        else if (is->video_st->codec->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
        else
            aspect_ratio = 0;
#endif
        /* unknown/invalid sample aspect: assume square pixels */
        if (aspect_ratio <= 0.0)
            aspect_ratio = 1.0;
        aspect_ratio *= (float)vp->width / (float)vp->height;
        /* if an active format is indicated, then it overrides the
           mpeg format */
#if 0
        if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
            is->dtg_active_format = is->video_st->codec->dtg_active_format;
            printf("dtg_active_format=%d\n", is->dtg_active_format);
        }
#endif
#if 0
        switch(is->video_st->codec->dtg_active_format) {
        case FF_DTG_AFD_SAME:
        default:
            /* nothing to do */
            break;
        case FF_DTG_AFD_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        case FF_DTG_AFD_16_9:
            aspect_ratio = 16.0 / 9.0;
            break;
        case FF_DTG_AFD_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_4_3_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_16_9_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_SP_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        }
#endif

        if (is->subtitle_st)
        {
            if (is->subpq_size > 0)
            {
                sp = &is->subpq[is->subpq_rindex];

                /* blend only once the subtitle's display time has been reached */
                if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
                {
                    SDL_LockYUVOverlay (vp->bmp);

                    /* NOTE(review): planes 1 and 2 are intentionally swapped —
                       presumably the overlay's chroma plane order is V,U while
                       AVPicture expects U,V; confirm against the overlay format */
                    pict.data[0] = vp->bmp->pixels[0];
                    pict.data[1] = vp->bmp->pixels[2];
                    pict.data[2] = vp->bmp->pixels[1];

                    pict.linesize[0] = vp->bmp->pitches[0];
                    pict.linesize[1] = vp->bmp->pitches[2];
                    pict.linesize[2] = vp->bmp->pitches[1];

                    for (i = 0; i < sp->sub.num_rects; i++)
                        blend_subrect(&pict, sp->sub.rects[i],
                                      vp->bmp->w, vp->bmp->h);

                    SDL_UnlockYUVOverlay (vp->bmp);
                }
            }
        }


        /* XXX: we suppose the screen has a 1.0 pixel ratio */
        /* letterbox: fit the aspect-corrected image inside the window,
           keeping even dimensions (& ~1) */
        height = is->height;
        width = ((int)rint(height * aspect_ratio)) & ~1;
        if (width > is->width) {
            width = is->width;
            height = ((int)rint(width / aspect_ratio)) & ~1;
        }
        x = (is->width - width) / 2;
        y = (is->height - height) / 2;
        if (!is->no_background) {
            /* fill the background */
            //            fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
        } else {
            is->no_background = 0;
        }
        rect.x = is->xleft + x;
        rect.y = is->ytop  + y;
        rect.w = width;
        rect.h = height;
        SDL_DisplayYUVOverlay(vp->bmp, &rect);
    } else {
#if 0
        fill_rectangle(screen,
                       is->xleft, is->ytop, is->width, is->height,
                       QERGB(0x00, 0x00, 0x00));
#endif
    }
}
805
/* Mathematical modulo: remainder of a / b shifted into [0, b) for b > 0
 * (C's % operator can yield a negative result for negative a). */
static inline int compute_mod(int a, int b)
{
    int r = a % b;
    return r >= 0 ? r : r + b;
}
814
815 static void video_audio_display(VideoState *s)
816 {
817     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
818     int ch, channels, h, h2, bgcolor, fgcolor;
819     int16_t time_diff;
820     int rdft_bits, nb_freq;
821
822     for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
823         ;
824     nb_freq= 1<<(rdft_bits-1);
825
826     /* compute display index : center on currently output samples */
827     channels = s->audio_st->codec->channels;
828     nb_display_channels = channels;
829     if (!s->paused) {
830         int data_used= s->show_audio==1 ? s->width : (2*nb_freq);
831         n = 2 * channels;
832         delay = audio_write_get_buf_size(s);
833         delay /= n;
834
835         /* to be more precise, we take into account the time spent since
836            the last buffer computation */
837         if (audio_callback_time) {
838             time_diff = av_gettime() - audio_callback_time;
839             delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
840         }
841
842         delay += 2*data_used;
843         if (delay < data_used)
844             delay = data_used;
845
846         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
847         if(s->show_audio==1){
848             h= INT_MIN;
849             for(i=0; i<1000; i+=channels){
850                 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
851                 int a= s->sample_array[idx];
852                 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
853                 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
854                 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
855                 int score= a-d;
856                 if(h<score && (b^c)<0){
857                     h= score;
858                     i_start= idx;
859                 }
860             }
861         }
862
863         s->last_i_start = i_start;
864     } else {
865         i_start = s->last_i_start;
866     }
867
868     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
869     if(s->show_audio==1){
870         fill_rectangle(screen,
871                        s->xleft, s->ytop, s->width, s->height,
872                        bgcolor);
873
874         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
875
876         /* total height for one channel */
877         h = s->height / nb_display_channels;
878         /* graph height / 2 */
879         h2 = (h * 9) / 20;
880         for(ch = 0;ch < nb_display_channels; ch++) {
881             i = i_start + ch;
882             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
883             for(x = 0; x < s->width; x++) {
884                 y = (s->sample_array[i] * h2) >> 15;
885                 if (y < 0) {
886                     y = -y;
887                     ys = y1 - y;
888                 } else {
889                     ys = y1;
890                 }
891                 fill_rectangle(screen,
892                                s->xleft + x, ys, 1, y,
893                                fgcolor);
894                 i += channels;
895                 if (i >= SAMPLE_ARRAY_SIZE)
896                     i -= SAMPLE_ARRAY_SIZE;
897             }
898         }
899
900         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
901
902         for(ch = 1;ch < nb_display_channels; ch++) {
903             y = s->ytop + ch * h;
904             fill_rectangle(screen,
905                            s->xleft, y, s->width, 1,
906                            fgcolor);
907         }
908         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
909     }else{
910         nb_display_channels= FFMIN(nb_display_channels, 2);
911         if(rdft_bits != s->rdft_bits){
912             av_rdft_end(s->rdft);
913             av_free(s->rdft_data);
914             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
915             s->rdft_bits= rdft_bits;
916             s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
917         }
918         {
919             FFTSample *data[2];
920             for(ch = 0;ch < nb_display_channels; ch++) {
921                 data[ch] = s->rdft_data + 2*nb_freq*ch;
922                 i = i_start + ch;
923                 for(x = 0; x < 2*nb_freq; x++) {
924                     double w= (x-nb_freq)*(1.0/nb_freq);
925                     data[ch][x]= s->sample_array[i]*(1.0-w*w);
926                     i += channels;
927                     if (i >= SAMPLE_ARRAY_SIZE)
928                         i -= SAMPLE_ARRAY_SIZE;
929                 }
930                 av_rdft_calc(s->rdft, data[ch]);
931             }
932             //least efficient way to do this, we should of course directly access it but its more than fast enough
933             for(y=0; y<s->height; y++){
934                 double w= 1/sqrt(nb_freq);
935                 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
936                 int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
937                        + data[1][2*y+1]*data[1][2*y+1])) : a;
938                 a= FFMIN(a,255);
939                 b= FFMIN(b,255);
940                 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
941
942                 fill_rectangle(screen,
943                             s->xpos, s->height-y, 1, 1,
944                             fgcolor);
945             }
946         }
947         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
948         s->xpos++;
949         if(s->xpos >= s->width)
950             s->xpos= s->xleft;
951     }
952 }
953
/* Create (or resize) the SDL output window.
 * Picks the size from, in order: fullscreen dimensions, the user-forced
 * -x/-y size, the video/filter output size, or a 640x480 fallback.
 * No-op when the existing window already matches. Returns 0 on success,
 * -1 when SDL cannot set the video mode. */
static int video_open(VideoState *is){
    int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
    int w,h;

    if(is_full_screen) flags |= SDL_FULLSCREEN;
    else               flags |= SDL_RESIZABLE;

    if (is_full_screen && fs_screen_width) {
        w = fs_screen_width;
        h = fs_screen_height;
    } else if(!is_full_screen && screen_width){
        w = screen_width;
        h = screen_height;
#if CONFIG_AVFILTER
    }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
        w = is->out_video_filter->inputs[0]->w;
        h = is->out_video_filter->inputs[0]->h;
#else
    }else if (is->video_st && is->video_st->codec->width){
        w = is->video_st->codec->width;
        h = is->video_st->codec->height;
#endif
    } else {
        w = 640;
        h = 480;
    }
    /* nothing to do when the current window already has the target size */
    if(screen && is->width == screen->w && screen->w == w
       && is->height== screen->h && screen->h == h)
        return 0;

#ifndef __APPLE__
    screen = SDL_SetVideoMode(w, h, 0, flags);
#else
    /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
    screen = SDL_SetVideoMode(w, h, 24, flags);
#endif
    if (!screen) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        return -1;
    }
    if (!window_title)
        window_title = input_filename;
    SDL_WM_SetCaption(window_title, window_title);

    /* remember the size actually granted by SDL */
    is->width = screen->w;
    is->height = screen->h;

    return 0;
}
1003
1004 /* display the current picture, if any */
1005 static void video_display(VideoState *is)
1006 {
1007     if(!screen)
1008         video_open(cur_stream);
1009     if (is->audio_st && is->show_audio)
1010         video_audio_display(is);
1011     else if (is->video_st)
1012         video_image_display(is);
1013 }
1014
1015 static int refresh_thread(void *opaque)
1016 {
1017     VideoState *is= opaque;
1018     while(!is->abort_request){
1019     SDL_Event event;
1020     event.type = FF_REFRESH_EVENT;
1021     event.user.data1 = opaque;
1022         if(!is->refresh){
1023             is->refresh=1;
1024     SDL_PushEvent(&event);
1025         }
1026         usleep(is->audio_st && is->show_audio ? rdftspeed*1000 : 5000); //FIXME ideally we should wait the correct time but SDLs event passing is so slow it would be silly
1027     }
1028     return 0;
1029 }
1030
1031 /* get the current audio clock value */
1032 static double get_audio_clock(VideoState *is)
1033 {
1034     double pts;
1035     int hw_buf_size, bytes_per_sec;
1036     pts = is->audio_clock;
1037     hw_buf_size = audio_write_get_buf_size(is);
1038     bytes_per_sec = 0;
1039     if (is->audio_st) {
1040         bytes_per_sec = is->audio_st->codec->sample_rate *
1041             2 * is->audio_st->codec->channels;
1042     }
1043     if (bytes_per_sec)
1044         pts -= (double)hw_buf_size / bytes_per_sec;
1045     return pts;
1046 }
1047
1048 /* get the current video clock value */
1049 static double get_video_clock(VideoState *is)
1050 {
1051     if (is->paused) {
1052         return is->video_current_pts;
1053     } else {
1054         return is->video_current_pts_drift + av_gettime() / 1000000.0;
1055     }
1056 }
1057
1058 /* get the current external clock value */
1059 static double get_external_clock(VideoState *is)
1060 {
1061     int64_t ti;
1062     ti = av_gettime();
1063     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1064 }
1065
1066 /* get the current master clock value */
1067 static double get_master_clock(VideoState *is)
1068 {
1069     double val;
1070
1071     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1072         if (is->video_st)
1073             val = get_video_clock(is);
1074         else
1075             val = get_audio_clock(is);
1076     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1077         if (is->audio_st)
1078             val = get_audio_clock(is);
1079         else
1080             val = get_video_clock(is);
1081     } else {
1082         val = get_external_clock(is);
1083     }
1084     return val;
1085 }
1086
1087 /* seek in the stream */
1088 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1089 {
1090     if (!is->seek_req) {
1091         is->seek_pos = pos;
1092         is->seek_rel = rel;
1093         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1094         if (seek_by_bytes)
1095             is->seek_flags |= AVSEEK_FLAG_BYTE;
1096         is->seek_req = 1;
1097     }
1098 }
1099
/* pause or resume the video */
static void stream_pause(VideoState *is)
{
    if (is->paused) {
        /* resuming: advance frame_timer by the wall-clock time spent
           paused so frame scheduling does not jump forward */
        is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
        if(is->read_pause_return != AVERROR(ENOSYS)){
            /* the demuxer honored av_read_pause(): resync the video pts */
            is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
        }
        /* refresh the drift so get_video_clock() stays continuous */
        is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
    }
    is->paused = !is->paused;
}
1112
1113 static double compute_target_time(double frame_current_pts, VideoState *is)
1114 {
1115     double delay, sync_threshold, diff;
1116
1117     /* compute nominal delay */
1118     delay = frame_current_pts - is->frame_last_pts;
1119     if (delay <= 0 || delay >= 10.0) {
1120         /* if incorrect delay, use previous one */
1121         delay = is->frame_last_delay;
1122     } else {
1123         is->frame_last_delay = delay;
1124     }
1125     is->frame_last_pts = frame_current_pts;
1126
1127     /* update delay to follow master synchronisation source */
1128     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1129          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1130         /* if video is slave, we try to correct big delays by
1131            duplicating or deleting a frame */
1132         diff = get_video_clock(is) - get_master_clock(is);
1133
1134         /* skip or repeat frame. We take into account the
1135            delay to compute the threshold. I still don't know
1136            if it is the best guess */
1137         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1138         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1139             if (diff <= -sync_threshold)
1140                 delay = 0;
1141             else if (diff >= sync_threshold)
1142                 delay = 2 * delay;
1143         }
1144     }
1145     is->frame_timer += delay;
1146 #if defined(DEBUG_SYNC)
1147     printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
1148             delay, actual_delay, frame_current_pts, -diff);
1149 #endif
1150
1151     return is->frame_timer;
1152 }
1153
/* called to display each frame */
static void video_refresh_timer(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    SubPicture *sp, *sp2;

    if (is->video_st) {
retry:
        if (is->pictq_size == 0) {
            //nothing to do, no picture to display in the que
        } else {
            double time= av_gettime()/1000000.0;
            double next_target;
            /* dequeue the picture */
            vp = &is->pictq[is->pictq_rindex];

            /* too early: keep the picture queued until its target time */
            if(time < vp->target_clock)
                return;
            /* update current video pts */
            is->video_current_pts = vp->pts;
            is->video_current_pts_drift = is->video_current_pts - time;
            is->video_current_pos = vp->pos;
            if(is->pictq_size > 1){
                VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
                assert(nextvp->target_clock >= vp->target_clock);
                next_target= nextvp->target_clock;
            }else{
                /* no queued successor: estimate from the frame duration */
                next_target= vp->target_clock + is->video_clock - vp->pts; //FIXME pass durations cleanly
            }
            /* frame dropping: if we are already late for the next frame,
               raise the skip ratio and possibly drop this picture */
            if(framedrop && time > next_target){
                is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
                if(is->pictq_size > 1 || time > next_target + 0.5){
                    /* update queue size and signal for next picture */
                    if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                        is->pictq_rindex = 0;

                    SDL_LockMutex(is->pictq_mutex);
                    is->pictq_size--;
                    SDL_CondSignal(is->pictq_cond);
                    SDL_UnlockMutex(is->pictq_mutex);
                    goto retry;
                }
            }

            if(is->subtitle_st) {
                if (is->subtitle_stream_changed) {
                    /* subtitle stream switched: drop every queued subpicture */
                    SDL_LockMutex(is->subpq_mutex);

                    while (is->subpq_size) {
                        free_subpicture(&is->subpq[is->subpq_rindex]);

                        /* update queue size and signal for next picture */
                        if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                            is->subpq_rindex = 0;

                        is->subpq_size--;
                    }
                    is->subtitle_stream_changed = 0;

                    SDL_CondSignal(is->subpq_cond);
                    SDL_UnlockMutex(is->subpq_mutex);
                } else {
                    if (is->subpq_size > 0) {
                        sp = &is->subpq[is->subpq_rindex];

                        if (is->subpq_size > 1)
                            sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
                        else
                            sp2 = NULL;

                        /* free the subpicture once its display window has
                           ended or the next subtitle is due to start */
                        if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
                                || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
                        {
                            free_subpicture(sp);

                            /* update queue size and signal for next picture */
                            if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                                is->subpq_rindex = 0;

                            SDL_LockMutex(is->subpq_mutex);
                            is->subpq_size--;
                            SDL_CondSignal(is->subpq_cond);
                            SDL_UnlockMutex(is->subpq_mutex);
                        }
                    }
                }
            }

            /* display picture */
            video_display(is);

            /* update queue size and signal for next picture */
            if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                is->pictq_rindex = 0;

            SDL_LockMutex(is->pictq_mutex);
            is->pictq_size--;
            SDL_CondSignal(is->pictq_cond);
            SDL_UnlockMutex(is->pictq_mutex);
        }
    } else if (is->audio_st) {
        /* draw the next audio frame */

        /* if only audio stream, then display the audio bars (better
           than nothing, just to test the implementation */

        /* display picture */
        video_display(is);
    }
    if (show_status) {
        static int64_t last_time;
        int64_t cur_time;
        int aqsize, vqsize, sqsize;
        double av_diff;

        cur_time = av_gettime();
        /* print the status line at most once every 30ms */
        if (!last_time || (cur_time - last_time) >= 30000) {
            aqsize = 0;
            vqsize = 0;
            sqsize = 0;
            if (is->audio_st)
                aqsize = is->audioq.size;
            if (is->video_st)
                vqsize = is->videoq.size;
            if (is->subtitle_st)
                sqsize = is->subtitleq.size;
            av_diff = 0;
            if (is->audio_st && is->video_st)
                av_diff = get_audio_clock(is) - get_video_clock(is);
            printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
                   get_master_clock(is), av_diff, FFMAX(is->skip_frames-1, 0), aqsize / 1024, vqsize / 1024, sqsize, is->faulty_dts, is->faulty_pts);
            fflush(stdout);
            last_time = cur_time;
        }
    }
}
1292
1293 static void stream_close(VideoState *is)
1294 {
1295     VideoPicture *vp;
1296     int i;
1297     /* XXX: use a special url_shutdown call to abort parse cleanly */
1298     is->abort_request = 1;
1299     SDL_WaitThread(is->parse_tid, NULL);
1300     SDL_WaitThread(is->refresh_tid, NULL);
1301
1302     /* free all pictures */
1303     for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
1304         vp = &is->pictq[i];
1305 #if CONFIG_AVFILTER
1306         if (vp->picref) {
1307             avfilter_unref_buffer(vp->picref);
1308             vp->picref = NULL;
1309         }
1310 #endif
1311         if (vp->bmp) {
1312             SDL_FreeYUVOverlay(vp->bmp);
1313             vp->bmp = NULL;
1314         }
1315     }
1316     SDL_DestroyMutex(is->pictq_mutex);
1317     SDL_DestroyCond(is->pictq_cond);
1318     SDL_DestroyMutex(is->subpq_mutex);
1319     SDL_DestroyCond(is->subpq_cond);
1320 #if !CONFIG_AVFILTER
1321     if (is->img_convert_ctx)
1322         sws_freeContext(is->img_convert_ctx);
1323 #endif
1324     av_free(is);
1325 }
1326
/* Global shutdown: close the active stream, free the option contexts
 * allocated at startup, shut SDL down and terminate the process. */
static void do_exit(void)
{
    int i;
    if (cur_stream) {
        stream_close(cur_stream);
        cur_stream = NULL;
    }
    /* free the per-media-type codec option contexts */
    for (i = 0; i < AVMEDIA_TYPE_NB; i++)
        av_free(avcodec_opts[i]);
    av_free(avformat_opts);
    av_free(sws_opts);
#if CONFIG_AVFILTER
    avfilter_uninit();
#endif
    if (show_status)
        printf("\n");
    SDL_Quit();
    exit(0);
}
1346
/* allocate a picture (needs to do that in main thread to avoid
   potential locking problems */
static void alloc_picture(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    vp = &is->pictq[is->pictq_windex];

    if (vp->bmp)
        SDL_FreeYUVOverlay(vp->bmp);

#if CONFIG_AVFILTER
    if (vp->picref)
        avfilter_unref_buffer(vp->picref);
    vp->picref = NULL;

    /* size the overlay after the filter-graph output */
    vp->width   = is->out_video_filter->inputs[0]->w;
    vp->height  = is->out_video_filter->inputs[0]->h;
    vp->pix_fmt = is->out_video_filter->inputs[0]->format;
#else
    /* size the overlay after the decoder output */
    vp->width   = is->video_st->codec->width;
    vp->height  = is->video_st->codec->height;
    vp->pix_fmt = is->video_st->codec->pix_fmt;
#endif

    vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
                                   SDL_YV12_OVERLAY,
                                   screen);
    if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
        /* SDL allocates a buffer smaller than requested if the video
         * overlay hardware is unable to support the requested size. */
        fprintf(stderr, "Error: the video system does not support an image\n"
                        "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
                        "to reduce the image size.\n", vp->width, vp->height );
        do_exit();
    }

    /* wake queue_picture(), which is blocked waiting for the allocation */
    SDL_LockMutex(is->pictq_mutex);
    vp->allocated = 1;
    SDL_CondSignal(is->pictq_cond);
    SDL_UnlockMutex(is->pictq_mutex);
}
1390
1391 /**
1392  *
1393  * @param pts the dts of the pkt / pts of the frame and guessed if not known
1394  */
1395 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
1396 {
1397     VideoPicture *vp;
1398     int dst_pix_fmt;
1399 #if CONFIG_AVFILTER
1400     AVPicture pict_src;
1401 #endif
1402     /* wait until we have space to put a new picture */
1403     SDL_LockMutex(is->pictq_mutex);
1404
1405     if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1406         is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));
1407
1408     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1409            !is->videoq.abort_request) {
1410         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1411     }
1412     SDL_UnlockMutex(is->pictq_mutex);
1413
1414     if (is->videoq.abort_request)
1415         return -1;
1416
1417     vp = &is->pictq[is->pictq_windex];
1418
1419     /* alloc or resize hardware picture buffer */
1420     if (!vp->bmp ||
1421 #if CONFIG_AVFILTER
1422         vp->width  != is->out_video_filter->inputs[0]->w ||
1423         vp->height != is->out_video_filter->inputs[0]->h) {
1424 #else
1425         vp->width != is->video_st->codec->width ||
1426         vp->height != is->video_st->codec->height) {
1427 #endif
1428         SDL_Event event;
1429
1430         vp->allocated = 0;
1431
1432         /* the allocation must be done in the main thread to avoid
1433            locking problems */
1434         event.type = FF_ALLOC_EVENT;
1435         event.user.data1 = is;
1436         SDL_PushEvent(&event);
1437
1438         /* wait until the picture is allocated */
1439         SDL_LockMutex(is->pictq_mutex);
1440         while (!vp->allocated && !is->videoq.abort_request) {
1441             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1442         }
1443         SDL_UnlockMutex(is->pictq_mutex);
1444
1445         if (is->videoq.abort_request)
1446             return -1;
1447     }
1448
1449     /* if the frame is not skipped, then display it */
1450     if (vp->bmp) {
1451         AVPicture pict;
1452 #if CONFIG_AVFILTER
1453         if(vp->picref)
1454             avfilter_unref_buffer(vp->picref);
1455         vp->picref = src_frame->opaque;
1456 #endif
1457
1458         /* get a pointer on the bitmap */
1459         SDL_LockYUVOverlay (vp->bmp);
1460
1461         dst_pix_fmt = PIX_FMT_YUV420P;
1462         memset(&pict,0,sizeof(AVPicture));
1463         pict.data[0] = vp->bmp->pixels[0];
1464         pict.data[1] = vp->bmp->pixels[2];
1465         pict.data[2] = vp->bmp->pixels[1];
1466
1467         pict.linesize[0] = vp->bmp->pitches[0];
1468         pict.linesize[1] = vp->bmp->pitches[2];
1469         pict.linesize[2] = vp->bmp->pitches[1];
1470
1471 #if CONFIG_AVFILTER
1472         pict_src.data[0] = src_frame->data[0];
1473         pict_src.data[1] = src_frame->data[1];
1474         pict_src.data[2] = src_frame->data[2];
1475
1476         pict_src.linesize[0] = src_frame->linesize[0];
1477         pict_src.linesize[1] = src_frame->linesize[1];
1478         pict_src.linesize[2] = src_frame->linesize[2];
1479
1480         //FIXME use direct rendering
1481         av_picture_copy(&pict, &pict_src,
1482                         vp->pix_fmt, vp->width, vp->height);
1483 #else
1484         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1485         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1486             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1487             dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1488         if (is->img_convert_ctx == NULL) {
1489             fprintf(stderr, "Cannot initialize the conversion context\n");
1490             exit(1);
1491         }
1492         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1493                   0, vp->height, pict.data, pict.linesize);
1494 #endif
1495         /* update the bitmap content */
1496         SDL_UnlockYUVOverlay(vp->bmp);
1497
1498         vp->pts = pts;
1499         vp->pos = pos;
1500
1501         /* now we can update the picture count */
1502         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1503             is->pictq_windex = 0;
1504         SDL_LockMutex(is->pictq_mutex);
1505         vp->target_clock= compute_target_time(vp->pts, is);
1506
1507         is->pictq_size++;
1508         SDL_UnlockMutex(is->pictq_mutex);
1509     }
1510     return 0;
1511 }
1512
1513 /**
1514  * compute the exact PTS for the picture if it is omitted in the stream
1515  * @param pts1 the dts of the pkt / pts of the frame
1516  */
1517 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1518 {
1519     double frame_delay, pts;
1520
1521     pts = pts1;
1522
1523     if (pts != 0) {
1524         /* update video clock with pts, if present */
1525         is->video_clock = pts;
1526     } else {
1527         pts = is->video_clock;
1528     }
1529     /* update video clock for next frame */
1530     frame_delay = av_q2d(is->video_st->codec->time_base);
1531     /* for MPEG2, the frame can be repeated, so we update the
1532        clock accordingly */
1533     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1534     is->video_clock += frame_delay;
1535
1536 #if defined(DEBUG_SYNC) && 0
1537     printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1538            av_get_pict_type_char(src_frame->pict_type), pts, pts1);
1539 #endif
1540     return queue_picture(is, src_frame, pts, pos);
1541 }
1542
/* Read one packet from the video queue and decode it.
 * Returns 1 when a decoded picture should be queued for display,
 * 0 when the packet was a flush packet or the frame is being skipped,
 * -1 when the queue was aborted. On return *pts holds the selected
 * timestamp in stream time-base units. */
static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
{
    int len1, got_picture, i;

        if (packet_queue_get(&is->videoq, pkt, 1) < 0)
            return -1;

        if(pkt->data == flush_pkt.data){
            /* a seek happened: drop decoder state and queued pictures */
            avcodec_flush_buffers(is->video_st->codec);

            SDL_LockMutex(is->pictq_mutex);
            //Make sure there are no long delay timers (ideally we should just flush the que but thats harder)
            for(i=0; i<VIDEO_PICTURE_QUEUE_SIZE; i++){
                is->pictq[i].target_clock= 0;
            }
            while (is->pictq_size && !is->videoq.abort_request) {
                SDL_CondWait(is->pictq_cond, is->pictq_mutex);
            }
            is->video_current_pos= -1;
            SDL_UnlockMutex(is->pictq_mutex);

            /* reset pts/dts plausibility tracking and frame timing */
            is->last_dts_for_fault_detection=
            is->last_pts_for_fault_detection= INT64_MIN;
            is->frame_last_pts= AV_NOPTS_VALUE;
            is->frame_last_delay = 0;
            is->frame_timer = (double)av_gettime() / 1000000.0;
            is->skip_frames= 1;
            is->skip_frames_index= 0;
            return 0;
        }

        /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        is->video_st->codec->reordered_opaque= pkt->pts;
        len1 = avcodec_decode_video2(is->video_st->codec,
                                    frame, &got_picture,
                                    pkt);

        if (got_picture) {
            /* count non-monotonic dts/pts to decide which one to trust */
            if(pkt->dts != AV_NOPTS_VALUE){
                is->faulty_dts += pkt->dts <= is->last_dts_for_fault_detection;
                is->last_dts_for_fault_detection= pkt->dts;
            }
            if(frame->reordered_opaque != AV_NOPTS_VALUE){
                is->faulty_pts += frame->reordered_opaque <= is->last_pts_for_fault_detection;
                is->last_pts_for_fault_detection= frame->reordered_opaque;
            }
        }

        /* prefer the reordered pts unless the dts stream looks more
           reliable (fewer monotonicity faults) */
        if(   (   decoder_reorder_pts==1
               || (decoder_reorder_pts && is->faulty_pts<is->faulty_dts)
               || pkt->dts == AV_NOPTS_VALUE)
           && frame->reordered_opaque != AV_NOPTS_VALUE)
            *pts= frame->reordered_opaque;
        else if(pkt->dts != AV_NOPTS_VALUE)
            *pts= pkt->dts;
        else
            *pts= 0;

//            if (len1 < 0)
//                break;
    if (got_picture){
        /* frame skipping: only report every skip_frames-th picture */
        is->skip_frames_index += 1;
        if(is->skip_frames_index >= is->skip_frames){
            is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
            return 1;
        }

    }
    return 0;
}
1614
1615 #if CONFIG_AVFILTER
/* Private context of the "ffplay_input" source filter: it feeds decoded
 * video frames from ffplay's decoder into the filter graph. */
typedef struct {
    VideoState *is;     /* back-pointer to the player state */
    AVFrame *frame;     /* scratch frame reused by input_request_frame() */
    int use_dr1;        /* nonzero when the codec supports CODEC_CAP_DR1 */
} FilterPriv;
1621
/* get_buffer callback: let the decoder render directly into a buffer
 * obtained from the filter graph (direct rendering / DR1). */
static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
{
    AVFilterContext *ctx = codec->opaque;
    AVFilterBufferRef  *ref;
    int perms = AV_PERM_WRITE;
    int i, w, h, stride[4];
    unsigned edge;

    /* translate the codec's buffer hints into filter buffer permissions */
    if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
        if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
        if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
        if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
    }
    if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;

    w = codec->width;
    h = codec->height;
    avcodec_align_dimensions2(codec, &w, &h, stride);
    /* pad for edge emulation unless the codec handles edges itself */
    edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
    w += edge << 1;
    h += edge << 1;

    if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
        return -1;

    ref->video->w = codec->width;
    ref->video->h = codec->height;
    for(i = 0; i < 4; i ++) {
        unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
        unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;

        if (ref->data[i]) {
            /* advance past the edge padding so the decoder sees the
               visible picture area */
            ref->data[i]    += (edge >> hshift) + ((edge * ref->linesize[i]) >> vshift);
        }
        pic->data[i]     = ref->data[i];
        pic->linesize[i] = ref->linesize[i];
    }
    pic->opaque = ref;
    pic->age    = INT_MAX;
    pic->type   = FF_BUFFER_TYPE_USER;
    pic->reordered_opaque = codec->reordered_opaque;
    return 0;
}
1665
/* release_buffer callback: clear the data pointers and drop the filter
 * buffer reference that was acquired in input_get_buffer(). */
static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
{
    memset(pic->data, 0, sizeof(pic->data));
    avfilter_unref_buffer(pic->opaque);
}
1671
/* reget_buffer callback: revalidate an existing buffer for reuse,
 * falling back to get_buffer() when the frame has no data yet. */
static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
{
    AVFilterBufferRef *ref = pic->opaque;

    if (pic->data[0] == NULL) {
        pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
        return codec->get_buffer(codec, pic);
    }

    /* a reused buffer must still match the codec's current geometry
       and pixel format */
    if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
        (codec->pix_fmt != ref->format)) {
        av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
        return -1;
    }

    pic->reordered_opaque = codec->reordered_opaque;
    return 0;
}
1690
1691 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1692 {
1693     FilterPriv *priv = ctx->priv;
1694     AVCodecContext *codec;
1695     if(!opaque) return -1;
1696
1697     priv->is = opaque;
1698     codec    = priv->is->video_st->codec;
1699     codec->opaque = ctx;
1700     if(codec->codec->capabilities & CODEC_CAP_DR1) {
1701         priv->use_dr1 = 1;
1702         codec->get_buffer     = input_get_buffer;
1703         codec->release_buffer = input_release_buffer;
1704         codec->reget_buffer   = input_reget_buffer;
1705     }
1706
1707     priv->frame = avcodec_alloc_frame();
1708
1709     return 0;
1710 }
1711
/* uninit callback: free the scratch frame allocated in input_init() */
static void input_uninit(AVFilterContext *ctx)
{
    FilterPriv *priv = ctx->priv;
    av_free(priv->frame);
}
1717
/* request_frame callback: decode packets until a displayable frame is
 * produced, then push it into the filter graph as one full-height slice. */
static int input_request_frame(AVFilterLink *link)
{
    FilterPriv *priv = link->src->priv;
    AVFilterBufferRef *picref;
    int64_t pts = 0;
    AVPacket pkt;
    int ret;

    /* skipped frames and flushes (ret == 0) just free their packet */
    while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
        av_free_packet(&pkt);
    if (ret < 0)
        return -1;

    if(priv->use_dr1) {
        /* direct rendering: the decoder already wrote into a filter buffer */
        picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
    } else {
        picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
        av_picture_data_copy(picref->data, picref->linesize,
                             priv->frame->data, priv->frame->linesize,
                             picref->format, link->w, link->h);
    }
    av_free_packet(&pkt);
    /* NOTE(review): pkt.pos is read below after av_free_packet(); this
       relies on av_free_packet() leaving the pos field untouched — verify */

    picref->pts = pts;
    picref->pos = pkt.pos;
    picref->video->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio;
    avfilter_start_frame(link, picref);
    avfilter_draw_slice(link, 0, link->h, 1);
    avfilter_end_frame(link);

    return 0;
}
1750
1751 static int input_query_formats(AVFilterContext *ctx)
1752 {
1753     FilterPriv *priv = ctx->priv;
1754     enum PixelFormat pix_fmts[] = {
1755         priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1756     };
1757
1758     avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
1759     return 0;
1760 }
1761
1762 static int input_config_props(AVFilterLink *link)
1763 {
1764     FilterPriv *priv  = link->src->priv;
1765     AVCodecContext *c = priv->is->video_st->codec;
1766
1767     link->w = c->width;
1768     link->h = c->height;
1769
1770     return 0;
1771 }
1772
/* Source filter that feeds ffplay's decoded frames into the graph:
 * no inputs, one video output that pulls frames via request_frame. */
static AVFilter input_filter =
{
    .name      = "ffplay_input",

    .priv_size = sizeof(FilterPriv),

    .init      = input_init,
    .uninit    = input_uninit,

    .query_formats = input_query_formats,

    .inputs    = (AVFilterPad[]) {{ .name = NULL }},
    .outputs   = (AVFilterPad[]) {{ .name = "default",
                                    .type = AVMEDIA_TYPE_VIDEO,
                                    .request_frame = input_request_frame,
                                    .config_props  = input_config_props, },
                                  { .name = NULL }},
};
1791
/* end_frame callback of the sink: intentionally a no-op — the frame is
 * pulled out of the graph by get_filtered_video_frame() instead. */
static void output_end_frame(AVFilterLink *link)
{
}
1795
1796 static int output_query_formats(AVFilterContext *ctx)
1797 {
1798     enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
1799
1800     avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
1801     return 0;
1802 }
1803
/* Pull one filtered frame out of the graph through the sink's input link.
 * On success returns 1 and fills frame/pts/pos; the buffer reference is
 * handed to the caller via frame->opaque. Returns -1 on failure. */
static int get_filtered_video_frame(AVFilterContext *ctx, AVFrame *frame,
                                    int64_t *pts, int64_t *pos)
{
    AVFilterBufferRef *pic;

    if(avfilter_request_frame(ctx->inputs[0]))
        return -1;
    if(!(pic = ctx->inputs[0]->cur_buf))
        return -1;
    /* take ownership of the buffer away from the link */
    ctx->inputs[0]->cur_buf = NULL;

    frame->opaque = pic;
    *pts          = pic->pts;
    *pos          = pic->pos;

    memcpy(frame->data,     pic->data,     sizeof(frame->data));
    memcpy(frame->linesize, pic->linesize, sizeof(frame->linesize));

    return 1;
}
1824
/* Sink filter: accepts one readable video input and does nothing with it;
 * frames are extracted manually via get_filtered_video_frame(). */
static AVFilter output_filter =
{
    .name      = "ffplay_output",

    .query_formats = output_query_formats,

    .inputs    = (AVFilterPad[]) {{ .name          = "default",
                                    .type          = AVMEDIA_TYPE_VIDEO,
                                    .end_frame     = output_end_frame,
                                    .min_perms     = AV_PERM_READ, },
                                  { .name = NULL }},
    .outputs   = (AVFilterPad[]) {{ .name = NULL }},
};
1838 #endif  /* CONFIG_AVFILTER */
1839
1840 static int video_thread(void *arg)
1841 {
1842     VideoState *is = arg;
1843     AVFrame *frame= avcodec_alloc_frame();
1844     int64_t pts_int;
1845     double pts;
1846     int ret;
1847
1848 #if CONFIG_AVFILTER
1849     int64_t pos;
1850     char sws_flags_str[128];
1851     AVFilterContext *filt_src = NULL, *filt_out = NULL;
1852     AVFilterGraph *graph = av_mallocz(sizeof(AVFilterGraph));
1853     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1854     graph->scale_sws_opts = av_strdup(sws_flags_str);
1855
1856     if (avfilter_open(&filt_src, &input_filter,  "src") < 0) goto the_end;
1857     if (avfilter_open(&filt_out, &output_filter, "out") < 0) goto the_end;
1858
1859     if(avfilter_init_filter(filt_src, NULL, is))             goto the_end;
1860     if(avfilter_init_filter(filt_out, NULL, frame))          goto the_end;
1861
1862
1863     if(vfilters) {
1864         AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
1865         AVFilterInOut *inputs  = av_malloc(sizeof(AVFilterInOut));
1866
1867         outputs->name    = av_strdup("in");
1868         outputs->filter  = filt_src;
1869         outputs->pad_idx = 0;
1870         outputs->next    = NULL;
1871
1872         inputs->name    = av_strdup("out");
1873         inputs->filter  = filt_out;
1874         inputs->pad_idx = 0;
1875         inputs->next    = NULL;
1876
1877         if (avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL) < 0)
1878             goto the_end;
1879         av_freep(&vfilters);
1880     } else {
1881         if(avfilter_link(filt_src, 0, filt_out, 0) < 0)          goto the_end;
1882     }
1883     avfilter_graph_add_filter(graph, filt_src);
1884     avfilter_graph_add_filter(graph, filt_out);
1885
1886     if(avfilter_graph_check_validity(graph, NULL))           goto the_end;
1887     if(avfilter_graph_config_formats(graph, NULL))           goto the_end;
1888     if(avfilter_graph_config_links(graph, NULL))             goto the_end;
1889
1890     is->out_video_filter = filt_out;
1891 #endif
1892
1893     for(;;) {
1894 #if !CONFIG_AVFILTER
1895         AVPacket pkt;
1896 #endif
1897         while (is->paused && !is->videoq.abort_request)
1898             SDL_Delay(10);
1899 #if CONFIG_AVFILTER
1900         ret = get_filtered_video_frame(filt_out, frame, &pts_int, &pos);
1901 #else
1902         ret = get_video_frame(is, frame, &pts_int, &pkt);
1903 #endif
1904
1905         if (ret < 0) goto the_end;
1906
1907         if (!ret)
1908             continue;
1909
1910         pts = pts_int*av_q2d(is->video_st->time_base);
1911
1912 #if CONFIG_AVFILTER
1913         ret = output_picture2(is, frame, pts, pos);
1914 #else
1915         ret = output_picture2(is, frame, pts,  pkt.pos);
1916         av_free_packet(&pkt);
1917 #endif
1918         if (ret < 0)
1919             goto the_end;
1920
1921         if (step)
1922             if (cur_stream)
1923                 stream_pause(cur_stream);
1924     }
1925  the_end:
1926 #if CONFIG_AVFILTER
1927     avfilter_graph_destroy(graph);
1928     av_freep(&graph);
1929 #endif
1930     av_free(frame);
1931     return 0;
1932 }
1933
/* Subtitle decoding thread: pops packets from the subtitle queue, decodes
 * them, converts each rect's RGBA palette to YUVA in place, and publishes
 * the result into the subpq ring buffer for the video display code. */
static int subtitle_thread(void *arg)
{
    VideoState *is = arg;
    SubPicture *sp;
    AVPacket pkt1, *pkt = &pkt1;
    int len1, got_subtitle;
    double pts;
    int i, j;
    int r, g, b, y, u, v, a;

    for(;;) {
        while (is->paused && !is->subtitleq.abort_request) {
            SDL_Delay(10);
        }
        /* blocking pop; returns < 0 on abort */
        if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
            break;

        /* special flush packet injected after a seek */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->subtitle_st->codec);
            continue;
        }
        /* wait for a free slot in the subpicture ring buffer */
        SDL_LockMutex(is->subpq_mutex);
        while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
               !is->subtitleq.abort_request) {
            SDL_CondWait(is->subpq_cond, is->subpq_mutex);
        }
        SDL_UnlockMutex(is->subpq_mutex);

        if (is->subtitleq.abort_request)
            goto the_end;

        sp = &is->subpq[is->subpq_windex];

       /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        pts = 0;
        if (pkt->pts != AV_NOPTS_VALUE)
            pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;

        len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
                                    &sp->sub, &got_subtitle,
                                    pkt);
//            if (len1 < 0)
//                break;
        /* format == 0 means bitmap (paletted) subtitles */
        if (got_subtitle && sp->sub.format == 0) {
            sp->pts = pts;

            /* convert each rect's RGBA palette entries to YUVA in place,
               since the display path composites in YUV */
            for (i = 0; i < sp->sub.num_rects; i++)
            {
                for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
                {
                    RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
                    y = RGB_TO_Y_CCIR(r, g, b);
                    u = RGB_TO_U_CCIR(r, g, b, 0);
                    v = RGB_TO_V_CCIR(r, g, b, 0);
                    YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
                }
            }

            /* now we can update the picture count */
            if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
                is->subpq_windex = 0;
            SDL_LockMutex(is->subpq_mutex);
            is->subpq_size++;
            SDL_UnlockMutex(is->subpq_mutex);
        }
        av_free_packet(pkt);
//        if (step)
//            if (cur_stream)
//                stream_pause(cur_stream);
    }
 the_end:
    return 0;
}
2008
2009 /* copy samples for viewing in editor window */
2010 static void update_sample_display(VideoState *is, short *samples, int samples_size)
2011 {
2012     int size, len, channels;
2013
2014     channels = is->audio_st->codec->channels;
2015
2016     size = samples_size / sizeof(short);
2017     while (size > 0) {
2018         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
2019         if (len > size)
2020             len = size;
2021         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
2022         samples += len;
2023         is->sample_array_index += len;
2024         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
2025             is->sample_array_index = 0;
2026         size -= len;
2027     }
2028 }
2029
2030 /* return the new audio buffer size (samples can be added or deleted
2031    to get better sync if video or external master clock) */
2032 static int synchronize_audio(VideoState *is, short *samples,
2033                              int samples_size1, double pts)
2034 {
2035     int n, samples_size;
2036     double ref_clock;
2037
2038     n = 2 * is->audio_st->codec->channels;
2039     samples_size = samples_size1;
2040
2041     /* if not master, then we try to remove or add samples to correct the clock */
2042     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
2043          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
2044         double diff, avg_diff;
2045         int wanted_size, min_size, max_size, nb_samples;
2046
2047         ref_clock = get_master_clock(is);
2048         diff = get_audio_clock(is) - ref_clock;
2049
2050         if (diff < AV_NOSYNC_THRESHOLD) {
2051             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
2052             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
2053                 /* not enough measures to have a correct estimate */
2054                 is->audio_diff_avg_count++;
2055             } else {
2056                 /* estimate the A-V difference */
2057                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2058
2059                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
2060                     wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
2061                     nb_samples = samples_size / n;
2062
2063                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
2064                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
2065                     if (wanted_size < min_size)
2066                         wanted_size = min_size;
2067                     else if (wanted_size > max_size)
2068                         wanted_size = max_size;
2069
2070                     /* add or remove samples to correction the synchro */
2071                     if (wanted_size < samples_size) {
2072                         /* remove samples */
2073                         samples_size = wanted_size;
2074                     } else if (wanted_size > samples_size) {
2075                         uint8_t *samples_end, *q;
2076                         int nb;
2077
2078                         /* add samples */
2079                         nb = (samples_size - wanted_size);
2080                         samples_end = (uint8_t *)samples + samples_size - n;
2081                         q = samples_end + n;
2082                         while (nb > 0) {
2083                             memcpy(q, samples_end, n);
2084                             q += n;
2085                             nb -= n;
2086                         }
2087                         samples_size = wanted_size;
2088                     }
2089                 }
2090 #if 0
2091                 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
2092                        diff, avg_diff, samples_size - samples_size1,
2093                        is->audio_clock, is->video_clock, is->audio_diff_threshold);
2094 #endif
2095             }
2096         } else {
2097             /* too big difference : may be initial PTS errors, so
2098                reset A-V filter */
2099             is->audio_diff_avg_count = 0;
2100             is->audio_diff_cum = 0;
2101         }
2102     }
2103
2104     return samples_size;
2105 }
2106
/* decode one audio frame and returns its uncompressed size in bytes.
   Decoded data ends up in is->audio_buf (pointing at audio_buf1, or at
   audio_buf2 after sample-format conversion) and *pts_ptr receives the
   presentation time of the returned data.  Returns -1 on pause/abort. */
static int audio_decode_frame(VideoState *is, double *pts_ptr)
{
    AVPacket *pkt_temp = &is->audio_pkt_temp;  /* cursor into the current packet */
    AVPacket *pkt = &is->audio_pkt;            /* packet that owns the data */
    AVCodecContext *dec= is->audio_st->codec;
    int n, len1, data_size;
    double pts;

    for(;;) {
        /* NOTE: the audio packet can contain several frames */
        while (pkt_temp->size > 0) {
            data_size = sizeof(is->audio_buf1);
            len1 = avcodec_decode_audio3(dec,
                                        (int16_t *)is->audio_buf1, &data_size,
                                        pkt_temp);
            if (len1 < 0) {
                /* if error, we skip the frame */
                pkt_temp->size = 0;
                break;
            }

            /* advance the cursor past the consumed bytes */
            pkt_temp->data += len1;
            pkt_temp->size -= len1;
            if (data_size <= 0)
                continue;

            /* (re)build the S16 converter whenever the decoder's output
               sample format changes */
            if (dec->sample_fmt != is->audio_src_fmt) {
                if (is->reformat_ctx)
                    av_audio_convert_free(is->reformat_ctx);
                is->reformat_ctx= av_audio_convert_alloc(SAMPLE_FMT_S16, 1,
                                                         dec->sample_fmt, 1, NULL, 0);
                if (!is->reformat_ctx) {
                    fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
                        avcodec_get_sample_fmt_name(dec->sample_fmt),
                        avcodec_get_sample_fmt_name(SAMPLE_FMT_S16));
                        break;
                }
                is->audio_src_fmt= dec->sample_fmt;
            }

            if (is->reformat_ctx) {
                /* convert audio_buf1 -> audio_buf2 (interleaved S16) */
                const void *ibuf[6]= {is->audio_buf1};
                void *obuf[6]= {is->audio_buf2};
                int istride[6]= {av_get_bits_per_sample_format(dec->sample_fmt)/8};
                int ostride[6]= {2};
                int len= data_size/istride[0];
                if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
                    printf("av_audio_convert() failed\n");
                    break;
                }
                is->audio_buf= is->audio_buf2;
                /* FIXME: existing code assume that data_size equals framesize*channels*2
                          remove this legacy cruft */
                data_size= len*2;
            }else{
                is->audio_buf= is->audio_buf1;
            }

            /* if no pts, then compute it */
            pts = is->audio_clock;
            *pts_ptr = pts;
            /* advance the audio clock by the duration of the decoded data */
            n = 2 * dec->channels;
            is->audio_clock += (double)data_size /
                (double)(n * dec->sample_rate);
#if defined(DEBUG_SYNC)
            {
                static double last_clock;
                printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
                       is->audio_clock - last_clock,
                       is->audio_clock, pts);
                last_clock = is->audio_clock;
            }
#endif
            return data_size;
        }

        /* free the current packet */
        if (pkt->data)
            av_free_packet(pkt);

        if (is->paused || is->audioq.abort_request) {
            return -1;
        }

        /* read next packet */
        if (packet_queue_get(&is->audioq, pkt, 1) < 0)
            return -1;
        /* flush packet injected after a seek: reset decoder state */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(dec);
            continue;
        }

        pkt_temp->data = pkt->data;
        pkt_temp->size = pkt->size;

        /* if update the audio clock with the pts */
        if (pkt->pts != AV_NOPTS_VALUE) {
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
        }
    }
}
2209
/* get the current audio output buffer size, in bytes (audio_buf_size and
   audio_buf_index are byte counts — see sdl_audio_callback). With SDL, we
   cannot have a precise information */
static int audio_write_get_buf_size(VideoState *is)
{
    return is->audio_buf_size - is->audio_buf_index;
}
2216
2217
2218 /* prepare a new audio buffer */
2219 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2220 {
2221     VideoState *is = opaque;
2222     int audio_size, len1;
2223     double pts;
2224
2225     audio_callback_time = av_gettime();
2226
2227     while (len > 0) {
2228         if (is->audio_buf_index >= is->audio_buf_size) {
2229            audio_size = audio_decode_frame(is, &pts);
2230            if (audio_size < 0) {
2231                 /* if error, just output silence */
2232                is->audio_buf = is->audio_buf1;
2233                is->audio_buf_size = 1024;
2234                memset(is->audio_buf, 0, is->audio_buf_size);
2235            } else {
2236                if (is->show_audio)
2237                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2238                audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2239                                               pts);
2240                is->audio_buf_size = audio_size;
2241            }
2242            is->audio_buf_index = 0;
2243         }
2244         len1 = is->audio_buf_size - is->audio_buf_index;
2245         if (len1 > len)
2246             len1 = len;
2247         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2248         len -= len1;
2249         stream += len1;
2250         is->audio_buf_index += len1;
2251     }
2252 }
2253
/* open a given stream. Return 0 if OK */
/* Configures the codec context from the command-line option globals,
   opens the decoder, and — per media type — sets up SDL audio output or
   spawns the video/subtitle decoding thread. */
static int stream_component_open(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;
    AVCodec *codec;
    SDL_AudioSpec wanted_spec, spec;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return -1;
    avctx = ic->streams[stream_index]->codec;

    /* prepare audio output */
    if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        /* ask the decoder for at most 2 channels (SDL output is stereo) */
        if (avctx->channels > 0) {
            avctx->request_channels = FFMIN(2, avctx->channels);
        } else {
            avctx->request_channels = 2;
        }
    }

    codec = avcodec_find_decoder(avctx->codec_id);
    /* propagate command-line decoding options into the codec context */
    avctx->debug_mv = debug_mv;
    avctx->debug = debug;
    avctx->workaround_bugs = workaround_bugs;
    avctx->lowres = lowres;
    if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
    avctx->idct_algo= idct;
    if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
    avctx->skip_frame= skip_frame;
    avctx->skip_idct= skip_idct;
    avctx->skip_loop_filter= skip_loop_filter;
    avctx->error_recognition= error_recognition;
    avctx->error_concealment= error_concealment;
    avcodec_thread_init(avctx, thread_count);

    set_context_opts(avctx, avcodec_opts[avctx->codec_type], 0);

    if (!codec ||
        avcodec_open(avctx, codec) < 0)
        return -1;

    /* prepare audio output */
    if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        wanted_spec.freq = avctx->sample_rate;
        wanted_spec.format = AUDIO_S16SYS;
        wanted_spec.channels = avctx->channels;
        wanted_spec.silence = 0;
        wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
        wanted_spec.callback = sdl_audio_callback;
        wanted_spec.userdata = is;
        if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
            fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
            return -1;
        }
        is->audio_hw_buf_size = spec.size;
        /* internal buffers start out as native S16 until the decoder says otherwise */
        is->audio_src_fmt= SAMPLE_FMT_S16;
    }

    /* stop discarding packets for this stream now that it is being decoded */
    ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        is->audio_stream = stream_index;
        is->audio_st = ic->streams[stream_index];
        is->audio_buf_size = 0;
        is->audio_buf_index = 0;

        /* init averaging filter */
        is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
        is->audio_diff_avg_count = 0;
        /* since we do not have a precise anough audio fifo fullness,
           we correct audio sync only if larger than this threshold */
        is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;

        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
        packet_queue_init(&is->audioq);
        SDL_PauseAudio(0);
        break;
    case AVMEDIA_TYPE_VIDEO:
        is->video_stream = stream_index;
        is->video_st = ic->streams[stream_index];

//        is->video_current_pts_time = av_gettime();

        packet_queue_init(&is->videoq);
        is->video_tid = SDL_CreateThread(video_thread, is);
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        is->subtitle_stream = stream_index;
        is->subtitle_st = ic->streams[stream_index];
        packet_queue_init(&is->subtitleq);

        is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
        break;
    default:
        break;
    }
    return 0;
}
2353
/* Close one stream component: abort its packet queue, wake and join the
   worker thread, close the codec, and reset the per-type state in "is".
   The order matters: the queue must be aborted and the thread joined
   before avcodec_close() is called on the shared codec context. */
static void stream_component_close(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return;
    avctx = ic->streams[stream_index]->codec;

    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        packet_queue_abort(&is->audioq);

        /* stops the SDL audio thread, so no join is needed */
        SDL_CloseAudio();

        packet_queue_end(&is->audioq);
        if (is->reformat_ctx)
            av_audio_convert_free(is->reformat_ctx);
        is->reformat_ctx = NULL;
        break;
    case AVMEDIA_TYPE_VIDEO:
        packet_queue_abort(&is->videoq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->pictq_mutex);
        SDL_CondSignal(is->pictq_cond);
        SDL_UnlockMutex(is->pictq_mutex);

        SDL_WaitThread(is->video_tid, NULL);

        packet_queue_end(&is->videoq);
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        packet_queue_abort(&is->subtitleq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->subpq_mutex);
        is->subtitle_stream_changed = 1;

        SDL_CondSignal(is->subpq_cond);
        SDL_UnlockMutex(is->subpq_mutex);

        SDL_WaitThread(is->subtitle_tid, NULL);

        packet_queue_end(&is->subtitleq);
        break;
    default:
        break;
    }

    /* resume discarding packets for this stream and release the decoder */
    ic->streams[stream_index]->discard = AVDISCARD_ALL;
    avcodec_close(avctx);
    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        is->audio_st = NULL;
        is->audio_stream = -1;
        break;
    case AVMEDIA_TYPE_VIDEO:
        is->video_st = NULL;
        is->video_stream = -1;
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        is->subtitle_st = NULL;
        is->subtitle_stream = -1;
        break;
    default:
        break;
    }
}
2425
2426 /* since we have only one decoding thread, we can use a global
2427    variable instead of a thread local variable */
2428 static VideoState *global_video_state;
2429
2430 static int decode_interrupt_cb(void)
2431 {
2432     return (global_video_state && global_video_state->abort_request);
2433 }
2434
/* this thread gets the stream from the disk or the network */
/* Demux thread: opens the input, picks the best audio/video/subtitle
   streams, opens their components, then loops reading packets and
   dispatching them into the per-stream queues, handling pause, seek
   requests, EOF/looping and queue back-pressure along the way. */
static int decode_thread(void *arg)
{
    VideoState *is = arg;
    AVFormatContext *ic;
    int err, i, ret;
    int st_index[AVMEDIA_TYPE_NB];              /* chosen stream per media type, -1 = none */
    int st_count[AVMEDIA_TYPE_NB]={0};          /* streams seen so far per media type */
    int st_best_packet_count[AVMEDIA_TYPE_NB];  /* best codec_info_nb_frames per type */
    AVPacket pkt1, *pkt = &pkt1;
    AVFormatParameters params, *ap = &params;
    int eof=0;
    int pkt_in_play_range = 0;

    ic = avformat_alloc_context();

    memset(st_index, -1, sizeof(st_index));
    memset(st_best_packet_count, -1, sizeof(st_best_packet_count));
    is->video_stream = -1;
    is->audio_stream = -1;
    is->subtitle_stream = -1;

    /* allow blocking I/O to be interrupted by the abort flag */
    global_video_state = is;
    url_set_interrupt_cb(decode_interrupt_cb);

    memset(ap, 0, sizeof(*ap));

    /* hints for demuxers that need them (e.g. raw video / grab devices) */
    ap->prealloced_context = 1;
    ap->width = frame_width;
    ap->height= frame_height;
    ap->time_base= (AVRational){1, 25};
    ap->pix_fmt = frame_pix_fmt;

    set_context_opts(ic, avformat_opts, AV_OPT_FLAG_DECODING_PARAM);

    err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
    if (err < 0) {
        print_error(is->filename, err);
        ret = -1;
        goto fail;
    }
    is->ic = ic;

    if(genpts)
        ic->flags |= AVFMT_FLAG_GENPTS;

    err = av_find_stream_info(ic);
    if (err < 0) {
        fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
        ret = -1;
        goto fail;
    }
    if(ic->pb)
        ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end

    if(seek_by_bytes<0)
        seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);

    /* if seeking requested, we execute it */
    if (start_time != AV_NOPTS_VALUE) {
        int64_t timestamp;

        timestamp = start_time;
        /* add the stream start time */
        if (ic->start_time != AV_NOPTS_VALUE)
            timestamp += ic->start_time;
        ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
        if (ret < 0) {
            fprintf(stderr, "%s: could not seek to position %0.3f\n",
                    is->filename, (double)timestamp / AV_TIME_BASE);
        }
    }

    /* select one stream per media type: honor wanted_stream[] if set,
       otherwise prefer the stream with the most frames seen during probing */
    for(i = 0; i < ic->nb_streams; i++) {
        AVStream *st= ic->streams[i];
        AVCodecContext *avctx = st->codec;
        ic->streams[i]->discard = AVDISCARD_ALL;
        if(avctx->codec_type >= (unsigned)AVMEDIA_TYPE_NB)
            continue;
        if(st_count[avctx->codec_type]++ != wanted_stream[avctx->codec_type] && wanted_stream[avctx->codec_type] >= 0)
            continue;

        if(st_best_packet_count[avctx->codec_type] >= st->codec_info_nb_frames)
            continue;
        st_best_packet_count[avctx->codec_type]= st->codec_info_nb_frames;

        switch(avctx->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            if (!audio_disable)
                st_index[AVMEDIA_TYPE_AUDIO] = i;
            break;
        case AVMEDIA_TYPE_VIDEO:
        case AVMEDIA_TYPE_SUBTITLE:
            if (!video_disable)
                st_index[avctx->codec_type] = i;
            break;
        default:
            break;
        }
    }
    if (show_status) {
        dump_format(ic, 0, is->filename, 0);
    }

    /* open the streams */
    if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
        stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
    }

    ret=-1;
    if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
        ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
    }
    is->refresh_tid = SDL_CreateThread(refresh_thread, is);
    if(ret<0) {
        /* no video: show the audio visualization instead */
        if (!display_disable)
            is->show_audio = 2;
    }

    if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
        stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
    }

    if (is->video_stream < 0 && is->audio_stream < 0) {
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
        ret = -1;
        goto fail;
    }

    /* main demux loop */
    for(;;) {
        if (is->abort_request)
            break;
        /* propagate pause/resume to network protocols that support it */
        if (is->paused != is->last_paused) {
            is->last_paused = is->paused;
            if (is->paused)
                is->read_pause_return= av_read_pause(ic);
            else
                av_read_play(ic);
        }
#if CONFIG_RTSP_DEMUXER
        if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
            /* wait 10 ms to avoid trying to get another packet */
            /* XXX: horrible */
            SDL_Delay(10);
            continue;
        }
#endif
        if (is->seek_req) {
            int64_t seek_target= is->seek_pos;
            int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
            int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
//FIXME the +-2 is due to rounding being not done in the correct direction in generation
//      of the seek_pos/seek_rel variables

            ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
            if (ret < 0) {
                fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
            }else{
                /* flush all queues and inject flush packets so the
                   decoders reset their state */
                if (is->audio_stream >= 0) {
                    packet_queue_flush(&is->audioq);
                    packet_queue_put(&is->audioq, &flush_pkt);
                }
                if (is->subtitle_stream >= 0) {
                    packet_queue_flush(&is->subtitleq);
                    packet_queue_put(&is->subtitleq, &flush_pkt);
                }
                if (is->video_stream >= 0) {
                    packet_queue_flush(&is->videoq);
                    packet_queue_put(&is->videoq, &flush_pkt);
                }
            }
            is->seek_req = 0;
            eof= 0;
        }

        /* if the queue are full, no need to read more */
        if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
            || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
                && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
                && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
            /* wait 10 ms */
            SDL_Delay(10);
            continue;
        }
        if(url_feof(ic->pb) || eof) {
            /* at EOF, feed an empty packet to drain the video decoder */
            if(is->video_stream >= 0){
                av_init_packet(pkt);
                pkt->data=NULL;
                pkt->size=0;
                pkt->stream_index= is->video_stream;
                packet_queue_put(&is->videoq, pkt);
            }
            SDL_Delay(10);
            /* once everything queued has been consumed: loop or exit */
            if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
                if(loop!=1 && (!loop || --loop)){
                    stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
                }else if(autoexit){
                    ret=AVERROR_EOF;
                    goto fail;
                }
            }
            continue;
        }
        ret = av_read_frame(ic, pkt);
        if (ret < 0) {
            if (ret == AVERROR_EOF)
                eof=1;
            if (url_ferror(ic->pb))
                break;
            SDL_Delay(100); /* wait for user event */
            continue;
        }
        /* check if packet is in play range specified by user, then queue, otherwise discard */
        pkt_in_play_range = duration == AV_NOPTS_VALUE ||
                (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
                av_q2d(ic->streams[pkt->stream_index]->time_base) -
                (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
                <= ((double)duration/1000000);
        if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
            packet_queue_put(&is->audioq, pkt);
        } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
            packet_queue_put(&is->videoq, pkt);
        } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
            packet_queue_put(&is->subtitleq, pkt);
        } else {
            av_free_packet(pkt);
        }
    }
    /* wait until the end */
    while (!is->abort_request) {
        SDL_Delay(100);
    }

    ret = 0;
 fail:
    /* disable interrupting */
    global_video_state = NULL;

    /* close each stream */
    if (is->audio_stream >= 0)
        stream_component_close(is, is->audio_stream);
    if (is->video_stream >= 0)
        stream_component_close(is, is->video_stream);
    if (is->subtitle_stream >= 0)
        stream_component_close(is, is->subtitle_stream);
    if (is->ic) {
        av_close_input_file(is->ic);
        is->ic = NULL; /* safety */
    }
    url_set_interrupt_cb(NULL);

    /* tell the main loop we died with an error so it can quit */
    if (ret != 0) {
        SDL_Event event;

        event.type = FF_QUIT_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);
    }
    return 0;
}
2695
2696 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2697 {
2698     VideoState *is;
2699
2700     is = av_mallocz(sizeof(VideoState));
2701     if (!is)
2702         return NULL;
2703     av_strlcpy(is->filename, filename, sizeof(is->filename));
2704     is->iformat = iformat;
2705     is->ytop = 0;
2706     is->xleft = 0;
2707
2708     /* start video display */
2709     is->pictq_mutex = SDL_CreateMutex();
2710     is->pictq_cond = SDL_CreateCond();
2711
2712     is->subpq_mutex = SDL_CreateMutex();
2713     is->subpq_cond = SDL_CreateCond();
2714
2715     is->av_sync_type = av_sync_type;
2716     is->parse_tid = SDL_CreateThread(decode_thread, is);
2717     if (!is->parse_tid) {
2718         av_free(is);
2719         return NULL;
2720     }
2721     return is;
2722 }
2723
2724 static void stream_cycle_channel(VideoState *is, int codec_type)
2725 {
2726     AVFormatContext *ic = is->ic;
2727     int start_index, stream_index;
2728     AVStream *st;
2729
2730     if (codec_type == AVMEDIA_TYPE_VIDEO)
2731         start_index = is->video_stream;
2732     else if (codec_type == AVMEDIA_TYPE_AUDIO)
2733         start_index = is->audio_stream;
2734     else
2735         start_index = is->subtitle_stream;
2736     if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2737         return;
2738     stream_index = start_index;
2739     for(;;) {
2740         if (++stream_index >= is->ic->nb_streams)
2741         {
2742             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2743             {
2744                 stream_index = -1;
2745                 goto the_end;
2746             } else
2747                 stream_index = 0;
2748         }
2749         if (stream_index == start_index)
2750             return;
2751         st = ic->streams[stream_index];
2752         if (st->codec->codec_type == codec_type) {
2753             /* check that parameters are OK */
2754             switch(codec_type) {
2755             case AVMEDIA_TYPE_AUDIO:
2756                 if (st->codec->sample_rate != 0 &&
2757                     st->codec->channels != 0)
2758                     goto the_end;
2759                 break;
2760             case AVMEDIA_TYPE_VIDEO:
2761             case AVMEDIA_TYPE_SUBTITLE:
2762                 goto the_end;
2763             default:
2764                 break;
2765             }
2766         }
2767     }
2768  the_end:
2769     stream_component_close(is, start_index);
2770     stream_component_open(is, stream_index);
2771 }
2772
2773
2774 static void toggle_full_screen(void)
2775 {
2776     is_full_screen = !is_full_screen;
2777     if (!fs_screen_width) {
2778         /* use default SDL method */
2779 //        SDL_WM_ToggleFullScreen(screen);
2780     }
2781     video_open(cur_stream);
2782 }
2783
2784 static void toggle_pause(void)
2785 {
2786     if (cur_stream)
2787         stream_pause(cur_stream);
2788     step = 0;
2789 }
2790
2791 static void step_to_next_frame(void)
2792 {
2793     if (cur_stream) {
2794         /* if the stream is paused unpause it, then step */
2795         if (cur_stream->paused)
2796             stream_pause(cur_stream);
2797     }
2798     step = 1;
2799 }
2800
2801 static void toggle_audio_display(void)
2802 {
2803     if (cur_stream) {
2804         int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2805         cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2806         fill_rectangle(screen,
2807                     cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2808                     bgcolor);
2809         SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2810     }
2811 }
2812
/* Main GUI loop: block on SDL events (keyboard, mouse, resize and the
 * application-defined FF_* user events) and dispatch them. Never returns
 * normally; termination happens via do_exit(). */
static void event_loop(void)
{
    SDL_Event event;
    double incr, pos, frac;

    for(;;) {
        double x;
        SDL_WaitEvent(&event);
        switch(event.type) {
        case SDL_KEYDOWN:
            if (exit_on_keydown) {
                do_exit();
                break;
            }
            switch(event.key.keysym.sym) {
            case SDLK_ESCAPE:
            case SDLK_q:
                do_exit();
                break;
            case SDLK_f:
                toggle_full_screen();
                break;
            case SDLK_p:
            case SDLK_SPACE:
                toggle_pause();
                break;
            case SDLK_s: //S: Step to next frame
                step_to_next_frame();
                break;
            case SDLK_a:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
                break;
            case SDLK_v:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
                break;
            case SDLK_t:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
                break;
            case SDLK_w:
                toggle_audio_display();
                break;
            /* the four arrow keys share the do_seek logic below, differing
             * only in the seek increment (seconds) */
            case SDLK_LEFT:
                incr = -10.0;
                goto do_seek;
            case SDLK_RIGHT:
                incr = 10.0;
                goto do_seek;
            case SDLK_UP:
                incr = 60.0;
                goto do_seek;
            case SDLK_DOWN:
                incr = -60.0;
            do_seek:
                if (cur_stream) {
                    if (seek_by_bytes) {
                        /* byte-based seek: derive a byte offset from the
                         * current position and convert the time increment
                         * via the bitrate (180000.0 as a fallback) */
                        if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
                            pos= cur_stream->video_current_pos;
                        }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
                            pos= cur_stream->audio_pkt.pos;
                        }else
                            pos = url_ftell(cur_stream->ic->pb);
                        if (cur_stream->ic->bit_rate)
                            incr *= cur_stream->ic->bit_rate / 8.0;
                        else
                            incr *= 180000.0;
                        pos += incr;
                        stream_seek(cur_stream, pos, incr, 1);
                    } else {
                        /* time-based seek relative to the master clock */
                        pos = get_master_clock(cur_stream);
                        pos += incr;
                        stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
                    }
                }
                break;
            default:
                break;
            }
            break;
        case SDL_MOUSEBUTTONDOWN:
            if (exit_on_mousedown) {
                do_exit();
                break;
            }
            /* fall through: clicks share the seek-to-fraction logic with
             * drag (motion-with-button-pressed) events */
        case SDL_MOUSEMOTION:
            if(event.type ==SDL_MOUSEBUTTONDOWN){
                x= event.button.x;
            }else{
                if(event.motion.state != SDL_PRESSED)
                    break;
                x= event.motion.x;
            }
            if (cur_stream) {
                if(seek_by_bytes || cur_stream->ic->duration<=0){
                    /* no usable duration: map the x coordinate to a byte
                     * offset within the file instead */
                    uint64_t size=  url_fsize(cur_stream->ic->pb);
                    stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
                }else{
                    /* map the x coordinate to a fraction of the total
                     * duration and report the target in h:m:s */
                    int64_t ts;
                    int ns, hh, mm, ss;
                    int tns, thh, tmm, tss;
                    tns = cur_stream->ic->duration/1000000LL;
                    thh = tns/3600;
                    tmm = (tns%3600)/60;
                    tss = (tns%60);
                    frac = x/cur_stream->width;
                    ns = frac*tns;
                    hh = ns/3600;
                    mm = (ns%3600)/60;
                    ss = (ns%60);
                    fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
                            hh, mm, ss, thh, tmm, tss);
                    ts = frac*cur_stream->ic->duration;
                    if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
                        ts += cur_stream->ic->start_time;
                    stream_seek(cur_stream, ts, 0, 0);
                }
            }
            break;
        case SDL_VIDEORESIZE:
            if (cur_stream) {
                screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
                                          SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
                screen_width = cur_stream->width = event.resize.w;
                screen_height= cur_stream->height= event.resize.h;
            }
            break;
        case SDL_QUIT:
        case FF_QUIT_EVENT:
            do_exit();
            break;
        case FF_ALLOC_EVENT:
            /* posted by the video thread to have the (SDL-owned) picture
             * buffers allocated in this, the main, thread */
            video_open(event.user.data1);
            alloc_picture(event.user.data1);
            break;
        case FF_REFRESH_EVENT:
            /* display the next frame; refresh=0 allows a new refresh event
             * to be scheduled */
            video_refresh_timer(event.user.data1);
            cur_stream->refresh=0;
            break;
        default:
            break;
        }
    }
}
2959
2960 static void opt_frame_size(const char *arg)
2961 {
2962     if (av_parse_video_size(&frame_width, &frame_height, arg) < 0) {
2963         fprintf(stderr, "Incorrect frame size\n");
2964         exit(1);
2965     }
2966     if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2967         fprintf(stderr, "Frame size must be a multiple of 2\n");
2968         exit(1);
2969     }
2970 }
2971
2972 static int opt_width(const char *opt, const char *arg)
2973 {
2974     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2975     return 0;
2976 }
2977
2978 static int opt_height(const char *opt, const char *arg)
2979 {
2980     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2981     return 0;
2982 }
2983
2984 static void opt_format(const char *arg)
2985 {
2986     file_iformat = av_find_input_format(arg);
2987     if (!file_iformat) {
2988         fprintf(stderr, "Unknown input format: %s\n", arg);
2989         exit(1);
2990     }
2991 }
2992
2993 static void opt_frame_pix_fmt(const char *arg)
2994 {
2995     frame_pix_fmt = av_get_pix_fmt(arg);
2996 }
2997
2998 static int opt_sync(const char *opt, const char *arg)
2999 {
3000     if (!strcmp(arg, "audio"))
3001         av_sync_type = AV_SYNC_AUDIO_MASTER;
3002     else if (!strcmp(arg, "video"))
3003         av_sync_type = AV_SYNC_VIDEO_MASTER;
3004     else if (!strcmp(arg, "ext"))
3005         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
3006     else {
3007         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
3008         exit(1);
3009     }
3010     return 0;
3011 }
3012
3013 static int opt_seek(const char *opt, const char *arg)
3014 {
3015     start_time = parse_time_or_die(opt, arg, 1);
3016     return 0;
3017 }
3018
3019 static int opt_duration(const char *opt, const char *arg)
3020 {
3021     duration = parse_time_or_die(opt, arg, 1);
3022     return 0;
3023 }
3024
3025 static int opt_debug(const char *opt, const char *arg)
3026 {
3027     av_log_set_level(99);
3028     debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
3029     return 0;
3030 }
3031
3032 static int opt_vismv(const char *opt, const char *arg)
3033 {
3034     debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
3035     return 0;
3036 }
3037
3038 static int opt_thread_count(const char *opt, const char *arg)
3039 {
3040     thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
3041 #if !HAVE_THREADS
3042     fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
3043 #endif
3044     return 0;
3045 }
3046
/* Command-line option table consumed by parse_options() / show_help_options().
 * OPT_FUNC2 entries dispatch to the opt_* handlers above; OPT_BOOL/OPT_INT
 * entries write directly into the referenced globals. */
static const OptionDef options[] = {
#include "cmdutils_common_opts.h"
    /* display geometry and mode */
    { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
    { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
    { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
    { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
    /* stream selection */
    { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
    { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
    { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
    { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
    { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
    /* playback range and seeking */
    { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
    { "t", HAS_ARG | OPT_FUNC2, {(void*)&opt_duration}, "play  \"duration\" seconds of audio/video", "duration" },
    { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
    { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
    { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
    { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
    { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
    /* decoder debugging / tuning (expert) */
    { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
    { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
    { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
    { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
    { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
    { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
    { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
    { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
    { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
    { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
    { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
    { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
    { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
    /* playback behavior (expert) */
    { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
    { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
    { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
    { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
    { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
    { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
    { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
    { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
#if CONFIG_AVFILTER
    { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
#endif
    { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
    { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
    { NULL, },
};
3093
/* Print the one-line program description and usage synopsis. */
static void show_usage(void)
{
    printf("Simple media player\n"
           "usage: ffplay [options] input_file\n"
           "\n");
}
3100
3101 static void show_help(void)
3102 {
3103     show_usage();
3104     show_help_options(options, "Main options:\n",
3105                       OPT_EXPERT, 0);
3106     show_help_options(options, "\nAdvanced options:\n",
3107                       OPT_EXPERT, OPT_EXPERT);
3108     printf("\nWhile playing:\n"
3109            "q, ESC              quit\n"
3110            "f                   toggle full screen\n"
3111            "p, SPC              pause\n"
3112            "a                   cycle audio channel\n"
3113            "v                   cycle video channel\n"
3114            "t                   cycle subtitle channel\n"
3115            "w                   show audio waves\n"
3116            "s                   activate frame-step mode\n"
3117            "left/right          seek backward/forward 10 seconds\n"
3118            "down/up             seek backward/forward 1 minute\n"
3119            "mouse click         seek to percentage in file corresponding to fraction of width\n"
3120            );
3121 }
3122
3123 static void opt_input_file(const char *filename)
3124 {
3125     if (input_filename) {
3126         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3127                 filename, input_filename);
3128         exit(1);
3129     }
3130     if (!strcmp(filename, "-"))
3131         filename = "pipe:";
3132     input_filename = filename;
3133 }
3134
/* Program entry point: register libav* components, parse options, initialize
 * SDL, open the input stream and hand control to event_loop() (which never
 * returns; exit happens via do_exit()). */
int main(int argc, char **argv)
{
    int flags, i;

    /* register all codecs, demux and protocols */
    avcodec_register_all();
#if CONFIG_AVDEVICE
    avdevice_register_all();
#endif
#if CONFIG_AVFILTER
    avfilter_register_all();
#endif
    av_register_all();

    /* per-media-type codec contexts holding command-line codec options;
       presumably consumed by opt_default()/stream open — confirm in cmdutils */
    for(i=0; i<AVMEDIA_TYPE_NB; i++){
        avcodec_opts[i]= avcodec_alloc_context2(i);
    }
    avformat_opts = avformat_alloc_context();
#if !CONFIG_AVFILTER
    /* dummy 16x16 context only used as a container for scaler options */
    sws_opts = sws_getContext(16,16,0, 16,16,0, sws_flags, NULL,NULL,NULL);
#endif

    show_banner();

    parse_options(argc, argv, options, opt_input_file);

    if (!input_filename) {
        show_usage();
        fprintf(stderr, "An input file must be specified\n");
        fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
        exit(1);
    }

    if (display_disable) {
        video_disable = 1;
    }
    flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
#if !defined(__MINGW32__) && !defined(__APPLE__)
    flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
#endif
    if (SDL_Init (flags)) {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        exit(1);
    }

    if (!display_disable) {
#if HAVE_SDL_VIDEO_SIZE
        /* remember the desktop resolution for full-screen mode */
        const SDL_VideoInfo *vi = SDL_GetVideoInfo();
        fs_screen_width = vi->current_w;
        fs_screen_height = vi->current_h;
#endif
    }

    /* drop event types the player never handles */
    SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
    SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
    SDL_EventState(SDL_USEREVENT, SDL_IGNORE);

    /* sentinel packet signalling a queue flush; presumably recognized by
       pointer comparison in the packet queue code — confirm there */
    av_init_packet(&flush_pkt);
    flush_pkt.data= "FLUSH";

    cur_stream = stream_open(input_filename, file_iformat);

    event_loop();

    /* never returns */

    return 0;
}