avplay.c
1 /*
2  * avplay : Simple Media Player based on the Libav libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of Libav.
6  *
7  * Libav is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * Libav is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with Libav; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include "libavutil/avstring.h"
27 #include "libavutil/colorspace.h"
28 #include "libavutil/mathematics.h"
29 #include "libavutil/pixdesc.h"
30 #include "libavutil/imgutils.h"
31 #include "libavutil/dict.h"
32 #include "libavutil/parseutils.h"
33 #include "libavutil/samplefmt.h"
34 #include "libavformat/avformat.h"
35 #include "libavdevice/avdevice.h"
36 #include "libswscale/swscale.h"
37 #include "libavcodec/audioconvert.h"
38 #include "libavutil/opt.h"
39 #include "libavcodec/avfft.h"
40
41 #if CONFIG_AVFILTER
42 # include "libavfilter/avfilter.h"
43 # include "libavfilter/avfiltergraph.h"
44 #endif
45
46 #include "cmdutils.h"
47
48 #include <SDL.h>
49 #include <SDL_thread.h>
50
51 #ifdef __MINGW32__
52 #undef main /* We don't want SDL to override our main() */
53 #endif
54
55 #include <unistd.h>
56 #include <assert.h>
57
58 const char program_name[] = "avplay";
59 const int program_birth_year = 2003;
60
61 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
62 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
63 #define MIN_FRAMES 5
64
65 /* SDL audio buffer size, in samples. Should be small to have precise
66    A/V sync as SDL does not have hardware buffer fullness info. */
67 #define SDL_AUDIO_BUFFER_SIZE 1024
68
69 /* no AV sync correction is done if below the AV sync threshold */
70 #define AV_SYNC_THRESHOLD 0.01
71 /* no AV correction is done if too big error */
72 #define AV_NOSYNC_THRESHOLD 10.0
73
74 #define FRAME_SKIP_FACTOR 0.05
75
76 /* maximum audio speed change to get correct sync */
77 #define SAMPLE_CORRECTION_PERCENT_MAX 10
78
79 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
80 #define AUDIO_DIFF_AVG_NB   20
81
82 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
83 #define SAMPLE_ARRAY_SIZE (2*65536)
84
85 static int sws_flags = SWS_BICUBIC;
86
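/* Thread-safe FIFO of demuxed packets, shared between the demuxer (producer)
 * and the decoder threads (consumers). nb_packets and size track the queued
 * totals, and the mutex/condition pair lets packet_queue_get() block until
 * data arrives or the queue is aborted. */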
87 typedef struct PacketQueue {
88     AVPacketList *first_pkt, *last_pkt;
89     int nb_packets;
90     int size;
91     int abort_request;
92     SDL_mutex *mutex;
93     SDL_cond *cond;
94 } PacketQueue;
95
96 #define VIDEO_PICTURE_QUEUE_SIZE 2
97 #define SUBPICTURE_QUEUE_SIZE 4
98
99 typedef struct VideoPicture {
100     double pts;                                  ///<presentation time stamp for this picture
101     double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
102     int64_t pos;                                 ///<byte position in file
103     SDL_Overlay *bmp;
104     int width, height; /* source height & width */
105     int allocated;
106     enum PixelFormat pix_fmt;
107
108 #if CONFIG_AVFILTER
109     AVFilterBufferRef *picref;
110 #endif
111 } VideoPicture;
112
113 typedef struct SubPicture {
114     double pts; /* presentation time stamp for this picture */
115     AVSubtitle sub;
116 } SubPicture;
117
118 enum {
119     AV_SYNC_AUDIO_MASTER, /* default choice */
120     AV_SYNC_VIDEO_MASTER,
121     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
122 };
123
124 typedef struct VideoState {
125     SDL_Thread *parse_tid;
126     SDL_Thread *video_tid;
127     SDL_Thread *refresh_tid;
128     AVInputFormat *iformat;
129     int no_background;
130     int abort_request;
131     int paused;
132     int last_paused;
133     int seek_req;
134     int seek_flags;
135     int64_t seek_pos;
136     int64_t seek_rel;
137     int read_pause_return;
138     AVFormatContext *ic;
139     int dtg_active_format;
140
141     int audio_stream;
142
143     int av_sync_type;
144     double external_clock; /* external clock base */
145     int64_t external_clock_time;
146
147     double audio_clock;
148     double audio_diff_cum; /* used for AV difference average computation */
149     double audio_diff_avg_coef;
150     double audio_diff_threshold;
151     int audio_diff_avg_count;
152     AVStream *audio_st;
153     PacketQueue audioq;
154     int audio_hw_buf_size;
155     /* samples output by the codec. We reserve more space for A/V sync
156        compensation */
157     DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
158     DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
159     uint8_t *audio_buf;
160     unsigned int audio_buf_size; /* in bytes */
161     int audio_buf_index; /* in bytes */
162     AVPacket audio_pkt_temp;
163     AVPacket audio_pkt;
164     enum AVSampleFormat audio_src_fmt;
165     AVAudioConvert *reformat_ctx;
166
167     int show_audio; /* if true, display audio samples */
168     int16_t sample_array[SAMPLE_ARRAY_SIZE];
169     int sample_array_index;
170     int last_i_start;
171     RDFTContext *rdft;
172     int rdft_bits;
173     FFTSample *rdft_data;
174     int xpos;
175
176     SDL_Thread *subtitle_tid;
177     int subtitle_stream;
178     int subtitle_stream_changed;
179     AVStream *subtitle_st;
180     PacketQueue subtitleq;
181     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
182     int subpq_size, subpq_rindex, subpq_windex;
183     SDL_mutex *subpq_mutex;
184     SDL_cond *subpq_cond;
185
186     double frame_timer;
187     double frame_last_pts;
188     double frame_last_delay;
189     double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
190     int video_stream;
191     AVStream *video_st;
192     PacketQueue videoq;
193     double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
194     double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
195     int64_t video_current_pos;                   ///<current displayed file pos
196     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
197     int pictq_size, pictq_rindex, pictq_windex;
198     SDL_mutex *pictq_mutex;
199     SDL_cond *pictq_cond;
200 #if !CONFIG_AVFILTER
201     struct SwsContext *img_convert_ctx;
202 #endif
203
204     //    QETimer *video_timer;
205     char filename[1024];
206     int width, height, xleft, ytop;
207
208     PtsCorrectionContext pts_ctx;
209
210 #if CONFIG_AVFILTER
211     AVFilterContext *out_video_filter;          ///<the last filter in the video chain
212 #endif
213
214     float skip_frames;
215     float skip_frames_index;
216     int refresh;
217 } VideoState;
218
219 static void show_help(void);
220
221 /* options specified by the user */
222 static AVInputFormat *file_iformat;
223 static const char *input_filename;
224 static const char *window_title;
225 static int fs_screen_width;
226 static int fs_screen_height;
227 static int screen_width = 0;
228 static int screen_height = 0;
229 static int audio_disable;
230 static int video_disable;
231 static int wanted_stream[AVMEDIA_TYPE_NB]={
232     [AVMEDIA_TYPE_AUDIO]=-1,
233     [AVMEDIA_TYPE_VIDEO]=-1,
234     [AVMEDIA_TYPE_SUBTITLE]=-1,
235 };
236 static int seek_by_bytes=-1;
237 static int display_disable;
238 static int show_status = 1;
239 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
240 static int64_t start_time = AV_NOPTS_VALUE;
241 static int64_t duration = AV_NOPTS_VALUE;
242 static int debug = 0;
243 static int debug_mv = 0;
244 static int step = 0;
245 static int thread_count = 1;
246 static int workaround_bugs = 1;
247 static int fast = 0;
248 static int genpts = 0;
249 static int lowres = 0;
250 static int idct = FF_IDCT_AUTO;
251 static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
252 static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
253 static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
254 static int error_recognition = FF_ER_CAREFUL;
255 static int error_concealment = 3;
256 static int decoder_reorder_pts= -1;
257 static int autoexit;
258 static int exit_on_keydown;
259 static int exit_on_mousedown;
260 static int loop=1;
261 static int framedrop=1;
262
263 static int rdftspeed=20;
264 #if CONFIG_AVFILTER
265 static char *vfilters = NULL;
266 #endif
267
268 /* current context */
269 static int is_full_screen;
270 static VideoState *cur_stream;
271 static int64_t audio_callback_time;
272
273 static AVPacket flush_pkt;
274
275 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
276 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
277 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
278
279 static SDL_Surface *screen;
280
281 static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
282
283 /* packet queue handling */
284 static void packet_queue_init(PacketQueue *q)
285 {
286     memset(q, 0, sizeof(PacketQueue));
287     q->mutex = SDL_CreateMutex();
288     q->cond = SDL_CreateCond();
289     packet_queue_put(q, &flush_pkt);
290 }
291
292 static void packet_queue_flush(PacketQueue *q)
293 {
294     AVPacketList *pkt, *pkt1;
295
296     SDL_LockMutex(q->mutex);
297     for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
298         pkt1 = pkt->next;
299         av_free_packet(&pkt->pkt);
300         av_freep(&pkt);
301     }
302     q->last_pkt = NULL;
303     q->first_pkt = NULL;
304     q->nb_packets = 0;
305     q->size = 0;
306     SDL_UnlockMutex(q->mutex);
307 }
308
309 static void packet_queue_end(PacketQueue *q)
310 {
311     packet_queue_flush(q);
312     SDL_DestroyMutex(q->mutex);
313     SDL_DestroyCond(q->cond);
314 }
315
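/* append a packet to the queue; the packet is duplicated (so the caller may
   reuse it) unless it is the global flush packet, and any thread waiting in
   packet_queue_get() is signalled */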
316 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
317 {
318     AVPacketList *pkt1;
319
320     /* duplicate the packet */
321     if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
322         return -1;
323
324     pkt1 = av_malloc(sizeof(AVPacketList));
325     if (!pkt1)
326         return -1;
327     pkt1->pkt = *pkt;
328     pkt1->next = NULL;
329
330
331     SDL_LockMutex(q->mutex);
332
333     if (!q->last_pkt)
334
335         q->first_pkt = pkt1;
336     else
337         q->last_pkt->next = pkt1;
338     q->last_pkt = pkt1;
339     q->nb_packets++;
340     q->size += pkt1->pkt.size + sizeof(*pkt1);
341     /* XXX: should duplicate packet data in DV case */
342     SDL_CondSignal(q->cond);
343
344     SDL_UnlockMutex(q->mutex);
345     return 0;
346 }
347
348 static void packet_queue_abort(PacketQueue *q)
349 {
350     SDL_LockMutex(q->mutex);
351
352     q->abort_request = 1;
353
354     SDL_CondSignal(q->cond);
355
356     SDL_UnlockMutex(q->mutex);
357 }
358
359 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
360 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
361 {
362     AVPacketList *pkt1;
363     int ret;
364
365     SDL_LockMutex(q->mutex);
366
367     for(;;) {
368         if (q->abort_request) {
369             ret = -1;
370             break;
371         }
372
373         pkt1 = q->first_pkt;
374         if (pkt1) {
375             q->first_pkt = pkt1->next;
376             if (!q->first_pkt)
377                 q->last_pkt = NULL;
378             q->nb_packets--;
379             q->size -= pkt1->pkt.size + sizeof(*pkt1);
380             *pkt = pkt1->pkt;
381             av_free(pkt1);
382             ret = 1;
383             break;
384         } else if (!block) {
385             ret = 0;
386             break;
387         } else {
388             SDL_CondWait(q->cond, q->mutex);
389         }
390     }
391     SDL_UnlockMutex(q->mutex);
392     return ret;
393 }
394
395 static inline void fill_rectangle(SDL_Surface *screen,
396                                   int x, int y, int w, int h, int color)
397 {
398     SDL_Rect rect;
399     rect.x = x;
400     rect.y = y;
401     rect.w = w;
402     rect.h = h;
403     SDL_FillRect(screen, &rect, color);
404 }
405
406 #define ALPHA_BLEND(a, oldp, newp, s)\
407 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
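/* ALPHA_BLEND mixes newp into oldp with alpha a (0..255):
 *   result = (oldp * (255 - a) + newp * a) / 255
 * The shift s lets newp be the sum of 2^s samples (while oldp stays a single
 * sample), so subsampled chroma can be blended against an averaged value. */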
408
409 #define RGBA_IN(r, g, b, a, s)\
410 {\
411     unsigned int v = ((const uint32_t *)(s))[0];\
412     a = (v >> 24) & 0xff;\
413     r = (v >> 16) & 0xff;\
414     g = (v >> 8) & 0xff;\
415     b = v & 0xff;\
416 }
417
418 #define YUVA_IN(y, u, v, a, s, pal)\
419 {\
420     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
421     a = (val >> 24) & 0xff;\
422     y = (val >> 16) & 0xff;\
423     u = (val >> 8) & 0xff;\
424     v = val & 0xff;\
425 }
426
427 #define YUVA_OUT(d, y, u, v, a)\
428 {\
429     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
430 }
431
432
433 #define BPP 1
434
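/* Blend one palettized subtitle rectangle onto a YUV420P picture. The palette
 * (rect->pict.data[1]) is already in YUVA, see the YUVA_IN macro above. Luma is
 * blended per pixel; chroma is blended per 2x2 block, which is why the loops
 * below handle an odd first row, pairs of rows, and an odd last row separately,
 * accumulating u1/v1/a1 over each block. */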
435 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
436 {
437     int wrap, wrap3, width2, skip2;
438     int y, u, v, a, u1, v1, a1, w, h;
439     uint8_t *lum, *cb, *cr;
440     const uint8_t *p;
441     const uint32_t *pal;
442     int dstx, dsty, dstw, dsth;
443
444     dstw = av_clip(rect->w, 0, imgw);
445     dsth = av_clip(rect->h, 0, imgh);
446     dstx = av_clip(rect->x, 0, imgw - dstw);
447     dsty = av_clip(rect->y, 0, imgh - dsth);
448     lum = dst->data[0] + dsty * dst->linesize[0];
449     cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
450     cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
451
452     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
453     skip2 = dstx >> 1;
454     wrap = dst->linesize[0];
455     wrap3 = rect->pict.linesize[0];
456     p = rect->pict.data[0];
457     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
458
459     if (dsty & 1) {
460         lum += dstx;
461         cb += skip2;
462         cr += skip2;
463
464         if (dstx & 1) {
465             YUVA_IN(y, u, v, a, p, pal);
466             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
467             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
468             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
469             cb++;
470             cr++;
471             lum++;
472             p += BPP;
473         }
474         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
475             YUVA_IN(y, u, v, a, p, pal);
476             u1 = u;
477             v1 = v;
478             a1 = a;
479             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
480
481             YUVA_IN(y, u, v, a, p + BPP, pal);
482             u1 += u;
483             v1 += v;
484             a1 += a;
485             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
486             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
487             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
488             cb++;
489             cr++;
490             p += 2 * BPP;
491             lum += 2;
492         }
493         if (w) {
494             YUVA_IN(y, u, v, a, p, pal);
495             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
496             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
497             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
498             p++;
499             lum++;
500         }
501         p += wrap3 - dstw * BPP;
502         lum += wrap - dstw - dstx;
503         cb += dst->linesize[1] - width2 - skip2;
504         cr += dst->linesize[2] - width2 - skip2;
505     }
506     for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
507         lum += dstx;
508         cb += skip2;
509         cr += skip2;
510
511         if (dstx & 1) {
512             YUVA_IN(y, u, v, a, p, pal);
513             u1 = u;
514             v1 = v;
515             a1 = a;
516             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
517             p += wrap3;
518             lum += wrap;
519             YUVA_IN(y, u, v, a, p, pal);
520             u1 += u;
521             v1 += v;
522             a1 += a;
523             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
524             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
525             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
526             cb++;
527             cr++;
528             p += -wrap3 + BPP;
529             lum += -wrap + 1;
530         }
531         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
532             YUVA_IN(y, u, v, a, p, pal);
533             u1 = u;
534             v1 = v;
535             a1 = a;
536             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
537
538             YUVA_IN(y, u, v, a, p + BPP, pal);
539             u1 += u;
540             v1 += v;
541             a1 += a;
542             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
543             p += wrap3;
544             lum += wrap;
545
546             YUVA_IN(y, u, v, a, p, pal);
547             u1 += u;
548             v1 += v;
549             a1 += a;
550             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
551
552             YUVA_IN(y, u, v, a, p + BPP, pal);
553             u1 += u;
554             v1 += v;
555             a1 += a;
556             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
557
558             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
559             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
560
561             cb++;
562             cr++;
563             p += -wrap3 + 2 * BPP;
564             lum += -wrap + 2;
565         }
566         if (w) {
567             YUVA_IN(y, u, v, a, p, pal);
568             u1 = u;
569             v1 = v;
570             a1 = a;
571             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
572             p += wrap3;
573             lum += wrap;
574             YUVA_IN(y, u, v, a, p, pal);
575             u1 += u;
576             v1 += v;
577             a1 += a;
578             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
579             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
580             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
581             cb++;
582             cr++;
583             p += -wrap3 + BPP;
584             lum += -wrap + 1;
585         }
586         p += wrap3 + (wrap3 - dstw * BPP);
587         lum += wrap + (wrap - dstw - dstx);
588         cb += dst->linesize[1] - width2 - skip2;
589         cr += dst->linesize[2] - width2 - skip2;
590     }
591     /* handle odd height */
592     if (h) {
593         lum += dstx;
594         cb += skip2;
595         cr += skip2;
596
597         if (dstx & 1) {
598             YUVA_IN(y, u, v, a, p, pal);
599             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
600             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
601             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
602             cb++;
603             cr++;
604             lum++;
605             p += BPP;
606         }
607         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
608             YUVA_IN(y, u, v, a, p, pal);
609             u1 = u;
610             v1 = v;
611             a1 = a;
612             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
613
614             YUVA_IN(y, u, v, a, p + BPP, pal);
615             u1 += u;
616             v1 += v;
617             a1 += a;
618             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
619             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1); /* use the summed chroma, as in the two-row loop above */
620             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
621             cb++;
622             cr++;
623             p += 2 * BPP;
624             lum += 2;
625         }
626         if (w) {
627             YUVA_IN(y, u, v, a, p, pal);
628             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
629             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
630             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
631         }
632     }
633 }
634
635 static void free_subpicture(SubPicture *sp)
636 {
637     avsubtitle_free(&sp->sub);
638 }
639
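/* Display the picture at the read index of the picture queue: blend any
 * pending subtitle into the YUV overlay, compute a display rectangle that
 * preserves the picture aspect ratio centered in the window, and show it. */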
640 static void video_image_display(VideoState *is)
641 {
642     VideoPicture *vp;
643     SubPicture *sp;
644     AVPicture pict;
645     float aspect_ratio;
646     int width, height, x, y;
647     SDL_Rect rect;
648     int i;
649
650     vp = &is->pictq[is->pictq_rindex];
651     if (vp->bmp) {
652 #if CONFIG_AVFILTER
653          if (vp->picref->video->pixel_aspect.num == 0)
654              aspect_ratio = 0;
655          else
656              aspect_ratio = av_q2d(vp->picref->video->pixel_aspect);
657 #else
658
659         /* XXX: use the sample aspect ratio stored in the frame, when available */
660         if (is->video_st->sample_aspect_ratio.num)
661             aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
662         else if (is->video_st->codec->sample_aspect_ratio.num)
663             aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
664         else
665             aspect_ratio = 0;
666 #endif
667         if (aspect_ratio <= 0.0)
668             aspect_ratio = 1.0;
669         aspect_ratio *= (float)vp->width / (float)vp->height;
670
671         if (is->subtitle_st)
672         {
673             if (is->subpq_size > 0)
674             {
675                 sp = &is->subpq[is->subpq_rindex];
676
677                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
678                 {
679                     SDL_LockYUVOverlay (vp->bmp);
680
681                     pict.data[0] = vp->bmp->pixels[0];
682                     pict.data[1] = vp->bmp->pixels[2];
683                     pict.data[2] = vp->bmp->pixels[1];
684
685                     pict.linesize[0] = vp->bmp->pitches[0];
686                     pict.linesize[1] = vp->bmp->pitches[2];
687                     pict.linesize[2] = vp->bmp->pitches[1];
688
689                     for (i = 0; i < sp->sub.num_rects; i++)
690                         blend_subrect(&pict, sp->sub.rects[i],
691                                       vp->bmp->w, vp->bmp->h);
692
693                     SDL_UnlockYUVOverlay (vp->bmp);
694                 }
695             }
696         }
697
698
699         /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
700         height = is->height;
701         width = ((int)rint(height * aspect_ratio)) & ~1;
702         if (width > is->width) {
703             width = is->width;
704             height = ((int)rint(width / aspect_ratio)) & ~1;
705         }
706         x = (is->width - width) / 2;
707         y = (is->height - height) / 2;
708         is->no_background = 0;
709         rect.x = is->xleft + x;
710         rect.y = is->ytop  + y;
711         rect.w = width;
712         rect.h = height;
713         SDL_DisplayYUVOverlay(vp->bmp, &rect);
714     }
715 }
716
717 /* get the amount of data remaining in the audio output buffer, in bytes.
718    With SDL we cannot get precise hardware buffer fullness information */
719 static int audio_write_get_buf_size(VideoState *is)
720 {
721     return is->audio_buf_size - is->audio_buf_index;
722 }
723
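/* positive modulo: map a into [0, b) even when a is negative */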
724 static inline int compute_mod(int a, int b)
725 {
726     a = a % b;
727     if (a >= 0)
728         return a;
729     else
730         return a + b;
731 }
732
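/* Audio-only visualization. With show_audio == 1 the raw waveform of each
 * channel is drawn across the window; otherwise an RDFT of the most recent
 * samples is computed and one spectrum column is drawn per refresh at xpos,
 * producing a scrolling spectrogram. The display is centered on the samples
 * currently being played by estimating the audio output delay. */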
733 static void video_audio_display(VideoState *s)
734 {
735     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
736     int ch, channels, h, h2, bgcolor, fgcolor;
737     int64_t time_diff; /* microseconds since the audio callback; int16_t could overflow */
738     int rdft_bits, nb_freq;
739
740     for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
741         ;
742     nb_freq= 1<<(rdft_bits-1);
743
744     /* compute display index: center on the currently output samples */
745     channels = s->audio_st->codec->channels;
746     nb_display_channels = channels;
747     if (!s->paused) {
748         int data_used= s->show_audio==1 ? s->width : (2*nb_freq);
749         n = 2 * channels;
750         delay = audio_write_get_buf_size(s);
751         delay /= n;
752
753         /* to be more precise, we take into account the time spent since
754            the last buffer computation */
755         if (audio_callback_time) {
756             time_diff = av_gettime() - audio_callback_time;
757             delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
758         }
759
760         delay += 2*data_used;
761         if (delay < data_used)
762             delay = data_used;
763
764         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
765         if(s->show_audio==1){
766             h= INT_MIN;
767             for(i=0; i<1000; i+=channels){
768                 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
769                 int a= s->sample_array[idx];
770                 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
771                 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
772                 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
773                 int score= a-d;
774                 if(h<score && (b^c)<0){
775                     h= score;
776                     i_start= idx;
777                 }
778             }
779         }
780
781         s->last_i_start = i_start;
782     } else {
783         i_start = s->last_i_start;
784     }
785
786     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
787     if(s->show_audio==1){
788         fill_rectangle(screen,
789                        s->xleft, s->ytop, s->width, s->height,
790                        bgcolor);
791
792         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
793
794         /* total height for one channel */
795         h = s->height / nb_display_channels;
796         /* graph height / 2 */
797         h2 = (h * 9) / 20;
798         for(ch = 0;ch < nb_display_channels; ch++) {
799             i = i_start + ch;
800             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
801             for(x = 0; x < s->width; x++) {
802                 y = (s->sample_array[i] * h2) >> 15;
803                 if (y < 0) {
804                     y = -y;
805                     ys = y1 - y;
806                 } else {
807                     ys = y1;
808                 }
809                 fill_rectangle(screen,
810                                s->xleft + x, ys, 1, y,
811                                fgcolor);
812                 i += channels;
813                 if (i >= SAMPLE_ARRAY_SIZE)
814                     i -= SAMPLE_ARRAY_SIZE;
815             }
816         }
817
818         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
819
820         for(ch = 1;ch < nb_display_channels; ch++) {
821             y = s->ytop + ch * h;
822             fill_rectangle(screen,
823                            s->xleft, y, s->width, 1,
824                            fgcolor);
825         }
826         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
827     }else{
828         nb_display_channels= FFMIN(nb_display_channels, 2);
829         if(rdft_bits != s->rdft_bits){
830             av_rdft_end(s->rdft);
831             av_free(s->rdft_data);
832             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
833             s->rdft_bits= rdft_bits;
834             s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
835         }
836         {
837             FFTSample *data[2];
838             for(ch = 0;ch < nb_display_channels; ch++) {
839                 data[ch] = s->rdft_data + 2*nb_freq*ch;
840                 i = i_start + ch;
841                 for(x = 0; x < 2*nb_freq; x++) {
842                     double w= (x-nb_freq)*(1.0/nb_freq);
843                     data[ch][x]= s->sample_array[i]*(1.0-w*w);
844                     i += channels;
845                     if (i >= SAMPLE_ARRAY_SIZE)
846                         i -= SAMPLE_ARRAY_SIZE;
847                 }
848                 av_rdft_calc(s->rdft, data[ch]);
849             }
850             // Least efficient way to do this; we could access the pixels directly, but it's more than fast enough
851             for(y=0; y<s->height; y++){
852                 double w= 1/sqrt(nb_freq);
853                 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
854                 int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
855                        + data[1][2*y+1]*data[1][2*y+1])) : a;
856                 a= FFMIN(a,255);
857                 b= FFMIN(b,255);
858                 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
859
860                 fill_rectangle(screen,
861                             s->xpos, s->height-y, 1, 1,
862                             fgcolor);
863             }
864         }
865         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
866         s->xpos++;
867         if(s->xpos >= s->width)
868             s->xpos= s->xleft;
869     }
870 }
871
872 static int video_open(VideoState *is){
873     int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
874     int w,h;
875
876     if(is_full_screen) flags |= SDL_FULLSCREEN;
877     else               flags |= SDL_RESIZABLE;
878
879     if (is_full_screen && fs_screen_width) {
880         w = fs_screen_width;
881         h = fs_screen_height;
882     } else if(!is_full_screen && screen_width){
883         w = screen_width;
884         h = screen_height;
885 #if CONFIG_AVFILTER
886     }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
887         w = is->out_video_filter->inputs[0]->w;
888         h = is->out_video_filter->inputs[0]->h;
889 #else
890     }else if (is->video_st && is->video_st->codec->width){
891         w = is->video_st->codec->width;
892         h = is->video_st->codec->height;
893 #endif
894     } else {
895         w = 640;
896         h = 480;
897     }
898     if(screen && is->width == screen->w && screen->w == w
899        && is->height== screen->h && screen->h == h)
900         return 0;
901
902 #ifndef __APPLE__
903     screen = SDL_SetVideoMode(w, h, 0, flags);
904 #else
905     /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
906     screen = SDL_SetVideoMode(w, h, 24, flags);
907 #endif
908     if (!screen) {
909         fprintf(stderr, "SDL: could not set video mode - exiting\n");
910         return -1;
911     }
912     if (!window_title)
913         window_title = input_filename;
914     SDL_WM_SetCaption(window_title, window_title);
915
916     is->width = screen->w;
917     is->height = screen->h;
918
919     return 0;
920 }
921
922 /* display the current picture, if any */
923 static void video_display(VideoState *is)
924 {
925     if(!screen)
926         video_open(cur_stream);
927     if (is->audio_st && is->show_audio)
928         video_audio_display(is);
929     else if (is->video_st)
930         video_image_display(is);
931 }
932
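/* Periodically push FF_REFRESH_EVENT so the event loop refreshes the display;
   polls roughly every rdftspeed ms while the audio visualization is shown,
   otherwise every 5 ms. */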
933 static int refresh_thread(void *opaque)
934 {
935     VideoState *is= opaque;
936     while(!is->abort_request){
937         SDL_Event event;
938         event.type = FF_REFRESH_EVENT;
939         event.user.data1 = opaque;
940         if(!is->refresh){
941             is->refresh=1;
942             SDL_PushEvent(&event);
943         }
944         usleep(is->audio_st && is->show_audio ? rdftspeed*1000 : 5000); //FIXME ideally we should wait for the correct time, but SDL's event passing is so slow it would be silly
945     }
946     return 0;
947 }
948
949 /* get the current audio clock value */
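/* i.e. the pts of the last decoded audio data minus the portion still waiting
   in the output buffer; the factor 2 below assumes 16-bit (s16) samples */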
950 static double get_audio_clock(VideoState *is)
951 {
952     double pts;
953     int hw_buf_size, bytes_per_sec;
954     pts = is->audio_clock;
955     hw_buf_size = audio_write_get_buf_size(is);
956     bytes_per_sec = 0;
957     if (is->audio_st) {
958         bytes_per_sec = is->audio_st->codec->sample_rate *
959             2 * is->audio_st->codec->channels;
960     }
961     if (bytes_per_sec)
962         pts -= (double)hw_buf_size / bytes_per_sec;
963     return pts;
964 }
965
966 /* get the current video clock value */
967 static double get_video_clock(VideoState *is)
968 {
969     if (is->paused) {
970         return is->video_current_pts;
971     } else {
972         return is->video_current_pts_drift + av_gettime() / 1000000.0;
973     }
974 }
975
976 /* get the current external clock value */
977 static double get_external_clock(VideoState *is)
978 {
979     int64_t ti;
980     ti = av_gettime();
981     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
982 }
983
984 /* get the current master clock value */
985 static double get_master_clock(VideoState *is)
986 {
987     double val;
988
989     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
990         if (is->video_st)
991             val = get_video_clock(is);
992         else
993             val = get_audio_clock(is);
994     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
995         if (is->audio_st)
996             val = get_audio_clock(is);
997         else
998             val = get_video_clock(is);
999     } else {
1000         val = get_external_clock(is);
1001     }
1002     return val;
1003 }
1004
1005 /* seek in the stream */
1006 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1007 {
1008     if (!is->seek_req) {
1009         is->seek_pos = pos;
1010         is->seek_rel = rel;
1011         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1012         if (seek_by_bytes)
1013             is->seek_flags |= AVSEEK_FLAG_BYTE;
1014         is->seek_req = 1;
1015     }
1016 }
1017
1018 /* pause or resume the video */
1019 static void stream_pause(VideoState *is)
1020 {
1021     if (is->paused) {
1022         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1023         if(is->read_pause_return != AVERROR(ENOSYS)){
1024             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1025         }
1026         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1027     }
1028     is->paused = !is->paused;
1029 }
1030
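/* Compute the time (on the frame_timer clock, in seconds) at which the frame
 * with the given pts should be displayed. The nominal delay is the pts step
 * from the previous frame; when video is slave to the audio or external clock,
 * the delay is dropped to 0 or doubled once the video drifts beyond
 * sync_threshold, so frames get repeated or effectively skipped to resync. */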
1031 static double compute_target_time(double frame_current_pts, VideoState *is)
1032 {
1033     double delay, sync_threshold, diff = 0; /* diff is only computed in the slave-sync branch but is logged below */
1034
1035     /* compute nominal delay */
1036     delay = frame_current_pts - is->frame_last_pts;
1037     if (delay <= 0 || delay >= 10.0) {
1038         /* if incorrect delay, use previous one */
1039         delay = is->frame_last_delay;
1040     } else {
1041         is->frame_last_delay = delay;
1042     }
1043     is->frame_last_pts = frame_current_pts;
1044
1045     /* update delay to follow master synchronisation source */
1046     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1047          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1048         /* if video is slave, we try to correct big delays by
1049            duplicating or deleting a frame */
1050         diff = get_video_clock(is) - get_master_clock(is);
1051
1052         /* skip or repeat frame. We take into account the
1053            delay to compute the threshold. I still don't know
1054            if it is the best guess */
1055         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1056         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1057             if (diff <= -sync_threshold)
1058                 delay = 0;
1059             else if (diff >= sync_threshold)
1060                 delay = 2 * delay;
1061         }
1062     }
1063     is->frame_timer += delay;
1064
1065     av_dlog(NULL, "video: delay=%0.3f pts=%0.3f A-V=%f\n",
1066             delay, frame_current_pts, -diff);
1067
1068     return is->frame_timer;
1069 }
1070
1071 /* called to display each frame */
1072 static void video_refresh_timer(void *opaque)
1073 {
1074     VideoState *is = opaque;
1075     VideoPicture *vp;
1076
1077     SubPicture *sp, *sp2;
1078
1079     if (is->video_st) {
1080 retry:
1081         if (is->pictq_size == 0) {
1082             // nothing to do, no picture to display in the queue
1083         } else {
1084             double time= av_gettime()/1000000.0;
1085             double next_target;
1086             /* dequeue the picture */
1087             vp = &is->pictq[is->pictq_rindex];
1088
1089             if(time < vp->target_clock)
1090                 return;
1091             /* update current video pts */
1092             is->video_current_pts = vp->pts;
1093             is->video_current_pts_drift = is->video_current_pts - time;
1094             is->video_current_pos = vp->pos;
1095             if(is->pictq_size > 1){
1096                 VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
1097                 assert(nextvp->target_clock >= vp->target_clock);
1098                 next_target= nextvp->target_clock;
1099             }else{
1100                 next_target= vp->target_clock + is->video_clock - vp->pts; //FIXME pass durations cleanly
1101             }
1102             if(framedrop && time > next_target){
1103                 is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
1104                 if(is->pictq_size > 1 || time > next_target + 0.5){
1105                     /* update queue size and signal for next picture */
1106                     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1107                         is->pictq_rindex = 0;
1108
1109                     SDL_LockMutex(is->pictq_mutex);
1110                     is->pictq_size--;
1111                     SDL_CondSignal(is->pictq_cond);
1112                     SDL_UnlockMutex(is->pictq_mutex);
1113                     goto retry;
1114                 }
1115             }
1116
1117             if(is->subtitle_st) {
1118                 if (is->subtitle_stream_changed) {
1119                     SDL_LockMutex(is->subpq_mutex);
1120
1121                     while (is->subpq_size) {
1122                         free_subpicture(&is->subpq[is->subpq_rindex]);
1123
1124                         /* update queue size and signal for next picture */
1125                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1126                             is->subpq_rindex = 0;
1127
1128                         is->subpq_size--;
1129                     }
1130                     is->subtitle_stream_changed = 0;
1131
1132                     SDL_CondSignal(is->subpq_cond);
1133                     SDL_UnlockMutex(is->subpq_mutex);
1134                 } else {
1135                     if (is->subpq_size > 0) {
1136                         sp = &is->subpq[is->subpq_rindex];
1137
1138                         if (is->subpq_size > 1)
1139                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1140                         else
1141                             sp2 = NULL;
1142
1143                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1144                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1145                         {
1146                             free_subpicture(sp);
1147
1148                             /* update queue size and signal for next picture */
1149                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1150                                 is->subpq_rindex = 0;
1151
1152                             SDL_LockMutex(is->subpq_mutex);
1153                             is->subpq_size--;
1154                             SDL_CondSignal(is->subpq_cond);
1155                             SDL_UnlockMutex(is->subpq_mutex);
1156                         }
1157                     }
1158                 }
1159             }
1160
1161             /* display picture */
1162             if (!display_disable)
1163                 video_display(is);
1164
1165             /* update queue size and signal for next picture */
1166             if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1167                 is->pictq_rindex = 0;
1168
1169             SDL_LockMutex(is->pictq_mutex);
1170             is->pictq_size--;
1171             SDL_CondSignal(is->pictq_cond);
1172             SDL_UnlockMutex(is->pictq_mutex);
1173         }
1174     } else if (is->audio_st) {
1175         /* draw the next audio frame */
1176
1177         /* if there is only an audio stream, then display the audio bars (better
1178            than nothing, just to test the implementation) */
1179
1180         /* display picture */
1181         if (!display_disable)
1182             video_display(is);
1183     }
1184     if (show_status) {
1185         static int64_t last_time;
1186         int64_t cur_time;
1187         int aqsize, vqsize, sqsize;
1188         double av_diff;
1189
1190         cur_time = av_gettime();
1191         if (!last_time || (cur_time - last_time) >= 30000) {
1192             aqsize = 0;
1193             vqsize = 0;
1194             sqsize = 0;
1195             if (is->audio_st)
1196                 aqsize = is->audioq.size;
1197             if (is->video_st)
1198                 vqsize = is->videoq.size;
1199             if (is->subtitle_st)
1200                 sqsize = is->subtitleq.size;
1201             av_diff = 0;
1202             if (is->audio_st && is->video_st)
1203                 av_diff = get_audio_clock(is) - get_video_clock(is);
1204             printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1205                    get_master_clock(is), av_diff, FFMAX(is->skip_frames-1, 0), aqsize / 1024, vqsize / 1024, sqsize, is->pts_ctx.num_faulty_dts, is->pts_ctx.num_faulty_pts);
1206             fflush(stdout);
1207             last_time = cur_time;
1208         }
1209     }
1210 }
1211
1212 static void stream_close(VideoState *is)
1213 {
1214     VideoPicture *vp;
1215     int i;
1216     /* XXX: use a special url_shutdown call to abort parse cleanly */
1217     is->abort_request = 1;
1218     SDL_WaitThread(is->parse_tid, NULL);
1219     SDL_WaitThread(is->refresh_tid, NULL);
1220
1221     /* free all pictures */
1222     for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
1223         vp = &is->pictq[i];
1224 #if CONFIG_AVFILTER
1225         if (vp->picref) {
1226             avfilter_unref_buffer(vp->picref);
1227             vp->picref = NULL;
1228         }
1229 #endif
1230         if (vp->bmp) {
1231             SDL_FreeYUVOverlay(vp->bmp);
1232             vp->bmp = NULL;
1233         }
1234     }
1235     SDL_DestroyMutex(is->pictq_mutex);
1236     SDL_DestroyCond(is->pictq_cond);
1237     SDL_DestroyMutex(is->subpq_mutex);
1238     SDL_DestroyCond(is->subpq_cond);
1239 #if !CONFIG_AVFILTER
1240     if (is->img_convert_ctx)
1241         sws_freeContext(is->img_convert_ctx);
1242 #endif
1243     av_free(is);
1244 }
1245
1246 static void do_exit(void)
1247 {
1248     if (cur_stream) {
1249         stream_close(cur_stream);
1250         cur_stream = NULL;
1251     }
1252     uninit_opts();
1253 #if CONFIG_AVFILTER
1254     avfilter_uninit();
1255 #endif
1256     if (show_status)
1257         printf("\n");
1258     SDL_Quit();
1259     av_log(NULL, AV_LOG_QUIET, "");
1260     exit(0);
1261 }
1262
1263 /* allocate a picture (this needs to be done in the main thread to avoid
1264    potential locking problems) */
1265 static void alloc_picture(void *opaque)
1266 {
1267     VideoState *is = opaque;
1268     VideoPicture *vp;
1269
1270     vp = &is->pictq[is->pictq_windex];
1271
1272     if (vp->bmp)
1273         SDL_FreeYUVOverlay(vp->bmp);
1274
1275 #if CONFIG_AVFILTER
1276     if (vp->picref)
1277         avfilter_unref_buffer(vp->picref);
1278     vp->picref = NULL;
1279
1280     vp->width   = is->out_video_filter->inputs[0]->w;
1281     vp->height  = is->out_video_filter->inputs[0]->h;
1282     vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1283 #else
1284     vp->width   = is->video_st->codec->width;
1285     vp->height  = is->video_st->codec->height;
1286     vp->pix_fmt = is->video_st->codec->pix_fmt;
1287 #endif
1288
1289     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1290                                    SDL_YV12_OVERLAY,
1291                                    screen);
1292     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1293         /* SDL allocates a buffer smaller than requested if the video
1294          * overlay hardware is unable to support the requested size. */
1295         fprintf(stderr, "Error: the video system does not support an image\n"
1296                         "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1297                         "to reduce the image size.\n", vp->width, vp->height );
1298         do_exit();
1299     }
1300
1301     SDL_LockMutex(is->pictq_mutex);
1302     vp->allocated = 1;
1303     SDL_CondSignal(is->pictq_cond);
1304     SDL_UnlockMutex(is->pictq_mutex);
1305 }
1306
1307 /**
1308  * Queue a decoded frame for display.
1309  * @param pts the pts of the frame; the packet dts, or a guessed value, is used when the frame pts is not known
1310  */
1311 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
1312 {
1313     VideoPicture *vp;
1314 #if CONFIG_AVFILTER
1315     AVPicture pict_src;
1316 #else
1317     int dst_pix_fmt = PIX_FMT_YUV420P;
1318 #endif
1319     /* wait until we have space to put a new picture */
1320     SDL_LockMutex(is->pictq_mutex);
1321
1322     if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1323         is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));
1324
1325     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1326            !is->videoq.abort_request) {
1327         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1328     }
1329     SDL_UnlockMutex(is->pictq_mutex);
1330
1331     if (is->videoq.abort_request)
1332         return -1;
1333
1334     vp = &is->pictq[is->pictq_windex];
1335
1336     /* alloc or resize hardware picture buffer */
1337     if (!vp->bmp ||
1338 #if CONFIG_AVFILTER
1339         vp->width  != is->out_video_filter->inputs[0]->w ||
1340         vp->height != is->out_video_filter->inputs[0]->h) {
1341 #else
1342         vp->width != is->video_st->codec->width ||
1343         vp->height != is->video_st->codec->height) {
1344 #endif
1345         SDL_Event event;
1346
1347         vp->allocated = 0;
1348
1349         /* the allocation must be done in the main thread to avoid
1350            locking problems */
1351         event.type = FF_ALLOC_EVENT;
1352         event.user.data1 = is;
1353         SDL_PushEvent(&event);
1354
1355         /* wait until the picture is allocated */
1356         SDL_LockMutex(is->pictq_mutex);
1357         while (!vp->allocated && !is->videoq.abort_request) {
1358             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1359         }
1360         SDL_UnlockMutex(is->pictq_mutex);
1361
1362         if (is->videoq.abort_request)
1363             return -1;
1364     }
1365
1366     /* if the frame is not skipped, then display it */
1367     if (vp->bmp) {
1368         AVPicture pict;
1369 #if CONFIG_AVFILTER
1370         if(vp->picref)
1371             avfilter_unref_buffer(vp->picref);
1372         vp->picref = src_frame->opaque;
1373 #endif
1374
1375         /* get a pointer on the bitmap */
1376         SDL_LockYUVOverlay (vp->bmp);
1377
1378         memset(&pict,0,sizeof(AVPicture));
1379         pict.data[0] = vp->bmp->pixels[0];
1380         pict.data[1] = vp->bmp->pixels[2];
1381         pict.data[2] = vp->bmp->pixels[1];
1382
1383         pict.linesize[0] = vp->bmp->pitches[0];
1384         pict.linesize[1] = vp->bmp->pitches[2];
1385         pict.linesize[2] = vp->bmp->pitches[1];
1386
1387 #if CONFIG_AVFILTER
1388         pict_src.data[0] = src_frame->data[0];
1389         pict_src.data[1] = src_frame->data[1];
1390         pict_src.data[2] = src_frame->data[2];
1391
1392         pict_src.linesize[0] = src_frame->linesize[0];
1393         pict_src.linesize[1] = src_frame->linesize[1];
1394         pict_src.linesize[2] = src_frame->linesize[2];
1395
1396         //FIXME use direct rendering
1397         av_picture_copy(&pict, &pict_src,
1398                         vp->pix_fmt, vp->width, vp->height);
1399 #else
1400         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1401         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1402             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1403             dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1404         if (is->img_convert_ctx == NULL) {
1405             fprintf(stderr, "Cannot initialize the conversion context\n");
1406             exit(1);
1407         }
1408         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1409                   0, vp->height, pict.data, pict.linesize);
1410 #endif
1411         /* update the bitmap content */
1412         SDL_UnlockYUVOverlay(vp->bmp);
1413
1414         vp->pts = pts;
1415         vp->pos = pos;
1416
1417         /* now we can update the picture count */
1418         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1419             is->pictq_windex = 0;
1420         SDL_LockMutex(is->pictq_mutex);
1421         vp->target_clock= compute_target_time(vp->pts, is);
1422
1423         is->pictq_size++;
1424         SDL_UnlockMutex(is->pictq_mutex);
1425     }
1426     return 0;
1427 }
1428
1429 /**
1430  * compute the exact PTS for the picture if it is omitted in the stream
1431  * @param pts1 the pts of the frame, or the packet dts used as a guess when no pts is available
1432  */
1433 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1434 {
1435     double frame_delay, pts;
1436
1437     pts = pts1;
1438
1439     if (pts != 0) {
1440         /* update video clock with pts, if present */
1441         is->video_clock = pts;
1442     } else {
1443         pts = is->video_clock;
1444     }
1445     /* update video clock for next frame */
1446     frame_delay = av_q2d(is->video_st->codec->time_base);
1447     /* for MPEG2, the frame can be repeated, so we update the
1448        clock accordingly */
1449     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1450     is->video_clock += frame_delay;
1451
1452     return queue_picture(is, src_frame, pts, pos);
1453 }
1454
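/* Pull the next packet from the video queue and decode it. The flush packet
 * (queued on seek) resets the decoder, the pending picture queue timing and
 * the pts correction state. Returns 1 when a frame is ready for display,
 * 0 when the packet was consumed (flushed or dropped by frame skipping),
 * and < 0 on abort. */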
1455 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1456 {
1457     int got_picture, i;
1458
1459     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1460         return -1;
1461
1462     if (pkt->data == flush_pkt.data) {
1463         avcodec_flush_buffers(is->video_st->codec);
1464
1465         SDL_LockMutex(is->pictq_mutex);
1466         // Make sure there are no long delay timers (ideally we should just flush the queue, but that's harder)
1467         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1468             is->pictq[i].target_clock= 0;
1469         }
1470         while (is->pictq_size && !is->videoq.abort_request) {
1471             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1472         }
1473         is->video_current_pos = -1;
1474         SDL_UnlockMutex(is->pictq_mutex);
1475
1476         init_pts_correction(&is->pts_ctx);
1477         is->frame_last_pts = AV_NOPTS_VALUE;
1478         is->frame_last_delay = 0;
1479         is->frame_timer = (double)av_gettime() / 1000000.0;
1480         is->skip_frames = 1;
1481         is->skip_frames_index = 0;
1482         return 0;
1483     }
1484
1485     avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt);
1486
1487     if (got_picture) {
1488         if (decoder_reorder_pts == -1) {
1489             *pts = guess_correct_pts(&is->pts_ctx, frame->pkt_pts, frame->pkt_dts);
1490         } else if (decoder_reorder_pts) {
1491             *pts = frame->pkt_pts;
1492         } else {
1493             *pts = frame->pkt_dts;
1494         }
1495
1496         if (*pts == AV_NOPTS_VALUE) {
1497             *pts = 0;
1498         }
1499
1500         is->skip_frames_index += 1;
1501         if(is->skip_frames_index >= is->skip_frames){
1502             is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
1503             return 1;
1504         }
1505
1506     }
1507     return 0;
1508 }
1509
1510 #if CONFIG_AVFILTER
1511 typedef struct {
1512     VideoState *is;
1513     AVFrame *frame;
1514     int use_dr1;
1515 } FilterPriv;
1516
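/* Direct-rendering callbacks for the input filter: when the decoder supports
 * CODEC_CAP_DR1 it is made to decode straight into buffers obtained from the
 * filter graph (with edge padding applied), so input_request_frame() can pass
 * the frame on without an extra copy. */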
1517 static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
1518 {
1519     AVFilterContext *ctx = codec->opaque;
1520     AVFilterBufferRef  *ref;
1521     int perms = AV_PERM_WRITE;
1522     int i, w, h, stride[4];
1523     unsigned edge;
1524     int pixel_size;
1525
1526     if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
1527         perms |= AV_PERM_NEG_LINESIZES;
1528
1529     if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
1530         if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
1531         if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
1532         if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
1533     }
1534     if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;
1535
1536     w = codec->width;
1537     h = codec->height;
1538     avcodec_align_dimensions2(codec, &w, &h, stride);
1539     edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
1540     w += edge << 1;
1541     h += edge << 1;
1542
1543     if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
1544         return -1;
1545
1546     pixel_size = av_pix_fmt_descriptors[ref->format].comp[0].step_minus1+1;
1547     ref->video->w = codec->width;
1548     ref->video->h = codec->height;
1549     for(i = 0; i < 4; i ++) {
1550         unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
1551         unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;
1552
1553         if (ref->data[i]) {
1554             ref->data[i]    += ((edge * pixel_size) >> hshift) + ((edge * ref->linesize[i]) >> vshift);
1555         }
1556         pic->data[i]     = ref->data[i];
1557         pic->linesize[i] = ref->linesize[i];
1558     }
1559     pic->opaque = ref;
1560     pic->age    = INT_MAX;
1561     pic->type   = FF_BUFFER_TYPE_USER;
1562     pic->reordered_opaque = codec->reordered_opaque;
1563     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1564     else           pic->pkt_pts = AV_NOPTS_VALUE;
1565     return 0;
1566 }
1567
1568 static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
1569 {
1570     memset(pic->data, 0, sizeof(pic->data));
1571     avfilter_unref_buffer(pic->opaque);
1572 }
1573
1574 static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
1575 {
1576     AVFilterBufferRef *ref = pic->opaque;
1577
1578     if (pic->data[0] == NULL) {
1579         pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
1580         return codec->get_buffer(codec, pic);
1581     }
1582
1583     if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
1584         (codec->pix_fmt != ref->format)) {
1585         av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
1586         return -1;
1587     }
1588
1589     pic->reordered_opaque = codec->reordered_opaque;
1590     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1591     else           pic->pkt_pts = AV_NOPTS_VALUE;
1592     return 0;
1593 }
1594
1595 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1596 {
1597     FilterPriv *priv = ctx->priv;
1598     AVCodecContext *codec;
1599     if(!opaque) return -1;
1600
1601     priv->is = opaque;
1602     codec    = priv->is->video_st->codec;
1603     codec->opaque = ctx;
1604     if(codec->codec->capabilities & CODEC_CAP_DR1) {
1605         priv->use_dr1 = 1;
1606         codec->get_buffer     = input_get_buffer;
1607         codec->release_buffer = input_release_buffer;
1608         codec->reget_buffer   = input_reget_buffer;
1609         codec->thread_safe_callbacks = 1;
1610     }
1611
1612     priv->frame = avcodec_alloc_frame();
1613
1614     return 0;
1615 }
1616
1617 static void input_uninit(AVFilterContext *ctx)
1618 {
1619     FilterPriv *priv = ctx->priv;
1620     av_free(priv->frame);
1621 }
1622
1623 static int input_request_frame(AVFilterLink *link)
1624 {
1625     FilterPriv *priv = link->src->priv;
1626     AVFilterBufferRef *picref;
1627     int64_t pts = 0;
1628     AVPacket pkt;
1629     int ret;
1630
1631     while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
1632         av_free_packet(&pkt);
1633     if (ret < 0)
1634         return -1;
1635
1636     if(priv->use_dr1) {
1637         picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
1638     } else {
1639         picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
1640         av_image_copy(picref->data, picref->linesize,
1641                       priv->frame->data, priv->frame->linesize,
1642                       picref->format, link->w, link->h);
1643     }
1644     av_free_packet(&pkt);
1645
1646     picref->pts = pts;
1647     picref->pos = pkt.pos;
1648     picref->video->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio;
1649     avfilter_start_frame(link, picref);
1650     avfilter_draw_slice(link, 0, link->h, 1);
1651     avfilter_end_frame(link);
1652
1653     return 0;
1654 }
1655
1656 static int input_query_formats(AVFilterContext *ctx)
1657 {
1658     FilterPriv *priv = ctx->priv;
1659     enum PixelFormat pix_fmts[] = {
1660         priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1661     };
1662
1663     avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
1664     return 0;
1665 }
1666
1667 static int input_config_props(AVFilterLink *link)
1668 {
1669     FilterPriv *priv  = link->src->priv;
1670     AVCodecContext *c = priv->is->video_st->codec;
1671
1672     link->w = c->width;
1673     link->h = c->height;
1674     link->time_base = priv->is->video_st->time_base;
1675
1676     return 0;
1677 }
1678
1679 static AVFilter input_filter =
1680 {
1681     .name      = "avplay_input",
1682
1683     .priv_size = sizeof(FilterPriv),
1684
1685     .init      = input_init,
1686     .uninit    = input_uninit,
1687
1688     .query_formats = input_query_formats,
1689
1690     .inputs    = (AVFilterPad[]) {{ .name = NULL }},
1691     .outputs   = (AVFilterPad[]) {{ .name = "default",
1692                                     .type = AVMEDIA_TYPE_VIDEO,
1693                                     .request_frame = input_request_frame,
1694                                     .config_props  = input_config_props, },
1695                                   { .name = NULL }},
1696 };
1697
1698 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1699 {
1700     char sws_flags_str[128];
1701     int ret;
1702     FFSinkContext ffsink_ctx = { .pix_fmt = PIX_FMT_YUV420P };
1703     AVFilterContext *filt_src = NULL, *filt_out = NULL;
1704     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1705     graph->scale_sws_opts = av_strdup(sws_flags_str);
1706
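         /* The graph always runs between two fixed endpoints: the
          * "avplay_input" source above, which pulls decoded frames from the
          * video stream, and an ffsink configured for PIX_FMT_YUV420P, the
          * format used for display.  A user-supplied -vf chain, if any, is
          * spliced in between them. */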
1707     if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
1708                                             NULL, is, graph)) < 0)
1709         return ret;
1710     if ((ret = avfilter_graph_create_filter(&filt_out, &ffsink, "out",
1711                                             NULL, &ffsink_ctx, graph)) < 0)
1712         return ret;
1713
1714     if(vfilters) {
1715         AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
1716         AVFilterInOut *inputs  = av_malloc(sizeof(AVFilterInOut));
1717
1718         outputs->name    = av_strdup("in");
1719         outputs->filter_ctx = filt_src;
1720         outputs->pad_idx = 0;
1721         outputs->next    = NULL;
1722
1723         inputs->name    = av_strdup("out");
1724         inputs->filter_ctx = filt_out;
1725         inputs->pad_idx = 0;
1726         inputs->next    = NULL;
1727
1728         if ((ret = avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL)) < 0)
1729             return ret;
1730         av_freep(&vfilters);
1731     } else {
1732         if ((ret = avfilter_link(filt_src, 0, filt_out, 0)) < 0)
1733             return ret;
1734     }
1735
1736     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1737         return ret;
1738
1739     is->out_video_filter = filt_out;
1740
1741     return ret;
1742 }
1743
1744 #endif  /* CONFIG_AVFILTER */
1745
1746 static int video_thread(void *arg)
1747 {
1748     VideoState *is = arg;
1749     AVFrame *frame= avcodec_alloc_frame();
1750     int64_t pts_int;
1751     double pts;
1752     int ret;
1753
1754 #if CONFIG_AVFILTER
1755     AVFilterGraph *graph = avfilter_graph_alloc();
1756     AVFilterContext *filt_out = NULL;
1757     int64_t pos;
1758     int last_w = is->video_st->codec->width;
1759     int last_h = is->video_st->codec->height;
1760
1761     if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1762         goto the_end;
1763     filt_out = is->out_video_filter;
1764 #endif
1765
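         /* Main decoding loop: fetch one frame per iteration (through the
          * filter graph when avfilter is enabled, straight from the decoder
          * otherwise), convert its PTS to seconds and hand it to
          * output_picture2() for queueing and display. */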
1766     for(;;) {
1767 #if !CONFIG_AVFILTER
1768         AVPacket pkt;
1769 #else
1770         AVFilterBufferRef *picref;
1771         AVRational tb;
1772 #endif
1773         while (is->paused && !is->videoq.abort_request)
1774             SDL_Delay(10);
1775 #if CONFIG_AVFILTER
1776         if (   last_w != is->video_st->codec->width
1777             || last_h != is->video_st->codec->height) {
1778             av_dlog(NULL, "Changing size %dx%d -> %dx%d\n", last_w, last_h,
1779                     is->video_st->codec->width, is->video_st->codec->height);
1780             avfilter_graph_free(&graph);
1781             graph = avfilter_graph_alloc();
1782             if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1783                 goto the_end;
1784             filt_out = is->out_video_filter;
1785             last_w = is->video_st->codec->width;
1786             last_h = is->video_st->codec->height;
1787         }
1788         ret = get_filtered_video_frame(filt_out, frame, &picref, &tb);
1789         if (picref) {
1790             pts_int = picref->pts;
1791             pos     = picref->pos;
1792             frame->opaque = picref;
1793         }
1794
1795         if (av_cmp_q(tb, is->video_st->time_base)) {
1796             av_unused int64_t pts1 = pts_int;
1797             pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1798             av_dlog(NULL, "video_thread(): "
1799                     "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1800                     tb.num, tb.den, pts1,
1801                     is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1802         }
1803 #else
1804         ret = get_video_frame(is, frame, &pts_int, &pkt);
1805 #endif
1806
1807         if (ret < 0) goto the_end;
1808
1809         if (!ret)
1810             continue;
1811
1812         pts = pts_int*av_q2d(is->video_st->time_base);
1813
1814 #if CONFIG_AVFILTER
1815         ret = output_picture2(is, frame, pts, pos);
1816 #else
1817         ret = output_picture2(is, frame, pts,  pkt.pos);
1818         av_free_packet(&pkt);
1819 #endif
1820         if (ret < 0)
1821             goto the_end;
1822
1823         if (step)
1824             if (cur_stream)
1825                 stream_pause(cur_stream);
1826     }
1827  the_end:
1828 #if CONFIG_AVFILTER
1829     avfilter_graph_free(&graph);
1830 #endif
1831     av_free(frame);
1832     return 0;
1833 }
1834
1835 static int subtitle_thread(void *arg)
1836 {
1837     VideoState *is = arg;
1838     SubPicture *sp;
1839     AVPacket pkt1, *pkt = &pkt1;
1840     int got_subtitle;
1841     double pts;
1842     int i, j;
1843     int r, g, b, y, u, v, a;
1844
1845     for(;;) {
1846         while (is->paused && !is->subtitleq.abort_request) {
1847             SDL_Delay(10);
1848         }
1849         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1850             break;
1851
1852         if(pkt->data == flush_pkt.data){
1853             avcodec_flush_buffers(is->subtitle_st->codec);
1854             continue;
1855         }
1856         SDL_LockMutex(is->subpq_mutex);
1857         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1858                !is->subtitleq.abort_request) {
1859             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1860         }
1861         SDL_UnlockMutex(is->subpq_mutex);
1862
1863         if (is->subtitleq.abort_request)
1864             return 0;
1865
1866         sp = &is->subpq[is->subpq_windex];
1867
1868         /* NOTE: pts is the PTS of the _first_ subtitle beginning in
1869            this packet, if any */
1870         pts = 0;
1871         if (pkt->pts != AV_NOPTS_VALUE)
1872             pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1873
1874         avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
1875                                  &got_subtitle, pkt);
1876
1877         if (got_subtitle && sp->sub.format == 0) {
1878             sp->pts = pts;
1879
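                 /* sub.format == 0 means bitmap subtitles: their palette in
                  * rects[i]->pict.data[1] is stored as RGBA, so convert each
                  * entry in place to CCIR-range YUVA for blending onto the
                  * YUV picture. */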
1880             for (i = 0; i < sp->sub.num_rects; i++)
1881             {
1882                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1883                 {
1884                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1885                     y = RGB_TO_Y_CCIR(r, g, b);
1886                     u = RGB_TO_U_CCIR(r, g, b, 0);
1887                     v = RGB_TO_V_CCIR(r, g, b, 0);
1888                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1889                 }
1890             }
1891
1892             /* now we can update the picture count */
1893             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1894                 is->subpq_windex = 0;
1895             SDL_LockMutex(is->subpq_mutex);
1896             is->subpq_size++;
1897             SDL_UnlockMutex(is->subpq_mutex);
1898         }
1899         av_free_packet(pkt);
1900     }
1901     return 0;
1902 }
1903
1904 /* copy samples for viewing in the audio display window */
1905 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1906 {
1907     int size, len;
1908
1909     size = samples_size / sizeof(short);
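         /* sample_array is a fixed-size ring buffer (SAMPLE_ARRAY_SIZE
          * shorts): copy in chunks and wrap the write index back to 0 when
          * it reaches the end. */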
1910     while (size > 0) {
1911         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1912         if (len > size)
1913             len = size;
1914         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1915         samples += len;
1916         is->sample_array_index += len;
1917         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1918             is->sample_array_index = 0;
1919         size -= len;
1920     }
1921 }
1922
1923 /* return the new audio buffer size (samples can be added or removed
1924    to get better sync when video or an external clock is the master) */
1925 static int synchronize_audio(VideoState *is, short *samples,
1926                              int samples_size1, double pts)
1927 {
1928     int n, samples_size;
1929     double ref_clock;
1930
1931     n = 2 * is->audio_st->codec->channels;
1932     samples_size = samples_size1;
1933
1934     /* if not master, then we try to remove or add samples to correct the clock */
1935     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1936          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1937         double diff, avg_diff;
1938         int wanted_size, min_size, max_size, nb_samples;
1939
1940         ref_clock = get_master_clock(is);
1941         diff = get_audio_clock(is) - ref_clock;
1942
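             /* audio_diff_cum is an exponentially weighted sum of the
              * measured A-V differences: cum = diff + coef * cum.
              * Multiplying by (1 - coef) below turns it into a weighted
              * average; since coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB),
              * measurements older than AUDIO_DIFF_AVG_NB contribute only
              * about 1% of the total weight. */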
1943         if (diff < AV_NOSYNC_THRESHOLD) {
1944             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1945             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1946                 /* not enough measurements yet to have a correct estimate */
1947                 is->audio_diff_avg_count++;
1948             } else {
1949                 /* estimate the A-V difference */
1950                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1951
1952                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1953                     wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1954                     nb_samples = samples_size / n;
1955
1956                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1957                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
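                          /* e.g. a drift of +0.02 s at 44100 Hz stereo S16
                           * (n = 4 bytes per sample frame) asks for roughly
                           * 0.02 * 44100 * 4 = 3528 extra bytes, subject to
                           * the +/-SAMPLE_CORRECTION_PERCENT_MAX clamp below */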
1958                     if (wanted_size < min_size)
1959                         wanted_size = min_size;
1960                     else if (wanted_size > max_size)
1961                         wanted_size = max_size;
1962
1963                     /* add or remove samples to correct the synchronization */
1964                     if (wanted_size < samples_size) {
1965                         /* remove samples */
1966                         samples_size = wanted_size;
1967                     } else if (wanted_size > samples_size) {
1968                         uint8_t *samples_end, *q;
1969                         int nb;
1970
1971                         /* add samples by duplicating the last sample frame */
1972                         nb = (wanted_size - samples_size);
1973                         samples_end = (uint8_t *)samples + samples_size - n;
1974                         q = samples_end + n;
1975                         while (nb > 0) {
1976                             memcpy(q, samples_end, n);
1977                             q += n;
1978                             nb -= n;
1979                         }
1980                         samples_size = wanted_size;
1981                     }
1982                 }
1983                 av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1984                         diff, avg_diff, samples_size - samples_size1,
1985                         is->audio_clock, is->video_clock, is->audio_diff_threshold);
1986             }
1987         } else {
1988             /* too big a difference: probably initial PTS errors, so
1989                reset the A-V filter */
1990             is->audio_diff_avg_count = 0;
1991             is->audio_diff_cum = 0;
1992         }
1993     }
1994
1995     return samples_size;
1996 }
1997
1998 /* decode one audio frame and return its uncompressed size */
1999 static int audio_decode_frame(VideoState *is, double *pts_ptr)
2000 {
2001     AVPacket *pkt_temp = &is->audio_pkt_temp;
2002     AVPacket *pkt = &is->audio_pkt;
2003     AVCodecContext *dec= is->audio_st->codec;
2004     int n, len1, data_size;
2005     double pts;
2006
2007     for(;;) {
2008         /* NOTE: the audio packet can contain several frames */
2009         while (pkt_temp->size > 0) {
2010             data_size = sizeof(is->audio_buf1);
2011             len1 = avcodec_decode_audio3(dec,
2012                                         (int16_t *)is->audio_buf1, &data_size,
2013                                         pkt_temp);
2014             if (len1 < 0) {
2015                 /* if error, we skip the frame */
2016                 pkt_temp->size = 0;
2017                 break;
2018             }
2019
2020             pkt_temp->data += len1;
2021             pkt_temp->size -= len1;
2022             if (data_size <= 0)
2023                 continue;
2024
2025             if (dec->sample_fmt != is->audio_src_fmt) {
2026                 if (is->reformat_ctx)
2027                     av_audio_convert_free(is->reformat_ctx);
2028                 is->reformat_ctx= av_audio_convert_alloc(AV_SAMPLE_FMT_S16, 1,
2029                                                          dec->sample_fmt, 1, NULL, 0);
2030                 if (!is->reformat_ctx) {
2031                     fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
2032                         av_get_sample_fmt_name(dec->sample_fmt),
2033                         av_get_sample_fmt_name(AV_SAMPLE_FMT_S16));
2034                         break;
2035                 }
2036                 is->audio_src_fmt= dec->sample_fmt;
2037             }
2038
2039             if (is->reformat_ctx) {
2040                 const void *ibuf[6]= {is->audio_buf1};
2041                 void *obuf[6]= {is->audio_buf2};
2042                 int istride[6]= {av_get_bytes_per_sample(dec->sample_fmt)};
2043                 int ostride[6]= {2};
2044                 int len= data_size/istride[0];
2045                 if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
2046                     printf("av_audio_convert() failed\n");
2047                     break;
2048                 }
2049                 is->audio_buf= is->audio_buf2;
2050                 /* FIXME: existing code assumes that data_size equals framesize*channels*2;
2051                           remove this legacy cruft */
2052                 data_size= len*2;
2053             }else{
2054                 is->audio_buf= is->audio_buf1;
2055             }
2056
2057             /* if no pts, then compute it */
2058             pts = is->audio_clock;
2059             *pts_ptr = pts;
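                 /* each decoded chunk advances the clock by
                  * data_size / (channels * 2 bytes * sample_rate) seconds,
                  * e.g. 4096 bytes of stereo S16 at 48 kHz is
                  * 4096 / (2 * 2 * 48000) ~= 21.3 ms */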
2060             n = 2 * dec->channels;
2061             is->audio_clock += (double)data_size /
2062                 (double)(n * dec->sample_rate);
2063 #ifdef DEBUG
2064             {
2065                 static double last_clock;
2066                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
2067                        is->audio_clock - last_clock,
2068                        is->audio_clock, pts);
2069                 last_clock = is->audio_clock;
2070             }
2071 #endif
2072             return data_size;
2073         }
2074
2075         /* free the current packet */
2076         if (pkt->data)
2077             av_free_packet(pkt);
2078
2079         if (is->paused || is->audioq.abort_request) {
2080             return -1;
2081         }
2082
2083         /* read next packet */
2084         if (packet_queue_get(&is->audioq, pkt, 1) < 0)
2085             return -1;
2086         if(pkt->data == flush_pkt.data){
2087             avcodec_flush_buffers(dec);
2088             continue;
2089         }
2090
2091         pkt_temp->data = pkt->data;
2092         pkt_temp->size = pkt->size;
2093
2094         /* update the audio clock with the packet pts, if available */
2095         if (pkt->pts != AV_NOPTS_VALUE) {
2096             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2097         }
2098     }
2099 }
2100
2101 /* prepare a new audio buffer */
2102 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2103 {
2104     VideoState *is = opaque;
2105     int audio_size, len1;
2106     double pts;
2107
2108     audio_callback_time = av_gettime();
2109
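         /* SDL pulls 'len' bytes from us: refill audio_buf via
          * audio_decode_frame() whenever the previous buffer has been fully
          * consumed (tracked by audio_buf_index); on decode errors a short
          * block of silence is returned so playback keeps running. */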
2110     while (len > 0) {
2111         if (is->audio_buf_index >= is->audio_buf_size) {
2112            audio_size = audio_decode_frame(is, &pts);
2113            if (audio_size < 0) {
2114                 /* if error, just output silence */
2115                is->audio_buf = is->audio_buf1;
2116                is->audio_buf_size = 1024;
2117                memset(is->audio_buf, 0, is->audio_buf_size);
2118            } else {
2119                if (is->show_audio)
2120                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2121                audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2122                                               pts);
2123                is->audio_buf_size = audio_size;
2124            }
2125            is->audio_buf_index = 0;
2126         }
2127         len1 = is->audio_buf_size - is->audio_buf_index;
2128         if (len1 > len)
2129             len1 = len;
2130         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2131         len -= len1;
2132         stream += len1;
2133         is->audio_buf_index += len1;
2134     }
2135 }
2136
2137 /* open a given stream. Return 0 if OK */
2138 static int stream_component_open(VideoState *is, int stream_index)
2139 {
2140     AVFormatContext *ic = is->ic;
2141     AVCodecContext *avctx;
2142     AVCodec *codec;
2143     SDL_AudioSpec wanted_spec, spec;
2144     AVDictionary *opts;
2145     AVDictionaryEntry *t = NULL;
2146
2147     if (stream_index < 0 || stream_index >= ic->nb_streams)
2148         return -1;
2149     avctx = ic->streams[stream_index]->codec;
2150
2151     opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index]);
2152
2153     /* request at most two decoded audio channels for output */
2154     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2155         if (avctx->channels > 0) {
2156             avctx->request_channels = FFMIN(2, avctx->channels);
2157         } else {
2158             avctx->request_channels = 2;
2159         }
2160     }
2161
2162     codec = avcodec_find_decoder(avctx->codec_id);
2163     avctx->debug_mv = debug_mv;
2164     avctx->debug = debug;
2165     avctx->workaround_bugs = workaround_bugs;
2166     avctx->lowres = lowres;
2167     if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2168     avctx->idct_algo= idct;
2169     if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2170     avctx->skip_frame= skip_frame;
2171     avctx->skip_idct= skip_idct;
2172     avctx->skip_loop_filter= skip_loop_filter;
2173     avctx->error_recognition= error_recognition;
2174     avctx->error_concealment= error_concealment;
2175     avctx->thread_count= thread_count;
2176
2177     if (!codec ||
2178         avcodec_open2(avctx, codec, &opts) < 0)
2179         return -1;
2180     if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2181         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2182         return AVERROR_OPTION_NOT_FOUND;
2183     }
2184
2185     /* prepare audio output */
2186     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2187         wanted_spec.freq = avctx->sample_rate;
2188         wanted_spec.format = AUDIO_S16SYS;
2189         wanted_spec.channels = avctx->channels;
2190         wanted_spec.silence = 0;
2191         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2192         wanted_spec.callback = sdl_audio_callback;
2193         wanted_spec.userdata = is;
2194         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2195             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2196             return -1;
2197         }
2198         is->audio_hw_buf_size = spec.size;
2199         is->audio_src_fmt= AV_SAMPLE_FMT_S16;
2200     }
2201
2202     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2203     switch(avctx->codec_type) {
2204     case AVMEDIA_TYPE_AUDIO:
2205         is->audio_stream = stream_index;
2206         is->audio_st = ic->streams[stream_index];
2207         is->audio_buf_size = 0;
2208         is->audio_buf_index = 0;
2209
2210         /* init averaging filter */
2211         is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2212         is->audio_diff_avg_count = 0;
2213         /* since we do not have a precise enough measure of the audio FIFO fullness,
2214            we correct audio sync only if the error is larger than this threshold */
2215         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
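             /* i.e. twice the SDL buffer duration: with 1024-sample buffers
              * at 44100 Hz this is about 46 ms */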
2216
2217         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2218         packet_queue_init(&is->audioq);
2219         SDL_PauseAudio(0);
2220         break;
2221     case AVMEDIA_TYPE_VIDEO:
2222         is->video_stream = stream_index;
2223         is->video_st = ic->streams[stream_index];
2224
2225         packet_queue_init(&is->videoq);
2226         is->video_tid = SDL_CreateThread(video_thread, is);
2227         break;
2228     case AVMEDIA_TYPE_SUBTITLE:
2229         is->subtitle_stream = stream_index;
2230         is->subtitle_st = ic->streams[stream_index];
2231         packet_queue_init(&is->subtitleq);
2232
2233         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2234         break;
2235     default:
2236         break;
2237     }
2238     return 0;
2239 }
2240
2241 static void stream_component_close(VideoState *is, int stream_index)
2242 {
2243     AVFormatContext *ic = is->ic;
2244     AVCodecContext *avctx;
2245
2246     if (stream_index < 0 || stream_index >= ic->nb_streams)
2247         return;
2248     avctx = ic->streams[stream_index]->codec;
2249
2250     switch(avctx->codec_type) {
2251     case AVMEDIA_TYPE_AUDIO:
2252         packet_queue_abort(&is->audioq);
2253
2254         SDL_CloseAudio();
2255
2256         packet_queue_end(&is->audioq);
2257         if (is->reformat_ctx)
2258             av_audio_convert_free(is->reformat_ctx);
2259         is->reformat_ctx = NULL;
2260         break;
2261     case AVMEDIA_TYPE_VIDEO:
2262         packet_queue_abort(&is->videoq);
2263
2264         /* note: we also signal this mutex to make sure we deblock the
2265            video thread in all cases */
2266         SDL_LockMutex(is->pictq_mutex);
2267         SDL_CondSignal(is->pictq_cond);
2268         SDL_UnlockMutex(is->pictq_mutex);
2269
2270         SDL_WaitThread(is->video_tid, NULL);
2271
2272         packet_queue_end(&is->videoq);
2273         break;
2274     case AVMEDIA_TYPE_SUBTITLE:
2275         packet_queue_abort(&is->subtitleq);
2276
2277         /* note: we also signal this mutex to make sure we deblock the
2278            subtitle thread in all cases */
2279         SDL_LockMutex(is->subpq_mutex);
2280         is->subtitle_stream_changed = 1;
2281
2282         SDL_CondSignal(is->subpq_cond);
2283         SDL_UnlockMutex(is->subpq_mutex);
2284
2285         SDL_WaitThread(is->subtitle_tid, NULL);
2286
2287         packet_queue_end(&is->subtitleq);
2288         break;
2289     default:
2290         break;
2291     }
2292
2293     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2294     avcodec_close(avctx);
2295     switch(avctx->codec_type) {
2296     case AVMEDIA_TYPE_AUDIO:
2297         is->audio_st = NULL;
2298         is->audio_stream = -1;
2299         break;
2300     case AVMEDIA_TYPE_VIDEO:
2301         is->video_st = NULL;
2302         is->video_stream = -1;
2303         break;
2304     case AVMEDIA_TYPE_SUBTITLE:
2305         is->subtitle_st = NULL;
2306         is->subtitle_stream = -1;
2307         break;
2308     default:
2309         break;
2310     }
2311 }
2312
2313 /* since we have only one decoding thread, we can use a global
2314    variable instead of a thread local variable */
2315 static VideoState *global_video_state;
2316
2317 static int decode_interrupt_cb(void)
2318 {
2319     return (global_video_state && global_video_state->abort_request);
2320 }
2321
2322 /* this thread gets the stream from the disk or the network */
2323 static int decode_thread(void *arg)
2324 {
2325     VideoState *is = arg;
2326     AVFormatContext *ic = NULL;
2327     int err, i, ret;
2328     int st_index[AVMEDIA_TYPE_NB];
2329     AVPacket pkt1, *pkt = &pkt1;
2330     int eof=0;
2331     int pkt_in_play_range = 0;
2332     AVDictionaryEntry *t;
2333     AVDictionary **opts;
2334     int orig_nb_streams;
2335
2336     memset(st_index, -1, sizeof(st_index));
2337     is->video_stream = -1;
2338     is->audio_stream = -1;
2339     is->subtitle_stream = -1;
2340
2341     global_video_state = is;
2342     avio_set_interrupt_cb(decode_interrupt_cb);
2343
2344     err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2345     if (err < 0) {
2346         print_error(is->filename, err);
2347         ret = -1;
2348         goto fail;
2349     }
2350     if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2351         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2352         ret = AVERROR_OPTION_NOT_FOUND;
2353         goto fail;
2354     }
2355     is->ic = ic;
2356
2357     if(genpts)
2358         ic->flags |= AVFMT_FLAG_GENPTS;
2359
2360     opts = setup_find_stream_info_opts(ic, codec_opts);
2361     orig_nb_streams = ic->nb_streams;
2362
2363     err = avformat_find_stream_info(ic, opts);
2364     if (err < 0) {
2365         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2366         ret = -1;
2367         goto fail;
2368     }
2369     for (i = 0; i < orig_nb_streams; i++)
2370         av_dict_free(&opts[i]);
2371     av_freep(&opts);
2372
2373     if(ic->pb)
2374         ic->pb->eof_reached= 0; //FIXME hack, avplay maybe should not use url_feof() to test for the end
2375
2376     if(seek_by_bytes<0)
2377         seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2378
2379     /* if seeking was requested, execute it now */
2380     if (start_time != AV_NOPTS_VALUE) {
2381         int64_t timestamp;
2382
2383         timestamp = start_time;
2384         /* add the stream start time */
2385         if (ic->start_time != AV_NOPTS_VALUE)
2386             timestamp += ic->start_time;
2387         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2388         if (ret < 0) {
2389             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2390                     is->filename, (double)timestamp / AV_TIME_BASE);
2391         }
2392     }
2393
2394     for (i = 0; i < ic->nb_streams; i++)
2395         ic->streams[i]->discard = AVDISCARD_ALL;
2396     if (!video_disable)
2397         st_index[AVMEDIA_TYPE_VIDEO] =
2398             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2399                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2400     if (!audio_disable)
2401         st_index[AVMEDIA_TYPE_AUDIO] =
2402             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2403                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2404                                 st_index[AVMEDIA_TYPE_VIDEO],
2405                                 NULL, 0);
2406     if (!video_disable)
2407         st_index[AVMEDIA_TYPE_SUBTITLE] =
2408             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2409                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2410                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2411                                  st_index[AVMEDIA_TYPE_AUDIO] :
2412                                  st_index[AVMEDIA_TYPE_VIDEO]),
2413                                 NULL, 0);
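         /* the already-chosen video (or audio) stream is passed to
          * av_find_best_stream() as the related stream, so audio and
          * subtitle tracks are preferably picked from the same program */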
2414     if (show_status) {
2415         av_dump_format(ic, 0, is->filename, 0);
2416     }
2417
2418     /* open the streams */
2419     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2420         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2421     }
2422
2423     ret=-1;
2424     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2425         ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2426     }
2427     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2428     if(ret<0) {
2429         if (!display_disable)
2430             is->show_audio = 2;
2431     }
2432
2433     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2434         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2435     }
2436
2437     if (is->video_stream < 0 && is->audio_stream < 0) {
2438         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2439         ret = -1;
2440         goto fail;
2441     }
2442
2443     for(;;) {
2444         if (is->abort_request)
2445             break;
2446         if (is->paused != is->last_paused) {
2447             is->last_paused = is->paused;
2448             if (is->paused)
2449                 is->read_pause_return= av_read_pause(ic);
2450             else
2451                 av_read_play(ic);
2452         }
2453 #if CONFIG_RTSP_DEMUXER
2454         if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2455             /* wait 10 ms to avoid trying to get another packet */
2456             /* XXX: horrible */
2457             SDL_Delay(10);
2458             continue;
2459         }
2460 #endif
2461         if (is->seek_req) {
2462             int64_t seek_target= is->seek_pos;
2463             int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2464             int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2465 //FIXME the +-2 is due to rounding being not done in the correct direction in generation
2466 //      of the seek_pos/seek_rel variables
2467
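                 /* after a successful seek every packet queue is flushed and
                  * a flush_pkt sentinel is queued; the decoding threads
                  * compare the packet data pointer against flush_pkt.data
                  * and call avcodec_flush_buffers() when they see it */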
2468             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2469             if (ret < 0) {
2470                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2471             }else{
2472                 if (is->audio_stream >= 0) {
2473                     packet_queue_flush(&is->audioq);
2474                     packet_queue_put(&is->audioq, &flush_pkt);
2475                 }
2476                 if (is->subtitle_stream >= 0) {
2477                     packet_queue_flush(&is->subtitleq);
2478                     packet_queue_put(&is->subtitleq, &flush_pkt);
2479                 }
2480                 if (is->video_stream >= 0) {
2481                     packet_queue_flush(&is->videoq);
2482                     packet_queue_put(&is->videoq, &flush_pkt);
2483                 }
2484             }
2485             is->seek_req = 0;
2486             eof= 0;
2487         }
2488
2489         /* if the queues are full, no need to read more */
2490         if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2491             || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
2492                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
2493                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
2494             /* wait 10 ms */
2495             SDL_Delay(10);
2496             continue;
2497         }
2498         if(eof) {
2499             if(is->video_stream >= 0){
2500                 av_init_packet(pkt);
2501                 pkt->data=NULL;
2502                 pkt->size=0;
2503                 pkt->stream_index= is->video_stream;
2504                 packet_queue_put(&is->videoq, pkt);
2505             }
2506             SDL_Delay(10);
2507             if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
2508                 if(loop!=1 && (!loop || --loop)){
2509                     stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2510                 }else if(autoexit){
2511                     ret=AVERROR_EOF;
2512                     goto fail;
2513                 }
2514             }
2515             continue;
2516         }
2517         ret = av_read_frame(ic, pkt);
2518         if (ret < 0) {
2519             if (ret == AVERROR_EOF || (ic->pb && ic->pb->eof_reached))
2520                 eof=1;
2521             if (ic->pb && ic->pb->error)
2522                 break;
2523             SDL_Delay(100); /* wait for user event */
2524             continue;
2525         }
2526         /* check whether the packet is in the play range specified by the user; queue it if so, otherwise discard it */
2527         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2528                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2529                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2530                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
2531                 <= ((double)duration/1000000);
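             /* i.e. keep the packet if no -t duration was given, or if its
              * PTS, converted to seconds and measured from the requested
              * start time, still falls within that duration (start_time and
              * duration are in AV_TIME_BASE units, hence the / 1000000) */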
2532         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2533             packet_queue_put(&is->audioq, pkt);
2534         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2535             packet_queue_put(&is->videoq, pkt);
2536         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2537             packet_queue_put(&is->subtitleq, pkt);
2538         } else {
2539             av_free_packet(pkt);
2540         }
2541     }
2542     /* wait until the end */
2543     while (!is->abort_request) {
2544         SDL_Delay(100);
2545     }
2546
2547     ret = 0;
2548  fail:
2549     /* disable interrupting */
2550     global_video_state = NULL;
2551
2552     /* close each stream */
2553     if (is->audio_stream >= 0)
2554         stream_component_close(is, is->audio_stream);
2555     if (is->video_stream >= 0)
2556         stream_component_close(is, is->video_stream);
2557     if (is->subtitle_stream >= 0)
2558         stream_component_close(is, is->subtitle_stream);
2559     if (is->ic) {
2560         av_close_input_file(is->ic);
2561         is->ic = NULL; /* safety */
2562     }
2563     avio_set_interrupt_cb(NULL);
2564
2565     if (ret != 0) {
2566         SDL_Event event;
2567
2568         event.type = FF_QUIT_EVENT;
2569         event.user.data1 = is;
2570         SDL_PushEvent(&event);
2571     }
2572     return 0;
2573 }
2574
2575 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2576 {
2577     VideoState *is;
2578
2579     is = av_mallocz(sizeof(VideoState));
2580     if (!is)
2581         return NULL;
2582     av_strlcpy(is->filename, filename, sizeof(is->filename));
2583     is->iformat = iformat;
2584     is->ytop = 0;
2585     is->xleft = 0;
2586
2587     /* start video display */
2588     is->pictq_mutex = SDL_CreateMutex();
2589     is->pictq_cond = SDL_CreateCond();
2590
2591     is->subpq_mutex = SDL_CreateMutex();
2592     is->subpq_cond = SDL_CreateCond();
2593
2594     is->av_sync_type = av_sync_type;
2595     is->parse_tid = SDL_CreateThread(decode_thread, is);
2596     if (!is->parse_tid) {
2597         av_free(is);
2598         return NULL;
2599     }
2600     return is;
2601 }
2602
2603 static void stream_cycle_channel(VideoState *is, int codec_type)
2604 {
2605     AVFormatContext *ic = is->ic;
2606     int start_index, stream_index;
2607     AVStream *st;
2608
2609     if (codec_type == AVMEDIA_TYPE_VIDEO)
2610         start_index = is->video_stream;
2611     else if (codec_type == AVMEDIA_TYPE_AUDIO)
2612         start_index = is->audio_stream;
2613     else
2614         start_index = is->subtitle_stream;
2615     if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2616         return;
2617     stream_index = start_index;
2618     for(;;) {
2619         if (++stream_index >= is->ic->nb_streams)
2620         {
2621             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2622             {
2623                 stream_index = -1;
2624                 goto the_end;
2625             } else
2626                 stream_index = 0;
2627         }
2628         if (stream_index == start_index)
2629             return;
2630         st = ic->streams[stream_index];
2631         if (st->codec->codec_type == codec_type) {
2632             /* check that parameters are OK */
2633             switch(codec_type) {
2634             case AVMEDIA_TYPE_AUDIO:
2635                 if (st->codec->sample_rate != 0 &&
2636                     st->codec->channels != 0)
2637                     goto the_end;
2638                 break;
2639             case AVMEDIA_TYPE_VIDEO:
2640             case AVMEDIA_TYPE_SUBTITLE:
2641                 goto the_end;
2642             default:
2643                 break;
2644             }
2645         }
2646     }
2647  the_end:
2648     stream_component_close(is, start_index);
2649     stream_component_open(is, stream_index);
2650 }
2651
2652
2653 static void toggle_full_screen(void)
2654 {
2655     is_full_screen = !is_full_screen;
2656     video_open(cur_stream);
2657 }
2658
2659 static void toggle_pause(void)
2660 {
2661     if (cur_stream)
2662         stream_pause(cur_stream);
2663     step = 0;
2664 }
2665
2666 static void step_to_next_frame(void)
2667 {
2668     if (cur_stream) {
2669         /* if the stream is paused unpause it, then step */
2670         if (cur_stream->paused)
2671             stream_pause(cur_stream);
2672     }
2673     step = 1;
2674 }
2675
2676 static void toggle_audio_display(void)
2677 {
2678     if (cur_stream) {
2679         int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2680         cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2681         fill_rectangle(screen,
2682                     cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2683                     bgcolor);
2684         SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2685     }
2686 }
2687
2688 /* handle an event sent by the GUI */
2689 static void event_loop(void)
2690 {
2691     SDL_Event event;
2692     double incr, pos, frac;
2693
2694     for(;;) {
2695         double x;
2696         SDL_WaitEvent(&event);
2697         switch(event.type) {
2698         case SDL_KEYDOWN:
2699             if (exit_on_keydown) {
2700                 do_exit();
2701                 break;
2702             }
2703             switch(event.key.keysym.sym) {
2704             case SDLK_ESCAPE:
2705             case SDLK_q:
2706                 do_exit();
2707                 break;
2708             case SDLK_f:
2709                 toggle_full_screen();
2710                 break;
2711             case SDLK_p:
2712             case SDLK_SPACE:
2713                 toggle_pause();
2714                 break;
2715             case SDLK_s: //S: Step to next frame
2716                 step_to_next_frame();
2717                 break;
2718             case SDLK_a:
2719                 if (cur_stream)
2720                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2721                 break;
2722             case SDLK_v:
2723                 if (cur_stream)
2724                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2725                 break;
2726             case SDLK_t:
2727                 if (cur_stream)
2728                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2729                 break;
2730             case SDLK_w:
2731                 toggle_audio_display();
2732                 break;
2733             case SDLK_LEFT:
2734                 incr = -10.0;
2735                 goto do_seek;
2736             case SDLK_RIGHT:
2737                 incr = 10.0;
2738                 goto do_seek;
2739             case SDLK_UP:
2740                 incr = 60.0;
2741                 goto do_seek;
2742             case SDLK_DOWN:
2743                 incr = -60.0;
2744             do_seek:
2745                 if (cur_stream) {
2746                     if (seek_by_bytes) {
2747                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
2748                             pos= cur_stream->video_current_pos;
2749                         }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
2750                             pos= cur_stream->audio_pkt.pos;
2751                         }else
2752                             pos = avio_tell(cur_stream->ic->pb);
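                              /* convert the +/- seconds increment into bytes
                               * using the container bit rate, falling back to
                               * an assumed 180000 bytes/s (~1.44 Mbit/s) when
                               * it is unknown */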
2753                         if (cur_stream->ic->bit_rate)
2754                             incr *= cur_stream->ic->bit_rate / 8.0;
2755                         else
2756                             incr *= 180000.0;
2757                         pos += incr;
2758                         stream_seek(cur_stream, pos, incr, 1);
2759                     } else {
2760                         pos = get_master_clock(cur_stream);
2761                         pos += incr;
2762                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2763                     }
2764                 }
2765                 break;
2766             default:
2767                 break;
2768             }
2769             break;
2770         case SDL_MOUSEBUTTONDOWN:
2771             if (exit_on_mousedown) {
2772                 do_exit();
2773                 break;
2774             }
2775         case SDL_MOUSEMOTION:
2776             if(event.type ==SDL_MOUSEBUTTONDOWN){
2777                 x= event.button.x;
2778             }else{
2779                 if(event.motion.state != SDL_PRESSED)
2780                     break;
2781                 x= event.motion.x;
2782             }
2783             if (cur_stream) {
2784                 if(seek_by_bytes || cur_stream->ic->duration<=0){
2785                     uint64_t size=  avio_size(cur_stream->ic->pb);
2786                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2787                 }else{
2788                     int64_t ts;
2789                     int ns, hh, mm, ss;
2790                     int tns, thh, tmm, tss;
2791                     tns = cur_stream->ic->duration/1000000LL;
2792                     thh = tns/3600;
2793                     tmm = (tns%3600)/60;
2794                     tss = (tns%60);
2795                     frac = x/cur_stream->width;
2796                     ns = frac*tns;
2797                     hh = ns/3600;
2798                     mm = (ns%3600)/60;
2799                     ss = (ns%60);
2800                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2801                             hh, mm, ss, thh, tmm, tss);
2802                     ts = frac*cur_stream->ic->duration;
2803                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2804                         ts += cur_stream->ic->start_time;
2805                     stream_seek(cur_stream, ts, 0, 0);
2806                 }
2807             }
2808             break;
2809         case SDL_VIDEORESIZE:
2810             if (cur_stream) {
2811                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2812                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2813                 screen_width = cur_stream->width = event.resize.w;
2814                 screen_height= cur_stream->height= event.resize.h;
2815             }
2816             break;
2817         case SDL_QUIT:
2818         case FF_QUIT_EVENT:
2819             do_exit();
2820             break;
2821         case FF_ALLOC_EVENT:
2822             video_open(event.user.data1);
2823             alloc_picture(event.user.data1);
2824             break;
2825         case FF_REFRESH_EVENT:
2826             video_refresh_timer(event.user.data1);
2827             cur_stream->refresh=0;
2828             break;
2829         default:
2830             break;
2831         }
2832     }
2833 }
2834
2835 static int opt_frame_size(const char *opt, const char *arg)
2836 {
2837     av_log(NULL, AV_LOG_ERROR,
2838            "Option '%s' has been removed, use private format options instead\n", opt);
2839     return AVERROR(EINVAL);
2840 }
2841
2842 static int opt_width(const char *opt, const char *arg)
2843 {
2844     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2845     return 0;
2846 }
2847
2848 static int opt_height(const char *opt, const char *arg)
2849 {
2850     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2851     return 0;
2852 }
2853
2854 static int opt_format(const char *opt, const char *arg)
2855 {
2856     file_iformat = av_find_input_format(arg);
2857     if (!file_iformat) {
2858         fprintf(stderr, "Unknown input format: %s\n", arg);
2859         return AVERROR(EINVAL);
2860     }
2861     return 0;
2862 }
2863
2864 static int opt_frame_pix_fmt(const char *opt, const char *arg)
2865 {
2866     av_log(NULL, AV_LOG_ERROR,
2867            "Option '%s' has been removed, use private format options instead\n", opt);
2868     return AVERROR(EINVAL);
2869 }
2870
2871 static int opt_sync(const char *opt, const char *arg)
2872 {
2873     if (!strcmp(arg, "audio"))
2874         av_sync_type = AV_SYNC_AUDIO_MASTER;
2875     else if (!strcmp(arg, "video"))
2876         av_sync_type = AV_SYNC_VIDEO_MASTER;
2877     else if (!strcmp(arg, "ext"))
2878         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2879     else {
2880         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2881         exit(1);
2882     }
2883     return 0;
2884 }
2885
2886 static int opt_seek(const char *opt, const char *arg)
2887 {
2888     start_time = parse_time_or_die(opt, arg, 1);
2889     return 0;
2890 }
2891
2892 static int opt_duration(const char *opt, const char *arg)
2893 {
2894     duration = parse_time_or_die(opt, arg, 1);
2895     return 0;
2896 }
2897
2898 static int opt_debug(const char *opt, const char *arg)
2899 {
2900     av_log_set_level(99);
2901     debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2902     return 0;
2903 }
2904
2905 static int opt_vismv(const char *opt, const char *arg)
2906 {
2907     debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
2908     return 0;
2909 }
2910
2911 static int opt_thread_count(const char *opt, const char *arg)
2912 {
2913     thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2914 #if !HAVE_THREADS
2915     fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2916 #endif
2917     return 0;
2918 }
2919
2920 static const OptionDef options[] = {
2921 #include "cmdutils_common_opts.h"
2922     { "x", HAS_ARG, {(void*)opt_width}, "force displayed width", "width" },
2923     { "y", HAS_ARG, {(void*)opt_height}, "force displayed height", "height" },
2924     { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
2925     { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
2926     { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
2927     { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
2928     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
2929     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
2930     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
2931     { "ss", HAS_ARG, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
2932     { "t", HAS_ARG, {(void*)&opt_duration}, "play \"duration\" seconds of audio/video", "duration" },
2933     { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
2934     { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
2935     { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
2936     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
2937     { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
2938     { "debug", HAS_ARG | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
2939     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
2940     { "vismv", HAS_ARG | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
2941     { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
2942     { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
2943     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
2944     { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
2945     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
2946     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
2947     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
2948     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
2949     { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
2950     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
2951     { "sync", HAS_ARG | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
2952     { "threads", HAS_ARG | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
2953     { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
2954     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
2955     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
2956     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
2957     { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
2958     { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
2959 #if CONFIG_AVFILTER
2960     { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
2961 #endif
2962     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
2963     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
2964     { "i", 0, {NULL}, "ffmpeg compatibility dummy option", ""},
2965     { NULL, },
2966 };
2967
2968 static void show_usage(void)
2969 {
2970     printf("Simple media player\n");
2971     printf("usage: %s [options] input_file\n", program_name);
2972     printf("\n");
2973 }
2974
2975 static void show_help(void)
2976 {
2977     av_log_set_callback(log_callback_help);
2978     show_usage();
2979     show_help_options(options, "Main options:\n",
2980                       OPT_EXPERT, 0);
2981     show_help_options(options, "\nAdvanced options:\n",
2982                       OPT_EXPERT, OPT_EXPERT);
2983     printf("\n");
2984     av_opt_show2(avcodec_opts[0], NULL,
2985                  AV_OPT_FLAG_DECODING_PARAM, 0);
2986     printf("\n");
2987     av_opt_show2(avformat_opts, NULL,
2988                  AV_OPT_FLAG_DECODING_PARAM, 0);
2989 #if !CONFIG_AVFILTER
2990     printf("\n");
2991     av_opt_show2(sws_opts, NULL,
2992                  AV_OPT_FLAG_ENCODING_PARAM, 0);
2993 #endif
2994     printf("\nWhile playing:\n"
2995            "q, ESC              quit\n"
2996            "f                   toggle full screen\n"
2997            "p, SPC              pause\n"
2998            "a                   cycle audio channel\n"
2999            "v                   cycle video channel\n"
3000            "t                   cycle subtitle channel\n"
3001            "w                   show audio waves\n"
3002            "s                   activate frame-step mode\n"
3003            "left/right          seek backward/forward 10 seconds\n"
3004            "down/up             seek backward/forward 1 minute\n"
3005            "mouse click         seek to percentage in file corresponding to fraction of width\n"
3006            );
3007 }
3008
3009 static void opt_input_file(const char *filename)
3010 {
3011     if (input_filename) {
3012         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3013                 filename, input_filename);
3014         exit(1);
3015     }
3016     if (!strcmp(filename, "-"))
3017         filename = "pipe:";
3018     input_filename = filename;
3019 }
3020
3021 /* program entry point */
3022 int main(int argc, char **argv)
3023 {
3024     int flags;
3025
3026     av_log_set_flags(AV_LOG_SKIP_REPEATED);
3027
3028     /* register all codecs, demuxers and protocols */
3029     avcodec_register_all();
3030 #if CONFIG_AVDEVICE
3031     avdevice_register_all();
3032 #endif
3033 #if CONFIG_AVFILTER
3034     avfilter_register_all();
3035 #endif
3036     av_register_all();
3037
3038     init_opts();
3039
3040     show_banner();
3041
3042     parse_options(argc, argv, options, opt_input_file);
3043
3044     if (!input_filename) {
3045         show_usage();
3046         fprintf(stderr, "An input file must be specified\n");
3047         fprintf(stderr, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3048         exit(1);
3049     }
3050
3051     if (display_disable) {
3052         video_disable = 1;
3053     }
3054     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3055 #if !defined(__MINGW32__) && !defined(__APPLE__)
3056     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3057 #endif
3058     if (SDL_Init (flags)) {
3059         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3060         exit(1);
3061     }
3062
3063     if (!display_disable) {
3064 #if HAVE_SDL_VIDEO_SIZE
3065         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3066         fs_screen_width = vi->current_w;
3067         fs_screen_height = vi->current_h;
3068 #endif
3069     }
3070
3071     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3072     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3073     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3074
3075     av_init_packet(&flush_pkt);
3076     flush_pkt.data= "FLUSH";
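         /* flush_pkt is only ever compared by its data pointer; the "FLUSH"
          * string just gives it a unique, non-NULL address */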
3077
3078     cur_stream = stream_open(input_filename, file_iformat);
3079
3080     event_loop();
3081
3082     /* event_loop() never returns */
3083
3084     return 0;
3085 }