1 /*
2  * ffplay : Simple Media Player based on the FFmpeg libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include "libavutil/avstring.h"
27 #include "libavutil/colorspace.h"
28 #include "libavutil/mathematics.h"
29 #include "libavutil/pixdesc.h"
30 #include "libavutil/imgutils.h"
31 #include "libavutil/dict.h"
32 #include "libavutil/parseutils.h"
33 #include "libavutil/samplefmt.h"
34 #include "libavutil/avassert.h"
35 #include "libavformat/avformat.h"
36 #include "libavdevice/avdevice.h"
37 #include "libswscale/swscale.h"
38 #include "libavcodec/audioconvert.h"
39 #include "libavutil/opt.h"
40 #include "libavcodec/avfft.h"
41
42 #if CONFIG_AVFILTER
43 # include "libavfilter/avcodec.h"
44 # include "libavfilter/avfilter.h"
45 # include "libavfilter/avfiltergraph.h"
46 # include "libavfilter/vsink_buffer.h"
47 #endif
48
49 #include <SDL.h>
50 #include <SDL_thread.h>
51
52 #include "cmdutils.h"
53
54 #include <unistd.h>
55 #include <assert.h>
56
57 const char program_name[] = "ffplay";
58 const int program_birth_year = 2003;
59
60 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
61 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
62 #define MIN_FRAMES 5
63
64 /* SDL audio buffer size, in samples. Should be small to have precise
65    A/V sync as SDL does not have hardware buffer fullness info. */
66 #define SDL_AUDIO_BUFFER_SIZE 1024
67
68 /* no AV sync correction is done if below the AV sync threshold */
69 #define AV_SYNC_THRESHOLD 0.01
70 /* no AV correction is done if too big error */
71 #define AV_NOSYNC_THRESHOLD 10.0
72
73 #define FRAME_SKIP_FACTOR 0.05
74
75 /* maximum audio speed change to get correct sync */
76 #define SAMPLE_CORRECTION_PERCENT_MAX 10
77
78 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
79 #define AUDIO_DIFF_AVG_NB   20
80
81 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
82 #define SAMPLE_ARRAY_SIZE (2*65536)
83
84 static int sws_flags = SWS_BICUBIC;
85
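/* Thread-safe FIFO of demuxed packets. Producers append with packet_queue_put() and
 * consumers block in packet_queue_get(); 'size' tracks the queued bytes so the demuxing
 * side can bound how much it buffers (cf. MAX_QUEUE_SIZE above), and 'abort_request'
 * wakes up any consumer blocked on the condition variable. */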
86 typedef struct PacketQueue {
87     AVPacketList *first_pkt, *last_pkt;
88     int nb_packets;
89     int size;
90     int abort_request;
91     SDL_mutex *mutex;
92     SDL_cond *cond;
93 } PacketQueue;
94
95 #define VIDEO_PICTURE_QUEUE_SIZE 2
96 #define SUBPICTURE_QUEUE_SIZE 4
97
98 typedef struct VideoPicture {
99     double pts;                                  ///<presentation time stamp for this picture
100     double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
101     double duration;                             ///<expected duration of the frame
102     int64_t pos;                                 ///<byte position in file
103     SDL_Overlay *bmp;
104     int width, height; /* source height & width */
105     int allocated;
106     enum PixelFormat pix_fmt;
107
108 #if CONFIG_AVFILTER
109     AVFilterBufferRef *picref;
110 #endif
111 } VideoPicture;
112
113 typedef struct SubPicture {
114     double pts; /* presentation time stamp for this picture */
115     AVSubtitle sub;
116 } SubPicture;
117
118 enum {
119     AV_SYNC_AUDIO_MASTER, /* default choice */
120     AV_SYNC_VIDEO_MASTER,
121     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
122 };
123
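/* All state for one open media file. A single VideoState is shared between the read,
 * video-decoding, subtitle and refresh threads and the SDL audio callback; the packet
 * queues and the SDL mutex/condition pairs below are the hand-off points between them. */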
124 typedef struct VideoState {
125     SDL_Thread *read_tid;
126     SDL_Thread *video_tid;
127     SDL_Thread *refresh_tid;
128     AVInputFormat *iformat;
129     int no_background;
130     int abort_request;
131     int paused;
132     int last_paused;
133     int seek_req;
134     int seek_flags;
135     int64_t seek_pos;
136     int64_t seek_rel;
137     int read_pause_return;
138     AVFormatContext *ic;
139
140     int audio_stream;
141
142     int av_sync_type;
143     double external_clock; /* external clock base */
144     int64_t external_clock_time;
145
146     double audio_clock;
147     double audio_diff_cum; /* used for AV difference average computation */
148     double audio_diff_avg_coef;
149     double audio_diff_threshold;
150     int audio_diff_avg_count;
151     AVStream *audio_st;
152     PacketQueue audioq;
153     int audio_hw_buf_size;
154     /* samples output by the codec. we reserve more space for avsync
155        compensation */
156     DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
157     DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
158     uint8_t *audio_buf;
159     unsigned int audio_buf_size; /* in bytes */
160     int audio_buf_index; /* in bytes */
161     AVPacket audio_pkt_temp;
162     AVPacket audio_pkt;
163     enum AVSampleFormat audio_src_fmt;
164     AVAudioConvert *reformat_ctx;
165
166     enum ShowMode {
167         SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
168     } show_mode;
169     int16_t sample_array[SAMPLE_ARRAY_SIZE];
170     int sample_array_index;
171     int last_i_start;
172     RDFTContext *rdft;
173     int rdft_bits;
174     FFTSample *rdft_data;
175     int xpos;
176
177     SDL_Thread *subtitle_tid;
178     int subtitle_stream;
179     int subtitle_stream_changed;
180     AVStream *subtitle_st;
181     PacketQueue subtitleq;
182     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
183     int subpq_size, subpq_rindex, subpq_windex;
184     SDL_mutex *subpq_mutex;
185     SDL_cond *subpq_cond;
186
187     double frame_timer;
188     double frame_last_pts;
189     double frame_last_delay;
190     double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
191     int video_stream;
192     AVStream *video_st;
193     PacketQueue videoq;
194     double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
195     double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
196     int64_t video_current_pos;                   ///<current displayed file pos
197     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
198     int pictq_size, pictq_rindex, pictq_windex;
199     SDL_mutex *pictq_mutex;
200     SDL_cond *pictq_cond;
201 #if !CONFIG_AVFILTER
202     struct SwsContext *img_convert_ctx;
203 #endif
204
205     char filename[1024];
206     int width, height, xleft, ytop;
207
208 #if CONFIG_AVFILTER
209     AVFilterContext *out_video_filter;          ///<the last filter in the video chain
210 #endif
211
212     float skip_frames;
213     float skip_frames_index;
214     int refresh;
215 } VideoState;
216
217 static int opt_help(const char *opt, const char *arg);
218
219 /* options specified by the user */
220 static AVInputFormat *file_iformat;
221 static const char *input_filename;
222 static const char *window_title;
223 static int fs_screen_width;
224 static int fs_screen_height;
225 static int screen_width = 0;
226 static int screen_height = 0;
227 static int audio_disable;
228 static int video_disable;
229 static int wanted_stream[AVMEDIA_TYPE_NB]={
230     [AVMEDIA_TYPE_AUDIO]=-1,
231     [AVMEDIA_TYPE_VIDEO]=-1,
232     [AVMEDIA_TYPE_SUBTITLE]=-1,
233 };
234 static int seek_by_bytes=-1;
235 static int display_disable;
236 static int show_status = 1;
237 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
238 static int64_t start_time = AV_NOPTS_VALUE;
239 static int64_t duration = AV_NOPTS_VALUE;
240 static int step = 0;
241 static int thread_count = 1;
242 static int workaround_bugs = 1;
243 static int fast = 0;
244 static int genpts = 0;
245 static int lowres = 0;
246 static int idct = FF_IDCT_AUTO;
247 static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
248 static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
249 static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
250 static int error_recognition = FF_ER_CAREFUL;
251 static int error_concealment = 3;
252 static int decoder_reorder_pts= -1;
253 static int autoexit;
254 static int exit_on_keydown;
255 static int exit_on_mousedown;
256 static int loop=1;
257 static int framedrop=-1;
258 static enum ShowMode show_mode = SHOW_MODE_NONE;
259
260 static int rdftspeed=20;
261 #if CONFIG_AVFILTER
262 static char *vfilters = NULL;
263 #endif
264
265 /* current context */
266 static int is_full_screen;
267 static VideoState *cur_stream;
268 static int64_t audio_callback_time;
269
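/* flush_pkt is a sentinel packet pushed into the queues when seeking; a decoder thread
 * that pops it flushes its codec buffers instead of decoding (see get_video_frame()
 * below). The FF_* SDL user events let worker threads ask the main event loop to
 * allocate an overlay, refresh the display, or quit. */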
270 static AVPacket flush_pkt;
271
272 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
273 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
274 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
275
276 static SDL_Surface *screen;
277
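/* Append one packet to the queue. The packet data is duplicated (unless it is the flush
 * sentinel) so the caller may reuse its AVPacket, and any consumer blocked in
 * packet_queue_get() is signalled. Returns 0 on success, -1 on allocation failure. */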
278 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
279 {
280     AVPacketList *pkt1;
281
282     /* duplicate the packet */
283     if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
284         return -1;
285
286     pkt1 = av_malloc(sizeof(AVPacketList));
287     if (!pkt1)
288         return -1;
289     pkt1->pkt = *pkt;
290     pkt1->next = NULL;
291
292
293     SDL_LockMutex(q->mutex);
294
295     if (!q->last_pkt)
296
297         q->first_pkt = pkt1;
298     else
299         q->last_pkt->next = pkt1;
300     q->last_pkt = pkt1;
301     q->nb_packets++;
302     q->size += pkt1->pkt.size + sizeof(*pkt1);
303     /* XXX: should duplicate packet data in DV case */
304     SDL_CondSignal(q->cond);
305
306     SDL_UnlockMutex(q->mutex);
307     return 0;
308 }
309
310 /* packet queue handling */
311 static void packet_queue_init(PacketQueue *q)
312 {
313     memset(q, 0, sizeof(PacketQueue));
314     q->mutex = SDL_CreateMutex();
315     q->cond = SDL_CreateCond();
316     packet_queue_put(q, &flush_pkt);
317 }
318
319 static void packet_queue_flush(PacketQueue *q)
320 {
321     AVPacketList *pkt, *pkt1;
322
323     SDL_LockMutex(q->mutex);
324     for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
325         pkt1 = pkt->next;
326         av_free_packet(&pkt->pkt);
327         av_freep(&pkt);
328     }
329     q->last_pkt = NULL;
330     q->first_pkt = NULL;
331     q->nb_packets = 0;
332     q->size = 0;
333     SDL_UnlockMutex(q->mutex);
334 }
335
336 static void packet_queue_end(PacketQueue *q)
337 {
338     packet_queue_flush(q);
339     SDL_DestroyMutex(q->mutex);
340     SDL_DestroyCond(q->cond);
341 }
342
343 static void packet_queue_abort(PacketQueue *q)
344 {
345     SDL_LockMutex(q->mutex);
346
347     q->abort_request = 1;
348
349     SDL_CondSignal(q->cond);
350
351     SDL_UnlockMutex(q->mutex);
352 }
353
354 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
355 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
356 {
357     AVPacketList *pkt1;
358     int ret;
359
360     SDL_LockMutex(q->mutex);
361
362     for(;;) {
363         if (q->abort_request) {
364             ret = -1;
365             break;
366         }
367
368         pkt1 = q->first_pkt;
369         if (pkt1) {
370             q->first_pkt = pkt1->next;
371             if (!q->first_pkt)
372                 q->last_pkt = NULL;
373             q->nb_packets--;
374             q->size -= pkt1->pkt.size + sizeof(*pkt1);
375             *pkt = pkt1->pkt;
376             av_free(pkt1);
377             ret = 1;
378             break;
379         } else if (!block) {
380             ret = 0;
381             break;
382         } else {
383             SDL_CondWait(q->cond, q->mutex);
384         }
385     }
386     SDL_UnlockMutex(q->mutex);
387     return ret;
388 }
389
390 static inline void fill_rectangle(SDL_Surface *screen,
391                                   int x, int y, int w, int h, int color)
392 {
393     SDL_Rect rect;
394     rect.x = x;
395     rect.y = y;
396     rect.w = w;
397     rect.h = h;
398     SDL_FillRect(screen, &rect, color);
399 }
400
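/* Pixel blending helpers for subtitle overlay. ALPHA_BLEND mixes a new sample into an
 * old one with alpha 'a'; 's' is an extra scale shift used when 'newp' is an accumulated
 * sum of 2 or 4 chroma samples. For example, ALPHA_BLEND(255, oldp, newp, 0) yields newp
 * and ALPHA_BLEND(0, oldp, newp, 0) yields oldp. RGBA_IN unpacks a 32-bit ARGB value;
 * YUVA_IN looks up a palette entry for an 8-bit pixel and unpacks it the same way. */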
401 #define ALPHA_BLEND(a, oldp, newp, s)\
402 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
403
404 #define RGBA_IN(r, g, b, a, s)\
405 {\
406     unsigned int v = ((const uint32_t *)(s))[0];\
407     a = (v >> 24) & 0xff;\
408     r = (v >> 16) & 0xff;\
409     g = (v >> 8) & 0xff;\
410     b = v & 0xff;\
411 }
412
413 #define YUVA_IN(y, u, v, a, s, pal)\
414 {\
415     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
416     a = (val >> 24) & 0xff;\
417     y = (val >> 16) & 0xff;\
418     u = (val >> 8) & 0xff;\
419     v = val & 0xff;\
420 }
421
422 #define YUVA_OUT(d, y, u, v, a)\
423 {\
424     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
425 }
426
427
428 #define BPP 1
429
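/* Blend one palettized (PAL8, one byte per pixel, hence BPP == 1) subtitle rectangle onto
 * a YUV420P destination picture. The palette in rect->pict.data[1] is expected to already
 * be in YUVA (see the "Now in YCrCb!" note below). Luma is blended per pixel; chroma is
 * subsampled 2x2, so u/v/alpha are accumulated over each 2x2 block (u1/v1/a1) and blended
 * with the matching extra shift in ALPHA_BLEND. The three sections handle an odd first
 * line, the even bulk two lines at a time, and an odd trailing line. */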
430 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
431 {
432     int wrap, wrap3, width2, skip2;
433     int y, u, v, a, u1, v1, a1, w, h;
434     uint8_t *lum, *cb, *cr;
435     const uint8_t *p;
436     const uint32_t *pal;
437     int dstx, dsty, dstw, dsth;
438
439     dstw = av_clip(rect->w, 0, imgw);
440     dsth = av_clip(rect->h, 0, imgh);
441     dstx = av_clip(rect->x, 0, imgw - dstw);
442     dsty = av_clip(rect->y, 0, imgh - dsth);
443     lum = dst->data[0] + dsty * dst->linesize[0];
444     cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
445     cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
446
447     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
448     skip2 = dstx >> 1;
449     wrap = dst->linesize[0];
450     wrap3 = rect->pict.linesize[0];
451     p = rect->pict.data[0];
452     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
453
454     if (dsty & 1) {
455         lum += dstx;
456         cb += skip2;
457         cr += skip2;
458
459         if (dstx & 1) {
460             YUVA_IN(y, u, v, a, p, pal);
461             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
462             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
463             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
464             cb++;
465             cr++;
466             lum++;
467             p += BPP;
468         }
469         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
470             YUVA_IN(y, u, v, a, p, pal);
471             u1 = u;
472             v1 = v;
473             a1 = a;
474             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
475
476             YUVA_IN(y, u, v, a, p + BPP, pal);
477             u1 += u;
478             v1 += v;
479             a1 += a;
480             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
481             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
482             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
483             cb++;
484             cr++;
485             p += 2 * BPP;
486             lum += 2;
487         }
488         if (w) {
489             YUVA_IN(y, u, v, a, p, pal);
490             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
491             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
492             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
493             p++;
494             lum++;
495         }
496         p += wrap3 - dstw * BPP;
497         lum += wrap - dstw - dstx;
498         cb += dst->linesize[1] - width2 - skip2;
499         cr += dst->linesize[2] - width2 - skip2;
500     }
501     for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
502         lum += dstx;
503         cb += skip2;
504         cr += skip2;
505
506         if (dstx & 1) {
507             YUVA_IN(y, u, v, a, p, pal);
508             u1 = u;
509             v1 = v;
510             a1 = a;
511             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
512             p += wrap3;
513             lum += wrap;
514             YUVA_IN(y, u, v, a, p, pal);
515             u1 += u;
516             v1 += v;
517             a1 += a;
518             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
519             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
520             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
521             cb++;
522             cr++;
523             p += -wrap3 + BPP;
524             lum += -wrap + 1;
525         }
526         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
527             YUVA_IN(y, u, v, a, p, pal);
528             u1 = u;
529             v1 = v;
530             a1 = a;
531             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
532
533             YUVA_IN(y, u, v, a, p + BPP, pal);
534             u1 += u;
535             v1 += v;
536             a1 += a;
537             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
538             p += wrap3;
539             lum += wrap;
540
541             YUVA_IN(y, u, v, a, p, pal);
542             u1 += u;
543             v1 += v;
544             a1 += a;
545             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
546
547             YUVA_IN(y, u, v, a, p + BPP, pal);
548             u1 += u;
549             v1 += v;
550             a1 += a;
551             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
552
553             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
554             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
555
556             cb++;
557             cr++;
558             p += -wrap3 + 2 * BPP;
559             lum += -wrap + 2;
560         }
561         if (w) {
562             YUVA_IN(y, u, v, a, p, pal);
563             u1 = u;
564             v1 = v;
565             a1 = a;
566             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
567             p += wrap3;
568             lum += wrap;
569             YUVA_IN(y, u, v, a, p, pal);
570             u1 += u;
571             v1 += v;
572             a1 += a;
573             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
574             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
575             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
576             cb++;
577             cr++;
578             p += -wrap3 + BPP;
579             lum += -wrap + 1;
580         }
581         p += wrap3 + (wrap3 - dstw * BPP);
582         lum += wrap + (wrap - dstw - dstx);
583         cb += dst->linesize[1] - width2 - skip2;
584         cr += dst->linesize[2] - width2 - skip2;
585     }
586     /* handle odd height */
587     if (h) {
588         lum += dstx;
589         cb += skip2;
590         cr += skip2;
591
592         if (dstx & 1) {
593             YUVA_IN(y, u, v, a, p, pal);
594             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
595             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
596             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
597             cb++;
598             cr++;
599             lum++;
600             p += BPP;
601         }
602         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
603             YUVA_IN(y, u, v, a, p, pal);
604             u1 = u;
605             v1 = v;
606             a1 = a;
607             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
608
609             YUVA_IN(y, u, v, a, p + BPP, pal);
610             u1 += u;
611             v1 += v;
612             a1 += a;
613             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
614             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
615             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
616             cb++;
617             cr++;
618             p += 2 * BPP;
619             lum += 2;
620         }
621         if (w) {
622             YUVA_IN(y, u, v, a, p, pal);
623             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
624             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
625             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
626         }
627     }
628 }
629
630 static void free_subpicture(SubPicture *sp)
631 {
632     avsubtitle_free(&sp->sub);
633 }
634
635 static void video_image_display(VideoState *is)
636 {
637     VideoPicture *vp;
638     SubPicture *sp;
639     AVPicture pict;
640     float aspect_ratio;
641     int width, height, x, y;
642     SDL_Rect rect;
643     int i;
644
645     vp = &is->pictq[is->pictq_rindex];
646     if (vp->bmp) {
647 #if CONFIG_AVFILTER
648          if (vp->picref->video->sample_aspect_ratio.num == 0)
649              aspect_ratio = 0;
650          else
651              aspect_ratio = av_q2d(vp->picref->video->sample_aspect_ratio);
652 #else
653
654         /* XXX: use variable in the frame */
655         if (is->video_st->sample_aspect_ratio.num)
656             aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
657         else if (is->video_st->codec->sample_aspect_ratio.num)
658             aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
659         else
660             aspect_ratio = 0;
661 #endif
662         if (aspect_ratio <= 0.0)
663             aspect_ratio = 1.0;
664         aspect_ratio *= (float)vp->width / (float)vp->height;
665
666         if (is->subtitle_st) {
667             if (is->subpq_size > 0) {
668                 sp = &is->subpq[is->subpq_rindex];
669
670                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
671                     SDL_LockYUVOverlay (vp->bmp);
672
673                     pict.data[0] = vp->bmp->pixels[0];
674                     pict.data[1] = vp->bmp->pixels[2];
675                     pict.data[2] = vp->bmp->pixels[1];
676
677                     pict.linesize[0] = vp->bmp->pitches[0];
678                     pict.linesize[1] = vp->bmp->pitches[2];
679                     pict.linesize[2] = vp->bmp->pitches[1];
680
681                     for (i = 0; i < sp->sub.num_rects; i++)
682                         blend_subrect(&pict, sp->sub.rects[i],
683                                       vp->bmp->w, vp->bmp->h);
684
685                     SDL_UnlockYUVOverlay (vp->bmp);
686                 }
687             }
688         }
689
690
691         /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
692         height = is->height;
693         width = ((int)rint(height * aspect_ratio)) & ~1;
694         if (width > is->width) {
695             width = is->width;
696             height = ((int)rint(width / aspect_ratio)) & ~1;
697         }
698         x = (is->width - width) / 2;
699         y = (is->height - height) / 2;
700         is->no_background = 0;
701         rect.x = is->xleft + x;
702         rect.y = is->ytop  + y;
703         rect.w = FFMAX(width,  1);
704         rect.h = FFMAX(height, 1);
705         SDL_DisplayYUVOverlay(vp->bmp, &rect);
706     }
707 }
708
709 /* get the current audio output buffer size, in bytes. With SDL, we
710    cannot get precise hardware buffer fullness information */
711 static int audio_write_get_buf_size(VideoState *is)
712 {
713     return is->audio_buf_size - is->audio_buf_index;
714 }
715
716 static inline int compute_mod(int a, int b)
717 {
718     return a < 0 ? a%b + b : a%b;
719 }
720
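/* Audio-only visualization: draws either a per-channel waveform (SHOW_MODE_WAVES) or a
 * scrolling RDFT spectrogram. The display is centered on the samples currently being
 * played by estimating the output delay (buffered bytes plus the time elapsed since the
 * last audio callback) and indexing back into the circular sample_array. */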
721 static void video_audio_display(VideoState *s)
722 {
723     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
724     int ch, channels, h, h2, bgcolor, fgcolor;
725     int64_t time_diff; /* microseconds elapsed since the last audio callback */
726     int rdft_bits, nb_freq;
727
728     for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
729         ;
730     nb_freq= 1<<(rdft_bits-1);
731
732     /* compute display index: center on currently output samples */
733     channels = s->audio_st->codec->channels;
734     nb_display_channels = channels;
735     if (!s->paused) {
736         int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
737         n = 2 * channels;
738         delay = audio_write_get_buf_size(s);
739         delay /= n;
740
741         /* to be more precise, we take into account the time spent since
742            the last buffer computation */
743         if (audio_callback_time) {
744             time_diff = av_gettime() - audio_callback_time;
745             delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
746         }
747
748         delay += 2*data_used;
749         if (delay < data_used)
750             delay = data_used;
751
752         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
753         if (s->show_mode == SHOW_MODE_WAVES) {
754             h= INT_MIN;
755             for(i=0; i<1000; i+=channels){
756                 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
757                 int a= s->sample_array[idx];
758                 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
759                 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
760                 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
761                 int score= a-d;
762                 if(h<score && (b^c)<0){
763                     h= score;
764                     i_start= idx;
765                 }
766             }
767         }
768
769         s->last_i_start = i_start;
770     } else {
771         i_start = s->last_i_start;
772     }
773
774     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
775     if (s->show_mode == SHOW_MODE_WAVES) {
776         fill_rectangle(screen,
777                        s->xleft, s->ytop, s->width, s->height,
778                        bgcolor);
779
780         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
781
782         /* total height for one channel */
783         h = s->height / nb_display_channels;
784         /* graph height / 2 */
785         h2 = (h * 9) / 20;
786         for(ch = 0;ch < nb_display_channels; ch++) {
787             i = i_start + ch;
788             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
789             for(x = 0; x < s->width; x++) {
790                 y = (s->sample_array[i] * h2) >> 15;
791                 if (y < 0) {
792                     y = -y;
793                     ys = y1 - y;
794                 } else {
795                     ys = y1;
796                 }
797                 fill_rectangle(screen,
798                                s->xleft + x, ys, 1, y,
799                                fgcolor);
800                 i += channels;
801                 if (i >= SAMPLE_ARRAY_SIZE)
802                     i -= SAMPLE_ARRAY_SIZE;
803             }
804         }
805
806         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
807
808         for(ch = 1;ch < nb_display_channels; ch++) {
809             y = s->ytop + ch * h;
810             fill_rectangle(screen,
811                            s->xleft, y, s->width, 1,
812                            fgcolor);
813         }
814         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
815     }else{
816         nb_display_channels= FFMIN(nb_display_channels, 2);
817         if(rdft_bits != s->rdft_bits){
818             av_rdft_end(s->rdft);
819             av_free(s->rdft_data);
820             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
821             s->rdft_bits= rdft_bits;
822             s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
823         }
824         {
825             FFTSample *data[2];
826             for(ch = 0;ch < nb_display_channels; ch++) {
827                 data[ch] = s->rdft_data + 2*nb_freq*ch;
828                 i = i_start + ch;
829                 for(x = 0; x < 2*nb_freq; x++) {
830                     double w= (x-nb_freq)*(1.0/nb_freq);
831                     data[ch][x]= s->sample_array[i]*(1.0-w*w);
832                     i += channels;
833                     if (i >= SAMPLE_ARRAY_SIZE)
834                         i -= SAMPLE_ARRAY_SIZE;
835                 }
836                 av_rdft_calc(s->rdft, data[ch]);
837             }
838             // Least efficient way to do this; we could access the pixels directly, but this is more than fast enough
839             for(y=0; y<s->height; y++){
840                 double w= 1/sqrt(nb_freq);
841                 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
842                 int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
843                        + data[1][2*y+1]*data[1][2*y+1])) : a;
844                 a= FFMIN(a,255);
845                 b= FFMIN(b,255);
846                 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
847
848                 fill_rectangle(screen,
849                             s->xpos, s->height-y, 1, 1,
850                             fgcolor);
851             }
852         }
853         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
854         s->xpos++;
855         if(s->xpos >= s->width)
856             s->xpos= s->xleft;
857     }
858 }
859
860 static void stream_close(VideoState *is)
861 {
862     VideoPicture *vp;
863     int i;
864     /* XXX: use a special url_shutdown call to abort parse cleanly */
865     is->abort_request = 1;
866     SDL_WaitThread(is->read_tid, NULL);
867     SDL_WaitThread(is->refresh_tid, NULL);
868
869     /* free all pictures */
870     for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
871         vp = &is->pictq[i];
872 #if CONFIG_AVFILTER
873         if (vp->picref) {
874             avfilter_unref_buffer(vp->picref);
875             vp->picref = NULL;
876         }
877 #endif
878         if (vp->bmp) {
879             SDL_FreeYUVOverlay(vp->bmp);
880             vp->bmp = NULL;
881         }
882     }
883     SDL_DestroyMutex(is->pictq_mutex);
884     SDL_DestroyCond(is->pictq_cond);
885     SDL_DestroyMutex(is->subpq_mutex);
886     SDL_DestroyCond(is->subpq_cond);
887 #if !CONFIG_AVFILTER
888     if (is->img_convert_ctx)
889         sws_freeContext(is->img_convert_ctx);
890 #endif
891     av_free(is);
892 }
893
894 static void do_exit(void)
895 {
896     if (cur_stream) {
897         stream_close(cur_stream);
898         cur_stream = NULL;
899     }
900     uninit_opts();
901 #if CONFIG_AVFILTER
902     avfilter_uninit();
903 #endif
904     if (show_status)
905         printf("\n");
906     SDL_Quit();
907     av_log(NULL, AV_LOG_QUIET, "%s", "");
908     exit(0);
909 }
910
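/* (Re)create the SDL output surface. The size is taken, in order of preference, from the
 * fullscreen dimensions, a user-requested size (screen_width/screen_height), the filter
 * graph or codec dimensions, or a 640x480 fallback; nothing is done if the existing
 * surface already matches. */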
911 static int video_open(VideoState *is){
912     int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
913     int w,h;
914
915     if(is_full_screen) flags |= SDL_FULLSCREEN;
916     else               flags |= SDL_RESIZABLE;
917
918     if (is_full_screen && fs_screen_width) {
919         w = fs_screen_width;
920         h = fs_screen_height;
921     } else if(!is_full_screen && screen_width){
922         w = screen_width;
923         h = screen_height;
924 #if CONFIG_AVFILTER
925     }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
926         w = is->out_video_filter->inputs[0]->w;
927         h = is->out_video_filter->inputs[0]->h;
928 #else
929     }else if (is->video_st && is->video_st->codec->width){
930         w = is->video_st->codec->width;
931         h = is->video_st->codec->height;
932 #endif
933     } else {
934         w = 640;
935         h = 480;
936     }
937     if(screen && is->width == screen->w && screen->w == w
938        && is->height== screen->h && screen->h == h)
939         return 0;
940
941 #ifndef __APPLE__
942     screen = SDL_SetVideoMode(w, h, 0, flags);
943 #else
944     /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
945     screen = SDL_SetVideoMode(w, h, 24, flags);
946 #endif
947     if (!screen) {
948         fprintf(stderr, "SDL: could not set video mode - exiting\n");
949         do_exit();
950     }
951     if (!window_title)
952         window_title = input_filename;
953     SDL_WM_SetCaption(window_title, window_title);
954
955     is->width = screen->w;
956     is->height = screen->h;
957
958     return 0;
959 }
960
961 /* display the current picture, if any */
962 static void video_display(VideoState *is)
963 {
964     if(!screen)
965         video_open(cur_stream);
966     if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
967         video_audio_display(is);
968     else if (is->video_st)
969         video_image_display(is);
970 }
971
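/* Periodically pushes an FF_REFRESH_EVENT so the main loop calls video_refresh(). The
 * is->refresh flag keeps at most one refresh event pending in the SDL queue at a time. */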
972 static int refresh_thread(void *opaque)
973 {
974     VideoState *is= opaque;
975     while(!is->abort_request){
976         SDL_Event event;
977         event.type = FF_REFRESH_EVENT;
978         event.user.data1 = opaque;
979         if(!is->refresh){
980             is->refresh=1;
981             SDL_PushEvent(&event);
982         }
983         //FIXME: ideally we should wait for the correct time, but SDL's event passing is so slow it would be pointless
984         usleep(is->audio_st && is->show_mode != SHOW_MODE_VIDEO ? rdftspeed*1000 : 5000);
985     }
986     return 0;
987 }
988
989 /* get the current audio clock value */
990 static double get_audio_clock(VideoState *is)
991 {
992     double pts;
993     int hw_buf_size, bytes_per_sec;
994     pts = is->audio_clock;
995     hw_buf_size = audio_write_get_buf_size(is);
996     bytes_per_sec = 0;
997     if (is->audio_st) {
998         bytes_per_sec = is->audio_st->codec->sample_rate *
999             2 * is->audio_st->codec->channels;
1000     }
1001     if (bytes_per_sec)
1002         pts -= (double)hw_buf_size / bytes_per_sec;
1003     return pts;
1004 }
1005
1006 /* get the current video clock value */
1007 static double get_video_clock(VideoState *is)
1008 {
1009     if (is->paused) {
1010         return is->video_current_pts;
1011     } else {
1012         return is->video_current_pts_drift + av_gettime() / 1000000.0;
1013     }
1014 }
1015
1016 /* get the current external clock value */
1017 static double get_external_clock(VideoState *is)
1018 {
1019     int64_t ti;
1020     ti = av_gettime();
1021     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1022 }
1023
1024 /* get the current master clock value */
1025 static double get_master_clock(VideoState *is)
1026 {
1027     double val;
1028
1029     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1030         if (is->video_st)
1031             val = get_video_clock(is);
1032         else
1033             val = get_audio_clock(is);
1034     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1035         if (is->audio_st)
1036             val = get_audio_clock(is);
1037         else
1038             val = get_video_clock(is);
1039     } else {
1040         val = get_external_clock(is);
1041     }
1042     return val;
1043 }
1044
1045 /* seek in the stream */
1046 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1047 {
1048     if (!is->seek_req) {
1049         is->seek_pos = pos;
1050         is->seek_rel = rel;
1051         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1052         if (seek_by_bytes)
1053             is->seek_flags |= AVSEEK_FLAG_BYTE;
1054         is->seek_req = 1;
1055     }
1056 }
1057
1058 /* pause or resume the video */
1059 static void stream_toggle_pause(VideoState *is)
1060 {
1061     if (is->paused) {
1062         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1063         if(is->read_pause_return != AVERROR(ENOSYS)){
1064             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1065         }
1066         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1067     }
1068     is->paused = !is->paused;
1069 }
1070
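/* Compute the absolute time (in frame_timer units) at which the frame with the given PTS
 * should be displayed. The nominal delay is the PTS difference to the previous frame;
 * when video is not the master clock, the delay is shrunk to 0 or doubled whenever the
 * video clock drifts past the sync threshold, so video catches up with or waits for the
 * master clock. */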
1071 static double compute_target_time(double frame_current_pts, VideoState *is)
1072 {
1073     double delay, sync_threshold, diff;
1074
1075     /* compute nominal delay */
1076     delay = frame_current_pts - is->frame_last_pts;
1077     if (delay <= 0 || delay >= 10.0) {
1078         /* if incorrect delay, use previous one */
1079         delay = is->frame_last_delay;
1080     } else {
1081         is->frame_last_delay = delay;
1082     }
1083     is->frame_last_pts = frame_current_pts;
1084
1085     /* update delay to follow master synchronisation source */
1086     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1087          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1088         /* if video is slave, we try to correct big delays by
1089            duplicating or deleting a frame */
1090         diff = get_video_clock(is) - get_master_clock(is);
1091
1092         /* skip or repeat frame. We take into account the
1093            delay to compute the threshold. I still don't know
1094            if it is the best guess */
1095         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1096         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1097             if (diff <= -sync_threshold)
1098                 delay = 0;
1099             else if (diff >= sync_threshold)
1100                 delay = 2 * delay;
1101         }
1102     }
1103     is->frame_timer += delay;
1104
1105     av_dlog(NULL, "video: delay=%0.3f pts=%0.3f A-V=%f\n",
1106             delay, frame_current_pts, -diff);
1107
1108     return is->frame_timer;
1109 }
1110
1111 /* called to display each frame */
1112 static void video_refresh(void *opaque)
1113 {
1114     VideoState *is = opaque;
1115     VideoPicture *vp;
1116
1117     SubPicture *sp, *sp2;
1118
1119     if (is->video_st) {
1120 retry:
1121         if (is->pictq_size == 0) {
1122             // nothing to do, no picture to display in the queue
1123         } else {
1124             double time= av_gettime()/1000000.0;
1125             double next_target;
1126             /* dequeue the picture */
1127             vp = &is->pictq[is->pictq_rindex];
1128
1129             if(time < vp->target_clock)
1130                 return;
1131             /* update current video pts */
1132             is->video_current_pts = vp->pts;
1133             is->video_current_pts_drift = is->video_current_pts - time;
1134             is->video_current_pos = vp->pos;
1135             if(is->pictq_size > 1){
1136                 VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
1137                 assert(nextvp->target_clock >= vp->target_clock);
1138                 next_target= nextvp->target_clock;
1139             }else{
1140                 next_target= vp->target_clock + vp->duration;
1141             }
1142             if((framedrop>0 || (framedrop && is->audio_st)) && time > next_target){
1143                 is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
1144                 if(is->pictq_size > 1 || time > next_target + 0.5){
1145                     /* update queue size and signal for next picture */
1146                     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1147                         is->pictq_rindex = 0;
1148
1149                     SDL_LockMutex(is->pictq_mutex);
1150                     is->pictq_size--;
1151                     SDL_CondSignal(is->pictq_cond);
1152                     SDL_UnlockMutex(is->pictq_mutex);
1153                     goto retry;
1154                 }
1155             }
1156
1157             if(is->subtitle_st) {
1158                 if (is->subtitle_stream_changed) {
1159                     SDL_LockMutex(is->subpq_mutex);
1160
1161                     while (is->subpq_size) {
1162                         free_subpicture(&is->subpq[is->subpq_rindex]);
1163
1164                         /* update queue size and signal for next picture */
1165                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1166                             is->subpq_rindex = 0;
1167
1168                         is->subpq_size--;
1169                     }
1170                     is->subtitle_stream_changed = 0;
1171
1172                     SDL_CondSignal(is->subpq_cond);
1173                     SDL_UnlockMutex(is->subpq_mutex);
1174                 } else {
1175                     if (is->subpq_size > 0) {
1176                         sp = &is->subpq[is->subpq_rindex];
1177
1178                         if (is->subpq_size > 1)
1179                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1180                         else
1181                             sp2 = NULL;
1182
1183                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1184                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1185                         {
1186                             free_subpicture(sp);
1187
1188                             /* update queue size and signal for next picture */
1189                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1190                                 is->subpq_rindex = 0;
1191
1192                             SDL_LockMutex(is->subpq_mutex);
1193                             is->subpq_size--;
1194                             SDL_CondSignal(is->subpq_cond);
1195                             SDL_UnlockMutex(is->subpq_mutex);
1196                         }
1197                     }
1198                 }
1199             }
1200
1201             /* display picture */
1202             if (!display_disable)
1203                 video_display(is);
1204
1205             /* update queue size and signal for next picture */
1206             if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1207                 is->pictq_rindex = 0;
1208
1209             SDL_LockMutex(is->pictq_mutex);
1210             is->pictq_size--;
1211             SDL_CondSignal(is->pictq_cond);
1212             SDL_UnlockMutex(is->pictq_mutex);
1213         }
1214     } else if (is->audio_st) {
1215         /* draw the next audio frame */
1216
1217         /* if only audio stream, then display the audio bars (better
1218            than nothing, just to test the implementation) */
1219
1220         /* display picture */
1221         if (!display_disable)
1222             video_display(is);
1223     }
1224     if (show_status) {
1225         static int64_t last_time;
1226         int64_t cur_time;
1227         int aqsize, vqsize, sqsize;
1228         double av_diff;
1229
1230         cur_time = av_gettime();
1231         if (!last_time || (cur_time - last_time) >= 30000) {
1232             aqsize = 0;
1233             vqsize = 0;
1234             sqsize = 0;
1235             if (is->audio_st)
1236                 aqsize = is->audioq.size;
1237             if (is->video_st)
1238                 vqsize = is->videoq.size;
1239             if (is->subtitle_st)
1240                 sqsize = is->subtitleq.size;
1241             av_diff = 0;
1242             if (is->audio_st && is->video_st)
1243                 av_diff = get_audio_clock(is) - get_video_clock(is);
1244             printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1245                    get_master_clock(is),
1246                    av_diff,
1247                    FFMAX(is->skip_frames-1, 0),
1248                    aqsize / 1024,
1249                    vqsize / 1024,
1250                    sqsize,
1251                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
1252                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
1253             fflush(stdout);
1254             last_time = cur_time;
1255         }
1256     }
1257 }
1258
1259 /* allocate a picture (this must be done in the main thread to avoid
1260    potential locking problems) */
1261 static void alloc_picture(void *opaque)
1262 {
1263     VideoState *is = opaque;
1264     VideoPicture *vp;
1265
1266     vp = &is->pictq[is->pictq_windex];
1267
1268     if (vp->bmp)
1269         SDL_FreeYUVOverlay(vp->bmp);
1270
1271 #if CONFIG_AVFILTER
1272     if (vp->picref)
1273         avfilter_unref_buffer(vp->picref);
1274     vp->picref = NULL;
1275
1276     vp->width   = is->out_video_filter->inputs[0]->w;
1277     vp->height  = is->out_video_filter->inputs[0]->h;
1278     vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1279 #else
1280     vp->width   = is->video_st->codec->width;
1281     vp->height  = is->video_st->codec->height;
1282     vp->pix_fmt = is->video_st->codec->pix_fmt;
1283 #endif
1284
1285     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1286                                    SDL_YV12_OVERLAY,
1287                                    screen);
1288     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1289         /* SDL allocates a buffer smaller than requested if the video
1290          * overlay hardware is unable to support the requested size. */
1291         fprintf(stderr, "Error: the video system does not support an image\n"
1292                         "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1293                         "to reduce the image size.\n", vp->width, vp->height );
1294         do_exit();
1295     }
1296
1297     SDL_LockMutex(is->pictq_mutex);
1298     vp->allocated = 1;
1299     SDL_CondSignal(is->pictq_cond);
1300     SDL_UnlockMutex(is->pictq_mutex);
1301 }
1302
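/* Put one decoded frame into the picture queue. Extrapolates a PTS from the video clock
 * when the stream omits one, waits for a free queue slot, asks the main thread via
 * FF_ALLOC_EVENT to (re)allocate the SDL overlay if the size or format changed, then
 * copies or converts the frame into the overlay. Note the U/V plane swap when filling
 * 'pict': SDL's YV12 layout stores the V plane before the U plane. */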
1303 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1304 {
1305     VideoPicture *vp;
1306     double frame_delay, pts = pts1;
1307
1308     /* compute the exact PTS for the picture if it is omitted in the stream
1309      * pts1 is the dts of the pkt / pts of the frame */
1310     if (pts != 0) {
1311         /* update video clock with pts, if present */
1312         is->video_clock = pts;
1313     } else {
1314         pts = is->video_clock;
1315     }
1316     /* update video clock for next frame */
1317     frame_delay = av_q2d(is->video_st->codec->time_base);
1318     /* for MPEG2, the frame can be repeated, so we update the
1319        clock accordingly */
1320     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1321     is->video_clock += frame_delay;
1322
1323 #if defined(DEBUG_SYNC) && 0
1324     printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1325            av_get_picture_type_char(src_frame->pict_type), pts, pts1);
1326 #endif
1327
1328     /* wait until we have space to put a new picture */
1329     SDL_LockMutex(is->pictq_mutex);
1330
1331     if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1332         is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));
1333
1334     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1335            !is->videoq.abort_request) {
1336         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1337     }
1338     SDL_UnlockMutex(is->pictq_mutex);
1339
1340     if (is->videoq.abort_request)
1341         return -1;
1342
1343     vp = &is->pictq[is->pictq_windex];
1344
1345     vp->duration = frame_delay;
1346
1347     /* alloc or resize hardware picture buffer */
1348     if (!vp->bmp ||
1349 #if CONFIG_AVFILTER
1350         vp->width  != is->out_video_filter->inputs[0]->w ||
1351         vp->height != is->out_video_filter->inputs[0]->h) {
1352 #else
1353         vp->width != is->video_st->codec->width ||
1354         vp->height != is->video_st->codec->height) {
1355 #endif
1356         SDL_Event event;
1357
1358         vp->allocated = 0;
1359
1360         /* the allocation must be done in the main thread to avoid
1361            locking problems */
1362         event.type = FF_ALLOC_EVENT;
1363         event.user.data1 = is;
1364         SDL_PushEvent(&event);
1365
1366         /* wait until the picture is allocated */
1367         SDL_LockMutex(is->pictq_mutex);
1368         while (!vp->allocated && !is->videoq.abort_request) {
1369             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1370         }
1371         SDL_UnlockMutex(is->pictq_mutex);
1372
1373         if (is->videoq.abort_request)
1374             return -1;
1375     }
1376
1377     /* if the frame is not skipped, then display it */
1378     if (vp->bmp) {
1379         AVPicture pict;
1380 #if CONFIG_AVFILTER
1381         if(vp->picref)
1382             avfilter_unref_buffer(vp->picref);
1383         vp->picref = src_frame->opaque;
1384 #endif
1385
1386         /* get a pointer to the bitmap */
1387         SDL_LockYUVOverlay (vp->bmp);
1388
1389         memset(&pict,0,sizeof(AVPicture));
1390         pict.data[0] = vp->bmp->pixels[0];
1391         pict.data[1] = vp->bmp->pixels[2];
1392         pict.data[2] = vp->bmp->pixels[1];
1393
1394         pict.linesize[0] = vp->bmp->pitches[0];
1395         pict.linesize[1] = vp->bmp->pitches[2];
1396         pict.linesize[2] = vp->bmp->pitches[1];
1397
1398 #if CONFIG_AVFILTER
1399         //FIXME use direct rendering
1400         av_picture_copy(&pict, (AVPicture *)src_frame,
1401                         vp->pix_fmt, vp->width, vp->height);
1402 #else
1403         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1404         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1405             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1406             PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
1407         if (is->img_convert_ctx == NULL) {
1408             fprintf(stderr, "Cannot initialize the conversion context\n");
1409             exit(1);
1410         }
1411         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1412                   0, vp->height, pict.data, pict.linesize);
1413 #endif
1414         /* update the bitmap content */
1415         SDL_UnlockYUVOverlay(vp->bmp);
1416
1417         vp->pts = pts;
1418         vp->pos = pos;
1419
1420         /* now we can update the picture count */
1421         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1422             is->pictq_windex = 0;
1423         SDL_LockMutex(is->pictq_mutex);
1424         vp->target_clock= compute_target_time(vp->pts, is);
1425
1426         is->pictq_size++;
1427         SDL_UnlockMutex(is->pictq_mutex);
1428     }
1429     return 0;
1430 }
1431
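/* Pop one packet from the video queue and decode it. Returns 1 when a picture was decoded
 * and should be queued for display, 0 when the packet was the flush sentinel, produced no
 * complete picture, or the frame is dropped by the frame-skipping logic, and -1 when the
 * queue was aborted. */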
1432 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1433 {
1434     int got_picture, i;
1435
1436     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1437         return -1;
1438
1439     if (pkt->data == flush_pkt.data) {
1440         avcodec_flush_buffers(is->video_st->codec);
1441
1442         SDL_LockMutex(is->pictq_mutex);
1443         // Make sure there are no long-delay timers (ideally we should just flush the queue, but that's harder)
1444         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1445             is->pictq[i].target_clock= 0;
1446         }
1447         while (is->pictq_size && !is->videoq.abort_request) {
1448             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1449         }
1450         is->video_current_pos = -1;
1451         SDL_UnlockMutex(is->pictq_mutex);
1452
1453         is->frame_last_pts = AV_NOPTS_VALUE;
1454         is->frame_last_delay = 0;
1455         is->frame_timer = (double)av_gettime() / 1000000.0;
1456         is->skip_frames = 1;
1457         is->skip_frames_index = 0;
1458         return 0;
1459     }
1460
1461     avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt);
1462
1463     if (got_picture) {
1464         if (decoder_reorder_pts == -1) {
1465             *pts = frame->best_effort_timestamp;
1466         } else if (decoder_reorder_pts) {
1467             *pts = frame->pkt_pts;
1468         } else {
1469             *pts = frame->pkt_dts;
1470         }
1471
1472         if (*pts == AV_NOPTS_VALUE) {
1473             *pts = 0;
1474         }
1475
1476         is->skip_frames_index += 1;
1477         if(is->skip_frames_index >= is->skip_frames){
1478             is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
1479             return 1;
1480         }
1481
1482     }
1483     return 0;
1484 }
1485
1486 #if CONFIG_AVFILTER
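/* The "ffplay_input" source filter below feeds decoded frames into the libavfilter graph.
 * When the decoder supports direct rendering (CODEC_CAP_DR1), input_get_buffer() hands the
 * decoder a buffer obtained from the filter link, so decoded pictures land directly in
 * filter-owned memory and input_request_frame() can avoid an extra copy. */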
1487 typedef struct {
1488     VideoState *is;
1489     AVFrame *frame;
1490     int use_dr1;
1491 } FilterPriv;
1492
1493 static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
1494 {
1495     AVFilterContext *ctx = codec->opaque;
1496     AVFilterBufferRef  *ref;
1497     int perms = AV_PERM_WRITE;
1498     int i, w, h, stride[4];
1499     unsigned edge;
1500     int pixel_size;
1501
1502     av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1503
1504     if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
1505         perms |= AV_PERM_NEG_LINESIZES;
1506
1507     if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
1508         if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
1509         if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
1510         if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
1511     }
1512     if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;
1513
1514     w = codec->width;
1515     h = codec->height;
1516
1517     if(av_image_check_size(w, h, 0, codec))
1518         return -1;
1519
1520     avcodec_align_dimensions2(codec, &w, &h, stride);
1521     edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
1522     w += edge << 1;
1523     h += edge << 1;
1524
1525     if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
1526         return -1;
1527
1528     pixel_size = av_pix_fmt_descriptors[ref->format].comp[0].step_minus1+1;
1529     ref->video->w = codec->width;
1530     ref->video->h = codec->height;
1531     for(i = 0; i < 4; i ++) {
1532         unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
1533         unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;
1534
1535         if (ref->data[i]) {
1536             ref->data[i]    += ((edge * pixel_size) >> hshift) + ((edge * ref->linesize[i]) >> vshift);
1537         }
1538         pic->data[i]     = ref->data[i];
1539         pic->linesize[i] = ref->linesize[i];
1540     }
1541     pic->opaque = ref;
1542     pic->age    = INT_MAX;
1543     pic->type   = FF_BUFFER_TYPE_USER;
1544     pic->reordered_opaque = codec->reordered_opaque;
1545     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1546     else           pic->pkt_pts = AV_NOPTS_VALUE;
1547     return 0;
1548 }
1549
1550 static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
1551 {
1552     memset(pic->data, 0, sizeof(pic->data));
1553     avfilter_unref_buffer(pic->opaque);
1554 }
1555
1556 static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
1557 {
1558     AVFilterBufferRef *ref = pic->opaque;
1559
1560     if (pic->data[0] == NULL) {
1561         pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
1562         return codec->get_buffer(codec, pic);
1563     }
1564
1565     if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
1566         (codec->pix_fmt != ref->format)) {
1567         av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
1568         return -1;
1569     }
1570
1571     pic->reordered_opaque = codec->reordered_opaque;
1572     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1573     else           pic->pkt_pts = AV_NOPTS_VALUE;
1574     return 0;
1575 }
1576
1577 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1578 {
1579     FilterPriv *priv = ctx->priv;
1580     AVCodecContext *codec;
1581     if(!opaque) return -1;
1582
1583     priv->is = opaque;
1584     codec    = priv->is->video_st->codec;
1585     codec->opaque = ctx;
1586     if((codec->codec->capabilities & CODEC_CAP_DR1)
1587     ) {
1588         av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1589         priv->use_dr1 = 1;
1590         codec->get_buffer     = input_get_buffer;
1591         codec->release_buffer = input_release_buffer;
1592         codec->reget_buffer   = input_reget_buffer;
1593         codec->thread_safe_callbacks = 1;
1594     }
1595
1596     priv->frame = avcodec_alloc_frame();
1597
1598     return 0;
1599 }
1600
1601 static void input_uninit(AVFilterContext *ctx)
1602 {
1603     FilterPriv *priv = ctx->priv;
1604     av_free(priv->frame);
1605 }
1606
1607 static int input_request_frame(AVFilterLink *link)
1608 {
1609     FilterPriv *priv = link->src->priv;
1610     AVFilterBufferRef *picref;
1611     int64_t pts = 0;
1612     AVPacket pkt;
1613     int ret;
1614
1615     while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
1616         av_free_packet(&pkt);
1617     if (ret < 0)
1618         return -1;
1619
1620     if(priv->use_dr1 && priv->frame->opaque) {
1621         picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
1622     } else {
1623         picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
1624         av_image_copy(picref->data, picref->linesize,
1625                       priv->frame->data, priv->frame->linesize,
1626                       picref->format, link->w, link->h);
1627     }
1628     av_free_packet(&pkt);
1629
1630     avfilter_copy_frame_props(picref, priv->frame);
1631     picref->pts = pts;
1632
1633     avfilter_start_frame(link, picref);
1634     avfilter_draw_slice(link, 0, link->h, 1);
1635     avfilter_end_frame(link);
1636
1637     return 0;
1638 }
1639
1640 static int input_query_formats(AVFilterContext *ctx)
1641 {
1642     FilterPriv *priv = ctx->priv;
1643     enum PixelFormat pix_fmts[] = {
1644         priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1645     };
1646
1647     avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts));
1648     return 0;
1649 }
1650
1651 static int input_config_props(AVFilterLink *link)
1652 {
1653     FilterPriv *priv  = link->src->priv;
1654     AVCodecContext *c = priv->is->video_st->codec;
1655
1656     link->w = c->width;
1657     link->h = c->height;
1658     link->sample_aspect_ratio = priv->is->video_st->sample_aspect_ratio;
1659     link->time_base = priv->is->video_st->time_base;
1660
1661     return 0;
1662 }
1663
1664 static AVFilter input_filter =
1665 {
1666     .name      = "ffplay_input",
1667
1668     .priv_size = sizeof(FilterPriv),
1669
1670     .init      = input_init,
1671     .uninit    = input_uninit,
1672
1673     .query_formats = input_query_formats,
1674
1675     .inputs    = (AVFilterPad[]) {{ .name = NULL }},
1676     .outputs   = (AVFilterPad[]) {{ .name = "default",
1677                                     .type = AVMEDIA_TYPE_VIDEO,
1678                                     .request_frame = input_request_frame,
1679                                     .config_props  = input_config_props, },
1680                                   { .name = NULL }},
1681 };
1682
1683 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1684 {
1685     char sws_flags_str[128];
1686     int ret;
1687     enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
1688     AVFilterContext *filt_src = NULL, *filt_out = NULL;
1689     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1690     graph->scale_sws_opts = av_strdup(sws_flags_str);
1691
1692     if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
1693                                             NULL, is, graph)) < 0)
1694         return ret;
1695     if ((ret = avfilter_graph_create_filter(&filt_out, avfilter_get_by_name("buffersink"), "out",
1696                                             NULL, pix_fmts, graph)) < 0)
1697         return ret;
1698
1699     if(vfilters) {
1700         AVFilterInOut *outputs = avfilter_inout_alloc();
1701         AVFilterInOut *inputs  = avfilter_inout_alloc();
1702
1703         outputs->name    = av_strdup("in");
1704         outputs->filter_ctx = filt_src;
1705         outputs->pad_idx = 0;
1706         outputs->next    = NULL;
1707
1708         inputs->name    = av_strdup("out");
1709         inputs->filter_ctx = filt_out;
1710         inputs->pad_idx = 0;
1711         inputs->next    = NULL;
1712
1713         if ((ret = avfilter_graph_parse(graph, vfilters, &inputs, &outputs, NULL)) < 0)
1714             return ret;
1715         av_freep(&vfilters);
1716     } else {
1717         if ((ret = avfilter_link(filt_src, 0, filt_out, 0)) < 0)
1718             return ret;
1719     }
1720
1721     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1722         return ret;
1723
1724     is->out_video_filter = filt_out;
1725
1726     return ret;
1727 }
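
/*
 * Illustrative note (not from the original source): with a hypothetical
 * command line such as
 *
 *     ffplay -vf "scale=640:480,transpose=1" input.mkv
 *
 * the function above ends up building the chain
 *
 *     ffplay_input (src) -> scale=640:480 -> transpose=1 -> buffersink (out)
 *
 * avfilter_graph_parse() splices the parsed filter description between the
 * "in"/"out" labels bound to filt_src and filt_out; when no -vf string is
 * given, the source is linked straight to the sink instead.
 */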
1728
1729 #endif  /* CONFIG_AVFILTER */
1730
1731 static int video_thread(void *arg)
1732 {
1733     VideoState *is = arg;
1734     AVFrame *frame= avcodec_alloc_frame();
1735     int64_t pts_int = AV_NOPTS_VALUE, pos = -1;
1736     double pts;
1737     int ret;
1738
1739 #if CONFIG_AVFILTER
1740     AVFilterGraph *graph = avfilter_graph_alloc();
1741     AVFilterContext *filt_out = NULL;
1742
1743     if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1744         goto the_end;
1745     filt_out = is->out_video_filter;
1746 #endif
1747
1748     for(;;) {
1749 #if !CONFIG_AVFILTER
1750         AVPacket pkt;
1751 #else
1752         AVFilterBufferRef *picref;
1753         AVRational tb = filt_out->inputs[0]->time_base;
1754 #endif
1755         while (is->paused && !is->videoq.abort_request)
1756             SDL_Delay(10);
1757 #if CONFIG_AVFILTER
1758         ret = av_vsink_buffer_get_video_buffer_ref(filt_out, &picref, 0);
1759         if (picref) {
1760             avfilter_fill_frame_from_video_buffer_ref(frame, picref);
1761             pts_int = picref->pts;
1762             pos     = picref->pos;
1763             frame->opaque = picref;
1764         }
1765
1766         if (av_cmp_q(tb, is->video_st->time_base)) {
1767             av_unused int64_t pts1 = pts_int;
1768             pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1769             av_dlog(NULL, "video_thread(): "
1770                     "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1771                     tb.num, tb.den, pts1,
1772                     is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1773         }
1774 #else
1775         ret = get_video_frame(is, frame, &pts_int, &pkt);
1776         pos = pkt.pos;
1777         av_free_packet(&pkt);
1778 #endif
1779
1780         if (ret < 0) goto the_end;
1781 #if CONFIG_AVFILTER
1782         if (!picref)
1783             continue;
1784 #endif
1785         pts = pts_int*av_q2d(is->video_st->time_base);
1786
1787         ret = queue_picture(is, frame, pts, pos);
1788
1789         if (ret < 0)
1790             goto the_end;
1791
1792         if (step)
1793             if (cur_stream)
1794                 stream_toggle_pause(cur_stream);
1795     }
1796  the_end:
1797 #if CONFIG_AVFILTER
1798     avfilter_graph_free(&graph);
1799 #endif
1800     av_free(frame);
1801     return 0;
1802 }
1803
1804 static int subtitle_thread(void *arg)
1805 {
1806     VideoState *is = arg;
1807     SubPicture *sp;
1808     AVPacket pkt1, *pkt = &pkt1;
1809     int got_subtitle;
1810     double pts;
1811     int i, j;
1812     int r, g, b, y, u, v, a;
1813
1814     for(;;) {
1815         while (is->paused && !is->subtitleq.abort_request) {
1816             SDL_Delay(10);
1817         }
1818         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1819             break;
1820
1821         if(pkt->data == flush_pkt.data){
1822             avcodec_flush_buffers(is->subtitle_st->codec);
1823             continue;
1824         }
1825         SDL_LockMutex(is->subpq_mutex);
1826         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1827                !is->subtitleq.abort_request) {
1828             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1829         }
1830         SDL_UnlockMutex(is->subpq_mutex);
1831
1832         if (is->subtitleq.abort_request)
1833             return 0;
1834
1835         sp = &is->subpq[is->subpq_windex];
1836
1837         /* NOTE: pts is the PTS of the _first_ subtitle in
1838            this packet, if any */
1839         pts = 0;
1840         if (pkt->pts != AV_NOPTS_VALUE)
1841             pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1842
1843         avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
1844                                  &got_subtitle, pkt);
1845
1846         if (got_subtitle && sp->sub.format == 0) {
1847             sp->pts = pts;
1848
1849             for (i = 0; i < sp->sub.num_rects; i++)
1850             {
1851                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1852                 {
1853                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1854                     y = RGB_TO_Y_CCIR(r, g, b);
1855                     u = RGB_TO_U_CCIR(r, g, b, 0);
1856                     v = RGB_TO_V_CCIR(r, g, b, 0);
1857                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1858                 }
1859             }
1860
1861             /* now we can update the picture count */
1862             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1863                 is->subpq_windex = 0;
1864             SDL_LockMutex(is->subpq_mutex);
1865             is->subpq_size++;
1866             SDL_UnlockMutex(is->subpq_mutex);
1867         }
1868         av_free_packet(pkt);
1869     }
1870     return 0;
1871 }
1872
1873 /* copy samples into the buffer used for the wave/spectrum display */
1874 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1875 {
1876     int size, len;
1877
1878     size = samples_size / sizeof(short);
1879     while (size > 0) {
1880         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1881         if (len > size)
1882             len = size;
1883         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1884         samples += len;
1885         is->sample_array_index += len;
1886         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1887             is->sample_array_index = 0;
1888         size -= len;
1889     }
1890 }
1891
1892 /* return the new audio buffer size (samples can be added or deleted
1893    to get better sync if video or the external clock is the master) */
1894 static int synchronize_audio(VideoState *is, short *samples,
1895                              int samples_size1, double pts)
1896 {
1897     int n, samples_size;
1898     double ref_clock;
1899
1900     n = 2 * is->audio_st->codec->channels;
1901     samples_size = samples_size1;
1902
1903     /* if not master, then we try to remove or add samples to correct the clock */
1904     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1905          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1906         double diff, avg_diff;
1907         int wanted_size, min_size, max_size, nb_samples;
1908
1909         ref_clock = get_master_clock(is);
1910         diff = get_audio_clock(is) - ref_clock;
1911
1912         if (diff < AV_NOSYNC_THRESHOLD) {
1913             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1914             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1915                 /* not enough measurements to have a correct estimate yet */
1916                 is->audio_diff_avg_count++;
1917             } else {
1918                 /* estimate the A-V difference */
1919                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1920
1921                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1922                     wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1923                     nb_samples = samples_size / n;
1924
1925                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1926                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1927                     if (wanted_size < min_size)
1928                         wanted_size = min_size;
1929                     else if (wanted_size > max_size)
1930                         wanted_size = max_size;
1931
1932                     /* add or remove samples to correct the synchronization */
1933                     if (wanted_size < samples_size) {
1934                         /* remove samples */
1935                         samples_size = wanted_size;
1936                     } else if (wanted_size > samples_size) {
1937                         uint8_t *samples_end, *q;
1938                         int nb;
1939
1940                         /* add samples by repeating the last sample frame */
1941                         nb = (wanted_size - samples_size);
1942                         samples_end = (uint8_t *)samples + samples_size - n;
1943                         q = samples_end + n;
1944                         while (nb > 0) {
1945                             memcpy(q, samples_end, n);
1946                             q += n;
1947                             nb -= n;
1948                         }
1949                         samples_size = wanted_size;
1950                     }
1951                 }
1952 #if 0
1953                 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1954                        diff, avg_diff, samples_size - samples_size1,
1955                        is->audio_clock, is->video_clock, is->audio_diff_threshold);
1956 #endif
1957             }
1958         } else {
1959             /* difference is too big: probably initial PTS errors, so
1960                reset the A-V filter */
1961             is->audio_diff_avg_count = 0;
1962             is->audio_diff_cum = 0;
1963         }
1964     }
1965
1966     return samples_size;
1967 }
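
/*
 * Worked example for the correction above (illustrative numbers only):
 * for stereo s16 audio at 44100 Hz, n = 2 * 2 = 4 bytes per sample frame.
 * If the averaged A-V difference settles at diff = 0.020 s, then
 *
 *     wanted_size = samples_size + (int)(0.020 * 44100) * 4
 *                 = samples_size + 3528 bytes
 *
 * For a 1024-frame buffer (samples_size = 4096 bytes) the min_size/max_size
 * clamp limits the change to SAMPLE_CORRECTION_PERCENT_MAX (10%), so
 * wanted_size is capped at 4504 bytes and large errors are corrected
 * gradually over several callbacks rather than in one step.
 */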
1968
1969 /* decode one audio frame and return its uncompressed size in bytes */
1970 static int audio_decode_frame(VideoState *is, double *pts_ptr)
1971 {
1972     AVPacket *pkt_temp = &is->audio_pkt_temp;
1973     AVPacket *pkt = &is->audio_pkt;
1974     AVCodecContext *dec= is->audio_st->codec;
1975     int n, len1, data_size;
1976     double pts;
1977
1978     for(;;) {
1979         /* NOTE: the audio packet can contain several frames */
1980         while (pkt_temp->size > 0) {
1981             data_size = sizeof(is->audio_buf1);
1982             len1 = avcodec_decode_audio3(dec,
1983                                         (int16_t *)is->audio_buf1, &data_size,
1984                                         pkt_temp);
1985             if (len1 < 0) {
1986                 /* if error, we skip the frame */
1987                 pkt_temp->size = 0;
1988                 break;
1989             }
1990
1991             pkt_temp->data += len1;
1992             pkt_temp->size -= len1;
1993             if (data_size <= 0)
1994                 continue;
1995
1996             if (dec->sample_fmt != is->audio_src_fmt) {
1997                 if (is->reformat_ctx)
1998                     av_audio_convert_free(is->reformat_ctx);
1999                 is->reformat_ctx= av_audio_convert_alloc(AV_SAMPLE_FMT_S16, 1,
2000                                                          dec->sample_fmt, 1, NULL, 0);
2001                 if (!is->reformat_ctx) {
2002                     fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
2003                         av_get_sample_fmt_name(dec->sample_fmt),
2004                         av_get_sample_fmt_name(AV_SAMPLE_FMT_S16));
2005                     break;
2006                 }
2007                 is->audio_src_fmt= dec->sample_fmt;
2008             }
2009
2010             if (is->reformat_ctx) {
2011                 const void *ibuf[6]= {is->audio_buf1};
2012                 void *obuf[6]= {is->audio_buf2};
2013                 int istride[6]= {av_get_bytes_per_sample(dec->sample_fmt)};
2014                 int ostride[6]= {2};
2015                 int len= data_size/istride[0];
2016                 if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
2017                     printf("av_audio_convert() failed\n");
2018                     break;
2019                 }
2020                 is->audio_buf= is->audio_buf2;
2021                 /* FIXME: existing code assumes that data_size equals framesize*channels*2;
2022                           remove this legacy cruft */
2023                 data_size= len*2;
2024             }else{
2025                 is->audio_buf= is->audio_buf1;
2026             }
2027
2028             /* if no pts, then compute it */
2029             pts = is->audio_clock;
2030             *pts_ptr = pts;
2031             n = 2 * dec->channels;
2032             is->audio_clock += (double)data_size /
2033                 (double)(n * dec->sample_rate);
2034 #ifdef DEBUG
2035             {
2036                 static double last_clock;
2037                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
2038                        is->audio_clock - last_clock,
2039                        is->audio_clock, pts);
2040                 last_clock = is->audio_clock;
2041             }
2042 #endif
2043             return data_size;
2044         }
2045
2046         /* free the current packet */
2047         if (pkt->data)
2048             av_free_packet(pkt);
2049
2050         if (is->paused || is->audioq.abort_request) {
2051             return -1;
2052         }
2053
2054         /* read next packet */
2055         if (packet_queue_get(&is->audioq, pkt, 1) < 0)
2056             return -1;
2057         if(pkt->data == flush_pkt.data){
2058             avcodec_flush_buffers(dec);
2059             continue;
2060         }
2061
2062         pkt_temp->data = pkt->data;
2063         pkt_temp->size = pkt->size;
2064
2065         /* update the audio clock with the packet pts, if available */
2066         if (pkt->pts != AV_NOPTS_VALUE) {
2067             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2068         }
2069     }
2070 }
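
/*
 * Audio clock bookkeeping, illustrated with assumed values: the decoder
 * output is converted to interleaved s16, so one sample frame occupies
 * n = 2 * channels bytes.  Returning 4096 bytes of decoded stereo 48 kHz
 * audio therefore advances is->audio_clock by
 *
 *     4096 / (2 * 2 * 48000) = 0.0213 s
 *
 * which lets get_audio_clock() report positions in between packet
 * timestamps.
 */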
2071
2072 /* prepare a new audio buffer */
2073 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2074 {
2075     VideoState *is = opaque;
2076     int audio_size, len1;
2077     double pts;
2078
2079     audio_callback_time = av_gettime();
2080
2081     while (len > 0) {
2082         if (is->audio_buf_index >= is->audio_buf_size) {
2083            audio_size = audio_decode_frame(is, &pts);
2084            if (audio_size < 0) {
2085                 /* if error, just output silence */
2086                is->audio_buf = is->audio_buf1;
2087                is->audio_buf_size = 1024;
2088                memset(is->audio_buf, 0, is->audio_buf_size);
2089            } else {
2090                if (is->show_mode != SHOW_MODE_VIDEO)
2091                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2092                audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2093                                               pts);
2094                is->audio_buf_size = audio_size;
2095            }
2096            is->audio_buf_index = 0;
2097         }
2098         len1 = is->audio_buf_size - is->audio_buf_index;
2099         if (len1 > len)
2100             len1 = len;
2101         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2102         len -= len1;
2103         stream += len1;
2104         is->audio_buf_index += len1;
2105     }
2106 }
2107
2108 /* open a given stream. Return 0 if OK */
2109 static int stream_component_open(VideoState *is, int stream_index)
2110 {
2111     AVFormatContext *ic = is->ic;
2112     AVCodecContext *avctx;
2113     AVCodec *codec;
2114     SDL_AudioSpec wanted_spec, spec;
2115     AVDictionary *opts;
2116     AVDictionaryEntry *t = NULL;
2117
2118     if (stream_index < 0 || stream_index >= ic->nb_streams)
2119         return -1;
2120     avctx = ic->streams[stream_index]->codec;
2121
2122     opts = filter_codec_opts(codec_opts, avctx->codec_id, 0);
2123
2124     /* prepare audio output */
2125     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2126         if (avctx->channels > 0) {
2127             avctx->request_channels = FFMIN(2, avctx->channels);
2128         } else {
2129             avctx->request_channels = 2;
2130         }
2131     }
2132
2133     codec = avcodec_find_decoder(avctx->codec_id);
2134     if (!codec)
2135         return -1;
2136
2137     avctx->workaround_bugs = workaround_bugs;
2138     avctx->lowres = lowres;
2139     if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2140     avctx->idct_algo= idct;
2141     if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2142     avctx->skip_frame= skip_frame;
2143     avctx->skip_idct= skip_idct;
2144     avctx->skip_loop_filter= skip_loop_filter;
2145     avctx->error_recognition= error_recognition;
2146     avctx->error_concealment= error_concealment;
2147     avctx->thread_count= thread_count;
2148
2149     if(codec->capabilities & CODEC_CAP_DR1)
2150         avctx->flags |= CODEC_FLAG_EMU_EDGE;
2151
2152     if (!codec ||
2153         avcodec_open2(avctx, codec, &opts) < 0)
2154         return -1;
2155     if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2156         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2157         return AVERROR_OPTION_NOT_FOUND;
2158     }
2159
2160     /* prepare audio output */
2161     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2162         if(avctx->sample_rate <= 0 || avctx->channels <= 0){
2163             fprintf(stderr, "Invalid sample rate or channel count\n");
2164             return -1;
2165         }
2166         wanted_spec.freq = avctx->sample_rate;
2167         wanted_spec.format = AUDIO_S16SYS;
2168         wanted_spec.channels = avctx->channels;
2169         wanted_spec.silence = 0;
2170         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2171         wanted_spec.callback = sdl_audio_callback;
2172         wanted_spec.userdata = is;
2173         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2174             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2175             return -1;
2176         }
2177         is->audio_hw_buf_size = spec.size;
2178         is->audio_src_fmt= AV_SAMPLE_FMT_S16;
2179     }
2180
2181     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2182     switch(avctx->codec_type) {
2183     case AVMEDIA_TYPE_AUDIO:
2184         is->audio_stream = stream_index;
2185         is->audio_st = ic->streams[stream_index];
2186         is->audio_buf_size = 0;
2187         is->audio_buf_index = 0;
2188
2189         /* init averaging filter */
2190         is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2191         is->audio_diff_avg_count = 0;
2192         /* since we do not have a precise enough measure of the audio FIFO fullness,
2193            we correct audio sync only if the error is larger than this threshold */
2194         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
2195
2196         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2197         packet_queue_init(&is->audioq);
2198         SDL_PauseAudio(0);
2199         break;
2200     case AVMEDIA_TYPE_VIDEO:
2201         is->video_stream = stream_index;
2202         is->video_st = ic->streams[stream_index];
2203
2204         packet_queue_init(&is->videoq);
2205         is->video_tid = SDL_CreateThread(video_thread, is);
2206         break;
2207     case AVMEDIA_TYPE_SUBTITLE:
2208         is->subtitle_stream = stream_index;
2209         is->subtitle_st = ic->streams[stream_index];
2210         packet_queue_init(&is->subtitleq);
2211
2212         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2213         break;
2214     default:
2215         break;
2216     }
2217     return 0;
2218 }
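
/*
 * Notes on the audio sync constants initialized above (arithmetic added
 * for illustration): the averaging coefficient
 *
 *     audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB)
 *                         = 0.01^(1/20) ~= 0.794
 *
 * makes the running average in synchronize_audio() weight the oldest of
 * the last AUDIO_DIFF_AVG_NB measurements at only about 1%.  The threshold
 * 2.0 * SDL_AUDIO_BUFFER_SIZE / sample_rate is two hardware buffers of
 * audio, e.g. 2 * 1024 / 44100 ~= 46 ms at 44.1 kHz; smaller errors are
 * not corrected.
 */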
2219
2220 static void stream_component_close(VideoState *is, int stream_index)
2221 {
2222     AVFormatContext *ic = is->ic;
2223     AVCodecContext *avctx;
2224
2225     if (stream_index < 0 || stream_index >= ic->nb_streams)
2226         return;
2227     avctx = ic->streams[stream_index]->codec;
2228
2229     switch(avctx->codec_type) {
2230     case AVMEDIA_TYPE_AUDIO:
2231         packet_queue_abort(&is->audioq);
2232
2233         SDL_CloseAudio();
2234
2235         packet_queue_end(&is->audioq);
2236         if (is->reformat_ctx)
2237             av_audio_convert_free(is->reformat_ctx);
2238         is->reformat_ctx = NULL;
2239         break;
2240     case AVMEDIA_TYPE_VIDEO:
2241         packet_queue_abort(&is->videoq);
2242
2243         /* note: we also signal this mutex to make sure we deblock the
2244            video thread in all cases */
2245         SDL_LockMutex(is->pictq_mutex);
2246         SDL_CondSignal(is->pictq_cond);
2247         SDL_UnlockMutex(is->pictq_mutex);
2248
2249         SDL_WaitThread(is->video_tid, NULL);
2250
2251         packet_queue_end(&is->videoq);
2252         break;
2253     case AVMEDIA_TYPE_SUBTITLE:
2254         packet_queue_abort(&is->subtitleq);
2255
2256         /* note: we also signal this mutex to make sure we deblock the
2257            subtitle thread in all cases */
2258         SDL_LockMutex(is->subpq_mutex);
2259         is->subtitle_stream_changed = 1;
2260
2261         SDL_CondSignal(is->subpq_cond);
2262         SDL_UnlockMutex(is->subpq_mutex);
2263
2264         SDL_WaitThread(is->subtitle_tid, NULL);
2265
2266         packet_queue_end(&is->subtitleq);
2267         break;
2268     default:
2269         break;
2270     }
2271
2272     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2273     avcodec_close(avctx);
2274     switch(avctx->codec_type) {
2275     case AVMEDIA_TYPE_AUDIO:
2276         is->audio_st = NULL;
2277         is->audio_stream = -1;
2278         break;
2279     case AVMEDIA_TYPE_VIDEO:
2280         is->video_st = NULL;
2281         is->video_stream = -1;
2282         break;
2283     case AVMEDIA_TYPE_SUBTITLE:
2284         is->subtitle_st = NULL;
2285         is->subtitle_stream = -1;
2286         break;
2287     default:
2288         break;
2289     }
2290 }
2291
2292 /* since we have only one decoding thread, we can use a global
2293    variable instead of a thread local variable */
2294 static VideoState *global_video_state;
2295
2296 static int decode_interrupt_cb(void)
2297 {
2298     return (global_video_state && global_video_state->abort_request);
2299 }
2300
2301 /* this thread gets the stream from the disk or the network */
2302 static int read_thread(void *arg)
2303 {
2304     VideoState *is = arg;
2305     AVFormatContext *ic = NULL;
2306     int err, i, ret;
2307     int st_index[AVMEDIA_TYPE_NB];
2308     AVPacket pkt1, *pkt = &pkt1;
2309     int eof=0;
2310     int pkt_in_play_range = 0;
2311     AVDictionaryEntry *t;
2312     AVDictionary **opts;
2313     int orig_nb_streams;
2314
2315     memset(st_index, -1, sizeof(st_index));
2316     is->video_stream = -1;
2317     is->audio_stream = -1;
2318     is->subtitle_stream = -1;
2319
2320     global_video_state = is;
2321     avio_set_interrupt_cb(decode_interrupt_cb);
2322
2323     err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2324     if (err < 0) {
2325         print_error(is->filename, err);
2326         ret = -1;
2327         goto fail;
2328     }
2329     if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2330         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2331         ret = AVERROR_OPTION_NOT_FOUND;
2332         goto fail;
2333     }
2334     is->ic = ic;
2335
2336     if(genpts)
2337         ic->flags |= AVFMT_FLAG_GENPTS;
2338
2339     opts = setup_find_stream_info_opts(ic, codec_opts);
2340     orig_nb_streams = ic->nb_streams;
2341
2342     err = avformat_find_stream_info(ic, opts);
2343     if (err < 0) {
2344         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2345         ret = -1;
2346         goto fail;
2347     }
2348     for (i = 0; i < orig_nb_streams; i++)
2349         av_dict_free(&opts[i]);
2350     av_freep(&opts);
2351
2352     if(ic->pb)
2353         ic->pb->eof_reached= 0; //FIXME hack, ffplay should probably not use url_feof() to test for the end
2354
2355     if(seek_by_bytes<0)
2356         seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2357
2358     /* if seeking requested, we execute it */
2359     if (start_time != AV_NOPTS_VALUE) {
2360         int64_t timestamp;
2361
2362         timestamp = start_time;
2363         /* add the stream start time */
2364         if (ic->start_time != AV_NOPTS_VALUE)
2365             timestamp += ic->start_time;
2366         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2367         if (ret < 0) {
2368             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2369                     is->filename, (double)timestamp / AV_TIME_BASE);
2370         }
2371     }
2372
2373     for (i = 0; i < ic->nb_streams; i++)
2374         ic->streams[i]->discard = AVDISCARD_ALL;
2375     if (!video_disable)
2376         st_index[AVMEDIA_TYPE_VIDEO] =
2377             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2378                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2379     if (!audio_disable)
2380         st_index[AVMEDIA_TYPE_AUDIO] =
2381             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2382                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2383                                 st_index[AVMEDIA_TYPE_VIDEO],
2384                                 NULL, 0);
2385     if (!video_disable)
2386         st_index[AVMEDIA_TYPE_SUBTITLE] =
2387             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2388                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2389                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2390                                  st_index[AVMEDIA_TYPE_AUDIO] :
2391                                  st_index[AVMEDIA_TYPE_VIDEO]),
2392                                 NULL, 0);
2393     if (show_status) {
2394         av_dump_format(ic, 0, is->filename, 0);
2395     }
2396
2397     is->show_mode = show_mode;
2398
2399     /* open the streams */
2400     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2401         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2402     }
2403
2404     ret=-1;
2405     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2406         ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2407     }
2408     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2409     if (is->show_mode == SHOW_MODE_NONE)
2410         is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2411
2412     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2413         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2414     }
2415
2416     if (is->video_stream < 0 && is->audio_stream < 0) {
2417         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2418         ret = -1;
2419         goto fail;
2420     }
2421
2422     for(;;) {
2423         if (is->abort_request)
2424             break;
2425         if (is->paused != is->last_paused) {
2426             is->last_paused = is->paused;
2427             if (is->paused)
2428                 is->read_pause_return= av_read_pause(ic);
2429             else
2430                 av_read_play(ic);
2431         }
2432 #if CONFIG_RTSP_DEMUXER
2433         if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2434             /* wait 10 ms to avoid trying to get another packet */
2435             /* XXX: horrible */
2436             SDL_Delay(10);
2437             continue;
2438         }
2439 #endif
2440         if (is->seek_req) {
2441             int64_t seek_target= is->seek_pos;
2442             int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2443             int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2444 //FIXME the +-2 is due to rounding being not done in the correct direction in generation
2445 //      of the seek_pos/seek_rel variables
2446
2447             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2448             if (ret < 0) {
2449                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2450             }else{
2451                 if (is->audio_stream >= 0) {
2452                     packet_queue_flush(&is->audioq);
2453                     packet_queue_put(&is->audioq, &flush_pkt);
2454                 }
2455                 if (is->subtitle_stream >= 0) {
2456                     packet_queue_flush(&is->subtitleq);
2457                     packet_queue_put(&is->subtitleq, &flush_pkt);
2458                 }
2459                 if (is->video_stream >= 0) {
2460                     packet_queue_flush(&is->videoq);
2461                     packet_queue_put(&is->videoq, &flush_pkt);
2462                 }
2463             }
2464             is->seek_req = 0;
2465             eof= 0;
2466         }
2467
2468         /* if the queues are full, no need to read more */
2469         if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2470             || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
2471                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
2472                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
2473             /* wait 10 ms */
2474             SDL_Delay(10);
2475             continue;
2476         }
2477         if(eof) {
2478             if(is->video_stream >= 0){
2479                 av_init_packet(pkt);
2480                 pkt->data=NULL;
2481                 pkt->size=0;
2482                 pkt->stream_index= is->video_stream;
2483                 packet_queue_put(&is->videoq, pkt);
2484             }
2485             SDL_Delay(10);
2486             if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
2487                 if(loop!=1 && (!loop || --loop)){
2488                     stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2489                 }else if(autoexit){
2490                     ret=AVERROR_EOF;
2491                     goto fail;
2492                 }
2493             }
2494             eof=0;
2495             continue;
2496         }
2497         ret = av_read_frame(ic, pkt);
2498         if (ret < 0) {
2499             if (ret == AVERROR_EOF || url_feof(ic->pb))
2500                 eof=1;
2501             if (ic->pb && ic->pb->error)
2502                 break;
2503             SDL_Delay(100); /* wait for user event */
2504             continue;
2505         }
2506         /* check if packet is in play range specified by user, then queue, otherwise discard */
2507         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2508                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2509                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2510                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
2511                 <= ((double)duration/1000000);
2512         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2513             packet_queue_put(&is->audioq, pkt);
2514         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2515             packet_queue_put(&is->videoq, pkt);
2516         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2517             packet_queue_put(&is->subtitleq, pkt);
2518         } else {
2519             av_free_packet(pkt);
2520         }
2521     }
2522     /* wait until the end */
2523     while (!is->abort_request) {
2524         SDL_Delay(100);
2525     }
2526
2527     ret = 0;
2528  fail:
2529     /* disable interrupting */
2530     global_video_state = NULL;
2531
2532     /* close each stream */
2533     if (is->audio_stream >= 0)
2534         stream_component_close(is, is->audio_stream);
2535     if (is->video_stream >= 0)
2536         stream_component_close(is, is->video_stream);
2537     if (is->subtitle_stream >= 0)
2538         stream_component_close(is, is->subtitle_stream);
2539     if (is->ic) {
2540         av_close_input_file(is->ic);
2541         is->ic = NULL; /* safety */
2542     }
2543     avio_set_interrupt_cb(NULL);
2544
2545     if (ret != 0) {
2546         SDL_Event event;
2547
2548         event.type = FF_QUIT_EVENT;
2549         event.user.data1 = is;
2550         SDL_PushEvent(&event);
2551     }
2552     return 0;
2553 }
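
/*
 * Example of the play-range test in the loop above (assumed numbers):
 * with -ss 60 -t 10 the requested window is 60..70 s.  A packet whose pts
 * converts to 65.0 s of stream time satisfies
 *
 *     65.0 - 60.0 <= 10.0
 *
 * and is queued; a packet at 75.0 s fails the test and is dropped with
 * av_free_packet().  When no -t duration is given, duration stays
 * AV_NOPTS_VALUE and the test short-circuits to true for every packet.
 */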
2554
2555 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2556 {
2557     VideoState *is;
2558
2559     is = av_mallocz(sizeof(VideoState));
2560     if (!is)
2561         return NULL;
2562     av_strlcpy(is->filename, filename, sizeof(is->filename));
2563     is->iformat = iformat;
2564     is->ytop = 0;
2565     is->xleft = 0;
2566
2567     /* start video display */
2568     is->pictq_mutex = SDL_CreateMutex();
2569     is->pictq_cond = SDL_CreateCond();
2570
2571     is->subpq_mutex = SDL_CreateMutex();
2572     is->subpq_cond = SDL_CreateCond();
2573
2574     is->av_sync_type = av_sync_type;
2575     is->read_tid = SDL_CreateThread(read_thread, is);
2576     if (!is->read_tid) {
2577         av_free(is);
2578         return NULL;
2579     }
2580     return is;
2581 }
2582
2583 static void stream_cycle_channel(VideoState *is, int codec_type)
2584 {
2585     AVFormatContext *ic = is->ic;
2586     int start_index, stream_index;
2587     AVStream *st;
2588
2589     if (codec_type == AVMEDIA_TYPE_VIDEO)
2590         start_index = is->video_stream;
2591     else if (codec_type == AVMEDIA_TYPE_AUDIO)
2592         start_index = is->audio_stream;
2593     else
2594         start_index = is->subtitle_stream;
2595     if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2596         return;
2597     stream_index = start_index;
2598     for(;;) {
2599         if (++stream_index >= is->ic->nb_streams)
2600         {
2601             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2602             {
2603                 stream_index = -1;
2604                 goto the_end;
2605             } else
2606                 stream_index = 0;
2607         }
2608         if (stream_index == start_index)
2609             return;
2610         st = ic->streams[stream_index];
2611         if (st->codec->codec_type == codec_type) {
2612             /* check that parameters are OK */
2613             switch(codec_type) {
2614             case AVMEDIA_TYPE_AUDIO:
2615                 if (st->codec->sample_rate != 0 &&
2616                     st->codec->channels != 0)
2617                     goto the_end;
2618                 break;
2619             case AVMEDIA_TYPE_VIDEO:
2620             case AVMEDIA_TYPE_SUBTITLE:
2621                 goto the_end;
2622             default:
2623                 break;
2624             }
2625         }
2626     }
2627  the_end:
2628     stream_component_close(is, start_index);
2629     stream_component_open(is, stream_index);
2630 }
2631
2632
2633 static void toggle_full_screen(void)
2634 {
2635     is_full_screen = !is_full_screen;
2636     video_open(cur_stream);
2637 }
2638
2639 static void toggle_pause(void)
2640 {
2641     if (cur_stream)
2642         stream_toggle_pause(cur_stream);
2643     step = 0;
2644 }
2645
2646 static void step_to_next_frame(void)
2647 {
2648     if (cur_stream) {
2649         /* if the stream is paused, unpause it, then step */
2650         if (cur_stream->paused)
2651             stream_toggle_pause(cur_stream);
2652     }
2653     step = 1;
2654 }
2655
2656 static void toggle_audio_display(void)
2657 {
2658     if (cur_stream) {
2659         int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2660         cur_stream->show_mode = (cur_stream->show_mode + 1) % SHOW_MODE_NB;
2661         fill_rectangle(screen,
2662                     cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2663                     bgcolor);
2664         SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2665     }
2666 }
2667
2668 /* handle an event sent by the GUI */
2669 static void event_loop(void)
2670 {
2671     SDL_Event event;
2672     double incr, pos, frac;
2673
2674     for(;;) {
2675         double x;
2676         SDL_WaitEvent(&event);
2677         switch(event.type) {
2678         case SDL_KEYDOWN:
2679             if (exit_on_keydown) {
2680                 do_exit();
2681                 break;
2682             }
2683             switch(event.key.keysym.sym) {
2684             case SDLK_ESCAPE:
2685             case SDLK_q:
2686                 do_exit();
2687                 break;
2688             case SDLK_f:
2689                 toggle_full_screen();
2690                 break;
2691             case SDLK_p:
2692             case SDLK_SPACE:
2693                 toggle_pause();
2694                 break;
2695             case SDLK_s: //S: Step to next frame
2696                 step_to_next_frame();
2697                 break;
2698             case SDLK_a:
2699                 if (cur_stream)
2700                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2701                 break;
2702             case SDLK_v:
2703                 if (cur_stream)
2704                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2705                 break;
2706             case SDLK_t:
2707                 if (cur_stream)
2708                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2709                 break;
2710             case SDLK_w:
2711                 toggle_audio_display();
2712                 break;
2713             case SDLK_LEFT:
2714                 incr = -10.0;
2715                 goto do_seek;
2716             case SDLK_RIGHT:
2717                 incr = 10.0;
2718                 goto do_seek;
2719             case SDLK_UP:
2720                 incr = 60.0;
2721                 goto do_seek;
2722             case SDLK_DOWN:
2723                 incr = -60.0;
2724             do_seek:
2725                 if (cur_stream) {
2726                     if (seek_by_bytes) {
2727                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
2728                             pos= cur_stream->video_current_pos;
2729                         }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
2730                             pos= cur_stream->audio_pkt.pos;
2731                         }else
2732                             pos = avio_tell(cur_stream->ic->pb);
2733                         if (cur_stream->ic->bit_rate)
2734                             incr *= cur_stream->ic->bit_rate / 8.0;
2735                         else
2736                             incr *= 180000.0;
2737                         pos += incr;
2738                         stream_seek(cur_stream, pos, incr, 1);
2739                     } else {
2740                         pos = get_master_clock(cur_stream);
2741                         pos += incr;
2742                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2743                     }
2744                 }
2745                 break;
2746             default:
2747                 break;
2748             }
2749             break;
2750         case SDL_MOUSEBUTTONDOWN:
2751             if (exit_on_mousedown) {
2752                 do_exit();
2753                 break;
2754             }
2755         case SDL_MOUSEMOTION:
2756             if(event.type ==SDL_MOUSEBUTTONDOWN){
2757                 x= event.button.x;
2758             }else{
2759                 if(event.motion.state != SDL_PRESSED)
2760                     break;
2761                 x= event.motion.x;
2762             }
2763             if (cur_stream) {
2764                 if(seek_by_bytes || cur_stream->ic->duration<=0){
2765                     uint64_t size=  avio_size(cur_stream->ic->pb);
2766                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2767                 }else{
2768                     int64_t ts;
2769                     int ns, hh, mm, ss;
2770                     int tns, thh, tmm, tss;
2771                     tns = cur_stream->ic->duration/1000000LL;
2772                     thh = tns/3600;
2773                     tmm = (tns%3600)/60;
2774                     tss = (tns%60);
2775                     frac = x/cur_stream->width;
2776                     ns = frac*tns;
2777                     hh = ns/3600;
2778                     mm = (ns%3600)/60;
2779                     ss = (ns%60);
2780                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2781                             hh, mm, ss, thh, tmm, tss);
2782                     ts = frac*cur_stream->ic->duration;
2783                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2784                         ts += cur_stream->ic->start_time;
2785                     stream_seek(cur_stream, ts, 0, 0);
2786                 }
2787             }
2788             break;
2789         case SDL_VIDEORESIZE:
2790             if (cur_stream) {
2791                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2792                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2793                 screen_width = cur_stream->width = event.resize.w;
2794                 screen_height= cur_stream->height= event.resize.h;
2795             }
2796             break;
2797         case SDL_QUIT:
2798         case FF_QUIT_EVENT:
2799             do_exit();
2800             break;
2801         case FF_ALLOC_EVENT:
2802             video_open(event.user.data1);
2803             alloc_picture(event.user.data1);
2804             break;
2805         case FF_REFRESH_EVENT:
2806             video_refresh(event.user.data1);
2807             cur_stream->refresh=0;
2808             break;
2809         default:
2810             break;
2811         }
2812     }
2813 }
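
/*
 * Mouse-seek arithmetic from the handler above, with made-up numbers:
 * clicking at x = 200 in an 800 pixel wide window gives frac = 0.25, so
 * for a 2 hour file (ic->duration = 7200 * 1000000) the target is
 * ts = 0.25 * duration, i.e. 30 minutes in, offset by ic->start_time when
 * the container defines one.  In byte-seek mode the same fraction is
 * applied to avio_size() of the input instead.
 */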
2814
2815 static int opt_frame_size(const char *opt, const char *arg)
2816 {
2817     av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n");
2818     return opt_default("video_size", arg);
2819 }
2820
2821 static int opt_width(const char *opt, const char *arg)
2822 {
2823     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2824     return 0;
2825 }
2826
2827 static int opt_height(const char *opt, const char *arg)
2828 {
2829     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2830     return 0;
2831 }
2832
2833 static int opt_format(const char *opt, const char *arg)
2834 {
2835     file_iformat = av_find_input_format(arg);
2836     if (!file_iformat) {
2837         fprintf(stderr, "Unknown input format: %s\n", arg);
2838         return AVERROR(EINVAL);
2839     }
2840     return 0;
2841 }
2842
2843 static int opt_frame_pix_fmt(const char *opt, const char *arg)
2844 {
2845     av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n");
2846     return opt_default("pixel_format", arg);
2847 }
2848
2849 static int opt_sync(const char *opt, const char *arg)
2850 {
2851     if (!strcmp(arg, "audio"))
2852         av_sync_type = AV_SYNC_AUDIO_MASTER;
2853     else if (!strcmp(arg, "video"))
2854         av_sync_type = AV_SYNC_VIDEO_MASTER;
2855     else if (!strcmp(arg, "ext"))
2856         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2857     else {
2858         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2859         exit(1);
2860     }
2861     return 0;
2862 }
2863
2864 static int opt_seek(const char *opt, const char *arg)
2865 {
2866     start_time = parse_time_or_die(opt, arg, 1);
2867     return 0;
2868 }
2869
2870 static int opt_duration(const char *opt, const char *arg)
2871 {
2872     duration = parse_time_or_die(opt, arg, 1);
2873     return 0;
2874 }
2875
2876 static int opt_thread_count(const char *opt, const char *arg)
2877 {
2878     thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2879 #if !HAVE_THREADS
2880     fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2881 #endif
2882     return 0;
2883 }
2884
2885 static int opt_show_mode(const char *opt, const char *arg)
2886 {
2887     show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
2888                 !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
2889                 !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT  :
2890                 parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
2891     return 0;
2892 }
2893
2894 static int opt_input_file(const char *opt, const char *filename)
2895 {
2896     if (input_filename) {
2897         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
2898                 filename, input_filename);
2899         exit(1);
2900     }
2901     if (!strcmp(filename, "-"))
2902         filename = "pipe:";
2903     input_filename = filename;
2904     return 0;
2905 }
2906
2907 static const OptionDef options[] = {
2908 #include "cmdutils_common_opts.h"
2909     { "x", HAS_ARG, {(void*)opt_width}, "force displayed width", "width" },
2910     { "y", HAS_ARG, {(void*)opt_height}, "force displayed height", "height" },
2911     { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
2912     { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
2913     { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
2914     { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
2915     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
2916     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
2917     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
2918     { "ss", HAS_ARG, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
2919     { "t", HAS_ARG, {(void*)&opt_duration}, "play \"duration\" seconds of audio/video", "duration" },
2920     { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
2921     { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
2922     { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
2923     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
2924     { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
2925     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
2926     { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
2927     { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
2928     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
2929     { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
2930     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
2931     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
2932     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
2933     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
2934     { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
2935     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
2936     { "sync", HAS_ARG | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
2937     { "threads", HAS_ARG | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
2938     { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
2939     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
2940     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
2941     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
2942     { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
2943     { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
2944 #if CONFIG_AVFILTER
2945     { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
2946 #endif
2947     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
2948     { "showmode", HAS_ARG, {(void*)opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
2949     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
2950     { "i", HAS_ARG, {(void *)opt_input_file}, "read specified file", "input_file"},
2951     { NULL, },
2952 };
2953
2954 static void show_usage(void)
2955 {
2956     printf("Simple media player\n");
2957     printf("usage: %s [options] input_file\n", program_name);
2958     printf("\n");
2959 }
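
/*
 * A typical invocation, given purely as an illustration of the option
 * table above (file name and values are made up):
 *
 *     ffplay -ss 00:01:30 -t 20 -sync audio -showmode waves input.mkv
 *
 * i.e. start 90 seconds into the file, play 20 seconds, keep the audio
 * clock as master and show the waveform instead of the video frames.
 */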
2960
2961 static int opt_help(const char *opt, const char *arg)
2962 {
2963     av_log_set_callback(log_callback_help);
2964     show_usage();
2965     show_help_options(options, "Main options:\n",
2966                       OPT_EXPERT, 0);
2967     show_help_options(options, "\nAdvanced options:\n",
2968                       OPT_EXPERT, OPT_EXPERT);
2969     printf("\n");
2970     av_opt_show2(avcodec_opts[0], NULL,
2971                  AV_OPT_FLAG_DECODING_PARAM, 0);
2972     printf("\n");
2973     av_opt_show2(avformat_opts, NULL,
2974                  AV_OPT_FLAG_DECODING_PARAM, 0);
2975 #if !CONFIG_AVFILTER
2976     printf("\n");
2977     av_opt_show2(sws_opts, NULL,
2978                  AV_OPT_FLAG_ENCODING_PARAM, 0);
2979 #endif
2980     printf("\nWhile playing:\n"
2981            "q, ESC              quit\n"
2982            "f                   toggle full screen\n"
2983            "p, SPC              pause\n"
2984            "a                   cycle audio channel\n"
2985            "v                   cycle video channel\n"
2986            "t                   cycle subtitle channel\n"
2987            "w                   show audio waves\n"
2988            "s                   activate frame-step mode\n"
2989            "left/right          seek backward/forward 10 seconds\n"
2990            "down/up             seek backward/forward 1 minute\n"
2991            "mouse click         seek to percentage in file corresponding to fraction of width\n"
2992            );
2993     return 0;
2994 }
2995
2996 /* Called from the main */
2997 int main(int argc, char **argv)
2998 {
2999     int flags;
3000
3001     av_log_set_flags(AV_LOG_SKIP_REPEATED);
3002
3003     /* register all codecs, demuxers and protocols */
3004     avcodec_register_all();
3005 #if CONFIG_AVDEVICE
3006     avdevice_register_all();
3007 #endif
3008 #if CONFIG_AVFILTER
3009     avfilter_register_all();
3010 #endif
3011     av_register_all();
3012
3013     init_opts();
3014
3015     show_banner();
3016
3017     parse_options(argc, argv, options, opt_input_file);
3018
3019     if (!input_filename) {
3020         show_usage();
3021         fprintf(stderr, "An input file must be specified\n");
3022         fprintf(stderr, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3023         exit(1);
3024     }
3025
3026     if (display_disable) {
3027         video_disable = 1;
3028     }
3029     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3030     if (audio_disable)
3031         flags &= ~SDL_INIT_AUDIO;
3032 #if !defined(__MINGW32__) && !defined(__APPLE__)
3033     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3034 #endif
3035     if (SDL_Init (flags)) {
3036         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3037         fprintf(stderr, "(Did you set the DISPLAY variable?)\n");
3038         exit(1);
3039     }
3040
3041     if (!display_disable) {
3042 #if HAVE_SDL_VIDEO_SIZE
3043         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3044         fs_screen_width = vi->current_w;
3045         fs_screen_height = vi->current_h;
3046 #endif
3047     }
3048
3049     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3050     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3051     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3052
3053     av_init_packet(&flush_pkt);
3054     flush_pkt.data= "FLUSH";
3055
3056     cur_stream = stream_open(input_filename, file_iformat);
3057
3058     event_loop();
3059
3060     /* never returns */
3061
3062     return 0;
3063 }